CombinedText stringlengths 4 3.42M |
|---|
// Copyright 2021, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A library for passing arbitrary file descriptors when spawning child processes.
//!
//! # Example
//!
//! ```rust
//! use command_fds::{CommandFdExt, FdMapping};
//! use std::fs::File;
//! use std::os::unix::io::AsRawFd;
//! use std::process::Command;
//!
//! // Open a file.
//! let file = File::open("Cargo.toml").unwrap();
//!
//! // Prepare to run `ls -l /proc/self/fd` with some FDs mapped.
//! let mut command = Command::new("ls");
//! command.arg("-l").arg("/proc/self/fd");
//! command
//! .fd_mappings(vec![
//! // Map `file` as FD 3 in the child process.
//! FdMapping {
//! parent_fd: file.as_raw_fd(),
//! child_fd: 3,
//! },
//! // Map this process's stdin as FD 5 in the child process.
//! FdMapping {
//! parent_fd: 0,
//! child_fd: 5,
//! },
//! ])
//! .unwrap();
//!
//! // Spawn the child process.
//! let mut child = command.spawn().unwrap();
//! child.wait().unwrap();
//! ```
use nix::fcntl::{fcntl, FcntlArg, FdFlag};
use nix::unistd::dup2;
use std::cmp::max;
use std::io::{self, ErrorKind};
use std::os::unix::io::RawFd;
use std::os::unix::process::CommandExt;
use std::process::Command;
use thiserror::Error;
/// A mapping from a file descriptor in the parent to a file descriptor in the child, to be applied
/// when spawning a child process.
///
/// The parent_fd must be kept open until after the child is spawned.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FdMapping {
/// The file descriptor in the parent process to pass to the child.
pub parent_fd: RawFd,
/// The file descriptor number the child process will see it as.
pub child_fd: RawFd,
}
/// Error setting up FD mappings, because there were two or more mappings for the same child FD.
///
/// Returned by [`CommandFdExt::fd_mappings`]; the requested FD layout would be ambiguous.
#[derive(Copy, Clone, Debug, Eq, Error, PartialEq)]
#[error("Two or more mappings for the same child FD")]
pub struct FdMappingCollision;
/// Extension to add file descriptor mappings to a [`Command`].
pub trait CommandFdExt {
/// Adds the given set of file descriptors to the command.
///
/// Returns [`FdMappingCollision`] if two or more mappings target the same child FD.
///
/// Calling this more than once on the same command may result in unexpected behaviour.
fn fd_mappings(&mut self, mappings: Vec<FdMapping>) -> Result<(), FdMappingCollision>;
}
impl CommandFdExt for Command {
fn fd_mappings(&mut self, mappings: Vec<FdMapping>) -> Result<(), FdMappingCollision> {
// Validate that there are no conflicting mappings to the same child FD.
// Sort + dedup a copy of the child FDs: any duplicate shrinks the list.
let mut child_fds: Vec<RawFd> = mappings.iter().map(|mapping| mapping.child_fd).collect();
child_fds.sort_unstable();
child_fds.dedup();
if child_fds.len() != mappings.len() {
return Err(FdMappingCollision);
}
// Register the callback to apply the mappings after forking but before execing.
// NOTE(review): `pre_exec` runs in the forked child; `map_fds` allocates (Vec
// collect) which is not async-signal-safe in general — confirm this is acceptable
// for the supported platforms.
unsafe {
self.pre_exec(move || map_fds(&mappings));
}
Ok(())
}
}
// Applies the FD mappings in the forked child, just before exec.
//
// Strategy: first move any parent FD that would be clobbered by a child FD out of the
// way (to a temporary FD above every mapped FD), then dup2 everything into place.
fn map_fds(mappings: &[FdMapping]) -> io::Result<()> {
if mappings.is_empty() {
// No need to do anything, and finding first_unused_fd would fail.
return Ok(());
}
// Find the first FD which is higher than any parent or child FD in the mapping, so we can
// safely use it and higher FDs as temporary FDs. There may be other files open with these FDs,
// so we still need to ensure we don't conflict with them.
// (`unwrap` is safe: the empty case returned early above.)
let first_safe_fd = mappings
.iter()
.map(|mapping| max(mapping.parent_fd, mapping.child_fd))
.max()
.unwrap()
+ 1;
// If any parent FDs conflict with child FDs, then first duplicate them to a temporary FD which
// is clear of either range. Mappings to the same FD are fine though, we can handle them by just
// removing the FD_CLOEXEC flag.
let child_fds: Vec<RawFd> = mappings.iter().map(|mapping| mapping.child_fd).collect();
let mappings = mappings
.iter()
.map(|mapping| {
Ok(
if child_fds.contains(&mapping.parent_fd) && mapping.parent_fd != mapping.child_fd {
// F_DUPFD_CLOEXEC picks the lowest free FD >= first_safe_fd, so other
// open files in that range are skipped automatically.
let temporary_fd =
fcntl(mapping.parent_fd, FcntlArg::F_DUPFD_CLOEXEC(first_safe_fd))?;
FdMapping {
parent_fd: temporary_fd,
child_fd: mapping.child_fd,
}
} else {
mapping.to_owned()
},
)
})
.collect::<nix::Result<Vec<_>>>()
.map_err(nix_to_io_error)?;
// Now we can actually duplicate FDs to the desired child FDs.
for mapping in mappings {
if mapping.child_fd == mapping.parent_fd {
// Remove the FD_CLOEXEC flag, so the FD will be kept open when exec is called for the
// child.
fcntl(mapping.parent_fd, FcntlArg::F_SETFD(FdFlag::empty()))
.map_err(nix_to_io_error)?;
} else {
// This closes child_fd if it is already open as something else, and clears the
// FD_CLOEXEC flag on child_fd.
dup2(mapping.parent_fd, mapping.child_fd).map_err(nix_to_io_error)?;
}
}
Ok(())
}
/// Convert a [`nix::Error`] to a [`std::io::Error`].
fn nix_to_io_error(error: nix::Error) -> io::Error {
if let nix::Error::Sys(errno) = error {
io::Error::from_raw_os_error(errno as i32)
} else {
io::Error::new(ErrorKind::Other, error)
}
}
#[cfg(test)]
mod tests {
use super::*;
use nix::unistd::close;
use std::collections::HashSet;
use std::fs::{read_dir, File};
use std::os::unix::io::AsRawFd;
use std::process::Output;
use std::str;
use std::sync::Once;
// Guards close_excess_fds so it runs exactly once for the whole test binary.
static SETUP: Once = Once::new();
#[test]
fn conflicting_mappings() {
setup();
let mut command = Command::new("ls");
// The same mapping can't be included twice.
assert_eq!(
command.fd_mappings(vec![
FdMapping {
child_fd: 4,
parent_fd: 5,
},
FdMapping {
child_fd: 4,
parent_fd: 5,
},
]),
Err(FdMappingCollision)
);
// Mapping two different FDs to the same FD isn't allowed either.
assert_eq!(
command.fd_mappings(vec![
FdMapping {
child_fd: 4,
parent_fd: 5,
},
FdMapping {
child_fd: 4,
parent_fd: 6,
},
]),
Err(FdMappingCollision)
);
}
#[test]
fn no_mappings() {
setup();
let mut command = Command::new("ls");
command.arg("/proc/self/fd");
assert_eq!(command.fd_mappings(vec![]), Ok(()));
let output = command.output().unwrap();
// FD 3 is presumably the /proc/self/fd directory handle held by ls itself.
expect_fds(&output, &[0, 1, 2, 3], 0);
}
#[test]
fn one_mapping() {
setup();
let mut command = Command::new("ls");
command.arg("/proc/self/fd");
let file = File::open("testdata/file1.txt").unwrap();
// Map the file an otherwise unused FD.
assert_eq!(
command.fd_mappings(vec![FdMapping {
parent_fd: file.as_raw_fd(),
child_fd: 5,
},]),
Ok(())
);
let output = command.output().unwrap();
expect_fds(&output, &[0, 1, 2, 3, 5], 0);
}
#[test]
fn swap_mappings() {
setup();
let mut command = Command::new("ls");
command.arg("/proc/self/fd");
let file1 = File::open("testdata/file1.txt").unwrap();
let file2 = File::open("testdata/file2.txt").unwrap();
let fd1 = file1.as_raw_fd();
let fd2 = file2.as_raw_fd();
// Map files to each other's FDs, to ensure that the temporary FD logic works.
assert_eq!(
command.fd_mappings(vec![
FdMapping {
parent_fd: fd1,
child_fd: fd2,
},
FdMapping {
parent_fd: fd2,
child_fd: fd1,
},
]),
Ok(())
);
let output = command.output().unwrap();
// Expect one more Fd for the /proc/self/fd directory. We can't predict what number it will
// be assigned, because 3 might or might not be taken already by fd1 or fd2.
expect_fds(&output, &[0, 1, 2, fd1, fd2], 1);
}
#[test]
fn one_to_one_mapping() {
setup();
let mut command = Command::new("ls");
command.arg("/proc/self/fd");
let file1 = File::open("testdata/file1.txt").unwrap();
let file2 = File::open("testdata/file2.txt").unwrap();
let fd1 = file1.as_raw_fd();
// Map files to each other's FDs, to ensure that the temporary FD logic works.
assert_eq!(
command.fd_mappings(vec![FdMapping {
parent_fd: fd1,
child_fd: fd1,
}]),
Ok(())
);
let output = command.output().unwrap();
// Expect one more Fd for the /proc/self/fd directory. We can't predict what number it will
// be assigned, because 3 might or might not be taken already by fd1 or fd2.
expect_fds(&output, &[0, 1, 2, fd1], 1);
// Keep file2 open until the end, to ensure that it's not passed to the child.
drop(file2);
}
#[test]
fn map_stdin() {
setup();
let mut command = Command::new("cat");
let file = File::open("testdata/file1.txt").unwrap();
// Map the file to stdin.
assert_eq!(
command.fd_mappings(vec![FdMapping {
parent_fd: file.as_raw_fd(),
child_fd: 0,
},]),
Ok(())
);
let output = command.output().unwrap();
assert!(output.status.success());
assert_eq!(output.stdout, b"test 1");
}
/// Parse the output of ls into a set of filenames
fn parse_ls_output(output: &[u8]) -> HashSet<String> {
str::from_utf8(output)
.unwrap()
.split_terminator("\n")
.map(str::to_owned)
.collect()
}
/// Check that the output of `ls /proc/self/fd` contains the expected set of FDs, plus exactly
/// `extra` extra FDs.
fn expect_fds(output: &Output, expected_fds: &[RawFd], extra: usize) {
assert!(output.status.success());
let expected_fds: HashSet<String> = expected_fds.iter().map(RawFd::to_string).collect();
let fds = parse_ls_output(&output.stdout);
if extra == 0 {
assert_eq!(fds, expected_fds);
} else {
assert!(expected_fds.is_subset(&fds));
assert_eq!(fds.len(), expected_fds.len() + extra);
}
}
fn setup() {
SETUP.call_once(close_excess_fds);
}
/// Close all file descriptors apart from stdin, stdout and stderr.
///
/// This is necessary because GitHub Actions opens a bunch of others for some reason.
fn close_excess_fds() {
let dir = read_dir("/proc/self/fd").unwrap();
for entry in dir {
let entry = entry.unwrap();
let fd: RawFd = entry.file_name().to_str().unwrap().parse().unwrap();
// fd 3 is skipped too — presumably the read_dir handle itself; confirm.
if fd > 3 {
close(fd).unwrap();
}
}
}
}
Return &mut self from fd_mappings.
// Copyright 2021, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A library for passing arbitrary file descriptors when spawning child processes.
//!
//! # Example
//!
//! ```rust
//! use command_fds::{CommandFdExt, FdMapping};
//! use std::fs::File;
//! use std::os::unix::io::AsRawFd;
//! use std::process::Command;
//!
//! // Open a file.
//! let file = File::open("Cargo.toml").unwrap();
//!
//! // Prepare to run `ls -l /proc/self/fd` with some FDs mapped.
//! let mut command = Command::new("ls");
//! command.arg("-l").arg("/proc/self/fd");
//! command
//! .fd_mappings(vec![
//! // Map `file` as FD 3 in the child process.
//! FdMapping {
//! parent_fd: file.as_raw_fd(),
//! child_fd: 3,
//! },
//! // Map this process's stdin as FD 5 in the child process.
//! FdMapping {
//! parent_fd: 0,
//! child_fd: 5,
//! },
//! ])
//! .unwrap();
//!
//! // Spawn the child process.
//! let mut child = command.spawn().unwrap();
//! child.wait().unwrap();
//! ```
use nix::fcntl::{fcntl, FcntlArg, FdFlag};
use nix::unistd::dup2;
use std::cmp::max;
use std::io::{self, ErrorKind};
use std::os::unix::io::RawFd;
use std::os::unix::process::CommandExt;
use std::process::Command;
use thiserror::Error;
/// A mapping from a file descriptor in the parent to a file descriptor in the child, to be applied
/// when spawning a child process.
///
/// The parent_fd must be kept open until after the child is spawned.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FdMapping {
/// The file descriptor in the parent process to pass to the child.
pub parent_fd: RawFd,
/// The file descriptor number the child process will see it as.
pub child_fd: RawFd,
}
/// Error setting up FD mappings, because there were two or more mappings for the same child FD.
///
/// Returned by [`CommandFdExt::fd_mappings`]; the requested FD layout would be ambiguous.
#[derive(Copy, Clone, Debug, Eq, Error, PartialEq)]
#[error("Two or more mappings for the same child FD")]
pub struct FdMappingCollision;
/// Extension to add file descriptor mappings to a [`Command`].
pub trait CommandFdExt {
/// Adds the given set of file descriptors to the command.
///
/// On success returns `&mut self` so calls can be chained, like `Command`'s own builder
/// methods. Returns [`FdMappingCollision`] if two or more mappings target the same child FD.
///
/// Calling this more than once on the same command may result in unexpected behaviour.
fn fd_mappings(&mut self, mappings: Vec<FdMapping>) -> Result<&mut Self, FdMappingCollision>;
}
impl CommandFdExt for Command {
fn fd_mappings(&mut self, mappings: Vec<FdMapping>) -> Result<&mut Self, FdMappingCollision> {
// Validate that there are no conflicting mappings to the same child FD.
// Sort + dedup a copy of the child FDs: any duplicate shrinks the list.
let mut child_fds: Vec<RawFd> = mappings.iter().map(|mapping| mapping.child_fd).collect();
child_fds.sort_unstable();
child_fds.dedup();
if child_fds.len() != mappings.len() {
return Err(FdMappingCollision);
}
// Register the callback to apply the mappings after forking but before execing.
// NOTE(review): `pre_exec` runs in the forked child; `map_fds` allocates (Vec
// collect) which is not async-signal-safe in general — confirm this is acceptable
// for the supported platforms.
unsafe {
self.pre_exec(move || map_fds(&mappings));
}
// Return the command itself to allow builder-style chaining.
Ok(self)
}
}
// Applies the FD mappings in the forked child, just before exec.
//
// Strategy: first move any parent FD that would be clobbered by a child FD out of the
// way (to a temporary FD above every mapped FD), then dup2 everything into place.
fn map_fds(mappings: &[FdMapping]) -> io::Result<()> {
if mappings.is_empty() {
// No need to do anything, and finding first_unused_fd would fail.
return Ok(());
}
// Find the first FD which is higher than any parent or child FD in the mapping, so we can
// safely use it and higher FDs as temporary FDs. There may be other files open with these FDs,
// so we still need to ensure we don't conflict with them.
// (`unwrap` is safe: the empty case returned early above.)
let first_safe_fd = mappings
.iter()
.map(|mapping| max(mapping.parent_fd, mapping.child_fd))
.max()
.unwrap()
+ 1;
// If any parent FDs conflict with child FDs, then first duplicate them to a temporary FD which
// is clear of either range. Mappings to the same FD are fine though, we can handle them by just
// removing the FD_CLOEXEC flag.
let child_fds: Vec<RawFd> = mappings.iter().map(|mapping| mapping.child_fd).collect();
let mappings = mappings
.iter()
.map(|mapping| {
Ok(
if child_fds.contains(&mapping.parent_fd) && mapping.parent_fd != mapping.child_fd {
// F_DUPFD_CLOEXEC picks the lowest free FD >= first_safe_fd, so other
// open files in that range are skipped automatically.
let temporary_fd =
fcntl(mapping.parent_fd, FcntlArg::F_DUPFD_CLOEXEC(first_safe_fd))?;
FdMapping {
parent_fd: temporary_fd,
child_fd: mapping.child_fd,
}
} else {
mapping.to_owned()
},
)
})
.collect::<nix::Result<Vec<_>>>()
.map_err(nix_to_io_error)?;
// Now we can actually duplicate FDs to the desired child FDs.
for mapping in mappings {
if mapping.child_fd == mapping.parent_fd {
// Remove the FD_CLOEXEC flag, so the FD will be kept open when exec is called for the
// child.
fcntl(mapping.parent_fd, FcntlArg::F_SETFD(FdFlag::empty()))
.map_err(nix_to_io_error)?;
} else {
// This closes child_fd if it is already open as something else, and clears the
// FD_CLOEXEC flag on child_fd.
dup2(mapping.parent_fd, mapping.child_fd).map_err(nix_to_io_error)?;
}
}
Ok(())
}
/// Convert a [`nix::Error`] to a [`std::io::Error`].
fn nix_to_io_error(error: nix::Error) -> io::Error {
if let nix::Error::Sys(errno) = error {
io::Error::from_raw_os_error(errno as i32)
} else {
io::Error::new(ErrorKind::Other, error)
}
}
#[cfg(test)]
mod tests {
use super::*;
use nix::unistd::close;
use std::collections::HashSet;
use std::fs::{read_dir, File};
use std::os::unix::io::AsRawFd;
use std::process::Output;
use std::str;
use std::sync::Once;
// Guards close_excess_fds so it runs exactly once for the whole test binary.
static SETUP: Once = Once::new();
#[test]
fn conflicting_mappings() {
setup();
let mut command = Command::new("ls");
// The same mapping can't be included twice.
assert!(command
.fd_mappings(vec![
FdMapping {
child_fd: 4,
parent_fd: 5,
},
FdMapping {
child_fd: 4,
parent_fd: 5,
},
])
.is_err());
// Mapping two different FDs to the same FD isn't allowed either.
assert!(command
.fd_mappings(vec![
FdMapping {
child_fd: 4,
parent_fd: 5,
},
FdMapping {
child_fd: 4,
parent_fd: 6,
},
])
.is_err());
}
#[test]
fn no_mappings() {
setup();
let mut command = Command::new("ls");
command.arg("/proc/self/fd");
assert!(command.fd_mappings(vec![]).is_ok());
let output = command.output().unwrap();
// FD 3 is presumably the /proc/self/fd directory handle held by ls itself.
expect_fds(&output, &[0, 1, 2, 3], 0);
}
#[test]
fn one_mapping() {
setup();
let mut command = Command::new("ls");
command.arg("/proc/self/fd");
let file = File::open("testdata/file1.txt").unwrap();
// Map the file an otherwise unused FD.
assert!(command
.fd_mappings(vec![FdMapping {
parent_fd: file.as_raw_fd(),
child_fd: 5,
},])
.is_ok());
let output = command.output().unwrap();
expect_fds(&output, &[0, 1, 2, 3, 5], 0);
}
#[test]
fn swap_mappings() {
setup();
let mut command = Command::new("ls");
command.arg("/proc/self/fd");
let file1 = File::open("testdata/file1.txt").unwrap();
let file2 = File::open("testdata/file2.txt").unwrap();
let fd1 = file1.as_raw_fd();
let fd2 = file2.as_raw_fd();
// Map files to each other's FDs, to ensure that the temporary FD logic works.
assert!(command
.fd_mappings(vec![
FdMapping {
parent_fd: fd1,
child_fd: fd2,
},
FdMapping {
parent_fd: fd2,
child_fd: fd1,
},
])
.is_ok(),);
let output = command.output().unwrap();
// Expect one more Fd for the /proc/self/fd directory. We can't predict what number it will
// be assigned, because 3 might or might not be taken already by fd1 or fd2.
expect_fds(&output, &[0, 1, 2, fd1, fd2], 1);
}
#[test]
fn one_to_one_mapping() {
setup();
let mut command = Command::new("ls");
command.arg("/proc/self/fd");
let file1 = File::open("testdata/file1.txt").unwrap();
let file2 = File::open("testdata/file2.txt").unwrap();
let fd1 = file1.as_raw_fd();
// Map files to each other's FDs, to ensure that the temporary FD logic works.
assert!(command
.fd_mappings(vec![FdMapping {
parent_fd: fd1,
child_fd: fd1,
}])
.is_ok());
let output = command.output().unwrap();
// Expect one more Fd for the /proc/self/fd directory. We can't predict what number it will
// be assigned, because 3 might or might not be taken already by fd1 or fd2.
expect_fds(&output, &[0, 1, 2, fd1], 1);
// Keep file2 open until the end, to ensure that it's not passed to the child.
drop(file2);
}
#[test]
fn map_stdin() {
setup();
let mut command = Command::new("cat");
let file = File::open("testdata/file1.txt").unwrap();
// Map the file to stdin.
assert!(command
.fd_mappings(vec![FdMapping {
parent_fd: file.as_raw_fd(),
child_fd: 0,
},])
.is_ok());
let output = command.output().unwrap();
assert!(output.status.success());
assert_eq!(output.stdout, b"test 1");
}
/// Parse the output of ls into a set of filenames
fn parse_ls_output(output: &[u8]) -> HashSet<String> {
str::from_utf8(output)
.unwrap()
.split_terminator("\n")
.map(str::to_owned)
.collect()
}
/// Check that the output of `ls /proc/self/fd` contains the expected set of FDs, plus exactly
/// `extra` extra FDs.
fn expect_fds(output: &Output, expected_fds: &[RawFd], extra: usize) {
assert!(output.status.success());
let expected_fds: HashSet<String> = expected_fds.iter().map(RawFd::to_string).collect();
let fds = parse_ls_output(&output.stdout);
if extra == 0 {
assert_eq!(fds, expected_fds);
} else {
assert!(expected_fds.is_subset(&fds));
assert_eq!(fds.len(), expected_fds.len() + extra);
}
}
fn setup() {
SETUP.call_once(close_excess_fds);
}
/// Close all file descriptors apart from stdin, stdout and stderr.
///
/// This is necessary because GitHub Actions opens a bunch of others for some reason.
fn close_excess_fds() {
let dir = read_dir("/proc/self/fd").unwrap();
for entry in dir {
let entry = entry.unwrap();
let fd: RawFd = entry.file_name().to_str().unwrap().parse().unwrap();
// fd 3 is skipped too — presumably the read_dir handle itself; confirm.
if fd > 3 {
close(fd).unwrap();
}
}
}
}
|
extern crate ogg_sys;
extern crate vorbis_sys;
extern crate vorbisfile_sys;
extern crate libc;
extern crate rand;
use std::io::{self, Read, Seek};
use rand::Rng;
/// Allows you to decode a sound file stream into packets.
pub struct Decoder<R> where R: Read + Seek {
// Further information is boxed so that a stable pointer can be passed to the C callbacks.
data: Box<DecoderData<R>>,
}
/// Iterator over the decoded [`Packet`]s of a borrowed [`Decoder`].
pub struct PacketsIter<'a, R: 'a + Read + Seek>(&'a mut Decoder<R>);
/// Iterator over the decoded [`Packet`]s of an owned [`Decoder`].
pub struct PacketsIntoIter<R: Read + Seek>(Decoder<R>);
/// Errors that can happen while decoding & encoding
#[derive(Debug)]
pub enum VorbisError {
/// A read from the underlying media returned an I/O error.
ReadError(io::Error),
/// The bitstream does not contain any Vorbis data (OV_ENOTVORBIS).
NotVorbis,
/// Vorbis version mismatch (OV_EVERSION).
VersionMismatch,
/// Invalid Vorbis bitstream header (OV_EBADHEADER).
BadHeader,
/// Interruption of data (OV_HOLE).
Hole,
InvalidSetup, // OV_EINVAL - Invalid setup request, eg, out of range argument.
Unimplemented, // OV_EIMPL - Unimplemented mode; unable to comply with quality level request.
}
impl std::error::Error for VorbisError {
// NOTE(review): `description`/`cause` are the pre-Rust-1.42 Error API; this file
// predates `Display`-based descriptions and `source()`.
fn description(&self) -> &str {
match self {
&VorbisError::ReadError(_) => "A read from media returned an error",
&VorbisError::NotVorbis => "Bitstream does not contain any Vorbis data",
&VorbisError::VersionMismatch => "Vorbis version mismatch",
&VorbisError::BadHeader => "Invalid Vorbis bitstream header",
&VorbisError::InvalidSetup => "Invalid setup request, eg, out of range argument or initial file headers are corrupt",
&VorbisError::Hole => "Interruption of data",
&VorbisError::Unimplemented => "Unimplemented mode; unable to comply with quality level request.",
}
}
// Only ReadError wraps an underlying error; everything else is a leaf.
fn cause(&self) -> Option<&std::error::Error> {
match self {
&VorbisError::ReadError(ref err) => Some(err as &std::error::Error),
_ => None
}
}
}
impl std::fmt::Display for VorbisError {
// Reuses the static `description` string as the user-facing message.
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
write!(fmt, "{}", std::error::Error::description(self))
}
}
impl From<io::Error> for VorbisError {
// Lets `try!`/`?` convert I/O failures into `VorbisError::ReadError` automatically.
fn from(err: io::Error) -> VorbisError {
VorbisError::ReadError(err)
}
}
// Heap-allocated state shared with the C callbacks via a raw pointer.
struct DecoderData<R> where R: Read + Seek {
// The libvorbisfile handle; its callbacks receive a pointer to this whole struct.
vorbis: vorbisfile_sys::OggVorbis_File,
// The Rust stream the callbacks read from / seek in.
reader: R,
// Logical bitstream index last reported by ov_read.
current_logical_bitstream: libc::c_int,
// Stashes an I/O error raised inside read_func so next_packet can surface it.
read_error: Option<io::Error>,
}
// SAFETY(review): DecoderData exclusively owns both the reader and the OggVorbis_File;
// presumably the raw pointers inside OggVorbis_File are never shared with another
// thread — confirm against libvorbisfile's threading guarantees.
unsafe impl<R: Read + Seek + Send> Send for DecoderData<R> {}
/// Packet of data.
///
/// Each sample is an `i16` ranging from I16_MIN to I16_MAX.
///
/// The channels are interleaved in the data. For example if you have two channels, you will
/// get a sample from channel 1, then a sample from channel 2, than a sample from channel 1, etc.
#[derive(Clone, Debug)]
pub struct Packet {
/// Interleaved 16-bit samples.
pub data: Vec<i16>,
/// Number of interleaved channels.
pub channels: u16,
/// Sample rate in Hz.
pub rate: u64,
pub bitrate_upper: u64,
pub bitrate_nominal: u64,
pub bitrate_lower: u64,
pub bitrate_window: u64,
}
impl<R> Decoder<R> where R: Read + Seek {
/// Wraps the reader in a libvorbisfile decoder.
///
/// Installs read/seek/tell callbacks that forward to the Rust reader, then opens the
/// stream with `ov_open_callbacks`. Fails with a [`VorbisError`] if the stream is not
/// valid Vorbis data.
pub fn new(input: R) -> Result<Decoder<R>, VorbisError> {
// Called by libvorbisfile to pull bytes from the Rust reader.
extern fn read_func<R>(ptr: *mut libc::c_void, size: libc::size_t, nmemb: libc::size_t,
datasource: *mut libc::c_void) -> libc::size_t where R: Read + Seek
{
use std::slice;
/*
* In practice libvorbisfile always sets size to 1.
* This assumption makes things much simpler
*/
assert_eq!(size, 1);
let ptr = ptr as *mut u8;
// SAFETY(review): datasource is the DecoderData pointer registered in `new`;
// transmuting it back assumes the Box is still alive — confirm lifetime.
let data: &mut DecoderData<R> = unsafe { std::mem::transmute(datasource) };
let buffer = unsafe { slice::from_raw_parts_mut(ptr as *mut u8, nmemb as usize) };
loop {
match data.reader.read(buffer) {
Ok(nb) => return nb as libc::size_t,
// Retry transparently on EINTR-style interruptions.
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => (),
Err(e) => {
// C cannot carry the error; stash it for next_packet to report.
data.read_error = Some(e);
return 0
}
}
}
}
// Called by libvorbisfile to reposition the Rust reader.
extern fn seek_func<R>(datasource: *mut libc::c_void, offset: ogg_sys::ogg_int64_t,
whence: libc::c_int) -> libc::c_int where R: Read + Seek
{
let data: &mut DecoderData<R> = unsafe { std::mem::transmute(datasource) };
let result = match whence {
libc::SEEK_SET => data.reader.seek(io::SeekFrom::Start(offset as u64)),
libc::SEEK_CUR => data.reader.seek(io::SeekFrom::Current(offset)),
libc::SEEK_END => data.reader.seek(io::SeekFrom::End(offset)),
_ => unreachable!()
};
match result {
Ok(_) => 0,
Err(_) => -1
}
}
// Called by libvorbisfile to query the current stream position.
extern fn tell_func<R>(datasource: *mut libc::c_void) -> libc::c_long
where R: Read + Seek
{
let data: &mut DecoderData<R> = unsafe { std::mem::transmute(datasource) };
// Seeking by 0 from the current position is the portable "tell".
data.reader.seek(io::SeekFrom::Current(0)).map(|v| v as libc::c_long).unwrap_or(-1)
}
let callbacks = {
// close_func is intentionally left zeroed: the reader is dropped by Rust.
let mut callbacks: vorbisfile_sys::ov_callbacks = unsafe { std::mem::zeroed() };
callbacks.read_func = read_func::<R>;
callbacks.seek_func = seek_func::<R>;
callbacks.tell_func = tell_func::<R>;
callbacks
};
let mut data = Box::new(DecoderData {
// NOTE(review): mem::uninitialized is deprecated/UB in modern Rust; ov_open_callbacks
// initialises this field below — consider MaybeUninit if this code is modernised.
vorbis: unsafe { std::mem::uninitialized() },
reader: input,
current_logical_bitstream: 0,
read_error: None,
});
// initializing
unsafe {
// The Box gives the callbacks a stable address for the lifetime of the Decoder.
let data_ptr = &mut *data as *mut DecoderData<R>;
let data_ptr = data_ptr as *mut libc::c_void;
try!(check_errors(vorbisfile_sys::ov_open_callbacks(data_ptr, &mut data.vorbis,
std::ptr::null(), 0, callbacks)));
}
Ok(Decoder {
data: data,
})
}
/// Seeks to the given time in seconds.
pub fn time_seek(&mut self, s: f64) -> Result<(), VorbisError> {
unsafe {
check_errors(vorbisfile_sys::ov_time_seek(&mut self.data.vorbis, s))
}
}
/// Returns the current decoding position in seconds.
pub fn time_tell(&mut self) -> Result<f64, VorbisError> {
unsafe {
Ok(vorbisfile_sys::ov_time_tell(&mut self.data.vorbis))
}
}
/// Borrowing iterator over the remaining packets.
pub fn packets(&mut self) -> PacketsIter<R> {
PacketsIter(self)
}
/// Consuming iterator over the remaining packets.
pub fn into_packets(self) -> PacketsIntoIter<R> {
PacketsIntoIter(self)
}
// Decodes the next packet; None at end of stream, Some(Err) on failure.
fn next_packet(&mut self) -> Option<Result<Packet, VorbisError>> {
// 2048 i16 samples; ov_read takes the size in bytes, hence the * 2.
let mut buffer = std::iter::repeat(0i16).take(2048).collect::<Vec<_>>();
let buffer_len = buffer.len() * 2;
// ov_read args: 0 = little-endian, 2 = 16-bit words, 1 = signed samples.
match unsafe {
vorbisfile_sys::ov_read(&mut self.data.vorbis, buffer.as_mut_ptr() as *mut libc::c_char,
buffer_len as libc::c_int, 0, 2, 1, &mut self.data.current_logical_bitstream)
} {
0 => {
// 0 means EOF — unless read_func stashed a real I/O error.
match self.data.read_error.take() {
Some(err) => Some(Err(VorbisError::ReadError(err))),
None => None,
}
},
err if err < 0 => {
match check_errors(err as libc::c_int) {
Err(e) => Some(Err(e)),
Ok(_) => unreachable!()
}
},
len => {
// len is in bytes; convert back to i16 sample count.
buffer.truncate(len as usize / 2);
let infos = unsafe { vorbisfile_sys::ov_info(&mut self.data.vorbis,
self.data.current_logical_bitstream) };
let infos: &vorbis_sys::vorbis_info = unsafe { std::mem::transmute(infos) };
Some(Ok(Packet {
data: buffer,
channels: infos.channels as u16,
rate: infos.rate as u64,
bitrate_upper: infos.bitrate_upper as u64,
bitrate_nominal: infos.bitrate_nominal as u64,
bitrate_lower: infos.bitrate_lower as u64,
bitrate_window: infos.bitrate_window as u64,
}))
}
}
}
}
impl<'a, R> Iterator for PacketsIter<'a, R> where R: 'a + Read + Seek {
type Item = Result<Packet, VorbisError>;
// Delegates to the borrowed decoder's next_packet.
fn next(&mut self) -> Option<Result<Packet, VorbisError>> {
self.0.next_packet()
}
}
impl<R> Iterator for PacketsIntoIter<R> where R: Read + Seek {
type Item = Result<Packet, VorbisError>;
// Delegates to the owned decoder's next_packet.
fn next(&mut self) -> Option<Result<Packet, VorbisError>> {
self.0.next_packet()
}
}
impl<R> Drop for Decoder<R> where R: Read + Seek {
fn drop(&mut self) {
// Releases the libvorbisfile state opened by ov_open_callbacks; the boxed
// reader is then dropped normally by Rust.
unsafe {
vorbisfile_sys::ov_clear(&mut self.data.vorbis);
}
}
}
// Maps a libvorbis/libvorbisfile return code to Ok(()) or a VorbisError.
//
// Panics on codes that indicate a bug rather than a recoverable condition.
fn check_errors(code: libc::c_int) -> Result<(), VorbisError> {
match code {
0 => Ok(()),
vorbis_sys::OV_ENOTVORBIS => Err(VorbisError::NotVorbis),
vorbis_sys::OV_EVERSION => Err(VorbisError::VersionMismatch),
vorbis_sys::OV_EBADHEADER => Err(VorbisError::BadHeader),
vorbis_sys::OV_EINVAL => Err(VorbisError::InvalidSetup),
vorbis_sys::OV_HOLE => Err(VorbisError::Hole),
// NOTE(review): OV_EREAD panics here; read failures are instead surfaced via
// DecoderData::read_error in next_packet — confirm OV_EREAD is truly unreachable.
vorbis_sys::OV_EREAD => unimplemented!(),
vorbis_sys::OV_EIMPL => Err(VorbisError::Unimplemented),
// indicates a bug or heap/stack corruption
vorbis_sys::OV_EFAULT => panic!("Internal libvorbis error"),
_ => panic!("Unknown vorbis error {}", code)
}
}
/// Coarse VBR quality presets, from best quality to fastest encoding.
///
/// NOTE(review): `Midium` and `HighPerforamnce` are misspelled, but renaming the
/// variants would break the public API; kept as-is for backward compatibility.
#[derive(Debug)]
pub enum VorbisQuality {
VeryHighQuality,
HighQuality,
Quality,
Midium,
Performance,
HighPerforamnce,
VeryHighPerformance,
}
// Ogg/Vorbis VBR encoder wrapping the libvorbis/libogg analysis pipeline.
pub struct Encoder {
// Encoded Ogg bytes accumulated since the last encode()/flush() call.
data: Vec<u8>,
// libvorbis analysis state.
state: vorbis_sys::vorbis_dsp_state,
// Scratch block handed between analysis calls.
block: vorbis_sys::vorbis_block,
// Stream parameters (channels, rate, bitrates).
info: vorbis_sys::vorbis_info,
comment: vorbis_sys::vorbis_comment,
// Ogg packet/page assembly state.
stream: ogg_sys::ogg_stream_state,
page: ogg_sys::ogg_page,
packet: ogg_sys::ogg_packet,
}
impl Encoder {
pub fn new(channels: u8, rate: u64, quality: VorbisQuality) -> Result<Self, VorbisError> {
let mut encoder = Encoder {
data: Vec::new(),
state: unsafe { std::mem::zeroed() },
block: unsafe { std::mem::zeroed() },
info: unsafe { std::mem::zeroed() },
comment: unsafe { std::mem::zeroed() },
stream: unsafe { std::mem::zeroed() },
page: unsafe { std::mem::zeroed() },
packet: unsafe { std::mem::zeroed() },
};
unsafe {
vorbis_sys::vorbis_info_init(&mut encoder.info as *mut vorbis_sys::vorbis_info);
let quality = match quality {
VorbisQuality::VeryHighQuality => 1.0,
VorbisQuality::HighQuality => 0.9,
VorbisQuality::Quality => 0.7,
VorbisQuality::Midium => 0.5,
VorbisQuality::Performance => 0.3,
VorbisQuality::HighPerforamnce => 0.1,
VorbisQuality::VeryHighPerformance => -0.1,
};
try!(check_errors(vorbis_sys::vorbis_encode_init_vbr(
&mut encoder.info as *mut vorbis_sys::vorbis_info,
channels as libc::c_long, rate as libc::c_long, quality)));
vorbis_sys::vorbis_comment_init(&mut encoder.comment as *mut vorbis_sys::vorbis_comment);
vorbis_sys::vorbis_analysis_init(
&mut encoder.state as *mut vorbis_sys::vorbis_dsp_state ,
&mut encoder.info as *mut vorbis_sys::vorbis_info);
vorbis_sys::vorbis_block_init(
&mut encoder.state as *mut vorbis_sys::vorbis_dsp_state,
&mut encoder.block as *mut vorbis_sys::vorbis_block);
let mut rnd = rand::os::OsRng::new().unwrap();
ogg_sys::ogg_stream_init(&mut encoder.stream as *mut ogg_sys::ogg_stream_state, rnd.gen());
{
let mut header: ogg_sys::ogg_packet = std::mem::zeroed();
let mut header_comm: ogg_sys::ogg_packet = std::mem::zeroed();
let mut header_code: ogg_sys::ogg_packet = std::mem::zeroed();
vorbis_sys::vorbis_analysis_headerout(
&mut encoder.state as *mut vorbis_sys::vorbis_dsp_state,
&mut encoder.comment as *mut vorbis_sys::vorbis_comment,
&mut header as *mut ogg_sys::ogg_packet,
&mut header_comm as *mut ogg_sys::ogg_packet,
&mut header_code as *mut ogg_sys::ogg_packet);
ogg_sys::ogg_stream_packetin(
&mut encoder.stream as *mut ogg_sys::ogg_stream_state,
&mut header as *mut ogg_sys::ogg_packet);
ogg_sys::ogg_stream_packetin(
&mut encoder.stream as *mut ogg_sys::ogg_stream_state,
&mut header_comm as *mut ogg_sys::ogg_packet);
ogg_sys::ogg_stream_packetin(
&mut encoder.stream as *mut ogg_sys::ogg_stream_state,
&mut header_code as *mut ogg_sys::ogg_packet);
loop {
let result = ogg_sys::ogg_stream_flush(
&mut encoder.stream as *mut ogg_sys::ogg_stream_state,
&mut encoder.page as *mut ogg_sys::ogg_page);
if result == 0 {
break;
}
encoder.data.extend_from_slice(std::slice::from_raw_parts(
encoder.page.header as *const u8, encoder.page.header_len as usize));
encoder.data.extend_from_slice(std::slice::from_raw_parts(
encoder.page.body as *const u8, encoder.page.body_len as usize));
}
}
}
return Ok(encoder);
}
// data is an interleaved array of samples, they must be in (-1.0 1.0)
pub fn encode(&mut self, data: &[f32]) -> Result<Vec<u8>, VorbisError> {
let samples = data.len() as i32 / self.info.channels;
let buffer: *mut *mut libc::c_float = unsafe { vorbis_sys::vorbis_analysis_buffer(
&mut self.state as *mut vorbis_sys::vorbis_dsp_state, samples) };
let mut data_index = 0;
for b in 0..samples {
for c in 0..self.info.channels {
unsafe {
*((*(buffer.offset(c as isize))).offset(b as isize)) =
data[data_index] as libc::c_float;
}
data_index += 1;
}
}
try!(check_errors( unsafe { vorbis_sys::vorbis_analysis_wrote(
&mut self.state as *mut vorbis_sys::vorbis_dsp_state,
samples) }));
try!(self.read_block());
let result = Ok(self.data.clone());
self.data = Vec::new();
return result;
}
fn read_block(&mut self) -> Result<(), VorbisError> {
loop { // TODO: mmm! it could be better but it does not have high priority
let block_out = unsafe { vorbis_sys::vorbis_analysis_blockout(
&mut self.state as *mut vorbis_sys::vorbis_dsp_state,
&mut self.block as *mut vorbis_sys::vorbis_block) };
match block_out {
1 => {},
0 => {
break;
},
_ => {
try!(check_errors(block_out));
},
}
try!(check_errors(unsafe { vorbis_sys::vorbis_analysis(
&mut self.block as *mut vorbis_sys::vorbis_block,
0 as *mut ogg_sys::ogg_packet)}));
try!(check_errors(unsafe { vorbis_sys::vorbis_bitrate_addblock(
&mut self.block as *mut vorbis_sys::vorbis_block)}));
loop {
let flush_packet = unsafe { vorbis_sys::vorbis_bitrate_flushpacket(
&mut self.state as *mut vorbis_sys::vorbis_dsp_state,
&mut self.packet as *mut ogg_sys::ogg_packet)};
match flush_packet {
1 => {},
0 => {
break;
},
_ => {
try!(check_errors(block_out));
},
}
unsafe { ogg_sys::ogg_stream_packetin(
&mut self.stream as *mut ogg_sys::ogg_stream_state,
&mut self.packet as *mut ogg_sys::ogg_packet);}
loop {
let result = unsafe { ogg_sys::ogg_stream_pageout(
&mut self.stream as *mut ogg_sys::ogg_stream_state,
&mut self.page as *mut ogg_sys::ogg_page) };
if result == 0 {
break;
}
self.data.extend_from_slice(unsafe { std::slice::from_raw_parts(
self.page.header as *const u8, self.page.header_len as usize) });
self.data.extend_from_slice(unsafe { std::slice::from_raw_parts(
self.page.body as *const u8, self.page.body_len as usize) });
if unsafe { ogg_sys::ogg_page_eos(&mut self.page as *mut ogg_sys::ogg_page) } != 0 {
panic!("Unexpected behavior. Please call the package author.");
}
}
}
}
Ok(())
}
/// Signals end-of-stream to the encoder and returns every remaining
/// encoded byte.
pub fn flush(&mut self) -> Result<Vec<u8>, VorbisError> {
    // A zero-length write tells libvorbis no more audio will follow.
    try!(check_errors(unsafe { vorbis_sys::vorbis_analysis_wrote(
        &mut self.state as *mut vorbis_sys::vorbis_dsp_state, 0)
    }));
    try!(self.read_block());
    // Hand the accumulated bytes over and leave a fresh, empty buffer.
    Ok(std::mem::replace(&mut self.data, Vec::new()))
}
}
impl Drop for Encoder {
    /// Releases every libogg/libvorbis structure initialised in `new`,
    /// in reverse dependency order (stream, block, dsp state, comment, info).
    fn drop(&mut self) {
        unsafe {
            ogg_sys::ogg_stream_clear(&mut self.stream as *mut ogg_sys::ogg_stream_state);
            vorbis_sys::vorbis_block_clear(&mut self.block as *mut vorbis_sys::vorbis_block);
            vorbis_sys::vorbis_dsp_clear(&mut self.state as *mut vorbis_sys::vorbis_dsp_state);
            vorbis_sys::vorbis_comment_clear(&mut self.comment as *mut vorbis_sys::vorbis_comment);
            vorbis_sys::vorbis_info_clear(&mut self.info as *mut vorbis_sys::vorbis_info);
        }
    }
}
nothing
extern crate ogg_sys;
extern crate vorbis_sys;
extern crate vorbisfile_sys;
extern crate libc;
extern crate rand;
use std::io::{self, Read, Seek};
use rand::Rng;
/// Allows you to decode a sound file stream into packets.
pub struct Decoder<R> where R: Read + Seek {
    // The state is boxed so that a stable pointer to it can be handed to
    // the C callbacks registered with libvorbisfile.
    data: Box<DecoderData<R>>,
}
/// Borrowing iterator over the packets of a `Decoder`.
pub struct PacketsIter<'a, R: 'a + Read + Seek>(&'a mut Decoder<R>);
/// Owning iterator over the packets of a `Decoder`.
pub struct PacketsIntoIter<R: Read + Seek>(Decoder<R>);
/// Errors that can happen while decoding & encoding
#[derive(Debug)]
pub enum VorbisError {
    /// The underlying reader failed.
    ReadError(io::Error),
    /// The bitstream does not contain any Vorbis data.
    NotVorbis,
    /// The stream was produced by an incompatible Vorbis version.
    VersionMismatch,
    /// The Vorbis bitstream header is invalid.
    BadHeader,
    /// A gap (lost page or packet) was detected in the data.
    Hole,
    InvalidSetup, // OV_EINVAL - Invalid setup request, eg, out of range argument.
    Unimplemented, // OV_EIMPL - Unimplemented mode; unable to comply with quality level request.
}
impl std::error::Error for VorbisError {
fn description(&self) -> &str {
match self {
&VorbisError::ReadError(_) => "A read from media returned an error",
&VorbisError::NotVorbis => "Bitstream does not contain any Vorbis data",
&VorbisError::VersionMismatch => "Vorbis version mismatch",
&VorbisError::BadHeader => "Invalid Vorbis bitstream header",
&VorbisError::InvalidSetup => "Invalid setup request, eg, out of range argument or initial file headers are corrupt",
&VorbisError::Hole => "Interruption of data",
&VorbisError::Unimplemented => "Unimplemented mode; unable to comply with quality level request.",
}
}
fn cause(&self) -> Option<&std::error::Error> {
match self {
&VorbisError::ReadError(ref err) => Some(err as &std::error::Error),
_ => None
}
}
}
impl std::fmt::Display for VorbisError {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
write!(fmt, "{}", std::error::Error::description(self))
}
}
impl From<io::Error> for VorbisError {
fn from(err: io::Error) -> VorbisError {
VorbisError::ReadError(err)
}
}
// Decoder state shared with the C callbacks. Boxed by `Decoder` so its
// address stays stable while registered with libvorbisfile.
struct DecoderData<R> where R: Read + Seek {
    vorbis: vorbisfile_sys::OggVorbis_File,
    reader: R,
    // Logical bitstream index last reported by `ov_read`.
    current_logical_bitstream: libc::c_int,
    // Rust errors cannot cross the C boundary, so the read callback parks
    // them here and `next_packet` retrieves them afterwards.
    read_error: Option<io::Error>,
}
// SAFETY(review): assumes the raw pointers inside `OggVorbis_File` are not
// shared outside this struct, so sending is sound when `R: Send` — confirm.
unsafe impl<R: Read + Seek + Send> Send for DecoderData<R> {}
/// Packet of data.
///
/// Each sample is an `i16` ranging from I16_MIN to I16_MAX.
///
/// The channels are interleaved in the data. For example if you have two channels, you will
/// get a sample from channel 1, then a sample from channel 2, then a sample from channel 1, etc.
#[derive(Clone, Debug)]
pub struct Packet {
    /// Interleaved 16-bit samples.
    pub data: Vec<i16>,
    /// Number of audio channels.
    pub channels: u16,
    /// Sampling rate in Hz.
    pub rate: u64,
    /// Bitrate upper bound reported by the stream headers (see libvorbis `vorbis_info`).
    pub bitrate_upper: u64,
    /// Nominal bitrate reported by the stream headers.
    pub bitrate_nominal: u64,
    /// Bitrate lower bound reported by the stream headers.
    pub bitrate_lower: u64,
    /// Bitrate window reported by the stream headers.
    pub bitrate_window: u64,
}
impl<R> Decoder<R> where R: Read + Seek {
    /// Opens a Vorbis stream read from `input`, wiring the Rust reader in
    /// as libvorbisfile I/O callbacks.
    pub fn new(input: R) -> Result<Decoder<R>, VorbisError> {
        // Read callback: fill `ptr[0..nmemb]` from the Rust reader.
        extern fn read_func<R>(ptr: *mut libc::c_void, size: libc::size_t, nmemb: libc::size_t,
            datasource: *mut libc::c_void) -> libc::size_t where R: Read + Seek
        {
            use std::slice;
            /*
             * In practice libvorbisfile always sets size to 1.
             * This assumption makes things much simpler
             */
            assert_eq!(size, 1);
            let ptr = ptr as *mut u8;
            // `datasource` is the address of our boxed `DecoderData` (see below).
            let data: &mut DecoderData<R> = unsafe { std::mem::transmute(datasource) };
            let buffer = unsafe { slice::from_raw_parts_mut(ptr as *mut u8, nmemb as usize) };
            loop {
                match data.reader.read(buffer) {
                    Ok(nb) => return nb as libc::size_t,
                    // Retry reads interrupted by signals.
                    Err(ref e) if e.kind() == io::ErrorKind::Interrupted => (),
                    Err(e) => {
                        // Park the error for `next_packet`; returning 0 is
                        // the only failure signal the C API understands here.
                        data.read_error = Some(e);
                        return 0
                    }
                }
            }
        }
        // Seek callback mirroring fseek(): 0 on success, -1 on failure.
        extern fn seek_func<R>(datasource: *mut libc::c_void, offset: ogg_sys::ogg_int64_t,
            whence: libc::c_int) -> libc::c_int where R: Read + Seek
        {
            let data: &mut DecoderData<R> = unsafe { std::mem::transmute(datasource) };
            let result = match whence {
                libc::SEEK_SET => data.reader.seek(io::SeekFrom::Start(offset as u64)),
                libc::SEEK_CUR => data.reader.seek(io::SeekFrom::Current(offset)),
                libc::SEEK_END => data.reader.seek(io::SeekFrom::End(offset)),
                // Only the three constants above are valid `whence` values.
                _ => unreachable!()
            };
            match result {
                Ok(_) => 0,
                Err(_) => -1
            }
        }
        // Tell callback: report the current position via a relative no-op seek.
        extern fn tell_func<R>(datasource: *mut libc::c_void) -> libc::c_long
            where R: Read + Seek
        {
            let data: &mut DecoderData<R> = unsafe { std::mem::transmute(datasource) };
            data.reader.seek(io::SeekFrom::Current(0)).map(|v| v as libc::c_long).unwrap_or(-1)
        }
        let callbacks = {
            // close_func stays null: dropping `Decoder` frees the reader.
            let mut callbacks: vorbisfile_sys::ov_callbacks = unsafe { std::mem::zeroed() };
            callbacks.read_func = read_func::<R>;
            callbacks.seek_func = seek_func::<R>;
            callbacks.tell_func = tell_func::<R>;
            callbacks
        };
        let mut data = Box::new(DecoderData {
            // NOTE(review): `mem::uninitialized()` is undefined behavior in
            // modern Rust; it is only tolerated here because
            // `ov_open_callbacks` below initialises the struct before use.
            vorbis: unsafe { std::mem::uninitialized() },
            reader: input,
            current_logical_bitstream: 0,
            read_error: None,
        });
        // initializing
        unsafe {
            // The box gives `DecoderData` a stable address to hand to C.
            let data_ptr = &mut *data as *mut DecoderData<R>;
            let data_ptr = data_ptr as *mut libc::c_void;
            try!(check_errors(vorbisfile_sys::ov_open_callbacks(data_ptr, &mut data.vorbis,
                std::ptr::null(), 0, callbacks)));
        }
        Ok(Decoder {
            data: data,
        })
    }
    /// Seeks to the given time, in seconds, within the stream.
    pub fn time_seek(&mut self, s: f64) -> Result<(), VorbisError> {
        unsafe {
            check_errors(vorbisfile_sys::ov_time_seek(&mut self.data.vorbis, s))
        }
    }
    /// Returns the current decoding position, in seconds.
    pub fn time_tell(&mut self) -> Result<f64, VorbisError> {
        unsafe {
            Ok(vorbisfile_sys::ov_time_tell(&mut self.data.vorbis))
        }
    }
    /// Borrowing iterator over the decoded packets.
    pub fn packets(&mut self) -> PacketsIter<R> {
        PacketsIter(self)
    }
    /// Consuming iterator over the decoded packets.
    pub fn into_packets(self) -> PacketsIntoIter<R> {
        PacketsIntoIter(self)
    }
    /// Decodes up to 2048 samples; `None` signals a clean end of stream.
    fn next_packet(&mut self) -> Option<Result<Packet, VorbisError>> {
        let mut buffer = std::iter::repeat(0i16).take(2048).collect::<Vec<_>>();
        // ov_read takes the buffer size in bytes (2 bytes per i16 sample).
        let buffer_len = buffer.len() * 2;
        match unsafe {
            // Args after length: 0 = little-endian, 2 = 16-bit words, 1 = signed.
            vorbisfile_sys::ov_read(&mut self.data.vorbis, buffer.as_mut_ptr() as *mut libc::c_char,
                buffer_len as libc::c_int, 0, 2, 1, &mut self.data.current_logical_bitstream)
        } {
            0 => {
                // 0 means EOF unless the read callback parked an error earlier.
                match self.data.read_error.take() {
                    Some(err) => Some(Err(VorbisError::ReadError(err))),
                    None => None,
                }
            },
            err if err < 0 => {
                match check_errors(err as libc::c_int) {
                    Err(e) => Some(Err(e)),
                    Ok(_) => unreachable!()
                }
            },
            len => {
                // `len` is a byte count; keep only the samples actually written.
                buffer.truncate(len as usize / 2);
                let infos = unsafe { vorbisfile_sys::ov_info(&mut self.data.vorbis,
                    self.data.current_logical_bitstream) };
                let infos: &vorbis_sys::vorbis_info = unsafe { std::mem::transmute(infos) };
                Some(Ok(Packet {
                    data: buffer,
                    channels: infos.channels as u16,
                    rate: infos.rate as u64,
                    bitrate_upper: infos.bitrate_upper as u64,
                    bitrate_nominal: infos.bitrate_nominal as u64,
                    bitrate_lower: infos.bitrate_lower as u64,
                    bitrate_window: infos.bitrate_window as u64,
                }))
            }
        }
    }
}
impl<'a, R> Iterator for PacketsIter<'a, R> where R: 'a + Read + Seek {
    type Item = Result<Packet, VorbisError>;

    /// Delegates to the underlying decoder.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next_packet()
    }
}
impl<R> Iterator for PacketsIntoIter<R> where R: Read + Seek {
    type Item = Result<Packet, VorbisError>;

    /// Delegates to the owned decoder.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next_packet()
    }
}
impl<R> Drop for Decoder<R> where R: Read + Seek {
    /// Frees the libvorbisfile state; the boxed `DecoderData` (and the
    /// reader inside it) is dropped afterwards by normal field drop order.
    fn drop(&mut self) {
        unsafe {
            vorbisfile_sys::ov_clear(&mut self.data.vorbis);
        }
    }
}
fn check_errors(code: libc::c_int) -> Result<(), VorbisError> {
match code {
0 => Ok(()),
vorbis_sys::OV_ENOTVORBIS => Err(VorbisError::NotVorbis),
vorbis_sys::OV_EVERSION => Err(VorbisError::VersionMismatch),
vorbis_sys::OV_EBADHEADER => Err(VorbisError::BadHeader),
vorbis_sys::OV_EINVAL => Err(VorbisError::InvalidSetup),
vorbis_sys::OV_HOLE => Err(VorbisError::Hole),
vorbis_sys::OV_EREAD => unimplemented!(),
vorbis_sys::OV_EIMPL => Err(VorbisError::Unimplemented),
// indicates a bug or heap/stack corruption
vorbis_sys::OV_EFAULT => panic!("Internal libvorbis error"),
_ => panic!("Unknown vorbis error {}", code)
}
}
/// Quality presets, mapped in `Encoder::new` onto libvorbis VBR quality
/// values from 1.0 (best) down to -0.1 (smallest).
#[derive(Debug)]
pub enum VorbisQuality {
    VeryHighQuality,
    HighQuality,
    Quality,
    // NOTE(review): misspelling of "Medium"; kept because renaming a public
    // variant would break callers.
    Midium,
    Performance,
    // NOTE(review): misspelling of "HighPerformance"; kept for the same reason.
    HighPerforamnce,
    VeryHighPerformance,
}
/// Streams interleaved `f32` samples into an Ogg/Vorbis byte stream.
pub struct Encoder {
    // Encoded Ogg pages awaiting collection by `encode()`/`flush()`.
    data: Vec<u8>,
    // libvorbis / libogg state: initialised in `new`, freed in `drop`.
    state: vorbis_sys::vorbis_dsp_state,
    block: vorbis_sys::vorbis_block,
    info: vorbis_sys::vorbis_info,
    comment: vorbis_sys::vorbis_comment,
    // Ogg packetiser state plus scratch page/packet reused on every call.
    stream: ogg_sys::ogg_stream_state,
    page: ogg_sys::ogg_page,
    packet: ogg_sys::ogg_packet,
}
impl Encoder {
/// Creates a VBR encoder for `channels` channels at `rate` Hz and writes
/// the three Vorbis header packets into the pending data buffer, so the
/// first `encode()`/`flush()` output begins with a valid Ogg header.
pub fn new(channels: u8, rate: u64, quality: VorbisQuality) -> Result<Self, VorbisError> {
    // All FFI structs start zeroed; the C init calls below fill them in.
    let mut encoder = Encoder {
        data: Vec::new(),
        state: unsafe { std::mem::zeroed() },
        block: unsafe { std::mem::zeroed() },
        info: unsafe { std::mem::zeroed() },
        comment: unsafe { std::mem::zeroed() },
        stream: unsafe { std::mem::zeroed() },
        page: unsafe { std::mem::zeroed() },
        packet: unsafe { std::mem::zeroed() },
    };
    unsafe {
        vorbis_sys::vorbis_info_init(&mut encoder.info as *mut vorbis_sys::vorbis_info);
        // Map the presets onto libvorbis' VBR quality scale (-0.1 to 1.0).
        let quality = match quality {
            VorbisQuality::VeryHighQuality => 1.0,
            VorbisQuality::HighQuality => 0.9,
            VorbisQuality::Quality => 0.7,
            VorbisQuality::Midium => 0.5,
            VorbisQuality::Performance => 0.3,
            VorbisQuality::HighPerforamnce => 0.1,
            VorbisQuality::VeryHighPerformance => -0.1,
        };
        try!(check_errors(vorbis_sys::vorbis_encode_init_vbr(
            &mut encoder.info as *mut vorbis_sys::vorbis_info,
            channels as libc::c_long, rate as libc::c_long, quality as libc::c_float)));
        vorbis_sys::vorbis_comment_init(&mut encoder.comment as *mut vorbis_sys::vorbis_comment);
        vorbis_sys::vorbis_analysis_init(
            &mut encoder.state as *mut vorbis_sys::vorbis_dsp_state,
            &mut encoder.info as *mut vorbis_sys::vorbis_info);
        vorbis_sys::vorbis_block_init(
            &mut encoder.state as *mut vorbis_sys::vorbis_dsp_state,
            &mut encoder.block as *mut vorbis_sys::vorbis_block);
        // Random serial number for the Ogg logical stream.
        let mut rnd = rand::os::OsRng::new().unwrap();
        ogg_sys::ogg_stream_init(&mut encoder.stream as *mut ogg_sys::ogg_stream_state, rnd.gen());
        {
            // Produce the identification, comment and setup headers ...
            let mut header: ogg_sys::ogg_packet = std::mem::zeroed();
            let mut header_comm: ogg_sys::ogg_packet = std::mem::zeroed();
            let mut header_code: ogg_sys::ogg_packet = std::mem::zeroed();
            vorbis_sys::vorbis_analysis_headerout(
                &mut encoder.state as *mut vorbis_sys::vorbis_dsp_state,
                &mut encoder.comment as *mut vorbis_sys::vorbis_comment,
                &mut header as *mut ogg_sys::ogg_packet,
                &mut header_comm as *mut ogg_sys::ogg_packet,
                &mut header_code as *mut ogg_sys::ogg_packet);
            // ... submit them to the stream ...
            ogg_sys::ogg_stream_packetin(
                &mut encoder.stream as *mut ogg_sys::ogg_stream_state,
                &mut header as *mut ogg_sys::ogg_packet);
            ogg_sys::ogg_stream_packetin(
                &mut encoder.stream as *mut ogg_sys::ogg_stream_state,
                &mut header_comm as *mut ogg_sys::ogg_packet);
            ogg_sys::ogg_stream_packetin(
                &mut encoder.stream as *mut ogg_sys::ogg_stream_state,
                &mut header_code as *mut ogg_sys::ogg_packet);
            // ... and flush the resulting pages into `data`.
            loop {
                let result = ogg_sys::ogg_stream_flush(
                    &mut encoder.stream as *mut ogg_sys::ogg_stream_state,
                    &mut encoder.page as *mut ogg_sys::ogg_page);
                if result == 0 {
                    break;
                }
                encoder.data.extend_from_slice(std::slice::from_raw_parts(
                    encoder.page.header as *const u8, encoder.page.header_len as usize));
                encoder.data.extend_from_slice(std::slice::from_raw_parts(
                    encoder.page.body as *const u8, encoder.page.body_len as usize));
            }
        }
    }
    return Ok(encoder);
}
/// Encodes a buffer of interleaved `f32` samples (each in (-1.0, 1.0))
/// and returns whatever complete Ogg pages became available so far.
///
/// `data.len()` should be a multiple of the channel count; the integer
/// division below silently drops any trailing partial frame.
pub fn encode(&mut self, data: &[f32]) -> Result<Vec<u8>, VorbisError> {
    let samples = data.len() as i32 / self.info.channels;
    // libvorbis hands back one writable buffer per channel.
    let buffer: *mut *mut libc::c_float = unsafe { vorbis_sys::vorbis_analysis_buffer(
        &mut self.state as *mut vorbis_sys::vorbis_dsp_state, samples) };
    let mut data_index = 0;
    // De-interleave: sample `b` of channel `c` goes into buffer[c][b].
    for b in 0..samples {
        for c in 0..self.info.channels {
            unsafe {
                *((*(buffer.offset(c as isize))).offset(b as isize)) =
                    data[data_index] as libc::c_float;
            }
            data_index += 1;
        }
    }
    // Tell the encoder how many frames per channel we actually filled in.
    try!(check_errors( unsafe { vorbis_sys::vorbis_analysis_wrote(
        &mut self.state as *mut vorbis_sys::vorbis_dsp_state,
        samples) }));
    try!(self.read_block());
    // Hand the bytes gathered by read_block() to the caller, reset buffer.
    let result = Ok(self.data.clone());
    self.data = Vec::new();
    return result;
}
fn read_block(&mut self) -> Result<(), VorbisError> {
loop { // TODO: mmm! it could be better but it does not have high priority
let block_out = unsafe { vorbis_sys::vorbis_analysis_blockout(
&mut self.state as *mut vorbis_sys::vorbis_dsp_state,
&mut self.block as *mut vorbis_sys::vorbis_block) };
match block_out {
1 => {},
0 => {
break;
},
_ => {
try!(check_errors(block_out));
},
}
try!(check_errors(unsafe { vorbis_sys::vorbis_analysis(
&mut self.block as *mut vorbis_sys::vorbis_block,
0 as *mut ogg_sys::ogg_packet)}));
try!(check_errors(unsafe { vorbis_sys::vorbis_bitrate_addblock(
&mut self.block as *mut vorbis_sys::vorbis_block)}));
loop {
let flush_packet = unsafe { vorbis_sys::vorbis_bitrate_flushpacket(
&mut self.state as *mut vorbis_sys::vorbis_dsp_state,
&mut self.packet as *mut ogg_sys::ogg_packet)};
match flush_packet {
1 => {},
0 => {
break;
},
_ => {
try!(check_errors(block_out));
},
}
unsafe { ogg_sys::ogg_stream_packetin(
&mut self.stream as *mut ogg_sys::ogg_stream_state,
&mut self.packet as *mut ogg_sys::ogg_packet);}
loop {
let result = unsafe { ogg_sys::ogg_stream_pageout(
&mut self.stream as *mut ogg_sys::ogg_stream_state,
&mut self.page as *mut ogg_sys::ogg_page) };
if result == 0 {
break;
}
self.data.extend_from_slice(unsafe { std::slice::from_raw_parts(
self.page.header as *const u8, self.page.header_len as usize) });
self.data.extend_from_slice(unsafe { std::slice::from_raw_parts(
self.page.body as *const u8, self.page.body_len as usize) });
if unsafe { ogg_sys::ogg_page_eos(&mut self.page as *mut ogg_sys::ogg_page) } != 0 {
panic!("Unexpected behavior. Please call the package author.");
}
}
}
}
Ok(())
}
/// Signals end-of-stream to the encoder and returns every remaining
/// encoded byte.
pub fn flush(&mut self) -> Result<Vec<u8>, VorbisError> {
    // A zero-length write tells libvorbis no more audio will follow.
    try!(check_errors(unsafe { vorbis_sys::vorbis_analysis_wrote(
        &mut self.state as *mut vorbis_sys::vorbis_dsp_state, 0)
    }));
    try!(self.read_block());
    // Hand the accumulated bytes over and leave a fresh, empty buffer.
    Ok(std::mem::replace(&mut self.data, Vec::new()))
}
}
impl Drop for Encoder {
    /// Releases every libogg/libvorbis structure initialised in `new`,
    /// in reverse dependency order (stream, block, dsp state, comment, info).
    fn drop(&mut self) {
        unsafe {
            ogg_sys::ogg_stream_clear(&mut self.stream as *mut ogg_sys::ogg_stream_state);
            vorbis_sys::vorbis_block_clear(&mut self.block as *mut vorbis_sys::vorbis_block);
            vorbis_sys::vorbis_dsp_clear(&mut self.state as *mut vorbis_sys::vorbis_dsp_state);
            vorbis_sys::vorbis_comment_clear(&mut self.comment as *mut vorbis_sys::vorbis_comment);
            vorbis_sys::vorbis_info_clear(&mut self.info as *mut vorbis_sys::vorbis_info);
        }
    }
}
|
#![crate_name="gdal"]
#![crate_type="lib"]
#![feature(unsafe_destructor)]
#![feature(associated_types)]
extern crate libc;
#[cfg(test)] extern crate test;
use libc::c_char;
use std::c_str::ToCStr;
mod utils;
pub mod raster;
pub mod vector;
pub mod proj;
pub mod geom;
pub mod warp;
#[link(name="gdal")]
extern {
    // C: `const char *GDALVersionInfo(const char *pszRequest)`.
    fn GDALVersionInfo(key: *const c_char) -> *const c_char;
}
/// Queries GDAL's version information for `key`
/// (e.g. "RELEASE_NAME", "RELEASE_DATE", "--version").
pub fn version_info(key: &str) -> String {
    key.with_c_str(|c_key| {
        utils::_string(unsafe { GDALVersionInfo(c_key) })
    })
}
#[cfg(test)]
mod tests {
    use super::version_info;
    // Rebuilds the "--version" banner from RELEASE_NAME and the
    // YYYYMMDD-formatted RELEASE_DATE and checks they agree.
    #[test]
    fn test_version_info() {
        let release_date = version_info("RELEASE_DATE");
        let release_name = version_info("RELEASE_NAME");
        let version_text = version_info("--version");
        let expected_text: String = format!(
            "GDAL {}, released {}/{}/{}",
            release_name,
            release_date.as_slice().slice(0, 4), // year
            release_date.as_slice().slice(4, 6), // month
            release_date.as_slice().slice(6, 8), // day
        );
        assert_eq!(version_text, expected_text);
    }
}
Remove the 'associated_types' feature flag — it is enabled by default.
#![crate_name="gdal"]
#![crate_type="lib"]
#![feature(unsafe_destructor)]
extern crate libc;
#[cfg(test)] extern crate test;
use libc::c_char;
use std::c_str::ToCStr;
mod utils;
pub mod raster;
pub mod vector;
pub mod proj;
pub mod geom;
pub mod warp;
#[link(name="gdal")]
extern {
    // C: `const char *GDALVersionInfo(const char *pszRequest)`.
    fn GDALVersionInfo(key: *const c_char) -> *const c_char;
}
/// Queries GDAL's version information for `key`
/// (e.g. "RELEASE_NAME", "RELEASE_DATE", "--version").
pub fn version_info(key: &str) -> String {
    key.with_c_str(|c_key| {
        utils::_string(unsafe { GDALVersionInfo(c_key) })
    })
}
#[cfg(test)]
mod tests {
    use super::version_info;
    // Rebuilds the "--version" banner from RELEASE_NAME and the
    // YYYYMMDD-formatted RELEASE_DATE and checks they agree.
    #[test]
    fn test_version_info() {
        let release_date = version_info("RELEASE_DATE");
        let release_name = version_info("RELEASE_NAME");
        let version_text = version_info("--version");
        let expected_text: String = format!(
            "GDAL {}, released {}/{}/{}",
            release_name,
            release_date.as_slice().slice(0, 4), // year
            release_date.as_slice().slice(4, 6), // month
            release_date.as_slice().slice(6, 8), // day
        );
        assert_eq!(version_text, expected_text);
    }
}
|
#![crate_id = "typedopts"]
#![crate_type = "lib"]
extern crate getopts;
extern crate serialize;
use getopts::Matches;
use std::from_str::FromStr;
use std::str::StrSlice;
use serialize::Decodable;
/// Ways in which decoding command-line options can fail.
#[deriving(Eq, Show)]
pub enum ErrorType {
    /// The requested decoder method has no command-line meaning.
    UnimplementedDecoder,
    /// A required option was absent; the payload is the field name.
    MissingField(~str),
    /// (field name, expected type, offending value).
    ExpectedType(~str, ~str, ~str)
}
/// Wrapper carrying a single decode error.
#[deriving(Eq, Show)]
pub struct Error {
    e: ErrorType
}
/// Outcome of decoding a value from the command line.
pub type DecodeResult<T> = Result<T, ErrorType>;
/// Pulls typed values out of getopts `Matches`.
pub struct Decoder {
    priv matches: Matches,
    // Name of the option/field currently being decoded.
    priv cur: ~str,
    // Name of the enum type currently being decoded (for error messages).
    priv current_type: ~str
}
impl Decoder {
    /// Wraps parsed getopts `Matches` in a decoder.
    pub fn new(matches: Matches) -> Decoder {
        Decoder {
            matches: matches,
            cur: ~"",
            current_type: ~""
        }
    }
    // Builds the "wrong type" error for the option currently being decoded.
    // NOTE(review): `unwrap()` assumes the option has a value; callers only
    // invoke this after `opt_str` returned `Some` — confirm if reused.
    fn expected(&self, expected_type: &str) -> ErrorType {
        ExpectedType(self.cur.to_owned(),
                     expected_type.to_owned(),
                     self.matches.opt_str(self.cur).unwrap())
    }
}
/// Decodes a `T` from getopts matches; struct field names map to option names.
pub fn decode<T:Send+Decodable<Decoder, ErrorType>>(matches: Matches) -> DecodeResult<T> {
    let mut decoder = Decoder::new(matches);
    Decodable::decode(&mut decoder)
}
impl ErrorType {
    /// Renders the error as a human-readable message.
    pub fn to_err_msg(self) -> ~str {
        match self {
            UnimplementedDecoder => format!("this function is not implemented"),
            MissingField(ref s) => format!("the required field '{}' is not present", s),
            ExpectedType(ref field, ref expected, ref value) => {
                format!("Expected type '{}' for field '{}' but got value '{}'", expected, field, value)
            }
        }
    }
}
impl<T:FromStr> Decoder {
fn get_field<T:FromStr>(&self, field: &str) -> Option<T> {
match self.matches.opt_str(self.cur) {
None => None,
Some(s) => FromStr::from_str(s)
}
}
}
// Bridges `serialize::Decoder` onto getopts `Matches`: each scalar read
// parses the string value of the option named by `self.cur` (positioned by
// `read_struct_field`/`read_struct`). Shapes with no obvious command-line
// representation — tuples, sequences, maps, enum payloads — run the given
// closure and uniformly answer `UnimplementedDecoder`.
impl serialize::Decoder<ErrorType> for Decoder {
    fn read_nil(&mut self) -> DecodeResult<()> {
        Err(UnimplementedDecoder)
    }
    // All unsigned reads funnel through u64, then narrow with `as` casts.
    fn read_u64(&mut self) -> DecodeResult<u64> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => match FromStr::from_str(s) {
                None => Err(self.expected(~"u64")),
                Some(nb) => Ok(nb)
            }
        }
    }
    fn read_u32(&mut self) -> DecodeResult<u32> { Ok(try!(self.read_u64()) as u32) }
    fn read_u16(&mut self) -> DecodeResult<u16> { Ok(try!(self.read_u64()) as u16) }
    fn read_u8 (&mut self) -> DecodeResult<u8> { Ok(try!(self.read_u64()) as u8) }
    fn read_uint(&mut self) -> DecodeResult<uint> { Ok(try!(self.read_u64()) as uint) }
    // Signed reads funnel through i64 the same way.
    fn read_i64(&mut self) -> DecodeResult<i64> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => match FromStr::from_str(s) {
                None => Err(self.expected(~"i64")),
                Some(nb) => Ok(nb)
            }
        }
    }
    fn read_i32(&mut self) -> DecodeResult<i32> { Ok(try!(self.read_i64()) as i32) }
    fn read_i16(&mut self) -> DecodeResult<i16> { Ok(try!(self.read_i64()) as i16) }
    fn read_i8 (&mut self) -> DecodeResult<i8> { Ok(try!(self.read_i64()) as i8) }
    fn read_int(&mut self) -> DecodeResult<int> { Ok(try!(self.read_i64()) as int) }
    fn read_f32(&mut self) -> DecodeResult<f32> { Ok(try!(self.read_f64()) as f32) }
    fn read_f64(&mut self) -> DecodeResult<f64> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => match FromStr::from_str(s) {
                None => Err(self.expected(~"f64")),
                Some(nb) => Ok(nb)
            }
        }
    }
    fn read_bool(&mut self) -> DecodeResult<bool> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => match FromStr::from_str(s) {
                None => Err(self.expected("boolean")),
                Some(b) => Ok(b)
            }
        }
    }
    // A char option must be exactly one character long.
    fn read_char(&mut self) -> DecodeResult<char> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => if s.char_len() == 1 { Ok(s.char_at(0)) } else { Err(self.expected("char")) }
        }
    }
    fn read_str(&mut self) -> DecodeResult<~str> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => Ok(s)
        }
    }
    // Remember the enum's type name for error messages, then decode it.
    fn read_enum<T>(&mut self, name: &str, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        self.current_type = name.to_owned();
        f(self)
    }
    // C-like enums decode by matching the option value against variant names.
    fn read_enum_variant<T>(&mut self, names: &[&str], f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => match names.iter().position(|&e| e == s) {
                None => Err(self.expected(self.current_type + " enum")),
                Some(i) => f(self, i)
            }
        }
    }
    // The remaining shapes are unsupported: each runs `f` (for decoder
    // symmetry) and then reports UnimplementedDecoder.
    fn read_enum_variant_arg<T>(&mut self, a_idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    fn read_enum_struct_variant<T>(&mut self, names: &[&str], f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self, 0);
        Err(UnimplementedDecoder)
    }
    fn read_enum_struct_variant_field<T>(&mut self, f_name: &str, f_idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    // Structs: decode each field with `cur` pointing at the matching option.
    fn read_struct<T>(&mut self, s_name: &str, len: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        self.cur = s_name.to_owned();
        f(self)
    }
    fn read_struct_field<T>(&mut self, f_name: &str, f_idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        self.cur = f_name.to_owned();
        let data = f(self);
        data
    }
    // An `Option<T>` field is `Some` iff the flag appears on the command line.
    fn read_option<T>(&mut self, f: |&mut Decoder, bool| -> DecodeResult<T>) -> DecodeResult<T> {
        match self.matches.opt_str(self.cur) {
            None => {
                f(self, false)
            },
            Some(s) => {
                f(self, true)
            }
        }
    }
    fn read_tuple<T>(&mut self, f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self, 0);
        Err(UnimplementedDecoder)
    }
    fn read_tuple_arg<T>(&mut self, a_idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    fn read_tuple_struct<T>(&mut self, s_name: &str, f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self, 0);
        Err(UnimplementedDecoder)
    }
    fn read_tuple_struct_arg<T>(&mut self, a_idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    fn read_seq<T>(&mut self, f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self, 0);
        Err(UnimplementedDecoder)
    }
    fn read_seq_elt<T>(&mut self, idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    fn read_map<T>(&mut self, f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self, 0);
        Err(UnimplementedDecoder)
    }
    fn read_map_elt_key<T>(&mut self, idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    fn read_map_elt_val<T>(&mut self, idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
}
Remove the `priv` visibility qualifiers from the Decoder fields.
#![crate_id = "typedopts"]
#![crate_type = "lib"]
extern crate getopts;
extern crate serialize;
use getopts::Matches;
use std::from_str::FromStr;
use std::str::StrSlice;
use serialize::Decodable;
/// Ways in which decoding command-line options can fail.
#[deriving(Eq, Show)]
pub enum ErrorType {
    /// The requested decoder method has no command-line meaning.
    UnimplementedDecoder,
    /// A required option was absent; the payload is the field name.
    MissingField(~str),
    /// (field name, expected type, offending value).
    ExpectedType(~str, ~str, ~str)
}
/// Wrapper carrying a single decode error.
#[deriving(Eq, Show)]
pub struct Error {
    e: ErrorType
}
/// Outcome of decoding a value from the command line.
pub type DecodeResult<T> = Result<T, ErrorType>;
/// Pulls typed values out of getopts `Matches`.
pub struct Decoder {
    matches: Matches,
    // Name of the option/field currently being decoded.
    cur: ~str,
    // Name of the enum type currently being decoded (for error messages).
    current_type: ~str
}
impl Decoder {
    /// Wraps parsed getopts `Matches` in a decoder.
    pub fn new(matches: Matches) -> Decoder {
        Decoder {
            matches: matches,
            cur: ~"",
            current_type: ~""
        }
    }
    // Builds the "wrong type" error for the option currently being decoded.
    // NOTE(review): `unwrap()` assumes the option has a value; callers only
    // invoke this after `opt_str` returned `Some` — confirm if reused.
    fn expected(&self, expected_type: &str) -> ErrorType {
        ExpectedType(self.cur.to_owned(),
                     expected_type.to_owned(),
                     self.matches.opt_str(self.cur).unwrap())
    }
}
/// Decodes a `T` from getopts matches; struct field names map to option names.
pub fn decode<T:Send+Decodable<Decoder, ErrorType>>(matches: Matches) -> DecodeResult<T> {
    let mut decoder = Decoder::new(matches);
    Decodable::decode(&mut decoder)
}
impl ErrorType {
    /// Renders the error as a human-readable message.
    pub fn to_err_msg(self) -> ~str {
        match self {
            UnimplementedDecoder => format!("this function is not implemented"),
            MissingField(ref s) => format!("the required field '{}' is not present", s),
            ExpectedType(ref field, ref expected, ref value) => {
                format!("Expected type '{}' for field '{}' but got value '{}'", expected, field, value)
            }
        }
    }
}
impl<T:FromStr> Decoder {
fn get_field<T:FromStr>(&self, field: &str) -> Option<T> {
match self.matches.opt_str(self.cur) {
None => None,
Some(s) => FromStr::from_str(s)
}
}
}
// Bridges `serialize::Decoder` onto getopts `Matches`: each scalar read
// parses the string value of the option named by `self.cur` (positioned by
// `read_struct_field`/`read_struct`). Shapes with no obvious command-line
// representation — tuples, sequences, maps, enum payloads — run the given
// closure and uniformly answer `UnimplementedDecoder`.
impl serialize::Decoder<ErrorType> for Decoder {
    fn read_nil(&mut self) -> DecodeResult<()> {
        Err(UnimplementedDecoder)
    }
    // All unsigned reads funnel through u64, then narrow with `as` casts.
    fn read_u64(&mut self) -> DecodeResult<u64> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => match FromStr::from_str(s) {
                None => Err(self.expected(~"u64")),
                Some(nb) => Ok(nb)
            }
        }
    }
    fn read_u32(&mut self) -> DecodeResult<u32> { Ok(try!(self.read_u64()) as u32) }
    fn read_u16(&mut self) -> DecodeResult<u16> { Ok(try!(self.read_u64()) as u16) }
    fn read_u8 (&mut self) -> DecodeResult<u8> { Ok(try!(self.read_u64()) as u8) }
    fn read_uint(&mut self) -> DecodeResult<uint> { Ok(try!(self.read_u64()) as uint) }
    // Signed reads funnel through i64 the same way.
    fn read_i64(&mut self) -> DecodeResult<i64> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => match FromStr::from_str(s) {
                None => Err(self.expected(~"i64")),
                Some(nb) => Ok(nb)
            }
        }
    }
    fn read_i32(&mut self) -> DecodeResult<i32> { Ok(try!(self.read_i64()) as i32) }
    fn read_i16(&mut self) -> DecodeResult<i16> { Ok(try!(self.read_i64()) as i16) }
    fn read_i8 (&mut self) -> DecodeResult<i8> { Ok(try!(self.read_i64()) as i8) }
    fn read_int(&mut self) -> DecodeResult<int> { Ok(try!(self.read_i64()) as int) }
    fn read_f32(&mut self) -> DecodeResult<f32> { Ok(try!(self.read_f64()) as f32) }
    fn read_f64(&mut self) -> DecodeResult<f64> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => match FromStr::from_str(s) {
                None => Err(self.expected(~"f64")),
                Some(nb) => Ok(nb)
            }
        }
    }
    fn read_bool(&mut self) -> DecodeResult<bool> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => match FromStr::from_str(s) {
                None => Err(self.expected("boolean")),
                Some(b) => Ok(b)
            }
        }
    }
    // A char option must be exactly one character long.
    fn read_char(&mut self) -> DecodeResult<char> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => if s.char_len() == 1 { Ok(s.char_at(0)) } else { Err(self.expected("char")) }
        }
    }
    fn read_str(&mut self) -> DecodeResult<~str> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => Ok(s)
        }
    }
    // Remember the enum's type name for error messages, then decode it.
    fn read_enum<T>(&mut self, name: &str, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        self.current_type = name.to_owned();
        f(self)
    }
    // C-like enums decode by matching the option value against variant names.
    fn read_enum_variant<T>(&mut self, names: &[&str], f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        match self.matches.opt_str(self.cur) {
            None => Err(MissingField(self.cur.clone())),
            Some(s) => match names.iter().position(|&e| e == s) {
                None => Err(self.expected(self.current_type + " enum")),
                Some(i) => f(self, i)
            }
        }
    }
    // The remaining shapes are unsupported: each runs `f` (for decoder
    // symmetry) and then reports UnimplementedDecoder.
    fn read_enum_variant_arg<T>(&mut self, a_idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    fn read_enum_struct_variant<T>(&mut self, names: &[&str], f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self, 0);
        Err(UnimplementedDecoder)
    }
    fn read_enum_struct_variant_field<T>(&mut self, f_name: &str, f_idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    // Structs: decode each field with `cur` pointing at the matching option.
    fn read_struct<T>(&mut self, s_name: &str, len: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        self.cur = s_name.to_owned();
        f(self)
    }
    fn read_struct_field<T>(&mut self, f_name: &str, f_idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        self.cur = f_name.to_owned();
        let data = f(self);
        data
    }
    // An `Option<T>` field is `Some` iff the flag appears on the command line.
    fn read_option<T>(&mut self, f: |&mut Decoder, bool| -> DecodeResult<T>) -> DecodeResult<T> {
        match self.matches.opt_str(self.cur) {
            None => {
                f(self, false)
            },
            Some(s) => {
                f(self, true)
            }
        }
    }
    fn read_tuple<T>(&mut self, f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self, 0);
        Err(UnimplementedDecoder)
    }
    fn read_tuple_arg<T>(&mut self, a_idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    fn read_tuple_struct<T>(&mut self, s_name: &str, f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self, 0);
        Err(UnimplementedDecoder)
    }
    fn read_tuple_struct_arg<T>(&mut self, a_idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    fn read_seq<T>(&mut self, f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self, 0);
        Err(UnimplementedDecoder)
    }
    fn read_seq_elt<T>(&mut self, idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    fn read_map<T>(&mut self, f: |&mut Decoder, uint| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self, 0);
        Err(UnimplementedDecoder)
    }
    fn read_map_elt_key<T>(&mut self, idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
    fn read_map_elt_val<T>(&mut self, idx: uint, f: |&mut Decoder| -> DecodeResult<T>) -> DecodeResult<T> {
        f(self);
        Err(UnimplementedDecoder)
    }
}
|
pub mod gregorian {
    /// Easter Sunday in the Gregorian calendar, computed with the
    /// anonymous (Meeus/Jones/Butcher) algorithm.
    ///
    /// Returns `(month, day)`, where `month` is 3 (March) or 4 (April).
    pub fn month_day(year: i32) -> (u32, u32) {
        // Position of the year in the 19-year Metonic cycle.
        let golden = year % 19;
        let century = year / 100;
        let year_of_century = year % 100;
        // Century-based solar and lunar corrections.
        let cent_leap = century / 4;
        let cent_rem = century % 4;
        let proleptic = (century + 8) / 25;
        let lunar_corr = (century - proleptic + 1) / 3;
        // Epact-like term: days from 21 March to the Paschal full moon.
        let epact = (19 * golden + century - cent_leap - lunar_corr + 15) % 30;
        let quarter = year_of_century / 4;
        let rem4 = year_of_century % 4;
        // Weekday correction to land on the following Sunday.
        let weekday = (32 + 2 * cent_rem + 2 * quarter - epact - rem4) % 7;
        let advance = (golden + 11 * epact + 22 * weekday) / 451;
        // Offset encodes both month and day-of-month.
        let offset = epact + weekday - 7 * advance + 114;
        ((offset / 31) as u32, (offset % 31 + 1) as u32)
    }
}
pub mod julian {
    /// Easter Sunday in the Julian calendar (Meeus's Julian algorithm).
    ///
    /// Returns `(month, day)` in the Julian calendar; `month` is 3 or 4.
    pub fn month_day(year: i32) -> (u32, u32) {
        // Cycle positions: 4-year leap cycle, 7-day week, 19-year lunar cycle.
        let leap_pos = year % 4;
        let week_pos = year % 7;
        let golden = year % 19;
        // Days from 21 March to the Paschal full moon.
        let full_moon = (19 * golden + 15) % 30;
        // Days from the full moon to the following Sunday.
        let to_sunday = (2 * leap_pos + 4 * week_pos - full_moon + 34) % 7;
        // Offset encodes both month and day-of-month.
        let offset = full_moon + to_sunday + 114;
        ((offset / 31) as u32, (offset % 31 + 1) as u32)
    }
}
#[cfg(test)]
mod tests {
#[test]
fn gregorian_month_day() {
use super::gregorian::month_day;
assert_eq!(month_day(1961), (4, 2));
assert_eq!(month_day(1996), (4, 7));
assert_eq!(month_day(1997), (3, 30));
assert_eq!(month_day(1998), (4, 12));
assert_eq!(month_day(2000), (4, 23));
assert_eq!(month_day(2001), (4, 15));
assert_eq!(month_day(2002), (3, 31));
assert_eq!(month_day(2003), (4, 20));
assert_eq!(month_day(2004), (4, 11));
assert_eq!(month_day(2005), (3, 27));
assert_eq!(month_day(2006), (4, 16));
assert_eq!(month_day(2007), (4, 8));
assert_eq!(month_day(2008), (3, 23));
assert_eq!(month_day(2009), (4, 12));
assert_eq!(month_day(2010), (4, 4));
assert_eq!(month_day(2011), (4, 24));
assert_eq!(month_day(2012), (4, 8));
assert_eq!(month_day(2013), (3, 31));
assert_eq!(month_day(2014), (4, 20));
assert_eq!(month_day(2015), (4, 5));
assert_eq!(month_day(2016), (3, 27));
assert_eq!(month_day(2017), (4, 16));
assert_eq!(month_day(2018), (4, 1));
assert_eq!(month_day(2019), (4, 21));
assert_eq!(month_day(2020), (4, 12));
}
#[test]
fn julian_month_day() {
use super::julian::month_day;
assert_eq!(month_day(1961), (3, 27));
assert_eq!(month_day(1996), (4, 1));
assert_eq!(month_day(1997), (4, 14));
assert_eq!(month_day(1998), (4, 6));
assert_eq!(month_day(1999), (3, 29));
assert_eq!(month_day(2000), (4, 17));
assert_eq!(month_day(2001), (4, 2));
assert_eq!(month_day(2002), (4, 22));
assert_eq!(month_day(2003), (4, 14));
assert_eq!(month_day(2004), (3, 29));
assert_eq!(month_day(2005), (4, 18));
assert_eq!(month_day(2006), (4, 10));
assert_eq!(month_day(2007), (3, 26));
assert_eq!(month_day(2008), (4, 14));
assert_eq!(month_day(2009), (4, 6));
assert_eq!(month_day(2010), (3, 22));
assert_eq!(month_day(2011), (4, 11));
assert_eq!(month_day(2012), (4, 2));
assert_eq!(month_day(2013), (4, 22));
assert_eq!(month_day(2014), (4, 7));
assert_eq!(month_day(2015), (3, 30));
assert_eq!(month_day(2016), (4, 18));
assert_eq!(month_day(2017), (4, 3));
assert_eq!(month_day(2018), (3, 26));
assert_eq!(month_day(2019), (4, 15));
assert_eq!(month_day(2020), (4, 6));
}
}
don't be so specific about i32
pub mod gregorian {
/// Easter in the Gregorian calendar
pub fn month_day(year: i32) -> (u32, u32) {
let a = year % 19;
let b = year / 100;
let c = year % 100;
let d = b / 4;
let e = b % 4;
let f = (b + 8) / 25;
let g = (b - f + 1) / 3;
let h = (19 * a + b - d - g + 15) % 30;
let i = c / 4;
let k = c % 4;
let l = (32 + 2 * e + 2 * i - h - k) % 7;
let m = (a + 11 * h + 22 * l) / 451;
let month = (h + l - 7 * m + 114) / 31;
let day = (h + l - 7 * m + 114) % 31 + 1;
(month as u32, day as u32)
}
}
pub mod julian {
/// Easter in the Julian calendar
pub fn month_day(year: i32) -> (u32, u32) {
let a = year % 4;
let b = year % 7;
let c = year % 19;
let d = (19 * c + 15) % 30;
let e = (2 * a + 4 * b - d + 34) % 7;
let f = d + e + 114;
let month = f / 31;
let day = f % 31 + 1;
(month as u32, day as u32)
}
}
#[cfg(test)]
mod tests {
#[test]
fn gregorian_month_day() {
use super::gregorian::month_day;
assert_eq!(month_day(1961), (4, 2));
assert_eq!(month_day(1996), (4, 7));
assert_eq!(month_day(1997), (3, 30));
assert_eq!(month_day(1998), (4, 12));
assert_eq!(month_day(2000), (4, 23));
assert_eq!(month_day(2001), (4, 15));
assert_eq!(month_day(2002), (3, 31));
assert_eq!(month_day(2003), (4, 20));
assert_eq!(month_day(2004), (4, 11));
assert_eq!(month_day(2005), (3, 27));
assert_eq!(month_day(2006), (4, 16));
assert_eq!(month_day(2007), (4, 8));
assert_eq!(month_day(2008), (3, 23));
assert_eq!(month_day(2009), (4, 12));
assert_eq!(month_day(2010), (4, 4));
assert_eq!(month_day(2011), (4, 24));
assert_eq!(month_day(2012), (4, 8));
assert_eq!(month_day(2013), (3, 31));
assert_eq!(month_day(2014), (4, 20));
assert_eq!(month_day(2015), (4, 5));
assert_eq!(month_day(2016), (3, 27));
assert_eq!(month_day(2017), (4, 16));
assert_eq!(month_day(2018), (4, 1));
assert_eq!(month_day(2019), (4, 21));
assert_eq!(month_day(2020), (4, 12));
}
#[test]
fn julian_month_day() {
use super::julian::month_day;
assert_eq!(month_day(1961), (3, 27));
assert_eq!(month_day(1996), (4, 1));
assert_eq!(month_day(1997), (4, 14));
assert_eq!(month_day(1998), (4, 6));
assert_eq!(month_day(1999), (3, 29));
assert_eq!(month_day(2000), (4, 17));
assert_eq!(month_day(2001), (4, 2));
assert_eq!(month_day(2002), (4, 22));
assert_eq!(month_day(2003), (4, 14));
assert_eq!(month_day(2004), (3, 29));
assert_eq!(month_day(2005), (4, 18));
assert_eq!(month_day(2006), (4, 10));
assert_eq!(month_day(2007), (3, 26));
assert_eq!(month_day(2008), (4, 14));
assert_eq!(month_day(2009), (4, 6));
assert_eq!(month_day(2010), (3, 22));
assert_eq!(month_day(2011), (4, 11));
assert_eq!(month_day(2012), (4, 2));
assert_eq!(month_day(2013), (4, 22));
assert_eq!(month_day(2014), (4, 7));
assert_eq!(month_day(2015), (3, 30));
assert_eq!(month_day(2016), (4, 18));
assert_eq!(month_day(2017), (4, 3));
assert_eq!(month_day(2018), (3, 26));
assert_eq!(month_day(2019), (4, 15));
assert_eq!(month_day(2020), (4, 6));
}
}
|
#![crate_name = "spread"]
#![comment = "A Rust client library for the Spread toolkit"]
#![crate_type = "lib"]
#![license = "MIT"]
#![feature(phase)]
#[deny(non_camel_case_types)]
extern crate encoding;
#[phase(plugin, link)] extern crate log;
use encoding::{Encoding, EncodeStrict, DecodeStrict};
use encoding::all::ISO_8859_1;
use std::io::{ConnectionFailed, ConnectionRefused, IoError, IoResult, OtherIoError};
use std::io::net::ip::SocketAddr;
use std::io::net::tcp::TcpStream;
use std::result::Result;
use util::{bytes_to_int, flip_endianness, int_to_bytes, same_endianness};
mod test;
mod util;
pub static DefaultSpreadPort: i16 = 4803;
static MaxPrivateNameLength: uint = 10;
static DefaultAuthName: &'static str = "NULL";
static MaxAuthNameLength: uint = 30;
static MaxAuthMethodCount: uint = 3;
static MaxGroupNameLength: uint = 32;
// Control message types.
// NOTE: The only currently-implemented service type for messaging is "reliable".
enum ControlServiceType {
JoinMessage = 0x00010000,
LeaveMessage = 0x00020000,
KillMessage = 0x00040000,
ReliableMessage = 0x00000002
}
static SpreadMajorVersion: u8 = 4;
static SpreadMinorVersion: u8 = 4;
static SpreadPatchVersion: u8 = 0;
// Error codes, as per http://www.spread.org/docs/spread_docs_4/docs/error_codes.html
pub enum SpreadError {
AcceptSession = 1,
IllegalSpread = -1,
CouldNotConnection = -2,
RejectQuota = -3,
RejectNOName = -4,
RejectIllegalName = -5,
RejectNotUnique = -6,
RejectVersion = -7,
ConnectionClosed = -8,
RejectAuth = -9,
IllegalSession = -11,
IllegalService = -12,
IllegalMessage = -13,
IllegalGroup = -14,
BufferTooShort = -15,
GroupsTooShort = -16,
MessageTooLong = -17,
NetErrorOnSession = -18
}
/// A message to be sent or received by a Spread client to/from a group.
pub struct SpreadMessage {
service_type: u32,
pub groups: Vec<String>,
pub sender: String,
pub data: Vec<u8>,
}
/// Representation of a client connection to a Spread daemon.
pub struct SpreadClient {
stream: TcpStream,
pub private_name: String,
pub groups: Vec<String>,
receive_membership_messages: bool
}
// Construct a byte vector representation of a connect message for the given
// connection arguments.
fn encode_connect_message(
private_name: &str,
receive_membership_messages: bool
) -> Result<Vec<u8>, String> {
let mut vec: Vec<u8> = Vec::new();
// Set Spread version.
vec.push(SpreadMajorVersion);
vec.push(SpreadMinorVersion);
vec.push(SpreadPatchVersion);
// Apply masks for group membership (and priority, which is unimplemented).
let mask = if receive_membership_messages {
0x10
} else {
0
};
vec.push(mask);
let private_name_buf = try!(ISO_8859_1.encode(private_name, EncodeStrict).map_err(
|_| format!("Failed to encode private name: {}", private_name)
));
vec.push(private_name.char_len() as u8);
vec.push_all_move(private_name_buf);
Ok(vec)
}
/// Establishes a named connection to a Spread daemon running at a given
/// `SocketAddr`.
///
/// *Arguments:*
///
/// - `addr`: The address at which the Spread daemon is running.
/// - `private_name`: A name to use privately to refer to the connection.
/// - `receive_membership_messages`: If true, membership messages will be
/// received by the resultant client.
pub fn connect(
addr: SocketAddr,
private_name: &str,
receive_membership_messages: bool
) -> IoResult<SpreadClient> {
// Truncate (if necessary) and write `private_name`.
let truncated_private_name = match private_name {
too_long if too_long.char_len() > MaxPrivateNameLength =>
too_long.slice_to(MaxPrivateNameLength),
just_fine => just_fine
};
// Send the initial connect message.
let connect_message = try!(encode_connect_message(
truncated_private_name,
receive_membership_messages
).map_err(|error_msg| IoError {
kind: ConnectionFailed,
desc: "",
detail: Some(error_msg)
}));
let mut stream = try!(TcpStream::connect(addr.ip.to_string().as_slice(), addr.port));
debug!("Sending connect message to {}", addr);
try!(stream.write(connect_message.as_slice()));
// Read the authentication methods.
let authname_len = try!(stream.read_byte()) as i32;
if authname_len == -1 {
return Err(IoError {
kind: ConnectionFailed,
desc: "Connection closed during connect attempt to read auth name length",
detail: None
});
} else if authname_len >= 128 {
return Err(IoError {
kind: ConnectionRefused,
desc: "Connection attempt rejected",
detail: Some(format!("{}", (0xffffff00 | authname_len as u32) as i32))
});
}
// Ignore the list.
// TODO: Support IP-based auth?
let authname_vec = try!(stream.read_exact(authname_len as uint));
let authname = try!(ISO_8859_1.decode(authname_vec.as_slice(), DecodeStrict).map_err(|error| IoError {
kind: OtherIoError,
desc: "Failed to decode received authname",
detail: Some(String::from_str(error.as_slice()))
}));
debug!("Received authentication method choice(s): {}", authname);
// Send auth method choice.
let mut authname_vec: Vec<u8> = match ISO_8859_1.encode(DefaultAuthName, EncodeStrict) {
Ok(vec) => vec,
Err(error) => return Err(IoError {
kind: ConnectionFailed,
desc: "Failed to encode authname",
detail: Some(format!("{}", error))
})
};
for _ in range(authname_len as uint, (MaxAuthNameLength * MaxAuthMethodCount + 1)) {
authname_vec.push(0);
}
debug!("Sending authentication method choice of {}", DefaultAuthName);
try!(stream.write(authname_vec.as_slice()));
// Check for an accept message.
let accepted: u8 = try!(stream.read_byte());
if accepted != AcceptSession as u8 {
return Err(IoError {
kind: ConnectionFailed,
desc: "Connection attempt rejected",
detail: Some(format!("{}", (0xffffff00 | accepted as u32) as i32))
});
}
debug!("Received session acceptance message from daemon");
// Read the version of Spread that the server is running.
let (major, minor, patch) =
(try!(stream.read_byte()) as i32, try!(stream.read_byte()) as i32, try!(stream.read_byte()) as i32);
debug!(
"Received version message: daemon running Spread version {}.{}.{}",
major, minor, patch
);
if major == -1 || minor == -1 || patch == -1 {
return Err(IoError {
kind: ConnectionFailed,
desc: "Invalid version returned from server",
detail: Some(format!("{}.{}.{}", major, minor, patch))
});
}
let version_sum = (major*10000) + (minor*100) + patch;
if version_sum < 30100 {
return Err(IoError {
kind: ConnectionFailed,
desc: "Server is running old, unsupported version of Spread",
detail: Some(format!("{}.{}.{}", major, minor, patch))
});
}
// Read the private group name.
let group_name_len = try!(stream.read_byte()) as i32;
if group_name_len == -1 {
return Err(IoError {
kind: ConnectionFailed,
desc: "Connection closed during connect attempt to read group name length",
detail: None
});
}
let group_name_buf = try!(stream.read_exact(group_name_len as uint));
let private_group_name = match String::from_utf8(group_name_buf) {
Ok(group_name) => group_name,
Err(error) => return Err(IoError {
kind: ConnectionFailed,
desc: "Server sent invalid group name",
detail: Some(format!("{}", error))
})
};
debug!("Received private name assignment from daemon: {}", private_group_name);
debug!("Client connected to daemon at {}", addr);
Ok(SpreadClient {
stream: stream,
private_name: private_group_name,
groups: Vec::new(),
receive_membership_messages: receive_membership_messages
})
}
impl SpreadClient {
// Encode a service message for dispatch to a Spread daemon.
fn encode_message(
service_type: u32,
private_name: &str,
groups: &[&str],
data: &[u8]
) -> Result<Vec<u8>, String> {
let mut vec: Vec<u8> = Vec::new();
vec.push_all_move(int_to_bytes(service_type));
let private_name_buf = try!(ISO_8859_1.encode(private_name, EncodeStrict).map_err(
|_| format!("Failed to encode private name: {}", private_name)
));
vec.push_all_move(private_name_buf);
for _ in range(private_name.len(), (MaxGroupNameLength)) {
vec.push(0);
}
vec.push_all_move(int_to_bytes(groups.len() as u32));
vec.push_all_move(int_to_bytes(0));
vec.push_all_move(int_to_bytes(data.len() as u32));
// Encode and push each group name, converting any encoding errors
// to error message strings.
for group in groups.iter() {
let group_buf = try!(ISO_8859_1.encode(*group, EncodeStrict).map_err(
|_| format!("Failed to encode group name: {}", group)
));
vec.push_all_move(group_buf);
for _ in range(group.len(), (MaxGroupNameLength)) {
vec.push(0);
}
}
vec.push_all(data);
Ok(vec)
}
/// Disconnects the client from the Spread daemon.
// TODO: Prevent further usage of client?
pub fn disconnect(&mut self) -> IoResult<()> {
let name_slice = self.private_name.as_slice();
let kill_message = try!(SpreadClient::encode_message(
KillMessage as u32,
name_slice,
[name_slice],
[]
).map_err(|error_msg| IoError {
kind: OtherIoError,
desc: "Disconnection failed",
detail: Some(error_msg)
}));
debug!("Disconnecting from daemon at {}", try!(self.stream.peer_name()));
self.stream.write(kill_message.as_slice())
}
/// Join a named Spread group.
///
/// All messages sent to the group will be received by the client until it
/// has left the group.
pub fn join(&mut self, group_name: &str) -> IoResult<()> {
let join_message = try!(SpreadClient::encode_message(
JoinMessage as u32,
self.private_name.as_slice(),
[group_name],
[]
).map_err(|error_msg| IoError {
kind: OtherIoError,
desc: "Disconnection failed",
detail: Some(error_msg)
}));
debug!("Client \"{}\" joining group \"{}\"", self.private_name, group_name);
try!(self.stream.write(join_message.as_slice()));
self.groups.push(group_name.to_string());
Ok(())
}
/// Leave a named Spread group.
pub fn leave(&mut self, group_name: &str) -> IoResult<()> {
let leave_message = try!(SpreadClient::encode_message(
LeaveMessage as u32,
self.private_name.as_slice(),
[group_name],
[]
).map_err(|error_msg| IoError {
kind: OtherIoError,
desc: "Disconnection failed",
detail: Some(error_msg)
}));
debug!("Client \"{}\" leaving group \"{}\"", self.private_name, group_name);
try!(self.stream.write(leave_message.as_slice()));
self.groups.push(group_name.to_string());
Ok(())
}
/// Send a message to a set of named groups.
pub fn multicast(
&mut self,
groups: &[&str],
data: &[u8]
) -> IoResult<()> {
let message = try!(SpreadClient::encode_message(
ReliableMessage as u32,
self.private_name.as_slice(),
groups,
data
).map_err(|error_msg| IoError {
kind: OtherIoError,
desc: "Disconnection failed",
detail: Some(error_msg)
}));
debug!("Client \"{}\" multicasting {} bytes to group(s) {}",
self.private_name, data.len(), groups);
self.stream.write(message.as_slice())
}
/// Receive the next available message. If there are no messages available,
/// the call will block until either a message is received or a timeout
/// expires.
pub fn receive(&mut self) -> IoResult<SpreadMessage> {
// Header format (sizes in bytes):
// svc_type: 4
// sender: 32
// num_groups: 4
// hint: 4
// data_len: 4
let header_vec = try!(self.stream.read_exact(MaxGroupNameLength + 16));
let is_correct_endianness = same_endianness(bytes_to_int(header_vec.slice(0, 4)));
let svc_type = match (is_correct_endianness, bytes_to_int(header_vec.slice(0, 4))) {
(true, correct) => correct,
(false, incorrect) => flip_endianness(incorrect)
};
let sender = try!(
ISO_8859_1.decode(header_vec.slice(4, 36), DecodeStrict).map_err(|error| IoError {
kind: OtherIoError,
desc: "Failed to decode sender name",
detail: Some(String::from_str(error.as_slice()))
})
);
let num_groups = match (is_correct_endianness, bytes_to_int(header_vec.slice(36, 40))) {
(true, correct) => correct,
(false, incorrect) => flip_endianness(incorrect)
};
let data_len = match (is_correct_endianness, bytes_to_int(header_vec.slice(44, 48))) {
(true, correct) => correct,
(false, incorrect) => flip_endianness(incorrect)
};
// Groups format (sizes in bytes):
// groups: num_groups
let groups_vec =
try!(self.stream.read_exact(MaxGroupNameLength * num_groups as uint));
let mut groups = Vec::new();
for n in range(0, num_groups) {
let i: uint = n as uint * MaxGroupNameLength;
let group = try!(
ISO_8859_1.decode(groups_vec.slice(i, i + MaxGroupNameLength), DecodeStrict)
.map_err(|error| IoError {
kind: OtherIoError,
desc: "Failed to decode group name",
detail: Some(String::from_str(error.as_slice()))
}));
groups.push(group);
}
// Data format (sizes in bytes):
// data: data_len
let data_vec = try!(self.stream.read_exact(data_len as uint));
debug!("Received {} bytes from \"{}\" sent to group(s) {}",
data_len, sender, groups);
Ok(SpreadMessage {
service_type: svc_type as u32,
groups: groups,
sender: sender,
data: data_vec
})
}
}
Fix incorrect error messages, closing-paren indentation.
#![crate_name = "spread"]
#![comment = "A Rust client library for the Spread toolkit"]
#![crate_type = "lib"]
#![license = "MIT"]
#![feature(phase)]
#[deny(non_camel_case_types)]
extern crate encoding;
#[phase(plugin, link)] extern crate log;
use encoding::{Encoding, EncodeStrict, DecodeStrict};
use encoding::all::ISO_8859_1;
use std::io::{ConnectionFailed, ConnectionRefused, IoError, IoResult, OtherIoError};
use std::io::net::ip::SocketAddr;
use std::io::net::tcp::TcpStream;
use std::result::Result;
use util::{bytes_to_int, flip_endianness, int_to_bytes, same_endianness};
mod test;
mod util;
pub static DefaultSpreadPort: i16 = 4803;
static MaxPrivateNameLength: uint = 10;
static DefaultAuthName: &'static str = "NULL";
static MaxAuthNameLength: uint = 30;
static MaxAuthMethodCount: uint = 3;
static MaxGroupNameLength: uint = 32;
// Control message types.
// NOTE: The only currently-implemented service type for messaging is "reliable".
enum ControlServiceType {
JoinMessage = 0x00010000,
LeaveMessage = 0x00020000,
KillMessage = 0x00040000,
ReliableMessage = 0x00000002
}
static SpreadMajorVersion: u8 = 4;
static SpreadMinorVersion: u8 = 4;
static SpreadPatchVersion: u8 = 0;
// Error codes, as per http://www.spread.org/docs/spread_docs_4/docs/error_codes.html
pub enum SpreadError {
AcceptSession = 1,
IllegalSpread = -1,
CouldNotConnection = -2,
RejectQuota = -3,
RejectNOName = -4,
RejectIllegalName = -5,
RejectNotUnique = -6,
RejectVersion = -7,
ConnectionClosed = -8,
RejectAuth = -9,
IllegalSession = -11,
IllegalService = -12,
IllegalMessage = -13,
IllegalGroup = -14,
BufferTooShort = -15,
GroupsTooShort = -16,
MessageTooLong = -17,
NetErrorOnSession = -18
}
/// A message to be sent or received by a Spread client to/from a group.
pub struct SpreadMessage {
service_type: u32,
pub groups: Vec<String>,
pub sender: String,
pub data: Vec<u8>,
}
/// Representation of a client connection to a Spread daemon.
pub struct SpreadClient {
stream: TcpStream,
pub private_name: String,
pub groups: Vec<String>,
receive_membership_messages: bool
}
// Construct a byte vector representation of a connect message for the given
// connection arguments.
fn encode_connect_message(
private_name: &str,
receive_membership_messages: bool
) -> Result<Vec<u8>, String> {
let mut vec: Vec<u8> = Vec::new();
// Set Spread version.
vec.push(SpreadMajorVersion);
vec.push(SpreadMinorVersion);
vec.push(SpreadPatchVersion);
// Apply masks for group membership (and priority, which is unimplemented).
let mask = if receive_membership_messages {
0x10
} else {
0
};
vec.push(mask);
let private_name_buf = try!(ISO_8859_1.encode(private_name, EncodeStrict).map_err(
|_| format!("Failed to encode private name: {}", private_name)
));
vec.push(private_name.char_len() as u8);
vec.push_all_move(private_name_buf);
Ok(vec)
}
/// Establishes a named connection to a Spread daemon running at a given
/// `SocketAddr`.
///
/// *Arguments:*
///
/// - `addr`: The address at which the Spread daemon is running.
/// - `private_name`: A name to use privately to refer to the connection.
/// - `receive_membership_messages`: If true, membership messages will be
/// received by the resultant client.
pub fn connect(
addr: SocketAddr,
private_name: &str,
receive_membership_messages: bool
) -> IoResult<SpreadClient> {
// Truncate (if necessary) and write `private_name`.
let truncated_private_name = match private_name {
too_long if too_long.char_len() > MaxPrivateNameLength =>
too_long.slice_to(MaxPrivateNameLength),
just_fine => just_fine
};
// Send the initial connect message.
let connect_message = try!(encode_connect_message(
truncated_private_name,
receive_membership_messages
).map_err(|error_msg| IoError {
kind: ConnectionFailed,
desc: "",
detail: Some(error_msg)
}));
let mut stream = try!(TcpStream::connect(addr.ip.to_string().as_slice(), addr.port));
debug!("Sending connect message to {}", addr);
try!(stream.write(connect_message.as_slice()));
// Read the authentication methods.
let authname_len = try!(stream.read_byte()) as i32;
if authname_len == -1 {
return Err(IoError {
kind: ConnectionFailed,
desc: "Connection closed during connect attempt to read auth name length",
detail: None
});
} else if authname_len >= 128 {
return Err(IoError {
kind: ConnectionRefused,
desc: "Connection attempt rejected",
detail: Some(format!("{}", (0xffffff00 | authname_len as u32) as i32))
});
}
// Ignore the list.
// TODO: Support IP-based auth?
let authname_vec = try!(stream.read_exact(authname_len as uint));
let authname = try!(ISO_8859_1.decode(authname_vec.as_slice(), DecodeStrict).map_err(|error| IoError {
kind: OtherIoError,
desc: "Failed to decode received authname",
detail: Some(String::from_str(error.as_slice()))
}));
debug!("Received authentication method choice(s): {}", authname);
// Send auth method choice.
let mut authname_vec: Vec<u8> = match ISO_8859_1.encode(DefaultAuthName, EncodeStrict) {
Ok(vec) => vec,
Err(error) => return Err(IoError {
kind: ConnectionFailed,
desc: "Failed to encode authname",
detail: Some(format!("{}", error))
})
};
for _ in range(authname_len as uint, (MaxAuthNameLength * MaxAuthMethodCount + 1)) {
authname_vec.push(0);
}
debug!("Sending authentication method choice of {}", DefaultAuthName);
try!(stream.write(authname_vec.as_slice()));
// Check for an accept message.
let accepted: u8 = try!(stream.read_byte());
if accepted != AcceptSession as u8 {
return Err(IoError {
kind: ConnectionFailed,
desc: "Connection attempt rejected",
detail: Some(format!("{}", (0xffffff00 | accepted as u32) as i32))
});
}
debug!("Received session acceptance message from daemon");
// Read the version of Spread that the server is running.
let (major, minor, patch) =
(try!(stream.read_byte()) as i32, try!(stream.read_byte()) as i32, try!(stream.read_byte()) as i32);
debug!(
"Received version message: daemon running Spread version {}.{}.{}",
major, minor, patch
);
if major == -1 || minor == -1 || patch == -1 {
return Err(IoError {
kind: ConnectionFailed,
desc: "Invalid version returned from server",
detail: Some(format!("{}.{}.{}", major, minor, patch))
});
}
let version_sum = (major*10000) + (minor*100) + patch;
if version_sum < 30100 {
return Err(IoError {
kind: ConnectionFailed,
desc: "Server is running old, unsupported version of Spread",
detail: Some(format!("{}.{}.{}", major, minor, patch))
});
}
// Read the private group name.
let group_name_len = try!(stream.read_byte()) as i32;
if group_name_len == -1 {
return Err(IoError {
kind: ConnectionFailed,
desc: "Connection closed during connect attempt to read group name length",
detail: None
});
}
let group_name_buf = try!(stream.read_exact(group_name_len as uint));
let private_group_name = match String::from_utf8(group_name_buf) {
Ok(group_name) => group_name,
Err(error) => return Err(IoError {
kind: ConnectionFailed,
desc: "Server sent invalid group name",
detail: Some(format!("{}", error))
})
};
debug!("Received private name assignment from daemon: {}", private_group_name);
debug!("Client connected to daemon at {}", addr);
Ok(SpreadClient {
stream: stream,
private_name: private_group_name,
groups: Vec::new(),
receive_membership_messages: receive_membership_messages
})
}
impl SpreadClient {
// Encode a service message for dispatch to a Spread daemon.
fn encode_message(
service_type: u32,
private_name: &str,
groups: &[&str],
data: &[u8]
) -> Result<Vec<u8>, String> {
let mut vec: Vec<u8> = Vec::new();
vec.push_all_move(int_to_bytes(service_type));
let private_name_buf = try!(ISO_8859_1.encode(private_name, EncodeStrict).map_err(
|_| format!("Failed to encode private name: {}", private_name)
));
vec.push_all_move(private_name_buf);
for _ in range(private_name.len(), (MaxGroupNameLength)) {
vec.push(0);
}
vec.push_all_move(int_to_bytes(groups.len() as u32));
vec.push_all_move(int_to_bytes(0));
vec.push_all_move(int_to_bytes(data.len() as u32));
// Encode and push each group name, converting any encoding errors
// to error message strings.
for group in groups.iter() {
let group_buf = try!(ISO_8859_1.encode(*group, EncodeStrict).map_err(
|_| format!("Failed to encode group name: {}", group)
));
vec.push_all_move(group_buf);
for _ in range(group.len(), (MaxGroupNameLength)) {
vec.push(0);
}
}
vec.push_all(data);
Ok(vec)
}
/// Disconnects the client from the Spread daemon.
// TODO: Prevent further usage of client?
pub fn disconnect(&mut self) -> IoResult<()> {
let name_slice = self.private_name.as_slice();
let kill_message = try!(SpreadClient::encode_message(
KillMessage as u32,
name_slice,
[name_slice],
[]
).map_err(|error_msg| IoError {
kind: OtherIoError,
desc: "Disconnection failed",
detail: Some(error_msg)
}));
debug!("Disconnecting from daemon at {}", try!(self.stream.peer_name()));
self.stream.write(kill_message.as_slice())
}
/// Join a named Spread group.
///
/// All messages sent to the group will be received by the client until it
/// has left the group.
pub fn join(&mut self, group_name: &str) -> IoResult<()> {
let join_message = try!(SpreadClient::encode_message(
JoinMessage as u32,
self.private_name.as_slice(),
[group_name],
[]
).map_err(|error_msg| IoError {
kind: OtherIoError,
desc: "Group join failed",
detail: Some(error_msg)
}));
debug!("Client \"{}\" joining group \"{}\"", self.private_name, group_name);
try!(self.stream.write(join_message.as_slice()));
self.groups.push(group_name.to_string());
Ok(())
}
/// Leave a named Spread group.
pub fn leave(&mut self, group_name: &str) -> IoResult<()> {
let leave_message = try!(SpreadClient::encode_message(
LeaveMessage as u32,
self.private_name.as_slice(),
[group_name],
[]
).map_err(|error_msg| IoError {
kind: OtherIoError,
desc: "Group leave failed",
detail: Some(error_msg)
}));
debug!("Client \"{}\" leaving group \"{}\"", self.private_name, group_name);
try!(self.stream.write(leave_message.as_slice()));
self.groups.push(group_name.to_string());
Ok(())
}
/// Send a message to a set of named groups.
pub fn multicast(
&mut self,
groups: &[&str],
data: &[u8]
) -> IoResult<()> {
let message = try!(SpreadClient::encode_message(
ReliableMessage as u32,
self.private_name.as_slice(),
groups,
data
).map_err(|error_msg| IoError {
kind: OtherIoError,
desc: "Multicast failed",
detail: Some(error_msg)
}));
debug!("Client \"{}\" multicasting {} bytes to group(s) {}",
self.private_name, data.len(), groups);
self.stream.write(message.as_slice())
}
/// Receive the next available message. If there are no messages available,
/// the call will block until either a message is received or a timeout
/// expires.
pub fn receive(&mut self) -> IoResult<SpreadMessage> {
// Header format (sizes in bytes):
// svc_type: 4
// sender: 32
// num_groups: 4
// hint: 4
// data_len: 4
let header_vec = try!(self.stream.read_exact(MaxGroupNameLength + 16));
let is_correct_endianness = same_endianness(bytes_to_int(header_vec.slice(0, 4)));
let svc_type = match (is_correct_endianness, bytes_to_int(header_vec.slice(0, 4))) {
(true, correct) => correct,
(false, incorrect) => flip_endianness(incorrect)
};
let sender = try!(
ISO_8859_1.decode(header_vec.slice(4, 36), DecodeStrict).map_err(|error| IoError {
kind: OtherIoError,
desc: "Failed to decode sender name",
detail: Some(String::from_str(error.as_slice()))
})
);
let num_groups = match (is_correct_endianness, bytes_to_int(header_vec.slice(36, 40))) {
(true, correct) => correct,
(false, incorrect) => flip_endianness(incorrect)
};
let data_len = match (is_correct_endianness, bytes_to_int(header_vec.slice(44, 48))) {
(true, correct) => correct,
(false, incorrect) => flip_endianness(incorrect)
};
// Groups format (sizes in bytes):
// groups: num_groups
let groups_vec =
try!(self.stream.read_exact(MaxGroupNameLength * num_groups as uint));
let mut groups = Vec::new();
for n in range(0, num_groups) {
let i: uint = n as uint * MaxGroupNameLength;
let group = try!(
ISO_8859_1.decode(groups_vec.slice(i, i + MaxGroupNameLength), DecodeStrict)
.map_err(|error| IoError {
kind: OtherIoError,
desc: "Failed to decode group name",
detail: Some(String::from_str(error.as_slice()))
}));
groups.push(group);
}
// Data format (sizes in bytes):
// data: data_len
let data_vec = try!(self.stream.read_exact(data_len as uint));
debug!("Received {} bytes from \"{}\" sent to group(s) {}",
data_len, sender, groups);
Ok(SpreadMessage {
service_type: svc_type as u32,
groups: groups,
sender: sender,
data: data_vec
})
}
}
|
#![feature(plugin_registrar, quote, rustc_private, custom_attribute)]
extern crate rustc_plugin;
extern crate syntax;
use rustc_plugin::registry::Registry;
use syntax::ast::{Attribute, Block, Expr, ExprKind, Ident, Item, ItemKind, Mac,
MetaItem, MetaItemKind};
use syntax::fold::{self, Folder};
use syntax::ptr::P;
use syntax::codemap::{DUMMY_SP, Span};
use syntax::ext::base::{Annotatable, ExtCtxt, SyntaxExtension};
use syntax::ext::build::AstBuilder;
use syntax::feature_gate::AttributeType;
use syntax::symbol::Symbol;
use syntax::util::small_vector::SmallVector;
pub fn insert_flame_guard(cx: &mut ExtCtxt, _span: Span, _mi: &MetaItem,
a: Annotatable) -> Annotatable {
match a {
Annotatable::Item(i) => Annotatable::Item(
Flamer { cx: cx, ident: i.ident }.fold_item(i).expect_one("expected exactly one item")),
Annotatable::TraitItem(i) => Annotatable::TraitItem(
i.map(|i| Flamer { cx: cx, ident: i.ident }.fold_trait_item(i).expect_one("expected exactly one item"))),
Annotatable::ImplItem(i) => Annotatable::ImplItem(
i.map(|i| Flamer { cx: cx, ident: i.ident }.fold_impl_item(i).expect_one("expected exactly one item"))),
}
}
struct Flamer<'a, 'cx: 'a> {
ident: Ident,
cx: &'a mut ExtCtxt<'cx>,
}
impl<'a, 'cx> Folder for Flamer<'a, 'cx> {
    fn fold_item(&mut self, item: P<Item>) -> SmallVector<P<Item>> {
        // Expand item-position macros first so the items they generate are
        // folded (and therefore instrumented) as well.
        if let ItemKind::Mac(_) = item.node {
            let expanded = self.cx.expander().fold_item(item);
            expanded.into_iter()
                .flat_map(|i| fold::noop_fold_item(i, self).into_iter())
                .collect()
        } else {
            fold::noop_fold_item(item, self)
        }
    }
    fn fold_item_simple(&mut self, i: Item) -> Item {
        // True for a bare-word `#[flame]` or `#[noflame]` attribute
        // (attributes with arguments never match).
        fn is_flame_annotation(attr: &Attribute) -> bool {
            match attr.value.node {
                MetaItemKind::Word => {
                    let name = &*attr.value.name.as_str();
                    name == "flame" || name == "noflame"
                },
                _ => false
            }
        }
        // don't double-flame nested annotations
        if i.attrs.iter().any(is_flame_annotation) { return i; }
        if let ItemKind::Mac(_) = i.node {
            // Unexpanded macro items are handled by fold_item, not here.
            return i;
        } else {
            self.ident = i.ident; // update in case of nested items
            fold::noop_fold_item_simple(i, self)
        }
    }
    fn fold_block(&mut self, block: P<Block>) -> P<Block> {
        // Rewrite the block as:
        //   { let g = ::flame::start_guard(<item name>);
        //     let r = <original block>; g.end(); r }
        // so the guard covers the whole block and its value is preserved.
        block.map(|block| {
            let name = self.cx.expr_str(DUMMY_SP, self.ident.name);
            quote_block!(self.cx, {
                let g = ::flame::start_guard($name);
                let r = $block;
                g.end();
                r
            }).unwrap()
        })
    }
    fn fold_expr(&mut self, expr: P<Expr>) -> P<Expr> {
        // Expression macros are expanded, then folded; any other expression
        // is returned unchanged (blocks are reached through fold_block).
        if let ExprKind::Mac(_) = expr.node {
            self.cx.expander().fold_expr(expr)
                .map(|e| fold::noop_fold_expr(e, self))
        } else {
            expr
        }
    }
    fn fold_mac(&mut self, mac: Mac) -> Mac {
        // Leave macros untouched; expansion is driven explicitly above.
        mac
    }
}

/// Rustc plugin entry point: registers `#[flame]` as a syntax-extension
/// modifier and whitelists `#[noflame]` so it does not trigger an
/// unknown-attribute warning.
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
    reg.register_syntax_extension(Symbol::intern("flame"),
        SyntaxExtension::MultiModifier(Box::new(insert_flame_guard)));
    reg.register_attribute(String::from("noflame"), AttributeType::Whitelisted);
}
rustup

This updates the attribute-checking code to work with the
TokenStream-based interface.
#![feature(plugin_registrar, quote, rustc_private, custom_attribute)]
extern crate rustc_plugin;
extern crate syntax;
use rustc_plugin::registry::Registry;
use syntax::ast::{Attribute, Block, Expr, ExprKind, Ident, Item, ItemKind, Mac,
MetaItem};
use syntax::fold::{self, Folder};
use syntax::ptr::P;
use syntax::codemap::{DUMMY_SP, Span};
use syntax::ext::base::{Annotatable, ExtCtxt, SyntaxExtension};
use syntax::ext::build::AstBuilder;
use syntax::feature_gate::AttributeType;
use syntax::symbol::Symbol;
use syntax::util::small_vector::SmallVector;
/// Entry point for the `#[flame]` attribute (a `MultiModifier` syntax
/// extension): folds the annotated item with a `Flamer`, which wraps every
/// block it visits in a `::flame::start_guard(..)` / `g.end()` pair.
///
/// Each fold is expected to yield exactly one item back; anything else
/// panics via `expect_one`.
pub fn insert_flame_guard(cx: &mut ExtCtxt, _span: Span, _mi: &MetaItem,
                          a: Annotatable) -> Annotatable {
    match a {
        // Free items are folded directly; trait/impl items live behind a
        // `P<..>` pointer, so the fold happens inside `map`.
        Annotatable::Item(i) => Annotatable::Item(
            Flamer { cx: cx, ident: i.ident }.fold_item(i).expect_one("expected exactly one item")),
        Annotatable::TraitItem(i) => Annotatable::TraitItem(
            i.map(|i| Flamer { cx: cx, ident: i.ident }.fold_trait_item(i).expect_one("expected exactly one item"))),
        Annotatable::ImplItem(i) => Annotatable::ImplItem(
            i.map(|i| Flamer { cx: cx, ident: i.ident }.fold_impl_item(i).expect_one("expected exactly one item"))),
    }
}

/// AST folder that injects flame guards into every block it visits.
struct Flamer<'a, 'cx: 'a> {
    // Name of the item currently being instrumented; used as the label
    // passed to `::flame::start_guard`.
    ident: Ident,
    // Extension context, needed for quasi-quoting and macro expansion.
    cx: &'a mut ExtCtxt<'cx>,
}
impl<'a, 'cx> Folder for Flamer<'a, 'cx> {
    fn fold_item(&mut self, item: P<Item>) -> SmallVector<P<Item>> {
        // Expand item-position macros first so the items they generate are
        // folded (and therefore instrumented) as well.
        if let ItemKind::Mac(_) = item.node {
            let expanded = self.cx.expander().fold_item(item);
            expanded.into_iter()
                .flat_map(|i| fold::noop_fold_item(i, self).into_iter())
                .collect()
        } else {
            fold::noop_fold_item(item, self)
        }
    }
    fn fold_item_simple(&mut self, i: Item) -> Item {
        // True for a `#[flame]` or `#[noflame]` attribute.  `attr.name()`
        // yields None for attributes that are not a plain word, which can
        // never be flame/noflame.
        fn is_flame_annotation(attr: &Attribute) -> bool {
            attr.name().map_or(false, |name|
                name == "flame" || name == "noflame")
        }
        // don't double-flame nested annotations
        if i.attrs.iter().any(is_flame_annotation) { return i; }
        if let ItemKind::Mac(_) = i.node {
            // Unexpanded macro items are handled by fold_item, not here.
            return i;
        } else {
            self.ident = i.ident; // update in case of nested items
            fold::noop_fold_item_simple(i, self)
        }
    }
    fn fold_block(&mut self, block: P<Block>) -> P<Block> {
        // Rewrite the block as:
        //   { let g = ::flame::start_guard(<item name>);
        //     let r = <original block>; g.end(); r }
        // so the guard covers the whole block and its value is preserved.
        block.map(|block| {
            let name = self.cx.expr_str(DUMMY_SP, self.ident.name);
            quote_block!(self.cx, {
                let g = ::flame::start_guard($name);
                let r = $block;
                g.end();
                r
            }).unwrap()
        })
    }
    fn fold_expr(&mut self, expr: P<Expr>) -> P<Expr> {
        // Expression macros are expanded, then folded; any other expression
        // is returned unchanged (blocks are reached through fold_block).
        if let ExprKind::Mac(_) = expr.node {
            self.cx.expander().fold_expr(expr)
                .map(|e| fold::noop_fold_expr(e, self))
        } else {
            expr
        }
    }
    fn fold_mac(&mut self, mac: Mac) -> Mac {
        // Leave macros untouched; expansion is driven explicitly above.
        mac
    }
}

/// Rustc plugin entry point: registers `#[flame]` as a syntax-extension
/// modifier and whitelists `#[noflame]` so it does not trigger an
/// unknown-attribute warning.
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
    reg.register_syntax_extension(Symbol::intern("flame"),
        SyntaxExtension::MultiModifier(Box::new(insert_flame_guard)));
    reg.register_attribute(String::from("noflame"), AttributeType::Whitelisted);
}
|
use std::collections::BTreeMap;
use std::fmt;
/// Intermediate instruction set produced by `parse` and rewritten by
/// `optimize`.  Offsets are relative to the current cell pointer.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Token {
    Output,                 // emit the value staged by LoadOut/LoadOutSet
    Input,                  // `,`: read one input byte into the current cell
    Loop,                   // `[`: loop while the current cell is non-zero
    EndLoop,                // `]`
    Move(i32),              // move the cell pointer by the given delta
    Add(i32, i32),          // (offset, value): cell[offset] += value
    Set(i32, i32),          // (offset, value): cell[offset] = value
    MulCopy(i32, i32, i32), // (src, dest, mul): cell[dest] += cell[src] * mul
    Scan(i32),              // step the pointer by delta until a zero cell
    LoadOut(i32, i32),      // (offset, add): stage cell[offset] + add for output
    LoadOutSet(i32),        // stage a constant value for output
    If(i32),                // run following tokens only if cell[offset] != 0
    EndIf,
}
use Token::*;
impl fmt::Debug for Token {
    /// Renders tokens with labelled operands, e.g. `Add(offset=0, value=1)`,
    /// which is easier to read in optimizer traces than the derived form.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Output =>
                write!(f, "Output"),
            Input =>
                write!(f, "Input"),
            Loop =>
                write!(f, "Loop"),
            EndLoop =>
                write!(f, "EndLoop"),
            Move(offset) =>
                write!(f, "Move(offset={})", offset),
            Add(offset, value) =>
                write!(f, "Add(offset={}, value={})", offset, value),
            Set(offset, value) =>
                write!(f, "Set(offset={}, value={})", offset, value),
            MulCopy(src, dest, mul) =>
                write!(f, "MulCopy(src={}, dest={}, mul={})", src, dest, mul),
            Scan(offset) =>
                write!(f, "Scan(offset={})", offset),
            LoadOut(offset, add) =>
                write!(f, "LoadOut(offset={}, add={})", offset, add),
            LoadOutSet(value) =>
                write!(f, "LoadOutSet(value={})", value),
            If(offset) =>
                write!(f, "If(offset={})", offset),
            // Fixed: this previously wrote "EndIf\n" — the only variant with
            // a trailing newline, which made `{:?}` output inconsistent.
            EndIf =>
                write!(f, "EndIf"),
        }
    }
}
/// Translates Brainfuck source into the token stream used by `optimize`.
///
/// Characters outside the eight Brainfuck commands are comments and are
/// skipped.  A `.` becomes the pair `LoadOut(0, 0)` followed by `Output`,
/// matching the two-step output model the optimizer works with.
pub fn parse(code: &str) -> Vec<Token> {
    code.chars()
        .flat_map(|command| match command {
            '+' => vec![Add(0, 1)],
            '-' => vec![Add(0, -1)],
            '>' => vec![Move(1)],
            '<' => vec![Move(-1)],
            '[' => vec![Loop],
            ']' => vec![EndLoop],
            ',' => vec![Input],
            '.' => vec![LoadOut(0, 0), Output],
            _ => vec![], // comment character
        })
        .collect()
}
/// Repeatedly rewrites the token stream into a cheaper equivalent form.
///
/// A single pass folds runs of `Add`/`Set`/`Move` into per-offset maps,
/// turns `[-]`-style loops into `Set(0, 0)`, `[->+<]`-style loops into
/// `MulCopy`/`Add`, and empty moving loops into `Scan`.  The function then
/// recurses on its own output until a pass changes nothing (fixed point).
pub fn optimize(tokens: Vec<Token>) -> Vec<Token> {
    let mut newtokens: Vec<Token> = Vec::with_capacity(tokens.len());
    // Net pointer movement accumulated but not yet emitted as a `Move`.
    let mut shift = 0;
    // A pending `Output` that may still merge with following tokens.
    let mut do_output = false;
    // With HashMap, the order sometimes switches
    // in recursion, and the optimizer never exits.
    // Pending `Add`s and `Set`s keyed by shift-adjusted offset.
    let mut adds: BTreeMap<i32, i32> = BTreeMap::new();
    let mut sets: BTreeMap<i32, i32> = BTreeMap::new();
    // Cell values known (via `Set`) just before the current loop; lets a
    // MulCopy with a statically-known source collapse into a plain Add.
    let mut pre_loop_sets: BTreeMap<i32, i32> = BTreeMap::new();
    for token in tokens.iter() {
        // Loop-closing special cases: a loop body consisting only of adds,
        // with no net pointer movement, that touches the counter cell.
        if *token == EndLoop && newtokens.last() == Some(&Loop) && shift == 0 && adds.contains_key(&0) {
            if adds.len() == 1 {
                // `[-]` / `[+]`: the loop only zeroes the current cell.
                newtokens.pop(); // Remove Loop
                if !sets.is_empty() {
                    // Pending sets must stay conditional on the counter being
                    // non-zero, so guard them with If/EndIf.
                    newtokens.push(If(0));
                    for (offset, value) in sets.iter() {
                        newtokens.push(Set(*offset, *value));
                    }
                    sets.clear();
                    newtokens.push(Set(0, 0));
                    newtokens.push(EndIf);
                } else {
                    sets.insert(0, 0);
                }
                pre_loop_sets.clear();
                adds.clear();
                continue
            } else if adds.get(&0) == Some(&-1) {
                // `[->+<]`-style copy/multiply loop: counter decrements by
                // one per iteration, every other offset accumulates a
                // multiple of the counter's starting value.
                newtokens.pop(); // Remove Loop
                if !sets.is_empty() {
                    newtokens.push(If(0));
                    for (offset, value) in sets.iter() {
                        newtokens.push(Set(*offset, *value));
                    }
                }
                for (offset, value) in adds.iter() {
                    if *offset != 0 {
                        let src = 0;
                        let dest = *offset;
                        let mul = *value;
                        if pre_loop_sets.contains_key(&src) {
                            // Source value is statically known, so the whole
                            // copy collapses to a constant Add.
                            let val = pre_loop_sets.get(&src).unwrap() * mul;
                            newtokens.push(Add(dest, val));
                        } else {
                            newtokens.push(MulCopy(src, dest, mul));
                        }
                    }
                }
                if !sets.is_empty() {
                    newtokens.push(EndIf);
                }
                pre_loop_sets.clear();
                adds.clear();
                sets.clear();
                // The loop leaves the counter cell at zero.
                sets.insert(0, 0);
                continue
            }
        }
        // Track statically-known cell values entering a loop.
        match *token {
            Loop => {
                pre_loop_sets.clear();
                for (offset, value) in sets.iter() {
                    pre_loop_sets.insert(*offset+shift, *value);
                }
            },
            Set(..) | Add(..) | Move(_) => {},
            _ => pre_loop_sets.clear()
        }
        // Any token that cannot be merged flushes the pending output, sets
        // and adds in that order.
        match *token {
            Set(..) | Add(..) | Move(_) | LoadOut(..) | LoadOutSet(_) | Output => {},
            _ => {
                if do_output {
                    newtokens.push(Output);
                    do_output = false;
                }
                for (offset, value) in sets.iter() {
                    newtokens.push(Set(*offset, *value));
                }
                for (offset, value) in adds.iter() {
                    newtokens.push(Add(*offset, *value));
                }
                sets.clear();
                adds.clear();
            }
        }
        // Tokens that depend on the real pointer position force the
        // accumulated shift to be materialised as a Move first.
        if shift != 0 {
            match *token {
                Loop | Input | Scan(_) => {
                    newtokens.push(Move(shift));
                    shift = 0;
                },
                _ => {}
            }
        }
        match *token {
            Set(mut offset, val) => {
                offset += shift;
                // Add before Set does nothing; remove it
                adds.remove(&offset);
                sets.insert(offset, val);
            },
            Add(mut offset, mut val) => {
                offset += shift;
                if sets.contains_key(&offset) {
                    // Fold the Add into the pending Set for this cell.
                    val = sets.get(&offset).unwrap() + val;
                    sets.insert(offset, val);
                } else {
                    val = adds.get(&offset).unwrap_or(&0) + val;
                    adds.insert(offset, val);
                }
            },
            MulCopy(src, dest, mul) =>
                newtokens.push(MulCopy(src+shift, dest+shift, mul)),
            // XXX Deal with shift in if, if those are ever generated
            If(offset) =>
                newtokens.push(If(offset+shift)),
            Move(offset) =>
                shift += offset,
            Output =>
                do_output = true,
            LoadOut(mut offset, add) => {
                offset += shift;
                if sets.contains_key(&offset) {
                    // Output value is fully known: emit a constant instead.
                    newtokens.push(LoadOutSet(sets.get(&offset).unwrap() + add));
                } else {
                    newtokens.push(LoadOut(offset, adds.get(&offset).unwrap_or(&0) + add));
                }
            },
            EndLoop => {
                // A loop containing only pointer movement is a Scan for the
                // next zero cell.
                if newtokens.last() == Some(&Loop) && shift != 0 && sets.is_empty() && adds.is_empty() {
                    newtokens.pop(); // Remove StartLoop
                    newtokens.push(Scan(shift));
                } else {
                    if shift != 0 {
                        newtokens.push(Move(shift));
                    }
                    newtokens.push(EndLoop);
                }
                shift = 0;
            },
            EndIf | LoadOutSet(_) | Loop | Input | Scan(_) =>
                newtokens.push(*token),
        }
    }
    // Any remaining add/set/shift is ignored, as it would have no effect
    if do_output {
        newtokens.push(Output);
    }
    // Optimize recursively
    if newtokens != tokens {
        optimize(newtokens)
    } else {
        newtokens
    }
}
Correct indentation
use std::collections::BTreeMap;
use std::fmt;
/// Intermediate instruction set produced by `parse` and rewritten by
/// `optimize`.  Offsets are relative to the current cell pointer.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Token {
    Output,                 // emit the value staged by LoadOut/LoadOutSet
    Input,                  // `,`: read one input byte into the current cell
    Loop,                   // `[`: loop while the current cell is non-zero
    EndLoop,                // `]`
    Move(i32),              // move the cell pointer by the given delta
    Add(i32, i32),          // (offset, value): cell[offset] += value
    Set(i32, i32),          // (offset, value): cell[offset] = value
    MulCopy(i32, i32, i32), // (src, dest, mul): cell[dest] += cell[src] * mul
    Scan(i32),              // step the pointer by delta until a zero cell
    LoadOut(i32, i32),      // (offset, add): stage cell[offset] + add for output
    LoadOutSet(i32),        // stage a constant value for output
    If(i32),                // run following tokens only if cell[offset] != 0
    EndIf,
}
use Token::*;
impl fmt::Debug for Token {
    /// Renders tokens with labelled operands, e.g. `Add(offset=0, value=1)`,
    /// which is easier to read in optimizer traces than the derived form.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Output =>
                write!(f, "Output"),
            Input =>
                write!(f, "Input"),
            Loop =>
                write!(f, "Loop"),
            EndLoop =>
                write!(f, "EndLoop"),
            Move(offset) =>
                write!(f, "Move(offset={})", offset),
            Add(offset, value) =>
                write!(f, "Add(offset={}, value={})", offset, value),
            Set(offset, value) =>
                write!(f, "Set(offset={}, value={})", offset, value),
            MulCopy(src, dest, mul) =>
                write!(f, "MulCopy(src={}, dest={}, mul={})", src, dest, mul),
            Scan(offset) =>
                write!(f, "Scan(offset={})", offset),
            LoadOut(offset, add) =>
                write!(f, "LoadOut(offset={}, add={})", offset, add),
            LoadOutSet(value) =>
                write!(f, "LoadOutSet(value={})", value),
            If(offset) =>
                write!(f, "If(offset={})", offset),
            // Fixed: this previously wrote "EndIf\n" — the only variant with
            // a trailing newline, which made `{:?}` output inconsistent.
            EndIf =>
                write!(f, "EndIf"),
        }
    }
}
/// Translates Brainfuck source into the token stream used by `optimize`.
///
/// Characters outside the eight Brainfuck commands are comments and are
/// skipped.  A `.` becomes the pair `LoadOut(0, 0)` followed by `Output`,
/// matching the two-step output model the optimizer works with.
pub fn parse(code: &str) -> Vec<Token> {
    code.chars()
        .flat_map(|command| match command {
            '+' => vec![Add(0, 1)],
            '-' => vec![Add(0, -1)],
            '>' => vec![Move(1)],
            '<' => vec![Move(-1)],
            '[' => vec![Loop],
            ']' => vec![EndLoop],
            ',' => vec![Input],
            '.' => vec![LoadOut(0, 0), Output],
            _ => vec![], // comment character
        })
        .collect()
}
/// Repeatedly rewrites the token stream into a cheaper equivalent form.
///
/// A single pass folds runs of `Add`/`Set`/`Move` into per-offset maps,
/// turns `[-]`-style loops into `Set(0, 0)`, `[->+<]`-style loops into
/// `MulCopy`/`Add`, and empty moving loops into `Scan`.  The function then
/// recurses on its own output until a pass changes nothing (fixed point).
pub fn optimize(tokens: Vec<Token>) -> Vec<Token> {
    let mut newtokens: Vec<Token> = Vec::with_capacity(tokens.len());
    // Net pointer movement accumulated but not yet emitted as a `Move`.
    let mut shift = 0;
    // A pending `Output` that may still merge with following tokens.
    let mut do_output = false;
    // With HashMap, the order sometimes switches
    // in recursion, and the optimizer never exits.
    // Pending `Add`s and `Set`s keyed by shift-adjusted offset.
    let mut adds: BTreeMap<i32, i32> = BTreeMap::new();
    let mut sets: BTreeMap<i32, i32> = BTreeMap::new();
    // Cell values known (via `Set`) just before the current loop; lets a
    // MulCopy with a statically-known source collapse into a plain Add.
    let mut pre_loop_sets: BTreeMap<i32, i32> = BTreeMap::new();
    for token in tokens.iter() {
        // Loop-closing special cases: a loop body consisting only of adds,
        // with no net pointer movement, that touches the counter cell.
        if *token == EndLoop && newtokens.last() == Some(&Loop) && shift == 0 && adds.contains_key(&0) {
            if adds.len() == 1 {
                // `[-]` / `[+]`: the loop only zeroes the current cell.
                newtokens.pop(); // Remove Loop
                if !sets.is_empty() {
                    // Pending sets must stay conditional on the counter being
                    // non-zero, so guard them with If/EndIf.
                    newtokens.push(If(0));
                    for (offset, value) in sets.iter() {
                        newtokens.push(Set(*offset, *value));
                    }
                    sets.clear();
                    newtokens.push(Set(0, 0));
                    newtokens.push(EndIf);
                } else {
                    sets.insert(0, 0);
                }
                pre_loop_sets.clear();
                adds.clear();
                continue
            } else if adds.get(&0) == Some(&-1) {
                // `[->+<]`-style copy/multiply loop: counter decrements by
                // one per iteration, every other offset accumulates a
                // multiple of the counter's starting value.
                newtokens.pop(); // Remove Loop
                if !sets.is_empty() {
                    newtokens.push(If(0));
                    for (offset, value) in sets.iter() {
                        newtokens.push(Set(*offset, *value));
                    }
                }
                for (offset, value) in adds.iter() {
                    if *offset != 0 {
                        let src = 0;
                        let dest = *offset;
                        let mul = *value;
                        if pre_loop_sets.contains_key(&src) {
                            // Source value is statically known, so the whole
                            // copy collapses to a constant Add.
                            let val = pre_loop_sets.get(&src).unwrap() * mul;
                            newtokens.push(Add(dest, val));
                        } else {
                            newtokens.push(MulCopy(src, dest, mul));
                        }
                    }
                }
                if !sets.is_empty() {
                    newtokens.push(EndIf);
                }
                pre_loop_sets.clear();
                adds.clear();
                sets.clear();
                // The loop leaves the counter cell at zero.
                sets.insert(0, 0);
                continue
            }
        }
        // Track statically-known cell values entering a loop.
        match *token {
            Loop => {
                pre_loop_sets.clear();
                for (offset, value) in sets.iter() {
                    pre_loop_sets.insert(*offset+shift, *value);
                }
            },
            Set(..) | Add(..) | Move(_) => {},
            _ => pre_loop_sets.clear()
        }
        // Any token that cannot be merged flushes the pending output, sets
        // and adds in that order.
        match *token {
            Set(..) | Add(..) | Move(_) | LoadOut(..) | LoadOutSet(_) | Output => {},
            _ => {
                if do_output {
                    newtokens.push(Output);
                    do_output = false;
                }
                for (offset, value) in sets.iter() {
                    newtokens.push(Set(*offset, *value));
                }
                for (offset, value) in adds.iter() {
                    newtokens.push(Add(*offset, *value));
                }
                sets.clear();
                adds.clear();
            }
        }
        // Tokens that depend on the real pointer position force the
        // accumulated shift to be materialised as a Move first.
        if shift != 0 {
            match *token {
                Loop | Input | Scan(_) => {
                    newtokens.push(Move(shift));
                    shift = 0;
                },
                _ => {}
            }
        }
        match *token {
            Set(mut offset, val) => {
                offset += shift;
                // Add before Set does nothing; remove it
                adds.remove(&offset);
                sets.insert(offset, val);
            },
            Add(mut offset, mut val) => {
                offset += shift;
                if sets.contains_key(&offset) {
                    // Fold the Add into the pending Set for this cell.
                    val = sets.get(&offset).unwrap() + val;
                    sets.insert(offset, val);
                } else {
                    val = adds.get(&offset).unwrap_or(&0) + val;
                    adds.insert(offset, val);
                }
            },
            MulCopy(src, dest, mul) =>
                newtokens.push(MulCopy(src+shift, dest+shift, mul)),
            // XXX Deal with shift in if, if those are ever generated
            If(offset) =>
                newtokens.push(If(offset+shift)),
            Move(offset) =>
                shift += offset,
            Output =>
                do_output = true,
            LoadOut(mut offset, add) => {
                offset += shift;
                if sets.contains_key(&offset) {
                    // Output value is fully known: emit a constant instead.
                    newtokens.push(LoadOutSet(sets.get(&offset).unwrap() + add));
                } else {
                    newtokens.push(LoadOut(offset, adds.get(&offset).unwrap_or(&0) + add));
                }
            },
            EndLoop => {
                // A loop containing only pointer movement is a Scan for the
                // next zero cell.
                if newtokens.last() == Some(&Loop) && shift != 0 && sets.is_empty() && adds.is_empty() {
                    newtokens.pop(); // Remove StartLoop
                    newtokens.push(Scan(shift));
                } else {
                    if shift != 0 {
                        newtokens.push(Move(shift));
                    }
                    newtokens.push(EndLoop);
                }
                shift = 0;
            },
            EndIf | LoadOutSet(_) | Loop | Input | Scan(_) =>
                newtokens.push(*token),
        }
    }
    // Any remaining add/set/shift is ignored, as it would have no effect
    if do_output {
        newtokens.push(Output);
    }
    // Optimize recursively
    if newtokens != tokens {
        optimize(newtokens)
    } else {
        newtokens
    }
}
|
//! # Mime
//!
//! Mime is now Media Type, technically, but `Mime` is more immediately
//! understandable, so the main type here is `Mime`.
//!
//! ## What is Mime?
//!
//! Example mime string: `text/plain;charset=utf-8`
//!
//! ```rust
//! # use std::from_str::FromStr;
//! use mime::{Mime, Text, Plain, Charset, Utf8};
//! let mime: Mime = FromStr::from_str("text/plain;charset=utf-8").unwrap();
//! assert_eq!(mime, Mime(Text, Plain, vec![(Charset, Utf8)]));
//! ```
#![license = "MIT"]
#![doc(html_root_url = "http://seanmonstar.github.io/mime.rs")]
#![experimental]
#![feature(macro_rules, phase)]
#[phase(plugin, link)]
extern crate log;
#[cfg(test)]
extern crate test;
use std::ascii::OwnedAsciiExt;
use std::cmp::Equiv;
use std::fmt;
use std::from_str::FromStr;
use std::iter::Enumerate;
use std::str::Chars;
// Debug helper: logs `$s: value` at debug level and yields the value
// unchanged, so it can wrap arbitrary expressions inline.
macro_rules! inspect(
    ($s:expr, $t:expr) => ({
        let t = $t;
        debug!("inspect {}: {}", $s, t);
        t
    })
)
/// Mime, or Media Type. Encapsulates common registers types.
///
/// Consider that a traditional mime type contains a "top level type",
/// a "sub level type", and 0-N "parameters". And they're all strings.
/// Strings everywhere. Strings mean typos. Rust has type safety. We should
/// use types!
///
/// So, Mime bundles together this data into types so the compiler can catch
/// your typos.
///
/// This improves things so you use match without Strings:
///
/// ```rust
/// use std::from_str::from_str;
/// use mime::{Mime, Application, Json};
///
/// let mime: mime::Mime = from_str("application/json").unwrap();
///
/// match mime {
///     Mime(Application, Json, _) => println!("matched json!"),
///     _ => ()
/// }
/// ```
#[deriving(Clone, PartialEq)]
pub struct Mime(pub TopLevel, pub SubLevel, pub Vec<Param>);
// Defines a public enum of well-known values plus an `$ext(String)`
// catch-all variant, with a `fmt::Show` impl that renders each variant's
// canonical text and a `FromStr` impl that matches that text, falling back
// to `$ext` for anything unrecognised.
macro_rules! enoom (
    (pub enum $en:ident; $ext:ident; $($ty:ident, $text:expr;)*) => (
        #[deriving(Clone, PartialEq)]
        pub enum $en {
            $($ty),*,
            $ext(String)
        }
        impl fmt::Show for $en {
            fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                match *self {
                    $($ty => $text),*,
                    $ext(ref s) => return s.fmt(fmt)
                }.fmt(fmt)
            }
        }
        impl FromStr for $en {
            fn from_str(s: &str) -> Option<$en> {
                Some(match s {
                    $(_s if _s == $text => $ty),*,
                    s => $ext(inspect!(stringify!($ext), s).to_string())
                })
            }
        }
    )
)
// Registered top-level media types (plus `*` for ranges).
enoom! {
    pub enum TopLevel;
    TopExt;
    TopStar, "*"; // remove Top prefix if enums gain namespaces
    Text, "text";
    Image, "image";
    Audio, "audio";
    Video, "video";
    Application, "application";
    Multipart, "multipart";
    Message, "message";
    Model, "model";
}
// Common subtypes; anything else falls into `SubExt(String)`.
enoom! {
    pub enum SubLevel;
    SubExt;
    SubStar, "*"; // remove Sub prefix if enums gain namespaces
    // common text/*
    Plain, "plain";
    Html, "html";
    Xml, "xml";
    Javascript, "javascript";
    Css, "css";
    // common application/*
    Json, "json";
    // common image/*
    Png, "png";
    Gif, "gif";
    Bmp, "bmp";
    Jpeg, "jpeg";
}
// Well-known parameter attribute names.
enoom! {
    pub enum Attr;
    AttrExt;
    Charset, "charset";
    Q, "q";
}
// Well-known parameter values.
enoom! {
    pub enum Value;
    ValueExt;
    Utf8, "utf-8";
}
// One `attr=value` media-type parameter.
pub type Param = (Attr, Value);
impl Equiv<Mime> for Mime {
    // Equivalence check via the rendered text form.
    fn equiv(&self, other: &Mime) -> bool {
        //im so sorry
        //TODO: be less sorry. dont to_string()
        self.to_string() == other.to_string()
    }
}
impl FromStr for Mime {
    /// Parses `type/subtype[;attr=value]*`, e.g. `text/plain;charset=utf-8`.
    ///
    /// The input is ASCII-lowercased up front, making parsing
    /// case-insensitive.  Returns `None` on any syntax error.
    fn from_str(raw: &str) -> Option<Mime> {
        let ascii = raw.to_string().into_ascii_lower(); // lifetimes :(
        let raw = ascii.as_slice();
        let len = raw.len();
        let mut iter = raw.chars().enumerate();
        let mut params = vec![];
        // toplevel: consume restricted-name characters up to the '/'.
        let mut start;
        let mut top;
        loop {
            match inspect!("top iter", iter.next()) {
                Some((0, c)) if is_restricted_name_first_char(c) => (),
                Some((i, c)) if i > 0 && is_restricted_name_char(c) => (),
                Some((i, '/')) if i > 0 => match FromStr::from_str(raw.slice_to(i)) {
                    Some(t) => {
                        top = t;
                        start = i + 1;
                        break;
                    }
                    None => return None
                },
                _ => return None // EOF and no toplevel is no Mime
            };
        }
        // sublevel: up to a ';' (parameters follow) or end of input.
        let mut sub;
        loop {
            match inspect!("sub iter", iter.next()) {
                Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
                Some((i, c)) if i > start && is_restricted_name_char(c) => (),
                Some((i, ';')) if i > start => match FromStr::from_str(raw.slice(start, i)) {
                    Some(s) => {
                        sub = s;
                        start = i + 1;
                        break;
                    }
                    None => return None
                },
                None => match FromStr::from_str(raw.slice_from(start)) {
                    // No parameters: the rest of the input is the subtype.
                    Some(s) => return Some(Mime(top, s, params)),
                    None => return None
                },
                _ => return None
            };
        }
        // params: parse `attr=value` pairs until the input is exhausted.
        debug!("starting params, len={}", len);
        loop {
            match inspect!("param", param_from_str(raw, &mut iter, start)) {
                Some((p, end)) => {
                    params.push(p);
                    start = end;
                    if start >= len {
                        break;
                    }
                }
                None => break
            }
        }
        Some(Mime(top, sub, params))
    }
}
/// Parses a single `attr=value` parameter beginning at byte index `start`,
/// returning the parameter and the index just past it.  Leading spaces
/// before the attribute are skipped; values may be `"`-quoted.
fn param_from_str(raw: &str, iter: &mut Enumerate<Chars>, mut start: uint) -> Option<(Param, uint)> {
    let mut attr;
    debug!("param_from_str, start={}", start);
    // Attribute name, terminated by '='.
    loop {
        match inspect!("attr iter", iter.next()) {
            Some((i, ' ')) if i == start => start = i + 1,
            Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
            Some((i, c)) if i > start && is_restricted_name_char(c) => (),
            Some((i, '=')) if i > start => match FromStr::from_str(raw.slice(start, i)) {
                Some(a) => {
                    attr = inspect!("attr", a);
                    start = i + 1;
                    break;
                },
                None => return None
            },
            _ => return None
        }
    }
    let mut value;
    // values must be restrict-name-char or "anything goes"
    let mut is_quoted = false;
    // Value, terminated by a closing quote, a ';', or end of input.
    loop {
        match inspect!("value iter", iter.next()) {
            Some((i, '"')) if i == start => {
                debug!("quoted");
                is_quoted = true;
                start = i + 1;
            },
            Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
            Some((i, '"')) if i > start && is_quoted => match FromStr::from_str(raw.slice(start, i)) {
                Some(v) => {
                    value = v;
                    start = i + 1;
                    break;
                },
                None => return None
            },
            Some((i, c)) if i > start && is_quoted || is_restricted_name_char(c) => (),
            Some((i, ';')) if i > start => match FromStr::from_str(raw.slice(start, i)) {
                Some(v) => {
                    value = v;
                    start = i + 1;
                    break;
                },
                None => return None
            },
            None => match FromStr::from_str(raw.slice_from(start)) {
                Some(v) => {
                    value = v;
                    start = raw.len();
                    break;
                },
                None => return None
            },
            _ => return None
        }
    }
    Some(((attr, value), start))
}
// From [RFC6838](http://tools.ietf.org/html/rfc6838#section-4.2):
//
// > All registered media types MUST be assigned top-level type and
// > subtype names. The combination of these names serves to uniquely
// > identify the media type, and the subtype name facet (or the absence
// > of one) identifies the registration tree. Both top-level type and
// > subtype names are case-insensitive.
// >
// > Type and subtype names MUST conform to the following ABNF:
// >
// > type-name = restricted-name
// > subtype-name = restricted-name
// >
// > restricted-name = restricted-name-first *126restricted-name-chars
// > restricted-name-first = ALPHA / DIGIT
// > restricted-name-chars = ALPHA / DIGIT / "!" / "#" /
// > "$" / "&" / "-" / "^" / "_"
// > restricted-name-chars =/ "." ; Characters before first dot always
// > ; specify a facet name
// > restricted-name-chars =/ "+" ; Characters after last plus always
// > ; specify a structured syntax suffix
//
// First character of a restricted name (ALPHA / DIGIT per RFC 6838).
// Only lowercase letters are matched because `Mime::from_str` lowercases
// its input before calling this.
fn is_restricted_name_first_char(c: char) -> bool {
    match c {
        'a'...'z' |
        '0'...'9' => true,
        _ => false
    }
}
// Any later character of a restricted name: the first-char set plus the
// punctuation permitted by RFC 6838.
fn is_restricted_name_char(c: char) -> bool {
    if is_restricted_name_first_char(c) {
        true
    } else {
        match c {
            '!' |
            '#' |
            '$' |
            '&' |
            '-' |
            '^' |
            '.' |
            '+' |
            '_' => true,
            _ => false
        }
    }
}
// Writes every parameter as `; attr=value` after the `type/subtype` part.
#[inline]
fn fmt_params<T: AsSlice<Param>>(params: T, fmt: &mut fmt::Formatter) -> fmt::Result {
    for param in params.as_slice().iter() {
        try!(fmt_param(param, fmt));
    }
    Ok(())
}
#[inline]
fn fmt_param(param: &Param, fmt: &mut fmt::Formatter) -> fmt::Result {
    let (ref attr, ref value) = *param;
    write!(fmt, "; {}={}", attr, value)
}
#[cfg(test)]
mod tests {
    use std::from_str::{FromStr, from_str};
    use test::Bencher;
    use super::{Mime, Text, Plain, Charset, Utf8, AttrExt, ValueExt};
    // Rendering: parameters come out as `; attr=value`.
    #[test]
    fn test_mime_show() {
        let mime = Mime(Text, Plain, vec![]);
        assert_eq!(mime.to_string(), "text/plain".to_string());
        let mime = Mime(Text, Plain, vec![(Charset, Utf8)]);
        assert_eq!(mime.to_string(), "text/plain; charset=utf-8".to_string());
    }
    // Parsing: case-insensitive, quoted values, and unknown attr/value pairs.
    #[test]
    fn test_mime_from_str() {
        assert_eq!(FromStr::from_str("text/plain"), Some(Mime(Text, Plain, vec![])));
        assert_eq!(FromStr::from_str("TEXT/PLAIN"), Some(Mime(Text, Plain, vec![])));
        assert_eq!(FromStr::from_str("text/plain; charset=utf-8"), Some(Mime(Text, Plain, vec![(Charset, Utf8)])));
        assert_eq!(FromStr::from_str("text/plain;charset=\"utf-8\""), Some(Mime(Text, Plain, vec![(Charset, Utf8)])));
        assert_eq!(FromStr::from_str("text/plain; charset=utf-8; foo=bar"),
            Some(Mime(Text, Plain, vec![(Charset, Utf8),
            (AttrExt("foo".to_string()), ValueExt("bar".to_string())) ])));
    }
    #[bench]
    fn bench_show(b: &mut Bencher) {
        let mime = Mime(Text, Plain, vec![(Charset, Utf8), (AttrExt("foo".to_string()), ValueExt("bar".to_string()))]);
        b.bytes = mime.to_string().as_bytes().len() as u64;
        b.iter(|| mime.to_string())
    }
    #[bench]
    fn bench_from_str(b: &mut Bencher) {
        let s = "text/plain; charset=utf-8; foo=bar";
        b.bytes = s.as_bytes().len() as u64;
        b.iter(|| from_str::<Mime>(s))
    }
}
Use AsciiExt instead of OwnedAsciiExt
//! # Mime
//!
//! Mime is now Media Type, technically, but `Mime` is more immediately
//! understandable, so the main type here is `Mime`.
//!
//! ## What is Mime?
//!
//! Example mime string: `text/plain;charset=utf-8`
//!
//! ```rust
//! # use std::from_str::FromStr;
//! use mime::{Mime, Text, Plain, Charset, Utf8};
//! let mime: Mime = FromStr::from_str("text/plain;charset=utf-8").unwrap();
//! assert_eq!(mime, Mime(Text, Plain, vec![(Charset, Utf8)]));
//! ```
#![license = "MIT"]
#![doc(html_root_url = "http://seanmonstar.github.io/mime.rs")]
#![experimental]
#![feature(macro_rules, phase)]
#[phase(plugin, link)]
extern crate log;
#[cfg(test)]
extern crate test;
use std::ascii::AsciiExt;
use std::cmp::Equiv;
use std::fmt;
use std::from_str::FromStr;
use std::iter::Enumerate;
use std::str::Chars;
// Debug helper: logs `$s: value` at debug level and yields the value
// unchanged, so it can wrap arbitrary expressions inline.
macro_rules! inspect(
    ($s:expr, $t:expr) => ({
        let t = $t;
        debug!("inspect {}: {}", $s, t);
        t
    })
)
/// Mime, or Media Type. Encapsulates common registers types.
///
/// Consider that a traditional mime type contains a "top level type",
/// a "sub level type", and 0-N "parameters". And they're all strings.
/// Strings everywhere. Strings mean typos. Rust has type safety. We should
/// use types!
///
/// So, Mime bundles together this data into types so the compiler can catch
/// your typos.
///
/// This improves things so you use match without Strings:
///
/// ```rust
/// use std::from_str::from_str;
/// use mime::{Mime, Application, Json};
///
/// let mime: mime::Mime = from_str("application/json").unwrap();
///
/// match mime {
///     Mime(Application, Json, _) => println!("matched json!"),
///     _ => ()
/// }
/// ```
#[deriving(Clone, PartialEq)]
pub struct Mime(pub TopLevel, pub SubLevel, pub Vec<Param>);
// Defines a public enum of well-known values plus an `$ext(String)`
// catch-all variant, with a `fmt::Show` impl that renders each variant's
// canonical text and a `FromStr` impl that matches that text, falling back
// to `$ext` for anything unrecognised.
macro_rules! enoom (
    (pub enum $en:ident; $ext:ident; $($ty:ident, $text:expr;)*) => (
        #[deriving(Clone, PartialEq)]
        pub enum $en {
            $($ty),*,
            $ext(String)
        }
        impl fmt::Show for $en {
            fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
                match *self {
                    $($ty => $text),*,
                    $ext(ref s) => return s.fmt(fmt)
                }.fmt(fmt)
            }
        }
        impl FromStr for $en {
            fn from_str(s: &str) -> Option<$en> {
                Some(match s {
                    $(_s if _s == $text => $ty),*,
                    s => $ext(inspect!(stringify!($ext), s).to_string())
                })
            }
        }
    )
)
// Registered top-level media types (plus `*` for ranges).
enoom! {
    pub enum TopLevel;
    TopExt;
    TopStar, "*"; // remove Top prefix if enums gain namespaces
    Text, "text";
    Image, "image";
    Audio, "audio";
    Video, "video";
    Application, "application";
    Multipart, "multipart";
    Message, "message";
    Model, "model";
}
// Common subtypes; anything else falls into `SubExt(String)`.
enoom! {
    pub enum SubLevel;
    SubExt;
    SubStar, "*"; // remove Sub prefix if enums gain namespaces
    // common text/*
    Plain, "plain";
    Html, "html";
    Xml, "xml";
    Javascript, "javascript";
    Css, "css";
    // common application/*
    Json, "json";
    // common image/*
    Png, "png";
    Gif, "gif";
    Bmp, "bmp";
    Jpeg, "jpeg";
}
// Well-known parameter attribute names.
enoom! {
    pub enum Attr;
    AttrExt;
    Charset, "charset";
    Q, "q";
}
// Well-known parameter values.
enoom! {
    pub enum Value;
    ValueExt;
    Utf8, "utf-8";
}
// One `attr=value` media-type parameter.
pub type Param = (Attr, Value);
impl Equiv<Mime> for Mime {
    // Equivalence check via the rendered text form.
    fn equiv(&self, other: &Mime) -> bool {
        //im so sorry
        //TODO: be less sorry. dont to_string()
        self.to_string() == other.to_string()
    }
}
impl fmt::Show for Mime {
    // Renders `type/subtype` followed by `; attr=value` for each parameter.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let Mime(ref top, ref sub, ref params) = *self;
        try!(write!(fmt, "{}/{}", top, sub));
        fmt_params(params.as_slice(), fmt)
    }
}
impl FromStr for Mime {
    /// Parses `type/subtype[;attr=value]*`, e.g. `text/plain;charset=utf-8`.
    ///
    /// The input is ASCII-lowercased up front, making parsing
    /// case-insensitive.  Returns `None` on any syntax error.
    fn from_str(raw: &str) -> Option<Mime> {
        let ascii = raw.to_ascii_lower(); // lifetimes :(
        let raw = ascii.as_slice();
        let len = raw.len();
        let mut iter = raw.chars().enumerate();
        let mut params = vec![];
        // toplevel: consume restricted-name characters up to the '/'.
        let mut start;
        let mut top;
        loop {
            match inspect!("top iter", iter.next()) {
                Some((0, c)) if is_restricted_name_first_char(c) => (),
                Some((i, c)) if i > 0 && is_restricted_name_char(c) => (),
                Some((i, '/')) if i > 0 => match FromStr::from_str(raw.slice_to(i)) {
                    Some(t) => {
                        top = t;
                        start = i + 1;
                        break;
                    }
                    None => return None
                },
                _ => return None // EOF and no toplevel is no Mime
            };
        }
        // sublevel: up to a ';' (parameters follow) or end of input.
        let mut sub;
        loop {
            match inspect!("sub iter", iter.next()) {
                Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
                Some((i, c)) if i > start && is_restricted_name_char(c) => (),
                Some((i, ';')) if i > start => match FromStr::from_str(raw.slice(start, i)) {
                    Some(s) => {
                        sub = s;
                        start = i + 1;
                        break;
                    }
                    None => return None
                },
                None => match FromStr::from_str(raw.slice_from(start)) {
                    // No parameters: the rest of the input is the subtype.
                    Some(s) => return Some(Mime(top, s, params)),
                    None => return None
                },
                _ => return None
            };
        }
        // params: parse `attr=value` pairs until the input is exhausted.
        debug!("starting params, len={}", len);
        loop {
            match inspect!("param", param_from_str(raw, &mut iter, start)) {
                Some((p, end)) => {
                    params.push(p);
                    start = end;
                    if start >= len {
                        break;
                    }
                }
                None => break
            }
        }
        Some(Mime(top, sub, params))
    }
}
/// Parses a single `attr=value` parameter beginning at byte index `start`,
/// returning the parameter and the index just past it.  Leading spaces
/// before the attribute are skipped; values may be `"`-quoted.
fn param_from_str(raw: &str, iter: &mut Enumerate<Chars>, mut start: uint) -> Option<(Param, uint)> {
    let mut attr;
    debug!("param_from_str, start={}", start);
    // Attribute name, terminated by '='.
    loop {
        match inspect!("attr iter", iter.next()) {
            Some((i, ' ')) if i == start => start = i + 1,
            Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
            Some((i, c)) if i > start && is_restricted_name_char(c) => (),
            Some((i, '=')) if i > start => match FromStr::from_str(raw.slice(start, i)) {
                Some(a) => {
                    attr = inspect!("attr", a);
                    start = i + 1;
                    break;
                },
                None => return None
            },
            _ => return None
        }
    }
    let mut value;
    // values must be restrict-name-char or "anything goes"
    let mut is_quoted = false;
    // Value, terminated by a closing quote, a ';', or end of input.
    loop {
        match inspect!("value iter", iter.next()) {
            Some((i, '"')) if i == start => {
                debug!("quoted");
                is_quoted = true;
                start = i + 1;
            },
            Some((i, c)) if i == start && is_restricted_name_first_char(c) => (),
            Some((i, '"')) if i > start && is_quoted => match FromStr::from_str(raw.slice(start, i)) {
                Some(v) => {
                    value = v;
                    start = i + 1;
                    break;
                },
                None => return None
            },
            Some((i, c)) if i > start && is_quoted || is_restricted_name_char(c) => (),
            Some((i, ';')) if i > start => match FromStr::from_str(raw.slice(start, i)) {
                Some(v) => {
                    value = v;
                    start = i + 1;
                    break;
                },
                None => return None
            },
            None => match FromStr::from_str(raw.slice_from(start)) {
                Some(v) => {
                    value = v;
                    start = raw.len();
                    break;
                },
                None => return None
            },
            _ => return None
        }
    }
    Some(((attr, value), start))
}
// From [RFC6838](http://tools.ietf.org/html/rfc6838#section-4.2):
//
// > All registered media types MUST be assigned top-level type and
// > subtype names. The combination of these names serves to uniquely
// > identify the media type, and the subtype name facet (or the absence
// > of one) identifies the registration tree. Both top-level type and
// > subtype names are case-insensitive.
// >
// > Type and subtype names MUST conform to the following ABNF:
// >
// > type-name = restricted-name
// > subtype-name = restricted-name
// >
// > restricted-name = restricted-name-first *126restricted-name-chars
// > restricted-name-first = ALPHA / DIGIT
// > restricted-name-chars = ALPHA / DIGIT / "!" / "#" /
// > "$" / "&" / "-" / "^" / "_"
// > restricted-name-chars =/ "." ; Characters before first dot always
// > ; specify a facet name
// > restricted-name-chars =/ "+" ; Characters after last plus always
// > ; specify a structured syntax suffix
//
// First character of a restricted name (ALPHA / DIGIT per RFC 6838).
// Only lowercase letters are matched because `Mime::from_str` lowercases
// its input before calling this.
fn is_restricted_name_first_char(c: char) -> bool {
    match c {
        'a'...'z' |
        '0'...'9' => true,
        _ => false
    }
}
// Any later character of a restricted name: the first-char set plus the
// punctuation permitted by RFC 6838.
fn is_restricted_name_char(c: char) -> bool {
    if is_restricted_name_first_char(c) {
        true
    } else {
        match c {
            '!' |
            '#' |
            '$' |
            '&' |
            '-' |
            '^' |
            '.' |
            '+' |
            '_' => true,
            _ => false
        }
    }
}
// Writes every parameter as `; attr=value` after the `type/subtype` part.
#[inline]
fn fmt_params<T: AsSlice<Param>>(params: T, fmt: &mut fmt::Formatter) -> fmt::Result {
    for param in params.as_slice().iter() {
        try!(fmt_param(param, fmt));
    }
    Ok(())
}
#[inline]
fn fmt_param(param: &Param, fmt: &mut fmt::Formatter) -> fmt::Result {
    let (ref attr, ref value) = *param;
    write!(fmt, "; {}={}", attr, value)
}
#[cfg(test)]
mod tests {
    use std::from_str::{FromStr, from_str};
    use test::Bencher;
    use super::{Mime, Text, Plain, Charset, Utf8, AttrExt, ValueExt};
    // Rendering: parameters come out as `; attr=value`.
    #[test]
    fn test_mime_show() {
        let mime = Mime(Text, Plain, vec![]);
        assert_eq!(mime.to_string(), "text/plain".to_string());
        let mime = Mime(Text, Plain, vec![(Charset, Utf8)]);
        assert_eq!(mime.to_string(), "text/plain; charset=utf-8".to_string());
    }
    // Parsing: case-insensitive, quoted values, and unknown attr/value pairs.
    #[test]
    fn test_mime_from_str() {
        assert_eq!(FromStr::from_str("text/plain"), Some(Mime(Text, Plain, vec![])));
        assert_eq!(FromStr::from_str("TEXT/PLAIN"), Some(Mime(Text, Plain, vec![])));
        assert_eq!(FromStr::from_str("text/plain; charset=utf-8"), Some(Mime(Text, Plain, vec![(Charset, Utf8)])));
        assert_eq!(FromStr::from_str("text/plain;charset=\"utf-8\""), Some(Mime(Text, Plain, vec![(Charset, Utf8)])));
        assert_eq!(FromStr::from_str("text/plain; charset=utf-8; foo=bar"),
            Some(Mime(Text, Plain, vec![(Charset, Utf8),
            (AttrExt("foo".to_string()), ValueExt("bar".to_string())) ])));
    }
    #[bench]
    fn bench_show(b: &mut Bencher) {
        let mime = Mime(Text, Plain, vec![(Charset, Utf8), (AttrExt("foo".to_string()), ValueExt("bar".to_string()))]);
        b.bytes = mime.to_string().as_bytes().len() as u64;
        b.iter(|| mime.to_string())
    }
    #[bench]
    fn bench_from_str(b: &mut Bencher) {
        let s = "text/plain; charset=utf-8; foo=bar";
        b.bytes = s.as_bytes().len() as u64;
        b.iter(|| from_str::<Mime>(s))
    }
}
|
#![crate_type = "lib"]
#![crate_name = "currency"]
use std::cmp::PartialEq;
use std::cmp::PartialOrd;
use std::cmp::Ordering;
use std::ops::Add;
use std::ops::Sub;
use std::ops::Mul;
use std::ops::Div;
use std::fmt::Display;
use std::fmt::Formatter;
use std::fmt::Result;
use std::marker::Copy;
/// Represents currency through an optional symbol and amount of coin.
///
/// Each 100 coins results in a banknote. (100 is formatted as 1.00)
/// The currency will be formatted as such:
/// Currency(Some('$'), 432) ==> "$4.32"
pub struct Currency(pub Option<char>, pub i64);
impl Currency {
    /// Creates a blank Currency as Currency(None, 0)
    fn new() -> Currency {
        Currency(None, 0)
    }

    /// Parses a string literal and turns it into a currency.
    ///
    /// Parsing ignores spaces and commas, only taking note of the digits and
    /// leading sign.
    ///
    /// # Examples
    /// Currency::from_string("$4.32") -> Currency(Some('$'), 432)
    /// Currency::from_string("424.44") -> Currency(None, 42444)
    /// Currency::from_string("@12") -> Currency(Some('@'), 1200)
    ///
    /// # Failures
    /// Fails to take note of the floating point's position.
    /// Currency::from_string("$42.012") -> Currency(Some('$'), 42012)
    /// Currency::from_string("42.") -> Currency(None, 42)
    ///
    /// # Panics
    /// Panics if a number fails to be parsed; this only occurs if the string
    /// argument has no numbers in it. Also panics on an empty string.
    ///
    /// # Safety
    /// If a decimal point is intended to be marked, always use '.'
    /// A "European style" ',' will be ignored.
    /// Use Currency::from_string("€4.32") instead of Currency::from_string("€4,32")
    fn from_string(s: &str) -> Currency {
        // Any leading non-digit character is treated as the currency symbol.
        // Comparing the char itself (instead of an `as u8` truncation) keeps
        // multi-byte symbols such as '€' intact, and `is_digit` correctly
        // excludes '0' and '9', which the old `<= 0x30 || >= 0x39` byte-range
        // check misclassified as symbols.
        let first = s.chars().next().expect("Cannot parse an empty string");
        let sign = if first.is_digit(10) { None } else { Some(first) };

        // Collect all digits; remember whether an explicit '.' appeared so we
        // know whether the string already includes the coin (cent) part.
        let mut should_multiply = true;
        let mut coin_str = String::new();
        for c in s.chars() {
            if c.is_digit(10) {
                coin_str.push(c);
            } else if c == '.' {
                should_multiply = false;
            }
        }

        // Parse the digit run; panics when the string held no digits at all.
        let mut coin: i64 = coin_str.parse()
            .ok()
            .expect("Failed to convert string to currency");
        if should_multiply {
            coin *= 100;
        }
        Currency(sign, coin)
    }
}
/// Overloads the '==' operator for Currency objects.
///
/// Two Currencies are equal only when both the symbol and the amount match;
/// unlike the arithmetic operators, comparison for equality never panics —
/// mismatched symbols simply compare unequal.
impl PartialEq<Currency> for Currency {
    fn eq(&self, rhs: &Currency) -> bool {
        self.0 == rhs.0 && self.1 == rhs.1
    }
    // `ne` is intentionally omitted: the trait's default (`!self.eq(rhs)`)
    // is exactly the De Morgan dual of the hand-written expansion it replaces.
}
/// Overloads the order operators for Currency objects.
///
/// These operators include '<', '<=', '>', and '>='.
///
/// # Panics
/// Panics if the two comparators are different types of currency, as denoted by
/// the Currency's symbol.
impl PartialOrd<Currency> for Currency {
    fn partial_cmp(&self, rhs: &Currency) -> Option<Ordering> {
        // Currencies with different symbols have no defined ordering; the
        // symbol guard also keeps the `<`/`>` calls below from panicking.
        if self.0 == rhs.0 {
            if self < rhs { return Some(Ordering::Less) }
            if self == rhs { return Some(Ordering::Equal) }
            if self > rhs { return Some(Ordering::Greater) }
        }
        None
    }
    fn lt(&self, rhs: &Currency) -> bool {
        // Honor the documented contract: ordering different currencies panics
        // (previously the symbol was silently ignored — the "TODO: sign
        // checking" this code carried).
        if self.0 == rhs.0 {
            self.1 < rhs.1
        } else {
            panic!("Cannot compare two different types of currency.");
        }
    }
    fn le(&self, rhs: &Currency) -> bool {
        self < rhs || self == rhs
    }
    fn gt(&self, rhs: &Currency) -> bool {
        if self.0 == rhs.0 {
            self.1 > rhs.1
        } else {
            panic!("Cannot compare two different types of currency.");
        }
    }
    fn ge(&self, rhs: &Currency) -> bool {
        self > rhs || self == rhs
    }
}
/// Overloads the '+' operator for Currency objects.
///
/// # Panics
/// Panics if the two addends are different types of currency, as denoted by the
/// Currency's symbol.
impl Add for Currency {
    type Output = Currency;
    fn add(self, rhs: Currency) -> Currency {
        // Mixed-symbol arithmetic is meaningless; refuse it loudly.
        if self.0 != rhs.0 {
            panic!("Cannot do arithmetic on two different types of currency!");
        }
        Currency(self.0, self.1 + rhs.1)
    }
}
/// Overloads the '-' operator for Currency objects.
///
/// # Panics
/// Panics if the minuend and subtrahend are two different types of currency,
/// as denoted by the Currency's symbol.
impl Sub for Currency {
    type Output = Currency;
    fn sub(self, rhs: Currency) -> Currency {
        // Mixed-symbol arithmetic is meaningless; refuse it loudly.
        if self.0 != rhs.0 {
            panic!("Cannot do arithmetic on two different types of currency!");
        }
        Currency(self.0, self.1 - rhs.1)
    }
}
/// Overloads the '*' operator for Currency objects.
///
/// Allows a Currency to be multiplied by an i64.
impl Mul<i64> for Currency {
type Output = Currency;
fn mul(self, rhs: i64) -> Currency {
Currency(self.0, self.1 * rhs)
}
}
/// Overloads the '*' operator for i64.
///
/// Allows an i64 to be multiplied by a Currency.
/// Completes the commutative property for i64 multiplied by Currency.
impl Mul<Currency> for i64 {
type Output = Currency;
fn mul(self, rhs: Currency) -> Currency {
Currency(rhs.0, rhs.1 * self)
}
}
/// Overloads the '/' operator for Currency objects.
///
/// Allows a Currency to be divided by an i64.
impl Div<i64> for Currency {
type Output = Currency;
fn div(self, rhs: i64) -> Currency {
Currency(self.0, self.1 / rhs)
}
}
/// Allows Currencies to be displayed as Strings.
///
/// # Examples
/// Currency(Some('$'), 1210).to_string() == "$12.10"
/// Currency(None, 1210).to_string() == "12.10"
/// Currency(Some('$'), 1205).to_string() == "$12.05"
/// Currency(Some('$'), -63).to_string() == "$-0.63"
impl Display for Currency {
    fn fmt(&self, f: &mut Formatter) -> Result {
        // Split the absolute amount into banknotes and coins, zero-padding
        // the coins to two digits, then re-apply the sign once. The previous
        // code printed 1205 as "12.5" (missing padding) and negative amounts
        // as e.g. "0.-63".
        let sign = if self.1 < 0 { "-" } else { "" };
        let amount = self.1.abs();
        match self.0 {
            Some(c) => write!(f, "{}{}{}.{:02}", c, sign, amount / 100, amount % 100),
            None => write!(f, "{}{}.{:02}", sign, amount / 100, amount % 100),
        }
    }
}
/// Allows Currencies to be copied, rather than using move semantics.
impl Copy for Currency {}
impl Clone for Currency {
    /// Cloning a Currency is a plain bitwise copy of its (symbol, coins) pair.
    fn clone(&self) -> Currency { *self }
}
// Equality compares both symbol and amount.
#[test]
fn eq_works() {
let a = Currency(Some('$'), 1210);
let b = Currency(Some('$'), 1210);
let c = Currency(Some('$'), 1251);
assert!(a == b);
assert!(b == b);
assert!(b == a);
assert!(a != c);
}
// Ordering is defined for same-symbol currencies via partial_cmp and the
// comparison operators.
#[test]
fn ord_works() {
let a = Currency(Some('$'), 1210);
let b = Currency(Some('$'), 1211);
let c = Currency(Some('$'), 1311);
let d = Currency(Some('$'), 1210);
assert_eq!(a.partial_cmp(&b), Some(Ordering::Less));
assert_eq!(a.partial_cmp(&c), Some(Ordering::Less));
assert_eq!(a.partial_cmp(&d), Some(Ordering::Equal));
assert_eq!(c.partial_cmp(&a), Some(Ordering::Greater));
assert!(a < b);
assert!(a < c);
assert!(a <= a);
assert!(a <= c);
assert!(b > a);
assert!(c > a);
assert!(a >= a);
assert!(c >= a);
}
// +, -, * (both operand orders), and / on same-symbol currencies.
#[test]
fn arithmetic_works() {
let x = Currency(Some('$'), 1206);
let y = Currency(Some('$'), 1143);
assert!(x + y == Currency(Some('$'), 2349)
&& y + x == Currency(Some('$'), 2349));
assert!(x - y == Currency(Some('$'), 63));
assert!(y - x == Currency(Some('$'), -63));
assert!(x * 2 == Currency(Some('$'), 2412)
&& 2 * x == Currency(Some('$'), 2412));
assert!(x / 2 == Currency(Some('$'), 603));
}
// from_string handles both explicit cents ("$12.10") and whole units ("$12").
#[test]
fn parse_works() {
let a = Currency(Some('$'), 1210);
let b = Currency::from_string("$12.10");
assert!(a == b);
let c = Currency(Some('$'), 1200);
let d = Currency::from_string("$12");
assert!(c == d);
}
// Display pads cents to two digits and prefixes the symbol when present.
#[test]
fn display_works() {
assert!(Currency(Some('$'), 1210).to_string() == "$12.10");
assert!(Currency(None, 1210).to_string() == "12.10");
}
// Revision note: added #[inline] attributes and made the methods pub.
#![crate_type = "lib"]
#![crate_name = "currency"]
use std::cmp::PartialEq;
use std::cmp::PartialOrd;
use std::cmp::Ordering;
use std::ops::Add;
use std::ops::Sub;
use std::ops::Mul;
use std::ops::Div;
use std::fmt::Display;
use std::fmt::Formatter;
use std::fmt::Result;
use std::marker::Copy;
/// Represents currency through an optional symbol and amount of coin.
///
/// Each 100 coins results in a banknote. (100 is formatted as 1.00)
/// The currency will be formatted as such:
/// Currency(Some('$'), 432) ==> "$4.32"
// Tuple fields: 0 = optional currency symbol, 1 = amount in coins (1/100 units).
pub struct Currency(pub Option<char>, pub i64);
impl Currency {
    /// Creates a blank Currency as Currency(None, 0)
    ///
    /// # Examples
    ///
    /// ```
    /// let mut c = Currency::new();
    /// ```
    #[inline]
    #[allow(dead_code)]
    pub fn new() -> Currency {
        Currency(None, 0)
    }

    /// Parses a string literal and turns it into a currency.
    ///
    /// Parsing ignores spaces and commas, only taking note of the digits and
    /// leading sign.
    ///
    /// # Examples
    /// ```
    /// Currency::from_string("$4.32") -> Currency(Some('$'), 432)
    /// Currency::from_string("424.44") -> Currency(None, 42444)
    /// Currency::from_string("@12") -> Currency(Some('@'), 1200)
    /// ```
    ///
    /// # Failures
    /// Fails to take note of the floating point's position.
    /// ```
    /// Currency::from_string("$42.012") -> Currency(Some('$'), 42012)
    /// Currency::from_string("42.") -> Currency(None, 42)
    /// ```
    ///
    /// # Panics
    /// Panics if a number fails to be parsed; this only occurs if the string
    /// argument has no numbers in it. Also panics on an empty string.
    ///
    /// # Safety
    /// If a decimal point is intended to be marked, always use '.'
    /// A "European style" ',' will be ignored.
    /// Use Currency::from_string("€4.32") instead of Currency::from_string("€4,32")
    #[allow(dead_code)]
    pub fn from_string(s: &str) -> Currency {
        // Any leading non-digit character is treated as the currency symbol.
        // Comparing the char itself (instead of an `as u8` truncation) keeps
        // multi-byte symbols such as '€' intact, and `is_digit` correctly
        // excludes '0' and '9', which the old `<= 0x30 || >= 0x39` byte-range
        // check misclassified as symbols.
        let first = s.chars().next().expect("Cannot parse an empty string");
        let sign = if first.is_digit(10) { None } else { Some(first) };

        // Collect all digits; remember whether an explicit '.' appeared so we
        // know whether the string already includes the coin (cent) part.
        let mut should_multiply = true;
        let mut coin_str = String::new();
        for c in s.chars() {
            if c.is_digit(10) {
                coin_str.push(c);
            } else if c == '.' {
                should_multiply = false;
            }
        }

        // Parse the digit run; panics when the string held no digits at all.
        let mut coin: i64 = coin_str.parse()
            .ok()
            .expect("Failed to convert string to currency");
        if should_multiply {
            coin *= 100;
        }
        Currency(sign, coin)
    }
}
/// Overloads the '==' operator for Currency objects.
///
/// Two Currencies are equal only when both the symbol and the amount match;
/// unlike the arithmetic operators, comparison for equality never panics —
/// mismatched symbols simply compare unequal.
impl PartialEq<Currency> for Currency {
    #[inline]
    fn eq(&self, rhs: &Currency) -> bool {
        self.0 == rhs.0 && self.1 == rhs.1
    }
    // `ne` is intentionally omitted: the trait's default (`!self.eq(rhs)`)
    // is exactly the De Morgan dual of the hand-written expansion it replaces.
}
/// Overloads the order operators for Currency objects.
///
/// These operators include '<', '<=', '>', and '>='.
///
/// # Panics
/// Panics if the two comparators are different types of currency, as denoted by
/// the Currency's symbol.
impl PartialOrd<Currency> for Currency {
    #[inline]
    fn partial_cmp(&self, rhs: &Currency) -> Option<Ordering> {
        // Currencies with different symbols have no defined ordering; the
        // guard also keeps the `<`/`>` calls below from panicking.
        if self.0 != rhs.0 {
            return None;
        }
        if self < rhs { return Some(Ordering::Less); }
        if self > rhs { return Some(Ordering::Greater); }
        // Same symbol, neither less nor greater: i64 trichotomy leaves Equal.
        Some(Ordering::Equal)
    }
    #[inline]
    fn lt(&self, rhs: &Currency) -> bool {
        if self.0 != rhs.0 {
            panic!("Cannot compare two different types of currency.");
        }
        self.1 < rhs.1
    }
    #[inline]
    fn le(&self, rhs: &Currency) -> bool {
        self < rhs || self == rhs
    }
    #[inline]
    fn gt(&self, rhs: &Currency) -> bool {
        if self.0 != rhs.0 {
            panic!("Cannot compare two different types of currency.");
        }
        self.1 > rhs.1
    }
    #[inline]
    fn ge(&self, rhs: &Currency) -> bool {
        self > rhs || self == rhs
    }
}
/// Overloads the '+' operator for Currency objects.
///
/// # Panics
/// Panics if the two addends are different types of currency, as denoted by the
/// Currency's symbol.
impl Add for Currency {
    type Output = Currency;
    #[inline]
    fn add(self, rhs: Currency) -> Currency {
        // Mixed-symbol arithmetic is meaningless; refuse it loudly.
        if self.0 != rhs.0 {
            panic!("Cannot add two different types of currency!");
        }
        Currency(self.0, self.1 + rhs.1)
    }
}
/// Overloads the '-' operator for Currency objects.
///
/// # Panics
/// Panics if the minuend and subtrahend are two different types of currency,
/// as denoted by the Currency's symbol.
impl Sub for Currency {
    type Output = Currency;
    #[inline]
    fn sub(self, rhs: Currency) -> Currency {
        // Mixed-symbol arithmetic is meaningless; refuse it loudly.
        if self.0 != rhs.0 {
            panic!("Cannot subtract two different types of currency!");
        }
        Currency(self.0, self.1 - rhs.1)
    }
}
/// Overloads the '*' operator for Currency objects.
///
/// Allows a Currency to be multiplied by an i64.
impl Mul<i64> for Currency {
type Output = Currency;
#[inline]
fn mul(self, rhs: i64) -> Currency {
Currency(self.0, self.1 * rhs)
}
}
/// Overloads the '*' operator for i64.
///
/// Allows an i64 to be multiplied by a Currency.
/// Completes the commutative property for i64 multiplied by Currency.
impl Mul<Currency> for i64 {
type Output = Currency;
#[inline]
fn mul(self, rhs: Currency) -> Currency {
Currency(rhs.0, rhs.1 * self)
}
}
/// Overloads the '/' operator for Currency objects.
///
/// Allows a Currency to be divided by an i64.
impl Div<i64> for Currency {
type Output = Currency;
#[inline]
fn div(self, rhs: i64) -> Currency {
Currency(self.0, self.1 / rhs)
}
}
/// Allows Currencies to be displayed as Strings.
///
/// # Examples
/// ```
/// Currency(Some('$'), 1210).to_string() == "$12.10"
/// Currency(None, 1210).to_string() == "12.10"
/// Currency(Some('$'), 1205).to_string() == "$12.05"
/// Currency(Some('$'), -63).to_string() == "$-0.63"
/// ```
impl Display for Currency {
    #[inline]
    fn fmt(&self, f: &mut Formatter) -> Result {
        // Split the absolute amount into banknotes and coins, zero-padding
        // the coins to two digits, then re-apply the sign once. The previous
        // code printed 1205 as "12.5" (missing padding) and negative amounts
        // as e.g. "0.-63".
        let sign = if self.1 < 0 { "-" } else { "" };
        let amount = self.1.abs();
        match self.0 {
            Some(c) => write!(f, "{}{}{}.{:02}", c, sign, amount / 100, amount % 100),
            None => write!(f, "{}{}.{:02}", sign, amount / 100, amount % 100),
        }
    }
}
/// Allows Currencies to be copied, rather than using move semantics.
impl Copy for Currency { }
impl Clone for Currency {
#[inline]
fn clone(&self) -> Currency { *self }
}
// Equality compares both symbol and amount.
#[test]
fn eq_works() {
let a = Currency(Some('$'), 1210);
let b = Currency(Some('$'), 1210);
let c = Currency(Some('$'), 1251);
assert!(a == b);
assert!(b == b);
assert!(b == a);
assert!(a != c);
}
// Ordering is defined for same-symbol currencies via partial_cmp and the
// comparison operators.
#[test]
fn ord_works() {
let a = Currency(Some('$'), 1210);
let b = Currency(Some('$'), 1211);
let c = Currency(Some('$'), 1311);
let d = Currency(Some('$'), 1210);
assert_eq!(a.partial_cmp(&b), Some(Ordering::Less));
assert_eq!(a.partial_cmp(&c), Some(Ordering::Less));
assert_eq!(a.partial_cmp(&d), Some(Ordering::Equal));
assert_eq!(c.partial_cmp(&a), Some(Ordering::Greater));
assert!(a < b);
assert!(a < c);
assert!(a <= a);
assert!(a <= c);
assert!(b > a);
assert!(c > a);
assert!(a >= a);
assert!(c >= a);
}
// +, -, * (both operand orders), and / on same-symbol currencies.
#[test]
fn arithmetic_works() {
let x = Currency(Some('$'), 1206);
let y = Currency(Some('$'), 1143);
assert!(x + y == Currency(Some('$'), 2349)
&& y + x == Currency(Some('$'), 2349));
assert!(x - y == Currency(Some('$'), 63));
assert!(y - x == Currency(Some('$'), -63));
assert!(x * 2 == Currency(Some('$'), 2412)
&& 2 * x == Currency(Some('$'), 2412));
assert!(x / 2 == Currency(Some('$'), 603));
}
// from_string handles both explicit cents ("$12.10") and whole units ("$12").
#[test]
fn parse_works() {
let a = Currency(Some('$'), 1210);
let b = Currency::from_string("$12.10");
assert!(a == b);
let c = Currency(Some('$'), 1200);
let d = Currency::from_string("$12");
assert!(c == d);
}
// Display pads cents to two digits and prefixes the symbol when present.
#[test]
fn display_works() {
assert!(Currency(Some('$'), 1210).to_string() == "$12.10");
assert!(Currency(None, 1210).to_string() == "12.10");
}
//! A work-in-progress futures library for Rust.
//!
//! This library is an **experimental** implementation of Futures in Rust, and
//! is very likely to change over time and break compatibility without notice.
//! Be warned!
//!
//! The documentation of this library is also very much a work in progress, but
//! if anything is unclear please open an issue and hopefully it'll be
//! documented quickly!
// OPEN QUESTIONS:
//
// 1. Can Send + 'static be removed from lots of places?
// * probably not Future as trait objects are everywhere (b/c of tailcall)
// and those need to be Send
// * probably not item types because...
// * they're stored in Join (eh)
// * makes util::recover sketchy (oh dear)
// * Future for Empty<T, E> requires both T/E to be 'static?
#![deny(missing_docs)]
use std::sync::Arc;
mod lock;
mod slot;
mod util;
mod token;
pub use token::Tokens;
pub mod executor;
// Primitive futures
mod collect;
mod done;
mod empty;
mod failed;
mod finished;
mod lazy;
mod promise;
pub use collect::{collect, Collect};
pub use done::{done, Done};
pub use empty::{empty, Empty};
pub use failed::{failed, Failed};
pub use finished::{finished, Finished};
pub use lazy::{lazy, Lazy};
pub use promise::{promise, Promise, Complete, Canceled};
// combinators
mod and_then;
mod flatten;
mod fuse;
mod join;
mod map;
mod map_err;
mod or_else;
mod select;
mod then;
pub use and_then::AndThen;
pub use flatten::Flatten;
pub use fuse::Fuse;
pub use join::Join;
pub use map::Map;
pub use map_err::MapErr;
pub use or_else::OrElse;
pub use select::{Select, SelectNext};
pub use then::Then;
// streams
pub mod stream;
// impl details
mod chain;
mod impls;
mod forget;
/// Trait for types which represent a placeholder of a value that will become
/// available, possibly at some later point in time.
///
/// Futures are used to provide a sentinel through which a value can be
/// referenced. They crucially allow chaining operations through consumption
/// which allows expressing entire trees of computation as one sentinel value.
///
/// The ergonomics and implementation of the `Future` trait are very similar to
/// the `Iterator` trait in Rust which is where there is a small handful of
/// methods to implement and a load of default methods that consume a `Future`,
/// producing a new value.
///
/// # Core methods
///
/// The core methods of futures, currently `poll`, `schedule`, and `tailcall`,
/// are not intended to be called in general. These are used to drive an entire
/// task of many futures composed together only from the top level.
///
/// More documentation can be found on each method about what its purpose is,
/// but in general all of the combinators are the main methods that should be
/// used.
///
/// # Combinators
///
/// Like iterators, futures provide a large number of combinators to work with
/// futures to express computations in a much more natural method than
/// scheduling a number of callbacks. For example the `map` method can change
/// a `Future<Item=T>` to a `Future<Item=U>` or an `and_then` combinator could
/// create a future after the first one is done and only be resolved when the
/// second is done.
///
/// Combinators act very similarly to the methods on the `Iterator` trait itself
/// or those on `Option` and `Result`. Like with iterators, the combinators are
/// zero-cost and don't impose any extra layers of indirection you wouldn't
/// otherwise have to write down.
// TODO: expand this
pub trait Future: Send + 'static {
/// The type of value that this future will resolved with if it is
/// successful.
type Item: Send + 'static;
/// The type of error that this future will resolve with if it fails in a
/// normal fashion.
///
/// Futures may also fail due to panics or cancellation, but that is
/// expressed through the `PollError` type, not this type.
type Error: Send + 'static;
/// Query this future to see if its value has become available.
///
/// This function will check the internal state of the future and assess
/// whether the value is ready to be produced. Implementors of this function
/// should ensure that a call to this **never blocks** as event loops may
/// not work properly otherwise.
///
/// Callers of this function may provide an optional set of "interested
/// tokens" in the `tokens` argument which indicates which tokens are likely
/// ready to be looked at. Tokens are learned about through the `schedule`
/// method below and communicated through the callback in that method.
///
/// Implementors of the `Future` trait may safely assume that if tokens of
/// interest are not in `tokens` then futures may not need to be polled
/// (skipping work in `poll` in some cases).
///
/// # Return value
///
/// This function returns `None` if the future is not ready yet, or `Some`
/// with the result of this future if it's ready. Once a future has returned
/// `Some` it is considered a contract error to continue polling it.
///
/// # Panics
///
/// Once a future has completed (returned `Some` from `poll`), then any
/// future calls to `poll` may panic, block forever, or otherwise cause
/// wrong behavior. The `Future` trait itself provides no guarantees about
/// the behavior of `poll` after `Some` has been returned at least once.
///
/// Callers who may call `poll` too many times may want to consider using
/// the `fuse` adaptor which defines the behavior of `poll`, but comes with
/// a little bit of extra cost.
///
/// # Errors
///
/// If `Some` is returned, then a `Result<Item, Error>` is returned. This
/// future may have failed to finish the computation, in which case the
/// `Err` variant will be returned with an appropriate payload of an error.
fn poll(&mut self, tokens: &Tokens)
-> Option<Result<Self::Item, Self::Error>>;
/// Register a callback to be run whenever this future can make progress
/// again.
///
/// Throughout the lifetime of a future it may frequently be `poll`'d on to
/// test whether the value is ready yet. If `None` is returned, however, the
/// caller may then register a callback via this function to get a
/// notification when the future can indeed make progress.
///
/// The `wake` argument provided here will receive a notification (get
/// called) when this future can make progress. It may also be called
/// spuriously when the future may not be able to make progress. Whenever
/// called, however, it is recommended to call `poll` next to try to move
/// the future forward.
///
/// Implementors of the `Future` trait are recommended to just blindly pass
/// around this callback rather than manufacture new callbacks for contained
/// futures.
///
/// When the `wake` callback is invoked it will be provided a set of tokens
/// that represent the set of events which have happened since it was last
/// called (or the last call to `poll`). These events can then be used to
/// pass back into the `poll` function above to ensure the future does not
/// unnecessarily `poll` too much.
///
/// # Multiple callbacks
///
/// This function cannot be used to queue up multiple callbacks to be
/// invoked when a future is ready to make progress. Only the most recent
/// call to `schedule` is guaranteed to have notifications received when
/// `schedule` is called multiple times.
///
/// If this function is called twice, it may be the case that the previous
/// callback is never invoked. It is recommended that this function is
/// called with the same callback for the entire lifetime of this future.
///
/// # Panics
///
/// Once a future has returned `Some` (it's been completed) then future
/// calls to either `poll` or this function, `schedule`, should not be
/// expected to behave well. A call to `schedule` after a poll has succeeded
/// may panic, block forever, or otherwise exhibit odd behavior.
///
/// Callers who may call `schedule` after a future is finished may want to
/// consider using the `fuse` adaptor which defines the behavior of
/// `schedule` after a successful poll, but comes with a little bit of
/// extra cost.
fn schedule(&mut self, wake: Arc<Wake>);
/// Perform tail-call optimization on this future.
///
/// A particular future may actually represent a large tree of computation,
/// the structure of which can be optimized periodically after some of the
/// work has completed. This function is intended to be called after an
/// unsuccessful `poll` to ensure that the computation graph of a future
/// remains at a reasonable size.
///
/// This function is intended to be idempotent. If `None` is returned then
/// the internal structure may have been optimized, but this future itself
/// must stick around to represent the computation at hand.
///
/// If `Some` is returned then the returned future will be realized with the
/// same value that this future *would* have been had this method not been
/// called. Essentially, if `Some` is returned, then this future can be
/// forgotten and instead the returned value is used.
///
/// Note that this is a default method which returns `None`, but any future
/// adaptor should implement it to flatten the underlying future, if any.
fn tailcall(&mut self)
-> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
// Default implementation: there is no internal structure to collapse, so
// return None ("keep using this future as-is"); adaptors override this.
None
}
/// Convenience function for turning this future into a trait object.
///
/// This simply avoids the need to write `Box::new` and can often help with
/// type inference as well by always returning a trait object.
///
/// # Examples
///
/// ```
/// use futures::*;
///
/// let a: Box<Future<Item=i32, Error=i32>> = done(Ok(1)).boxed();
/// ```
fn boxed(self) -> Box<Future<Item=Self::Item, Error=Self::Error>>
where Self: Sized
{
// Erase the concrete future type behind a heap-allocated trait object.
Box::new(self)
}
/// Map this future's result to a different type, returning a new future of
/// the resulting type.
///
/// This function is similar to the `Option::map` or `Iterator::map` where
/// it will change the type of the underlying future. This is useful to
/// chain along a computation once a future has been resolved.
///
/// The closure provided will only be called if this future is resolved
/// successfully. If this future returns an error, panics, or is canceled,
/// then the closure provided will never be invoked.
///
/// Note that this function consumes the receiving future and returns a
/// wrapped version of it, similar to the existing `map` methods in the
/// standard library.
///
/// # Examples
///
/// ```
/// use futures::*;
///
/// let future_of_1 = finished::<u32, u32>(1);
/// let future_of_4 = future_of_1.map(|x| x + 3);
/// ```
fn map<F, U>(self, f: F) -> Map<Self, F>
where F: FnOnce(Self::Item) -> U + Send + 'static,
U: Send + 'static,
Self: Sized,
{
assert_future::<U, Self::Error, _>(map::new(self, f))
}
/// Map this future's error to a different error, returning a new future.
///
/// This function is similar to the `Result::map_err` where it will change
/// the error type of the underlying future. This is useful for example to
/// ensure that futures have the same error type when used with combinators
/// like `select` and `join`.
///
/// The closure provided will only be called if this future is resolved
/// with an error. If this future returns a success, panics, or is
/// canceled, then the closure provided will never be invoked.
///
/// Note that this function consumes the receiving future and returns a
/// wrapped version of it.
///
/// # Examples
///
/// ```
/// use futures::*;
///
/// let future_of_err_1 = failed::<u32, u32>(1);
/// let future_of_err_4 = future_of_err_1.map_err(|x| x + 3);
/// ```
fn map_err<F, E>(self, f: F) -> MapErr<Self, F>
where F: FnOnce(Self::Error) -> E + Send + 'static,
E: Send + 'static,
Self: Sized,
{
    // Build the MapErr adaptor, then statically pin down its Item/Error types.
    let remapped = map_err::new(self, f);
    assert_future::<Self::Item, E, _>(remapped)
}
/// Chain on a computation for when a future finished, passing the result of
/// the future to the provided closure `f`.
///
/// This function can be used to ensure a computation runs regardless of
/// the conclusion of the future. The closure provided will be yielded a
/// `Result` once the future is complete.
///
/// The returned value of the closure must implement the `IntoFuture` trait
/// and can represent some more work to be done before the composed future
/// is finished. Note that the `Result` type implements the `IntoFuture`
/// trait so it is possible to simply alter the `Result` yielded to the
/// closure and return it.
///
/// If this future is canceled or panics then the closure `f` will not be
/// run.
///
/// Note that this function consumes the receiving future and returns a
/// wrapped version of it.
///
/// # Examples
///
/// ```
/// use futures::*;
///
/// let future_of_1 = finished::<u32, u32>(1);
/// let future_of_4 = future_of_1.then(|x| {
/// x.map(|y| y + 3)
/// });
///
/// let future_of_err_1 = failed::<u32, u32>(1);
/// let future_of_4 = future_of_err_1.then(|x| {
/// match x {
/// Ok(_) => panic!("expected an error"),
/// Err(y) => finished::<u32, u32>(y + 3),
/// }
/// });
/// ```
fn then<F, B>(self, f: F) -> Then<Self, B, F>
where F: FnOnce(Result<Self::Item, Self::Error>) -> B + Send + 'static,
B: IntoFuture,
Self: Sized,
{
assert_future::<B::Item, B::Error, _>(then::new(self, f))
}
/// Execute another future after this one has resolved successfully.
///
/// This function can be used to chain two futures together and ensure that
/// the final future isn't resolved until both have finished. The closure
/// provided is yielded the successful result of this future and returns
/// another value which can be converted into a future.
///
/// Note that because `Result` implements the `IntoFuture` trait this method
/// can also be useful for chaining fallible and serial computations onto
/// the end of one future.
///
/// If this future is canceled, panics, or completes with an error then the
/// provided closure `f` is never called.
///
/// Note that this function consumes the receiving future and returns a
/// wrapped version of it.
///
/// # Examples
///
/// ```
/// use futures::*;
///
/// let future_of_1 = finished::<u32, u32>(1);
/// let future_of_4 = future_of_1.and_then(|x| {
/// Ok(x + 3)
/// });
///
/// let future_of_err_1 = failed::<u32, u32>(1);
/// future_of_err_1.and_then(|_| -> Done<u32, u32> {
/// panic!("should not be called in case of an error");
/// });
/// ```
fn and_then<F, B>(self, f: F) -> AndThen<Self, B, F>
where F: FnOnce(Self::Item) -> B + Send + 'static,
B: IntoFuture<Error = Self::Error>,
Self: Sized,
{
assert_future::<B::Item, Self::Error, _>(and_then::new(self, f))
}
/// Execute another future after this one has resolved with an error.
///
/// This function can be used to chain two futures together and ensure that
/// the final future isn't resolved until both have finished. The closure
/// provided is yielded the error of this future and returns another value
/// which can be converted into a future.
///
/// Note that because `Result` implements the `IntoFuture` trait this method
/// can also be useful for chaining fallible and serial computations onto
/// the end of one future.
///
/// If this future is canceled, panics, or completes successfully then the
/// provided closure `f` is never called.
///
/// Note that this function consumes the receiving future and returns a
/// wrapped version of it.
///
/// # Examples
///
/// ```
/// use futures::*;
///
/// let future_of_err_1 = failed::<u32, u32>(1);
/// let future_of_4 = future_of_err_1.or_else(|x| -> Result<u32, u32> {
/// Ok(x + 3)
/// });
///
/// let future_of_1 = finished::<u32, u32>(1);
/// future_of_1.or_else(|_| -> Done<u32, u32> {
/// panic!("should not be called in case of success");
/// });
/// ```
fn or_else<F, B>(self, f: F) -> OrElse<Self, B, F>
where F: FnOnce(Self::Error) -> B + Send + 'static,
B: IntoFuture<Item = Self::Item>,
Self: Sized,
{
assert_future::<Self::Item, B::Error, _>(or_else::new(self, f))
}
/// Waits for either one of two futures to complete.
///
/// This function will return a new future which awaits for either this or
/// the `other` future to complete. The returned future will finish with
/// both the value resolved and a future representing the completion of the
/// other work. Both futures must have the same item and error type.
///
/// If either future is canceled or panics, the other is canceled and the
/// original error is propagated upwards.
///
/// Note that this function consumes the receiving future and returns a
/// wrapped version of it.
///
/// # Examples
///
/// ```
/// use futures::*;
///
/// // A poor-man's join implemented on top of select
///
/// fn join<A>(a: A, b: A)
/// -> Box<Future<Item=(A::Item, A::Item), Error=A::Error>>
/// where A: Future,
/// {
/// a.select(b).then(|res| {
/// match res {
/// Ok((a, b)) => b.map(|b| (a, b)).boxed(),
/// Err((a, _)) => failed(a).boxed(),
/// }
/// }).boxed()
/// }
/// ```
fn select<B>(self, other: B) -> Select<Self, B::Future>
where B: IntoFuture<Item=Self::Item, Error=Self::Error>,
Self: Sized,
{
let f = select::new(self, other.into_future());
// Statically check the result shape: on success (winning value, future for
// the loser); on failure (winning error, future for the loser).
assert_future::<(Self::Item, SelectNext<Self, B::Future>),
(Self::Error, SelectNext<Self, B::Future>), _>(f)
}
/// Joins the result of two futures, waiting for them both to complete.
///
/// This function will return a new future which awaits both this and the
/// `other` future to complete. The returned future will finish with a tuple
/// of both results.
///
/// Both futures must have the same error type, and if either finishes with
/// an error then the other will be canceled and that error will be
/// returned.
///
/// If either future is canceled or panics, the other is canceled and the
/// original error is propagated upwards.
///
/// Note that this function consumes the receiving future and returns a
/// wrapped version of it.
///
/// # Examples
///
/// ```
/// use futures::*;
///
/// let a = finished::<u32, u32>(1);
/// let b = finished::<u32, u32>(2);
/// let pair = a.join(b);
///
/// pair.map(|(a, b)| {
/// assert_eq!(a, 1);
/// assert_eq!(b, 2);
/// });
/// ```
fn join<B>(self, other: B) -> Join<Self, B::Future>
where B: IntoFuture<Error=Self::Error>,
Self: Sized,
{
let f = join::new(self, other.into_future());
// The joined future yields both items as a tuple with the shared error type.
assert_future::<(Self::Item, B::Item), Self::Error, _>(f)
}
    /// Flatten the execution of this future when the successful result of this
    /// future is itself another future.
    ///
    /// This can be useful when combining futures together to flatten the
    /// computation out to the final result. This method can only be called
    /// when the successful result of this future itself implements the
    /// `IntoFuture` trait and the error can be created from this future's error
    /// type.
    ///
    /// This method is equivalent to `self.then(|x| x)`.
    ///
    /// Note that this function consumes the receiving future and returns a
    /// wrapped version of it.
    ///
    /// # Examples
    ///
    /// ```
    /// use futures::*;
    ///
    /// let future_of_a_future = finished::<_, u32>(finished::<u32, u32>(1));
    /// let future_of_1 = future_of_a_future.flatten();
    /// ```
    fn flatten(self) -> Flatten<Self>
        where Self::Item: IntoFuture,
              <<Self as Future>::Item as IntoFuture>::Error:
                    From<<Self as Future>::Error>,
              Self: Sized
    {
        let f = flatten::new(self);
        // The flattened future resolves with the inner future's item/error.
        assert_future::<<<Self as Future>::Item as IntoFuture>::Item,
                        <<Self as Future>::Item as IntoFuture>::Error,
                        _>(f)
    }
    /// Fuse a future such that `poll` will never again be called once it has
    /// returned a success.
    ///
    /// Currently once a future has returned `Some` from `poll` any further
    /// calls could exhibit bad behavior such as block forever, panic, never
    /// return, etc. If it is known that `poll` may be called too often then
    /// this method can be used to ensure that it has defined semantics.
    ///
    /// Once a future has been `fuse`d and it returns success from `poll`, then
    /// it will forever return `None` from `poll` again (never resolve). This,
    /// unlike the trait's `poll` method, is guaranteed.
    ///
    /// Additionally, once a future has completed, this `Fuse` combinator will
    /// ensure that all registered callbacks will not be registered with the
    /// underlying future.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use futures::*;
    ///
    /// # let tokens = &Tokens::all();
    /// let mut future = finished::<i32, u32>(2);
    /// assert!(future.poll(&tokens).is_some());
    ///
    /// // Normally, a call such as this would panic:
    /// //future.poll(&tokens);
    ///
    /// // This, however, is guaranteed to not panic
    /// let mut future = finished::<i32, u32>(2).fuse();
    /// assert!(future.poll(&tokens).is_some());
    /// assert!(future.poll(&tokens).is_none());
    /// ```
    fn fuse(self) -> Fuse<Self>
        where Self: Sized
    {
        let f = fuse::new(self);
        assert_future::<Self::Item, Self::Error, _>(f)
    }
    /// Consume this future and allow it to execute without cancelling it.
    ///
    /// Normally whenever a future is dropped it signals that the underlying
    /// computation should be cancelled ASAP. This function, however, will
    /// consume the future and arrange for the future itself to get dropped only
    /// when the computation has completed.
    ///
    /// This function can be useful to ensure that futures with side effects can
    /// run "in the background", but it is discouraged as it doesn't allow any
    /// control over the future in terms of cancellation.
    ///
    /// Generally applications should retain handles on futures to ensure
    /// they're properly cleaned up if something unexpected happens.
    fn forget(self) where Self: Sized {
        // Ownership is handed to the `forget` module, which drives the future
        // to completion before dropping it.
        forget::forget(self);
    }
}
// Just a helper function to ensure the futures we're returning all have the
// right implementations. It is the identity function at runtime; it exists
// purely so each combinator's item/error types are checked where the
// combinator is defined rather than at the call site.
fn assert_future<A, B, F>(t: F) -> F
    where F: Future<Item=A, Error=B>,
          A: Send + 'static,
          B: Send + 'static,
{
    t
}
/// A trait essentially representing `Fn(&Tokens) + Send + Sync + 'static`.
///
/// This is used as an argument to the `Future::schedule` function.
pub trait Wake: Send + Sync + 'static {
    /// Invokes this callback indicating that the provided set of events have
    /// activity and the associated futures may make progress.
    fn wake(&self, tokens: &Tokens);
}
// Blanket impl: any sharable closure over `&Tokens` can be used directly as a
// `Wake` callback.
impl<F> Wake for F
    where F: Fn(&Tokens) + Send + Sync + 'static
{
    fn wake(&self, tokens: &Tokens) {
        self(tokens)
    }
}
/// Class of types which can be converted themselves into a future.
///
/// This trait is very similar to the `IntoIterator` trait and is intended to be
/// used in a very similar fashion.
pub trait IntoFuture: Send + 'static {
    /// The future that this type can be converted into.
    type Future: Future<Item=Self::Item, Error=Self::Error>;

    /// The item that the future may resolve with.
    type Item: Send + 'static;
    /// The error that the future may resolve with.
    type Error: Send + 'static;

    /// Consumes this object and produces a future.
    fn into_future(self) -> Self::Future;
}
// Every future trivially converts into itself (mirrors the blanket
// `IntoIterator` impl for iterators).
impl<F: Future> IntoFuture for F {
    type Future = F;
    type Item = F::Item;
    type Error = F::Error;

    fn into_future(self) -> F {
        self
    }
}
// A `Result` converts into an already-completed `Done` future, which is what
// lets combinator closures return plain `Ok(..)`/`Err(..)` values.
impl<T, E> IntoFuture for Result<T, E>
    where T: Send + 'static,
          E: Send + 'static,
{
    type Future = Done<T, E>;
    type Item = T;
    type Error = E;

    fn into_future(self) -> Done<T, E> {
        done(self)
    }
}
The `Send + 'static` bounds are not an open question any more: the usage of
trait objects basically ties our hands here.
//! A work-in-progress futures library for Rust.
//!
//! This library is an **experimental** implementation of Futures in Rust, and
//! is very likely to change over time and break compatibility without notice.
//! Be warned!
//!
//! The documentation of this library is also very much a work in progress, but
//! if anything is unclear please open an issue and hopefully it'll be
//! documented quickly!
#![deny(missing_docs)]
use std::sync::Arc;
mod lock;
mod slot;
mod util;
mod token;
pub use token::Tokens;
pub mod executor;
// Primitive futures
mod collect;
mod done;
mod empty;
mod failed;
mod finished;
mod lazy;
mod promise;
pub use collect::{collect, Collect};
pub use done::{done, Done};
pub use empty::{empty, Empty};
pub use failed::{failed, Failed};
pub use finished::{finished, Finished};
pub use lazy::{lazy, Lazy};
pub use promise::{promise, Promise, Complete, Canceled};
// combinators
mod and_then;
mod flatten;
mod fuse;
mod join;
mod map;
mod map_err;
mod or_else;
mod select;
mod then;
pub use and_then::AndThen;
pub use flatten::Flatten;
pub use fuse::Fuse;
pub use join::Join;
pub use map::Map;
pub use map_err::MapErr;
pub use or_else::OrElse;
pub use select::{Select, SelectNext};
pub use then::Then;
// streams
pub mod stream;
// impl details
mod chain;
mod impls;
mod forget;
/// Trait for types which represent a placeholder of a value that will become
/// available at some possible later point in time.
///
/// Futures are used to provide a sentinel through which a value can be
/// referenced. They crucially allow chaining operations through consumption
/// which allows expressing entire trees of computation as one sentinel value.
///
/// The ergonomics and implementation of the `Future` trait are very similar to
/// the `Iterator` trait in Rust which is where there is a small handful of
/// methods to implement and a load of default methods that consume a `Future`,
/// producing a new value.
///
/// # Core methods
///
/// The core methods of futures, currently `poll`, `schedule`, and `tailcall`,
/// are not intended to be called in general. These are used to drive an entire
/// task of many futures composed together only from the top level.
///
/// More documentation can be found on each method about what its purpose is,
/// but in general all of the combinators are the main methods that should be
/// used.
///
/// # Combinators
///
/// Like iterators, futures provide a large number of combinators to work with
/// futures to express computations in a much more natural method than
/// scheduling a number of callbacks. For example the `map` method can change
/// a `Future<Item=T>` to a `Future<Item=U>` or an `and_then` combinator could
/// create a future after the first one is done and only be resolved when the
/// second is done.
///
/// Combinators act very similarly to the methods on the `Iterator` trait itself
/// or those on `Option` and `Result`. Like with iterators, the combinators are
/// zero-cost and don't impose any extra layers of indirection you wouldn't
/// otherwise have to write down.
// TODO: expand this
pub trait Future: Send + 'static {

    /// The type of value that this future will resolved with if it is
    /// successful.
    type Item: Send + 'static;

    /// The type of error that this future will resolve with if it fails in a
    /// normal fashion.
    ///
    /// Futures may also fail due to panics or cancellation, but that is
    /// expressed through the `PollError` type, not this type.
    type Error: Send + 'static;

    /// Query this future to see if its value has become available.
    ///
    /// This function will check the internal state of the future and assess
    /// whether the value is ready to be produced. Implementors of this function
    /// should ensure that a call to this **never blocks** as event loops may
    /// not work properly otherwise.
    ///
    /// Callers of this function may provide an optional set of "interested
    /// tokens" in the `tokens` argument which indicates which tokens are likely
    /// ready to be looked at. Tokens are learned about through the `schedule`
    /// method below and communicated through the callback in that method.
    ///
    /// Implementors of the `Future` trait may safely assume that if tokens of
    /// interest are not in `tokens` then futures may not need to be polled
    /// (skipping work in `poll` in some cases).
    ///
    /// # Return value
    ///
    /// This function returns `None` if the future is not ready yet, or `Some`
    /// with the result of this future if it's ready. Once a future has returned
    /// `Some` it is considered a contract error to continue polling it.
    ///
    /// # Panics
    ///
    /// Once a future has completed (returned `Some` from `poll`), then any
    /// future calls to `poll` may panic, block forever, or otherwise cause
    /// wrong behavior. The `Future` trait itself provides no guarantees about
    /// the behavior of `poll` after `Some` has been returned at least once.
    ///
    /// Callers who may call `poll` too many times may want to consider using
    /// the `fuse` adaptor which defines the behavior of `poll`, but comes with
    /// a little bit of extra cost.
    ///
    /// # Errors
    ///
    /// If `Some` is returned, then a `Result<Item, Error>` is returned. This
    /// future may have failed to finish the computation, in which case the
    /// `Err` variant will be returned with an appropriate payload of an error.
    fn poll(&mut self, tokens: &Tokens)
            -> Option<Result<Self::Item, Self::Error>>;

    /// Register a callback to be run whenever this future can make progress
    /// again.
    ///
    /// Throughout the lifetime of a future it may frequently be `poll`'d on to
    /// test whether the value is ready yet. If `None` is returned, however, the
    /// caller may then register a callback via this function to get a
    /// notification when the future can indeed make progress.
    ///
    /// The `wake` argument provided here will receive a notification (get
    /// called) when this future can make progress. It may also be called
    /// spuriously when the future may not be able to make progress. Whenever
    /// called, however, it is recommended to call `poll` next to try to move
    /// the future forward.
    ///
    /// Implementors of the `Future` trait are recommended to just blindly pass
    /// around this callback rather than manufacture new callbacks for contained
    /// futures.
    ///
    /// When the `wake` callback is invoked it will be provided a set of tokens
    /// that represent the set of events which have happened since it was last
    /// called (or the last call to `poll`). These events can then be used to
    /// pass back into the `poll` function above to ensure the future does not
    /// unnecessarily `poll` too much.
    ///
    /// # Multiple callbacks
    ///
    /// This function cannot be used to queue up multiple callbacks to be
    /// invoked when a future is ready to make progress. Only the most recent
    /// call to `schedule` is guaranteed to have notifications received when
    /// `schedule` is called multiple times.
    ///
    /// If this function is called twice, it may be the case that the previous
    /// callback is never invoked. It is recommended that this function is
    /// called with the same callback for the entire lifetime of this future.
    ///
    /// # Panics
    ///
    /// Once a future has returned `Some` (it's been completed) then future
    /// calls to either `poll` or this function, `schedule`, should not be
    /// expected to behave well. A call to `schedule` after a poll has succeeded
    /// may panic, block forever, or otherwise exhibit odd behavior.
    ///
    /// Callers who may call `schedule` after a future is finished may want to
    /// consider using the `fuse` adaptor which defines the behavior of
    /// `schedule` after a successful poll, but comes with a little bit of
    /// extra cost.
    fn schedule(&mut self, wake: Arc<Wake>);

    /// Perform tail-call optimization on this future.
    ///
    /// A particular future may actually represent a large tree of computation,
    /// the structure of which can be optimized periodically after some of the
    /// work has completed. This function is intended to be called after an
    /// unsuccessful `poll` to ensure that the computation graph of a future
    /// remains at a reasonable size.
    ///
    /// This function is intended to be idempotent. If `None` is returned then
    /// the internal structure may have been optimized, but this future itself
    /// must stick around to represent the computation at hand.
    ///
    /// If `Some` is returned then the returned future will be realized with the
    /// same value that this future *would* have been had this method not been
    /// called. Essentially, if `Some` is returned, then this future can be
    /// forgotten and instead the returned value is used.
    ///
    /// Note that this is a default method which returns `None`, but any future
    /// adaptor should implement it to flatten the underlying future, if any.
    fn tailcall(&mut self)
                -> Option<Box<Future<Item=Self::Item, Error=Self::Error>>> {
        None
    }

    /// Convenience function for turning this future into a trait object.
    ///
    /// This simply avoids the need to write `Box::new` and can often help with
    /// type inference as well by always returning a trait object.
    ///
    /// # Examples
    ///
    /// ```
    /// use futures::*;
    ///
    /// let a: Box<Future<Item=i32, Error=i32>> = done(Ok(1)).boxed();
    /// ```
    fn boxed(self) -> Box<Future<Item=Self::Item, Error=Self::Error>>
        where Self: Sized
    {
        Box::new(self)
    }

    /// Map this future's result to a different type, returning a new future of
    /// the resulting type.
    ///
    /// This function is similar to the `Option::map` or `Iterator::map` where
    /// it will change the type of the underlying future. This is useful to
    /// chain along a computation once a future has been resolved.
    ///
    /// The closure provided will only be called if this future is resolved
    /// successfully. If this future returns an error, panics, or is canceled,
    /// then the closure provided will never be invoked.
    ///
    /// Note that this function consumes the receiving future and returns a
    /// wrapped version of it, similar to the existing `map` methods in the
    /// standard library.
    ///
    /// # Examples
    ///
    /// ```
    /// use futures::*;
    ///
    /// let future_of_1 = finished::<u32, u32>(1);
    /// let future_of_4 = future_of_1.map(|x| x + 3);
    /// ```
    fn map<F, U>(self, f: F) -> Map<Self, F>
        where F: FnOnce(Self::Item) -> U + Send + 'static,
              U: Send + 'static,
              Self: Sized,
    {
        assert_future::<U, Self::Error, _>(map::new(self, f))
    }

    /// Map this future's error to a different error, returning a new future.
    ///
    /// This function is similar to the `Result::map_err` where it will change
    /// the error type of the underlying future. This is useful for example to
    /// ensure that futures have the same error type when used with combinators
    /// like `select` and `join`.
    ///
    /// The closure provided will only be called if this future is resolved
    /// with an error. If this future returns a success, panics, or is
    /// canceled, then the closure provided will never be invoked.
    ///
    /// Note that this function consumes the receiving future and returns a
    /// wrapped version of it.
    ///
    /// # Examples
    ///
    /// ```
    /// use futures::*;
    ///
    /// let future_of_err_1 = failed::<u32, u32>(1);
    /// let future_of_err_4 = future_of_err_1.map_err(|x| x + 3);
    /// ```
    fn map_err<F, E>(self, f: F) -> MapErr<Self, F>
        where F: FnOnce(Self::Error) -> E + Send + 'static,
              E: Send + 'static,
              Self: Sized,
    {
        assert_future::<Self::Item, E, _>(map_err::new(self, f))
    }

    /// Chain on a computation for when a future finished, passing the result of
    /// the future to the provided closure `f`.
    ///
    /// This function can be used to ensure a computation runs regardless of
    /// the conclusion of the future. The closure provided will be yielded a
    /// `Result` once the future is complete.
    ///
    /// The returned value of the closure must implement the `IntoFuture` trait
    /// and can represent some more work to be done before the composed future
    /// is finished. Note that the `Result` type implements the `IntoFuture`
    /// trait so it is possible to simply alter the `Result` yielded to the
    /// closure and return it.
    ///
    /// If this future is canceled or panics then the closure `f` will not be
    /// run.
    ///
    /// Note that this function consumes the receiving future and returns a
    /// wrapped version of it.
    ///
    /// # Examples
    ///
    /// ```
    /// use futures::*;
    ///
    /// let future_of_1 = finished::<u32, u32>(1);
    /// let future_of_4 = future_of_1.then(|x| {
    ///     x.map(|y| y + 3)
    /// });
    ///
    /// let future_of_err_1 = failed::<u32, u32>(1);
    /// let future_of_4 = future_of_err_1.then(|x| {
    ///     match x {
    ///         Ok(_) => panic!("expected an error"),
    ///         Err(y) => finished::<u32, u32>(y + 3),
    ///     }
    /// });
    /// ```
    fn then<F, B>(self, f: F) -> Then<Self, B, F>
        where F: FnOnce(Result<Self::Item, Self::Error>) -> B + Send + 'static,
              B: IntoFuture,
              Self: Sized,
    {
        assert_future::<B::Item, B::Error, _>(then::new(self, f))
    }

    /// Execute another future after this one has resolved successfully.
    ///
    /// This function can be used to chain two futures together and ensure that
    /// the final future isn't resolved until both have finished. The closure
    /// provided is yielded the successful result of this future and returns
    /// another value which can be converted into a future.
    ///
    /// Note that because `Result` implements the `IntoFuture` trait this method
    /// can also be useful for chaining fallible and serial computations onto
    /// the end of one future.
    ///
    /// If this future is canceled, panics, or completes with an error then the
    /// provided closure `f` is never called.
    ///
    /// Note that this function consumes the receiving future and returns a
    /// wrapped version of it.
    ///
    /// # Examples
    ///
    /// ```
    /// use futures::*;
    ///
    /// let future_of_1 = finished::<u32, u32>(1);
    /// let future_of_4 = future_of_1.and_then(|x| {
    ///     Ok(x + 3)
    /// });
    ///
    /// let future_of_err_1 = failed::<u32, u32>(1);
    /// future_of_err_1.and_then(|_| -> Done<u32, u32> {
    ///     panic!("should not be called in case of an error");
    /// });
    /// ```
    fn and_then<F, B>(self, f: F) -> AndThen<Self, B, F>
        where F: FnOnce(Self::Item) -> B + Send + 'static,
              B: IntoFuture<Error = Self::Error>,
              Self: Sized,
    {
        assert_future::<B::Item, Self::Error, _>(and_then::new(self, f))
    }

    /// Execute another future after this one has resolved with an error.
    ///
    /// This function can be used to chain two futures together and ensure that
    /// the final future isn't resolved until both have finished. The closure
    /// provided is yielded the error of this future and returns another value
    /// which can be converted into a future.
    ///
    /// Note that because `Result` implements the `IntoFuture` trait this method
    /// can also be useful for chaining fallible and serial computations onto
    /// the end of one future.
    ///
    /// If this future is canceled, panics, or completes successfully then the
    /// provided closure `f` is never called.
    ///
    /// Note that this function consumes the receiving future and returns a
    /// wrapped version of it.
    ///
    /// # Examples
    ///
    /// ```
    /// use futures::*;
    ///
    /// let future_of_err_1 = failed::<u32, u32>(1);
    /// let future_of_4 = future_of_err_1.or_else(|x| -> Result<u32, u32> {
    ///     Ok(x + 3)
    /// });
    ///
    /// let future_of_1 = finished::<u32, u32>(1);
    /// future_of_1.or_else(|_| -> Done<u32, u32> {
    ///     panic!("should not be called in case of success");
    /// });
    /// ```
    fn or_else<F, B>(self, f: F) -> OrElse<Self, B, F>
        where F: FnOnce(Self::Error) -> B + Send + 'static,
              B: IntoFuture<Item = Self::Item>,
              Self: Sized,
    {
        assert_future::<Self::Item, B::Error, _>(or_else::new(self, f))
    }

    /// Waits for either one of two futures to complete.
    ///
    /// This function will return a new future which awaits for either this or
    /// the `other` future to complete. The returned future will finish with
    /// both the value resolved and a future representing the completion of the
    /// other work. Both futures must have the same item and error type.
    ///
    /// If either future is canceled or panics, the other is canceled and the
    /// original error is propagated upwards.
    ///
    /// Note that this function consumes the receiving future and returns a
    /// wrapped version of it.
    ///
    /// # Examples
    ///
    /// ```
    /// use futures::*;
    ///
    /// // A poor-man's join implemented on top of select
    ///
    /// fn join<A>(a: A, b: A)
    ///            -> Box<Future<Item=(A::Item, A::Item), Error=A::Error>>
    ///     where A: Future,
    /// {
    ///     a.select(b).then(|res| {
    ///         match res {
    ///             Ok((a, b)) => b.map(|b| (a, b)).boxed(),
    ///             Err((a, _)) => failed(a).boxed(),
    ///         }
    ///     }).boxed()
    /// }
    /// ```
    fn select<B>(self, other: B) -> Select<Self, B::Future>
        where B: IntoFuture<Item=Self::Item, Error=Self::Error>,
              Self: Sized,
    {
        let f = select::new(self, other.into_future());
        assert_future::<(Self::Item, SelectNext<Self, B::Future>),
                        (Self::Error, SelectNext<Self, B::Future>), _>(f)
    }

    /// Joins the result of two futures, waiting for them both to complete.
    ///
    /// This function will return a new future which awaits both this and the
    /// `other` future to complete. The returned future will finish with a tuple
    /// of both results.
    ///
    /// Both futures must have the same error type, and if either finishes with
    /// an error then the other will be canceled and that error will be
    /// returned.
    ///
    /// If either future is canceled or panics, the other is canceled and the
    /// original error is propagated upwards.
    ///
    /// Note that this function consumes the receiving future and returns a
    /// wrapped version of it.
    ///
    /// # Examples
    ///
    /// ```
    /// use futures::*;
    ///
    /// let a = finished::<u32, u32>(1);
    /// let b = finished::<u32, u32>(2);
    /// let pair = a.join(b);
    ///
    /// pair.map(|(a, b)| {
    ///     assert_eq!(a, 1);
    ///     assert_eq!(b, 2);
    /// });
    /// ```
    fn join<B>(self, other: B) -> Join<Self, B::Future>
        where B: IntoFuture<Error=Self::Error>,
              Self: Sized,
    {
        let f = join::new(self, other.into_future());
        assert_future::<(Self::Item, B::Item), Self::Error, _>(f)
    }

    /// Flatten the execution of this future when the successful result of this
    /// future is itself another future.
    ///
    /// This can be useful when combining futures together to flatten the
    /// computation out to the final result. This method can only be called
    /// when the successful result of this future itself implements the
    /// `IntoFuture` trait and the error can be created from this future's error
    /// type.
    ///
    /// This method is equivalent to `self.then(|x| x)`.
    ///
    /// Note that this function consumes the receiving future and returns a
    /// wrapped version of it.
    ///
    /// # Examples
    ///
    /// ```
    /// use futures::*;
    ///
    /// let future_of_a_future = finished::<_, u32>(finished::<u32, u32>(1));
    /// let future_of_1 = future_of_a_future.flatten();
    /// ```
    fn flatten(self) -> Flatten<Self>
        where Self::Item: IntoFuture,
              <<Self as Future>::Item as IntoFuture>::Error:
                    From<<Self as Future>::Error>,
              Self: Sized
    {
        let f = flatten::new(self);
        assert_future::<<<Self as Future>::Item as IntoFuture>::Item,
                        <<Self as Future>::Item as IntoFuture>::Error,
                        _>(f)
    }

    /// Fuse a future such that `poll` will never again be called once it has
    /// returned a success.
    ///
    /// Currently once a future has returned `Some` from `poll` any further
    /// calls could exhibit bad behavior such as block forever, panic, never
    /// return, etc. If it is known that `poll` may be called too often then
    /// this method can be used to ensure that it has defined semantics.
    ///
    /// Once a future has been `fuse`d and it returns success from `poll`, then
    /// it will forever return `None` from `poll` again (never resolve). This,
    /// unlike the trait's `poll` method, is guaranteed.
    ///
    /// Additionally, once a future has completed, this `Fuse` combinator will
    /// ensure that all registered callbacks will not be registered with the
    /// underlying future.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use futures::*;
    ///
    /// # let tokens = &Tokens::all();
    /// let mut future = finished::<i32, u32>(2);
    /// assert!(future.poll(&tokens).is_some());
    ///
    /// // Normally, a call such as this would panic:
    /// //future.poll(&tokens);
    ///
    /// // This, however, is guaranteed to not panic
    /// let mut future = finished::<i32, u32>(2).fuse();
    /// assert!(future.poll(&tokens).is_some());
    /// assert!(future.poll(&tokens).is_none());
    /// ```
    fn fuse(self) -> Fuse<Self>
        where Self: Sized
    {
        let f = fuse::new(self);
        assert_future::<Self::Item, Self::Error, _>(f)
    }

    /// Consume this future and allow it to execute without cancelling it.
    ///
    /// Normally whenever a future is dropped it signals that the underlying
    /// computation should be cancelled ASAP. This function, however, will
    /// consume the future and arrange for the future itself to get dropped only
    /// when the computation has completed.
    ///
    /// This function can be useful to ensure that futures with side effects can
    /// run "in the background", but it is discouraged as it doesn't allow any
    /// control over the future in terms of cancellation.
    ///
    /// Generally applications should retain handles on futures to ensure
    /// they're properly cleaned up if something unexpected happens.
    fn forget(self) where Self: Sized {
        forget::forget(self);
    }
}
// Just a helper function to ensure the futures we're returning all have the
// right implementations. It is the identity function at runtime; it exists
// purely so each combinator's item/error types are checked where the
// combinator is defined rather than at the call site.
fn assert_future<A, B, F>(t: F) -> F
    where F: Future<Item=A, Error=B>,
          A: Send + 'static,
          B: Send + 'static,
{
    t
}
/// A trait essentially representing `Fn(&Tokens) + Send + Sync + 'static`.
///
/// This is used as an argument to the `Future::schedule` function.
pub trait Wake: Send + Sync + 'static {
    /// Invokes this callback indicating that the provided set of events have
    /// activity and the associated futures may make progress.
    fn wake(&self, tokens: &Tokens);
}
// Blanket impl: any sharable closure over `&Tokens` can be used directly as a
// `Wake` callback.
impl<F> Wake for F
    where F: Fn(&Tokens) + Send + Sync + 'static
{
    fn wake(&self, tokens: &Tokens) {
        self(tokens)
    }
}
/// Class of types which can be converted themselves into a future.
///
/// This trait is very similar to the `IntoIterator` trait and is intended to be
/// used in a very similar fashion.
pub trait IntoFuture: Send + 'static {
    /// The future that this type can be converted into.
    type Future: Future<Item=Self::Item, Error=Self::Error>;

    /// The item that the future may resolve with.
    type Item: Send + 'static;
    /// The error that the future may resolve with.
    type Error: Send + 'static;

    /// Consumes this object and produces a future.
    fn into_future(self) -> Self::Future;
}
// Every future trivially converts into itself (mirrors the blanket
// `IntoIterator` impl for iterators).
impl<F: Future> IntoFuture for F {
    type Future = F;
    type Item = F::Item;
    type Error = F::Error;

    fn into_future(self) -> F {
        self
    }
}
// A `Result` converts into an already-completed `Done` future, which is what
// lets combinator closures return plain `Ok(..)`/`Err(..)` values.
impl<T, E> IntoFuture for Result<T, E>
    where T: Send + 'static,
          E: Send + 'static,
{
    type Future = Done<T, E>;
    type Item = T;
    type Error = E;

    fn into_future(self) -> Done<T, E> {
        done(self)
    }
}
|
#[macro_use]
extern crate log;
extern crate getopts;
extern crate unix_daemonize;
extern crate byteorder;
extern crate udt;
extern crate time;
extern crate sodiumoxide;
extern crate rustc_serialize;
pub mod connection;
use unix_daemonize::{daemonize_redirect, ChdirMode};
use std::process::Command;
use std::net::{SocketAddr, SocketAddrV4, IpAddr};
use std::str;
use std::env;
use std::thread;
use std::str::FromStr;
use std::fs::{OpenOptions, File};
use std::path::{Path, PathBuf};
use std::time::{Instant, Duration};
use std::io::{Cursor, Error, Seek, SeekFrom, stderr, Read, Write};
use log::{LogRecord, LogLevel, LogMetadata};
use std::sync::mpsc;
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian};
use sodiumoxide::crypto::secretbox;
use sodiumoxide::crypto::secretbox::xsalsa20poly1305::Key;
use rustc_serialize::hex::{FromHex, ToHex};
// How long the server waits for the *first* client connection before giving
// up and exiting. TODO: make this configurable instead of hard-coded.
const INITIAL_ACCEPT_TIMEOUT_SECONDS: u64 = 60;
// Prints over the current terminal line: the ANSI escape `\x1b[2K` erases the
// line and `\r` returns the cursor to column 0 before printing. Stdout is
// flushed so progress updates appear immediately without a trailing newline.
macro_rules! overprint {
    ($fmt: expr) => {
        print!(concat!("\x1b[2K\r", $fmt));
        std::io::stdout().flush().unwrap();
    };
    ($fmt:expr, $($arg:tt)*) => {
        print!(concat!("\x1b[2K\r", $fmt) , $($arg)*);
        std::io::stdout().flush().unwrap();
    };
}
// Logs the message at error level (so it reaches the daemon's log file) and
// then panics with the same message to abort the current operation.
macro_rules! die {
    ($fmt: expr) => {
        error!($fmt);
        panic!($fmt);
    };
    ($fmt:expr, $($arg:tt)*) => {
        error!($fmt, $($arg)*);
        panic!($fmt, $($arg)*);
    };
}
/// Sending side of a shoop transfer: owns the UDT listener and the path of
/// the file being served.
pub struct Server<'a> {
    /// Client IP address, extracted from `$SSH_CONNECTION` in `Server::new`.
    pub ip: String,
    // Path of the file to serve; borrowed for the server's lifetime.
    filename: &'a str,
    // Encrypted UDT listener (see the `connection` module).
    conn: connection::Server,
}
// Classifies transfer failures for the accept loop in `Server::start`.
#[allow(dead_code)]
enum ShoopErrKind {
    // Connection dropped mid-transfer; the client may reconnect and resume.
    Severed,
    // Unrecoverable failure; the server aborts.
    Fatal,
}
// Error payload produced by `send_file` and consumed by `Server::start`.
struct ShoopErr {
    kind: ShoopErrKind,
    msg: Option<String>,
    // NOTE(review): callers currently pass the count of bytes *remaining*
    // here, not bytes finished — confirm the intended meaning of this field.
    finished: u64,
}
/// Minimal `log` backend that writes `LEVEL - message` lines to stdout.
pub struct ShoopLogger;
impl log::Log for ShoopLogger {
    // Only Info and more severe levels are emitted.
    fn enabled(&self, metadata: &LogMetadata) -> bool {
        metadata.level() <= LogLevel::Info
    }

    fn log(&self, record: &LogRecord) {
        if self.enabled(record.metadata()) {
            let line = format!("{} - {}\n", record.level(), record.args());
            print!("{}", line);
        }
    }
}
impl ShoopLogger {
    /// Installs `ShoopLogger` as the process-wide logger at Info level.
    /// Uses the pre-0.4 `log` crate API, which hands out the max-level filter
    /// through a closure.
    pub fn init() -> Result<(), log::SetLoggerError> {
        log::set_logger(|max_log_level| {
            max_log_level.set(log::LogLevelFilter::Info);
            Box::new(ShoopLogger)
        })
    }
}
impl ShoopErr {
    // Builds an error from a message string; `finished` is the byte count
    // the caller reports (see note on the struct field).
    pub fn new(kind: ShoopErrKind, msg: &str, finished: u64) -> ShoopErr {
        ShoopErr { kind: kind, msg: Some(String::from(msg)), finished: finished }
    }

    // Wraps an `std::io::Error` as a `Severed` (resumable) failure.
    #[allow(dead_code)]
    pub fn from(err: Error, finished: u64) -> ShoopErr {
        ShoopErr { kind: ShoopErrKind::Severed, msg: Some(format!("{:?}", err)), finished: finished }
    }
}
impl<'a> Server<'a> {
    /// Builds a new `Server`: derives the client IP from `$SSH_CONNECTION`,
    /// generates a fresh secretbox key, prints the one-line handshake
    /// (`shoop 0 <ip> <port> <hex key>`) on stdout for the client to read,
    /// then daemonizes with stdout/stderr redirected to `~/.shoop.log`.
    ///
    /// Panics (via `die!`) if `$SSH_CONNECTION` is unset or malformed.
    pub fn new(port_range : connection::PortRange,
               filename : &str)
               -> Server
    {
        let sshconnstr = match env::var("SSH_CONNECTION") {
            Ok(s) => s.trim().to_owned(),
            Err(_) => { die!("SSH_CONNECTION env variable unset and required. Quitting."); }
        };
        // SSH_CONNECTION is "<client ip> <client port> <server ip> <server port>";
        // index 2 is the address the client reached us on. Guard against a
        // malformed value before indexing to avoid an opaque panic.
        let sshconn: Vec<&str> = sshconnstr.split(" ").collect();
        if sshconn.len() < 3 {
            die!("SSH_CONNECTION env variable malformed. Quitting.");
        }
        let ip = sshconn[2].to_owned();
        let key = secretbox::gen_key();
        let Key(keybytes) = key;
        let port = connection::Server::get_open_port(&port_range).unwrap();
        // Handshake line consumed by the client; printed before daemonizing
        // so it goes to the real stdout (the SSH channel).
        println!("shoop 0 {} {} {}", ip, port, keybytes.to_hex());
        let stdout = Some(Path::new(&env::var("HOME").unwrap()).join(".shoop.log"));
        let stderr = Some(Path::new(&env::var("HOME").unwrap()).join(".shoop.log"));
        daemonize_redirect(stdout, stderr, ChdirMode::ChdirRoot).unwrap();

        let conn = connection::Server::new(IpAddr::from_str(&ip).unwrap(), port, key);
        Server { ip: ip, conn: conn, filename: filename }
    }

    /// Accepts client connections in a loop until one transfer completes.
    ///
    /// The first accept is guarded by a watchdog thread that exits the
    /// process if no client connects within `INITIAL_ACCEPT_TIMEOUT_SECONDS`.
    /// A severed transfer loops back to `accept()` so the client can
    /// reconnect and resume from its requested offset; a fatal one panics.
    pub fn start(&self) {
        self.conn.listen().unwrap();
        let mut connection_count: usize = 0;
        info!("listening...");
        loop {
            info!("waiting for connection...");
            let (tx, rx) = mpsc::channel();
            if connection_count == 0 {
                // Watchdog: if the initial client never shows up, don't leave
                // a daemonized server running forever.
                thread::spawn(move || {
                    thread::sleep(Duration::from_secs(INITIAL_ACCEPT_TIMEOUT_SECONDS));
                    if let Err(_) = rx.try_recv() {
                        error!("timed out waiting for initial connection. exiting.");
                        std::process::exit(1);
                    }
                });
            }
            let client = match self.conn.accept() {
                Ok(client) => client,
                Err(e) => { die!("error on sock accept() {:?}", e); }
            };
            connection_count += 1;
            // Signal the watchdog (if any) that a client arrived.
            tx.send(()).unwrap();
            info!("accepted connection!");
            if let Ok(starthdr) = client.recv() {
                // Start header: 1 version byte followed by a little-endian
                // u64 resume offset.
                let version = starthdr[0];
                let mut rdr = Cursor::new(starthdr);
                rdr.set_position(1);
                let offset = rdr.read_u64::<LittleEndian>().unwrap();
                if version == 0x00 {
                    match self.send_file(&client, offset) {
                        Ok(_) => {
                            info!("done sending file");
                            let _ = client.close();
                            break;
                        }
                        Err(ShoopErr{ kind: ShoopErrKind::Severed, msg, finished}) => {
                            info!("connection severed, msg: {:?}, finished: {}", msg, finished);
                            let _ = client.close();
                            continue;
                        }
                        Err(ShoopErr{ kind: ShoopErrKind::Fatal, msg, finished}) => {
                            info!("connection fatal, msg: {:?}, finished: {}", msg, finished);
                            panic!("{:?}", msg);
                        }
                    }
                } else {
                    die!("unrecognized version");
                }
            } else {
                die!("failed to receive version byte from client");
            }
        }
        info!("exiting listen loop.");
    }

    /// Streams `self.filename` to `client` starting at byte `offset`.
    ///
    /// Protocol: an 8-byte little-endian "bytes remaining" header is sent
    /// first, then the file contents in chunks of up to 1300 bytes. Returns
    /// `Err(Severed)` when the connection drops (the caller waits for the
    /// client to reconnect and resume), `Err(Fatal)` on an invalid offset,
    /// and panics only on local file I/O failures.
    ///
    /// BUGFIX: the `Ok(())` arms of both `client.send` matches previously
    /// returned `Err(Severed)`, aborting every transfer on *successful*
    /// sends; a successful send must simply continue.
    fn send_file(&self, client: &connection::ServerConnection, offset: u64) -> Result<(), ShoopErr> {
        let mut f = File::open(self.filename).unwrap();
        let metadata = f.metadata().unwrap();
        // Guard against a client requesting an offset past the end of the
        // file, which would underflow the subtraction below.
        let remaining = match metadata.len().checked_sub(offset) {
            Some(r) => r,
            None => return Err(ShoopErr::new(ShoopErrKind::Fatal,
                                             "requested offset past end of file", 0)),
        };
        info!("total {} bytes", remaining);
        let mut wtr = vec![];
        wtr.write_u64::<LittleEndian>(remaining).unwrap();
        // A failed header send severs the connection; success continues.
        if let Err(e) = client.send(&wtr[..]) {
            return Err(ShoopErr::new(ShoopErrKind::Severed, &format!("{:?}", e), remaining));
        }
        let mut payload = vec![0; 1300];
        f.seek(SeekFrom::Start(offset)).unwrap();
        info!("sending file...");
        loop {
            match f.read(&mut payload) {
                Ok(0) => {
                    // EOF: everything sent.
                    break;
                }
                Ok(read) => {
                    if let Err(e) = client.send(&payload[0..read]) {
                        return Err(ShoopErr::new(ShoopErrKind::Severed,
                                                 &format!("{:?}", e), remaining));
                    }
                }
                Err(e) => {
                    // Local read failure is not resumable; abort loudly.
                    client.close().expect("Error closing stream");
                    error!("failed to read from file.");
                    panic!("{:?}", e);
                }
            }
        }
        client.close().expect("Error closing stream.");
        Ok(())
    }
}
/// Downloads `remote_path` from `remote_ssh_host` into `local_path`.
///
/// Bootstraps over SSH: runs `shoop -s` on the remote host, parses the
/// `shoop <version> <ip> <port> <keyhex>` line it prints, then pulls the
/// file over an encrypted UDT connection, reconnecting and resuming from
/// the last received offset whenever the connection is severed.
///
/// # Panics
/// Panics on unrecoverable setup failures (bad server response, connect
/// error) and on fatal transfer errors.
pub fn download(remote_ssh_host : &str,
                port_range : connection::PortRange,
                remote_path : &str,
                local_path : PathBuf)
{
    let cmd = format!("shoop -s '{}' -p {}", remote_path, port_range);
    overprint!(" - establishing SSH session...");
    assert!(command_exists("ssh"), "`ssh` is required!");
    // Launch the server side remotely; it daemonizes and prints its
    // connection info on stdout before detaching.
    let output = Command::new("ssh")
        .arg(remote_ssh_host.to_owned())
        .arg(cmd)
        .output()
        .unwrap_or_else(|e| {
            panic!("failed to execute process: {}", e);
        });
    let infostring = String::from_utf8_lossy(&output.stdout).to_owned().trim().to_owned();
    let info: Vec<&str> = infostring.split(" ").collect();
    if info.len() != 5 {
        panic!("Unexpected response from server. Are you suuuuure shoop is setup on the server?");
    }
    let (magic, version, ip, port, keyhex) = (info[0], info[1], info[2], info[3], info[4]);
    overprint!(" - opening UDT connection...");
    if magic != "shoop" || version != "0" {
        panic!("Unexpected response from server. Are you suuuuure shoop is setup on the server?");
    }
    // Decode the 32-byte session key the server generated for this transfer.
    let mut keybytes = [0u8; 32];
    keybytes.copy_from_slice(&keyhex.from_hex().unwrap()[..]);
    let key = Key(keybytes);
    let addr: SocketAddr = SocketAddr::V4(SocketAddrV4::from_str(&format!("{}:{}", ip, port)[..]).unwrap());
    let conn = connection::Client::new(addr, key);
    let mut offset = 0u64;
    let mut filesize = None;
    let start_ts = Instant::now();
    // Reconnect loop: after a severed transfer, reconnect and resume at `offset`.
    loop {
        match conn.connect() {
            Ok(()) => {
                overprint!(" - connection opened, shakin' hands, makin' frands");
            },
            Err(e) => {
                panic!("errrrrrrr connecting to {}:{} - {:?}", ip, port, e);
            }
        }
        // Start header: [version: u8 = 0][resume offset: u64 LE].
        let mut wtr = vec![];
        wtr.push(0);
        wtr.write_u64::<LittleEndian>(offset).unwrap();
        match conn.send(&wtr[..]) {
            Err(_) => { conn.close().unwrap(); continue; }
            _ => {}
        }
        match conn.recv() {
            Ok(msg) => {
                if msg.len() == 0 {
                    panic!("failed to get filesize from server, probable timeout.");
                }
                let mut rdr = Cursor::new(msg);
                // Only the first response fixes the filesize; later reconnects keep it.
                filesize = filesize.or(Some(rdr.read_u64::<LittleEndian>().unwrap()));
                overprint!(" + downloading {} ({:.1}MB)\n", local_path.to_string_lossy(), (filesize.unwrap() as f64)/(1024f64*1024f64));
                match recv_file(&conn, filesize.unwrap(), &local_path, offset) {
                    Ok(_) => {
                        break;
                    }
                    Err(ShoopErr{ kind: ShoopErrKind::Severed, msg: _, finished}) => {
                        println!(" * [[SEVERED]]");
                        offset = finished;
                    }
                    Err(ShoopErr{ kind: ShoopErrKind::Fatal, msg, finished: _}) => {
                        panic!("{:?}", msg);
                    }
                }
            }
            Err(_) => {}
        }
        let _ = conn.close();
    }
    let elapsed = start_ts.elapsed().as_secs();
    let fmt_time = if elapsed < 60 {
        format!("{}s", elapsed)
    } else if elapsed < 60 * 60 {
        format!("{}m{}s", elapsed / 60, elapsed % 60)
    } else {
        // BUG FIX: minutes must be taken modulo 60; `elapsed / 60` alone
        // printed the *total* minute count (e.g. 3700s -> "1h61m40s").
        format!("{}h{}m{}s", elapsed / (60 * 60), elapsed / 60 % 60, elapsed % 60)
    };
    println!("shooped it all up in {}", fmt_time);
}
/// Returns true when `command` resolves on the PATH, as reported by `which`.
/// Any failure to run `which` itself is treated as "not found".
fn command_exists(command: &str) -> bool {
    Command::new("which")
        .arg(command)
        .output()
        .map(|output| output.status.success())
        .unwrap_or(false)
}
/// Receives the file body over `conn`, writing it to `filename` starting
/// at `offset`, until `filesize` total bytes are on disk.
///
/// Shows an in-place progress line (throttled to ~10 redraws/sec, speed
/// recomputed at most once per second). Returns `Severed` with the byte
/// count reached so the caller can reconnect and resume.
fn recv_file(conn: &connection::Client, filesize: u64, filename: &PathBuf, offset: u64) -> Result<(), ShoopErr> {
    // Open without truncating so a partially downloaded file can be resumed.
    let mut f = OpenOptions::new().write(true).create(true).truncate(false).open(filename).unwrap();
    f.seek(SeekFrom::Start(offset)).unwrap();
    let start = Instant::now();
    let mut ts = Instant::now();
    let mut total = offset;
    let mut speed_ts = Instant::now();
    let mut speed_total = total;
    let mut speed = 0u64;
    loop {
        let buf = try!(conn.recv()
            .map_err(|e| ShoopErr::new(ShoopErrKind::Severed, &format!("{:?}", e), total)));
        if buf.len() < 1 {
            return Err(ShoopErr::new(ShoopErrKind::Severed, "empty msg", total));
        }
        f.write_all(&buf[..]).unwrap();
        total += buf.len() as u64;
        // Recompute the transfer rate at most once per second.
        let speed_elapsed = speed_ts.elapsed();
        if speed_elapsed > Duration::new(1, 0) {
            speed = ((total - speed_total) as f64 / ((speed_elapsed.as_secs() as f64) + (speed_elapsed.subsec_nanos() as f64) / 1_000_000_000f64)) as u64;
            speed_ts = Instant::now();
            speed_total = total;
        }
        let speedfmt = if speed < 1024 {
            format!("{} b/s", speed)
        } else if speed < 1024 * 1024 {
            format!("{} kb/s", speed / 1024)
        } else {
            format!("{:.1} MB/s", ((speed / 1024) as f64) / 1024f64)
        };
        // Redraw the progress line at most ten times per second.
        if ts.elapsed() > Duration::new(0, 100_000_000) {
            overprint!("   {:.1}M / {:.1}M ({:.1}%) [ {} ]", (total as f64)/(1024f64*1024f64), (filesize as f64)/(1024f64*1024f64), (total as f64) / (filesize as f64) * 100f64, speedfmt);
            ts = Instant::now();
        }
        if total >= filesize {
            // BUG FIX: clamp elapsed seconds to at least 1 — `as_secs()` is 0
            // for sub-second transfers, which made the average-speed division
            // panic with an integer divide-by-zero.
            let secs = std::cmp::max(start.elapsed().as_secs(), 1);
            overprint!("   {0:.1}M / {0:.1}M (100%) [ avg {1:.1} MB/s ]", (filesize as f64)/(1024f64*1024f64), ((total - offset) / secs / 1024) as f64 / 1024f64);
            println!("\ndone.");
            break;
        }
    }
    let _ = conn.close();
    Ok(())
}
Fix send_file: don't treat a successful send() as an error (don't die on success).
#[macro_use]
extern crate log;
extern crate getopts;
extern crate unix_daemonize;
extern crate byteorder;
extern crate udt;
extern crate time;
extern crate sodiumoxide;
extern crate rustc_serialize;
pub mod connection;
use unix_daemonize::{daemonize_redirect, ChdirMode};
use std::process::Command;
use std::net::{SocketAddr, SocketAddrV4, IpAddr};
use std::str;
use std::env;
use std::thread;
use std::str::FromStr;
use std::fs::{OpenOptions, File};
use std::path::{Path, PathBuf};
use std::time::{Instant, Duration};
use std::io::{Cursor, Error, Seek, SeekFrom, stderr, Read, Write};
use log::{LogRecord, LogLevel, LogMetadata};
use std::sync::mpsc;
use byteorder::{ReadBytesExt, WriteBytesExt, LittleEndian};
use sodiumoxide::crypto::secretbox;
use sodiumoxide::crypto::secretbox::xsalsa20poly1305::Key;
use rustc_serialize::hex::{FromHex, ToHex};
// TODO config
const INITIAL_ACCEPT_TIMEOUT_SECONDS: u64 = 60;
/// Prints `$fmt` after clearing the current terminal line (`ESC[2K` + `\r`),
/// then flushes stdout so the partial line appears immediately.
/// Used for in-place progress updates without emitting a newline.
macro_rules! overprint {
    ($fmt: expr) => {
        print!(concat!("\x1b[2K\r", $fmt));
        std::io::stdout().flush().unwrap();
    };
    ($fmt:expr, $($arg:tt)*) => {
        print!(concat!("\x1b[2K\r", $fmt) , $($arg)*);
        std::io::stdout().flush().unwrap();
    };
}
/// Logs `$fmt` at error level and then panics with the same message.
/// Expands to a diverging statement, so it can terminate a match arm
/// that must produce a value.
macro_rules! die {
    ($fmt: expr) => {
        error!($fmt);
        panic!($fmt);
    };
    ($fmt:expr, $($arg:tt)*) => {
        error!($fmt, $($arg)*);
        panic!($fmt, $($arg)*);
    };
}
/// Sending side of a shoop transfer: listens for the client and streams
/// the requested file over an encrypted UDT connection.
pub struct Server<'a> {
    /// This host's address, taken from the third field of `$SSH_CONNECTION`.
    pub ip: String,
    /// Path of the file to serve; borrowed for the server's lifetime.
    filename: &'a str,
    /// Listening connection (bound socket plus session secretbox key).
    conn: connection::Server,
}
/// Coarse classification of transfer failures.
#[allow(dead_code)]
enum ShoopErrKind {
    /// Connection dropped mid-transfer; callers reconnect and resume.
    Severed,
    /// Unrecoverable failure; callers panic on this.
    Fatal,
}
/// Error carrying enough context to resume an interrupted transfer.
struct ShoopErr {
    /// Whether the failure is retryable (`Severed`) or not (`Fatal`).
    kind: ShoopErrKind,
    /// Optional human-readable detail, usually a `{:?}` rendering.
    msg: Option<String>,
    /// Byte count at the point of failure. NOTE(review): the client-side
    /// `recv_file` passes bytes-received (used as the resume offset), while
    /// the server-side `send_file` passes bytes-remaining (only logged) —
    /// confirm this asymmetry is intended.
    finished: u64,
}
/// Minimal logger that writes "LEVEL - message" lines to stdout.
pub struct ShoopLogger;
impl log::Log for ShoopLogger {
    /// Only `Info` and more severe levels are emitted.
    fn enabled(&self, metadata: &LogMetadata) -> bool {
        metadata.level() <= LogLevel::Info
    }

    /// Writes enabled records to stdout as "LEVEL - message" lines.
    fn log(&self, record: &LogRecord) {
        if !self.enabled(record.metadata()) {
            return;
        }
        print!("{} - {}\n", record.level(), record.args());
    }
}
impl ShoopLogger {
    /// Installs `ShoopLogger` as the process-wide logger, capped at `Info`.
    ///
    /// NOTE(review): this uses the pre-0.4 `log` crate API, where
    /// `set_logger` takes a closure receiving the max-level handle.
    pub fn init() -> Result<(), log::SetLoggerError> {
        log::set_logger(|max_log_level| {
            // Cap global verbosity at Info to match `enabled` above.
            max_log_level.set(log::LogLevelFilter::Info);
            Box::new(ShoopLogger)
        })
    }
}
impl ShoopErr {
    /// Builds a `ShoopErr` of the given kind, recording a human-readable
    /// message and the byte count relevant for resuming the transfer.
    pub fn new(kind: ShoopErrKind, msg: &str, finished: u64) -> ShoopErr {
        let message = msg.to_owned();
        ShoopErr {
            kind: kind,
            msg: Some(message),
            finished: finished,
        }
    }

    /// Wraps an I/O error as a `Severed` failure, keeping the byte count.
    #[allow(dead_code)]
    pub fn from(err: Error, finished: u64) -> ShoopErr {
        let message = format!("{:?}", err);
        ShoopErr {
            kind: ShoopErrKind::Severed,
            msg: Some(message),
            finished: finished,
        }
    }
}
impl<'a> Server<'a> {
    /// Creates a server for a single file transfer.
    ///
    /// Reads this host's address from `$SSH_CONNECTION`, generates a fresh
    /// secretbox key, prints the `shoop 0 <ip> <port> <keyhex>` bootstrap
    /// line for the client to parse over the SSH channel, then daemonizes
    /// with stdout/stderr redirected to `~/.shoop.log`.
    ///
    /// # Panics
    /// Dies if `$SSH_CONNECTION` or `$HOME` is unset, no port in
    /// `port_range` is free, or daemonization fails.
    pub fn new(port_range : connection::PortRange,
               filename : &str)
               -> Server
    {
        let sshconnstr = match env::var("SSH_CONNECTION") {
            Ok(s) => s.trim().to_owned(),
            Err(_) => { die!("SSH_CONNECTION env variable unset and required. Quitting."); }
        };
        // SSH_CONNECTION is "<client_ip> <client_port> <server_ip> <server_port>";
        // field index 2 is this host's address.
        let sshconn: Vec<&str> = sshconnstr.split(" ").collect();
        let ip = sshconn[2].to_owned();
        let key = secretbox::gen_key();
        let Key(keybytes) = key;
        let port = connection::Server::get_open_port(&port_range).unwrap();
        // Consumed by the client: magic, protocol version, addr, port, hex key.
        println!("shoop 0 {} {} {}", ip, port, keybytes.to_hex());
        let stdout = Some(Path::new(&env::var("HOME").unwrap()).join(".shoop.log"));
        let stderr = Some(Path::new(&env::var("HOME").unwrap()).join(".shoop.log"));
        daemonize_redirect(stdout, stderr, ChdirMode::ChdirRoot).unwrap();
        let conn = connection::Server::new(IpAddr::from_str(&ip).unwrap(), port, key);
        Server { ip: ip, conn: conn, filename: filename }
    }

    /// Accepts client connections and serves the file until one transfer
    /// completes successfully.
    ///
    /// The first `accept()` is guarded by a watchdog thread that exits the
    /// process if no client connects within `INITIAL_ACCEPT_TIMEOUT_SECONDS`.
    /// A severed transfer loops back and waits for the client to reconnect
    /// (the client resumes via the offset in its start header); a fatal
    /// error panics.
    pub fn start(&self) {
        self.conn.listen().unwrap();
        let mut connection_count: usize = 0;
        info!("listening...");
        loop {
            info!("waiting for connection...");
            let (tx, rx) = mpsc::channel();
            if connection_count == 0 {
                // Watchdog: kill the daemon if the initial client never shows up.
                thread::spawn(move || {
                    thread::sleep(Duration::from_secs(INITIAL_ACCEPT_TIMEOUT_SECONDS));
                    if let Err(_) = rx.try_recv() {
                        error!("timed out waiting for initial connection. exiting.");
                        std::process::exit(1);
                    }
                });
            }
            let client = match self.conn.accept() {
                Ok(client) => client,
                Err(e) => { die!("error on sock accept() {:?}", e); }
            };
            connection_count += 1;
            // Tell the watchdog (if any) a client arrived.
            tx.send(()).unwrap();
            info!("accepted connection!");
            // Start header layout: [version: u8][resume offset: u64 LE].
            if let Ok(starthdr) = client.recv() {
                let version = starthdr[0];
                let mut rdr = Cursor::new(starthdr);
                rdr.set_position(1);
                let offset = rdr.read_u64::<LittleEndian>().unwrap();
                if version == 0x00 {
                    match self.send_file(&client, offset) {
                        Ok(_) => {
                            info!("done sending file");
                            let _ = client.close();
                            break;
                        }
                        Err(ShoopErr{ kind: ShoopErrKind::Severed, msg, finished}) => {
                            info!("connection severed, msg: {:?}, finished: {}", msg, finished);
                            let _ = client.close();
                            continue;
                        }
                        Err(ShoopErr{ kind: ShoopErrKind::Fatal, msg, finished}) => {
                            info!("connection fatal, msg: {:?}, finished: {}", msg, finished);
                            panic!("{:?}", msg);
                        }
                    }
                } else {
                    die!("unrecognized version");
                }
            } else {
                die!("failed to receive version byte from client");
            }
        }
        info!("exiting listen loop.");
    }

    /// Streams the file from `offset` to EOF over `client`.
    ///
    /// Sends an 8-byte little-endian "remaining bytes" header first, then
    /// the file contents in chunks of up to 1300 bytes. Any send failure is
    /// reported as `Severed` so `start()` can wait for a reconnect.
    fn send_file(&self, client: &connection::ServerConnection, offset: u64) -> Result<(), ShoopErr> {
        let mut f = File::open(self.filename).unwrap();
        f.seek(SeekFrom::Start(offset)).unwrap();
        let metadata = f.metadata().unwrap();
        let remaining = metadata.len() - offset;
        info!("total {} bytes", remaining);
        let mut wtr = vec![];
        wtr.write_u64::<LittleEndian>(remaining).unwrap();
        match client.send(&wtr[..]) {
            // Success: the header went out; fall through to stream the body.
            Ok(()) => { info!("wrote filesize header.") },
            Err(e) => {
                return Err(ShoopErr::new(ShoopErrKind::Severed, &format!("{:?}", e), remaining))
            }
        }
        // NOTE(review): 1300-byte chunks presumably keep each message under a
        // typical MTU — confirm against the connection layer.
        let mut payload = vec![0; 1300];
        f.seek(SeekFrom::Start(offset)).unwrap();
        info!("sending file...");
        loop {
            match f.read(&mut payload) {
                Ok(0) => {
                    // EOF: everything has been handed to the connection.
                    break;
                }
                Ok(read) => {
                    match client.send(&payload[0..read]) {
                        Ok(()) => { },
                        Err(e) => {
                            return Err(ShoopErr::new(ShoopErrKind::Severed, &format!("{:?}", e), remaining))
                        }
                    }
                },
                Err(e) => {
                    client.close().expect("Error closing stream");
                    error!("failed to read from file.");
                    panic!("{:?}", e);
                }
            }
        }
        client.close().expect("Error closing stream.");
        Ok(())
    }
}
/// Downloads `remote_path` from `remote_ssh_host` into `local_path`.
///
/// Bootstraps over SSH: runs `shoop -s` on the remote host, parses the
/// `shoop <version> <ip> <port> <keyhex>` line it prints, then pulls the
/// file over an encrypted UDT connection, reconnecting and resuming from
/// the last received offset whenever the connection is severed.
///
/// # Panics
/// Panics on unrecoverable setup failures (bad server response, connect
/// error) and on fatal transfer errors.
pub fn download(remote_ssh_host : &str,
                port_range : connection::PortRange,
                remote_path : &str,
                local_path : PathBuf)
{
    let cmd = format!("shoop -s '{}' -p {}", remote_path, port_range);
    overprint!(" - establishing SSH session...");
    assert!(command_exists("ssh"), "`ssh` is required!");
    // Launch the server side remotely; it daemonizes and prints its
    // connection info on stdout before detaching.
    let output = Command::new("ssh")
        .arg(remote_ssh_host.to_owned())
        .arg(cmd)
        .output()
        .unwrap_or_else(|e| {
            panic!("failed to execute process: {}", e);
        });
    let infostring = String::from_utf8_lossy(&output.stdout).to_owned().trim().to_owned();
    let info: Vec<&str> = infostring.split(" ").collect();
    if info.len() != 5 {
        panic!("Unexpected response from server. Are you suuuuure shoop is setup on the server?");
    }
    let (magic, version, ip, port, keyhex) = (info[0], info[1], info[2], info[3], info[4]);
    overprint!(" - opening UDT connection...");
    if magic != "shoop" || version != "0" {
        panic!("Unexpected response from server. Are you suuuuure shoop is setup on the server?");
    }
    // Decode the 32-byte session key the server generated for this transfer.
    let mut keybytes = [0u8; 32];
    keybytes.copy_from_slice(&keyhex.from_hex().unwrap()[..]);
    let key = Key(keybytes);
    let addr: SocketAddr = SocketAddr::V4(SocketAddrV4::from_str(&format!("{}:{}", ip, port)[..]).unwrap());
    let conn = connection::Client::new(addr, key);
    let mut offset = 0u64;
    let mut filesize = None;
    let start_ts = Instant::now();
    // Reconnect loop: after a severed transfer, reconnect and resume at `offset`.
    loop {
        match conn.connect() {
            Ok(()) => {
                overprint!(" - connection opened, shakin' hands, makin' frands");
            },
            Err(e) => {
                panic!("errrrrrrr connecting to {}:{} - {:?}", ip, port, e);
            }
        }
        // Start header: [version: u8 = 0][resume offset: u64 LE].
        let mut wtr = vec![];
        wtr.push(0);
        wtr.write_u64::<LittleEndian>(offset).unwrap();
        match conn.send(&wtr[..]) {
            Err(_) => { conn.close().unwrap(); continue; }
            _ => {}
        }
        match conn.recv() {
            Ok(msg) => {
                if msg.len() == 0 {
                    panic!("failed to get filesize from server, probable timeout.");
                }
                let mut rdr = Cursor::new(msg);
                // Only the first response fixes the filesize; later reconnects keep it.
                filesize = filesize.or(Some(rdr.read_u64::<LittleEndian>().unwrap()));
                overprint!(" + downloading {} ({:.1}MB)\n", local_path.to_string_lossy(), (filesize.unwrap() as f64)/(1024f64*1024f64));
                match recv_file(&conn, filesize.unwrap(), &local_path, offset) {
                    Ok(_) => {
                        break;
                    }
                    Err(ShoopErr{ kind: ShoopErrKind::Severed, msg: _, finished}) => {
                        println!(" * [[SEVERED]]");
                        offset = finished;
                    }
                    Err(ShoopErr{ kind: ShoopErrKind::Fatal, msg, finished: _}) => {
                        panic!("{:?}", msg);
                    }
                }
            }
            Err(_) => {}
        }
        let _ = conn.close();
    }
    let elapsed = start_ts.elapsed().as_secs();
    let fmt_time = if elapsed < 60 {
        format!("{}s", elapsed)
    } else if elapsed < 60 * 60 {
        format!("{}m{}s", elapsed / 60, elapsed % 60)
    } else {
        // BUG FIX: minutes must be taken modulo 60; `elapsed / 60` alone
        // printed the *total* minute count (e.g. 3700s -> "1h61m40s").
        format!("{}h{}m{}s", elapsed / (60 * 60), elapsed / 60 % 60, elapsed % 60)
    };
    println!("shooped it all up in {}", fmt_time);
}
/// Returns true when `command` resolves on the PATH, as reported by `which`.
/// Any failure to run `which` itself is treated as "not found".
fn command_exists(command: &str) -> bool {
    Command::new("which")
        .arg(command)
        .output()
        .map(|output| output.status.success())
        .unwrap_or(false)
}
/// Receives the file body over `conn`, writing it to `filename` starting
/// at `offset`, until `filesize` total bytes are on disk.
///
/// Shows an in-place progress line (throttled to ~10 redraws/sec, speed
/// recomputed at most once per second). Returns `Severed` with the byte
/// count reached so the caller can reconnect and resume.
fn recv_file(conn: &connection::Client, filesize: u64, filename: &PathBuf, offset: u64) -> Result<(), ShoopErr> {
    // Open without truncating so a partially downloaded file can be resumed.
    let mut f = OpenOptions::new().write(true).create(true).truncate(false).open(filename).unwrap();
    f.seek(SeekFrom::Start(offset)).unwrap();
    let start = Instant::now();
    let mut ts = Instant::now();
    let mut total = offset;
    let mut speed_ts = Instant::now();
    let mut speed_total = total;
    let mut speed = 0u64;
    loop {
        let buf = try!(conn.recv()
            .map_err(|e| ShoopErr::new(ShoopErrKind::Severed, &format!("{:?}", e), total)));
        if buf.len() < 1 {
            return Err(ShoopErr::new(ShoopErrKind::Severed, "empty msg", total));
        }
        f.write_all(&buf[..]).unwrap();
        total += buf.len() as u64;
        // Recompute the transfer rate at most once per second.
        let speed_elapsed = speed_ts.elapsed();
        if speed_elapsed > Duration::new(1, 0) {
            speed = ((total - speed_total) as f64 / ((speed_elapsed.as_secs() as f64) + (speed_elapsed.subsec_nanos() as f64) / 1_000_000_000f64)) as u64;
            speed_ts = Instant::now();
            speed_total = total;
        }
        let speedfmt = if speed < 1024 {
            format!("{} b/s", speed)
        } else if speed < 1024 * 1024 {
            format!("{} kb/s", speed / 1024)
        } else {
            format!("{:.1} MB/s", ((speed / 1024) as f64) / 1024f64)
        };
        // Redraw the progress line at most ten times per second.
        if ts.elapsed() > Duration::new(0, 100_000_000) {
            overprint!("   {:.1}M / {:.1}M ({:.1}%) [ {} ]", (total as f64)/(1024f64*1024f64), (filesize as f64)/(1024f64*1024f64), (total as f64) / (filesize as f64) * 100f64, speedfmt);
            ts = Instant::now();
        }
        if total >= filesize {
            // BUG FIX: clamp elapsed seconds to at least 1 — `as_secs()` is 0
            // for sub-second transfers, which made the average-speed division
            // panic with an integer divide-by-zero.
            let secs = std::cmp::max(start.elapsed().as_secs(), 1);
            overprint!("   {0:.1}M / {0:.1}M (100%) [ avg {1:.1} MB/s ]", (filesize as f64)/(1024f64*1024f64), ((total - offset) / secs / 1024) as f64 / 1024f64);
            println!("\ndone.");
            break;
        }
    }
    let _ = conn.close();
    Ok(())
}
|
//! [![github]](https://github.com/dtolnay/serde-yaml) [![crates-io]](https://crates.io/crates/serde-yaml) [![docs-rs]](https://docs.rs/serde-yaml)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K
//!
//! <br>
//!
//! This crate is a Rust library for using the [Serde] serialization framework
//! with data in [YAML] file format.
//!
//! This library does not reimplement a YAML parser; it uses [yaml-rust] which
//! is a pure Rust YAML 1.2 implementation.
//!
//! [Serde]: https://github.com/serde-rs/serde
//! [YAML]: https://yaml.org/
//! [yaml-rust]: https://github.com/chyh1990/yaml-rust
//!
//! # Examples
//!
//! ```
//! use std::collections::BTreeMap;
//!
//! fn main() -> Result<(), serde_yaml::Error> {
//! // You have some type.
//! let mut map = BTreeMap::new();
//! map.insert("x".to_string(), 1.0);
//! map.insert("y".to_string(), 2.0);
//!
//! // Serialize it to a YAML string.
//! let s = serde_yaml::to_string(&map)?;
//! assert_eq!(s, "---\nx: 1.0\ny: 2.0\n");
//!
//! // Deserialize it back to a Rust type.
//! let deserialized_map: BTreeMap<String, f64> = serde_yaml::from_str(&s)?;
//! assert_eq!(map, deserialized_map);
//! Ok(())
//! }
//! ```
//!
//! ## Using Serde derive
//!
//! It can also be used with Serde's serialization code generator `serde_derive` to
//! handle structs and enums defined in your own program.
//!
//! ```
//! # use serde_derive::{Serialize, Deserialize};
//! use serde::{Serialize, Deserialize};
//!
//! #[derive(Debug, PartialEq, Serialize, Deserialize)]
//! struct Point {
//! x: f64,
//! y: f64,
//! }
//!
//! fn main() -> Result<(), serde_yaml::Error> {
//! let point = Point { x: 1.0, y: 2.0 };
//!
//! let s = serde_yaml::to_string(&point)?;
//! assert_eq!(s, "---\nx: 1.0\ny: 2.0\n");
//!
//! let deserialized_point: Point = serde_yaml::from_str(&s)?;
//! assert_eq!(point, deserialized_point);
//! Ok(())
//! }
//! ```
#![doc(html_root_url = "https://docs.rs/serde_yaml/0.8.18")]
#![deny(missing_docs)]
// Suppressed clippy_pedantic lints
#![allow(
// private Deserializer::next
clippy::should_implement_trait,
// things are often more readable this way
clippy::cast_lossless,
clippy::module_name_repetitions,
clippy::needless_pass_by_value,
clippy::option_if_let_else,
clippy::single_match_else,
// code is acceptable
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
// noisy
clippy::missing_errors_doc,
clippy::must_use_candidate,
)]
pub use crate::de::{from_reader, from_slice, from_str, Deserializer};
pub use crate::error::{Error, Location, Result};
pub use crate::ser::{to_string, to_vec, to_writer, Serializer};
pub use crate::value::{from_value, to_value, Index, Number, Sequence, Value};
#[doc(inline)]
pub use crate::mapping::Mapping;
/// Entry points for deserializing with pre-existing state.
///
/// These functions are only exposed this way because we don't yet expose a
/// Deserializer type. Data formats that have a public Deserializer should not
/// copy these signatures.
pub mod seed {
    // Seeded variants of the top-level entry points; implemented in the
    // private `de` module and re-exported here.
    pub use super::de::{from_reader_seed, from_slice_seed, from_str_seed};
}
mod de;
mod error;
pub mod mapping;
mod number;
mod path;
mod ser;
mod value;
Silence the `match_same_arms` Clippy pedantic lint by allowing it at the crate level.
error: this `match` has identical arm bodies
--> src/number.rs:357:33
|
357 | (_, N::Float(_)) => Some(Ordering::Less),
| ^^^^^^^^^^^^^^^^^^^^
|
= note: `-D clippy::match-same-arms` implied by `-D clippy::pedantic`
note: same as this
--> src/number.rs:345:45
|
345 | (N::NegInt(_), N::PosInt(_)) => Some(Ordering::Less),
| ^^^^^^^^^^^^^^^^^^^^
help: consider refactoring into `(N::NegInt(_), N::PosInt(_)) | (_, N::Float(_))`
--> src/number.rs:345:13
|
345 | (N::NegInt(_), N::PosInt(_)) => Some(Ordering::Less),
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= help: ...or consider changing the match arm bodies
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#match_same_arms
error: this `match` has identical arm bodies
--> src/number.rs:358:33
|
358 | (N::Float(_), _) => Some(Ordering::Greater),
| ^^^^^^^^^^^^^^^^^^^^^^^
|
note: same as this
--> src/number.rs:346:45
|
346 | (N::PosInt(_), N::NegInt(_)) => Some(Ordering::Greater),
| ^^^^^^^^^^^^^^^^^^^^^^^
help: consider refactoring into `(N::PosInt(_), N::NegInt(_)) | (N::Float(_), _)`
--> src/number.rs:346:13
|
346 | (N::PosInt(_), N::NegInt(_)) => Some(Ordering::Greater),
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= help: ...or consider changing the match arm bodies
= help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#match_same_arms
//! [![github]](https://github.com/dtolnay/serde-yaml) [![crates-io]](https://crates.io/crates/serde-yaml) [![docs-rs]](https://docs.rs/serde-yaml)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K
//!
//! <br>
//!
//! This crate is a Rust library for using the [Serde] serialization framework
//! with data in [YAML] file format.
//!
//! This library does not reimplement a YAML parser; it uses [yaml-rust] which
//! is a pure Rust YAML 1.2 implementation.
//!
//! [Serde]: https://github.com/serde-rs/serde
//! [YAML]: https://yaml.org/
//! [yaml-rust]: https://github.com/chyh1990/yaml-rust
//!
//! # Examples
//!
//! ```
//! use std::collections::BTreeMap;
//!
//! fn main() -> Result<(), serde_yaml::Error> {
//! // You have some type.
//! let mut map = BTreeMap::new();
//! map.insert("x".to_string(), 1.0);
//! map.insert("y".to_string(), 2.0);
//!
//! // Serialize it to a YAML string.
//! let s = serde_yaml::to_string(&map)?;
//! assert_eq!(s, "---\nx: 1.0\ny: 2.0\n");
//!
//! // Deserialize it back to a Rust type.
//! let deserialized_map: BTreeMap<String, f64> = serde_yaml::from_str(&s)?;
//! assert_eq!(map, deserialized_map);
//! Ok(())
//! }
//! ```
//!
//! ## Using Serde derive
//!
//! It can also be used with Serde's serialization code generator `serde_derive` to
//! handle structs and enums defined in your own program.
//!
//! ```
//! # use serde_derive::{Serialize, Deserialize};
//! use serde::{Serialize, Deserialize};
//!
//! #[derive(Debug, PartialEq, Serialize, Deserialize)]
//! struct Point {
//! x: f64,
//! y: f64,
//! }
//!
//! fn main() -> Result<(), serde_yaml::Error> {
//! let point = Point { x: 1.0, y: 2.0 };
//!
//! let s = serde_yaml::to_string(&point)?;
//! assert_eq!(s, "---\nx: 1.0\ny: 2.0\n");
//!
//! let deserialized_point: Point = serde_yaml::from_str(&s)?;
//! assert_eq!(point, deserialized_point);
//! Ok(())
//! }
//! ```
#![doc(html_root_url = "https://docs.rs/serde_yaml/0.8.18")]
#![deny(missing_docs)]
// Suppressed clippy_pedantic lints
#![allow(
// private Deserializer::next
clippy::should_implement_trait,
// things are often more readable this way
clippy::cast_lossless,
clippy::match_same_arms,
clippy::module_name_repetitions,
clippy::needless_pass_by_value,
clippy::option_if_let_else,
clippy::single_match_else,
// code is acceptable
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
// noisy
clippy::missing_errors_doc,
clippy::must_use_candidate,
)]
pub use crate::de::{from_reader, from_slice, from_str, Deserializer};
pub use crate::error::{Error, Location, Result};
pub use crate::ser::{to_string, to_vec, to_writer, Serializer};
pub use crate::value::{from_value, to_value, Index, Number, Sequence, Value};
#[doc(inline)]
pub use crate::mapping::Mapping;
/// Entry points for deserializing with pre-existing state.
///
/// These functions are only exposed this way because we don't yet expose a
/// Deserializer type. Data formats that have a public Deserializer should not
/// copy these signatures.
pub mod seed {
    // Seeded variants of the top-level entry points; implemented in the
    // private `de` module and re-exported here.
    pub use super::de::{from_reader_seed, from_slice_seed, from_str_seed};
}
mod de;
mod error;
pub mod mapping;
mod number;
mod path;
mod ser;
mod value;
|
// Copyright 2017 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Testkit for Exonum blockchain framework, allowing to test service APIs synchronously
//! and in the same process as the testkit.
//!
//! # Example
//! ```
//! #[macro_use]
//! extern crate exonum;
//! #[macro_use]
//! extern crate exonum_testkit;
//! extern crate serde_json;
//!
//! use exonum::crypto::{gen_keypair, PublicKey};
//! use exonum::blockchain::{Block, Schema, Service, Transaction};
//! use exonum::messages::{Message, RawTransaction};
//! use exonum::storage::Fork;
//! use exonum::encoding;
//! use exonum_testkit::{ApiKind, TestKitBuilder};
//!
//! // Simple service implementation.
//!
//! const SERVICE_ID: u16 = 1;
//! const TX_TIMESTAMP_ID: u16 = 1;
//!
//! message! {
//! struct TxTimestamp {
//! const TYPE = SERVICE_ID;
//! const ID = TX_TIMESTAMP_ID;
//! const SIZE = 40;
//!
//! field from: &PublicKey [0 => 32]
//! field msg: &str [32 => 40]
//! }
//! }
//!
//! struct TimestampingService;
//!
//! impl Transaction for TxTimestamp {
//! fn verify(&self) -> bool {
//! self.verify_signature(self.from())
//! }
//!
//! fn execute(&self, _fork: &mut Fork) {}
//!
//! fn info(&self) -> serde_json::Value {
//! serde_json::to_value(self).unwrap()
//! }
//! }
//!
//! impl Service for TimestampingService {
//! fn service_name(&self) -> &'static str {
//! "timestamping"
//! }
//!
//! fn service_id(&self) -> u16 {
//! SERVICE_ID
//! }
//!
//! fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<Transaction>, encoding::Error> {
//! let trans: Box<Transaction> = match raw.message_type() {
//! TX_TIMESTAMP_ID => Box::new(TxTimestamp::from_raw(raw)?),
//! _ => {
//! return Err(encoding::Error::IncorrectMessageType {
//! message_type: raw.message_type(),
//! });
//! }
//! };
//! Ok(trans)
//! }
//! }
//!
//! fn main() {
//! // Create testkit for network with four validators.
//! let mut testkit = TestKitBuilder::validator()
//! .with_validators(4)
//! .with_service(TimestampingService)
//! .create();
//!
//! // Create few transactions.
//! let keypair = gen_keypair();
//! let tx1 = TxTimestamp::new(&keypair.0, "Down To Earth", &keypair.1);
//! let tx2 = TxTimestamp::new(&keypair.0, "Cry Over Spilt Milk", &keypair.1);
//! let tx3 = TxTimestamp::new(&keypair.0, "Dropping Like Flies", &keypair.1);
//! // Commit them into blockchain.
//! testkit.create_block_with_transactions(txvec![
//! tx1.clone(), tx2.clone(), tx3.clone()
//! ]);
//!
//! // Add a single transaction.
//! let tx4 = TxTimestamp::new(&keypair.0, "Barking up the wrong tree", &keypair.1);
//! testkit.create_block_with_transaction(tx4.clone());
//!
//! // Check results with schema.
//! let snapshot = testkit.snapshot();
//! let schema = Schema::new(&snapshot);
//! assert!(schema.transactions().contains(&tx1.hash()));
//! assert!(schema.transactions().contains(&tx2.hash()));
//! assert!(schema.transactions().contains(&tx3.hash()));
//! assert!(schema.transactions().contains(&tx4.hash()));
//!
//! // Check results with api.
//! let api = testkit.api();
//! let blocks: Vec<Block> = api.get(ApiKind::Explorer, "v1/blocks?count=10");
//! assert_eq!(blocks.len(), 3);
//! api.get::<serde_json::Value>(
//! ApiKind::System,
//! &format!("v1/transactions/{}", tx1.hash().to_string()),
//! );
//! }
//! ```
#![deny(missing_debug_implementations, missing_docs)]
extern crate exonum;
extern crate futures;
extern crate iron;
extern crate iron_test;
extern crate mount;
extern crate router;
extern crate serde;
extern crate serde_json;
use futures::Stream;
use futures::executor::{self, Spawn};
use futures::sync::mpsc;
use iron::IronError;
use iron::headers::{ContentType, Headers};
use iron::status::StatusClass;
use iron_test::{request, response};
use mount::Mount;
use router::Router;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::sync::{Arc, RwLock, RwLockReadGuard};
use std::fmt;
use exonum::blockchain::{Blockchain, ConsensusConfig, GenesisConfig, Schema as CoreSchema,
Service, StoredConfiguration, Transaction, ValidatorKeys};
use exonum::crypto;
use exonum::helpers::{Height, Round, ValidatorId};
use exonum::messages::{Message, Precommit, Propose};
use exonum::node::{ApiSender, ExternalMessage, State as NodeState, TransactionSend, TxPool};
use exonum::storage::{MemoryDB, Snapshot};
#[macro_use]
mod macros;
mod checkpoint_db;
pub mod compare;
mod greedy_fold;
#[doc(hidden)]
pub use greedy_fold::GreedilyFoldable;
pub use compare::ComparableSnapshot;
use checkpoint_db::{CheckpointDb, CheckpointDbHandler};
/// Emulated test network.
#[derive(Debug)]
pub struct TestNetwork {
    // Node from whose perspective the testkit operates. Created as `validators[0]` in
    // `new()`; `TestKitBuilder::auditor()` may instead set an auditor not in the list.
    us: TestNode,
    // All validator nodes of the network; the position in the vector matches the
    // node's `ValidatorId`.
    validators: Vec<TestNode>,
}
impl TestNetwork {
    /// Creates a new emulated network.
    ///
    /// # Panics
    ///
    /// - Panics if `validator_count` is zero.
    pub fn new(validator_count: u16) -> Self {
        // Explicit precondition check: mirrors the message used by
        // `TestKitBuilder::with_validators` and gives a clearer panic than the
        // `validators[0]` indexing below would on an empty vector.
        assert!(
            validator_count > 0,
            "At least one validator should be present in the network."
        );
        let validators = (0..validator_count)
            .map(ValidatorId)
            .map(TestNode::new_validator)
            .collect::<Vec<_>>();
        let us = validators[0].clone();
        TestNetwork { validators, us }
    }
    /// Returns the node in the emulated network, from whose perspective the testkit operates.
    pub fn us(&self) -> &TestNode {
        &self.us
    }
    /// Returns a slice of all validators in the network.
    pub fn validators(&self) -> &[TestNode] {
        &self.validators
    }
    /// Returns config encoding the network structure usable for creating the genesis block of
    /// a blockchain.
    pub fn genesis_config(&self) -> GenesisConfig {
        GenesisConfig::new(self.validators.iter().map(TestNode::public_keys))
    }
    /// Updates the test network by the new set of nodes.
    ///
    /// Validator ids are reassigned according to the iteration order of `validators`;
    /// `us` is assigned the matching id when its consensus key appears in the list.
    pub fn update<I: IntoIterator<Item = TestNode>>(&mut self, mut us: TestNode, validators: I) {
        let validators = validators
            .into_iter()
            .enumerate()
            .map(|(id, mut validator)| {
                let validator_id = ValidatorId(id as u16);
                validator.change_role(Some(validator_id));
                // Keep `us` in sync with its entry in the new validator list.
                if us.public_keys().consensus_key == validator.public_keys().consensus_key {
                    us.change_role(Some(validator_id));
                }
                validator
            })
            .collect::<Vec<_>>();
        self.validators = validators;
        self.us.clone_from(&us);
    }
    /// Returns service public key of the validator with given id.
    pub fn service_public_key_of(&self, id: ValidatorId) -> Option<&crypto::PublicKey> {
        self.validators().get(id.0 as usize).map(|x| {
            &x.service_public_key
        })
    }
    /// Returns consensus public key of the validator with given id.
    pub fn consensus_public_key_of(&self, id: ValidatorId) -> Option<&crypto::PublicKey> {
        self.validators().get(id.0 as usize).map(|x| {
            &x.consensus_public_key
        })
    }
}
/// An emulated node in the test network.
#[derive(Debug, Clone, PartialEq)]
pub struct TestNode {
    // Keypair used to sign consensus messages (`Propose`, `Precommit`).
    consensus_secret_key: crypto::SecretKey,
    consensus_public_key: crypto::PublicKey,
    // Keypair identifying the node for service-level purposes (see `service_keypair()`).
    service_secret_key: crypto::SecretKey,
    service_public_key: crypto::PublicKey,
    // `Some(id)` if the node is a validator, `None` for auditors.
    validator_id: Option<ValidatorId>,
}
impl TestNode {
    /// Creates a new auditor.
    ///
    /// An auditor has freshly generated keypairs but no validator id.
    pub fn new_auditor() -> Self {
        let (consensus_public_key, consensus_secret_key) = crypto::gen_keypair();
        let (service_public_key, service_secret_key) = crypto::gen_keypair();
        TestNode {
            consensus_secret_key,
            consensus_public_key,
            service_secret_key,
            service_public_key,
            validator_id: None,
        }
    }
    /// Creates a new validator with the given id.
    pub fn new_validator(validator_id: ValidatorId) -> Self {
        let (consensus_public_key, consensus_secret_key) = crypto::gen_keypair();
        let (service_public_key, service_secret_key) = crypto::gen_keypair();
        TestNode {
            consensus_secret_key,
            consensus_public_key,
            service_secret_key,
            service_public_key,
            validator_id: Some(validator_id),
        }
    }
    /// Constructs a new node from the given keypairs.
    pub fn from_parts(
        consensus_keypair: (crypto::PublicKey, crypto::SecretKey),
        service_keypair: (crypto::PublicKey, crypto::SecretKey),
        validator_id: Option<ValidatorId>,
    ) -> TestNode {
        TestNode {
            consensus_public_key: consensus_keypair.0,
            consensus_secret_key: consensus_keypair.1,
            service_public_key: service_keypair.0,
            service_secret_key: service_keypair.1,
            validator_id,
        }
    }
    /// Creates a `Propose` message signed by this validator.
    ///
    /// # Panics
    ///
    /// - Panics if the node is not a validator.
    pub fn create_propose(
        &self,
        height: Height,
        last_hash: &crypto::Hash,
        tx_hashes: &[crypto::Hash],
    ) -> Propose {
        Propose::new(
            self.validator_id.expect(
                "An attempt to create propose from a non-validator node.",
            ),
            height,
            Round::first(),
            last_hash,
            tx_hashes,
            &self.consensus_secret_key,
        )
    }
    /// Creates a `Precommit` message signed by this validator.
    ///
    /// # Panics
    ///
    /// - Panics if the node is not a validator.
    pub fn create_precommit(&self, propose: &Propose, block_hash: &crypto::Hash) -> Precommit {
        use std::time::SystemTime;
        Precommit::new(
            // BUG FIX: the panic message previously said "propose" — it was copied
            // verbatim from `create_propose`.
            self.validator_id.expect(
                "An attempt to create precommit from a non-validator node.",
            ),
            propose.height(),
            propose.round(),
            &propose.hash(),
            block_hash,
            SystemTime::now(),
            &self.consensus_secret_key,
        )
    }
    /// Returns public keys of the node.
    pub fn public_keys(&self) -> ValidatorKeys {
        ValidatorKeys {
            consensus_key: self.consensus_public_key,
            service_key: self.service_public_key,
        }
    }
    /// Returns the current validator id of node if it is validator of the test network.
    pub fn validator_id(&self) -> Option<ValidatorId> {
        self.validator_id
    }
    /// Changes node role.
    pub fn change_role(&mut self, role: Option<ValidatorId>) {
        self.validator_id = role;
    }
    /// Returns the service keypair.
    pub fn service_keypair(&self) -> (&crypto::PublicKey, &crypto::SecretKey) {
        (&self.service_public_key, &self.service_secret_key)
    }
}
impl From<TestNode> for ValidatorKeys {
fn from(node: TestNode) -> Self {
node.public_keys()
}
}
/// Builder for `TestKit`.
///
/// # Example
///
/// ```
/// # extern crate exonum;
/// # extern crate exonum_testkit;
/// # use exonum::blockchain::{Service, Transaction};
/// # use exonum::messages::RawTransaction;
/// # use exonum::encoding;
/// # use exonum_testkit::TestKitBuilder;
/// # pub struct MyService;
/// # impl Service for MyService {
/// #    fn service_name(&self) -> &'static str {
/// #        "documentation"
/// #    }
/// #    fn service_id(&self) -> u16 {
/// #        0
/// #    }
/// #    fn tx_from_raw(&self, _raw: RawTransaction) -> Result<Box<Transaction>, encoding::Error> {
/// #        unimplemented!();
/// #    }
/// # }
/// # fn main() {
/// let mut testkit = TestKitBuilder::validator()
///     .with_service(MyService)
///     .with_validators(4)
///     .create();
/// testkit.create_block();
/// // Other test code
/// # }
/// ```
pub struct TestKitBuilder {
    // Node from whose perspective the resulting testkit will operate.
    us: TestNode,
    // Validators of the future network; extended by `with_validators`.
    validators: Vec<TestNode>,
    // Services to deploy on the blockchain; appended by `with_service`.
    services: Vec<Box<Service>>,
}
impl fmt::Debug for TestKitBuilder {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // `Box<Service>` is not `Debug`, so represent services by their names.
        let service_names: Vec<_> = self.services
            .iter()
            .map(|service| service.service_name())
            .collect();
        f.debug_struct("TestKitBuilder")
            .field("us", &self.us)
            .field("validators", &self.validators)
            .field("services", &service_names)
            .finish()
    }
}
impl TestKitBuilder {
    /// Creates testkit for the validator node.
    pub fn validator() -> Self {
        let us = TestNode::new_validator(ValidatorId(0));
        let validators = vec![us.clone()];
        TestKitBuilder {
            us,
            validators,
            services: Vec::new(),
        }
    }
    /// Creates testkit for the auditor node.
    pub fn auditor() -> Self {
        TestKitBuilder {
            us: TestNode::new_auditor(),
            validators: vec![TestNode::new_validator(ValidatorId(0))],
            services: Vec::new(),
        }
    }
    /// Sets the number of validator nodes in the test network.
    ///
    /// If the network already contains at least `validators_count` validators,
    /// this is a no-op; validators are never removed.
    pub fn with_validators(mut self, validators_count: u16) -> Self {
        assert!(
            validators_count > 0,
            "At least one validator should be present in the network."
        );
        // Top up the validator list with freshly generated nodes.
        for id in self.validators.len() as u16..validators_count {
            self.validators.push(TestNode::new_validator(ValidatorId(id)));
        }
        self
    }
    /// Adds a service to the testkit.
    pub fn with_service<S>(mut self, service: S) -> Self
    where
        S: Into<Box<Service>>,
    {
        self.services.push(service.into());
        self
    }
    /// Creates the testkit.
    pub fn create(self) -> TestKit {
        crypto::init();
        let network = TestNetwork {
            us: self.us,
            validators: self.validators,
        };
        TestKit::assemble(self.services, network)
    }
}
/// Testkit for testing blockchain services. It offers simple network configuration emulation
/// (with no real network setup).
pub struct TestKit {
    blockchain: Blockchain,
    // Handler for the checkpointed database; enables `rollback()`.
    db_handler: CheckpointDbHandler<MemoryDB>,
    // Stream of API events (`ExternalMessage`s), drained by `poll_events()`.
    events_stream: Spawn<Box<Stream<Item = (), Error = ()>>>,
    network: TestNetwork,
    // Sending side of the API channel; used to submit transactions to the testkit.
    api_sender: ApiSender,
    // Shared pool of uncommitted transactions, keyed by transaction hash.
    mempool: TxPool,
    // Pending (or committed but not yet actual) configuration change, if any.
    cfg_proposal: Option<ConfigurationProposalState>,
}
impl fmt::Debug for TestKit {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Fields without a useful `Debug` representation (DB handler, event stream,
        // API sender) are intentionally omitted.
        let mut builder = f.debug_struct("TestKit");
        builder.field("blockchain", &self.blockchain);
        builder.field("network", &self.network);
        builder.field("mempool", &self.mempool);
        builder.field("cfg_change_proposal", &self.cfg_proposal);
        builder.finish()
    }
}
impl TestKit {
    /// Creates a new `TestKit` with a single validator with the given service.
    pub fn for_service<S>(service: S) -> Self
    where
        S: Into<Box<Service>>,
    {
        TestKitBuilder::validator().with_service(service).create()
    }
    // Assembles a testkit from its parts: wires an in-memory blockchain to a bounded
    // API channel, and spawns an event stream that drains incoming `ExternalMessage`s
    // into the shared mempool.
    fn assemble(services: Vec<Box<Service>>, network: TestNetwork) -> Self {
        let api_channel = mpsc::channel(1_000);
        let api_sender = ApiSender::new(api_channel.0.clone());
        // The DB is wrapped into a checkpointing layer so that `rollback()` can
        // restore earlier states; the handler is kept alongside the blockchain.
        let db = CheckpointDb::new(MemoryDB::new());
        let db_handler = db.handler();
        let mut blockchain = Blockchain::new(
            Box::new(db),
            services,
            *network.us().service_keypair().0,
            network.us().service_keypair().1.clone(),
            api_sender.clone(),
        );
        let genesis = network.genesis_config();
        blockchain.create_genesis_block(genesis.clone()).unwrap();
        let mempool = Arc::new(RwLock::new(BTreeMap::new()));
        let event_stream: Box<Stream<Item = (), Error = ()>> = {
            let blockchain = blockchain.clone();
            let mempool = Arc::clone(&mempool);
            Box::new(api_channel.1.greedy_fold((), move |_, event| {
                let snapshot = blockchain.snapshot();
                let schema = CoreSchema::new(&snapshot);
                match event {
                    ExternalMessage::Transaction(tx) => {
                        let hash = tx.hash();
                        // Transactions already committed to the blockchain are not
                        // re-added to the mempool.
                        if !schema.transactions().contains(&hash) {
                            mempool
                                .write()
                                .expect("Cannot write transactions to mempool")
                                .insert(tx.hash(), tx);
                        }
                    }
                    ExternalMessage::PeerAdd(_) => { /* Ignored */ }
                }
            }))
        };
        let events_stream = executor::spawn(event_stream);
        TestKit {
            blockchain,
            db_handler,
            api_sender,
            events_stream,
            network,
            mempool: Arc::clone(&mempool),
            cfg_proposal: None,
        }
    }
    /// Creates a mounting point for public APIs used by the blockchain.
    fn public_api_mount(&self) -> Mount {
        self.blockchain.mount_public_api()
    }
    /// Creates a mounting point for private APIs used by the blockchain.
    // NOTE(review): the doc above previously said "public" — a copy-paste slip.
    fn private_api_mount(&self) -> Mount {
        self.blockchain.mount_private_api()
    }
    /// Creates an instance of `TestKitApi` to test the API provided by services.
    pub fn api(&self) -> TestKitApi {
        TestKitApi::new(self)
    }
    /// Polls the *existing* events from the event loop until exhaustion. Does not wait
    /// until new events arrive.
    pub fn poll_events(&mut self) -> Option<Result<(), ()>> {
        self.events_stream.wait_stream()
    }
    /// Returns a snapshot of the current blockchain state.
    pub fn snapshot(&self) -> Box<Snapshot> {
        self.blockchain.snapshot()
    }
    /// Returns a blockchain instance for low level manipulations with storage.
    pub fn blockchain_mut(&mut self) -> &mut Blockchain {
        &mut self.blockchain
    }
    /// Rolls the blockchain back for a certain number of blocks.
    ///
    /// # Examples
    ///
    /// Rollbacks are useful in testing alternative scenarios (e.g., transactions executed
    /// in different order and/or in different blocks) that require an expensive setup:
    ///
    /// ```
    /// # #[macro_use] extern crate exonum;
    /// # #[macro_use] extern crate exonum_testkit;
    /// # use exonum::blockchain::{Service, Transaction};
    /// # use exonum::messages::RawTransaction;
    /// # use exonum::encoding;
    /// # use exonum_testkit::{TestKit, TestKitBuilder};
    /// #
    /// # type FromRawResult = Result<Box<Transaction>, encoding::Error>;
    /// # pub struct MyService;
    /// # impl Service for MyService {
    /// #    fn service_name(&self) -> &'static str {
    /// #        "documentation"
    /// #    }
    /// #    fn service_id(&self) -> u16 {
    /// #        0
    /// #    }
    /// #    fn tx_from_raw(&self, _raw: RawTransaction) -> FromRawResult {
    /// #        unimplemented!();
    /// #    }
    /// # }
    /// #
    /// # message! {
    /// #     struct MyTransaction {
    /// #         const TYPE = 0;
    /// #         const ID = 0;
    /// #         const SIZE = 40;
    /// #         field from: &exonum::crypto::PublicKey [0 => 32]
    /// #         field msg: &str [32 => 40]
    /// #     }
    /// # }
    /// # impl Transaction for MyTransaction {
    /// #     fn verify(&self) -> bool { true }
    /// #     fn execute(&self, _: &mut exonum::storage::Fork) {}
    /// # }
    /// #
    /// # fn expensive_setup(_: &mut TestKit) {}
    /// # fn assert_something_about(_: &TestKit) {}
    /// #
    /// # fn main() {
    /// let mut testkit = TestKitBuilder::validator()
    ///     .with_service(MyService)
    ///     .create();
    /// expensive_setup(&mut testkit);
    /// let (pubkey, key) = exonum::crypto::gen_keypair();
    /// let tx_a = MyTransaction::new(&pubkey, "foo", &key);
    /// let tx_b = MyTransaction::new(&pubkey, "bar", &key);
    /// testkit.create_block_with_transactions(txvec![tx_a.clone(), tx_b.clone()]);
    /// assert_something_about(&testkit);
    /// testkit.rollback(1);
    /// testkit.create_block_with_transactions(txvec![tx_a.clone()]);
    /// testkit.create_block_with_transactions(txvec![tx_b.clone()]);
    /// assert_something_about(&testkit);
    /// testkit.rollback(2);
    /// # }
    /// ```
    pub fn rollback(&mut self, blocks: usize) {
        assert!(
            (blocks as u64) <= self.height().0,
            "Cannot rollback past genesis block"
        );
        self.db_handler.rollback(blocks);
    }
    /// Executes a list of transactions given the current state of the blockchain, but does not
    /// commit execution results to the blockchain. The execution result is the same
    /// as if transactions were included into a new block; for example,
    /// transactions included into one of previous blocks do not lead to any state changes.
    pub fn probe_all<I>(&mut self, transactions: I) -> Box<Snapshot>
    where
        I: IntoIterator<Item = Box<Transaction>>,
    {
        // Filter out already committed transactions; otherwise,
        // `create_block_with_transactions()` will panic.
        let schema = CoreSchema::new(self.snapshot());
        let uncommitted_txs = transactions.into_iter().filter(|tx| {
            !schema.transactions().contains(&tx.hash())
        });
        // Create a throwaway block, capture its state, then roll it back.
        self.create_block_with_transactions(uncommitted_txs);
        let snapshot = self.snapshot();
        self.rollback(1);
        snapshot
    }
    /// Executes a transaction given the current state of the blockchain but does not
    /// commit execution results to the blockchain. The execution result is the same
    /// as if a transaction was included into a new block; for example,
    /// a transaction included into one of previous blocks does not lead to any state changes.
    pub fn probe<T: Transaction>(&mut self, transaction: T) -> Box<Snapshot> {
        self.probe_all(vec![Box::new(transaction) as Box<Transaction>])
    }
    // Forms and commits a block with the given transaction hashes, emulating
    // the consensus round: the leader proposes, every validator precommits.
    fn do_create_block(&mut self, tx_hashes: &[crypto::Hash]) {
        let new_block_height = self.height().next();
        let last_hash = self.last_block_hash();
        // Apply a scheduled configuration change (if any) before forming the block.
        self.update_configuration(new_block_height);
        let (block_hash, patch) = {
            let validator_id = self.leader().validator_id().unwrap();
            let transactions = self.mempool();
            self.blockchain.create_patch(
                validator_id,
                new_block_height,
                tx_hashes,
                &transactions,
            )
        };
        // Remove txs from mempool
        {
            let mut transactions = self.mempool.write().expect(
                "Cannot modify transactions in mempool",
            );
            for hash in tx_hashes {
                transactions.remove(hash);
            }
        }
        let propose = self.leader().create_propose(
            new_block_height,
            &last_hash,
            tx_hashes,
        );
        let precommits: Vec<_> = self.network()
            .validators()
            .iter()
            .map(|v| v.create_precommit(&propose, &block_hash))
            .collect();
        self.blockchain
            .commit(&patch, block_hash, precommits.iter())
            .unwrap();
        self.poll_events();
    }
    /// Update test network configuration if such an update has been scheduled
    /// with `commit_configuration_change`.
    fn update_configuration(&mut self, new_block_height: Height) {
        use ConfigurationProposalState::*;
        let actual_from = new_block_height.next();
        if let Some(cfg_proposal) = self.cfg_proposal.take() {
            match cfg_proposal {
                Uncommitted(cfg_proposal) => {
                    // Commit configuration proposal
                    let stored = cfg_proposal.stored_configuration().clone();
                    let mut fork = self.blockchain.fork();
                    CoreSchema::new(&mut fork).commit_configuration(stored);
                    let changes = fork.into_patch();
                    self.blockchain.merge(changes).unwrap();
                    // The proposal stays around (as `Committed`) until it becomes actual.
                    self.cfg_proposal = Some(Committed(cfg_proposal));
                }
                Committed(cfg_proposal) => {
                    if cfg_proposal.actual_from() == actual_from {
                        // Modify the self configuration
                        self.network_mut().update(
                            cfg_proposal.us,
                            cfg_proposal.validators,
                        );
                    } else {
                        self.cfg_proposal = Some(Committed(cfg_proposal));
                    }
                }
            }
        }
    }
    /// Creates a block with the given transactions.
    /// Transactions that are in the mempool will be ignored.
    ///
    /// # Panics
    ///
    /// - Panics if any of transactions has been already committed to the blockchain.
    pub fn create_block_with_transactions<I>(&mut self, txs: I)
    where
        I: IntoIterator<Item = Box<Transaction>>,
    {
        let tx_hashes: Vec<_> = {
            let mut mempool = self.mempool.write().expect(
                "Cannot write transactions to mempool",
            );
            let snapshot = self.snapshot();
            let schema = CoreSchema::new(&snapshot);
            // Unverifiable transactions are silently dropped; committed ones panic.
            txs.into_iter()
                .filter(|tx| tx.verify())
                .map(|tx| {
                    let txid = tx.hash();
                    assert!(
                        !schema.transactions().contains(&txid),
                        "Transaction is already committed: {:?}",
                        tx
                    );
                    mempool.insert(txid, tx);
                    txid
                })
                .collect()
        };
        self.create_block_with_tx_hashes(&tx_hashes);
    }
    /// Creates a block with the given transaction.
    /// Transactions that are in the mempool will be ignored.
    ///
    /// # Panics
    ///
    /// - Panics if given transaction has been already committed to the blockchain.
    pub fn create_block_with_transaction<T: Transaction>(&mut self, tx: T) {
        self.create_block_with_transactions(txvec![tx]);
    }
    /// Creates block with the specified transactions. The transactions must be previously
    /// sent to the node via API or directly put into the `channel()`.
    ///
    /// # Panics
    ///
    /// - Panics in the case any of transaction hashes are not in the mempool.
    pub fn create_block_with_tx_hashes(&mut self, tx_hashes: &[crypto::Hash]) {
        // Drain pending API events first so recently sent transactions reach the mempool.
        self.poll_events();
        {
            let txs = self.mempool();
            for hash in tx_hashes {
                assert!(txs.contains_key(hash));
            }
        }
        self.do_create_block(tx_hashes);
    }
    /// Creates block with all transactions in the mempool.
    pub fn create_block(&mut self) {
        self.poll_events();
        let tx_hashes: Vec<_> = self.mempool().keys().cloned().collect();
        self.do_create_block(&tx_hashes);
    }
    /// Creates a chain of blocks until a given height.
    ///
    /// # Example
    ///
    /// ```
    /// # extern crate exonum_testkit;
    /// # extern crate exonum;
    /// # use exonum::helpers::Height;
    /// # use exonum_testkit::TestKitBuilder;
    /// # fn main() {
    /// let mut testkit = TestKitBuilder::validator().create();
    /// testkit.create_blocks_until(Height(5));
    /// assert_eq!(Height(5), testkit.height());
    /// # }
    /// ```
    pub fn create_blocks_until(&mut self, height: Height) {
        while self.height() < height {
            self.create_block();
        }
    }
    /// Returns the hash of latest committed block.
    pub fn last_block_hash(&self) -> crypto::Hash {
        self.blockchain.last_hash()
    }
    /// Returns the height of latest committed block.
    pub fn height(&self) -> Height {
        self.blockchain.last_block().height()
    }
    /// Returns the actual blockchain configuration.
    pub fn actual_configuration(&self) -> StoredConfiguration {
        CoreSchema::new(&self.snapshot()).actual_configuration()
    }
    /// Returns reference to validator with the given identifier.
    ///
    /// # Panics
    ///
    /// - Panics if validator with the given id is absent in test network.
    pub fn validator(&self, id: ValidatorId) -> &TestNode {
        &self.network.validators[id.0 as usize]
    }
    /// Returns sufficient number of validators for the Byzantine Fault Tolerance consensus.
    pub fn majority_count(&self) -> usize {
        NodeState::byzantine_majority_count(self.network().validators().len())
    }
    /// Returns the test node memory pool handle.
    pub fn mempool(&self) -> RwLockReadGuard<BTreeMap<crypto::Hash, Box<Transaction>>> {
        self.mempool.read().expect(
            "Can't read transactions from the mempool.",
        )
    }
    /// Returns the leader on the current height. At the moment first validator.
    pub fn leader(&self) -> &TestNode {
        &self.network().validators[0]
    }
    /// Returns the reference to test network.
    pub fn network(&self) -> &TestNetwork {
        &self.network
    }
    /// Returns the mutable reference to test network for manual modifications.
    pub fn network_mut(&mut self) -> &mut TestNetwork {
        &mut self.network
    }
    /// Returns a copy of the actual configuration of the testkit.
    /// The returned configuration could be modified for use with
    /// `commit_configuration_change` method.
    pub fn configuration_change_proposal(&self) -> TestNetworkConfiguration {
        let stored_configuration = CoreSchema::new(&self.snapshot()).actual_configuration();
        TestNetworkConfiguration::from_parts(
            self.network().us().clone(),
            self.network().validators().into(),
            stored_configuration,
        )
    }
    /// Adds a new configuration proposal. Remember, to add this proposal to the blockchain,
    /// you should create at least one block.
    ///
    /// # Panics
    ///
    /// - Panics if `actual_from` is less than current height or equals.
    /// - Panics if configuration change has been already proposed but not executed.
    ///
    /// # Example
    ///
    /// ```
    /// extern crate exonum;
    /// extern crate exonum_testkit;
    /// extern crate serde;
    /// extern crate serde_json;
    ///
    /// use exonum::helpers::{Height, ValidatorId};
    /// use exonum_testkit::TestKitBuilder;
    /// use exonum::blockchain::Schema;
    /// use exonum::storage::StorageValue;
    ///
    /// fn main() {
    ///    let mut testkit = TestKitBuilder::auditor().with_validators(3).create();
    ///
    ///    let cfg_change_height = Height(5);
    ///    let proposal = {
    ///         let mut cfg = testkit.configuration_change_proposal();
    ///         // Add us to validators.
    ///         let mut validators = cfg.validators().to_vec();
    ///         validators.push(testkit.network().us().clone());
    ///         cfg.set_validators(validators);
    ///         // Change configuration of our service.
    ///         cfg.set_service_config("my_service", "My config");
    ///         // Set the height with which the configuration takes effect.
    ///         cfg.set_actual_from(cfg_change_height);
    ///         cfg
    ///     };
    ///     // Save proposed configuration.
    ///     let stored = proposal.stored_configuration().clone();
    ///     // Commit configuration change proposal to the testkit.
    ///     testkit.commit_configuration_change(proposal);
    ///     // Create blocks up to the height preceding the `actual_from` height.
    ///     testkit.create_blocks_until(cfg_change_height.previous());
    ///     // Check that the proposal has become actual.
    ///     assert_eq!(testkit.network().us().validator_id(), Some(ValidatorId(3)));
    ///     assert_eq!(testkit.validator(ValidatorId(3)), testkit.network().us());
    ///     assert_eq!(testkit.actual_configuration(), stored);
    ///     assert_eq!(
    ///         Schema::new(&testkit.snapshot())
    ///             .previous_configuration()
    ///             .unwrap()
    ///             .hash(),
    ///         stored.previous_cfg_hash
    ///     );
    /// }
    /// ```
    pub fn commit_configuration_change(&mut self, proposal: TestNetworkConfiguration) {
        use self::ConfigurationProposalState::*;
        assert!(
            self.height() < proposal.actual_from(),
            "The `actual_from` height should be greater than the current."
        );
        assert!(
            self.cfg_proposal.is_none(),
            "There is an active configuration change proposal."
        );
        self.cfg_proposal = Some(Uncommitted(proposal));
    }
    /// Returns the node in the emulated network, from whose perspective the testkit operates.
    pub fn us(&self) -> &TestNode {
        self.network().us()
    }
    /// Returns public key of the validator.
    pub fn service_public_key(&self) -> crypto::PublicKey {
        *self.network().validators()[0].service_keypair().0
    }
    /// Returns secret key of the validator.
    pub fn service_secret_key(&self) -> crypto::SecretKey {
        self.network().validators()[0].service_keypair().1.clone()
    }
}
/// A configuration of the test network.
#[derive(Debug)]
pub struct TestNetworkConfiguration {
    // Node from whose perspective the testkit operates; its role is refreshed by
    // `update_our_role()` whenever `us` or the validator list changes.
    us: TestNode,
    // Proposed validator set; kept in sync with `stored_configuration.validator_keys`.
    validators: Vec<TestNode>,
    // The resulting exonum configuration encoded by this proposal.
    stored_configuration: StoredConfiguration,
}
// A new configuration proposal state.
#[derive(Debug)]
enum ConfigurationProposalState {
    // Proposal scheduled via `commit_configuration_change` but not yet written
    // to the blockchain.
    Uncommitted(TestNetworkConfiguration),
    // Proposal committed to the blockchain, waiting for its `actual_from` height.
    Committed(TestNetworkConfiguration),
}
impl TestNetworkConfiguration {
    // Builds a proposal from the current network state. The hash of the *unmodified*
    // configuration is computed first, so it can serve as `previous_cfg_hash` of the
    // proposed one — do not reorder these two statements.
    fn from_parts(
        us: TestNode,
        validators: Vec<TestNode>,
        mut stored_configuration: StoredConfiguration,
    ) -> Self {
        let prev_hash = exonum::storage::StorageValue::hash(&stored_configuration);
        stored_configuration.previous_cfg_hash = prev_hash;
        TestNetworkConfiguration {
            us,
            validators,
            stored_configuration,
        }
    }
    /// Returns the node from whose perspective the testkit operates.
    pub fn us(&self) -> &TestNode {
        &self.us
    }
    /// Modifies the node from whose perspective the testkit operates.
    pub fn set_us(&mut self, us: TestNode) {
        self.us = us;
        self.update_our_role();
    }
    /// Returns the test network validators.
    pub fn validators(&self) -> &[TestNode] {
        self.validators.as_ref()
    }
    /// Returns the current consensus configuration.
    pub fn consensus_configuration(&self) -> &ConsensusConfig {
        &self.stored_configuration.consensus
    }
    /// Return the height, starting from which this configuration becomes actual.
    pub fn actual_from(&self) -> Height {
        self.stored_configuration.actual_from
    }
    /// Modifies the height, starting from which this configuration becomes actual.
    pub fn set_actual_from(&mut self, actual_from: Height) {
        self.stored_configuration.actual_from = actual_from;
    }
    /// Modifies the current consensus configuration.
    pub fn set_consensus_configuration(&mut self, consensus: ConsensusConfig) {
        self.stored_configuration.consensus = consensus;
    }
    /// Modifies the validators list.
    ///
    /// Validator ids are reassigned according to the iteration order; the stored
    /// configuration's `validator_keys` and our own role are refreshed accordingly.
    pub fn set_validators<I>(&mut self, validators: I)
    where
        I: IntoIterator<Item = TestNode>,
    {
        self.validators = validators
            .into_iter()
            .enumerate()
            .map(|(idx, mut node)| {
                node.change_role(Some(ValidatorId(idx as u16)));
                node
            })
            .collect();
        self.stored_configuration.validator_keys = self.validators
            .iter()
            .cloned()
            .map(ValidatorKeys::from)
            .collect();
        self.update_our_role();
    }
    /// Returns the configuration for service with the given identifier.
    ///
    /// # Panics
    ///
    /// - Panics if there is no configuration for the given service, or it cannot
    ///   be deserialized into `D`.
    pub fn service_config<D>(&self, id: &str) -> D
    where
        for<'de> D: Deserialize<'de>,
    {
        let value = self.stored_configuration.services.get(id).expect(
            "Unable to find configuration for service",
        );
        serde_json::from_value(value.clone()).unwrap()
    }
    /// Modifies the configuration of the service with the given identifier.
    pub fn set_service_config<D>(&mut self, id: &str, config: D)
    where
        D: Serialize,
    {
        let value = serde_json::to_value(config).unwrap();
        self.stored_configuration.services.insert(id.into(), value);
    }
    /// Returns the resulting exonum blockchain configuration.
    pub fn stored_configuration(&self) -> &StoredConfiguration {
        &self.stored_configuration
    }
    // Recomputes `us.validator_id` by searching for our service key among
    // the proposed validators; `None` if we are not a validator.
    fn update_our_role(&mut self) {
        let validator_id = self.validators
            .iter()
            .position(|x| {
                x.public_keys().service_key == self.us.service_public_key
            })
            .map(|x| ValidatorId(x as u16));
        self.us.validator_id = validator_id;
    }
}
/// Kind of public or private REST API of an Exonum node.
///
/// `ApiKind` allows to use `get*` and `post*` methods of [`TestKitApi`] more safely.
///
/// [`TestKitApi`]: struct.TestKitApi.html
#[derive(Debug)]
pub enum ApiKind {
    /// `api/system` endpoints of the built-in Exonum REST API.
    System,
    /// `api/explorer` endpoints of the built-in Exonum REST API.
    Explorer,
    /// Endpoints corresponding to a service with the specified string identifier.
    Service(&'static str),
}
impl ApiKind {
    // Converts this API kind into the URL prefix under which its endpoints are mounted.
    fn into_prefix(self) -> String {
        match self {
            ApiKind::Service(name) => format!("api/services/{}", name),
            ApiKind::System => String::from("api/system"),
            ApiKind::Explorer => String::from("api/explorer"),
        }
    }
}
/// API encapsulation for the testkit. Allows to execute and synchronously retrieve results
/// for REST-ful endpoints of services.
pub struct TestKitApi {
    // Mount serving `api/services`, `api/system` and `api/explorer` public endpoints.
    public_mount: Mount,
    // Mount serving `api/services` private endpoints.
    private_mount: Mount,
    // Channel endpoint used by `send()` to submit transactions.
    api_sender: ApiSender,
}
impl fmt::Debug for TestKitApi {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Mounts and the API sender carry no useful `Debug` info; print the type name only.
        let mut builder = f.debug_struct("TestKitApi");
        builder.finish()
    }
}
impl TestKitApi {
    /// Creates a new instance of API.
    fn new(testkit: &TestKit) -> Self {
        use std::sync::Arc;
        use exonum::api::{public, Api};
        let blockchain = &testkit.blockchain;
        TestKitApi {
            public_mount: {
                let mut mount = Mount::new();
                // Service endpoints.
                let service_mount = testkit.public_api_mount();
                mount.mount("api/services", service_mount);
                // Built-in system endpoints.
                let mut router = Router::new();
                let pool = Arc::clone(&testkit.mempool);
                let system_api = public::SystemApi::new(pool, blockchain.clone());
                system_api.wire(&mut router);
                mount.mount("api/system", router);
                // Built-in explorer endpoints.
                let mut router = Router::new();
                let explorer_api = public::ExplorerApi::new(blockchain.clone());
                explorer_api.wire(&mut router);
                mount.mount("api/explorer", router);
                mount
            },
            private_mount: {
                let mut mount = Mount::new();
                let service_mount = testkit.private_api_mount();
                mount.mount("api/services", service_mount);
                mount
            },
            api_sender: testkit.api_sender.clone(),
        }
    }
    /// Returns the mounting point for public APIs. Useful for intricate testing not covered
    /// by `get*` and `post*` functions.
    pub fn public_mount(&self) -> &Mount {
        &self.public_mount
    }
    /// Returns the mounting point for private APIs. Useful for intricate testing not covered
    /// by `get*` and `post*` functions.
    pub fn private_mount(&self) -> &Mount {
        &self.private_mount
    }
    /// Sends a transaction to the node via `ApiSender`.
    pub fn send<T: Transaction>(&self, transaction: T) {
        self.api_sender.send(Box::new(transaction)).expect(
            "Cannot send transaction",
        );
    }
    // Performs a GET request against `mount` and deserializes the JSON response.
    // When `expect_error` is set, a 4xx status is required; otherwise 2xx.
    fn get_internal<D>(mount: &Mount, url: &str, expect_error: bool) -> D
    where
        for<'de> D: Deserialize<'de>,
    {
        let status_class = if expect_error {
            StatusClass::ClientError
        } else {
            StatusClass::Success
        };
        let url = format!("http://localhost:3000/{}", url);
        let resp = request::get(&url, Headers::new(), mount);
        let resp = if expect_error {
            // Support either "normal" or erroneous responses.
            // For example, `Api.not_found_response()` returns the response as `Ok(..)`.
            match resp {
                Ok(resp) => resp,
                Err(IronError { response, .. }) => response,
            }
        } else {
            resp.expect("Got unexpected `Err(..)` response")
        };
        if let Some(ref status) = resp.status {
            if status.class() != status_class {
                panic!("Unexpected response status: {:?}", status);
            }
        } else {
            panic!("Response status not set");
        }
        let resp = response::extract_body_to_string(resp);
        serde_json::from_str(&resp).unwrap()
    }
    /// Gets information from a public endpoint of the node.
    ///
    /// # Panics
    ///
    /// - Panics if an error occurs during request processing (e.g., the requested endpoint is
    ///  unknown), or if the response has a non-20x response status.
    pub fn get<D>(&self, kind: ApiKind, endpoint: &str) -> D
    where
        for<'de> D: Deserialize<'de>,
    {
        TestKitApi::get_internal(
            &self.public_mount,
            &format!("{}/{}", kind.into_prefix(), endpoint),
            false,
        )
    }
    /// Gets information from a private endpoint of the node.
    ///
    /// # Panics
    ///
    /// - Panics if an error occurs during request processing (e.g., the requested endpoint is
    ///  unknown), or if the response has a non-20x response status.
    pub fn get_private<D>(&self, kind: ApiKind, endpoint: &str) -> D
    where
        for<'de> D: Deserialize<'de>,
    {
        TestKitApi::get_internal(
            // BUG FIX: this previously queried `public_mount`, so privately mounted
            // endpoints were unreachable via `get_private` (cf. `post_private`, which
            // correctly uses `private_mount`).
            &self.private_mount,
            &format!("{}/{}", kind.into_prefix(), endpoint),
            false,
        )
    }
    /// Gets an error from a public endpoint of the node.
    ///
    /// # Panics
    ///
    /// - Panics if the response has a non-40x response status.
    pub fn get_err<D>(&self, kind: ApiKind, endpoint: &str) -> D
    where
        for<'de> D: Deserialize<'de>,
    {
        TestKitApi::get_internal(
            &self.public_mount,
            &format!("{}/{}", kind.into_prefix(), endpoint),
            true,
        )
    }
    // Performs a POST request with a JSON body against `mount` and deserializes
    // the JSON response.
    fn post_internal<T, D>(mount: &Mount, endpoint: &str, data: &T) -> D
    where
        T: Serialize,
        for<'de> D: Deserialize<'de>,
    {
        let url = format!("http://localhost:3000/{}", endpoint);
        let resp = request::post(
            &url,
            {
                let mut headers = Headers::new();
                headers.set(ContentType::json());
                headers
            },
            &serde_json::to_string(&data).expect("Cannot serialize data to JSON"),
            mount,
        ).expect("Cannot send data");
        let resp = response::extract_body_to_string(resp);
        serde_json::from_str(&resp).expect("Cannot parse result")
    }
    /// Posts a transaction to the service using the public API. The returned value is the result
    /// of synchronous transaction processing, which includes running the API shim
    /// and `Transaction.verify()`. `Transaction.execute()` is not run until the transaction
    /// gets to a block via one of `create_block*()` methods.
    ///
    /// # Panics
    ///
    /// - Panics if an error occurs during request processing (e.g., the requested endpoint is
    ///  unknown).
    pub fn post<T, D>(&self, kind: ApiKind, endpoint: &str, transaction: &T) -> D
    where
        T: Serialize,
        for<'de> D: Deserialize<'de>,
    {
        TestKitApi::post_internal(
            &self.public_mount,
            &format!("{}/{}", kind.into_prefix(), endpoint),
            transaction,
        )
    }
    /// Posts a transaction to the service using the private API. The returned value is the result
    /// of synchronous transaction processing, which includes running the API shim
    /// and `Transaction.verify()`. `Transaction.execute()` is not run until the transaction
    /// gets to a block via one of `create_block*()` methods.
    ///
    /// # Panics
    ///
    /// - Panics if an error occurs during request processing (e.g., the requested endpoint is
    ///  unknown).
    pub fn post_private<T, D>(&self, kind: ApiKind, endpoint: &str, transaction: &T) -> D
    where
        T: Serialize,
        for<'de> D: Deserialize<'de>,
    {
        TestKitApi::post_internal(
            &self.private_mount,
            &format!("{}/{}", kind.into_prefix(), endpoint),
            transaction,
        )
    }
}
#[test]
fn test_create_block_heights() {
    // The genesis block leaves the chain at height 0.
    let mut testkit = TestKitBuilder::validator().create();
    assert_eq!(Height(0), testkit.height());
    // A single `create_block()` advances the height by one...
    testkit.create_block();
    assert_eq!(Height(1), testkit.height());
    // ...and `create_blocks_until` advances it to exactly the requested height.
    testkit.create_blocks_until(Height(6));
    assert_eq!(Height(6), testkit.height());
}
// Remove service_public_key and service_secret_key functions
// Copyright 2017 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Testkit for Exonum blockchain framework, allowing to test service APIs synchronously
//! and in the same process as the testkit.
//!
//! # Example
//! ```
//! #[macro_use]
//! extern crate exonum;
//! #[macro_use]
//! extern crate exonum_testkit;
//! extern crate serde_json;
//!
//! use exonum::crypto::{gen_keypair, PublicKey};
//! use exonum::blockchain::{Block, Schema, Service, Transaction};
//! use exonum::messages::{Message, RawTransaction};
//! use exonum::storage::Fork;
//! use exonum::encoding;
//! use exonum_testkit::{ApiKind, TestKitBuilder};
//!
//! // Simple service implementation.
//!
//! const SERVICE_ID: u16 = 1;
//! const TX_TIMESTAMP_ID: u16 = 1;
//!
//! message! {
//! struct TxTimestamp {
//! const TYPE = SERVICE_ID;
//! const ID = TX_TIMESTAMP_ID;
//! const SIZE = 40;
//!
//! field from: &PublicKey [0 => 32]
//! field msg: &str [32 => 40]
//! }
//! }
//!
//! struct TimestampingService;
//!
//! impl Transaction for TxTimestamp {
//! fn verify(&self) -> bool {
//! self.verify_signature(self.from())
//! }
//!
//! fn execute(&self, _fork: &mut Fork) {}
//!
//! fn info(&self) -> serde_json::Value {
//! serde_json::to_value(self).unwrap()
//! }
//! }
//!
//! impl Service for TimestampingService {
//! fn service_name(&self) -> &'static str {
//! "timestamping"
//! }
//!
//! fn service_id(&self) -> u16 {
//! SERVICE_ID
//! }
//!
//! fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<Transaction>, encoding::Error> {
//! let trans: Box<Transaction> = match raw.message_type() {
//! TX_TIMESTAMP_ID => Box::new(TxTimestamp::from_raw(raw)?),
//! _ => {
//! return Err(encoding::Error::IncorrectMessageType {
//! message_type: raw.message_type(),
//! });
//! }
//! };
//! Ok(trans)
//! }
//! }
//!
//! fn main() {
//! // Create testkit for network with four validators.
//! let mut testkit = TestKitBuilder::validator()
//! .with_validators(4)
//! .with_service(TimestampingService)
//! .create();
//!
//!     // Create a few transactions.
//! let keypair = gen_keypair();
//! let tx1 = TxTimestamp::new(&keypair.0, "Down To Earth", &keypair.1);
//! let tx2 = TxTimestamp::new(&keypair.0, "Cry Over Spilt Milk", &keypair.1);
//! let tx3 = TxTimestamp::new(&keypair.0, "Dropping Like Flies", &keypair.1);
//! // Commit them into blockchain.
//! testkit.create_block_with_transactions(txvec![
//! tx1.clone(), tx2.clone(), tx3.clone()
//! ]);
//!
//! // Add a single transaction.
//! let tx4 = TxTimestamp::new(&keypair.0, "Barking up the wrong tree", &keypair.1);
//! testkit.create_block_with_transaction(tx4.clone());
//!
//! // Check results with schema.
//! let snapshot = testkit.snapshot();
//! let schema = Schema::new(&snapshot);
//! assert!(schema.transactions().contains(&tx1.hash()));
//! assert!(schema.transactions().contains(&tx2.hash()));
//! assert!(schema.transactions().contains(&tx3.hash()));
//! assert!(schema.transactions().contains(&tx4.hash()));
//!
//! // Check results with api.
//! let api = testkit.api();
//! let blocks: Vec<Block> = api.get(ApiKind::Explorer, "v1/blocks?count=10");
//! assert_eq!(blocks.len(), 3);
//! api.get::<serde_json::Value>(
//! ApiKind::System,
//! &format!("v1/transactions/{}", tx1.hash().to_string()),
//! );
//! }
//! ```
#![deny(missing_debug_implementations, missing_docs)]
extern crate exonum;
extern crate futures;
extern crate iron;
extern crate iron_test;
extern crate mount;
extern crate router;
extern crate serde;
extern crate serde_json;
use futures::Stream;
use futures::executor::{self, Spawn};
use futures::sync::mpsc;
use iron::IronError;
use iron::headers::{ContentType, Headers};
use iron::status::StatusClass;
use iron_test::{request, response};
use mount::Mount;
use router::Router;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::sync::{Arc, RwLock, RwLockReadGuard};
use std::fmt;
use exonum::blockchain::{Blockchain, ConsensusConfig, GenesisConfig, Schema as CoreSchema,
Service, StoredConfiguration, Transaction, ValidatorKeys};
use exonum::crypto;
use exonum::helpers::{Height, Round, ValidatorId};
use exonum::messages::{Message, Precommit, Propose};
use exonum::node::{ApiSender, ExternalMessage, State as NodeState, TransactionSend, TxPool};
use exonum::storage::{MemoryDB, Snapshot};
#[macro_use]
mod macros;
mod checkpoint_db;
pub mod compare;
mod greedy_fold;
#[doc(hidden)]
pub use greedy_fold::GreedilyFoldable;
pub use compare::ComparableSnapshot;
use checkpoint_db::{CheckpointDb, CheckpointDbHandler};
/// Emulated test network.
#[derive(Debug)]
pub struct TestNetwork {
    // The node from whose perspective the testkit operates.
    us: TestNode,
    // All validator nodes of the emulated network, ordered by `ValidatorId`.
    validators: Vec<TestNode>,
}
impl TestNetwork {
    /// Creates a new emulated network with the given number of validators.
    /// The first validator doubles as the node from whose perspective the
    /// testkit operates.
    ///
    /// # Panics
    ///
    /// - Panics if `validator_count` is zero.
    pub fn new(validator_count: u16) -> Self {
        // Explicit check (same message as `TestKitBuilder::with_validators`)
        // instead of an opaque index-out-of-bounds panic on `validators[0]`.
        assert!(
            validator_count > 0,
            "At least one validator should be present in the network."
        );
        let validators = (0..validator_count)
            .map(ValidatorId)
            .map(TestNode::new_validator)
            .collect::<Vec<_>>();
        let us = validators[0].clone();
        TestNetwork { validators, us }
    }
    /// Returns the node in the emulated network, from whose perspective the testkit operates.
    pub fn us(&self) -> &TestNode {
        &self.us
    }
    /// Returns a slice of all validators in the network.
    pub fn validators(&self) -> &[TestNode] {
        &self.validators
    }
    /// Returns config encoding the network structure usable for creating the genesis block of
    /// a blockchain.
    pub fn genesis_config(&self) -> GenesisConfig {
        GenesisConfig::new(self.validators.iter().map(TestNode::public_keys))
    }
    /// Updates the test network by the new set of nodes. Validator ids are
    /// reassigned by position in `validators`.
    pub fn update<I: IntoIterator<Item = TestNode>>(&mut self, mut us: TestNode, validators: I) {
        let validators = validators
            .into_iter()
            .enumerate()
            .map(|(id, mut validator)| {
                let validator_id = ValidatorId(id as u16);
                validator.change_role(Some(validator_id));
                // Keep the `us` node's role in sync with its entry in the
                // validator list; nodes are matched by consensus key.
                if us.public_keys().consensus_key == validator.public_keys().consensus_key {
                    us.change_role(Some(validator_id));
                }
                validator
            })
            .collect::<Vec<_>>();
        self.validators = validators;
        self.us.clone_from(&us);
    }
    /// Returns the service public key of the validator with the given id,
    /// or `None` if there is no such validator.
    pub fn service_public_key_of(&self, id: ValidatorId) -> Option<&crypto::PublicKey> {
        self.validators().get(id.0 as usize).map(|x| {
            &x.service_public_key
        })
    }
    /// Returns the consensus public key of the validator with the given id,
    /// or `None` if there is no such validator.
    pub fn consensus_public_key_of(&self, id: ValidatorId) -> Option<&crypto::PublicKey> {
        self.validators().get(id.0 as usize).map(|x| {
            &x.consensus_public_key
        })
    }
}
/// An emulated node in the test network.
#[derive(Debug, Clone, PartialEq)]
pub struct TestNode {
    // Keypair used to sign consensus messages (`Propose`, `Precommit`).
    consensus_secret_key: crypto::SecretKey,
    consensus_public_key: crypto::PublicKey,
    // Keypair used by services on behalf of this node.
    service_secret_key: crypto::SecretKey,
    service_public_key: crypto::PublicKey,
    // `Some(id)` if the node is a validator; `None` for auditors.
    validator_id: Option<ValidatorId>,
}
impl TestNode {
/// Creates a new auditor.
pub fn new_auditor() -> Self {
let (consensus_public_key, consensus_secret_key) = crypto::gen_keypair();
let (service_public_key, service_secret_key) = crypto::gen_keypair();
TestNode {
consensus_secret_key,
consensus_public_key,
service_secret_key,
service_public_key,
validator_id: None,
}
}
/// Creates a new validator with the given id.
pub fn new_validator(validator_id: ValidatorId) -> Self {
let (consensus_public_key, consensus_secret_key) = crypto::gen_keypair();
let (service_public_key, service_secret_key) = crypto::gen_keypair();
TestNode {
consensus_secret_key,
consensus_public_key,
service_secret_key,
service_public_key,
validator_id: Some(validator_id),
}
}
/// Constructs a new node from the given keypairs.
pub fn from_parts(
consensus_keypair: (crypto::PublicKey, crypto::SecretKey),
service_keypair: (crypto::PublicKey, crypto::SecretKey),
validator_id: Option<ValidatorId>,
) -> TestNode {
TestNode {
consensus_public_key: consensus_keypair.0,
consensus_secret_key: consensus_keypair.1,
service_public_key: service_keypair.0,
service_secret_key: service_keypair.1,
validator_id,
}
}
/// Creates a `Propose` message signed by this validator.
pub fn create_propose(
&self,
height: Height,
last_hash: &crypto::Hash,
tx_hashes: &[crypto::Hash],
) -> Propose {
Propose::new(
self.validator_id.expect(
"An attempt to create propose from a non-validator node.",
),
height,
Round::first(),
last_hash,
tx_hashes,
&self.consensus_secret_key,
)
}
/// Creates a `Precommit` message signed by this validator.
pub fn create_precommit(&self, propose: &Propose, block_hash: &crypto::Hash) -> Precommit {
use std::time::SystemTime;
Precommit::new(
self.validator_id.expect(
"An attempt to create propose from a non-validator node.",
),
propose.height(),
propose.round(),
&propose.hash(),
block_hash,
SystemTime::now(),
&self.consensus_secret_key,
)
}
/// Returns public keys of the node.
pub fn public_keys(&self) -> ValidatorKeys {
ValidatorKeys {
consensus_key: self.consensus_public_key,
service_key: self.service_public_key,
}
}
/// Returns the current validator id of node if it is validator of the test network.
pub fn validator_id(&self) -> Option<ValidatorId> {
self.validator_id
}
/// Changes node role.
pub fn change_role(&mut self, role: Option<ValidatorId>) {
self.validator_id = role;
}
/// Returns the service keypair.
pub fn service_keypair(&self) -> (&crypto::PublicKey, &crypto::SecretKey) {
(&self.service_public_key, &self.service_secret_key)
}
}
impl From<TestNode> for ValidatorKeys {
    // A node converts into the pair of its public keys.
    fn from(node: TestNode) -> Self {
        node.public_keys()
    }
}
/// Builder for `TestKit`.
///
/// # Example
///
/// ```
/// # extern crate exonum;
/// # extern crate exonum_testkit;
/// # use exonum::blockchain::{Service, Transaction};
/// # use exonum::messages::RawTransaction;
/// # use exonum::encoding;
/// # use exonum_testkit::TestKitBuilder;
/// # pub struct MyService;
/// # impl Service for MyService {
/// # fn service_name(&self) -> &'static str {
/// # "documentation"
/// # }
/// # fn service_id(&self) -> u16 {
/// # 0
/// # }
/// # fn tx_from_raw(&self, _raw: RawTransaction) -> Result<Box<Transaction>, encoding::Error> {
/// # unimplemented!();
/// # }
/// # }
/// # fn main() {
/// let mut testkit = TestKitBuilder::validator()
/// .with_service(MyService)
/// .with_validators(4)
/// .create();
/// testkit.create_block();
/// // Other test code
/// # }
/// ```
pub struct TestKitBuilder {
    // The node from whose perspective the future testkit will operate.
    us: TestNode,
    // Validators of the emulated network.
    validators: Vec<TestNode>,
    // Services to include into the genesis block.
    services: Vec<Box<Service>>,
}
impl fmt::Debug for TestKitBuilder {
    // Manual impl: services are rendered by name, as `Box<Service>` itself is
    // not printed.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        let service_names: Vec<_> = self.services.iter().map(|s| s.service_name()).collect();
        f.debug_struct("TestKitBuilder")
            .field("us", &self.us)
            .field("validators", &self.validators)
            .field("services", &service_names)
            .finish()
    }
}
impl TestKitBuilder {
    /// Creates a testkit builder for a network in which the testkit node is
    /// the validator with id 0.
    pub fn validator() -> Self {
        let us = TestNode::new_validator(ValidatorId(0));
        TestKitBuilder {
            us: us.clone(),
            validators: vec![us],
            services: Vec::new(),
        }
    }
    /// Creates a testkit builder for a network in which the testkit node is an auditor.
    pub fn auditor() -> Self {
        TestKitBuilder {
            us: TestNode::new_auditor(),
            validators: vec![TestNode::new_validator(ValidatorId(0))],
            services: Vec::new(),
        }
    }
    /// Sets the number of validator nodes in the test network. Missing
    /// validators are created; an already sufficient set is left unchanged.
    pub fn with_validators(mut self, validators_count: u16) -> Self {
        assert!(
            validators_count > 0,
            "At least one validator should be present in the network."
        );
        let first_new_id = self.validators.len() as u16;
        self.validators.extend(
            (first_new_id..validators_count)
                .map(ValidatorId)
                .map(TestNode::new_validator),
        );
        self
    }
    /// Adds a service to the testkit.
    pub fn with_service<S>(mut self, service: S) -> Self
    where
        S: Into<Box<Service>>,
    {
        self.services.push(service.into());
        self
    }
    /// Creates the testkit, initializing the crypto backend and assembling the
    /// configured network.
    pub fn create(self) -> TestKit {
        crypto::init();
        let network = TestNetwork {
            us: self.us,
            validators: self.validators,
        };
        TestKit::assemble(self.services, network)
    }
}
/// Testkit for testing blockchain services. It offers simple network configuration emulation
/// (with no real network setup).
pub struct TestKit {
    blockchain: Blockchain,
    // Handle allowing `rollback()` to revert the underlying database.
    db_handler: CheckpointDbHandler<MemoryDB>,
    // Stream of node events (e.g., transactions sent via the API), drained by `poll_events()`.
    events_stream: Spawn<Box<Stream<Item = (), Error = ()>>>,
    network: TestNetwork,
    // Sender emulating the API -> node channel of a real node.
    api_sender: ApiSender,
    // Uncommitted transactions, keyed by transaction hash.
    mempool: TxPool,
    // Pending configuration change proposal, if any.
    cfg_proposal: Option<ConfigurationProposalState>,
}
impl fmt::Debug for TestKit {
    // Manual impl: `db_handler`, `events_stream` and `api_sender` are
    // intentionally omitted from the output.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        f.debug_struct("TestKit")
            .field("blockchain", &self.blockchain)
            .field("network", &self.network)
            .field("mempool", &self.mempool)
            .field("cfg_change_proposal", &self.cfg_proposal)
            .finish()
    }
}
impl TestKit {
/// Creates a new `TestKit` with a single validator with the given service.
pub fn for_service<S>(service: S) -> Self
where
S: Into<Box<Service>>,
{
TestKitBuilder::validator().with_service(service).create()
}
    /// Assembles a testkit: wires the blockchain over a checkpointed in-memory
    /// database, creates the genesis block, and sets up the event stream that
    /// feeds API-sent transactions into the mempool.
    fn assemble(services: Vec<Box<Service>>, network: TestNetwork) -> Self {
        // Channel emulating the API -> node message flow of a real node.
        let api_channel = mpsc::channel(1_000);
        let api_sender = ApiSender::new(api_channel.0.clone());
        // Checkpointing layer allows `rollback()` to revert committed blocks.
        let db = CheckpointDb::new(MemoryDB::new());
        let db_handler = db.handler();
        let mut blockchain = Blockchain::new(
            Box::new(db),
            services,
            *network.us().service_keypair().0,
            network.us().service_keypair().1.clone(),
            api_sender.clone(),
        );
        let genesis = network.genesis_config();
        blockchain.create_genesis_block(genesis.clone()).unwrap();
        let mempool = Arc::new(RwLock::new(BTreeMap::new()));
        let event_stream: Box<Stream<Item = (), Error = ()>> = {
            let blockchain = blockchain.clone();
            let mempool = Arc::clone(&mempool);
            // Greedily consume channel events, inserting incoming transactions
            // into the mempool unless they are already committed.
            Box::new(api_channel.1.greedy_fold((), move |_, event| {
                let snapshot = blockchain.snapshot();
                let schema = CoreSchema::new(&snapshot);
                match event {
                    ExternalMessage::Transaction(tx) => {
                        let hash = tx.hash();
                        if !schema.transactions().contains(&hash) {
                            mempool
                                .write()
                                .expect("Cannot write transactions to mempool")
                                .insert(tx.hash(), tx);
                        }
                    }
                    ExternalMessage::PeerAdd(_) => { /* Ignored */ }
                }
            }))
        };
        let events_stream = executor::spawn(event_stream);
        TestKit {
            blockchain,
            db_handler,
            api_sender,
            events_stream,
            network,
            mempool: Arc::clone(&mempool),
            cfg_proposal: None,
        }
    }
    /// Creates a mounting point for public APIs used by the blockchain.
    fn public_api_mount(&self) -> Mount {
        self.blockchain.mount_public_api()
    }
    /// Creates a mounting point for private APIs used by the blockchain.
    fn private_api_mount(&self) -> Mount {
        self.blockchain.mount_private_api()
    }
    /// Creates an instance of `TestKitApi` to test the API provided by services.
    pub fn api(&self) -> TestKitApi {
        TestKitApi::new(self)
    }
    /// Polls the *existing* events from the event loop until exhaustion. Does not wait
    /// until new events arrive.
    pub fn poll_events(&mut self) -> Option<Result<(), ()>> {
        self.events_stream.wait_stream()
    }
    /// Returns a snapshot of the current blockchain state.
    pub fn snapshot(&self) -> Box<Snapshot> {
        self.blockchain.snapshot()
    }
    /// Returns a blockchain instance for low level manipulations with storage.
    pub fn blockchain_mut(&mut self) -> &mut Blockchain {
        &mut self.blockchain
    }
/// Rolls the blockchain back for a certain number of blocks.
///
/// # Examples
///
/// Rollbacks are useful in testing alternative scenarios (e.g., transactions executed
/// in different order and/or in different blocks) that require an expensive setup:
///
/// ```
/// # #[macro_use] extern crate exonum;
/// # #[macro_use] extern crate exonum_testkit;
/// # use exonum::blockchain::{Service, Transaction};
/// # use exonum::messages::RawTransaction;
/// # use exonum::encoding;
/// # use exonum_testkit::{TestKit, TestKitBuilder};
/// #
/// # type FromRawResult = Result<Box<Transaction>, encoding::Error>;
/// # pub struct MyService;
/// # impl Service for MyService {
/// # fn service_name(&self) -> &'static str {
/// # "documentation"
/// # }
/// # fn service_id(&self) -> u16 {
/// # 0
/// # }
/// # fn tx_from_raw(&self, _raw: RawTransaction) -> FromRawResult {
/// # unimplemented!();
/// # }
/// # }
/// #
/// # message! {
/// # struct MyTransaction {
/// # const TYPE = 0;
/// # const ID = 0;
/// # const SIZE = 40;
/// # field from: &exonum::crypto::PublicKey [0 => 32]
/// # field msg: &str [32 => 40]
/// # }
/// # }
/// # impl Transaction for MyTransaction {
/// # fn verify(&self) -> bool { true }
/// # fn execute(&self, _: &mut exonum::storage::Fork) {}
/// # }
/// #
/// # fn expensive_setup(_: &mut TestKit) {}
/// # fn assert_something_about(_: &TestKit) {}
/// #
/// # fn main() {
/// let mut testkit = TestKitBuilder::validator()
/// .with_service(MyService)
/// .create();
/// expensive_setup(&mut testkit);
/// let (pubkey, key) = exonum::crypto::gen_keypair();
/// let tx_a = MyTransaction::new(&pubkey, "foo", &key);
/// let tx_b = MyTransaction::new(&pubkey, "bar", &key);
/// testkit.create_block_with_transactions(txvec![tx_a.clone(), tx_b.clone()]);
/// assert_something_about(&testkit);
/// testkit.rollback(1);
/// testkit.create_block_with_transactions(txvec![tx_a.clone()]);
/// testkit.create_block_with_transactions(txvec![tx_b.clone()]);
/// assert_something_about(&testkit);
/// testkit.rollback(2);
/// # }
/// ```
    pub fn rollback(&mut self, blocks: usize) {
        // The current height equals the number of blocks after genesis, so
        // rolling back more than `height` blocks would cross the genesis block.
        assert!(
            (blocks as u64) <= self.height().0,
            "Cannot rollback past genesis block"
        );
        self.db_handler.rollback(blocks);
    }
    /// Executes a list of transactions given the current state of the blockchain, but does not
    /// commit execution results to the blockchain. The execution result is the same
    /// as if transactions were included into a new block; for example,
    /// transactions included into one of previous blocks do not lead to any state changes.
    pub fn probe_all<I>(&mut self, transactions: I) -> Box<Snapshot>
    where
        I: IntoIterator<Item = Box<Transaction>>,
    {
        // Filter out already committed transactions; otherwise,
        // `create_block_with_transactions()` will panic.
        let schema = CoreSchema::new(self.snapshot());
        let uncommitted_txs = transactions.into_iter().filter(|tx| {
            !schema.transactions().contains(&tx.hash())
        });
        // Commit a temporary block, capture the resulting state, then revert it.
        self.create_block_with_transactions(uncommitted_txs);
        let snapshot = self.snapshot();
        self.rollback(1);
        snapshot
    }
    /// Executes a transaction given the current state of the blockchain but does not
    /// commit execution results to the blockchain. The execution result is the same
    /// as if a transaction was included into a new block; for example,
    /// a transaction included into one of previous blocks does not lead to any state changes.
    pub fn probe<T: Transaction>(&mut self, transaction: T) -> Box<Snapshot> {
        self.probe_all(vec![Box::new(transaction) as Box<Transaction>])
    }
    /// Builds a block from the given transaction hashes, commits it with
    /// precommits from all validators, and removes the committed transactions
    /// from the mempool.
    fn do_create_block(&mut self, tx_hashes: &[crypto::Hash]) {
        let new_block_height = self.height().next();
        let last_hash = self.last_block_hash();
        // Apply a pending configuration change (if any) before building the block.
        self.update_configuration(new_block_height);
        let (block_hash, patch) = {
            let validator_id = self.leader().validator_id().unwrap();
            let transactions = self.mempool();
            self.blockchain.create_patch(
                validator_id,
                new_block_height,
                tx_hashes,
                &transactions,
            )
        };
        // Remove txs from mempool
        {
            let mut transactions = self.mempool.write().expect(
                "Cannot modify transactions in mempool",
            );
            for hash in tx_hashes {
                transactions.remove(hash);
            }
        }
        // Emulate consensus: a propose from the leader plus precommits from
        // every validator for the resulting block.
        let propose = self.leader().create_propose(
            new_block_height,
            &last_hash,
            tx_hashes,
        );
        let precommits: Vec<_> = self.network()
            .validators()
            .iter()
            .map(|v| v.create_precommit(&propose, &block_hash))
            .collect();
        self.blockchain
            .commit(&patch, block_hash, precommits.iter())
            .unwrap();
        self.poll_events();
    }
    /// Update test network configuration if such an update has been scheduled
    /// with `commit_configuration_change`.
    fn update_configuration(&mut self, new_block_height: Height) {
        use ConfigurationProposalState::*;
        // The configuration may become actual starting from the height that
        // follows the block currently being created.
        let actual_from = new_block_height.next();
        if let Some(cfg_proposal) = self.cfg_proposal.take() {
            match cfg_proposal {
                Uncommitted(cfg_proposal) => {
                    // Commit configuration proposal
                    let stored = cfg_proposal.stored_configuration().clone();
                    let mut fork = self.blockchain.fork();
                    CoreSchema::new(&mut fork).commit_configuration(stored);
                    let changes = fork.into_patch();
                    self.blockchain.merge(changes).unwrap();
                    self.cfg_proposal = Some(Committed(cfg_proposal));
                }
                Committed(cfg_proposal) => {
                    if cfg_proposal.actual_from() == actual_from {
                        // Modify the self configuration
                        self.network_mut().update(
                            cfg_proposal.us,
                            cfg_proposal.validators,
                        );
                    } else {
                        // Not due yet; keep the proposal pending.
                        self.cfg_proposal = Some(Committed(cfg_proposal));
                    }
                }
            }
        }
    }
    /// Creates a block with the given transactions.
    /// Transactions that are in the mempool will be ignored.
    ///
    /// # Panics
    ///
    /// - Panics if any of transactions has been already committed to the blockchain.
    pub fn create_block_with_transactions<I>(&mut self, txs: I)
    where
        I: IntoIterator<Item = Box<Transaction>>,
    {
        let tx_hashes: Vec<_> = {
            let mut mempool = self.mempool.write().expect(
                "Cannot write transactions to mempool",
            );
            let snapshot = self.snapshot();
            let schema = CoreSchema::new(&snapshot);
            txs.into_iter()
                // Transactions failing `verify()` are silently dropped.
                .filter(|tx| tx.verify())
                .map(|tx| {
                    let txid = tx.hash();
                    assert!(
                        !schema.transactions().contains(&txid),
                        "Transaction is already committed: {:?}",
                        tx
                    );
                    mempool.insert(txid, tx);
                    txid
                })
                .collect()
        };
        self.create_block_with_tx_hashes(&tx_hashes);
    }
    /// Creates a block with the given transaction.
    /// Transactions that are in the mempool will be ignored.
    ///
    /// # Panics
    ///
    /// - Panics if given transaction has been already committed to the blockchain.
    pub fn create_block_with_transaction<T: Transaction>(&mut self, tx: T) {
        self.create_block_with_transactions(txvec![tx]);
    }
    /// Creates block with the specified transactions. The transactions must be previously
    /// sent to the node via API or directly put into the `channel()`.
    ///
    /// # Panics
    ///
    /// - Panics in the case any of transaction hashes are not in the mempool.
    pub fn create_block_with_tx_hashes(&mut self, tx_hashes: &[crypto::Hash]) {
        // Drain pending events first so transactions sent via the API reach
        // the mempool before the check below.
        self.poll_events();
        {
            let txs = self.mempool();
            for hash in tx_hashes {
                assert!(txs.contains_key(hash));
            }
        }
        self.do_create_block(tx_hashes);
    }
    /// Creates block with all transactions in the mempool.
    pub fn create_block(&mut self) {
        self.poll_events();
        let tx_hashes: Vec<_> = self.mempool().keys().cloned().collect();
        self.do_create_block(&tx_hashes);
    }
    /// Creates a chain of blocks until a given height.
    ///
    /// # Example
    ///
    /// ```
    /// # extern crate exonum_testkit;
    /// # extern crate exonum;
    /// # use exonum::helpers::Height;
    /// # use exonum_testkit::TestKitBuilder;
    /// # fn main() {
    /// let mut testkit = TestKitBuilder::validator().create();
    /// testkit.create_blocks_until(Height(5));
    /// assert_eq!(Height(5), testkit.height());
    /// # }
    /// ```
    pub fn create_blocks_until(&mut self, height: Height) {
        // Each `create_block()` commits all current mempool transactions, so
        // the intermediate blocks are not necessarily empty.
        while self.height() < height {
            self.create_block();
        }
    }
    /// Returns the hash of the latest committed block.
    pub fn last_block_hash(&self) -> crypto::Hash {
        self.blockchain.last_hash()
    }
    /// Returns the height of the latest committed block.
    pub fn height(&self) -> Height {
        self.blockchain.last_block().height()
    }
    /// Returns the actual blockchain configuration.
    pub fn actual_configuration(&self) -> StoredConfiguration {
        CoreSchema::new(&self.snapshot()).actual_configuration()
    }
    /// Returns reference to validator with the given identifier.
    ///
    /// # Panics
    ///
    /// - Panics if validator with the given id is absent in test network.
    pub fn validator(&self, id: ValidatorId) -> &TestNode {
        &self.network.validators[id.0 as usize]
    }
    /// Returns a sufficient number of validators for the Byzantine Fault Tolerance consensus.
    pub fn majority_count(&self) -> usize {
        NodeState::byzantine_majority_count(self.network().validators().len())
    }
    /// Returns the test node memory pool handle.
    pub fn mempool(&self) -> RwLockReadGuard<BTreeMap<crypto::Hash, Box<Transaction>>> {
        self.mempool.read().expect(
            "Can't read transactions from the mempool.",
        )
    }
    /// Returns the leader on the current height. At the moment first validator.
    pub fn leader(&self) -> &TestNode {
        &self.network().validators[0]
    }
    /// Returns the reference to test network.
    pub fn network(&self) -> &TestNetwork {
        &self.network
    }
    /// Returns the mutable reference to test network for manual modifications.
    pub fn network_mut(&mut self) -> &mut TestNetwork {
        &mut self.network
    }
    /// Returns a copy of the actual configuration of the testkit.
    /// The returned configuration could be modified for use with
    /// `commit_configuration_change` method.
    pub fn configuration_change_proposal(&self) -> TestNetworkConfiguration {
        let stored_configuration = CoreSchema::new(&self.snapshot()).actual_configuration();
        TestNetworkConfiguration::from_parts(
            self.network().us().clone(),
            self.network().validators().into(),
            stored_configuration,
        )
    }
/// Adds a new configuration proposal. Remember, to add this proposal to the blockchain,
/// you should create at least one block.
///
/// # Panics
///
/// - Panics if `actual_from` is less than current height or equals.
/// - Panics if configuration change has been already proposed but not executed.
///
/// # Example
///
/// ```
/// extern crate exonum;
/// extern crate exonum_testkit;
/// extern crate serde;
/// extern crate serde_json;
///
/// use exonum::helpers::{Height, ValidatorId};
/// use exonum_testkit::TestKitBuilder;
/// use exonum::blockchain::Schema;
/// use exonum::storage::StorageValue;
///
/// fn main() {
/// let mut testkit = TestKitBuilder::auditor().with_validators(3).create();
///
/// let cfg_change_height = Height(5);
/// let proposal = {
/// let mut cfg = testkit.configuration_change_proposal();
/// // Add us to validators.
/// let mut validators = cfg.validators().to_vec();
/// validators.push(testkit.network().us().clone());
/// cfg.set_validators(validators);
/// // Change configuration of our service.
/// cfg.set_service_config("my_service", "My config");
/// // Set the height with which the configuration takes effect.
/// cfg.set_actual_from(cfg_change_height);
/// cfg
/// };
/// // Save proposed configuration.
/// let stored = proposal.stored_configuration().clone();
/// // Commit configuration change proposal to the testkit.
/// testkit.commit_configuration_change(proposal);
/// // Create blocks up to the height preceding the `actual_from` height.
/// testkit.create_blocks_until(cfg_change_height.previous());
/// // Check that the proposal has become actual.
/// assert_eq!(testkit.network().us().validator_id(), Some(ValidatorId(3)));
/// assert_eq!(testkit.validator(ValidatorId(3)), testkit.network().us());
/// assert_eq!(testkit.actual_configuration(), stored);
/// assert_eq!(
/// Schema::new(&testkit.snapshot())
/// .previous_configuration()
/// .unwrap()
/// .hash(),
/// stored.previous_cfg_hash
/// );
/// }
/// ```
    pub fn commit_configuration_change(&mut self, proposal: TestNetworkConfiguration) {
        use self::ConfigurationProposalState::*;
        assert!(
            self.height() < proposal.actual_from(),
            "The `actual_from` height should be greater than the current."
        );
        // Only one proposal may be pending at a time; it is consumed by
        // `update_configuration()` once blocks are created.
        assert!(
            self.cfg_proposal.is_none(),
            "There is an active configuration change proposal."
        );
        self.cfg_proposal = Some(Uncommitted(proposal));
    }
    /// Returns the node in the emulated network, from whose perspective the testkit operates.
    pub fn us(&self) -> &TestNode {
        self.network().us()
    }
}
/// A configuration of the test network.
#[derive(Debug)]
pub struct TestNetworkConfiguration {
    // The testkit node with its role as of this configuration.
    us: TestNode,
    // The proposed validator set.
    validators: Vec<TestNode>,
    // The resulting blockchain configuration to commit.
    stored_configuration: StoredConfiguration,
}
// The lifecycle state of a configuration change proposal inside `TestKit`.
#[derive(Debug)]
enum ConfigurationProposalState {
    // Submitted via `commit_configuration_change`, not yet written to storage.
    Uncommitted(TestNetworkConfiguration),
    // Written to storage; waiting for its `actual_from` height to take effect.
    Committed(TestNetworkConfiguration),
}
impl TestNetworkConfiguration {
    // Builds a proposal from the testkit node, validator list and the current
    // stored configuration. `previous_cfg_hash` is set to the hash of the
    // configuration passed in, so the proposal chains onto it.
    fn from_parts(
        us: TestNode,
        validators: Vec<TestNode>,
        mut stored_configuration: StoredConfiguration,
    ) -> Self {
        let prev_hash = exonum::storage::StorageValue::hash(&stored_configuration);
        stored_configuration.previous_cfg_hash = prev_hash;
        TestNetworkConfiguration {
            us,
            validators,
            stored_configuration,
        }
    }
    /// Returns the node from whose perspective the testkit operates.
    pub fn us(&self) -> &TestNode {
        &self.us
    }
    /// Modifies the node from whose perspective the testkit operates.
    pub fn set_us(&mut self, us: TestNode) {
        self.us = us;
        // Recompute the validator id of the new `us` node.
        self.update_our_role();
    }
    /// Returns the test network validators.
    pub fn validators(&self) -> &[TestNode] {
        self.validators.as_ref()
    }
    /// Returns the current consensus configuration.
    pub fn consensus_configuration(&self) -> &ConsensusConfig {
        &self.stored_configuration.consensus
    }
    /// Returns the height, starting from which this configuration becomes actual.
    pub fn actual_from(&self) -> Height {
        self.stored_configuration.actual_from
    }
    /// Modifies the height, starting from which this configuration becomes actual.
    pub fn set_actual_from(&mut self, actual_from: Height) {
        self.stored_configuration.actual_from = actual_from;
    }
    /// Modifies the current consensus configuration.
    pub fn set_consensus_configuration(&mut self, consensus: ConsensusConfig) {
        self.stored_configuration.consensus = consensus;
    }
    /// Modifies the validators list.
    pub fn set_validators<I>(&mut self, validators: I)
    where
        I: IntoIterator<Item = TestNode>,
    {
        // Validator ids are reassigned by position in the new list.
        self.validators = validators
            .into_iter()
            .enumerate()
            .map(|(idx, mut node)| {
                node.change_role(Some(ValidatorId(idx as u16)));
                node
            })
            .collect();
        // Keep the stored configuration's key list in sync with the nodes.
        self.stored_configuration.validator_keys = self.validators
            .iter()
            .cloned()
            .map(ValidatorKeys::from)
            .collect();
        self.update_our_role();
    }
    /// Returns the configuration for service with the given identifier.
    ///
    /// # Panics
    ///
    /// - Panics if no configuration is stored for the given service.
    pub fn service_config<D>(&self, id: &str) -> D
    where
        for<'de> D: Deserialize<'de>,
    {
        let value = self.stored_configuration.services.get(id).expect(
            "Unable to find configuration for service",
        );
        serde_json::from_value(value.clone()).unwrap()
    }
    /// Modifies the configuration of the service with the given identifier.
    pub fn set_service_config<D>(&mut self, id: &str, config: D)
    where
        D: Serialize,
    {
        let value = serde_json::to_value(config).unwrap();
        self.stored_configuration.services.insert(id.into(), value);
    }
    /// Returns the resulting exonum blockchain configuration.
    pub fn stored_configuration(&self) -> &StoredConfiguration {
        &self.stored_configuration
    }
    // Recomputes `us.validator_id`: the node is matched among the validators
    // by its service public key; if absent, it becomes an auditor (`None`).
    fn update_our_role(&mut self) {
        let validator_id = self.validators
            .iter()
            .position(|x| {
                x.public_keys().service_key == self.us.service_public_key
            })
            .map(|x| ValidatorId(x as u16));
        self.us.validator_id = validator_id;
    }
}
/// Kind of public or private REST API of an Exonum node.
///
/// `ApiKind` allows to use `get*` and `post*` methods of [`TestKitApi`] more safely.
///
/// [`TestKitApi`]: struct.TestKitApi.html
#[derive(Debug)]
pub enum ApiKind {
    /// `api/system` endpoints of the built-in Exonum REST API.
    System,
    /// `api/explorer` endpoints of the built-in Exonum REST API.
    Explorer,
    /// Endpoints corresponding to a service with the specified string identifier,
    /// mounted under `api/services/{identifier}`.
    Service(&'static str),
}
impl ApiKind {
    /// Converts the API kind into the URL prefix under which its endpoints are mounted.
    fn into_prefix(self) -> String {
        match self {
            ApiKind::System => String::from("api/system"),
            ApiKind::Explorer => String::from("api/explorer"),
            ApiKind::Service(name) => format!("api/services/{}", name),
        }
    }
}
/// API encapsulation for the testkit. Allows to execute and synchronously retrieve results
/// for REST-ful endpoints of services.
pub struct TestKitApi {
    // Serves `api/services`, `api/system` and `api/explorer` public endpoints.
    public_mount: Mount,
    // Serves `api/services` private endpoints.
    private_mount: Mount,
    // Channel used by `send()` to deliver transactions to the node.
    api_sender: ApiSender,
}
impl fmt::Debug for TestKitApi {
    // Manual impl: only the type name is printed; the mounts and sender are omitted.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        f.debug_struct("TestKitApi").finish()
    }
}
impl TestKitApi {
    /// Creates a new instance of API.
    fn new(testkit: &TestKit) -> Self {
        use std::sync::Arc;
        use exonum::api::{public, Api};
        let blockchain = &testkit.blockchain;
        TestKitApi {
            // Public mount aggregates the services' public APIs plus the
            // built-in `api/system` and `api/explorer` endpoints.
            public_mount: {
                let mut mount = Mount::new();
                let service_mount = testkit.public_api_mount();
                mount.mount("api/services", service_mount);
                let mut router = Router::new();
                let pool = Arc::clone(&testkit.mempool);
                let system_api = public::SystemApi::new(pool, blockchain.clone());
                system_api.wire(&mut router);
                mount.mount("api/system", router);
                let mut router = Router::new();
                let explorer_api = public::ExplorerApi::new(blockchain.clone());
                explorer_api.wire(&mut router);
                mount.mount("api/explorer", router);
                mount
            },
            // Private mount exposes only the services' private APIs.
            private_mount: {
                let mut mount = Mount::new();
                let service_mount = testkit.private_api_mount();
                mount.mount("api/services", service_mount);
                mount
            },
            api_sender: testkit.api_sender.clone(),
        }
    }
    /// Returns the mounting point for public APIs. Useful for intricate testing not covered
    /// by `get*` and `post*` functions.
    pub fn public_mount(&self) -> &Mount {
        &self.public_mount
    }
    /// Returns the mounting point for private APIs. Useful for intricate testing not covered
    /// by `get*` and `post*` functions.
    pub fn private_mount(&self) -> &Mount {
        &self.private_mount
    }
    /// Sends a transaction to the node via `ApiSender`.
    ///
    /// # Panics
    ///
    /// - Panics if the underlying channel cannot accept the transaction.
    pub fn send<T: Transaction>(&self, transaction: T) {
        self.api_sender.send(Box::new(transaction)).expect(
            "Cannot send transaction",
        );
    }
    /// Performs a GET request against `mount` and deserializes the JSON body.
    /// When `expect_error` is `true`, a 4xx status is required; otherwise a
    /// 2xx status is required. Any other outcome panics.
    fn get_internal<D>(mount: &Mount, url: &str, expect_error: bool) -> D
    where
        for<'de> D: Deserialize<'de>,
    {
        let status_class = if expect_error {
            StatusClass::ClientError
        } else {
            StatusClass::Success
        };
        let url = format!("http://localhost:3000/{}", url);
        let resp = request::get(&url, Headers::new(), mount);
        let resp = if expect_error {
            // Support either "normal" or erroneous responses.
            // For example, `Api.not_found_response()` returns the response as `Ok(..)`.
            match resp {
                Ok(resp) => resp,
                Err(IronError { response, .. }) => response,
            }
        } else {
            resp.expect("Got unexpected `Err(..)` response")
        };
        if let Some(ref status) = resp.status {
            if status.class() != status_class {
                panic!("Unexpected response status: {:?}", status);
            }
        } else {
            panic!("Response status not set");
        }
        let resp = response::extract_body_to_string(resp);
        serde_json::from_str(&resp).unwrap()
    }
    /// Gets information from a public endpoint of the node.
    ///
    /// # Panics
    ///
    /// - Panics if an error occurs during request processing (e.g., the requested endpoint is
    ///   unknown), or if the response has a non-20x response status.
    pub fn get<D>(&self, kind: ApiKind, endpoint: &str) -> D
    where
        for<'de> D: Deserialize<'de>,
    {
        TestKitApi::get_internal(
            &self.public_mount,
            &format!("{}/{}", kind.into_prefix(), endpoint),
            false,
        )
    }
    /// Gets information from a private endpoint of the node.
    ///
    /// # Panics
    ///
    /// - Panics if an error occurs during request processing (e.g., the requested endpoint is
    ///   unknown), or if the response has a non-20x response status.
    pub fn get_private<D>(&self, kind: ApiKind, endpoint: &str) -> D
    where
        for<'de> D: Deserialize<'de>,
    {
        TestKitApi::get_internal(
            // BUG FIX: this previously routed through `public_mount`, so
            // private endpoints were silently resolved against the public API
            // (compare `post_private`, which correctly uses `private_mount`).
            &self.private_mount,
            &format!("{}/{}", kind.into_prefix(), endpoint),
            false,
        )
    }
    /// Gets an error from a public endpoint of the node.
    ///
    /// # Panics
    ///
    /// - Panics if the response has a non-40x response status.
    pub fn get_err<D>(&self, kind: ApiKind, endpoint: &str) -> D
    where
        for<'de> D: Deserialize<'de>,
    {
        TestKitApi::get_internal(
            &self.public_mount,
            &format!("{}/{}", kind.into_prefix(), endpoint),
            true,
        )
    }
    /// Performs a POST request with JSON-serialized `data` against `mount`
    /// and deserializes the JSON response body.
    fn post_internal<T, D>(mount: &Mount, endpoint: &str, data: &T) -> D
    where
        T: Serialize,
        for<'de> D: Deserialize<'de>,
    {
        let url = format!("http://localhost:3000/{}", endpoint);
        let resp = request::post(
            &url,
            {
                let mut headers = Headers::new();
                headers.set(ContentType::json());
                headers
            },
            &serde_json::to_string(&data).expect("Cannot serialize data to JSON"),
            mount,
        ).expect("Cannot send data");
        let resp = response::extract_body_to_string(resp);
        serde_json::from_str(&resp).expect("Cannot parse result")
    }
    /// Posts a transaction to the service using the public API. The returned value is the result
    /// of synchronous transaction processing, which includes running the API shim
    /// and `Transaction.verify()`. `Transaction.execute()` is not run until the transaction
    /// gets to a block via one of `create_block*()` methods.
    ///
    /// # Panics
    ///
    /// - Panics if an error occurs during request processing (e.g., the requested endpoint is
    ///   unknown).
    pub fn post<T, D>(&self, kind: ApiKind, endpoint: &str, transaction: &T) -> D
    where
        T: Serialize,
        for<'de> D: Deserialize<'de>,
    {
        TestKitApi::post_internal(
            &self.public_mount,
            &format!("{}/{}", kind.into_prefix(), endpoint),
            transaction,
        )
    }
    /// Posts a transaction to the service using the private API. The returned value is the result
    /// of synchronous transaction processing, which includes running the API shim
    /// and `Transaction.verify()`. `Transaction.execute()` is not run until the transaction
    /// gets to a block via one of `create_block*()` methods.
    ///
    /// # Panics
    ///
    /// - Panics if an error occurs during request processing (e.g., the requested endpoint is
    ///   unknown).
    pub fn post_private<T, D>(&self, kind: ApiKind, endpoint: &str, transaction: &T) -> D
    where
        T: Serialize,
        for<'de> D: Deserialize<'de>,
    {
        TestKitApi::post_internal(
            &self.private_mount,
            &format!("{}/{}", kind.into_prefix(), endpoint),
            transaction,
        )
    }
}
#[test]
fn test_create_block_heights() {
    // A fresh testkit starts at height 0; each created block advances the
    // height by one, and `create_blocks_until` stops exactly at its target.
    let mut testkit = TestKitBuilder::validator().create();
    assert_eq!(Height(0), testkit.height());
    testkit.create_block();
    assert_eq!(Height(1), testkit.height());
    testkit.create_blocks_until(Height(6));
    assert_eq!(Height(6), testkit.height());
}
|
extern crate num;
use std::ops;
/// 2D grid coordinate
#[derive(Clone, Copy, Eq, Debug, PartialEq)]
pub struct Coord<T> {
    /// X component of the coordinate.
    pub x: T,
    /// Y component of the coordinate.
    pub y: T
}
impl<T> Coord<T> {
    /// Create a grid coordinate at (x, y)
    pub fn new(x: T, y: T) -> Coord<T> {
        // Field-init shorthand; identical to `Coord { x: x, y: y }`.
        Coord { x, y }
    }
}
/// Rectangle defined by inclusive minimum and maximum coordinates
#[derive(Clone, Copy, Eq, Debug, PartialEq)]
pub struct Rect<T: Copy> {
    /// Minimum coordinate (inclusive)
    pub min_coord: Coord<T>,
    /// Maximum coordinate (inclusive)
    pub max_coord: Coord<T>
}
impl<T: Copy + PartialOrd> Rect<T> {
    /// Create a new Rect defined by inclusive minimum and maximum
    /// coordinates. If min_coord is greater than max_coord on either
    /// axis then None is returned.
    pub fn new(min_coord: Coord<T>, max_coord: Coord<T>) -> Option<Rect<T>> {
        // Keep the `<=` comparisons from the original: with `PartialOrd`,
        // incomparable values (e.g. NaN) must reject the rectangle.
        let ordered = min_coord.x <= max_coord.x && min_coord.y <= max_coord.y;
        if ordered {
            Some(Rect { min_coord, max_coord })
        } else {
            None
        }
    }

    /// Returns an iterator over every coordinate in the rectangle,
    /// starting at `min_coord`.
    pub fn iter(&self) -> RectIter<T> {
        RectIter {
            rect: *self,
            cur_coord: self.min_coord,
        }
    }
}
/// Iterator over the coordinates of a `Rect`, produced by `Rect::iter`.
pub struct RectIter<T: Copy> {
    // The rectangle being traversed (bounds are inclusive).
    rect: Rect<T>,
    // Next coordinate to yield; iteration ends once its y exceeds the maximum.
    cur_coord: Coord<T>
}
impl<T: Copy + Ord + ops::Add<Output=T> + num::One> Iterator for RectIter<T> {
    type Item = Coord<T>;

    /// Yields coordinates in row-major order: x advances fastest and wraps
    /// back to the left edge when it passes the (inclusive) right edge.
    fn next(&mut self) -> Option<Self::Item> {
        if self.cur_coord.y > self.rect.max_coord.y {
            // Walked past the last row; iteration is complete.
            return None;
        }
        let current = self.cur_coord;
        // Step one column right, wrapping to the start of the next row
        // once the right edge has been passed.
        self.cur_coord.x = self.cur_coord.x + T::one();
        if self.cur_coord.x > self.rect.max_coord.x {
            self.cur_coord.x = self.rect.min_coord.x;
            self.cur_coord.y = self.cur_coord.y + T::one();
        }
        Some(current)
    }
}
#[test]
fn test_rect_iter() {
    // Expect row-major traversal of the inclusive 3x3 grid (1,2)..=(3,4).
    let rect = Rect::new(Coord::new(1, 2), Coord::new(3, 4)).unwrap();
    let coords: Vec<Coord<u8>> = rect.iter().collect();
    assert_eq!(coords, [
        Coord::new(1, 2), Coord::new(2, 2), Coord::new(3, 2),
        Coord::new(1, 3), Coord::new(2, 3), Coord::new(3, 3),
        Coord::new(1, 4), Coord::new(2, 4), Coord::new(3, 4)]);
}
// pub struct RectIter<'s, S: 's, T> {
// data: &'s [S],
// cur_elem: *const S,
// cur_coord:
// full: Rect<T>,
// part: Rect<T>
// }
// impl<'s, S: 's, T> Iterator for RectIter<'s, S, T> {
// type Item = (Coord<T>, &'s S);
// fn next(&mut self) -> Option<Self::Item> {
// if cur {
// }
// }
// }
Checkpoint
extern crate num;
use std::ops::Add;
use num::Unsigned;
/// 2D grid coordinate
#[derive(Clone, Copy, Eq, Debug, PartialEq)]
pub struct Coord<T: Unsigned> {
    /// X component of the coordinate.
    pub x: T,
    /// Y component of the coordinate.
    pub y: T
}
impl<T: Unsigned> Coord<T> {
    /// Create a grid coordinate at (x, y)
    pub fn new(x: T, y: T) -> Coord<T> {
        // Field-init shorthand; identical to `Coord { x: x, y: y }`.
        Coord { x, y }
    }
}
/// Rectangle defined by inclusive minimum and maximum coordinates
#[derive(Clone, Copy, Eq, Debug, PartialEq)]
pub struct Rect<T: Copy + Unsigned> {
    /// Minimum coordinate (inclusive)
    pub min_coord: Coord<T>,
    /// Maximum coordinate (inclusive)
    pub max_coord: Coord<T>
}
impl<T: Copy + PartialOrd + Unsigned> Rect<T> {
    /// Create a new Rect defined by inclusive minimum and maximum
    /// coordinates. If min_coord is greater than max_coord on either
    /// axis then None is returned.
    pub fn new(min_coord: Coord<T>, max_coord: Coord<T>) -> Option<Rect<T>> {
        // Keep the `<=` comparisons from the original so incomparable
        // values under `PartialOrd` still reject the rectangle.
        let ordered = min_coord.x <= max_coord.x && min_coord.y <= max_coord.y;
        if ordered {
            Some(Rect { min_coord, max_coord })
        } else {
            None
        }
    }

    /// Iterate from minimum coord to maximum coord by row.
    pub fn iter(&self) -> RectIter<T> {
        RectIter {
            rect: *self,
            cur_coord: self.min_coord,
        }
    }
}
/// Iterator over the coordinates of a `Rect`, produced by `Rect::iter`.
pub struct RectIter<T: Copy + Unsigned> {
    // The rectangle being traversed (bounds are inclusive).
    rect: Rect<T>,
    // Next coordinate to yield; iteration ends once its y exceeds the maximum.
    cur_coord: Coord<T>
}
impl<T: Copy + Ord + Unsigned + Add<Output=T> + num::One> Iterator for RectIter<T> {
    type Item = Coord<T>;

    /// Yields coordinates in row-major order: x advances fastest and wraps
    /// back to the left edge when it passes the (inclusive) right edge.
    fn next(&mut self) -> Option<Self::Item> {
        if self.cur_coord.y > self.rect.max_coord.y {
            // Walked past the last row; iteration is complete.
            return None;
        }
        let current = self.cur_coord;
        // Step one column right, wrapping to the start of the next row
        // once the right edge has been passed.
        self.cur_coord.x = self.cur_coord.x + T::one();
        if self.cur_coord.x > self.rect.max_coord.x {
            self.cur_coord.x = self.rect.min_coord.x;
            self.cur_coord.y = self.cur_coord.y + T::one();
        }
        Some(current)
    }
}
#[test]
fn test_rect_iter() {
    // Expect row-major traversal of the inclusive 3x3 grid (1,2)..=(3,4).
    let rect = Rect::new(Coord::new(1, 2), Coord::new(3, 4)).unwrap();
    let coords: Vec<Coord<u8>> = rect.iter().collect();
    assert_eq!(coords, [
        Coord::new(1, 2), Coord::new(2, 2), Coord::new(3, 2),
        Coord::new(1, 3), Coord::new(2, 3), Coord::new(3, 3),
        Coord::new(1, 4), Coord::new(2, 4), Coord::new(3, 4)]);
}
|
#![feature(core)]
#![cfg_attr(test, feature(io, collections))]
extern crate conduit;
use std::error::Error;
use conduit::{Request, Response, Handler};
/// Hooks run around a wrapped handler: `before` on the way in, `after` on
/// the way out.
pub trait Middleware: Send + Sync {
    /// Runs before the wrapped handler. Returning `Err` aborts the chain:
    /// the handler is skipped and only previously-run middlewares get their
    /// `after` hook (see `MiddlewareBuilder::call`).
    fn before(&self, _: &mut Request) -> Result<(), Box<Error+Send>> {
        Ok(())
    }
    /// Runs after the handler (or after a `before` failure) with the current
    /// result. The default implementation passes the result through unchanged.
    fn after(&self, _: &mut Request, res: Result<Response, Box<Error+Send>>)
             -> Result<Response, Box<Error+Send>>
    {
        res
    }
}
/// A middleware that wraps the whole downstream chain: it receives the
/// current handler and becomes the new handler itself.
pub trait AroundMiddleware: Handler {
    /// Supplies the handler this middleware must delegate to when called.
    fn with_handler(&mut self, handler: Box<Handler + Send + Sync>);
}
/// Composes a handler with an ordered list of middlewares.
pub struct MiddlewareBuilder {
    // Middlewares in registration order; `before` runs front-to-back,
    // `after` back-to-front.
    middlewares: Vec<Box<Middleware + Send + Sync>>,
    // Innermost handler; `Option` because `around` temporarily takes it.
    handler: Option<Box<Handler + Send + Sync>>
}
impl MiddlewareBuilder {
    /// Wraps `handler` as the innermost element of a new, empty chain.
    pub fn new<H: Handler>(handler: H) -> MiddlewareBuilder {
        let boxed = Box::new(handler) as Box<Handler + Send + Sync>;
        MiddlewareBuilder {
            middlewares: Vec::new(),
            handler: Some(boxed),
        }
    }

    /// Appends `middleware`; it runs after all previously added middlewares.
    pub fn add<M: Middleware>(&mut self, middleware: M) {
        let boxed = Box::new(middleware) as Box<Middleware + Send + Sync>;
        self.middlewares.push(boxed);
    }

    /// Wraps the current handler inside `middleware`, which becomes the new
    /// handler. Panics if called when no handler is present.
    pub fn around<M: AroundMiddleware>(&mut self, mut middleware: M) {
        let inner = self.handler.take().unwrap();
        middleware.with_handler(inner);
        self.handler = Some(Box::new(middleware) as Box<Handler + Send + Sync>);
    }
}
impl Handler for MiddlewareBuilder {
    /// Runs every middleware's `before` hook in registration order, then the
    /// wrapped handler, then the applicable `after` hooks in reverse order.
    ///
    /// If a `before` hook fails, the handler is skipped and only middlewares
    /// that ran *before* the failing one (not the failing one itself) get
    /// their `after` hook, seeded with the error.
    fn call(&self, req: &mut Request) -> Result<Response, Box<Error+Send>> {
        let mut error = None;
        for (i, middleware) in self.middlewares.iter().enumerate() {
            match middleware.before(req) {
                Ok(_) => (),
                Err(err) => {
                    // Remember which middleware failed so only its
                    // predecessors are unwound below.
                    error = Some((err, i));
                    break;
                }
            }
        }
        match error {
            Some((err, i)) => {
                // `..i` excludes the failing middleware itself.
                let middlewares = &self.middlewares[..i];
                run_afters(middlewares, req, Err(err))
            },
            None => {
                let res = { self.handler.as_ref().unwrap().call(req) };
                let middlewares = self.middlewares.as_slice();
                run_afters(middlewares, req, res)
            }
        }
    }
}
/// Runs the `after` hook of each middleware in reverse registration order,
/// threading the intermediate result through the chain.
fn run_afters(middleware: &[Box<Middleware>],
              req: &mut Request,
              res: Result<Response, Box<Error+Send>>)
              -> Result<Response, Box<Error+Send>>
{
    let mut acc = res;
    for m in middleware.iter().rev() {
        acc = m.after(req, acc);
    }
    acc
}
#[cfg(test)]
mod tests {
    extern crate semver;
    use {MiddlewareBuilder, Middleware, AroundMiddleware};
    use std::collections::HashMap;
    use std::error::Error;
    use std::old_io::net::ip::IpAddr;
    use std::old_io::{self, MemReader};
    use conduit;
    use conduit::{Request, Response, Host, Headers, Method, Scheme, Extensions};
    use conduit::{Handler, TypeMap};
    // Minimal `Request` implementation: only the members the middleware chain
    // touches (method, path, extensions) are backed by real data; everything
    // else is `unimplemented!()`.
    struct RequestSentinel {
        path: String,
        extensions: TypeMap,
        method: Method
    }
    impl RequestSentinel {
        fn new(method: Method, path: &'static str) -> RequestSentinel {
            RequestSentinel {
                path: path.to_string(),
                extensions: TypeMap::new(),
                method: method
            }
        }
    }
    impl conduit::Request for RequestSentinel {
        fn http_version(&self) -> semver::Version { unimplemented!() }
        fn conduit_version(&self) -> semver::Version { unimplemented!() }
        fn method(&self) -> Method { self.method }
        fn scheme(&self) -> Scheme { unimplemented!() }
        fn host<'a>(&'a self) -> Host<'a> { unimplemented!() }
        fn virtual_root<'a>(&'a self) -> Option<&'a str> { unimplemented!() }
        fn path<'a>(&'a self) -> &'a str {
            self.path.as_slice()
        }
        fn query_string<'a>(&'a self) -> Option<&'a str> { unimplemented!() }
        fn remote_ip(&self) -> IpAddr { unimplemented!() }
        fn content_length(&self) -> Option<u64> { unimplemented!() }
        fn headers<'a>(&'a self) -> &'a Headers { unimplemented!() }
        fn body<'a>(&'a mut self) -> &'a mut Reader { unimplemented!() }
        fn extensions<'a>(&'a self) -> &'a Extensions {
            &self.extensions
        }
        fn mut_extensions<'a>(&'a mut self) -> &'a mut Extensions {
            &mut self.extensions
        }
    }
    // Middleware that stores "hello" in the request extensions.
    struct MyMiddleware;
    impl Middleware for MyMiddleware {
        fn before<'a>(&self, req: &'a mut Request) -> Result<(), Box<Error+Send>> {
            req.mut_extensions().insert("hello".to_string());
            Ok(())
        }
    }
    // Middleware that converts any downstream error into a 500 response whose
    // body is the error description.
    struct ErrorRecovery;
    impl Middleware for ErrorRecovery {
        fn after(&self, _: &mut Request, res: Result<Response, Box<Error+Send>>)
                 -> Result<Response, Box<Error+Send>>
        {
            res.or_else(|e| {
                let e = e.description().to_string();
                Ok(Response {
                    status: (500, "Internal Server Error"),
                    headers: HashMap::new(),
                    body: Box::new(MemReader::new(e.into_bytes()))
                })
            })
        }
    }
    // Middleware whose `before` always fails, aborting the chain.
    struct ProducesError;
    impl Middleware for ProducesError {
        fn before(&self, _: &mut Request) -> Result<(), Box<Error+Send>> {
            Err(Box::new(old_io::standard_error(old_io::OtherIoError)) as Box<Error+Send>)
        }
    }
    // Registered *after* the failing middleware; its `after` hook must never
    // run — the unconditional 200 it returns would break the assertions.
    struct NotReached;
    impl Middleware for NotReached {
        fn after(&self, _: &mut Request, _: Result<Response, Box<Error+Send>>)
                 -> Result<Response, Box<Error+Send>>
        {
            Ok(Response {
                status: (200, "OK"),
                headers: HashMap::new(),
                body: Box::new(MemReader::new(vec!()))
            })
        }
    }
    // Around-middleware that inserts "hello" then delegates to the wrapped handler.
    struct MyAroundMiddleware {
        handler: Option<Box<Handler + Send + Sync>>
    }
    impl MyAroundMiddleware {
        fn new() -> MyAroundMiddleware {
            MyAroundMiddleware { handler: None }
        }
    }
    impl Middleware for MyAroundMiddleware {}
    impl AroundMiddleware for MyAroundMiddleware {
        fn with_handler(&mut self, handler: Box<Handler + Send + Sync>) {
            self.handler = Some(handler)
        }
    }
    impl Handler for MyAroundMiddleware {
        fn call(&self, req: &mut Request) -> Result<Response, Box<Error+Send>> {
            req.mut_extensions().insert("hello".to_string());
            self.handler.as_ref().unwrap().call(req)
        }
    }
    fn get_extension<'a, T: 'static>(req: &'a Request) -> &'a T {
        req.extensions().find::<T>().unwrap()
    }
    fn response(string: String) -> Response {
        Response {
            status: (200, "OK"),
            headers: HashMap::new(),
            body: Box::new(MemReader::new(string.into_bytes()))
        }
    }
    fn handler(req: &mut Request) -> Result<Response, Box<Error+Send>> {
        let hello = get_extension::<String>(req);
        Ok(response(hello.clone()))
    }
    fn error_handler(_: &mut Request) -> Result<Response, Box<Error+Send>> {
        Err(Box::new(old_io::IoError {
            kind: old_io::OtherIoError,
            desc: "Error in handler",
            detail: None,
        }) as Box<Error+Send>)
    }
    fn middle_handler(req: &mut Request) -> Result<Response, Box<Error+Send>> {
        let hello = get_extension::<String>(req);
        let middle = get_extension::<String>(req);
        Ok(response(format!("{} {}", hello, middle)))
    }
    #[test]
    fn test_simple_middleware() {
        let mut builder = MiddlewareBuilder::new(handler);
        builder.add(MyMiddleware);
        let mut req = RequestSentinel::new(Method::Get, "/");
        let mut res = builder.call(&mut req).ok().expect("No response");
        assert_eq!(res.body.read_to_string().ok().expect("No body"), "hello".to_string());
    }
    #[test]
    fn test_error_recovery() {
        let mut builder = MiddlewareBuilder::new(handler);
        builder.add(ErrorRecovery);
        builder.add(ProducesError);
        // the error bubbles up from ProducesError and shouldn't reach here
        builder.add(NotReached);
        let mut req = RequestSentinel::new(Method::Get, "/");
        let res = builder.call(&mut req).ok().expect("Error not handled");
        assert_eq!(res.status, (500, "Internal Server Error"));
    }
    #[test]
    fn test_error_recovery_in_handlers() {
        let mut builder = MiddlewareBuilder::new(error_handler);
        builder.add(ErrorRecovery);
        let mut req = RequestSentinel::new(Method::Get, "/");
        let mut res = builder.call(&mut req).ok().expect("Error not handled");
        assert_eq!(res.status, (500, "Internal Server Error"));
        assert_eq!(res.body.read_to_string().ok().expect("No body"), "Error in handler".to_string());
    }
    #[test]
    fn test_around_middleware() {
        let mut builder = MiddlewareBuilder::new(middle_handler);
        builder.add(MyMiddleware);
        builder.around(MyAroundMiddleware::new());
        let mut req = RequestSentinel::new(Method::Get, "/");
        let mut res = builder.call(&mut req).ok().expect("No response");
        assert_eq!(res.body.read_to_string().ok().expect("No body"), "hello hello".to_string());
    }
}
Update to rust master
#![feature(core)]
#![cfg_attr(test, feature(io))]
extern crate conduit;
use std::error::Error;
use conduit::{Request, Response, Handler};
/// Hooks run around a wrapped handler: `before` on the way in, `after` on
/// the way out.
pub trait Middleware: Send + Sync {
    /// Runs before the wrapped handler. Returning `Err` aborts the chain:
    /// the handler is skipped and only previously-run middlewares get their
    /// `after` hook (see `MiddlewareBuilder::call`).
    fn before(&self, _: &mut Request) -> Result<(), Box<Error+Send>> {
        Ok(())
    }
    /// Runs after the handler (or after a `before` failure) with the current
    /// result. The default implementation passes the result through unchanged.
    fn after(&self, _: &mut Request, res: Result<Response, Box<Error+Send>>)
             -> Result<Response, Box<Error+Send>>
    {
        res
    }
}
/// A middleware that wraps the whole downstream chain: it receives the
/// current handler and becomes the new handler itself.
pub trait AroundMiddleware: Handler {
    /// Supplies the handler this middleware must delegate to when called.
    fn with_handler(&mut self, handler: Box<Handler + Send + Sync>);
}
/// Composes a handler with an ordered list of middlewares.
pub struct MiddlewareBuilder {
    // Middlewares in registration order; `before` runs front-to-back,
    // `after` back-to-front.
    middlewares: Vec<Box<Middleware + Send + Sync>>,
    // Innermost handler; `Option` because `around` temporarily takes it.
    handler: Option<Box<Handler + Send + Sync>>
}
impl MiddlewareBuilder {
    /// Wraps `handler` as the innermost element of a new, empty chain.
    pub fn new<H: Handler>(handler: H) -> MiddlewareBuilder {
        let boxed = Box::new(handler) as Box<Handler + Send + Sync>;
        MiddlewareBuilder {
            middlewares: Vec::new(),
            handler: Some(boxed),
        }
    }

    /// Appends `middleware`; it runs after all previously added middlewares.
    pub fn add<M: Middleware>(&mut self, middleware: M) {
        let boxed = Box::new(middleware) as Box<Middleware + Send + Sync>;
        self.middlewares.push(boxed);
    }

    /// Wraps the current handler inside `middleware`, which becomes the new
    /// handler. Panics if called when no handler is present.
    pub fn around<M: AroundMiddleware>(&mut self, mut middleware: M) {
        let inner = self.handler.take().unwrap();
        middleware.with_handler(inner);
        self.handler = Some(Box::new(middleware) as Box<Handler + Send + Sync>);
    }
}
impl Handler for MiddlewareBuilder {
    /// Runs every middleware's `before` hook in registration order, then the
    /// wrapped handler, then the applicable `after` hooks in reverse order.
    ///
    /// If a `before` hook fails, the handler is skipped and only middlewares
    /// that ran *before* the failing one (not the failing one itself) get
    /// their `after` hook, seeded with the error.
    fn call(&self, req: &mut Request) -> Result<Response, Box<Error+Send>> {
        let mut error = None;
        for (i, middleware) in self.middlewares.iter().enumerate() {
            match middleware.before(req) {
                Ok(_) => (),
                Err(err) => {
                    // Remember which middleware failed so only its
                    // predecessors are unwound below.
                    error = Some((err, i));
                    break;
                }
            }
        }
        match error {
            Some((err, i)) => {
                // `..i` excludes the failing middleware itself.
                let middlewares = &self.middlewares[..i];
                run_afters(middlewares, req, Err(err))
            },
            None => {
                let res = { self.handler.as_ref().unwrap().call(req) };
                let middlewares = self.middlewares.as_slice();
                run_afters(middlewares, req, res)
            }
        }
    }
}
/// Runs the `after` hook of each middleware in reverse registration order,
/// threading the intermediate result through the chain.
fn run_afters(middleware: &[Box<Middleware>],
              req: &mut Request,
              res: Result<Response, Box<Error+Send>>)
              -> Result<Response, Box<Error+Send>>
{
    let mut acc = res;
    for m in middleware.iter().rev() {
        acc = m.after(req, acc);
    }
    acc
}
#[cfg(test)]
mod tests {
    extern crate semver;
    use {MiddlewareBuilder, Middleware, AroundMiddleware};
    use std::collections::HashMap;
    use std::error::Error;
    use std::old_io::net::ip::IpAddr;
    use std::old_io::{self, MemReader};
    use conduit;
    use conduit::{Request, Response, Host, Headers, Method, Scheme, Extensions};
    use conduit::{Handler, TypeMap};
    // Minimal `Request` implementation: only the members the middleware chain
    // touches (method, path, extensions) are backed by real data; everything
    // else is `unimplemented!()`.
    struct RequestSentinel {
        path: String,
        extensions: TypeMap,
        method: Method
    }
    impl RequestSentinel {
        fn new(method: Method, path: &'static str) -> RequestSentinel {
            RequestSentinel {
                path: path.to_string(),
                extensions: TypeMap::new(),
                method: method
            }
        }
    }
    impl conduit::Request for RequestSentinel {
        fn http_version(&self) -> semver::Version { unimplemented!() }
        fn conduit_version(&self) -> semver::Version { unimplemented!() }
        fn method(&self) -> Method { self.method }
        fn scheme(&self) -> Scheme { unimplemented!() }
        fn host<'a>(&'a self) -> Host<'a> { unimplemented!() }
        fn virtual_root<'a>(&'a self) -> Option<&'a str> { unimplemented!() }
        fn path<'a>(&'a self) -> &'a str {
            self.path.as_slice()
        }
        fn query_string<'a>(&'a self) -> Option<&'a str> { unimplemented!() }
        fn remote_ip(&self) -> IpAddr { unimplemented!() }
        fn content_length(&self) -> Option<u64> { unimplemented!() }
        fn headers<'a>(&'a self) -> &'a Headers { unimplemented!() }
        fn body<'a>(&'a mut self) -> &'a mut Reader { unimplemented!() }
        fn extensions<'a>(&'a self) -> &'a Extensions {
            &self.extensions
        }
        fn mut_extensions<'a>(&'a mut self) -> &'a mut Extensions {
            &mut self.extensions
        }
    }
    // Middleware that stores "hello" in the request extensions.
    struct MyMiddleware;
    impl Middleware for MyMiddleware {
        fn before<'a>(&self, req: &'a mut Request) -> Result<(), Box<Error+Send>> {
            req.mut_extensions().insert("hello".to_string());
            Ok(())
        }
    }
    // Middleware that converts any downstream error into a 500 response whose
    // body is the error description.
    struct ErrorRecovery;
    impl Middleware for ErrorRecovery {
        fn after(&self, _: &mut Request, res: Result<Response, Box<Error+Send>>)
                 -> Result<Response, Box<Error+Send>>
        {
            res.or_else(|e| {
                let e = e.description().to_string();
                Ok(Response {
                    status: (500, "Internal Server Error"),
                    headers: HashMap::new(),
                    body: Box::new(MemReader::new(e.into_bytes()))
                })
            })
        }
    }
    // Middleware whose `before` always fails, aborting the chain.
    struct ProducesError;
    impl Middleware for ProducesError {
        fn before(&self, _: &mut Request) -> Result<(), Box<Error+Send>> {
            Err(Box::new(old_io::standard_error(old_io::OtherIoError)) as Box<Error+Send>)
        }
    }
    // Registered *after* the failing middleware; its `after` hook must never
    // run — the unconditional 200 it returns would break the assertions.
    struct NotReached;
    impl Middleware for NotReached {
        fn after(&self, _: &mut Request, _: Result<Response, Box<Error+Send>>)
                 -> Result<Response, Box<Error+Send>>
        {
            Ok(Response {
                status: (200, "OK"),
                headers: HashMap::new(),
                body: Box::new(MemReader::new(vec!()))
            })
        }
    }
    // Around-middleware that inserts "hello" then delegates to the wrapped handler.
    struct MyAroundMiddleware {
        handler: Option<Box<Handler + Send + Sync>>
    }
    impl MyAroundMiddleware {
        fn new() -> MyAroundMiddleware {
            MyAroundMiddleware { handler: None }
        }
    }
    impl Middleware for MyAroundMiddleware {}
    impl AroundMiddleware for MyAroundMiddleware {
        fn with_handler(&mut self, handler: Box<Handler + Send + Sync>) {
            self.handler = Some(handler)
        }
    }
    impl Handler for MyAroundMiddleware {
        fn call(&self, req: &mut Request) -> Result<Response, Box<Error+Send>> {
            req.mut_extensions().insert("hello".to_string());
            self.handler.as_ref().unwrap().call(req)
        }
    }
    fn get_extension<'a, T: 'static>(req: &'a Request) -> &'a T {
        req.extensions().find::<T>().unwrap()
    }
    fn response(string: String) -> Response {
        Response {
            status: (200, "OK"),
            headers: HashMap::new(),
            body: Box::new(MemReader::new(string.into_bytes()))
        }
    }
    fn handler(req: &mut Request) -> Result<Response, Box<Error+Send>> {
        let hello = get_extension::<String>(req);
        Ok(response(hello.clone()))
    }
    fn error_handler(_: &mut Request) -> Result<Response, Box<Error+Send>> {
        Err(Box::new(old_io::IoError {
            kind: old_io::OtherIoError,
            desc: "Error in handler",
            detail: None,
        }) as Box<Error+Send>)
    }
    fn middle_handler(req: &mut Request) -> Result<Response, Box<Error+Send>> {
        let hello = get_extension::<String>(req);
        let middle = get_extension::<String>(req);
        Ok(response(format!("{} {}", hello, middle)))
    }
    #[test]
    fn test_simple_middleware() {
        let mut builder = MiddlewareBuilder::new(handler);
        builder.add(MyMiddleware);
        let mut req = RequestSentinel::new(Method::Get, "/");
        let mut res = builder.call(&mut req).ok().expect("No response");
        assert_eq!(res.body.read_to_string().ok().expect("No body"), "hello".to_string());
    }
    #[test]
    fn test_error_recovery() {
        let mut builder = MiddlewareBuilder::new(handler);
        builder.add(ErrorRecovery);
        builder.add(ProducesError);
        // the error bubbles up from ProducesError and shouldn't reach here
        builder.add(NotReached);
        let mut req = RequestSentinel::new(Method::Get, "/");
        let res = builder.call(&mut req).ok().expect("Error not handled");
        assert_eq!(res.status, (500, "Internal Server Error"));
    }
    #[test]
    fn test_error_recovery_in_handlers() {
        let mut builder = MiddlewareBuilder::new(error_handler);
        builder.add(ErrorRecovery);
        let mut req = RequestSentinel::new(Method::Get, "/");
        let mut res = builder.call(&mut req).ok().expect("Error not handled");
        assert_eq!(res.status, (500, "Internal Server Error"));
        assert_eq!(res.body.read_to_string().ok().expect("No body"), "Error in handler".to_string());
    }
    #[test]
    fn test_around_middleware() {
        let mut builder = MiddlewareBuilder::new(middle_handler);
        builder.add(MyMiddleware);
        builder.around(MyAroundMiddleware::new());
        let mut req = RequestSentinel::new(Method::Get, "/");
        let mut res = builder.call(&mut req).ok().expect("No response");
        assert_eq!(res.body.read_to_string().ok().expect("No body"), "hello hello".to_string());
    }
}
|
#![feature(macro_rules)]
extern crate libc;
extern crate regex;
pub mod client;
pub mod io;
pub mod ip;
pub mod message;
pub mod query;
pub mod types;
pub mod utils;
Not here yet
#![feature(macro_rules)]
extern crate libc;
extern crate regex;
pub mod client;
pub mod io;
pub mod message;
pub mod query;
pub mod types;
pub mod utils;
|
//! _**tini** is a **t**iny **ini**-file parsing library_
//!
//! This small library provides basic functions to operate with ini-files.
//!
//! Features:
//!
//! * no dependencies;
//! * parsing [from file](struct.Ini.html#method.from_file) and [from buffer](struct.Ini.html#method.from_buffer);
//! * [convert parsed value to given type](struct.Ini.html#method.get);
//! * [parse comma-separated lists to vectors](struct.Ini.html#method.get_vec);
//! * construct new ini-structure with [method chaining](struct.Ini.html#method.item);
//! * writing [to file](struct.Ini.html#method.to_file) and [to buffer](struct.Ini.html#method.to_buffer).
//!
//! # Examples
//! ## Read from buffer and get string values
//! ````
//! # use tini::Ini;
//! let conf = Ini::from_buffer(["[search]",
//! "g = google.com",
//! "dd = duckduckgo.com"].join("\n"));
//!
//! let g: String = conf.get("search", "g").unwrap();
//! let dd: String = conf.get("search", "dd").unwrap();
//!
//! assert_eq!(g, "google.com");
//! assert_eq!(dd, "duckduckgo.com");
//! ````
//! ## Construct in program and get vectors
//! ````
//! # use tini::Ini;
//! let conf = Ini::new().section("floats")
//! .item("consts", "3.1416, 2.7183")
//! .section("integers")
//! .item("lost", "4,8,15,16,23,42");
//! let consts: Vec<f64> = conf.get_vec("floats", "consts").unwrap();
//! let lost: Vec<i32> = conf.get_vec("integers", "lost").unwrap();
//!
//! assert_eq!(consts, [3.1416, 2.7183]);
//! assert_eq!(lost, [4, 8, 15, 16, 23, 42]);
//! ````
use ordered_hashmap::OrderedHashMap;
use parser::{parse_line, Parsed};
use std::fmt;
use std::fs::File;
use std::io::{self, BufReader, BufWriter, Read, Write};
use std::iter::Iterator;
use std::path::Path;
use std::str::FromStr;
mod ordered_hashmap;
type Section = OrderedHashMap<String, String>;
type IniParsed = OrderedHashMap<String, Section>;
type SectionIter<'a> = ordered_hashmap::Iter<'a, String, String>;
type SectionIterMut<'a> = ordered_hashmap::IterMut<'a, String, String>;
/// Structure for INI-file data
#[derive(Debug)]
pub struct Ini {
    #[doc(hidden)]
    data: IniParsed,
    // Name of the section subsequent `item()` calls insert into; set by
    // `section()` and empty for a fresh `Ini`.
    last_section_name: String,
}
impl Ini {
/// Create an empty Ini
pub fn new() -> Ini {
Ini {
data: IniParsed::new(),
last_section_name: String::new(),
}
}
fn from_string(string: &str) -> Ini {
let mut result = Ini::new();
for (i, line) in string.lines().enumerate() {
match parse_line(&line) {
Parsed::Section(name) => result = result.section(name),
Parsed::Value(name, value) => result = result.item(name, value),
Parsed::Error(msg) => println!("line {}: error: {}", i, msg),
_ => (),
};
}
result
}
/// Construct Ini from file
///
/// # Errors
/// Errors returned by `File::open()` and `BufReader::read_to_string()`
///
///
/// # Examples
/// You may use Path
///
/// ```
/// # use std::path::Path;
/// # use tini::Ini;
/// let path = Path::new("./examples/example.ini");
/// let conf = Ini::from_file(path);
/// assert!(conf.ok().is_some());
/// ```
///
/// or `&str`
///
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_file("./examples/example.ini");
/// assert!(conf.ok().is_some());
/// ```
pub fn from_file<S: AsRef<Path> + ?Sized>(path: &S) -> Result<Ini, io::Error> {
let file = File::open(path)?;
let mut reader = BufReader::new(file);
let mut buffer = String::new();
reader.read_to_string(&mut buffer)?;
Ok(Ini::from_string(&buffer))
}
/// Construct Ini from buffer
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_buffer("[section]\none = 1");
/// let value: Option<u8> = conf.get("section", "one");
/// assert_eq!(value, Some(1));
/// ```
pub fn from_buffer<S: Into<String>>(buf: S) -> Ini {
Ini::from_string(&buf.into())
}
/// Set section name for following [`item()`](#method.item)s. This function doesn't create a
/// section.
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::new().section("empty");
/// assert_eq!(conf.to_buffer(), String::new());
/// ```
pub fn section<S: Into<String>>(mut self, name: S) -> Self {
self.last_section_name = name.into();
self
}
/// Add key-value pair to last section
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::new().section("test")
/// .item("value", "10");
///
/// let value: Option<u8> = conf.get("test", "value");
/// assert_eq!(value, Some(10));
/// ```
pub fn item<S: Into<String>>(mut self, name: S, value: S) -> Self {
self.data
.entry(self.last_section_name.clone())
.or_insert(Section::new())
.insert(name.into(), value.into());
self
}
/// Write Ini to file. This function is similar to `from_file` in use.
/// # Errors
/// Errors returned by `File::create()` and `BufWriter::write_all()`
///
pub fn to_file<S: AsRef<Path> + ?Sized>(&self, path: &S) -> Result<(), io::Error> {
let file = File::create(path)?;
let mut writer = BufWriter::new(file);
writer.write_all(self.to_buffer().as_bytes())?;
Ok(())
}
/// Write Ini to buffer
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_buffer("[section]\none = 1");
/// // you may use `conf.to_buffer()`
/// let value: String = conf.to_buffer();
/// // or format!("{}", conf);
/// // let value: String = format!("{}", conf);
/// // but the result will be the same
/// assert_eq!(value, "[section]\none = 1");
/// ```
pub fn to_buffer(&self) -> String {
format!("{}", self)
}
fn get_raw(&self, section: &str, key: &str) -> Option<&String> {
self.data.get(section).and_then(|x| x.get(key))
}
/// Get scalar value of key in section
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_buffer("[section]\none = 1");
/// let value: Option<u8> = conf.get("section", "one");
/// assert_eq!(value, Some(1));
/// ```
pub fn get<T: FromStr>(&self, section: &str, key: &str) -> Option<T> {
self.get_raw(section, key).and_then(|x| x.parse().ok())
}
/// Get vector value of key in section
///
/// The function returns `None` if one of the elements can not be parsed.
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_buffer("[section]\nlist = 1, 2, 3, 4");
/// let value: Option<Vec<u8>> = conf.get_vec("section", "list");
/// assert_eq!(value, Some(vec![1, 2, 3, 4]));
/// ```
pub fn get_vec<T>(&self, section: &str, key: &str) -> Option<Vec<T>>
where
T: FromStr,
{
// TODO: write a normal splitter taking into account quotes
self.get_raw(section, key).and_then(|x| {
x.split(',')
.map(|s| s.trim().parse())
.collect::<Result<Vec<T>, _>>()
.ok()
})
}
/// Iterate over a section by a name
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_buffer(["[search]",
/// "g = google.com",
/// "dd = duckduckgo.com"].join("\n"));
/// let search = conf.iter_section("search").unwrap();
/// for (k, v) in search {
/// println!("key: {} value: {}", k, v);
/// }
/// ```
pub fn iter_section(&self, section: &str) -> Option<SectionIter> {
self.data.get(section).map(|value| value.iter())
}
/// Iterate over all sections, yielding pairs of section name and iterator
/// over the section elements. The concrete iterator element type is
/// `(&'a String, ordered_hashmap::Iter<'a, String, String>)`.
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::new().section("foo")
///                      .item("item", "value")
///                      .item("other", "something")
///                      .section("bar")
///                      .item("one", "1");
/// for (section, iter) in conf.iter() {
///     for (key, val) in iter {
///         println!("section: {} key: {} val: {}", section, key, val);
///     }
/// }
/// ```
pub fn iter(&self) -> IniIter {
    IniIter {
        iter: self.data.iter(),
    }
}
/// Iterate over all sections, yielding pairs of section name and mutable
/// iterator over the section elements. The concrete iterator element type is
/// `(&'a String, ordered_hashmap::IterMut<'a, String, String>)`.
///
/// # Example
/// ```
/// # use tini::Ini;
/// let mut conf = Ini::new().section("foo")
///                          .item("item", "value")
///                          .item("other", "something")
///                          .section("bar")
///                          .item("one", "1");
/// for (section, iter_mut) in conf.iter_mut() {
///     for (key, val) in iter_mut {
///         *val = String::from("replaced");
///     }
/// }
/// ```
pub fn iter_mut(&mut self) -> IniIterMut {
    IniIterMut {
        iter: self.data.iter_mut(),
    }
}
}
impl fmt::Display for Ini {
    /// Render the configuration as INI text: a `[section]` header followed
    /// by `key = value` lines, with one blank line between sections and no
    /// trailing newline.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut buffer = String::new();
        for (section, iter) in self.iter() {
            buffer.push_str(&format!("[{}]\n", section));
            for (key, value) in iter {
                buffer.push_str(&format!("{} = {}\n", key, value));
            }
            // Blank line between sections; `push('\n')` avoids the
            // single-char `push_str` (clippy: `single_char_add_str`).
            buffer.push('\n');
        }
        // Drop the final "\n\n" (last item's newline + section separator)
        // so the output carries no trailing blank line.
        buffer.pop();
        buffer.pop();
        write!(f, "{}", buffer)
    }
}
#[doc(hidden)]
/// Iterator yielding `(section name, iterator over that section)` pairs.
pub struct IniIter<'a> {
    iter: ordered_hashmap::Iter<'a, String, Section>,
}
impl<'a> Iterator for IniIter<'a> {
    type Item = (&'a String, SectionIter<'a>);
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Advance the underlying map iterator and wrap the section in a
        // borrowing iterator of its own.
        let (name, section) = self.iter.next()?;
        Some((name, section.iter()))
    }
}
#[doc(hidden)]
/// Iterator yielding `(section name, mutable iterator over that section)` pairs.
pub struct IniIterMut<'a> {
    iter: ordered_hashmap::IterMut<'a, String, Section>,
}
impl<'a> Iterator for IniIterMut<'a> {
    type Item = (&'a String, SectionIterMut<'a>);
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Same shape as `IniIter::next`, but hands out mutable access to
        // the section's values.
        let (name, section) = self.iter.next()?;
        Some((name, section.iter_mut()))
    }
}
// Integration-style tests exercising the public `Ini` API end to end.
#[cfg(test)]
mod library_test {
    use super::*;
    // Scalars parse through `FromStr`: booleans...
    #[test]
    fn bool() {
        let ini = Ini::from_buffer("[string]\nabc = true");
        let abc: Option<bool> = ini.get("string", "abc");
        assert_eq!(abc, Some(true));
    }
    // ...and floats.
    #[test]
    fn float() {
        let ini = Ini::from_string("[section]\nname=10.5");
        let name: Option<f64> = ini.get("section", "name");
        assert_eq!(name, Some(10.5));
    }
    #[test]
    fn float_vec() {
        let ini = Ini::from_string("[section]\nname=1.2, 3.4, 5.6");
        let name: Option<Vec<f64>> = ini.get_vec("section", "name");
        assert_eq!(name, Some(vec![1.2, 3.4, 5.6]));
    }
    // A value that fails to parse as the requested type yields `None`.
    #[test]
    fn bad_cast() {
        let ini = Ini::new().section("one").item("a", "3.14");
        let a: Option<u32> = ini.get("one", "a");
        assert_eq!(a, None);
    }
    #[test]
    fn string_vec() {
        let ini = Ini::from_string("[section]\nname=a, b, c");
        let name: Option<Vec<String>> = ini.get_vec("section", "name");
        assert_eq!(
            name,
            Some(vec![
                String::from("a"),
                String::from("b"),
                String::from("c"),
            ])
        );
    }
    // One unparsable element poisons the whole vector.
    #[test]
    fn parse_error() {
        let ini = Ini::from_string("[section]\nlist = 1, 2, --, 4");
        let name: Option<Vec<u8>> = ini.get_vec("section", "list");
        assert_eq!(name, None);
    }
    // `unwrap_or` supplies a fallback when parsing fails.
    #[test]
    fn get_or_macro() {
        let ini = Ini::from_string("[section]\nlist = 1, 2, --, 4");
        let with_value: Vec<u8> = ini.get_vec("section", "list").unwrap_or(vec![1, 2, 3, 4]);
        assert_eq!(with_value, vec![1, 2, 3, 4]);
    }
    // Keys come back in insertion order (the ordered map's contract).
    #[test]
    fn ordering() {
        let ini = Ini::from_string("[a]\nc = 1\nb = 2\na = 3");
        let keys: Vec<String> = ini
            .data
            .get("a")
            .unwrap()
            .iter()
            .map(|(k, _)| k.clone())
            .collect();
        assert_eq!(["c", "b", "a"], keys[..]);
    }
    // `iter_mut` exposes values for in-place modification.
    #[test]
    fn mutating() {
        let mut config = Ini::new()
            .section("items")
            .item("a", "1")
            .item("b", "2")
            .item("c", "3");
        // mutate items
        for (_, item) in config.iter_mut() {
            for (_, value) in item {
                let v: i32 = value.parse().unwrap();
                *value = format!("{}", v + 1);
            }
        }
        let a_val: Option<u8> = config.get("items", "a");
        let b_val: Option<u8> = config.get("items", "b");
        let c_val: Option<u8> = config.get("items", "c");
        assert_eq!(a_val, Some(2));
        assert_eq!(b_val, Some(3));
        assert_eq!(c_val, Some(4));
    }
    // Re-setting an existing key overwrites the earlier value.
    #[test]
    fn redefine_item() {
        let config = Ini::new()
            .section("items")
            .item("one", "3")
            .item("two", "2")
            .item("one", "1");
        let one: Option<i32> = config.get("items", "one");
        assert_eq!(one, Some(1));
    }
    // Re-entering a section appends to it instead of replacing it.
    #[test]
    fn redefine_section() {
        let config = Ini::new()
            .section("one")
            .item("a", "1")
            .section("two")
            .item("b", "2")
            .section("one")
            .item("c", "3");
        let a_val: Option<i32> = config.get("one", "a");
        let c_val: Option<i32> = config.get("one", "c");
        assert_eq!(a_val, Some(1));
        assert_eq!(c_val, Some(3));
    }
}
// Line-oriented INI parser: classifies one raw input line at a time.
mod parser {
    /// Classification of a single input line.
    #[derive(Debug)]
    pub enum Parsed {
        Error(String),
        Empty,
        Section(String),
        Value(String, String), /* Vector(String, Vec<String>), impossible, because OrderedHashMap field has type String, not Vec */
    }
    /// Parse one line.
    ///
    /// Everything after the first `;` is a comment. A blank (or
    /// comment-only) line is `Empty`, `[name]` is a `Section`,
    /// `key = value` is a `Value`; anything else is an `Error`.
    pub fn parse_line(line: &str) -> Parsed {
        // `split` always yields at least one piece, so `next()` can never
        // be `None`; the previous `match` carried an unreachable arm here.
        let content = line.split(';').next().unwrap_or("").trim();
        if content.is_empty() {
            return Parsed::Empty;
        }
        if content.starts_with('[') {
            if content.ends_with(']') {
                let section_name = content.trim_matches(|c| c == '[' || c == ']').to_owned();
                return Parsed::Section(section_name);
            }
            return Parsed::Error("incorrect section syntax".to_owned());
        } else if content.contains('=') {
            // Split on the FIRST '=' only, so values may contain '='.
            let mut pair = content.splitn(2, '=').map(str::trim);
            // if key is None => error
            let key = match pair.next() {
                Some(value) => value.to_owned(),
                None => return Parsed::Error("key is None".to_owned()),
            };
            // A missing value (e.g. "key =") becomes the empty string.
            let value = pair.next().unwrap_or("").to_owned();
            if key.is_empty() {
                return Parsed::Error("empty key".to_owned());
            }
            return Parsed::Value(key, value);
        }
        Parsed::Error("incorrect syntax".to_owned())
    }
    #[cfg(test)]
    mod test {
        use super::*;
        #[test]
        fn test_comment() {
            match parse_line(";------") {
                Parsed::Empty => assert!(true),
                _ => assert!(false),
            }
        }
        #[test]
        fn test_entry() {
            match parse_line("name1 = 100 ; comment") {
                Parsed::Value(name, text) => {
                    assert_eq!(name, String::from("name1"));
                    assert_eq!(text, String::from("100"));
                }
                _ => assert!(false),
            }
        }
        #[test]
        fn test_weird_name() {
            match parse_line("_.,:(){}-#@&*| = 100") {
                Parsed::Value(name, text) => {
                    assert_eq!(name, String::from("_.,:(){}-#@&*|"));
                    assert_eq!(text, String::from("100"));
                }
                _ => assert!(false),
            }
        }
        #[test]
        fn test_text_entry() {
            match parse_line("text_name = hello world!") {
                Parsed::Value(name, text) => {
                    assert_eq!(name, String::from("text_name"));
                    assert_eq!(text, String::from("hello world!"));
                }
                _ => assert!(false),
            }
        }
        #[test]
        fn test_incorrect_token() {
            match parse_line("[section = 1, 2 = value") {
                Parsed::Error(_) => assert!(true),
                _ => assert!(false),
            }
        }
        #[test]
        fn test_incorrect_key_value_line() {
            match parse_line("= 3") {
                Parsed::Error(_) => assert!(true),
                _ => assert!(false),
            }
        }
    }
}
update ordering tests
//! _**tini** is a **t**iny **ini**-file parsing library_
//!
//! This small library provides basic functions to operate with ini-files.
//!
//! Features:
//!
//! * no dependencies;
//! * parsing [from file](struct.Ini.html#method.from_file) and [from buffer](struct.Ini.html#method.from_buffer);
//! * [convert parsed value to given type](struct.Ini.html#method.get);
//! * [parse comma-separated lists to vectors](struct.Ini.html#method.get_vec);
//! * construct new ini-structure with [method chaining](struct.Ini.html#method.item);
//! * writing [to file](struct.Ini.html#method.to_file) and [to buffer](struct.Ini.html#method.to_buffer).
//!
//! # Examples
//! ## Read from buffer and get string values
//! ````
//! # use tini::Ini;
//! let conf = Ini::from_buffer(["[search]",
//! "g = google.com",
//! "dd = duckduckgo.com"].join("\n"));
//!
//! let g: String = conf.get("search", "g").unwrap();
//! let dd: String = conf.get("search", "dd").unwrap();
//!
//! assert_eq!(g, "google.com");
//! assert_eq!(dd, "duckduckgo.com");
//! ````
//! ## Construct in program and get vectors
//! ````
//! # use tini::Ini;
//! let conf = Ini::new().section("floats")
//! .item("consts", "3.1416, 2.7183")
//! .section("integers")
//! .item("lost", "4,8,15,16,23,42");
//! let consts: Vec<f64> = conf.get_vec("floats", "consts").unwrap();
//! let lost: Vec<i32> = conf.get_vec("integers", "lost").unwrap();
//!
//! assert_eq!(consts, [3.1416, 2.7183]);
//! assert_eq!(lost, [4, 8, 15, 16, 23, 42]);
//! ````
use ordered_hashmap::OrderedHashMap;
use parser::{parse_line, Parsed};
use std::fmt;
use std::fs::File;
use std::io::{self, BufReader, BufWriter, Read, Write};
use std::iter::Iterator;
use std::path::Path;
use std::str::FromStr;
mod ordered_hashmap;
// One `[section]`: insertion-ordered map of key -> raw value string.
type Section = OrderedHashMap<String, String>;
// Whole parsed file: insertion-ordered map of section name -> section.
type IniParsed = OrderedHashMap<String, Section>;
// Borrowing iterators over one section's (key, value) pairs.
type SectionIter<'a> = ordered_hashmap::Iter<'a, String, String>;
type SectionIterMut<'a> = ordered_hashmap::IterMut<'a, String, String>;
/// Structure for INI-file data
#[derive(Debug)]
pub struct Ini {
    #[doc(hidden)]
    // Parsed contents: section name -> (key -> value), insertion-ordered.
    data: IniParsed,
    // Section targeted by builder-style `section()` / `item()` calls.
    last_section_name: String,
}
impl Ini {
/// Create an empty Ini
pub fn new() -> Ini {
    // No sections yet and no current section selected for `item()`.
    let empty = IniParsed::new();
    Ini {
        data: empty,
        last_section_name: String::new(),
    }
}
fn from_string(string: &str) -> Ini {
let mut result = Ini::new();
for (i, line) in string.lines().enumerate() {
match parse_line(&line) {
Parsed::Section(name) => result = result.section(name),
Parsed::Value(name, value) => result = result.item(name, value),
Parsed::Error(msg) => println!("line {}: error: {}", i, msg),
_ => (),
};
}
result
}
/// Construct Ini from file
///
/// # Errors
/// Errors returned by `File::open()` and `BufReader::read_to_string()`
///
///
/// # Examples
/// You may use Path
///
/// ```
/// # use std::path::Path;
/// # use tini::Ini;
/// let path = Path::new("./examples/example.ini");
/// let conf = Ini::from_file(path);
/// assert!(conf.ok().is_some());
/// ```
///
/// or `&str`
///
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_file("./examples/example.ini");
/// assert!(conf.ok().is_some());
/// ```
pub fn from_file<S: AsRef<Path> + ?Sized>(path: &S) -> Result<Ini, io::Error> {
    // Slurp the whole file through a buffered reader, then parse.
    let mut buffer = String::new();
    BufReader::new(File::open(path)?).read_to_string(&mut buffer)?;
    Ok(Ini::from_string(&buffer))
}
/// Construct Ini from buffer
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_buffer("[section]\none = 1");
/// let value: Option<u8> = conf.get("section", "one");
/// assert_eq!(value, Some(1));
/// ```
pub fn from_buffer<S: Into<String>>(buf: S) -> Ini {
    // Materialize the input as an owned `String`, then parse it.
    let owned: String = buf.into();
    Ini::from_string(owned.as_str())
}
/// Set section name for following [`item()`](#method.item)s. This function doesn't create a
/// section.
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::new().section("empty");
/// assert_eq!(conf.to_buffer(), String::new());
/// ```
pub fn section<S: Into<String>>(mut self, name: S) -> Self {
    // Only remember the target; the section itself is created lazily by
    // the first `item()` call that writes into it.
    self.last_section_name = name.into();
    self
}
/// Add key-value pair to last section
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::new().section("test")
///                      .item("value", "10");
///
/// let value: Option<u8> = conf.get("test", "value");
/// assert_eq!(value, Some(10));
/// ```
pub fn item<S: Into<String>>(mut self, name: S, value: S) -> Self {
    // Create the section on first use, then insert (or overwrite) the key.
    let section_key = self.last_section_name.clone();
    let section = self.data.entry(section_key).or_insert(Section::new());
    section.insert(name.into(), value.into());
    self
}
/// Write Ini to file. This function is similar to `from_file` in use.
///
/// # Errors
/// Errors returned by `File::create()`, `BufWriter::write_all()` and
/// `BufWriter::flush()`
pub fn to_file<S: AsRef<Path> + ?Sized>(&self, path: &S) -> Result<(), io::Error> {
    let file = File::create(path)?;
    let mut writer = BufWriter::new(file);
    writer.write_all(self.to_buffer().as_bytes())?;
    // Flush explicitly: `BufWriter`'s `Drop` also flushes, but it silently
    // swallows any I/O error, so callers would never see a failed write.
    writer.flush()?;
    Ok(())
}
/// Write Ini to buffer
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_buffer("[section]\none = 1");
/// // you may use `conf.to_buffer()`
/// let value: String = conf.to_buffer();
/// // or format!("{}", conf);
/// // let value: String = format!("{}", conf);
/// // but the result will be the same
/// assert_eq!(value, "[section]\none = 1");
/// ```
pub fn to_buffer(&self) -> String {
    // `format!("{}", self)` just invokes `Display`; `to_string()` is the
    // direct, idiomatic equivalent (clippy: `useless_format`).
    self.to_string()
}
// Raw (unparsed) access: the stored string for `key`, provided both the
// section and the key exist.
fn get_raw(&self, section: &str, key: &str) -> Option<&String> {
    self.data.get(section)?.get(key)
}
/// Get scalar value of key in section
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_buffer("[section]\none = 1");
/// let value: Option<u8> = conf.get("section", "one");
/// assert_eq!(value, Some(1));
/// ```
pub fn get<T: FromStr>(&self, section: &str, key: &str) -> Option<T> {
    // Fetch the raw string first, then delegate parsing to `FromStr`.
    let raw = self.get_raw(section, key)?;
    raw.parse().ok()
}
/// Get vector value of key in section
///
/// The function returns `None` if one of the elements can not be parsed.
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_buffer("[section]\nlist = 1, 2, 3, 4");
/// let value: Option<Vec<u8>> = conf.get_vec("section", "list");
/// assert_eq!(value, Some(vec![1, 2, 3, 4]));
/// ```
pub fn get_vec<T>(&self, section: &str, key: &str) -> Option<Vec<T>>
where
    T: FromStr,
{
    // TODO: write a normal splitter taking into account quotes
    let raw = self.get_raw(section, key)?;
    let mut items = Vec::new();
    for piece in raw.split(',') {
        // Any element that fails to parse makes the whole result `None`.
        items.push(piece.trim().parse().ok()?);
    }
    Some(items)
}
/// Iterate over a section by a name
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::from_buffer(["[search]",
///                              "g = google.com",
///                              "dd = duckduckgo.com"].join("\n"));
/// let search = conf.iter_section("search").unwrap();
/// for (k, v) in search {
///     println!("key: {} value: {}", k, v);
/// }
/// ```
pub fn iter_section(&self, section: &str) -> Option<SectionIter> {
    // `None` when no such section exists; otherwise a borrowing iterator.
    let section_map = self.data.get(section)?;
    Some(section_map.iter())
}
/// Iterate over all sections, yielding pairs of section name and iterator
/// over the section elements. The concrete iterator element type is
/// `(&'a String, ordered_hashmap::Iter<'a, String, String>)`.
///
/// # Example
/// ```
/// # use tini::Ini;
/// let conf = Ini::new().section("foo")
///                      .item("item", "value")
///                      .item("other", "something")
///                      .section("bar")
///                      .item("one", "1");
/// for (section, iter) in conf.iter() {
///     for (key, val) in iter {
///         println!("section: {} key: {} val: {}", section, key, val);
///     }
/// }
/// ```
pub fn iter(&self) -> IniIter {
    IniIter {
        iter: self.data.iter(),
    }
}
/// Iterate over all sections, yielding pairs of section name and mutable
/// iterator over the section elements. The concrete iterator element type is
/// `(&'a String, ordered_hashmap::IterMut<'a, String, String>)`.
///
/// # Example
/// ```
/// # use tini::Ini;
/// let mut conf = Ini::new().section("foo")
///                          .item("item", "value")
///                          .item("other", "something")
///                          .section("bar")
///                          .item("one", "1");
/// for (section, iter_mut) in conf.iter_mut() {
///     for (key, val) in iter_mut {
///         *val = String::from("replaced");
///     }
/// }
/// ```
pub fn iter_mut(&mut self) -> IniIterMut {
    IniIterMut {
        iter: self.data.iter_mut(),
    }
}
}
impl fmt::Display for Ini {
    /// Render the configuration as INI text: a `[section]` header followed
    /// by `key = value` lines, with one blank line between sections and no
    /// trailing newline.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut buffer = String::new();
        for (section, iter) in self.iter() {
            buffer.push_str(&format!("[{}]\n", section));
            for (key, value) in iter {
                buffer.push_str(&format!("{} = {}\n", key, value));
            }
            // Blank line between sections; `push('\n')` avoids the
            // single-char `push_str` (clippy: `single_char_add_str`).
            buffer.push('\n');
        }
        // Drop the final "\n\n" (last item's newline + section separator)
        // so the output carries no trailing blank line.
        buffer.pop();
        buffer.pop();
        write!(f, "{}", buffer)
    }
}
#[doc(hidden)]
/// Iterator yielding `(section name, iterator over that section)` pairs.
pub struct IniIter<'a> {
    iter: ordered_hashmap::Iter<'a, String, Section>,
}
impl<'a> Iterator for IniIter<'a> {
    type Item = (&'a String, SectionIter<'a>);
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Advance the underlying map iterator and wrap the section in a
        // borrowing iterator of its own.
        let (name, section) = self.iter.next()?;
        Some((name, section.iter()))
    }
}
#[doc(hidden)]
/// Iterator yielding `(section name, mutable iterator over that section)` pairs.
pub struct IniIterMut<'a> {
    iter: ordered_hashmap::IterMut<'a, String, Section>,
}
impl<'a> Iterator for IniIterMut<'a> {
    type Item = (&'a String, SectionIterMut<'a>);
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Same shape as `IniIter::next`, but hands out mutable access to
        // the section's values.
        let (name, section) = self.iter.next()?;
        Some((name, section.iter_mut()))
    }
}
// Integration-style tests exercising the public `Ini` API end to end.
#[cfg(test)]
mod library_test {
    use super::*;
    // Scalars parse through `FromStr`: booleans...
    #[test]
    fn bool() {
        let ini = Ini::from_buffer("[string]\nabc = true");
        let abc: Option<bool> = ini.get("string", "abc");
        assert_eq!(abc, Some(true));
    }
    // ...and floats.
    #[test]
    fn float() {
        let ini = Ini::from_string("[section]\nname=10.5");
        let name: Option<f64> = ini.get("section", "name");
        assert_eq!(name, Some(10.5));
    }
    #[test]
    fn float_vec() {
        let ini = Ini::from_string("[section]\nname=1.2, 3.4, 5.6");
        let name: Option<Vec<f64>> = ini.get_vec("section", "name");
        assert_eq!(name, Some(vec![1.2, 3.4, 5.6]));
    }
    // A value that fails to parse as the requested type yields `None`.
    #[test]
    fn bad_cast() {
        let ini = Ini::new().section("one").item("a", "3.14");
        let a: Option<u32> = ini.get("one", "a");
        assert_eq!(a, None);
    }
    #[test]
    fn string_vec() {
        let ini = Ini::from_string("[section]\nname=a, b, c");
        let name: Option<Vec<String>> = ini.get_vec("section", "name");
        assert_eq!(
            name,
            Some(vec![
                String::from("a"),
                String::from("b"),
                String::from("c"),
            ])
        );
    }
    // One unparsable element poisons the whole vector.
    #[test]
    fn parse_error() {
        let ini = Ini::from_string("[section]\nlist = 1, 2, --, 4");
        let name: Option<Vec<u8>> = ini.get_vec("section", "list");
        assert_eq!(name, None);
    }
    // `unwrap_or` supplies a fallback when parsing fails.
    #[test]
    fn get_or_macro() {
        let ini = Ini::from_string("[section]\nlist = 1, 2, --, 4");
        let with_value: Vec<u8> = ini.get_vec("section", "list").unwrap_or(vec![1, 2, 3, 4]);
        assert_eq!(with_value, vec![1, 2, 3, 4]);
    }
    // Insertion order is preserved when iterating entries...
    #[test]
    fn ordering_iter() {
        let ini = Ini::from_string("[a]\nc = 1\nb = 2\na = 3");
        let keys: Vec<&String> = ini.data.get("a").unwrap().iter().map(|(k, _)| k).collect();
        assert_eq!(["c", "b", "a"], keys[..]);
    }
    // ...and when iterating keys alone.
    #[test]
    fn ordering_keys() {
        let ini = Ini::from_string("[a]\nc = 1\nb = 2\na = 3");
        let keys: Vec<&String> = ini.data.get("a").unwrap().keys().collect();
        assert_eq!(["c", "b", "a"], keys[..]);
    }
    // `iter_mut` exposes values for in-place modification.
    #[test]
    fn mutating() {
        let mut config = Ini::new()
            .section("items")
            .item("a", "1")
            .item("b", "2")
            .item("c", "3");
        // mutate items
        for (_, item) in config.iter_mut() {
            for (_, value) in item {
                let v: i32 = value.parse().unwrap();
                *value = format!("{}", v + 1);
            }
        }
        let a_val: Option<u8> = config.get("items", "a");
        let b_val: Option<u8> = config.get("items", "b");
        let c_val: Option<u8> = config.get("items", "c");
        assert_eq!(a_val, Some(2));
        assert_eq!(b_val, Some(3));
        assert_eq!(c_val, Some(4));
    }
    // Re-setting an existing key overwrites the earlier value.
    #[test]
    fn redefine_item() {
        let config = Ini::new()
            .section("items")
            .item("one", "3")
            .item("two", "2")
            .item("one", "1");
        let one: Option<i32> = config.get("items", "one");
        assert_eq!(one, Some(1));
    }
    // Re-entering a section appends to it instead of replacing it.
    #[test]
    fn redefine_section() {
        let config = Ini::new()
            .section("one")
            .item("a", "1")
            .section("two")
            .item("b", "2")
            .section("one")
            .item("c", "3");
        let a_val: Option<i32> = config.get("one", "a");
        let c_val: Option<i32> = config.get("one", "c");
        assert_eq!(a_val, Some(1));
        assert_eq!(c_val, Some(3));
    }
}
// Line-oriented INI parser: classifies one raw input line at a time.
mod parser {
    /// Classification of a single input line.
    #[derive(Debug)]
    pub enum Parsed {
        Error(String),
        Empty,
        Section(String),
        Value(String, String), /* Vector(String, Vec<String>), impossible, because OrderedHashMap field has type String, not Vec */
    }
    /// Parse one line.
    ///
    /// Everything after the first `;` is a comment. A blank (or
    /// comment-only) line is `Empty`, `[name]` is a `Section`,
    /// `key = value` is a `Value`; anything else is an `Error`.
    pub fn parse_line(line: &str) -> Parsed {
        // `split` always yields at least one piece, so `next()` can never
        // be `None`; the previous `match` carried an unreachable arm here.
        let content = line.split(';').next().unwrap_or("").trim();
        if content.is_empty() {
            return Parsed::Empty;
        }
        if content.starts_with('[') {
            if content.ends_with(']') {
                let section_name = content.trim_matches(|c| c == '[' || c == ']').to_owned();
                return Parsed::Section(section_name);
            }
            return Parsed::Error("incorrect section syntax".to_owned());
        } else if content.contains('=') {
            // Split on the FIRST '=' only, so values may contain '='.
            let mut pair = content.splitn(2, '=').map(str::trim);
            // if key is None => error
            let key = match pair.next() {
                Some(value) => value.to_owned(),
                None => return Parsed::Error("key is None".to_owned()),
            };
            // A missing value (e.g. "key =") becomes the empty string.
            let value = pair.next().unwrap_or("").to_owned();
            if key.is_empty() {
                return Parsed::Error("empty key".to_owned());
            }
            return Parsed::Value(key, value);
        }
        Parsed::Error("incorrect syntax".to_owned())
    }
    #[cfg(test)]
    mod test {
        use super::*;
        #[test]
        fn test_comment() {
            match parse_line(";------") {
                Parsed::Empty => assert!(true),
                _ => assert!(false),
            }
        }
        #[test]
        fn test_entry() {
            match parse_line("name1 = 100 ; comment") {
                Parsed::Value(name, text) => {
                    assert_eq!(name, String::from("name1"));
                    assert_eq!(text, String::from("100"));
                }
                _ => assert!(false),
            }
        }
        #[test]
        fn test_weird_name() {
            match parse_line("_.,:(){}-#@&*| = 100") {
                Parsed::Value(name, text) => {
                    assert_eq!(name, String::from("_.,:(){}-#@&*|"));
                    assert_eq!(text, String::from("100"));
                }
                _ => assert!(false),
            }
        }
        #[test]
        fn test_text_entry() {
            match parse_line("text_name = hello world!") {
                Parsed::Value(name, text) => {
                    assert_eq!(name, String::from("text_name"));
                    assert_eq!(text, String::from("hello world!"));
                }
                _ => assert!(false),
            }
        }
        #[test]
        fn test_incorrect_token() {
            match parse_line("[section = 1, 2 = value") {
                Parsed::Error(_) => assert!(true),
                _ => assert!(false),
            }
        }
        #[test]
        fn test_incorrect_key_value_line() {
            match parse_line("= 3") {
                Parsed::Error(_) => assert!(true),
                _ => assert!(false),
            }
        }
    }
}
|
#![feature(associated_consts)]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(question_mark)]
//#![feature(plugin)]
//#![plugin(clippy)]
extern crate jsrs_common;
#[macro_use] extern crate matches;
pub mod alloc;
mod scope;
mod test_utils;
use std::cell::RefCell;
use std::collections::hash_map::HashMap;
use std::rc::Rc;
use jsrs_common::ast::Exp;
use jsrs_common::backend::Backend;
use jsrs_common::types::js_var::{JsPtrEnum, JsVar};
use jsrs_common::types::binding::{Binding, UniqueBinding};
use alloc::AllocBox;
use jsrs_common::gc_error::{GcError, Result};
use scope::{LookupError, Scope, ScopeTag};
/// Owns the runtime scope stack, the saved closure scopes, and the shared
/// heap (`AllocBox`) used by the garbage collector.
pub struct ScopeManager {
    // Scope stack; index 0 is the global scope, `last()` is the current one.
    scopes: Vec<Scope>,
    // Scopes captured by returned closures, keyed by the closure's binding.
    closures: HashMap<UniqueBinding, Scope>,
    // Shared heap for pointer-backed JS values.
    alloc_box: Rc<RefCell<AllocBox>>,
}
impl ScopeManager {
/// Create a manager whose stack starts with a single global call scope
/// backed by the shared allocation heap.
fn new(alloc_box: Rc<RefCell<AllocBox>>) -> ScopeManager {
    ScopeManager {
        scopes: vec![Scope::new(ScopeTag::Call, &alloc_box)],
        closures: HashMap::new(),
        alloc_box: alloc_box,
    }
}
/// Innermost (most recently pushed) scope. Panics if the stack is empty.
#[inline]
fn curr_scope(&self) -> &Scope {
    self.scopes.last().expect("Tried to access current scope, but none existed")
}
/// Mutable access to the innermost scope. Panics if the stack is empty.
#[inline]
fn curr_scope_mut(&mut self) -> &mut Scope {
    self.scopes.last_mut().expect("Tried to access current scope, but none existed")
}
/// The outermost (global) scope. Panics if the stack is empty.
#[inline]
fn global_scope(&self) -> &Scope {
    self.scopes.get(0).expect("Tried to access global scope, but none existed")
}
/// Mutable access to the global scope. Panics if the stack is empty.
#[inline]
fn global_scope_mut(&mut self) -> &mut Scope {
    self.scopes.get_mut(0).expect("Tried to access global scope, but none existed")
}
/// Reactivate a saved closure scope: move it out of the closure table and
/// push it onto the scope stack. (`pop_scope` re-registers it afterwards
/// via its `ScopeTag::Closure` tag.)
///
/// # Errors
/// `GcError::Scope` when no closure is registered under `closure`.
pub fn push_closure_scope(&mut self, closure: &UniqueBinding) -> Result<()> {
    let closure_scope = self.closures.remove(closure).ok_or(GcError::Scope)?;
    self.scopes.push(closure_scope);
    Ok(())
}
/// Push a fresh scope whose tag depends on the expression entering it:
/// function calls open a call frame, everything else a lexical block.
pub fn push_scope(&mut self, exp: &Exp) {
    let tag = if let Exp::Call(..) = *exp {
        ScopeTag::Call
    } else {
        ScopeTag::Block
    };
    self.scopes.push(Scope::new(tag, &self.alloc_box));
}
/// Pop the current scope, migrating its surviving stack entries either
/// into a new saved closure scope (when `returning_closure` is given) or
/// into the parent scope. When `gc_yield` is set, lets the collector run
/// on the parent afterwards.
///
/// # Errors
/// `GcError::Scope` when the stack is empty, or when the global scope
/// itself was popped (the program is ending; a final GC is triggered).
pub fn pop_scope(&mut self, returning_closure: Option<UniqueBinding>, gc_yield: bool) -> Result<()> {
    if let Some(mut scope) = self.scopes.pop() {
        // Clean up the dying scope's stack and take ownership of its
        // heap-allocated data for later collection.
        if self.scopes.is_empty() {
            // The global scope was popped and the program is ending.
            scope.trigger_gc();
            return Err(GcError::Scope);
        }
        if let Some(unique) = returning_closure {
            // The scope's survivors become the environment of a closure.
            let mut closure_scope = Scope::new(ScopeTag::Closure(unique.clone()), &self.alloc_box);
            scope.transfer_stack(&mut closure_scope, true)?;
            self.closures.insert(unique, closure_scope);
        } else {
            scope.transfer_stack(self.curr_scope_mut(), false)?
        }
        // Potentially trigger the garbage collector
        if gc_yield {
            self.curr_scope_mut().trigger_gc();
        }
        // A closure scope that finished executing is saved for its next
        // invocation. `unique` is already an owned copy produced by
        // `scope.tag.clone()`, so the extra `unique.clone()` the previous
        // version passed to `insert` was redundant.
        if let ScopeTag::Closure(unique) = scope.tag.clone() {
            self.closures.insert(unique, scope);
        }
        Ok(())
    } else {
        Err(GcError::Scope)
    }
}
}
impl Backend for ScopeManager {
    /// Allocate `var` (plus optional heap pointer) in the current scope and
    /// return its binding for later `load`/`store` calls.
    fn alloc(&mut self, var: JsVar, ptr: Option<JsPtrEnum>) -> Result<Binding> {
        let binding = var.binding.clone();
        self.curr_scope_mut().push_var(var, ptr)?;
        Ok(binding)
    }
    /// Try to load the variable behind a binding
    ///
    /// Walks the scope stack innermost-to-outermost; hitting a function
    /// boundary stops the walk early. Any failure falls back to one last
    /// lookup in the global scope, since globals are visible everywhere.
    fn load(&self, bnd: &Binding) -> Result<(JsVar, Option<JsPtrEnum>)> {
        let lookup = || {
            for scope in self.scopes.iter().rev() {
                match scope.get_var_copy(bnd) {
                    Ok(v) => { return Ok(v); },
                    Err(LookupError::FnBoundary) => {
                        // Local bindings must not leak across function calls.
                        return Err(GcError::Load(bnd.clone()));
                    },
                    // Not in this scope; keep walking outwards.
                    Err(LookupError::CheckParent) => {},
                    Err(LookupError::Unreachable) => unreachable!(),
                }
            }
            Err(GcError::Load(bnd.clone()))
        };
        match lookup() {
            Ok(v) => Ok(v),
            Err(GcError::Load(bnd)) =>
                self.global_scope().get_var_copy(&bnd)
                    .map_err(|_| GcError::Load(bnd.clone())),
            _ => unreachable!(),
        }
    }
    /// Update an existing variable in the current scope, falling back to
    /// the global scope when the current scope reports a `Store` failure.
    fn store(&mut self, var: JsVar, ptr: Option<JsPtrEnum>) -> Result<()> {
        let res = self.curr_scope_mut().update_var(var, ptr);
        if let Err(GcError::Store(var, ptr)) = res {
            self.global_scope_mut().update_var(var, ptr)
        } else {
            res
        }
    }
}
/// Build the shared allocation heap and hand it to a fresh `ScopeManager`.
pub fn init_gc() -> ScopeManager {
    ScopeManager::new(Rc::new(RefCell::new(AllocBox::new())))
}
// End-to-end tests for scope management, allocation, load/store and GC.
#[cfg(test)]
mod tests {
    use super::*;
    use jsrs_common::ast::Exp;
    use jsrs_common::backend::Backend;
    use jsrs_common::types::js_var::{JsKey, JsPtrEnum, JsType, JsVar};
    use jsrs_common::types::binding::Binding;
    use jsrs_common::gc_error::GcError;
    use test_utils;
    // Returning a closure saves its scope; pushing it takes it back out,
    // and popping it saves it again for the next invocation.
    #[test]
    fn test_push_closure_scope() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        mgr.push_scope(&Exp::Undefined);
        let (fn_var, fn_ptr) = test_utils::make_fn(&None, &Vec::new());
        let unique = fn_var.unique.clone();
        mgr.alloc(fn_var, Some(fn_ptr)).unwrap();
        mgr.pop_scope(Some(unique.clone()), false).unwrap();
        assert_eq!(mgr.closures.len(), 1);
        mgr.push_closure_scope(&unique).unwrap();
        assert_eq!(mgr.closures.len(), 0);
        mgr.pop_scope(None, false).unwrap();
        assert_eq!(mgr.closures.len(), 1);
    }
    #[test]
    fn test_pop_scope() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        mgr.push_scope(&Exp::Undefined);
        assert_eq!(mgr.scopes.len(), 2);
        mgr.pop_scope(None, false).unwrap();
        assert_eq!(mgr.scopes.len(), 1);
    }
    // Popping with only the global scope on the stack is an error.
    #[test]
    fn test_pop_scope_fail() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        let res = mgr.pop_scope(None, false);
        assert!(res.is_err());
        assert!(matches!(res, Err(GcError::Scope)));
    }
    // Plain numbers live on the scope stack, not in the shared heap.
    #[test]
    fn test_alloc() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        mgr.alloc(test_utils::make_num(1.), None).unwrap();
        mgr.push_scope(&Exp::Undefined);
        mgr.alloc(test_utils::make_num(2.), None).unwrap();
        assert!(mgr.alloc_box.borrow().is_empty());
    }
    #[test]
    fn test_load() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        let x = test_utils::make_num(1.);
        let x_bnd = mgr.alloc(x, None).unwrap();
        let load = mgr.load(&x_bnd);
        assert!(load.is_ok());
        let load = load.unwrap();
        match load.0.t {
            JsType::JsNum(n) => assert!(f64::abs(n - 1.) < 0.0001),
            _ => unreachable!(),
        }
        assert!(load.1.is_none());
    }
    // Loading an unknown binding yields `GcError::Load` carrying it back.
    #[test]
    fn test_load_fail() {
        let alloc_box = test_utils::make_alloc_box();
        let mgr = ScopeManager::new(alloc_box);
        let bnd = Binding::new("".to_owned());
        let res = mgr.load(&bnd);
        assert!(res.is_err());
        assert!(matches!(res, Err(GcError::Load(_))));
        if let Err(GcError::Load(res_bnd)) = res {
            assert_eq!(bnd, res_bnd);
        }
    }
    #[test]
    fn test_store() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box,);
        mgr.push_scope(&Exp::Undefined);
        let x = test_utils::make_num(1.);
        let x_bnd = mgr.alloc(x, None).unwrap();
        let (mut var, _) = mgr.load(&x_bnd).unwrap();
        var.t = JsType::JsNum(2.);
        assert!(mgr.store(var, None).is_ok());
    }
    // Storing a variable that was never allocated fails.
    #[test]
    fn test_store_fail() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        let x = test_utils::make_num(1.);
        assert!(mgr.store(x, None).is_err());
    }
    // A call boundary hides the caller's locals from the callee.
    #[test]
    fn test_load_from_parent_scope_across_fn_boundary() {
        let heap = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(heap);
        // Avoids having just the global scope available
        mgr.push_scope(&Exp::Call(box Exp::Undefined, vec![]));
        let (x, x_ptr) = test_utils::make_str("x");
        let x_bnd = mgr.alloc(x, Some(x_ptr)).unwrap();
        mgr.push_scope(&Exp::Call(box Exp::Undefined, vec![]));
        let copy = mgr.load(&x_bnd);
        assert!(copy.is_err());
        assert!(matches!(copy, Err(GcError::Load(_))));
    }
    // A plain block scope, by contrast, can see its parent's locals.
    #[test]
    fn test_load_from_parent_scope_no_fn_call() {
        let heap = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(heap);
        // Avoids having just the global scope available
        mgr.push_scope(&Exp::Call(box Exp::Undefined, vec![]));
        let (x, x_ptr) = test_utils::make_str("x");
        let x_bnd = mgr.alloc(x, Some(x_ptr)).unwrap();
        mgr.push_scope(&Exp::Undefined);
        let copy = mgr.load(&x_bnd);
        assert!(copy.is_ok());
        let (var_copy, ptr_copy) = copy.unwrap();
        assert!(matches!(var_copy, JsVar { t: JsType::JsPtr(_), .. }));
        assert!(ptr_copy.is_some());
    }
    // Popping with `gc_yield = true` collects heap data that lost its last
    // reference, while live data survives the stack transfer.
    #[test]
    fn test_transfer_stack_with_yield() {
        let heap = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(heap);
        // Make some scopes
        mgr.push_scope(&Exp::Undefined);
        {
            // Push a child scope
            mgr.push_scope(&Exp::Undefined);
            // Allocate some non-root variables (numbers)
            mgr.alloc(test_utils::make_num(0.), None).unwrap();
            mgr.alloc(test_utils::make_num(1.), None).unwrap();
            mgr.alloc(test_utils::make_num(2.), None).unwrap();
            // Make a string to put into an object
            // (so it's heap-allocated and we can lose its ref from the object)
            let (var, ptr) = test_utils::make_str("test");
            // Create an obj of { true: 1.0, false: heap("test") }
            let kvs = vec![(JsKey::JsSym("true".to_string()),
                            test_utils::make_num(1.), None),
                           (JsKey::JsSym("false".to_string()),
                            var, Some(ptr))];
            let (var, ptr) = test_utils::make_obj(kvs, mgr.alloc_box.clone());
            // Push the obj into the current scope
            let bnd = mgr.alloc(var, Some(ptr)).unwrap();
            // The heap should now have 2 things in it: an object and a string
            assert_eq!(mgr.alloc_box.borrow().len(), 2);
            // Replace the string in the object with something else so it's no longer live
            let copy = mgr.load(&bnd);
            let (var_cp, mut ptr_cp) = copy.unwrap();
            let key = JsKey::JsSym("false".to_string());
            match *&mut ptr_cp {
                Some(JsPtrEnum::JsObj(ref mut obj)) => {
                    obj.add_key(key, test_utils::make_num(-1.), None, &mut *(mgr.alloc_box.borrow_mut()));
                },
                _ => unreachable!()
            }
            mgr.store(var_cp, ptr_cp).unwrap();
            // The heap should still have 2 things in it: an object and a string
            assert_eq!(mgr.alloc_box.borrow().len(), 2);
            // Kill the current scope & give its refs to the parent,
            // allowing the GC to kick in beforehand.
            mgr.pop_scope(None, true).unwrap();
        }
        // The object we created above should still exist
        assert_eq!(mgr.curr_scope().len(), 1);
        // But the string it had allocated shouldn't, since we leaked it into the void
        assert_eq!(mgr.alloc_box.borrow().len(), 1);
    }
}
Add rename_closure method to ScopeManager
#![feature(associated_consts)]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(question_mark)]
//#![feature(plugin)]
//#![plugin(clippy)]
extern crate jsrs_common;
#[macro_use] extern crate matches;
pub mod alloc;
mod scope;
mod test_utils;
use std::cell::RefCell;
use std::collections::hash_map::HashMap;
use std::rc::Rc;
use jsrs_common::ast::Exp;
use jsrs_common::backend::Backend;
use jsrs_common::types::js_var::{JsPtrEnum, JsVar};
use jsrs_common::types::binding::{Binding, UniqueBinding};
use alloc::AllocBox;
use jsrs_common::gc_error::{GcError, Result};
use scope::{LookupError, Scope, ScopeTag};
/// Owns the runtime scope stack, the saved closure scopes, and the shared
/// heap (`AllocBox`) used by the garbage collector.
pub struct ScopeManager {
    // Scope stack; index 0 is the global scope, `last()` is the current one.
    scopes: Vec<Scope>,
    // Scopes captured by returned closures, keyed by the closure's binding.
    closures: HashMap<UniqueBinding, Scope>,
    // Shared heap for pointer-backed JS values.
    alloc_box: Rc<RefCell<AllocBox>>,
}
impl ScopeManager {
/// Create a manager whose stack starts with a single global call scope
/// backed by the shared allocation heap.
fn new(alloc_box: Rc<RefCell<AllocBox>>) -> ScopeManager {
    ScopeManager {
        scopes: vec![Scope::new(ScopeTag::Call, &alloc_box)],
        closures: HashMap::new(),
        alloc_box: alloc_box,
    }
}
/// Innermost (most recently pushed) scope. Panics if the stack is empty.
#[inline]
fn curr_scope(&self) -> &Scope {
    self.scopes.last().expect("Tried to access current scope, but none existed")
}
/// Mutable access to the innermost scope. Panics if the stack is empty.
#[inline]
fn curr_scope_mut(&mut self) -> &mut Scope {
    self.scopes.last_mut().expect("Tried to access current scope, but none existed")
}
/// The outermost (global) scope. Panics if the stack is empty.
#[inline]
fn global_scope(&self) -> &Scope {
    self.scopes.get(0).expect("Tried to access global scope, but none existed")
}
/// Mutable access to the global scope. Panics if the stack is empty.
#[inline]
fn global_scope_mut(&mut self) -> &mut Scope {
    self.scopes.get_mut(0).expect("Tried to access global scope, but none existed")
}
/// Reactivate a saved closure scope: move it out of the closure table and
/// push it onto the scope stack. (`pop_scope` re-registers it afterwards
/// via its `ScopeTag::Closure` tag.)
///
/// # Errors
/// `GcError::Scope` when no closure is registered under `closure`.
pub fn push_closure_scope(&mut self, closure: &UniqueBinding) -> Result<()> {
    let closure_scope = self.closures.remove(closure).ok_or(GcError::Scope)?;
    self.scopes.push(closure_scope);
    Ok(())
}
/// Push a fresh scope: a call frame for function calls, otherwise a block.
pub fn push_scope(&mut self, exp: &Exp) {
    let tag = match *exp {
        Exp::Call(..) => ScopeTag::Call,
        _ => ScopeTag::Block,
    };
    self.scopes.push(Scope::new(tag, &self.alloc_box));
}
/// Pop the current scope, migrating its surviving stack entries either
/// into a new saved closure scope (when `returning_closure` is given) or
/// into the parent scope. When `gc_yield` is set, lets the collector run
/// on the parent afterwards.
///
/// # Errors
/// `GcError::Scope` when the stack is empty, or when the global scope
/// itself was popped (the program is ending; a final GC is triggered).
pub fn pop_scope(&mut self, returning_closure: Option<UniqueBinding>, gc_yield: bool) -> Result<()> {
    if let Some(mut scope) = self.scopes.pop() {
        // Clean up the dying scope's stack and take ownership of its heap-allocated data for
        // later collection
        if self.scopes.is_empty() {
            // The global scope was popped and the program is ending.
            scope.trigger_gc();
            return Err(GcError::Scope);
        }
        if let Some(unique) = returning_closure {
            let mut closure_scope = Scope::new(ScopeTag::Closure(unique.clone()), &self.alloc_box);
            scope.transfer_stack(&mut closure_scope, true)?;
            self.closures.insert(unique, closure_scope);
        } else {
            scope.transfer_stack(self.curr_scope_mut(), false)?
        }
        // Potentially trigger the garbage collector
        if gc_yield {
            self.curr_scope_mut().trigger_gc();
        }
        // NOTE(review): `unique` is already owned here, so the extra
        // `unique.clone()` below looks redundant — confirm and drop it.
        if let ScopeTag::Closure(unique) = scope.tag.clone() {
            self.closures.insert(unique.clone(), scope);
        }
        Ok(())
    } else {
        Err(GcError::Scope)
    }
}
pub fn rename_closure(&mut self, old: &UniqueBinding, new: &UniqueBinding) -> bool {
if self.closures.contains_key(old) {
let scope = self.closures.remove(old).unwrap();
self.closures.insert(new.clone(), scope);
true
} else {
false
}
}
}
impl Backend for ScopeManager {
    /// Allocates `var` (and its optional heap pointer) in the current scope
    /// and returns the binding callers use to refer to it later.
    fn alloc(&mut self, var: JsVar, ptr: Option<JsPtrEnum>) -> Result<Binding> {
        let binding = var.binding.clone();
        self.curr_scope_mut().push_var(var, ptr)?;
        Ok(binding)
    }
    /// Try to load the variable behind a binding
    ///
    /// Walks the scope stack from innermost to outermost; a function-call
    /// boundary stops the walk early. If the walk fails, falls back to the
    /// global scope, since globals are visible everywhere.
    fn load(&self, bnd: &Binding) -> Result<(JsVar, Option<JsPtrEnum>)> {
        let lookup = || {
            for scope in self.scopes.iter().rev() {
                match scope.get_var_copy(bnd) {
                    Ok(v) => { return Ok(v); },
                    // Hit a function-call boundary: names beyond it are not
                    // lexically visible, so stop walking.
                    Err(LookupError::FnBoundary) => {
                        return Err(GcError::Load(bnd.clone()));
                    },
                    // Not in this scope; keep walking outward.
                    Err(LookupError::CheckParent) => {},
                    Err(LookupError::Unreachable) => unreachable!(),
                }
            }
            Err(GcError::Load(bnd.clone()))
        };
        match lookup() {
            Ok(v) => Ok(v),
            // Walk failed: try the global scope before giving up.
            Err(GcError::Load(bnd)) =>
                self.global_scope().get_var_copy(&bnd)
                    .map_err(|_| GcError::Load(bnd.clone())),
            _ => unreachable!(),
        }
    }
    /// Updates an existing variable: tries the current scope first and, if
    /// it reports a store failure, retries against the global scope.
    fn store(&mut self, var: JsVar, ptr: Option<JsPtrEnum>) -> Result<()> {
        let res = self.curr_scope_mut().update_var(var, ptr);
        if let Err(GcError::Store(var, ptr)) = res {
            self.global_scope_mut().update_var(var, ptr)
        } else {
            res
        }
    }
}
/// Creates a fresh `ScopeManager` backed by a brand-new, empty heap.
pub fn init_gc() -> ScopeManager {
    ScopeManager::new(Rc::new(RefCell::new(AllocBox::new())))
}
#[cfg(test)]
mod tests {
    // Unit tests for ScopeManager: scope push/pop, closure capture,
    // alloc/load/store, and GC behavior on scope transfer.
    use super::*;
    use jsrs_common::ast::Exp;
    use jsrs_common::backend::Backend;
    use jsrs_common::types::js_var::{JsKey, JsPtrEnum, JsType, JsVar};
    use jsrs_common::types::binding::Binding;
    use jsrs_common::gc_error::GcError;
    use test_utils;

    // A closure scope round-trips: captured on pop, removed from the map
    // while executing, and re-captured when popped again.
    #[test]
    fn test_push_closure_scope() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        mgr.push_scope(&Exp::Undefined);
        let (fn_var, fn_ptr) = test_utils::make_fn(&None, &Vec::new());
        let unique = fn_var.unique.clone();
        mgr.alloc(fn_var, Some(fn_ptr)).unwrap();
        mgr.pop_scope(Some(unique.clone()), false).unwrap();
        assert_eq!(mgr.closures.len(), 1);
        mgr.push_closure_scope(&unique).unwrap();
        assert_eq!(mgr.closures.len(), 0);
        mgr.pop_scope(None, false).unwrap();
        assert_eq!(mgr.closures.len(), 1);
    }

    #[test]
    fn test_pop_scope() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        mgr.push_scope(&Exp::Undefined);
        assert_eq!(mgr.scopes.len(), 2);
        mgr.pop_scope(None, false).unwrap();
        assert_eq!(mgr.scopes.len(), 1);
    }

    // Popping the global scope signals program end with GcError::Scope.
    #[test]
    fn test_pop_scope_fail() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        let res = mgr.pop_scope(None, false);
        assert!(res.is_err());
        assert!(matches!(res, Err(GcError::Scope)));
    }

    // Numbers are stack-only values: allocating them must not touch the heap.
    #[test]
    fn test_alloc() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        mgr.alloc(test_utils::make_num(1.), None).unwrap();
        mgr.push_scope(&Exp::Undefined);
        mgr.alloc(test_utils::make_num(2.), None).unwrap();
        assert!(mgr.alloc_box.borrow().is_empty());
    }

    #[test]
    fn test_load() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        let x = test_utils::make_num(1.);
        let x_bnd = mgr.alloc(x, None).unwrap();
        let load = mgr.load(&x_bnd);
        assert!(load.is_ok());
        let load = load.unwrap();
        match load.0.t {
            // Compare floats with a tolerance rather than exact equality.
            JsType::JsNum(n) => assert!(f64::abs(n - 1.) < 0.0001),
            _ => unreachable!(),
        }
        assert!(load.1.is_none());
    }

    // Loading an unknown binding reports GcError::Load carrying that binding.
    #[test]
    fn test_load_fail() {
        let alloc_box = test_utils::make_alloc_box();
        let mgr = ScopeManager::new(alloc_box);
        let bnd = Binding::new("".to_owned());
        let res = mgr.load(&bnd);
        assert!(res.is_err());
        assert!(matches!(res, Err(GcError::Load(_))));
        if let Err(GcError::Load(res_bnd)) = res {
            assert_eq!(bnd, res_bnd);
        }
    }

    #[test]
    fn test_store() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box,);
        mgr.push_scope(&Exp::Undefined);
        let x = test_utils::make_num(1.);
        let x_bnd = mgr.alloc(x, None).unwrap();
        let (mut var, _) = mgr.load(&x_bnd).unwrap();
        var.t = JsType::JsNum(2.);
        assert!(mgr.store(var, None).is_ok());
    }

    // Storing a variable that was never allocated fails.
    #[test]
    fn test_store_fail() {
        let alloc_box = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(alloc_box);
        let x = test_utils::make_num(1.);
        assert!(mgr.store(x, None).is_err());
    }

    // A Call scope is a lookup boundary: bindings beyond it are invisible.
    #[test]
    fn test_load_from_parent_scope_across_fn_boundary() {
        let heap = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(heap);
        // Avoids having just the global scope available
        mgr.push_scope(&Exp::Call(box Exp::Undefined, vec![]));
        let (x, x_ptr) = test_utils::make_str("x");
        let x_bnd = mgr.alloc(x, Some(x_ptr)).unwrap();
        mgr.push_scope(&Exp::Call(box Exp::Undefined, vec![]));
        let copy = mgr.load(&x_bnd);
        assert!(copy.is_err());
        assert!(matches!(copy, Err(GcError::Load(_))));
    }

    // A Block scope is NOT a boundary: parent bindings remain visible.
    #[test]
    fn test_load_from_parent_scope_no_fn_call() {
        let heap = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(heap);
        // Avoids having just the global scope available
        mgr.push_scope(&Exp::Call(box Exp::Undefined, vec![]));
        let (x, x_ptr) = test_utils::make_str("x");
        let x_bnd = mgr.alloc(x, Some(x_ptr)).unwrap();
        mgr.push_scope(&Exp::Undefined);
        let copy = mgr.load(&x_bnd);
        assert!(copy.is_ok());
        let (var_copy, ptr_copy) = copy.unwrap();
        assert!(matches!(var_copy, JsVar { t: JsType::JsPtr(_), .. }));
        assert!(ptr_copy.is_some());
    }

    // End-to-end GC check: an unreferenced heap string is collected when its
    // scope is popped with gc_yield, while the live object survives.
    #[test]
    fn test_transfer_stack_with_yield() {
        let heap = test_utils::make_alloc_box();
        let mut mgr = ScopeManager::new(heap);
        // Make some scopes
        mgr.push_scope(&Exp::Undefined);
        {
            // Push a child scope
            mgr.push_scope(&Exp::Undefined);
            // Allocate some non-root variables (numbers)
            mgr.alloc(test_utils::make_num(0.), None).unwrap();
            mgr.alloc(test_utils::make_num(1.), None).unwrap();
            mgr.alloc(test_utils::make_num(2.), None).unwrap();
            // Make a string to put into an object
            // (so it's heap-allocated and we can lose its ref from the object)
            let (var, ptr) = test_utils::make_str("test");
            // Create an obj of { true: 1.0, false: heap("test") }
            let kvs = vec![(JsKey::JsSym("true".to_string()),
                            test_utils::make_num(1.), None),
                           (JsKey::JsSym("false".to_string()),
                            var, Some(ptr))];
            let (var, ptr) = test_utils::make_obj(kvs, mgr.alloc_box.clone());
            // Push the obj into the current scope
            let bnd = mgr.alloc(var, Some(ptr)).unwrap();
            // The heap should now have 2 things in it: an object and a string
            assert_eq!(mgr.alloc_box.borrow().len(), 2);
            // Replace the string in the object with something else so it's no longer live
            let copy = mgr.load(&bnd);
            let (var_cp, mut ptr_cp) = copy.unwrap();
            let key = JsKey::JsSym("false".to_string());
            match *&mut ptr_cp {
                Some(JsPtrEnum::JsObj(ref mut obj)) => {
                    obj.add_key(key, test_utils::make_num(-1.), None, &mut *(mgr.alloc_box.borrow_mut()));
                },
                _ => unreachable!()
            }
            mgr.store(var_cp, ptr_cp).unwrap();
            // The heap should still have 2 things in it: an object and a string
            assert_eq!(mgr.alloc_box.borrow().len(), 2);
            // Kill the current scope & give its refs to the parent,
            // allowing the GC to kick in beforehand.
            mgr.pop_scope(None, true).unwrap();
        }
        // The object we created above should still exist
        assert_eq!(mgr.curr_scope().len(), 1);
        // But the string it had allocated shouldn't, since we leaked it into the void
        assert_eq!(mgr.alloc_box.borrow().len(), 1);
    }
}
|
#![deny(missing_docs, warnings)]
#![feature(core)]
#![feature(std_misc)]
//! `Router` provides a fast router handler for the Iron web framework.
extern crate iron;
extern crate "route-recognizer" as recognizer;
pub use router::Router;
pub use recognizer::Params;
mod router;
(fix) Fix attributes for latest Rust.
#![deny(missing_docs)]
#![feature(core, std_misc)]
#![cfg_attr(test, deny(warnings))]
//! `Router` provides fast and flexible routing for Iron.
extern crate iron;
extern crate "route-recognizer" as recognizer;
pub use router::Router;
pub use recognizer::Params;
mod router;
|
extern crate crossbeam;
use std::borrow::Borrow;
use std::collections::HashMap;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::thread::JoinHandle;
use std::sync::Arc;
// enums defined below
use ExpressionInner::*;
use ExecutableExpression::*;
use IoArg::*;
mod pipe;
/// Builds an expression that runs `argv[0]` with the remaining elements as
/// its arguments. Each element is copied into an owned `OsString`, so the
/// resulting expression is `'static`.
pub fn cmd<T: AsRef<OsStr>>(argv: &[T]) -> Expression<'static> {
    let mut owned_args = Vec::with_capacity(argv.len());
    for arg in argv {
        owned_args.push(arg.as_ref().to_owned());
    }
    Expression::new(Exec(ArgvCommand(owned_args)))
}
/// Variadic convenience form of `cmd`, e.g. `cmd!("echo", "hi")`. Each
/// argument only needs to implement `AsRef<OsStr>`.
#[macro_export]
macro_rules! cmd {
    ( $( $x:expr ),* ) => {
        {
            use std::ffi::OsStr;
            // Coerce every argument to &OsStr, then collect owned copies to
            // hand off to `cmd`.
            let mut temp_vec = Vec::new();
            $(
                let temp_osstr: &OsStr = $x.as_ref();
                temp_vec.push(temp_osstr.to_owned());
            )*
            $crate::cmd(&temp_vec)
        }
    };
}
pub fn sh<T: AsRef<OsStr>>(command: T) -> Expression<'static> {
Expression {
inner: Arc::new(Exec(ShCommand(command.as_ref()
.to_owned()))),
}
}
/// An immutable expression tree shared behind an `Arc`. Cloning is cheap (a
/// refcount bump), which is how the builder methods (`pipe`, `stdout`, ...)
/// produce new trees without consuming their receiver.
#[derive(Clone, Debug)]
#[must_use]
pub struct Expression<'a> {
    inner: Arc<ExpressionInner<'a>>,
}
// `'b: 'a` lets builder methods accept expressions/redirects that outlive
// the receiver while still returning an Expression<'a>.
impl<'a, 'b> Expression<'a>
    where 'b: 'a
{
    /// Executes the expression, capturing stdout/stderr redirected with
    /// `OutputRedirect::Capture`. A non-zero exit status is reported as
    /// `Error::Status` carrying the full `Output`.
    pub fn run(&self) -> Result<Output, Error> {
        let (context, stdout_reader, stderr_reader) = try!(IoContext::new());
        let status = try!(self.inner.exec(context));
        // `exec` dropped the write ends it duplicated, so the reader threads
        // see EOF and finish; join them to collect the captured bytes.
        let stdout_vec = try!(stdout_reader.join().unwrap());
        let stderr_vec = try!(stderr_reader.join().unwrap());
        let output = Output {
            status: status,
            stdout: stdout_vec,
            stderr: stderr_vec,
        };
        if output.status != 0 {
            Err(Error::Status(output))
        } else {
            Ok(output)
        }
    }
    /// Runs the expression with stdout captured and returns it as a UTF-8
    /// string, with trailing newlines stripped.
    pub fn read(&self) -> Result<String, Error> {
        let output = try!(self.stdout(OutputRedirect::Capture).run());
        let output_str = try!(std::str::from_utf8(&output.stdout));
        // TODO: Handle Windows newlines too.
        Ok(output_str.trim_right_matches('\n').to_owned())
    }
    /// `self | right`: runs both concurrently with self's stdout piped into
    /// right's stdin.
    pub fn pipe<T: Borrow<Expression<'b>>>(&self, right: T) -> Expression<'a> {
        Self::new(Exec(Pipe(self.clone(), right.borrow().clone())))
    }
    /// `self && right`: runs right only if self exits with status 0.
    pub fn then<T: Borrow<Expression<'b>>>(&self, right: T) -> Expression<'a> {
        Self::new(Exec(Then(self.clone(), right.borrow().clone())))
    }
    /// Feeds the given bytes (or string) to the child's stdin.
    pub fn input<T: IntoStdinBytes<'b>>(&self, input: T) -> Self {
        Self::new(Io(Stdin(input.into_stdin_bytes()), self.clone()))
    }
    /// Redirects stdin from a path, file, or explicit `InputRedirect`.
    pub fn stdin<T: IntoStdin<'b>>(&self, stdin: T) -> Self {
        Self::new(Io(Stdin(stdin.into_stdin()), self.clone()))
    }
    /// Redirects stdout to a path, file, or explicit `OutputRedirect`.
    pub fn stdout<T: IntoOutput<'b>>(&self, stdout: T) -> Self {
        Self::new(Io(Stdout(stdout.into_output()), self.clone()))
    }
    /// Redirects stderr to a path, file, or explicit `OutputRedirect`.
    pub fn stderr<T: IntoOutput<'b>>(&self, stderr: T) -> Self {
        Self::new(Io(Stderr(stderr.into_output()), self.clone()))
    }
    /// Sets the child's working directory.
    pub fn dir<T: AsRef<Path>>(&self, path: T) -> Self {
        Self::new(Io(Dir(path.as_ref().to_owned()), self.clone()))
    }
    /// Sets one environment variable for the child.
    pub fn env<T: AsRef<OsStr>, U: AsRef<OsStr>>(&self, name: T, val: U) -> Self {
        Self::new(Io(Env(name.as_ref().to_owned(), val.as_ref().to_owned()),
                     self.clone()))
    }
    /// Removes one environment variable from the child's environment.
    pub fn env_remove<T: AsRef<OsStr>>(&self, name: T) -> Self {
        Self::new(Io(EnvRemove(name.as_ref().to_owned()), self.clone()))
    }
    /// Clears the child's entire environment.
    pub fn env_clear(&self) -> Self {
        Self::new(Io(EnvClear, self.clone()))
    }
    /// Makes a non-zero exit status report as success (status 0) instead of
    /// an error.
    pub fn unchecked(&self) -> Self {
        Self::new(Io(Unchecked, self.clone()))
    }
    // Wraps an inner node in the Arc'd public type.
    fn new(inner: ExpressionInner<'a>) -> Self {
        Expression { inner: Arc::new(inner) }
    }
}
// The tree that `run` walks: either a leaf/binary node that spawns processes
// (`Exec`) or an I/O modification wrapped around a nested expression (`Io`).
#[derive(Debug)]
enum ExpressionInner<'a> {
    Exec(ExecutableExpression<'a>),
    Io(IoArg<'a>, Expression<'a>),
}

impl<'a> ExpressionInner<'a> {
    /// Executes this node against `parent_context` and returns the exit
    /// status. `Io` nodes derive a modified child context before recursing.
    fn exec(&self, parent_context: IoContext) -> io::Result<Status> {
        match *self {
            Exec(ref executable) => executable.exec(parent_context),
            Io(ref ioarg, ref expr) => {
                ioarg.with_child_context(parent_context, |context| expr.inner.exec(context))
            }
        }
    }
}

// The expression forms that actually spawn child processes.
#[derive(Debug)]
enum ExecutableExpression<'a> {
    ArgvCommand(Vec<OsString>),
    ShCommand(OsString),
    Pipe(Expression<'a>, Expression<'a>),
    Then(Expression<'a>, Expression<'a>),
}

impl<'a> ExecutableExpression<'a> {
    /// Dispatches to the matching `exec_*` helper below.
    fn exec(&self, context: IoContext) -> io::Result<Status> {
        match *self {
            ArgvCommand(ref argv) => exec_argv(argv, context),
            ShCommand(ref command) => exec_sh(command, context),
            Pipe(ref left, ref right) => exec_pipe(left, right, context),
            Then(ref left, ref right) => exec_then(left, right, context),
        }
    }
}
/// Spawns `argv[0]` with `argv[1..]` as its arguments, wiring up the stdio
/// handles, working directory, and environment from `context`, and waits for
/// the child to exit.
///
/// An empty `argv` (reachable via `cmd!()` with no arguments) is reported as
/// an `InvalidInput` error instead of panicking on the index.
fn exec_argv<T: AsRef<OsStr>>(argv: &[T], context: IoContext) -> io::Result<Status> {
    let (program, args) = match argv.split_first() {
        Some(pair) => pair,
        None => {
            return Err(io::Error::new(io::ErrorKind::InvalidInput,
                                      "empty argv: no program to execute"));
        }
    };
    let mut command = Command::new(program.as_ref());
    command.args(args);
    command.stdin(context.stdin.into_stdio());
    command.stdout(context.stdout.into_stdio());
    command.stderr(context.stderr.into_stdio());
    command.current_dir(context.dir);
    // Start from a clean slate, then apply the context's environment map so
    // env_clear()/env_remove() semantics are exact.
    command.env_clear();
    for (name, val) in context.env {
        command.env(name, val);
    }
    Ok(try!(command.status()).code().unwrap()) // TODO: Handle signals.
}
/// Runs `command` by handing it to `/bin/sh -c`.
fn exec_sh<T: AsRef<OsStr>>(command: T, context: IoContext) -> io::Result<Status> {
    // TODO: Use COMSPEC on Windows, as Python does. https://docs.python.org/3/library/subprocess.html
    let shell_argv: Vec<&OsStr> = vec!["/bin/sh".as_ref(), "-c".as_ref(), command.as_ref()];
    exec_argv(&shell_argv, context)
}
/// Runs `left` and `right` concurrently with `left`'s stdout feeding
/// `right`'s stdin through a fresh pipe. If `right` failed its status wins;
/// otherwise `left`'s status is returned.
fn exec_pipe(left: &Expression, right: &Expression, context: IoContext) -> io::Result<Status> {
    let (read_pipe, write_pipe) = pipe::open_pipe();
    let mut left_context = context.clone(); // dup'ing stdin/stdout isn't strictly necessary, but no big deal
    left_context.stdout = write_pipe;
    let mut right_context = context;
    right_context.stdin = read_pipe;
    // Run the left side on a scoped thread while the right side runs on the
    // current thread; both are joined before the scope ends, so neither
    // status is lost even if one errors.
    let (left_result, right_result) = crossbeam::scope(|scope| {
        let left_joiner = scope.spawn(|| left.inner.exec(left_context));
        let right_result = right.inner.exec(right_context);
        let left_result = left_joiner.join();
        (left_result, right_result)
    });
    let right_status = try!(right_result);
    let left_status = try!(left_result);
    if right_status != 0 {
        Ok(right_status)
    } else {
        Ok(left_status)
    }
}
/// Runs `left`; if it succeeds (status 0), runs `right` with the same I/O
/// context. A non-zero status from `left` short-circuits and is returned.
fn exec_then(left: &Expression, right: &Expression, context: IoContext) -> io::Result<Status> {
    let left_status = try!(left.inner.exec(context.clone()));
    if left_status == 0 {
        right.inner.exec(context)
    } else {
        Ok(left_status)
    }
}
// One I/O modification applied around a nested expression at execution time.
#[derive(Debug)]
enum IoArg<'a> {
    Stdin(InputRedirect<'a>),
    Stdout(OutputRedirect<'a>),
    Stderr(OutputRedirect<'a>),
    Dir(PathBuf),
    Env(OsString, OsString),
    EnvRemove(OsString),
    EnvClear,
    Unchecked,
}

impl<'a> IoArg<'a> {
    /// Applies this modification to (a moved copy of) `parent_context`, runs
    /// `inner` with the modified context, then does the required cleanup:
    /// joining any stdin writer thread and masking the status for
    /// `Unchecked`.
    fn with_child_context<F>(&self, parent_context: IoContext, inner: F) -> io::Result<Status>
        where F: FnOnce(IoContext) -> io::Result<Status>
    {
        // The crossbeam scope lets byte-input redirects spawn a writer
        // thread that borrows from `self`.
        crossbeam::scope(|scope| {
            let mut context = parent_context; // move it into the closure
            let mut maybe_stdin_thread = None;
            let mut unchecked = false;
            // Put together the redirected context.
            match *self {
                Stdin(ref redir) => {
                    let (handle, maybe_thread) = try!(redir.open_handle_maybe_thread(scope));
                    maybe_stdin_thread = maybe_thread;
                    context.stdin = handle;
                }
                Stdout(ref redir) => {
                    context.stdout = try!(redir.open_handle(&context.stdout,
                                                            &context.stderr,
                                                            &context.stdout_capture));
                }
                Stderr(ref redir) => {
                    context.stderr = try!(redir.open_handle(&context.stdout,
                                                            &context.stderr,
                                                            &context.stderr_capture));
                }
                Dir(ref path) => {
                    context.dir = path.to_owned();
                }
                Env(ref name, ref val) => {
                    context.env.insert(name.to_owned(), val.to_owned());
                }
                EnvRemove(ref name) => {
                    context.env.remove(name);
                }
                EnvClear => {
                    context.env.clear();
                }
                Unchecked => {
                    unchecked = true;
                }
            }
            // Run the inner closure.
            let status = try!(inner(context));
            // Join the input thread, if any.
            if let Some(thread) = maybe_stdin_thread {
                if let Err(writer_error) = thread.join() {
                    // A broken pipe error happens if the process on the other end exits before
                    // we're done writing. We ignore those but return any other errors to the
                    // caller.
                    if writer_error.kind() != io::ErrorKind::BrokenPipe {
                        return Err(writer_error);
                    }
                }
            }
            if unchecked {
                // Return status 0 (success) for ignored expressions.
                Ok(0)
            } else {
                // Otherwise return the real status.
                Ok(status)
            }
        })
    }
}
/// All the ways stdin can be redirected. Borrowing variants (`Path`,
/// `FileRef`, `BytesSlice`) pair with owning variants (`PathBuf`, `File`,
/// `BytesVec`) so both temporary and `'static` expressions can be built.
#[derive(Debug)]
pub enum InputRedirect<'a> {
    Null,
    Path(&'a Path),
    PathBuf(PathBuf),
    FileRef(&'a File),
    File(File),
    BytesSlice(&'a [u8]),
    BytesVec(Vec<u8>),
}

impl<'a> InputRedirect<'a> {
    /// Converts the redirect into a readable pipe handle. For the byte
    /// variants this also spawns a scoped thread that writes the bytes into
    /// the pipe; the caller must join that thread (see
    /// `IoArg::with_child_context`) after the child finishes reading.
    fn open_handle_maybe_thread(&'a self,
                                scope: &crossbeam::Scope<'a>)
                                -> io::Result<(pipe::Handle, Option<WriterThread>)> {
        let mut maybe_thread = None;
        let handle = match *self {
            InputRedirect::Null => pipe::Handle::from_file(try!(File::open("/dev/null"))), // TODO: Windows
            InputRedirect::Path(ref p) => pipe::Handle::from_file(try!(File::open(p))),
            InputRedirect::PathBuf(ref p) => pipe::Handle::from_file(try!(File::open(p))),
            InputRedirect::FileRef(ref f) => pipe::Handle::dup_file(f),
            InputRedirect::File(ref f) => pipe::Handle::dup_file(f),
            InputRedirect::BytesSlice(ref b) => {
                let (handle, thread) = pipe_with_writer_thread(b, scope);
                maybe_thread = Some(thread);
                handle
            }
            InputRedirect::BytesVec(ref b) => {
                let (handle, thread) = pipe_with_writer_thread(b, scope);
                maybe_thread = Some(thread);
                handle
            }
        };
        Ok((handle, maybe_thread))
    }
}
/// Conversion into a byte-based stdin redirect; used by `Expression::input`.
pub trait IntoStdinBytes<'a> {
    fn into_stdin_bytes(self) -> InputRedirect<'a>;
}

// Borrowed byte-like inputs become `BytesSlice`; owned ones `BytesVec`.
// `&Vec<u8>` and `&String` get their own impls because trait matching does
// not apply Deref coercions (see test_ergonomics).
impl<'a> IntoStdinBytes<'a> for &'a [u8] {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self)
    }
}

impl<'a> IntoStdinBytes<'a> for &'a Vec<u8> {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}

impl IntoStdinBytes<'static> for Vec<u8> {
    fn into_stdin_bytes(self) -> InputRedirect<'static> {
        InputRedirect::BytesVec(self)
    }
}

impl<'a> IntoStdinBytes<'a> for &'a str {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}

impl<'a> IntoStdinBytes<'a> for &'a String {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}

impl IntoStdinBytes<'static> for String {
    fn into_stdin_bytes(self) -> InputRedirect<'static> {
        InputRedirect::BytesVec(self.into_bytes())
    }
}

/// Conversion into a stdin redirect; used by `Expression::stdin`. Note that
/// string-like types are treated as *paths* here, unlike `IntoStdinBytes`.
pub trait IntoStdin<'a> {
    fn into_stdin(self) -> InputRedirect<'a>;
}

impl<'a> IntoStdin<'a> for InputRedirect<'a> {
    fn into_stdin(self) -> InputRedirect<'a> {
        self
    }
}

impl<'a> IntoStdin<'a> for &'a Path {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self)
    }
}

impl<'a> IntoStdin<'a> for &'a PathBuf {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}

impl IntoStdin<'static> for PathBuf {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self)
    }
}

impl<'a> IntoStdin<'a> for &'a str {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}

impl<'a> IntoStdin<'a> for &'a String {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}

impl IntoStdin<'static> for String {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self.into())
    }
}

impl<'a> IntoStdin<'a> for &'a OsStr {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}

impl<'a> IntoStdin<'a> for &'a OsString {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}

impl IntoStdin<'static> for OsString {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self.into())
    }
}

impl<'a> IntoStdin<'a> for &'a File {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::FileRef(self)
    }
}

impl IntoStdin<'static> for File {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::File(self)
    }
}
/// All the ways stdout/stderr can be redirected. `Capture` routes into the
/// capture pipe collected by `run`; `Stdout`/`Stderr` swap in the sibling
/// stream (e.g. 2>&1).
#[derive(Debug)]
pub enum OutputRedirect<'a> {
    Capture,
    Null,
    Stdout,
    Stderr,
    Path(&'a Path),
    PathBuf(PathBuf),
    FileRef(&'a File),
    File(File),
}

impl<'a> OutputRedirect<'a> {
    /// Resolves this redirect to a concrete writable pipe handle, given the
    /// currently-inherited stdout/stderr and the capture pipe for the stream
    /// being redirected.
    fn open_handle(&self,
                   inherited_stdout: &pipe::Handle,
                   inherited_stderr: &pipe::Handle,
                   capture_handle: &pipe::Handle)
                   -> io::Result<pipe::Handle> {
        Ok(match *self {
            OutputRedirect::Capture => capture_handle.clone(),
            OutputRedirect::Null => pipe::Handle::from_file(try!(File::create("/dev/null"))), // TODO: Windows
            OutputRedirect::Stdout => inherited_stdout.clone(),
            OutputRedirect::Stderr => inherited_stderr.clone(),
            OutputRedirect::Path(ref p) => pipe::Handle::from_file(try!(File::create(p))),
            OutputRedirect::PathBuf(ref p) => pipe::Handle::from_file(try!(File::create(p))),
            OutputRedirect::FileRef(ref f) => pipe::Handle::dup_file(f),
            OutputRedirect::File(ref f) => pipe::Handle::dup_file(f),
        })
    }
}

/// Conversion into an output redirect; used by `Expression::stdout` and
/// `Expression::stderr`. String-like types are treated as paths.
pub trait IntoOutput<'a> {
    fn into_output(self) -> OutputRedirect<'a>;
}

impl<'a> IntoOutput<'a> for OutputRedirect<'a> {
    fn into_output(self) -> OutputRedirect<'a> {
        self
    }
}

impl<'a> IntoOutput<'a> for &'a Path {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self)
    }
}

impl<'a> IntoOutput<'a> for &'a PathBuf {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}

impl IntoOutput<'static> for PathBuf {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self)
    }
}

impl<'a> IntoOutput<'a> for &'a str {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}

impl<'a> IntoOutput<'a> for &'a String {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}

impl IntoOutput<'static> for String {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self.into())
    }
}

impl<'a> IntoOutput<'a> for &'a OsStr {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}

impl<'a> IntoOutput<'a> for &'a OsString {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}

impl IntoOutput<'static> for OsString {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self.into())
    }
}

impl<'a> IntoOutput<'a> for &'a File {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::FileRef(self)
    }
}

impl IntoOutput<'static> for File {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::File(self)
    }
}
// We can't use std::process::{Output, Status}, because we need to be able to instantiate the
// success status value ourselves.
pub type Status = i32;

/// The collected result of running an expression: exit status plus whatever
/// stdout/stderr bytes were captured.
#[derive(Clone, Debug)]
pub struct Output {
    pub status: Status,
    pub stdout: Vec<u8>,
    pub stderr: Vec<u8>,
}

/// Everything that can go wrong while running or reading an expression:
/// OS-level I/O failures, non-UTF-8 captured output (from `read`), or a
/// non-zero exit status (which carries the full `Output`).
#[derive(Debug)]
pub enum Error {
    Io(io::Error),
    Utf8(std::str::Utf8Error),
    Status(Output),
}

impl From<io::Error> for Error {
    fn from(err: io::Error) -> Error {
        Error::Io(err)
    }
}

impl From<std::str::Utf8Error> for Error {
    fn from(err: std::str::Utf8Error) -> Error {
        Error::Utf8(err)
    }
}
// An IoContext represents the file descriptors child processes are talking to at execution time.
// It's initialized in run(), with dups of the stdin/stdout/stderr pipes, and then passed down to
// sub-expressions. Compound expressions will clone() it, and redirections will modify it.
#[derive(Clone, Debug)]
pub struct IoContext {
    stdin: pipe::Handle,
    stdout: pipe::Handle,
    stderr: pipe::Handle,
    // Write ends of the capture pipes; `OutputRedirect::Capture` swaps one
    // of these in for stdout/stderr. The read ends are drained by the
    // reader threads returned from `new`.
    stdout_capture: pipe::Handle,
    stderr_capture: pipe::Handle,
    dir: PathBuf,
    env: HashMap<OsString, OsString>,
}

impl IoContext {
    // Returns (context, stdout_reader, stderr_reader).
    fn new() -> io::Result<(IoContext, ReaderThread, ReaderThread)> {
        let (stdout_capture, stdout_reader) = pipe_with_reader_thread();
        let (stderr_capture, stderr_reader) = pipe_with_reader_thread();
        // Snapshot the parent's environment as the starting point.
        let mut env = HashMap::new();
        for (name, val) in std::env::vars_os() {
            env.insert(name, val);
        }
        let context = IoContext {
            stdin: pipe::Handle::stdin(),
            stdout: pipe::Handle::stdout(),
            stderr: pipe::Handle::stderr(),
            stdout_capture: stdout_capture,
            stderr_capture: stderr_capture,
            dir: try!(std::env::current_dir()),
            env: env,
        };
        Ok((context, stdout_reader, stderr_reader))
    }
}
// A joinable background thread that drains a pipe and yields the bytes read.
type ReaderThread = JoinHandle<io::Result<Vec<u8>>>;

/// Opens a pipe and spawns a thread that reads the read end to EOF. Returns
/// the write end plus the reader thread's handle; join the handle once all
/// writers have closed to collect the captured bytes.
fn pipe_with_reader_thread() -> (pipe::Handle, ReaderThread) {
    let (reader_handle, writer_handle) = pipe::open_pipe();
    let join_handle = std::thread::spawn(move || {
        let mut source = reader_handle.into_file();
        let mut captured = Vec::new();
        try!(source.read_to_end(&mut captured));
        Ok(captured)
    });
    (writer_handle, join_handle)
}
// A scoped thread that feeds borrowed bytes into a pipe's write end.
type WriterThread = crossbeam::ScopedJoinHandle<io::Result<()>>;

/// Opens a pipe and spawns a scoped thread that writes all of `input` into
/// the write end, then drops it (closing the pipe). Returns the read end
/// plus the writer's join handle; joining surfaces any write error — callers
/// deliberately ignore `BrokenPipe` (see `IoArg::with_child_context`).
fn pipe_with_writer_thread<'a>(input: &'a [u8],
                               scope: &crossbeam::Scope<'a>)
                               -> (pipe::Handle, WriterThread) {
    let (read_pipe, write_pipe) = pipe::open_pipe();
    let thread = scope.spawn(move || {
        let mut write_file = write_pipe.into_file();
        try!(write_file.write_all(&input));
        Ok(())
    });
    (read_pipe, thread)
}
#[cfg(test)]
mod test {
    // Integration-style tests: these spawn real child processes (echo, sed,
    // cat, sh) and so only run on Unix-like systems.
    extern crate tempfile;
    extern crate tempdir;
    use super::*;
    use std::env;
    use std::io::prelude::*;
    use std::io::SeekFrom;
    use std::path::Path;

    #[test]
    fn test_cmd() {
        let output = cmd!("echo", "hi").read().unwrap();
        assert_eq!("hi", output);
    }

    #[test]
    fn test_sh() {
        let output = sh("echo hi").read().unwrap();
        assert_eq!("hi", output);
    }

    // A non-zero exit status surfaces as Error::Status carrying the Output.
    #[test]
    fn test_error() {
        let result = cmd!("false").run();
        if let Err(Error::Status(output)) = result {
            // Check that the status is non-zero.
            assert!(output.status != 0);
        } else {
            panic!("Expected a status error.");
        }
    }

    // unchecked() masks failure, so the `then` chain keeps going.
    #[test]
    fn test_ignore() {
        let ignored_false = cmd!("false").unchecked();
        let output = ignored_false.then(cmd!("echo", "waa")).then(ignored_false).read().unwrap();
        assert_eq!("waa", output);
    }

    #[test]
    fn test_pipe() {
        let output = sh("echo hi").pipe(sh("sed s/i/o/")).read().unwrap();
        assert_eq!("ho", output);
    }

    #[test]
    fn test_then() {
        let output = sh("echo -n hi").then(sh("echo lo")).read().unwrap();
        assert_eq!("hilo", output);
    }

    #[test]
    fn test_input() {
        // TODO: Fixed-length bytes input like b"foo" works poorly here. Why?
        let expr = sh("sed s/f/g/").input("foo");
        let output = expr.read().unwrap();
        assert_eq!("goo", output);
    }

    #[test]
    fn test_null() {
        // TODO: The separation between InputRedirect and OutputRedirect here is tedious.
        let expr = cmd!("cat")
                       .stdin(InputRedirect::Null)
                       .stdout(OutputRedirect::Null)
                       .stderr(OutputRedirect::Null);
        let output = expr.read().unwrap();
        assert_eq!("", output);
    }

    // Path-based stdin/stdout redirection through real temp files.
    #[test]
    fn test_path() {
        let mut input_file = tempfile::NamedTempFile::new().unwrap();
        let output_file = tempfile::NamedTempFile::new().unwrap();
        input_file.write_all(b"foo").unwrap();
        let expr = sh("sed s/o/a/g").stdin(input_file.path()).stdout(output_file.path());
        let output = expr.read().unwrap();
        assert_eq!("", output);
        let mut file_output = String::new();
        output_file.as_ref().read_to_string(&mut file_output).unwrap();
        assert_eq!("faa", file_output);
    }

    // Owned inputs (the BytesVec variant) let the data outlive the caller's
    // local scope.
    #[test]
    fn test_owned_input() {
        fn with_input<'a>(expr: &Expression<'a>) -> Expression<'a> {
            let mystr = format!("I own this: {}", "foo");
            // This would be a lifetime error if we tried to use &mystr.
            expr.input(mystr)
        }
        let c = cmd!("cat");
        let c_with_input = with_input(&c);
        let output = c_with_input.read().unwrap();
        assert_eq!("I own this: foo", output);
    }

    // OutputRedirect::Stdout on stderr is the 2>&1 idiom.
    #[test]
    fn test_stderr_to_stdout() {
        let command = sh("echo hi >&2").stderr(OutputRedirect::Stdout);
        let output = command.read().unwrap();
        assert_eq!("hi", output);
    }

    #[test]
    fn test_file() {
        let mut temp = tempfile::NamedTempFile::new().unwrap();
        temp.write_all(b"example").unwrap();
        temp.seek(SeekFrom::Start(0)).unwrap();
        let expr = cmd!("cat").stdin(temp.as_ref());
        let output = expr.read().unwrap();
        assert_eq!(output, "example");
    }

    #[test]
    fn test_ergonomics() {
        // We don't get automatic Deref when we're matching trait implementations, so in addition
        // to implementing String and &str, we *also* implement &String.
        // TODO: See if specialization can clean this up.
        let mystr = "owned string".to_owned();
        let mypathbuf = Path::new("a/b/c").to_owned();
        let myvec = vec![1, 2, 3];
        // These are nonsense expressions. We just want to make sure they compile.
        let _ = sh("true").stdin(&*mystr).input(&*myvec).stdout(&*mypathbuf);
        let _ = sh("true").stdin(&mystr).input(&myvec).stdout(&mypathbuf);
        let _ = sh("true").stdin(mystr).input(myvec).stdout(mypathbuf);
    }

    #[test]
    fn test_capture_both() {
        let output = sh("echo -n hi; echo -n lo >&2")
                         .stdout(OutputRedirect::Capture)
                         .stderr(OutputRedirect::Capture)
                         .run()
                         .unwrap();
        assert_eq!(b"hi", &*output.stdout);
        assert_eq!(b"lo", &*output.stderr);
    }

    #[test]
    fn test_cwd() {
        // First assert that ordinary commands happen in the parent's dir.
        let pwd_output = cmd!("pwd").read().unwrap();
        let pwd_path = Path::new(&pwd_output);
        assert_eq!(pwd_path, env::current_dir().unwrap());
        // Now create a temp dir and make sure we can set dir to it.
        let dir = tempdir::TempDir::new("duct_test").unwrap();
        let pwd_output = cmd!("pwd").dir(dir.path()).read().unwrap();
        let pwd_path = Path::new(&pwd_output);
        assert_eq!(pwd_path, dir.path());
    }

    #[test]
    fn test_env() {
        let output = sh("echo $foo").env("foo", "bar").read().unwrap();
        assert_eq!("bar", output);
    }

    #[test]
    fn test_env_remove() {
        // Set a var twice, both in the parent process and with an env() call. Make sure a single
        // env_remove() call clears both.
        let var_name = "test_env_remove_var";
        env::set_var(var_name, "junk1");
        let command = format!("echo ${}", var_name);
        let output = sh(command).env_remove(var_name).env(var_name, "junk2").read().unwrap();
        assert_eq!("", output);
    }

    #[test]
    fn test_env_clear() {
        // As test_env_remove, but with env_clear().
        let var_name = "test_env_remove_var";
        env::set_var(var_name, "junk1");
        let command = format!("echo ${}", var_name);
        let output = sh(command).env_clear().env(var_name, "junk2").read().unwrap();
        assert_eq!("", output);
    }

    #[test]
    fn test_broken_pipe() {
        // If the input writing thread fills up its pipe buffer, writing will block. If the process
        // on the other end of the pipe exits while writer is waiting, the write will return an
        // error. We need to swallow that error, rather than returning it.
        let myvec = vec![0; 1_000_000];
        cmd!("true").input(myvec).run().unwrap();
    }
}
null_stdin, null_stdout, and null_stderr
extern crate crossbeam;
use std::borrow::Borrow;
use std::collections::HashMap;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::thread::JoinHandle;
use std::sync::Arc;
// enums defined below
use ExpressionInner::*;
use ExecutableExpression::*;
use IoArg::*;
mod pipe;
/// Builds an expression that runs `argv[0]` with the remaining elements as
/// its arguments. Each element is copied into an owned `OsString`, so the
/// resulting expression is `'static`.
pub fn cmd<T: AsRef<OsStr>>(argv: &[T]) -> Expression<'static> {
    let argv_vec = argv.iter().map(|arg| arg.as_ref().to_owned()).collect();
    Expression::new(Exec(ArgvCommand(argv_vec)))
}
/// Variadic convenience form of `cmd`, e.g. `cmd!("echo", "hi")`. Each
/// argument only needs to implement `AsRef<OsStr>`.
#[macro_export]
macro_rules! cmd {
    ( $( $x:expr ),* ) => {
        {
            use std::ffi::OsStr;
            // Coerce every argument to &OsStr, then collect owned copies to
            // hand off to `cmd`.
            let mut temp_vec = Vec::new();
            $(
                let temp_osstr: &OsStr = $x.as_ref();
                temp_vec.push(temp_osstr.to_owned());
            )*
            $crate::cmd(&temp_vec)
        }
    };
}
pub fn sh<T: AsRef<OsStr>>(command: T) -> Expression<'static> {
Expression {
inner: Arc::new(Exec(ShCommand(command.as_ref()
.to_owned()))),
}
}
/// An immutable expression tree shared behind an `Arc`. Cloning is cheap (a
/// refcount bump), which is how the builder methods produce new trees
/// without consuming their receiver.
#[derive(Clone, Debug)]
#[must_use]
pub struct Expression<'a> {
    inner: Arc<ExpressionInner<'a>>,
}
// 'b is the lifetime of data borrowed by arguments to the builder methods; it
// must outlive 'a, the lifetime of the expression being built.
impl<'a, 'b> Expression<'a>
    where 'b: 'a
{
    /// Execute the expression and wait for it to finish, capturing stdout and
    /// stderr if Capture redirects are present. A nonzero final status is
    /// reported as `Err(Error::Status)` unless `unchecked()` was applied.
    pub fn run(&self) -> Result<Output, Error> {
        // Fresh capture pipes plus reader threads for this execution.
        let (context, stdout_reader, stderr_reader) = try!(IoContext::new());
        let status = try!(self.inner.exec(context));
        // The outer unwrap propagates reader-thread panics; the inner try!
        // propagates I/O errors from draining the capture pipes.
        let stdout_vec = try!(stdout_reader.join().unwrap());
        let stderr_vec = try!(stderr_reader.join().unwrap());
        let output = Output {
            status: status,
            stdout: stdout_vec,
            stderr: stderr_vec,
        };
        if output.status != 0 {
            Err(Error::Status(output))
        } else {
            Ok(output)
        }
    }
    /// Run with stdout captured and return it as UTF-8 text with trailing
    /// newlines stripped, like shell `$(...)` substitution.
    pub fn read(&self) -> Result<String, Error> {
        let output = try!(self.stdout(OutputRedirect::Capture).run());
        let output_str = try!(std::str::from_utf8(&output.stdout));
        // TODO: Handle Windows newlines too.
        Ok(output_str.trim_right_matches('\n').to_owned())
    }
    /// Pipe this expression's stdout into `right`'s stdin (shell `|`).
    pub fn pipe<T: Borrow<Expression<'b>>>(&self, right: T) -> Expression<'a> {
        Self::new(Exec(Pipe(self.clone(), right.borrow().clone())))
    }
    /// Run `right` only if this expression succeeds (shell `&&`).
    pub fn then<T: Borrow<Expression<'b>>>(&self, right: T) -> Expression<'a> {
        Self::new(Exec(Then(self.clone(), right.borrow().clone())))
    }
    /// Supply a byte/string buffer as the child's stdin.
    pub fn input<T: IntoStdinBytes<'b>>(&self, input: T) -> Self {
        Self::new(Io(Stdin(input.into_stdin_bytes()), self.clone()))
    }
    /// Redirect stdin from a path, open file, or explicit InputRedirect.
    pub fn stdin<T: IntoStdin<'b>>(&self, stdin: T) -> Self {
        Self::new(Io(Stdin(stdin.into_stdin()), self.clone()))
    }
    /// Redirect stdin from /dev/null.
    pub fn null_stdin(&self) -> Self {
        Self::new(Io(Stdin(InputRedirect::Null), self.clone()))
    }
    /// Redirect stdout to a path, open file, or explicit OutputRedirect.
    pub fn stdout<T: IntoOutput<'b>>(&self, stdout: T) -> Self {
        Self::new(Io(Stdout(stdout.into_output()), self.clone()))
    }
    /// Discard stdout.
    pub fn null_stdout(&self) -> Self {
        Self::new(Io(Stdout(OutputRedirect::Null), self.clone()))
    }
    /// Redirect stderr to a path, open file, or explicit OutputRedirect.
    pub fn stderr<T: IntoOutput<'b>>(&self, stderr: T) -> Self {
        Self::new(Io(Stderr(stderr.into_output()), self.clone()))
    }
    /// Discard stderr.
    pub fn null_stderr(&self) -> Self {
        Self::new(Io(Stderr(OutputRedirect::Null), self.clone()))
    }
    /// Set the child's working directory.
    pub fn dir<T: AsRef<Path>>(&self, path: T) -> Self {
        Self::new(Io(Dir(path.as_ref().to_owned()), self.clone()))
    }
    /// Set one environment variable for the child.
    pub fn env<T: AsRef<OsStr>, U: AsRef<OsStr>>(&self, name: T, val: U) -> Self {
        Self::new(Io(Env(name.as_ref().to_owned(), val.as_ref().to_owned()),
                     self.clone()))
    }
    /// Remove one environment variable from the child's environment.
    pub fn env_remove<T: AsRef<OsStr>>(&self, name: T) -> Self {
        Self::new(Io(EnvRemove(name.as_ref().to_owned()), self.clone()))
    }
    /// Clear the child's entire environment.
    pub fn env_clear(&self) -> Self {
        Self::new(Io(EnvClear, self.clone()))
    }
    /// Treat a nonzero exit status from this expression as success.
    pub fn unchecked(&self) -> Self {
        Self::new(Io(Unchecked, self.clone()))
    }
    // Internal constructor: wrap an ExpressionInner node in a fresh Arc.
    fn new(inner: ExpressionInner<'a>) -> Self {
        Expression { inner: Arc::new(inner) }
    }
}
/// The two node kinds of the expression tree: an executable leaf/combinator,
/// or an I/O modifier wrapping an inner expression.
#[derive(Debug)]
enum ExpressionInner<'a> {
    Exec(ExecutableExpression<'a>),
    // The IoArg is applied to the context before the wrapped expression runs.
    Io(IoArg<'a>, Expression<'a>),
}
impl<'a> ExpressionInner<'a> {
    /// Execute this node against `parent_context` and return the exit status.
    fn exec(&self, parent_context: IoContext) -> io::Result<Status> {
        match *self {
            Exec(ref executable) => executable.exec(parent_context),
            // Io nodes adjust the context, then delegate to the wrapped
            // expression with the adjusted copy.
            Io(ref ioarg, ref expr) => {
                ioarg.with_child_context(parent_context, |context| expr.inner.exec(context))
            }
        }
    }
}
/// The executable node kinds: a literal argv, a shell command string, or a
/// pipe/then combination of two sub-expressions.
#[derive(Debug)]
enum ExecutableExpression<'a> {
    ArgvCommand(Vec<OsString>),
    ShCommand(OsString),
    Pipe(Expression<'a>, Expression<'a>),
    Then(Expression<'a>, Expression<'a>),
}
impl<'a> ExecutableExpression<'a> {
    /// Dispatch execution to the free-function helper for this node's kind.
    fn exec(&self, context: IoContext) -> io::Result<Status> {
        match *self {
            ArgvCommand(ref argv) => exec_argv(argv, context),
            ShCommand(ref command) => exec_sh(command, context),
            Pipe(ref left, ref right) => exec_pipe(left, right, context),
            Then(ref left, ref right) => exec_then(left, right, context),
        }
    }
}
/// Spawn the command described by `argv` with the redirections, working
/// directory, and environment from `context`, wait for it, and return its
/// exit code.
fn exec_argv<T: AsRef<OsStr>>(argv: &[T], context: IoContext) -> io::Result<Status> {
    // Robustness fix: indexing argv[0] below would panic on an empty slice
    // (reachable e.g. via `cmd(&[] as &[&str])`). Report a proper error.
    if argv.is_empty() {
        return Err(io::Error::new(io::ErrorKind::InvalidInput,
                                  "cannot execute an empty argv"));
    }
    let mut command = Command::new(&argv[0]);
    command.args(&argv[1..]);
    command.stdin(context.stdin.into_stdio());
    command.stdout(context.stdout.into_stdio());
    command.stderr(context.stderr.into_stdio());
    command.current_dir(context.dir);
    // Children never inherit implicitly: the env map in the context is the
    // complete environment.
    command.env_clear();
    for (name, val) in context.env {
        command.env(name, val);
    }
    // code() is None when the child was killed by a signal; give the panic a
    // meaningful message until signals are handled properly.
    Ok(try!(command.status()).code().expect("child terminated by signal")) // TODO: Handle signals.
}
/// Run `command` through `/bin/sh -c`, reusing `exec_argv` for the actual
/// spawn and status handling.
fn exec_sh<T: AsRef<OsStr>>(command: T, context: IoContext) -> io::Result<Status> {
    // TODO: Use COMSPEC on Windows, as Python does. https://docs.python.org/3/library/subprocess.html
    let shell_argv: [&OsStr; 3] = ["/bin/sh".as_ref(), "-c".as_ref(), command.as_ref()];
    exec_argv(&shell_argv, context)
}
/// Run `left` and `right` concurrently with a pipe connecting left's stdout
/// to right's stdin. Reports right's status if nonzero, otherwise left's, so
/// a failure anywhere in the pipeline surfaces.
fn exec_pipe(left: &Expression, right: &Expression, context: IoContext) -> io::Result<Status> {
    let (read_pipe, write_pipe) = pipe::open_pipe();
    let mut left_context = context.clone(); // dup'ing stdin/stdout isn't strictly necessary, but no big deal
    left_context.stdout = write_pipe;
    let mut right_context = context;
    right_context.stdin = read_pipe;
    // Both sides must run at the same time: left can block writing into the
    // pipe until right drains it. A scoped thread runs left while the current
    // thread runs right.
    let (left_result, right_result) = crossbeam::scope(|scope| {
        let left_joiner = scope.spawn(|| left.inner.exec(left_context));
        let right_result = right.inner.exec(right_context);
        let left_result = left_joiner.join();
        (left_result, right_result)
    });
    // Surface I/O errors from either side before comparing statuses.
    let right_status = try!(right_result);
    let left_status = try!(left_result);
    if right_status != 0 {
        Ok(right_status)
    } else {
        Ok(left_status)
    }
}
/// Run `left`; if (and only if) it succeeds, run `right` with the same
/// context — the `&&` combinator.
fn exec_then(left: &Expression, right: &Expression, context: IoContext) -> io::Result<Status> {
    let left_status = try!(left.inner.exec(context.clone()));
    if left_status == 0 {
        right.inner.exec(context)
    } else {
        // Short-circuit: propagate the failing status without running `right`.
        Ok(left_status)
    }
}
/// One modifier applied to an expression: an I/O redirection, an environment
/// change, a working directory, or unchecked-ness.
#[derive(Debug)]
enum IoArg<'a> {
    Stdin(InputRedirect<'a>),
    Stdout(OutputRedirect<'a>),
    Stderr(OutputRedirect<'a>),
    /// Set the child's working directory.
    Dir(PathBuf),
    /// Set one environment variable (name, value).
    Env(OsString, OsString),
    /// Remove one environment variable.
    EnvRemove(OsString),
    /// Clear the child's whole environment.
    EnvClear,
    /// Report status 0 regardless of the real exit status.
    Unchecked,
}
impl<'a> IoArg<'a> {
    /// Apply this modifier to `parent_context`, run `inner` with the modified
    /// context, join any stdin writer thread, and apply unchecked-ness to the
    /// resulting status.
    fn with_child_context<F>(&self, parent_context: IoContext, inner: F) -> io::Result<Status>
        where F: FnOnce(IoContext) -> io::Result<Status>
    {
        // A crossbeam scope is needed because byte-input stdin redirects spawn
        // a scoped writer thread that borrows the input buffer.
        crossbeam::scope(|scope| {
            let mut context = parent_context; // move it into the closure
            let mut maybe_stdin_thread = None;
            let mut unchecked = false;
            // Put together the redirected context.
            match *self {
                Stdin(ref redir) => {
                    let (handle, maybe_thread) = try!(redir.open_handle_maybe_thread(scope));
                    maybe_stdin_thread = maybe_thread;
                    context.stdin = handle;
                }
                Stdout(ref redir) => {
                    context.stdout = try!(redir.open_handle(&context.stdout,
                                                            &context.stderr,
                                                            &context.stdout_capture));
                }
                Stderr(ref redir) => {
                    context.stderr = try!(redir.open_handle(&context.stdout,
                                                            &context.stderr,
                                                            &context.stderr_capture));
                }
                Dir(ref path) => {
                    context.dir = path.to_owned();
                }
                Env(ref name, ref val) => {
                    context.env.insert(name.to_owned(), val.to_owned());
                }
                EnvRemove(ref name) => {
                    context.env.remove(name);
                }
                EnvClear => {
                    context.env.clear();
                }
                Unchecked => {
                    unchecked = true;
                }
            }
            // Run the inner closure.
            let status = try!(inner(context));
            // Join the input thread, if any.
            if let Some(thread) = maybe_stdin_thread {
                if let Err(writer_error) = thread.join() {
                    // A broken pipe error happens if the process on the other end exits before
                    // we're done writing. We ignore those but return any other errors to the
                    // caller.
                    if writer_error.kind() != io::ErrorKind::BrokenPipe {
                        return Err(writer_error);
                    }
                }
            }
            if unchecked {
                // Return status 0 (success) for ignored expressions.
                Ok(0)
            } else {
                // Otherwise return the real status.
                Ok(status)
            }
        })
    }
}
/// Sources a child's stdin can be redirected from. Borrowed variants avoid
/// copying; owned variants let the expression outlive the caller's data.
#[derive(Debug)]
pub enum InputRedirect<'a> {
    /// Read from /dev/null.
    Null,
    /// Open the file at a borrowed path.
    Path(&'a Path),
    /// Open the file at an owned path.
    PathBuf(PathBuf),
    /// Dup a borrowed open file.
    FileRef(&'a File),
    /// Dup an owned open file.
    File(File),
    /// Feed a borrowed byte slice to the child via a writer thread.
    BytesSlice(&'a [u8]),
    /// Feed an owned byte vector to the child via a writer thread.
    BytesVec(Vec<u8>),
}
impl<'a> InputRedirect<'a> {
    /// Resolve this redirect to a pipe handle for the child's stdin. Byte
    /// inputs additionally spawn a scoped writer thread, which is returned so
    /// the caller can join it after the child exits.
    fn open_handle_maybe_thread(&'a self,
                                scope: &crossbeam::Scope<'a>)
                                -> io::Result<(pipe::Handle, Option<WriterThread>)> {
        let mut maybe_thread = None;
        let handle = match *self {
            InputRedirect::Null => pipe::Handle::from_file(try!(File::open("/dev/null"))), // TODO: Windows
            InputRedirect::Path(ref p) => pipe::Handle::from_file(try!(File::open(p))),
            InputRedirect::PathBuf(ref p) => pipe::Handle::from_file(try!(File::open(p))),
            InputRedirect::FileRef(ref f) => pipe::Handle::dup_file(f),
            InputRedirect::File(ref f) => pipe::Handle::dup_file(f),
            InputRedirect::BytesSlice(ref b) => {
                // Writer thread pushes the bytes into a pipe; the child reads
                // the other end.
                let (handle, thread) = pipe_with_writer_thread(b, scope);
                maybe_thread = Some(thread);
                handle
            }
            InputRedirect::BytesVec(ref b) => {
                let (handle, thread) = pipe_with_writer_thread(b, scope);
                maybe_thread = Some(thread);
                handle
            }
        };
        Ok((handle, maybe_thread))
    }
}
/// Conversion trait for `Expression::input`: values usable as literal stdin
/// bytes. Borrowed types become `BytesSlice` (no copy); owned types become
/// `BytesVec`. The `&Vec`/`&String` impls exist because trait resolution does
/// not auto-deref when matching implementations.
pub trait IntoStdinBytes<'a> {
    fn into_stdin_bytes(self) -> InputRedirect<'a>;
}
impl<'a> IntoStdinBytes<'a> for &'a [u8] {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self)
    }
}
impl<'a> IntoStdinBytes<'a> for &'a Vec<u8> {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}
impl IntoStdinBytes<'static> for Vec<u8> {
    fn into_stdin_bytes(self) -> InputRedirect<'static> {
        InputRedirect::BytesVec(self)
    }
}
impl<'a> IntoStdinBytes<'a> for &'a str {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}
impl<'a> IntoStdinBytes<'a> for &'a String {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}
impl IntoStdinBytes<'static> for String {
    fn into_stdin_bytes(self) -> InputRedirect<'static> {
        InputRedirect::BytesVec(self.into_bytes())
    }
}
/// Conversion trait for `Expression::stdin`: values naming a stdin source.
/// Path-like types (str/String/OsStr/OsString/Path/PathBuf) open the file at
/// that path; File types dup the already-open descriptor; an explicit
/// `InputRedirect` passes through unchanged.
pub trait IntoStdin<'a> {
    fn into_stdin(self) -> InputRedirect<'a>;
}
impl<'a> IntoStdin<'a> for InputRedirect<'a> {
    fn into_stdin(self) -> InputRedirect<'a> {
        self
    }
}
impl<'a> IntoStdin<'a> for &'a Path {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self)
    }
}
impl<'a> IntoStdin<'a> for &'a PathBuf {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl IntoStdin<'static> for PathBuf {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self)
    }
}
impl<'a> IntoStdin<'a> for &'a str {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoStdin<'a> for &'a String {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl IntoStdin<'static> for String {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self.into())
    }
}
impl<'a> IntoStdin<'a> for &'a OsStr {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoStdin<'a> for &'a OsString {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl IntoStdin<'static> for OsString {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self.into())
    }
}
impl<'a> IntoStdin<'a> for &'a File {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::FileRef(self)
    }
}
impl IntoStdin<'static> for File {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::File(self)
    }
}
/// Destinations a child's stdout or stderr can be redirected to.
#[derive(Debug)]
pub enum OutputRedirect<'a> {
    /// Send to the capture pipe collected by `run()`/`read()`.
    Capture,
    /// Discard by writing to /dev/null.
    Null,
    /// Join onto whatever the context's stdout currently is.
    Stdout,
    /// Join onto whatever the context's stderr currently is.
    Stderr,
    /// Create/truncate the file at a borrowed path.
    Path(&'a Path),
    /// Create/truncate the file at an owned path.
    PathBuf(PathBuf),
    /// Dup a borrowed open file.
    FileRef(&'a File),
    /// Dup an owned open file.
    File(File),
}
impl<'a> OutputRedirect<'a> {
    /// Resolve this redirect to a concrete pipe handle, given the context's
    /// current stdout/stderr handles and the appropriate capture pipe.
    fn open_handle(&self,
                   inherited_stdout: &pipe::Handle,
                   inherited_stderr: &pipe::Handle,
                   capture_handle: &pipe::Handle)
                   -> io::Result<pipe::Handle> {
        Ok(match *self {
            OutputRedirect::Capture => capture_handle.clone(),
            OutputRedirect::Null => pipe::Handle::from_file(try!(File::create("/dev/null"))), // TODO: Windows
            OutputRedirect::Stdout => inherited_stdout.clone(),
            OutputRedirect::Stderr => inherited_stderr.clone(),
            OutputRedirect::Path(ref p) => pipe::Handle::from_file(try!(File::create(p))),
            OutputRedirect::PathBuf(ref p) => pipe::Handle::from_file(try!(File::create(p))),
            OutputRedirect::FileRef(ref f) => pipe::Handle::dup_file(f),
            OutputRedirect::File(ref f) => pipe::Handle::dup_file(f),
        })
    }
}
/// Conversion trait for `Expression::stdout`/`stderr`: values naming an
/// output destination. Path-like types create/truncate the file at that path;
/// File types dup the open descriptor; an explicit `OutputRedirect` passes
/// through unchanged.
pub trait IntoOutput<'a> {
    fn into_output(self) -> OutputRedirect<'a>;
}
impl<'a> IntoOutput<'a> for OutputRedirect<'a> {
    fn into_output(self) -> OutputRedirect<'a> {
        self
    }
}
impl<'a> IntoOutput<'a> for &'a Path {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self)
    }
}
impl<'a> IntoOutput<'a> for &'a PathBuf {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl IntoOutput<'static> for PathBuf {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self)
    }
}
impl<'a> IntoOutput<'a> for &'a str {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoOutput<'a> for &'a String {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl IntoOutput<'static> for String {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self.into())
    }
}
impl<'a> IntoOutput<'a> for &'a OsStr {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoOutput<'a> for &'a OsString {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl IntoOutput<'static> for OsString {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self.into())
    }
}
impl<'a> IntoOutput<'a> for &'a File {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::FileRef(self)
    }
}
impl IntoOutput<'static> for File {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::File(self)
    }
}
// We can't use std::process::{Output, Status}, because we need to be able to instantiate the
// success status value ourselves.
/// Child exit code; 0 means success.
pub type Status = i32;
/// Collected result of running an expression.
#[derive(Clone, Debug)]
pub struct Output {
    /// Final exit status of the expression.
    pub status: Status,
    /// Bytes captured from stdout (empty unless stdout was set to Capture).
    pub stdout: Vec<u8>,
    /// Bytes captured from stderr (empty unless stderr was set to Capture).
    pub stderr: Vec<u8>,
}
/// Errors produced when running or reading an expression.
#[derive(Debug)]
pub enum Error {
    /// An underlying I/O error.
    Io(io::Error),
    /// Captured stdout was not valid UTF-8 (only from `read()`).
    Utf8(std::str::Utf8Error),
    /// The expression exited with a nonzero status; the full Output is attached.
    Status(Output),
}
// Conversions used implicitly by try!/? in run() and read().
impl From<io::Error> for Error {
    fn from(err: io::Error) -> Error {
        Error::Io(err)
    }
}
impl From<std::str::Utf8Error> for Error {
    fn from(err: std::str::Utf8Error) -> Error {
        Error::Utf8(err)
    }
}
// An IoContext represents the file descriptors child processes are talking to at execution time.
// It's initialized in run(), with dups of the stdin/stdout/stderr pipes, and then passed down to
// sub-expressions. Compound expressions will clone() it, and redirections will modify it.
#[derive(Clone, Debug)]
pub struct IoContext {
    stdin: pipe::Handle,
    stdout: pipe::Handle,
    stderr: pipe::Handle,
    // Write ends of the capture pipes; OutputRedirect::Capture clones these.
    stdout_capture: pipe::Handle,
    stderr_capture: pipe::Handle,
    // Working directory for spawned commands.
    dir: PathBuf,
    // Complete environment for spawned commands (no implicit inheritance).
    env: HashMap<OsString, OsString>,
}
impl IoContext {
    // Returns (context, stdout_reader, stderr_reader).
    /// Build the root context for one execution: inherited stdio handles,
    /// fresh capture pipes (each with a reader thread already draining it),
    /// the current working directory, and a snapshot of this process's
    /// environment.
    fn new() -> io::Result<(IoContext, ReaderThread, ReaderThread)> {
        let (stdout_capture, stdout_reader) = pipe_with_reader_thread();
        let (stderr_capture, stderr_reader) = pipe_with_reader_thread();
        let mut env = HashMap::new();
        for (name, val) in std::env::vars_os() {
            env.insert(name, val);
        }
        let context = IoContext {
            stdin: pipe::Handle::stdin(),
            stdout: pipe::Handle::stdout(),
            stderr: pipe::Handle::stderr(),
            stdout_capture: stdout_capture,
            stderr_capture: stderr_capture,
            dir: try!(std::env::current_dir()),
            env: env,
        };
        Ok((context, stdout_reader, stderr_reader))
    }
}
type ReaderThread = JoinHandle<io::Result<Vec<u8>>>;

/// Open a pipe and spawn a thread that drains its read end to completion.
/// Returns the write end (to hand to children) plus the reader's join handle.
fn pipe_with_reader_thread() -> (pipe::Handle, ReaderThread) {
    let (read_end, write_end) = pipe::open_pipe();
    let reader = std::thread::spawn(move || {
        let mut source = read_end.into_file();
        let mut collected = Vec::new();
        try!(source.read_to_end(&mut collected));
        Ok(collected)
    });
    (write_end, reader)
}
type WriterThread = crossbeam::ScopedJoinHandle<io::Result<()>>;

/// Open a pipe and spawn a scoped thread that writes all of `input` into it.
/// Returns the read end (to hand to children) plus the writer's join handle.
fn pipe_with_writer_thread<'a>(input: &'a [u8],
                               scope: &crossbeam::Scope<'a>)
                               -> (pipe::Handle, WriterThread) {
    let (read_end, write_end) = pipe::open_pipe();
    let writer = scope.spawn(move || {
        let mut sink = write_end.into_file();
        try!(sink.write_all(input));
        Ok(())
    });
    (read_end, writer)
}
#[cfg(test)]
mod test {
    extern crate tempfile;
    extern crate tempdir;
    use super::*;
    use std::env;
    use std::io::prelude::*;
    use std::io::SeekFrom;
    use std::path::Path;
    // Basic argv execution; read() trims the trailing newline.
    #[test]
    fn test_cmd() {
        let output = cmd!("echo", "hi").read().unwrap();
        assert_eq!("hi", output);
    }
    // Basic shell-string execution.
    #[test]
    fn test_sh() {
        let output = sh("echo hi").read().unwrap();
        assert_eq!("hi", output);
    }
    // A nonzero exit status surfaces as Error::Status carrying the Output.
    #[test]
    fn test_error() {
        let result = cmd!("false").run();
        if let Err(Error::Status(output)) = result {
            // Check that the status is non-zero.
            assert!(output.status != 0);
        } else {
            panic!("Expected a status error.");
        }
    }
    // unchecked() turns a failing status into success on both sides of then().
    #[test]
    fn test_ignore() {
        let ignored_false = cmd!("false").unchecked();
        let output = ignored_false.then(cmd!("echo", "waa")).then(ignored_false).read().unwrap();
        assert_eq!("waa", output);
    }
    // pipe() feeds the left side's stdout into the right side's stdin.
    #[test]
    fn test_pipe() {
        let output = sh("echo hi").pipe(sh("sed s/i/o/")).read().unwrap();
        assert_eq!("ho", output);
    }
    // then() runs both sides sequentially against the same stdout.
    #[test]
    fn test_then() {
        let output = sh("echo -n hi").then(sh("echo lo")).read().unwrap();
        assert_eq!("hilo", output);
    }
    // input() supplies literal bytes as the child's stdin.
    #[test]
    fn test_input() {
        // TODO: Fixed-length bytes input like b"foo" works poorly here. Why?
        let expr = sh("sed s/f/g/").input("foo");
        let output = expr.read().unwrap();
        assert_eq!("goo", output);
    }
    // The null_* helpers wire all three streams to /dev/null.
    #[test]
    fn test_null() {
        // TODO: The separation between InputRedirect and OutputRedirect here is tedious.
        let expr = cmd!("cat")
            .null_stdin()
            .null_stdout()
            .null_stderr();
        let output = expr.read().unwrap();
        assert_eq!("", output);
    }
    // Path redirects read from and write to real files on disk.
    #[test]
    fn test_path() {
        let mut input_file = tempfile::NamedTempFile::new().unwrap();
        let output_file = tempfile::NamedTempFile::new().unwrap();
        input_file.write_all(b"foo").unwrap();
        let expr = sh("sed s/o/a/g").stdin(input_file.path()).stdout(output_file.path());
        let output = expr.read().unwrap();
        assert_eq!("", output);
        let mut file_output = String::new();
        output_file.as_ref().read_to_string(&mut file_output).unwrap();
        assert_eq!("faa", file_output);
    }
    // Owned inputs let the expression outlive the scope that created the buffer.
    #[test]
    fn test_owned_input() {
        fn with_input<'a>(expr: &Expression<'a>) -> Expression<'a> {
            let mystr = format!("I own this: {}", "foo");
            // This would be a lifetime error if we tried to use &mystr.
            expr.input(mystr)
        }
        let c = cmd!("cat");
        let c_with_input = with_input(&c);
        let output = c_with_input.read().unwrap();
        assert_eq!("I own this: foo", output);
    }
    // OutputRedirect::Stdout joins stderr onto the (captured) stdout stream.
    #[test]
    fn test_stderr_to_stdout() {
        let command = sh("echo hi >&2").stderr(OutputRedirect::Stdout);
        let output = command.read().unwrap();
        assert_eq!("hi", output);
    }
    // stdin() accepts an already-open File and dups its descriptor.
    #[test]
    fn test_file() {
        let mut temp = tempfile::NamedTempFile::new().unwrap();
        temp.write_all(b"example").unwrap();
        temp.seek(SeekFrom::Start(0)).unwrap();
        let expr = cmd!("cat").stdin(temp.as_ref());
        let output = expr.read().unwrap();
        assert_eq!(output, "example");
    }
    #[test]
    fn test_ergonomics() {
        // We don't get automatic Deref when we're matching trait implementations, so in addition
        // to implementing String and &str, we *also* implement &String.
        // TODO: See if specialization can clean this up.
        let mystr = "owned string".to_owned();
        let mypathbuf = Path::new("a/b/c").to_owned();
        let myvec = vec![1, 2, 3];
        // These are nonsense expressions. We just want to make sure they compile.
        let _ = sh("true").stdin(&*mystr).input(&*myvec).stdout(&*mypathbuf);
        let _ = sh("true").stdin(&mystr).input(&myvec).stdout(&mypathbuf);
        let _ = sh("true").stdin(mystr).input(myvec).stdout(mypathbuf);
    }
    // stdout and stderr can be captured independently in the same run.
    #[test]
    fn test_capture_both() {
        let output = sh("echo -n hi; echo -n lo >&2")
            .stdout(OutputRedirect::Capture)
            .stderr(OutputRedirect::Capture)
            .run()
            .unwrap();
        assert_eq!(b"hi", &*output.stdout);
        assert_eq!(b"lo", &*output.stderr);
    }
    #[test]
    fn test_cwd() {
        // First assert that ordinary commands happen in the parent's dir.
        let pwd_output = cmd!("pwd").read().unwrap();
        let pwd_path = Path::new(&pwd_output);
        assert_eq!(pwd_path, env::current_dir().unwrap());
        // Now create a temp dir and make sure we can set dir to it.
        let dir = tempdir::TempDir::new("duct_test").unwrap();
        let pwd_output = cmd!("pwd").dir(dir.path()).read().unwrap();
        let pwd_path = Path::new(&pwd_output);
        assert_eq!(pwd_path, dir.path());
    }
    // env() injects a variable visible to the child shell.
    #[test]
    fn test_env() {
        let output = sh("echo $foo").env("foo", "bar").read().unwrap();
        assert_eq!("bar", output);
    }
    #[test]
    fn test_env_remove() {
        // Set a var twice, both in the parent process and with an env() call. Make sure a single
        // env_remove() call clears both.
        let var_name = "test_env_remove_var";
        env::set_var(var_name, "junk1");
        let command = format!("echo ${}", var_name);
        let output = sh(command).env_remove(var_name).env(var_name, "junk2").read().unwrap();
        assert_eq!("", output);
    }
    #[test]
    fn test_env_clear() {
        // As test_env_remove, but with env_clear().
        let var_name = "test_env_remove_var";
        env::set_var(var_name, "junk1");
        let command = format!("echo ${}", var_name);
        let output = sh(command).env_clear().env(var_name, "junk2").read().unwrap();
        assert_eq!("", output);
    }
    #[test]
    fn test_broken_pipe() {
        // If the input writing thread fills up its pipe buffer, writing will block. If the process
        // on the other end of the pipe exits while writer is waiting, the write will return an
        // error. We need to swallow that error, rather than returning it.
        let myvec = vec![0; 1_000_000];
        cmd!("true").input(myvec).run().unwrap();
    }
}
|
//! A simple utility for getting the size of a terminal.
//!
//! Supports Linux, macOS, and Windows.
//!
//! This crate requires a minimum rust version of 1.48.0 (2020-11-19)
//!
//! # Example
//!
//! ```
//! use terminal_size::{Width, Height, terminal_size};
//!
//! let size = terminal_size();
//! if let Some((Width(w), Height(h))) = size {
//! println!("Your terminal is {} cols wide and {} lines tall", w, h);
//! } else {
//! println!("Unable to get terminal size");
//! }
//! ```
//!
/// The width of the terminal, in columns.
///
/// Added the common traits a `u16` newtype should carry (`Clone`, `Copy`,
/// comparison, ordering) so callers can copy and compare sizes directly.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Width(pub u16);
/// The height of the terminal, in rows (lines).
///
/// Added the common traits a `u16` newtype should carry (`Clone`, `Copy`,
/// comparison, ordering) so callers can copy and compare sizes directly.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Height(pub u16);
#[cfg(unix)]
mod unix;
#[cfg(unix)]
pub use crate::unix::{terminal_size, terminal_size_using_fd};
#[cfg(windows)]
mod windows;
#[cfg(windows)]
pub use crate::windows::{terminal_size, terminal_size_using_handle};
/// Fallback for platforms that are neither unix nor windows: the terminal
/// size cannot be queried there, so always report `None`.
#[cfg(not(any(unix, windows)))]
pub fn terminal_size() -> Option<(Width, Height)> { None }
Add common traits to Width and Height
//! A simple utility for getting the size of a terminal.
//!
//! Supports Linux, macOS, and Windows.
//!
//! This crate requires a minimum rust version of 1.48.0 (2020-11-19)
//!
//! # Example
//!
//! ```
//! use terminal_size::{Width, Height, terminal_size};
//!
//! let size = terminal_size();
//! if let Some((Width(w), Height(h))) = size {
//! println!("Your terminal is {} cols wide and {} lines tall", w, h);
//! } else {
//! println!("Unable to get terminal size");
//! }
//! ```
//!
/// The width of the terminal, in columns.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Width(pub u16);
/// The height of the terminal, in rows (lines).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Height(pub u16);
#[cfg(unix)]
mod unix;
#[cfg(unix)]
pub use crate::unix::{terminal_size, terminal_size_using_fd};
#[cfg(windows)]
mod windows;
#[cfg(windows)]
pub use crate::windows::{terminal_size, terminal_size_using_handle};
/// Fallback for platforms that are neither unix nor windows: the terminal
/// size cannot be queried there, so always report `None`.
#[cfg(not(any(unix, windows)))]
pub fn terminal_size() -> Option<(Width, Height)> { None }
|
//! # YUV4MPEG2 (.y4m) Encoder/Decoder
#![deny(missing_docs)]
use std::fmt;
use std::io;
use std::io::Read;
use std::io::Write;
use std::num;
use std::str;
// Maximum number of bytes accepted in a stream or frame parameter line.
const MAX_PARAMS_SIZE: usize = 1024;
// Magic prefix starting every YUV4MPEG2 stream header (note trailing space).
const FILE_MAGICK: &[u8] = b"YUV4MPEG2 ";
// Magic prefix starting every frame header line.
const FRAME_MAGICK: &[u8] = b"FRAME";
// Header lines are terminated by a newline (0x0A).
const TERMINATOR: u8 = 0x0A;
// Parameters within a header line are separated by spaces.
const FIELD_SEP: u8 = b' ';
// Numerator/denominator separator in ratio parameters, e.g. "F30:1".
const RATIO_SEP: u8 = b':';
/// Both encoding and decoding errors.
#[derive(Debug)]
pub enum Error {
    /// End of the file. Technically not an error, but it's easier to process
    /// that way.
    EOF,
    /// Bad input parameters provided.
    BadInput,
    /// Unknown colorspace (possibly just unimplemented).
    UnknownColorspace,
    /// Error while parsing the file/frame header.
    // TODO(Kagami): Better granularity of parse errors.
    ParseError(ParseError),
    /// Error while reading/writing the file.
    IoError(io::Error),
    /// Out of memory (limits exceeded).
    OutOfMemory,
}
impl std::error::Error for crate::Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match *self {
Error::EOF => None,
Error::BadInput => None,
Error::UnknownColorspace => None,
Error::ParseError(ref err) => Some(err),
Error::IoError(ref err) => Some(err),
Error::OutOfMemory => None,
}
}
}
impl fmt::Display for crate::Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::EOF => write!(f, "End of file"),
Error::BadInput => write!(f, "Bad input parameters provided"),
Error::UnknownColorspace => write!(f, "Bad input parameters provided"),
Error::ParseError(ref err) => err.fmt(f),
Error::IoError(ref err) => err.fmt(f),
Error::OutOfMemory => write!(f, "Out of memory (limits exceeded)"),
}
}
}
/// Granular ParseError definitions.
pub enum ParseError {
    /// Error reading y4m header
    InvalidY4M,
    /// Error parsing int
    Int,
    /// Error parsing UTF8
    Utf8,
    /// General Parsing Error
    General,
}
impl std::error::Error for crate::ParseError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match *self {
ParseError::InvalidY4M => None,
ParseError::Int => None,
ParseError::Utf8 => None,
ParseError::General => None,
}
}
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ParseError::InvalidY4M => write!(f, "Error parsing y4m header"),
ParseError::Int => write!(f, "Error parsing Int"),
ParseError::Utf8 => write!(f, "Error parsing UTF8"),
ParseError::General => write!(f, "General parsing error"),
}
}
}
impl fmt::Debug for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ParseError::InvalidY4M => write!(f, "Error parsing y4m header"),
ParseError::Int => write!(f, "Error parsing Int"),
ParseError::Utf8 => write!(f, "Error parsing UTF8"),
ParseError::General => write!(f, "General parsing error"),
}
}
}
// Shorthand for an early return of Err(Error::ParseError($p)) from any
// function returning Result<_, Error>.
macro_rules! parse_error {
    ($p:expr) => {
        return Err(Error::ParseError($p));
    };
}
// Treat UnexpectedEof as our EOF marker; wrap every other I/O failure.
impl From<io::Error> for Error {
    fn from(err: io::Error) -> Error {
        match err.kind() {
            io::ErrorKind::UnexpectedEof => Error::EOF,
            _ => Error::IoError(err),
        }
    }
}
// Integer parsing failures map onto the granular ParseError::Int.
impl From<num::ParseIntError> for Error {
    fn from(_: num::ParseIntError) -> Error {
        Error::ParseError(ParseError::Int)
    }
}
// UTF-8 decoding failures map onto the granular ParseError::Utf8.
impl From<str::Utf8Error> for Error {
    fn from(_: str::Utf8Error) -> Error {
        Error::ParseError(ParseError::Utf8)
    }
}
/// Extension trait adding a delimiter-bounded read to any `Read` type.
trait EnhancedRead {
    /// Read bytes into `buf` until `ch` is seen; returns how many bytes were
    /// read before the delimiter (the delimiter byte itself is consumed from
    /// the stream but not counted).
    fn read_until(&mut self, ch: u8, buf: &mut [u8]) -> Result<usize, Error>;
}
impl<R: Read> EnhancedRead for R {
    // Current implementation does one `read` call per byte. This might be a
    // bit slow for long headers but it simplifies things: we don't need to
    // check whether start of the next frame is already read and so on.
    fn read_until(&mut self, ch: u8, buf: &mut [u8]) -> Result<usize, Error> {
        let mut collected = 0;
        while collected < buf.len() {
            // Read exactly one byte into the next free slot.
            let chunk_size = self.read(&mut buf[collected..=collected])?;
            if chunk_size == 0 {
                // Underlying stream ended before the delimiter appeared.
                return Err(Error::EOF);
            }
            if buf[collected] == ch {
                return Ok(collected);
            }
            collected += chunk_size;
        }
        // Buffer filled without finding the delimiter: treat as a parse error.
        parse_error!(ParseError::General)
    }
}
/// Parse an ASCII decimal number from raw header bytes.
fn parse_bytes(buf: &[u8]) -> Result<usize, Error> {
    // A bit kludgy but seems like there is no other way.
    Ok(str::from_utf8(buf)?.parse()?)
}
/// Simple ratio structure since stdlib lacks one.
#[derive(Debug, Clone, Copy)]
pub struct Ratio {
    /// Numerator.
    pub num: usize,
    /// Denominator.
    pub den: usize,
}
impl Ratio {
    /// Create a new ratio.
    pub fn new(num: usize, den: usize) -> Ratio {
        Ratio { num, den }
    }
}
// Rendered in the y4m header format, e.g. "30:1".
impl fmt::Display for Ratio {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}:{}", self.num, self.den)
    }
}
/// Colorspace (color model/pixel format). Only subset of them is supported.
///
/// From libavformat/yuv4mpegenc.c:
///
/// > yuv4mpeg can only handle yuv444p, yuv422p, yuv420p, yuv411p and gray8
/// pixel formats. And using 'strict -1' also yuv444p9, yuv422p9, yuv420p9,
/// yuv444p10, yuv422p10, yuv420p10, yuv444p12, yuv422p12, yuv420p12,
/// yuv444p14, yuv422p14, yuv420p14, yuv444p16, yuv422p16, yuv420p16, gray9,
/// gray10, gray12 and gray16 pixel formats.
// PartialEq/Eq added: a public C-like enum should be directly comparable so
// callers don't need to pattern-match just to test equality. Purely additive.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Colorspace {
    /// Grayscale only, 8-bit.
    Cmono,
    /// 4:2:0 with coincident chroma planes, 8-bit.
    C420,
    /// 4:2:0 with coincident chroma planes, 10-bit.
    C420p10,
    /// 4:2:0 with coincident chroma planes, 12-bit.
    C420p12,
    /// 4:2:0 with biaxially-displaced chroma planes, 8-bit.
    C420jpeg,
    /// 4:2:0 with vertically-displaced chroma planes, 8-bit.
    C420paldv,
    /// Found in some files. Same as `C420`.
    C420mpeg2,
    /// 4:2:2, 8-bit.
    C422,
    /// 4:2:2, 10-bit.
    C422p10,
    /// 4:2:2, 12-bit.
    C422p12,
    /// 4:4:4, 8-bit.
    C444,
    /// 4:4:4, 10-bit.
    C444p10,
    /// 4:4:4, 12-bit.
    C444p12,
}
impl Colorspace {
    /// Return the bit depth per sample
    #[inline]
    pub fn get_bit_depth(self) -> usize {
        // Grouped by depth; the match stays exhaustive so a new variant is a
        // compile error rather than a silent default.
        match self {
            Colorspace::C420p10 | Colorspace::C422p10 | Colorspace::C444p10 => 10,
            Colorspace::C420p12 | Colorspace::C422p12 | Colorspace::C444p12 => 12,
            Colorspace::Cmono
            | Colorspace::C420
            | Colorspace::C420jpeg
            | Colorspace::C420paldv
            | Colorspace::C420mpeg2
            | Colorspace::C422
            | Colorspace::C444 => 8,
        }
    }
    /// Return the number of bytes in a sample
    #[inline]
    pub fn get_bytes_per_sample(self) -> usize {
        // 8-bit samples fit in one byte; 10- and 12-bit samples take two.
        if self.get_bit_depth() > 8 { 2 } else { 1 }
    }
}
/// Compute the (Y, U, V) plane byte sizes for one frame of the given geometry
/// and colorspace.
fn get_plane_sizes(width: usize, height: usize, colorspace: Colorspace) -> (usize, usize, usize) {
    let sample_bytes = colorspace.get_bytes_per_sample();
    let y_size = width * height * sample_bytes;
    // Subsampled chroma dimensions round up for odd luma dimensions.
    let chroma_width = (width + 1) / 2;
    match colorspace {
        // Grayscale carries no chroma planes at all.
        Colorspace::Cmono => (y_size, 0, 0),
        // 4:2:0: chroma subsampled in both dimensions.
        Colorspace::C420
        | Colorspace::C420p10
        | Colorspace::C420p12
        | Colorspace::C420jpeg
        | Colorspace::C420paldv
        | Colorspace::C420mpeg2 => {
            let chroma_size = chroma_width * ((height + 1) / 2) * sample_bytes;
            (y_size, chroma_size, chroma_size)
        }
        // 4:2:2: chroma subsampled horizontally only.
        Colorspace::C422 | Colorspace::C422p10 | Colorspace::C422p12 => {
            let chroma_size = chroma_width * height * sample_bytes;
            (y_size, chroma_size, chroma_size)
        }
        // 4:4:4: no subsampling; all three planes are luma-sized.
        Colorspace::C444 | Colorspace::C444p10 | Colorspace::C444p12 => (y_size, y_size, y_size),
    }
}
/// Limits on the resources `Decoder` is allowed to use.
#[derive(Clone, Copy, Debug)]
pub struct Limits {
    /// Maximum allowed size of frame buffer, default is 1 GiB.
    pub bytes: usize,
}
impl Default for Limits {
    fn default() -> Limits {
        Limits {
            // 1 GiB: generous for any sane frame, but stops a malformed
            // header from triggering an enormous allocation.
            bytes: 1024 * 1024 * 1024,
        }
    }
}
/// YUV4MPEG2 decoder.
pub struct Decoder<R: Read> {
    reader: R,
    // Scratch buffer for header/parameter lines (MAX_PARAMS_SIZE bytes).
    params_buf: Vec<u8>,
    // Reusable buffer holding one whole frame: Y, then U, then V.
    frame_buf: Vec<u8>,
    // Raw stream parameters, everything after the file magick.
    raw_params: Vec<u8>,
    width: usize,
    height: usize,
    framerate: Ratio,
    colorspace: Colorspace,
    // Plane byte lengths, used to slice frame_buf into Y/U/V views.
    y_len: usize,
    u_len: usize,
}
impl<R: Read> Decoder<R> {
/// Create a new decoder instance.
pub fn new(reader: R) -> Result<Decoder<R>, Error> {
Decoder::new_with_limits(reader, Limits::default())
}
/// Create a new decoder instance with custom limits.
pub fn new_with_limits(mut reader: R, limits: Limits) -> Result<Decoder<R>, Error> {
let mut params_buf = vec![0; MAX_PARAMS_SIZE];
let end_params_pos = reader.read_until(TERMINATOR, &mut params_buf)?;
if end_params_pos < FILE_MAGICK.len() || !params_buf.starts_with(FILE_MAGICK) {
parse_error!(ParseError::InvalidY4M)
}
let raw_params = (¶ms_buf[FILE_MAGICK.len()..end_params_pos]).to_owned();
let mut width = 0;
let mut height = 0;
// Framerate is actually required per spec, but let's be a bit more
// permissive as per ffmpeg behavior.
let mut framerate = Ratio::new(25, 1);
let mut colorspace = None;
// We shouldn't convert it to string because encoding is unspecified.
for param in raw_params.split(|&b| b == FIELD_SEP) {
if param.is_empty() {
continue;
}
let (name, value) = (param[0], ¶m[1..]);
// TODO(Kagami): interlacing, pixel aspect, comment.
match name {
b'W' => width = parse_bytes(value)?,
b'H' => height = parse_bytes(value)?,
b'F' => {
let parts: Vec<_> = value.splitn(2, |&b| b == RATIO_SEP).collect();
if parts.len() != 2 {
parse_error!(ParseError::General)
}
let num = parse_bytes(parts[0])?;
let den = parse_bytes(parts[1])?;
framerate = Ratio::new(num, den);
}
b'C' => {
colorspace = match value {
b"mono" => Some(Colorspace::Cmono),
b"420" => Some(Colorspace::C420),
b"420p10" => Some(Colorspace::C420p10),
b"420p12" => Some(Colorspace::C420p12),
b"422" => Some(Colorspace::C422),
b"422p10" => Some(Colorspace::C422p10),
b"422p12" => Some(Colorspace::C422p12),
b"444" => Some(Colorspace::C444),
b"444p10" => Some(Colorspace::C444p10),
b"444p12" => Some(Colorspace::C444p12),
b"420jpeg" => Some(Colorspace::C420jpeg),
b"420paldv" => Some(Colorspace::C420paldv),
b"420mpeg2" => Some(Colorspace::C420mpeg2),
_ => return Err(Error::UnknownColorspace),
}
}
_ => {}
}
}
let colorspace = colorspace.unwrap_or(Colorspace::C420);
if width == 0 || height == 0 {
parse_error!(ParseError::General)
}
let (y_len, u_len, v_len) = get_plane_sizes(width, height, colorspace);
let frame_size = y_len + u_len + v_len;
if frame_size > limits.bytes {
return Err(Error::OutOfMemory);
}
let frame_buf = vec![0; frame_size];
Ok(Decoder {
reader,
params_buf,
frame_buf,
raw_params,
width,
height,
framerate,
colorspace,
y_len,
u_len,
})
}
/// Iterate over frames. End of input is indicated by `Error::EOF`.
pub fn read_frame(&mut self) -> Result<Frame, Error> {
    // Each frame begins with a "FRAME" line that may carry optional params.
    let end_params_pos = self.reader.read_until(TERMINATOR, &mut self.params_buf)?;
    if end_params_pos < FRAME_MAGICK.len() || !self.params_buf.starts_with(FRAME_MAGICK) {
        parse_error!(ParseError::InvalidY4M)
    }
    // We don't parse frame params currently but user has access to them.
    let start_params_pos = FRAME_MAGICK.len();
    let raw_params = if end_params_pos - start_params_pos > 0 {
        // Check for extra space.
        if self.params_buf[start_params_pos] != FIELD_SEP {
            parse_error!(ParseError::InvalidY4M)
        }
        // Copy everything after the separator so the caller can keep it.
        Some((&self.params_buf[start_params_pos + 1..end_params_pos]).to_owned())
    } else {
        None
    };
    // Planes are stored contiguously (Y, then U, then V) in one buffer.
    self.reader.read_exact(&mut self.frame_buf)?;
    Ok(Frame::new(
        [
            &self.frame_buf[0..self.y_len],
            &self.frame_buf[self.y_len..self.y_len + self.u_len],
            &self.frame_buf[self.y_len + self.u_len..],
        ],
        raw_params,
    ))
}
/// Return file width.
#[inline]
pub fn get_width(&self) -> usize {
    self.width
}
/// Return file height.
#[inline]
pub fn get_height(&self) -> usize {
    self.height
}
/// Return file framerate.
#[inline]
pub fn get_framerate(&self) -> Ratio {
    self.framerate
}
/// Return file colorspace.
///
/// **NOTE:** normally all .y4m should have colorspace param, but there are
/// files encoded without that tag and it's unclear what should we do in
/// that case. Currently C420 is implied by default as per ffmpeg behavior.
#[inline]
pub fn get_colorspace(&self) -> Colorspace {
    self.colorspace
}
/// Return file raw parameters (the unparsed header bytes after the magick).
#[inline]
pub fn get_raw_params(&self) -> &[u8] {
    &self.raw_params
}
/// Return the bit depth per sample.
#[inline]
pub fn get_bit_depth(&self) -> usize {
    self.colorspace.get_bit_depth()
}
/// Return the number of bytes in a sample.
#[inline]
pub fn get_bytes_per_sample(&self) -> usize {
    self.colorspace.get_bytes_per_sample()
}
}
/// A single frame.
#[derive(Debug)]
pub struct Frame<'f> {
    // Y, U and V planes, borrowed from the decoder's frame buffer.
    planes: [&'f [u8]; 3],
    // Optional raw frame parameters copied from the FRAME header line.
    raw_params: Option<Vec<u8>>,
}
impl<'f> Frame<'f> {
    /// Create a new frame with optional parameters.
    /// No heap allocations are made.
    pub fn new(planes: [&'f [u8]; 3], raw_params: Option<Vec<u8>>) -> Frame<'f> {
        Frame { planes, raw_params }
    }
    /// Create a new frame from data in 16-bit format.
    pub fn from_u16(planes: [&'f [u16]; 3], raw_params: Option<Vec<u8>>) -> Frame<'f> {
        // View a 16-bit plane as raw bytes (native endianness).
        fn plane_as_bytes(plane: &[u16]) -> &[u8] {
            // SAFETY: the pointer comes from a valid `&[u16]`, so it is
            // non-null and the `len * 2` bytes it covers stay readable for
            // the duration of the borrow; any alignment satisfies `u8`.
            unsafe {
                std::slice::from_raw_parts(plane.as_ptr() as *const u8, plane.len() * 2)
            }
        }
        Frame::new(
            [
                plane_as_bytes(planes[0]),
                plane_as_bytes(planes[1]),
                plane_as_bytes(planes[2]),
            ],
            raw_params,
        )
    }
    /// Return Y (first) plane.
    #[inline]
    pub fn get_y_plane(&self) -> &[u8] {
        self.planes[0]
    }
    /// Return U (second) plane. Empty in case of grayscale.
    #[inline]
    pub fn get_u_plane(&self) -> &[u8] {
        self.planes[1]
    }
    /// Return V (third) plane. Empty in case of grayscale.
    #[inline]
    pub fn get_v_plane(&self) -> &[u8] {
        self.planes[2]
    }
    /// Return frame raw parameters if any.
    #[inline]
    pub fn get_raw_params(&self) -> Option<&[u8]> {
        self.raw_params.as_ref().map(|v| v.as_slice())
    }
}
/// Encoder builder. Allows to set y4m file parameters using builder pattern.
// TODO(Kagami): Accept all known tags and raw params.
#[derive(Debug)]
pub struct EncoderBuilder {
    width: usize,
    height: usize,
    framerate: Ratio,
    // Defaults to C420 unless overridden via `with_colorspace`.
    colorspace: Colorspace,
}
impl EncoderBuilder {
    /// Create a new encoder builder.
    pub fn new(width: usize, height: usize, framerate: Ratio) -> EncoderBuilder {
        EncoderBuilder {
            width,
            height,
            framerate,
            colorspace: Colorspace::C420,
        }
    }
    /// Specify file colorspace.
    pub fn with_colorspace(mut self, colorspace: Colorspace) -> Self {
        self.colorspace = colorspace;
        self
    }
    /// Write header to the stream and create encoder instance.
    pub fn write_header<W: Write>(self, mut writer: W) -> Result<Encoder<W>, Error> {
        // XXX(Kagami): Beware that FILE_MAGICK already contains space.
        // The colorspace tag relies on the enum's `Debug` form (e.g. `C420`)
        // matching the y4m parameter syntax exactly.
        writer.write_all(FILE_MAGICK)?;
        write!(
            writer,
            "W{} H{} F{} {:?}",
            self.width, self.height, self.framerate, self.colorspace
        )?;
        writer.write_all(&[TERMINATOR])?;
        let (y_len, u_len, v_len) = get_plane_sizes(self.width, self.height, self.colorspace);
        Ok(Encoder {
            writer,
            y_len,
            u_len,
            v_len,
        })
    }
}
/// YUV4MPEG2 encoder.
pub struct Encoder<W: Write> {
    writer: W,
    // Expected plane sizes in bytes, fixed by the header geometry/colorspace.
    y_len: usize,
    u_len: usize,
    v_len: usize,
}
impl<W: Write> Encoder<W> {
    /// Write next frame to the stream.
    pub fn write_frame(&mut self, frame: &Frame) -> Result<(), Error> {
        let (y, u, v) = (frame.get_y_plane(), frame.get_u_plane(), frame.get_v_plane());
        // All three planes must match the sizes declared in the header.
        if (y.len(), u.len(), v.len()) != (self.y_len, self.u_len, self.v_len) {
            return Err(Error::BadInput);
        }
        self.writer.write_all(FRAME_MAGICK)?;
        if let Some(params) = frame.get_raw_params() {
            self.writer.write_all(&[FIELD_SEP])?;
            self.writer.write_all(params)?;
        }
        self.writer.write_all(&[TERMINATOR])?;
        self.writer.write_all(y)?;
        self.writer.write_all(u)?;
        self.writer.write_all(v)?;
        Ok(())
    }
}
/// Create a new decoder instance. Alias for `Decoder::new`.
pub fn decode<R: Read>(reader: R) -> Result<Decoder<R>, Error> {
    Decoder::new(reader)
}
/// Create a new encoder builder. Alias for `EncoderBuilder::new`.
// Call `write_header` on the returned builder to obtain an `Encoder`.
pub fn encode(width: usize, height: usize, framerate: Ratio) -> EncoderBuilder {
    EncoderBuilder::new(width, height, framerate)
}
// Move ratio parsing into Ratio::parse()
//! # YUV4MPEG2 (.y4m) Encoder/Decoder
#![deny(missing_docs)]
use std::fmt;
use std::io;
use std::io::Read;
use std::io::Write;
use std::num;
use std::str;
// Hard cap on a header line; longer headers are rejected as a parse error.
const MAX_PARAMS_SIZE: usize = 1024;
// Stream magick; note the trailing space is part of the signature.
const FILE_MAGICK: &[u8] = b"YUV4MPEG2 ";
// Per-frame magick (no trailing space; frame params are optional).
const FRAME_MAGICK: &[u8] = b"FRAME";
// Header lines end with LF.
const TERMINATOR: u8 = 0x0A;
// Separator between header parameters.
const FIELD_SEP: u8 = b' ';
// Separator inside a framerate ratio, e.g. `30000:1001`.
const RATIO_SEP: u8 = b':';
/// Both encoding and decoding errors.
#[derive(Debug)]
pub enum Error {
    /// End of the file. Technically not an error, but it's easier to process
    /// that way.
    ///
    /// `io::ErrorKind::UnexpectedEof` is converted into this variant.
    EOF,
    /// Bad input parameters provided.
    BadInput,
    /// Unknown colorspace (possibly just unimplemented).
    UnknownColorspace,
    /// Error while parsing the file/frame header.
    // TODO(Kagami): Better granularity of parse errors.
    ParseError(ParseError),
    /// Error while reading/writing the file.
    IoError(io::Error),
    /// Out of memory (limits exceeded).
    OutOfMemory,
}
impl std::error::Error for crate::Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match *self {
Error::EOF => None,
Error::BadInput => None,
Error::UnknownColorspace => None,
Error::ParseError(ref err) => Some(err),
Error::IoError(ref err) => Some(err),
Error::OutOfMemory => None,
}
}
}
impl fmt::Display for crate::Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::EOF => write!(f, "End of file"),
Error::BadInput => write!(f, "Bad input parameters provided"),
Error::UnknownColorspace => write!(f, "Bad input parameters provided"),
Error::ParseError(ref err) => err.fmt(f),
Error::IoError(ref err) => err.fmt(f),
Error::OutOfMemory => write!(f, "Out of memory (limits exceeded)"),
}
}
}
/// Granular parse-error definitions.
pub enum ParseError {
    /// Error reading y4m header
    InvalidY4M,
    /// Error parsing int
    Int,
    /// Error parsing UTF8
    Utf8,
    /// General Parsing Error
    General,
}
impl std::error::Error for crate::ParseError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match *self {
ParseError::InvalidY4M => None,
ParseError::Int => None,
ParseError::Utf8 => None,
ParseError::General => None,
}
}
}
impl fmt::Display for ParseError {
    // Human-readable description for each parse failure.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            ParseError::InvalidY4M => write!(f, "Error parsing y4m header"),
            ParseError::Int => write!(f, "Error parsing Int"),
            ParseError::Utf8 => write!(f, "Error parsing UTF8"),
            ParseError::General => write!(f, "General parsing error"),
        }
    }
}
impl fmt::Debug for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ParseError::InvalidY4M => write!(f, "Error parsing y4m header"),
ParseError::Int => write!(f, "Error parsing Int"),
ParseError::Utf8 => write!(f, "Error parsing UTF8"),
ParseError::General => write!(f, "General parsing error"),
}
}
}
// Early-return helper: wraps a `ParseError` into `Error::ParseError` and
// returns it from the enclosing function.
macro_rules! parse_error {
    ($p:expr) => {
        return Err(Error::ParseError($p));
    };
}
impl From<io::Error> for Error {
    fn from(err: io::Error) -> Error {
        match err.kind() {
            // Surfacing EOF as its own variant lets callers detect normal
            // end-of-stream without inspecting the io::Error.
            io::ErrorKind::UnexpectedEof => Error::EOF,
            _ => Error::IoError(err),
        }
    }
}
// Integer-parse failures surface as `ParseError::Int`.
impl From<num::ParseIntError> for Error {
    fn from(_: num::ParseIntError) -> Error {
        Error::ParseError(ParseError::Int)
    }
}
// UTF-8 decode failures surface as `ParseError::Utf8`.
impl From<str::Utf8Error> for Error {
    fn from(_: str::Utf8Error) -> Error {
        Error::ParseError(ParseError::Utf8)
    }
}
// Internal extension of `Read` with a terminator-delimited read.
trait EnhancedRead {
    // Read byte-by-byte into `buf` until `ch` appears; returns the index of
    // `ch` within `buf` (the terminator is stored but not counted).
    fn read_until(&mut self, ch: u8, buf: &mut [u8]) -> Result<usize, Error>;
}
impl<R: Read> EnhancedRead for R {
    // Current implementation does one `read` call per byte. This might be a
    // bit slow for long headers but it simplifies things: we don't need to
    // check whether start of the next frame is already read and so on.
    fn read_until(&mut self, ch: u8, buf: &mut [u8]) -> Result<usize, Error> {
        let mut collected = 0;
        while collected < buf.len() {
            // Read exactly one byte into the next free slot.
            let chunk_size = self.read(&mut buf[collected..=collected])?;
            if chunk_size == 0 {
                // A zero-length read means the underlying stream is exhausted.
                return Err(Error::EOF);
            }
            if buf[collected] == ch {
                return Ok(collected);
            }
            collected += chunk_size;
        }
        // Buffer filled without seeing the terminator: header is too long.
        parse_error!(ParseError::General)
    }
}
/// Parse an ASCII-decimal byte slice into a `usize`.
fn parse_bytes(buf: &[u8]) -> Result<usize, Error> {
    // `parse` is only defined on `str`, so decode as UTF-8 first; both
    // failure modes convert into `Error::ParseError` via the `From` impls.
    let text = str::from_utf8(buf)?;
    let value = text.parse()?;
    Ok(value)
}
/// Simple ratio structure since stdlib lacks one.
#[derive(Debug, Clone, Copy)]
pub struct Ratio {
    /// Numerator.
    pub num: usize,
    /// Denominator.
    // NOTE(review): zero is never rejected here or in `Ratio::parse` —
    // confirm callers can cope with a zero denominator.
    pub den: usize,
}
impl Ratio {
/// Create a new ratio.
pub fn new(num: usize, den: usize) -> Ratio {
Ratio { num, den }
}
/// Parse a ratio from a byte slice.
pub fn parse(value: &[u8]) -> Result<Ratio, Error> {
let parts: Vec<_> = value.splitn(2, |&b| b == RATIO_SEP).collect();
if parts.len() != 2 {
parse_error!(ParseError::General)
}
let num = parse_bytes(parts[0])?;
let den = parse_bytes(parts[1])?;
Ok(Ratio::new(num, den))
}
}
impl fmt::Display for Ratio {
    // Rendered in y4m header syntax, e.g. `30000:1001`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}:{}", self.num, self.den)
    }
}
/// Colorspace (color model/pixel format). Only subset of them is supported.
///
/// From libavformat/yuv4mpegenc.c:
///
/// > yuv4mpeg can only handle yuv444p, yuv422p, yuv420p, yuv411p and gray8
/// pixel formats. And using 'strict -1' also yuv444p9, yuv422p9, yuv420p9,
/// yuv444p10, yuv422p10, yuv420p10, yuv444p12, yuv422p12, yuv420p12,
/// yuv444p14, yuv422p14, yuv420p14, yuv444p16, yuv422p16, yuv420p16, gray9,
/// gray10, gray12 and gray16 pixel formats.
// NOTE: variant names double as the header tag (via `Debug`) in the encoder's
// `write_header`, so they must stay spelled exactly like the y4m `C` values.
#[derive(Debug, Clone, Copy)]
pub enum Colorspace {
    /// Grayscale only, 8-bit.
    Cmono,
    /// 4:2:0 with coincident chroma planes, 8-bit.
    C420,
    /// 4:2:0 with coincident chroma planes, 10-bit.
    C420p10,
    /// 4:2:0 with coincident chroma planes, 12-bit.
    C420p12,
    /// 4:2:0 with biaxially-displaced chroma planes, 8-bit.
    C420jpeg,
    /// 4:2:0 with vertically-displaced chroma planes, 8-bit.
    C420paldv,
    /// Found in some files. Same as `C420`.
    C420mpeg2,
    /// 4:2:2, 8-bit.
    C422,
    /// 4:2:2, 10-bit.
    C422p10,
    /// 4:2:2, 12-bit.
    C422p12,
    /// 4:4:4, 8-bit.
    C444,
    /// 4:4:4, 10-bit.
    C444p10,
    /// 4:4:4, 12-bit.
    C444p12,
}
impl Colorspace {
/// Return the bit depth per sample
#[inline]
pub fn get_bit_depth(self) -> usize {
match self {
Colorspace::Cmono
| Colorspace::C420
| Colorspace::C422
| Colorspace::C444
| Colorspace::C420jpeg
| Colorspace::C420paldv
| Colorspace::C420mpeg2 => 8,
Colorspace::C420p10 | Colorspace::C422p10 | Colorspace::C444p10 => 10,
Colorspace::C420p12 | Colorspace::C422p12 | Colorspace::C444p12 => 12,
}
}
/// Return the number of bytes in a sample
#[inline]
pub fn get_bytes_per_sample(self) -> usize {
if self.get_bit_depth() <= 8 {
1
} else {
2
}
}
}
/// Compute the byte sizes of the (Y, U, V) planes for the given geometry.
fn get_plane_sizes(width: usize, height: usize, colorspace: Colorspace) -> (usize, usize, usize) {
    let bytes = colorspace.get_bytes_per_sample();
    let y_len = width * height * bytes;
    // Chroma plane size depends only on the subsampling family; odd
    // dimensions round the half-resolution axes up.
    let chroma_len = match colorspace {
        Colorspace::Cmono => 0,
        Colorspace::C420
        | Colorspace::C420p10
        | Colorspace::C420p12
        | Colorspace::C420jpeg
        | Colorspace::C420paldv
        | Colorspace::C420mpeg2 => ((width + 1) / 2) * ((height + 1) / 2) * bytes,
        Colorspace::C422 | Colorspace::C422p10 | Colorspace::C422p12 => {
            ((width + 1) / 2) * height * bytes
        }
        Colorspace::C444 | Colorspace::C444p10 | Colorspace::C444p12 => y_len,
    };
    (y_len, chroma_len, chroma_len)
}
/// Limits on the resources `Decoder` is allowed to use.
#[derive(Clone, Copy, Debug)]
pub struct Limits {
    /// Maximum allowed size of frame buffer, default is 1 GiB.
    // Only the frame-buffer allocation is checked against this limit.
    pub bytes: usize,
}
impl Default for Limits {
    fn default() -> Limits {
        // 1 GiB frame-buffer cap.
        Limits { bytes: 1 << 30 }
    }
}
/// YUV4MPEG2 decoder.
pub struct Decoder<R: Read> {
    reader: R,
    // Scratch buffer reused for every header/FRAME line.
    params_buf: Vec<u8>,
    // Reusable buffer holding one frame's Y/U/V planes contiguously.
    frame_buf: Vec<u8>,
    // Raw stream-header bytes after the magick, kept for the caller.
    raw_params: Vec<u8>,
    width: usize,
    height: usize,
    framerate: Ratio,
    colorspace: Colorspace,
    // Byte lengths of the Y and U planes (V occupies the buffer remainder).
    y_len: usize,
    u_len: usize,
}
impl<R: Read> Decoder<R> {
/// Create a new decoder instance.
pub fn new(reader: R) -> Result<Decoder<R>, Error> {
Decoder::new_with_limits(reader, Limits::default())
}
/// Create a new decoder instance with custom limits.
pub fn new_with_limits(mut reader: R, limits: Limits) -> Result<Decoder<R>, Error> {
let mut params_buf = vec![0; MAX_PARAMS_SIZE];
let end_params_pos = reader.read_until(TERMINATOR, &mut params_buf)?;
if end_params_pos < FILE_MAGICK.len() || !params_buf.starts_with(FILE_MAGICK) {
parse_error!(ParseError::InvalidY4M)
}
let raw_params = (¶ms_buf[FILE_MAGICK.len()..end_params_pos]).to_owned();
let mut width = 0;
let mut height = 0;
// Framerate is actually required per spec, but let's be a bit more
// permissive as per ffmpeg behavior.
let mut framerate = Ratio::new(25, 1);
let mut colorspace = None;
// We shouldn't convert it to string because encoding is unspecified.
for param in raw_params.split(|&b| b == FIELD_SEP) {
if param.is_empty() {
continue;
}
let (name, value) = (param[0], ¶m[1..]);
// TODO(Kagami): interlacing, pixel aspect, comment.
match name {
b'W' => width = parse_bytes(value)?,
b'H' => height = parse_bytes(value)?,
b'F' => framerate = Ratio::parse(value)?,
b'C' => {
colorspace = match value {
b"mono" => Some(Colorspace::Cmono),
b"420" => Some(Colorspace::C420),
b"420p10" => Some(Colorspace::C420p10),
b"420p12" => Some(Colorspace::C420p12),
b"422" => Some(Colorspace::C422),
b"422p10" => Some(Colorspace::C422p10),
b"422p12" => Some(Colorspace::C422p12),
b"444" => Some(Colorspace::C444),
b"444p10" => Some(Colorspace::C444p10),
b"444p12" => Some(Colorspace::C444p12),
b"420jpeg" => Some(Colorspace::C420jpeg),
b"420paldv" => Some(Colorspace::C420paldv),
b"420mpeg2" => Some(Colorspace::C420mpeg2),
_ => return Err(Error::UnknownColorspace),
}
}
_ => {}
}
}
let colorspace = colorspace.unwrap_or(Colorspace::C420);
if width == 0 || height == 0 {
parse_error!(ParseError::General)
}
let (y_len, u_len, v_len) = get_plane_sizes(width, height, colorspace);
let frame_size = y_len + u_len + v_len;
if frame_size > limits.bytes {
return Err(Error::OutOfMemory);
}
let frame_buf = vec![0; frame_size];
Ok(Decoder {
reader,
params_buf,
frame_buf,
raw_params,
width,
height,
framerate,
colorspace,
y_len,
u_len,
})
}
/// Iterate over frames. End of input is indicated by `Error::EOF`.
pub fn read_frame(&mut self) -> Result<Frame, Error> {
let end_params_pos = self.reader.read_until(TERMINATOR, &mut self.params_buf)?;
if end_params_pos < FRAME_MAGICK.len() || !self.params_buf.starts_with(FRAME_MAGICK) {
parse_error!(ParseError::InvalidY4M)
}
// We don't parse frame params currently but user has access to them.
let start_params_pos = FRAME_MAGICK.len();
let raw_params = if end_params_pos - start_params_pos > 0 {
// Check for extra space.
if self.params_buf[start_params_pos] != FIELD_SEP {
parse_error!(ParseError::InvalidY4M)
}
Some((&self.params_buf[start_params_pos + 1..end_params_pos]).to_owned())
} else {
None
};
self.reader.read_exact(&mut self.frame_buf)?;
Ok(Frame::new(
[
&self.frame_buf[0..self.y_len],
&self.frame_buf[self.y_len..self.y_len + self.u_len],
&self.frame_buf[self.y_len + self.u_len..],
],
raw_params,
))
}
/// Return file width.
#[inline]
pub fn get_width(&self) -> usize {
self.width
}
/// Return file height.
#[inline]
pub fn get_height(&self) -> usize {
self.height
}
/// Return file framerate.
#[inline]
pub fn get_framerate(&self) -> Ratio {
self.framerate
}
/// Return file colorspace.
///
/// **NOTE:** normally all .y4m should have colorspace param, but there are
/// files encoded without that tag and it's unclear what should we do in
/// that case. Currently C420 is implied by default as per ffmpeg behavior.
#[inline]
pub fn get_colorspace(&self) -> Colorspace {
self.colorspace
}
/// Return file raw parameters.
#[inline]
pub fn get_raw_params(&self) -> &[u8] {
&self.raw_params
}
/// Return the bit depth per sample
#[inline]
pub fn get_bit_depth(&self) -> usize {
self.colorspace.get_bit_depth()
}
/// Return the number of bytes in a sample
#[inline]
pub fn get_bytes_per_sample(&self) -> usize {
self.colorspace.get_bytes_per_sample()
}
}
/// A single frame.
#[derive(Debug)]
pub struct Frame<'f> {
    // Y, U and V planes, borrowed from the decoder's frame buffer.
    planes: [&'f [u8]; 3],
    // Optional raw frame parameters copied from the FRAME header line.
    raw_params: Option<Vec<u8>>,
}
impl<'f> Frame<'f> {
    /// Create a new frame with optional parameters.
    /// No heap allocations are made.
    pub fn new(planes: [&'f [u8]; 3], raw_params: Option<Vec<u8>>) -> Frame<'f> {
        Frame { planes, raw_params }
    }
    /// Create a new frame from data in 16-bit format.
    pub fn from_u16(planes: [&'f [u16]; 3], raw_params: Option<Vec<u8>>) -> Frame<'f> {
        // View a 16-bit plane as raw bytes (native endianness).
        fn plane_as_bytes(plane: &[u16]) -> &[u8] {
            // SAFETY: the pointer comes from a valid `&[u16]`, so it is
            // non-null and the `len * 2` bytes it covers stay readable for
            // the duration of the borrow; any alignment satisfies `u8`.
            unsafe {
                std::slice::from_raw_parts(plane.as_ptr() as *const u8, plane.len() * 2)
            }
        }
        Frame::new(
            [
                plane_as_bytes(planes[0]),
                plane_as_bytes(planes[1]),
                plane_as_bytes(planes[2]),
            ],
            raw_params,
        )
    }
    /// Return Y (first) plane.
    #[inline]
    pub fn get_y_plane(&self) -> &[u8] {
        self.planes[0]
    }
    /// Return U (second) plane. Empty in case of grayscale.
    #[inline]
    pub fn get_u_plane(&self) -> &[u8] {
        self.planes[1]
    }
    /// Return V (third) plane. Empty in case of grayscale.
    #[inline]
    pub fn get_v_plane(&self) -> &[u8] {
        self.planes[2]
    }
    /// Return frame raw parameters if any.
    #[inline]
    pub fn get_raw_params(&self) -> Option<&[u8]> {
        self.raw_params.as_ref().map(|v| v.as_slice())
    }
}
/// Encoder builder. Allows to set y4m file parameters using builder pattern.
// TODO(Kagami): Accept all known tags and raw params.
#[derive(Debug)]
pub struct EncoderBuilder {
    width: usize,
    height: usize,
    framerate: Ratio,
    // Defaults to C420 unless overridden via `with_colorspace`.
    colorspace: Colorspace,
}
impl EncoderBuilder {
    /// Create a new encoder builder.
    pub fn new(width: usize, height: usize, framerate: Ratio) -> EncoderBuilder {
        EncoderBuilder {
            width,
            height,
            framerate,
            colorspace: Colorspace::C420,
        }
    }
    /// Specify file colorspace.
    pub fn with_colorspace(mut self, colorspace: Colorspace) -> Self {
        self.colorspace = colorspace;
        self
    }
    /// Write header to the stream and create encoder instance.
    pub fn write_header<W: Write>(self, mut writer: W) -> Result<Encoder<W>, Error> {
        // XXX(Kagami): Beware that FILE_MAGICK already contains space.
        // The colorspace tag relies on the enum's `Debug` form (e.g. `C420`)
        // matching the y4m parameter syntax exactly.
        writer.write_all(FILE_MAGICK)?;
        write!(
            writer,
            "W{} H{} F{} {:?}",
            self.width, self.height, self.framerate, self.colorspace
        )?;
        writer.write_all(&[TERMINATOR])?;
        let (y_len, u_len, v_len) = get_plane_sizes(self.width, self.height, self.colorspace);
        Ok(Encoder {
            writer,
            y_len,
            u_len,
            v_len,
        })
    }
}
/// YUV4MPEG2 encoder.
pub struct Encoder<W: Write> {
    writer: W,
    // Expected plane sizes in bytes, fixed by the header geometry/colorspace.
    y_len: usize,
    u_len: usize,
    v_len: usize,
}
impl<W: Write> Encoder<W> {
    /// Write next frame to the stream.
    pub fn write_frame(&mut self, frame: &Frame) -> Result<(), Error> {
        let (y, u, v) = (frame.get_y_plane(), frame.get_u_plane(), frame.get_v_plane());
        // All three planes must match the sizes declared in the header.
        if (y.len(), u.len(), v.len()) != (self.y_len, self.u_len, self.v_len) {
            return Err(Error::BadInput);
        }
        self.writer.write_all(FRAME_MAGICK)?;
        if let Some(params) = frame.get_raw_params() {
            self.writer.write_all(&[FIELD_SEP])?;
            self.writer.write_all(params)?;
        }
        self.writer.write_all(&[TERMINATOR])?;
        self.writer.write_all(y)?;
        self.writer.write_all(u)?;
        self.writer.write_all(v)?;
        Ok(())
    }
}
/// Create a new decoder instance. Alias for `Decoder::new`.
pub fn decode<R: Read>(reader: R) -> Result<Decoder<R>, Error> {
    Decoder::new(reader)
}
/// Create a new encoder builder. Alias for `EncoderBuilder::new`.
// Call `write_header` on the returned builder to obtain an `Encoder`.
pub fn encode(width: usize, height: usize, framerate: Ratio) -> EncoderBuilder {
    EncoderBuilder::new(width, height, framerate)
}
// |
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Integer trait and functions.
//!
//! ## Compatibility
//!
//! The `num-integer` crate is tested for rustc 1.8 and greater.
#![doc(html_root_url = "https://docs.rs/num-integer/0.1")]
#![no_std]
#[cfg(feature = "std")]
extern crate std;
extern crate num_traits as traits;
use core::ops::Add;
use traits::{Num, Signed};
mod roots;
pub use roots::Roots;
pub use roots::{sqrt, cbrt, nth_root};
/// Arithmetic shared by integer types: floored division and modulus, GCD,
/// LCM, parity tests, and simultaneous division/remainder.
pub trait Integer: Sized + Num + PartialOrd + Ord + Eq {
    /// Floored integer division.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert!(( 8).div_floor(& 3) == 2);
    /// assert!(( 8).div_floor(&-3) == -3);
    /// assert!((-8).div_floor(& 3) == -3);
    /// assert!((-8).div_floor(&-3) == 2);
    ///
    /// assert!(( 1).div_floor(& 2) == 0);
    /// assert!(( 1).div_floor(&-2) == -1);
    /// assert!((-1).div_floor(& 2) == -1);
    /// assert!((-1).div_floor(&-2) == 0);
    /// ~~~
    fn div_floor(&self, other: &Self) -> Self;
    /// Floored integer modulo, satisfying:
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// # let n = 1; let d = 1;
    /// assert!(n.div_floor(&d) * d + n.mod_floor(&d) == n)
    /// ~~~
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert!(( 8).mod_floor(& 3) == 2);
    /// assert!(( 8).mod_floor(&-3) == -1);
    /// assert!((-8).mod_floor(& 3) == 1);
    /// assert!((-8).mod_floor(&-3) == -2);
    ///
    /// assert!(( 1).mod_floor(& 2) == 1);
    /// assert!(( 1).mod_floor(&-2) == -1);
    /// assert!((-1).mod_floor(& 2) == 1);
    /// assert!((-1).mod_floor(&-2) == -1);
    /// ~~~
    fn mod_floor(&self, other: &Self) -> Self;
    /// Greatest Common Divisor (GCD).
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(6.gcd(&8), 2);
    /// assert_eq!(7.gcd(&3), 1);
    /// ~~~
    fn gcd(&self, other: &Self) -> Self;
    /// Lowest Common Multiple (LCM).
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(7.lcm(&3), 21);
    /// assert_eq!(2.lcm(&4), 4);
    /// ~~~
    fn lcm(&self, other: &Self) -> Self;
    /// Deprecated, use `is_multiple_of` instead.
    fn divides(&self, other: &Self) -> bool;
    /// Returns `true` if `self` is a multiple of `other`.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(9.is_multiple_of(&3), true);
    /// assert_eq!(3.is_multiple_of(&9), false);
    /// ~~~
    fn is_multiple_of(&self, other: &Self) -> bool;
    /// Returns `true` if the number is even.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(3.is_even(), false);
    /// assert_eq!(4.is_even(), true);
    /// ~~~
    fn is_even(&self) -> bool;
    /// Returns `true` if the number is odd.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(3.is_odd(), true);
    /// assert_eq!(4.is_odd(), false);
    /// ~~~
    fn is_odd(&self) -> bool;
    /// Simultaneous truncated integer division and modulus.
    /// Returns `(quotient, remainder)`.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(( 8).div_rem( &3), ( 2, 2));
    /// assert_eq!(( 8).div_rem(&-3), (-2, 2));
    /// assert_eq!((-8).div_rem( &3), (-2, -2));
    /// assert_eq!((-8).div_rem(&-3), ( 2, -2));
    ///
    /// assert_eq!(( 1).div_rem( &2), ( 0, 1));
    /// assert_eq!(( 1).div_rem(&-2), ( 0, 1));
    /// assert_eq!((-1).div_rem( &2), ( 0, -1));
    /// assert_eq!((-1).div_rem(&-2), ( 0, -1));
    /// ~~~
    // NOTE(review): `#[inline]` on a required trait method (no default body)
    // has no effect — confirm whether it was meant for an implementation.
    #[inline]
    fn div_rem(&self, other: &Self) -> (Self, Self);
    /// Simultaneous floored integer division and modulus.
    /// Returns `(quotient, remainder)`.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(( 8).div_mod_floor( &3), ( 2, 2));
    /// assert_eq!(( 8).div_mod_floor(&-3), (-3, -1));
    /// assert_eq!((-8).div_mod_floor( &3), (-3, 1));
    /// assert_eq!((-8).div_mod_floor(&-3), ( 2, -2));
    ///
    /// assert_eq!(( 1).div_mod_floor( &2), ( 0, 1));
    /// assert_eq!(( 1).div_mod_floor(&-2), (-1, -1));
    /// assert_eq!((-1).div_mod_floor( &2), (-1, 1));
    /// assert_eq!((-1).div_mod_floor(&-2), ( 0, -1));
    /// ~~~
    fn div_mod_floor(&self, other: &Self) -> (Self, Self) {
        // Default: two independent calls; impls may override to compute both
        // in a single pass.
        (self.div_floor(other), self.mod_floor(other))
    }
}
// Free-function conveniences mirroring the `Integer` trait methods.
/// Simultaneous integer division and modulus
#[inline]
pub fn div_rem<T: Integer>(x: T, y: T) -> (T, T) {
    x.div_rem(&y)
}
/// Floored integer division
#[inline]
pub fn div_floor<T: Integer>(x: T, y: T) -> T {
    x.div_floor(&y)
}
/// Floored integer modulus
#[inline]
pub fn mod_floor<T: Integer>(x: T, y: T) -> T {
    x.mod_floor(&y)
}
/// Simultaneous floored integer division and modulus
#[inline]
pub fn div_mod_floor<T: Integer>(x: T, y: T) -> (T, T) {
    x.div_mod_floor(&y)
}
/// Calculates the Greatest Common Divisor (GCD) of the number and `other`. The
/// result is always positive.
#[inline(always)]
pub fn gcd<T: Integer>(x: T, y: T) -> T {
    x.gcd(&y)
}
/// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
#[inline(always)]
pub fn lcm<T: Integer>(x: T, y: T) -> T {
    x.lcm(&y)
}
macro_rules! impl_integer_for_isize {
($T:ty, $test_mod:ident) => (
impl Integer for $T {
/// Floored integer division
#[inline]
fn div_floor(&self, other: &Self) -> Self {
// Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
// December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
match self.div_rem(other) {
(d, r) if (r > 0 && *other < 0)
|| (r < 0 && *other > 0) => d - 1,
(d, _) => d,
}
}
/// Floored integer modulo
#[inline]
fn mod_floor(&self, other: &Self) -> Self {
// Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
// December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
match *self % *other {
r if (r > 0 && *other < 0)
|| (r < 0 && *other > 0) => r + *other,
r => r,
}
}
/// Calculates `div_floor` and `mod_floor` simultaneously
#[inline]
fn div_mod_floor(&self, other: &Self) -> (Self, Self) {
// Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
// December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
match self.div_rem(other) {
(d, r) if (r > 0 && *other < 0)
|| (r < 0 && *other > 0) => (d - 1, r + *other),
(d, r) => (d, r),
}
}
/// Calculates the Greatest Common Divisor (GCD) of the number and
/// `other`. The result is always positive.
#[inline]
fn gcd(&self, other: &Self) -> Self {
// Use Stein's algorithm
let mut m = *self;
let mut n = *other;
if m == 0 || n == 0 { return (m | n).abs() }
// find common factors of 2
let shift = (m | n).trailing_zeros();
// The algorithm needs positive numbers, but the minimum value
// can't be represented as a positive one.
// It's also a power of two, so the gcd can be
// calculated by bitshifting in that case
// Assuming two's complement, the number created by the shift
// is positive for all numbers except gcd = abs(min value)
// The call to .abs() causes a panic in debug mode
if m == Self::min_value() || n == Self::min_value() {
return (1 << shift).abs()
}
// guaranteed to be positive now, rest like unsigned algorithm
m = m.abs();
n = n.abs();
// divide n and m by 2 until odd
m >>= m.trailing_zeros();
n >>= n.trailing_zeros();
while m != n {
if m > n {
m -= n;
m >>= m.trailing_zeros();
} else {
n -= m;
n >>= n.trailing_zeros();
}
}
m << shift
}
/// Calculates the Lowest Common Multiple (LCM) of the number and
/// `other`.
#[inline]
fn lcm(&self, other: &Self) -> Self {
// should not have to recalculate abs
(*self * (*other / self.gcd(other))).abs()
}
/// Deprecated, use `is_multiple_of` instead.
#[inline]
fn divides(&self, other: &Self) -> bool {
self.is_multiple_of(other)
}
/// Returns `true` if the number is a multiple of `other`.
#[inline]
fn is_multiple_of(&self, other: &Self) -> bool {
*self % *other == 0
}
/// Returns `true` if the number is divisible by `2`
#[inline]
fn is_even(&self) -> bool { (*self) & 1 == 0 }
/// Returns `true` if the number is not divisible by `2`
#[inline]
fn is_odd(&self) -> bool { !self.is_even() }
/// Simultaneous truncated integer division and modulus.
#[inline]
fn div_rem(&self, other: &Self) -> (Self, Self) {
(*self / *other, *self % *other)
}
}
#[cfg(test)]
mod $test_mod {
use Integer;
use core::mem;
/// Checks that the division rule holds for:
///
/// - `n`: numerator (dividend)
/// - `d`: denominator (divisor)
/// - `qr`: quotient and remainder
#[cfg(test)]
fn test_division_rule((n,d): ($T, $T), (q,r): ($T, $T)) {
assert_eq!(d * q + r, n);
}
#[test]
fn test_div_rem() {
fn test_nd_dr(nd: ($T,$T), qr: ($T,$T)) {
let (n,d) = nd;
let separate_div_rem = (n / d, n % d);
let combined_div_rem = n.div_rem(&d);
assert_eq!(separate_div_rem, qr);
assert_eq!(combined_div_rem, qr);
test_division_rule(nd, separate_div_rem);
test_division_rule(nd, combined_div_rem);
}
test_nd_dr(( 8, 3), ( 2, 2));
test_nd_dr(( 8, -3), (-2, 2));
test_nd_dr((-8, 3), (-2, -2));
test_nd_dr((-8, -3), ( 2, -2));
test_nd_dr(( 1, 2), ( 0, 1));
test_nd_dr(( 1, -2), ( 0, 1));
test_nd_dr((-1, 2), ( 0, -1));
test_nd_dr((-1, -2), ( 0, -1));
}
#[test]
fn test_div_mod_floor() {
fn test_nd_dm(nd: ($T,$T), dm: ($T,$T)) {
let (n,d) = nd;
let separate_div_mod_floor = (n.div_floor(&d), n.mod_floor(&d));
let combined_div_mod_floor = n.div_mod_floor(&d);
assert_eq!(separate_div_mod_floor, dm);
assert_eq!(combined_div_mod_floor, dm);
test_division_rule(nd, separate_div_mod_floor);
test_division_rule(nd, combined_div_mod_floor);
}
test_nd_dm(( 8, 3), ( 2, 2));
test_nd_dm(( 8, -3), (-3, -1));
test_nd_dm((-8, 3), (-3, 1));
test_nd_dm((-8, -3), ( 2, -2));
test_nd_dm(( 1, 2), ( 0, 1));
test_nd_dm(( 1, -2), (-1, -1));
test_nd_dm((-1, 2), (-1, 1));
test_nd_dm((-1, -2), ( 0, -1));
}
#[test]
fn test_gcd() {
assert_eq!((10 as $T).gcd(&2), 2 as $T);
assert_eq!((10 as $T).gcd(&3), 1 as $T);
assert_eq!((0 as $T).gcd(&3), 3 as $T);
assert_eq!((3 as $T).gcd(&3), 3 as $T);
assert_eq!((56 as $T).gcd(&42), 14 as $T);
assert_eq!((3 as $T).gcd(&-3), 3 as $T);
assert_eq!((-6 as $T).gcd(&3), 3 as $T);
assert_eq!((-4 as $T).gcd(&-2), 2 as $T);
}
#[test]
fn test_gcd_cmp_with_euclidean() {
fn euclidean_gcd(mut m: $T, mut n: $T) -> $T {
while m != 0 {
mem::swap(&mut m, &mut n);
m %= n;
}
n.abs()
}
// gcd(-128, b) = 128 is not representable as positive value
// for i8
for i in -127..127 {
for j in -127..127 {
assert_eq!(euclidean_gcd(i,j), i.gcd(&j));
}
}
// last value
// FIXME: Use inclusive ranges for above loop when implemented
let i = 127;
for j in -127..127 {
assert_eq!(euclidean_gcd(i,j), i.gcd(&j));
}
assert_eq!(127.gcd(&127), 127);
}
#[test]
fn test_gcd_min_val() {
let min = <$T>::min_value();
let max = <$T>::max_value();
let max_pow2 = max / 2 + 1;
assert_eq!(min.gcd(&max), 1 as $T);
assert_eq!(max.gcd(&min), 1 as $T);
assert_eq!(min.gcd(&max_pow2), max_pow2);
assert_eq!(max_pow2.gcd(&min), max_pow2);
assert_eq!(min.gcd(&42), 2 as $T);
assert_eq!((42 as $T).gcd(&min), 2 as $T);
}
#[test]
#[should_panic]
fn test_gcd_min_val_min_val() {
    // gcd(MIN, MIN) = |MIN| is not representable, so this must panic.
    let min = <$T>::min_value();
    assert!(min.gcd(&min) >= 0);
}
#[test]
#[should_panic]
fn test_gcd_min_val_0() {
    // gcd(MIN, 0) = |MIN| is not representable, so this must panic.
    let min = <$T>::min_value();
    assert!(min.gcd(&0) >= 0);
}
#[test]
#[should_panic]
fn test_gcd_0_min_val() {
    // gcd(0, MIN) = |MIN| is not representable, so this must panic.
    let min = <$T>::min_value();
    assert!((0 as $T).gcd(&min) >= 0);
}
#[test]
fn test_lcm() {
    // lcm is defined to be non-negative; anything lcm'd with zero is zero.
    assert_eq!((1 as $T).lcm(&0), 0 as $T);
    assert_eq!((0 as $T).lcm(&1), 0 as $T);
    assert_eq!((1 as $T).lcm(&1), 1 as $T);
    assert_eq!((-1 as $T).lcm(&1), 1 as $T);
    assert_eq!((1 as $T).lcm(&-1), 1 as $T);
    assert_eq!((-1 as $T).lcm(&-1), 1 as $T);
    assert_eq!((8 as $T).lcm(&9), 72 as $T);
    assert_eq!((11 as $T).lcm(&5), 55 as $T);
}
#[test]
fn test_even() {
    // Parity must be correct for negative values, zero, and positives.
    assert_eq!((-4 as $T).is_even(), true);
    assert_eq!((-3 as $T).is_even(), false);
    assert_eq!((-2 as $T).is_even(), true);
    assert_eq!((-1 as $T).is_even(), false);
    assert_eq!((0 as $T).is_even(), true);
    assert_eq!((1 as $T).is_even(), false);
    assert_eq!((2 as $T).is_even(), true);
    assert_eq!((3 as $T).is_even(), false);
    assert_eq!((4 as $T).is_even(), true);
}
#[test]
fn test_odd() {
    // is_odd must be the exact complement of is_even over the same range.
    assert_eq!((-4 as $T).is_odd(), false);
    assert_eq!((-3 as $T).is_odd(), true);
    assert_eq!((-2 as $T).is_odd(), false);
    assert_eq!((-1 as $T).is_odd(), true);
    assert_eq!((0 as $T).is_odd(), false);
    assert_eq!((1 as $T).is_odd(), true);
    assert_eq!((2 as $T).is_odd(), false);
    assert_eq!((3 as $T).is_odd(), true);
    assert_eq!((4 as $T).is_odd(), false);
}
}
)
}
// Instantiate `Integer` (and a per-type test module) for every signed
// primitive integer type.
impl_integer_for_isize!(i8, test_integer_i8);
impl_integer_for_isize!(i16, test_integer_i16);
impl_integer_for_isize!(i32, test_integer_i32);
impl_integer_for_isize!(i64, test_integer_i64);
impl_integer_for_isize!(isize, test_integer_isize);
#[cfg(has_i128)]
impl_integer_for_isize!(i128, test_integer_i128);
/// Implements `Integer` for an unsigned primitive type, plus a `#[cfg(test)]`
/// module exercising the implementation for that type.
macro_rules! impl_integer_for_usize {
    ($T:ty, $test_mod:ident) => (
        impl Integer for $T {
            /// Unsigned integer division. Returns the same result as `div` (`/`).
            #[inline]
            fn div_floor(&self, other: &Self) -> Self {
                *self / *other
            }

            /// Unsigned integer modulo operation. Returns the same result as `rem` (`%`).
            #[inline]
            fn mod_floor(&self, other: &Self) -> Self {
                *self % *other
            }

            /// Calculates the Greatest Common Divisor (GCD) of the number and `other`
            #[inline]
            fn gcd(&self, other: &Self) -> Self {
                // Use Stein's algorithm
                let mut m = *self;
                let mut n = *other;
                // gcd(x, 0) == x and gcd(0, 0) == 0.
                if m == 0 || n == 0 { return m | n }

                // find common factors of 2
                let shift = (m | n).trailing_zeros();

                // divide n and m by 2 until odd
                m >>= m.trailing_zeros();
                n >>= n.trailing_zeros();

                while m != n {
                    if m > n {
                        m -= n;
                        m >>= m.trailing_zeros();
                    } else {
                        n -= m;
                        n >>= n.trailing_zeros();
                    }
                }
                m << shift
            }

            /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
            #[inline]
            fn lcm(&self, other: &Self) -> Self {
                // BUGFIX: lcm(0, 0) is documented on the `Integer` trait to be 0,
                // but gcd(0, 0) == 0, so without this guard the division below
                // panics with a division by zero.
                if self.is_zero() && other.is_zero() {
                    Self::zero()
                } else {
                    *self * (*other / self.gcd(other))
                }
            }

            /// Deprecated, use `is_multiple_of` instead.
            #[inline]
            fn divides(&self, other: &Self) -> bool {
                self.is_multiple_of(other)
            }

            /// Returns `true` if the number is a multiple of `other`.
            #[inline]
            fn is_multiple_of(&self, other: &Self) -> bool {
                *self % *other == 0
            }

            /// Returns `true` if the number is divisible by `2`.
            #[inline]
            fn is_even(&self) -> bool {
                *self % 2 == 0
            }

            /// Returns `true` if the number is not divisible by `2`.
            #[inline]
            fn is_odd(&self) -> bool {
                !self.is_even()
            }

            /// Simultaneous truncated integer division and modulus.
            #[inline]
            fn div_rem(&self, other: &Self) -> (Self, Self) {
                (*self / *other, *self % *other)
            }
        }

        #[cfg(test)]
        mod $test_mod {
            use Integer;
            use core::mem;

            #[test]
            fn test_div_mod_floor() {
                assert_eq!((10 as $T).div_floor(&(3 as $T)), 3 as $T);
                assert_eq!((10 as $T).mod_floor(&(3 as $T)), 1 as $T);
                assert_eq!((10 as $T).div_mod_floor(&(3 as $T)), (3 as $T, 1 as $T));
                assert_eq!((5 as $T).div_floor(&(5 as $T)), 1 as $T);
                assert_eq!((5 as $T).mod_floor(&(5 as $T)), 0 as $T);
                assert_eq!((5 as $T).div_mod_floor(&(5 as $T)), (1 as $T, 0 as $T));
                assert_eq!((3 as $T).div_floor(&(7 as $T)), 0 as $T);
                assert_eq!((3 as $T).mod_floor(&(7 as $T)), 3 as $T);
                assert_eq!((3 as $T).div_mod_floor(&(7 as $T)), (0 as $T, 3 as $T));
            }

            #[test]
            fn test_gcd() {
                assert_eq!((10 as $T).gcd(&2), 2 as $T);
                assert_eq!((10 as $T).gcd(&3), 1 as $T);
                assert_eq!((0 as $T).gcd(&3), 3 as $T);
                assert_eq!((3 as $T).gcd(&3), 3 as $T);
                assert_eq!((56 as $T).gcd(&42), 14 as $T);
            }

            #[test]
            fn test_gcd_cmp_with_euclidean() {
                // Cross-check Stein's algorithm against the Euclidean algorithm.
                fn euclidean_gcd(mut m: $T, mut n: $T) -> $T {
                    while m != 0 {
                        mem::swap(&mut m, &mut n);
                        m %= n;
                    }
                    n
                }

                for i in 0..255 {
                    for j in 0..255 {
                        assert_eq!(euclidean_gcd(i,j), i.gcd(&j));
                    }
                }

                // last value
                // FIXME: Use inclusive ranges for above loop when implemented
                let i = 255;
                for j in 0..255 {
                    assert_eq!(euclidean_gcd(i,j), i.gcd(&j));
                }
                assert_eq!(255.gcd(&255), 255);
            }

            #[test]
            fn test_lcm() {
                assert_eq!((1 as $T).lcm(&0), 0 as $T);
                assert_eq!((0 as $T).lcm(&1), 0 as $T);
                // Regression: lcm(0, 0) is defined as 0 and must not panic.
                assert_eq!((0 as $T).lcm(&0), 0 as $T);
                assert_eq!((1 as $T).lcm(&1), 1 as $T);
                assert_eq!((8 as $T).lcm(&9), 72 as $T);
                assert_eq!((11 as $T).lcm(&5), 55 as $T);
                assert_eq!((15 as $T).lcm(&17), 255 as $T);
            }

            #[test]
            fn test_is_multiple_of() {
                assert!((6 as $T).is_multiple_of(&(6 as $T)));
                assert!((6 as $T).is_multiple_of(&(3 as $T)));
                assert!((6 as $T).is_multiple_of(&(1 as $T)));
            }

            #[test]
            fn test_even() {
                assert_eq!((0 as $T).is_even(), true);
                assert_eq!((1 as $T).is_even(), false);
                assert_eq!((2 as $T).is_even(), true);
                assert_eq!((3 as $T).is_even(), false);
                assert_eq!((4 as $T).is_even(), true);
            }

            #[test]
            fn test_odd() {
                assert_eq!((0 as $T).is_odd(), false);
                assert_eq!((1 as $T).is_odd(), true);
                assert_eq!((2 as $T).is_odd(), false);
                assert_eq!((3 as $T).is_odd(), true);
                assert_eq!((4 as $T).is_odd(), false);
            }
        }
    )
}
// Instantiate `Integer` (and a per-type test module) for every unsigned
// primitive integer type.
impl_integer_for_usize!(u8, test_integer_u8);
impl_integer_for_usize!(u16, test_integer_u16);
impl_integer_for_usize!(u32, test_integer_u32);
impl_integer_for_usize!(u64, test_integer_u64);
impl_integer_for_usize!(usize, test_integer_usize);
#[cfg(has_i128)]
impl_integer_for_usize!(u128, test_integer_u128);
/// An iterator over binomial coefficients.
pub struct IterBinomial<T> {
    // The most recently produced coefficient binomial(n, k - 1); starts at 1.
    a: T,
    // The fixed upper argument n of binomial(n, k).
    n: T,
    // The lower argument k of the next coefficient to produce.
    k: T,
}
impl<T> IterBinomial<T>
    where T: Integer,
{
    /// For a given n, iterate over all binomial coefficients binomial(n, k), for k=0...n.
    ///
    /// Note that this might overflow, depending on `T`. For the primitive
    /// integer types, the following n are the largest ones for which there will
    /// be no overflow:
    ///
    /// type | n
    /// -----|---
    /// u8   | 10
    /// i8   | 9
    /// u16  | 18
    /// i16  | 17
    /// u32  | 34
    /// i32  | 33
    /// u64  | 67
    /// i64  | 66
    ///
    /// For larger n, `T` should be a bigint type.
    pub fn new(n: T) -> IterBinomial<T> {
        // Start at k = 0 with binomial(n, 0) = 1.
        IterBinomial {
            a: T::one(),
            n: n,
            k: T::zero(),
        }
    }
}
impl<T> Iterator for IterBinomial<T>
    where T: Integer + Clone
{
    type Item = T;

    fn next(&mut self) -> Option<T> {
        // The sequence is finished once k has passed n.
        if self.k > self.n {
            return None;
        }
        // Update the previous coefficient incrementally:
        // binomial(n, k) = binomial(n, k - 1) * (n - k + 1) / k,
        // computed via multiply_and_divide to avoid fractions/overflow.
        self.a = if !self.k.is_zero() {
            multiply_and_divide(
                self.a.clone(),
                self.n.clone() - self.k.clone() + T::one(),
                self.k.clone()
            )
        } else {
            // binomial(n, 0) = 1
            T::one()
        };
        self.k = self.k.clone() + T::one();
        Some(self.a.clone())
    }
}
/// Calculate r * a / b, avoiding overflows and fractions.
///
/// Assumes that b divides r * a evenly.
fn multiply_and_divide<T: Integer + Clone>(r: T, a: T, b: T) -> T {
    // Cancel the common factor g = gcd(r, b) before multiplying so the
    // intermediate values stay small.
    // See http://blog.plover.com/math/choose-2.html for the idea.
    let g = gcd(r.clone(), b.clone());
    let reduced_r = r / g.clone();
    let reduced_b = b / g;
    reduced_r * (a / reduced_b)
}
/// Calculate the binomial coefficient.
///
/// Note that this might overflow, depending on `T`. For the primitive integer
/// types, the following n are the largest ones possible such that there will
/// be no overflow for any k:
///
/// type | n
/// -----|---
/// u8   | 10
/// i8   | 9
/// u16  | 18
/// i16  | 17
/// u32  | 34
/// i32  | 33
/// u64  | 67
/// i64  | 66
///
/// For larger n, consider using a bigint type for `T`.
pub fn binomial<T: Integer + Clone>(mut n: T, k: T) -> T {
    // See http://blog.plover.com/math/choose.html for the idea.
    if k > n {
        return T::zero();
    }
    // Exploit the symmetry binomial(n, k) == binomial(n, n - k) so that at
    // most n / 2 multiplication steps are needed.
    if k > n.clone() - k.clone() {
        return binomial(n.clone(), n - k);
    }
    // Build the result as n/1 * (n-1)/2 * ... * (n-k+1)/k, each step kept
    // integral by multiply_and_divide.
    let mut result = T::one();
    let mut i = T::one();
    while i <= k {
        result = multiply_and_divide(result, n.clone(), i.clone());
        n = n - T::one();
        i = i + T::one();
    }
    result
}
/// Calculate the multinomial coefficient.
///
/// Computed as a product of binomial coefficients over the prefix sums of
/// `k`: multinomial(k1, ..., km) = prod_i binomial(k1 + ... + ki, ki).
pub fn multinomial<T: Integer + Clone>(k: &[T]) -> T
    where for<'a> T: Add<&'a T, Output = T>
{
    let mut result = T::one();
    let mut prefix_sum = T::zero();
    for count in k {
        prefix_sum = prefix_sum + count;
        result = result * binomial(prefix_sum.clone(), count.clone());
    }
    result
}
#[test]
fn test_lcm_overflow() {
    // Regression test: lcm must avoid computing the raw product x * y
    // (which overflows here) even though the final LCM is representable.
    macro_rules! check {
        ($t:ty, $x:expr, $y:expr, $r:expr) => { {
            let x: $t = $x;
            let y: $t = $y;
            let o = x.checked_mul(y);
            // Sanity check: if the product did not overflow, this case
            // would not exercise the overflow-avoiding path.
            assert!(o.is_none(),
                    "sanity checking that {} input {} * {} overflows",
                    stringify!($t), x, y);
            assert_eq!(x.lcm(&y), $r);
            assert_eq!(y.lcm(&x), $r);
        } }
    }

    // Original bug (Issue #166)
    check!(i64, 46656000000000000, 600, 46656000000000000);

    check!(i8, 0x40, 0x04, 0x40);
    check!(u8, 0x80, 0x02, 0x80);
    check!(i16, 0x40_00, 0x04, 0x40_00);
    check!(u16, 0x80_00, 0x02, 0x80_00);
    check!(i32, 0x4000_0000, 0x04, 0x4000_0000);
    check!(u32, 0x8000_0000, 0x02, 0x8000_0000);
    check!(i64, 0x4000_0000_0000_0000, 0x04, 0x4000_0000_0000_0000);
    check!(u64, 0x8000_0000_0000_0000, 0x02, 0x8000_0000_0000_0000);
}
#[test]
fn test_iter_binomial() {
    // The iterator must produce the full row of Pascal's triangle for n,
    // and agree with the direct `binomial` function at every k.
    macro_rules! check_simple {
        ($t:ty) => { {
            let n: $t = 3;
            let expected = [1, 3, 3, 1];
            for (b, &e) in IterBinomial::new(n).zip(&expected) {
                assert_eq!(b, e);
            }
        } }
    }

    check_simple!(u8);
    check_simple!(i8);
    check_simple!(u16);
    check_simple!(i16);
    check_simple!(u32);
    check_simple!(i32);
    check_simple!(u64);
    check_simple!(i64);

    macro_rules! check_binomial {
        ($t:ty, $n:expr) => { {
            let n: $t = $n;
            let mut k: $t = 0;
            for b in IterBinomial::new(n) {
                assert_eq!(b, binomial(n, k));
                k += 1;
            }
        } }
    }

    // Check the largest n for which there is no overflow.
    check_binomial!(u8, 10);
    check_binomial!(i8, 9);
    check_binomial!(u16, 18);
    check_binomial!(i16, 17);
    check_binomial!(u32, 34);
    check_binomial!(i32, 33);
    check_binomial!(u64, 67);
    check_binomial!(i64, 66);
}
#[test]
fn test_binomial() {
    // Checks known binomial values, the k > n => 0 convention, and the
    // symmetry binomial(n, k) == binomial(n, n - k).
    macro_rules! check {
        ($t:ty, $x:expr, $y:expr, $r:expr) => { {
            let x: $t = $x;
            let y: $t = $y;
            let expected: $t = $r;
            assert_eq!(binomial(x, y), expected);
            if y <= x {
                assert_eq!(binomial(x, x - y), expected);
            }
        } }
    }
    check!(u8, 9, 4, 126);
    check!(u8, 0, 0, 1);
    check!(u8, 2, 3, 0);

    check!(i8, 9, 4, 126);
    check!(i8, 0, 0, 1);
    check!(i8, 2, 3, 0);

    check!(u16, 100, 2, 4950);
    check!(u16, 14, 4, 1001);
    check!(u16, 0, 0, 1);
    check!(u16, 2, 3, 0);

    check!(i16, 100, 2, 4950);
    check!(i16, 14, 4, 1001);
    check!(i16, 0, 0, 1);
    check!(i16, 2, 3, 0);

    check!(u32, 100, 2, 4950);
    check!(u32, 35, 11, 417225900);
    check!(u32, 14, 4, 1001);
    check!(u32, 0, 0, 1);
    check!(u32, 2, 3, 0);

    check!(i32, 100, 2, 4950);
    check!(i32, 35, 11, 417225900);
    check!(i32, 14, 4, 1001);
    check!(i32, 0, 0, 1);
    check!(i32, 2, 3, 0);

    check!(u64, 100, 2, 4950);
    check!(u64, 35, 11, 417225900);
    check!(u64, 14, 4, 1001);
    check!(u64, 0, 0, 1);
    check!(u64, 2, 3, 0);

    check!(i64, 100, 2, 4950);
    check!(i64, 35, 11, 417225900);
    check!(i64, 14, 4, 1001);
    check!(i64, 0, 0, 1);
    check!(i64, 2, 3, 0);
}
#[test]
fn test_multinomial() {
    // For two parts, multinomial(a, b) must equal binomial(a + b, a).
    macro_rules! check_binomial {
        ($t:ty, $k:expr) => { {
            let n: $t = $k.iter().fold(0, |acc, &x| acc + x);
            let k: &[$t] = $k;
            assert_eq!(k.len(), 2);
            assert_eq!(multinomial(k), binomial(n, k[0]));
        } }
    }

    check_binomial!(u8, &[4, 5]);

    check_binomial!(i8, &[4, 5]);

    check_binomial!(u16, &[2, 98]);
    check_binomial!(u16, &[4, 10]);

    check_binomial!(i16, &[2, 98]);
    check_binomial!(i16, &[4, 10]);

    check_binomial!(u32, &[2, 98]);
    check_binomial!(u32, &[11, 24]);
    check_binomial!(u32, &[4, 10]);

    check_binomial!(i32, &[2, 98]);
    check_binomial!(i32, &[11, 24]);
    check_binomial!(i32, &[4, 10]);

    check_binomial!(u64, &[2, 98]);
    check_binomial!(u64, &[11, 24]);
    check_binomial!(u64, &[4, 10]);

    check_binomial!(i64, &[2, 98]);
    check_binomial!(i64, &[11, 24]);
    check_binomial!(i64, &[4, 10]);

    // Direct checks with more than two parts, plus the degenerate
    // empty / single-part cases (both 1 by definition).
    macro_rules! check_multinomial {
        ($t:ty, $k:expr, $r:expr) => { {
            let k: &[$t] = $k;
            let expected: $t = $r;
            assert_eq!(multinomial(k), expected);
        } }
    }

    check_multinomial!(u8, &[2, 1, 2], 30);
    check_multinomial!(u8, &[2, 3, 0], 10);

    check_multinomial!(i8, &[2, 1, 2], 30);
    check_multinomial!(i8, &[2, 3, 0], 10);

    check_multinomial!(u16, &[2, 1, 2], 30);
    check_multinomial!(u16, &[2, 3, 0], 10);

    check_multinomial!(i16, &[2, 1, 2], 30);
    check_multinomial!(i16, &[2, 3, 0], 10);

    check_multinomial!(u32, &[2, 1, 2], 30);
    check_multinomial!(u32, &[2, 3, 0], 10);

    check_multinomial!(i32, &[2, 1, 2], 30);
    check_multinomial!(i32, &[2, 3, 0], 10);

    check_multinomial!(u64, &[2, 1, 2], 30);
    check_multinomial!(u64, &[2, 3, 0], 10);

    check_multinomial!(i64, &[2, 1, 2], 30);
    check_multinomial!(i64, &[2, 3, 0], 10);

    check_multinomial!(u64, &[], 1);
    check_multinomial!(u64, &[0], 1);
    check_multinomial!(u64, &[12345], 1);
}
// lcm(0, 0) = 0 — the defined value for the zero/zero case.
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Integer trait and functions.
//!
//! ## Compatibility
//!
//! The `num-integer` crate is tested for rustc 1.8 and greater.
#![doc(html_root_url = "https://docs.rs/num-integer/0.1")]
#![no_std]
#[cfg(feature = "std")]
extern crate std;
extern crate num_traits as traits;
use core::ops::Add;
use traits::{Num, Signed, Zero};
mod roots;
pub use roots::Roots;
pub use roots::{sqrt, cbrt, nth_root};
/// Common operations for integer types: floored division and modulus,
/// GCD/LCM, parity tests, and combined division/remainder, built on top of
/// the `Num` trait from `num-traits`.
pub trait Integer: Sized + Num + PartialOrd + Ord + Eq {
    /// Floored integer division.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert!(( 8).div_floor(& 3) ==  2);
    /// assert!(( 8).div_floor(&-3) == -3);
    /// assert!((-8).div_floor(& 3) == -3);
    /// assert!((-8).div_floor(&-3) ==  2);
    ///
    /// assert!(( 1).div_floor(& 2) ==  0);
    /// assert!(( 1).div_floor(&-2) == -1);
    /// assert!((-1).div_floor(& 2) == -1);
    /// assert!((-1).div_floor(&-2) ==  0);
    /// ~~~
    fn div_floor(&self, other: &Self) -> Self;

    /// Floored integer modulo, satisfying:
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// # let n = 1; let d = 1;
    /// assert!(n.div_floor(&d) * d + n.mod_floor(&d) == n)
    /// ~~~
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert!(( 8).mod_floor(& 3) ==  2);
    /// assert!(( 8).mod_floor(&-3) == -1);
    /// assert!((-8).mod_floor(& 3) ==  1);
    /// assert!((-8).mod_floor(&-3) == -2);
    ///
    /// assert!(( 1).mod_floor(& 2) ==  1);
    /// assert!(( 1).mod_floor(&-2) == -1);
    /// assert!((-1).mod_floor(& 2) ==  1);
    /// assert!((-1).mod_floor(&-2) == -1);
    /// ~~~
    fn mod_floor(&self, other: &Self) -> Self;

    /// Greatest Common Divisor (GCD).
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(6.gcd(&8), 2);
    /// assert_eq!(7.gcd(&3), 1);
    /// ~~~
    fn gcd(&self, other: &Self) -> Self;

    /// Lowest Common Multiple (LCM).
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(7.lcm(&3), 21);
    /// assert_eq!(2.lcm(&4), 4);
    /// assert_eq!(0.lcm(&0), 0);
    /// ~~~
    fn lcm(&self, other: &Self) -> Self;

    /// Deprecated, use `is_multiple_of` instead.
    fn divides(&self, other: &Self) -> bool;

    /// Returns `true` if `self` is a multiple of `other`.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(9.is_multiple_of(&3), true);
    /// assert_eq!(3.is_multiple_of(&9), false);
    /// ~~~
    fn is_multiple_of(&self, other: &Self) -> bool;

    /// Returns `true` if the number is even.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(3.is_even(), false);
    /// assert_eq!(4.is_even(), true);
    /// ~~~
    fn is_even(&self) -> bool;

    /// Returns `true` if the number is odd.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(3.is_odd(), true);
    /// assert_eq!(4.is_odd(), false);
    /// ~~~
    fn is_odd(&self) -> bool;

    /// Simultaneous truncated integer division and modulus.
    /// Returns `(quotient, remainder)`.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(( 8).div_rem( &3), ( 2,  2));
    /// assert_eq!(( 8).div_rem(&-3), (-2,  2));
    /// assert_eq!((-8).div_rem( &3), (-2, -2));
    /// assert_eq!((-8).div_rem(&-3), ( 2, -2));
    ///
    /// assert_eq!(( 1).div_rem( &2), ( 0,  1));
    /// assert_eq!(( 1).div_rem(&-2), ( 0,  1));
    /// assert_eq!((-1).div_rem( &2), ( 0, -1));
    /// assert_eq!((-1).div_rem(&-2), ( 0, -1));
    /// ~~~
    #[inline]
    fn div_rem(&self, other: &Self) -> (Self, Self);

    /// Simultaneous floored integer division and modulus.
    /// Returns `(quotient, remainder)`.
    ///
    /// # Examples
    ///
    /// ~~~
    /// # use num_integer::Integer;
    /// assert_eq!(( 8).div_mod_floor( &3), ( 2,  2));
    /// assert_eq!(( 8).div_mod_floor(&-3), (-3, -1));
    /// assert_eq!((-8).div_mod_floor( &3), (-3,  1));
    /// assert_eq!((-8).div_mod_floor(&-3), ( 2, -2));
    ///
    /// assert_eq!(( 1).div_mod_floor( &2), ( 0,  1));
    /// assert_eq!(( 1).div_mod_floor(&-2), (-1, -1));
    /// assert_eq!((-1).div_mod_floor( &2), (-1,  1));
    /// assert_eq!((-1).div_mod_floor(&-2), ( 0, -1));
    /// ~~~
    // Default implementation; types can override with a fused computation.
    fn div_mod_floor(&self, other: &Self) -> (Self, Self) {
        (self.div_floor(other), self.mod_floor(other))
    }
}
/// Simultaneous integer division and modulus
// Free-function convenience wrapper for `Integer::div_rem`.
#[inline]
pub fn div_rem<T: Integer>(x: T, y: T) -> (T, T) {
    x.div_rem(&y)
}
/// Floored integer division
// Free-function convenience wrapper for `Integer::div_floor`.
#[inline]
pub fn div_floor<T: Integer>(x: T, y: T) -> T {
    x.div_floor(&y)
}
/// Floored integer modulus
// Free-function convenience wrapper for `Integer::mod_floor`.
#[inline]
pub fn mod_floor<T: Integer>(x: T, y: T) -> T {
    x.mod_floor(&y)
}
/// Simultaneous floored integer division and modulus
// Free-function convenience wrapper for `Integer::div_mod_floor`.
#[inline]
pub fn div_mod_floor<T: Integer>(x: T, y: T) -> (T, T) {
    x.div_mod_floor(&y)
}
/// Calculates the Greatest Common Divisor (GCD) of the number and `other`. The
/// result is always positive.
// Free-function convenience wrapper for `Integer::gcd`.
#[inline(always)]
pub fn gcd<T: Integer>(x: T, y: T) -> T {
    x.gcd(&y)
}
/// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
// Free-function convenience wrapper for `Integer::lcm`.
#[inline(always)]
pub fn lcm<T: Integer>(x: T, y: T) -> T {
    x.lcm(&y)
}
// Implements `Integer` for a signed primitive type, plus a `#[cfg(test)]`
// module exercising the implementation for that type. The sign handling
// (floored vs. truncated division, MIN-value gcd) is deliberately explicit.
macro_rules! impl_integer_for_isize {
    ($T:ty, $test_mod:ident) => (
        impl Integer for $T {
            /// Floored integer division
            #[inline]
            fn div_floor(&self, other: &Self) -> Self {
                // Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
                // December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
                // `/` truncates toward zero; when the remainder is non-zero and
                // the operand signs differ, floor is one below the truncated quotient.
                match self.div_rem(other) {
                    (d, r) if (r > 0 && *other < 0)
                           || (r < 0 && *other > 0) => d - 1,
                    (d, _) => d,
                }
            }

            /// Floored integer modulo
            #[inline]
            fn mod_floor(&self, other: &Self) -> Self {
                // Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
                // December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
                // Shift `%`'s truncated remainder into the divisor's sign range.
                match *self % *other {
                    r if (r > 0 && *other < 0)
                      || (r < 0 && *other > 0) => r + *other,
                    r => r,
                }
            }

            /// Calculates `div_floor` and `mod_floor` simultaneously
            #[inline]
            fn div_mod_floor(&self, other: &Self) -> (Self, Self) {
                // Algorithm from [Daan Leijen. _Division and Modulus for Computer Scientists_,
                // December 2001](http://research.microsoft.com/pubs/151917/divmodnote-letter.pdf)
                match self.div_rem(other) {
                    (d, r) if (r > 0 && *other < 0)
                           || (r < 0 && *other > 0) => (d - 1, r + *other),
                    (d, r) => (d, r),
                }
            }

            /// Calculates the Greatest Common Divisor (GCD) of the number and
            /// `other`. The result is always positive.
            #[inline]
            fn gcd(&self, other: &Self) -> Self {
                // Use Stein's algorithm
                let mut m = *self;
                let mut n = *other;
                if m == 0 || n == 0 { return (m | n).abs() }

                // find common factors of 2
                let shift = (m | n).trailing_zeros();

                // The algorithm needs positive numbers, but the minimum value
                // can't be represented as a positive one.
                // It's also a power of two, so the gcd can be
                // calculated by bitshifting in that case

                // Assuming two's complement, the number created by the shift
                // is positive for all numbers except gcd = abs(min value)
                // The call to .abs() causes a panic in debug mode
                if m == Self::min_value() || n == Self::min_value() {
                    return (1 << shift).abs()
                }

                // guaranteed to be positive now, rest like unsigned algorithm
                m = m.abs();
                n = n.abs();

                // divide n and m by 2 until odd
                m >>= m.trailing_zeros();
                n >>= n.trailing_zeros();

                while m != n {
                    if m > n {
                        m -= n;
                        m >>= m.trailing_zeros();
                    } else {
                        n -= m;
                        n >>= n.trailing_zeros();
                    }
                }
                m << shift
            }

            /// Calculates the Lowest Common Multiple (LCM) of the number and
            /// `other`.
            #[inline]
            fn lcm(&self, other: &Self) -> Self {
                // lcm(0, 0) is defined as 0; gcd(0, 0) == 0, so the division
                // below must be guarded against the zero/zero case.
                if self.is_zero() && other.is_zero() { Self::zero() }
                else { // should not have to recalculate abs
                    (*self * (*other / self.gcd(other))).abs() }
            }

            /// Deprecated, use `is_multiple_of` instead.
            #[inline]
            fn divides(&self, other: &Self) -> bool {
                self.is_multiple_of(other)
            }

            /// Returns `true` if the number is a multiple of `other`.
            #[inline]
            fn is_multiple_of(&self, other: &Self) -> bool {
                *self % *other == 0
            }

            /// Returns `true` if the number is divisible by `2`
            #[inline]
            fn is_even(&self) -> bool { (*self) & 1 == 0 }

            /// Returns `true` if the number is not divisible by `2`
            #[inline]
            fn is_odd(&self) -> bool { !self.is_even() }

            /// Simultaneous truncated integer division and modulus.
            #[inline]
            fn div_rem(&self, other: &Self) -> (Self, Self) {
                (*self / *other, *self % *other)
            }
        }

        #[cfg(test)]
        mod $test_mod {
            use Integer;
            use core::mem;

            /// Checks that the division rule holds for:
            ///
            /// - `n`: numerator (dividend)
            /// - `d`: denominator (divisor)
            /// - `qr`: quotient and remainder
            #[cfg(test)]
            fn test_division_rule((n,d): ($T, $T), (q,r): ($T, $T)) {
                assert_eq!(d * q + r, n);
            }

            #[test]
            fn test_div_rem() {
                // Truncated division rounds toward zero.
                fn test_nd_dr(nd: ($T,$T), qr: ($T,$T)) {
                    let (n,d) = nd;
                    let separate_div_rem = (n / d, n % d);
                    let combined_div_rem = n.div_rem(&d);

                    assert_eq!(separate_div_rem, qr);
                    assert_eq!(combined_div_rem, qr);

                    test_division_rule(nd, separate_div_rem);
                    test_division_rule(nd, combined_div_rem);
                }

                test_nd_dr(( 8, 3), ( 2, 2));
                test_nd_dr(( 8, -3), (-2, 2));
                test_nd_dr((-8, 3), (-2, -2));
                test_nd_dr((-8, -3), ( 2, -2));

                test_nd_dr(( 1, 2), ( 0, 1));
                test_nd_dr(( 1, -2), ( 0, 1));
                test_nd_dr((-1, 2), ( 0, -1));
                test_nd_dr((-1, -2), ( 0, -1));
            }

            #[test]
            fn test_div_mod_floor() {
                // Floored division rounds toward negative infinity.
                fn test_nd_dm(nd: ($T,$T), dm: ($T,$T)) {
                    let (n,d) = nd;
                    let separate_div_mod_floor = (n.div_floor(&d), n.mod_floor(&d));
                    let combined_div_mod_floor = n.div_mod_floor(&d);

                    assert_eq!(separate_div_mod_floor, dm);
                    assert_eq!(combined_div_mod_floor, dm);

                    test_division_rule(nd, separate_div_mod_floor);
                    test_division_rule(nd, combined_div_mod_floor);
                }

                test_nd_dm(( 8, 3), ( 2, 2));
                test_nd_dm(( 8, -3), (-3, -1));
                test_nd_dm((-8, 3), (-3, 1));
                test_nd_dm((-8, -3), ( 2, -2));

                test_nd_dm(( 1, 2), ( 0, 1));
                test_nd_dm(( 1, -2), (-1, -1));
                test_nd_dm((-1, 2), (-1, 1));
                test_nd_dm((-1, -2), ( 0, -1));
            }

            #[test]
            fn test_gcd() {
                assert_eq!((10 as $T).gcd(&2), 2 as $T);
                assert_eq!((10 as $T).gcd(&3), 1 as $T);
                assert_eq!((0 as $T).gcd(&3), 3 as $T);
                assert_eq!((3 as $T).gcd(&3), 3 as $T);
                assert_eq!((56 as $T).gcd(&42), 14 as $T);
                assert_eq!((3 as $T).gcd(&-3), 3 as $T);
                assert_eq!((-6 as $T).gcd(&3), 3 as $T);
                assert_eq!((-4 as $T).gcd(&-2), 2 as $T);
            }

            #[test]
            fn test_gcd_cmp_with_euclidean() {
                // Cross-check Stein's algorithm against the Euclidean algorithm.
                fn euclidean_gcd(mut m: $T, mut n: $T) -> $T {
                    while m != 0 {
                        mem::swap(&mut m, &mut n);
                        m %= n;
                    }

                    n.abs()
                }

                // gcd(-128, b) = 128 is not representable as positive value
                // for i8
                for i in -127..127 {
                    for j in -127..127 {
                        assert_eq!(euclidean_gcd(i,j), i.gcd(&j));
                    }
                }

                // last value
                // FIXME: Use inclusive ranges for above loop when implemented
                let i = 127;
                for j in -127..127 {
                    assert_eq!(euclidean_gcd(i,j), i.gcd(&j));
                }
                assert_eq!(127.gcd(&127), 127);
            }

            #[test]
            fn test_gcd_min_val() {
                let min = <$T>::min_value();
                let max = <$T>::max_value();
                let max_pow2 = max / 2 + 1;
                assert_eq!(min.gcd(&max), 1 as $T);
                assert_eq!(max.gcd(&min), 1 as $T);
                assert_eq!(min.gcd(&max_pow2), max_pow2);
                assert_eq!(max_pow2.gcd(&min), max_pow2);
                assert_eq!(min.gcd(&42), 2 as $T);
                assert_eq!((42 as $T).gcd(&min), 2 as $T);
            }

            #[test]
            #[should_panic]
            fn test_gcd_min_val_min_val() {
                // gcd(MIN, MIN) = |MIN| is not representable, so this must panic.
                let min = <$T>::min_value();
                assert!(min.gcd(&min) >= 0);
            }

            #[test]
            #[should_panic]
            fn test_gcd_min_val_0() {
                let min = <$T>::min_value();
                assert!(min.gcd(&0) >= 0);
            }

            #[test]
            #[should_panic]
            fn test_gcd_0_min_val() {
                let min = <$T>::min_value();
                assert!((0 as $T).gcd(&min) >= 0);
            }

            #[test]
            fn test_lcm() {
                assert_eq!((1 as $T).lcm(&0), 0 as $T);
                assert_eq!((0 as $T).lcm(&1), 0 as $T);
                assert_eq!((1 as $T).lcm(&1), 1 as $T);
                assert_eq!((-1 as $T).lcm(&1), 1 as $T);
                assert_eq!((1 as $T).lcm(&-1), 1 as $T);
                assert_eq!((-1 as $T).lcm(&-1), 1 as $T);
                assert_eq!((8 as $T).lcm(&9), 72 as $T);
                assert_eq!((11 as $T).lcm(&5), 55 as $T);
            }

            #[test]
            fn test_even() {
                assert_eq!((-4 as $T).is_even(), true);
                assert_eq!((-3 as $T).is_even(), false);
                assert_eq!((-2 as $T).is_even(), true);
                assert_eq!((-1 as $T).is_even(), false);
                assert_eq!((0 as $T).is_even(), true);
                assert_eq!((1 as $T).is_even(), false);
                assert_eq!((2 as $T).is_even(), true);
                assert_eq!((3 as $T).is_even(), false);
                assert_eq!((4 as $T).is_even(), true);
            }

            #[test]
            fn test_odd() {
                assert_eq!((-4 as $T).is_odd(), false);
                assert_eq!((-3 as $T).is_odd(), true);
                assert_eq!((-2 as $T).is_odd(), false);
                assert_eq!((-1 as $T).is_odd(), true);
                assert_eq!((0 as $T).is_odd(), false);
                assert_eq!((1 as $T).is_odd(), true);
                assert_eq!((2 as $T).is_odd(), false);
                assert_eq!((3 as $T).is_odd(), true);
                assert_eq!((4 as $T).is_odd(), false);
            }
        }
    )
}
// Instantiate `Integer` (and a per-type test module) for every signed
// primitive integer type.
impl_integer_for_isize!(i8, test_integer_i8);
impl_integer_for_isize!(i16, test_integer_i16);
impl_integer_for_isize!(i32, test_integer_i32);
impl_integer_for_isize!(i64, test_integer_i64);
impl_integer_for_isize!(isize, test_integer_isize);
#[cfg(has_i128)]
impl_integer_for_isize!(i128, test_integer_i128);
// Implements `Integer` for an unsigned primitive type, plus a `#[cfg(test)]`
// module exercising the implementation for that type.
macro_rules! impl_integer_for_usize {
    ($T:ty, $test_mod:ident) => (
        impl Integer for $T {
            /// Unsigned integer division. Returns the same result as `div` (`/`).
            #[inline]
            fn div_floor(&self, other: &Self) -> Self {
                *self / *other
            }

            /// Unsigned integer modulo operation. Returns the same result as `rem` (`%`).
            #[inline]
            fn mod_floor(&self, other: &Self) -> Self {
                *self % *other
            }

            /// Calculates the Greatest Common Divisor (GCD) of the number and `other`
            #[inline]
            fn gcd(&self, other: &Self) -> Self {
                // Use Stein's algorithm
                let mut m = *self;
                let mut n = *other;
                // gcd(x, 0) == x and gcd(0, 0) == 0.
                if m == 0 || n == 0 { return m | n }

                // find common factors of 2
                let shift = (m | n).trailing_zeros();

                // divide n and m by 2 until odd
                m >>= m.trailing_zeros();
                n >>= n.trailing_zeros();

                while m != n {
                    if m > n {
                        m -= n;
                        m >>= m.trailing_zeros();
                    } else {
                        n -= m;
                        n >>= n.trailing_zeros();
                    }
                }
                m << shift
            }

            /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
            #[inline]
            fn lcm(&self, other: &Self) -> Self {
                // lcm(0, 0) is defined as 0; gcd(0, 0) == 0, so the division
                // below must be guarded against the zero/zero case.
                if self.is_zero() && other.is_zero() { Self::zero() }
                else { *self * (*other / self.gcd(other)) }
            }

            /// Deprecated, use `is_multiple_of` instead.
            #[inline]
            fn divides(&self, other: &Self) -> bool {
                self.is_multiple_of(other)
            }

            /// Returns `true` if the number is a multiple of `other`.
            #[inline]
            fn is_multiple_of(&self, other: &Self) -> bool {
                *self % *other == 0
            }

            /// Returns `true` if the number is divisible by `2`.
            #[inline]
            fn is_even(&self) -> bool {
                *self % 2 == 0
            }

            /// Returns `true` if the number is not divisible by `2`.
            #[inline]
            fn is_odd(&self) -> bool {
                !self.is_even()
            }

            /// Simultaneous truncated integer division and modulus.
            #[inline]
            fn div_rem(&self, other: &Self) -> (Self, Self) {
                (*self / *other, *self % *other)
            }
        }

        #[cfg(test)]
        mod $test_mod {
            use Integer;
            use core::mem;

            #[test]
            fn test_div_mod_floor() {
                assert_eq!((10 as $T).div_floor(&(3 as $T)), 3 as $T);
                assert_eq!((10 as $T).mod_floor(&(3 as $T)), 1 as $T);
                assert_eq!((10 as $T).div_mod_floor(&(3 as $T)), (3 as $T, 1 as $T));
                assert_eq!((5 as $T).div_floor(&(5 as $T)), 1 as $T);
                assert_eq!((5 as $T).mod_floor(&(5 as $T)), 0 as $T);
                assert_eq!((5 as $T).div_mod_floor(&(5 as $T)), (1 as $T, 0 as $T));
                assert_eq!((3 as $T).div_floor(&(7 as $T)), 0 as $T);
                assert_eq!((3 as $T).mod_floor(&(7 as $T)), 3 as $T);
                assert_eq!((3 as $T).div_mod_floor(&(7 as $T)), (0 as $T, 3 as $T));
            }

            #[test]
            fn test_gcd() {
                assert_eq!((10 as $T).gcd(&2), 2 as $T);
                assert_eq!((10 as $T).gcd(&3), 1 as $T);
                assert_eq!((0 as $T).gcd(&3), 3 as $T);
                assert_eq!((3 as $T).gcd(&3), 3 as $T);
                assert_eq!((56 as $T).gcd(&42), 14 as $T);
            }

            #[test]
            fn test_gcd_cmp_with_euclidean() {
                // Cross-check Stein's algorithm against the Euclidean algorithm.
                fn euclidean_gcd(mut m: $T, mut n: $T) -> $T {
                    while m != 0 {
                        mem::swap(&mut m, &mut n);
                        m %= n;
                    }
                    n
                }

                for i in 0..255 {
                    for j in 0..255 {
                        assert_eq!(euclidean_gcd(i,j), i.gcd(&j));
                    }
                }

                // last value
                // FIXME: Use inclusive ranges for above loop when implemented
                let i = 255;
                for j in 0..255 {
                    assert_eq!(euclidean_gcd(i,j), i.gcd(&j));
                }
                assert_eq!(255.gcd(&255), 255);
            }

            #[test]
            fn test_lcm() {
                assert_eq!((1 as $T).lcm(&0), 0 as $T);
                assert_eq!((0 as $T).lcm(&1), 0 as $T);
                assert_eq!((1 as $T).lcm(&1), 1 as $T);
                assert_eq!((8 as $T).lcm(&9), 72 as $T);
                assert_eq!((11 as $T).lcm(&5), 55 as $T);
                assert_eq!((15 as $T).lcm(&17), 255 as $T);
            }

            #[test]
            fn test_is_multiple_of() {
                assert!((6 as $T).is_multiple_of(&(6 as $T)));
                assert!((6 as $T).is_multiple_of(&(3 as $T)));
                assert!((6 as $T).is_multiple_of(&(1 as $T)));
            }

            #[test]
            fn test_even() {
                assert_eq!((0 as $T).is_even(), true);
                assert_eq!((1 as $T).is_even(), false);
                assert_eq!((2 as $T).is_even(), true);
                assert_eq!((3 as $T).is_even(), false);
                assert_eq!((4 as $T).is_even(), true);
            }

            #[test]
            fn test_odd() {
                assert_eq!((0 as $T).is_odd(), false);
                assert_eq!((1 as $T).is_odd(), true);
                assert_eq!((2 as $T).is_odd(), false);
                assert_eq!((3 as $T).is_odd(), true);
                assert_eq!((4 as $T).is_odd(), false);
            }
        }
    )
}
// Instantiate `Integer` (and a per-type test module) for every unsigned
// primitive integer type.
impl_integer_for_usize!(u8, test_integer_u8);
impl_integer_for_usize!(u16, test_integer_u16);
impl_integer_for_usize!(u32, test_integer_u32);
impl_integer_for_usize!(u64, test_integer_u64);
impl_integer_for_usize!(usize, test_integer_usize);
#[cfg(has_i128)]
impl_integer_for_usize!(u128, test_integer_u128);
/// An iterator over binomial coefficients.
pub struct IterBinomial<T> {
    // The most recently produced coefficient binomial(n, k - 1); starts at 1.
    a: T,
    // The fixed upper argument n of binomial(n, k).
    n: T,
    // The lower argument k of the next coefficient to produce.
    k: T,
}
impl<T> IterBinomial<T>
    where T: Integer,
{
    /// For a given n, iterate over all binomial coefficients binomial(n, k), for k=0...n.
    ///
    /// Note that this might overflow, depending on `T`. For the primitive
    /// integer types, the following n are the largest ones for which there will
    /// be no overflow:
    ///
    /// type | n
    /// -----|---
    /// u8   | 10
    /// i8   | 9
    /// u16  | 18
    /// i16  | 17
    /// u32  | 34
    /// i32  | 33
    /// u64  | 67
    /// i64  | 66
    ///
    /// For larger n, `T` should be a bigint type.
    pub fn new(n: T) -> IterBinomial<T> {
        // Start at k = 0 with binomial(n, 0) = 1.
        IterBinomial {
            a: T::one(),
            n: n,
            k: T::zero(),
        }
    }
}
impl<T> Iterator for IterBinomial<T>
    where T: Integer + Clone
{
    type Item = T;

    fn next(&mut self) -> Option<T> {
        // The sequence is finished once k has passed n.
        if self.k > self.n {
            return None;
        }
        // Update the previous coefficient incrementally:
        // binomial(n, k) = binomial(n, k - 1) * (n - k + 1) / k,
        // computed via multiply_and_divide to avoid fractions/overflow.
        self.a = if !self.k.is_zero() {
            multiply_and_divide(
                self.a.clone(),
                self.n.clone() - self.k.clone() + T::one(),
                self.k.clone()
            )
        } else {
            // binomial(n, 0) = 1
            T::one()
        };
        self.k = self.k.clone() + T::one();
        Some(self.a.clone())
    }
}
/// Calculate r * a / b, avoiding overflows and fractions.
///
/// Assumes that b divides r * a evenly.
fn multiply_and_divide<T: Integer + Clone>(r: T, a: T, b: T) -> T {
    // Cancel the common factor g = gcd(r, b) before multiplying so the
    // intermediate values stay small.
    // See http://blog.plover.com/math/choose-2.html for the idea.
    let g = gcd(r.clone(), b.clone());
    let reduced_r = r / g.clone();
    let reduced_b = b / g;
    reduced_r * (a / reduced_b)
}
/// Calculate the binomial coefficient.
///
/// Note that this might overflow, depending on `T`. For the primitive integer
/// types, the following n are the largest ones possible such that there will
/// be no overflow for any k:
///
/// type | n
/// -----|---
/// u8   | 10
/// i8   | 9
/// u16  | 18
/// i16  | 17
/// u32  | 34
/// i32  | 33
/// u64  | 67
/// i64  | 66
///
/// For larger n, consider using a bigint type for `T`.
pub fn binomial<T: Integer + Clone>(mut n: T, k: T) -> T {
    // See http://blog.plover.com/math/choose.html for the idea.
    if k > n {
        return T::zero();
    }
    // Exploit the symmetry binomial(n, k) == binomial(n, n - k) so that at
    // most n / 2 multiplication steps are needed.
    if k > n.clone() - k.clone() {
        return binomial(n.clone(), n - k);
    }
    // Build the result as n/1 * (n-1)/2 * ... * (n-k+1)/k, each step kept
    // integral by multiply_and_divide.
    let mut result = T::one();
    let mut i = T::one();
    while i <= k {
        result = multiply_and_divide(result, n.clone(), i.clone());
        n = n - T::one();
        i = i + T::one();
    }
    result
}
/// Calculate the multinomial coefficient.
pub fn multinomial<T: Integer + Clone>(k: &[T]) -> T
    where for<'a> T: Add<&'a T, Output = T>
{
    // multinomial(k1, ..., km) = prod_i C(k1 + ... + ki, ki),
    // built up incrementally over the partial sums.
    let mut result = T::one();
    let mut partial_sum = T::zero();
    for part in k {
        partial_sum = partial_sum + part;
        result = result * binomial(partial_sum.clone(), part.clone());
    }
    result
}
#[test]
fn test_lcm_overflow() {
    // Each case uses operands whose plain product overflows the type; the
    // macro first sanity-checks that the naive `x * y` would overflow, then
    // verifies that `lcm` still returns the correct, representable result
    // (in both argument orders).
    macro_rules! check {
        ($t:ty, $x:expr, $y:expr, $r:expr) => { {
            let x: $t = $x;
            let y: $t = $y;
            let o = x.checked_mul(y);
            assert!(o.is_none(),
                    "sanity checking that {} input {} * {} overflows",
                    stringify!($t), x, y);
            assert_eq!(x.lcm(&y), $r);
            assert_eq!(y.lcm(&x), $r);
        } }
    }

    // Original bug (Issue #166)
    check!(i64, 46656000000000000, 600, 46656000000000000);

    // One operand divides the other, so lcm == the larger operand even
    // though the product overflows.
    check!(i8, 0x40, 0x04, 0x40);
    check!(u8, 0x80, 0x02, 0x80);
    check!(i16, 0x40_00, 0x04, 0x40_00);
    check!(u16, 0x80_00, 0x02, 0x80_00);
    check!(i32, 0x4000_0000, 0x04, 0x4000_0000);
    check!(u32, 0x8000_0000, 0x02, 0x8000_0000);
    check!(i64, 0x4000_0000_0000_0000, 0x04, 0x4000_0000_0000_0000);
    check!(u64, 0x8000_0000_0000_0000, 0x02, 0x8000_0000_0000_0000);
}
#[test]
fn test_iter_binomial() {
    // Row n = 3 of Pascal's triangle fits in every primitive integer type,
    // so the same expected values are checked across all of them.
    macro_rules! check_simple {
        ($t:ty) => { {
            let n: $t = 3;
            let expected = [1, 3, 3, 1];
            for (b, &e) in IterBinomial::new(n).zip(&expected) {
                assert_eq!(b, e);
            }
        } }
    }

    check_simple!(u8);
    check_simple!(i8);
    check_simple!(u16);
    check_simple!(i16);
    check_simple!(u32);
    check_simple!(i32);
    check_simple!(u64);
    check_simple!(i64);

    // Cross-check the iterator against the direct `binomial` function over a
    // full row of the triangle.
    macro_rules! check_binomial {
        ($t:ty, $n:expr) => { {
            let n: $t = $n;
            let mut k: $t = 0;
            for b in IterBinomial::new(n) {
                assert_eq!(b, binomial(n, k));
                k += 1;
            }
        } }
    }

    // Check the largest n for which there is no overflow.
    check_binomial!(u8, 10);
    check_binomial!(i8, 9);
    check_binomial!(u16, 18);
    check_binomial!(i16, 17);
    check_binomial!(u32, 34);
    check_binomial!(i32, 33);
    check_binomial!(u64, 67);
    check_binomial!(i64, 66);
}
#[test]
fn test_binomial() {
    // Checks an exact value and, when y <= x, the symmetry
    // C(x, y) == C(x, x - y).
    macro_rules! check {
        ($t:ty, $x:expr, $y:expr, $r:expr) => { {
            let x: $t = $x;
            let y: $t = $y;
            let expected: $t = $r;
            assert_eq!(binomial(x, y), expected);
            if y <= x {
                assert_eq!(binomial(x, x - y), expected);
            }
        } }
    }
    // Each type covers: a near-limit value, the edge case C(0, 0) == 1,
    // and k > n (which must yield 0).
    check!(u8, 9, 4, 126);
    check!(u8, 0, 0, 1);
    check!(u8, 2, 3, 0);
    check!(i8, 9, 4, 126);
    check!(i8, 0, 0, 1);
    check!(i8, 2, 3, 0);

    check!(u16, 100, 2, 4950);
    check!(u16, 14, 4, 1001);
    check!(u16, 0, 0, 1);
    check!(u16, 2, 3, 0);
    check!(i16, 100, 2, 4950);
    check!(i16, 14, 4, 1001);
    check!(i16, 0, 0, 1);
    check!(i16, 2, 3, 0);

    check!(u32, 100, 2, 4950);
    check!(u32, 35, 11, 417225900);
    check!(u32, 14, 4, 1001);
    check!(u32, 0, 0, 1);
    check!(u32, 2, 3, 0);
    check!(i32, 100, 2, 4950);
    check!(i32, 35, 11, 417225900);
    check!(i32, 14, 4, 1001);
    check!(i32, 0, 0, 1);
    check!(i32, 2, 3, 0);

    check!(u64, 100, 2, 4950);
    check!(u64, 35, 11, 417225900);
    check!(u64, 14, 4, 1001);
    check!(u64, 0, 0, 1);
    check!(u64, 2, 3, 0);
    check!(i64, 100, 2, 4950);
    check!(i64, 35, 11, 417225900);
    check!(i64, 14, 4, 1001);
    check!(i64, 0, 0, 1);
    check!(i64, 2, 3, 0);
}
#[test]
fn test_multinomial() {
    // With exactly two parts, the multinomial coefficient reduces to a plain
    // binomial: multinomial([a, b]) == C(a + b, a).
    macro_rules! check_binomial {
        ($t:ty, $k:expr) => { {
            let n: $t = $k.iter().fold(0, |acc, &x| acc + x);
            let k: &[$t] = $k;
            assert_eq!(k.len(), 2);
            assert_eq!(multinomial(k), binomial(n, k[0]));
        } }
    }

    check_binomial!(u8, &[4, 5]);
    check_binomial!(i8, &[4, 5]);

    check_binomial!(u16, &[2, 98]);
    check_binomial!(u16, &[4, 10]);
    check_binomial!(i16, &[2, 98]);
    check_binomial!(i16, &[4, 10]);

    check_binomial!(u32, &[2, 98]);
    check_binomial!(u32, &[11, 24]);
    check_binomial!(u32, &[4, 10]);
    check_binomial!(i32, &[2, 98]);
    check_binomial!(i32, &[11, 24]);
    check_binomial!(i32, &[4, 10]);

    check_binomial!(u64, &[2, 98]);
    check_binomial!(u64, &[11, 24]);
    check_binomial!(u64, &[4, 10]);
    check_binomial!(i64, &[2, 98]);
    check_binomial!(i64, &[11, 24]);
    check_binomial!(i64, &[4, 10]);

    // Direct expected values for three-part inputs (5!/(2!1!2!) == 30,
    // 5!/(2!3!0!) == 10).
    macro_rules! check_multinomial {
        ($t:ty, $k:expr, $r:expr) => { {
            let k: &[$t] = $k;
            let expected: $t = $r;
            assert_eq!(multinomial(k), expected);
        } }
    }

    check_multinomial!(u8, &[2, 1, 2], 30);
    check_multinomial!(u8, &[2, 3, 0], 10);
    check_multinomial!(i8, &[2, 1, 2], 30);
    check_multinomial!(i8, &[2, 3, 0], 10);
    check_multinomial!(u16, &[2, 1, 2], 30);
    check_multinomial!(u16, &[2, 3, 0], 10);
    check_multinomial!(i16, &[2, 1, 2], 30);
    check_multinomial!(i16, &[2, 3, 0], 10);
    check_multinomial!(u32, &[2, 1, 2], 30);
    check_multinomial!(u32, &[2, 3, 0], 10);
    check_multinomial!(i32, &[2, 1, 2], 30);
    check_multinomial!(i32, &[2, 3, 0], 10);
    check_multinomial!(u64, &[2, 1, 2], 30);
    check_multinomial!(u64, &[2, 3, 0], 10);
    check_multinomial!(i64, &[2, 1, 2], 30);
    check_multinomial!(i64, &[2, 3, 0], 10);

    // Degenerate inputs: an empty slice, a single zero part, and a single
    // non-zero part all yield 1.
    check_multinomial!(u64, &[], 1);
    check_multinomial!(u64, &[0], 1);
    check_multinomial!(u64, &[12345], 1);
}
|
//! Object-hash mapping library for Redis.
//!
//! Ohmers is a library for storing objects in Redis, a persistent
//! key-value database.
//! It is based on the Ruby library Ohm, and it uses the same key names,
//! so it can be used in the same system.
//!
//! # Prerequisites
//!
//! Have a [redis server](https://github.com/antirez/redis/) running and a
//! [redis-rs](https://github.com/mitsuhiko/redis-rs/) connection.
//!
//! # Getting started
//!
//! Ohmers maps Rust structs to hash maps in Redis. First define the structs
//! using the model! macro, and then use their methods to create, read,
//! update, delete.
//!
//! ```rust
//! # #[macro_use(model, create, insert)] extern crate ohmers;
//! # extern crate rustc_serialize;
//! # extern crate redis;
//! # use ohmers::*;
//!
//! model!(Event {
//! indices {
//! name:String = "My Event".to_string();
//! };
//! venue:Reference<Venue> = Reference::new();
//! participants:Set<Person> = Set::new();
//! votes:Counter = Counter;
//! });
//!
//! model!(Venue {
//! name:String = "My Venue".to_string();
//! events:Set<Event> = Set::new();
//! });
//!
//! model!(Person {
//! name:String = "A Person".to_string();
//! });
//! # fn main() {
//! # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
//! let p1 = create!(Person { name: "Alice".to_string(), }, &client).unwrap();
//! let p2 = create!(Person { name: "Bob".to_string(), }, &client).unwrap();
//! let p3 = create!(Person { name: "Charlie".to_string(), }, &client).unwrap();
//!
//! let v1 = create!(Venue { name: "Home".to_string(), }, &client).unwrap();
//! let v2 = create!(Venue { name: "Work".to_string(), }, &client).unwrap();
//!
//! let mut e1 = create!(Event { name: "Birthday Party".to_string(), }, &client).unwrap();
//! insert!(e1.participants, p1, &client).unwrap();
//! insert!(e1.participants, p2, &client).unwrap();
//! insert!(e1.participants, p3, &client).unwrap();
//! e1.venue.set(&v1);
//! e1.save(&client).unwrap();
//!
//! let mut e2 = create!(Event { name: "Work Meeting".to_string(), }, &client).unwrap();
//! insert!(e2.participants, p1, &client).unwrap();
//! insert!(e2.participants, p2, &client).unwrap();
//! e2.venue.set(&v2);
//! e2.save(&client).unwrap();
//! # }
//! ```
pub extern crate rmp as msgpack;
extern crate redis;
extern crate rustc_serialize;
extern crate regex;
extern crate stal;
use std::ascii::AsciiExt;
use std::collections::{HashSet, HashMap};
use std::marker::PhantomData;
use std::mem::replace;
use std::string::FromUtf8Error;
use redis::Commands;
use redis::ToRedisArgs;
use regex::Regex;
pub use stal::Set as StalSet;
mod encoder;
use encoder::*;
mod decoder;
use decoder::*;
mod lua;
use lua::{DELETE, SAVE};
/// Declares a struct.
/// Fields may be declared as a part of uniques, indices, or regular fields.
/// Every field must have a default value.
/// The struct will derive RustcEncodable, RustcDecodable, and Default.
/// More `derive`s can be specified.
///
/// A property `id: usize = 0;` is automatically added to track the object.
///
/// # Examples
/// ```
/// # #[macro_use(model)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// model!(
///     derive { Clone, PartialOrd }
///     MyStruct {
///         uniques { my_unique_identifier:u8 = 0; };
///         indices { my_index:u8 = 0; };
///         other_field:String = "".to_string();
///     });
/// # fn main() {
/// # }
/// ```
#[macro_export]
macro_rules! model {
    // Arm: regular fields only -- forwards with empty `uniques`/`indices`.
    ($class: ident { $($key: ident:$proptype: ty = $default: expr);*; } ) => {
        model!(
            $class {
                uniques { };
                indices { };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: extra derives + regular fields only.
    (
        derive { $($derive: ident),* }
        $class: ident { $($key: ident:$proptype: ty = $default: expr);*; } ) => {
        model!(
            derive { $($derive),* }
            $class {
                uniques { };
                indices { };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: uniques + regular fields (no indices).
    ($class: ident {
        uniques { $($ukey: ident:$uproptype: ty = $udefault: expr;)* };
        $($key: ident:$proptype: ty = $default: expr;)* }
    ) => {
        model!(
            $class {
                uniques {
                    $(
                        $ukey: $uproptype = $udefault;
                    )*
                };
                indices { };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: extra derives + uniques + regular fields (no indices).
    (
        derive { $($derive: ident),* }
        $class: ident {
        uniques { $($ukey: ident:$uproptype: ty = $udefault: expr;)* };
        $($key: ident:$proptype: ty = $default: expr;)* }
    ) => {
        model!(
            derive { $($derive),* }
            $class {
                uniques {
                    $(
                        $ukey: $uproptype = $udefault;
                    )*
                };
                indices { };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: indices + regular fields (no uniques).
    ($class: ident {
        indices { $($ikey: ident:$iproptype: ty = $idefault: expr;)* };
        $($key: ident:$proptype: ty = $default: expr;)* }
    ) => {
        model!(
            $class {
                uniques { };
                indices {
                    $(
                        $ikey: $iproptype = $idefault;
                    )*
                };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: extra derives + indices + regular fields (no uniques).
    (
        derive { $($derive: ident),* }
        $class: ident {
        indices { $($ikey: ident:$iproptype: ty = $idefault: expr;)* };
        $($key: ident:$proptype: ty = $default: expr;)* }
    ) => {
        model!(
            derive { $($derive),* }
            $class {
                uniques { };
                indices {
                    $(
                        $ikey: $iproptype = $idefault;
                    )*
                };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: uniques + indices, no extra derives -- adds an empty derive list.
    (
        $class: ident {
        uniques { $($ukey: ident:$uproptype: ty = $udefault: expr;)* };
        indices { $($ikey: ident:$iproptype: ty = $idefault: expr;)* };
        $($key: ident:$proptype: ty = $default: expr;)* }
    ) => {
        model!(
            derive { }
            $class {
                uniques {
                    $(
                        $ukey: $uproptype = $udefault;
                    )*
                };
                indices {
                    $(
                        $ikey: $iproptype = $idefault;
                    )*
                };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: fully-specified form; every other arm funnels into this one,
    // which generates the struct and its impls.
    (
        derive { $($derive: ident),* }
        $class: ident {
        uniques { $($ukey: ident:$uproptype: ty = $udefault: expr;)* };
        indices { $($ikey: ident:$iproptype: ty = $idefault: expr;)* };
        $($key: ident:$proptype: ty = $default: expr;)* }
    ) => {
        // Generated struct: the auto-increment `id` first, then regular,
        // unique, and indexed fields.
        #[derive(RustcEncodable, RustcDecodable, Debug, $($derive,)* )]
        struct $class {
            id: usize,
            $(
                $key: $proptype,
            )*
            $(
                $ukey: $uproptype,
            )*
            $(
                $ikey: $iproptype,
            )*
        }

        // Defaults come from the `= expr` part of each field declaration;
        // id defaults to 0, meaning "not saved yet".
        impl Default for $class {
            fn default() -> Self {
                $class {
                    id: 0,
                    $(
                        $key: $default,
                    )*
                    $(
                        $ukey: $udefault,
                    )*
                    $(
                        $ikey: $idefault,
                    )*
                }
            }
        }

        impl ohmers::Ohmer for $class {
            fn id(&self) -> usize { self.id }
            fn set_id(&mut self, id: usize) { self.id = id; }

            // These functions are implemented in the trait, but this
            // reduces the runtime overhead
            fn get_class_name(&self) -> String {
                stringify!($class).to_owned()
            }

            fn key_for_unique(&self, field: &str, value: &str) -> String {
                format!("{}:uniques:{}:{}", stringify!($class), field, value)
            }

            fn key_for_index(&self, field: &str, value: &str) -> String {
                format!("{}:indices:{}:{}", stringify!($class), field, value)
            }

            fn unique_fields<'a>(&self) -> std::collections::HashSet<&'a str> {
                // `mut` is unused when the uniques list is empty.
                #![allow(unused_mut)]
                let mut hs = std::collections::HashSet::new();
                $(
                    hs.insert(stringify!($ukey));
                )*
                hs
            }

            fn index_fields<'a>(&self) -> std::collections::HashSet<&'a str> {
                // `mut` is unused when the indices list is empty.
                #![allow(unused_mut)]
                let mut hs = std::collections::HashSet::new();
                $(
                    hs.insert(stringify!($ikey));
                )*
                hs
            }
        }

        // Two objects are considered equal when they share the same id.
        impl PartialEq for $class {
            fn eq(&self, other: &$class) -> bool {
                self.id == other.id
            }
        }
    }
}
/// Creates a new instance of `$class` using the default properties,
/// overriding specified collection of `$key` with `$value`.
///
/// # Examples
/// ```
/// # #[macro_use(model, new)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// model!(
///     MyStruct {
///         k1:u8 = 1;
///         k2:u8 = 2;
///     });
///
/// # fn main() {
/// let st = new!(MyStruct { k2: 3, });
/// assert_eq!(st.id, 0); // object was not created in Redis yet
/// assert_eq!(st.k1, 1);
/// assert_eq!(st.k2, 3);
/// # }
/// ```
#[macro_export]
macro_rules! new {
    ($class: ident { $($key:ident: $value: expr),*, }) => {{
        // Start from the model's defaults, then overwrite the given fields.
        let mut obj = $class::default();
        $(
            obj.$key = $value;
        )*
        obj
    }}
}
/// Creates a new instance of `$class` using the default properties,
/// overriding specified collection of `$key` with `$value`, and saving it
/// in the database
///
/// # Examples
/// ```
/// # #[macro_use(model, create)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// model!(
///     MyStruct {
///         k1:u8 = 1;
///         k2:u8 = 2;
///     });
///
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// let st = create!(MyStruct { k2: 3, }, &client).unwrap();
/// assert!(st.id > 0); // object was already created in Redis
/// assert_eq!(st.k1, 1);
/// assert_eq!(st.k2, 3);
/// # }
/// ```
#[macro_export]
macro_rules! create {
    ($class: ident { $($key:ident: $value: expr),*, }, $conn: expr) => {{
        // Like `new!`, but immediately persists; `save` assigns the id on
        // success and the saved object is returned.
        let mut obj = $class::default();
        $(
            obj.$key = $value;
        )*
        obj.save(&$conn).map(|_| obj)
    }}
}
/// Returns a `Query` with all the `$class` objects where `$key` is `$value`.
/// All the `$key` must be declared as `indices` in the `model!` declaration.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create, find)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// # use redis::Commands;
/// model!(
///     Browser {
///         indices {
///             name:String = "".to_string();
///             major_version:u8 = 0;
///         };
///         minor_version:u8 = 0;
///     });
///
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// # let _:bool = client.del("Browser:indices:name:Firefox").unwrap();
/// # let _:bool = client.del("Browser:indices:name:Chrome").unwrap();
/// # let _:bool = client.del("Browser:indices:major_version:42").unwrap();
/// # let _:bool = client.del("Browser:indices:major_version:43").unwrap();
/// # let _:bool = client.del("Browser:indices:major_version:44").unwrap();
/// create!(Browser { name: "Firefox".to_string(), major_version: 42, minor_version: 3, }, &client).unwrap();
/// create!(Browser { name: "Firefox".to_string(), major_version: 42, minor_version: 4, }, &client).unwrap();
/// create!(Browser { name: "Firefox".to_string(), major_version: 43, }, &client).unwrap();
/// create!(Browser { name: "Firefox".to_string(), major_version: 43, minor_version: 1, }, &client).unwrap();
/// create!(Browser { name: "Chrome".to_string(), major_version: 43, minor_version: 1, }, &client).unwrap();
/// create!(Browser { name: "Chrome".to_string(), major_version: 43, minor_version: 2, }, &client).unwrap();
/// create!(Browser { name: "Chrome".to_string(), major_version: 44, minor_version: 3, }, &client).unwrap();
///
/// assert_eq!(find!(
///     Browser { name: "Chrome", major_version: 44, } ||
///     { name: "Firefox", major_version: 43, },
///     &client
///     ).try_into_iter().unwrap().collect::<Vec<_>>().len(), 3);
/// # }
/// ```
#[macro_export]
macro_rules! find {
    ($class: ident $({ $($key:ident: $value: expr),*, })||*, $conn: expr) => {{
        // Each `{ key: value, ... }` group is an AND (set intersection) over
        // its index lookups; the `||`-separated groups are OR'd together
        // (set union) into a single query.
        ohmers::Query::<$class>::new(
            ohmers::StalSet::Union(vec![
                $(
                    ohmers::StalSet::Inter(
                        vec![
                            $(
                                ohmers::Query::<$class>::key(stringify!($key), &*format!("{}", $value)),
                            )*
                        ]
                    ),
                )*
            ]
        ), &$conn)
    }}
}
/// Properties declared as `Collection` can use the collection macro to get a
/// `Query` to iterate over all of its elements.
/// A `Collection` is an accessor to objects that have a `Reference` to the
/// object.
#[macro_export]
macro_rules! collection {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        // The parent's class name is used to derive the `<class>_id` index
        // that the referencing objects are stored under.
        $obj.$prop.all(&*$obj.get_class_name(), &$obj, &$conn)
    }}
}

/// Number of elements in a List or Set property.
#[macro_export]
macro_rules! len {
    ($obj: ident. $prop: ident, $conn: expr) => {{
        $obj.$prop.len(stringify!($prop), &$obj, &$conn)
    }}
}

/// Insert `$el` in `$obj.$prop`. The property must be a Set.
#[macro_export]
macro_rules! insert {
    ($obj: ident.$prop: ident, $el: expr, $conn: expr) => {{
        $obj.$prop.insert(stringify!($prop), &$obj, &$el, &$conn)
    }}
}

/// Adds `$el` at the end of `$obj.$prop`. The property must be a List.
#[macro_export]
macro_rules! push_back {
    ($obj: ident.$prop: ident, $el: expr, $conn: expr) => {{
        $obj.$prop.push_back(stringify!($prop), &$obj, &$el, &$conn)
    }}
}

/// Adds `$el` at the beginning of `$obj.$prop`. The property must be a List.
#[macro_export]
macro_rules! push_front {
    ($obj: ident.$prop: ident, $el: expr, $conn: expr) => {{
        $obj.$prop.push_front(stringify!($prop), &$obj, &$el, &$conn)
    }}
}

/// Retrieves and removes an element from the end of `$obj.$prop`.
/// The property must be a List.
#[macro_export]
macro_rules! pop_back {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        $obj.$prop.pop_back(stringify!($prop), &$obj, &$conn)
    }}
}

/// Retrieves and removes an element from the beginning of `$obj.$prop`.
/// The property must be a List.
#[macro_export]
macro_rules! pop_front {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        $obj.$prop.pop_front(stringify!($prop), &$obj, &$conn)
    }}
}

/// Retrieves an element from the beginning of `$obj.$prop`.
/// The property must be a List.
#[macro_export]
macro_rules! first {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        $obj.$prop.first(stringify!($prop), &$obj, &$conn)
    }}
}

/// Retrieves an element from the end of `$obj.$prop`.
/// The property must be a List.
#[macro_export]
macro_rules! last {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        $obj.$prop.last(stringify!($prop), &$obj, &$conn)
    }}
}

/// Creates an iterable of `$obj.$prop` between `$start` and `$end`.
/// The property must be a List.
///
/// # Examples
/// ```rust,ignore
/// try_range!(myobj.mylist[0 => 4], &client);
/// ```
#[macro_export]
macro_rules! try_range {
    ($obj: ident.$prop: ident[$start:expr => $end:expr], $conn: expr) => {{
        $obj.$prop.try_range(stringify!($prop), &$obj, $start, $end, &$conn)
    }}
}

/// Creates an iterable of all elements in `$obj.$prop`.
/// The property must be a List.
#[macro_export]
macro_rules! try_iter {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        $obj.$prop.try_iter(stringify!($prop), &$obj, &$conn)
    }}
}

/// Checks if an element is in a List or a Set.
#[macro_export]
macro_rules! contains {
    ($obj: ident.$prop: ident, $el: expr, $conn: expr) => {{
        $obj.$prop.contains(stringify!($prop), &$obj, &$el, &$conn)
    }}
}

/// Removes occurrences of an element in a List or a Set.
#[macro_export]
macro_rules! remove {
    ($obj: ident.$prop: ident, $el: expr, $conn: expr) => {{
        $obj.$prop.remove(stringify!($prop), &$obj, &$el, &$conn)
    }}
}
/// Finds a single element by the value of one of its unique indices.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// # use redis::Commands;
/// model!(
///     OperativeSystem {
///         uniques {
///             name:String = "".to_string();
///         };
///         major_version:u8 = 0;
///         minor_version:u8 = 0;
///     });
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// # let _:bool = client.del("OperativeSystem:uniques:name").unwrap();
/// create!(OperativeSystem { name: "Windows".to_owned(), major_version: 10, }, &client);
/// create!(OperativeSystem { name: "GNU/Linux".to_owned(), major_version: 3, minor_version: 14, }, &client);
/// create!(OperativeSystem { name: "OS X".to_owned(), major_version: 10, minor_version: 10, }, &client);
/// assert_eq!(ohmers::with::<OperativeSystem, _>("name", "OS X", &client).unwrap().unwrap().major_version, 10);
/// # }
/// ```
pub fn with<T: Ohmer, S: ToRedisArgs>(property: &str, value: S, r: &redis::Client) -> Result<Option<T>, DecoderError> {
    let mut obj = T::default();
    // The unique index maps each value to the id of the owning object.
    let unique_key = format!("{}:uniques:{}", obj.get_class_name(), property);
    let found: Option<usize> = try!(r.hget(unique_key, value));
    match found {
        Some(id) => {
            try!(obj.load(id, r));
            Ok(Some(obj))
        }
        None => Ok(None),
    }
}
/// Gets an element by id.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// # use redis::Commands;
/// model!(
///     Server {
///         name:String = "".to_string();
///     });
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// let server = create!(Server { name: "My Server".to_owned(), }, &client).unwrap();
/// assert_eq!(&*ohmers::get::<Server>(server.id, &client).unwrap().name, "My Server");
/// # }
/// ```
pub fn get<T: Ohmer>(id: usize, r: &redis::Client) -> Result<T, DecoderError> {
    // Build a default instance and populate it from the stored hash.
    let mut instance = T::default();
    try!(instance.load(id, r));
    Ok(instance)
}
/// Gets a query for all elements.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create, new)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// # use redis::Commands;
/// model!(
///     URL {
///         domain:String = "".to_string();
///         path:String = "/".to_string();
///     });
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// # let _:bool = client.del("URL:all").unwrap();
/// # let _:bool = client.del("URL:id").unwrap();
/// create!(URL { domain: "example.com".to_owned(), }, &client).unwrap();
/// create!(URL { domain: "example.org".to_owned(), path: "/ping".to_owned(), }, &client).unwrap();
/// assert_eq!(ohmers::all_query::<URL>(&client).unwrap().sort("path", None, true, true).unwrap().collect::<Vec<_>>(),
///     vec![
///         new!(URL { id: 1, domain: "example.com".to_owned(), }),
///         new!(URL { id: 2, domain: "example.org".to_owned(), path: "/ping".to_owned(), }),
///     ]);
/// # }
/// ```
pub fn all_query<'a, T: 'a + Ohmer>(r: &'a redis::Client) -> Result<Query<'a, T>, OhmerError> {
    // Every saved object's id is tracked in the `<Class>:all` set.
    let all_key = format!("{}:all", T::default().get_class_name());
    Ok(Query::<'a, T>::new(stal::Set::Key(all_key.into_bytes()), r))
}
/// Gets an iterator for all elements.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create, new)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// # use redis::Commands;
/// model!(
///     Furniture {
///         kind:String = "".to_string();
///     });
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// # let _:bool = client.del("Furniture:all").unwrap();
/// # let _:bool = client.del("Furniture:id").unwrap();
/// create!(Furniture { kind: "Couch".to_owned(), }, &client).unwrap();
/// create!(Furniture { kind: "Chair".to_owned(), }, &client).unwrap();
/// assert_eq!(ohmers::all::<Furniture>(&client).unwrap().collect::<Vec<_>>(),
///     vec![
///         new!(Furniture { id: 1, kind: "Chair".to_owned(), }),
///         new!(Furniture { id: 2, kind: "Couch".to_owned(), }),
///     ]);
/// # }
/// ```
pub fn all<'a, T: 'a + Ohmer>(r: &'a redis::Client) -> Result<Iter<T>, OhmerError> {
    // Convenience wrapper: build the "all" query, then turn it into an iterator.
    let query = try!(all_query(r));
    Ok(try!(query.try_iter()))
}
/// Structs that can be stored in and retrieved from Redis.
/// You can use the `model!` macro as a helper.
pub trait Ohmer : rustc_serialize::Encodable + rustc_serialize::Decodable + Default + Sized {
    /// The name of the field storing the unique auto increment identifier.
    /// It must be named "id" to be consistent with the LUA scripts.
    fn id_field(&self) -> String { "id".to_string() }

    /// The object unique identifier. It is 0 if it was not saved yet.
    fn id(&self) -> usize;
    /// Sets the object unique identifier. It should not be called manually,
    /// it is set after save.
    fn set_id(&mut self, id: usize);

    /// Fields with a unique index.
    fn unique_fields<'a>(&self) -> HashSet<&'a str> { HashSet::new() }
    /// Fields with an index.
    fn index_fields<'a>(&self) -> HashSet<&'a str> { HashSet::new() }

    /// Redis key to find an element with a unique index field value.
    fn key_for_unique(&self, field: &str, value: &str) -> String {
        format!("{}:uniques:{}:{}", self.get_class_name(), field, value)
    }

    /// Redis key to find all elements with an indexed field value.
    fn key_for_index(&self, field: &str, value: &str) -> String {
        format!("{}:indices:{}:{}", self.get_class_name(), field, value)
    }

    /// Name of all the fields that are counters. Counters are stored
    /// independently to keep atomicity in its operations.
    fn counters(&self) -> HashSet<String> {
        // The custom encoder records counter fields as a side effect of
        // encoding the object.
        let mut encoder = Encoder::new();
        self.encode(&mut encoder).unwrap();
        encoder.counters
    }

    /// Object name used in the database.
    fn get_class_name(&self) -> String {
        // The encoder exposes the struct name through its "name" feature.
        let mut encoder = Encoder::new();
        self.encode(&mut encoder).unwrap();
        encoder.features.remove("name").unwrap()
    }

    /// Loads an object by id.
    fn load(&mut self, id: usize, r: &redis::Client) -> Result<(), DecoderError> {
        let mut properties:HashMap<String, String> = try!(try!(r.get_connection()).hgetall(format!("{}:{}", self.get_class_name(), id)));
        // The id is not stored inside the hash itself; inject it so the
        // decoder can populate the id field.
        properties.insert("id".to_string(), format!("{}", id));
        let mut decoder = Decoder::new(properties);
        *self = try!(rustc_serialize::Decodable::decode(&mut decoder));
        Ok(())
    }

    /// Serializes this object.
    fn encoder(&self) -> Result<Encoder, OhmerError> {
        let mut encoder = Encoder::new();
        encoder.id_field = self.id_field();
        try!(self.encode(&mut encoder));
        Ok(encoder)
    }

    /// Grabs all the uniques and indices from this object.
    fn uniques_indices(&self, encoder: &Encoder
            ) -> Result<(HashMap<String, String>, HashMap<String, Vec<String>>), OhmerError> {
        let mut unique_fields = self.unique_fields();
        let mut index_fields = self.index_fields();
        let mut uniques = HashMap::new();
        let mut indices = HashMap::new();
        // `encoder.attributes` is a flat [key, value, key, value, ...] list.
        for i in 0..(encoder.attributes.len() / 2) {
            let pos = i * 2;
            let key = &encoder.attributes[pos];
            if unique_fields.remove(&**key) {
                uniques.insert(key.clone(), encoder.attributes[pos + 1].clone());
            }
            if index_fields.remove(&**key) {
                indices.insert(key.clone(), vec![encoder.attributes[pos + 1].clone()]);
            } else if key.len() > 3 && &key[key.len() - 3..] == "_id" &&
                    index_fields.remove(&key[..key.len() - 3]) {
                // An indexed field `foo` may have been serialized as `foo_id`
                // (presumably a Reference property — confirm against encoder);
                // index it under the serialized key name.
                indices.insert(key.clone(), vec![encoder.attributes[pos + 1].clone()]);
            }
        }
        // Any declared unique field that never appeared among the encoded
        // attributes cannot be enforced — report the first one.
        if unique_fields.len() > 0 {
            return Err(OhmerError::UnknownIndex(unique_fields.iter().next().unwrap().to_string()));
        }
        Ok((uniques, indices))
    }

    /// Saves the object in the database, and sets the instance `id` if it was
    /// not set.
    fn save(&mut self, r: &redis::Client) -> Result<(), OhmerError> {
        let encoder = try!(self.encoder());
        let (uniques, indices) = try!(self.uniques_indices(&encoder));
        // The SAVE Lua script receives the object's features, attributes,
        // indices and uniques as msgpack blobs and returns the object id.
        let script = redis::Script::new(SAVE);
        let result = script
            .arg(try!(msgpack_encode(&encoder.features)))
            .arg(try!(msgpack_encode(&encoder.attributes.iter().map(|x| &*x).collect::<Vec<_>>())))
            .arg(try!(msgpack_encode(&indices)))
            .arg(try!(msgpack_encode(&uniques)))
            .invoke(&try!(r.get_connection()));
        let id = match result {
            Ok(id) => id,
            Err(e) => {
                // The script signals a duplicate unique value inside the error
                // message text; recover the offending field name from it.
                let re = Regex::new(r"UniqueIndexViolation: (\w+)").unwrap();
                let s = format!("{}", e);
                match re.find(&*s) {
                    // 22 == "UniqueIndexViolation: ".len(); skip the prefix to
                    // slice out just the field name from the matched span.
                    Some((start, stop)) => return Err(OhmerError::UniqueIndexViolation(s[start + 22..stop].to_string())),
                    None => return Err(OhmerError::RedisError(e)),
                }
            },
        };
        self.set_id(id);
        Ok(())
    }

    /// Deletes the object from the database.
    fn delete(self, r: &redis::Client) -> Result<(), OhmerError> {
        let encoder = try!(self.encoder());
        let (uniques, _) = try!(self.uniques_indices(&encoder));
        // Auxiliary keys (sets, counters, lists) live outside the main hash
        // and must be removed along with it.
        let mut tracked = encoder.sets;
        tracked.extend(encoder.counters);
        tracked.extend(encoder.lists);
        let mut model = HashMap::new();
        let id = self.id();
        let name = self.get_class_name();
        model.insert("key", format!("{}:{}", name, id));
        model.insert("id", format!("{}", id));
        model.insert("name", name);
        let script = redis::Script::new(DELETE);
        let _:() = try!(script
            .arg(try!(msgpack_encode(&model)))
            .arg(try!(msgpack_encode(&uniques)))
            .arg(try!(msgpack_encode(&tracked)))
            .invoke(&try!(r.get_connection())));
        Ok(())
    }
}
/// A Reference to another Ohmer object.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use redis::Commands;
/// # use ohmers::{Ohmer, Reference, get};
/// model!(
///     PhoneNumber {
///         uniques { number:String = "".to_string(); };
///     });
/// model!(
///     PhoneDevice {
///         number:Reference<PhoneNumber> = Reference::new();
///         model:String = "".to_string();
///     });
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// # let _:bool = client.del("PhoneNumber:uniques:number").unwrap();
/// let n1 = create!(PhoneNumber { number: "555-123-4567".to_owned(), }, &client).unwrap();
/// let _ = create!(PhoneNumber { number: "555-456-7890".to_owned(), }, &client).unwrap();
/// let mut d1 = create!(PhoneDevice { model: "iPhone 3GS".to_owned(), }, &client).unwrap();
/// d1.number.set(&n1);
/// d1.save(&client).unwrap();
/// assert_eq!(&*get::<PhoneDevice>(d1.id, &client).unwrap().number.get(&client).unwrap().number, "555-123-4567");
/// # }
/// ```
#[derive(RustcEncodable, RustcDecodable, PartialEq, Debug, Clone)]
pub struct Reference<T: Ohmer> {
    // Id of the referenced object; 0 means no object has been set.
    id: usize,
    phantom: PhantomData<T>,
}
impl<T: Ohmer> Reference<T> {
    /// Creates a new reference with no value.
    pub fn new() -> Self {
        Reference { id: 0, phantom: PhantomData }
    }

    /// Creates a new reference with the specified value.
    pub fn with_value(obj: &T) -> Self {
        Reference { id: obj.id(), phantom: PhantomData }
    }

    /// Returns a new instance of the referenced object.
    pub fn get(&self, r: &redis::Client) -> Result<T, DecoderError> {
        get(self.id, r)
    }

    /// Updates the reference to the new object. It does not save automatically,
    /// `Parent.save(&connection);` still needs to be called.
    pub fn set(&mut self, obj: &T) {
        self.id = obj.id();
    }
}
/// Inverse accessor for `Reference` properties: yields the objects of type
/// `T` whose reference index points back at the owning object.
#[derive(RustcEncodable, RustcDecodable, PartialEq, Debug, Clone)]
pub struct Collection<T: Ohmer> {
    // No per-instance state; the phantom only records the element type.
    phantom: PhantomData<T>,
}
impl<T: Ohmer> Collection<T> {
    /// Creates a new collection accessor.
    pub fn new() -> Self {
        Collection { phantom: PhantomData }
    }

    /// Query over every `T` whose `<property>_id` index equals the parent's
    /// id. (The `collection!` macro passes the parent's class name, lowercased
    /// here, as `property`.)
    pub fn all<'a, P: Ohmer>(&'a self, property: &str, parent: &P, r: &'a redis::Client) -> Query<T> {
        Query::<T>::find(&*format!("{}_id", property.to_ascii_lowercase()), &*format!("{}", parent.id()), r)
    }
}
/// An ordered list property: element ids are kept in a Redis list under a key
/// derived from the parent object.
#[derive(RustcEncodable, RustcDecodable, PartialEq, Debug, Clone)]
pub struct List<T: Ohmer> {
    // No per-instance state; the phantom only records the element type.
    phantom: PhantomData<T>,
}
impl<T: Ohmer> List<T> {
    /// Creates a new list accessor.
    pub fn new() -> Self {
        List { phantom: PhantomData }
    }

    /// Redis key backing this list: `<class>:<property>:<parent id>`.
    /// Fails with `NotSaved` when the parent has no id yet (id == 0).
    fn key_name<P: Ohmer>(&self, property: &str, parent: &P) -> Result<String, OhmerError> {
        let id = parent.id();
        if id == 0 {
            Err(OhmerError::NotSaved)
        } else {
            // Reuse the id fetched above instead of calling `parent.id()` a
            // second time.
            Ok(format!("{}:{}:{}", parent.get_class_name(), property, id))
        }
    }

    /// Number of elements in the list (LLEN).
    pub fn len<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<usize, OhmerError> {
        Ok(try!(r.llen(try!(self.key_name(property, parent)))))
    }

    /// Appends the object's id at the end of the list (RPUSH).
    pub fn push_back<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<(), OhmerError> {
        Ok(try!(r.rpush(try!(self.key_name(property, parent)), obj.id())))
    }

    /// Removes the last id and loads its object (RPOP); `None` when empty.
    pub fn pop_back<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<Option<T>, OhmerError> {
        Ok(match try!(r.rpop(try!(self.key_name(property, parent)))) {
            Some(id) => Some(try!(get(id, r))),
            None => None,
        })
    }

    /// Prepends the object's id at the beginning of the list (LPUSH).
    pub fn push_front<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<(), OhmerError> {
        Ok(try!(r.lpush(try!(self.key_name(property, parent)), obj.id())))
    }

    /// Removes the first id and loads its object (LPOP); `None` when empty.
    pub fn pop_front<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<Option<T>, OhmerError> {
        Ok(match try!(r.lpop(try!(self.key_name(property, parent)))) {
            Some(id) => Some(try!(get(id, r))),
            None => None,
        })
    }

    /// Loads the first element without removing it; `None` when empty.
    pub fn first<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<Option<T>, OhmerError> {
        Ok(match try!(r.lindex(try!(self.key_name(property, parent)), 0)) {
            Some(id) => Some(try!(get(id, r))),
            None => None,
        })
    }

    /// Loads the last element without removing it; `None` when empty.
    pub fn last<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<Option<T>, OhmerError> {
        Ok(match try!(r.lindex(try!(self.key_name(property, parent)), -1)) {
            Some(id) => Some(try!(get(id, r))),
            None => None,
        })
    }

    /// Iterator over the elements with indices in `[start, end]` (LRANGE
    /// semantics; negative indices count from the end).
    pub fn try_range<'a, P: Ohmer>(&'a self, property: &str, parent: &P, start: isize, end: isize, r: &'a redis::Client) -> Result<Iter<T>, OhmerError> {
        let ids:Vec<usize> = try!(r.lrange(try!(self.key_name(property, parent)), start, end));
        Ok(Iter::new(ids.into_iter(), r))
    }

    /// Iterator over every element of the list.
    pub fn try_iter<'a, P: Ohmer>(&'a self, property: &str, parent: &P, r: &'a redis::Client) -> Result<Iter<T>, OhmerError> {
        self.try_range(property, parent, 0, -1, r)
    }

    /// Whether the object's id appears anywhere in the list.
    pub fn contains<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<bool, OhmerError> {
        let ids:Vec<usize> = try!(r.lrange(try!(self.key_name(property, parent)), 0, -1));
        Ok(ids.contains(&obj.id()))
    }

    /// Removes every occurrence of the object's id (LREM with count 0);
    /// returns the number of removed entries.
    pub fn remove<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<usize, OhmerError> {
        Ok(try!(r.lrem(try!(self.key_name(property, parent)), 0, obj.id())))
    }
}
/// An unordered, Redis-backed set of `T` objects attached to a parent object.
/// Only object ids are stored in Redis; elements are loaded on access.
#[derive(RustcEncodable, RustcDecodable, PartialEq, Debug, Clone)]
pub struct Set<T: Ohmer> {
    phantom: PhantomData<T>,
}
impl<T: Ohmer> Set<T> {
    /// Creates a new, unbound set accessor.
    pub fn new() -> Self {
        Set { phantom: PhantomData }
    }

    /// Redis key backing this set: `Class:property:id`.
    /// Fails with `NotSaved` when the parent was never saved (id 0).
    fn key_name<P: Ohmer>(&self, property: &str, parent: &P) -> Result<String, OhmerError> {
        let id = parent.id();
        if id == 0 {
            return Err(OhmerError::NotSaved);
        }
        Ok(format!("{}:{}:{}", parent.get_class_name(), property, id))
    }

    /// The set expressed as a `stal::Set` key, for composing queries.
    pub fn key<P: Ohmer>(&self, property: &str, parent: &P) -> Result<stal::Set, OhmerError> {
        let name = try!(self.key_name(property, parent));
        Ok(stal::Set::Key(name.into_bytes()))
    }

    /// Wraps the set in a `Query` so it can be intersected, unioned, etc.
    pub fn query<'a, P: Ohmer>(&'a self, property: &str, parent: &P, r: &'a redis::Client) -> Result<Query<T>, OhmerError> {
        let key = try!(self.key(property, parent));
        Ok(Query::new(key, r))
    }

    /// SADD: returns whether the element was newly added.
    pub fn insert<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<bool, OhmerError> {
        let key = try!(self.key_name(property, parent));
        Ok(try!(r.sadd(key, obj.id())))
    }

    /// SREM: returns whether the element was present before removal.
    pub fn remove<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<bool, OhmerError> {
        let key = try!(self.key_name(property, parent));
        Ok(try!(r.srem(key, obj.id())))
    }

    /// SISMEMBER: membership test by object id.
    pub fn contains<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<bool, OhmerError> {
        let key = try!(self.key_name(property, parent));
        Ok(try!(r.sismember(key, obj.id())))
    }

    /// SCARD: number of elements in the set.
    pub fn len<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<usize, OhmerError> {
        let key = try!(self.key_name(property, parent));
        Ok(try!(r.scard(key)))
    }
}
/// Errors produced by ohmers operations.
#[derive(PartialEq, Debug)]
pub enum OhmerError {
    /// The object has no id yet, i.e. `save` was never called on it.
    NotSaved,
    /// Underlying Redis failure.
    RedisError(redis::RedisError),
    /// The object could not be serialized.
    EncoderError(EncoderError),
    /// The object could not be deserialized.
    DecoderError,
    /// A field declared unique was not found among the encoded attributes.
    UnknownIndex(String),
    /// Saving would duplicate the named unique field's value.
    UniqueIndexViolation(String),
    /// Raw bytes of a command string that was not valid UTF-8.
    CommandError(Vec<u8>),
}
impl From<FromUtf8Error> for OhmerError {
fn from(err: FromUtf8Error) -> OhmerError {
OhmerError::CommandError(err.into_bytes())
}
}
impl From<redis::RedisError> for OhmerError {
fn from(e: redis::RedisError) -> OhmerError {
OhmerError::RedisError(e)
}
}
impl From<EncoderError> for OhmerError {
fn from(e: EncoderError) -> OhmerError {
OhmerError::EncoderError(e)
}
}
impl From<DecoderError> for OhmerError {
    /// Deserialization failures are collapsed into the unit `DecoderError`
    /// variant; the original error detail is discarded.
    fn from(_error: DecoderError) -> OhmerError {
        OhmerError::DecoderError
    }
}
/// An integer property stored in its own Redis key (outside the object's
/// hash) so increments stay atomic under concurrent updates.
#[derive(RustcEncodable, RustcDecodable, PartialEq, Debug, Clone)]
pub struct Counter;
impl Counter {
    /// Redis key for a counter property: `Class:id:prop`.
    /// Fails with `NotSaved` when the owner was never saved (id 0).
    fn get_key<T: Ohmer>(&self, obj: &T, prop: &str) -> Result<String, OhmerError> {
        let class_name = obj.get_class_name();
        match obj.id() {
            0 => Err(OhmerError::NotSaved),
            id => Ok(format!("{}:{}:{}", class_name, id, prop)),
        }
    }

    /// INCRBY: atomically adds `incr` and returns the new value.
    pub fn incr<T: Ohmer>(&self, obj: &T, prop: &str, incr: i64, r: &redis::Client) -> Result<i64, OhmerError> {
        let key = try!(self.get_key(obj, prop));
        Ok(try!(r.incr(key, incr)))
    }

    /// GET: current value; a missing key counts as 0.
    pub fn get<T: Ohmer>(&self, obj: &T, prop: &str, r: &redis::Client) -> Result<i64, OhmerError> {
        let key = try!(self.get_key(obj, prop));
        let value: Option<i64> = try!(r.get(key));
        Ok(value.unwrap_or(0))
    }
}
/// Increments the counter property `$prop` of `$obj` by `$incr`.
/// Evaluates to `Result<i64, OhmerError>` holding the new value.
#[macro_export]
macro_rules! incrby {
    ($obj: expr, $prop: ident, $incr: expr, $client: expr) => {{
        $obj.$prop.incr(&$obj, stringify!($prop), $incr, $client)
    }}
}
/// Increments the counter property `$prop` of `$obj` by 1.
/// Evaluates to `Result<i64, OhmerError>` holding the new value.
#[macro_export]
macro_rules! incr {
    ($obj: expr, $prop: ident, $client: expr) => {{
        $obj.$prop.incr(&$obj, stringify!($prop), 1, $client)
    }}
}
/// Decrements the counter property `$prop` of `$obj` by 1
/// (an increment of -1). Evaluates to `Result<i64, OhmerError>`.
#[macro_export]
macro_rules! decr {
    ($obj: expr, $prop: ident, $client: expr) => {{
        $obj.$prop.incr(&$obj, stringify!($prop), -1, $client)
    }}
}
/// A lazily evaluated set expression over objects of type `T`.
/// Nothing touches Redis until one of the iteration methods is called.
pub struct Query<'a, T: 'a + Ohmer> {
    // The composed set-algebra expression (keys, inters, unions, diffs).
    set: stal::Set,
    r: &'a redis::Client,
    phantom: PhantomData<T>,
}
impl<'a, T: Ohmer> Query<'a, T> {
    /// Wraps an already-built `stal::Set` expression.
    pub fn new(set: stal::Set, r: &'a redis::Client) -> Self {
        Query { set: set, phantom: PhantomData, r: r }
    }
    /// Intersection of several `field == value` index lookups.
    pub fn from_keys(kv: &[(&str, &str)], r: &'a redis::Client) -> Self {
        let set = stal::Set::Inter(kv.iter().map(|kv| Query::<T>::key(kv.0, kv.1)).collect());
        Query::new(set, r)
    }
    /// The Redis key holding the index set for `field == value` on `T`.
    pub fn key(field: &str, value: &str) -> stal::Set {
        stal::Set::Key(T::default().key_for_index(field, value).as_bytes().to_vec())
    }
    /// Query matching a single `field == value` index lookup.
    pub fn find(field: &str, value: &str, r: &'a redis::Client) -> Self {
        Query { set: Query::<T>::key(field, value), phantom: PhantomData, r: r }
    }
    /// Narrows the query: current results AND `field == value`.
    pub fn inter(&mut self, field: &str, value: &str) -> &mut Self {
        self.sinter(vec![Query::<T>::key(field, value)]);
        self
    }
    /// Narrows the query by intersecting with arbitrary sets.
    pub fn sinter(&mut self, mut sets: Vec<stal::Set>) {
        // Temporarily swap in an empty key so the current set can be moved
        // into the new expression without cloning.
        let set = replace(&mut self.set, stal::Set::Key(vec![]));
        sets.push(set);
        self.set = stal::Set::Inter(sets);
    }
    /// Widens the query: current results OR `field == value`.
    pub fn union(&mut self, field: &str, value: &str) -> &mut Self {
        self.sunion(vec![Query::<T>::key(field, value)]);
        self
    }
    /// Widens the query by unioning with arbitrary sets.
    pub fn sunion(&mut self, mut sets: Vec<stal::Set>) {
        let set = replace(&mut self.set, stal::Set::Key(vec![]));
        sets.push(set);
        self.set = stal::Set::Union(sets);
    }
    /// Excludes results matching `field == value`.
    pub fn diff(&mut self, field: &str, value: &str) -> &mut Self {
        self.sdiff(vec![Query::<T>::key(field, value)]);
        self
    }
    /// Subtracts arbitrary sets from the current results.
    pub fn sdiff(&mut self, mut sets: Vec<stal::Set>) {
        let set = replace(&mut self.set, stal::Set::Key(vec![]));
        // Unlike inter/union, order matters for a difference: the current
        // set must be the first operand so the others are subtracted FROM it.
        sets.insert(0, set);
        self.set = stal::Set::Diff(sets);
    }
    /// Executes the query and returns an iterator over the results.
    pub fn try_iter(&self) -> Result<Iter<'a, T>, OhmerError> {
        Iter::from_ops(self.set.ids().solve(), self.r)
    }
    /// Like `try_iter`, but consumes the query (avoids cloning the set).
    pub fn try_into_iter(self) -> Result<Iter<'a, T>, OhmerError> {
        Iter::from_ops(self.set.into_ids().solve(), self.r)
    }
    /// Executes the query with a Redis SORT over the results.
    /// `by` names a field: counters live in their own keys
    /// (`Class:*:field`), other fields are hash members (`Class:*->field`).
    /// `limit` is an (offset, count) pair; `alpha` sorts lexicographically.
    pub fn sort(&self, by: &str, limit: Option<(usize, usize)>, asc: bool, alpha: bool) -> Result<Iter<'a, T>, OhmerError> {
        let default = T::default();
        let class_name = default.get_class_name();
        let key = if default.counters().contains(by) {
            format!("{}:*:{}", class_name, by)
        } else {
            format!("{}:*->{}", class_name, by)
        }.as_bytes().to_vec();
        // The empty vec![] is a placeholder slot (index 1) that stal fills
        // with the key of the materialized query set.
        let mut template = vec![b"SORT".to_vec(), vec![], b"BY".to_vec(), key];
        if let Some(l) = limit {
            template.push(b"LIMIT".to_vec());
            template.push(format!("{}", l.0).as_bytes().to_vec());
            template.push(format!("{}", l.1).as_bytes().to_vec());
        }
        template.push(if asc { b"ASC".to_vec() } else { b"DESC".to_vec() });
        if alpha {
            template.push(b"ALPHA".to_vec());
        }
        let stal = stal::Stal::from_template(template, vec![(self.set.clone(), 1)]);
        Iter::from_ops(stal.solve(), self.r)
    }
}
/// Iterator over objects of type `T`, materialized one by one from a
/// pre-fetched list of ids.
pub struct Iter<'a, T> {
    r: &'a redis::Client,
    // The ids still to be turned into objects.
    iter: std::vec::IntoIter<usize>,
    phantom: PhantomData<T>,
}
impl<'a, T: Ohmer> Iter<'a, T> {
    /// Wraps a vector of ids; the objects are fetched lazily in `next()`.
    fn new(iter: std::vec::IntoIter<usize>, r: &'a redis::Client) -> Self {
        Iter {
            iter: iter,
            r: r,
            phantom: PhantomData,
        }
    }
    /// Runs the raw command list produced by `stal` (ops.0) in a pipeline
    /// and keeps only the reply of the command at index ops.1, which holds
    /// the ids of the query result.
    fn from_ops(ops: (Vec<Vec<Vec<u8>>>, usize), r: &'a redis::Client) -> Result<Self, OhmerError> {
        let mut q = redis::pipe();
        q.atomic();
        let mut i = 0;
        let len = ops.0.len();
        for op in ops.0.into_iter() {
            if i == 0 || i == len - 1 {
                i += 1;
                // skip MULTI and EXEC
                continue;
            }
            let mut first = true;
            for arg in op {
                if first {
                    // The first token is the command name; it must be UTF-8.
                    q.cmd(&*try!(String::from_utf8(arg)));
                    first = false;
                } else {
                    q.arg(arg);
                }
                // NOTE(review): ignore() is called once per argument rather
                // than once per command; it appears to mark the most recently
                // queued command, so the repeats look idempotent — confirm
                // against the redis-rs Pipeline documentation.
                if i != ops.1 {
                    q.ignore();
                }
            }
            i += 1;
        }
        // Only the non-ignored command contributes a reply, so the last
        // (and only) element of the result is the id list we want.
        let mut result:Vec<Vec<usize>> = try!(q.query(r));
        Ok(Iter { iter: result.pop().unwrap().into_iter(), r: r, phantom: PhantomData })
    }
}
impl<'a, T: Ohmer> Iterator for Iter<'a, T> {
    type Item = T;

    /// Loads the object for the next id. A load/decode failure ends the
    /// iteration early (the error is swallowed, matching the original design).
    fn next(&mut self) -> Option<T> {
        let id = match self.iter.next() {
            Some(id) => id,
            None => return None,
        };
        get(id, self.r).ok()
    }

    /// Exact bound: the number of ids not yet consumed.
    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.iter.len();
        (remaining, Some(remaining))
    }
}
// Changelog note: allow `new!` and `create!` to skip the trailing comma.
//! Object-hash mapping library for Redis.
//!
//! Ohmers is a library for storing objects in Redis, a persistent
//! key-value database.
//! It is based on the Ruby library Ohm, and it uses the same key names,
//! so it can be used in the same system.
//!
//! # Prerequisites
//!
//! Have a [redis server](https://github.com/antirez/redis/) running and a
//! [redis-rs](https://github.com/mitsuhiko/redis-rs/) connection.
//!
//! # Getting started
//!
//! Ohmers maps Rust structs to hash maps in Redis. First define the structs
//! using the model! macro, and then use their methods to create, read,
//! update, and delete them.
//!
//! ```rust
//! # #[macro_use(model, create, insert)] extern crate ohmers;
//! # extern crate rustc_serialize;
//! # extern crate redis;
//! # use ohmers::*;
//!
//! model!(Event {
//! indices {
//! name:String = "My Event".to_string();
//! };
//! venue:Reference<Venue> = Reference::new();
//! participants:Set<Person> = Set::new();
//! votes:Counter = Counter;
//! });
//!
//! model!(Venue {
//! name:String = "My Venue".to_string();
//! events:Set<Event> = Set::new();
//! });
//!
//! model!(Person {
//! name:String = "A Person".to_string();
//! });
//! # fn main() {
//! # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
//! let p1 = create!(Person { name: "Alice".to_string(), }, &client).unwrap();
//! let p2 = create!(Person { name: "Bob".to_string(), }, &client).unwrap();
//! let p3 = create!(Person { name: "Charlie".to_string(), }, &client).unwrap();
//!
//! let v1 = create!(Venue { name: "Home".to_string(), }, &client).unwrap();
//! let v2 = create!(Venue { name: "Work".to_string(), }, &client).unwrap();
//!
//! let mut e1 = create!(Event { name: "Birthday Party".to_string(), }, &client).unwrap();
//! insert!(e1.participants, p1, &client).unwrap();
//! insert!(e1.participants, p2, &client).unwrap();
//! insert!(e1.participants, p3, &client).unwrap();
//! e1.venue.set(&v1);
//! e1.save(&client).unwrap();
//!
//! let mut e2 = create!(Event { name: "Work Meeting".to_string(), }, &client).unwrap();
//! insert!(e2.participants, p1, &client).unwrap();
//! insert!(e2.participants, p2, &client).unwrap();
//! e2.venue.set(&v2);
//! e2.save(&client).unwrap();
//! # }
//! ```
pub extern crate rmp as msgpack;
extern crate redis;
extern crate rustc_serialize;
extern crate regex;
extern crate stal;
use std::ascii::AsciiExt;
use std::collections::{HashSet, HashMap};
use std::marker::PhantomData;
use std::mem::replace;
use std::string::FromUtf8Error;
use redis::Commands;
use redis::ToRedisArgs;
use regex::Regex;
pub use stal::Set as StalSet;
mod encoder;
use encoder::*;
mod decoder;
use decoder::*;
mod lua;
use lua::{DELETE, SAVE};
/// Declares a struct.
/// Fields may be declared as a part of uniques, indices, or regular fields.
/// Every field must have a default value.
/// The struct will derive RustcEncodable, RustcDecodable, and Default.
/// More `derive`s can be specified.
///
/// A property `id: usize = 0;` is automatically added to track the object.
///
/// # Examples
/// ```
/// # #[macro_use(model)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// model!(
///     derive { Clone, PartialOrd }
///     MyStruct {
///         uniques { my_unique_identifier:u8 = 0; };
///         indices { my_index:u8 = 0; };
///         other_field:String = "".to_string();
///     });
/// # fn main() {
/// # }
/// ```
#[macro_export]
macro_rules! model {
    // Arm: plain fields only; forwards with empty uniques and indices.
    ($class: ident { $($key: ident:$proptype: ty = $default: expr);*; } ) => {
        model!(
            $class {
                uniques { };
                indices { };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: extra derives + plain fields only.
    (
        derive { $($derive: ident),* }
        $class: ident { $($key: ident:$proptype: ty = $default: expr);*; } ) => {
        model!(
            derive { $($derive),* }
            $class {
                uniques { };
                indices { };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: uniques declared, no indices.
    ($class: ident {
            uniques { $($ukey: ident:$uproptype: ty = $udefault: expr;)* };
            $($key: ident:$proptype: ty = $default: expr;)* }
            ) => {
        model!(
            $class {
                uniques {
                    $(
                        $ukey: $uproptype = $udefault;
                    )*
                };
                indices { };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: extra derives + uniques, no indices.
    (
        derive { $($derive: ident),* }
        $class: ident {
            uniques { $($ukey: ident:$uproptype: ty = $udefault: expr;)* };
            $($key: ident:$proptype: ty = $default: expr;)* }
            ) => {
        model!(
            derive { $($derive),* }
            $class {
                uniques {
                    $(
                        $ukey: $uproptype = $udefault;
                    )*
                };
                indices { };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: indices declared, no uniques.
    ($class: ident {
            indices { $($ikey: ident:$iproptype: ty = $idefault: expr;)* };
            $($key: ident:$proptype: ty = $default: expr;)* }
            ) => {
        model!(
            $class {
                uniques { };
                indices {
                    $(
                        $ikey: $iproptype = $idefault;
                    )*
                };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: extra derives + indices, no uniques.
    (
        derive { $($derive: ident),* }
        $class: ident {
            indices { $($ikey: ident:$iproptype: ty = $idefault: expr;)* };
            $($key: ident:$proptype: ty = $default: expr;)* }
            ) => {
        model!(
            derive { $($derive),* }
            $class {
                uniques { };
                indices {
                    $(
                        $ikey: $iproptype = $idefault;
                    )*
                };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: uniques + indices, no extra derives — forwards with empty derive.
    (
        $class: ident {
            uniques { $($ukey: ident:$uproptype: ty = $udefault: expr;)* };
            indices { $($ikey: ident:$iproptype: ty = $idefault: expr;)* };
            $($key: ident:$proptype: ty = $default: expr;)* }
            ) => {
        model!(
            derive { }
            $class {
                uniques {
                    $(
                        $ukey: $uproptype = $udefault;
                    )*
                };
                indices {
                    $(
                        $ikey: $iproptype = $idefault;
                    )*
                };
                $($key:$proptype = $default;)*
            }
        );
    };
    // Arm: fully expanded form — generates the struct and its impls.
    (
        derive { $($derive: ident),* }
        $class: ident {
            uniques { $($ukey: ident:$uproptype: ty = $udefault: expr;)* };
            indices { $($ikey: ident:$iproptype: ty = $idefault: expr;)* };
            $($key: ident:$proptype: ty = $default: expr;)* }
            ) => {
        // Note: the generated struct is private to the defining module.
        #[derive(RustcEncodable, RustcDecodable, Debug, $($derive,)* )]
        struct $class {
            id: usize,
            $(
                $key: $proptype,
            )*
            $(
                $ukey: $uproptype,
            )*
            $(
                $ikey: $iproptype,
            )*
        }
        // Default uses the declared per-field defaults, with id 0 ("unsaved").
        impl Default for $class {
            fn default() -> Self {
                $class {
                    id: 0,
                    $(
                        $key: $default,
                    )*
                    $(
                        $ukey: $udefault,
                    )*
                    $(
                        $ikey: $idefault,
                    )*
                }
            }
        }
        impl ohmers::Ohmer for $class {
            fn id(&self) -> usize { self.id }
            fn set_id(&mut self, id: usize) { self.id = id; }
            // These functions are implemented in the trait, but this
            // reduces the runtime overhead
            fn get_class_name(&self) -> String {
                stringify!($class).to_owned()
            }
            fn key_for_unique(&self, field: &str, value: &str) -> String {
                format!("{}:uniques:{}:{}", stringify!($class), field, value)
            }
            fn key_for_index(&self, field: &str, value: &str) -> String {
                format!("{}:indices:{}:{}", stringify!($class), field, value)
            }
            fn unique_fields<'a>(&self) -> std::collections::HashSet<&'a str> {
                #![allow(unused_mut)]
                let mut hs = std::collections::HashSet::new();
                $(
                    hs.insert(stringify!($ukey));
                )*
                hs
            }
            fn index_fields<'a>(&self) -> std::collections::HashSet<&'a str> {
                #![allow(unused_mut)]
                let mut hs = std::collections::HashSet::new();
                $(
                    hs.insert(stringify!($ikey));
                )*
                hs
            }
        }
        // Equality is identity: two instances are "equal" when they map to
        // the same database object, regardless of field values.
        impl PartialEq for $class {
            fn eq(&self, other: &$class) -> bool {
                self.id == other.id
            }
        }
    }
}
/// Creates a new instance of `$class` using the default properties,
/// overriding each specified `$key` with `$value`.
/// The trailing comma after the last override is optional.
///
/// # Examples
/// ```
/// # #[macro_use(model, new)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// model!(
///     MyStruct {
///         k1:u8 = 1;
///         k2:u8 = 2;
///     });
///
/// # fn main() {
/// let st = new!(MyStruct { k2: 3 });
/// assert_eq!(st.id, 0); // object was not created in Redis yet
/// assert_eq!(st.k1, 1);
/// assert_eq!(st.k2, 3);
/// # }
/// ```
#[macro_export]
macro_rules! new {
    ($class: ident { $($key:ident: $value: expr),*$(,)* }) => {{
        let mut obj = $class::default();
        $(
            obj.$key = $value;
        )*
        obj
    }}
}
/// Creates a new instance of `$class` using the default properties,
/// overriding each specified `$key` with `$value`, and saving it
/// in the database. Evaluates to `Result` with the saved object.
/// The trailing comma after the last override is optional.
///
/// # Examples
/// ```
/// # #[macro_use(model, create)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// model!(
///     MyStruct {
///         k1:u8 = 1;
///         k2:u8 = 2;
///     });
///
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// let st = create!(MyStruct { k2: 3, }, &client).unwrap();
/// assert!(st.id > 0); // object was already created in Redis
/// assert_eq!(st.k1, 1);
/// assert_eq!(st.k2, 3);
/// # }
/// ```
#[macro_export]
macro_rules! create {
    ($class: ident { $($key:ident: $value: expr),*$(,)* }, $conn: expr) => {{
        let mut obj = $class::default();
        $(
            obj.$key = $value;
        )*
        obj.save(&$conn).map(|_| obj)
    }}
}
/// Returns a `Query` with all the `$class` objects where `$key` is `$value`.
/// All the `$key` must be declared as `indices` in the `model!` declaration.
///
/// Groups separated by `||` are unioned; the `$key: $value` pairs inside one
/// group are intersected. Note each group requires a trailing comma after
/// the last pair.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create, find)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// # use redis::Commands;
/// model!(
///     Browser {
///         indices {
///             name:String = "".to_string();
///             major_version:u8 = 0;
///         };
///         minor_version:u8 = 0;
///     });
///
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// # let _:bool = client.del("Browser:indices:name:Firefox").unwrap();
/// # let _:bool = client.del("Browser:indices:name:Chrome").unwrap();
/// # let _:bool = client.del("Browser:indices:major_version:42").unwrap();
/// # let _:bool = client.del("Browser:indices:major_version:43").unwrap();
/// # let _:bool = client.del("Browser:indices:major_version:44").unwrap();
/// create!(Browser { name: "Firefox".to_string(), major_version: 42, minor_version: 3, }, &client).unwrap();
/// create!(Browser { name: "Firefox".to_string(), major_version: 42, minor_version: 4, }, &client).unwrap();
/// create!(Browser { name: "Firefox".to_string(), major_version: 43, }, &client).unwrap();
/// create!(Browser { name: "Firefox".to_string(), major_version: 43, minor_version: 1, }, &client).unwrap();
/// create!(Browser { name: "Chrome".to_string(), major_version: 43, minor_version: 1, }, &client).unwrap();
/// create!(Browser { name: "Chrome".to_string(), major_version: 43, minor_version: 2, }, &client).unwrap();
/// create!(Browser { name: "Chrome".to_string(), major_version: 44, minor_version: 3, }, &client).unwrap();
///
/// assert_eq!(find!(
///     Browser { name: "Chrome", major_version: 44, } ||
///     { name: "Firefox", major_version: 43, },
///     &client
/// ).try_into_iter().unwrap().collect::<Vec<_>>().len(), 3);
/// # }
/// ```
#[macro_export]
macro_rules! find {
    ($class: ident $({ $($key:ident: $value: expr),*, })||*, $conn: expr) => {{
        // Union of intersections: each `{ ... }` group becomes an Inter of
        // its index keys, and the groups are combined with a Union.
        ohmers::Query::<$class>::new(
                ohmers::StalSet::Union(vec![
                    $(
                        ohmers::StalSet::Inter(
                            vec![
                            $(
                                ohmers::Query::<$class>::key(stringify!($key), &*format!("{}", $value)),
                            )*
                            ]
                            ),
                    )*
                    ]
                    ), &$conn)
    }}
}
/// Properties declared as `Collection` can use the collection macro to get a
/// `Query` to iterate over all of its elements.
/// A `Collection` is an accessor to objects that have a `Reference` to the
/// object.
/// Note the class name (not the property name) is passed to `all`, because
/// the matching `Reference` field is indexed as `{classname}_id`.
#[macro_export]
macro_rules! collection {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        $obj.$prop.all(&*$obj.get_class_name(), &$obj, &$conn)
    }}
}
/// Number of elements in a List or Set property.
/// Evaluates to `Result<usize, OhmerError>`.
#[macro_export]
macro_rules! len {
    ($obj: ident. $prop: ident, $conn: expr) => {{
        $obj.$prop.len(stringify!($prop), &$obj, &$conn)
    }}
}
/// Insert `$el` in `$obj.$prop`. The property must be a Set.
/// Evaluates to `Result<bool, OhmerError>`.
#[macro_export]
macro_rules! insert {
    ($obj: ident.$prop: ident, $el: expr, $conn: expr) => {{
        $obj.$prop.insert(stringify!($prop), &$obj, &$el, &$conn)
    }}
}
/// Adds `$el` at the end of `$obj.$prop`. The property must be a List.
/// Evaluates to `Result<(), OhmerError>`.
#[macro_export]
macro_rules! push_back {
    ($obj: ident.$prop: ident, $el: expr, $conn: expr) => {{
        $obj.$prop.push_back(stringify!($prop), &$obj, &$el, &$conn)
    }}
}
/// Adds `$el` at the beginning of `$obj.$prop`. The property must be a List.
/// Evaluates to `Result<(), OhmerError>`.
#[macro_export]
macro_rules! push_front {
    ($obj: ident.$prop: ident, $el: expr, $conn: expr) => {{
        $obj.$prop.push_front(stringify!($prop), &$obj, &$el, &$conn)
    }}
}
/// Retrieves and removes an element from the end of `$obj.$prop`.
/// The property must be a List.
/// Evaluates to `Result<Option<T>, OhmerError>`.
#[macro_export]
macro_rules! pop_back {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        $obj.$prop.pop_back(stringify!($prop), &$obj, &$conn)
    }}
}
/// Retrieves and removes an element from the beginning of `$obj.$prop`.
/// The property must be a List.
/// Evaluates to `Result<Option<T>, OhmerError>`.
#[macro_export]
macro_rules! pop_front {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        $obj.$prop.pop_front(stringify!($prop), &$obj, &$conn)
    }}
}
/// Retrieves an element from the beginning of `$obj.$prop` without removing it.
/// The property must be a List.
/// Evaluates to `Result<Option<T>, OhmerError>`.
#[macro_export]
macro_rules! first {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        $obj.$prop.first(stringify!($prop), &$obj, &$conn)
    }}
}
/// Retrieves an element from the end of `$obj.$prop` without removing it.
/// The property must be a List.
/// Evaluates to `Result<Option<T>, OhmerError>`.
#[macro_export]
macro_rules! last {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        $obj.$prop.last(stringify!($prop), &$obj, &$conn)
    }}
}
/// Creates an iterable of `$obj.$prop` between `$start` and `$end`.
/// The property must be a List. Indices follow Redis LRANGE semantics
/// (inclusive; negative values count from the tail).
/// Evaluates to `Result<Iter<T>, OhmerError>`.
///
/// # Examples
/// ```rust,ignore
/// try_range!(myobj.mylist[0 => 4], &client);
/// ```
#[macro_export]
macro_rules! try_range {
    ($obj: ident.$prop: ident[$start:expr => $end:expr], $conn: expr) => {{
        $obj.$prop.try_range(stringify!($prop), &$obj, $start, $end, &$conn)
    }}
}
/// Creates an iterable of all elements in `$obj.$prop`.
/// The property must be a List.
/// Evaluates to `Result<Iter<T>, OhmerError>`.
#[macro_export]
macro_rules! try_iter {
    ($obj: ident.$prop: ident, $conn: expr) => {{
        $obj.$prop.try_iter(stringify!($prop), &$obj, &$conn)
    }}
}
/// Checks if an element is in a List or a Set.
/// Evaluates to `Result<bool, OhmerError>`.
#[macro_export]
macro_rules! contains {
    ($obj: ident.$prop: ident, $el: expr, $conn: expr) => {{
        $obj.$prop.contains(stringify!($prop), &$obj, &$el, &$conn)
    }}
}
/// Removes occurrences of an element in a List or a Set.
/// The result payload depends on the property: a List yields the number of
/// removed occurrences (`usize`), a Set yields whether it was present (`bool`).
#[macro_export]
macro_rules! remove {
    ($obj: ident.$prop: ident, $el: expr, $conn: expr) => {{
        $obj.$prop.remove(stringify!($prop), &$obj, &$el, &$conn)
    }}
}
/// Find an element by a unique index.
///
/// Looks `value` up in the `Class:uniques:property` hash and, when an id is
/// found, loads and returns the object; `Ok(None)` when no object matches.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// # use redis::Commands;
/// model!(
///     OperativeSystem {
///         uniques {
///             name:String = "".to_string();
///         };
///         major_version:u8 = 0;
///         minor_version:u8 = 0;
///     });
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// # let _:bool = client.del("OperativeSystem:uniques:name").unwrap();
/// create!(OperativeSystem { name: "Windows".to_owned(), major_version: 10, }, &client);
/// create!(OperativeSystem { name: "GNU/Linux".to_owned(), major_version: 3, minor_version: 14, }, &client);
/// create!(OperativeSystem { name: "OS X".to_owned(), major_version: 10, minor_version: 10, }, &client);
/// assert_eq!(ohmers::with::<OperativeSystem, _>("name", "OS X", &client).unwrap().unwrap().major_version, 10);
/// # }
/// ```
pub fn with<T: Ohmer, S: ToRedisArgs>(property: &str, value: S, r: &redis::Client) -> Result<Option<T>, DecoderError> {
    let mut obj = T::default();
    let uniques_key = format!("{}:uniques:{}", obj.get_class_name(), property);
    let opt_id: Option<usize> = try!(r.hget(uniques_key, value));
    match opt_id {
        Some(id) => {
            try!(obj.load(id, r));
            Ok(Some(obj))
        }
        None => Ok(None),
    }
}
/// Gets an element by id.
///
/// Builds a default `T` and populates it from the `Class:id` hash in Redis.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// # use redis::Commands;
/// model!(
///     Server {
///         name:String = "".to_string();
///     });
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// let server = create!(Server { name: "My Server".to_owned(), }, &client).unwrap();
/// assert_eq!(&*ohmers::get::<Server>(server.id, &client).unwrap().name, "My Server");
/// # }
/// ```
pub fn get<T: Ohmer>(id: usize, r: &redis::Client) -> Result<T, DecoderError> {
    let mut instance = T::default();
    match instance.load(id, r) {
        Ok(()) => Ok(instance),
        Err(e) => Err(e),
    }
}
/// Gets a query for all elements.
///
/// The query is backed by the `Class:all` set that the save script maintains.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create, new)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// # use redis::Commands;
/// model!(
///     URL {
///         domain:String = "".to_string();
///         path:String = "/".to_string();
///     });
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// # let _:bool = client.del("URL:all").unwrap();
/// # let _:bool = client.del("URL:id").unwrap();
/// create!(URL { domain: "example.com".to_owned(), }, &client).unwrap();
/// create!(URL { domain: "example.org".to_owned(), path: "/ping".to_owned(), }, &client).unwrap();
/// assert_eq!(ohmers::all_query::<URL>(&client).unwrap().sort("path", None, true, true).unwrap().collect::<Vec<_>>(),
/// vec![
///     new!(URL { id: 1, domain: "example.com".to_owned(), }),
///     new!(URL { id: 2, domain: "example.org".to_owned(), path: "/ping".to_owned(), }),
/// ]);
/// # }
/// ```
pub fn all_query<'a, T: 'a + Ohmer>(r: &'a redis::Client) -> Result<Query<'a, T>, OhmerError> {
    let all_key = format!("{}:all", T::default().get_class_name()).into_bytes();
    Ok(Query::<'a, T>::new(stal::Set::Key(all_key), r))
}
/// Gets an iterator for all elements.
///
/// Convenience wrapper: builds the `all_query` and immediately executes it.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create, new)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use ohmers::Ohmer;
/// # use redis::Commands;
/// model!(
///     Furniture {
///         kind:String = "".to_string();
///     });
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// # let _:bool = client.del("Furniture:all").unwrap();
/// # let _:bool = client.del("Furniture:id").unwrap();
/// create!(Furniture { kind: "Couch".to_owned(), }, &client).unwrap();
/// create!(Furniture { kind: "Chair".to_owned(), }, &client).unwrap();
/// assert_eq!(ohmers::all::<Furniture>(&client).unwrap().collect::<Vec<_>>(),
/// vec![
///     new!(Furniture { id: 1, kind: "Chair".to_owned(), }),
///     new!(Furniture { id: 2, kind: "Couch".to_owned(), }),
/// ]);
/// # }
/// ```
pub fn all<'a, T: 'a + Ohmer>(r: &'a redis::Client) -> Result<Iter<T>, OhmerError> {
    let query = try!(all_query::<T>(r));
    let iter = try!(query.try_iter());
    Ok(iter)
}
/// Structs that can be stored in and retrieved from Redis.
/// You can use the `model!` macro as a helper.
pub trait Ohmer : rustc_serialize::Encodable + rustc_serialize::Decodable + Default + Sized {
    /// The name of the field storing the unique auto increment identifier.
    /// It must be named "id" to be consistent with the LUA scripts.
    fn id_field(&self) -> String { "id".to_string() }
    /// The object unique identifier. It is 0 if it was not saved yet.
    fn id(&self) -> usize;
    /// Sets the object unique identifier. It should not be called manually,
    /// it is set after save.
    fn set_id(&mut self, id: usize);
    /// Fields with a unique index.
    fn unique_fields<'a>(&self) -> HashSet<&'a str> { HashSet::new() }
    /// Fields with an index.
    fn index_fields<'a>(&self) -> HashSet<&'a str> { HashSet::new() }
    /// Redis key to find an element with a unique index field value.
    fn key_for_unique(&self, field: &str, value: &str) -> String {
        format!("{}:uniques:{}:{}", self.get_class_name(), field, value)
    }
    /// Redis key to find all elements with an indexed field value.
    fn key_for_index(&self, field: &str, value: &str) -> String {
        format!("{}:indices:{}:{}", self.get_class_name(), field, value)
    }
    /// Name of all the fields that are counters. Counters are stored
    /// independently to keep atomicity in its operations.
    fn counters(&self) -> HashSet<String> {
        // The custom Encoder records counter fields as a side effect of
        // encoding the object.
        let mut encoder = Encoder::new();
        self.encode(&mut encoder).unwrap();
        encoder.counters
    }
    /// Object name used in the database.
    fn get_class_name(&self) -> String {
        // The Encoder exposes the struct name through its "features" map.
        let mut encoder = Encoder::new();
        self.encode(&mut encoder).unwrap();
        encoder.features.remove("name").unwrap()
    }
    /// Loads an object by id.
    fn load(&mut self, id: usize, r: &redis::Client) -> Result<(), DecoderError> {
        // Fetch the whole hash, then inject the id (it is not stored as a
        // hash field) so the decoder can populate the id field too.
        let mut properties:HashMap<String, String> = try!(try!(r.get_connection()).hgetall(format!("{}:{}", self.get_class_name(), id)));
        properties.insert("id".to_string(), format!("{}", id));
        let mut decoder = Decoder::new(properties);
        *self = try!(rustc_serialize::Decodable::decode(&mut decoder));
        Ok(())
    }
    /// Serializes this object.
    fn encoder(&self) -> Result<Encoder, OhmerError> {
        let mut encoder = Encoder::new();
        encoder.id_field = self.id_field();
        try!(self.encode(&mut encoder));
        Ok(encoder)
    }
    /// Grabs all the uniques and indices from this object.
    fn uniques_indices(&self, encoder: &Encoder
            ) -> Result<(HashMap<String, String>, HashMap<String, Vec<String>>), OhmerError> {
        let mut unique_fields = self.unique_fields();
        let mut index_fields = self.index_fields();
        let mut uniques = HashMap::new();
        let mut indices = HashMap::new();
        // encoder.attributes is a flat [key, value, key, value, ...] list.
        for i in 0..(encoder.attributes.len() / 2) {
            let pos = i * 2;
            let key = &encoder.attributes[pos];
            if unique_fields.remove(&**key) {
                uniques.insert(key.clone(), encoder.attributes[pos + 1].clone());
            }
            if index_fields.remove(&**key) {
                indices.insert(key.clone(), vec![encoder.attributes[pos + 1].clone()]);
            } else if key.len() > 3 && &key[key.len() - 3..] == "_id" &&
                    index_fields.remove(&key[..key.len() - 3]) {
                // A Reference field `foo` is encoded as `foo_id`; match it
                // against an index declared as `foo`.
                indices.insert(key.clone(), vec![encoder.attributes[pos + 1].clone()]);
            }
        }
        // A declared unique that never showed up in the encoded attributes
        // is a programming error.
        // NOTE(review): leftover names in index_fields are silently ignored
        // here — confirm whether that asymmetry is intentional.
        if unique_fields.len() > 0 {
            return Err(OhmerError::UnknownIndex(unique_fields.iter().next().unwrap().to_string()));
        }
        Ok((uniques, indices))
    }
    /// Saves the object in the database, and sets the instance `id` if it was
    /// not set.
    fn save(&mut self, r: &redis::Client) -> Result<(), OhmerError> {
        let encoder = try!(self.encoder());
        let (uniques, indices) = try!(self.uniques_indices(&encoder));
        // All bookkeeping (id allocation, hash write, index/unique updates)
        // happens atomically inside the SAVE Lua script; arguments are
        // passed msgpack-encoded.
        let script = redis::Script::new(SAVE);
        let result = script
            .arg(try!(msgpack_encode(&encoder.features)))
            .arg(try!(msgpack_encode(&encoder.attributes.iter().map(|x| &*x).collect::<Vec<_>>())))
            .arg(try!(msgpack_encode(&indices)))
            .arg(try!(msgpack_encode(&uniques)))
            .invoke(&try!(r.get_connection()));
        let id = match result {
            Ok(id) => id,
            Err(e) => {
                // The script signals duplicate uniques through the error
                // message; extract the field name from the match.
                let re = Regex::new(r"UniqueIndexViolation: (\w+)").unwrap();
                let s = format!("{}", e);
                match re.find(&*s) {
                    // start + 22 skips the "UniqueIndexViolation: " prefix
                    // (22 characters), leaving only the field name.
                    Some((start, stop)) => return Err(OhmerError::UniqueIndexViolation(s[start + 22..stop].to_string())),
                    None => return Err(OhmerError::RedisError(e)),
                }
            },
        };
        self.set_id(id);
        Ok(())
    }
    /// Deletes the object from the database.
    fn delete(self, r: &redis::Client) -> Result<(), OhmerError> {
        let encoder = try!(self.encoder());
        let (uniques, _) = try!(self.uniques_indices(&encoder));
        // Collect every auxiliary key created for this object (sets,
        // counters, lists) so the DELETE script can clean them up too.
        let mut tracked = encoder.sets;
        tracked.extend(encoder.counters);
        tracked.extend(encoder.lists);
        let mut model = HashMap::new();
        let id = self.id();
        let name = self.get_class_name();
        model.insert("key", format!("{}:{}", name, id));
        model.insert("id", format!("{}", id));
        model.insert("name", name);
        let script = redis::Script::new(DELETE);
        let _:() = try!(script
            .arg(try!(msgpack_encode(&model)))
            .arg(try!(msgpack_encode(&uniques)))
            .arg(try!(msgpack_encode(&tracked)))
            .invoke(&try!(r.get_connection())));
        Ok(())
    }
}
/// A Reference to another Ohmer object.
///
/// # Examples
///
/// ```rust
/// # #[macro_use(model, create)] extern crate ohmers;
/// # extern crate rustc_serialize;
/// # extern crate redis;
/// # use redis::Commands;
/// # use ohmers::{Ohmer, Reference, get};
/// model!(
/// PhoneNumber {
/// uniques { number:String = "".to_string(); };
/// });
/// model!(
/// PhoneDevice {
/// number:Reference<PhoneNumber> = Reference::new();
/// model:String = "".to_string();
/// });
/// # fn main() {
/// # let client = redis::Client::open("redis://127.0.0.1/").unwrap();
/// # let _:bool = client.del("PhoneNumber:uniques:number").unwrap();
/// let n1 = create!(PhoneNumber { number: "555-123-4567".to_owned(), }, &client).unwrap();
/// let _ = create!(PhoneNumber { number: "555-456-7890".to_owned(), }, &client).unwrap();
/// let mut d1 = create!(PhoneDevice { model: "iPhone 3GS".to_owned(), }, &client).unwrap();
/// d1.number.set(&n1);
/// d1.save(&client).unwrap();
/// assert_eq!(&*get::<PhoneDevice>(d1.id, &client).unwrap().number.get(&client).unwrap().number, "555-123-4567");
/// # }
/// ```
#[derive(RustcEncodable, RustcDecodable, PartialEq, Debug, Clone)]
pub struct Reference<T: Ohmer> {
    // Id of the referenced object; `new()` initializes it to 0 (unset).
    id: usize,
    // Ties the reference to the referenced type without storing a value.
    phantom: PhantomData<T>,
}
impl<T: Ohmer> Reference<T> {
/// Creates a new reference with no value.
pub fn new() -> Self {
Reference { id: 0, phantom: PhantomData }
}
/// Creates a new reference with the specified value.
pub fn with_value(obj: &T) -> Self {
Reference { id: obj.id(), phantom: PhantomData }
}
/// Returns a new instance of the referenced object.
pub fn get(&self, r: &redis::Client) -> Result<T, DecoderError> {
get(self.id, r)
}
/// Updates the reference to the new object. It does not save automatically,
/// `Parent.save(&connection);` still needs to be called.
pub fn set(&mut self, obj: &T) {
self.id = obj.id();
}
}
// A one-to-many association helper: finds all `T` objects whose
// `<property>_id` index points back at a parent object. Stores no
// data of its own.
#[derive(RustcEncodable, RustcDecodable, PartialEq, Debug, Clone)]
pub struct Collection<T: Ohmer> {
    // Records the element type only; no runtime state.
    phantom: PhantomData<T>,
}
impl<T: Ohmer> Collection<T> {
    /// Creates a new, stateless collection handle.
    pub fn new() -> Self {
        Collection { phantom: PhantomData }
    }
    /// Queries every `T` whose `<property>_id` index equals the parent's id.
    pub fn all<'a, P: Ohmer>(&'a self, property: &str, parent: &P, r: &'a redis::Client) -> Query<T> {
        let field = format!("{}_id", property.to_ascii_lowercase());
        let value = format!("{}", parent.id());
        Query::<T>::find(&*field, &*value, r)
    }
}
// A parent-owned Redis list of `T` ids. Stores no data of its own;
// every operation takes the parent and property name to derive the key.
#[derive(RustcEncodable, RustcDecodable, PartialEq, Debug, Clone)]
pub struct List<T: Ohmer> {
    // Records the element type only; no runtime state.
    phantom: PhantomData<T>,
}
impl<T: Ohmer> List<T> {
    pub fn new() -> Self {
        List { phantom: PhantomData }
    }
    // Redis key for the parent's list: "<Class>:<property>:<id>".
    // Fails with `NotSaved` while the parent has no id (id == 0).
    fn key_name<P: Ohmer>(&self, property: &str, parent: &P) -> Result<String, OhmerError> {
        let id = parent.id();
        if id == 0 {
            Err(OhmerError::NotSaved)
        } else {
            Ok(format!("{}:{}:{}", parent.get_class_name(), property, parent.id()))
        }
    }
    /// LLEN: number of elements in the list.
    pub fn len<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<usize, OhmerError> {
        Ok(try!(r.llen(try!(self.key_name(property, parent)))))
    }
    /// RPUSH: appends the object's id at the tail.
    pub fn push_back<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<(), OhmerError> {
        Ok(try!(r.rpush(try!(self.key_name(property, parent)), obj.id())))
    }
    /// RPOP: removes the tail id and loads that object, if any.
    pub fn pop_back<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<Option<T>, OhmerError> {
        Ok(match try!(r.rpop(try!(self.key_name(property, parent)))) {
            Some(id) => Some(try!(get(id, r))),
            None => None,
        })
    }
    /// LPUSH: prepends the object's id at the head.
    pub fn push_front<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<(), OhmerError> {
        Ok(try!(r.lpush(try!(self.key_name(property, parent)), obj.id())))
    }
    /// LPOP: removes the head id and loads that object, if any.
    pub fn pop_front<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<Option<T>, OhmerError> {
        Ok(match try!(r.lpop(try!(self.key_name(property, parent)))) {
            Some(id) => Some(try!(get(id, r))),
            None => None,
        })
    }
    /// LINDEX 0: loads the head element without removing it.
    pub fn first<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<Option<T>, OhmerError> {
        Ok(match try!(r.lindex(try!(self.key_name(property, parent)), 0)) {
            Some(id) => Some(try!(get(id, r))),
            None => None,
        })
    }
    /// LINDEX -1: loads the tail element without removing it.
    pub fn last<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<Option<T>, OhmerError> {
        Ok(match try!(r.lindex(try!(self.key_name(property, parent)), -1)) {
            Some(id) => Some(try!(get(id, r))),
            None => None,
        })
    }
    /// LRANGE start..end (inclusive, Redis index semantics; negative
    /// indices count from the tail): iterator over the loaded objects.
    pub fn try_range<'a, P: Ohmer>(&'a self, property: &str, parent: &P, start: isize, end: isize, r: &'a redis::Client) -> Result<Iter<T>, OhmerError> {
        let ids:Vec<usize> = try!(r.lrange(try!(self.key_name(property, parent)), start, end));
        Ok(Iter::new(ids.into_iter(), r))
    }
    /// Iterator over the whole list (LRANGE 0 -1).
    pub fn try_iter<'a, P: Ohmer>(&'a self, property: &str, parent: &P, r: &'a redis::Client) -> Result<Iter<T>, OhmerError> {
        self.try_range(property, parent, 0, -1, r)
    }
    /// Membership test: fetches all ids and scans client-side (O(n)).
    pub fn contains<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<bool, OhmerError> {
        let ids:Vec<usize> = try!(r.lrange(try!(self.key_name(property, parent)), 0, -1));
        Ok(ids.contains(&obj.id()))
    }
    /// LREM 0: removes every occurrence of the object's id; returns how
    /// many entries were removed.
    pub fn remove<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<usize, OhmerError> {
        Ok(try!(r.lrem(try!(self.key_name(property, parent)), 0, obj.id())))
    }
}
// A parent-owned Redis set of `T` ids, keyed per parent object.
// Stores no data of its own.
#[derive(RustcEncodable, RustcDecodable, PartialEq, Debug, Clone)]
pub struct Set<T: Ohmer> {
    // Records the element type only; no runtime state.
    phantom: PhantomData<T>,
}
impl<T: Ohmer> Set<T> {
    pub fn new() -> Self {
        Set { phantom: PhantomData }
    }
    // Redis key "<Class>:<property>:<id>"; `NotSaved` while the parent
    // has no id (id == 0).
    fn key_name<P: Ohmer>(&self, property: &str, parent: &P) -> Result<String, OhmerError> {
        let id = parent.id();
        if id == 0 {
            Err(OhmerError::NotSaved)
        } else {
            Ok(format!("{}:{}:{}", parent.get_class_name(), property, parent.id()))
        }
    }
    /// The set as a `stal` expression, usable in set-algebra queries.
    pub fn key<P: Ohmer>(&self, property: &str, parent: &P) -> Result<stal::Set, OhmerError> {
        Ok(stal::Set::Key(try!(self.key_name(property, parent)).as_bytes().to_vec()))
    }
    /// Wraps the set in a `Query` for further filtering and iteration.
    pub fn query<'a, P: Ohmer>(&'a self, property: &str, parent: &P, r: &'a redis::Client) -> Result<Query<T>, OhmerError> {
        let key = try!(self.key(property, parent));
        Ok(Query::new(key, r))
    }
    /// SADD: returns true when the id was newly added.
    pub fn insert<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<bool, OhmerError> {
        Ok(try!(r.sadd(try!(self.key_name(property, parent)), obj.id())))
    }
    /// SREM: returns true when the id was present and removed.
    pub fn remove<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<bool, OhmerError> {
        Ok(try!(r.srem(try!(self.key_name(property, parent)), obj.id())))
    }
    /// SISMEMBER: membership test, server side.
    pub fn contains<P: Ohmer>(&self, property: &str, parent: &P, obj: &T, r: &redis::Client) -> Result<bool, OhmerError> {
        Ok(try!(r.sismember(try!(self.key_name(property, parent)), obj.id())))
    }
    /// SCARD: number of members.
    pub fn len<P: Ohmer>(&self, property: &str, parent: &P, r: &redis::Client) -> Result<usize, OhmerError> {
        Ok(try!(r.scard(try!(self.key_name(property, parent)))))
    }
}
/// Errors returned by Ohmer operations.
#[derive(PartialEq, Debug)]
pub enum OhmerError {
    /// The object has no id yet (id == 0); it must be saved first.
    NotSaved,
    /// Underlying redis-rs failure.
    RedisError(redis::RedisError),
    /// Object serialization failed.
    EncoderError(EncoderError),
    /// Object deserialization failed (the original error detail is dropped).
    DecoderError,
    /// An index name that could not be resolved.
    UnknownIndex(String),
    /// A `uniques` field already holds this value; carries the field name.
    UniqueIndexViolation(String),
    /// A command string that was not valid UTF-8, preserved as raw bytes.
    CommandError(Vec<u8>),
}
/// A command byte string that was not valid UTF-8 is preserved verbatim.
impl From<FromUtf8Error> for OhmerError {
    fn from(e: FromUtf8Error) -> OhmerError {
        let raw = e.into_bytes();
        OhmerError::CommandError(raw)
    }
}
/// Wraps any redis-rs failure.
impl From<redis::RedisError> for OhmerError {
    fn from(err: redis::RedisError) -> OhmerError {
        OhmerError::RedisError(err)
    }
}
/// Wraps a serialization failure.
impl From<EncoderError> for OhmerError {
    fn from(err: EncoderError) -> OhmerError {
        OhmerError::EncoderError(err)
    }
}
/// Maps a deserialization failure; the error detail is intentionally dropped.
impl From<DecoderError> for OhmerError {
    fn from(_err: DecoderError) -> OhmerError {
        OhmerError::DecoderError
    }
}
// Marker type for an integer counter kept under its own Redis key
// ("<Class>:<id>:<prop>"), separate from the object's hash.
#[derive(RustcEncodable, RustcDecodable, PartialEq, Debug, Clone)]
pub struct Counter;
impl Counter {
    /// Builds the counter's Redis key "<Class>:<id>:<prop>".
    /// Fails with `NotSaved` while the owner has no id yet.
    fn get_key<T: Ohmer>(&self, obj: &T, prop: &str) -> Result<String, OhmerError> {
        let class_name = obj.get_class_name();
        match obj.id() {
            0 => Err(OhmerError::NotSaved),
            id => Ok(format!("{}:{}:{}", class_name, id, prop)),
        }
    }
    /// INCRBY: atomically adds `incr` (may be negative); returns the new value.
    pub fn incr<T: Ohmer>(&self, obj: &T, prop: &str, incr: i64, r: &redis::Client) -> Result<i64, OhmerError> {
        let key = try!(self.get_key(obj, prop));
        Ok(try!(r.incr(key, incr)))
    }
    /// GET: reads the current value; a missing key counts as 0.
    pub fn get<T: Ohmer>(&self, obj: &T, prop: &str, r: &redis::Client) -> Result<i64, OhmerError> {
        let key = try!(self.get_key(obj, prop));
        let value: Option<i64> = try!(r.get(key));
        Ok(value.unwrap_or(0))
    }
}
/// Adds `$incr` to counter property `$prop` of `$obj`.
/// Expands to `$obj.$prop.incr(&$obj, "prop", $incr, $client)`.
#[macro_export]
macro_rules! incrby {
    ($obj: expr, $prop: ident, $incr: expr, $client: expr) => {{
        $obj.$prop.incr(&$obj, stringify!($prop), $incr, $client)
    }}
}
/// Increments counter property `$prop` of `$obj` by 1.
#[macro_export]
macro_rules! incr {
    ($obj: expr, $prop: ident, $client: expr) => {{
        $obj.$prop.incr(&$obj, stringify!($prop), 1, $client)
    }}
}
/// Decrements counter property `$prop` of `$obj` by 1 (INCRBY -1).
#[macro_export]
macro_rules! decr {
    ($obj: expr, $prop: ident, $client: expr) => {{
        $obj.$prop.incr(&$obj, stringify!($prop), -1, $client)
    }}
}
/// A lazily-built set expression (via `stal`) over `T`'s indices.
/// Nothing touches Redis until the query is iterated or sorted.
pub struct Query<'a, T: 'a + Ohmer> {
    // The set-algebra expression accumulated so far.
    set: stal::Set,
    r: &'a redis::Client,
    // Result element type; no runtime state.
    phantom: PhantomData<T>,
}
impl<'a, T: Ohmer> Query<'a, T> {
    /// Wraps an already-built `stal` set expression.
    pub fn new(set: stal::Set, r: &'a redis::Client) -> Self {
        Query { set: set, phantom: PhantomData, r: r }
    }
    /// Intersection of several `field == value` index lookups.
    pub fn from_keys(kv: &[(&str, &str)], r: &'a redis::Client) -> Self {
        let set = stal::Set::Inter(kv.iter().map(|kv| Query::<T>::key(kv.0, kv.1)).collect());
        Query::new(set, r)
    }
    /// The Redis key indexing `field == value` for `T`.
    pub fn key(field: &str, value: &str) -> stal::Set {
        stal::Set::Key(T::default().key_for_index(field, value).as_bytes().to_vec())
    }
    /// Starts a query from a single index lookup.
    pub fn find(field: &str, value: &str, r: &'a redis::Client) -> Self {
        Query { set: Query::<T>::key(field, value), phantom: PhantomData, r: r }
    }
    /// Narrows the query: current set INTER index(field == value).
    pub fn inter(&mut self, field: &str, value: &str) -> &mut Self {
        self.sinter(vec![Query::<T>::key(field, value)]);
        self
    }
    pub fn sinter(&mut self, mut sets: Vec<stal::Set>) {
        // Move the current expression out, leaving a cheap placeholder.
        let set = replace(&mut self.set, stal::Set::Key(vec![]));
        sets.push(set);
        self.set = stal::Set::Inter(sets);
    }
    /// Widens the query: current set UNION index(field == value).
    pub fn union(&mut self, field: &str, value: &str) -> &mut Self {
        self.sunion(vec![Query::<T>::key(field, value)]);
        self
    }
    pub fn sunion(&mut self, mut sets: Vec<stal::Set>) {
        let set = replace(&mut self.set, stal::Set::Key(vec![]));
        sets.push(set);
        self.set = stal::Set::Union(sets);
    }
    /// Excludes matches: current set DIFF index(field == value).
    pub fn diff(&mut self, field: &str, value: &str) -> &mut Self {
        self.sdiff(vec![Query::<T>::key(field, value)]);
        self
    }
    pub fn sdiff(&mut self, mut sets: Vec<stal::Set>) {
        let set = replace(&mut self.set, stal::Set::Key(vec![]));
        // Difference is order sensitive: the current set must come first.
        sets.insert(0, set);
        self.set = stal::Set::Diff(sets);
    }
    /// Solves the expression and iterates over the resulting ids.
    pub fn try_iter(&self) -> Result<Iter<'a, T>, OhmerError> {
        Iter::from_ops(self.set.ids().solve(), self.r)
    }
    /// Consuming variant of `try_iter`.
    pub fn try_into_iter(self) -> Result<Iter<'a, T>, OhmerError> {
        Iter::from_ops(self.set.into_ids().solve(), self.r)
    }
    /// Iterates the result ordered with Redis SORT.
    /// `by` names either a counter (stored at "<Class>:*:<by>") or a hash
    /// field ("<Class>:*-><by>"); `limit` is an (offset, count) pair.
    pub fn sort(&self, by: &str, limit: Option<(usize, usize)>, asc: bool, alpha: bool) -> Result<Iter<'a, T>, OhmerError> {
        let default = T::default();
        let class_name = default.get_class_name();
        let key = if default.counters().contains(by) {
            format!("{}:*:{}", class_name, by)
        } else {
            format!("{}:*->{}", class_name, by)
        }.as_bytes().to_vec();
        // The empty vec at position 1 is a placeholder — presumably filled
        // by stal with the key of the solved set (see from_template below).
        let mut template = vec![b"SORT".to_vec(), vec![], b"BY".to_vec(), key];
        if let Some(l) = limit {
            template.push(b"LIMIT".to_vec());
            template.push(format!("{}", l.0).as_bytes().to_vec());
            template.push(format!("{}", l.1).as_bytes().to_vec());
        }
        template.push(if asc { b"ASC".to_vec() } else { b"DESC".to_vec() });
        if alpha {
            template.push(b"ALPHA".to_vec());
        }
        let stal = stal::Stal::from_template(template, vec![(self.set.clone(), 1)]);
        Iter::from_ops(stal.solve(), self.r)
    }
}
/// Iterator that lazily loads `T` objects by id from Redis.
pub struct Iter<'a, T> {
    r: &'a redis::Client,
    // Ids still to be fetched.
    iter: std::vec::IntoIter<usize>,
    phantom: PhantomData<T>,
}
impl<'a, T: Ohmer> Iter<'a, T> {
    fn new(iter: std::vec::IntoIter<usize>, r: &'a redis::Client) -> Self {
        Iter {
            iter: iter,
            r: r,
            phantom: PhantomData,
        }
    }
    /// Executes a solved `stal` operation list as one atomic pipeline
    /// and builds an iterator over the ids produced by the operation at
    /// index `ops.1`; every other command's reply is discarded.
    fn from_ops(ops: (Vec<Vec<Vec<u8>>>, usize), r: &'a redis::Client) -> Result<Self, OhmerError> {
        let mut q = redis::pipe();
        q.atomic();
        let mut i = 0;
        let len = ops.0.len();
        for op in ops.0.into_iter() {
            if i == 0 || i == len - 1 {
                i += 1;
                // skip MULTI and EXEC
                continue;
            }
            let mut first = true;
            // The first element of each op is the command name, the rest
            // are its arguments.
            for arg in op {
                if first {
                    q.cmd(&*try!(String::from_utf8(arg)));
                    first = false;
                } else {
                    q.arg(arg);
                }
                // NOTE(review): ignore() runs once per *argument* rather
                // than once per command; redis-rs appears to tolerate a
                // repeated ignore() on the same command — confirm.
                if i != ops.1 {
                    q.ignore();
                }
            }
            i += 1;
        }
        let mut result:Vec<Vec<usize>> = try!(q.query(r));
        // The last non-ignored reply is the id list we iterate over.
        Ok(Iter { iter: result.pop().unwrap().into_iter(), r: r, phantom: PhantomData })
    }
}
impl<'a, T: Ohmer> Iterator for Iter<'a, T> {
    type Item = T;
    /// Fetches the next object by id; a failed fetch ends iteration early
    /// (the error is discarded).
    fn next(&mut self) -> Option<T> {
        self.iter.next().and_then(|id| get(id, self.r).ok())
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.iter.len();
        (remaining, Some(remaining))
    }
}
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::slice::bytes::copy_memory;
/// MD2 substitution table from RFC 1319: a fixed permutation of 0..=255.
pub static S: [u8, ..256] = [
0x29, 0x2E, 0x43, 0xC9, 0xA2, 0xD8, 0x7C, 0x01, 0x3D, 0x36, 0x54, 0xA1, 0xEC, 0xF0, 0x06, 0x13,
0x62, 0xA7, 0x05, 0xF3, 0xC0, 0xC7, 0x73, 0x8C, 0x98, 0x93, 0x2B, 0xD9, 0xBC, 0x4C, 0x82, 0xCA,
0x1E, 0x9B, 0x57, 0x3C, 0xFD, 0xD4, 0xE0, 0x16, 0x67, 0x42, 0x6F, 0x18, 0x8A, 0x17, 0xE5, 0x12,
0xBE, 0x4E, 0xC4, 0xD6, 0xDA, 0x9E, 0xDE, 0x49, 0xA0, 0xFB, 0xF5, 0x8E, 0xBB, 0x2F, 0xEE, 0x7A,
0xA9, 0x68, 0x79, 0x91, 0x15, 0xB2, 0x07, 0x3F, 0x94, 0xC2, 0x10, 0x89, 0x0B, 0x22, 0x5F, 0x21,
0x80, 0x7F, 0x5D, 0x9A, 0x5A, 0x90, 0x32, 0x27, 0x35, 0x3E, 0xCC, 0xE7, 0xBF, 0xF7, 0x97, 0x03,
0xFF, 0x19, 0x30, 0xB3, 0x48, 0xA5, 0xB5, 0xD1, 0xD7, 0x5E, 0x92, 0x2A, 0xAC, 0x56, 0xAA, 0xC6,
0x4F, 0xB8, 0x38, 0xD2, 0x96, 0xA4, 0x7D, 0xB6, 0x76, 0xFC, 0x6B, 0xE2, 0x9C, 0x74, 0x04, 0xF1,
0x45, 0x9D, 0x70, 0x59, 0x64, 0x71, 0x87, 0x20, 0x86, 0x5B, 0xCF, 0x65, 0xE6, 0x2D, 0xA8, 0x02,
0x1B, 0x60, 0x25, 0xAD, 0xAE, 0xB0, 0xB9, 0xF6, 0x1C, 0x46, 0x61, 0x69, 0x34, 0x40, 0x7E, 0x0F,
0x55, 0x47, 0xA3, 0x23, 0xDD, 0x51, 0xAF, 0x3A, 0xC3, 0x5C, 0xF9, 0xCE, 0xBA, 0xC5, 0xEA, 0x26,
0x2C, 0x53, 0x0D, 0x6E, 0x85, 0x28, 0x84, 0x09, 0xD3, 0xDF, 0xCD, 0xF4, 0x41, 0x81, 0x4D, 0x52,
0x6A, 0xDC, 0x37, 0xC8, 0x6C, 0xC1, 0xAB, 0xFA, 0x24, 0xE1, 0x7B, 0x08, 0x0C, 0xBD, 0xB1, 0x4A,
0x78, 0x88, 0x95, 0x8B, 0xE3, 0x63, 0xE8, 0x6D, 0xE9, 0xCB, 0xD5, 0xFE, 0x3B, 0x00, 0x1D, 0x39,
0xF2, 0xEF, 0xB7, 0x0E, 0x66, 0x58, 0xD0, 0xE4, 0xA6, 0x77, 0x72, 0xF8, 0xEB, 0x75, 0x4B, 0x0A,
0x31, 0x44, 0x50, 0xB4, 0x8F, 0xED, 0x1F, 0x1A, 0xDB, 0x99, 0x8D, 0x33, 0x9F, 0x11, 0x83, 0x14
];
/// Second substitution table; not referenced by the MD2 routines in this
/// file — NOTE(review): confirm the intended consumer before removing.
pub static S2: [u8, ..256] = [
0xDD, 0x07, 0x8F, 0x5F, 0x7E, 0x12, 0x0E, 0x46, 0xCB, 0xB7, 0xEF, 0x4C, 0xCC, 0xB2, 0xE3, 0x9F,
0x4A, 0xFD, 0x2F, 0x0F, 0xFF, 0x44, 0x27, 0x2D, 0x2B, 0x61, 0xF7, 0x90, 0x98, 0xDE, 0x20, 0xF6,
0x87, 0x4F, 0x4D, 0xA3, 0xC8, 0x92, 0xAF, 0x57, 0xB5, 0x00, 0x6B, 0x1A, 0xB0, 0x8D, 0x01, 0x3D,
0x62, 0xF0, 0x56, 0xFB, 0x9C, 0x58, 0x09, 0xC2, 0x72, 0xDF, 0xA7, 0xDC, 0x23, 0x08, 0x59, 0x47,
0x9D, 0xBC, 0x29, 0x02, 0xF1, 0x80, 0x99, 0xA1, 0x64, 0x37, 0xCF, 0xEE, 0x1D, 0xBE, 0x31, 0x70,
0xF2, 0xA5, 0xBF, 0xB1, 0x0A, 0xA0, 0x6D, 0x22, 0xE5, 0x83, 0x54, 0x89, 0xA9, 0x52, 0x69, 0x4E,
0x91, 0x9A, 0x10, 0xD5, 0x84, 0x8B, 0xE4, 0x28, 0x41, 0x9B, 0xC0, 0x7A, 0xC4, 0xD7, 0xB3, 0x2A,
0x82, 0x85, 0xEA, 0x16, 0x7D, 0xED, 0x78, 0xE9, 0xD0, 0x42, 0x3F, 0xCA, 0x06, 0x76, 0x9E, 0x51,
0x50, 0xBD, 0x1E, 0xFE, 0xB6, 0xB4, 0x88, 0x86, 0xD1, 0x4B, 0x2C, 0xD3, 0x17, 0xFA, 0x3B, 0xF4,
0x55, 0x43, 0x6A, 0x19, 0x48, 0xD2, 0x74, 0x5E, 0x18, 0xF9, 0x53, 0x21, 0x7C, 0x81, 0x35, 0xFC,
0x38, 0x0B, 0x04, 0xA2, 0x75, 0x65, 0xE8, 0x11, 0x8E, 0x40, 0x6E, 0xC6, 0x6C, 0x93, 0x94, 0xA6,
0x95, 0xCE, 0x45, 0x63, 0xF3, 0x66, 0x77, 0xE2, 0x71, 0x96, 0xAC, 0x3C, 0x1C, 0xCD, 0x30, 0x5C,
0x14, 0xC5, 0x49, 0xA8, 0x32, 0xAD, 0x6F, 0x15, 0xC3, 0x03, 0x1F, 0xD9, 0x5A, 0xBA, 0xAB, 0x8A,
0xE6, 0x67, 0x73, 0xB8, 0x25, 0xDA, 0x33, 0x68, 0x05, 0x1B, 0x34, 0xF8, 0xC1, 0xA4, 0x36, 0xB9,
0x26, 0xC9, 0x7B, 0xD4, 0xE7, 0x2E, 0x8C, 0x5B, 0xD6, 0xD8, 0xAE, 0xEC, 0x0C, 0xF5, 0x3E, 0xE1,
0x0D, 0x7F, 0xE0, 0x13, 0xBB, 0x3A, 0x97, 0x5D, 0xEB, 0xAA, 0xC7, 0x39, 0x79, 0x24, 0xDB, 0x60
];
/// Pads `msg` to a multiple of 16 bytes, RFC 1319 style: append `pad`
/// copies of the byte value `pad` (1..=16). A message already on a block
/// boundary still gains a full 16-byte block.
pub fn md2_pad(msg: &[u8]) -> Vec<u8> {
    let mut msg = msg.to_vec();
    let pad = 16 - msg.len() % 16;
    msg.grow_fn(pad, |_| pad as u8);
    msg
}
/// Computes the 16-byte MD2 checksum (corrected RFC 1319 step:
/// C[j] ^= S[m ^ L]).
/// NOTE(review): `chunk[i]` assumes `msg.len()` is a multiple of 16 — a
/// short final chunk would panic; confirm callers always pad first.
pub fn md2_checksum(msg: &[u8]) -> [u8, ..16] {
    let mut checksum = [0u8, ..16];
    let mut last = 0u8;
    for chunk in msg.chunks(16) {
        for (i, byte) in checksum.iter_mut().enumerate() {
            *byte ^= S[(chunk[i] ^ last) as uint];
            last = *byte;
        }
    }
    checksum
}
/// MD2 compression function: absorbs one 16-byte message block into a
/// 16-byte state and returns the new state.
pub fn md2_compress(state: &[u8], msg: &[u8]) -> [u8, ..16] {
    // Two 128 bit blocks in, one 128 bit block out.
    assert!(state.len() == 16 && msg.len() == 16);
    let mut x = [0u8, ..48];
    let mut result = [0u8, ..16];
    // Copy over the previous state.
    copy_memory(x.slice_mut(0, 16), state);
    // Copy over the message block.
    copy_memory(x.slice_mut(16, 32), msg);
    // XOR the previous state and the message block.
    for (i, byte) in msg.iter().enumerate() {
        x[32 + i] = *byte ^ x[i];
    }
    // Encrypt block (18 rounds).
    let mut t = 0u8;
    for i in range(0, 18) {
        for byte in x.iter_mut() {
            *byte ^= S[t as uint];
            t = *byte;
        }
        // t = (t + round) mod 256 — relies on pre-1.0 u8 wrapping.
        t += i;
    }
    // Copy the new state.
    copy_memory(&mut result, x.slice(0, 16));
    result
}
/// Computes the MD2 digest of `msg`: pad, compress every block, then
/// compress the checksum as a final block (RFC 1319).
pub fn md2(msg: &[u8]) -> [u8, ..16] {
    // Pad the message to be a multiple of 16 bytes long.
    let msg = md2_pad(msg);
    assert!(msg.len() % 16 == 0);
    // Compress all message blocks.
    let state = msg.chunks(16).fold([0u8, ..16], |s, m| md2_compress(&s, m));
    // Compute the checksum.
    let checksum = md2_checksum(msg.as_slice());
    // Compress checksum and return.
    md2_compress(&state, &checksum)
}
#[cfg(test)]
mod test {
    use md2;
    // Hex-encodes a byte slice, lowercase, two digits per byte.
    fn hex(buf: &[u8]) -> String {
        buf.iter().fold(String::new(), |a, &b| format!("{}{:02x}", a, b))
    }
    // Asserts that md2(s) renders to the expected hex digest `d`.
    fn cmp(d: &str, s: &str) {
        assert_eq!(d.to_string(), hex(&md2(s.as_bytes())));
    }
    #[test]
    fn test_md2() {
        // RFC 1319 appendix test vectors.
        cmp("8350e5a3e24c153df2275c9f80692773", "");
        cmp("32ec01ec4a6dac72c0ab96fb34c0b5d1", "a");
        cmp("da853b0d3f88d99b30283a69e6ded6bb", "abc");
        cmp("ab4f496bfb2a530b219ff33031fe06b0", "message digest");
        cmp("4e8ddff3650292ab5a4108c3aa47940b", "abcdefghijklmnopqrstuvwxyz");
        cmp("da33def2a42df13975352846c30338cd", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789");
        cmp("d5976f79d83d3a0dc9806c3c66f3efd8", "12345678901234567890123456789012345678901234567890123456789012345678901234567890");
    }
}
Make the code a little nicer.
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::slice::bytes::copy_memory;
/// MD2 substitution table from RFC 1319: a fixed permutation of 0..=255.
// Fix: `[u8, ..256]` is pre-1.0 array syntax and no longer compiles;
// the stable spelling of the same type is `[u8; 256]`.
pub static SBOX: [u8; 256] = [
    0x29, 0x2E, 0x43, 0xC9, 0xA2, 0xD8, 0x7C, 0x01, 0x3D, 0x36, 0x54, 0xA1, 0xEC, 0xF0, 0x06, 0x13,
    0x62, 0xA7, 0x05, 0xF3, 0xC0, 0xC7, 0x73, 0x8C, 0x98, 0x93, 0x2B, 0xD9, 0xBC, 0x4C, 0x82, 0xCA,
    0x1E, 0x9B, 0x57, 0x3C, 0xFD, 0xD4, 0xE0, 0x16, 0x67, 0x42, 0x6F, 0x18, 0x8A, 0x17, 0xE5, 0x12,
    0xBE, 0x4E, 0xC4, 0xD6, 0xDA, 0x9E, 0xDE, 0x49, 0xA0, 0xFB, 0xF5, 0x8E, 0xBB, 0x2F, 0xEE, 0x7A,
    0xA9, 0x68, 0x79, 0x91, 0x15, 0xB2, 0x07, 0x3F, 0x94, 0xC2, 0x10, 0x89, 0x0B, 0x22, 0x5F, 0x21,
    0x80, 0x7F, 0x5D, 0x9A, 0x5A, 0x90, 0x32, 0x27, 0x35, 0x3E, 0xCC, 0xE7, 0xBF, 0xF7, 0x97, 0x03,
    0xFF, 0x19, 0x30, 0xB3, 0x48, 0xA5, 0xB5, 0xD1, 0xD7, 0x5E, 0x92, 0x2A, 0xAC, 0x56, 0xAA, 0xC6,
    0x4F, 0xB8, 0x38, 0xD2, 0x96, 0xA4, 0x7D, 0xB6, 0x76, 0xFC, 0x6B, 0xE2, 0x9C, 0x74, 0x04, 0xF1,
    0x45, 0x9D, 0x70, 0x59, 0x64, 0x71, 0x87, 0x20, 0x86, 0x5B, 0xCF, 0x65, 0xE6, 0x2D, 0xA8, 0x02,
    0x1B, 0x60, 0x25, 0xAD, 0xAE, 0xB0, 0xB9, 0xF6, 0x1C, 0x46, 0x61, 0x69, 0x34, 0x40, 0x7E, 0x0F,
    0x55, 0x47, 0xA3, 0x23, 0xDD, 0x51, 0xAF, 0x3A, 0xC3, 0x5C, 0xF9, 0xCE, 0xBA, 0xC5, 0xEA, 0x26,
    0x2C, 0x53, 0x0D, 0x6E, 0x85, 0x28, 0x84, 0x09, 0xD3, 0xDF, 0xCD, 0xF4, 0x41, 0x81, 0x4D, 0x52,
    0x6A, 0xDC, 0x37, 0xC8, 0x6C, 0xC1, 0xAB, 0xFA, 0x24, 0xE1, 0x7B, 0x08, 0x0C, 0xBD, 0xB1, 0x4A,
    0x78, 0x88, 0x95, 0x8B, 0xE3, 0x63, 0xE8, 0x6D, 0xE9, 0xCB, 0xD5, 0xFE, 0x3B, 0x00, 0x1D, 0x39,
    0xF2, 0xEF, 0xB7, 0x0E, 0x66, 0x58, 0xD0, 0xE4, 0xA6, 0x77, 0x72, 0xF8, 0xEB, 0x75, 0x4B, 0x0A,
    0x31, 0x44, 0x50, 0xB4, 0x8F, 0xED, 0x1F, 0x1A, 0xDB, 0x99, 0x8D, 0x33, 0x9F, 0x11, 0x83, 0x14
];
/// Second substitution table; not referenced by the MD2 routines below —
/// NOTE(review): confirm the intended consumer before removing.
// Fix: `[u8, ..256]` is pre-1.0 array syntax; stable Rust spells it `[u8; 256]`.
pub static S2: [u8; 256] = [
    0xDD, 0x07, 0x8F, 0x5F, 0x7E, 0x12, 0x0E, 0x46, 0xCB, 0xB7, 0xEF, 0x4C, 0xCC, 0xB2, 0xE3, 0x9F,
    0x4A, 0xFD, 0x2F, 0x0F, 0xFF, 0x44, 0x27, 0x2D, 0x2B, 0x61, 0xF7, 0x90, 0x98, 0xDE, 0x20, 0xF6,
    0x87, 0x4F, 0x4D, 0xA3, 0xC8, 0x92, 0xAF, 0x57, 0xB5, 0x00, 0x6B, 0x1A, 0xB0, 0x8D, 0x01, 0x3D,
    0x62, 0xF0, 0x56, 0xFB, 0x9C, 0x58, 0x09, 0xC2, 0x72, 0xDF, 0xA7, 0xDC, 0x23, 0x08, 0x59, 0x47,
    0x9D, 0xBC, 0x29, 0x02, 0xF1, 0x80, 0x99, 0xA1, 0x64, 0x37, 0xCF, 0xEE, 0x1D, 0xBE, 0x31, 0x70,
    0xF2, 0xA5, 0xBF, 0xB1, 0x0A, 0xA0, 0x6D, 0x22, 0xE5, 0x83, 0x54, 0x89, 0xA9, 0x52, 0x69, 0x4E,
    0x91, 0x9A, 0x10, 0xD5, 0x84, 0x8B, 0xE4, 0x28, 0x41, 0x9B, 0xC0, 0x7A, 0xC4, 0xD7, 0xB3, 0x2A,
    0x82, 0x85, 0xEA, 0x16, 0x7D, 0xED, 0x78, 0xE9, 0xD0, 0x42, 0x3F, 0xCA, 0x06, 0x76, 0x9E, 0x51,
    0x50, 0xBD, 0x1E, 0xFE, 0xB6, 0xB4, 0x88, 0x86, 0xD1, 0x4B, 0x2C, 0xD3, 0x17, 0xFA, 0x3B, 0xF4,
    0x55, 0x43, 0x6A, 0x19, 0x48, 0xD2, 0x74, 0x5E, 0x18, 0xF9, 0x53, 0x21, 0x7C, 0x81, 0x35, 0xFC,
    0x38, 0x0B, 0x04, 0xA2, 0x75, 0x65, 0xE8, 0x11, 0x8E, 0x40, 0x6E, 0xC6, 0x6C, 0x93, 0x94, 0xA6,
    0x95, 0xCE, 0x45, 0x63, 0xF3, 0x66, 0x77, 0xE2, 0x71, 0x96, 0xAC, 0x3C, 0x1C, 0xCD, 0x30, 0x5C,
    0x14, 0xC5, 0x49, 0xA8, 0x32, 0xAD, 0x6F, 0x15, 0xC3, 0x03, 0x1F, 0xD9, 0x5A, 0xBA, 0xAB, 0x8A,
    0xE6, 0x67, 0x73, 0xB8, 0x25, 0xDA, 0x33, 0x68, 0x05, 0x1B, 0x34, 0xF8, 0xC1, 0xA4, 0x36, 0xB9,
    0x26, 0xC9, 0x7B, 0xD4, 0xE7, 0x2E, 0x8C, 0x5B, 0xD6, 0xD8, 0xAE, 0xEC, 0x0C, 0xF5, 0x3E, 0xE1,
    0x0D, 0x7F, 0xE0, 0x13, 0xBB, 0x3A, 0x97, 0x5D, 0xEB, 0xAA, 0xC7, 0x39, 0x79, 0x24, 0xDB, 0x60
];
/// Pads `msg` to a multiple of 16 bytes, RFC 1319 style: append `pad`
/// copies of the byte value `pad` (1..=16). A message already on a block
/// boundary still gains a full 16-byte block.
// Fix: `Vec::grow_fn` was removed before Rust 1.0; `Vec::resize` appends
// the same `pad` bytes of value `pad`.
pub fn md2_pad(msg: &[u8]) -> Vec<u8> {
    let mut msg = msg.to_vec();
    let pad = 16 - msg.len() % 16;
    let new_len = msg.len() + pad;
    msg.resize(new_len, pad as u8);
    msg
}
pub fn md2_checksum(msg: &[u8]) -> [u8, ..16] {
// Message must be padded.
assert!(msg.len() % 16 == 0);
let mut checksum = [0u8, ..16];
let mut last = 0u8;
for chunk in msg.chunks(16) {
for (mbyte, cbyte) in chunk.iter().zip(checksum.iter_mut()) {
*cbyte ^= SBOX[(*mbyte ^ last) as uint];
last = *cbyte;
}
}
checksum
}
pub fn md2_compress(state: &[u8], msg: &[u8]) -> [u8, ..16] {
// Two 128 bit blocks in.
assert!(state.len() == 16 && msg.len() == 16);
let mut x = [0u8, ..48];
let mut result = [0u8, ..16];
// Copy over the previous state.
copy_memory(x.slice_mut(0, 16), state);
// Copy over the message block.
copy_memory(x.slice_mut(16, 32), msg);
// XOR the previous state and the message block.
for (i, byte) in msg.iter().enumerate() {
x[32 + i] = *byte ^ x[i];
}
// Encrypt block (18 rounds).
let mut t = 0u8;
for i in range(0, 18) {
for byte in x.iter_mut() {
*byte ^= SBOX[t as uint];
t = *byte;
}
t += i;
}
// Copy the new state.
copy_memory(&mut result, x.slice(0, 16));
result
}
pub fn md2(msg: &[u8]) -> [u8, ..16] {
// Pad the message to be a multiple of 16 bytes long.
let msg = md2_pad(msg);
// Compress all message blocks.
let state = msg.chunks(16).fold([0u8, ..16], |s, m| md2_compress(&s, m));
// Compute the checksum.
let checksum = md2_checksum(msg.as_slice());
// Compress checksum and return.
md2_compress(&state, &checksum)
}
#[cfg(test)]
mod test {
    // Fix: `use md2;` only resolved under the pre-2018 crate layout;
    // the function lives in the parent module.
    use super::md2;
    // Hex-encodes a byte slice, lowercase, two digits per byte.
    fn hex(buf: &[u8]) -> String {
        buf.iter().fold(String::new(), |a, &b| format!("{}{:02x}", a, b))
    }
    // Asserts that md2(s) renders to the expected hex digest `d`.
    fn cmp(d: &str, s: &str) {
        assert_eq!(d.to_string(), hex(&md2(s.as_bytes())));
    }
    #[test]
    fn test_md2() {
        // RFC 1319 appendix test vectors.
        cmp("8350e5a3e24c153df2275c9f80692773", "");
        cmp("32ec01ec4a6dac72c0ab96fb34c0b5d1", "a");
        cmp("da853b0d3f88d99b30283a69e6ded6bb", "abc");
        cmp("ab4f496bfb2a530b219ff33031fe06b0", "message digest");
        cmp("4e8ddff3650292ab5a4108c3aa47940b", "abcdefghijklmnopqrstuvwxyz");
        cmp("da33def2a42df13975352846c30338cd", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789");
        cmp("d5976f79d83d3a0dc9806c3c66f3efd8", "12345678901234567890123456789012345678901234567890123456789012345678901234567890");
    }
}
|
use std::io::Write;
use inflector::Inflector;
use regex::{Captures, Regex};
use hyper::status::StatusCode;
use botocore::{Member, Operation, Shape, ShapeType};
use ::Service;
use super::{GenerateProtocol, error_type_name, FileWriter, IoResult, rest_response_parser, generate_field_name};
/// Code generator for AWS services that speak the REST-JSON protocol.
pub struct RestJsonGenerator;
impl GenerateProtocol for RestJsonGenerator {
    /// Writes one trait-method signature per service operation.
    fn generate_method_signatures(&self, writer: &mut FileWriter, service: &Service) -> IoResult {
        for (operation_name, operation) in service.operations().iter() {
            let input_type = operation.input_shape();
            let output_type = operation.output_shape_or("()");
            // Retrieve the `Shape` for the input for this operation.
            let input_shape = &service.get_shape(input_type).unwrap();
            writeln!(writer,
                     "
{documentation}
{method_signature} -> \
Result<{output_type}, {error_type}>;
",
                     documentation = generate_documentation(operation).unwrap_or("".to_owned()),
                     method_signature = generate_method_signature(operation, input_shape),
                     error_type = error_type_name(operation_name),
                     output_type = output_type)?
        }
        Ok(())
    }
    /// Writes the full client implementation (request building, signing,
    /// dispatch, and response parsing) for every operation.
    fn generate_method_impls(&self, writer: &mut FileWriter, service: &Service) -> IoResult {
        for (operation_name, operation) in service.operations().iter() {
            let input_type = operation.input_shape();
            let output_type = operation.output_shape_or("()");
            // Retrieve the `Shape` for the input for this operation.
            let input_shape = &service.get_shape(input_type).unwrap();
            // Construct a list of format strings which will be used to format
            // the request URI, mapping the input struct to the URI arguments.
            let member_uri_strings = generate_shape_member_uri_strings(input_shape);
            // A boolean controlling whether or not the payload should be loaded
            // into the request.
            // According to the AWS SDK documentation, requests should only have
            // a request body for operations with ANY non-URI or non-query
            // parameters.
            let load_payload = input_shape.members
                .as_ref()
                .unwrap()
                .iter()
                .any(|(_, member)| member.location.is_none());
            // Construct a list of strings which will be used to load request
            // parameters from the input struct into a `Params` vec, which will
            // then be added to the request.
            let member_param_strings = generate_shape_member_param_strings(service, input_shape);
            writeln!(writer,"
{documentation}
{method_signature} -> Result<{output_type}, {error_type}> {{
{encode_input}
{request_uri_formatter}
let mut request = SignedRequest::new(\"{http_method}\", \"{endpoint_prefix}\", self.region, &request_uri);
request.set_content_type(\"application/x-amz-json-1.1\".to_owned());
{modify_endpoint_prefix}
{load_payload}
{load_params}
request.sign(&self.credentials_provider.credentials()?);
let response = self.dispatcher.dispatch(&request)?;
match response.status {{
{status_code} => {{
{parse_body}
{parse_headers}
{parse_status_code}
Ok(result)
}}
_ => Err({error_type}::from_body(String::from_utf8_lossy(&response.body).as_ref())),
}}
}}
",
                     documentation = generate_documentation(operation).unwrap_or("".to_owned()),
                     method_signature = generate_method_signature(operation, input_shape),
                     endpoint_prefix = service.signing_name(),
                     modify_endpoint_prefix = generate_endpoint_modification(service).unwrap_or("".to_owned()),
                     http_method = operation.http.method,
                     error_type = error_type_name(operation_name),
                     status_code = http_code_to_status_code(operation.http.response_code),
                     parse_body = generate_body_parser(operation, service),
                     parse_status_code = generate_status_code_parser(operation, service),
                     output_type = output_type,
                     parse_headers = rest_response_parser::generate_response_headers_parser(service, operation)
                         .unwrap_or_else(|| "".to_owned()),
                     request_uri_formatter = generate_uri_formatter(
                         &generate_snake_case_uri(&operation.http.request_uri),
                         &member_uri_strings
                     ),
                     load_payload = generate_payload_loading_string(load_payload),
                     load_params = generate_params_loading_string(&member_param_strings),
                     encode_input = generate_encoding_string(load_payload),
            )?
        }
        Ok(())
    }
    // May need to special case this
    /// Emits the `use` statements every generated REST-JSON client needs.
    fn generate_prelude(&self, writer: &mut FileWriter, _: &Service) -> IoResult {
        writeln!(writer,
                 "use serde_json;
use rusoto_core::param::{{Params, ServiceParams}};
use rusoto_core::signature::SignedRequest;
use serde_json::from_str;
use serde_json::Value as SerdeJsonValue;")
    }
    /// Derive list for generated structs; serde derives are added only
    /// when the shape crosses the wire in that direction.
    fn generate_struct_attributes(&self, serialized: bool, deserialized: bool) -> String {
        let mut derived = vec!["Default", "Debug", "Clone"];
        if serialized {
            derived.push("Serialize");
        }
        if deserialized {
            derived.push("Deserialize")
        }
        format!("#[derive({})]", derived.join(","))
    }
    /// REST-JSON timestamps are generated as floating-point seconds.
    fn timestamp_type(&self) -> &'static str {
        "f64"
    }
}
// Used to print the enum value rather than the status code and the canonical reason.
// For codegen purposes, leaving existing StatusCode Display trait implementation intact.
// StatusCode::Ok.to_string() prints "200 OK"
// StatusCode::Ok.enum_as_string() prints "StatusCode::Ok"
trait CodegenString {
    /// Renders the value as Rust source text naming the enum variant.
    fn enum_as_string(&self) -> String;
}
impl CodegenString for StatusCode {
    /// Emits e.g. "StatusCode::Ok" — the Debug name prefixed with the type path.
    fn enum_as_string(&self) -> String {
        let mut rendered = String::from("StatusCode::");
        rendered.push_str(&format!("{:?}", self));
        rendered
    }
}
/// Maps a botocore response code to source text naming the StatusCode variant.
/// Some service definitions such as elastictranscoder don't specify the
/// response code, in which case 200 OK is assumed.
fn http_code_to_status_code(code: Option<i32>) -> String {
    code.map_or_else(|| "StatusCode::Ok".to_string(),
                     |actual_code| StatusCode::from_u16(actual_code as u16).enum_as_string())
}
// IoT has an endpoint_prefix and a signing_name that differ
fn generate_endpoint_modification(service: &Service) -> Option<String> {
    // Only emit the override when the two names actually diverge.
    if service.signing_name() != service.endpoint_prefix() {
        Some(format!("request.set_endpoint_prefix(\"{}\".to_string());",
                     service.endpoint_prefix()))
    } else {
        None
    }
}
// IoT defines a lot of empty (and therefore unnecessary) request shapes
// don't clutter method signatures with them
fn generate_method_signature(operation: &Operation, shape: &Shape) -> String {
    let has_members = shape.members.as_ref().map_or(false, |members| !members.is_empty());
    if has_members {
        format!("fn {method_name}(&self, input: &{input_type})",
                method_name = operation.name.to_snake_case(),
                input_type = operation.input_shape())
    } else {
        format!("fn {method_name}(&self)",
                method_name = operation.name.to_snake_case())
    }
}
/// Source snippet that serializes the input struct, or nothing when the
/// operation carries no body.
fn generate_encoding_string(load_payload: bool) -> String {
    if !load_payload {
        return "".to_owned();
    }
    "let encoded = serde_json::to_string(input).unwrap();".to_owned()
}
/// Source snippet that attaches the serialized body to the request, or
/// nothing when the operation carries no body.
fn generate_payload_loading_string(load_payload: bool) -> String {
    match load_payload {
        true => "request.set_payload(Some(encoded.into_bytes()));".to_owned(),
        false => "".to_owned(),
    }
}
/// Rewrites `{CamelCaseArg}` placeholders in a request-URI template into
/// `{snake_case_arg}` so they line up with generated struct field names.
fn generate_snake_case_uri(request_uri: &str) -> String {
    lazy_static! {
        // Matches a single brace-delimited URI argument, capturing its name.
        static ref URI_ARGS_REGEX: Regex = Regex::new(r"\{([\w\d]+)\}").unwrap();
    }
    URI_ARGS_REGEX.replace_all(request_uri, |caps: &Captures| {
        format!("{{{}}}",
                caps.get(1).map(|c| Inflector::to_snake_case(c.as_str())).unwrap())
    })
    .to_string()
}
/// Source snippet that populates a `Params` map and attaches it to the
/// request; empty when there are no querystring parameters.
fn generate_params_loading_string(param_strings: &[String]) -> String {
    if param_strings.is_empty() {
        return "".to_owned();
    }
    format!("let mut params = Params::new();
{param_strings}
request.set_params(params);",
            param_strings = param_strings.join("\n"))
}
/// Builds one param-loading snippet per non-deprecated querystring member
/// of the input shape; members with other (or no) locations are skipped.
fn generate_shape_member_param_strings(service: &Service, shape: &Shape) -> Vec<String> {
    shape.members
        .as_ref()
        .unwrap()
        .iter()
        .filter_map(|(member_name, member)| {
            member.location.as_ref().and_then(|loc| {
                if !member.deprecated() && loc == "querystring" {
                    let member_shape = service.shape_for_member(member).unwrap();
                    Some(generate_param_load_string(member_name, member_shape, shape.required(member_name)))
                } else {
                    None
                }
            })
        })
        .collect::<Vec<String>>()
}
/// Emits source that copies one querystring member from the input struct
/// into the request `Params`, handling list/map/scalar shapes and
/// required/optional fields.
// Fix: the two Map arms previously derived the struct field with
// `member_name.to_snake_case()` instead of the `generate_field_name`
// result used by every other arm, bypassing its keyword escaping for
// fields like `type`. All arms now use the shared `field_name`.
fn generate_param_load_string(member_name: &str, member_shape: &Shape, is_required: bool) -> String {
    let field_name = generate_field_name(member_name);
    match (member_shape.shape_type, is_required) {
        (ShapeType::List, true) => {
            format!(
            "for item in input.{field_name}.iter() {{
params.put(\"{member_name}\", item);
}}",
            member_name = member_name,
            field_name = field_name)
        },
        (ShapeType::List, false) => {
            format!(
            "if let Some(ref x) = input.{field_name} {{
for item in x.iter() {{
params.put(\"{member_name}\", item);
}}
}}",
            member_name = member_name,
            field_name = field_name,
            )
        },
        (ShapeType::Map, true) => {
            format!(
            "for (key, val) in input.{field_name}.iter() {{
params.put(key, val);
}}",
            field_name = field_name)
        },
        (ShapeType::Map, false) => {
            format!(
            "if let Some(ref x) = input.{field_name} {{
for (key, val) in x.iter() {{
params.put(key, val);
}}
}}",
            field_name = field_name,
            )
        },
        (_, true) => {
            format!(
            "params.put(\"{member_name}\", &input.{field_name});",
            member_name = member_name,
            field_name = field_name)
        },
        (_, false) => {
            format!(
            "if let Some(ref x) = input.{field_name} {{
params.put(\"{member_name}\", x);
}}",
            member_name = member_name,
            field_name = field_name,
            )
        }
    }
}
/// Source snippet that binds `request_uri`: a plain string literal when
/// the URI has no arguments, otherwise a `format!` call with the member
/// bindings interpolated.
fn generate_uri_formatter(request_uri: &str, uri_strings: &[String]) -> String {
    if uri_strings.is_empty() {
        format!(
        "let request_uri = \"{request_uri}\";",
        request_uri = request_uri,
        )
    } else {
        format!("let request_uri = format!(\"{request_uri}\", {uri_strings});",
                request_uri = request_uri,
                uri_strings = uri_strings.join(", "))
    }
}
/// Collects `format!` argument snippets for every member that maps into
/// the request URI (non-URI members yield None and are filtered out).
fn generate_shape_member_uri_strings(shape: &Shape) -> Vec<String> {
    shape.members
        .as_ref()
        .unwrap()
        .iter()
        .filter_map(|(member_name, member)| {
            generate_member_format_string(&member_name.to_snake_case(), member)
        })
        .collect::<Vec<String>>()
}
/// For a member bound to the URI (`location == "uri"`), produce a
/// `name = input.field` argument for the URI `format!` call; members with
/// any other (or no) location yield `None`.
///
/// Note the deliberately swapped bindings below: the format-string
/// placeholder (`member_name`) comes from `location_name` when present,
/// while the struct field accessed (`field_name`) is always the
/// already-snake-cased member name passed in by the caller.
fn generate_member_format_string(member_name: &str, member: &Member) -> Option<String> {
match member.location {
Some(ref x) if x == "uri" => {
match member.location_name {
Some(ref loc_name) => {
// Placeholder named after the wire-level location name.
Some(format!(
"{member_name} = input.{field_name}",
field_name = member_name,
member_name = loc_name.to_snake_case(),
))
}
None => {
// No explicit location name: placeholder and field coincide.
Some(format!(
"{member_name} = input.{field_name}",
field_name = member_name,
member_name = member_name,
))
}
}
}
Some(_) | None => None,
}
}
/// Turn an operation's documentation into a `#[doc="…"]` attribute,
/// escaping embedded double quotes; `None` when the operation has none.
fn generate_documentation(operation: &Operation) -> Option<String> {
operation.documentation
.as_ref()
.map(|docs| format!("#[doc=\"{}\"]", docs.replace("\"", "\\\"")))
}
/// Generate code to plumb the response status code into any fields
/// in the output shape that specify it
fn generate_status_code_parser(operation: &Operation, service: &Service) -> String {
// Operations without an output shape have nothing to populate.
if operation.output.is_none() {
return "".to_owned();
}
let shape_name = &operation.output.as_ref().unwrap().shape;
let output_shape = &service.get_shape(shape_name).expect("Shape missing from service definition");
let mut status_code_parser = "".to_string();
for (member_name, member) in output_shape.members.as_ref().unwrap() {
if let Some(ref location) = member.location {
if location == "statusCode" {
if output_shape.required(member_name) {
// NOTE(review): unlike the optional branch below, this arm emits no
// `as i64` cast on the u16 status — confirm required statusCode
// members are typed u16 in the generated structs.
status_code_parser += &format!("result.{} = StatusCode::to_u16(&response.status);", member_name.to_snake_case());
} else {
// The emitted cast targets i64, wrapped in Some for optional members.
status_code_parser += &format!("result.{} = Some(StatusCode::to_u16(&response.status) as i64);", member_name.to_snake_case());
}
}
}
}
status_code_parser
}
/// Generate code to parse the http response body, either as a JSON object
/// deserialized with serde, or as a raw payload that's assigned to one of
/// the fields in the result object.
///
/// Needs to determine whether or not other fields in the result object
/// will be set later (e.g. from headers), so the compiler won't spit out
/// warnings about unnecessary mutability
fn generate_body_parser(operation: &Operation, service: &Service) -> String {
// Operations without an output shape bind `result` to unit.
if operation.output.is_none() {
return "let result = ();".to_string();
}
let shape_name = &operation.output.as_ref().unwrap().shape;
let output_shape = &service.get_shape(shape_name).expect("Shape missing from service definition");
// `mut` is required iff some member is populated later from headers or
// the status code (i.e. carries a `location`).
let mutable_result = output_shape.members
.as_ref()
.unwrap()
.iter()
.any(|(_, member)| member.location.is_some());
match output_shape.payload {
// No dedicated payload member: deserialize the whole body as JSON.
None => json_body_parser(shape_name, mutable_result),
Some(ref payload_member_name) => {
let payload_member_shape = &output_shape.members.as_ref().unwrap()[payload_member_name].shape;
let payload_shape = &service.get_shape(payload_member_shape).expect("Shape missing from service definition");
match payload_shape.shape_type {
// Raw blob/string payloads are assigned directly to the field.
payload_type if payload_type == ShapeType::Blob ||
payload_type == ShapeType::String => {
payload_body_parser(payload_type, shape_name, payload_member_name, mutable_result)
}
_ => json_body_parser(shape_name, mutable_result),
}
}
}
}
/// Take the raw http response body and assign it to the payload field
/// on the result object
fn payload_body_parser(payload_type: ShapeType, output_shape: &str, payload_member: &str, mutable_result: bool) -> String {
// Blobs keep the raw bytes; any other payload type (string) is decoded
// as lossy UTF-8.
let response_body = match payload_type {
ShapeType::Blob => "Some(response.body)",
_ => "Some(String::from_utf8_lossy(&response.body).into_owned())",
};
format!("
let {mutable} result = {output_shape}::default();
result.{payload_member} = {response_body};
",
output_shape = output_shape,
payload_member = payload_member.to_snake_case(),
response_body = response_body,
// Only `mut` when header/status parsing will mutate the result later.
mutable = if mutable_result { "mut" } else { "" }
)
}
/// Parse the http response body as a JSON object with serde, and use that
/// as the result object
fn json_body_parser(output_shape: &str, mutable_result: bool) -> String {
// `serde-json` serializes field-less structs as "null", but AWS returns
// "{{}}" for a field-less response, so we must check for this result
// and convert it if necessary.
format!("
let mut body = response.body;
if body == b\"{{}}\" {{
body = b\"null\".to_vec();
}}
debug!(\"Response body: {{:?}}\", body);
debug!(\"Response status: {{}}\", response.status);
let {mutable} result = serde_json::from_slice::<{output_shape}>(&body).unwrap();
",
output_shape = output_shape,
// Only `mut` when header/status parsing will mutate the result later.
mutable = if mutable_result { "mut" } else { "" }
)
}
Removes incorrect comment.
use std::io::Write;
use inflector::Inflector;
use regex::{Captures, Regex};
use hyper::status::StatusCode;
use botocore::{Member, Operation, Shape, ShapeType};
use ::Service;
use super::{GenerateProtocol, error_type_name, FileWriter, IoResult, rest_response_parser, generate_field_name};
/// Code generator for services that speak the REST-JSON protocol.
pub struct RestJsonGenerator;
impl GenerateProtocol for RestJsonGenerator {
/// Emit one trait-level method signature per service operation.
fn generate_method_signatures(&self, writer: &mut FileWriter, service: &Service) -> IoResult {
for (operation_name, operation) in service.operations().iter() {
let input_type = operation.input_shape();
let output_type = operation.output_shape_or("()");
// Retrieve the `Shape` for the input for this operation.
let input_shape = &service.get_shape(input_type).unwrap();
writeln!(writer,
"
{documentation}
{method_signature} -> \
Result<{output_type}, {error_type}>;
",
documentation = generate_documentation(operation).unwrap_or("".to_owned()),
method_signature = generate_method_signature(operation, input_shape),
error_type = error_type_name(operation_name),
output_type = output_type)?
}
Ok(())
}
/// Emit the full client implementation of every operation: build and
/// sign the request, dispatch it, and on success parse the body,
/// headers, and status code into the result.
fn generate_method_impls(&self, writer: &mut FileWriter, service: &Service) -> IoResult {
for (operation_name, operation) in service.operations().iter() {
let input_type = operation.input_shape();
let output_type = operation.output_shape_or("()");
// Retrieve the `Shape` for the input for this operation.
let input_shape = &service.get_shape(input_type).unwrap();
// Construct a list of format strings which will be used to format
// the request URI, mapping the input struct to the URI arguments.
let member_uri_strings = generate_shape_member_uri_strings(input_shape);
// A boolean controlling whether or not the payload should be loaded
// into the request.
// According to the AWS SDK documentation, requests should only have
// a request body for operations with ANY non-URI or non-query
// parameters.
let load_payload = input_shape.members
.as_ref()
.unwrap()
.iter()
.any(|(_, member)| member.location.is_none());
// Construct a list of strings which will be used to load request
// parameters from the input struct into a `Params` vec, which will
// then be added to the request.
let member_param_strings = generate_shape_member_param_strings(service, input_shape);
writeln!(writer,"
{documentation}
{method_signature} -> Result<{output_type}, {error_type}> {{
{encode_input}
{request_uri_formatter}
let mut request = SignedRequest::new(\"{http_method}\", \"{endpoint_prefix}\", self.region, &request_uri);
request.set_content_type(\"application/x-amz-json-1.1\".to_owned());
{modify_endpoint_prefix}
{load_payload}
{load_params}
request.sign(&self.credentials_provider.credentials()?);
let response = self.dispatcher.dispatch(&request)?;
match response.status {{
{status_code} => {{
{parse_body}
{parse_headers}
{parse_status_code}
Ok(result)
}}
_ => Err({error_type}::from_body(String::from_utf8_lossy(&response.body).as_ref())),
}}
}}
",
documentation = generate_documentation(operation).unwrap_or("".to_owned()),
method_signature = generate_method_signature(operation, input_shape),
endpoint_prefix = service.signing_name(),
modify_endpoint_prefix = generate_endpoint_modification(service).unwrap_or("".to_owned()),
http_method = operation.http.method,
error_type = error_type_name(operation_name),
status_code = http_code_to_status_code(operation.http.response_code),
parse_body = generate_body_parser(operation, service),
parse_status_code = generate_status_code_parser(operation, service),
output_type = output_type,
parse_headers = rest_response_parser::generate_response_headers_parser(service, operation)
.unwrap_or_else(|| "".to_owned()),
request_uri_formatter = generate_uri_formatter(
&generate_snake_case_uri(&operation.http.request_uri),
&member_uri_strings
),
load_payload = generate_payload_loading_string(load_payload),
load_params = generate_params_loading_string(&member_param_strings),
encode_input = generate_encoding_string(load_payload),
)?
}
Ok(())
}
/// Imports shared by every generated REST-JSON client file.
fn generate_prelude(&self, writer: &mut FileWriter, _: &Service) -> IoResult {
writeln!(writer,
"use serde_json;
use rusoto_core::param::{{Params, ServiceParams}};
use rusoto_core::signature::SignedRequest;
use serde_json::from_str;
use serde_json::Value as SerdeJsonValue;")
}
/// Derive list for generated shape structs; serde derives are added
/// only for the direction(s) a shape actually crosses the wire.
fn generate_struct_attributes(&self, serialized: bool, deserialized: bool) -> String {
let mut derived = vec!["Default", "Debug", "Clone"];
if serialized {
derived.push("Serialize");
}
if deserialized {
derived.push("Deserialize")
}
format!("#[derive({})]", derived.join(","))
}
/// Rust type used for timestamp members in generated shapes.
fn timestamp_type(&self) -> &'static str {
"f64"
}
}
// Used to print the enum value rather than the status code and the canonical reason.
// For codegen purposes, leaving existing StatusCode Display trait implementation intact.
// StatusCode::Ok.to_string() prints "200 OK"
// StatusCode::Ok.enum_as_string() prints "StatusCode::Ok"
trait CodegenString {
/// Render the value as Rust source text naming the enum variant.
fn enum_as_string(&self) -> String;
}
impl CodegenString for StatusCode {
fn enum_as_string(&self) -> String {
// Debug formatting of hyper's StatusCode yields the bare variant name.
format!("StatusCode::{:?}", self)
}
}
/// Map an optional HTTP response code from the service definition to the
/// `StatusCode::…` source text used in generated match arms.
fn http_code_to_status_code(code: Option<i32>) -> String {
    // Some service definitions such as elastictranscoder don't specify
    // the response code; assume 200 OK in that case.
    code.map_or_else(|| "StatusCode::Ok".to_string(),
                     |actual_code| StatusCode::from_u16(actual_code as u16).enum_as_string())
}
// IoT has an endpoint_prefix and a signing_name that differ
/// Emit an endpoint-prefix override statement when the service signs
/// under a different name than its endpoint prefix; `None` otherwise.
fn generate_endpoint_modification(service: &Service) -> Option<String> {
    if service.signing_name() != service.endpoint_prefix() {
        Some(format!("request.set_endpoint_prefix(\"{}\".to_string());",
                     service.endpoint_prefix()))
    } else {
        None
    }
}
// IoT defines a lot of empty (and therefore unnecessary) request shapes
// don't clutter method signatures with them
/// Build the client method signature for an operation; operations with an
/// empty (or absent) input shape take no `input` parameter at all.
fn generate_method_signature(operation: &Operation, shape: &Shape) -> String {
    let has_members = shape.members.as_ref().map_or(false, |m| !m.is_empty());
    if has_members {
        format!("fn {method_name}(&self, input: &{input_type})",
                method_name = operation.name.to_snake_case(),
                input_type = operation.input_shape())
    } else {
        format!("fn {method_name}(&self)",
                method_name = operation.name.to_snake_case())
    }
}
/// Emit the statement that JSON-encodes the input struct, or nothing when
/// the operation sends no request body.
fn generate_encoding_string(load_payload: bool) -> String {
    match load_payload {
        true => "let encoded = serde_json::to_string(input).unwrap();".to_owned(),
        false => String::new(),
    }
}
/// Emit the statement that attaches the encoded body to the request, or
/// nothing when the operation sends no request body.
fn generate_payload_loading_string(load_payload: bool) -> String {
    match load_payload {
        true => "request.set_payload(Some(encoded.into_bytes()));".to_owned(),
        false => String::new(),
    }
}
/// Rewrite `{Placeholder}` segments of a request URI into their
/// snake_case equivalents so they match the generated struct fields.
fn generate_snake_case_uri(request_uri: &str) -> String {
// Compiled once; matches a single `{word}` URI parameter.
lazy_static! {
static ref URI_ARGS_REGEX: Regex = Regex::new(r"\{([\w\d]+)\}").unwrap();
}
URI_ARGS_REGEX.replace_all(request_uri, |caps: &Captures| {
format!("{{{}}}",
caps.get(1).map(|c| Inflector::to_snake_case(c.as_str())).unwrap())
})
.to_string()
}
/// Emit the statements that build a `Params` map from the query-string
/// member loaders and attach it to the request; empty when there are none.
fn generate_params_loading_string(param_strings: &[String]) -> String {
    if param_strings.is_empty() {
        return "".to_owned();
    }
    format!("let mut params = Params::new();
{param_strings}
request.set_params(params);",
            param_strings = param_strings.join("\n"))
}
/// Collect the parameter-loading statements for every non-deprecated
/// member of the input shape bound to the query string.
fn generate_shape_member_param_strings(service: &Service, shape: &Shape) -> Vec<String> {
shape.members
.as_ref()
.unwrap()
.iter()
.filter_map(|(member_name, member)| {
member.location.as_ref().and_then(|loc| {
if !member.deprecated() && loc == "querystring" {
let member_shape = service.shape_for_member(member).unwrap();
Some(generate_param_load_string(member_name, member_shape, shape.required(member_name)))
} else {
None
}
})
})
.collect::<Vec<String>>()
}
/// Generate the statement(s) that copy one query-string member from the
/// input struct into the request's `params` map.
///
/// Lists are expanded item by item, maps pair by pair, and scalars put
/// directly; optional members of every kind are wrapped in an
/// `if let Some(..)` guard.
fn generate_param_load_string(member_name: &str, member_shape: &Shape, is_required: bool) -> String {
    // `generate_field_name` is what every other arm uses to address the
    // struct field (it snake-cases and escapes reserved words); the map
    // arms previously used a bare `to_snake_case`, which diverges for
    // members named after Rust keywords.
    let field_name = generate_field_name(member_name);
    match (member_shape.shape_type, is_required) {
        (ShapeType::List, true) => {
            format!(
                "for item in input.{field_name}.iter() {{
params.put(\"{member_name}\", item);
}}",
                member_name = member_name,
                field_name = field_name)
        },
        (ShapeType::List, false) => {
            format!(
                "if let Some(ref x) = input.{field_name} {{
for item in x.iter() {{
params.put(\"{member_name}\", item);
}}
}}",
                member_name = member_name,
                field_name = field_name,
            )
        },
        (ShapeType::Map, true) => {
            format!(
                "for (key, val) in input.{field_name}.iter() {{
params.put(key, val);
}}",
                field_name = field_name)
        },
        (ShapeType::Map, false) => {
            format!(
                "if let Some(ref x) = input.{field_name} {{
for (key, val) in x.iter() {{
params.put(key, val);
}}
}}",
                field_name = field_name,
            )
        },
        (_, true) => {
            format!(
                "params.put(\"{member_name}\", &input.{field_name});",
                member_name = member_name,
                field_name = field_name)
        },
        (_, false) => {
            format!(
                "if let Some(ref x) = input.{field_name} {{
params.put(\"{member_name}\", x);
}}",
                member_name = member_name,
                field_name = field_name,
            )
        }
    }
}
/// Emits the Rust statement that builds `request_uri` for a generated
/// client method: a plain string literal when the operation has no URI
/// parameters, otherwise a `format!` invocation over the argument list.
fn generate_uri_formatter(request_uri: &str, uri_strings: &[String]) -> String {
    if uri_strings.is_empty() {
        // No substitutions needed; embed the URI verbatim.
        format!("let request_uri = \"{request_uri}\";", request_uri = request_uri)
    } else {
        let args = uri_strings.join(", ");
        format!("let request_uri = format!(\"{request_uri}\", {uri_strings});",
                request_uri = request_uri,
                uri_strings = args)
    }
}
/// Collect the `format!` argument strings for every member of the input
/// shape that is bound to a URI path parameter.
fn generate_shape_member_uri_strings(shape: &Shape) -> Vec<String> {
// Input shapes in the botocore definitions always carry a member map;
// `unwrap` relies on that invariant.
shape.members
.as_ref()
.unwrap()
.iter()
.filter_map(|(member_name, member)| {
generate_member_format_string(&member_name.to_snake_case(), member)
})
.collect::<Vec<String>>()
}
/// For a member bound to the URI (`location == "uri"`), produce a
/// `name = input.field` argument for the URI `format!` call; members with
/// any other (or no) location yield `None`.
///
/// Note the deliberately swapped bindings below: the format-string
/// placeholder (`member_name`) comes from `location_name` when present,
/// while the struct field accessed (`field_name`) is always the
/// already-snake-cased member name passed in by the caller.
fn generate_member_format_string(member_name: &str, member: &Member) -> Option<String> {
match member.location {
Some(ref x) if x == "uri" => {
match member.location_name {
Some(ref loc_name) => {
// Placeholder named after the wire-level location name.
Some(format!(
"{member_name} = input.{field_name}",
field_name = member_name,
member_name = loc_name.to_snake_case(),
))
}
None => {
// No explicit location name: placeholder and field coincide.
Some(format!(
"{member_name} = input.{field_name}",
field_name = member_name,
member_name = member_name,
))
}
}
}
Some(_) | None => None,
}
}
/// Turn an operation's documentation into a `#[doc="…"]` attribute,
/// escaping embedded double quotes; `None` when the operation has none.
fn generate_documentation(operation: &Operation) -> Option<String> {
operation.documentation
.as_ref()
.map(|docs| format!("#[doc=\"{}\"]", docs.replace("\"", "\\\"")))
}
/// Generate code to plumb the response status code into any fields
/// in the output shape that specify it
fn generate_status_code_parser(operation: &Operation, service: &Service) -> String {
// Operations without an output shape have nothing to populate.
if operation.output.is_none() {
return "".to_owned();
}
let shape_name = &operation.output.as_ref().unwrap().shape;
let output_shape = &service.get_shape(shape_name).expect("Shape missing from service definition");
let mut status_code_parser = "".to_string();
for (member_name, member) in output_shape.members.as_ref().unwrap() {
if let Some(ref location) = member.location {
if location == "statusCode" {
if output_shape.required(member_name) {
// NOTE(review): unlike the optional branch below, this arm emits no
// `as i64` cast on the u16 status — confirm required statusCode
// members are typed u16 in the generated structs.
status_code_parser += &format!("result.{} = StatusCode::to_u16(&response.status);", member_name.to_snake_case());
} else {
// The emitted cast targets i64, wrapped in Some for optional members.
status_code_parser += &format!("result.{} = Some(StatusCode::to_u16(&response.status) as i64);", member_name.to_snake_case());
}
}
}
}
status_code_parser
}
/// Generate code to parse the http response body, either as a JSON object
/// deserialized with serde, or as a raw payload that's assigned to one of
/// the fields in the result object.
///
/// Needs to determine whether or not other fields in the result object
/// will be set later (e.g. from headers), so the compiler won't spit out
/// warnings about unnecessary mutability
fn generate_body_parser(operation: &Operation, service: &Service) -> String {
// Operations without an output shape bind `result` to unit.
if operation.output.is_none() {
return "let result = ();".to_string();
}
let shape_name = &operation.output.as_ref().unwrap().shape;
let output_shape = &service.get_shape(shape_name).expect("Shape missing from service definition");
// `mut` is required iff some member is populated later from headers or
// the status code (i.e. carries a `location`).
let mutable_result = output_shape.members
.as_ref()
.unwrap()
.iter()
.any(|(_, member)| member.location.is_some());
match output_shape.payload {
// No dedicated payload member: deserialize the whole body as JSON.
None => json_body_parser(shape_name, mutable_result),
Some(ref payload_member_name) => {
let payload_member_shape = &output_shape.members.as_ref().unwrap()[payload_member_name].shape;
let payload_shape = &service.get_shape(payload_member_shape).expect("Shape missing from service definition");
match payload_shape.shape_type {
// Raw blob/string payloads are assigned directly to the field.
payload_type if payload_type == ShapeType::Blob ||
payload_type == ShapeType::String => {
payload_body_parser(payload_type, shape_name, payload_member_name, mutable_result)
}
_ => json_body_parser(shape_name, mutable_result),
}
}
}
}
/// Take the raw http response body and assign it to the payload field
/// on the result object
fn payload_body_parser(payload_type: ShapeType, output_shape: &str, payload_member: &str, mutable_result: bool) -> String {
// Blobs keep the raw bytes; any other payload type (string) is decoded
// as lossy UTF-8.
let response_body = match payload_type {
ShapeType::Blob => "Some(response.body)",
_ => "Some(String::from_utf8_lossy(&response.body).into_owned())",
};
format!("
let {mutable} result = {output_shape}::default();
result.{payload_member} = {response_body};
",
output_shape = output_shape,
payload_member = payload_member.to_snake_case(),
response_body = response_body,
// Only `mut` when header/status parsing will mutate the result later.
mutable = if mutable_result { "mut" } else { "" }
)
}
/// Parse the http response body as a JSON object with serde, and use that
/// as the result object
fn json_body_parser(output_shape: &str, mutable_result: bool) -> String {
// `serde-json` serializes field-less structs as "null", but AWS returns
// "{{}}" for a field-less response, so we must check for this result
// and convert it if necessary.
format!("
let mut body = response.body;
if body == b\"{{}}\" {{
body = b\"null\".to_vec();
}}
debug!(\"Response body: {{:?}}\", body);
debug!(\"Response status: {{}}\", response.status);
let {mutable} result = serde_json::from_slice::<{output_shape}>(&body).unwrap();
",
output_shape = output_shape,
// Only `mut` when header/status parsing will mutate the result later.
mutable = if mutable_result { "mut" } else { "" }
)
}
|
#![deny(missing_docs)]
#![feature(hash)]
//! A library for range addressing
/// A representation of a range
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Range {
    /// The range offset
    pub offset: usize,
    /// The range length
    pub length: usize
}

impl Range {
    /// Creates a new `Range`
    #[inline(always)]
    pub fn new(offset: usize, length: usize) -> Range {
        Range { offset: offset, length: length }
    }

    /// Creates an empty range with an offset.
    #[inline(always)]
    pub fn empty(offset: usize) -> Range {
        Range::new(offset, 0)
    }

    /// Returns true if range is empty
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.length == 0
    }

    /// Returns the next offset
    #[inline(always)]
    pub fn next_offset(&self) -> usize {
        self.offset + self.length
    }

    /// Returns a range iterator.
    #[inline(always)]
    pub fn iter(&self) -> std::ops::Range<usize> {
        let start = self.offset;
        start..start + self.length
    }

    /// Shrinks range at both ends with `n` items.
    #[inline(always)]
    pub fn shrink_n(&self, n: usize) -> Option<Range> {
        // Need at least `n` items on each side to trim.
        if self.length >= 2 * n {
            Some(Range::new(self.offset + n, self.length - 2 * n))
        } else {
            None
        }
    }

    /// Shrinks range at both ends with 1 item.
    #[inline(always)]
    pub fn shrink(&self) -> Option<Range> {
        self.shrink_n(1)
    }
}
/// The parent/child relationship for hierarchical contiguous arrays.
/// Meant to be used by newtypes wrapping `Range` for type safety.
pub trait ParentRange {
/// The child type this parent range groups.
type Child;
/// Gets the immutable inner range.
fn range(&self) -> &Range;
/// Gets the mutable inner range.
fn range_mut(&mut self) -> &mut Range;
}
Added `ParentRange::from_range`
Closes https://github.com/PistonDevelopers/range/issues/13
#![deny(missing_docs)]
#![feature(hash)]
//! A library for range addressing
/// A representation of a range
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Range {
    /// The range offset
    pub offset: usize,
    /// The range length
    pub length: usize
}

impl Range {
    /// Creates a new `Range`
    #[inline(always)]
    pub fn new(offset: usize, length: usize) -> Range {
        Range { offset: offset, length: length }
    }

    /// Creates an empty range with an offset.
    #[inline(always)]
    pub fn empty(offset: usize) -> Range {
        Range::new(offset, 0)
    }

    /// Returns true if range is empty
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.length == 0
    }

    /// Returns the next offset
    #[inline(always)]
    pub fn next_offset(&self) -> usize {
        self.offset + self.length
    }

    /// Returns a range iterator.
    #[inline(always)]
    pub fn iter(&self) -> std::ops::Range<usize> {
        let start = self.offset;
        start..start + self.length
    }

    /// Shrinks range at both ends with `n` items.
    #[inline(always)]
    pub fn shrink_n(&self, n: usize) -> Option<Range> {
        // Need at least `n` items on each side to trim.
        if self.length >= 2 * n {
            Some(Range::new(self.offset + n, self.length - 2 * n))
        } else {
            None
        }
    }

    /// Shrinks range at both ends with 1 item.
    #[inline(always)]
    pub fn shrink(&self) -> Option<Range> {
        self.shrink_n(1)
    }
}
/// The parent/child relationship for hierarchical contiguous arrays.
/// Meant to be used by newtypes wrapping `Range` for type safety.
pub trait ParentRange {
/// The child type this parent range groups.
type Child;
/// Creates parent range from inner range.
fn from_range(range: Range) -> Self;
/// Gets the immutable inner range.
fn range(&self) -> &Range;
/// Gets the mutable inner range.
fn range_mut(&mut self) -> &mut Range;
}
|
//! Elasticsearch Response Iterators
//!
//! A crate to handle parsing and handling Elasticsearch search results which provides
//! convenient iterators to step through the results returned. It is designed to work
//! with [`elastic-reqwest`](https://github.com/elastic-rs/elastic-reqwest/).
//!
//! ## Usage
//!
//! Query your Elasticsearch Cluster, then iterate through the results
//!
//! ```no_run
//! # extern crate elastic_responses;
//! # use elastic_responses::SearchResponse;
//! # fn do_request() -> SearchResponse { unimplemented!() }
//! # fn main() {
//! // Send a request (omitted, see `samples/search`), and read the response.
//! // Parse body to JSON as an elastic_responses::SearchResponse object
//! let body_as_json: SearchResponse = do_request();
//!
//! // Use hits() or aggs() iterators
//! // Hits
//! for i in body_as_json.hits() {
//! println!("{:?}",i);
//! }
//!
//! // Aggregations
//! for i in body_as_json.aggs() {
//! println!("{:?}",i);
//! }
//! # }
//! ```
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate quick_error;
extern crate serde;
extern crate serde_json;
extern crate slog_stdlog;
extern crate slog_envlogger;
/// Error types from Elasticsearch
pub mod error;
/// Response type parsing.
pub mod parse;
mod common;
mod ping;
mod get;
mod search;
mod bulk;
pub use self::common::*;
pub use self::ping::*;
pub use self::get::*;
pub use self::search::*;
pub use self::bulk::*;
use std::io::Read;
use serde_json::Value;
use self::parse::MaybeOkResponse;
use error::*;
/// A raw HTTP response with enough information to parse
/// a concrete type from it.
pub struct HttpResponse<R> {
    code: u16,
    body: R,
}

impl<R> HttpResponse<R> {
    /// Create a new HTTP response from the given status code
    /// and body.
    pub fn new(status: u16, body: R) -> Self {
        HttpResponse { code: status, body: body }
    }

    /// Get the status code.
    pub fn status(&self) -> u16 {
        self.code
    }
}
// Shorthand for results whose error side is an Elasticsearch `ResponseError`.
type ApiResult<T> = Result<T, ResponseError>;
/// Convert a response message into a either a success
/// or failure result.
pub trait FromResponse
where Self: Sized
{
/// Parse `Self` from a raw HTTP response (anything convertible into
/// `HttpResponse`), classifying it as success or API error.
fn from_response<I: Into<HttpResponse<R>>, R: Read>(res: I) -> ApiResult<Self>;
}
impl FromResponse for Value {
/// Treat any 2xx status as a potentially-ok body; everything else is
/// routed to the error-parsing path.
fn from_response<I: Into<HttpResponse<R>>, R: Read>(res: I) -> ApiResult<Self> {
let res = res.into();
// NOTE(review): `response` is not defined on HttpResponse here — it is
// presumably supplied by the parse module; confirm its contract there.
res.response(|res| {
match res.status() {
// `200...299` is the old inclusive range-pattern syntax (pre-`..=`).
200...299 => Ok(MaybeOkResponse::new(true, res)),
_ => Ok(MaybeOkResponse::new(false, res)),
}
})
}
}
update links
//! Elasticsearch Response Iterators
//!
//! A crate to handle parsing and handling Elasticsearch search results which provides
//! convenient iterators to step through the results returned. It is designed to work
//! with [`elastic-reqwest`](https://github.com/elastic-rs/elastic-reqwest/).
//!
//! ## Usage
//!
//! Query your Elasticsearch Cluster, then iterate through the results
//!
//! ```no_run
//! # extern crate elastic_responses;
//! # use elastic_responses::SearchResponse;
//! # fn do_request() -> SearchResponse { unimplemented!() }
//! # fn main() {
//! // Send a request (omitted, see `samples/search`), and read the response.
//! // Parse body to JSON as an elastic_responses::SearchResponse object
//! let body_as_json: SearchResponse = do_request();
//!
//! // Use hits() or aggs() iterators
//! // Hits
//! for i in body_as_json.hits() {
//! println!("{:?}",i);
//! }
//!
//! // Aggregations
//! for i in body_as_json.aggs() {
//! println!("{:?}",i);
//! }
//! # }
//! ```
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate quick_error;
extern crate serde;
extern crate serde_json;
extern crate slog_stdlog;
extern crate slog_envlogger;
/// Error types from Elasticsearch
pub mod error;
/// Response type parsing.
pub mod parse;
mod common;
mod ping;
mod get;
mod search;
mod bulk;
pub use self::common::*;
pub use self::ping::*;
pub use self::get::*;
pub use self::search::*;
pub use self::bulk::*;
use std::io::Read;
use serde_json::Value;
use self::parse::MaybeOkResponse;
use error::*;
/// A raw HTTP response with enough information to parse
/// a concrete type from it.
pub struct HttpResponse<R> {
    code: u16,
    body: R,
}

impl<R> HttpResponse<R> {
    /// Create a new HTTP response from the given status code
    /// and body.
    pub fn new(status: u16, body: R) -> Self {
        HttpResponse { code: status, body: body }
    }

    /// Get the status code.
    pub fn status(&self) -> u16 {
        self.code
    }
}
// Shorthand for results whose error side is an Elasticsearch `ResponseError`.
type ApiResult<T> = Result<T, ResponseError>;
/// Convert a response message into a either a success
/// or failure result.
pub trait FromResponse
where Self: Sized
{
/// Parse `Self` from a raw HTTP response (anything convertible into
/// `HttpResponse`), classifying it as success or API error.
fn from_response<I: Into<HttpResponse<R>>, R: Read>(res: I) -> ApiResult<Self>;
}
impl FromResponse for Value {
/// Treat any 2xx status as a potentially-ok body; everything else is
/// routed to the error-parsing path.
fn from_response<I: Into<HttpResponse<R>>, R: Read>(res: I) -> ApiResult<Self> {
let res = res.into();
// NOTE(review): `response` is not defined on HttpResponse here — it is
// presumably supplied by the parse module; confirm its contract there.
res.response(|res| {
match res.status() {
// `200...299` is the old inclusive range-pattern syntax (pre-`..=`).
200...299 => Ok(MaybeOkResponse::new(true, res)),
_ => Ok(MaybeOkResponse::new(false, res)),
}
})
}
}
use std::ffi;
use std::ptr;
use std::rc::Rc;
use std::cell::RefCell;
use std::slice;
extern crate libc;
use libc::{
c_int,
c_void,
uint32_t,
int32_t,
};
#[macro_use]
extern crate bitflags;
extern crate libfreenect_sys;
use libfreenect_sys as ft;
/// Errors returned by freenect wrapper calls.
///
/// Made `pub` because it is exposed through the public alias
/// `FreenectResult` below; a private type in a public interface is
/// rejected by the compiler (E0446).
#[derive(Debug)]
pub enum FreenectError {
    /// The underlying C call returned a non-zero status code.
    LibraryReturnCode(i32),
    /// The library handed back a null pointer.
    NullPtr,
    /// A frame arrived in a format other than the one configured.
    FrameFormatMismatch,
}
// Error type for the library: `Result` specialized to `FreenectError`.
pub type FreenectResult<T> = Result<T, FreenectError>;
/// Logging verbosity levels, mirroring libfreenect's `freenect_loglevel`.
#[derive(Debug)]
pub enum LogLevel {
Fatal, // Log for crashing/non-recoverable errors
Error, // Log for major errors
Warning, // Log for warning messages
Notice, // Log for important messages
Info, // Log for normal messages
Debug, // Log for useful development messages
Spew, // Log for slightly less useful messages
Flood, // Log EVERYTHING. May slow performance.
}
impl LogLevel {
/// Map to the raw FFI enum consumed by libfreenect.
fn to_lowlevel(&self) -> ft::freenect_loglevel {
match *self {
LogLevel::Fatal => ft::freenect_loglevel::FREENECT_LOG_FATAL,
LogLevel::Error => ft::freenect_loglevel::FREENECT_LOG_ERROR,
LogLevel::Warning => ft::freenect_loglevel::FREENECT_LOG_WARNING,
LogLevel::Notice => ft::freenect_loglevel::FREENECT_LOG_NOTICE,
LogLevel::Info => ft::freenect_loglevel::FREENECT_LOG_INFO,
LogLevel::Debug => ft::freenect_loglevel::FREENECT_LOG_DEBUG,
LogLevel::Spew => ft::freenect_loglevel::FREENECT_LOG_SPEW,
LogLevel::Flood => ft::freenect_loglevel::FREENECT_LOG_FLOOD,
}
}
/// Map from the raw FFI enum produced by libfreenect.
fn from_lowlevel(lvl: ft::freenect_loglevel) -> LogLevel {
match lvl {
ft::freenect_loglevel::FREENECT_LOG_FATAL => LogLevel::Fatal,
ft::freenect_loglevel::FREENECT_LOG_ERROR => LogLevel::Error,
ft::freenect_loglevel::FREENECT_LOG_WARNING => LogLevel::Warning,
ft::freenect_loglevel::FREENECT_LOG_NOTICE => LogLevel::Notice,
ft::freenect_loglevel::FREENECT_LOG_INFO => LogLevel::Info,
ft::freenect_loglevel::FREENECT_LOG_DEBUG => LogLevel::Debug,
ft::freenect_loglevel::FREENECT_LOG_SPEW => LogLevel::Spew,
ft::freenect_loglevel::FREENECT_LOG_FLOOD => LogLevel::Flood,
}
}
}
/// Camera resolution presets, mirroring libfreenect's `freenect_resolution`.
#[derive(Debug)]
pub enum Resolution {
Low,
Medium,
High,
}
impl Resolution {
/// Map to the raw FFI enum consumed by libfreenect.
fn to_lowlevel(&self) -> ft::freenect_resolution {
match *self {
Resolution::Low => ft::freenect_resolution::FREENECT_RESOLUTION_LOW,
Resolution::Medium => ft::freenect_resolution::FREENECT_RESOLUTION_MEDIUM,
Resolution::High => ft::freenect_resolution::FREENECT_RESOLUTION_HIGH,
}
}
/// Map from the raw FFI enum; panics on values outside the known set.
fn from_lowlevel(res: &ft::freenect_resolution) -> Resolution {
match *res {
ft::freenect_resolution::FREENECT_RESOLUTION_LOW => Resolution::Low,
ft::freenect_resolution::FREENECT_RESOLUTION_MEDIUM => Resolution::Medium,
ft::freenect_resolution::FREENECT_RESOLUTION_HIGH => Resolution::High,
// Defensive: the C enum could carry values this wrapper doesn't know.
_ => panic!("Unknown freenect_resolution enum")
}
}
}
/// Video stream pixel formats, mirroring libfreenect's `freenect_video_format`.
#[derive(Debug)]
pub enum VideoFormat {
Rgb,
Bayer,
Ir8Bit,
Ir10Bit,
Ir10BitPacked,
YuvRgb,
YuvRaw,
}
impl VideoFormat {
/// Map to the raw FFI enum consumed by libfreenect.
fn to_lowlevel(&self) -> ft::freenect_video_format {
match *self {
VideoFormat::Rgb => ft::freenect_video_format::FREENECT_VIDEO_RGB,
VideoFormat::Bayer => ft::freenect_video_format::FREENECT_VIDEO_BAYER,
VideoFormat::Ir8Bit => ft::freenect_video_format::FREENECT_VIDEO_IR_8BIT,
VideoFormat::Ir10Bit => ft::freenect_video_format::FREENECT_VIDEO_IR_10BIT,
VideoFormat::Ir10BitPacked => ft::freenect_video_format::FREENECT_VIDEO_IR_10BIT_PACKED,
VideoFormat::YuvRgb => ft::freenect_video_format::FREENECT_VIDEO_YUV_RGB,
VideoFormat::YuvRaw => ft::freenect_video_format::FREENECT_VIDEO_YUV_RAW,
}
}
/// Decode the integer discriminant stored in `freenect_frame_mode.dummy`.
/// NOTE(review): the hard-coded values must match libfreenect's
/// `freenect_video_format` constants — confirm against the C header.
fn from_lowlevel_int(i: int32_t) -> VideoFormat {
match i {
0 => VideoFormat::Rgb,
1 => VideoFormat::Bayer,
2 => VideoFormat::Ir8Bit,
3 => VideoFormat::Ir10Bit,
4 => VideoFormat::Ir10BitPacked,
5 => VideoFormat::YuvRgb,
6 => VideoFormat::YuvRaw,
_ => panic!("Unknown freenect_video_format enum"),
}
}
}
/// Depth stream formats, mirroring libfreenect's `freenect_depth_format`.
#[derive(Debug)]
pub enum DepthFormat {
_11Bit,
_10Bit,
_11BitPacked,
_10BitPacked,
Registered,
Mm
}
impl DepthFormat {
/// Map to the raw FFI enum consumed by libfreenect.
fn to_lowlevel(&self) -> ft::freenect_depth_format {
match *self {
DepthFormat::_11Bit => ft::freenect_depth_format::FREENECT_DEPTH_11BIT,
DepthFormat::_10Bit => ft::freenect_depth_format::FREENECT_DEPTH_10BIT,
DepthFormat::_11BitPacked => ft::freenect_depth_format::FREENECT_DEPTH_11BIT_PACKED,
DepthFormat::_10BitPacked => ft::freenect_depth_format::FREENECT_DEPTH_10BIT_PACKED,
DepthFormat::Registered => ft::freenect_depth_format::FREENECT_DEPTH_REGISTERED,
DepthFormat::Mm => ft::freenect_depth_format::FREENECT_DEPTH_MM,
}
}
/// Decode the integer discriminant stored in `freenect_frame_mode.dummy`.
/// NOTE(review): the hard-coded values must match libfreenect's
/// `freenect_depth_format` constants — confirm against the C header.
fn from_lowlevel_int(i: int32_t) -> DepthFormat {
match i {
0 => DepthFormat::_11Bit,
1 => DepthFormat::_10Bit,
2 => DepthFormat::_11BitPacked,
3 => DepthFormat::_10BitPacked,
4 => DepthFormat::Registered,
5 => DepthFormat::Mm,
_ => panic!("Unknown freenect_depth_format enum"),
}
}
}
/// Typed stand-in for the `dummy` discriminant field of the C
/// frame-mode struct: either a video or a depth pixel format.
#[derive(Debug)]
enum FrameModeFormat {
Video(VideoFormat),
Depth(DepthFormat),
}
/// Safe mirror of libfreenect's `freenect_frame_mode` descriptor.
#[derive(Debug)]
pub struct FrameMode {
reserved: uint32_t, // Need to track contents of underlying freenect struct
resolution: Resolution,
format: FrameModeFormat, // typed replacement for the C struct's `dummy` field
bytes: i32,
width: i16, // presumably pixels — TODO confirm against libfreenect docs
height: i16, // presumably pixels — TODO confirm against libfreenect docs
data_bits_per_pixel: i8,
padding_bits_per_pixel: i8,
framerate: i8,
is_valid: bool, // C int flag converted to bool in from_lowlevel
}
impl FrameMode {
    /// Convert to the raw FFI struct; the video/depth format is packed
    /// into the shared `dummy` discriminant field.
    fn to_lowlevel(&self) -> ft::freenect_frame_mode {
        ft::freenect_frame_mode {
            reserved: self.reserved,
            resolution: self.resolution.to_lowlevel(),
            dummy: match self.format {
                FrameModeFormat::Video(ref x) => x.to_lowlevel() as int32_t,
                FrameModeFormat::Depth(ref y) => y.to_lowlevel() as int32_t,
            },
            bytes: self.bytes,
            width: self.width,
            height: self.height,
            data_bits_per_pixel: self.data_bits_per_pixel,
            padding_bits_per_pixel: self.padding_bits_per_pixel,
            framerate: self.framerate,
            is_valid: if self.is_valid { 1 } else { 0 },
        }
    }

    /// As `to_lowlevel`, but only for video modes; `None` for depth modes.
    fn to_lowlevel_video(&self) -> Option<ft::freenect_frame_mode> {
        match self.format {
            FrameModeFormat::Video(_) => Some(self.to_lowlevel()),
            FrameModeFormat::Depth(_) => None,
        }
    }

    /// As `to_lowlevel`, but only for depth modes; `None` for video modes.
    fn to_lowlevel_depth(&self) -> Option<ft::freenect_frame_mode> {
        match self.format {
            FrameModeFormat::Video(_) => None,
            FrameModeFormat::Depth(_) => Some(self.to_lowlevel()),
        }
    }

    /// Build a safe `FrameMode` from the raw struct plus an
    /// already-decoded format discriminant.
    fn from_lowlevel(mode: &ft::freenect_frame_mode, fmt: FrameModeFormat) -> FrameMode {
        FrameMode {
            reserved: mode.reserved,
            resolution: Resolution::from_lowlevel(&mode.resolution),
            format: fmt,
            bytes: mode.bytes as i32,
            width: mode.width as i16,
            height: mode.height as i16,
            data_bits_per_pixel: mode.data_bits_per_pixel as i8,
            padding_bits_per_pixel: mode.padding_bits_per_pixel as i8,
            framerate: mode.framerate as i8,
            // Simplified from `if mode.is_valid > 0 { true } else { false }`.
            is_valid: mode.is_valid > 0,
        }
    }

    /// Decode the raw struct's `dummy` field as a video format.
    fn from_lowlevel_video(mode: &ft::freenect_frame_mode) -> FrameMode {
        FrameMode::from_lowlevel(mode, FrameModeFormat::Video(VideoFormat::from_lowlevel_int(mode.dummy)))
    }

    /// Decode the raw struct's `dummy` field as a depth format.
    fn from_lowlevel_depth(mode: &ft::freenect_frame_mode) -> FrameMode {
        FrameMode::from_lowlevel(mode, FrameModeFormat::Depth(DepthFormat::from_lowlevel_int(mode.dummy)))
    }
}
/// Motor tilt movement status reported by the device.
/// Derives `Debug` for consistency with the other public enums in this file.
#[derive(Debug)]
pub enum TiltStatus {
    Stopped,
    Limit,
    Moving,
}
impl TiltStatus {
fn from_lowlevel(status: &ft::freenect_tilt_status_code) -> TiltStatus {
match *status {
ft::freenect_tilt_status_code::TILT_STATUS_STOPPED => TiltStatus::Stopped,
ft::freenect_tilt_status_code::TILT_STATUS_LIMIT => TiltStatus::Limit,
ft::freenect_tilt_status_code::TILT_STATUS_MOVING => TiltStatus::Moving,
}
}
}
/// Snapshot of the motor subdevice state: accelerometer axes plus tilt
/// angle/status, copied out of the C struct.
pub struct RawTiltState {
    pub accelerometer_x: i16,
    pub accelerometer_y: i16,
    pub accelerometer_z: i16,
    pub tilt_angle: i8,
    pub tilt_status: TiltStatus,
}
impl RawTiltState {
    // Copies the raw tilt state out of the C struct.
    // SAFETY: `state` must be a valid, non-null pointer (e.g. the one returned
    // by `freenect_get_tilt_state`); it is dereferenced unsafely here.
    fn from_lowlevel(state: *const ft::freenect_raw_tilt_state) -> RawTiltState {
        let state = unsafe { &*state };
        RawTiltState{
            accelerometer_x: state.accelerometer_x,
            accelerometer_y: state.accelerometer_y,
            accelerometer_z: state.accelerometer_z,
            tilt_angle: state.tilt_angle,
            tilt_status: TiltStatus::from_lowlevel(&state.tilt_status),
        }
    }
}
/// Toggleable device flags (see `CDevice::set_flag`).
/// Derives `Debug` for consistency with the other public enums in this file.
#[derive(Debug)]
pub enum Flag {
    AutoExposure,
    AutoWhiteBalance,
    RawColor,
    MirrorDepth,
    MirrorVideo,
}
impl Flag {
fn to_lowlevel(&self) -> ft::freenect_flag {
match *self {
Flag::AutoExposure => ft::freenect_flag::FREENECT_AUTO_EXPOSURE,
Flag::AutoWhiteBalance => ft::freenect_flag::FREENECT_AUTO_WHITE_BALANCE,
Flag::RawColor => ft::freenect_flag::FREENECT_RAW_COLOR,
Flag::MirrorDepth => ft::freenect_flag::FREENECT_MIRROR_DEPTH,
Flag::MirrorVideo => ft::freenect_flag::FREENECT_MIRROR_VIDEO,
}
}
}
// Bitmask of Kinect subdevices that can be claimed at open time
// (old bitflags 0.x `flags` syntax).
bitflags! {
    flags DeviceFlags: u32 {
        const DEVICE_MOTOR = ft::freenect_device_flags::FREENECT_DEVICE_MOTOR as u32,
        const DEVICE_CAMERA = ft::freenect_device_flags::FREENECT_DEVICE_CAMERA as u32,
        const DEVICE_AUDIO = ft::freenect_device_flags::FREENECT_DEVICE_AUDIO as u32,
    }
}
/// Attributes of an attached device, as reported during enumeration.
#[derive(Debug)]
pub struct DeviceAttributes {
    pub camera_serial: String, // Camera serial number, copied from the C string
}
// Owns the raw libfreenect context pointer; shut down in Drop.
struct InnerContext {
    ctx: *mut ft::freenect_context,
}
// InnerContext separated from main Context so that 'Device' handles can hold a reference to the
// InnerContext to prevent premature release. Could also use lifetimes (probably) to statically
// enforce this.
impl InnerContext {
    /// Initialize a fresh libfreenect context. Fails on a non-zero return
    /// code, or with `NullPtr` if the library claims success but hands back a
    /// null context.
    fn new() -> FreenectResult<InnerContext> {
        let mut inner = InnerContext{ctx: ptr::null_mut()};
        let ret = unsafe { ft::freenect_init(&mut inner.ctx, ptr::null_mut()) };
        if ret != 0 {
            Err(FreenectError::LibraryReturnCode(ret))
        } else if inner.ctx.is_null() {
            Err(FreenectError::NullPtr)
        } else {
            Ok(inner)
        }
    }
}
impl Drop for InnerContext {
    /// Shut down the libfreenect context once the last handle is released.
    fn drop(&mut self) {
        let ret = unsafe { ft::freenect_shutdown(self.ctx) };
        if ret < 0 {
            // Panic with a descriptive message; `panic!(ret)` with a bare i32
            // produces an opaque Box<Any> payload that is useless to debug.
            panic!("freenect_shutdown failed with code {}", ret)
        }
    }
}
/// Top-level handle to libfreenect. Devices opened through a `Context` share
/// its inner context via reference counting so it cannot be freed early.
pub struct Context {
    ctx: Rc<InnerContext>,
}
impl Context {
    /// Create a new freenect context (initializes libfreenect).
    pub fn new() -> FreenectResult<Context> {
        let inner_ctx = try!(InnerContext::new());
        Ok(Context{ctx: Rc::new(inner_ctx)})
    }
    /// Set the logging verbosity of the underlying library.
    pub fn set_log_level(&mut self, level: LogLevel) {
        unsafe { ft::freenect_set_log_level(self.ctx.ctx, level.to_lowlevel()); }
    }
    /// Run one iteration of the libfreenect event loop, dispatching any
    /// pending frame/motor callbacks.
    pub fn process_events(&mut self) -> FreenectResult<()> {
        match unsafe { ft::freenect_process_events(self.ctx.ctx) } {
            0 => Ok(()),
            x => Err(FreenectError::LibraryReturnCode(x)),
        }
    }
    // FIXME: Implement process_events with timeout
    /// Number of Kinect devices currently attached.
    pub fn num_devices(&mut self) -> FreenectResult<u32> {
        let ret = unsafe { ft::freenect_num_devices(self.ctx.ctx) };
        if ret < 0 {
            Err(FreenectError::LibraryReturnCode(ret))
        } else {
            Ok(ret as u32)
        }
    }
    /// Enumerate attributes (camera serial) of all attached devices.
    pub fn list_device_attributes(&mut self) -> FreenectResult<Vec<DeviceAttributes>> {
        let mut lowlevel_list: *mut ft::freenect_device_attributes = ptr::null_mut();
        let ret = unsafe { ft::freenect_list_device_attributes(self.ctx.ctx, &mut lowlevel_list) };
        if ret < 0 {
            return Err(FreenectError::LibraryReturnCode(ret));
        }
        let mut device_list: Vec<DeviceAttributes> = Vec::new();
        // Walk the C linked list, copying each serial into an owned String.
        let mut curr_item = lowlevel_list;
        while curr_item != ptr::null_mut() {
            let serial_cstr = unsafe { ffi::CStr::from_ptr((*curr_item).camera_serial) };
            let serial = String::from_utf8_lossy(serial_cstr.to_bytes()).to_string();
            device_list.push(DeviceAttributes{camera_serial: serial});
            unsafe { curr_item = (*curr_item).next };
        }
        // Free the C-allocated list now that everything has been copied out.
        unsafe { ft::freenect_free_device_attributes(lowlevel_list) };
        Ok(device_list)
    }
    // Internal use only: tell libfreenect which subdevices the next open should claim.
    fn select_subdevices(&mut self, subdevs: DeviceFlags) {
        unsafe { ft::freenect_select_subdevices(self.ctx.ctx, subdevs.bits) };
    }
    // Internal use only: which subdevices were actually claimed.
    fn enabled_subdevices(&mut self) -> DeviceFlags {
        let ret = unsafe { ft::freenect_enabled_subdevices(self.ctx.ctx) };
        return DeviceFlags::from_bits(ret as u32).unwrap();
    }
    /// Open the device at `index`, claiming the subdevices in `subdevs`.
    pub fn open_device(&mut self, index: u32, subdevs: DeviceFlags) -> FreenectResult<Device> {
        let mut dev: *mut ft::freenect_device = ptr::null_mut();
        self.select_subdevices(subdevs);
        let ret = unsafe { ft::freenect_open_device(self.ctx.ctx, &mut dev, index as i32) };
        if ret < 0 {
            return Err(FreenectError::LibraryReturnCode(ret))
        }
        return Ok(Device::from_raw_device(self.ctx.clone(), dev, self.enabled_subdevices()));
    }
    /// Open a device by camera serial number, claiming the subdevices in `subdevs`.
    pub fn open_device_by_camera_serial(&mut self, serial: &str, subdevs: DeviceFlags) -> FreenectResult<Device> {
        let mut dev: *mut ft::freenect_device = ptr::null_mut();
        let serial_cstring = ffi::CString::new(serial).unwrap();
        self.select_subdevices(subdevs);
        let ret = unsafe { ft::freenect_open_device_by_camera_serial(self.ctx.ctx, &mut dev, serial_cstring.as_ptr()) };
        if ret < 0 {
            return Err(FreenectError::LibraryReturnCode(ret))
        }
        return Ok(Device::from_raw_device(self.ctx.clone(), dev, self.enabled_subdevices()));
    }
}
// Rust struct allowing methods to be attached to the underlying C struct.
// Owns the raw device pointer; closed in Drop.
struct CDevice {
    dev: *mut ft::freenect_device,
}
impl Drop for CDevice {
    /// Close the underlying libfreenect device handle.
    fn drop(&mut self) {
        let ret = unsafe { ft::freenect_close_device(self.dev) };
        if ret != 0 {
            // Panic with a descriptive message; `panic!(ret)` with a bare i32
            // produces an opaque Box<Any> payload that is useless to debug.
            panic!("freenect_close_device failed with code {}", ret)
        }
    }
}
impl CDevice {
fn from_raw_device(dev: *mut ft::freenect_device) -> CDevice {
CDevice{dev: dev}
}
fn set_user(&mut self, user: *mut c_void) {
unsafe { ft::freenect_set_user(self.dev, user) };
}
fn set_depth_callback(&mut self, cb: ft::freenect_depth_cb) {
unsafe { ft::freenect_set_depth_callback(self.dev, cb) };
}
fn set_video_callback(&mut self, cb: ft::freenect_video_cb) {
unsafe { ft::freenect_set_video_callback(self.dev, cb) };
}
fn start_depth(&mut self) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_start_depth(self.dev) };
if ret == 0 {
Ok(())
} else {
Err(FreenectError::LibraryReturnCode(ret))
}
}
fn start_video(&mut self) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_start_video(self.dev) };
if ret == 0 {
Ok(())
} else {
Err(FreenectError::LibraryReturnCode(ret))
}
}
fn stop_depth(&mut self) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_stop_depth(self.dev) };
if ret == 0 {
Ok(())
} else {
Err(FreenectError::LibraryReturnCode(ret))
}
}
fn stop_video(&mut self) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_stop_video(self.dev) };
if ret == 0 {
Ok(())
} else {
Err(FreenectError::LibraryReturnCode(ret))
}
}
fn update_tilt_state(&mut self) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_update_tilt_state(self.dev) };
if ret == 0 {
Err(FreenectError::LibraryReturnCode(ret))
} else {
Ok(())
}
}
fn get_tilt_state(&mut self) -> *mut ft::freenect_raw_tilt_state {
unsafe { ft::freenect_get_tilt_state(self.dev) }
}
fn set_tilt_degs(&mut self, angle: f64) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_set_tilt_degs(self.dev, angle) };
if ret == 0 {
Err(FreenectError::LibraryReturnCode(ret))
} else {
Ok(())
}
}
fn get_current_video_mode(&mut self) -> FrameMode {
let lowlevel_video_mode = unsafe { ft::freenect_get_current_video_mode(self.dev) };
FrameMode::from_lowlevel_video(&lowlevel_video_mode)
}
fn set_video_mode(&mut self, mode: FrameMode) -> FreenectResult<()> {
let lowlevel_video_mode = try!(mode.to_lowlevel_video().ok_or(FreenectError::FrameFormatMismatch));
unsafe { ft::freenect_set_video_mode(self.dev, lowlevel_video_mode) };
Ok(())
}
fn get_current_depth_mode(&mut self) -> FrameMode {
let lowlevel_depth_mode = unsafe { ft::freenect_get_current_depth_mode(self.dev) };
FrameMode::from_lowlevel_depth(&lowlevel_depth_mode)
}
fn set_depth_mode(&mut self, mode: FrameMode) -> FreenectResult<()> {
let lowlevel_depth_mode = try!(mode.to_lowlevel_depth().ok_or(FreenectError::FrameFormatMismatch));
unsafe { ft::freenect_set_depth_mode(self.dev, lowlevel_depth_mode) };
Ok(())
}
fn set_flag(&mut self, flag: Flag, set: bool) -> FreenectResult<()> {
let flag_value = if set {
ft::freenect_flag_value::FREENECT_ON
} else {
ft::freenect_flag_value::FREENECT_OFF
};
let ret = unsafe { ft::freenect_set_flag(self.dev, flag.to_lowlevel(), flag_value) };
if ret == 0 {
Err(FreenectError::LibraryReturnCode(ret))
} else {
Ok(())
}
}
}
/// An opened Kinect device. Each subdevice handle is `Some` only if that
/// subdevice was requested and actually claimed at open time.
pub struct Device {
    ctx: Rc<InnerContext>, // Handle to prevent underlying context being free'd before device
    dev: Rc<RefCell<CDevice>>,
    pub motor: Option<MotorSubdevice>,
    pub camera: Option<CameraSubdevice>,
    pub audio: Option<AudioSubdevice>,
}
impl Device {
    // Wrap a raw device handle, creating a subdevice handle for each
    // subdevice flagged as claimed in `subdevs`.
    fn from_raw_device(ctx: Rc<InnerContext>, dev: *mut ft::freenect_device, subdevs: DeviceFlags) -> Device {
        let inner_dev = Rc::new(RefCell::new(CDevice::from_raw_device(dev)));
        Device {
            ctx: ctx,
            dev: inner_dev.clone(),
            motor: if subdevs.contains(DEVICE_MOTOR) { Some(MotorSubdevice::new(inner_dev.clone())) } else { None },
            camera: if subdevs.contains(DEVICE_CAMERA) { Some(CameraSubdevice::new(inner_dev.clone())) } else { None },
            audio: if subdevs.contains(DEVICE_AUDIO) { Some(AudioSubdevice::new(inner_dev.clone())) } else { None },
        }
    }
}
/// Handle to the motor/tilt/accelerometer subdevice.
pub struct MotorSubdevice {
    dev: Rc<RefCell<CDevice>>,
}
impl MotorSubdevice {
    fn new(dev: Rc<RefCell<CDevice>>) -> MotorSubdevice {
        MotorSubdevice{dev: dev}
    }
    /// Refresh and return the current tilt/accelerometer state.
    pub fn get_tilt_state(&mut self) -> FreenectResult<RawTiltState> {
        let mut cdev = self.dev.borrow_mut();
        try!(cdev.update_tilt_state());
        Ok(RawTiltState::from_lowlevel(cdev.get_tilt_state()))
    }
}
// Exists so it can be boxed (therefore fixing its memory address) and have its address handed as a
// C callback userdata void pointer
struct ClosureHolder {
    dev: Rc<RefCell<CDevice>>,
    // User callbacks; invoked from the C trampolines with the frame buffer
    // and the driver timestamp.
    depth_cb: Option<Box<FnMut(&mut [u8], u32) + Send + 'static>>,
    video_cb: Option<Box<FnMut(&mut [u8], u32) + Send + 'static>>,
    // True while freenect_start_* is executing; the trampolines drop frames
    // during that window to avoid double-borrowing `dev`.
    starting: bool,
}
impl ClosureHolder {
    fn new(dev: Rc<RefCell<CDevice>>) -> ClosureHolder {
        ClosureHolder{dev: dev, depth_cb: None, video_cb: None, starting: false}
    }
}
/// Handle to the camera subdevice (depth and video streams).
pub struct CameraSubdevice {
    dev: Rc<RefCell<CDevice>>,
    ch: Box<ClosureHolder>, // Boxed so the address handed to C as userdata stays stable
}
impl CameraSubdevice {
    fn new(dev: Rc<RefCell<CDevice>>) -> CameraSubdevice {
        let mut cam_sub = CameraSubdevice{ dev: dev.clone(), ch: Box::new(ClosureHolder::new(dev.clone()))};
        // Register all callbacks. We'll let Rust code decide if a user callback should be called.
        // The ClosureHolder's boxed address is handed to C as the userdata
        // pointer, so it must not move for the lifetime of this subdevice.
        unsafe {
            let mut cdev = dev.borrow_mut();
            cdev.set_user(std::mem::transmute(&mut *cam_sub.ch));
            cdev.set_depth_callback(CameraSubdevice::depth_cb_trampoline);
            cdev.set_video_callback(CameraSubdevice::video_cb_trampoline);
        }
        return cam_sub;
    }
    /// Install (or clear, with `None`) the user depth-frame callback.
    pub fn set_depth_callback(&mut self, cb: Option<Box<FnMut(&mut [u8], u32) + Send + 'static>>) {
        self.ch.depth_cb = cb;
    }
    /// Install (or clear, with `None`) the user video-frame callback.
    pub fn set_video_callback(&mut self, cb: Option<Box<FnMut(&mut [u8], u32) + Send + 'static>>) {
        self.ch.video_cb = cb;
    }
    /// Begin depth streaming. `starting` suppresses callbacks fired
    /// re-entrantly from inside freenect_start_depth (see trampoline below).
    pub fn start_depth(&mut self) -> FreenectResult<()> {
        (*self.ch).starting = true;
        let ret = self.dev.borrow_mut().start_depth();
        (*self.ch).starting = false;
        return ret;
    }
    /// Begin video streaming; same `starting` dance as `start_depth`.
    pub fn start_video(&mut self) -> FreenectResult<()> {
        (*self.ch).starting = true;
        let ret = self.dev.borrow_mut().start_video();
        (*self.ch).starting = false;
        return ret;
    }
    pub fn stop_depth(&mut self) -> FreenectResult<()> {
        self.dev.borrow_mut().stop_depth()
    }
    pub fn stop_video(&mut self) -> FreenectResult<()> {
        self.dev.borrow_mut().stop_video()
    }
    pub fn set_tilt_degs(&mut self, angle: f64) -> FreenectResult<()> {
        self.dev.borrow_mut().set_tilt_degs(angle)
    }
    pub fn get_current_video_mode(&mut self) -> FrameMode {
        self.dev.borrow_mut().get_current_video_mode()
    }
    pub fn set_video_mode(&mut self, mode: FrameMode) -> FreenectResult<()> {
        self.dev.borrow_mut().set_video_mode(mode)
    }
    pub fn get_current_depth_mode(&mut self) -> FrameMode {
        self.dev.borrow_mut().get_current_depth_mode()
    }
    pub fn set_depth_mode(&mut self, mode: FrameMode) -> FreenectResult<()> {
        self.dev.borrow_mut().set_depth_mode(mode)
    }
    pub fn set_flag(&mut self, flag: Flag, set: bool) -> FreenectResult<()> {
        self.dev.borrow_mut().set_flag(flag, set)
    }
    // C-ABI trampoline: recovers the ClosureHolder from userdata and forwards
    // the frame to the user's depth callback, if any.
    extern "C" fn depth_cb_trampoline(dev: *mut ft::freenect_device, depth: *mut c_void, timestamp: uint32_t) {
        unsafe {
            let ch = ft::freenect_get_user(dev) as *mut ClosureHolder;
            // libfreenect end's up calling this callback when start_depth is called. This is an
            // issue as the cdev RefCell will be borrowed twice (causing a panic). Instead, check a
            // flag indicating we are starting, and if set, just ignore the frame.
            if !(*ch).starting {
                // Callback provides no information on frame buffer length. Retrieve the length by
                // directly asking for the current mode information
                let mode = (*ch).dev.borrow_mut().get_current_depth_mode();
                let frame = slice::from_raw_parts_mut(depth as *mut u8, mode.bytes as usize);
                let timestamp = timestamp as u32;
                match (*ch).depth_cb {
                    Some(ref mut cb) => cb(frame, timestamp),
                    None => return,
                };
            }
        }
    }
    // C-ABI trampoline for video frames; mirrors depth_cb_trampoline.
    extern "C" fn video_cb_trampoline(dev: *mut ft::freenect_device, video: *mut c_void, timestamp: uint32_t) {
        unsafe {
            let ch = ft::freenect_get_user(dev) as *mut ClosureHolder;
            if !(*ch).starting {
                // Callback provides no information on frame buffer length. Retrieve the length by
                // directly asking for the current mode information
                let mode = (*ch).dev.borrow_mut().get_current_video_mode();
                let frame = slice::from_raw_parts_mut(video as *mut u8, mode.bytes as usize);
                let timestamp = timestamp as u32;
                match (*ch).video_cb {
                    Some(ref mut cb) => cb(frame, timestamp),
                    None => return,
                };
            }
        }
    }
}
/// Handle to the audio subdevice. No audio operations are exposed here yet;
/// the handle only records that the subdevice was claimed.
pub struct AudioSubdevice {
    dev: Rc<RefCell<CDevice>>,
}
impl AudioSubdevice {
    fn new(dev: Rc<RefCell<CDevice>>) -> AudioSubdevice {
        AudioSubdevice{dev: dev}
    }
}
/// Query which subdevice types (motor/camera/audio) this build of libfreenect
/// can drive.
pub fn supported_subdevices() -> DeviceFlags {
    let raw = unsafe { ft::freenect_supported_subdevices() } as u32;
    DeviceFlags::from_bits(raw).unwrap()
}
/// Look up the video frame mode for a resolution/format pair, or `None` when
/// libfreenect marks the combination as invalid.
pub fn find_video_mode(res: Resolution, fmt: VideoFormat) -> Option<FrameMode> {
    let raw = unsafe { ft::freenect_find_video_mode(res.to_lowlevel(), fmt.to_lowlevel()) };
    let mode = FrameMode::from_lowlevel_video(&raw);
    match mode.is_valid {
        true => Some(mode),
        false => None,
    }
}
/// Look up the depth frame mode for a resolution/format pair, or `None` when
/// libfreenect marks the combination as invalid.
pub fn find_depth_mode(res: Resolution, fmt: DepthFormat) -> Option<FrameMode> {
    let raw = unsafe { ft::freenect_find_depth_mode(res.to_lowlevel(), fmt.to_lowlevel()) };
    let mode = FrameMode::from_lowlevel_depth(&raw);
    match mode.is_valid {
        true => Some(mode),
        false => None,
    }
}
/// Iterator over all video modes supported by libfreenect.
pub struct VideoModeIter {
    video_mode_count: c_int, // Total mode count reported by the library
    next_mode: c_int,        // Index of the next mode to yield
}
impl VideoModeIter {
    fn new() -> VideoModeIter {
        VideoModeIter{
            video_mode_count: unsafe { ft::freenect_get_video_mode_count() },
            next_mode: 0,
        }
    }
}
impl Iterator for VideoModeIter {
    type Item = FrameMode;

    fn next(&mut self) -> Option<FrameMode> {
        if self.next_mode < self.video_mode_count {
            let lowlevel_frame_mode = unsafe { ft::freenect_get_video_mode(self.next_mode) };
            self.next_mode += 1;
            Some(FrameMode::from_lowlevel_video(&lowlevel_frame_mode))
        } else {
            None
        }
    }

    // The exact remaining length is known; report it so collect() can size
    // allocations and so ExactSizeIterator's default len() would be correct.
    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = (self.video_mode_count - self.next_mode) as usize;
        (remaining, Some(remaining))
    }
}
impl ExactSizeIterator for VideoModeIter {
    /// Number of modes remaining.
    fn len(&self) -> usize {
        // BUGFIX: previously returned the total mode count even after items
        // had been consumed, violating the ExactSizeIterator contract that
        // len() is the *remaining* length.
        (self.video_mode_count - self.next_mode) as usize
    }
}
/// All video modes supported by this build of libfreenect.
pub fn video_modes() -> VideoModeIter {
    VideoModeIter::new()
}
/// Iterator over all depth modes supported by libfreenect.
pub struct DepthModeIter {
    depth_mode_count: c_int, // Total mode count reported by the library
    next_mode: c_int,        // Index of the next mode to yield
}
impl DepthModeIter {
    fn new() -> DepthModeIter {
        DepthModeIter{
            depth_mode_count: unsafe { ft::freenect_get_depth_mode_count() },
            next_mode: 0,
        }
    }
}
impl Iterator for DepthModeIter {
    type Item = FrameMode;

    fn next(&mut self) -> Option<FrameMode> {
        if self.next_mode < self.depth_mode_count {
            let lowlevel_frame_mode = unsafe { ft::freenect_get_depth_mode(self.next_mode) };
            self.next_mode += 1;
            Some(FrameMode::from_lowlevel_depth(&lowlevel_frame_mode))
        } else {
            None
        }
    }

    // The exact remaining length is known; report it so collect() can size
    // allocations and so ExactSizeIterator's default len() would be correct.
    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = (self.depth_mode_count - self.next_mode) as usize;
        (remaining, Some(remaining))
    }
}
impl ExactSizeIterator for DepthModeIter {
    /// Number of modes remaining.
    fn len(&self) -> usize {
        // BUGFIX: previously returned the total mode count even after items
        // had been consumed, violating the ExactSizeIterator contract that
        // len() is the *remaining* length.
        (self.depth_mode_count - self.next_mode) as usize
    }
}
/// All depth modes supported by this build of libfreenect.
pub fn depth_modes() -> DepthModeIter {
    DepthModeIter::new()
}
Pass Frame metadata with buffer into callbacks
use std::ffi;
use std::ptr;
use std::rc::Rc;
use std::cell::RefCell;
use std::slice;
extern crate libc;
use libc::{
c_int,
c_void,
uint32_t,
int32_t,
};
#[macro_use]
extern crate bitflags;
extern crate libfreenect_sys;
use libfreenect_sys as ft;
/// Errors produced by this wrapper library.
#[derive(Debug)]
pub enum FreenectError {
    /// libfreenect returned a non-zero (error) code.
    LibraryReturnCode(i32),
    /// libfreenect reported success but handed back a null pointer.
    NullPtr,
    /// A video mode was supplied where a depth mode was expected, or vice versa.
    FrameFormatMismatch,
}
// Error type for the library.
// FreenectError must be `pub`: it is exposed through this public alias and
// returned from public APIs (private-in-public otherwise).
pub type FreenectResult<T> = Result<T, FreenectError>;
/// Logging verbosity levels for libfreenect.
#[derive(Debug)]
pub enum LogLevel {
    Fatal, // Log for crashing/non-recoverable errors
    Error, // Log for major errors
    Warning, // Log for warning messages
    Notice, // Log for important messages
    Info, // Log for normal messages
    Debug, // Log for useful development messages
    Spew, // Log for slightly less useful messages
    Flood, // Log EVERYTHING. May slow performance.
}
impl LogLevel {
fn to_lowlevel(&self) -> ft::freenect_loglevel {
match *self {
LogLevel::Fatal => ft::freenect_loglevel::FREENECT_LOG_FATAL,
LogLevel::Error => ft::freenect_loglevel::FREENECT_LOG_ERROR,
LogLevel::Warning => ft::freenect_loglevel::FREENECT_LOG_WARNING,
LogLevel::Notice => ft::freenect_loglevel::FREENECT_LOG_NOTICE,
LogLevel::Info => ft::freenect_loglevel::FREENECT_LOG_INFO,
LogLevel::Debug => ft::freenect_loglevel::FREENECT_LOG_DEBUG,
LogLevel::Spew => ft::freenect_loglevel::FREENECT_LOG_SPEW,
LogLevel::Flood => ft::freenect_loglevel::FREENECT_LOG_FLOOD,
}
}
fn from_lowlevel(lvl: ft::freenect_loglevel) -> LogLevel {
match lvl {
ft::freenect_loglevel::FREENECT_LOG_FATAL => LogLevel::Fatal,
ft::freenect_loglevel::FREENECT_LOG_ERROR => LogLevel::Error,
ft::freenect_loglevel::FREENECT_LOG_WARNING => LogLevel::Warning,
ft::freenect_loglevel::FREENECT_LOG_NOTICE => LogLevel::Notice,
ft::freenect_loglevel::FREENECT_LOG_INFO => LogLevel::Info,
ft::freenect_loglevel::FREENECT_LOG_DEBUG => LogLevel::Debug,
ft::freenect_loglevel::FREENECT_LOG_SPEW => LogLevel::Spew,
ft::freenect_loglevel::FREENECT_LOG_FLOOD => LogLevel::Flood,
}
}
}
/// Camera resolution presets exposed by libfreenect.
#[derive(Debug)]
pub enum Resolution {
    Low,
    Medium,
    High,
}
impl Resolution {
fn to_lowlevel(&self) -> ft::freenect_resolution {
match *self {
Resolution::Low => ft::freenect_resolution::FREENECT_RESOLUTION_LOW,
Resolution::Medium => ft::freenect_resolution::FREENECT_RESOLUTION_MEDIUM,
Resolution::High => ft::freenect_resolution::FREENECT_RESOLUTION_HIGH,
}
}
fn from_lowlevel(res: &ft::freenect_resolution) -> Resolution {
match *res {
ft::freenect_resolution::FREENECT_RESOLUTION_LOW => Resolution::Low,
ft::freenect_resolution::FREENECT_RESOLUTION_MEDIUM => Resolution::Medium,
ft::freenect_resolution::FREENECT_RESOLUTION_HIGH => Resolution::High,
_ => panic!("Unknown freenect_resolution enum")
}
}
}
/// Video stream formats supported by libfreenect.
#[derive(Debug)]
pub enum VideoFormat {
    Rgb,
    Bayer,
    Ir8Bit,
    Ir10Bit,
    Ir10BitPacked,
    YuvRgb,
    YuvRaw,
}
impl VideoFormat {
fn to_lowlevel(&self) -> ft::freenect_video_format {
match *self {
VideoFormat::Rgb => ft::freenect_video_format::FREENECT_VIDEO_RGB,
VideoFormat::Bayer => ft::freenect_video_format::FREENECT_VIDEO_BAYER,
VideoFormat::Ir8Bit => ft::freenect_video_format::FREENECT_VIDEO_IR_8BIT,
VideoFormat::Ir10Bit => ft::freenect_video_format::FREENECT_VIDEO_IR_10BIT,
VideoFormat::Ir10BitPacked => ft::freenect_video_format::FREENECT_VIDEO_IR_10BIT_PACKED,
VideoFormat::YuvRgb => ft::freenect_video_format::FREENECT_VIDEO_YUV_RGB,
VideoFormat::YuvRaw => ft::freenect_video_format::FREENECT_VIDEO_YUV_RAW,
}
}
fn from_lowlevel_int(i: int32_t) -> VideoFormat {
match i {
0 => VideoFormat::Rgb,
1 => VideoFormat::Bayer,
2 => VideoFormat::Ir8Bit,
3 => VideoFormat::Ir10Bit,
4 => VideoFormat::Ir10BitPacked,
5 => VideoFormat::YuvRgb,
6 => VideoFormat::YuvRaw,
_ => panic!("Unknown freenect_video_format enum"),
}
}
}
/// Depth stream formats supported by libfreenect.
#[derive(Debug)]
pub enum DepthFormat {
    _11Bit,       // 11-bit depth samples
    _10Bit,       // 10-bit depth samples
    _11BitPacked, // 11-bit packed stream
    _10BitPacked, // 10-bit packed stream
    Registered,   // Depth registered to the video frame
    Mm            // Depth in millimeters
}
impl DepthFormat {
fn to_lowlevel(&self) -> ft::freenect_depth_format {
match *self {
DepthFormat::_11Bit => ft::freenect_depth_format::FREENECT_DEPTH_11BIT,
DepthFormat::_10Bit => ft::freenect_depth_format::FREENECT_DEPTH_10BIT,
DepthFormat::_11BitPacked => ft::freenect_depth_format::FREENECT_DEPTH_11BIT_PACKED,
DepthFormat::_10BitPacked => ft::freenect_depth_format::FREENECT_DEPTH_10BIT_PACKED,
DepthFormat::Registered => ft::freenect_depth_format::FREENECT_DEPTH_REGISTERED,
DepthFormat::Mm => ft::freenect_depth_format::FREENECT_DEPTH_MM,
}
}
fn from_lowlevel_int(i: int32_t) -> DepthFormat {
match i {
0 => DepthFormat::_11Bit,
1 => DepthFormat::_10Bit,
2 => DepthFormat::_11BitPacked,
3 => DepthFormat::_10BitPacked,
4 => DepthFormat::Registered,
5 => DepthFormat::Mm,
_ => panic!("Unknown freenect_depth_format enum"),
}
}
}
// Tagged union distinguishing video-mode formats from depth-mode formats.
#[derive(Debug)]
pub enum FrameModeFormat {
    Video(VideoFormat),
    Depth(DepthFormat),
}
/// Safe description of a single video or depth frame mode.
#[derive(Debug)]
pub struct FrameMode {
    reserved: uint32_t, // Need to track contents of underlying freenect struct
    pub resolution: Resolution,
    pub format: FrameModeFormat,
    pub bytes: i32, // Size of one frame buffer in bytes (used to size callback slices)
    pub width: i16,
    pub height: i16,
    pub data_bits_per_pixel: i8,
    pub padding_bits_per_pixel: i8,
    pub framerate: i8,
    pub is_valid: bool, // Whether libfreenect reported this mode as usable
}
impl FrameMode {
    /// Reassemble the raw libfreenect frame-mode struct from this wrapper.
    fn to_lowlevel(&self) -> ft::freenect_frame_mode {
        ft::freenect_frame_mode {
            reserved: self.reserved,
            resolution: self.resolution.to_lowlevel(),
            // `dummy` is the C union field holding either the video or the
            // depth format discriminant.
            dummy: match self.format {
                FrameModeFormat::Video(ref x) => x.to_lowlevel() as int32_t,
                FrameModeFormat::Depth(ref y) => y.to_lowlevel() as int32_t,
            },
            bytes: self.bytes,
            width: self.width,
            height: self.height,
            data_bits_per_pixel: self.data_bits_per_pixel,
            padding_bits_per_pixel: self.padding_bits_per_pixel,
            framerate: self.framerate,
            is_valid: if self.is_valid { 1 } else { 0 },
        }
    }

    /// Lowlevel conversion, but only if this is a video mode.
    fn to_lowlevel_video(&self) -> Option<ft::freenect_frame_mode> {
        match self.format {
            FrameModeFormat::Video(_) => Some(self.to_lowlevel()),
            FrameModeFormat::Depth(_) => None,
        }
    }

    /// Lowlevel conversion, but only if this is a depth mode.
    fn to_lowlevel_depth(&self) -> Option<ft::freenect_frame_mode> {
        match self.format {
            FrameModeFormat::Video(_) => None,
            FrameModeFormat::Depth(_) => Some(self.to_lowlevel()),
        }
    }

    /// Build the safe wrapper from a raw frame mode plus an already-decoded
    /// format discriminant.
    fn from_lowlevel(mode: &ft::freenect_frame_mode, fmt: FrameModeFormat) -> FrameMode {
        FrameMode {
            reserved: mode.reserved,
            resolution: Resolution::from_lowlevel(&mode.resolution),
            format: fmt,
            bytes: mode.bytes as i32,
            width: mode.width as i16,
            height: mode.height as i16,
            data_bits_per_pixel: mode.data_bits_per_pixel as i8,
            padding_bits_per_pixel: mode.padding_bits_per_pixel as i8,
            framerate: mode.framerate as i8,
            // Simplified from `if mode.is_valid > 0 { true } else { false }`.
            is_valid: mode.is_valid > 0,
        }
    }

    fn from_lowlevel_video(mode: &ft::freenect_frame_mode) -> FrameMode {
        FrameMode::from_lowlevel(mode, FrameModeFormat::Video(VideoFormat::from_lowlevel_int(mode.dummy)))
    }

    fn from_lowlevel_depth(mode: &ft::freenect_frame_mode) -> FrameMode {
        FrameMode::from_lowlevel(mode, FrameModeFormat::Depth(DepthFormat::from_lowlevel_int(mode.dummy)))
    }
}
/// Motor tilt movement status reported by the device.
/// Derives `Debug` for consistency with the other public enums in this file.
#[derive(Debug)]
pub enum TiltStatus {
    Stopped,
    Limit,
    Moving,
}
impl TiltStatus {
fn from_lowlevel(status: &ft::freenect_tilt_status_code) -> TiltStatus {
match *status {
ft::freenect_tilt_status_code::TILT_STATUS_STOPPED => TiltStatus::Stopped,
ft::freenect_tilt_status_code::TILT_STATUS_LIMIT => TiltStatus::Limit,
ft::freenect_tilt_status_code::TILT_STATUS_MOVING => TiltStatus::Moving,
}
}
}
/// Snapshot of the motor subdevice state: accelerometer axes plus tilt
/// angle/status, copied out of the C struct.
pub struct RawTiltState {
    pub accelerometer_x: i16,
    pub accelerometer_y: i16,
    pub accelerometer_z: i16,
    pub tilt_angle: i8,
    pub tilt_status: TiltStatus,
}
impl RawTiltState {
    // Copies the raw tilt state out of the C struct.
    // SAFETY: `state` must be a valid, non-null pointer (e.g. the one returned
    // by `freenect_get_tilt_state`); it is dereferenced unsafely here.
    fn from_lowlevel(state: *const ft::freenect_raw_tilt_state) -> RawTiltState {
        let state = unsafe { &*state };
        RawTiltState{
            accelerometer_x: state.accelerometer_x,
            accelerometer_y: state.accelerometer_y,
            accelerometer_z: state.accelerometer_z,
            tilt_angle: state.tilt_angle,
            tilt_status: TiltStatus::from_lowlevel(&state.tilt_status),
        }
    }
}
/// Toggleable device flags (see `CDevice::set_flag`).
/// Derives `Debug` for consistency with the other public enums in this file.
#[derive(Debug)]
pub enum Flag {
    AutoExposure,
    AutoWhiteBalance,
    RawColor,
    MirrorDepth,
    MirrorVideo,
}
impl Flag {
fn to_lowlevel(&self) -> ft::freenect_flag {
match *self {
Flag::AutoExposure => ft::freenect_flag::FREENECT_AUTO_EXPOSURE,
Flag::AutoWhiteBalance => ft::freenect_flag::FREENECT_AUTO_WHITE_BALANCE,
Flag::RawColor => ft::freenect_flag::FREENECT_RAW_COLOR,
Flag::MirrorDepth => ft::freenect_flag::FREENECT_MIRROR_DEPTH,
Flag::MirrorVideo => ft::freenect_flag::FREENECT_MIRROR_VIDEO,
}
}
}
// Bitmask of Kinect subdevices that can be claimed at open time
// (old bitflags 0.x `flags` syntax).
bitflags! {
    flags DeviceFlags: u32 {
        const DEVICE_MOTOR = ft::freenect_device_flags::FREENECT_DEVICE_MOTOR as u32,
        const DEVICE_CAMERA = ft::freenect_device_flags::FREENECT_DEVICE_CAMERA as u32,
        const DEVICE_AUDIO = ft::freenect_device_flags::FREENECT_DEVICE_AUDIO as u32,
    }
}
/// Attributes of an attached device, as reported during enumeration.
#[derive(Debug)]
pub struct DeviceAttributes {
    pub camera_serial: String, // Camera serial number, copied from the C string
}
// Owns the raw libfreenect context pointer; shut down in Drop.
struct InnerContext {
    ctx: *mut ft::freenect_context,
}
// InnerContext separated from main Context so that 'Device' handles can hold a reference to the
// InnerContext to prevent premature release. Could also use lifetimes (probably) to statically
// enforce this.
impl InnerContext {
    /// Initialize a fresh libfreenect context. Fails on a non-zero return
    /// code, or with `NullPtr` if the library claims success but hands back a
    /// null context.
    fn new() -> FreenectResult<InnerContext> {
        let mut inner = InnerContext{ctx: ptr::null_mut()};
        let ret = unsafe { ft::freenect_init(&mut inner.ctx, ptr::null_mut()) };
        if ret != 0 {
            Err(FreenectError::LibraryReturnCode(ret))
        } else if inner.ctx.is_null() {
            Err(FreenectError::NullPtr)
        } else {
            Ok(inner)
        }
    }
}
impl Drop for InnerContext {
    /// Shut down the libfreenect context once the last handle is released.
    fn drop(&mut self) {
        let ret = unsafe { ft::freenect_shutdown(self.ctx) };
        if ret < 0 {
            // Panic with a descriptive message; `panic!(ret)` with a bare i32
            // produces an opaque Box<Any> payload that is useless to debug.
            panic!("freenect_shutdown failed with code {}", ret)
        }
    }
}
/// Top-level handle to libfreenect. Devices opened through a `Context` share
/// its inner context via reference counting so it cannot be freed early.
pub struct Context {
    ctx: Rc<InnerContext>,
}
impl Context {
    /// Create a new freenect context (initializes libfreenect).
    pub fn new() -> FreenectResult<Context> {
        let inner_ctx = try!(InnerContext::new());
        Ok(Context{ctx: Rc::new(inner_ctx)})
    }
    /// Set the logging verbosity of the underlying library.
    pub fn set_log_level(&mut self, level: LogLevel) {
        unsafe { ft::freenect_set_log_level(self.ctx.ctx, level.to_lowlevel()); }
    }
    /// Run one iteration of the libfreenect event loop, dispatching any
    /// pending frame/motor callbacks.
    pub fn process_events(&mut self) -> FreenectResult<()> {
        match unsafe { ft::freenect_process_events(self.ctx.ctx) } {
            0 => Ok(()),
            x => Err(FreenectError::LibraryReturnCode(x)),
        }
    }
    // FIXME: Implement process_events with timeout
    /// Number of Kinect devices currently attached.
    pub fn num_devices(&mut self) -> FreenectResult<u32> {
        let ret = unsafe { ft::freenect_num_devices(self.ctx.ctx) };
        if ret < 0 {
            Err(FreenectError::LibraryReturnCode(ret))
        } else {
            Ok(ret as u32)
        }
    }
    /// Enumerate attributes (camera serial) of all attached devices.
    pub fn list_device_attributes(&mut self) -> FreenectResult<Vec<DeviceAttributes>> {
        let mut lowlevel_list: *mut ft::freenect_device_attributes = ptr::null_mut();
        let ret = unsafe { ft::freenect_list_device_attributes(self.ctx.ctx, &mut lowlevel_list) };
        if ret < 0 {
            return Err(FreenectError::LibraryReturnCode(ret));
        }
        let mut device_list: Vec<DeviceAttributes> = Vec::new();
        // Walk the C linked list, copying each serial into an owned String.
        let mut curr_item = lowlevel_list;
        while curr_item != ptr::null_mut() {
            let serial_cstr = unsafe { ffi::CStr::from_ptr((*curr_item).camera_serial) };
            let serial = String::from_utf8_lossy(serial_cstr.to_bytes()).to_string();
            device_list.push(DeviceAttributes{camera_serial: serial});
            unsafe { curr_item = (*curr_item).next };
        }
        // Free the C-allocated list now that everything has been copied out.
        unsafe { ft::freenect_free_device_attributes(lowlevel_list) };
        Ok(device_list)
    }
    // Internal use only: tell libfreenect which subdevices the next open should claim.
    fn select_subdevices(&mut self, subdevs: DeviceFlags) {
        unsafe { ft::freenect_select_subdevices(self.ctx.ctx, subdevs.bits) };
    }
    // Internal use only: which subdevices were actually claimed.
    fn enabled_subdevices(&mut self) -> DeviceFlags {
        let ret = unsafe { ft::freenect_enabled_subdevices(self.ctx.ctx) };
        return DeviceFlags::from_bits(ret as u32).unwrap();
    }
    /// Open the device at `index`, claiming the subdevices in `subdevs`.
    pub fn open_device(&mut self, index: u32, subdevs: DeviceFlags) -> FreenectResult<Device> {
        let mut dev: *mut ft::freenect_device = ptr::null_mut();
        self.select_subdevices(subdevs);
        let ret = unsafe { ft::freenect_open_device(self.ctx.ctx, &mut dev, index as i32) };
        if ret < 0 {
            return Err(FreenectError::LibraryReturnCode(ret))
        }
        return Ok(Device::from_raw_device(self.ctx.clone(), dev, self.enabled_subdevices()));
    }
    /// Open a device by camera serial number, claiming the subdevices in `subdevs`.
    pub fn open_device_by_camera_serial(&mut self, serial: &str, subdevs: DeviceFlags) -> FreenectResult<Device> {
        let mut dev: *mut ft::freenect_device = ptr::null_mut();
        let serial_cstring = ffi::CString::new(serial).unwrap();
        self.select_subdevices(subdevs);
        let ret = unsafe { ft::freenect_open_device_by_camera_serial(self.ctx.ctx, &mut dev, serial_cstring.as_ptr()) };
        if ret < 0 {
            return Err(FreenectError::LibraryReturnCode(ret))
        }
        return Ok(Device::from_raw_device(self.ctx.clone(), dev, self.enabled_subdevices()));
    }
}
// Rust struct allowing methods to be attached to the underlying C struct.
// Owns the raw device pointer; closed in Drop.
struct CDevice {
    dev: *mut ft::freenect_device,
}
impl Drop for CDevice {
    /// Close the underlying libfreenect device handle.
    fn drop(&mut self) {
        let ret = unsafe { ft::freenect_close_device(self.dev) };
        if ret != 0 {
            // Panic with a descriptive message; `panic!(ret)` with a bare i32
            // produces an opaque Box<Any> payload that is useless to debug.
            panic!("freenect_close_device failed with code {}", ret)
        }
    }
}
impl CDevice {
fn from_raw_device(dev: *mut ft::freenect_device) -> CDevice {
CDevice{dev: dev}
}
fn set_user(&mut self, user: *mut c_void) {
unsafe { ft::freenect_set_user(self.dev, user) };
}
fn set_depth_callback(&mut self, cb: ft::freenect_depth_cb) {
unsafe { ft::freenect_set_depth_callback(self.dev, cb) };
}
fn set_video_callback(&mut self, cb: ft::freenect_video_cb) {
unsafe { ft::freenect_set_video_callback(self.dev, cb) };
}
fn start_depth(&mut self) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_start_depth(self.dev) };
if ret == 0 {
Ok(())
} else {
Err(FreenectError::LibraryReturnCode(ret))
}
}
fn start_video(&mut self) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_start_video(self.dev) };
if ret == 0 {
Ok(())
} else {
Err(FreenectError::LibraryReturnCode(ret))
}
}
fn stop_depth(&mut self) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_stop_depth(self.dev) };
if ret == 0 {
Ok(())
} else {
Err(FreenectError::LibraryReturnCode(ret))
}
}
fn stop_video(&mut self) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_stop_video(self.dev) };
if ret == 0 {
Ok(())
} else {
Err(FreenectError::LibraryReturnCode(ret))
}
}
fn update_tilt_state(&mut self) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_update_tilt_state(self.dev) };
if ret == 0 {
Err(FreenectError::LibraryReturnCode(ret))
} else {
Ok(())
}
}
fn get_tilt_state(&mut self) -> *mut ft::freenect_raw_tilt_state {
unsafe { ft::freenect_get_tilt_state(self.dev) }
}
fn set_tilt_degs(&mut self, angle: f64) -> FreenectResult<()> {
let ret = unsafe { ft::freenect_set_tilt_degs(self.dev, angle) };
if ret == 0 {
Err(FreenectError::LibraryReturnCode(ret))
} else {
Ok(())
}
}
fn get_current_video_mode(&mut self) -> FrameMode {
let lowlevel_video_mode = unsafe { ft::freenect_get_current_video_mode(self.dev) };
FrameMode::from_lowlevel_video(&lowlevel_video_mode)
}
fn set_video_mode(&mut self, mode: FrameMode) -> FreenectResult<()> {
let lowlevel_video_mode = try!(mode.to_lowlevel_video().ok_or(FreenectError::FrameFormatMismatch));
unsafe { ft::freenect_set_video_mode(self.dev, lowlevel_video_mode) };
Ok(())
}
fn get_current_depth_mode(&mut self) -> FrameMode {
let lowlevel_depth_mode = unsafe { ft::freenect_get_current_depth_mode(self.dev) };
FrameMode::from_lowlevel_depth(&lowlevel_depth_mode)
}
fn set_depth_mode(&mut self, mode: FrameMode) -> FreenectResult<()> {
let lowlevel_depth_mode = try!(mode.to_lowlevel_depth().ok_or(FreenectError::FrameFormatMismatch));
unsafe { ft::freenect_set_depth_mode(self.dev, lowlevel_depth_mode) };
Ok(())
}
/// Enable or disable a device flag.
fn set_flag(&mut self, flag: Flag, set: bool) -> FreenectResult<()> {
    // Translate the bool into libfreenect's on/off enum.
    let flag_value = if set {
        ft::freenect_flag_value::FREENECT_ON
    } else {
        ft::freenect_flag_value::FREENECT_OFF
    };
    let ret = unsafe { ft::freenect_set_flag(self.dev, flag.to_lowlevel(), flag_value) };
    // BUG FIX: freenect_set_flag returns 0 on success and a negative value on
    // error, like the other freenect_* calls in this impl; the success and
    // failure branches were previously inverted.
    if ret == 0 {
        Ok(())
    } else {
        Err(FreenectError::LibraryReturnCode(ret))
    }
}
}
/// A high-level handle to a single Kinect device.
///
/// Exposes one optional handle per subdevice; a field is `Some` only when the
/// corresponding subdevice flag was requested when the device was opened.
pub struct Device {
ctx: Rc<InnerContext>, // Handle to prevent underlying context being free'd before device
dev: Rc<RefCell<CDevice>>, // Low-level device handle shared with every subdevice
pub motor: Option<MotorSubdevice>, // Present when DEVICE_MOTOR was requested
pub camera: Option<CameraSubdevice>, // Present when DEVICE_CAMERA was requested
pub audio: Option<AudioSubdevice>, // Present when DEVICE_AUDIO was requested
}
impl Device {
    /// Wrap a raw libfreenect device pointer, constructing a subdevice handle
    /// for each subdevice requested in `subdevs`.
    fn from_raw_device(ctx: Rc<InnerContext>, dev: *mut ft::freenect_device, subdevs: DeviceFlags) -> Device {
        let inner_dev = Rc::new(RefCell::new(CDevice::from_raw_device(dev)));
        let motor = if subdevs.contains(DEVICE_MOTOR) {
            Some(MotorSubdevice::new(inner_dev.clone()))
        } else {
            None
        };
        let camera = if subdevs.contains(DEVICE_CAMERA) {
            Some(CameraSubdevice::new(inner_dev.clone()))
        } else {
            None
        };
        let audio = if subdevs.contains(DEVICE_AUDIO) {
            Some(AudioSubdevice::new(inner_dev.clone()))
        } else {
            None
        };
        Device {
            ctx: ctx,
            dev: inner_dev,
            motor: motor,
            camera: camera,
            audio: audio,
        }
    }
}
/// Access to the Kinect's tilt motor and accelerometer state.
pub struct MotorSubdevice {
dev: Rc<RefCell<CDevice>>, // Low-level device handle shared with the other subdevices
}
impl MotorSubdevice {
    /// Create a motor handle sharing the underlying device.
    fn new(dev: Rc<RefCell<CDevice>>) -> MotorSubdevice {
        MotorSubdevice { dev: dev }
    }

    /// Refresh the tilt state from the hardware and return a high-level copy.
    pub fn get_tilt_state(&mut self) -> FreenectResult<RawTiltState> {
        let mut cdev = self.dev.borrow_mut();
        match cdev.update_tilt_state() {
            Ok(()) => Ok(RawTiltState::from_lowlevel(cdev.get_tilt_state())),
            Err(e) => Err(e),
        }
    }
}
// Exists so it can be boxed (therefore fixing its memory address) and have its address handed as a
// C callback userdata void pointer
struct ClosureHolder {
dev: Rc<RefCell<CDevice>>, // Needed by the trampolines to query the current frame mode
depth_cb: Option<Box<FnMut(&FrameMode, &mut [u8], u32) + Send + 'static>>, // User depth callback, if any
video_cb: Option<Box<FnMut(&FrameMode, &mut [u8], u32) + Send + 'static>>, // User video callback, if any
starting: bool, // True while start_depth/start_video run, to suppress re-entrant callbacks
}
impl ClosureHolder {
    /// Build a holder with no user callbacks registered yet.
    fn new(dev: Rc<RefCell<CDevice>>) -> ClosureHolder {
        ClosureHolder {
            dev: dev,
            depth_cb: None,
            video_cb: None,
            starting: false,
        }
    }
}
/// Access to the Kinect's depth and video cameras.
pub struct CameraSubdevice {
dev: Rc<RefCell<CDevice>>, // Low-level device handle shared with the other subdevices
ch: Box<ClosureHolder>, // Boxed so its address (handed to C as userdata) stays stable
}
impl CameraSubdevice {
/// Create a camera handle and register the C-side callbacks.
///
/// The `ClosureHolder` is boxed so its memory address is stable; that address
/// is handed to libfreenect as the userdata pointer and recovered in the
/// trampolines below, so the box must never move for the lifetime of this
/// subdevice.
fn new(dev: Rc<RefCell<CDevice>>) -> CameraSubdevice {
let mut cam_sub = CameraSubdevice{ dev: dev.clone(), ch: Box::new(ClosureHolder::new(dev.clone()))};
// Register all callbacks. We'll let Rust code decide if a user callback should be called.
unsafe {
let mut cdev = dev.borrow_mut();
cdev.set_user(std::mem::transmute(&mut *cam_sub.ch));
cdev.set_depth_callback(CameraSubdevice::depth_cb_trampoline);
cdev.set_video_callback(CameraSubdevice::video_cb_trampoline);
}
return cam_sub;
}
/// Install (or clear, with `None`) the callback invoked for each depth frame.
pub fn set_depth_callback(&mut self, cb: Option<Box<FnMut(&FrameMode, &mut [u8], u32) + Send + 'static>>) {
self.ch.depth_cb = cb;
}
/// Install (or clear, with `None`) the callback invoked for each video frame.
pub fn set_video_callback(&mut self, cb: Option<Box<FnMut(&FrameMode, &mut [u8], u32) + Send + 'static>>) {
self.ch.video_cb = cb;
}
/// Start the depth stream.
///
/// The `starting` flag suppresses callbacks delivered re-entrantly while the
/// underlying start call is still executing (see `depth_cb_trampoline`).
pub fn start_depth(&mut self) -> FreenectResult<()> {
(*self.ch).starting = true;
let ret = self.dev.borrow_mut().start_depth();
(*self.ch).starting = false;
return ret;
}
/// Start the video stream; see `start_depth` for the `starting` flag.
pub fn start_video(&mut self) -> FreenectResult<()> {
(*self.ch).starting = true;
let ret = self.dev.borrow_mut().start_video();
(*self.ch).starting = false;
return ret;
}
/// Stop the depth stream.
pub fn stop_depth(&mut self) -> FreenectResult<()> {
self.dev.borrow_mut().stop_depth()
}
/// Stop the video stream.
pub fn stop_video(&mut self) -> FreenectResult<()> {
self.dev.borrow_mut().stop_video()
}
/// Tilt the device to `angle` degrees.
pub fn set_tilt_degs(&mut self, angle: f64) -> FreenectResult<()> {
self.dev.borrow_mut().set_tilt_degs(angle)
}
/// Query the currently active video mode.
pub fn get_current_video_mode(&mut self) -> FrameMode {
self.dev.borrow_mut().get_current_video_mode()
}
/// Select a new video mode; fails if `mode` is not a video mode.
pub fn set_video_mode(&mut self, mode: FrameMode) -> FreenectResult<()> {
self.dev.borrow_mut().set_video_mode(mode)
}
/// Query the currently active depth mode.
pub fn get_current_depth_mode(&mut self) -> FrameMode {
self.dev.borrow_mut().get_current_depth_mode()
}
/// Select a new depth mode; fails if `mode` is not a depth mode.
pub fn set_depth_mode(&mut self, mode: FrameMode) -> FreenectResult<()> {
self.dev.borrow_mut().set_depth_mode(mode)
}
/// Enable or disable a device flag.
pub fn set_flag(&mut self, flag: Flag, set: bool) -> FreenectResult<()> {
self.dev.borrow_mut().set_flag(flag, set)
}
/// C callback for depth frames. The device's userdata pointer is the
/// `ClosureHolder` registered in `new`, recovered via `freenect_get_user`.
extern "C" fn depth_cb_trampoline(dev: *mut ft::freenect_device, depth: *mut c_void, timestamp: uint32_t) {
unsafe {
let ch = ft::freenect_get_user(dev) as *mut ClosureHolder;
// libfreenect ends up calling this callback when start_depth is called. This is an
// issue as the cdev RefCell will be borrowed twice (causing a panic). Instead, check a
// flag indicating we are starting, and if set, just ignore the frame.
if !(*ch).starting {
// Callback provides no information on frame buffer length. Retrieve the length by
// directly asking for the current mode information
let mode = (*ch).dev.borrow_mut().get_current_depth_mode();
let frame = slice::from_raw_parts_mut(depth as *mut u8, mode.bytes as usize);
let timestamp = timestamp as u32;
match (*ch).depth_cb {
Some(ref mut cb) => cb(&mode, frame, timestamp),
None => return,
};
}
}
}
/// C callback for video frames; mirrors `depth_cb_trampoline`, including the
/// `starting` re-entrancy guard.
extern "C" fn video_cb_trampoline(dev: *mut ft::freenect_device, video: *mut c_void, timestamp: uint32_t) {
unsafe {
let ch = ft::freenect_get_user(dev) as *mut ClosureHolder;
if !(*ch).starting {
// Callback provides no information on frame buffer length. Retrieve the length by
// directly asking for the current mode information
let mode = (*ch).dev.borrow_mut().get_current_video_mode();
let frame = slice::from_raw_parts_mut(video as *mut u8, mode.bytes as usize);
let timestamp = timestamp as u32;
match (*ch).video_cb {
Some(ref mut cb) => cb(&mode, frame, timestamp),
None => return,
};
}
}
}
}
/// Access to the Kinect's audio subdevice (no operations exposed yet).
pub struct AudioSubdevice {
dev: Rc<RefCell<CDevice>>, // Low-level device handle shared with the other subdevices
}
impl AudioSubdevice {
    /// Create an audio handle sharing the underlying device.
    fn new(dev: Rc<RefCell<CDevice>>) -> AudioSubdevice {
        AudioSubdevice { dev: dev }
    }
}
/// Returns the set of subdevices this build of libfreenect supports.
pub fn supported_subdevices() -> DeviceFlags {
    let bits = unsafe { ft::freenect_supported_subdevices() as u32 };
    // ROBUSTNESS FIX: use `from_bits_truncate` instead of
    // `from_bits(..).unwrap()` so that unknown bits reported by a newer
    // libfreenect are dropped rather than panicking.
    DeviceFlags::from_bits_truncate(bits)
}
/// Look up the video mode matching `res` and `fmt`.
///
/// libfreenect signals "no such mode" through the `is_valid` flag rather than
/// an error return, so translate that into an `Option` here.
pub fn find_video_mode(res: Resolution, fmt: VideoFormat) -> Option<FrameMode> {
    let raw = unsafe { ft::freenect_find_video_mode(res.to_lowlevel(), fmt.to_lowlevel()) };
    let mode = FrameMode::from_lowlevel_video(&raw);
    if !mode.is_valid {
        return None;
    }
    Some(mode)
}
/// Look up the depth mode matching `res` and `fmt`.
///
/// libfreenect signals "no such mode" through the `is_valid` flag rather than
/// an error return, so translate that into an `Option` here.
pub fn find_depth_mode(res: Resolution, fmt: DepthFormat) -> Option<FrameMode> {
    let raw = unsafe { ft::freenect_find_depth_mode(res.to_lowlevel(), fmt.to_lowlevel()) };
    let mode = FrameMode::from_lowlevel_depth(&raw);
    if !mode.is_valid {
        return None;
    }
    Some(mode)
}
/// Iterator over the video modes supported by libfreenect.
pub struct VideoModeIter {
video_mode_count: c_int, // Total number of modes, cached at construction
next_mode: c_int, // Index of the next mode to yield
}
impl VideoModeIter {
/// Build an iterator over every supported video mode.
/// The total count is cached once up front; `next_mode` is the cursor.
fn new() -> VideoModeIter {
VideoModeIter{
video_mode_count: unsafe { ft::freenect_get_video_mode_count() },
next_mode: 0,
}
}
}
impl Iterator for VideoModeIter {
    type Item = FrameMode;

    /// Yield the next supported video mode, or `None` once exhausted.
    fn next(&mut self) -> Option<FrameMode> {
        if self.next_mode >= self.video_mode_count {
            return None;
        }
        let raw = unsafe { ft::freenect_get_video_mode(self.next_mode) };
        self.next_mode += 1;
        Some(FrameMode::from_lowlevel_video(&raw))
    }
}
impl ExactSizeIterator for VideoModeIter {
    /// Number of modes still to be yielded.
    ///
    /// BUG FIX: `ExactSizeIterator::len` must report the *remaining* length,
    /// not the total. The previous implementation ignored `next_mode`, so
    /// `len()` was wrong as soon as iteration had started. `next()` guarantees
    /// `next_mode <= video_mode_count`, so the subtraction cannot underflow.
    fn len(&self) -> usize {
        (self.video_mode_count - self.next_mode) as usize
    }
}
/// Returns an iterator over all video modes supported by libfreenect.
pub fn video_modes() -> VideoModeIter {
VideoModeIter::new()
}
/// Iterator over the depth modes supported by libfreenect.
pub struct DepthModeIter {
depth_mode_count: c_int, // Total number of modes, cached at construction
next_mode: c_int, // Index of the next mode to yield
}
impl DepthModeIter {
/// Build an iterator over every supported depth mode.
/// The total count is cached once up front; `next_mode` is the cursor.
fn new() -> DepthModeIter {
DepthModeIter{
depth_mode_count: unsafe { ft::freenect_get_depth_mode_count() },
next_mode: 0,
}
}
}
impl Iterator for DepthModeIter {
    type Item = FrameMode;

    /// Yield the next supported depth mode, or `None` once exhausted.
    fn next(&mut self) -> Option<FrameMode> {
        if self.next_mode >= self.depth_mode_count {
            return None;
        }
        let raw = unsafe { ft::freenect_get_depth_mode(self.next_mode) };
        self.next_mode += 1;
        Some(FrameMode::from_lowlevel_depth(&raw))
    }
}
impl ExactSizeIterator for DepthModeIter {
    /// Number of modes still to be yielded.
    ///
    /// BUG FIX: `ExactSizeIterator::len` must report the *remaining* length,
    /// not the total. The previous implementation ignored `next_mode`, so
    /// `len()` was wrong as soon as iteration had started. `next()` guarantees
    /// `next_mode <= depth_mode_count`, so the subtraction cannot underflow.
    fn len(&self) -> usize {
        (self.depth_mode_count - self.next_mode) as usize
    }
}
/// Returns an iterator over all depth modes supported by libfreenect.
pub fn depth_modes() -> DepthModeIter {
DepthModeIter::new()
}
|
/// Sort `vec` in place with quicksort and return it for chaining.
///
/// Uses the first element as the pivot and a Lomuto-style in-place partition.
///
/// PERF FIX: the previous implementation kept a `first_opened: Vec<usize>`
/// queue, allocating O(n) heap per partition pass, and popped it with
/// `remove(0)` which shifts the whole queue (O(n) per removal, so the
/// partition itself could degrade to O(n^2)). A single boundary index does
/// the same job in place with no allocation. Worst-case recursion depth on
/// already-sorted input remains O(n), as before.
pub fn quicksort<T: PartialEq + PartialOrd>(vec: &mut [T]) -> &mut [T] {
    if vec.len() <= 1 {
        return vec;
    }
    // Pivot is the first element; `boundary` is the index of the last element
    // known to be smaller than the pivot.
    let mut boundary: usize = 0;
    for i in 1..vec.len() {
        if vec[i] < vec[0] {
            boundary += 1;
            vec.swap(i, boundary);
        }
    }
    // Move the pivot into its final position, then sort both partitions.
    vec.swap(0, boundary);
    quicksort(&mut vec[0..boundary]);
    quicksort(&mut vec[boundary + 1..]);
    vec
}
#[cfg(test)]
mod tests {
use super::*;
// Sorting a u64 vector with duplicates must reproduce the known sorted order.
#[test]
fn test_vec_of_u64() {
let sorted: Vec<u64> = vec![1, 1, 2, 3, 3, 4, 5, 5, 6, 9];
let mut unsorted: Vec<u64> = vec![3, 1, 4, 1, 5, 9, 2, 6, 5, 3];
quicksort(&mut unsorted);
assert_eq!(unsorted, sorted.as_slice());
}
}
// Add test_vec_of_u8
// Signed-off-by: Johannes Löthberg <8398eb9892c5ab2ff439ca0b5e4b0706dd9ebef9@kyriasis.com>
pub fn quicksort<T: PartialEq + PartialOrd>(vec: &mut [T]) -> &mut [T] {
if vec.len() <= 1 {
return vec;
}
let pivot: usize = 0;
let mut first_opened = Vec::new();
let mut last_closed: usize = 0;
for i in 1..vec.len() {
first_opened.push(i);
if vec[i] < vec[pivot] {
let to = first_opened.remove(0);
vec.swap(i, to);
last_closed = to;
}
}
vec.swap(pivot, last_closed);
quicksort(&mut vec[0..last_closed]);
quicksort(&mut vec[last_closed+1..]);
vec
}
#[cfg(test)]
mod tests {
use super::*;
// Sorting a u64 vector with duplicates must reproduce the known sorted order.
#[test]
fn test_vec_of_u64() {
let sorted: Vec<u64> = vec![1, 1, 2, 3, 3, 4, 5, 5, 6, 9];
let mut unsorted: Vec<u64> = vec![3, 1, 4, 1, 5, 9, 2, 6, 5, 3];
quicksort(&mut unsorted);
assert_eq!(unsorted, sorted.as_slice());
}
// Sorting ASCII bytes of a pangram must equal the expected byte ordering
// (space first, then 'T', then lowercase letters).
#[test]
fn test_vec_of_u8() {
let sorted = " Tabcdeeefghhijklmnoooopqrrstuuvwxyz".to_string().into_bytes();
let mut unsorted = "The quick brown fox jumps over the lazy dog".to_string().into_bytes();
quicksort(&mut unsorted);
assert_eq!(sorted, unsorted);
}
}
|
#![doc(html_root_url = "https://docs.rs/mio/0.7.0")]
#![deny(missing_docs, missing_debug_implementations, rust_2018_idioms)]
#![cfg_attr(test, deny(warnings))]
//! A fast, low-level IO library for Rust focusing on non-blocking APIs, event
//! notification, and other useful utilities for building high performance IO
//! apps.
//!
//! # Features
//!
//! * Non-blocking TCP, UDP
//! * I/O event queue backed by epoll, kqueue, and IOCP
//! * Zero allocations at runtime
//! * Platform specific extensions
//!
//! # Non-goals
//!
//! The following are specifically omitted from Mio and are left to the user or
//! higher-level libraries.
//!
//! * File operations
//! * Thread pools / multi-threaded event loop
//! * Timers
//!
//! # Platforms
//!
//! Currently supported platforms:
//!
//! * Android
//! * Bitrig
//! * DragonFly BSD
//! * FreeBSD
//! * Linux
//! * NetBSD
//! * OpenBSD
//! * Solaris
//! * Windows
//! * iOS
//! * macOS
//!
//! Mio can handle interfacing with each of the event systems of the
//! aforementioned platforms. The details of their implementation are further
//! discussed in [`Poll`].
//!
//! # Usage
//!
//! Using Mio starts by creating a [`Poll`], which reads events from the OS and
//! puts them into [`Events`]. You can handle IO events from the OS with it.
//!
//! For more detail, see [`Poll`].
//!
//! [`Poll`]: struct.Poll.html
//! [`Events`]: struct.Events.html
//!
//! # Example
//!
//! ```
//! use mio::*;
//! use mio::net::{TcpListener, TcpStream};
//!
//! // Setup some tokens to allow us to identify which event is
//! // for which socket.
//! const SERVER: Token = Token(0);
//! const CLIENT: Token = Token(1);
//!
//! let addr = "127.0.0.1:13265".parse().unwrap();
//!
//! // Setup the server socket
//! let server = TcpListener::bind(addr).unwrap();
//!
//! // Create a poll instance
//! let mut poll = Poll::new().unwrap();
//! let registry = poll.registry().clone();
//!
//! // Start listening for incoming connections
//! registry.register(
//! &server,
//! SERVER,
//! Interests::READABLE).unwrap();
//!
//! // Setup the client socket
//! let sock = TcpStream::connect(addr).unwrap();
//!
//! // Register the socket
//! registry.register(
//! &sock,
//! CLIENT,
//! Interests::READABLE).unwrap();
//!
//! // Create storage for events
//! let mut events = Events::with_capacity(1024);
//!
//! loop {
//! poll.poll(&mut events, None).unwrap();
//!
//! for event in events.iter() {
//! match event.token() {
//! SERVER => {
//! // Accept and drop the socket immediately, this will close
//! // the socket and notify the client of the EOF.
//! let _ = server.accept();
//! }
//! CLIENT => {
//! // The server just shuts down the socket, let's just exit
//! // from our event loop.
//! return;
//! }
//! _ => unreachable!(),
//! }
//! }
//! }
//!
//! ```
mod interests;
mod poll;
mod sys;
mod token;
mod waker;
pub mod event;
pub mod net;
pub use event::Events;
pub use interests::Interests;
pub use poll::{Poll, Registry};
pub use token::Token;
pub use waker::Waker;
/// Unix-only extensions.
#[cfg(unix)]
pub mod unix {
    pub use crate::sys::EventedFd;
}
/// Windows-only extensions to the mio crate.
///
/// Mio on windows is currently implemented with IOCP for a high-performance
/// implementation of asynchronous I/O. Mio then provides TCP and UDP as sample
/// bindings for the system to connect networking types to asynchronous I/O. On
/// Unix this scheme is then also extensible to all other file descriptors with
/// the `EventedFd` type, but on Windows no such analog is available. The
/// purpose of this module, however, is to similarly provide a mechanism for
/// foreign I/O types to get hooked up into the IOCP event loop.
///
/// This module provides two types for interfacing with a custom IOCP handle:
///
/// * `Binding` - this type is intended to govern binding with mio's `Poll`
/// type. Each I/O object should contain an instance of `Binding` that's
/// interfaced with for the implementation of the `Evented` trait. The
/// `register`, `reregister`, and `deregister` methods for the `Evented` trait
/// all have rough analogs with `Binding`.
///
/// Note that this type **does not handle readiness**. That is, this type does
/// not handle whether sockets are readable/writable/etc. It's intended that
/// IOCP types will internally manage this state with a `SetReadiness` type
/// from the `poll` module. The `SetReadiness` is typically lazily created on
/// the first time that `Evented::register` is called and then stored in the
/// I/O object.
///
/// Also note that for types which represent streams of bytes the mio
/// interface of *readiness* doesn't map directly to the Windows model of
/// *completion*. This means that types will have to perform internal
/// buffering to ensure that a readiness interface can be provided. For a
/// sample implementation see the TCP/UDP modules in mio itself.
///
/// * `Overlapped` - this type is intended to be used as the concrete instances
/// of the `OVERLAPPED` type that most win32 methods expect. It's crucial, for
/// safety, that all asynchronous operations are initiated with an instance of
/// `Overlapped` and not another instantiation of `OVERLAPPED`.
///
/// Mio's `Overlapped` type is created with a function pointer that receives
/// a `OVERLAPPED_ENTRY` type when called. This `OVERLAPPED_ENTRY` type is
/// defined in the `winapi` crate. Whenever a completion is posted to an IOCP
/// object the `OVERLAPPED` that was signaled will be interpreted as
/// `Overlapped` in the mio crate and this function pointer will be invoked.
/// Through this function pointer, and through the `OVERLAPPED` pointer,
/// implementations can handle management of I/O events.
///
/// When put together these two types enable custom Windows handles to be
/// registered with mio's event loops. The `Binding` type is used to associate
/// handles and the `Overlapped` type is used to execute I/O operations. When
/// the I/O operations are completed a custom function pointer is called which
/// typically modifies a `SetReadiness` set by `Evented` methods which will get
/// later hooked into the mio event loop.
#[cfg(windows)]
pub mod windows {
    // Re-export the two IOCP integration points individually.
    pub use crate::sys::Binding;
    pub use crate::sys::Overlapped;
}
// Disallow warnings in examples
// We want to set a good example!
#![doc(html_root_url = "https://docs.rs/mio/0.7.0")]
#![deny(missing_docs, missing_debug_implementations, rust_2018_idioms)]
// Disallow warnings when running tests.
#![cfg_attr(test, deny(warnings))]
// Disallow warnings in examples.
#![doc(test(attr(deny(warnings))))]
//! A fast, low-level IO library for Rust focusing on non-blocking APIs, event
//! notification, and other useful utilities for building high performance IO
//! apps.
//!
//! # Features
//!
//! * Non-blocking TCP, UDP
//! * I/O event queue backed by epoll, kqueue, and IOCP
//! * Zero allocations at runtime
//! * Platform specific extensions
//!
//! # Non-goals
//!
//! The following are specifically omitted from Mio and are left to the user or
//! higher-level libraries.
//!
//! * File operations
//! * Thread pools / multi-threaded event loop
//! * Timers
//!
//! # Platforms
//!
//! Currently supported platforms:
//!
//! * Android
//! * Bitrig
//! * DragonFly BSD
//! * FreeBSD
//! * Linux
//! * NetBSD
//! * OpenBSD
//! * Solaris
//! * Windows
//! * iOS
//! * macOS
//!
//! Mio can handle interfacing with each of the event systems of the
//! aforementioned platforms. The details of their implementation are further
//! discussed in [`Poll`].
//!
//! # Usage
//!
//! Using Mio starts by creating a [`Poll`], which reads events from the OS and
//! puts them into [`Events`]. You can handle IO events from the OS with it.
//!
//! For more detail, see [`Poll`].
//!
//! [`Poll`]: struct.Poll.html
//! [`Events`]: struct.Events.html
//!
//! # Example
//!
//! ```
//! use mio::*;
//! use mio::net::{TcpListener, TcpStream};
//!
//! // Setup some tokens to allow us to identify which event is
//! // for which socket.
//! const SERVER: Token = Token(0);
//! const CLIENT: Token = Token(1);
//!
//! let addr = "127.0.0.1:13265".parse().unwrap();
//!
//! // Setup the server socket
//! let server = TcpListener::bind(addr).unwrap();
//!
//! // Create a poll instance
//! let mut poll = Poll::new().unwrap();
//! let registry = poll.registry().clone();
//!
//! // Start listening for incoming connections
//! registry.register(
//! &server,
//! SERVER,
//! Interests::READABLE).unwrap();
//!
//! // Setup the client socket
//! let sock = TcpStream::connect(addr).unwrap();
//!
//! // Register the socket
//! registry.register(
//! &sock,
//! CLIENT,
//! Interests::READABLE).unwrap();
//!
//! // Create storage for events
//! let mut events = Events::with_capacity(1024);
//!
//! loop {
//! poll.poll(&mut events, None).unwrap();
//!
//! for event in events.iter() {
//! match event.token() {
//! SERVER => {
//! // Accept and drop the socket immediately, this will close
//! // the socket and notify the client of the EOF.
//! let _ = server.accept();
//! }
//! CLIENT => {
//! // The server just shuts down the socket, let's just exit
//! // from our event loop.
//! return;
//! }
//! _ => unreachable!(),
//! }
//! }
//! }
//!
//! ```
mod interests;
mod poll;
mod sys;
mod token;
mod waker;
pub mod event;
pub mod net;
pub use event::Events;
pub use interests::Interests;
pub use poll::{Poll, Registry};
pub use token::Token;
pub use waker::Waker;
/// Unix-only extensions.
#[cfg(unix)]
pub mod unix {
    pub use crate::sys::EventedFd;
}
/// Windows-only extensions to the mio crate.
///
/// Mio on windows is currently implemented with IOCP for a high-performance
/// implementation of asynchronous I/O. Mio then provides TCP and UDP as sample
/// bindings for the system to connect networking types to asynchronous I/O. On
/// Unix this scheme is then also extensible to all other file descriptors with
/// the `EventedFd` type, but on Windows no such analog is available. The
/// purpose of this module, however, is to similarly provide a mechanism for
/// foreign I/O types to get hooked up into the IOCP event loop.
///
/// This module provides two types for interfacing with a custom IOCP handle:
///
/// * `Binding` - this type is intended to govern binding with mio's `Poll`
/// type. Each I/O object should contain an instance of `Binding` that's
/// interfaced with for the implementation of the `Evented` trait. The
/// `register`, `reregister`, and `deregister` methods for the `Evented` trait
/// all have rough analogs with `Binding`.
///
/// Note that this type **does not handle readiness**. That is, this type does
/// not handle whether sockets are readable/writable/etc. It's intended that
/// IOCP types will internally manage this state with a `SetReadiness` type
/// from the `poll` module. The `SetReadiness` is typically lazily created on
/// the first time that `Evented::register` is called and then stored in the
/// I/O object.
///
/// Also note that for types which represent streams of bytes the mio
/// interface of *readiness* doesn't map directly to the Windows model of
/// *completion*. This means that types will have to perform internal
/// buffering to ensure that a readiness interface can be provided. For a
/// sample implementation see the TCP/UDP modules in mio itself.
///
/// * `Overlapped` - this type is intended to be used as the concrete instances
/// of the `OVERLAPPED` type that most win32 methods expect. It's crucial, for
/// safety, that all asynchronous operations are initiated with an instance of
/// `Overlapped` and not another instantiation of `OVERLAPPED`.
///
/// Mio's `Overlapped` type is created with a function pointer that receives
/// a `OVERLAPPED_ENTRY` type when called. This `OVERLAPPED_ENTRY` type is
/// defined in the `winapi` crate. Whenever a completion is posted to an IOCP
/// object the `OVERLAPPED` that was signaled will be interpreted as
/// `Overlapped` in the mio crate and this function pointer will be invoked.
/// Through this function pointer, and through the `OVERLAPPED` pointer,
/// implementations can handle management of I/O events.
///
/// When put together these two types enable custom Windows handles to be
/// registered with mio's event loops. The `Binding` type is used to associate
/// handles and the `Overlapped` type is used to execute I/O operations. When
/// the I/O operations are completed a custom function pointer is called which
/// typically modifies a `SetReadiness` set by `Evented` methods which will get
/// later hooked into the mio event loop.
#[cfg(windows)]
pub mod windows {
    // Re-export the two IOCP integration points individually.
    pub use crate::sys::Binding;
    pub use crate::sys::Overlapped;
}
|
//! Keep track of all your local git repositories.
//!
//! This crate houses the binary and library for the git-global subcommand, a
//! way to query statuses of all your local git repos. The binary can be
//! installed with cargo: `cargo install git-global`.
//!
//! # Usage
//!
//! ```bash
//! $ git global [status] # get the status of all known repos
//! $ git global scan # (re)scan home directory to discover git repos
//! $ git global list # list all known repos
//! $ git global info # show meta-information about git-global
//! ```
extern crate app_dirs;
extern crate chrono;
#[macro_use]
extern crate clap;
extern crate git2;
#[macro_use]
extern crate json;
extern crate walkdir;
#[cfg(test)]
extern crate tempdir;
mod cli;
mod core;
mod errors;
pub mod subcommands; // Using `pub mod` so we see the docs.
pub use cli::run_from_command_line;
pub use core::{GitGlobalResult, Repo, get_repos};
pub use errors::Result;
pub use errors::GitGlobalError;
// Update cargo-doc in library file.
//! Keep track of all your git repositories.
//!
//! This crate houses the binary and library for the git-global subcommand, a
//! way to query statuses of all your git repos. The binary can be installed
//! with cargo: `cargo install git-global`.
//!
//! # Usage
//!
//! ```bash
//! $ git global [status] # show `git status` for all your git repos
//! $ git global info # show information about git-global itself
//! $ git global list # show all git repos git-global knows about
//! $ git global scan # search your filesystem for git repos and update cache
//! ```
extern crate app_dirs;
extern crate chrono;
#[macro_use]
extern crate clap;
extern crate git2;
#[macro_use]
extern crate json;
extern crate walkdir;
#[cfg(test)]
extern crate tempdir;
mod cli;
mod core;
mod errors;
pub mod subcommands; // Using `pub mod` so we see the docs.
pub use cli::run_from_command_line;
pub use core::{GitGlobalResult, Repo, get_repos};
pub use errors::Result;
pub use errors::GitGlobalError;
|
#![crate_name="ndarray"]
#![cfg_attr(has_deprecated, feature(deprecated))]
#![doc(html_root_url = "http://bluss.github.io/rust-ndarray/master/")]
//! The `ndarray` crate provides an N-dimensional container similar to numpy’s
//! ndarray.
//!
//! - [`ArrayBase`](struct.ArrayBase.html):
//! The N-dimensional array type itself.
//! - [`Array`](type.Array.html):
//! An array where the data is shared and copy on write, it
//! can act as both an owner of the data as well as a lightweight view.
//! - [`OwnedArray`](type.OwnedArray.html):
//! An array where the data is owned uniquely.
//! - [`ArrayView`](type.ArrayView.html), [`ArrayViewMut`](type.ArrayViewMut.html):
//! Lightweight array views.
//!
//! ## Highlights
//!
//! - Generic N-dimensional array
//! - Slicing, also with arbitrary step size, and negative indices to mean
//! elements from the end of the axis.
//! - There is both an easy to use copy on write array (`Array`),
//! or a regular uniquely owned array (`OwnedArray`), and both can use
//! read-only and read-write array views.
//! - Iteration and most operations are very efficient on contiguous c-order arrays
//! (the default layout, without any transposition or discontiguous subslicing),
//! and on arrays where the lowest dimension is contiguous (contiguous block
//! slicing).
//! - Array views can be used to slice and mutate any `[T]` data.
//!
//! ## Status and Lookout
//!
//! - Still iterating on the API
//! - Performance status:
//! + Arithmetic involving contiguous c-order arrays and contiguous lowest
//! dimension arrays optimizes very well.
//! + `.fold()` and `.zip_mut_with()` are the most efficient ways to
//! perform single traversal and lock step traversal respectively.
//! + Transposed arrays where the lowest dimension is not c-contiguous
//! is still a pain point.
//! - There is experimental bridging to the linear algebra package `rblas`.
//!
//! ## Crate Feature Flags
//!
//! - `assign_ops`
//! - Optional, requires nightly
//! - Enables the compound assignment operators
//! - `rustc-serialize`
//! - Optional, stable
//! - Enables serialization support
//! - `rblas`
//! - Optional, stable
//! - Enables `rblas` integration
//!
#![cfg_attr(feature = "assign_ops", feature(augmented_assignments,
op_assign_traits))]
#[cfg(feature = "serde")]
extern crate serde;
#[cfg(feature = "rustc-serialize")]
extern crate rustc_serialize as serialize;
extern crate itertools as it;
#[cfg(not(nocomplex))]
extern crate num as libnum;
use libnum::Float;
use std::cmp;
use std::mem;
use std::ops::{Add, Sub, Mul, Div, Rem, Neg, Not, Shr, Shl,
BitAnd,
BitOr,
BitXor,
};
use std::rc::Rc;
use std::slice::{self, Iter, IterMut};
use it::ZipSlices;
pub use dimension::{Dimension, RemoveAxis};
pub use indexes::Indexes;
pub use shape_error::ShapeError;
pub use si::{Si, S};
use dimension::stride_offset;
use iterators::Baseiter;
pub use iterators::{
InnerIter,
InnerIterMut,
};
#[allow(deprecated)]
use linalg::{Field, Ring};
pub mod linalg;
mod arraytraits;
#[cfg(feature = "serde")]
mod arrayserialize;
mod arrayformat;
#[cfg(feature = "rblas")]
pub mod blas;
mod dimension;
mod indexes;
mod iterators;
mod si;
mod shape_error;
// NOTE: In theory, the whole library should compile
// and pass tests even if you change Ix and Ixs.
/// Array index type (unsigned); used for dimension sizes and element indices.
pub type Ix = u32;
/// Array index type (signed); used where negative offsets/strides are needed,
/// e.g. in slicing specifications.
pub type Ixs = i32;
/// An *N*-dimensional array.
///
/// The array is a general container of elements. It cannot grow or shrink, but
/// can be sliced into subsets of its data.
/// The array supports arithmetic operations by applying them elementwise.
///
/// The `ArrayBase<S, D>` is parameterized by:
///
/// - `S` for the data container
/// - `D` for the number of dimensions
///
/// Type aliases [`Array`], [`OwnedArray`], [`ArrayView`], and [`ArrayViewMut`] refer
/// to `ArrayBase` with different types for the data storage.
///
/// [`Array`]: type.Array.html
/// [`OwnedArray`]: type.OwnedArray.html
/// [`ArrayView`]: type.ArrayView.html
/// [`ArrayViewMut`]: type.ArrayViewMut.html
///
/// ## `Array` and `OwnedArray`
///
/// `OwnedArray` owns the underlying array elements directly (just like
/// a `Vec`), while [`Array`](type.Array.html) is a an array with reference
/// counted data. `Array` can act both as an owner or as a view in that regard.
/// Sharing requires that it uses copy-on-write for mutable operations.
/// Calling a method for mutating elements on `Array`, for example
/// [`view_mut()`](#method.view_mut) or [`get_mut()`](#method.get_mut),
/// will break sharing and require a clone of the data (if it is not uniquely held).
///
/// Note that all `ArrayBase` variants can change their view (slicing) of the
/// data freely, even when their data can’t be mutated.
///
/// ## Indexing and Dimension
///
/// Array indexes are represented by the types `Ix` and `Ixs`
/// (signed). ***Note: A future version will switch from `u32` to `usize`.***
///
/// The dimensionality of the array determines the number of *axes*, for example
/// a 2D array has two axes. These are listed in “big endian” order, so that
/// the greatest dimension is listed first, the lowest dimension with the most
/// rapidly varying index is the last.
/// For the 2D array this means that indices are `(row, column)`, and the order of
/// the elements is *(0, 0), (0, 1), (0, 2), ... (1, 0), (1, 1), (1, 2) ...* etc.
///
/// The number of axes for an array is fixed by the `D` parameter: `Ix` for
/// a 1D array, `(Ix, Ix)` for a 2D array etc. The `D` type is also used
/// for element indices in `.get()` and `array[index]`. The dimension type `Vec<Ix>`
/// allows a dynamic number of axes.
///
/// ## Slicing
///
/// You can use slicing to create a view of a subset of the data in
/// the array. Slicing methods include `.slice()`, `.islice()`,
/// `.slice_mut()`.
///
/// The slicing specification is passed as a function argument as a fixed size
/// array with elements of type [`Si`] with fields `Si(begin, end, stride)`,
/// where the values are signed integers, and `end` is an `Option<Ixs>`.
/// The constant [`S`] is a shorthand for the full range of an axis.
/// For example, if the array has two axes, the slice argument is passed as
/// type `&[Si; 2]`.
///
/// The macro [`s![]`](macro.s!.html) is however a much more convenient way to
/// specify the slicing argument, so it will be used in all examples.
///
/// [`Si`]: struct.Si.html
/// [`S`]: constant.S.html
///
/// ```
/// // import the s![] macro
/// #[macro_use(s)]
/// extern crate ndarray;
///
/// use ndarray::arr3;
///
/// fn main() {
///
/// // 2 submatrices of 2 rows with 3 elements per row, means a shape of `[2, 2, 3]`.
///
/// let a = arr3(&[[[ 1, 2, 3], // -- 2 rows \_
/// [ 4, 5, 6]], // -- /
/// [[ 7, 8, 9], // \_ 2 submatrices
/// [10, 11, 12]]]); // /
/// // 3 columns ..../.../.../
///
/// assert_eq!(a.shape(), &[2, 2, 3]);
///
/// // Let’s create a slice with
/// //
/// // - Both of the submatrices of the greatest dimension: `..`
/// // - Only the first row in each submatrix: `0..1`
/// // - Every element in each row: `..`
///
/// let b = a.slice(s![.., 0..1, ..]);
/// // without the macro, the explicit argument is `&[S, Si(0, Some(1), 1), S]`
///
/// let c = arr3(&[[[ 1, 2, 3]],
/// [[ 7, 8, 9]]]);
/// assert_eq!(b, c);
/// assert_eq!(b.shape(), &[2, 1, 3]);
///
/// // Let’s create a slice with
/// //
/// // - Both submatrices of the greatest dimension: `..`
/// // - The last row in each submatrix: `-1..`
/// // - Row elements in reverse order: `..;-1`
/// let d = a.slice(s![.., -1.., ..;-1]);
/// let e = arr3(&[[[ 6, 5, 4]],
/// [[12, 11, 10]]]);
/// assert_eq!(d, e);
/// }
/// ```
///
/// ## Subviews
///
/// Subview methods allow you to restrict the array view while removing
/// one axis from the array. Subview methods include `.subview()`,
/// `.isubview()`, `.subview_mut()`.
///
/// Subview takes two arguments: `axis` and `index`.
///
/// ```
/// use ndarray::{arr3, aview2};
///
/// // 2 submatrices of 2 rows with 3 elements per row, means a shape of `[2, 2, 3]`.
///
/// let a = arr3(&[[[ 1, 2, 3], // \ axis 0, submatrix 0
/// [ 4, 5, 6]], // /
/// [[ 7, 8, 9], // \ axis 0, submatrix 1
/// [10, 11, 12]]]); // /
/// // \
/// // axis 2, column 0
///
/// assert_eq!(a.shape(), &[2, 2, 3]);
///
/// // Let’s take a subview along the greatest dimension (axis 0),
/// // taking submatrix 0, then submatrix 1
///
/// let sub_0 = a.subview(0, 0);
/// let sub_1 = a.subview(0, 1);
///
/// assert_eq!(sub_0, aview2(&[[ 1, 2, 3],
/// [ 4, 5, 6]]));
/// assert_eq!(sub_1, aview2(&[[ 7, 8, 9],
/// [10, 11, 12]]));
/// assert_eq!(sub_0.shape(), &[2, 3]);
///
/// // This is the subview picking only axis 2, column 0
/// let sub_col = a.subview(2, 0);
///
/// assert_eq!(sub_col, aview2(&[[ 1, 4],
/// [ 7, 10]]));
/// ```
///
/// `.isubview()` modifies the view in the same way as `subview()`, but
/// since it is *in place*, it cannot remove the collapsed axis. It becomes
/// an axis of length 1.
///
/// ## Broadcasting
///
/// Arrays support limited *broadcasting*, where arithmetic operations with
/// array operands of different sizes can be carried out by repeating the
/// elements of the smaller dimension array. See
/// [`.broadcast()`](#method.broadcast) for a more detailed
/// description.
///
/// ```
/// use ndarray::arr2;
///
/// let a = arr2(&[[1., 1.],
/// [1., 2.]]);
/// let b = arr2(&[[0., 1.]]);
///
/// let c = arr2(&[[1., 2.],
/// [1., 3.]]);
/// // We can add because the shapes are compatible even if not equal.
/// assert!(
/// c == a + b
/// );
/// ```
///
pub struct ArrayBase<S, D> where S: Data {
    // Invariant: `ptr` points into the allocation reachable through `data`,
    // and every index described by `dim`/`strides` stays inside it
    // (checked in debug builds by `pointer_is_inbounds`).
    /// Rc data when used as view, Uniquely held data when being mutated
    data: S,
    /// A pointer into the buffer held by data, may point anywhere
    /// in its range.
    ptr: *mut S::Elem,
    /// The size of each axis
    dim: D,
    /// The element count stride per axis. To be parsed as `isize`.
    strides: D,
}
/// Array’s inner representation.
///
/// Unsafe trait: implementors must return a slice that covers the entire
/// backing buffer, since `pointer_is_inbounds` and the view methods rely
/// on it to validate raw pointers.
pub unsafe trait Data {
    /// The element type stored in the array.
    type Elem;
    /// Return the whole backing buffer as a slice (memory order).
    fn slice(&self) -> &[Self::Elem];
}
/// Array’s writable inner representation.
pub unsafe trait DataMut : Data {
    /// Return the whole backing buffer as a mutable slice (memory order).
    fn slice_mut(&mut self) -> &mut [Self::Elem];
    /// Make sure the array's storage is uniquely held, cloning it if
    /// necessary. Default is a no-op; storage with shared ownership
    /// (e.g. `Rc`) overrides it to implement copy-on-write.
    /// (Anonymous parameter: the array whose storage must become unique.)
    fn ensure_unique<D>(&mut ArrayBase<Self, D>)
        where Self: Sized, D: Dimension
    {
    }
}
/// Clone an Array’s storage.
pub unsafe trait DataClone : Data {
    /// Clone the storage and return it together with a pointer that refers
    /// to the same logical element in the clone that `ptr` referred to in
    /// `self`.
    ///
    /// Unsafe because, `ptr` must point inside the current storage.
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem) -> (Self, *mut Self::Elem);
}
// Shared (reference-counted) storage: reading needs no uniqueness.
unsafe impl<A> Data for Rc<Vec<A>> {
    type Elem = A;
    fn slice(&self) -> &[A] { self }
}
// NOTE: Copy on write
unsafe impl<A> DataMut for Rc<Vec<A>> where A: Clone {
    fn slice_mut(&mut self) -> &mut [A] { &mut Rc::make_mut(self)[..] }
    /// Copy-on-write: guarantee the `Rc` is the sole owner of the buffer
    /// before handing out mutable access, fixing up `self_.ptr` afterwards.
    fn ensure_unique<D>(self_: &mut ArrayBase<Self, D>)
        where Self: Sized, D: Dimension
    {
        // Already uniquely owned: nothing to do.
        if Rc::get_mut(&mut self_.data).is_some() {
            return
        }
        // If the view covers at most half the buffer, copy only the viewed
        // elements into a fresh, tightly-sized array instead of cloning
        // the whole (mostly unused) buffer.
        if self_.dim.size() <= self_.data.len() / 2 {
            unsafe {
                *self_ = Array::from_vec_dim(self_.dim.clone(),
                                             self_.iter().map(|x| x.clone()).collect());
            }
            return;
        }
        // Otherwise clone the full buffer via make_mut and re-point `ptr`
        // at the same element offset inside the new allocation.
        let our_off = (self_.ptr as isize - self_.data.as_ptr() as isize)
                      / mem::size_of::<A>() as isize;
        let rvec = Rc::make_mut(&mut self_.data);
        unsafe {
            self_.ptr = rvec.as_mut_ptr().offset(our_off);
        }
    }
}
unsafe impl<A> DataClone for Rc<Vec<A>> {
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem)
        -> (Self, *mut Self::Elem)
    {
        // Cloning an Rc only bumps the refcount; the buffer address does
        // not change, so the pointer is preserved as-is.
        (self.clone(), ptr)
    }
}
// Uniquely owned storage.
unsafe impl<A> Data for Vec<A> {
    type Elem = A;
    fn slice(&self) -> &[A] { self }
}
// A Vec is always uniquely owned, so no `ensure_unique` override is needed.
unsafe impl<A> DataMut for Vec<A> {
    fn slice_mut(&mut self) -> &mut [A] { self }
}
unsafe impl<A> DataClone for Vec<A> where A: Clone {
    /// Clone the vector and translate `ptr` so it points at the same
    /// logical element inside the new allocation.
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem)
        -> (Self, *mut Self::Elem)
    {
        let mut u = self.clone();
        // Element offset of `ptr` from the start of the old buffer.
        // BUGFIX: this was previously computed as `self.as_ptr() - ptr`,
        // i.e. with the sign flipped, so cloning any array whose `ptr` was
        // offset into the buffer (e.g. after slicing) produced a pointer
        // *before* the new allocation. Also guard against zero-sized
        // element types to avoid dividing by zero.
        let our_off = if mem::size_of::<A>() != 0 {
            (ptr as isize - self.as_ptr() as isize)
                / mem::size_of::<A>() as isize
        } else {
            0
        };
        let new_ptr = u.as_mut_ptr().offset(our_off);
        (u, new_ptr)
    }
}
// Borrowed read-only storage, backing `ArrayView`.
unsafe impl<'a, A> Data for &'a [A] {
    type Elem = A;
    fn slice(&self) -> &[A] { self }
}
unsafe impl<'a, A> DataClone for &'a [A] {
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem)
        -> (Self, *mut Self::Elem)
    {
        // Copying a shared borrow aliases the same buffer; keep `ptr`.
        (*self, ptr)
    }
}
// Borrowed read-write storage, backing `ArrayViewMut`.
unsafe impl<'a, A> Data for &'a mut [A] {
    type Elem = A;
    fn slice(&self) -> &[A] { self }
}
// A unique borrow is exclusive by construction; no `ensure_unique` needed.
unsafe impl<'a, A> DataMut for &'a mut [A] {
    fn slice_mut(&mut self) -> &mut [A] { self }
}
/// Array representation that is a unique or shared owner of its data.
pub unsafe trait DataOwned : Data {
    /// Wrap a vector of elements as this storage type (no copying).
    fn new(elements: Vec<Self::Elem>) -> Self;
    /// Convert into reference-counted (copy-on-write) storage.
    fn into_shared(self) -> Rc<Vec<Self::Elem>>;
}
/// Array representation that is a lightweight view.
pub unsafe trait DataShared : Clone + DataClone { }
// Marker impls: `Rc` storage and borrowed slices can both be cloned
// cheaply without copying the elements themselves.
unsafe impl<A> DataShared for Rc<Vec<A>> { }
unsafe impl<'a, A> DataShared for &'a [A] { }
unsafe impl<A> DataOwned for Vec<A> {
    fn new(elements: Vec<A>) -> Self { elements }
    // Moving the Vec into an Rc keeps the heap buffer; no element copies.
    fn into_shared(self) -> Rc<Vec<A>> { Rc::new(self) }
}
unsafe impl<A> DataOwned for Rc<Vec<A>> {
    fn new(elements: Vec<A>) -> Self { Rc::new(elements) }
    // Already shared; identity conversion.
    fn into_shared(self) -> Rc<Vec<A>> { self }
}
/// Array where the data is reference counted and copy on write, it
/// can act as both an owner of the data as well as a lightweight view.
pub type Array<A, D> = ArrayBase<Rc<Vec<A>>, D>;
/// Array where the data is owned uniquely.
pub type OwnedArray<A, D> = ArrayBase<Vec<A>, D>;
/// A lightweight array view.
///
/// `ArrayView` implements `IntoIterator`.
pub type ArrayView<'a, A, D> = ArrayBase<&'a [A], D>;
/// A lightweight read-write array view.
///
/// `ArrayViewMut` implements `IntoIterator`.
pub type ArrayViewMut<'a, A, D> = ArrayBase<&'a mut [A], D>;
impl<S: DataClone, D: Clone> Clone for ArrayBase<S, D>
{
    fn clone(&self) -> ArrayBase<S, D> {
        // Delegate to the storage so `ptr` is translated into the cloned
        // buffer (or preserved, for shared/borrowed storage).
        unsafe {
            let (data, ptr) = self.data.clone_with_ptr(self.ptr);
            ArrayBase {
                data: data,
                ptr: ptr,
                dim: self.dim.clone(),
                strides: self.strides.clone(),
            }
        }
    }
}
// Only view-like storage (e.g. `&[A]`) is `Copy`, so copying never clones data.
impl<S: DataClone + Copy, D: Copy> Copy for ArrayBase<S, D> { }
/// Constructor methods for single dimensional `ArrayBase`.
impl<S> ArrayBase<S, Ix>
    where S: DataOwned,
{
    /// Create a one-dimensional array from a vector (no allocation needed).
    pub fn from_vec(v: Vec<S::Elem>) -> ArrayBase<S, Ix> {
        // Safe: the dimension is exactly the vector's length.
        unsafe {
            Self::from_vec_dim(v.len() as Ix, v)
        }
    }
    /// Create a one-dimensional array from an iterable.
    pub fn from_iter<I: IntoIterator<Item=S::Elem>>(iterable: I) -> ArrayBase<S, Ix> {
        Self::from_vec(iterable.into_iter().collect())
    }
    /// Create a one-dimensional array from inclusive interval
    /// `[start, end]` with `n` elements. `F` must be a floating point type.
    pub fn linspace<F>(start: F, end: F, n: usize) -> ArrayBase<S, Ix>
        where S: Data<Elem=F>,
              F: libnum::Float,
              usize: it::misc::ToFloat<F>,
    {
        Self::from_iter(it::linspace(start, end, n))
    }
    /// Create a one-dimensional array from interval `[start, end)`
    #[cfg_attr(has_deprecated, deprecated(note="use ArrayBase::linspace() instead"))]
    pub fn range(start: f32, end: f32) -> ArrayBase<S, Ix>
        where S: Data<Elem=f32>,
    {
        // One element per unit step; span is n - 1 so the last element is
        // just below `end`.
        let n = (end - start) as usize;
        let span = if n > 0 { (n - 1) as f32 } else { 0. };
        Self::linspace(start, start + span, n)
    }
}
/// Constructor methods for `ArrayBase`.
impl<S, A, D> ArrayBase<S, D>
    where S: DataOwned<Elem=A>,
          D: Dimension,
{
    /// Construct an array with copies of `elem`, dimension `dim`.
    ///
    /// ```
    /// use ndarray::Array;
    /// use ndarray::arr3;
    ///
    /// let a = Array::from_elem((2, 2, 2), 1.);
    ///
    /// assert!(
    ///     a == arr3(&[[[1., 1.],
    ///                  [1., 1.]],
    ///                 [[1., 1.],
    ///                  [1., 1.]]])
    /// );
    /// ```
    pub fn from_elem(dim: D, elem: A) -> ArrayBase<S, D> where A: Clone
    {
        // Safe: the vector is allocated with exactly dim.size() elements.
        let v = vec![elem; dim.size()];
        unsafe {
            Self::from_vec_dim(dim, v)
        }
    }
    /// Construct an array with zeros, dimension `dim`.
    pub fn zeros(dim: D) -> ArrayBase<S, D> where A: Clone + libnum::Zero
    {
        Self::from_elem(dim, libnum::zero())
    }
    /// Construct an array with default values, dimension `dim`.
    pub fn default(dim: D) -> ArrayBase<S, D>
        where A: Default
    {
        let v = (0..dim.size()).map(|_| A::default()).collect();
        unsafe {
            Self::from_vec_dim(dim, v)
        }
    }
    /// Create an array from a vector (with no allocation needed).
    ///
    /// Unsafe because dimension is unchecked, and must be correct
    /// (i.e. `dim.size() == v.len()`), otherwise the array would index
    /// out of bounds.
    pub unsafe fn from_vec_dim(dim: D, mut v: Vec<A>) -> ArrayBase<S, D>
    {
        debug_assert!(dim.size() == v.len());
        ArrayBase {
            // Grab the pointer before `v` is moved into the storage.
            ptr: v.as_mut_ptr(),
            data: DataOwned::new(v),
            strides: dim.default_strides(),
            dim: dim
        }
    }
}
// Private iterator plumbing for read-only views.
impl<'a, A, D> ArrayView<'a, A, D>
    where D: Dimension,
{
    /// Consume the view into the raw counted iterator over (ptr, dim, strides).
    #[inline]
    fn into_base_iter(self) -> Baseiter<'a, A, D> {
        unsafe {
            Baseiter::new(self.ptr, self.dim.clone(), self.strides.clone())
        }
    }
    #[inline]
    fn into_elements_base(self) -> ElementsBase<'a, A, D> {
        ElementsBase { inner: self.into_base_iter() }
    }
    /// Build the public iterator, preferring the fast slice path when the
    /// view is contiguous in standard layout.
    fn into_iter_(self) -> Elements<'a, A, D> {
        Elements {
            inner:
                if let Some(slc) = self.into_slice() {
                    ElementsRepr::Slice(slc.iter())
                } else {
                    ElementsRepr::Counted(self.into_elements_base())
                }
        }
    }
    /// Return the view as a single slice with lifetime `'a` if it is
    /// contiguous in standard (C) layout, else `None`.
    fn into_slice(&self) -> Option<&'a [A]> {
        if self.is_standard_layout() {
            unsafe {
                Some(slice::from_raw_parts(self.ptr, self.len()))
            }
        } else {
            None
        }
    }
}
// Private iterator plumbing for read-write views (mirror of ArrayView's).
impl<'a, A, D> ArrayViewMut<'a, A, D>
    where D: Dimension,
{
    #[inline]
    fn into_base_iter(self) -> Baseiter<'a, A, D> {
        unsafe {
            Baseiter::new(self.ptr, self.dim.clone(), self.strides.clone())
        }
    }
    #[inline]
    fn into_elements_base(self) -> ElementsBaseMut<'a, A, D> {
        ElementsBaseMut { inner: self.into_base_iter() }
    }
    /// Build the public mutable iterator; contiguous standard-layout views
    /// iterate as a plain mutable slice.
    fn into_iter_(self) -> ElementsMut<'a, A, D> {
        ElementsMut {
            inner:
                if self.is_standard_layout() {
                    let slc = unsafe {
                        slice::from_raw_parts_mut(self.ptr, self.len())
                    };
                    ElementsRepr::Slice(slc.iter_mut())
                } else {
                    ElementsRepr::Counted(self.into_elements_base())
                }
        }
    }
    /// Return the view as one mutable slice if contiguous, else `None`.
    /// (Currently unused — kept for symmetry with `into_slice`.)
    fn _into_slice_mut(self) -> Option<&'a mut [A]>
    {
        if self.is_standard_layout() {
            unsafe {
                Some(slice::from_raw_parts_mut(self.ptr, self.len()))
            }
        } else {
            None
        }
    }
}
impl<A, S, D> ArrayBase<S, D> where S: Data<Elem=A>, D: Dimension
{
    /// Return the total number of elements in the Array.
    /// (The product of all axis lengths.)
    pub fn len(&self) -> usize
    {
        self.dim.size()
    }
    /// Return the shape of the array, as the dimension value `D`.
    pub fn dim(&self) -> D {
        self.dim.clone()
    }
    /// Return the shape of the array as a slice (one length per axis).
    pub fn shape(&self) -> &[Ix] {
        self.dim.slice()
    }
    /// Return the strides of the array
    pub fn strides(&self) -> &[Ixs] {
        let s = self.strides.slice();
        // reinterpret unsigned integer as signed; `Ix` and `Ixs` have the
        // same size and layout, so the raw-parts cast is a pure reinterpret.
        unsafe {
            slice::from_raw_parts(s.as_ptr() as *const _, s.len())
        }
    }
    /// Return a read-only view of the array
    pub fn view(&self) -> ArrayView<A, D> {
        debug_assert!(self.pointer_is_inbounds());
        // The view borrows the whole backing buffer but keeps this array's
        // ptr/dim/strides, so it covers exactly the same elements.
        ArrayView {
            ptr: self.ptr,
            dim: self.dim.clone(),
            strides: self.strides.clone(),
            data: self.raw_data(),
        }
    }
    /// Return a read-write view of the array
    pub fn view_mut(&mut self) -> ArrayViewMut<A, D>
        where S: DataMut,
    {
        // Break sharing (copy-on-write) before exposing mutable access.
        self.ensure_unique();
        ArrayViewMut {
            ptr: self.ptr,
            dim: self.dim.clone(),
            strides: self.strides.clone(),
            data: self.data.slice_mut(),
        }
    }
    /// Return a uniquely owned copy of the array
    pub fn to_owned(&self) -> OwnedArray<A, D>
        where A: Clone
    {
        // Fast path: contiguous standard layout can be memcpy-style copied;
        // otherwise iterate in logical order.
        let data = if let Some(slc) = self.as_slice() {
            slc.to_vec()
        } else {
            self.iter().cloned().collect()
        };
        // Safe: `data` has exactly self.len() elements, in standard layout.
        unsafe {
            ArrayBase::from_vec_dim(self.dim.clone(), data)
        }
    }
    /// Return a shared ownership (copy on write) array.
    pub fn to_shared(&self) -> Array<A, D>
        where A: Clone
    {
        // FIXME: Avoid copying if it’s already an Array.
        self.to_owned().into_shared()
    }
    /// Turn the array into a shared ownership (copy on write) array,
    /// without any copying.
    pub fn into_shared(self) -> Array<A, D>
        where S: DataOwned,
    {
        // `DataOwned::into_shared` moves the buffer; its address is
        // unchanged so `ptr`, `dim` and `strides` carry over verbatim.
        let data = self.data.into_shared();
        ArrayBase {
            data: data,
            ptr: self.ptr,
            dim: self.dim,
            strides: self.strides,
        }
    }
    /// Return an iterator of references to the elements of the array.
    ///
    /// Iterator element type is `&A`.
    pub fn iter(&self) -> Elements<A, D> {
        debug_assert!(self.pointer_is_inbounds());
        self.view().into_iter_()
    }
    /// Return an iterator of references to the elements of the array.
    ///
    /// Iterator element type is `(D, &A)`.
    pub fn indexed_iter(&self) -> Indexed<A, D> {
        Indexed(self.view().into_elements_base())
    }
/// Return an iterator of mutable references to the elements of the array.
///
/// Iterator element type is `&mut A`.
pub fn iter_mut(&mut self) -> ElementsMut<A, D>
where S: DataMut,
{
self.ensure_unique();
self.view_mut().into_iter_()
}
    /// Return an iterator of indexes and mutable references to the elements of the array.
    ///
    /// Iterator element type is `(D, &mut A)`.
    pub fn indexed_iter_mut(&mut self) -> IndexedMut<A, D>
        where S: DataMut,
    {
        // view_mut() breaks sharing before mutable iteration starts.
        IndexedMut(self.view_mut().into_elements_base())
    }
    /// Return a sliced array.
    ///
    /// See [*Slicing*](#slicing) for full documentation.
    ///
    /// [`D::SliceArg`] is typically a fixed size array of `Si`, with one
    /// element per axis.
    ///
    /// [`D::SliceArg`]: trait.Dimension.html#associatedtype.SliceArg
    ///
    /// **Panics** if an index is out of bounds or stride is zero.<br>
    /// (**Panics** if `D` is `Vec` and `indexes` does not match the number of array axes.)
    pub fn slice(&self, indexes: &D::SliceArg) -> Self
        where S: DataShared
    {
        // Cheap clone (shared storage), then slice the clone in place.
        let mut arr = self.clone();
        arr.islice(indexes);
        arr
    }
    /// Slice the array’s view in place.
    ///
    /// [`D::SliceArg`] is typically a fixed size array of `Si`, with one
    /// element per axis.
    ///
    /// [`D::SliceArg`]: trait.Dimension.html#associatedtype.SliceArg
    ///
    /// **Panics** if an index is out of bounds or stride is zero.<br>
    /// (**Panics** if `D` is `Vec` and `indexes` does not match the number of array axes.)
    pub fn islice(&mut self, indexes: &D::SliceArg)
    {
        // do_slices rewrites dim/strides and returns the element offset of
        // the slice's first element; shift `ptr` accordingly.
        let offset = Dimension::do_slices(&mut self.dim, &mut self.strides, indexes);
        unsafe {
            self.ptr = self.ptr.offset(offset);
        }
    }
    /// Return an iterator over a sliced view.
    ///
    /// [`D::SliceArg`] is typically a fixed size array of `Si`, with one
    /// element per axis.
    ///
    /// [`D::SliceArg`]: trait.Dimension.html#associatedtype.SliceArg
    ///
    /// **Panics** if an index is out of bounds or stride is zero.<br>
    /// (**Panics** if `D` is `Vec` and `indexes` does not match the number of array axes.)
    pub fn slice_iter(&self, indexes: &D::SliceArg) -> Elements<A, D>
    {
        // Slice a temporary view in place, then turn it into an iterator.
        let mut it = self.view();
        it.islice(indexes);
        it.into_iter_()
    }
    /// Return a sliced read-write view of the array.
    ///
    /// [`D::SliceArg`] is typically a fixed size array of `Si`, with one
    /// element per axis.
    ///
    /// [`D::SliceArg`]: trait.Dimension.html#associatedtype.SliceArg
    ///
    /// **Panics** if an index is out of bounds or stride is zero.<br>
    /// (**Panics** if `D` is `Vec` and `indexes` does not match the number of array axes.)
    pub fn slice_mut(&mut self, indexes: &D::SliceArg) -> ArrayViewMut<A, D>
        where S: DataMut
    {
        let mut arr = self.view_mut();
        arr.islice(indexes);
        arr
    }
    /// ***Deprecated: use `.slice_mut()`***
    #[cfg_attr(has_deprecated, deprecated(note="use .slice_mut() instead"))]
    pub fn slice_iter_mut(&mut self, indexes: &D::SliceArg) -> ElementsMut<A, D>
        where S: DataMut,
    {
        // Kept only for backwards compatibility; thin wrapper over slice_mut.
        self.slice_mut(indexes).into_iter()
    }
    /// Return a reference to the element at `index`, or return `None`
    /// if the index is out of bounds.
    ///
    /// Arrays also support indexing syntax: `array[index]`.
    ///
    /// ```
    /// use ndarray::arr2;
    ///
    /// let a = arr2(&[[1., 2.],
    ///                [3., 4.]]);
    ///
    /// assert!(
    ///     a.get((0, 1)) == Some(&2.) &&
    ///     a.get((0, 2)) == None &&
    ///     a[(0, 1)] == 2.
    /// );
    /// ```
    pub fn get(&self, index: D) -> Option<&A> {
        let ptr = self.ptr;
        // stride_offset_checked returns None for out-of-bounds indexes, so
        // the pointer offset in the Some case is always valid.
        self.dim.stride_offset_checked(&self.strides, &index)
            .map(move |offset| unsafe {
                &*ptr.offset(offset)
            })
    }
    /// ***Deprecated: use .get(i)***
    #[cfg_attr(has_deprecated, deprecated(note="use .get() instead"))]
    pub fn at(&self, index: D) -> Option<&A> {
        // Backwards-compatible alias for get().
        self.get(index)
    }
    /// Return a mutable reference to the element at `index`, or return `None`
    /// if the index is out of bounds.
    pub fn get_mut(&mut self, index: D) -> Option<&mut A>
        where S: DataMut,
    {
        // Break sharing before handing out a mutable reference.
        self.ensure_unique();
        let ptr = self.ptr;
        self.dim.stride_offset_checked(&self.strides, &index)
            .map(move |offset| unsafe {
                &mut *ptr.offset(offset)
            })
    }
    /// ***Deprecated: use .get_mut(i)***
    #[cfg_attr(has_deprecated, deprecated(note="use .get_mut() instead"))]
    pub fn at_mut(&mut self, index: D) -> Option<&mut A>
        where S: DataMut,
    {
        // Backwards-compatible alias for get_mut().
        self.get_mut(index)
    }
    /// Perform *unchecked* array indexing.
    ///
    /// Return a reference to the element at `index`.
    ///
    /// **Note:** only unchecked for non-debug builds of ndarray.
    /// Caller must guarantee `index` is in bounds, otherwise behavior
    /// is undefined.
    #[inline]
    pub unsafe fn uget(&self, index: D) -> &A {
        debug_assert!(self.dim.stride_offset_checked(&self.strides, &index).is_some());
        let off = Dimension::stride_offset(&index, &self.strides);
        &*self.ptr.offset(off)
    }
    /// ***Deprecated: use `.uget()`***
    #[cfg_attr(has_deprecated, deprecated(note="use .uget() instead"))]
    #[inline]
    pub unsafe fn uchk_at(&self, index: D) -> &A {
        // Backwards-compatible alias for uget().
        self.uget(index)
    }
    /// Perform *unchecked* array indexing.
    ///
    /// Return a mutable reference to the element at `index`.
    ///
    /// **Note:** Only unchecked for non-debug builds of ndarray.<br>
    /// **Note:** The array must be uniquely held when mutating it
    /// (this method does *not* call `ensure_unique`).
    #[inline]
    pub unsafe fn uget_mut(&mut self, index: D) -> &mut A
        where S: DataMut
    {
        //debug_assert!(Rc::get_mut(&mut self.data).is_some());
        debug_assert!(self.dim.stride_offset_checked(&self.strides, &index).is_some());
        let off = Dimension::stride_offset(&index, &self.strides);
        &mut *self.ptr.offset(off)
    }
    /// ***Deprecated: use `.uget_mut()`***
    #[cfg_attr(has_deprecated, deprecated(note="use .uget_mut() instead"))]
    #[inline]
    pub unsafe fn uchk_at_mut(&mut self, index: D) -> &mut A
        where S: DataMut
    {
        // Backwards-compatible alias for uget_mut().
        self.uget_mut(index)
    }
    /// Swap axes `ax` and `bx`.
    ///
    /// This does not move any data, it just adjusts the array’s dimensions
    /// and strides.
    ///
    /// **Panics** if the axes are out of bounds.
    ///
    /// ```
    /// use ndarray::arr2;
    ///
    /// let mut a = arr2(&[[1., 2., 3.]]);
    /// a.swap_axes(0, 1);
    /// assert!(
    ///     a == arr2(&[[1.], [2.], [3.]])
    /// );
    /// ```
    pub fn swap_axes(&mut self, ax: usize, bx: usize)
    {
        // Swapping the axis length and its stride together is all a
        // transpose of two axes needs.
        self.dim.slice_mut().swap(ax, bx);
        self.strides.slice_mut().swap(ax, bx);
    }
    /// Along `axis`, select the subview `index` and return an
    /// array with that axis removed.
    ///
    /// See [*Subviews*](#subviews) for full documentation.
    ///
    /// **Panics** if `axis` or `index` is out of bounds.
    ///
    /// ```
    /// use ndarray::{arr1, arr2};
    ///
    /// let a = arr2(&[[1., 2.],    // -- axis 0, row 0
    ///                [3., 4.],    // -- axis 0, row 1
    ///                [5., 6.]]);  // -- axis 0, row 2
    /// //               \   \
    /// //                \   axis 1, column 1
    /// //                 axis 1, column 0
    /// assert!(
    ///     a.subview(0, 1) == arr1(&[3., 4.]) &&
    ///     a.subview(1, 1) == arr1(&[2., 4., 6.])
    /// );
    /// ```
    pub fn subview(&self, axis: usize, index: Ix) -> ArrayBase<S, <D as RemoveAxis>::Smaller>
        where D: RemoveAxis,
              S: DataShared,
    {
        // Collapse the axis in a cheap clone, then drop it from dim/strides.
        let mut res = self.clone();
        res.isubview(axis, index);
        // don't use reshape -- we always know it will fit the size,
        // and we can use remove_axis on the strides as well
        ArrayBase {
            data: res.data,
            ptr: res.ptr,
            dim: res.dim.remove_axis(axis),
            strides: res.strides.remove_axis(axis),
        }
    }
    /// Collapse dimension `axis` into length one,
    /// and select the subview of `index` along that axis.
    ///
    /// **Panics** if `index` is past the length of the axis.
    pub fn isubview(&mut self, axis: usize, index: Ix)
    {
        // do_sub shifts `ptr` to the selected hyperplane and sets the axis
        // length to 1; the number of axes is unchanged.
        dimension::do_sub(&mut self.dim, &mut self.ptr, &self.strides, axis, index)
    }
    /// Along `axis`, select the subview `index` and return a read-write view
    /// with the axis removed.
    ///
    /// **Panics** if `axis` or `index` is out of bounds.
    ///
    /// ```
    /// use ndarray::{arr2, aview2};
    ///
    /// let mut a = arr2(&[[1., 2.],
    ///                    [3., 4.]]);
    ///
    /// a.subview_mut(1, 1).iadd_scalar(&10.);
    ///
    /// assert!(
    ///     a == aview2(&[[1., 12.],
    ///                   [3., 14.]])
    /// );
    /// ```
    pub fn subview_mut(&mut self, axis: usize, index: Ix)
        -> ArrayViewMut<A, D::Smaller>
        where S: DataMut,
              D: RemoveAxis,
    {
        // Same shape surgery as `subview`, on a mutable view.
        let mut res = self.view_mut();
        res.isubview(axis, index);
        ArrayBase {
            data: res.data,
            ptr: res.ptr,
            dim: res.dim.remove_axis(axis),
            strides: res.strides.remove_axis(axis),
        }
    }
    /// ***Deprecated: use `.subview_mut()`***
    #[cfg_attr(has_deprecated, deprecated(note="use .subview_mut() instead"))]
    pub fn sub_iter_mut(&mut self, axis: usize, index: Ix)
        -> ElementsMut<A, D>
        where S: DataMut,
    {
        // Collapse the axis on a mutable view and iterate it; the axis is
        // kept (with length 1), unlike subview_mut.
        let mut it = self.view_mut();
        dimension::do_sub(&mut it.dim, &mut it.ptr, &it.strides, axis, index);
        it.into_iter_()
    }
    /// Return an iterator that traverses over all dimensions but the innermost,
    /// and yields each inner row.
    ///
    /// Iterator element is `ArrayView<A, Ix>` (1D array view).
    ///
    /// ```
    /// use ndarray::arr3;
    /// let a = arr3(&[[[ 0,  1,  2],    // -- row 0, 0
    ///                 [ 3,  4,  5]],   // -- row 0, 1
    ///                [[ 6,  7,  8],    // -- row 1, 0
    ///                 [ 9, 10, 11]]]); // -- row 1, 1
    /// // `inner_iter` yields the four inner rows of the 3D array.
    /// let mut row_sums = a.inner_iter().map(|v| v.scalar_sum());
    /// assert_eq!(row_sums.collect::<Vec<_>>(), vec![3, 12, 21, 30]);
    /// ```
    pub fn inner_iter(&self) -> InnerIter<A, D> {
        iterators::new_outer(self.view())
    }
    /// Return an iterator that traverses over all dimensions but the innermost,
    /// and yields each inner row.
    ///
    /// Iterator element is `ArrayViewMut<A, Ix>` (1D read-write array view).
    pub fn inner_iter_mut(&mut self) -> InnerIterMut<A, D>
        where S: DataMut
    {
        // view_mut() breaks sharing first.
        iterators::new_outer_mut(self.view_mut())
    }
// Return (length, stride) for diagonal
fn diag_params(&self) -> (Ix, Ixs)
{
/* empty shape has len 1 */
let len = self.dim.slice().iter().map(|x| *x).min().unwrap_or(1);
let stride = self.strides.slice().iter()
.map(|x| *x as Ixs)
.fold(0, |sum, s| sum + s);
return (len, stride)
}
    /// Return an iterator over the diagonal elements of the array.
    ///
    /// The diagonal is simply the sequence indexed by *(0, 0, .., 0)*,
    /// *(1, 1, ..., 1)* etc as long as all axes have elements.
    pub fn diag_iter(&self) -> Elements<A, Ix>
    {
        // Build a 1-D view over the diagonal: same data and origin,
        // length = shortest axis, stride = sum of all strides.
        let (len, stride) = self.diag_params();
        let view = ArrayBase {
            data: self.raw_data(),
            ptr: self.ptr,
            dim: len,
            strides: stride as Ix,
        };
        view.into_iter_()
    }
    /// Return the diagonal as a one-dimensional array.
    pub fn diag(&self) -> ArrayBase<S, Ix>
        where S: DataShared,
    {
        let (len, stride) = self.diag_params();
        // Shared storage clone (cheap); the stride is stored as the
        // unsigned Ix per the crate's stride convention.
        ArrayBase {
            data: self.data.clone(),
            ptr: self.ptr,
            dim: len,
            strides: stride as Ix,
        }
    }
    /// Return a read-write view over the diagonal elements of the array.
    pub fn diag_mut(&mut self) -> ArrayViewMut<A, Ix>
        where S: DataMut,
    {
        // Break sharing before building the mutable 1-D diagonal view.
        self.ensure_unique();
        let (len, stride) = self.diag_params();
        ArrayViewMut {
            ptr: self.ptr,
            data: self.raw_data_mut(),
            dim: len,
            strides: stride as Ix,
        }
    }
    /// ***Deprecated: use `.diag_mut()`***
    #[cfg_attr(has_deprecated, deprecated(note="use .diag_mut() instead"))]
    pub fn diag_iter_mut(&mut self) -> ElementsMut<A, Ix>
        where S: DataMut,
    {
        // Backwards-compatible wrapper over diag_mut.
        self.diag_mut().into_iter_()
    }
    /// Make the array unshared.
    ///
    /// This method is mostly only useful with unsafe code.
    fn ensure_unique(&mut self)
        where S: DataMut
    {
        // The storage may swap buffers (copy-on-write); check the pointer
        // invariant both before and after.
        debug_assert!(self.pointer_is_inbounds());
        S::ensure_unique(self);
        debug_assert!(self.pointer_is_inbounds());
    }
#[cfg(feature = "rblas")]
/// If the array is not in the standard layout, copy all elements
/// into the standard layout so that the array is C-contiguous.
fn ensure_standard_layout(&mut self)
where S: DataOwned,
A: Clone
{
if !self.is_standard_layout() {
let mut v: Vec<A> = self.iter().cloned().collect();
self.ptr = v.as_mut_ptr();
self.data = DataOwned::new(v);
self.strides = self.dim.default_strides();
}
}
/*
/// Set the array to the standard layout, without adjusting elements.
/// Useful for overwriting.
fn force_standard_layout(&mut self) {
self.strides = self.dim.default_strides();
}
*/
/// Return `true` if the array data is laid out in contiguous “C order” in
/// memory (where the last index is the most rapidly varying).
///
/// Return `false` otherwise, i.e the array is possibly not
/// contiguous in memory, it has custom strides, etc.
pub fn is_standard_layout(&self) -> bool
{
let defaults = self.dim.default_strides();
if self.strides == defaults {
return true;
}
// check all dimensions -- a dimension of length 1 can have unequal strides
for (&dim, (&s, &ds)) in zipsl(self.dim.slice(),
zipsl(self.strides(), defaults.slice()))
{
if dim != 1 && s != (ds as Ixs) {
return false;
}
}
true
}
    /// Return the array’s data as a slice, if it is contiguous and
    /// the element order corresponds to the memory order. Return `None` otherwise.
    pub fn as_slice(&self) -> Option<&[A]> {
        if self.is_standard_layout() {
            // Standard layout implies the len() elements starting at `ptr`
            // are contiguous.
            unsafe {
                Some(slice::from_raw_parts(self.ptr, self.len()))
            }
        } else {
            None
        }
    }
    /// Return the array’s data as a slice, if it is contiguous and
    /// the element order corresponds to the memory order. Return `None` otherwise.
    pub fn as_slice_mut(&mut self) -> Option<&mut [A]>
        where S: DataMut
    {
        if self.is_standard_layout() {
            // Break sharing only when we will actually hand out the slice.
            self.ensure_unique();
            unsafe {
                Some(slice::from_raw_parts_mut(self.ptr, self.len()))
            }
        } else {
            None
        }
    }
    /// Transform the array into `shape`; any shape with the same number of
    /// elements is accepted.
    ///
    /// May clone all elements if needed to arrange elements in standard
    /// layout (and break sharing).
    ///
    /// **Panics** if shapes are incompatible.
    ///
    /// ```
    /// use ndarray::{arr1, arr2};
    ///
    /// assert!(
    ///     arr1(&[1., 2., 3., 4.]).reshape((2, 2))
    ///     == arr2(&[[1., 2.],
    ///               [3., 4.]])
    /// );
    /// ```
    pub fn reshape<E: Dimension>(&self, shape: E) -> ArrayBase<S, E>
        where S: DataShared + DataOwned, A: Clone,
    {
        if shape.size() != self.dim.size() {
            panic!("Incompatible shapes in reshape, attempted from: {:?}, to: {:?}",
                   self.dim.slice(), shape.slice())
        }
        // Check if contiguous, if not => copy all, else just adapt strides
        if self.is_standard_layout() {
            // Same memory order: reuse the buffer, only dim/strides change.
            let cl = self.clone();
            ArrayBase {
                data: cl.data,
                ptr: cl.ptr,
                strides: shape.default_strides(),
                dim: shape,
            }
        } else {
            // Non-contiguous: collect in logical order into a new buffer.
            let v = self.iter().map(|x| x.clone()).collect::<Vec<A>>();
            unsafe {
                ArrayBase::from_vec_dim(shape, v)
            }
        }
    }
    /// Transform the array into `shape`; any shape with the same number of
    /// elements is accepted, but the source array or view must be
    /// contiguous, otherwise we cannot rearrange the dimension.
    ///
    /// **Errors** if the shapes don't have the same number of elements.<br>
    /// **Errors** if the input array is not c-contiguous (this will be
    /// slightly improved in the future).
    ///
    /// ```
    /// use ndarray::{aview1, aview2};
    ///
    /// assert!(
    ///     aview1(&[1., 2., 3., 4.]).into_shape((2, 2)).unwrap()
    ///     == aview2(&[[1., 2.],
    ///                 [3., 4.]])
    /// );
    /// ```
    pub fn into_shape<E>(self, shape: E) -> Result<ArrayBase<S, E>, ShapeError>
        where E: Dimension
    {
        if shape.size() != self.dim.size() {
            return Err(ShapeError::IncompatibleShapes(
                self.dim.slice().to_vec().into_boxed_slice(),
                shape.slice().to_vec().into_boxed_slice()));
        }
        // Check if contiguous, if not => copy all, else just adapt strides
        if self.is_standard_layout() {
            // By-move variant of reshape: reuse the buffer as-is.
            Ok(ArrayBase {
                data: self.data,
                ptr: self.ptr,
                strides: shape.default_strides(),
                dim: shape,
            })
        } else {
            // Unlike `reshape` we may not clone (S is not DataOwned here),
            // so a non-contiguous source is an error.
            Err(ShapeError::IncompatibleLayout)
        }
    }
    /// Act like a larger size and/or shape array by *broadcasting*
    /// into a larger shape, if possible.
    ///
    /// Return `None` if shapes can not be broadcast together.
    ///
    /// ***Background***
    ///
    ///  * Two axes are compatible if they are equal, or one of them is 1.
    ///  * In this instance, only the axes of the smaller side (self) can be 1.
    ///
    /// Compare axes beginning with the *last* axis of each shape.
    ///
    /// For example (1, 2, 4) can be broadcast into (7, 6, 2, 4)
    /// because its axes are either equal or 1 (or missing);
    /// while (2, 2) can *not* be broadcast into (2, 4).
    ///
    /// The implementation creates a view with strides set to zero for the
    /// axes that are to be repeated.
    ///
    /// The broadcasting documentation for Numpy has more information.
    ///
    /// ```
    /// use ndarray::arr1;
    ///
    /// assert!(
    ///     arr1(&[1., 0.]).broadcast((10, 2)).unwrap().dim()
    ///     == (10, 2)
    /// );
    /// ```
    pub fn broadcast<E>(&self, dim: E)
        -> Option<ArrayView<A, E>>
        where E: Dimension
    {
        /// Return new stride when trying to grow `from` into shape `to`
        ///
        /// Broadcasting works by returning a "fake stride" where elements
        /// to repeat are in axes with 0 stride, so that several indexes point
        /// to the same element.
        ///
        /// **Note:** Cannot be used for mutable iterators, since repeating
        /// elements would create aliasing pointers.
        fn upcast<D: Dimension, E: Dimension>(to: &D, from: &E, stride: &E) -> Option<D> {
            // `new_stride` starts as a copy of `to`: each slot first holds
            // the target axis length and is overwritten with a stride below.
            let mut new_stride = to.clone();
            // begin at the back (the least significant dimension)
            // size of the axis has to either agree or `from` has to be 1
            if to.ndim() < from.ndim() {
                return None
            }
            {
                let mut new_stride_iter = new_stride.slice_mut().iter_mut().rev();
                for ((er, es), dr) in from.slice().iter().rev()
                    .zip(stride.slice().iter().rev())
                    .zip(new_stride_iter.by_ref())
                {
                    /* update strides */
                    if *dr == *er {
                        /* keep stride */
                        *dr = *es;
                    } else if *er == 1 {
                        /* dead dimension, zero stride */
                        *dr = 0
                    } else {
                        return None;
                    }
                }
                /* set remaining strides to zero */
                // (the leading axes `from` doesn't have are pure repeats)
                for dr in new_stride_iter {
                    *dr = 0;
                }
            }
            Some(new_stride)
        }
        // Note: zero strides are safe precisely because we return a read-only view
        let broadcast_strides =
            match upcast(&dim, &self.dim, &self.strides) {
                Some(st) => st,
                None => return None,
            };
        Some(ArrayView {
            data: self.raw_data(),
            ptr: self.ptr,
            dim: dim,
            strides: broadcast_strides,
        })
    }
    #[cfg_attr(has_deprecated, deprecated(note="use .broadcast() instead"))]
    /// ***Deprecated: Use `.broadcast()` instead.***
    pub fn broadcast_iter<E>(&self, dim: E) -> Option<Elements<A, E>>
        where E: Dimension,
    {
        // Backwards-compatible wrapper: broadcast, then iterate the view.
        self.broadcast(dim).map(|v| v.into_iter_())
    }
    /// Broadcast to `dim`, panicking (with both shapes in the message)
    /// if the shapes are not compatible.
    #[inline]
    fn broadcast_unwrap<E>(&self, dim: E) -> ArrayView<A, E>
        where E: Dimension,
    {
        match self.broadcast(dim.clone()) {
            Some(it) => it,
            None => Self::broadcast_panic(&self.dim, &dim),
        }
    }
    // Out-of-line cold path for broadcast failures.
    #[inline(never)]
    fn broadcast_panic<E: Dimension>(from: &D, to: &E) -> ! {
        panic!("Could not broadcast array from shape: {:?} to: {:?}",
               from.slice(), to.slice())
    }
    /// Return a slice of the array’s backing data in memory order.
    ///
    /// **Note:** Data memory order may not correspond to the index order
    /// of the array. Nor is the raw data slice restricted to just the
    /// Array’s view.<br>
    /// **Note:** the slice may be empty.
    pub fn raw_data(&self) -> &[A] {
        self.data.slice()
    }
    /// Return a mutable slice of the array’s backing data in memory order.
    ///
    /// **Note:** Data memory order may not correspond to the index order
    /// of the array. Nor is the raw data slice restricted to just the
    /// Array’s view.<br>
    /// **Note:** the slice may be empty.
    ///
    /// **Note:** The data is uniquely held and nonaliased
    /// while it is mutably borrowed.
    pub fn raw_data_mut(&mut self) -> &mut [A]
        where S: DataMut,
    {
        // Break sharing before exposing the buffer mutably.
        self.ensure_unique();
        self.data.slice_mut()
    }
    /// Debug-time invariant: `ptr` lies within (or one past the end of)
    /// the backing buffer.
    fn pointer_is_inbounds(&self) -> bool {
        let slc = self.data.slice();
        if slc.is_empty() {
            // special case for data-less views
            return true;
        }
        let ptr = slc.as_ptr() as *mut _;
        // `end` is one-past-the-end, which `ptr` may legally equal.
        let end = unsafe {
            ptr.offset(slc.len() as isize)
        };
        self.ptr >= ptr && self.ptr <= end
    }
    /// Perform an elementwise assignment to `self` from `rhs`.
    ///
    /// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
    ///
    /// **Panics** if broadcasting isn’t possible.
    pub fn assign<E: Dimension, S2>(&mut self, rhs: &ArrayBase<S2, E>)
        where S: DataMut,
              A: Clone,
              S2: Data<Elem=A>,
    {
        self.zip_mut_with(rhs, |x, y| *x = y.clone());
    }
    /// Perform an elementwise assignment to `self` from scalar `x`.
    pub fn assign_scalar(&mut self, x: &A)
        where S: DataMut, A: Clone,
    {
        self.unordered_foreach_mut(move |elt| *elt = x.clone());
    }
    /// Apply closure `f` to each element in the array, in whatever
    /// order is the fastest to visit.
    fn unordered_foreach_mut<F>(&mut self, mut f: F)
        where S: DataMut,
              F: FnMut(&mut A)
    {
        // Fast path: contiguous standard layout walks a flat slice.
        if let Some(slc) = self.as_slice_mut() {
            for elt in slc {
                f(elt);
            }
            return;
        }
        // Fallback: visit row by row via the inner (1-D) views.
        for row in self.inner_iter_mut() {
            for elt in row {
                f(elt);
            }
        }
    }
    /// Lock-step traversal of `self` and an equal-shaped `rhs`, calling
    /// `f(&mut a, &b)` for each element pair. Precondition: shapes agree.
    fn zip_with_mut_same_shape<B, S2, E, F>(&mut self, rhs: &ArrayBase<S2, E>, mut f: F)
        where S: DataMut,
              S2: Data<Elem=B>,
              E: Dimension,
              F: FnMut(&mut A, &B)
    {
        debug_assert_eq!(self.shape(), rhs.shape());
        // Fast path: both sides contiguous — walk two flat slices.
        if let Some(self_s) = self.as_slice_mut() {
            if let Some(rhs_s) = rhs.as_slice() {
                // Equal shapes imply equal lengths; min() is belt-and-braces
                // and lets the indexing below elide bounds checks.
                let len = cmp::min(self_s.len(), rhs_s.len());
                let s = &mut self_s[..len];
                let r = &rhs_s[..len];
                for i in 0..len {
                    f(&mut s[i], &r[i]);
                }
                return;
            }
        }
        // otherwise, fall back to the outer iter
        self.zip_with_mut_outer_iter(rhs, f);
    }
    /// Row-by-row lock-step traversal (used when the arrays are not both
    /// contiguous as a whole). Precondition: shapes agree.
    #[inline(always)]
    fn zip_with_mut_outer_iter<B, S2, E, F>(&mut self, rhs: &ArrayBase<S2, E>, mut f: F)
        where S: DataMut,
              S2: Data<Elem=B>,
              E: Dimension,
              F: FnMut(&mut A, &B)
    {
        debug_assert_eq!(self.shape(), rhs.shape());
        // otherwise, fall back to the outer iter
        // Per-row fast path: rows themselves may still be contiguous.
        // Once one row fails the slice test, stop retrying (all rows share
        // the same strides, so the rest would fail too).
        let mut try_slices = true;
        let mut rows = self.inner_iter_mut().zip(rhs.inner_iter());
        for (mut s_row, r_row) in &mut rows {
            if try_slices {
                if let Some(self_s) = s_row.as_slice_mut() {
                    if let Some(rhs_s) = r_row.as_slice() {
                        let len = cmp::min(self_s.len(), rhs_s.len());
                        let s = &mut self_s[..len];
                        let r = &rhs_s[..len];
                        for i in 0..len {
                            f(&mut s[i], &r[i]);
                        }
                        continue;
                    }
                }
                try_slices = false;
            }
            // FIXME: Regular .zip() is slow
            for (y, x) in s_row.iter_mut().zip(r_row) {
                f(y, x);
            }
        }
    }
// FIXME: Guarantee the order here or not?
/// Traverse two arrays in unspecified order, in lock step,
/// calling the closure `f` on each element pair.
///
/// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
///
/// **Panics** if broadcasting isn’t possible.
#[inline]
pub fn zip_mut_with<B, S2, E, F>(&mut self, rhs: &ArrayBase<S2, E>, mut f: F)
    where S: DataMut,
          S2: Data<Elem=B>,
          E: Dimension,
          F: FnMut(&mut A, &B)
{
    if self.dim.ndim() == rhs.dim.ndim() && self.shape() == rhs.shape() {
        // Identical shapes: no broadcasting machinery needed.
        self.zip_with_mut_same_shape(rhs, f);
    } else if rhs.dim.ndim() == 0 {
        // Skip broadcast from 0-dim array
        // FIXME: Order
        // NOTE(review): dereferencing rhs.ptr assumes a 0-dim array
        // always holds exactly one valid element -- confirm invariant.
        unsafe {
            let rhs_elem = &*rhs.ptr;
            let f_ = &mut f;
            self.unordered_foreach_mut(move |elt| f_(elt, rhs_elem));
        }
    } else {
        // General case: broadcast rhs to self's shape (panics if the
        // shapes are incompatible), then traverse in lock step.
        let rhs_broadcast = rhs.broadcast_unwrap(self.dim());
        self.zip_with_mut_outer_iter(&rhs_broadcast, f);
    }
}
/// Traverse the array elements in order and apply a fold,
/// returning the resulting value.
pub fn fold<'a, F, B>(&'a self, init: B, mut f: F) -> B
    where F: FnMut(B, &'a A) -> B, A: 'a
{
    // Contiguous storage: fold over one flat slice.
    if let Some(slc) = self.as_slice() {
        return slc.iter().fold(init, f);
    }
    // Otherwise accumulate row by row.
    let mut acc = init;
    for row in self.inner_iter() {
        for elt in row {
            acc = f(acc, elt);
        }
    }
    acc
}
/// Apply `f` elementwise and return a new array with
/// the results.
///
/// Return an array with the same shape as *self*.
///
/// ```
/// use ndarray::arr2;
///
/// let a = arr2(&[[ 0., 1.],
///                [-1., 2.]]);
/// assert!(
///     a.map(|x| *x >= 1.0)
///     == arr2(&[[false, true],
///               [false, true]])
/// );
/// ```
pub fn map<'a, B, F>(&'a self, f: F) -> OwnedArray<B, D>
    where F: FnMut(&'a A) -> B,
          A: 'a,
{
    // Collect the mapped elements in logical order; the result has
    // exactly `self.dim.size()` elements.
    let results: Vec<B> = self.iter().map(f).collect();
    unsafe {
        // NOTE(review): assumes from_vec_dim only requires the element
        // count to match the dimension's size -- confirm its contract.
        ArrayBase::from_vec_dim(self.dim.clone(), results)
    }
}
}
/// Return an array filled with zeros
pub fn zeros<A, D>(dim: D) -> OwnedArray<A, D>
    where A: Clone + libnum::Zero, D: Dimension,
{
    // Free-function convenience wrapper around the inherent
    // constructor.
    ArrayBase::zeros(dim)
}

/// Return a zero-dimensional array with the element `x`.
pub fn arr0<A>(x: A) -> Array<A, ()>
{
    // NOTE(review): presumably from_vec_dim requires len == dim.size();
    // the one-element vec matches dimension `()` -- confirm contract.
    unsafe { Array::from_vec_dim((), vec![x]) }
}

/// Return a one-dimensional array with elements from `xs`.
pub fn arr1<A: Clone>(xs: &[A]) -> Array<A, Ix>
{
    Array::from_vec(xs.to_vec())
}
/// Return a zero-dimensional array view borrowing `x`.
pub fn aview0<A>(x: &A) -> ArrayView<A, ()> {
    let data = unsafe {
        // SAFETY: `x` is a valid reference, so it may be viewed as a
        // slice of length 1 for the duration of the borrow.
        std::slice::from_raw_parts(x, 1)
    };
    ArrayView {
        data: data,
        // The *mut cast is never written through: &[A] storage only
        // implements Data (read-only), not DataMut.
        ptr: data.as_ptr() as *mut _,
        dim: (),
        strides: (),
    }
}

/// Return a one-dimensional array view with elements borrowing `xs`.
///
/// ```
/// use ndarray::aview1;
///
/// let data = [1.0; 1024];
///
/// // Create a 2D array view from borrowed data
/// let a2d = aview1(&data).into_shape((32, 32)).unwrap();
///
/// assert!(
///     a2d.scalar_sum() == 1024.0
/// );
/// ```
pub fn aview1<A>(xs: &[A]) -> ArrayView<A, Ix> {
    ArrayView {
        data: xs,
        ptr: xs.as_ptr() as *mut _,
        dim: xs.len() as Ix,
        strides: 1,
    }
}

/// Return a two-dimensional array view with elements borrowing `xs`.
pub fn aview2<A, V: FixedInitializer<Elem=A>>(xs: &[V]) -> ArrayView<A, (Ix, Ix)> {
    let cols = V::len();
    let rows = xs.len();
    let data = unsafe {
        // SAFETY: FixedInitializer is implemented for [A; N] only, so
        // `rows` values of V are exactly `rows * cols` contiguous A.
        std::slice::from_raw_parts(xs.as_ptr() as *const A, cols * rows)
    };
    let dim = (rows as Ix, cols as Ix);
    ArrayView {
        data: data,
        ptr: data.as_ptr() as *mut _,
        strides: dim.default_strides(),
        dim: dim,
    }
}
/// Return a one-dimensional read-write array view with elements borrowing `xs`.
///
/// ```
/// #[macro_use(s)]
/// extern crate ndarray;
///
/// use ndarray::aview_mut1;
///
/// // Create an array view over some data, then slice it and modify it.
/// fn main() {
///     let mut data = [0; 1024];
///     {
///         let mut a = aview_mut1(&mut data).into_shape((32, 32)).unwrap();
///         a.slice_mut(s![.., ..;3]).assign_scalar(&5);
///     }
///     assert_eq!(&data[..10], [5, 0, 0, 5, 0, 0, 5, 0, 0, 5]);
/// }
/// ```
pub fn aview_mut1<A>(xs: &mut [A]) -> ArrayViewMut<A, Ix> {
    ArrayViewMut {
        // Take the raw pointer and length before `xs` is moved into
        // the `data` field.
        ptr: xs.as_mut_ptr(),
        dim: xs.len() as Ix,
        strides: 1,
        data: xs,
    }
}
/// Slice or fixed-size array used for array initialization
pub unsafe trait Initializer {
    type Elem;
    fn as_init_slice(&self) -> &[Self::Elem];
    // Fixed-size implementors override this so the arrN constructors
    // can skip per-row length checks.
    fn is_fixed_size() -> bool { false }
}

/// Fixed-size array used for array initialization
pub unsafe trait FixedInitializer: Initializer {
    fn len() -> usize;
}

unsafe impl<T> Initializer for [T] {
    type Elem = T;
    fn as_init_slice(&self) -> &[T] {
        self
    }
}

// Implement Initializer / FixedInitializer for [T; 0] through [T; 16]
// (pre-const-generics, one impl per length).
macro_rules! impl_arr_init {
    (__impl $n: expr) => (
        unsafe impl<T> Initializer for [T; $n] {
            type Elem = T;
            fn as_init_slice(&self) -> &[T] { self }
            fn is_fixed_size() -> bool { true }
        }
        unsafe impl<T> FixedInitializer for [T; $n] {
            fn len() -> usize { $n }
        }
    );
    () => ();
    // Recurse: implement for the head length, then the tail.
    ($n: expr, $($m:expr,)*) => (
        impl_arr_init!(__impl $n);
        impl_arr_init!($($m,)*);
    )
}

impl_arr_init!(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,);
/// Return a two-dimensional array with elements from `xs`.
///
/// **Panics** if the slices are not all of the same length.
///
/// ```
/// use ndarray::arr2;
///
/// let a = arr2(&[[1, 2, 3],
///                [4, 5, 6]]);
/// assert!(
///     a.shape() == [2, 3]
/// );
/// ```
pub fn arr2<A: Clone, V: Initializer<Elem=A>>(xs: &[V]) -> Array<A, (Ix, Ix)>
{
    // FIXME: Simplify this when V is fix size array
    // Infer the column count from the first row; an empty input
    // yields zero columns.
    let m = xs.len() as Ix;
    let n = xs.first().map_or(0, |row| row.as_init_slice().len() as Ix);
    let dim = (m, n);
    let mut elements = Vec::<A>::with_capacity(dim.size());
    for row in xs {
        let row = row.as_init_slice();
        // Fixed-size rows are correct by construction; only runtime
        // slices need the length check.
        assert!(<V as Initializer>::is_fixed_size() || row.len() as Ix == n);
        elements.extend(row.iter().map(|elt| elt.clone()))
    }
    unsafe {
        Array::from_vec_dim(dim, elements)
    }
}
/// Return a three-dimensional array with elements from `xs`.
///
/// **Panics** if the slices are not all of the same length.
///
/// ```
/// use ndarray::arr3;
///
/// let a = arr3(&[[[ 1, 2],
///                 [ 3, 4]],
///                [[ 5, 6],
///                 [ 7, 8]],
///                [[ 9, 0],
///                 [ 1, 2]]]);
/// assert!(
///     a.shape() == [3, 2, 2]
/// );
/// ```
pub fn arr3<A: Clone, V: Initializer<Elem=U>, U: Initializer<Elem=A>>(xs: &[V])
    -> Array<A, (Ix, Ix, Ix)>
{
    // FIXME: Simplify this when U/V are fix size arrays
    // Infer the inner extents from the first plane / first row;
    // empty input yields zero extents.
    let m = xs.len() as Ix;
    let fst = xs.get(0).map(|snd| snd.as_init_slice());
    let thr = fst.and_then(|elt| elt.get(0).map(|elt2| elt2.as_init_slice()));
    let n = fst.map_or(0, |v| v.len() as Ix);
    let o = thr.map_or(0, |v| v.len() as Ix);
    let dim = (m, n, o);
    let mut result = Vec::<A>::with_capacity(dim.size());
    for snd in xs.iter() {
        let snd = snd.as_init_slice();
        // Fixed-size initializers are correct by construction; only
        // runtime slices need the length check.
        assert!(<V as Initializer>::is_fixed_size() || snd.len() as Ix == n);
        for thr in snd.iter() {
            let thr = thr.as_init_slice();
            assert!(<U as Initializer>::is_fixed_size() || thr.len() as Ix == o);
            result.extend(thr.iter().map(|x| x.clone()))
        }
    }
    unsafe {
        // NOTE(review): result.len() == m*n*o exactly when the asserts
        // above passed -- presumably what from_vec_dim requires.
        Array::from_vec_dim(dim, result)
    }
}
impl<A, S, D> ArrayBase<S, D>
    where S: Data<Elem=A>,
          D: Dimension,
{
    /// Return sum along `axis`.
    ///
    /// ```
    /// use ndarray::{aview0, aview1, arr2};
    ///
    /// let a = arr2(&[[1., 2.],
    ///                [3., 4.]]);
    /// assert!(
    ///     a.sum(0) == aview1(&[4., 6.]) &&
    ///     a.sum(1) == aview1(&[3., 7.]) &&
    ///
    ///     a.sum(0).sum(0) == aview0(&10.)
    /// );
    /// ```
    ///
    /// **Panics** if `axis` is out of bounds.
    pub fn sum(&self, axis: usize) -> OwnedArray<A, <D as RemoveAxis>::Smaller>
        where A: Clone + Add<Output=A>,
              D: RemoveAxis,
    {
        let n = self.shape()[axis];
        // Seed the result with the first subview along `axis`, then
        // accumulate the remaining subviews in place.
        let mut res = self.view().subview(axis, 0).to_owned();
        for i in 1..n {
            let view = self.view().subview(axis, i);
            res.iadd(&view);
        }
        res
    }

    /// Return the sum of all elements in the array.
    ///
    /// ```
    /// use ndarray::arr2;
    ///
    /// let a = arr2(&[[1., 2.],
    ///                [3., 4.]]);
    /// assert_eq!(a.scalar_sum(), 10.);
    /// ```
    pub fn scalar_sum(&self) -> A
        where A: Clone + Add<Output=A> + libnum::Zero,
    {
        // Contiguous storage sums over one flat slice.
        if let Some(slc) = self.as_slice() {
            return Self::unrolled_sum(slc);
        }
        // Otherwise sum row by row, still using the unrolled kernel
        // for rows that are contiguous.
        let mut sum = A::zero();
        for row in self.inner_iter() {
            if let Some(slc) = row.as_slice() {
                sum = sum + Self::unrolled_sum(slc);
            } else {
                sum = sum + row.fold(A::zero(), |acc, elt| acc + elt.clone());
            }
        }
        sum
    }

    /// Sum a slice using eight independent accumulators.
    fn unrolled_sum(mut xs: &[A]) -> A
        where A: Clone + Add<Output=A> + libnum::Zero,
    {
        // eightfold unrolled so that floating point can be vectorized
        // (even with strict floating point accuracy semantics)
        let mut sum = A::zero();
        let (mut p0, mut p1, mut p2, mut p3,
             mut p4, mut p5, mut p6, mut p7) =
            (A::zero(), A::zero(), A::zero(), A::zero(),
             A::zero(), A::zero(), A::zero(), A::zero());
        while xs.len() >= 8 {
            p0 = p0 + xs[0].clone();
            p1 = p1 + xs[1].clone();
            p2 = p2 + xs[2].clone();
            p3 = p3 + xs[3].clone();
            p4 = p4 + xs[4].clone();
            p5 = p5 + xs[5].clone();
            p6 = p6 + xs[6].clone();
            p7 = p7 + xs[7].clone();
            xs = &xs[8..];
        }
        // Combine the partial sums. `sum` is moved and reassigned on
        // each step, so no clone of the accumulator is needed (the
        // previous code called `sum.clone()` redundantly here, which
        // is wasted work for non-Copy element types).
        sum = sum + (p0 + p4);
        sum = sum + (p1 + p5);
        sum = sum + (p2 + p6);
        sum = sum + (p3 + p7);
        // Remainder: fewer than 8 elements left.
        for elt in xs {
            sum = sum + elt.clone();
        }
        sum
    }

    /// Return mean along `axis`.
    ///
    /// ```
    /// use ndarray::{aview1, arr2};
    ///
    /// let a = arr2(&[[1., 2.],
    ///                [3., 4.]]);
    /// assert!(
    ///     a.mean(0) == aview1(&[2.0, 3.0]) &&
    ///     a.mean(1) == aview1(&[1.5, 3.5])
    /// );
    /// ```
    ///
    ///
    /// **Panics** if `axis` is out of bounds.
    #[allow(deprecated)]
    pub fn mean(&self, axis: usize) -> OwnedArray<A, <D as RemoveAxis>::Smaller>
        where A: Copy + Field,
              D: RemoveAxis,
    {
        let n = self.shape()[axis];
        let mut sum = self.sum(axis);
        // Build the element-typed count `n` by repeated addition of
        // one, since A is only known to be a Field (no conversion
        // from usize is available).
        let one = libnum::one::<A>();
        let mut cnt = one;
        for _ in 1..n {
            cnt = cnt + one;
        }
        sum.idiv_scalar(&cnt);
        sum
    }

    /// Return `true` if the arrays' elementwise differences are all within
    /// the given absolute tolerance.<br>
    /// Return `false` otherwise, or if the shapes disagree.
    pub fn allclose<S2>(&self, rhs: &ArrayBase<S2, D>, tol: A) -> bool
        where A: Float + PartialOrd,
              S2: Data<Elem=A>,
    {
        self.shape() == rhs.shape() &&
        self.iter().zip(rhs.iter()).all(|(x, y)| (*x - *y).abs() <= tol)
    }
}
impl<A, S> ArrayBase<S, (Ix, Ix)>
    where S: Data<Elem=A>,
{
    /// Build a 1-D element iterator directly from a raw pointer, a
    /// length and a stride.
    ///
    /// Unsafe contract (review): `ptr` must point into a live buffer
    /// with `len` elements reachable at the given `stride` -- callers
    /// below guarantee this via their bounds asserts.
    unsafe fn one_dimensional_iter<'a>(ptr: *mut A, len: Ix, stride: Ix)
        -> Elements<'a, A, Ix>
    {
        // NOTE: `data` field is unused by into_iter
        let view = ArrayView {
            data: &[],
            ptr: ptr,
            dim: len,
            strides: stride,
        };
        view.into_iter_()
    }

    /// Return an iterator over the elements of row `index`.
    ///
    /// **Panics** if `index` is out of bounds.
    pub fn row_iter(&self, index: Ix) -> Elements<A, Ix>
    {
        let (m, n) = self.dim;
        let (sr, sc) = self.strides;
        assert!(index < m);
        unsafe {
            // Start at the first element of the row; step by the
            // column stride for `n` elements.
            Self::one_dimensional_iter(self.ptr.offset(stride_offset(index, sr)), n, sc)
        }
    }

    /// Return an iterator over the elements of column `index`.
    ///
    /// **Panics** if `index` is out of bounds.
    pub fn col_iter(&self, index: Ix) -> Elements<A, Ix>
    {
        let (m, n) = self.dim;
        let (sr, sc) = self.strides;
        assert!(index < n);
        unsafe {
            // Start at the first element of the column; step by the
            // row stride for `m` elements.
            Self::one_dimensional_iter(self.ptr.offset(stride_offset(index, sc)), m, sr)
        }
    }

    /// Perform matrix multiplication of rectangular arrays `self` and `rhs`.
    ///
    /// The array sizes must agree in the way that
    /// if `self` is *M* × *N*, then `rhs` is *N* × *K*.
    ///
    /// Return a result array with shape *M* × *K*.
    ///
    /// **Panics** if sizes are incompatible.
    ///
    /// ```
    /// use ndarray::arr2;
    ///
    /// let a = arr2(&[[1., 2.],
    ///                [0., 1.]]);
    /// let b = arr2(&[[1., 2.],
    ///                [2., 3.]]);
    ///
    /// assert!(
    ///     a.mat_mul(&b) == arr2(&[[5., 8.],
    ///                             [2., 3.]])
    /// );
    /// ```
    ///
    #[allow(deprecated)]
    pub fn mat_mul(&self, rhs: &ArrayBase<S, (Ix, Ix)>) -> Array<A, (Ix, Ix)>
        where A: Copy + Ring
    {
        // NOTE: Matrix multiplication only defined for simple types to
        // avoid trouble with panicking + and *, and destructors
        let ((m, a), (b, n)) = (self.dim, rhs.dim);
        let (self_columns, other_rows) = (a, b);
        assert!(self_columns == other_rows);
        // Avoid initializing the memory in vec -- set it during iteration
        // Panic safe because A: Copy
        let mut res_elems = Vec::<A>::with_capacity(m as usize * n as usize);
        unsafe {
            res_elems.set_len(m as usize * n as usize);
        }
        // (i, j) walks the result in row-major order, in step with
        // the `res_elems` iterator.
        let mut i = 0;
        let mut j = 0;
        for rr in res_elems.iter_mut() {
            unsafe {
                // Dot product of row i of self with column j of rhs;
                // uget skips bounds checks (indices are in range by
                // construction of the loop).
                *rr = (0..a).fold(libnum::zero::<A>(),
                    move |s, k| s + *self.uget((i, k)) * *rhs.uget((k, j))
                );
            }
            j += 1;
            if j == n {
                j = 0;
                i += 1;
            }
        }
        unsafe {
            ArrayBase::from_vec_dim((m, n), res_elems)
        }
    }

    /// Perform the matrix multiplication of the rectangular array `self` and
    /// column vector `rhs`.
    ///
    /// The array sizes must agree in the way that
    /// if `self` is *M* × *N*, then `rhs` is *N*.
    ///
    /// Return a result array with shape *M*.
    ///
    /// **Panics** if sizes are incompatible.
    #[allow(deprecated)]
    pub fn mat_mul_col(&self, rhs: &ArrayBase<S, Ix>) -> Array<A, Ix>
        where A: Copy + Ring
    {
        let ((m, a), n) = (self.dim, rhs.dim);
        let (self_columns, other_rows) = (a, n);
        assert!(self_columns == other_rows);
        // Avoid initializing the memory in vec -- set it during iteration
        // (panic safe because A: Copy has no destructor)
        let mut res_elems = Vec::<A>::with_capacity(m as usize);
        unsafe {
            res_elems.set_len(m as usize);
        }
        let mut i = 0;
        for rr in res_elems.iter_mut() {
            unsafe {
                // Dot product of row i of self with the vector rhs.
                *rr = (0..a).fold(libnum::zero::<A>(),
                    move |s, k| s + *self.uget((i, k)) * *rhs.uget(k)
                );
            }
            i += 1;
        }
        unsafe {
            ArrayBase::from_vec_dim(m, res_elems)
        }
    }
}
// Array OPERATORS

// Generates a pair of inherent in-place methods for one binary
// operator: an array–array version (with broadcasting) and an
// array–scalar version.
macro_rules! impl_binary_op_inherent(
    ($trt:ident, $mth:ident, $imethod:ident, $imth_scalar:ident, $doc:expr) => (
    /// Perform elementwise
    #[doc=$doc]
    /// between `self` and `rhs`,
    /// *in place*.
    ///
    /// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
    ///
    /// **Panics** if broadcasting isn’t possible.
    pub fn $imethod <E: Dimension, S2> (&mut self, rhs: &ArrayBase<S2, E>)
        where A: Clone + $trt<A, Output=A>,
              S2: Data<Elem=A>,
    {
        self.zip_mut_with(rhs, |x, y| {
            *x = x.clone().$mth(y.clone());
        });
    }
    /// Perform elementwise
    #[doc=$doc]
    /// between `self` and the scalar `x`,
    /// *in place*.
    pub fn $imth_scalar (&mut self, x: &A)
        where A: Clone + $trt<A, Output=A>,
    {
        // Traversal order is irrelevant for a scalar operand.
        self.unordered_foreach_mut(move |elt| {
            *elt = elt.clone(). $mth (x.clone());
        });
    }
    );
);
/// *In-place* arithmetic operations.
impl<A, S, D> ArrayBase<S, D>
    where S: DataMut<Elem=A>,
          D: Dimension,
{
    // One (iop, iop_scalar) method pair per binary operator.
    impl_binary_op_inherent!(Add, add, iadd, iadd_scalar, "addition");
    impl_binary_op_inherent!(Sub, sub, isub, isub_scalar, "subtraction");
    impl_binary_op_inherent!(Mul, mul, imul, imul_scalar, "multiplication");
    impl_binary_op_inherent!(Div, div, idiv, idiv_scalar, "division");
    impl_binary_op_inherent!(Rem, rem, irem, irem_scalar, "remainder");
    impl_binary_op_inherent!(BitAnd, bitand, ibitand, ibitand_scalar, "bit and");
    impl_binary_op_inherent!(BitOr, bitor, ibitor, ibitor_scalar, "bit or");
    impl_binary_op_inherent!(BitXor, bitxor, ibitxor, ibitxor_scalar, "bit xor");
    impl_binary_op_inherent!(Shl, shl, ishl, ishl_scalar, "left shift");
    impl_binary_op_inherent!(Shr, shr, ishr, ishr_scalar, "right shift");

    /// Perform an elementwise negation of `self`, *in place*.
    pub fn ineg(&mut self)
        where A: Clone + Neg<Output=A>,
    {
        self.unordered_foreach_mut(|elt| {
            *elt = elt.clone().neg()
        });
    }

    /// Perform an elementwise unary not of `self`, *in place*.
    pub fn inot(&mut self)
        where A: Clone + Not<Output=A>,
    {
        self.unordered_foreach_mut(|elt| {
            *elt = elt.clone().not()
        });
    }
}
// Generates the std::ops trait impls for one binary operator:
// a by-value impl (consumes and reuses self's storage) and a
// by-reference impl (allocates a new OwnedArray).
macro_rules! impl_binary_op(
    ($trt:ident, $mth:ident, $doc:expr) => (
    /// Perform elementwise
    #[doc=$doc]
    /// between `self` and `rhs`,
    /// and return the result (based on `self`).
    ///
    /// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
    ///
    /// **Panics** if broadcasting isn’t possible.
    impl<A, S, S2, D, E> $trt<ArrayBase<S2, E>> for ArrayBase<S, D>
        where A: Clone + $trt<A, Output=A>,
              S: DataMut<Elem=A>,
              S2: Data<Elem=A>,
              D: Dimension,
              E: Dimension,
    {
        type Output = ArrayBase<S, D>;
        fn $mth (mut self, rhs: ArrayBase<S2, E>) -> ArrayBase<S, D>
        {
            // FIXME: Can we co-broadcast arrays here? And how?
            self.zip_mut_with(&rhs, |x, y| {
                *x = x.clone(). $mth (y.clone());
            });
            self
        }
    }
    /// Perform elementwise
    #[doc=$doc]
    /// between references `self` and `rhs`,
    /// and return the result as a new `OwnedArray`.
    ///
    /// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
    ///
    /// **Panics** if broadcasting isn’t possible.
    impl<'a, A, S, S2, D, E> $trt<&'a ArrayBase<S2, E>> for &'a ArrayBase<S, D>
        where A: Clone + $trt<A, Output=A>,
              S: Data<Elem=A>,
              S2: Data<Elem=A>,
              D: Dimension,
              E: Dimension,
    {
        type Output = OwnedArray<A, D>;
        fn $mth (self, rhs: &'a ArrayBase<S2, E>) -> OwnedArray<A, D>
        {
            // FIXME: Can we co-broadcast arrays here? And how?
            // Clone self into an owned array, then defer to the
            // by-value impl applied against a view of rhs.
            self.to_owned().$mth(rhs.view())
        }
    }
    );
);
// Operator trait impls (+, -, *, /, %, bit ops, shifts, unary -/!),
// generated by the macros above.
mod arithmetic_ops {
    use super::*;
    use std::ops::*;

    impl_binary_op!(Add, add, "addition");
    impl_binary_op!(Sub, sub, "subtraction");
    impl_binary_op!(Mul, mul, "multiplication");
    impl_binary_op!(Div, div, "division");
    impl_binary_op!(Rem, rem, "remainder");
    impl_binary_op!(BitAnd, bitand, "bit and");
    impl_binary_op!(BitOr, bitor, "bit or");
    impl_binary_op!(BitXor, bitxor, "bit xor");
    impl_binary_op!(Shl, shl, "left shift");
    impl_binary_op!(Shr, shr, "right shift");

    impl<A, S, D> Neg for ArrayBase<S, D>
        where A: Clone + Neg<Output=A>,
              S: DataMut<Elem=A>,
              D: Dimension
    {
        type Output = Self;
        /// Perform an elementwise negation of `self` and return the result.
        fn neg(mut self) -> Self {
            // Negate in place, then return the (moved) array.
            self.ineg();
            self
        }
    }

    impl<A, S, D> Not for ArrayBase<S, D>
        where A: Clone + Not<Output=A>,
              S: DataMut<Elem=A>,
              D: Dimension
    {
        type Output = Self;
        /// Perform an elementwise unary not of `self` and return the result.
        fn not(mut self) -> Self {
            self.inot();
            self
        }
    }
}
// Compound assignment operators (+=, -=, ...), gated on the
// `assign_ops` nightly feature (augmented_assignments was not yet
// stable when this was written).
#[cfg(feature = "assign_ops")]
mod assign_ops {
    use super::*;
    use std::ops::{
        AddAssign,
        SubAssign,
        MulAssign,
        DivAssign,
        RemAssign,
        BitAndAssign,
        BitOrAssign,
        BitXorAssign,
    };

    // One trait impl per compound assignment operator; rhs is taken
    // by reference and broadcast to self's shape.
    macro_rules! impl_assign_op {
        ($trt:ident, $method:ident, $doc:expr) => {
    #[doc=$doc]
    /// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
    ///
    /// **Panics** if broadcasting isn’t possible.
    ///
    /// **Requires `feature = "assign_ops"`**
    impl<'a, A, S, S2, D, E> $trt<&'a ArrayBase<S2, E>> for ArrayBase<S, D>
        where A: Clone + $trt<A>,
              S: DataMut<Elem=A>,
              S2: Data<Elem=A>,
              D: Dimension,
              E: Dimension,
    {
        fn $method(&mut self, rhs: &ArrayBase<S2, E>) {
            self.zip_mut_with(rhs, |x, y| {
                x.$method(y.clone());
            });
        }
    }
        };
    }

    impl_assign_op!(AddAssign, add_assign,
                    "Perform `self += rhs` as elementwise addition (in place).\n");
    impl_assign_op!(SubAssign, sub_assign,
                    "Perform `self -= rhs` as elementwise subtraction (in place).\n");
    impl_assign_op!(MulAssign, mul_assign,
                    "Perform `self *= rhs` as elementwise multiplication (in place).\n");
    impl_assign_op!(DivAssign, div_assign,
                    "Perform `self /= rhs` as elementwise division (in place).\n");
    impl_assign_op!(RemAssign, rem_assign,
                    "Perform `self %= rhs` as elementwise remainder (in place).\n");
    impl_assign_op!(BitAndAssign, bitand_assign,
                    "Perform `self &= rhs` as elementwise bit and (in place).\n");
    impl_assign_op!(BitOrAssign, bitor_assign,
                    "Perform `self |= rhs` as elementwise bit or (in place).\n");
    impl_assign_op!(BitXorAssign, bitxor_assign,
                    "Perform `self ^= rhs` as elementwise bit xor (in place).\n");
}
/// An iterator over the elements of an array.
///
/// Iterator element type is `&'a A`.
pub struct Elements<'a, A: 'a, D> {
    // Either a plain slice iterator (contiguous arrays) or a counted
    // strided traversal.
    inner: ElementsRepr<Iter<'a, A>, ElementsBase<'a, A, D>>,
}

/// Counted read only iterator
struct ElementsBase<'a, A: 'a, D> {
    inner: Baseiter<'a, A, D>,
}

/// An iterator over the elements of an array (mutable).
///
/// Iterator element type is `&'a mut A`.
pub struct ElementsMut<'a, A: 'a, D> {
    inner: ElementsRepr<IterMut<'a, A>, ElementsBaseMut<'a, A, D>>,
}

/// An iterator over the elements of an array.
///
/// Iterator element type is `&'a mut A`.
struct ElementsBaseMut<'a, A: 'a, D> {
    inner: Baseiter<'a, A, D>,
}

/// An iterator over the indexes and elements of an array.
#[derive(Clone)]
pub struct Indexed<'a, A: 'a, D>(ElementsBase<'a, A, D>);

/// An iterator over the indexes and elements of an array (mutable).
pub struct IndexedMut<'a, A: 'a, D>(ElementsBaseMut<'a, A, D>);

// Helper: zip two slice-like values element by element
// (itertools' ZipSlices).
fn zipsl<T, U>(t: T, u: U) -> ZipSlices<T, U>
    where T: it::misc::Slice, U: it::misc::Slice
{
    ZipSlices::from_slices(t, u)
}

// Dispatch between the fast slice representation and the general
// counted representation of an element iterator.
enum ElementsRepr<S, C> {
    Slice(S),
    Counted(C),
}
// Add another debug assertion
#![crate_name="ndarray"]
#![cfg_attr(has_deprecated, feature(deprecated))]
#![doc(html_root_url = "http://bluss.github.io/rust-ndarray/master/")]
//! The `ndarray` crate provides an N-dimensional container similar to numpy’s
//! ndarray.
//!
//! - [`ArrayBase`](struct.ArrayBase.html):
//! The N-dimensional array type itself.
//! - [`Array`](type.Array.html):
//! An array where the data is shared and copy on write, it
//! can act as both an owner of the data as well as a lightweight view.
//! - [`OwnedArray`](type.OwnedArray.html):
//! An array where the data is owned uniquely.
//! - [`ArrayView`](type.ArrayView.html), [`ArrayViewMut`](type.ArrayViewMut.html):
//! Lightweight array views.
//!
//! ## Highlights
//!
//! - Generic N-dimensional array
//! - Slicing, also with arbitrary step size, and negative indices to mean
//! elements from the end of the axis.
//! - There is both an easy to use copy on write array (`Array`),
//! or a regular uniquely owned array (`OwnedArray`), and both can use
//! read-only and read-write array views.
//! - Iteration and most operations are very efficient on contiguous c-order arrays
//! (the default layout, without any transposition or discontiguous subslicing),
//! and on arrays where the lowest dimension is contiguous (contiguous block
//! slicing).
//! - Array views can be used to slice and mutate any `[T]` data.
//!
//! ## Status and Lookout
//!
//! - Still iterating on the API
//! - Performance status:
//! + Arithmetic involving contiguous c-order arrays and contiguous lowest
//! dimension arrays optimizes very well.
//! + `.fold()` and `.zip_mut_with()` are the most efficient ways to
//! perform single traversal and lock step traversal respectively.
//! + Transposed arrays where the lowest dimension is not c-contiguous
//! is still a pain point.
//! - There is experimental bridging to the linear algebra package `rblas`.
//!
//! ## Crate Feature Flags
//!
//! - `assign_ops`
//! - Optional, requires nightly
//! - Enables the compound assignment operators
//! - `rustc-serialize`
//! - Optional, stable
//! - Enables serialization support
//! - `rblas`
//! - Optional, stable
//! - Enables `rblas` integration
//!
#![cfg_attr(feature = "assign_ops", feature(augmented_assignments,
op_assign_traits))]
#[cfg(feature = "serde")]
extern crate serde;
#[cfg(feature = "rustc-serialize")]
extern crate rustc_serialize as serialize;
extern crate itertools as it;
#[cfg(not(nocomplex))]
extern crate num as libnum;
use libnum::Float;
use std::cmp;
use std::mem;
use std::ops::{Add, Sub, Mul, Div, Rem, Neg, Not, Shr, Shl,
BitAnd,
BitOr,
BitXor,
};
use std::rc::Rc;
use std::slice::{self, Iter, IterMut};
use it::ZipSlices;
pub use dimension::{Dimension, RemoveAxis};
pub use indexes::Indexes;
pub use shape_error::ShapeError;
pub use si::{Si, S};
use dimension::stride_offset;
use iterators::Baseiter;
pub use iterators::{
InnerIter,
InnerIterMut,
};
#[allow(deprecated)]
use linalg::{Field, Ring};
pub mod linalg;
mod arraytraits;
#[cfg(feature = "serde")]
mod arrayserialize;
mod arrayformat;
#[cfg(feature = "rblas")]
pub mod blas;
mod dimension;
mod indexes;
mod iterators;
mod si;
mod shape_error;
// NOTE: In theory, the whole library should compile
// and pass tests even if you change Ix and Ixs.
/// Array index type
///
/// (Per the `ArrayBase` docs below, a future version plans to switch
/// this from `u32` to `usize`.)
pub type Ix = u32;
/// Array index type (signed)
pub type Ixs = i32;
/// An *N*-dimensional array.
///
/// The array is a general container of elements. It cannot grow or shrink, but
/// can be sliced into subsets of its data.
/// The array supports arithmetic operations by applying them elementwise.
///
/// The `ArrayBase<S, D>` is parameterized by:
///
/// - `S` for the data container
/// - `D` for the number of dimensions
///
/// Type aliases [`Array`], [`OwnedArray`], [`ArrayView`], and [`ArrayViewMut`] refer
/// to `ArrayBase` with different types for the data storage.
///
/// [`Array`]: type.Array.html
/// [`OwnedArray`]: type.OwnedArray.html
/// [`ArrayView`]: type.ArrayView.html
/// [`ArrayViewMut`]: type.ArrayViewMut.html
///
/// ## `Array` and `OwnedArray`
///
/// `OwnedArray` owns the underlying array elements directly (just like
/// a `Vec`), while [`Array`](type.Array.html) is a an array with reference
/// counted data. `Array` can act both as an owner or as a view in that regard.
/// Sharing requires that it uses copy-on-write for mutable operations.
/// Calling a method for mutating elements on `Array`, for example
/// [`view_mut()`](#method.view_mut) or [`get_mut()`](#method.get_mut),
/// will break sharing and require a clone of the data (if it is not uniquely held).
///
/// Note that all `ArrayBase` variants can change their view (slicing) of the
/// data freely, even when their data can’t be mutated.
///
/// ## Indexing and Dimension
///
/// Array indexes are represented by the types `Ix` and `Ixs`
/// (signed). ***Note: A future version will switch from `u32` to `usize`.***
///
/// The dimensionality of the array determines the number of *axes*, for example
/// a 2D array has two axes. These are listed in “big endian” order, so that
/// the greatest dimension is listed first, the lowest dimension with the most
/// rapidly varying index is the last.
/// For the 2D array this means that indices are `(row, column)`, and the order of
/// the elements is *(0, 0), (0, 1), (0, 2), ... (1, 0), (1, 1), (1, 2) ...* etc.
///
/// The number of axes for an array is fixed by the `D` parameter: `Ix` for
/// a 1D array, `(Ix, Ix)` for a 2D array etc. The `D` type is also used
/// for element indices in `.get()` and `array[index]`. The dimension type `Vec<Ix>`
/// allows a dynamic number of axes.
///
/// ## Slicing
///
/// You can use slicing to create a view of a subset of the data in
/// the array. Slicing methods include `.slice()`, `.islice()`,
/// `.slice_mut()`.
///
/// The slicing specification is passed as a function argument as a fixed size
/// array with elements of type [`Si`] with fields `Si(begin, end, stride)`,
/// where the values are signed integers, and `end` is an `Option<Ixs>`.
/// The constant [`S`] is a shorthand for the full range of an axis.
/// For example, if the array has two axes, the slice argument is passed as
/// type `&[Si; 2]`.
///
/// The macro [`s![]`](macro.s!.html) is however a much more convenient way to
/// specify the slicing argument, so it will be used in all examples.
///
/// [`Si`]: struct.Si.html
/// [`S`]: constant.S.html
///
/// ```
/// // import the s![] macro
/// #[macro_use(s)]
/// extern crate ndarray;
///
/// use ndarray::arr3;
///
/// fn main() {
///
/// // 2 submatrices of 2 rows with 3 elements per row, means a shape of `[2, 2, 3]`.
///
/// let a = arr3(&[[[ 1, 2, 3], // -- 2 rows \_
/// [ 4, 5, 6]], // -- /
/// [[ 7, 8, 9], // \_ 2 submatrices
/// [10, 11, 12]]]); // /
/// // 3 columns ..../.../.../
///
/// assert_eq!(a.shape(), &[2, 2, 3]);
///
/// // Let’s create a slice with
/// //
/// // - Both of the submatrices of the greatest dimension: `..`
/// // - Only the first row in each submatrix: `0..1`
/// // - Every element in each row: `..`
///
/// let b = a.slice(s![.., 0..1, ..]);
/// // without the macro, the explicit argument is `&[S, Si(0, Some(1), 1), S]`
///
/// let c = arr3(&[[[ 1, 2, 3]],
/// [[ 7, 8, 9]]]);
/// assert_eq!(b, c);
/// assert_eq!(b.shape(), &[2, 1, 3]);
///
/// // Let’s create a slice with
/// //
/// // - Both submatrices of the greatest dimension: `..`
/// // - The last row in each submatrix: `-1..`
/// // - Row elements in reverse order: `..;-1`
/// let d = a.slice(s![.., -1.., ..;-1]);
/// let e = arr3(&[[[ 6, 5, 4]],
/// [[12, 11, 10]]]);
/// assert_eq!(d, e);
/// }
/// ```
///
/// ## Subviews
///
/// Subview methods allow you to restrict the array view while removing
/// one axis from the array. Subview methods include `.subview()`,
/// `.isubview()`, `.subview_mut()`.
///
/// Subview takes two arguments: `axis` and `index`.
///
/// ```
/// use ndarray::{arr3, aview2};
///
/// // 2 submatrices of 2 rows with 3 elements per row, means a shape of `[2, 2, 3]`.
///
/// let a = arr3(&[[[ 1, 2, 3], // \ axis 0, submatrix 0
/// [ 4, 5, 6]], // /
/// [[ 7, 8, 9], // \ axis 0, submatrix 1
/// [10, 11, 12]]]); // /
/// // \
/// // axis 2, column 0
///
/// assert_eq!(a.shape(), &[2, 2, 3]);
///
/// // Let’s take a subview along the greatest dimension (axis 0),
/// // taking submatrix 0, then submatrix 1
///
/// let sub_0 = a.subview(0, 0);
/// let sub_1 = a.subview(0, 1);
///
/// assert_eq!(sub_0, aview2(&[[ 1, 2, 3],
/// [ 4, 5, 6]]));
/// assert_eq!(sub_1, aview2(&[[ 7, 8, 9],
/// [10, 11, 12]]));
/// assert_eq!(sub_0.shape(), &[2, 3]);
///
/// // This is the subview picking only axis 2, column 0
/// let sub_col = a.subview(2, 0);
///
/// assert_eq!(sub_col, aview2(&[[ 1, 4],
/// [ 7, 10]]));
/// ```
///
/// `.isubview()` modifies the view in the same way as `subview()`, but
/// since it is *in place*, it cannot remove the collapsed axis. It becomes
/// an axis of length 1.
///
/// ## Broadcasting
///
/// Arrays support limited *broadcasting*, where arithmetic operations with
/// array operands of different sizes can be carried out by repeating the
/// elements of the smaller dimension array. See
/// [`.broadcast()`](#method.broadcast) for a more detailed
/// description.
///
/// ```
/// use ndarray::arr2;
///
/// let a = arr2(&[[1., 1.],
/// [1., 2.]]);
/// let b = arr2(&[[0., 1.]]);
///
/// let c = arr2(&[[1., 2.],
/// [1., 3.]]);
/// // We can add because the shapes are compatible even if not equal.
/// assert!(
/// c == a + b
/// );
/// ```
///
pub struct ArrayBase<S, D> where S: Data {
    /// Rc data when used as view, Uniquely held data when being mutated
    data: S,
    /// A pointer into the buffer held by data, may point anywhere
    /// in its range.
    ptr: *mut S::Elem,
    /// The size of each axis
    dim: D,
    /// The element count stride per axis. To be parsed as `isize`.
    ///
    /// (Stored with the same unsigned index type as `dim`; negative
    /// steps are represented via the `isize` reinterpretation.)
    strides: D,
}
/// Array’s inner representation.
pub unsafe trait Data {
    type Elem;
    /// The complete backing buffer (not restricted to the array's
    /// current view of it).
    fn slice(&self) -> &[Self::Elem];
}

/// Array’s writable inner representation.
pub unsafe trait DataMut : Data {
    fn slice_mut(&mut self) -> &mut [Self::Elem];
    /// Copy-on-write hook: make sure the data is uniquely held before
    /// mutation. The default is a no-op, suitable for storage that is
    /// always uniquely owned.
    fn ensure_unique<D>(&mut ArrayBase<Self, D>)
        where Self: Sized, D: Dimension
    {
    }
}

/// Clone an Array’s storage.
pub unsafe trait DataClone : Data {
    /// Unsafe because, `ptr` must point inside the current storage.
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem) -> (Self, *mut Self::Elem);
}
unsafe impl<A> Data for Rc<Vec<A>> {
    type Elem = A;
    fn slice(&self) -> &[A] { self }
}

// NOTE: Copy on write
unsafe impl<A> DataMut for Rc<Vec<A>> where A: Clone {
    fn slice_mut(&mut self) -> &mut [A] { &mut Rc::make_mut(self)[..] }
    fn ensure_unique<D>(self_: &mut ArrayBase<Self, D>)
        where Self: Sized, D: Dimension
    {
        // Already the sole owner -- nothing to do.
        if Rc::get_mut(&mut self_.data).is_some() {
            return
        }
        // If the view covers at most half the shared buffer, copy
        // only the viewed elements instead of cloning the whole vec.
        if self_.dim.size() <= self_.data.len() / 2 {
            unsafe {
                *self_ = Array::from_vec_dim(self_.dim.clone(),
                    self_.iter().map(|x| x.clone()).collect());
            }
            return;
        }
        // Otherwise clone the whole buffer and re-point `ptr` at the
        // same element offset inside the new allocation.
        // NOTE(review): dividing by size_of::<A>() divides by zero for
        // zero-sized A -- confirm ZST element types can't reach here.
        let our_off = (self_.ptr as isize - self_.data.as_ptr() as isize)
            / mem::size_of::<A>() as isize;
        let rvec = Rc::make_mut(&mut self_.data);
        unsafe {
            self_.ptr = rvec.as_mut_ptr().offset(our_off);
        }
    }
}

unsafe impl<A> DataClone for Rc<Vec<A>> {
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem)
        -> (Self, *mut Self::Elem)
    {
        // pointer is preserved
        // (bumping the refcount does not move the buffer)
        (self.clone(), ptr)
    }
}
unsafe impl<A> Data for Vec<A> {
    type Elem = A;
    fn slice(&self) -> &[A] { self }
}

// Vec storage is always uniquely owned, so the default no-op
// `ensure_unique` applies.
unsafe impl<A> DataMut for Vec<A> {
    fn slice_mut(&mut self) -> &mut [A] { self }
}
unsafe impl<A> DataClone for Vec<A> where A: Clone {
    /// Clone the vector and translate `ptr` into the clone's buffer.
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem)
        -> (Self, *mut Self::Elem)
    {
        let mut u = self.clone();
        // `ptr` lies `our_off` elements past the start of our buffer;
        // the translated pointer must lie the same number of elements
        // past the start of the clone's buffer.
        //
        // BUGFIX: the offset was previously computed with the operands
        // swapped (`self.as_ptr() - ptr`), i.e. negated, so `new_ptr`
        // pointed *before* the clone's buffer whenever `ptr` was not at
        // the very start (any sliced/offset array) — a dangling pointer.
        //
        // NOTE(review): like the original, this divides by
        // `mem::size_of::<A>()` and so assumes `A` is not zero-sized —
        // TODO confirm ZSTs are rejected elsewhere.
        let our_off = (ptr as isize - self.as_ptr() as isize)
            / mem::size_of::<A>() as isize;
        let new_ptr = u.as_mut_ptr().offset(our_off);
        (u, new_ptr)
    }
}
/// Borrowed read-only storage: the slice itself is the buffer.
unsafe impl<'a, A> Data for &'a [A] {
    type Elem = A;
    fn slice(&self) -> &[A] {
        *self
    }
}
unsafe impl<'a, A> DataClone for &'a [A] {
    /// A borrowed slice is `Copy`: the "clone" aliases the same buffer,
    /// so `ptr` needs no translation.
    unsafe fn clone_with_ptr(&self, ptr: *mut Self::Elem)
        -> (Self, *mut Self::Elem)
    {
        let view = *self;
        (view, ptr)
    }
}
/// Borrowed read-write storage: the mutable slice itself is the buffer.
unsafe impl<'a, A> Data for &'a mut [A] {
    type Elem = A;
    fn slice(&self) -> &[A] {
        &self[..]
    }
}

/// A `&mut [A]` is exclusively borrowed by construction, so the
/// default no-op `ensure_unique` applies.
unsafe impl<'a, A> DataMut for &'a mut [A] {
    fn slice_mut(&mut self) -> &mut [A] {
        &mut self[..]
    }
}
/// Array representation that is a unique or shared owner of its data.
pub unsafe trait DataOwned : Data {
    /// Wrap a vector of elements as this owned representation.
    fn new(elements: Vec<Self::Elem>) -> Self;
    /// Convert into the shared (reference-counted) representation.
    fn into_shared(self) -> Rc<Vec<Self::Elem>>;
}
/// Array representation that is a lightweight view.
///
/// Marker for storage whose clone is cheap (refcount bump or pointer
/// copy) and aliases the same buffer.
pub unsafe trait DataShared : Clone + DataClone { }

unsafe impl<A> DataShared for Rc<Vec<A>> { }
unsafe impl<'a, A> DataShared for &'a [A] { }
unsafe impl<A> DataOwned for Vec<A> {
    // Uniquely owned: the vector itself is the storage.
    fn new(elements: Vec<A>) -> Self { elements }
    fn into_shared(self) -> Rc<Vec<A>> { Rc::new(self) }
}

unsafe impl<A> DataOwned for Rc<Vec<A>> {
    // Shared owner: wrap on construction, already shared on conversion.
    fn new(elements: Vec<A>) -> Self { Rc::new(elements) }
    fn into_shared(self) -> Rc<Vec<A>> { self }
}
/// Array where the data is reference counted and copy on write, it
/// can act as both an owner as the data as well as a lightweight view.
pub type Array<A, D> = ArrayBase<Rc<Vec<A>>, D>;

/// Array where the data is owned uniquely.
pub type OwnedArray<A, D> = ArrayBase<Vec<A>, D>;

/// A lightweight array view.
///
/// `ArrayView` implements `IntoIterator`.
pub type ArrayView<'a, A, D> = ArrayBase<&'a [A], D>;

/// A lightweight read-write array view.
///
/// `ArrayViewMut` implements `IntoIterator`.
pub type ArrayViewMut<'a, A, D> = ArrayBase<&'a mut [A], D>;
impl<S: DataClone, D: Clone> Clone for ArrayBase<S, D>
{
    fn clone(&self) -> ArrayBase<S, D> {
        unsafe {
            // The storage clones itself and translates `ptr` into the
            // new buffer; dim/strides are plain value clones.
            let (data, ptr) = self.data.clone_with_ptr(self.ptr);
            ArrayBase {
                data: data,
                ptr: ptr,
                dim: self.dim.clone(),
                strides: self.strides.clone(),
            }
        }
    }
}

// Only view-like storage (`&[A]`) is `Copy`, so a bitwise copy aliases
// the same buffer and is sound.
impl<S: DataClone + Copy, D: Copy> Copy for ArrayBase<S, D> { }
/// Constructor methods for single dimensional `ArrayBase`.
impl<S> ArrayBase<S, Ix>
    where S: DataOwned,
{
    /// Create a one-dimensional array from a vector (no allocation needed).
    pub fn from_vec(v: Vec<S::Elem>) -> ArrayBase<S, Ix> {
        unsafe {
            // Sound: the dimension is taken from the vector's own length.
            Self::from_vec_dim(v.len() as Ix, v)
        }
    }

    /// Create a one-dimensional array from an iterable.
    pub fn from_iter<I: IntoIterator<Item=S::Elem>>(iterable: I) -> ArrayBase<S, Ix> {
        Self::from_vec(iterable.into_iter().collect())
    }

    /// Create a one-dimensional array from inclusive interval
    /// `[start, end]` with `n` elements. `F` must be a floating point type.
    pub fn linspace<F>(start: F, end: F, n: usize) -> ArrayBase<S, Ix>
        where S: Data<Elem=F>,
              F: libnum::Float,
              usize: it::misc::ToFloat<F>,
    {
        Self::from_iter(it::linspace(start, end, n))
    }

    /// Create a one-dimensional array from interval `[start, end)`
    #[cfg_attr(has_deprecated, deprecated(note="use ArrayBase::linspace() instead"))]
    pub fn range(start: f32, end: f32) -> ArrayBase<S, Ix>
        where S: Data<Elem=f32>,
    {
        // n integer steps of 1.0; `span` is the last value's offset
        // from `start` (0 when the range is empty).
        let n = (end - start) as usize;
        let span = if n > 0 { (n - 1) as f32 } else { 0. };
        Self::linspace(start, start + span, n)
    }
}
/// Constructor methods for `ArrayBase`.
impl<S, A, D> ArrayBase<S, D>
    where S: DataOwned<Elem=A>,
          D: Dimension,
{
    /// Construct an array with copies of `elem`, dimension `dim`.
    ///
    /// ```
    /// use ndarray::Array;
    /// use ndarray::arr3;
    ///
    /// let a = Array::from_elem((2, 2, 2), 1.);
    ///
    /// assert!(
    ///     a == arr3(&[[[1., 1.],
    ///                  [1., 1.]],
    ///                 [[1., 1.],
    ///                  [1., 1.]]])
    /// );
    /// ```
    pub fn from_elem(dim: D, elem: A) -> ArrayBase<S, D> where A: Clone
    {
        let v = vec![elem; dim.size()];
        unsafe {
            // Sound: `v` was sized to exactly `dim.size()` above.
            Self::from_vec_dim(dim, v)
        }
    }

    /// Construct an array with zeros, dimension `dim`.
    pub fn zeros(dim: D) -> ArrayBase<S, D> where A: Clone + libnum::Zero
    {
        Self::from_elem(dim, libnum::zero())
    }

    /// Construct an array with default values, dimension `dim`.
    pub fn default(dim: D) -> ArrayBase<S, D>
        where A: Default
    {
        let v = (0..dim.size()).map(|_| A::default()).collect();
        unsafe {
            // Sound: the iterator yields exactly `dim.size()` elements.
            Self::from_vec_dim(dim, v)
        }
    }

    /// Create an array from a vector (with no allocation needed).
    ///
    /// Unsafe because dimension is unchecked, and must be correct.
    pub unsafe fn from_vec_dim(dim: D, mut v: Vec<A>) -> ArrayBase<S, D>
    {
        debug_assert!(dim.size() == v.len());
        ArrayBase {
            // Take the pointer before `v` is moved into the storage.
            ptr: v.as_mut_ptr(),
            data: DataOwned::new(v),
            strides: dim.default_strides(),
            dim: dim
        }
    }
}
impl<'a, A, D> ArrayView<'a, A, D>
    where D: Dimension,
{
    /// Consume the view and return the raw base iterator over it.
    #[inline]
    fn into_base_iter(self) -> Baseiter<'a, A, D> {
        unsafe {
            Baseiter::new(self.ptr, self.dim.clone(), self.strides.clone())
        }
    }

    #[inline]
    fn into_elements_base(self) -> ElementsBase<'a, A, D> {
        ElementsBase { inner: self.into_base_iter() }
    }

    /// Build the element iterator, taking the fast slice path when the
    /// view is contiguous in standard layout.
    fn into_iter_(self) -> Elements<'a, A, D> {
        Elements {
            inner:
                if let Some(slc) = self.into_slice() {
                    ElementsRepr::Slice(slc.iter())
                } else {
                    ElementsRepr::Counted(self.into_elements_base())
                }
        }
    }

    /// Return the viewed elements as one slice, if contiguous in
    /// standard layout; `None` otherwise.
    fn into_slice(&self) -> Option<&'a [A]> {
        if self.is_standard_layout() {
            unsafe {
                // Sound: standard layout means `len()` contiguous
                // elements starting at `ptr`, borrowed for 'a.
                Some(slice::from_raw_parts(self.ptr, self.len()))
            }
        } else {
            None
        }
    }
}
impl<'a, A, D> ArrayViewMut<'a, A, D>
    where D: Dimension,
{
    /// Consume the view and return the raw base iterator over it.
    #[inline]
    fn into_base_iter(self) -> Baseiter<'a, A, D> {
        unsafe {
            Baseiter::new(self.ptr, self.dim.clone(), self.strides.clone())
        }
    }

    #[inline]
    fn into_elements_base(self) -> ElementsBaseMut<'a, A, D> {
        ElementsBaseMut { inner: self.into_base_iter() }
    }

    /// Build the mutable element iterator, taking the fast slice path
    /// when the view is contiguous in standard layout.
    fn into_iter_(self) -> ElementsMut<'a, A, D> {
        ElementsMut {
            inner:
                if self.is_standard_layout() {
                    let slc = unsafe {
                        // Sound: standard layout means `len()` contiguous
                        // elements starting at `ptr`; the view is consumed
                        // so the mutable borrow is exclusive.
                        slice::from_raw_parts_mut(self.ptr, self.len())
                    };
                    ElementsRepr::Slice(slc.iter_mut())
                } else {
                    ElementsRepr::Counted(self.into_elements_base())
                }
        }
    }

    /// Return the viewed elements as one mutable slice, if contiguous
    /// in standard layout; `None` otherwise. (Currently unused here.)
    fn _into_slice_mut(self) -> Option<&'a mut [A]>
    {
        if self.is_standard_layout() {
            unsafe {
                Some(slice::from_raw_parts_mut(self.ptr, self.len()))
            }
        } else {
            None
        }
    }
}
impl<A, S, D> ArrayBase<S, D> where S: Data<Elem=A>, D: Dimension
{
    /// Return the total number of elements in the Array.
    pub fn len(&self) -> usize
    {
        self.dim.size()
    }

    /// Return the shape of the array.
    pub fn dim(&self) -> D {
        self.dim.clone()
    }

    /// Return the shape of the array as a slice.
    pub fn shape(&self) -> &[Ix] {
        self.dim.slice()
    }

    /// Return the strides of the array
    pub fn strides(&self) -> &[Ixs] {
        let s = self.strides.slice();
        // reinterpret unsigned integer as signed; assumes `Ix` and
        // `Ixs` have identical size and layout — TODO confirm at the
        // type definitions.
        unsafe {
            slice::from_raw_parts(s.as_ptr() as *const _, s.len())
        }
    }
    /// Return a read-only view of the array
    pub fn view(&self) -> ArrayView<A, D> {
        debug_assert!(self.pointer_is_inbounds());
        ArrayView {
            ptr: self.ptr,
            dim: self.dim.clone(),
            strides: self.strides.clone(),
            data: self.raw_data(),
        }
    }

    /// Return a read-write view of the array
    pub fn view_mut(&mut self) -> ArrayViewMut<A, D>
        where S: DataMut,
    {
        // Break any sharing first so the mutable view is nonaliased.
        self.ensure_unique();
        ArrayViewMut {
            ptr: self.ptr,
            dim: self.dim.clone(),
            strides: self.strides.clone(),
            data: self.data.slice_mut(),
        }
    }
    /// Return an uniquely owned copy of the array
    pub fn to_owned(&self) -> OwnedArray<A, D>
        where A: Clone
    {
        // Fast path: memcpy-like copy of the contiguous data; otherwise
        // clone element by element in index order.
        let data = if let Some(slc) = self.as_slice() {
            slc.to_vec()
        } else {
            self.iter().cloned().collect()
        };
        unsafe {
            // Sound: `data` has exactly `self.dim.size()` elements.
            ArrayBase::from_vec_dim(self.dim.clone(), data)
        }
    }

    /// Return a shared ownership (copy on write) array.
    pub fn to_shared(&self) -> Array<A, D>
        where A: Clone
    {
        // FIXME: Avoid copying if it’s already an Array.
        self.to_owned().into_shared()
    }

    /// Turn the array into a shared ownership (copy on write) array,
    /// without any copying.
    pub fn into_shared(self) -> Array<A, D>
        where S: DataOwned,
    {
        // The buffer address is unchanged by the conversion, so the
        // existing ptr/dim/strides carry over directly.
        let data = self.data.into_shared();
        ArrayBase {
            data: data,
            ptr: self.ptr,
            dim: self.dim,
            strides: self.strides,
        }
    }
    /// Return an iterator of references to the elements of the array.
    ///
    /// Iterator element type is `&A`.
    pub fn iter(&self) -> Elements<A, D> {
        debug_assert!(self.pointer_is_inbounds());
        self.view().into_iter_()
    }

    /// Return an iterator of references to the elements of the array.
    ///
    /// Iterator element type is `(D, &A)`.
    pub fn indexed_iter(&self) -> Indexed<A, D> {
        Indexed(self.view().into_elements_base())
    }

    /// Return an iterator of mutable references to the elements of the array.
    ///
    /// Iterator element type is `&mut A`.
    pub fn iter_mut(&mut self) -> ElementsMut<A, D>
        where S: DataMut,
    {
        // NOTE: view_mut() calls ensure_unique() again; this extra call
        // is redundant but harmless.
        self.ensure_unique();
        self.view_mut().into_iter_()
    }

    /// Return an iterator of indexes and mutable references to the elements of the array.
    ///
    /// Iterator element type is `(D, &mut A)`.
    pub fn indexed_iter_mut(&mut self) -> IndexedMut<A, D>
        where S: DataMut,
    {
        IndexedMut(self.view_mut().into_elements_base())
    }
    /// Return a sliced array.
    ///
    /// See [*Slicing*](#slicing) for full documentation.
    ///
    /// [`D::SliceArg`] is typically a fixed size array of `Si`, with one
    /// element per axis.
    ///
    /// [`D::SliceArg`]: trait.Dimension.html#associatedtype.SliceArg
    ///
    /// **Panics** if an index is out of bounds or stride is zero.<br>
    /// (**Panics** if `D` is `Vec` and `indexes` does not match the number of array axes.)
    pub fn slice(&self, indexes: &D::SliceArg) -> Self
        where S: DataShared
    {
        // Cheap clone (shared storage), then slice the clone in place.
        let mut arr = self.clone();
        arr.islice(indexes);
        arr
    }

    /// Slice the array’s view in place.
    ///
    /// [`D::SliceArg`] is typically a fixed size array of `Si`, with one
    /// element per axis.
    ///
    /// [`D::SliceArg`]: trait.Dimension.html#associatedtype.SliceArg
    ///
    /// **Panics** if an index is out of bounds or stride is zero.<br>
    /// (**Panics** if `D` is `Vec` and `indexes` does not match the number of array axes.)
    pub fn islice(&mut self, indexes: &D::SliceArg)
    {
        // do_slices updates dim/strides and returns the element offset
        // of the slice's first element relative to the current ptr.
        let offset = Dimension::do_slices(&mut self.dim, &mut self.strides, indexes);
        unsafe {
            self.ptr = self.ptr.offset(offset);
        }
        debug_assert!(self.pointer_is_inbounds());
    }

    /// Return an iterator over a sliced view.
    ///
    /// [`D::SliceArg`] is typically a fixed size array of `Si`, with one
    /// element per axis.
    ///
    /// [`D::SliceArg`]: trait.Dimension.html#associatedtype.SliceArg
    ///
    /// **Panics** if an index is out of bounds or stride is zero.<br>
    /// (**Panics** if `D` is `Vec` and `indexes` does not match the number of array axes.)
    pub fn slice_iter(&self, indexes: &D::SliceArg) -> Elements<A, D>
    {
        let mut it = self.view();
        it.islice(indexes);
        it.into_iter_()
    }

    /// Return a sliced read-write view of the array.
    ///
    /// [`D::SliceArg`] is typically a fixed size array of `Si`, with one
    /// element per axis.
    ///
    /// [`D::SliceArg`]: trait.Dimension.html#associatedtype.SliceArg
    ///
    /// **Panics** if an index is out of bounds or stride is zero.<br>
    /// (**Panics** if `D` is `Vec` and `indexes` does not match the number of array axes.)
    pub fn slice_mut(&mut self, indexes: &D::SliceArg) -> ArrayViewMut<A, D>
        where S: DataMut
    {
        let mut arr = self.view_mut();
        arr.islice(indexes);
        arr
    }

    /// ***Deprecated: use `.slice_mut()`***
    #[cfg_attr(has_deprecated, deprecated(note="use .slice_mut() instead"))]
    pub fn slice_iter_mut(&mut self, indexes: &D::SliceArg) -> ElementsMut<A, D>
        where S: DataMut,
    {
        self.slice_mut(indexes).into_iter()
    }
    /// Return a reference to the element at `index`, or return `None`
    /// if the index is out of bounds.
    ///
    /// Arrays also support indexing syntax: `array[index]`.
    ///
    /// ```
    /// use ndarray::arr2;
    ///
    /// let a = arr2(&[[1., 2.],
    ///                [3., 4.]]);
    ///
    /// assert!(
    ///     a.get((0, 1)) == Some(&2.) &&
    ///     a.get((0, 2)) == None &&
    ///     a[(0, 1)] == 2.
    /// );
    /// ```
    pub fn get(&self, index: D) -> Option<&A> {
        let ptr = self.ptr;
        // stride_offset_checked yields Some(offset) only for in-bounds
        // indexes, so the pointer dereference below is sound.
        self.dim.stride_offset_checked(&self.strides, &index)
            .map(move |offset| unsafe {
                &*ptr.offset(offset)
            })
    }

    /// ***Deprecated: use .get(i)***
    #[cfg_attr(has_deprecated, deprecated(note="use .get() instead"))]
    pub fn at(&self, index: D) -> Option<&A> {
        self.get(index)
    }

    /// Return a mutable reference to the element at `index`, or return `None`
    /// if the index is out of bounds.
    pub fn get_mut(&mut self, index: D) -> Option<&mut A>
        where S: DataMut,
    {
        // Break sharing before handing out a mutable reference.
        self.ensure_unique();
        let ptr = self.ptr;
        self.dim.stride_offset_checked(&self.strides, &index)
            .map(move |offset| unsafe {
                &mut *ptr.offset(offset)
            })
    }

    /// ***Deprecated: use .get_mut(i)***
    #[cfg_attr(has_deprecated, deprecated(note="use .get_mut() instead"))]
    pub fn at_mut(&mut self, index: D) -> Option<&mut A>
        where S: DataMut,
    {
        self.get_mut(index)
    }
    /// Perform *unchecked* array indexing.
    ///
    /// Return a reference to the element at `index`.
    ///
    /// **Note:** only unchecked for non-debug builds of ndarray.
    #[inline]
    pub unsafe fn uget(&self, index: D) -> &A {
        // Caller contract: `index` must be in bounds (checked here only
        // in debug builds).
        debug_assert!(self.dim.stride_offset_checked(&self.strides, &index).is_some());
        let off = Dimension::stride_offset(&index, &self.strides);
        &*self.ptr.offset(off)
    }

    /// ***Deprecated: use `.uget()`***
    #[cfg_attr(has_deprecated, deprecated(note="use .uget() instead"))]
    #[inline]
    pub unsafe fn uchk_at(&self, index: D) -> &A {
        self.uget(index)
    }

    /// Perform *unchecked* array indexing.
    ///
    /// Return a mutable reference to the element at `index`.
    ///
    /// **Note:** Only unchecked for non-debug builds of ndarray.<br>
    /// **Note:** The array must be uniquely held when mutating it.
    #[inline]
    pub unsafe fn uget_mut(&mut self, index: D) -> &mut A
        where S: DataMut
    {
        //debug_assert!(Rc::get_mut(&mut self.data).is_some());
        debug_assert!(self.dim.stride_offset_checked(&self.strides, &index).is_some());
        let off = Dimension::stride_offset(&index, &self.strides);
        &mut *self.ptr.offset(off)
    }

    /// ***Deprecated: use `.uget_mut()`***
    #[cfg_attr(has_deprecated, deprecated(note="use .uget_mut() instead"))]
    #[inline]
    pub unsafe fn uchk_at_mut(&mut self, index: D) -> &mut A
        where S: DataMut
    {
        self.uget_mut(index)
    }
    /// Swap axes `ax` and `bx`.
    ///
    /// This does not move any data, it just adjusts the array’s dimensions
    /// and strides.
    ///
    /// **Panics** if the axes are out of bounds.
    ///
    /// ```
    /// use ndarray::arr2;
    ///
    /// let mut a = arr2(&[[1., 2., 3.]]);
    /// a.swap_axes(0, 1);
    /// assert!(
    ///     a == arr2(&[[1.], [2.], [3.]])
    /// );
    /// ```
    pub fn swap_axes(&mut self, ax: usize, bx: usize)
    {
        // Swapping both the axis lengths and the corresponding strides
        // is all a transposition of two axes requires.
        self.dim.slice_mut().swap(ax, bx);
        self.strides.slice_mut().swap(ax, bx);
    }
    /// Along `axis`, select the subview `index` and return an
    /// array with that axis removed.
    ///
    /// See [*Subviews*](#subviews) for full documentation.
    ///
    /// **Panics** if `axis` or `index` is out of bounds.
    ///
    /// ```
    /// use ndarray::{arr1, arr2};
    ///
    /// let a = arr2(&[[1., 2.],    // -- axis 0, row 0
    ///                [3., 4.],    // -- axis 0, row 1
    ///                [5., 6.]]);  // -- axis 0, row 2
    /// //               \   \
    /// //                \   axis 1, column 1
    /// //                 axis 1, column 0
    /// assert!(
    ///     a.subview(0, 1) == arr1(&[3., 4.]) &&
    ///     a.subview(1, 1) == arr1(&[2., 4., 6.])
    /// );
    /// ```
    pub fn subview(&self, axis: usize, index: Ix) -> ArrayBase<S, <D as RemoveAxis>::Smaller>
        where D: RemoveAxis,
              S: DataShared,
    {
        let mut res = self.clone();
        res.isubview(axis, index);
        // don't use reshape -- we always know it will fit the size,
        // and we can use remove_axis on the strides as well
        ArrayBase {
            data: res.data,
            ptr: res.ptr,
            dim: res.dim.remove_axis(axis),
            strides: res.strides.remove_axis(axis),
        }
    }

    /// Collapse dimension `axis` into length one,
    /// and select the subview of `index` along that axis.
    ///
    /// **Panics** if `index` is past the length of the axis.
    pub fn isubview(&mut self, axis: usize, index: Ix)
    {
        // do_sub advances ptr to the selected hyperplane and sets the
        // axis length to 1; strides are left unchanged.
        dimension::do_sub(&mut self.dim, &mut self.ptr, &self.strides, axis, index)
    }

    /// Along `axis`, select the subview `index` and return a read-write view
    /// with the axis removed.
    ///
    /// **Panics** if `axis` or `index` is out of bounds.
    ///
    /// ```
    /// use ndarray::{arr2, aview2};
    ///
    /// let mut a = arr2(&[[1., 2.],
    ///                    [3., 4.]]);
    ///
    /// a.subview_mut(1, 1).iadd_scalar(&10.);
    ///
    /// assert!(
    ///     a == aview2(&[[1., 12.],
    ///                   [3., 14.]])
    /// );
    /// ```
    pub fn subview_mut(&mut self, axis: usize, index: Ix)
        -> ArrayViewMut<A, D::Smaller>
        where S: DataMut,
              D: RemoveAxis,
    {
        let mut res = self.view_mut();
        res.isubview(axis, index);
        ArrayBase {
            data: res.data,
            ptr: res.ptr,
            dim: res.dim.remove_axis(axis),
            strides: res.strides.remove_axis(axis),
        }
    }

    /// ***Deprecated: use `.subview_mut()`***
    #[cfg_attr(has_deprecated, deprecated(note="use .subview_mut() instead"))]
    pub fn sub_iter_mut(&mut self, axis: usize, index: Ix)
        -> ElementsMut<A, D>
        where S: DataMut,
    {
        let mut it = self.view_mut();
        dimension::do_sub(&mut it.dim, &mut it.ptr, &it.strides, axis, index);
        it.into_iter_()
    }
    /// Return an iterator that traverses over all dimensions but the innermost,
    /// and yields each inner row.
    ///
    /// Iterator element is `ArrayView<A, Ix>` (1D array view).
    ///
    /// ```
    /// use ndarray::arr3;
    /// let a = arr3(&[[[ 0,  1,  2],    // -- row 0, 0
    ///                 [ 3,  4,  5]],   // -- row 0, 1
    ///                [[ 6,  7,  8],    // -- row 1, 0
    ///                 [ 9, 10, 11]]]); // -- row 1, 1
    /// // `inner_iter` yields the four inner rows of the 3D array.
    /// let mut row_sums = a.inner_iter().map(|v| v.scalar_sum());
    /// assert_eq!(row_sums.collect::<Vec<_>>(), vec![3, 12, 21, 30]);
    /// ```
    pub fn inner_iter(&self) -> InnerIter<A, D> {
        iterators::new_outer(self.view())
    }

    /// Return an iterator that traverses over all dimensions but the innermost,
    /// and yields each inner row.
    ///
    /// Iterator element is `ArrayViewMut<A, Ix>` (1D read-write array view).
    pub fn inner_iter_mut(&mut self) -> InnerIterMut<A, D>
        where S: DataMut
    {
        iterators::new_outer_mut(self.view_mut())
    }
// Return (length, stride) for diagonal
fn diag_params(&self) -> (Ix, Ixs)
{
/* empty shape has len 1 */
let len = self.dim.slice().iter().map(|x| *x).min().unwrap_or(1);
let stride = self.strides.slice().iter()
.map(|x| *x as Ixs)
.fold(0, |sum, s| sum + s);
return (len, stride)
}
    /// Return an iterator over the diagonal elements of the array.
    ///
    /// The diagonal is simply the sequence indexed by *(0, 0, .., 0)*,
    /// *(1, 1, ..., 1)* etc as long as all axes have elements.
    pub fn diag_iter(&self) -> Elements<A, Ix>
    {
        let (len, stride) = self.diag_params();
        // Build a temporary 1D view over the same buffer; its single
        // stride steps along every axis at once.
        let view = ArrayBase {
            data: self.raw_data(),
            ptr: self.ptr,
            dim: len,
            strides: stride as Ix,
        };
        view.into_iter_()
    }

    /// Return the diagonal as a one-dimensional array.
    pub fn diag(&self) -> ArrayBase<S, Ix>
        where S: DataShared,
    {
        let (len, stride) = self.diag_params();
        ArrayBase {
            data: self.data.clone(),
            ptr: self.ptr,
            dim: len,
            strides: stride as Ix,
        }
    }

    /// Return a read-write view over the diagonal elements of the array.
    pub fn diag_mut(&mut self) -> ArrayViewMut<A, Ix>
        where S: DataMut,
    {
        // Break sharing before handing out a mutable view.
        self.ensure_unique();
        let (len, stride) = self.diag_params();
        ArrayViewMut {
            ptr: self.ptr,
            data: self.raw_data_mut(),
            dim: len,
            strides: stride as Ix,
        }
    }

    /// ***Deprecated: use `.diag_mut()`***
    #[cfg_attr(has_deprecated, deprecated(note="use .diag_mut() instead"))]
    pub fn diag_iter_mut(&mut self) -> ElementsMut<A, Ix>
        where S: DataMut,
    {
        self.diag_mut().into_iter_()
    }
    /// Make the array unshared.
    ///
    /// This method is mostly only useful with unsafe code.
    fn ensure_unique(&mut self)
        where S: DataMut
    {
        // The storage-specific hook may reallocate and move `ptr`;
        // verify the invariant both before and after.
        debug_assert!(self.pointer_is_inbounds());
        S::ensure_unique(self);
        debug_assert!(self.pointer_is_inbounds());
    }
#[cfg(feature = "rblas")]
/// If the array is not in the standard layout, copy all elements
/// into the standard layout so that the array is C-contiguous.
fn ensure_standard_layout(&mut self)
where S: DataOwned,
A: Clone
{
if !self.is_standard_layout() {
let mut v: Vec<A> = self.iter().cloned().collect();
self.ptr = v.as_mut_ptr();
self.data = DataOwned::new(v);
self.strides = self.dim.default_strides();
}
}
/*
/// Set the array to the standard layout, without adjusting elements.
/// Useful for overwriting.
fn force_standard_layout(&mut self) {
self.strides = self.dim.default_strides();
}
*/
    /// Return `true` if the array data is laid out in contiguous “C order” in
    /// memory (where the last index is the most rapidly varying).
    ///
    /// Return `false` otherwise, i.e the array is possibly not
    /// contiguous in memory, it has custom strides, etc.
    pub fn is_standard_layout(&self) -> bool
    {
        let defaults = self.dim.default_strides();
        if self.strides == defaults {
            return true;
        }
        // check all dimensions -- a dimension of length 1 can have unequal strides
        // (its stride is never used, so any value is still "standard").
        for (&dim, (&s, &ds)) in zipsl(self.dim.slice(),
                                       zipsl(self.strides(), defaults.slice()))
        {
            if dim != 1 && s != (ds as Ixs) {
                return false;
            }
        }
        true
    }
    /// Return the array’s data as a slice, if it is contiguous and
    /// the element order corresponds to the memory order. Return `None` otherwise.
    pub fn as_slice(&self) -> Option<&[A]> {
        if self.is_standard_layout() {
            unsafe {
                // Sound: standard layout means `len()` contiguous
                // elements starting at `ptr`.
                Some(slice::from_raw_parts(self.ptr, self.len()))
            }
        } else {
            None
        }
    }

    /// Return the array’s data as a slice, if it is contiguous and
    /// the element order corresponds to the memory order. Return `None` otherwise.
    pub fn as_slice_mut(&mut self) -> Option<&mut [A]>
        where S: DataMut
    {
        if self.is_standard_layout() {
            // Break sharing before exposing mutable access.
            self.ensure_unique();
            unsafe {
                Some(slice::from_raw_parts_mut(self.ptr, self.len()))
            }
        } else {
            None
        }
    }
    /// Transform the array into `shape`; any shape with the same number of
    /// elements is accepted.
    ///
    /// May clone all elements if needed to arrange elements in standard
    /// layout (and break sharing).
    ///
    /// **Panics** if shapes are incompatible.
    ///
    /// ```
    /// use ndarray::{arr1, arr2};
    ///
    /// assert!(
    ///     arr1(&[1., 2., 3., 4.]).reshape((2, 2))
    ///     == arr2(&[[1., 2.],
    ///               [3., 4.]])
    /// );
    /// ```
    pub fn reshape<E: Dimension>(&self, shape: E) -> ArrayBase<S, E>
        where S: DataShared + DataOwned, A: Clone,
    {
        if shape.size() != self.dim.size() {
            panic!("Incompatible shapes in reshape, attempted from: {:?}, to: {:?}",
                   self.dim.slice(), shape.slice())
        }
        // Check if contiguous, if not => copy all, else just adapt strides
        if self.is_standard_layout() {
            // Same buffer (cheap shared clone), new shape and strides.
            let cl = self.clone();
            ArrayBase {
                data: cl.data,
                ptr: cl.ptr,
                strides: shape.default_strides(),
                dim: shape,
            }
        } else {
            // Not contiguous: materialize the elements in index order.
            let v = self.iter().map(|x| x.clone()).collect::<Vec<A>>();
            unsafe {
                // Sound: sizes were checked equal above.
                ArrayBase::from_vec_dim(shape, v)
            }
        }
    }
    /// Transform the array into `shape`; any shape with the same number of
    /// elements is accepted, but the source array or view must be
    /// contiguous, otherwise we cannot rearrange the dimension.
    ///
    /// **Errors** if the shapes don't have the same number of elements.<br>
    /// **Errors** if the input array is not c-contiguous (this will be
    /// slightly improved in the future).
    ///
    /// ```
    /// use ndarray::{aview1, aview2};
    ///
    /// assert!(
    ///     aview1(&[1., 2., 3., 4.]).into_shape((2, 2)).unwrap()
    ///     == aview2(&[[1., 2.],
    ///                 [3., 4.]])
    /// );
    /// ```
    pub fn into_shape<E>(self, shape: E) -> Result<ArrayBase<S, E>, ShapeError>
        where E: Dimension
    {
        if shape.size() != self.dim.size() {
            return Err(ShapeError::IncompatibleShapes(
                self.dim.slice().to_vec().into_boxed_slice(),
                shape.slice().to_vec().into_boxed_slice()));
        }
        // Check if contiguous, if not => copy all, else just adapt strides
        if self.is_standard_layout() {
            // Reuse the buffer: only shape and strides change.
            Ok(ArrayBase {
                data: self.data,
                ptr: self.ptr,
                strides: shape.default_strides(),
                dim: shape,
            })
        } else {
            // Unlike `reshape`, this consuming variant never copies.
            Err(ShapeError::IncompatibleLayout)
        }
    }
    /// Act like a larger size and/or shape array by *broadcasting*
    /// into a larger shape, if possible.
    ///
    /// Return `None` if shapes can not be broadcast together.
    ///
    /// ***Background***
    ///
    ///  * Two axes are compatible if they are equal, or one of them is 1.
    ///  * In this instance, only the axes of the smaller side (self) can be 1.
    ///
    /// Compare axes beginning with the *last* axis of each shape.
    ///
    /// For example (1, 2, 4) can be broadcast into (7, 6, 2, 4)
    /// because its axes are either equal or 1 (or missing);
    /// while (2, 2) can *not* be broadcast into (2, 4).
    ///
    /// The implementation creates a view with strides set to zero for the
    /// axes that are to be repeated.
    ///
    /// The broadcasting documentation for Numpy has more information.
    ///
    /// ```
    /// use ndarray::arr1;
    ///
    /// assert!(
    ///     arr1(&[1., 0.]).broadcast((10, 2)).unwrap().dim()
    ///     == (10, 2)
    /// );
    /// ```
    pub fn broadcast<E>(&self, dim: E)
        -> Option<ArrayView<A, E>>
        where E: Dimension
    {
        /// Return new stride when trying to grow `from` into shape `to`
        ///
        /// Broadcasting works by returning a "fake stride" where elements
        /// to repeat are in axes with 0 stride, so that several indexes point
        /// to the same element.
        ///
        /// **Note:** Cannot be used for mutable iterators, since repeating
        /// elements would create aliasing pointers.
        fn upcast<D: Dimension, E: Dimension>(to: &D, from: &E, stride: &E) -> Option<D> {
            // `new_stride` starts as a copy of `to`, so each slot `dr`
            // initially holds the target axis length; it is overwritten
            // with the stride for that axis below.
            let mut new_stride = to.clone();
            // begin at the back (the least significant dimension)
            // size of the axis has to either agree or `from` has to be 1
            if to.ndim() < from.ndim() {
                return None
            }
            {
                let mut new_stride_iter = new_stride.slice_mut().iter_mut().rev();
                // er: source axis length, es: source stride,
                // dr: target slot (holds the target axis length).
                for ((er, es), dr) in from.slice().iter().rev()
                                          .zip(stride.slice().iter().rev())
                                          .zip(new_stride_iter.by_ref())
                {
                    /* update strides */
                    if *dr == *er {
                        /* keep stride */
                        *dr = *es;
                    } else if *er == 1 {
                        /* dead dimension, zero stride */
                        *dr = 0
                    } else {
                        return None;
                    }
                }
                /* set remaining strides to zero */
                // (leading axes missing from `from` are pure repeats)
                for dr in new_stride_iter {
                    *dr = 0;
                }
            }
            Some(new_stride)
        }

        // Note: zero strides are safe precisely because we return an read-only view
        let broadcast_strides =
            match upcast(&dim, &self.dim, &self.strides) {
                Some(st) => st,
                None => return None,
            };
        Some(ArrayView {
            data: self.raw_data(),
            ptr: self.ptr,
            dim: dim,
            strides: broadcast_strides,
        })
    }

    #[cfg_attr(has_deprecated, deprecated(note="use .broadcast() instead"))]
    /// ***Deprecated: Use `.broadcast()` instead.***
    pub fn broadcast_iter<E>(&self, dim: E) -> Option<Elements<A, E>>
        where E: Dimension,
    {
        self.broadcast(dim).map(|v| v.into_iter_())
    }

    /// Like `broadcast`, but panics instead of returning `None`.
    #[inline]
    fn broadcast_unwrap<E>(&self, dim: E) -> ArrayView<A, E>
        where E: Dimension,
    {
        match self.broadcast(dim.clone()) {
            Some(it) => it,
            None => Self::broadcast_panic(&self.dim, &dim),
        }
    }

    // Out-of-line panic path for broadcast_unwrap.
    #[inline(never)]
    fn broadcast_panic<E: Dimension>(from: &D, to: &E) -> ! {
        panic!("Could not broadcast array from shape: {:?} to: {:?}",
               from.slice(), to.slice())
    }
    /// Return a slice of the array’s backing data in memory order.
    ///
    /// **Note:** Data memory order may not correspond to the index order
    /// of the array. Neither is the raw data slice is restricted to just the
    /// Array’s view.<br>
    /// **Note:** the slice may be empty.
    pub fn raw_data(&self) -> &[A] {
        self.data.slice()
    }

    /// Return a mutable slice of the array’s backing data in memory order.
    ///
    /// **Note:** Data memory order may not correspond to the index order
    /// of the array. Neither is the raw data slice is restricted to just the
    /// Array’s view.<br>
    /// **Note:** the slice may be empty.
    ///
    /// **Note:** The data is uniquely held and nonaliased
    /// while it is mutably borrowed.
    pub fn raw_data_mut(&mut self) -> &mut [A]
        where S: DataMut,
    {
        self.ensure_unique();
        self.data.slice_mut()
    }

    /// Debug-check helper: `ptr` must lie within the backing buffer
    /// (the one-past-the-end address is accepted).
    fn pointer_is_inbounds(&self) -> bool {
        let slc = self.data.slice();
        if slc.is_empty() {
            // special case for data-less views
            return true;
        }
        let ptr = slc.as_ptr() as *mut _;
        let end = unsafe {
            ptr.offset(slc.len() as isize)
        };
        self.ptr >= ptr && self.ptr <= end
    }
    /// Perform an elementwise assigment to `self` from `rhs`.
    ///
    /// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
    ///
    /// **Panics** if broadcasting isn’t possible.
    pub fn assign<E: Dimension, S2>(&mut self, rhs: &ArrayBase<S2, E>)
        where S: DataMut,
              A: Clone,
              S2: Data<Elem=A>,
    {
        self.zip_mut_with(rhs, |x, y| *x = y.clone());
    }

    /// Perform an elementwise assigment to `self` from scalar `x`.
    pub fn assign_scalar(&mut self, x: &A)
        where S: DataMut, A: Clone,
    {
        self.unordered_foreach_mut(move |elt| *elt = x.clone());
    }

    /// Apply closure `f` to each element in the array, in whatever
    /// order is the fastest to visit.
    fn unordered_foreach_mut<F>(&mut self, mut f: F)
        where S: DataMut,
              F: FnMut(&mut A)
    {
        // Fast path: contiguous data as one slice.
        if let Some(slc) = self.as_slice_mut() {
            for elt in slc {
                f(elt);
            }
            return;
        }
        // Fallback: visit row by row via the inner-row iterator.
        for row in self.inner_iter_mut() {
            for elt in row {
                f(elt);
            }
        }
    }
    /// Zip `self` with an equal-shaped `rhs`, applying `f` to each
    /// element pair. Takes a slice/slice fast path when both sides are
    /// contiguous.
    fn zip_with_mut_same_shape<B, S2, E, F>(&mut self, rhs: &ArrayBase<S2, E>, mut f: F)
        where S: DataMut,
              S2: Data<Elem=B>,
              E: Dimension,
              F: FnMut(&mut A, &B)
    {
        debug_assert_eq!(self.shape(), rhs.shape());
        if let Some(self_s) = self.as_slice_mut() {
            if let Some(rhs_s) = rhs.as_slice() {
                // Equal shapes imply equal lengths; `min` is belt and
                // braces to keep the indexing below in bounds.
                let len = cmp::min(self_s.len(), rhs_s.len());
                let s = &mut self_s[..len];
                let r = &rhs_s[..len];
                for i in 0..len {
                    f(&mut s[i], &r[i]);
                }
                return;
            }
        }
        // otherwise, fall back to the outer iter
        self.zip_with_mut_outer_iter(rhs, f);
    }

    /// Zip row by row via the inner-row iterators; per row, still try
    /// the slice/slice fast path until it fails once.
    #[inline(always)]
    fn zip_with_mut_outer_iter<B, S2, E, F>(&mut self, rhs: &ArrayBase<S2, E>, mut f: F)
        where S: DataMut,
              S2: Data<Elem=B>,
              E: Dimension,
              F: FnMut(&mut A, &B)
    {
        debug_assert_eq!(self.shape(), rhs.shape());
        // otherwise, fall back to the outer iter
        // Once one row pair is non-contiguous, stop probing: row layout
        // is uniform across the array.
        let mut try_slices = true;
        let mut rows = self.inner_iter_mut().zip(rhs.inner_iter());
        for (mut s_row, r_row) in &mut rows {
            if try_slices {
                if let Some(self_s) = s_row.as_slice_mut() {
                    if let Some(rhs_s) = r_row.as_slice() {
                        let len = cmp::min(self_s.len(), rhs_s.len());
                        let s = &mut self_s[..len];
                        let r = &rhs_s[..len];
                        for i in 0..len {
                            f(&mut s[i], &r[i]);
                        }
                        continue;
                    }
                }
                try_slices = false;
            }
            // FIXME: Regular .zip() is slow
            for (y, x) in s_row.iter_mut().zip(r_row) {
                f(y, x);
            }
        }
    }
    // FIXME: Guarantee the order here or not?
    /// Traverse two arrays in unspecified order, in lock step,
    /// calling the closure `f` on each element pair.
    ///
    /// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
    ///
    /// **Panics** if broadcasting isn’t possible.
    #[inline]
    pub fn zip_mut_with<B, S2, E, F>(&mut self, rhs: &ArrayBase<S2, E>, mut f: F)
        where S: DataMut,
              S2: Data<Elem=B>,
              E: Dimension,
              F: FnMut(&mut A, &B)
    {
        if self.dim.ndim() == rhs.dim.ndim() && self.shape() == rhs.shape() {
            // Shapes agree exactly: no broadcasting needed.
            self.zip_with_mut_same_shape(rhs, f);
        } else if rhs.dim.ndim() == 0 {
            // Skip broadcast from 0-dim array
            // FIXME: Order
            unsafe {
                // Sound: a 0-dim array has exactly one element at ptr.
                let rhs_elem = &*rhs.ptr;
                let f_ = &mut f;
                self.unordered_foreach_mut(move |elt| f_(elt, rhs_elem));
            }
        } else {
            // Panics here if `rhs` cannot broadcast to `self`'s shape.
            let rhs_broadcast = rhs.broadcast_unwrap(self.dim());
            self.zip_with_mut_outer_iter(&rhs_broadcast, f);
        }
    }

    /// Traverse the array elements in order and apply a fold,
    /// returning the resulting value.
    pub fn fold<'a, F, B>(&'a self, mut init: B, mut f: F) -> B
        where F: FnMut(B, &'a A) -> B, A: 'a
    {
        // Fast path: contiguous data as one slice.
        if let Some(slc) = self.as_slice() {
            for elt in slc {
                init = f(init, elt);
            }
            return init;
        }
        for row in self.inner_iter() {
            for elt in row {
                init = f(init, elt);
            }
        }
        init
    }
    /// Apply `f` elementwise and return a new array with
    /// the results.
    ///
    /// Return an array with the same shape as *self*.
    ///
    /// ```
    /// use ndarray::arr2;
    ///
    /// let a = arr2(&[[ 0., 1.],
    ///                [-1., 2.]]);
    /// assert!(
    ///     a.map(|x| *x >= 1.0)
    ///     == arr2(&[[false, true],
    ///               [false, true]])
    /// );
    /// ```
    pub fn map<'a, B, F>(&'a self, mut f: F) -> OwnedArray<B, D>
        where F: FnMut(&'a A) -> B,
              A: 'a,
    {
        // Collect results in index order; capacity is exact.
        let mut res = Vec::with_capacity(self.dim.size());
        for elt in self.iter() {
            res.push(f(elt))
        }
        unsafe {
            // Sound: `res` has exactly `self.dim.size()` elements.
            ArrayBase::from_vec_dim(self.dim.clone(), res)
        }
    }
}
/// Return an array filled with zeros
///
/// Convenience wrapper producing a uniquely-owned array.
pub fn zeros<A, D>(dim: D) -> OwnedArray<A, D>
    where A: Clone + libnum::Zero, D: Dimension,
{
    ArrayBase::from_elem(dim, libnum::zero())
}
/// Return a zero-dimensional array with the element `x`.
pub fn arr0<A>(x: A) -> Array<A, ()>
{
    // Sound: the zero-dimensional shape () indexes exactly one element,
    // matching vec![x].
    unsafe { Array::from_vec_dim((), vec![x]) }
}
/// Return a one-dimensional array with elements from `xs`.
pub fn arr1<A: Clone>(xs: &[A]) -> Array<A, Ix>
{
    Array::from_iter(xs.iter().cloned())
}
/// Return a zero-dimensional array view borrowing `x`.
pub fn aview0<A>(x: &A) -> ArrayView<A, ()> {
    let data = unsafe {
        // Sound: a one-element slice over the borrowed `x`; the view's
        // lifetime is tied to the borrow of `x`.
        std::slice::from_raw_parts(x, 1)
    };
    ArrayView {
        data: data,
        ptr: data.as_ptr() as *mut _,
        dim: (),
        strides: (),
    }
}
/// Return a one-dimensional array view with elements borrowing `xs`.
///
/// ```
/// use ndarray::aview1;
///
/// let data = [1.0; 1024];
///
/// // Create a 2D array view from borrowed data
/// let a2d = aview1(&data).into_shape((32, 32)).unwrap();
///
/// assert!(
///     a2d.scalar_sum() == 1024.0
/// );
/// ```
pub fn aview1<A>(xs: &[A]) -> ArrayView<A, Ix> {
    ArrayView {
        data: xs,
        // The view never writes through this pointer (read-only
        // storage), so the const->mut cast is not used for mutation.
        ptr: xs.as_ptr() as *mut _,
        dim: xs.len() as Ix,
        strides: 1,
    }
}
/// Return a two-dimensional array view with elements borrowing `xs`.
pub fn aview2<A, V: FixedInitializer<Elem=A>>(xs: &[V]) -> ArrayView<A, (Ix, Ix)> {
    // Row length is known statically from the fixed-size initializer type.
    let cols = V::len();
    let rows = xs.len();
    let data = unsafe {
        // SAFETY: `[V]` with `V` a fixed-size array of `A` is laid out as
        // `rows * cols` contiguous elements of `A`.
        std::slice::from_raw_parts(xs.as_ptr() as *const A, cols * rows)
    };
    let dim = (rows as Ix, cols as Ix);
    ArrayView {
        data: data,
        ptr: data.as_ptr() as *mut _,
        // Row-major (C order) strides for the contiguous layout.
        strides: dim.default_strides(),
        dim: dim,
    }
}
/// Return a one-dimensional read-write array view with elements borrowing `xs`.
///
/// ```
/// #[macro_use(s)]
/// extern crate ndarray;
///
/// use ndarray::aview_mut1;
///
/// // Create an array view over some data, then slice it and modify it.
/// fn main() {
///     let mut data = [0; 1024];
///     {
///         let mut a = aview_mut1(&mut data).into_shape((32, 32)).unwrap();
///         a.slice_mut(s![.., ..;3]).assign_scalar(&5);
///     }
///     assert_eq!(&data[..10], [5, 0, 0, 5, 0, 0, 5, 0, 0, 5]);
/// }
/// ```
pub fn aview_mut1<A>(xs: &mut [A]) -> ArrayViewMut<A, Ix> {
    ArrayViewMut {
        // Take the raw pointer before `xs` is moved into the `data` field.
        ptr: xs.as_mut_ptr(),
        dim: xs.len() as Ix,
        // Contiguous: unit stride.
        strides: 1,
        data: xs,
    }
}
/// Slice or fixed-size array used for array initialization
pub unsafe trait Initializer {
    type Elem;
    /// View the initializer as a slice of its elements.
    fn as_init_slice(&self) -> &[Self::Elem];
    /// `true` when every value of the implementing type has the same,
    /// statically known length (fixed-size arrays); lets callers skip
    /// per-row length checks.
    fn is_fixed_size() -> bool { false }
}
/// Fixed-size array used for array initialization
pub unsafe trait FixedInitializer: Initializer {
    /// The statically known element count of the initializer type.
    fn len() -> usize;
}
// A bare slice is a variable-length initializer (`is_fixed_size` stays false).
unsafe impl<T> Initializer for [T] {
    type Elem = T;
    fn as_init_slice(&self) -> &[T] {
        self
    }
}
// Implements `Initializer` + `FixedInitializer` for `[T; N]` for each listed
// length. Pre-const-generics Rust requires one impl per array size, hence the
// recursive macro over an explicit list of lengths.
macro_rules! impl_arr_init {
    (__impl $n: expr) => (
        unsafe impl<T> Initializer for [T; $n] {
            type Elem = T;
            fn as_init_slice(&self) -> &[T] { self }
            // Length is part of the type, so rows never need runtime checks.
            fn is_fixed_size() -> bool { true }
        }
        unsafe impl<T> FixedInitializer for [T; $n] {
            fn len() -> usize { $n }
        }
    );
    // Base case: empty list, nothing left to implement.
    () => ();
    // Recursive case: implement for the head, recurse on the tail.
    ($n: expr, $($m:expr,)*) => (
        impl_arr_init!(__impl $n);
        impl_arr_init!($($m,)*);
    )
}
impl_arr_init!(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,);
/// Return a two-dimensional array with elements from `xs`.
///
/// **Panics** if the slices are not all of the same length.
///
/// ```
/// use ndarray::arr2;
///
/// let a = arr2(&[[1, 2, 3],
///                [4, 5, 6]]);
/// assert!(
///     a.shape() == [2, 3]
/// );
/// ```
pub fn arr2<A: Clone, V: Initializer<Elem=A>>(xs: &[V]) -> Array<A, (Ix, Ix)>
{
    // FIXME: Simplify this when V is fix size array
    // Rows × columns; the column count comes from the first row (0 if empty).
    let (m, n) = (xs.len() as Ix,
                  xs.get(0).map_or(0, |snd| snd.as_init_slice().len() as Ix));
    let dim = (m, n);
    let mut result = Vec::<A>::with_capacity(dim.size());
    for snd in xs.iter() {
        let snd = snd.as_init_slice();
        // Fixed-size initializers are equal-length by construction; otherwise
        // enforce that every row matches the first.
        assert!(<V as Initializer>::is_fixed_size() || snd.len() as Ix == n);
        // `.cloned()` is the idiomatic form of `.map(|x| x.clone())`.
        result.extend(snd.iter().cloned())
    }
    unsafe {
        // `result` holds exactly m * n elements in row-major order.
        Array::from_vec_dim(dim, result)
    }
}
/// Return a three-dimensional array with elements from `xs`.
///
/// **Panics** if the slices are not all of the same length.
///
/// ```
/// use ndarray::arr3;
///
/// let a = arr3(&[[[1, 2],
///                 [3, 4]],
///                [[5, 6],
///                 [7, 8]],
///                [[9, 0],
///                 [1, 2]]]);
/// assert!(
///     a.shape() == [3, 2, 2]
/// );
/// ```
pub fn arr3<A: Clone, V: Initializer<Elem=U>, U: Initializer<Elem=A>>(xs: &[V])
    -> Array<A, (Ix, Ix, Ix)>
{
    // FIXME: Simplify this when U/V are fix size arrays
    // Outer, middle and inner lengths are taken from the first elements of
    // each level (0 if a level is empty).
    let m = xs.len() as Ix;
    let fst = xs.get(0).map(|snd| snd.as_init_slice());
    let thr = fst.and_then(|elt| elt.get(0).map(|elt2| elt2.as_init_slice()));
    let n = fst.map_or(0, |v| v.len() as Ix);
    let o = thr.map_or(0, |v| v.len() as Ix);
    let dim = (m, n, o);
    let mut result = Vec::<A>::with_capacity(dim.size());
    for snd in xs.iter() {
        let snd = snd.as_init_slice();
        // Only variable-length (slice) initializers need runtime length checks.
        assert!(<V as Initializer>::is_fixed_size() || snd.len() as Ix == n);
        for thr in snd.iter() {
            let thr = thr.as_init_slice();
            assert!(<U as Initializer>::is_fixed_size() || thr.len() as Ix == o);
            // `.cloned()` is the idiomatic form of `.map(|x| x.clone())`.
            result.extend(thr.iter().cloned())
        }
    }
    unsafe {
        // `result` holds exactly m * n * o elements in row-major order.
        Array::from_vec_dim(dim, result)
    }
}
// Reductions and comparisons available on any readable array.
impl<A, S, D> ArrayBase<S, D>
    where S: Data<Elem=A>,
          D: Dimension,
{
    /// Return sum along `axis`.
    ///
    /// ```
    /// use ndarray::{aview0, aview1, arr2};
    ///
    /// let a = arr2(&[[1., 2.],
    ///                [3., 4.]]);
    /// assert!(
    ///     a.sum(0) == aview1(&[4., 6.]) &&
    ///     a.sum(1) == aview1(&[3., 7.]) &&
    ///
    ///     a.sum(0).sum(0) == aview0(&10.)
    /// );
    /// ```
    ///
    /// **Panics** if `axis` is out of bounds.
    pub fn sum(&self, axis: usize) -> OwnedArray<A, <D as RemoveAxis>::Smaller>
        where A: Clone + Add<Output=A>,
              D: RemoveAxis,
    {
        let n = self.shape()[axis];
        // Seed the accumulator with subview 0, then add the remaining
        // subviews elementwise in place.
        let mut res = self.view().subview(axis, 0).to_owned();
        for i in 1..n {
            let view = self.view().subview(axis, i);
            res.iadd(&view);
        }
        res
    }
    /// Return the sum of all elements in the array.
    ///
    /// ```
    /// use ndarray::arr2;
    ///
    /// let a = arr2(&[[1., 2.],
    ///                [3., 4.]]);
    /// assert_eq!(a.scalar_sum(), 10.);
    /// ```
    pub fn scalar_sum(&self) -> A
        where A: Clone + Add<Output=A> + libnum::Zero,
    {
        // Fast path: fully contiguous array, one unrolled pass.
        if let Some(slc) = self.as_slice() {
            return Self::unrolled_sum(slc);
        }
        // Otherwise sum row by row, still using the unrolled kernel for any
        // row that happens to be contiguous.
        let mut sum = A::zero();
        for row in self.inner_iter() {
            if let Some(slc) = row.as_slice() {
                sum = sum + Self::unrolled_sum(slc);
            } else {
                sum = sum + row.fold(A::zero(), |acc, elt| acc + elt.clone());
            }
        }
        sum
    }
    fn unrolled_sum(mut xs: &[A]) -> A
        where A: Clone + Add<Output=A> + libnum::Zero,
    {
        // eightfold unrolled so that floating point can be vectorized
        // (even with strict floating point accuracy semantics)
        let mut sum = A::zero();
        let (mut p0, mut p1, mut p2, mut p3,
             mut p4, mut p5, mut p6, mut p7) =
            (A::zero(), A::zero(), A::zero(), A::zero(),
             A::zero(), A::zero(), A::zero(), A::zero());
        while xs.len() >= 8 {
            p0 = p0 + xs[0].clone();
            p1 = p1 + xs[1].clone();
            p2 = p2 + xs[2].clone();
            p3 = p3 + xs[3].clone();
            p4 = p4 + xs[4].clone();
            p5 = p5 + xs[5].clone();
            p6 = p6 + xs[6].clone();
            p7 = p7 + xs[7].clone();
            xs = &xs[8..];
        }
        // Combine the eight partial sums pairwise, then fold in the (< 8)
        // leftover elements.
        sum = sum.clone() + (p0 + p4);
        sum = sum.clone() + (p1 + p5);
        sum = sum.clone() + (p2 + p6);
        sum = sum.clone() + (p3 + p7);
        for elt in xs {
            sum = sum.clone() + elt.clone();
        }
        sum
    }
    /// Return mean along `axis`.
    ///
    /// ```
    /// use ndarray::{aview1, arr2};
    ///
    /// let a = arr2(&[[1., 2.],
    ///                [3., 4.]]);
    /// assert!(
    ///     a.mean(0) == aview1(&[2.0, 3.0]) &&
    ///     a.mean(1) == aview1(&[1.5, 3.5])
    /// );
    /// ```
    ///
    ///
    /// **Panics** if `axis` is out of bounds.
    #[allow(deprecated)]
    pub fn mean(&self, axis: usize) -> OwnedArray<A, <D as RemoveAxis>::Smaller>
        where A: Copy + Field,
              D: RemoveAxis,
    {
        let n = self.shape()[axis];
        let mut sum = self.sum(axis);
        // Build the divisor `n` as a value of type A by repeated addition of
        // one — avoids requiring a usize→A conversion trait.
        let one = libnum::one::<A>();
        let mut cnt = one;
        for _ in 1..n {
            cnt = cnt + one;
        }
        sum.idiv_scalar(&cnt);
        sum
    }
    /// Return `true` if the arrays' elementwise differences are all within
    /// the given absolute tolerance.<br>
    /// Return `false` otherwise, or if the shapes disagree.
    pub fn allclose<S2>(&self, rhs: &ArrayBase<S2, D>, tol: A) -> bool
        where A: Float + PartialOrd,
              S2: Data<Elem=A>,
    {
        // Shape check first; `zip` would silently truncate otherwise.
        self.shape() == rhs.shape() &&
        self.iter().zip(rhs.iter()).all(|(x, y)| (*x - *y).abs() <= tol)
    }
}
// Operations specific to two-dimensional arrays (matrices).
impl<A, S> ArrayBase<S, (Ix, Ix)>
    where S: Data<Elem=A>,
{
    // Build a 1-D element iterator over `len` elements starting at `ptr`,
    // stepping by `stride`. Caller must guarantee the pointer/stride pair
    // stays in bounds for the iterator's lifetime.
    unsafe fn one_dimensional_iter<'a>(ptr: *mut A, len: Ix, stride: Ix)
        -> Elements<'a, A, Ix>
    {
        // NOTE: `data` field is unused by into_iter
        let view = ArrayView {
            data: &[],
            ptr: ptr,
            dim: len,
            strides: stride,
        };
        view.into_iter_()
    }
    /// Return an iterator over the elements of row `index`.
    ///
    /// **Panics** if `index` is out of bounds.
    pub fn row_iter(&self, index: Ix) -> Elements<A, Ix>
    {
        let (m, n) = self.dim;
        let (sr, sc) = self.strides;
        assert!(index < m);
        unsafe {
            // Offset to the start of the row; iterate `n` columns with the
            // column stride.
            Self::one_dimensional_iter(self.ptr.offset(stride_offset(index, sr)), n, sc)
        }
    }
    /// Return an iterator over the elements of column `index`.
    ///
    /// **Panics** if `index` is out of bounds.
    pub fn col_iter(&self, index: Ix) -> Elements<A, Ix>
    {
        let (m, n) = self.dim;
        let (sr, sc) = self.strides;
        assert!(index < n);
        unsafe {
            // Offset to the top of the column; iterate `m` rows with the
            // row stride.
            Self::one_dimensional_iter(self.ptr.offset(stride_offset(index, sc)), m, sr)
        }
    }
    /// Perform matrix multiplication of rectangular arrays `self` and `rhs`.
    ///
    /// The array sizes must agree in the way that
    /// if `self` is *M* × *N*, then `rhs` is *N* × *K*.
    ///
    /// Return a result array with shape *M* × *K*.
    ///
    /// **Panics** if sizes are incompatible.
    ///
    /// ```
    /// use ndarray::arr2;
    ///
    /// let a = arr2(&[[1., 2.],
    ///                [0., 1.]]);
    /// let b = arr2(&[[1., 2.],
    ///                [2., 3.]]);
    ///
    /// assert!(
    ///     a.mat_mul(&b) == arr2(&[[5., 8.],
    ///                             [2., 3.]])
    /// );
    /// ```
    ///
    #[allow(deprecated)]
    pub fn mat_mul(&self, rhs: &ArrayBase<S, (Ix, Ix)>) -> Array<A, (Ix, Ix)>
        where A: Copy + Ring
    {
        // NOTE: Matrix multiplication only defined for simple types to
        // avoid trouble with panicking + and *, and destructors
        let ((m, a), (b, n)) = (self.dim, rhs.dim);
        let (self_columns, other_rows) = (a, b);
        assert!(self_columns == other_rows);
        // Avoid initializing the memory in vec -- set it during iteration
        // Panic safe because A: Copy
        let mut res_elems = Vec::<A>::with_capacity(m as usize * n as usize);
        unsafe {
            res_elems.set_len(m as usize * n as usize);
        }
        // (i, j) walk the output in row-major order, matching iter_mut().
        let mut i = 0;
        let mut j = 0;
        for rr in res_elems.iter_mut() {
            unsafe {
                // Dot product of row i of self with column j of rhs; uget
                // skips bounds checks, which the asserts above make safe.
                *rr = (0..a).fold(libnum::zero::<A>(),
                    move |s, k| s + *self.uget((i, k)) * *rhs.uget((k, j))
                );
            }
            j += 1;
            if j == n {
                j = 0;
                i += 1;
            }
        }
        unsafe {
            ArrayBase::from_vec_dim((m, n), res_elems)
        }
    }
    /// Perform the matrix multiplication of the rectangular array `self` and
    /// column vector `rhs`.
    ///
    /// The array sizes must agree in the way that
    /// if `self` is *M* × *N*, then `rhs` is *N*.
    ///
    /// Return a result array with shape *M*.
    ///
    /// **Panics** if sizes are incompatible.
    #[allow(deprecated)]
    pub fn mat_mul_col(&self, rhs: &ArrayBase<S, Ix>) -> Array<A, Ix>
        where A: Copy + Ring
    {
        let ((m, a), n) = (self.dim, rhs.dim);
        let (self_columns, other_rows) = (a, n);
        assert!(self_columns == other_rows);
        // Avoid initializing the memory in vec -- set it during iteration
        let mut res_elems = Vec::<A>::with_capacity(m as usize);
        unsafe {
            res_elems.set_len(m as usize);
        }
        let mut i = 0;
        for rr in res_elems.iter_mut() {
            unsafe {
                // Dot product of row i with the vector.
                *rr = (0..a).fold(libnum::zero::<A>(),
                    move |s, k| s + *self.uget((i, k)) * *rhs.uget(k)
                );
            }
            i += 1;
        }
        unsafe {
            ArrayBase::from_vec_dim(m, res_elems)
        }
    }
}
// Array OPERATORS
// Generates a pair of in-place methods for one arithmetic trait:
// `$imethod` (array ⊕= array, with broadcasting) and `$imth_scalar`
// (array ⊕= scalar). `$doc` names the operation in the generated docs.
macro_rules! impl_binary_op_inherent(
    ($trt:ident, $mth:ident, $imethod:ident, $imth_scalar:ident, $doc:expr) => (
    /// Perform elementwise
    #[doc=$doc]
    /// between `self` and `rhs`,
    /// *in place*.
    ///
    /// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
    ///
    /// **Panics** if broadcasting isn’t possible.
    pub fn $imethod <E: Dimension, S2> (&mut self, rhs: &ArrayBase<S2, E>)
        where A: Clone + $trt<A, Output=A>,
              S2: Data<Elem=A>,
    {
        // zip_mut_with handles the broadcasting (and panics on mismatch).
        self.zip_mut_with(rhs, |x, y| {
            *x = x.clone().$mth(y.clone());
        });
    }
    /// Perform elementwise
    #[doc=$doc]
    /// between `self` and the scalar `x`,
    /// *in place*.
    pub fn $imth_scalar (&mut self, x: &A)
        where A: Clone + $trt<A, Output=A>,
    {
        // Order doesn't matter for a scalar operand, so the unordered
        // traversal is fine.
        self.unordered_foreach_mut(move |elt| {
            *elt = elt.clone(). $mth (x.clone());
        });
    }
    );
);
/// *In-place* arithmetic operations.
impl<A, S, D> ArrayBase<S, D>
    where S: DataMut<Elem=A>,
          D: Dimension,
{
    // One invocation per supported binary operator; each expands to an
    // array-by-array method and an array-by-scalar method (see the macro).
    impl_binary_op_inherent!(Add, add, iadd, iadd_scalar, "addition");
    impl_binary_op_inherent!(Sub, sub, isub, isub_scalar, "subtraction");
    impl_binary_op_inherent!(Mul, mul, imul, imul_scalar, "multiplication");
    impl_binary_op_inherent!(Div, div, idiv, idiv_scalar, "division");
    impl_binary_op_inherent!(Rem, rem, irem, irem_scalar, "remainder");
    impl_binary_op_inherent!(BitAnd, bitand, ibitand, ibitand_scalar, "bit and");
    impl_binary_op_inherent!(BitOr, bitor, ibitor, ibitor_scalar, "bit or");
    impl_binary_op_inherent!(BitXor, bitxor, ibitxor, ibitxor_scalar, "bit xor");
    impl_binary_op_inherent!(Shl, shl, ishl, ishl_scalar, "left shift");
    impl_binary_op_inherent!(Shr, shr, ishr, ishr_scalar, "right shift");
    /// Perform an elementwise negation of `self`, *in place*.
    pub fn ineg(&mut self)
        where A: Clone + Neg<Output=A>,
    {
        self.unordered_foreach_mut(|elt| {
            *elt = elt.clone().neg()
        });
    }
    /// Perform an elementwise unary not of `self`, *in place*.
    pub fn inot(&mut self)
        where A: Clone + Not<Output=A>,
    {
        self.unordered_foreach_mut(|elt| {
            *elt = elt.clone().not()
        });
    }
}
// Generates the std::ops trait impls for one binary operator: a consuming
// form (array op array, reusing the lhs storage) and a by-reference form
// (returns a fresh OwnedArray).
macro_rules! impl_binary_op(
    ($trt:ident, $mth:ident, $doc:expr) => (
    /// Perform elementwise
    #[doc=$doc]
    /// between `self` and `rhs`,
    /// and return the result (based on `self`).
    ///
    /// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
    ///
    /// **Panics** if broadcasting isn’t possible.
    impl<A, S, S2, D, E> $trt<ArrayBase<S2, E>> for ArrayBase<S, D>
        where A: Clone + $trt<A, Output=A>,
              S: DataMut<Elem=A>,
              S2: Data<Elem=A>,
              D: Dimension,
              E: Dimension,
    {
        type Output = ArrayBase<S, D>;
        fn $mth (mut self, rhs: ArrayBase<S2, E>) -> ArrayBase<S, D>
        {
            // FIXME: Can we co-broadcast arrays here? And how?
            // Mutate self's elements in place and return it — no new
            // allocation for the result.
            self.zip_mut_with(&rhs, |x, y| {
                *x = x.clone(). $mth (y.clone());
            });
            self
        }
    }
    /// Perform elementwise
    #[doc=$doc]
    /// between references `self` and `rhs`,
    /// and return the result as a new `OwnedArray`.
    ///
    /// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
    ///
    /// **Panics** if broadcasting isn’t possible.
    impl<'a, A, S, S2, D, E> $trt<&'a ArrayBase<S2, E>> for &'a ArrayBase<S, D>
        where A: Clone + $trt<A, Output=A>,
              S: Data<Elem=A>,
              S2: Data<Elem=A>,
              D: Dimension,
              E: Dimension,
    {
        type Output = OwnedArray<A, D>;
        fn $mth (self, rhs: &'a ArrayBase<S2, E>) -> OwnedArray<A, D>
        {
            // FIXME: Can we co-broadcast arrays here? And how?
            // Clone self into an owned array, then delegate to the
            // consuming impl above.
            self.to_owned().$mth(rhs.view())
        }
    }
    );
);
// std::ops trait implementations for arrays, generated per operator.
mod arithmetic_ops {
    use super::*;
    use std::ops::*;
    impl_binary_op!(Add, add, "addition");
    impl_binary_op!(Sub, sub, "subtraction");
    impl_binary_op!(Mul, mul, "multiplication");
    impl_binary_op!(Div, div, "division");
    impl_binary_op!(Rem, rem, "remainder");
    impl_binary_op!(BitAnd, bitand, "bit and");
    impl_binary_op!(BitOr, bitor, "bit or");
    impl_binary_op!(BitXor, bitxor, "bit xor");
    impl_binary_op!(Shl, shl, "left shift");
    impl_binary_op!(Shr, shr, "right shift");
    // Unary operators delegate to the in-place inherent methods.
    impl<A, S, D> Neg for ArrayBase<S, D>
        where A: Clone + Neg<Output=A>,
              S: DataMut<Elem=A>,
              D: Dimension
    {
        type Output = Self;
        /// Perform an elementwise negation of `self` and return the result.
        fn neg(mut self) -> Self {
            self.ineg();
            self
        }
    }
    impl<A, S, D> Not for ArrayBase<S, D>
        where A: Clone + Not<Output=A>,
              S: DataMut<Elem=A>,
              D: Dimension
    {
        type Output = Self;
        /// Perform an elementwise unary not of `self` and return the result.
        fn not(mut self) -> Self {
            self.inot();
            self
        }
    }
}
// Compound-assignment operator impls (`+=`, `-=`, …); gated behind a feature
// because the *Assign traits were not yet stable on all supported compilers.
#[cfg(feature = "assign_ops")]
mod assign_ops {
    use super::*;
    use std::ops::{
        AddAssign,
        SubAssign,
        MulAssign,
        DivAssign,
        RemAssign,
        BitAndAssign,
        BitOrAssign,
        BitXorAssign,
    };
    // Generates one `impl OpAssign<&ArrayBase> for ArrayBase` per operator,
    // delegating elementwise to the scalar OpAssign via zip_mut_with.
    macro_rules! impl_assign_op {
        ($trt:ident, $method:ident, $doc:expr) => {
    #[doc=$doc]
    /// If their shapes disagree, `rhs` is broadcast to the shape of `self`.
    ///
    /// **Panics** if broadcasting isn’t possible.
    ///
    /// **Requires `feature = "assign_ops"`**
    impl<'a, A, S, S2, D, E> $trt<&'a ArrayBase<S2, E>> for ArrayBase<S, D>
        where A: Clone + $trt<A>,
              S: DataMut<Elem=A>,
              S2: Data<Elem=A>,
              D: Dimension,
              E: Dimension,
    {
        fn $method(&mut self, rhs: &ArrayBase<S2, E>) {
            self.zip_mut_with(rhs, |x, y| {
                x.$method(y.clone());
            });
        }
    }
        };
    }
    impl_assign_op!(AddAssign, add_assign,
                    "Perform `self += rhs` as elementwise addition (in place).\n");
    impl_assign_op!(SubAssign, sub_assign,
                    "Perform `self -= rhs` as elementwise subtraction (in place).\n");
    impl_assign_op!(MulAssign, mul_assign,
                    "Perform `self *= rhs` as elementwise multiplication (in place).\n");
    impl_assign_op!(DivAssign, div_assign,
                    "Perform `self /= rhs` as elementwise division (in place).\n");
    impl_assign_op!(RemAssign, rem_assign,
                    "Perform `self %= rhs` as elementwise remainder (in place).\n");
    impl_assign_op!(BitAndAssign, bitand_assign,
                    "Perform `self &= rhs` as elementwise bit and (in place).\n");
    impl_assign_op!(BitOrAssign, bitor_assign,
                    "Perform `self |= rhs` as elementwise bit or (in place).\n");
    impl_assign_op!(BitXorAssign, bitxor_assign,
                    "Perform `self ^= rhs` as elementwise bit xor (in place).\n");
}
/// An iterator over the elements of an array.
///
/// Iterator element type is `&'a A`.
pub struct Elements<'a, A: 'a, D> {
    // Either a fast raw-slice iterator (contiguous arrays) or a counted
    // dimensional iterator (see `ElementsRepr`).
    inner: ElementsRepr<Iter<'a, A>, ElementsBase<'a, A, D>>,
}
/// Counted read only iterator
struct ElementsBase<'a, A: 'a, D> {
    inner: Baseiter<'a, A, D>,
}
/// An iterator over the elements of an array (mutable).
///
/// Iterator element type is `&'a mut A`.
pub struct ElementsMut<'a, A: 'a, D> {
    inner: ElementsRepr<IterMut<'a, A>, ElementsBaseMut<'a, A, D>>,
}
/// An iterator over the elements of an array (mutable).
///
/// Iterator element type is `&'a mut A`.
struct ElementsBaseMut<'a, A: 'a, D> {
    inner: Baseiter<'a, A, D>,
}
/// An iterator over the indexes and elements of an array.
#[derive(Clone)]
pub struct Indexed<'a, A: 'a, D>(ElementsBase<'a, A, D>);
/// An iterator over the indexes and elements of an array (mutable).
pub struct IndexedMut<'a, A: 'a, D>(ElementsBaseMut<'a, A, D>);
// Convenience constructor for a lock-step zip over two slice-like values.
fn zipsl<T, U>(t: T, u: U) -> ZipSlices<T, U>
    where T: it::misc::Slice, U: it::misc::Slice
{
    ZipSlices::from_slices(t, u)
}
// Dispatch between the contiguous (plain slice) and general (counted)
// iterator representations.
enum ElementsRepr<S, C> {
    Slice(S),
    Counted(C),
}
|
/*
* Copyright 2015-2017 Ben Ashford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#![crate_type = "lib"]
#![crate_name = "rs_es"]
//! A client for ElasticSearch's REST API
//!
//! The `Client` itself is used as the central access point, from which numerous
//! operations are defined implementing each of the specific ElasticSearch APIs.
//!
//! Warning: at the time of writing the majority of such APIs are currently
//! unimplemented.
#[macro_use]
extern crate serde_derive;
extern crate serde;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate hyper;
#[cfg(feature = "ssl")]
extern crate hyper_openssl;
#[macro_use]
extern crate maplit;
extern crate url;
#[macro_use]
pub mod util;
#[macro_use]
pub mod json;
pub mod error;
pub mod operations;
pub mod query;
pub mod units;
use hyper::client;
use hyper::status::StatusCode;
use hyper::header::{Headers, Authorization, Basic};
use serde::ser::Serialize;
use serde::de::Deserialize;
use error::EsError;
use url::Url;
/// Common behaviour of an HTTP response from ElasticSearch: expose the
/// status code and deserialize the JSON body.
pub trait EsResponse {
    /// The HTTP status code of the response.
    // Lifetimes elided — `&self -> &StatusCode` desugars identically to the
    // explicit `<'a>(&'a self) -> &'a StatusCode` form.
    fn status_code(&self) -> &StatusCode;
    /// Deserialize the response body into `R`.
    fn read_response<R>(self) -> Result<R, EsError> where R: Deserialize;
}
impl EsResponse for client::response::Response {
fn status_code<'a>(&'a self) -> &'a StatusCode {
&self.status
}
fn read_response<R>(self) -> Result<R, EsError>
where R: Deserialize {
Ok(try!(serde_json::from_reader(self)))
}
}
// The client
/// Process the result of an HTTP request, returning the status code and the
/// `Json` result (if the result had a body) or an `EsError` if there were any
/// errors
///
/// This function is exposed to allow extensions to certain operations, it is
/// not expected to be used by consumers of the library
pub fn do_req(mut resp: client::response::Response) -> Result<client::response::Response, EsError> {
    // Copy the status out first so `resp` itself can be moved (Ok arm) or
    // mutably borrowed (Err arm, which consumes the body) below.
    let status = resp.status;
    match status {
        // Success — and NotFound, which some operations treat as a valid
        // answer — pass the response through untouched.
        StatusCode::Ok | StatusCode::Created | StatusCode::NotFound => Ok(resp),
        _ => Err(EsError::from(&mut resp)),
    }
}
/// The core of the ElasticSearch client, owns a HTTP connection.
///
/// Each instance of `Client` is reusable, but only one thread can use each one
/// at once. This will be enforced by the borrow-checker as most methods are
/// defined on `&mut self`.
///
/// To create a `Client`, the URL needs to be specified.
///
/// Each ElasticSearch API operation is defined as a method on `Client`. Any
/// compulsory parameters must be given as arguments to this method. It returns
/// an operation builder that can be used to add any optional parameters.
///
/// Finally `send` is called to submit the operation:
///
/// # Examples
///
/// ```
/// use rs_es::Client;
///
/// let mut client = Client::new("http://localhost:9200");
/// ```
///
/// See the specific operations and their builder objects for details.
pub struct Client {
    // Root URL every operation path is joined onto.
    base_url: Url,
    http_client: hyper::Client,
    // Headers attached to every request (Basic auth when the URL carries
    // credentials; otherwise empty).
    headers: Headers
}
/// Create a HTTP function for the given method (GET/PUT/POST/DELETE)
macro_rules! es_op {
    ($n:ident,$cn:ident) => {
        // Generated method: resolve `url` against the base URL, fire the
        // request with the client's standard headers, classify the result.
        fn $n(&mut self, url: &str) -> Result<client::response::Response, EsError> {
            info!("Doing {} on {}", stringify!($n), url);
            let url = self.full_url(url);
            // `?` replaces the deprecated `try!` macro; the hyper error is
            // converted into `EsError` via `From`.
            let result = self.http_client
                             .$cn(&url)
                             .headers(self.headers.clone())
                             .send()?;
            do_req(result)
        }
    }
}
/// Create a HTTP function with a request body for the given method
/// (GET/PUT/POST/DELETE)
///
macro_rules! es_body_op {
    ($n:ident,$cn:ident) => {
        // Generated method: serialize `body` to JSON, send it with the
        // client's standard headers, classify the result.
        fn $n<E>(&mut self, url: &str, body: &E) -> Result<client::response::Response, EsError>
            where E: Serialize {
            info!("Doing {} on {}", stringify!($n), url);
            // `?` replaces the deprecated `try!` macro; serialization errors
            // convert into `EsError` via `From`.
            let json_string = serde_json::to_string(body)?;
            debug!("Body send: {}", &json_string);
            let url = self.full_url(url);
            let result = self.http_client
                             .$cn(&url)
                             .headers(self.headers.clone())
                             .body(&json_string)
                             .send()?;
            do_req(result)
        }
    }
}
impl Client {
/// Create a new client
pub fn new(url_s: &str) -> Result<Client, url::ParseError> {
let url = try!(Url::parse(url_s));
Ok(Client {
http_client: Self::http_client(),
headers: Self::basic_auth(&url),
base_url: url
})
}
#[cfg(feature = "ssl")]
fn http_client() -> hyper::Client {
let ssl = hyper_openssl::OpensslClient::new().unwrap();
let connector = hyper::net::HttpsConnector::new(ssl);
hyper::Client::with_connector(connector)
}
#[cfg(not(feature = "ssl"))]
fn http_client() -> hyper::Client {
hyper::Client::new()
}
/// Add headers for the basic authentication to every request
/// when given host's format is `USER:PASS@HOST`.
fn basic_auth(url: &Url) -> Headers {
let mut headers = Headers::new();
let username = url.username();
if !username.is_empty() {
headers.set(
Authorization(
Basic {
username: username.to_owned(),
password: url.password().map(|p| p.to_owned())
}
)
)
}
headers
}
/// Take a nearly complete ElasticSearch URL, and stick
/// the URL on the front.
pub fn full_url(&self, suffix: &str) -> String {
self.base_url.join(suffix).unwrap().into_string()
}
es_op!(get_op, get);
es_op!(post_op, post);
es_body_op!(post_body_op, post);
es_op!(put_op, put);
es_body_op!(put_body_op, put);
es_op!(delete_op, delete);
}
#[cfg(test)]
pub mod tests {
    extern crate env_logger;
    pub extern crate regex;
    use std::env;
    use serde_json::Value;
    use super::Client;
    use super::operations::bulk::Action;
    use super::operations::search::ScanResult;
    use super::query::Query;
    use super::units::Duration;
    // test setup
    /// Build a client against `ES_HOST`, falling back to localhost.
    pub fn make_client() -> Client {
        let hostname = match env::var("ES_HOST") {
            Ok(val) => val,
            Err(_) => "http://localhost:9200".to_owned()
        };
        Client::new(&hostname).unwrap()
    }
    #[derive(Debug, Serialize, Deserialize)]
    pub struct TestDocument {
        pub str_field: String,
        pub int_field: i64,
        pub bool_field: bool
    }
    impl TestDocument {
        pub fn new() -> TestDocument {
            TestDocument {
                str_field: "I am a test".to_owned(),
                int_field: 1,
                bool_field: true
            }
        }
        pub fn with_str_field(mut self, s: &str) -> TestDocument {
            self.str_field = s.to_owned();
            self
        }
        pub fn with_int_field(mut self, i: i64) -> TestDocument {
            self.int_field = i;
            self
        }
        pub fn with_bool_field(mut self, b: bool) -> TestDocument {
            self.bool_field = b;
            self
        }
    }
    /// Index a small fixed set of documents and refresh the index.
    pub fn setup_test_data(client: &mut Client, index_name: &str) {
        // TODO - this should use the Bulk API
        let documents = vec![
            TestDocument::new().with_str_field("Document A123").with_int_field(1),
            TestDocument::new().with_str_field("Document B456").with_int_field(2),
            TestDocument::new().with_str_field("Document 1ABC").with_int_field(3)
        ];
        // Iterate by reference — idiomatic replacement for the deprecated
        // `for ref doc in documents` pattern; both bind `&TestDocument`.
        for doc in &documents {
            client.index(index_name, "test_type")
                  .with_doc(doc)
                  .send()
                  .unwrap();
        }
        client.refresh().with_indexes(&[index_name]).send().unwrap();
    }
    /// Delete every document in `test_idx` via scan-and-scroll plus bulk
    /// deletes; scan errors (e.g. missing index) are logged and ignored.
    pub fn clean_db(mut client: &mut Client, test_idx: &str) {
        // let's do some logging
        let _ = env_logger::init();
        let scroll = Duration::minutes(1);
        let mut scan:ScanResult<Value> = match client.search_query()
                                                     .with_indexes(&[test_idx])
                                                     .with_query(&Query::build_match_all().build())
                                                     .scan(&scroll) {
            Ok(scan) => scan,
            Err(e) => {
                warn!("Scan error: {:?}", e);
                return // Ignore not-found errors
            }
        };
        loop {
            let page = scan.scroll(&mut client, &scroll).unwrap();
            let hits = page.hits.hits;
            // `is_empty()` over `len() == 0` (clippy: len_zero).
            if hits.is_empty() {
                break;
            }
            let actions: Vec<Action<()>> = hits.into_iter()
                .map(|hit| {
                    Action::delete(hit.id)
                        .with_index(test_idx)
                        .with_doc_type(hit.doc_type)
                })
                .collect();
            client.bulk(&actions).send().unwrap();
        }
        scan.close(&mut client).unwrap();
    }
}
Elide lifetimes and use `?` in `lib.rs`
/*
* Copyright 2015-2017 Ben Ashford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#![crate_type = "lib"]
#![crate_name = "rs_es"]
//! A client for ElasticSearch's REST API
//!
//! The `Client` itself is used as the central access point, from which numerous
//! operations are defined implementing each of the specific ElasticSearch APIs.
//!
//! Warning: at the time of writing the majority of such APIs are currently
//! unimplemented.
#[macro_use]
extern crate serde_derive;
extern crate serde;
#[macro_use]
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate hyper;
#[cfg(feature = "ssl")]
extern crate hyper_openssl;
#[macro_use]
extern crate maplit;
extern crate url;
#[macro_use]
pub mod util;
#[macro_use]
pub mod json;
pub mod error;
pub mod operations;
pub mod query;
pub mod units;
use hyper::client;
use hyper::status::StatusCode;
use hyper::header::{Headers, Authorization, Basic};
use serde::ser::Serialize;
use serde::de::Deserialize;
use error::EsError;
use url::Url;
/// Common behaviour of an HTTP response from ElasticSearch: expose the
/// status code and deserialize the JSON body.
pub trait EsResponse {
    /// The HTTP status code of the response.
    fn status_code(&self) -> &StatusCode;
    /// Deserialize the response body into `R`.
    fn read_response<R>(self) -> Result<R, EsError> where R: Deserialize;
}
impl EsResponse for client::response::Response {
    fn status_code(&self) -> &StatusCode {
        &self.status
    }
    fn read_response<R>(self) -> Result<R, EsError>
        where R: Deserialize {
        // `?` converts the serde error into `EsError` via `From`.
        Ok(serde_json::from_reader(self)?)
    }
}
// The client
/// Process the result of an HTTP request, returning the status code and the
/// `Json` result (if the result had a body) or an `EsError` if there were any
/// errors
///
/// This function is exposed to allow extensions to certain operations, it is
/// not expected to be used by consumers of the library
pub fn do_req(mut resp: client::response::Response) -> Result<client::response::Response, EsError> {
    // Copy the status out first so `resp` itself can be moved (Ok arm) or
    // mutably borrowed (Err arm, which consumes the body) below.
    let status = resp.status;
    match status {
        // Success — and NotFound, which some operations treat as a valid
        // answer — pass the response through untouched.
        StatusCode::Ok | StatusCode::Created | StatusCode::NotFound => Ok(resp),
        _ => Err(EsError::from(&mut resp)),
    }
}
/// The core of the ElasticSearch client, owns a HTTP connection.
///
/// Each instance of `Client` is reusable, but only one thread can use each one
/// at once. This will be enforced by the borrow-checker as most methods are
/// defined on `&mut self`.
///
/// To create a `Client`, the URL needs to be specified.
///
/// Each ElasticSearch API operation is defined as a method on `Client`. Any
/// compulsory parameters must be given as arguments to this method. It returns
/// an operation builder that can be used to add any optional parameters.
///
/// Finally `send` is called to submit the operation:
///
/// # Examples
///
/// ```
/// use rs_es::Client;
///
/// let mut client = Client::new("http://localhost:9200");
/// ```
///
/// See the specific operations and their builder objects for details.
pub struct Client {
    // Root URL every operation path is joined onto.
    base_url: Url,
    http_client: hyper::Client,
    // Headers attached to every request (Basic auth when the URL carries
    // credentials; otherwise empty).
    headers: Headers
}
/// Create a HTTP function for the given method (GET/PUT/POST/DELETE)
macro_rules! es_op {
    ($n:ident,$cn:ident) => {
        // Generated method: resolve `url` against the base URL, fire the
        // request with the client's standard headers, classify the result.
        fn $n(&mut self, url: &str) -> Result<client::response::Response, EsError> {
            info!("Doing {} on {}", stringify!($n), url);
            let url = self.full_url(url);
            // `?` converts the hyper error into `EsError` via `From`.
            let result = self.http_client
                             .$cn(&url)
                             .headers(self.headers.clone())
                             .send()?;
            do_req(result)
        }
    }
}
/// Create a HTTP function with a request body for the given method
/// (GET/PUT/POST/DELETE)
///
macro_rules! es_body_op {
    ($n:ident,$cn:ident) => {
        // Generated method: serialize `body` to JSON, send it with the
        // client's standard headers, classify the result.
        fn $n<E>(&mut self, url: &str, body: &E) -> Result<client::response::Response, EsError>
            where E: Serialize {
            info!("Doing {} on {}", stringify!($n), url);
            // Serialize up front; `?` propagates serialization errors.
            let json_string = serde_json::to_string(body)?;
            debug!("Body send: {}", &json_string);
            let url = self.full_url(url);
            let result = self.http_client
                             .$cn(&url)
                             .headers(self.headers.clone())
                             .body(&json_string)
                             .send()?;
            do_req(result)
        }
    }
}
impl Client {
    /// Create a new client
    pub fn new(url_s: &str) -> Result<Client, url::ParseError> {
        let url = Url::parse(url_s)?;
        Ok(Client {
            http_client: Self::http_client(),
            headers: Self::basic_auth(&url),
            base_url: url
        })
    }
    // HTTPS transport when the `ssl` feature is enabled; panics if OpenSSL
    // cannot be initialised.
    #[cfg(feature = "ssl")]
    fn http_client() -> hyper::Client {
        let ssl = hyper_openssl::OpensslClient::new().unwrap();
        let connector = hyper::net::HttpsConnector::new(ssl);
        hyper::Client::with_connector(connector)
    }
    // Plain HTTP transport otherwise.
    #[cfg(not(feature = "ssl"))]
    fn http_client() -> hyper::Client {
        hyper::Client::new()
    }
    /// Add headers for the basic authentication to every request
    /// when given host's format is `USER:PASS@HOST`.
    fn basic_auth(url: &Url) -> Headers {
        let mut headers = Headers::new();
        let username = url.username();
        if !username.is_empty() {
            headers.set(
                Authorization(
                    Basic {
                        username: username.to_owned(),
                        password: url.password().map(|p| p.to_owned())
                    }
                )
            )
        }
        headers
    }
    /// Take a nearly complete ElasticSearch URL, and stick
    /// the URL on the front.
    ///
    /// NOTE(review): `join(..).unwrap()` panics if `suffix` cannot be parsed
    /// relative to the base URL — confirm callers only pass valid paths.
    pub fn full_url(&self, suffix: &str) -> String {
        self.base_url.join(suffix).unwrap().into_string()
    }
    // One low-level method per HTTP verb, generated by the macros above.
    es_op!(get_op, get);
    es_op!(post_op, post);
    es_body_op!(post_body_op, post);
    es_op!(put_op, put);
    es_body_op!(put_body_op, put);
    es_op!(delete_op, delete);
}
#[cfg(test)]
pub mod tests {
    extern crate env_logger;
    pub extern crate regex;
    use std::env;
    use serde_json::Value;
    use super::Client;
    use super::operations::bulk::Action;
    use super::operations::search::ScanResult;
    use super::query::Query;
    use super::units::Duration;
    // test setup
    /// Build a client against `ES_HOST`, falling back to localhost.
    pub fn make_client() -> Client {
        let hostname = match env::var("ES_HOST") {
            Ok(val) => val,
            Err(_) => "http://localhost:9200".to_owned()
        };
        Client::new(&hostname).unwrap()
    }
    #[derive(Debug, Serialize, Deserialize)]
    pub struct TestDocument {
        pub str_field: String,
        pub int_field: i64,
        pub bool_field: bool
    }
    impl TestDocument {
        pub fn new() -> TestDocument {
            TestDocument {
                str_field: "I am a test".to_owned(),
                int_field: 1,
                bool_field: true
            }
        }
        pub fn with_str_field(mut self, s: &str) -> TestDocument {
            self.str_field = s.to_owned();
            self
        }
        pub fn with_int_field(mut self, i: i64) -> TestDocument {
            self.int_field = i;
            self
        }
        pub fn with_bool_field(mut self, b: bool) -> TestDocument {
            self.bool_field = b;
            self
        }
    }
    /// Index a small fixed set of documents and refresh the index.
    pub fn setup_test_data(client: &mut Client, index_name: &str) {
        // TODO - this should use the Bulk API
        let documents = vec![
            TestDocument::new().with_str_field("Document A123").with_int_field(1),
            TestDocument::new().with_str_field("Document B456").with_int_field(2),
            TestDocument::new().with_str_field("Document 1ABC").with_int_field(3)
        ];
        // Iterate by reference — idiomatic replacement for the deprecated
        // `for ref doc in documents` pattern; both bind `&TestDocument`.
        for doc in &documents {
            client.index(index_name, "test_type")
                  .with_doc(doc)
                  .send()
                  .unwrap();
        }
        client.refresh().with_indexes(&[index_name]).send().unwrap();
    }
    /// Delete every document in `test_idx` via scan-and-scroll plus bulk
    /// deletes; scan errors (e.g. missing index) are logged and ignored.
    pub fn clean_db(mut client: &mut Client, test_idx: &str) {
        // let's do some logging
        let _ = env_logger::init();
        let scroll = Duration::minutes(1);
        let mut scan:ScanResult<Value> = match client.search_query()
                                                     .with_indexes(&[test_idx])
                                                     .with_query(&Query::build_match_all().build())
                                                     .scan(&scroll) {
            Ok(scan) => scan,
            Err(e) => {
                warn!("Scan error: {:?}", e);
                return // Ignore not-found errors
            }
        };
        loop {
            let page = scan.scroll(&mut client, &scroll).unwrap();
            let hits = page.hits.hits;
            // `is_empty()` over `len() == 0` (clippy: len_zero).
            if hits.is_empty() {
                break;
            }
            let actions: Vec<Action<()>> = hits.into_iter()
                .map(|hit| {
                    Action::delete(hit.id)
                        .with_index(test_idx)
                        .with_doc_type(hit.doc_type)
                })
                .collect();
            client.bulk(&actions).send().unwrap();
        }
        scan.close(&mut client).unwrap();
    }
}
|
//! # Overview
//!
//! This crate provides native rust implementations of image encoding and decoding as well as some
//! basic image manipulation functions. Additional documentation can currently also be found in the
//! [README.md file which is most easily viewed on
//! github](https://github.com/image-rs/image/blob/master/README.md).
//!
//! There are two core problems for which this library provides solutions: a unified interface for image
//! encodings and simple generic buffers for their content. It's possible to use either feature
//! without the other. The focus is on a small and stable set of common operations that can be
//! supplemented by other specialized crates. The library also prefers safe solutions with few
//! dependencies.
//!
//! # High level API
//!
//! Load images using [`io::Reader`]:
//!
//! ```rust,no_run
//! # use std::io::Cursor;
//! use image::io::Reader as ImageReader;
//! # fn main() -> Result<(), image::ImageError> {
//! # let bytes = vec![0u8];
//!
//! let img = ImageReader::open("myimage.png")?.decode()?;
//! let img2 = ImageReader::new(Cursor::new(bytes)).decode()?;
//! # Ok(())
//! # }
//! ```
//!
//! And save them using [`save`] or [`write_to`] methods:
//!
//! ```rust,no_run
//! # use std::io::Write;
//! # use image::ImageOutputFormat;
//! # use image::DynamicImage;
//! # #[cfg(feature = "png")]
//! # fn main() -> Result<(), image::ImageError> {
//! # let img: DynamicImage = unimplemented!();
//! # let img2: DynamicImage = unimplemented!();
//! img.save("empty.jpg")?;
//!
//! let mut bytes: Vec<u8> = Vec::new();
//! img2.write_to(&mut bytes, image::ImageOutputFormat::Png)?;
//! # Ok(())
//! # }
//! # #[cfg(not(feature = "png"))] fn main() {}
//! ```
//!
//! With default features, the crate includes support for [many common image formats](codecs/index.html#supported-formats).
//!
//! [`save`]: enum.DynamicImage.html#method.save
//! [`write_to`]: enum.DynamicImage.html#method.write_to
//! [`io::Reader`]: io/struct.Reader.html
//!
//! # Image buffers
//!
//! The two main types for storing images:
//! * [`ImageBuffer`] which holds statically typed image contents.
//! * [`DynamicImage`] which is an enum over the supported ImageBuffer formats
//! and supports conversions between them.
//!
//! As well as a few more specialized options:
//! * [`GenericImage`] trait for a mutable image buffer.
//! * [`GenericImageView`] trait for read only references to a GenericImage.
//! * [`flat`] module containing types for interoperability with generic channel
//! matrices and foreign interfaces.
//!
//! [`GenericImageView`]: trait.GenericImageView.html
//! [`GenericImage`]: trait.GenericImage.html
//! [`ImageBuffer`]: struct.ImageBuffer.html
//! [`DynamicImage`]: enum.DynamicImage.html
//! [`flat`]: flat/index.html
//!
//! # Low level encoding/decoding API
//!
//! The [`ImageDecoder`] and [`ImageDecoderExt`] traits are implemented for many image file
//! formats. They decode image data by directly on raw byte slices. Given an ImageDecoder, you can
//! produce a DynamicImage via [`DynamicImage::from_decoder`].
//!
//! [`ImageEncoder`] provides the analogous functionality for encoding image data.
//!
//! [`DynamicImage::from_decoder`]: enum.DynamicImage.html#method.from_decoder
//! [`ImageDecoderExt`]: trait.ImageDecoderExt.html
//! [`ImageDecoder`]: trait.ImageDecoder.html
//! [`ImageEncoder`]: trait.ImageEncoder.html
#![warn(missing_docs)]
#![warn(unused_qualifications)]
#![deny(unreachable_pub)]
#![deny(deprecated)]
#![deny(missing_copy_implementations)]
#![cfg_attr(all(test, feature = "benchmarks"), feature(test))]
// it's a bit of a pain otherwise
#![allow(clippy::many_single_char_names)]
#[cfg(all(test, feature = "benchmarks"))]
extern crate test;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
use std::io::Write;
pub use crate::color::{ColorType, ExtendedColorType};
pub use crate::color::{Luma, LumaA, Rgb, Rgba, Bgr, Bgra};
pub use crate::error::{ImageError, ImageResult};
pub use crate::image::{AnimationDecoder,
GenericImage,
GenericImageView,
ImageDecoder,
ImageDecoderExt,
ImageEncoder,
ImageFormat,
ImageOutputFormat,
Progress,
// Iterators
Pixels,
SubImage};
pub use crate::buffer_::{
GrayAlphaImage,
GrayImage,
// Image types
ImageBuffer,
RgbImage,
RgbaImage};
pub use crate::flat::FlatSamples;
// Traits
pub use crate::traits::{EncodableLayout, Primitive, Pixel};
// Opening and loading images
pub use crate::io::free_functions::{guess_format, load};
pub use crate::dynimage::{load_from_memory, load_from_memory_with_format, open,
save_buffer, save_buffer_with_format, image_dimensions};
pub use crate::dynimage::DynamicImage;
pub use crate::animation::{Delay, Frame, Frames};
// More detailed error type
pub mod error;
/// Iterators and other auxiliary structure for the `ImageBuffer` type.
pub mod buffer {
// Only those not exported at the top-level
pub use crate::buffer_::{
ConvertBuffer,
EnumeratePixels,
EnumeratePixelsMut,
EnumerateRows,
EnumerateRowsMut,
Pixels,
PixelsMut,
Rows,
RowsMut,
};
}
// Math utils
pub mod math;
// Image processing functions
pub mod imageops;
// Io bindings
pub mod io;
// Buffer representations for ffi.
pub mod flat;
/// Encoding and decoding for various image file formats.
///
/// # Supported formats
///
/// | Format | Decoding | Encoding |
/// | ------ | -------- | -------- |
/// | PNG | All supported color types | Same as decoding |
/// | JPEG | Baseline and progressive | Baseline JPEG |
/// | GIF | Yes | Yes |
/// | BMP | Yes | RGB8, RGBA8, Gray8, GrayA8 |
/// | ICO | Yes | Yes |
/// | TIFF | Baseline(no fax support) + LZW + PackBits | RGB8, RGBA8, Gray8 |
/// | WebP | Lossy(Luma channel only) | No |
/// | PNM | PBM, PGM, PPM, standard PAM | Yes |
/// | DDS | DXT1, DXT3, DXT5 | No |
/// | TGA | Yes | RGB8, RGBA8, BGR8, BGRA8, Gray8, GrayA8 |
/// | farbfeld | Yes | Yes |
///
/// ## A note on format specific features
///
/// One of the main goals of `image` is stability, in runtime but also for programmers. This
/// ensures that performance as well as safety fixes reach a majority of its user base with little
/// effort. Re-exporting all details of its dependencies would run counter to this goal as it
/// linked _all_ major version bumps between them and `image`. As such, we are wary of exposing too
/// many details, or configuration options, that are not shared between different image formats.
///
/// Nevertheless, the advantage of precise control is hard to ignore. We will thus consider
/// _wrappers_, not direct re-exports, in either of the following cases:
///
/// 1. A standard specifies that configuration _x_ is required for decoders/encoders and there
/// exists an essentially canonical way to control it.
/// 2. At least two different implementations agree on some (sub-)set of features in practice.
/// 3. A technical argument including measurements of the performance, space benefits, or otherwise
/// objectively quantified benefits can be made, and the added interface is unlikely to require
/// breaking changes.
///
/// Features that fulfill two or more criteria are preferred.
///
/// Re-exports of dependencies that reach version `1` will be discussed when it happens.
pub mod codecs {
#[cfg(feature = "avif")]
pub mod avif;
#[cfg(feature = "bmp")]
pub mod bmp;
#[cfg(feature = "dds")]
pub mod dds;
#[cfg(feature = "dxt")]
pub mod dxt;
#[cfg(feature = "farbfeld")]
pub mod farbfeld;
#[cfg(feature = "gif")]
pub mod gif;
#[cfg(feature = "hdr")]
pub mod hdr;
#[cfg(feature = "ico")]
pub mod ico;
#[cfg(feature = "jpeg")]
pub mod jpeg;
#[cfg(feature = "png")]
pub mod png;
#[cfg(feature = "pnm")]
pub mod pnm;
#[cfg(feature = "tga")]
pub mod tga;
#[cfg(feature = "tiff")]
pub mod tiff;
#[cfg(feature = "webp")]
pub mod webp;
}
#[cfg(feature = "avif")]
#[deprecated = "Use codecs::avif instead"]
pub mod avif {
//! Encoding of AVIF images.
pub use crate::codecs::avif::AvifEncoder;
}
#[cfg(feature = "bmp")]
#[deprecated = "Use codecs::bmp instead"]
pub mod bmp {
//! Decoding and Encoding of BMP Images
#[allow(deprecated)]
pub use crate::codecs::bmp::{BMPEncoder, BmpDecoder, BmpEncoder};
}
#[cfg(feature = "dds")]
#[deprecated = "Use codecs::dds instead"]
pub mod dds {
//! Decoding of DDS images
pub use crate::codecs::dds::DdsDecoder;
}
#[cfg(feature = "dxt")]
// Fix the deprecation message: it was missing the module name
// ("Use codecs:: instead"), unlike every sibling wrapper module.
#[deprecated = "Use codecs::dxt instead"]
pub mod dxt {
    //! Decoding of DXT (S3TC) compression
    #[allow(deprecated)]
    pub use crate::codecs::dxt::{
        DXTEncoder, DXTReader, DXTVariant, DxtDecoder, DxtEncoder, DxtReader, DxtVariant,
    };
}
#[cfg(feature = "farbfeld")]
#[deprecated = "Use codecs::farbfeld instead"]
pub mod farbfeld {
//! Decoding of farbfeld images
pub use crate::codecs::farbfeld::{FarbfeldDecoder, FarbfeldEncoder, FarbfeldReader};
}
#[cfg(feature = "gif")]
#[deprecated = "Use codecs::gif instead"]
pub mod gif {
//! Decoding of GIF Images
#[allow(deprecated)]
pub use crate::codecs::gif::{Encoder, GifDecoder, GifEncoder, GifReader};
}
#[cfg(feature = "hdr")]
#[deprecated = "Use codecs::hdr instead"]
pub mod hdr {
//! Decoding of Radiance HDR Images
#[allow(deprecated)]
pub use crate::codecs::hdr::{
read_raw_file, rgbe8, to_rgbe8, HDRAdapter, HDREncoder, HDRImageDecoderIterator,
HDRMetadata, HdrAdapter, HdrDecoder, HdrEncoder, HdrImageDecoderIterator, HdrMetadata,
HdrReader, RGBE8Pixel, Rgbe8Pixel, SIGNATURE,
};
}
#[cfg(feature = "ico")]
#[deprecated = "Use codecs::ico instead"]
pub mod ico {
//! Decoding and Encoding of ICO files
#[allow(deprecated)]
pub use crate::codecs::ico::{ICOEncoder, IcoDecoder, IcoEncoder};
}
#[cfg(feature = "jpeg")]
#[deprecated = "Use codecs::jpeg instead"]
pub mod jpeg {
//! Decoding and Encoding of JPEG Images
#[allow(deprecated)]
pub use crate::codecs::jpeg::{
JPEGEncoder, JpegDecoder, JpegEncoder, PixelDensity, PixelDensityUnit,
};
}
#[cfg(feature = "png")]
#[deprecated = "Use codecs::png instead"]
pub mod png {
//! Decoding and Encoding of PNG Images
#[allow(deprecated)]
pub use crate::codecs::png::{
ApngDecoder, PNGEncoder, PNGReader, PngDecoder, PngEncoder, PngReader,
};
}
#[cfg(feature = "pnm")]
#[deprecated = "Use codecs::pnm instead"]
pub mod pnm {
//! Decoding and Encoding of netpbm image formats (pbm, pgm, ppm and pam)
#[allow(deprecated)]
pub use crate::codecs::pnm::{
ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PNMEncoder, PNMHeader,
PNMSubtype, PixmapHeader, PnmDecoder, PnmEncoder, PnmHeader, PnmSubtype, SampleEncoding,
};
}
#[cfg(feature = "tga")]
#[deprecated = "Use codecs::tga instead"]
pub mod tga {
//! Decoding and Encoding of TGA Images
#[allow(deprecated)]
pub use crate::codecs::tga::{TgaDecoder, TgaEncoder};
}
#[cfg(feature = "tiff")]
#[deprecated = "Use codecs::tiff instead"]
pub mod tiff {
//! Decoding and Encoding of TIFF Images
#[allow(deprecated)]
pub use crate::codecs::tiff::{TiffDecoder, TiffEncoder, TiffReader};
}
#[cfg(feature = "webp")]
#[deprecated = "Use codecs::webp instead"]
pub mod webp {
//! Decoding of WebP Images
#[allow(deprecated)]
pub use crate::codecs::webp::{vp8, WebPDecoder};
}
mod animation;
#[path = "buffer.rs"]
mod buffer_;
mod color;
mod dynimage;
mod image;
mod traits;
mod utils;
// Can't use the macro-call itself within the `doc` attribute. So force it to eval it as part of
// the macro invocation.
//
// The inspiration for the macro and implementation is from
// <https://github.com/GuillaumeGomez/doc-comment>
//
// MIT License
//
// Copyright (c) 2018 Guillaume Gomez
//
// Attaches `$content` as documentation to a dummy `extern` block so that
// rustdoc renders it and `cargo test` runs any code examples it contains.
macro_rules! insert_as_doc {
    { $content:expr } => {
        #[doc = $content] extern { }
    }
}
// Provides the README.md as doc, to ensure the example works!
insert_as_doc!(include_str!("../README.md"));
// Copies data from `src` to the front of `dst`.
//
// Panics if the length of `dst` is less than the length of `src`.
#[inline]
fn copy_memory(src: &[u8], dst: &mut [u8]) {
    let len_src = src.len();
    assert!(dst.len() >= len_src);
    // `copy_from_slice` compiles to a straight memcpy; no need to route the
    // copy through the `io::Write` machinery and its unreachable error path.
    dst[..len_src].copy_from_slice(src);
}
Fix bug 1365: re-export `CompressionType` and `FilterType` from the deprecated top-level `png` module
//! # Overview
//!
//! This crate provides native rust implementations of image encoding and decoding as well as some
//! basic image manipulation functions. Additional documentation can currently also be found in the
//! [README.md file which is most easily viewed on
//! github](https://github.com/image-rs/image/blob/master/README.md).
//!
//! There are two core problems for which this library provides solutions: a unified interface for image
//! encodings and simple generic buffers for their content. It's possible to use either feature
//! without the other. The focus is on a small and stable set of common operations that can be
//! supplemented by other specialized crates. The library also prefers safe solutions with few
//! dependencies.
//!
//! # High level API
//!
//! Load images using [`io::Reader`]:
//!
//! ```rust,no_run
//! # use std::io::Cursor;
//! use image::io::Reader as ImageReader;
//! # fn main() -> Result<(), image::ImageError> {
//! # let bytes = vec![0u8];
//!
//! let img = ImageReader::open("myimage.png")?.decode()?;
//! let img2 = ImageReader::new(Cursor::new(bytes)).decode()?;
//! # Ok(())
//! # }
//! ```
//!
//! And save them using [`save`] or [`write_to`] methods:
//!
//! ```rust,no_run
//! # use std::io::Write;
//! # use image::ImageOutputFormat;
//! # use image::DynamicImage;
//! # #[cfg(feature = "png")]
//! # fn main() -> Result<(), image::ImageError> {
//! # let img: DynamicImage = unimplemented!();
//! # let img2: DynamicImage = unimplemented!();
//! img.save("empty.jpg")?;
//!
//! let mut bytes: Vec<u8> = Vec::new();
//! img2.write_to(&mut bytes, image::ImageOutputFormat::Png)?;
//! # Ok(())
//! # }
//! # #[cfg(not(feature = "png"))] fn main() {}
//! ```
//!
//! With default features, the crate includes support for [many common image formats](codecs/index.html#supported-formats).
//!
//! [`save`]: enum.DynamicImage.html#method.save
//! [`write_to`]: enum.DynamicImage.html#method.write_to
//! [`io::Reader`]: io/struct.Reader.html
//!
//! # Image buffers
//!
//! The two main types for storing images:
//! * [`ImageBuffer`] which holds statically typed image contents.
//! * [`DynamicImage`] which is an enum over the supported ImageBuffer formats
//! and supports conversions between them.
//!
//! As well as a few more specialized options:
//! * [`GenericImage`] trait for a mutable image buffer.
//! * [`GenericImageView`] trait for read only references to a GenericImage.
//! * [`flat`] module containing types for interoperability with generic channel
//! matrices and foreign interfaces.
//!
//! [`GenericImageView`]: trait.GenericImageView.html
//! [`GenericImage`]: trait.GenericImage.html
//! [`ImageBuffer`]: struct.ImageBuffer.html
//! [`DynamicImage`]: enum.DynamicImage.html
//! [`flat`]: flat/index.html
//!
//! # Low level encoding/decoding API
//!
//! The [`ImageDecoder`] and [`ImageDecoderExt`] traits are implemented for many image file
//! formats. They decode image data by directly on raw byte slices. Given an ImageDecoder, you can
//! produce a DynamicImage via [`DynamicImage::from_decoder`].
//!
//! [`ImageEncoder`] provides the analogous functionality for encoding image data.
//!
//! [`DynamicImage::from_decoder`]: enum.DynamicImage.html#method.from_decoder
//! [`ImageDecoderExt`]: trait.ImageDecoderExt.html
//! [`ImageDecoder`]: trait.ImageDecoder.html
//! [`ImageEncoder`]: trait.ImageEncoder.html
#![warn(missing_docs)]
#![warn(unused_qualifications)]
#![deny(unreachable_pub)]
#![deny(deprecated)]
#![deny(missing_copy_implementations)]
#![cfg_attr(all(test, feature = "benchmarks"), feature(test))]
// it's a bit of a pain otherwise
#![allow(clippy::many_single_char_names)]
#[cfg(all(test, feature = "benchmarks"))]
extern crate test;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
use std::io::Write;
pub use crate::color::{ColorType, ExtendedColorType};
pub use crate::color::{Luma, LumaA, Rgb, Rgba, Bgr, Bgra};
pub use crate::error::{ImageError, ImageResult};
pub use crate::image::{AnimationDecoder,
GenericImage,
GenericImageView,
ImageDecoder,
ImageDecoderExt,
ImageEncoder,
ImageFormat,
ImageOutputFormat,
Progress,
// Iterators
Pixels,
SubImage};
pub use crate::buffer_::{
GrayAlphaImage,
GrayImage,
// Image types
ImageBuffer,
RgbImage,
RgbaImage};
pub use crate::flat::FlatSamples;
// Traits
pub use crate::traits::{EncodableLayout, Primitive, Pixel};
// Opening and loading images
pub use crate::io::free_functions::{guess_format, load};
pub use crate::dynimage::{load_from_memory, load_from_memory_with_format, open,
save_buffer, save_buffer_with_format, image_dimensions};
pub use crate::dynimage::DynamicImage;
pub use crate::animation::{Delay, Frame, Frames};
// More detailed error type
pub mod error;
/// Iterators and other auxiliary structure for the `ImageBuffer` type.
pub mod buffer {
// Only those not exported at the top-level
pub use crate::buffer_::{
ConvertBuffer,
EnumeratePixels,
EnumeratePixelsMut,
EnumerateRows,
EnumerateRowsMut,
Pixels,
PixelsMut,
Rows,
RowsMut,
};
}
// Math utils
pub mod math;
// Image processing functions
pub mod imageops;
// Io bindings
pub mod io;
// Buffer representations for ffi.
pub mod flat;
/// Encoding and decoding for various image file formats.
///
/// # Supported formats
///
/// | Format | Decoding | Encoding |
/// | ------ | -------- | -------- |
/// | PNG | All supported color types | Same as decoding |
/// | JPEG | Baseline and progressive | Baseline JPEG |
/// | GIF | Yes | Yes |
/// | BMP | Yes | RGB8, RGBA8, Gray8, GrayA8 |
/// | ICO | Yes | Yes |
/// | TIFF | Baseline(no fax support) + LZW + PackBits | RGB8, RGBA8, Gray8 |
/// | WebP | Lossy(Luma channel only) | No |
/// | PNM | PBM, PGM, PPM, standard PAM | Yes |
/// | DDS | DXT1, DXT3, DXT5 | No |
/// | TGA | Yes | RGB8, RGBA8, BGR8, BGRA8, Gray8, GrayA8 |
/// | farbfeld | Yes | Yes |
///
/// ## A note on format specific features
///
/// One of the main goals of `image` is stability, in runtime but also for programmers. This
/// ensures that performance as well as safety fixes reach a majority of its user base with little
/// effort. Re-exporting all details of its dependencies would run counter to this goal as it
/// linked _all_ major version bumps between them and `image`. As such, we are wary of exposing too
/// many details, or configuration options, that are not shared between different image formats.
///
/// Nevertheless, the advantage of precise control is hard to ignore. We will thus consider
/// _wrappers_, not direct re-exports, in either of the following cases:
///
/// 1. A standard specifies that configuration _x_ is required for decoders/encoders and there
/// exists an essentially canonical way to control it.
/// 2. At least two different implementations agree on some (sub-)set of features in practice.
/// 3. A technical argument including measurements of the performance, space benefits, or otherwise
/// objectively quantified benefits can be made, and the added interface is unlikely to require
/// breaking changes.
///
/// Features that fulfill two or more criteria are preferred.
///
/// Re-exports of dependencies that reach version `1` will be discussed when it happens.
pub mod codecs {
#[cfg(feature = "avif")]
pub mod avif;
#[cfg(feature = "bmp")]
pub mod bmp;
#[cfg(feature = "dds")]
pub mod dds;
#[cfg(feature = "dxt")]
pub mod dxt;
#[cfg(feature = "farbfeld")]
pub mod farbfeld;
#[cfg(feature = "gif")]
pub mod gif;
#[cfg(feature = "hdr")]
pub mod hdr;
#[cfg(feature = "ico")]
pub mod ico;
#[cfg(feature = "jpeg")]
pub mod jpeg;
#[cfg(feature = "png")]
pub mod png;
#[cfg(feature = "pnm")]
pub mod pnm;
#[cfg(feature = "tga")]
pub mod tga;
#[cfg(feature = "tiff")]
pub mod tiff;
#[cfg(feature = "webp")]
pub mod webp;
}
#[cfg(feature = "avif")]
#[deprecated = "Use codecs::avif instead"]
pub mod avif {
//! Encoding of AVIF images.
pub use crate::codecs::avif::AvifEncoder;
}
#[cfg(feature = "bmp")]
#[deprecated = "Use codecs::bmp instead"]
pub mod bmp {
//! Decoding and Encoding of BMP Images
#[allow(deprecated)]
pub use crate::codecs::bmp::{BMPEncoder, BmpDecoder, BmpEncoder};
}
#[cfg(feature = "dds")]
#[deprecated = "Use codecs::dds instead"]
pub mod dds {
//! Decoding of DDS images
pub use crate::codecs::dds::DdsDecoder;
}
#[cfg(feature = "dxt")]
// Fix the deprecation message: it was missing the module name
// ("Use codecs:: instead"), unlike every sibling wrapper module.
#[deprecated = "Use codecs::dxt instead"]
pub mod dxt {
    //! Decoding of DXT (S3TC) compression
    #[allow(deprecated)]
    pub use crate::codecs::dxt::{
        DXTEncoder, DXTReader, DXTVariant, DxtDecoder, DxtEncoder, DxtReader, DxtVariant,
    };
}
#[cfg(feature = "farbfeld")]
#[deprecated = "Use codecs::farbfeld instead"]
pub mod farbfeld {
//! Decoding of farbfeld images
pub use crate::codecs::farbfeld::{FarbfeldDecoder, FarbfeldEncoder, FarbfeldReader};
}
#[cfg(feature = "gif")]
#[deprecated = "Use codecs::gif instead"]
pub mod gif {
//! Decoding of GIF Images
#[allow(deprecated)]
pub use crate::codecs::gif::{Encoder, GifDecoder, GifEncoder, GifReader};
}
#[cfg(feature = "hdr")]
#[deprecated = "Use codecs::hdr instead"]
pub mod hdr {
//! Decoding of Radiance HDR Images
#[allow(deprecated)]
pub use crate::codecs::hdr::{
read_raw_file, rgbe8, to_rgbe8, HDRAdapter, HDREncoder, HDRImageDecoderIterator,
HDRMetadata, HdrAdapter, HdrDecoder, HdrEncoder, HdrImageDecoderIterator, HdrMetadata,
HdrReader, RGBE8Pixel, Rgbe8Pixel, SIGNATURE,
};
}
#[cfg(feature = "ico")]
#[deprecated = "Use codecs::ico instead"]
pub mod ico {
//! Decoding and Encoding of ICO files
#[allow(deprecated)]
pub use crate::codecs::ico::{ICOEncoder, IcoDecoder, IcoEncoder};
}
#[cfg(feature = "jpeg")]
#[deprecated = "Use codecs::jpeg instead"]
pub mod jpeg {
//! Decoding and Encoding of JPEG Images
#[allow(deprecated)]
pub use crate::codecs::jpeg::{
JPEGEncoder, JpegDecoder, JpegEncoder, PixelDensity, PixelDensityUnit,
};
}
#[cfg(feature = "png")]
#[deprecated = "Use codecs::png instead"]
pub mod png {
//! Decoding and Encoding of PNG Images
#[allow(deprecated)]
pub use crate::codecs::png::{
ApngDecoder, CompressionType, FilterType, PNGEncoder, PNGReader, PngDecoder, PngEncoder,
PngReader,
};
}
#[cfg(feature = "pnm")]
#[deprecated = "Use codecs::pnm instead"]
pub mod pnm {
//! Decoding and Encoding of netpbm image formats (pbm, pgm, ppm and pam)
#[allow(deprecated)]
pub use crate::codecs::pnm::{
ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PNMEncoder, PNMHeader,
PNMSubtype, PixmapHeader, PnmDecoder, PnmEncoder, PnmHeader, PnmSubtype, SampleEncoding,
};
}
#[cfg(feature = "tga")]
#[deprecated = "Use codecs::tga instead"]
pub mod tga {
//! Decoding and Encoding of TGA Images
#[allow(deprecated)]
pub use crate::codecs::tga::{TgaDecoder, TgaEncoder};
}
#[cfg(feature = "tiff")]
#[deprecated = "Use codecs::tiff instead"]
pub mod tiff {
//! Decoding and Encoding of TIFF Images
#[allow(deprecated)]
pub use crate::codecs::tiff::{TiffDecoder, TiffEncoder, TiffReader};
}
#[cfg(feature = "webp")]
#[deprecated = "Use codecs::webp instead"]
pub mod webp {
//! Decoding of WebP Images
#[allow(deprecated)]
pub use crate::codecs::webp::{vp8, WebPDecoder};
}
mod animation;
#[path = "buffer.rs"]
mod buffer_;
mod color;
mod dynimage;
mod image;
mod traits;
mod utils;
// Can't use the macro-call itself within the `doc` attribute. So force it to eval it as part of
// the macro invocation.
//
// The inspiration for the macro and implementation is from
// <https://github.com/GuillaumeGomez/doc-comment>
//
// MIT License
//
// Copyright (c) 2018 Guillaume Gomez
//
// Attaches `$content` as documentation to a dummy `extern` block so that
// rustdoc renders it and `cargo test` runs any code examples it contains.
macro_rules! insert_as_doc {
    { $content:expr } => {
        #[doc = $content] extern { }
    }
}
// Provides the README.md as doc, to ensure the example works!
insert_as_doc!(include_str!("../README.md"));
// Copies data from `src` to the front of `dst`.
//
// Panics if the length of `dst` is less than the length of `src`.
#[inline]
fn copy_memory(src: &[u8], dst: &mut [u8]) {
    let len_src = src.len();
    assert!(dst.len() >= len_src);
    // `copy_from_slice` compiles to a straight memcpy; no need to route the
    // copy through the `io::Write` machinery and its unreachable error path.
    dst[..len_src].copy_from_slice(src);
}
|
pub mod board;
use board::Board;
use std::i32;
use std::ops::Neg;
use std::marker::PhantomData;
/// Which side a move belongs to.
#[derive(Copy,Clone,Debug)]
pub enum Team
{
    Enemy,
    Ally,
}

impl Team
{
    /// Returns the opposing team.
    pub fn other_team(self) -> Team
    {
        if let Team::Ally = self { Team::Enemy } else { Team::Ally }
    }
}
/// Evaluation of a game position. The derived ordering follows variant
/// order: `Lose < Heuristic(_) < Win`, with heuristics compared by value.
#[derive(PartialEq,Eq,PartialOrd,Ord,Copy,Clone,Debug)]
pub enum Score
{
    Lose,
    Heuristic(i32),
    Win,
}

impl Neg for Score
{
    type Output = Self;

    /// Negation flips the perspective: one side's win is the other side's
    /// loss, and heuristic values change sign.
    fn neg(self) -> Self
    {
        match self
        {
            Score::Heuristic(v) => Score::Heuristic(-v),
            Score::Lose => Score::Win,
            Score::Win => Score::Lose,
        }
    }
}
/// Result of a minimax search: the chosen move (if any), its score, how
/// many turns deep that score lies, and how many nodes were examined.
#[derive(Clone,Debug,PartialEq,Eq)]
pub struct MoveStats<M>
{
    /// Best move found; `None` at leaf nodes or when no legal move exists.
    pub mv: Option<M>,
    pub score: Score,
    pub turns: u32,
    pub nodes_visited: u64,
}
/// Plain (un-pruned) minimax searcher over any `Board` implementation.
#[derive(Clone,Debug)]
pub struct Minimax<B: Board>
{
    /* TODO: Some sort of caching */
    // No runtime state yet; `PhantomData` only ties the searcher to `B`.
    phantom: PhantomData<B>
}
impl<B: Board> Minimax<B>
{
    /// Create a new (stateless) searcher.
    pub fn new() -> Minimax<B>
    {
        Minimax
        {
            phantom: PhantomData,
        }
    }

    /// Minimax driver function.
    ///
    /// `turn` is the current player; `plies` is the remaining search depth.
    pub fn minimax(&mut self, board: &B, turn: Team, plies: u32) -> MoveStats<B::Move>
    {
        match turn
        {
            Team::Ally =>
                self.max(board, plies),
            Team::Enemy =>
                self.min(board, plies),
        }
    }

    /// Generates best move for ally (the maximising player).
    ///
    /// Flattened from nested if/else into early returns for the two leaf
    /// cases; the search logic itself is unchanged.
    fn max(&mut self, board: &B, plies: u32) -> MoveStats<B::Move>
    {
        let moves = board.gen_ally_moves();
        /* Fail state if you can't move */
        if moves.len() == 0
        {
            return MoveStats
            {
                mv: None,
                score: Score::Lose,
                turns: 0,
                nodes_visited: 0,
            };
        }
        /* Leaf node: out of depth or game already decided — static score. */
        if plies == 0 || board.is_game_over()
        {
            return MoveStats
            {
                mv: None,
                score: board.score(),
                turns: 0,
                nodes_visited: 0,
            };
        }
        let mut best = MoveStats {
            mv: None,
            score: Score::Lose,
            turns: 0,
            nodes_visited: 0,
        };
        for mv in moves
        {
            /* Make a clone of the board so we don't break this one */
            let mut board_clone = board.clone();
            board_clone.do_move(&mv);
            /* Find enemy's best move */
            let enemy_move = self.min(&board_clone, plies - 1);
            /* TODO: Try to postpone losing */
            /* Take a strictly better score; on ties prefer the quicker line. */
            if best.mv.is_none() || enemy_move.score > best.score || (enemy_move.score == best.score && enemy_move.turns < best.turns)
            {
                best.mv = Some(mv);
                best.score = enemy_move.score;
                best.turns = enemy_move.turns + 1;
            }
            best.nodes_visited += enemy_move.nodes_visited + 1;
        }
        best
    }

    /// Generates best move for enemy (the minimising player).
    ///
    /// Mirror image of `max`, likewise flattened with early returns.
    fn min(&mut self, board: &B, plies: u32) -> MoveStats<B::Move>
    {
        let moves = board.gen_enemy_moves();
        /* Fail state if you can't move */
        if moves.len() == 0
        {
            return MoveStats
            {
                mv: None,
                /* If enemy can't move, we win. */
                score: Score::Win,
                turns: 0,
                nodes_visited: 0,
            };
        }
        /* Leaf node: out of depth or game already decided — static score. */
        if plies == 0 || board.is_game_over()
        {
            return MoveStats
            {
                mv: None,
                score: board.score(),
                turns: 0,
                nodes_visited: 0,
            };
        }
        let mut best = MoveStats {
            mv: None,
            /* Technically doesn't matter, but for consistency's sake */
            score: Score::Win,
            turns: 0,
            nodes_visited: 0,
        };
        for mv in moves
        {
            /* Make a clone of the board so we don't break this one */
            let mut board_clone = board.clone();
            board_clone.do_move(&mv);
            /* Find ally's best move */
            let ally_move = self.max(&board_clone, plies - 1);
            /* Take a strictly lower score; on ties prefer the quicker line. */
            if best.mv.is_none() || ally_move.score < best.score || (ally_move.score == best.score && ally_move.turns < best.turns)
            {
                best.mv = Some(mv);
                best.score = ally_move.score;
                best.turns = ally_move.turns + 1;
            }
            best.nodes_visited += ally_move.nodes_visited + 1;
        }
        best
    }
}
/// Sanity-check the derived ordering on `Score`:
/// `Lose < Heuristic(anything) < Win`, heuristics ordered by inner value.
#[test]
fn test_score_ord()
{
    assert!(Score::Win > Score::Heuristic(0));
    assert!(Score::Heuristic(0) > Score::Lose);
    assert!(Score::Win > Score::Lose);
    assert!(Score::Heuristic(100) > Score::Heuristic(0));
    assert!(Score::Heuristic(0) > Score::Heuristic(-100));
    assert!(Score::Win == Score::Win);
    assert!(Score::Lose == Score::Lose);
    assert!(Score::Heuristic(0) == Score::Heuristic(0));
}
/* TODO: minimax tests */
minimax: cleaned up if statements — replaced nested if/else in max() and min() with early returns for the no-moves and leaf-node cases
pub mod board;
use board::Board;
use std::i32;
use std::ops::Neg;
use std::marker::PhantomData;
/// Which side a move belongs to.
#[derive(Copy,Clone,Debug)]
pub enum Team
{
    Enemy,
    Ally,
}

impl Team
{
    /// Returns the opposing team.
    pub fn other_team(self) -> Team
    {
        match self
        {
            Team::Enemy => Team::Ally,
            Team::Ally => Team::Enemy,
        }
    }
}
/// Evaluation of a game position. The derived ordering follows variant
/// order: `Lose < Heuristic(_) < Win`, with heuristics compared by value.
#[derive(PartialEq,Eq,PartialOrd,Ord,Copy,Clone,Debug)]
pub enum Score
{
    Lose,
    Heuristic(i32),
    Win,
}

impl Neg for Score
{
    type Output = Self;
    /// Negation flips the perspective: one side's win is the other side's
    /// loss, and heuristic values change sign.
    fn neg(self) -> Self
    {
        match self
        {
            Score::Win => Score::Lose,
            Score::Lose => Score::Win,
            Score::Heuristic(val) => Score::Heuristic(-val),
        }
    }
}
/// Result of a minimax search: the chosen move (if any), its score, how
/// many turns deep that score lies, and how many nodes were examined.
#[derive(Clone,Debug,PartialEq,Eq)]
pub struct MoveStats<M>
{
    /// Best move found; `None` at leaf nodes or when no legal move exists.
    pub mv: Option<M>,
    pub score: Score,
    pub turns: u32,
    pub nodes_visited: u64,
}
/// Plain (un-pruned) minimax searcher over any `Board` implementation.
#[derive(Clone,Debug)]
pub struct Minimax<B: Board>
{
    /* TODO: Some sort of caching */
    // No runtime state yet; `PhantomData` only ties the searcher to `B`.
    phantom: PhantomData<B>
}
impl<B: Board> Minimax<B>
{
    /// Create a new (stateless) searcher.
    pub fn new() -> Minimax<B>
    {
        Minimax
        {
            phantom: PhantomData,
        }
    }

    /// Minimax driver function.
    ///
    /// `turn` is the current player.
    /// `plies` is the remaining search depth; the search stops at 0.
    pub fn minimax(&mut self, board: &B, turn: Team, plies: u32) -> MoveStats<B::Move>
    {
        match turn
        {
            Team::Ally =>
                self.max(board, plies),
            Team::Enemy =>
                self.min(board, plies),
        }
    }

    /// Generates best move for ally (the maximising player).
    fn max(&mut self, board: &B, plies: u32) -> MoveStats<B::Move>
    {
        let moves = board.gen_ally_moves();
        /* Fail state if you can't move */
        if moves.len() == 0
        {
            return MoveStats
            {
                mv: None,
                score: Score::Lose,
                turns: 0,
                nodes_visited: 0,
            };
        }
        /* If you cannot proceed further */
        /* (out of depth or game decided) — return the static evaluation. */
        if plies == 0 || board.is_game_over()
        {
            return MoveStats
            {
                mv: None,
                score: board.score(),
                turns: 0,
                nodes_visited: 0,
            }
        }
        let mut best = MoveStats {
            mv: None,
            score: Score::Lose,
            turns: 0,
            nodes_visited: 0,
        };
        for mv in moves
        {
            /* Make a clone of the board so we don't break this one */
            let mut board_clone = board.clone();
            board_clone.do_move(&mv);
            /* Find enemy's best move */
            let enemy_move = self.min(&board_clone, plies - 1);
            /* TODO: Try to postpone losing */
            /* Take a strictly better score; on ties prefer the quicker line. */
            if best.mv.is_none() || enemy_move.score > best.score || (enemy_move.score == best.score && enemy_move.turns < best.turns)
            {
                best.mv = Some(mv);
                best.score = enemy_move.score;
                best.turns = enemy_move.turns + 1;
            }
            best.nodes_visited += enemy_move.nodes_visited + 1;
        }
        best
    }

    /// Generates best move for enemy (the minimising player).
    fn min(&mut self, board: &B, plies: u32) -> MoveStats<B::Move>
    {
        let moves = board.gen_enemy_moves();
        /* Fail state if you can't move */
        if moves.len() == 0
        {
            return MoveStats
            {
                mv: None,
                /* If enemy can't move, we win. */
                score: Score::Win,
                turns: 0,
                nodes_visited: 0,
            };
        }
        /* If you cannot proceed further */
        /* (out of depth or game decided) — return the static evaluation. */
        if plies == 0 || board.is_game_over()
        {
            return MoveStats
            {
                mv: None,
                score: board.score(),
                turns: 0,
                nodes_visited: 0,
            }
        }
        let mut best = MoveStats {
            mv: None,
            /* Technically doesn't matter, but for consistency's sake */
            score: Score::Win,
            turns: 0,
            nodes_visited: 0,
        };
        for mv in moves
        {
            /* Make a clone of the board so we don't break this one */
            let mut board_clone = board.clone();
            board_clone.do_move(&mv);
            /* Find ally's best move */
            let ally_move = self.max(&board_clone, plies - 1);
            /* TODO: Try to postpone losing */
            /* Take a strictly lower score; on ties prefer the quicker line. */
            if best.mv.is_none() || ally_move.score < best.score || (ally_move.score == best.score && ally_move.turns < best.turns)
            {
                best.mv = Some(mv);
                best.score = ally_move.score;
                best.turns = ally_move.turns + 1;
            }
            best.nodes_visited += ally_move.nodes_visited + 1;
        }
        best
    }
}
/// Sanity-check the derived ordering on `Score`:
/// `Lose < Heuristic(anything) < Win`, heuristics ordered by inner value.
#[test]
fn test_score_ord()
{
    assert!(Score::Win > Score::Heuristic(0));
    assert!(Score::Heuristic(0) > Score::Lose);
    assert!(Score::Win > Score::Lose);
    assert!(Score::Heuristic(100) > Score::Heuristic(0));
    assert!(Score::Heuristic(0) > Score::Heuristic(-100));
    assert!(Score::Win == Score::Win);
    assert!(Score::Lose == Score::Lose);
    assert!(Score::Heuristic(0) == Score::Heuristic(0));
}
/* TODO: minimax tests */
|
/// Cryptography helpers (currently just a wiring smoke test).
pub mod crypto {
    /// Prints a message proving the module is reachable.
    pub fn crypto_mod_test(){
        println!("Crypto Mod Test");
    }
}
pub mod sql {
extern crate rpassword;
extern crate rusqlite;
use std::path::Path;
use self::rusqlite::Connection;
pub fn sql_mod_test(){
println!("SQL Mod Test");
}
pub fn open_db(filepath: &String) -> Connection {
use std::convert;
let path = Path::new(filepath);
//see if the db exists, to take user creds if not.
let mut db_exists = true;
if !path.exists() {
db_exists = false;
}
//Opening the connection will create the file if it does not exist, or connect to the file
//if it does.
let conn: rusqlite::Connection = Connection::open(&path).expect("Could not open a connection to the database.");
conn.execute("CREATE TABLE IF NOT EXISTS user (password TEXT);",&[]).expect("Unable to create table.");
//If the database did not exist, set the master password for it.
if !db_exists {
use std::io;
println!("Enter a password for this database.\nNote: You will not be able to see the password as you are entering it.");
let mut password = rpassword::prompt_password_stdout("Password: ").unwrap();
password = password.trim().to_string();
insert_user(&conn, &password);
}
return conn;
}
fn insert_user(conn: &Connection, pass:&String) {
conn.execute("INSERT into user(password) VALUES (?)",&[pass]).expect("Could not add password to the user table.");
}
}
Working out a concise way of gathering the information to be entered from the user.
/// Cryptography helpers (currently only a wiring smoke test).
pub mod crypto {
    /// Prints a marker showing the crypto module is reachable.
    pub fn crypto_mod_test(){
        println!("Crypto Mod Test");
    }
}
pub mod sql {
extern crate rpassword;
extern crate rusqlite;
use std::path::Path;
use self::rusqlite::Connection;
pub fn sql_mod_test(){
println!("SQL Mod Test");
}
pub fn open_db(filepath: &String) -> Connection {
use std::convert;
let path = Path::new(filepath);
//see if the db exists, to take user creds if not.
let mut db_exists = true;
if !path.exists() {
db_exists = false;
}
//Opening the connection will create the file if it does not exist, or connect to the file
//if it does.
let conn: rusqlite::Connection = Connection::open(&path).expect("Could not open a connection to the database.");
conn.execute("CREATE TABLE IF NOT EXISTS user (password TEXT);",&[]).expect("Unable to create table.");
//If the database did not exist, set the master password for it.
if !db_exists {
use std::io;
println!("Enter a password for this database.\nNote: You will not be able to see the password as you are entering it.");
let mut password = rpassword::prompt_password_stdout("Password: ").unwrap();
password = password.trim().to_string();
insert_user(&conn, &password);
}
create_entry_table(&conn);
return conn;
}
fn insert_user(conn: &Connection, pass:&String) {
conn.execute("INSERT into user(password) VALUES (?)",&[pass]).expect("Could not add password to the user table.");
}
fn create_entry_table(conn: &Connection) {
conn.execute("CREATE TABLE IF NOT EXISTS password_entry (name TEXT, username TEXT, password TEXT, url TEXT, notes TEXT)",&[]);
}
fn insert_entry(conn: &Connection) {
conn.execute("INSERT INTO password_entry (name, username, password, url, notes) VALUES (?1,?2,?3,?4,?5)",&[]);
}
}
|
extern crate crossbeam;
use std::borrow::Borrow;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::{Command, Output, ExitStatus};
use std::thread::JoinHandle;
use std::sync::Arc;
mod pipe;
pub fn cmd<T: AsRef<OsStr>>(argv: &[T]) -> Expression<'static> {
let argv_vec = argv.iter().map(|arg| arg.as_ref().to_owned()).collect();
Expression {
inner: Arc::new(ExpressionInner::Exec(ExecutableExpression::ArgvCommand(argv_vec))),
}
}
pub fn sh<T: AsRef<OsStr>>(command: T) -> Expression<'static> {
Expression {
inner: Arc::new(ExpressionInner::Exec(ExecutableExpression::ShCommand(command.as_ref()
.to_owned()))),
}
}
/// A lazily-built command expression tree.  Cloning is cheap: nodes are
/// shared behind an `Arc`.
#[derive(Clone, Debug)]
pub struct Expression<'a> {
    inner: Arc<ExpressionInner<'a>>,
}
impl<'a, 'b> Expression<'a>
    where 'b: 'a
{
    /// Executes the expression, capturing its stdout.  Returns the captured
    /// text with trailing newlines stripped, or `Error::Status` when the
    /// expression exits unsuccessfully.
    pub fn read(&self) -> Result<String, Error> {
        // Drain stdout on a background thread while the expression runs.
        let (handle, reader) = pipe_with_reader_thread();
        let mut context = IoContext::new();
        context.stdout = handle;
        let status = try!(self.inner.exec(context));
        let stdout_vec = try!(reader.join().unwrap());
        if !status.success() {
            return Err(Error::Status(Output {
                status: status,
                stdout: stdout_vec,
                stderr: Vec::new(),
            }));
        }
        let stdout_string = try!(std::str::from_utf8(&stdout_vec))
            .trim_right_matches('\n')
            .to_owned();
        Ok(stdout_string)
    }
    /// Connects `self`'s stdout to `right`'s stdin, like a shell `|`.
    pub fn pipe<T: Borrow<Expression<'b>>>(&self, right: T) -> Expression<'a> {
        Expression {
            inner: Arc::new(ExpressionInner::Exec(ExecutableExpression::Pipe(self.clone(),
                                                                             right.borrow()
                                                                                 .clone()))),
        }
    }
    /// Runs `self`, then runs `right` only if `self` succeeded (see
    /// `exec_then`), like a shell `&&`.
    pub fn then<T: Borrow<Expression<'b>>>(&self, right: T) -> Expression<'a> {
        Expression {
            inner: Arc::new(ExpressionInner::Exec(ExecutableExpression::Then(self.clone(),
                                                                             right.borrow()
                                                                                 .clone()))),
        }
    }
    /// Feeds the given bytes (or string contents) to the child's stdin.
    pub fn input<T: IntoStdinBytes<'b>>(&self, input: T) -> Self {
        Expression {
            inner: Arc::new(ExpressionInner::Io(IoRedirect::Stdin(input.into_stdin_bytes()),
                                                self.clone())),
        }
    }
    /// Redirects the child's stdin from a path, file, or explicit redirect.
    pub fn stdin<T: IntoStdin<'b>>(&self, stdin: T) -> Self {
        Expression {
            inner: Arc::new(ExpressionInner::Io(IoRedirect::Stdin(stdin.into_stdin()),
                                                self.clone())),
        }
    }
    /// Redirects the child's stdout.
    pub fn stdout<T: IntoOutput<'b>>(&self, stdout: T) -> Self {
        Expression {
            inner: Arc::new(ExpressionInner::Io(IoRedirect::Stdout(stdout.into_output()),
                                                self.clone())),
        }
    }
    /// Redirects the child's stderr.
    pub fn stderr<T: IntoOutput<'b>>(&self, stderr: T) -> Self {
        Expression {
            inner: Arc::new(ExpressionInner::Io(IoRedirect::Stderr(stderr.into_output()),
                                                self.clone())),
        }
    }
}
/// A node in the expression tree: either something directly executable, or
/// an I/O redirection wrapped around an inner expression.
#[derive(Debug)]
enum ExpressionInner<'a> {
    Exec(ExecutableExpression<'a>),
    Io(IoRedirect<'a>, Expression<'a>),
}
impl<'a> ExpressionInner<'a> {
    /// Executes the node against `parent_context`, applying any redirection
    /// before delegating to the wrapped expression.
    fn exec(&self, parent_context: IoContext) -> io::Result<ExitStatus> {
        // `match *self` with `ref` bindings: the idiomatic form, instead of
        // a `&` pattern on every arm.
        match *self {
            ExpressionInner::Exec(ref executable) => executable.exec(parent_context),
            ExpressionInner::Io(ref ioarg, ref expr) => {
                ioarg.with_redirected_context(parent_context, |context| expr.inner.exec(context))
            }
        }
    }
}
/// The leaf and combinator nodes that actually launch processes.
#[derive(Debug)]
enum ExecutableExpression<'a> {
    ArgvCommand(Vec<OsString>),
    ShCommand(OsString),
    Pipe(Expression<'a>, Expression<'a>),
    Then(Expression<'a>, Expression<'a>),
}
impl<'a> ExecutableExpression<'a> {
    /// Dispatches to the matching `exec_*` helper with the given I/O context.
    fn exec(&self, context: IoContext) -> io::Result<ExitStatus> {
        // `match *self` with `ref` bindings instead of a `&` pattern per arm.
        match *self {
            ExecutableExpression::ArgvCommand(ref argv) => exec_argv(argv, context),
            ExecutableExpression::ShCommand(ref command) => exec_sh(command, context),
            ExecutableExpression::Pipe(ref left, ref right) => exec_pipe(left, right, context),
            ExecutableExpression::Then(ref left, ref right) => exec_then(left, right, context),
        }
    }
}
/// Spawns `argv` as a child process wired to `context`'s handles and waits
/// for it to exit.
fn exec_argv<T: AsRef<OsStr>>(argv: &[T], context: IoContext) -> io::Result<ExitStatus> {
    Command::new(&argv[0])
        .args(&argv[1..])
        .stdin(context.stdin.into_stdio())
        .stdout(context.stdout.into_stdio())
        .stderr(context.stderr.into_stdio())
        .status()
}
/// Runs `command` through `bash -c`, inheriting `context`'s handles.
// TODO: What shell should we be using here, really?
// TODO: Figure out how cmd.Exec works on Windows.
fn exec_sh<T: AsRef<OsStr>>(command: T, context: IoContext) -> io::Result<ExitStatus> {
    let argv: [&OsStr; 3] = ["bash".as_ref(), "-c".as_ref(), command.as_ref()];
    exec_argv(&argv, context)
}
/// Runs `left` and `right` concurrently with `left`'s stdout connected to
/// `right`'s stdin through a fresh pipe.
fn exec_pipe(left: &Expression, right: &Expression, context: IoContext) -> io::Result<ExitStatus> {
    let (read_pipe, write_pipe) = pipe::open_pipe();
    // Left writes into the pipe; right reads from it.  stderr is shared.
    let left_context = IoContext {
        stdin: context.stdin,
        stdout: write_pipe,
        stderr: context.stderr.clone(),
    };
    let right_context = IoContext {
        stdin: read_pipe,
        stdout: context.stdout,
        stderr: context.stderr,
    };
    // Run the left side on a scoped thread while the right side runs here;
    // the scope guarantees both halves finish before we continue.
    let (left_result, right_result) = crossbeam::scope(|scope| {
        let left_joiner = scope.spawn(|| left.inner.exec(left_context));
        let right_result = right.inner.exec(right_context);
        let left_result = left_joiner.join();
        (left_result, right_result)
    });
    let right_status = try!(right_result);
    let left_status = try!(left_result);
    // The right side's failure takes precedence; otherwise report the
    // left side's status.
    if !right_status.success() {
        Ok(right_status)
    } else {
        Ok(left_status)
    }
}
/// Runs `left`; runs `right` only when `left` succeeded (shell `&&`).
fn exec_then(left: &Expression, right: &Expression, context: IoContext) -> io::Result<ExitStatus> {
    let left_status = try!(left.inner.exec(context.clone()));
    if left_status.success() {
        right.inner.exec(context)
    } else {
        Ok(left_status)
    }
}
/// One stdin/stdout/stderr redirection applied around an inner expression.
#[derive(Debug)]
enum IoRedirect<'a> {
    Stdin(InputRedirect<'a>),
    Stdout(OutputRedirect<'a>),
    Stderr(OutputRedirect<'a>),
}
impl<'a> IoRedirect<'a> {
    /// Rebuilds `parent_context` with this redirection applied, runs `inner`
    /// with it, then joins any writer thread spawned to feed stdin bytes.
    fn with_redirected_context<F, T>(&self, parent_context: IoContext, inner: F) -> io::Result<T>
        where F: FnOnce(IoContext) -> io::Result<T>
    {
        // The crossbeam scope lets byte-input redirections borrow their data
        // while the writer thread runs.
        crossbeam::scope(|scope| {
            let mut context = parent_context; // move it into the closure
            let mut maybe_stdin_thread = None;
            // Perform the redirect.
            match self {
                &IoRedirect::Stdin(ref redir) => {
                    let (handle, maybe_thread) = try!(redir.open_handle_maybe_thread(scope));
                    maybe_stdin_thread = maybe_thread;
                    context.stdin = handle;
                }
                &IoRedirect::Stdout(ref redir) => {
                    context.stdout = try!(redir.open_handle(&context.stdout, &context.stderr));
                }
                &IoRedirect::Stderr(ref redir) => {
                    context.stderr = try!(redir.open_handle(&context.stdout, &context.stderr));
                }
            }
            // Run the inner closure.
            let ret = try!(inner(context));
            // Join the input thread, if any.
            if let Some(thread) = maybe_stdin_thread {
                try!(thread.join());
            }
            Ok(ret)
        })
    }
}
/// Sources a child's stdin can be redirected from.  Borrowed and owned
/// variants exist so both reference and by-value arguments are supported.
#[derive(Debug)]
pub enum InputRedirect<'a> {
    Null,
    Path(&'a Path),
    PathBuf(PathBuf),
    FileRef(&'a File),
    File(File),
    BytesSlice(&'a [u8]),
    BytesVec(Vec<u8>),
}
impl<'a> InputRedirect<'a> {
    /// Opens a pipe handle for this stdin source.  Byte-buffer variants also
    /// spawn a scoped writer thread that feeds the bytes in; the caller must
    /// join the returned thread after the child finishes.
    fn open_handle_maybe_thread(&'a self,
                                scope: &crossbeam::Scope<'a>)
                                -> io::Result<(pipe::Handle, Option<WriterThreadJoiner>)> {
        let mut maybe_thread = None;
        // `match *self` with `ref` bindings instead of a `&` pattern per arm.
        let handle = match *self {
            InputRedirect::Null => pipe::Handle::from_file(try!(File::open("/dev/null"))), // TODO: Windows
            InputRedirect::Path(ref p) => pipe::Handle::from_file(try!(File::open(p))),
            InputRedirect::PathBuf(ref p) => pipe::Handle::from_file(try!(File::open(p))),
            InputRedirect::FileRef(ref f) => pipe::Handle::dup_file(f),
            InputRedirect::File(ref f) => pipe::Handle::dup_file(f),
            InputRedirect::BytesSlice(ref b) => {
                let (handle, thread) = pipe_with_writer_thread(b, scope);
                maybe_thread = Some(thread);
                handle
            }
            InputRedirect::BytesVec(ref b) => {
                let (handle, thread) = pipe_with_writer_thread(b, scope);
                maybe_thread = Some(thread);
                handle
            }
        };
        Ok((handle, maybe_thread))
    }
}
/// Conversion into a byte-buffer stdin source (used by `Expression::input`).
/// String-ish arguments feed their raw bytes to the child.
pub trait IntoStdinBytes<'a> {
    fn into_stdin_bytes(self) -> InputRedirect<'a>;
}
// Borrowed buffers become `BytesSlice` (no copy); owned ones, `BytesVec`.
// `&Vec`/`&String` get their own impls because trait resolution does not
// auto-deref (see test_ergonomics).
impl<'a> IntoStdinBytes<'a> for &'a [u8] {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self)
    }
}
impl<'a> IntoStdinBytes<'a> for &'a Vec<u8> {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}
impl IntoStdinBytes<'static> for Vec<u8> {
    fn into_stdin_bytes(self) -> InputRedirect<'static> {
        InputRedirect::BytesVec(self)
    }
}
impl<'a> IntoStdinBytes<'a> for &'a str {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}
impl<'a> IntoStdinBytes<'a> for &'a String {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}
impl IntoStdinBytes<'static> for String {
    fn into_stdin_bytes(self) -> InputRedirect<'static> {
        InputRedirect::BytesVec(self.into_bytes())
    }
}
/// Conversion into a general stdin redirection (used by `Expression::stdin`).
/// Path-ish arguments redirect from the named file; `File`s are used as-is.
pub trait IntoStdin<'a> {
    fn into_stdin(self) -> InputRedirect<'a>;
}
// An explicit `InputRedirect` (e.g. `Null`) passes straight through.
impl<'a> IntoStdin<'a> for InputRedirect<'a> {
    fn into_stdin(self) -> InputRedirect<'a> {
        self
    }
}
// Borrowed path-ish types become `Path`; owned ones become `PathBuf`.
impl<'a> IntoStdin<'a> for &'a Path {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self)
    }
}
impl<'a> IntoStdin<'a> for &'a PathBuf {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl IntoStdin<'static> for PathBuf {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self)
    }
}
// Note: strings here are file *paths*, in contrast with `IntoStdinBytes`
// where a string is the input data itself.
impl<'a> IntoStdin<'a> for &'a str {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoStdin<'a> for &'a String {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl IntoStdin<'static> for String {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self.into())
    }
}
impl<'a> IntoStdin<'a> for &'a OsStr {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoStdin<'a> for &'a OsString {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl IntoStdin<'static> for OsString {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self.into())
    }
}
// Files are handed over directly (the handle is dup'd at exec time).
impl<'a> IntoStdin<'a> for &'a File {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::FileRef(self)
    }
}
impl IntoStdin<'static> for File {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::File(self)
    }
}
/// Targets a child's stdout or stderr can be redirected to.  `Stdout` and
/// `Stderr` mean "whatever handle is currently in effect", enabling swaps
/// like `2>&1`.
#[derive(Debug)]
pub enum OutputRedirect<'a> {
    Null,
    Stdout,
    Stderr,
    Path(&'a Path),
    PathBuf(PathBuf),
    FileRef(&'a File),
    File(File),
}
impl<'a> OutputRedirect<'a> {
    /// Opens a pipe handle for this output target.  `Stdout`/`Stderr` clone
    /// the handles currently in effect, which is what makes `2>&1`-style
    /// swaps possible.
    fn open_handle(&self,
                   inherited_stdout: &pipe::Handle,
                   inherited_stderr: &pipe::Handle)
                   -> io::Result<pipe::Handle> {
        // `match *self` with `ref` bindings instead of a `&` pattern per arm.
        Ok(match *self {
            OutputRedirect::Null => pipe::Handle::from_file(try!(File::create("/dev/null"))), // TODO: Windows
            OutputRedirect::Stdout => inherited_stdout.clone(),
            OutputRedirect::Stderr => inherited_stderr.clone(),
            OutputRedirect::Path(ref p) => pipe::Handle::from_file(try!(File::create(p))),
            OutputRedirect::PathBuf(ref p) => pipe::Handle::from_file(try!(File::create(p))),
            OutputRedirect::FileRef(ref f) => pipe::Handle::dup_file(f),
            OutputRedirect::File(ref f) => pipe::Handle::dup_file(f),
        })
    }
}
/// Conversion into an output redirection (used by `Expression::stdout` and
/// `Expression::stderr`).  Path-ish arguments create/truncate the named
/// file; `File`s are used directly.
pub trait IntoOutput<'a> {
    fn into_output(self) -> OutputRedirect<'a>;
}
// An explicit `OutputRedirect` (e.g. `Null`, `Stdout`) passes through.
impl<'a> IntoOutput<'a> for OutputRedirect<'a> {
    fn into_output(self) -> OutputRedirect<'a> {
        self
    }
}
// Borrowed path-ish types become `Path`; owned ones become `PathBuf`.
impl<'a> IntoOutput<'a> for &'a Path {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self)
    }
}
impl<'a> IntoOutput<'a> for &'a PathBuf {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl IntoOutput<'static> for PathBuf {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self)
    }
}
impl<'a> IntoOutput<'a> for &'a str {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoOutput<'a> for &'a String {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl IntoOutput<'static> for String {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self.into())
    }
}
impl<'a> IntoOutput<'a> for &'a OsStr {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoOutput<'a> for &'a OsString {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl IntoOutput<'static> for OsString {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self.into())
    }
}
// Files are handed over directly (the handle is dup'd at exec time).
impl<'a> IntoOutput<'a> for &'a File {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::FileRef(self)
    }
}
impl IntoOutput<'static> for File {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::File(self)
    }
}
/// Errors `read` can produce: an I/O failure, non-UTF-8 output, or a child
/// that exited unsuccessfully (with its captured output attached).
#[derive(Debug)]
pub enum Error {
    Io(io::Error),
    Utf8(std::str::Utf8Error),
    Status(Output),
}
// Conversions so `try!` can propagate the underlying error types.
impl From<io::Error> for Error {
    fn from(err: io::Error) -> Error {
        Error::Io(err)
    }
}
impl From<std::str::Utf8Error> for Error {
    fn from(err: std::str::Utf8Error) -> Error {
        Error::Utf8(err)
    }
}
// An IoContext represents the file descriptors child processes are talking to at execution time.
// It's initialized in read() with dups of the stdin/stdout/stderr pipes, and then passed down to
// sub-expressions. Compound expressions will clone() it, and redirections will modify it.
#[derive(Clone, Debug)]
pub struct IoContext {
    stdin: pipe::Handle,
    stdout: pipe::Handle,
    stderr: pipe::Handle,
}
impl IoContext {
    /// A context wired to this process's own standard streams.
    fn new() -> IoContext {
        IoContext {
            stdin: pipe::Handle::stdin(),
            stdout: pipe::Handle::stdout(),
            stderr: pipe::Handle::stderr(),
        }
    }
}
/// Creates a pipe whose read end is drained into a `Vec<u8>` by a background
/// thread; returns the write end plus the collector thread's join handle.
fn pipe_with_reader_thread() -> (pipe::Handle, JoinHandle<io::Result<Vec<u8>>>) {
    let (read_end, write_end) = pipe::open_pipe();
    let collector = std::thread::spawn(move || {
        let mut source = read_end.into_file();
        let mut collected = Vec::new();
        try!(source.read_to_end(&mut collected));
        Ok(collected)
    });
    (write_end, collector)
}
// Join handle for the scoped threads that feed bytes into a child's stdin.
type WriterThreadJoiner = crossbeam::ScopedJoinHandle<io::Result<()>>;
/// Creates a pipe fed from `input` by a scoped background thread; returns
/// the read end plus the writer thread's join handle.
fn pipe_with_writer_thread<'a>(input: &'a [u8],
                               scope: &crossbeam::Scope<'a>)
                               -> (pipe::Handle, WriterThreadJoiner) {
    let (read_end, write_end) = pipe::open_pipe();
    let feeder = scope.spawn(move || {
        let mut sink = write_end.into_file();
        try!(sink.write_all(input));
        Ok(())
    });
    (read_end, feeder)
}
#[cfg(test)]
mod test {
    extern crate tempfile;
    use super::*;
    use std::io::prelude::*;
    use std::io::SeekFrom;
    use std::path::Path;
    // Integration-style tests: they spawn real child processes (echo, cat,
    // sed, bash), so they need a Unix-like host.
    #[test]
    fn test_cmd() {
        let output = cmd(&["echo", "hi"]).read().unwrap();
        assert_eq!("hi", output);
    }
    #[test]
    fn test_sh() {
        let output = sh("echo hi").read().unwrap();
        assert_eq!("hi", output);
    }
    #[test]
    fn test_pipe() {
        let output = sh("echo hi").pipe(sh("sed s/i/o/")).read().unwrap();
        assert_eq!("ho", output);
    }
    #[test]
    fn test_then() {
        let output = sh("echo -n hi").then(sh("echo lo")).read().unwrap();
        assert_eq!("hilo", output);
    }
    #[test]
    fn test_input() {
        // TODO: Fixed-length bytes input like b"foo" works poorly here. Why?
        let expr = sh("sed s/f/g/").input("foo");
        let output = expr.read().unwrap();
        assert_eq!("goo", output);
    }
    #[test]
    fn test_null() {
        // TODO: The separation between InputRedirect and OutputRedirect here is tedious.
        let expr = cmd(&["cat"])
            .stdin(InputRedirect::Null)
            .stdout(OutputRedirect::Null)
            .stderr(OutputRedirect::Null);
        let output = expr.read().unwrap();
        assert_eq!("", output);
    }
    #[test]
    fn test_path() {
        // Both stdin and stdout redirected to temp-file paths: read() itself
        // captures nothing, and the result lands in the output file.
        let mut input_file = tempfile::NamedTempFile::new().unwrap();
        let output_file = tempfile::NamedTempFile::new().unwrap();
        input_file.write_all(b"foo").unwrap();
        let expr = sh("sed s/o/a/g").stdin(input_file.path()).stdout(output_file.path());
        let output = expr.read().unwrap();
        assert_eq!("", output);
        let mut file_output = String::new();
        output_file.as_ref().read_to_string(&mut file_output).unwrap();
        assert_eq!("faa", file_output);
    }
    #[test]
    fn test_owned_input() {
        // Owned input must be movable into the expression tree.
        fn with_input<'a>(expr: &Expression<'a>) -> Expression<'a> {
            let mystr = format!("I own this: {}", "foo");
            // This would be a lifetime error if we tried to use &mystr.
            expr.input(mystr)
        }
        let c = cmd(&["cat"]);
        let c_with_input = with_input(&c);
        let output = c_with_input.read().unwrap();
        assert_eq!("I own this: foo", output);
    }
    #[test]
    fn test_stderr_to_stdout() {
        // `OutputRedirect::Stdout` reuses the stdout handle in effect (2>&1).
        let command = sh("echo hi >&2").stderr(OutputRedirect::Stdout);
        let output = command.read().unwrap();
        assert_eq!("hi", output);
    }
    #[test]
    fn test_file() {
        let mut temp = tempfile::NamedTempFile::new().unwrap();
        temp.write_all(b"example").unwrap();
        temp.seek(SeekFrom::Start(0)).unwrap();
        let expr = cmd(&["cat"]).stdin(temp.as_ref());
        let output = expr.read().unwrap();
        assert_eq!(output, "example");
    }
    #[test]
    fn test_ergonomics() {
        // We don't get automatic Deref when we're matching trait implementations, so in addition
        // to implementing String and &str, we *also* implement &String.
        // TODO: See if specialization can clean this up.
        let mystr = "owned string".to_owned();
        let mypathbuf = Path::new("a/b/c").to_owned();
        let myvec = vec![1, 2, 3];
        // These are nonsense expressions. We just want to make sure they compile.
        sh("true").stdin(&*mystr).input(&*myvec).stdout(&*mypathbuf);
        sh("true").stdin(&mystr).input(&myvec).stdout(&mypathbuf);
        sh("true").stdin(mystr).input(myvec).stdout(mypathbuf);
    }
}
Get rid of unnecessary `&`s in match patterns.
extern crate crossbeam;
use std::borrow::Borrow;
use std::ffi::{OsStr, OsString};
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::{Command, Output, ExitStatus};
use std::thread::JoinHandle;
use std::sync::Arc;
mod pipe;
pub fn cmd<T: AsRef<OsStr>>(argv: &[T]) -> Expression<'static> {
let argv_vec = argv.iter().map(|arg| arg.as_ref().to_owned()).collect();
Expression {
inner: Arc::new(ExpressionInner::Exec(ExecutableExpression::ArgvCommand(argv_vec))),
}
}
pub fn sh<T: AsRef<OsStr>>(command: T) -> Expression<'static> {
Expression {
inner: Arc::new(ExpressionInner::Exec(ExecutableExpression::ShCommand(command.as_ref()
.to_owned()))),
}
}
/// A lazily-built command expression tree.  Cloning is cheap: nodes are
/// shared behind an `Arc`.
#[derive(Clone, Debug)]
pub struct Expression<'a> {
    inner: Arc<ExpressionInner<'a>>,
}
impl<'a, 'b> Expression<'a>
    where 'b: 'a
{
    /// Executes the expression, capturing its stdout.  Returns the captured
    /// text with trailing newlines stripped, or `Error::Status` when the
    /// expression exits unsuccessfully.
    pub fn read(&self) -> Result<String, Error> {
        // Drain stdout on a background thread while the expression runs.
        let (handle, reader) = pipe_with_reader_thread();
        let mut context = IoContext::new();
        context.stdout = handle;
        let status = try!(self.inner.exec(context));
        let stdout_vec = try!(reader.join().unwrap());
        if !status.success() {
            return Err(Error::Status(Output {
                status: status,
                stdout: stdout_vec,
                stderr: Vec::new(),
            }));
        }
        let stdout_string = try!(std::str::from_utf8(&stdout_vec))
            .trim_right_matches('\n')
            .to_owned();
        Ok(stdout_string)
    }
    /// Connects `self`'s stdout to `right`'s stdin, like a shell `|`.
    pub fn pipe<T: Borrow<Expression<'b>>>(&self, right: T) -> Expression<'a> {
        Expression {
            inner: Arc::new(ExpressionInner::Exec(ExecutableExpression::Pipe(self.clone(),
                                                                             right.borrow()
                                                                                 .clone()))),
        }
    }
    /// Runs `self`, then runs `right` only if `self` succeeded (see
    /// `exec_then`), like a shell `&&`.
    pub fn then<T: Borrow<Expression<'b>>>(&self, right: T) -> Expression<'a> {
        Expression {
            inner: Arc::new(ExpressionInner::Exec(ExecutableExpression::Then(self.clone(),
                                                                             right.borrow()
                                                                                 .clone()))),
        }
    }
    /// Feeds the given bytes (or string contents) to the child's stdin.
    pub fn input<T: IntoStdinBytes<'b>>(&self, input: T) -> Self {
        Expression {
            inner: Arc::new(ExpressionInner::Io(IoRedirect::Stdin(input.into_stdin_bytes()),
                                                self.clone())),
        }
    }
    /// Redirects the child's stdin from a path, file, or explicit redirect.
    pub fn stdin<T: IntoStdin<'b>>(&self, stdin: T) -> Self {
        Expression {
            inner: Arc::new(ExpressionInner::Io(IoRedirect::Stdin(stdin.into_stdin()),
                                                self.clone())),
        }
    }
    /// Redirects the child's stdout.
    pub fn stdout<T: IntoOutput<'b>>(&self, stdout: T) -> Self {
        Expression {
            inner: Arc::new(ExpressionInner::Io(IoRedirect::Stdout(stdout.into_output()),
                                                self.clone())),
        }
    }
    /// Redirects the child's stderr.
    pub fn stderr<T: IntoOutput<'b>>(&self, stderr: T) -> Self {
        Expression {
            inner: Arc::new(ExpressionInner::Io(IoRedirect::Stderr(stderr.into_output()),
                                                self.clone())),
        }
    }
}
/// A node in the expression tree: either something directly executable, or
/// an I/O redirection wrapped around an inner expression.
#[derive(Debug)]
enum ExpressionInner<'a> {
    Exec(ExecutableExpression<'a>),
    Io(IoRedirect<'a>, Expression<'a>),
}
impl<'a> ExpressionInner<'a> {
    /// Executes the node against `parent_context`, applying any redirection
    /// before delegating to the wrapped expression.
    fn exec(&self, parent_context: IoContext) -> io::Result<ExitStatus> {
        match *self {
            ExpressionInner::Exec(ref executable) => executable.exec(parent_context),
            ExpressionInner::Io(ref ioarg, ref expr) => {
                ioarg.with_redirected_context(parent_context, |context| expr.inner.exec(context))
            }
        }
    }
}
/// The leaf and combinator nodes that actually launch processes.
#[derive(Debug)]
enum ExecutableExpression<'a> {
    ArgvCommand(Vec<OsString>),
    ShCommand(OsString),
    Pipe(Expression<'a>, Expression<'a>),
    Then(Expression<'a>, Expression<'a>),
}
impl<'a> ExecutableExpression<'a> {
    /// Dispatches to the matching `exec_*` helper with the given I/O context.
    fn exec(&self, context: IoContext) -> io::Result<ExitStatus> {
        match *self {
            ExecutableExpression::ArgvCommand(ref argv) => exec_argv(argv, context),
            ExecutableExpression::ShCommand(ref command) => exec_sh(command, context),
            ExecutableExpression::Pipe(ref left, ref right) => exec_pipe(left, right, context),
            ExecutableExpression::Then(ref left, ref right) => exec_then(left, right, context),
        }
    }
}
/// Spawns `argv` as a child process wired to `context`'s handles and waits
/// for it to exit.
fn exec_argv<T: AsRef<OsStr>>(argv: &[T], context: IoContext) -> io::Result<ExitStatus> {
    Command::new(&argv[0])
        .args(&argv[1..])
        .stdin(context.stdin.into_stdio())
        .stdout(context.stdout.into_stdio())
        .stderr(context.stderr.into_stdio())
        .status()
}
/// Runs `command` through `bash -c`, inheriting `context`'s handles.
// TODO: What shell should we be using here, really?
// TODO: Figure out how cmd.Exec works on Windows.
fn exec_sh<T: AsRef<OsStr>>(command: T, context: IoContext) -> io::Result<ExitStatus> {
    let argv: [&OsStr; 3] = ["bash".as_ref(), "-c".as_ref(), command.as_ref()];
    exec_argv(&argv, context)
}
/// Runs `left` and `right` concurrently with `left`'s stdout connected to
/// `right`'s stdin through a fresh pipe.
fn exec_pipe(left: &Expression, right: &Expression, context: IoContext) -> io::Result<ExitStatus> {
    let (read_pipe, write_pipe) = pipe::open_pipe();
    // Left writes into the pipe; right reads from it.  stderr is shared.
    let left_context = IoContext {
        stdin: context.stdin,
        stdout: write_pipe,
        stderr: context.stderr.clone(),
    };
    let right_context = IoContext {
        stdin: read_pipe,
        stdout: context.stdout,
        stderr: context.stderr,
    };
    // Run the left side on a scoped thread while the right side runs here;
    // the scope guarantees both halves finish before we continue.
    let (left_result, right_result) = crossbeam::scope(|scope| {
        let left_joiner = scope.spawn(|| left.inner.exec(left_context));
        let right_result = right.inner.exec(right_context);
        let left_result = left_joiner.join();
        (left_result, right_result)
    });
    let right_status = try!(right_result);
    let left_status = try!(left_result);
    // The right side's failure takes precedence; otherwise report the
    // left side's status.
    if !right_status.success() {
        Ok(right_status)
    } else {
        Ok(left_status)
    }
}
/// Runs `left`; runs `right` only when `left` succeeded (shell `&&`).
fn exec_then(left: &Expression, right: &Expression, context: IoContext) -> io::Result<ExitStatus> {
    let left_status = try!(left.inner.exec(context.clone()));
    if left_status.success() {
        right.inner.exec(context)
    } else {
        Ok(left_status)
    }
}
/// One stdin/stdout/stderr redirection applied around an inner expression.
#[derive(Debug)]
enum IoRedirect<'a> {
    Stdin(InputRedirect<'a>),
    Stdout(OutputRedirect<'a>),
    Stderr(OutputRedirect<'a>),
}
impl<'a> IoRedirect<'a> {
    /// Rebuilds `parent_context` with this redirection applied, runs `inner`
    /// with it, then joins any writer thread spawned to feed stdin bytes.
    fn with_redirected_context<F, T>(&self, parent_context: IoContext, inner: F) -> io::Result<T>
        where F: FnOnce(IoContext) -> io::Result<T>
    {
        // The crossbeam scope lets byte-input redirections borrow their data
        // while the writer thread runs.
        crossbeam::scope(|scope| {
            let mut context = parent_context; // move it into the closure
            let mut maybe_stdin_thread = None;
            // Perform the redirect.
            match *self {
                IoRedirect::Stdin(ref redir) => {
                    let (handle, maybe_thread) = try!(redir.open_handle_maybe_thread(scope));
                    maybe_stdin_thread = maybe_thread;
                    context.stdin = handle;
                }
                IoRedirect::Stdout(ref redir) => {
                    context.stdout = try!(redir.open_handle(&context.stdout, &context.stderr));
                }
                IoRedirect::Stderr(ref redir) => {
                    context.stderr = try!(redir.open_handle(&context.stdout, &context.stderr));
                }
            }
            // Run the inner closure.
            let ret = try!(inner(context));
            // Join the input thread, if any.
            if let Some(thread) = maybe_stdin_thread {
                try!(thread.join());
            }
            Ok(ret)
        })
    }
}
/// Sources a child's stdin can be redirected from.  Borrowed and owned
/// variants exist so both reference and by-value arguments are supported.
#[derive(Debug)]
pub enum InputRedirect<'a> {
    Null,
    Path(&'a Path),
    PathBuf(PathBuf),
    FileRef(&'a File),
    File(File),
    BytesSlice(&'a [u8]),
    BytesVec(Vec<u8>),
}
impl<'a> InputRedirect<'a> {
    /// Opens a pipe handle for this stdin source.  Byte-buffer variants also
    /// spawn a scoped writer thread that feeds the bytes in; the caller must
    /// join the returned thread after the child finishes.
    fn open_handle_maybe_thread(&'a self,
                                scope: &crossbeam::Scope<'a>)
                                -> io::Result<(pipe::Handle, Option<WriterThreadJoiner>)> {
        let mut maybe_thread = None;
        let handle = match *self {
            InputRedirect::Null => pipe::Handle::from_file(try!(File::open("/dev/null"))), // TODO: Windows
            InputRedirect::Path(ref p) => pipe::Handle::from_file(try!(File::open(p))),
            InputRedirect::PathBuf(ref p) => pipe::Handle::from_file(try!(File::open(p))),
            InputRedirect::FileRef(ref f) => pipe::Handle::dup_file(f),
            InputRedirect::File(ref f) => pipe::Handle::dup_file(f),
            InputRedirect::BytesSlice(ref b) => {
                let (handle, thread) = pipe_with_writer_thread(b, scope);
                maybe_thread = Some(thread);
                handle
            }
            InputRedirect::BytesVec(ref b) => {
                let (handle, thread) = pipe_with_writer_thread(b, scope);
                maybe_thread = Some(thread);
                handle
            }
        };
        Ok((handle, maybe_thread))
    }
}
/// Conversion into a byte-buffer stdin source (used by `Expression::input`).
/// String-ish arguments feed their raw bytes to the child.
pub trait IntoStdinBytes<'a> {
    fn into_stdin_bytes(self) -> InputRedirect<'a>;
}
// Borrowed buffers become `BytesSlice` (no copy); owned ones, `BytesVec`.
// `&Vec`/`&String` get their own impls because trait resolution does not
// auto-deref (see test_ergonomics).
impl<'a> IntoStdinBytes<'a> for &'a [u8] {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self)
    }
}
impl<'a> IntoStdinBytes<'a> for &'a Vec<u8> {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}
impl IntoStdinBytes<'static> for Vec<u8> {
    fn into_stdin_bytes(self) -> InputRedirect<'static> {
        InputRedirect::BytesVec(self)
    }
}
impl<'a> IntoStdinBytes<'a> for &'a str {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}
impl<'a> IntoStdinBytes<'a> for &'a String {
    fn into_stdin_bytes(self) -> InputRedirect<'a> {
        InputRedirect::BytesSlice(self.as_ref())
    }
}
impl IntoStdinBytes<'static> for String {
    fn into_stdin_bytes(self) -> InputRedirect<'static> {
        InputRedirect::BytesVec(self.into_bytes())
    }
}
/// Conversion into a general stdin redirection (used by `Expression::stdin`).
/// Path-ish arguments redirect from the named file; `File`s are used as-is.
pub trait IntoStdin<'a> {
    fn into_stdin(self) -> InputRedirect<'a>;
}
// An explicit `InputRedirect` (e.g. `Null`) passes straight through.
impl<'a> IntoStdin<'a> for InputRedirect<'a> {
    fn into_stdin(self) -> InputRedirect<'a> {
        self
    }
}
// Borrowed path-ish types become `Path`; owned ones become `PathBuf`.
impl<'a> IntoStdin<'a> for &'a Path {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self)
    }
}
impl<'a> IntoStdin<'a> for &'a PathBuf {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl IntoStdin<'static> for PathBuf {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self)
    }
}
// Note: strings here are file *paths*, in contrast with `IntoStdinBytes`
// where a string is the input data itself.
impl<'a> IntoStdin<'a> for &'a str {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoStdin<'a> for &'a String {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl IntoStdin<'static> for String {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self.into())
    }
}
impl<'a> IntoStdin<'a> for &'a OsStr {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoStdin<'a> for &'a OsString {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::Path(self.as_ref())
    }
}
impl IntoStdin<'static> for OsString {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::PathBuf(self.into())
    }
}
// Files are handed over directly (the handle is dup'd at exec time).
impl<'a> IntoStdin<'a> for &'a File {
    fn into_stdin(self) -> InputRedirect<'a> {
        InputRedirect::FileRef(self)
    }
}
impl IntoStdin<'static> for File {
    fn into_stdin(self) -> InputRedirect<'static> {
        InputRedirect::File(self)
    }
}
/// Targets a child's stdout or stderr can be redirected to.  `Stdout` and
/// `Stderr` mean "whatever handle is currently in effect", enabling swaps
/// like `2>&1`.
#[derive(Debug)]
pub enum OutputRedirect<'a> {
    Null,
    Stdout,
    Stderr,
    Path(&'a Path),
    PathBuf(PathBuf),
    FileRef(&'a File),
    File(File),
}
impl<'a> OutputRedirect<'a> {
    /// Opens a pipe handle for this output target.  `Stdout`/`Stderr` clone
    /// the handles currently in effect, which is what makes `2>&1`-style
    /// swaps possible.
    fn open_handle(&self,
                   inherited_stdout: &pipe::Handle,
                   inherited_stderr: &pipe::Handle)
                   -> io::Result<pipe::Handle> {
        Ok(match *self {
            OutputRedirect::Null => pipe::Handle::from_file(try!(File::create("/dev/null"))), // TODO: Windows
            OutputRedirect::Stdout => inherited_stdout.clone(),
            OutputRedirect::Stderr => inherited_stderr.clone(),
            OutputRedirect::Path(ref p) => pipe::Handle::from_file(try!(File::create(p))),
            OutputRedirect::PathBuf(ref p) => pipe::Handle::from_file(try!(File::create(p))),
            OutputRedirect::FileRef(ref f) => pipe::Handle::dup_file(f),
            OutputRedirect::File(ref f) => pipe::Handle::dup_file(f),
        })
    }
}
/// Conversion into an output redirection (used by `Expression::stdout` and
/// `Expression::stderr`).  Path-ish arguments create/truncate the named
/// file; `File`s are used directly.
pub trait IntoOutput<'a> {
    fn into_output(self) -> OutputRedirect<'a>;
}
// An explicit `OutputRedirect` (e.g. `Null`, `Stdout`) passes through.
impl<'a> IntoOutput<'a> for OutputRedirect<'a> {
    fn into_output(self) -> OutputRedirect<'a> {
        self
    }
}
// Borrowed path-ish types become `Path`; owned ones become `PathBuf`.
impl<'a> IntoOutput<'a> for &'a Path {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self)
    }
}
impl<'a> IntoOutput<'a> for &'a PathBuf {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl IntoOutput<'static> for PathBuf {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self)
    }
}
impl<'a> IntoOutput<'a> for &'a str {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoOutput<'a> for &'a String {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl IntoOutput<'static> for String {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self.into())
    }
}
impl<'a> IntoOutput<'a> for &'a OsStr {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl<'a> IntoOutput<'a> for &'a OsString {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::Path(self.as_ref())
    }
}
impl IntoOutput<'static> for OsString {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::PathBuf(self.into())
    }
}
// Files are handed over directly (the handle is dup'd at exec time).
impl<'a> IntoOutput<'a> for &'a File {
    fn into_output(self) -> OutputRedirect<'a> {
        OutputRedirect::FileRef(self)
    }
}
impl IntoOutput<'static> for File {
    fn into_output(self) -> OutputRedirect<'static> {
        OutputRedirect::File(self)
    }
}
/// Errors `read` can produce: an I/O failure, non-UTF-8 output, or a child
/// that exited unsuccessfully (with its captured output attached).
#[derive(Debug)]
pub enum Error {
    Io(io::Error),
    Utf8(std::str::Utf8Error),
    Status(Output),
}
// Conversions so `try!` can propagate the underlying error types.
impl From<io::Error> for Error {
    fn from(err: io::Error) -> Error {
        Error::Io(err)
    }
}
impl From<std::str::Utf8Error> for Error {
    fn from(err: std::str::Utf8Error) -> Error {
        Error::Utf8(err)
    }
}
// An IoContext represents the file descriptors child processes are talking to at execution time.
// It's initialized in read() with dups of the stdin/stdout/stderr pipes, and then passed down to
// sub-expressions. Compound expressions will clone() it, and redirections will modify it.
#[derive(Clone, Debug)]
pub struct IoContext {
    stdin: pipe::Handle,
    stdout: pipe::Handle,
    stderr: pipe::Handle,
}
impl IoContext {
    /// A context wired to this process's own standard streams.
    fn new() -> IoContext {
        IoContext {
            stdin: pipe::Handle::stdin(),
            stdout: pipe::Handle::stdout(),
            stderr: pipe::Handle::stderr(),
        }
    }
}
fn pipe_with_reader_thread() -> (pipe::Handle, JoinHandle<io::Result<Vec<u8>>>) {
let (read_pipe, write_pipe) = pipe::open_pipe();
let thread = std::thread::spawn(move || {
let mut read_file = read_pipe.into_file();
let mut output = Vec::new();
try!(read_file.read_to_end(&mut output));
Ok(output)
});
(write_pipe, thread)
}
type WriterThreadJoiner = crossbeam::ScopedJoinHandle<io::Result<()>>;
fn pipe_with_writer_thread<'a>(input: &'a [u8],
scope: &crossbeam::Scope<'a>)
-> (pipe::Handle, WriterThreadJoiner) {
let (read_pipe, write_pipe) = pipe::open_pipe();
let thread = scope.spawn(move || {
let mut write_file = write_pipe.into_file();
try!(write_file.write_all(&input));
Ok(())
});
(read_pipe, thread)
}
#[cfg(test)]
mod test {
    // Integration tests: each one spawns real child processes (`echo`, `sed`,
    // `cat`, `true`), so they require a Unix-like shell environment.
    extern crate tempfile;
    use super::*;
    use std::io::prelude::*;
    use std::io::SeekFrom;
    use std::path::Path;
    #[test]
    fn test_cmd() {
        let output = cmd(&["echo", "hi"]).read().unwrap();
        assert_eq!("hi", output);
    }
    #[test]
    fn test_sh() {
        let output = sh("echo hi").read().unwrap();
        assert_eq!("hi", output);
    }
    #[test]
    fn test_pipe() {
        let output = sh("echo hi").pipe(sh("sed s/i/o/")).read().unwrap();
        assert_eq!("ho", output);
    }
    #[test]
    fn test_then() {
        let output = sh("echo -n hi").then(sh("echo lo")).read().unwrap();
        assert_eq!("hilo", output);
    }
    #[test]
    fn test_input() {
        // TODO: Fixed-length bytes input like b"foo" works poorly here. Why?
        let expr = sh("sed s/f/g/").input("foo");
        let output = expr.read().unwrap();
        assert_eq!("goo", output);
    }
    #[test]
    fn test_null() {
        // TODO: The separation between InputRedirect and OutputRedirect here is tedious.
        let expr = cmd(&["cat"])
            .stdin(InputRedirect::Null)
            .stdout(OutputRedirect::Null)
            .stderr(OutputRedirect::Null);
        let output = expr.read().unwrap();
        assert_eq!("", output);
    }
    #[test]
    fn test_path() {
        let mut input_file = tempfile::NamedTempFile::new().unwrap();
        let output_file = tempfile::NamedTempFile::new().unwrap();
        input_file.write_all(b"foo").unwrap();
        let expr = sh("sed s/o/a/g").stdin(input_file.path()).stdout(output_file.path());
        let output = expr.read().unwrap();
        // stdout was redirected to the file, so the captured output is empty.
        assert_eq!("", output);
        let mut file_output = String::new();
        output_file.as_ref().read_to_string(&mut file_output).unwrap();
        assert_eq!("faa", file_output);
    }
    #[test]
    fn test_owned_input() {
        fn with_input<'a>(expr: &Expression<'a>) -> Expression<'a> {
            let mystr = format!("I own this: {}", "foo");
            // This would be a lifetime error if we tried to use &mystr.
            expr.input(mystr)
        }
        let c = cmd(&["cat"]);
        let c_with_input = with_input(&c);
        let output = c_with_input.read().unwrap();
        assert_eq!("I own this: foo", output);
    }
    #[test]
    fn test_stderr_to_stdout() {
        let command = sh("echo hi >&2").stderr(OutputRedirect::Stdout);
        let output = command.read().unwrap();
        assert_eq!("hi", output);
    }
    #[test]
    fn test_file() {
        let mut temp = tempfile::NamedTempFile::new().unwrap();
        temp.write_all(b"example").unwrap();
        temp.seek(SeekFrom::Start(0)).unwrap();
        let expr = cmd(&["cat"]).stdin(temp.as_ref());
        let output = expr.read().unwrap();
        assert_eq!(output, "example");
    }
    #[test]
    fn test_ergonomics() {
        // We don't get automatic Deref when we're matching trait implementations, so in addition
        // to implementing String and &str, we *also* implement &String.
        // TODO: See if specialization can clean this up.
        let mystr = "owned string".to_owned();
        let mypathbuf = Path::new("a/b/c").to_owned();
        let myvec = vec![1, 2, 3];
        // These are nonsense expressions. We just want to make sure they compile.
        sh("true").stdin(&*mystr).input(&*myvec).stdout(&*mypathbuf);
        sh("true").stdin(&mystr).input(&myvec).stdout(&mypathbuf);
        sh("true").stdin(mystr).input(myvec).stdout(mypathbuf);
    }
}
|
//! Track and report errors, exceptions and messages from your Rust application to Rollbar.
extern crate backtrace;
extern crate futures;
extern crate hyper;
extern crate hyper_tls;
extern crate serde;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate serde_json;
extern crate tokio;
//use std::io::{self, Write};
use std::{error, fmt, panic, thread};
use std::borrow::ToOwned;
use std::sync::Arc;
use backtrace::Backtrace;
//use hyper::client::HttpConnector;
use hyper::{Method, Request};
use hyper::rt::Future;
use hyper_tls::HttpsConnector;
use tokio::runtime::current_thread;
/// Report an error. Any type that implements `error::Error` is accepted.
///
/// NOTE(review): the expansion names `::backtrace` and `::rollbar` directly,
/// so callers must depend on both crates under exactly those names; `$crate::`
/// paths would avoid leaking the dependency (requires `pub extern crate
/// backtrace;` at the root) -- confirm before changing.
#[macro_export]
macro_rules! report_error {
    ($client:ident, $err:ident) => {{
        let backtrace = ::backtrace::Backtrace::new();
        // `- 2` aims the reported line at the invocation site, two lines up.
        let line = line!() - 2;
        $client.build_report()
               .from_error(&$err)
               .with_frame(::rollbar::FrameBuilder::new()
                           .with_line_number(line)
                           .with_file_name(file!())
                           .build())
               .with_backtrace(&backtrace)
               .send()
    }}
}
/// Report an error message. Any type that implements `fmt::Display` is accepted.
#[macro_export]
macro_rules! report_error_message {
    ($client:ident, $err:expr) => {{
        let backtrace = ::backtrace::Backtrace::new();
        let line = line!();
        $client.build_report()
               .from_error_message(&$err)
               .with_frame(::rollbar::FrameBuilder::new()
                           .with_line_number(line)
                           .with_file_name(file!())
                           .build())
               .with_backtrace(&backtrace)
               .send()
    }}
}
/// Set a global hook for the `panic`s your application could raise.
#[macro_export]
macro_rules! report_panics {
    ($client:ident) => {{
        ::std::panic::set_hook(::std::boxed::Box::new(move |panic_info| {
            let backtrace = ::backtrace::Backtrace::new();
            $client.build_report()
                   .from_panic(panic_info)
                   .with_backtrace(&backtrace)
                   .send();
        }))
    }}
}
/// Send a plain text message to Rollbar with severity level `INFO`.
#[macro_export]
macro_rules! report_message {
    ($client:ident, $message:expr) => {{
        $client.build_report()
               .from_message($message)
               .with_level(::rollbar::Level::INFO)
               .send()
    }}
}
// Generates a chainable setter that stores `val` directly as `Some(val)`.
macro_rules! add_field {
    ($n:ident, $f:ident, $t:ty) => (
        pub fn $n(&mut self, val: $t) -> &mut Self {
            self.$f = Some(val);
            self
        }
    );
}
// Generates a chainable setter that accepts any `T: $t` (e.g. `Into<String>`)
// and stores the converted value.
macro_rules! add_generic_field {
    ($n:ident, $f:ident, $t:path) => (
        pub fn $n<T: $t>(&mut self, val: T) -> &mut Self {
            self.$f = Some(val.into());
            self
        }
    );
}
/// Variants for setting the severity level.
/// If not specified, the default value is `ERROR`.
// Payloads serialize the level via `ToString` (lowercase names); the
// `Serialize` derive appears unused for that path -- confirm before removing.
#[derive(Serialize, Clone)]
pub enum Level {
    CRITICAL,
    ERROR,
    WARNING,
    INFO,
    DEBUG
}
impl<'a> From<&'a str> for Level {
fn from(s: &'a str) -> Level {
match s {
"critical" => Level::CRITICAL,
"warning" => Level::WARNING,
"info" => Level::INFO,
"debug" => Level::DEBUG,
_ => Level::ERROR
}
}
}
impl ToString for Level {
fn to_string(&self) -> String {
match self {
&Level::CRITICAL => "critical".to_string(),
&Level::ERROR => "error".to_string(),
&Level::WARNING => "warning".to_string(),
&Level::INFO => "info".to_string(),
&Level::DEBUG => "debug".to_string()
}
}
}
// https://rollbar.com/docs/api/items_post/
const URL: &'static str = "https://api.rollbar.com/api/1/item/";
/// Builder for a generic request to Rollbar.
pub struct ReportBuilder<'a> {
    client: &'a Client,
    // Optional replacement for `Client::send`; receives the shared hyper
    // client and the serialized JSON payload.
    // NOTE(review): bare `Fn(...)` trait-object syntax is deprecated; the
    // modern spelling is `dyn Fn(...)`.
    send_strategy: Option<Box<Fn(Arc<hyper::Client<hyper_tls::HttpsConnector<hyper::client::HttpConnector>>>, String) -> thread::JoinHandle<Option<ResponseStatus>>>>
}
/// Wrapper for a trace, payload of a single exception.
#[derive(Serialize, Default, Debug)]
struct Trace {
    frames: Vec<FrameBuilder>,
    exception: Exception
}
/// Wrapper for an exception, which describes the occurred error.
#[derive(Serialize, Debug)]
struct Exception {
    // Defaulted below to the reporting thread's name.
    class: String,
    message: String,
    description: String
}
impl Default for Exception {
    fn default() -> Self {
        Exception {
            // The current thread's name stands in for an exception class,
            // falling back to "unnamed" for anonymous threads.
            class: thread::current().name().unwrap_or("unnamed").to_owned(),
            message: String::new(),
            description: String::new()
        }
    }
}
/// Builder for a frame. A collection of frames identifies a stack trace.
#[derive(Serialize, Default, Clone, Debug)]
pub struct FrameBuilder {
    /// The name of the file in which the error had origin.
    #[serde(rename = "filename")]
    file_name: String,
    /// The line of code in which the error had origin.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "lineno")]
    line_number: Option<u32>,
    /// The number of the column in which an error occurred.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "colno")]
    column_number: Option<u32>,
    /// The method or the function name which caused the error.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "method")]
    function_name: Option<String>,
    /// The line of code which caused the error.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "code")]
    function_code_line: Option<String>,
}
impl<'a> FrameBuilder {
    /// Create a new FrameBuilder.
    pub fn new() -> Self {
        FrameBuilder {
            // Defaults to this library's own file name; callers normally
            // override it via `with_file_name`.
            file_name: file!().to_owned(),
            ..Default::default()
        }
    }
    /// Tell the origin of the error by adding the file name to the report.
    pub fn with_file_name<T: Into<String>>(&'a mut self, file_name: T) -> &'a mut Self {
        self.file_name = file_name.into();
        self
    }
    /// Set the number of the line in which an error occurred.
    add_field!(with_line_number, line_number, u32);
    /// Set the number of the column in which an error occurred.
    add_field!(with_column_number, column_number, u32);
    /// Set the method or the function name which caused the error.
    add_generic_field!(with_function_name, function_name, Into<String>);
    /// Set the line of code which caused the error.
    add_generic_field!(with_function_code_line, function_code_line, Into<String>);
    /// Conclude the creation of the frame; returns a clone of the builder.
    pub fn build(&self) -> Self {
        self.to_owned()
    }
}
/// Builder specialized for reporting errors.
#[derive(Serialize)]
pub struct ReportErrorBuilder<'a> {
    #[serde(skip_serializing)]
    report_builder: &'a ReportBuilder<'a>,
    /// The trace containing the stack frames.
    trace: Trace,
    /// The severity level of the error. `Level::ERROR` is the default value.
    #[serde(skip_serializing_if = "Option::is_none")]
    level: Option<Level>,
    /// The title shown in the dashboard for this report.
    #[serde(skip_serializing_if = "Option::is_none")]
    title: Option<String>
}
impl<'a> ReportErrorBuilder<'a> {
    /// Attach a `backtrace::Backtrace` to the `description` of the report.
    pub fn with_backtrace(&mut self, backtrace: &'a Backtrace) -> &mut Self {
        // Flatten every resolved symbol of every frame into a FrameBuilder;
        // fields missing from the symbol fall back to their defaults.
        self.trace.frames.extend(
            backtrace.frames()
                .iter()
                .flat_map(|frames| frames.symbols())
                .map(|symbol|
                    // http://alexcrichton.com/backtrace-rs/backtrace/struct.Symbol.html
                    FrameBuilder {
                        file_name: symbol.filename()
                            .map_or_else(|| "".to_owned(), |p| format!("{}", p.display())),
                        line_number: symbol.lineno(),
                        function_name: symbol.name()
                            .map(|s| format!("{}", s)),
                        function_code_line: symbol.addr()
                            .map(|s| format!("{:?}", s)),
                        ..Default::default()
                    }
                )
                .collect::<Vec<FrameBuilder>>()
        );
        self
    }
    /// Add a new frame to the collection of stack frames.
    pub fn with_frame(&mut self, frame_builder: FrameBuilder) -> &mut Self {
        self.trace.frames.push(frame_builder);
        self
    }
    /// Set the severity level of the report. `Level::ERROR` is the default value.
    add_generic_field!(with_level, level, Into<Level>);
    /// Set the title to show in the dashboard for this report.
    add_generic_field!(with_title, title, Into<String>);
    /// Send the report to Rollbar.
    ///
    /// Uses the installed `send_strategy` when present, otherwise the client's
    /// built-in sender; returns a join handle to the in-flight request.
    pub fn send(&mut self) -> thread::JoinHandle<Option<ResponseStatus>> {
        let client = self.report_builder.client;
        match self.report_builder.send_strategy {
            Some(ref send_strategy) => {
                let http_client = client.http_client.to_owned();
                send_strategy(http_client, self.to_string())
            },
            None => { client.send(self.to_string()) }
        }
    }
}
impl<'a> ToString for ReportErrorBuilder<'a> {
    /// Serialize the report into the JSON item body expected by Rollbar
    /// (https://rollbar.com/docs/api/items_post/).
    fn to_string(&self) -> String {
        let client = self.report_builder.client;
        json!({
            "access_token": client.access_token,
            "data": {
                "environment": client.environment,
                "body": {
                    "trace": self.trace,
                },
                // Error reports default to the "error" severity.
                "level": self.level
                    .to_owned()
                    .unwrap_or(Level::ERROR)
                    .to_string(),
                "language": "rust",
                "title": self.title
            }
        }).to_string()
    }
}
/// Builder specialized for reporting messages.
pub struct ReportMessageBuilder<'a> {
    report_builder: &'a ReportBuilder<'a>,
    /// The message that must be reported.
    message: &'a str,
    /// The severity level of the message. `Level::INFO` is the default value
    /// (see the `ToString` impl below).
    level: Option<Level>
}
impl<'a> ReportMessageBuilder<'a> {
    /// Set the severity level of the report. `Level::INFO` is the default value.
    add_generic_field!(with_level, level, Into<Level>);
    /// Send the message to Rollbar.
    ///
    /// Uses the installed `send_strategy` when present, otherwise the client's
    /// built-in sender; returns a join handle to the in-flight request.
    pub fn send(&mut self) -> thread::JoinHandle<Option<ResponseStatus>> {
        let client = self.report_builder.client;
        match self.report_builder.send_strategy {
            Some(ref send_strategy) => {
                let http_client = client.http_client.to_owned();
                send_strategy(http_client, self.to_string())
            },
            None => { client.send(self.to_string()) }
        }
    }
}
impl<'a> ToString for ReportMessageBuilder<'a> {
    /// Serialize the message into the JSON item body expected by Rollbar.
    fn to_string(&self) -> String {
        let client = self.report_builder.client;
        json!({
            "access_token": client.access_token,
            "data": {
                "environment": client.environment,
                "body": {
                    "message": {
                        "body": self.message
                    }
                },
                // Plain messages default to the "info" severity.
                "level": self.level
                    .to_owned()
                    .unwrap_or(Level::INFO)
                    .to_string()
            }
        }).to_string()
    }
}
impl<'a> ReportBuilder<'a> {
    /// To be used when a panic report must be sent.
    ///
    /// Extracts the panic message from the payload -- handling both `String`
    /// and `&'static str` payloads -- and records the panic location, when
    /// available, as the first stack frame.
    pub fn from_panic(&'a mut self, panic_info: &'a panic::PanicInfo) -> ReportErrorBuilder<'a> {
        let mut trace = Trace::default();
        let payload = panic_info.payload();
        // `panic!("{}", x)` yields a `String` payload while `panic!("literal")`
        // yields `&'static str`. The previous code probed `String` twice, so
        // string-literal panics were misreported as "Box<Any>".
        let message = match payload.downcast_ref::<String>() {
            Some(s) => s.to_owned(),
            None => match payload.downcast_ref::<&str>() {
                Some(s) => (*s).to_owned(),
                None => "Box<Any>".to_owned()
            }
        };
        trace.exception.message = message.to_owned();
        trace.exception.description = trace.exception.message.to_owned();
        if let Some(location) = panic_info.location() {
            trace.frames.push(FrameBuilder {
                file_name: location.file().to_owned(),
                line_number: Some(location.line()),
                ..Default::default()
            });
        }
        ReportErrorBuilder {
            report_builder: self,
            trace: trace,
            level: None,
            title: Some(message.to_owned())
        }
    }
    // TODO: remove self?
    /// To be used when an `error::Error` must be reported.
    pub fn from_error<E: error::Error>(&'a mut self, error: &'a E) -> ReportErrorBuilder<'a> {
        let mut trace = Trace::default();
        // NOTE(review): `Error::description` is deprecated in favor of
        // `Display`; switching to `error.to_string()` may change the reported
        // text for some error types -- confirm before changing.
        trace.exception.message = error.description().to_owned();
        // Prefer the underlying cause, when present, as the description.
        trace.exception.description = error.source().map_or_else(|| format!("{:?}", error), |c| format!("{:?}", c));
        ReportErrorBuilder {
            report_builder: self,
            trace: trace,
            level: None,
            title: Some(format!("{}", error))
        }
    }
    /// To be used when an error message must be reported.
    pub fn from_error_message<T: fmt::Display>(&'a mut self, error_message: &'a T) -> ReportErrorBuilder<'a> {
        let message = format!("{}", error_message);
        let mut trace = Trace::default();
        trace.exception.message = message.to_owned();
        trace.exception.description = message.to_owned();
        ReportErrorBuilder {
            report_builder: self,
            trace: trace,
            level: None,
            title: Some(message)
        }
    }
    /// To be used when a message must be tracked by Rollbar.
    pub fn from_message(&'a mut self, message: &'a str) -> ReportMessageBuilder<'a> {
        ReportMessageBuilder {
            report_builder: self,
            message: message,
            level: None
        }
    }
    /// Use given function to send a request to Rollbar instead of the built-in one.
    add_field!(with_send_strategy, send_strategy,
               Box<Fn(Arc<hyper::Client<hyper_tls::HttpsConnector<hyper::client::HttpConnector>>>, String) ->
                   thread::JoinHandle<Option<ResponseStatus>>>);
}
/// The access point to the library.
pub struct Client {
    // Shared so custom send strategies and spawned sender threads can reuse
    // the same connection pool.
    http_client: Arc<hyper::Client<hyper_tls::HttpsConnector<hyper::client::HttpConnector>>>,
    access_token: String,
    environment: String
}
impl Client {
    /// Create a new `Client`.
    ///
    /// Your available `environment`s are listed at
    /// <https://rollbar.com/{your_organization}/{your_app}/settings/general>.
    ///
    /// You can get the `access_token` at
    /// <https://rollbar.com/{your_organization}/{your_app}/settings/access_tokens>.
    pub fn new<T: Into<String>>(access_token: T, environment: T) -> Client {
        // `4` is presumably the number of DNS worker threads used by this
        // hyper-tls version -- confirm against the pinned dependency.
        let https = HttpsConnector::new(4).expect("TLS initialization failed");
        let client = hyper::Client::builder().build::<_, hyper::Body>(https);
        Client {
            http_client: Arc::new(client),
            access_token: access_token.into(),
            environment: environment.into()
        }
    }
    /// Create a `ReportBuilder` to build a new report for Rollbar.
    pub fn build_report(&self) -> ReportBuilder {
        ReportBuilder {
            client: self,
            send_strategy: None
        }
    }
    /// Function used internally to send payloads to Rollbar as default `send_strategy`.
    ///
    /// The request runs on its own thread with a single-threaded runtime; the
    /// returned handle joins to `Some(status)` on a response, or `None` when
    /// the request itself failed.
    fn send(&self, payload: String) -> thread::JoinHandle<Option<ResponseStatus>> {
        let body = hyper::Body::from(payload);
        let request = Request::builder()
            .method(Method::POST)
            .uri(URL)
            .body(body)
            .expect("Cannot build post request!");
        let job = self.http_client
            .request(request)
            .map(|res| {
                Some(ResponseStatus::from(res.status()))
            })
            .map_err( |error| {
                println!("Error while sending a report to Rollbar.");
                print!("The error returned by Rollbar was: {:?}.\n\n", error);
                None::<ResponseStatus>
            });
        thread::spawn(move || {
            // The error arm above already maps failures to `None`; `unwrap()`
            // here would panic the sender thread on any network error, so
            // return the `Err` payload (always `None`) instead.
            current_thread::Runtime::new().unwrap().block_on(job).unwrap_or(None)
        })
    }
}
/// Wrapper for `hyper::StatusCode`.
#[derive(Debug)]
pub struct ResponseStatus(hyper::StatusCode);
impl From<hyper::StatusCode> for ResponseStatus {
    fn from(status_code: hyper::StatusCode) -> ResponseStatus {
        ResponseStatus(status_code)
    }
}
impl ResponseStatus {
    /// Return a description provided by Rollbar for the status code returned by each request.
    // Descriptions taken from https://rollbar.com/docs/api/items_post/.
    pub fn description(&self) -> &str {
        match self.0.as_u16() {
            200 => "The item was accepted for processing.",
            400 => "No JSON payload was found, or it could not be decoded.",
            401 => "No access token was found in the request.",
            403 => "Check that your `access_token` is valid, enabled, and has the correct scope. The response will contain a `message` key explaining the problem.",
            413 => "Max payload size is 128kb. Try removing or truncating unnecessary large data included in the payload, like whole binary files or long strings.",
            422 => "A syntactically valid JSON payload was found, but it had one or more semantic errors. The response will contain a `message` key describing the errors.",
            429 => "Request dropped because the rate limit has been reached for this access token, or the account is on the Free plan and the plan limit has been reached.",
            500 => "There was an error on Rollbar's end",
            _ => "An undefined error occurred."
        }
    }
    /// Return the canonical description for the status code returned by each request.
    pub fn canonical_reason(&self) -> String {
        format!("{}", self.0)
    }
}
impl fmt::Display for ResponseStatus {
    /// Formats as `Error <code> <reason>: <rollbar description>`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Error {}: {}", self.canonical_reason(), self.description())
    }
}
#[cfg(test)]
mod tests {
    extern crate serde_json;
    extern crate hyper;
    extern crate backtrace;
    use std::panic;
    use std::sync::{Arc, Mutex};
    use std::sync::mpsc::channel;
    use backtrace::Backtrace;
    use serde_json::Value;
    use super::{Client, FrameBuilder, Level};
    // Asserts the exception message/description are non-empty, then truncates
    // the frame list so payloads can be compared without the machine-dependent
    // backtrace frames.
    macro_rules! normalize_frames {
        ($payload:expr, $expected_payload:expr, $expected_frames:expr) => {
            // check the description/backtrace is not empty and also check
            // that it is different from the message and then ignore it from now on
            let payload_ = $payload.to_owned();
            let description = payload_.get("data").unwrap()
                .get("body").unwrap()
                .get("trace").unwrap()
                .get("exception").unwrap()
                .get("description").unwrap();
            let message = payload_.get("data").unwrap()
                .get("body").unwrap()
                .get("trace").unwrap()
                .get("exception").unwrap()
                .get("message").unwrap();
            match description {
                &Value::String(ref s) => assert!(!s.is_empty()),
                _ => assert!(false)
            }
            match message {
                &Value::String(ref s) => assert!(!s.is_empty()),
                _ => assert!(false)
            }
            $payload.get_mut("data").unwrap()
                .get_mut("body").unwrap()
                .get_mut("trace").unwrap()
                .get_mut("frames").unwrap()
                .as_array_mut().unwrap()
                .truncate($expected_frames);
        }
    }
    #[test]
    fn test_report_panics() {
        let (tx, rx) = channel();
        {
            let tx = Arc::new(Mutex::new(tx));
            let client = Client::new("ACCESS_TOKEN", "ENVIRONMENT");
            panic::set_hook(Box::new(move |panic_info| {
                let backtrace = Backtrace::new();
                let payload = client.build_report()
                    .from_panic(panic_info)
                    .with_backtrace(&backtrace)
                    .with_level("info")
                    .to_string();
                let payload = Arc::new(Mutex::new(payload));
                tx.lock().unwrap().send(payload).unwrap();
            }));
            let result = panic::catch_unwind(|| {
                // just to trick the linter
                let zero = "0".parse::<i32>().unwrap();
                let _ = 1/zero;
            });
            assert!(result.is_err());
        }
        // remove the hook to avoid double panics
        let _ = panic::take_hook();
        let lock = rx.recv().unwrap();
        let payload = match lock.lock() {
            Ok(guard) => guard,
            Err(poisoned) => poisoned.into_inner(),
        };
        let mut payload: Value = serde_json::from_str(&*payload).unwrap();
        let mut expected_payload = json!({
            "access_token": "ACCESS_TOKEN",
            "data": {
                "environment": "ENVIRONMENT",
                "body": {
                    "trace": {
                        "frames": [{
                            "filename": "src/lib.rs",
                            "lineno": 268
                        }],
                        "exception": {
                            "class": "tests::test_report_panics",
                            "message": "attempt to divide by zero",
                            "description": "attempt to divide by zero"
                        }
                    }
                },
                "level": "info",
                "language": "rust",
                "title": "attempt to divide by zero"
            }
        });
        let payload_ = payload.to_owned();
        let line_number = payload_.get("data").unwrap()
            .get("body").unwrap()
            .get("trace").unwrap()
            .get("frames").unwrap()
            .get(0).unwrap()
            .get("lineno").unwrap();
        // The panic line shifts whenever this file is edited; only require
        // that one was recorded, then copy it into the expected payload.
        assert!(line_number.as_u64().unwrap() > 0);
        *expected_payload.get_mut("data").unwrap()
            .get_mut("body").unwrap()
            .get_mut("trace").unwrap()
            .get_mut("frames").unwrap()
            .get_mut(0).unwrap()
            .get_mut("lineno").unwrap() = line_number.to_owned();
        normalize_frames!(payload, expected_payload, 1);
        assert_eq!(expected_payload.to_string(), payload.to_string());
    }
    #[test]
    fn test_report_error() {
        let client = Client::new("ACCESS_TOKEN", "ENVIRONMENT");
        match "笑".parse::<i32>() {
            Ok(_) => { assert!(false); },
            Err(e) => {
                let payload = client.build_report()
                    .from_error_message(&e)
                    .with_level(Level::WARNING)
                    .with_frame(FrameBuilder::new()
                        .with_column_number(42)
                        .build())
                    .with_frame(FrameBuilder::new()
                        .with_column_number(24)
                        .build())
                    .with_title("w")
                    .to_string();
                let expected_payload = json!({
                    "access_token": "ACCESS_TOKEN",
                    "data": {
                        "environment": "ENVIRONMENT",
                        "body": {
                            "trace": {
                                "frames": [{
                                    "filename": "src/lib.rs",
                                    "colno": 42
                                }, {
                                    "filename": "src/lib.rs",
                                    "colno": 24
                                }],
                                "exception": {
                                    "class": "tests::test_report_error",
                                    "message": "invalid digit found in string",
                                    "description": "invalid digit found in string"
                                }
                            }
                        },
                        "level": "warning",
                        "language": "rust",
                        "title": "w"
                    }
                });
                let mut payload: Value = serde_json::from_str(&*payload).unwrap();
                normalize_frames!(payload, expected_payload, 2);
                assert_eq!(expected_payload.to_string(), payload.to_string());
            }
        }
    }
    #[test]
    fn test_report_message() {
        let client = Client::new("ACCESS_TOKEN", "ENVIRONMENT");
        let payload = client.build_report()
            .from_message("hai")
            .with_level("warning")
            .to_string();
        let expected_payload = json!({
            "access_token": "ACCESS_TOKEN",
            "data": {
                "environment": "ENVIRONMENT",
                "body": {
                    "message": {
                        "body": "hai"
                    }
                },
                "level": "warning"
            }
        }).to_string();
        assert_eq!(payload, expected_payload);
    }
    #[test]
    fn test_response() {
        // Sends a request with a bogus token on purpose: Rollbar is expected
        // to answer 401. Requires network access.
        let client = Client::new("ACCESS_TOKEN", "ENVIRONMENT");
        let status_handle = client.build_report()
            .from_message("hai")
            .with_level("info")
            .send();
        match status_handle.join().unwrap() {
            Some(status) => {
                assert_eq!(status.to_string(),
                           "Error 401 Unauthorized: No access token was found in the request.".to_owned());
            }
            None => { assert!(false); }
        }
    }
}
Sanitize macros (#14)
* rustfmt
* Use `$crate` so the exported macros do not leak a dependency on the `backtrace` crate to callers.
//! Track and report errors, exceptions and messages from your Rust application to Rollbar.
pub extern crate backtrace;
extern crate futures;
extern crate hyper;
extern crate hyper_tls;
extern crate serde;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
extern crate tokio;
//use std::io::{self, Write};
use std::borrow::ToOwned;
use std::sync::Arc;
use std::{error, fmt, panic, thread};
use backtrace::Backtrace;
//use hyper::client::HttpConnector;
use hyper::rt::Future;
use hyper::{Method, Request};
use hyper_tls::HttpsConnector;
use tokio::runtime::current_thread;
/// Report an error. Any type that implements `error::Error` is accepted.
///
/// All paths in the expansion go through `$crate::` so callers do not need a
/// root dependency named `backtrace` or `rollbar` (the previous expansion
/// still hard-coded `::rollbar::FrameBuilder`, which broke under crate
/// renaming).
#[macro_export]
macro_rules! report_error {
    ($client:ident, $err:ident) => {{
        let backtrace = $crate::backtrace::Backtrace::new();
        // `- 2` aims the reported line at the invocation site, two lines up.
        let line = line!() - 2;
        $client
            .build_report()
            .from_error(&$err)
            .with_frame(
                $crate::FrameBuilder::new()
                    .with_line_number(line)
                    .with_file_name(file!())
                    .build(),
            )
            .with_backtrace(&backtrace)
            .send()
    }};
}
/// Report an error message. Any type that implements `fmt::Display` is accepted.
#[macro_export]
macro_rules! report_error_message {
    ($client:ident, $err:expr) => {{
        let backtrace = $crate::backtrace::Backtrace::new();
        let line = line!();
        $client
            .build_report()
            .from_error_message(&$err)
            .with_frame(
                $crate::FrameBuilder::new()
                    .with_line_number(line)
                    .with_file_name(file!())
                    .build(),
            )
            .with_backtrace(&backtrace)
            .send()
    }};
}
/// Set a global hook for the `panic`s your application could raise.
#[macro_export]
macro_rules! report_panics {
    ($client:ident) => {{
        ::std::panic::set_hook(::std::boxed::Box::new(move |panic_info| {
            let backtrace = $crate::backtrace::Backtrace::new();
            $client
                .build_report()
                .from_panic(panic_info)
                .with_backtrace(&backtrace)
                .send();
        }))
    }};
}
/// Send a plain text message to Rollbar with severity level `INFO`.
#[macro_export]
macro_rules! report_message {
    ($client:ident, $message:expr) => {{
        $client
            .build_report()
            .from_message($message)
            .with_level($crate::Level::INFO)
            .send()
    }};
}
// Generates a chainable setter that stores `val` directly as `Some(val)`.
macro_rules! add_field {
    ($n:ident, $f:ident, $t:ty) => (
        pub fn $n(&mut self, val: $t) -> &mut Self {
            self.$f = Some(val);
            self
        }
    );
}
// Generates a chainable setter that accepts any `T: $t` (e.g. `Into<String>`)
// and stores the converted value.
macro_rules! add_generic_field {
    ($n:ident, $f:ident, $t:path) => (
        pub fn $n<T: $t>(&mut self, val: T) -> &mut Self {
            self.$f = Some(val.into());
            self
        }
    );
}
/// Variants for setting the severity level.
/// If not specified, the default value is `ERROR`.
// Payloads serialize the level via `ToString` (lowercase names); the
// `Serialize` derive appears unused for that path -- confirm before removing.
#[derive(Serialize, Clone)]
pub enum Level {
    CRITICAL,
    ERROR,
    WARNING,
    INFO,
    DEBUG,
}
impl<'a> From<&'a str> for Level {
fn from(s: &'a str) -> Level {
match s {
"critical" => Level::CRITICAL,
"warning" => Level::WARNING,
"info" => Level::INFO,
"debug" => Level::DEBUG,
_ => Level::ERROR,
}
}
}
impl ToString for Level {
fn to_string(&self) -> String {
match self {
&Level::CRITICAL => "critical".to_string(),
&Level::ERROR => "error".to_string(),
&Level::WARNING => "warning".to_string(),
&Level::INFO => "info".to_string(),
&Level::DEBUG => "debug".to_string(),
}
}
}
// https://rollbar.com/docs/api/items_post/
const URL: &'static str = "https://api.rollbar.com/api/1/item/";
/// Builder for a generic request to Rollbar.
pub struct ReportBuilder<'a> {
    client: &'a Client,
    // Optional replacement for `Client::send`; receives the shared hyper
    // client and the serialized JSON payload.
    send_strategy: Option<
        Box<
            dyn Fn(
                Arc<hyper::Client<hyper_tls::HttpsConnector<hyper::client::HttpConnector>>>,
                String,
            ) -> thread::JoinHandle<Option<ResponseStatus>>,
        >,
    >,
}
/// Wrapper for a trace, payload of a single exception.
#[derive(Serialize, Default, Debug)]
struct Trace {
    frames: Vec<FrameBuilder>,
    exception: Exception,
}
/// Wrapper for an exception, which describes the occurred error.
#[derive(Serialize, Debug)]
struct Exception {
    // Defaulted below to the reporting thread's name.
    class: String,
    message: String,
    description: String,
}
impl Default for Exception {
    fn default() -> Self {
        Exception {
            // The current thread's name stands in for an exception class,
            // falling back to "unnamed" for anonymous threads.
            class: thread::current().name().unwrap_or("unnamed").to_owned(),
            message: String::new(),
            description: String::new(),
        }
    }
}
/// Builder for a frame. A collection of frames identifies a stack trace.
#[derive(Serialize, Default, Clone, Debug)]
pub struct FrameBuilder {
    /// The name of the file in which the error had origin.
    #[serde(rename = "filename")]
    file_name: String,
    /// The line of code in which the error had origin.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "lineno")]
    line_number: Option<u32>,
    /// The number of the column in which an error occurred.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "colno")]
    column_number: Option<u32>,
    /// The method or the function name which caused the error.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "method")]
    function_name: Option<String>,
    /// The line of code which caused the error.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "code")]
    function_code_line: Option<String>,
}
impl<'a> FrameBuilder {
    /// Create a new FrameBuilder.
    pub fn new() -> Self {
        FrameBuilder {
            // Defaults to this library's own file name; callers normally
            // override it via `with_file_name`.
            file_name: file!().to_owned(),
            ..Default::default()
        }
    }
    /// Tell the origin of the error by adding the file name to the report.
    pub fn with_file_name<T: Into<String>>(&'a mut self, file_name: T) -> &'a mut Self {
        self.file_name = file_name.into();
        self
    }
    /// Set the number of the line in which an error occurred.
    add_field!(with_line_number, line_number, u32);
    /// Set the number of the column in which an error occurred.
    add_field!(with_column_number, column_number, u32);
    /// Set the method or the function name which caused the error.
    add_generic_field!(with_function_name, function_name, Into<String>);
    /// Set the line of code which caused the error.
    add_generic_field!(with_function_code_line, function_code_line, Into<String>);
    /// Conclude the creation of the frame; returns a clone of the builder.
    pub fn build(&self) -> Self {
        self.to_owned()
    }
}
/// Builder specialized for reporting errors.
#[derive(Serialize)]
pub struct ReportErrorBuilder<'a> {
    #[serde(skip_serializing)]
    report_builder: &'a ReportBuilder<'a>,
    /// The trace containing the stack frames.
    trace: Trace,
    /// The severity level of the error. `Level::ERROR` is the default value.
    #[serde(skip_serializing_if = "Option::is_none")]
    level: Option<Level>,
    /// The title shown in the dashboard for this report.
    #[serde(skip_serializing_if = "Option::is_none")]
    title: Option<String>,
}
impl<'a> ReportErrorBuilder<'a> {
    /// Attach a `backtrace::Backtrace` to the `description` of the report.
    pub fn with_backtrace(&mut self, backtrace: &'a Backtrace) -> &mut Self {
        // Flatten every resolved symbol of every frame into a FrameBuilder;
        // fields missing from the symbol fall back to their defaults.
        self.trace.frames.extend(
            backtrace
                .frames()
                .iter()
                .flat_map(|frames| frames.symbols())
                .map(|symbol|
                    // http://alexcrichton.com/backtrace-rs/backtrace/struct.Symbol.html
                    FrameBuilder {
                        file_name: symbol.filename()
                            .map_or_else(|| "".to_owned(), |p| format!("{}", p.display())),
                        line_number: symbol.lineno(),
                        function_name: symbol.name()
                            .map(|s| format!("{}", s)),
                        function_code_line: symbol.addr()
                            .map(|s| format!("{:?}", s)),
                        ..Default::default()
                    })
                .collect::<Vec<FrameBuilder>>(),
        );
        self
    }
    /// Add a new frame to the collection of stack frames.
    pub fn with_frame(&mut self, frame_builder: FrameBuilder) -> &mut Self {
        self.trace.frames.push(frame_builder);
        self
    }
    /// Set the severity level of the report. `Level::ERROR` is the default value.
    add_generic_field!(with_level, level, Into<Level>);
    /// Set the title to show in the dashboard for this report.
    add_generic_field!(with_title, title, Into<String>);
    /// Send the report to Rollbar.
    ///
    /// Uses the installed `send_strategy` when present, otherwise the client's
    /// built-in sender; returns a join handle to the in-flight request.
    pub fn send(&mut self) -> thread::JoinHandle<Option<ResponseStatus>> {
        let client = self.report_builder.client;
        match self.report_builder.send_strategy {
            Some(ref send_strategy) => {
                let http_client = client.http_client.to_owned();
                send_strategy(http_client, self.to_string())
            }
            None => client.send(self.to_string()),
        }
    }
}
impl<'a> ToString for ReportErrorBuilder<'a> {
    /// Serialize the report into the JSON item body expected by Rollbar
    /// (https://rollbar.com/docs/api/items_post/).
    fn to_string(&self) -> String {
        let client = self.report_builder.client;
        json!({
            "access_token": client.access_token,
            "data": {
                "environment": client.environment,
                "body": {
                    "trace": self.trace,
                },
                // Error reports default to the "error" severity.
                "level": self.level
                    .to_owned()
                    .unwrap_or(Level::ERROR)
                    .to_string(),
                "language": "rust",
                "title": self.title
            }
        })
        .to_string()
    }
}
/// Builder specialized for reporting messages.
pub struct ReportMessageBuilder<'a> {
    report_builder: &'a ReportBuilder<'a>,
    /// The message that must be reported.
    message: &'a str,
    /// The severity level of the message. `Level::INFO` is the default value
    /// (see the `ToString` impl below).
    level: Option<Level>,
}
impl<'a> ReportMessageBuilder<'a> {
    /// Set the severity level of the report. `Level::INFO` is the default value.
    add_generic_field!(with_level, level, Into<Level>);
    /// Send the message to Rollbar.
    ///
    /// Uses the installed `send_strategy` when present, otherwise the client's
    /// built-in sender; returns a join handle to the in-flight request.
    pub fn send(&mut self) -> thread::JoinHandle<Option<ResponseStatus>> {
        let client = self.report_builder.client;
        match self.report_builder.send_strategy {
            Some(ref send_strategy) => {
                let http_client = client.http_client.to_owned();
                send_strategy(http_client, self.to_string())
            }
            None => client.send(self.to_string()),
        }
    }
}
impl<'a> ToString for ReportMessageBuilder<'a> {
fn to_string(&self) -> String {
let client = self.report_builder.client;
json!({
"access_token": client.access_token,
"data": {
"environment": client.environment,
"body": {
"message": {
"body": self.message
}
},
"level": self.level
.to_owned()
.unwrap_or(Level::INFO)
.to_string()
}
})
.to_string()
}
}
impl<'a> ReportBuilder<'a> {
    /// To be used when a panic report must be sent.
    ///
    /// Extracts the panic message and, when available, the panic location
    /// (file and line) to prefill the trace.
    pub fn from_panic(&'a mut self, panic_info: &'a panic::PanicInfo) -> ReportErrorBuilder<'a> {
        let mut trace = Trace::default();
        let payload = panic_info.payload();
        // A panic payload is almost always a `String` (from `panic!("{}", ..)`)
        // or a `&'static str` (from `panic!("literal")`). The original code
        // checked `String` twice, so `&str` payloads wrongly fell through to
        // the opaque "Box<Any>" label.
        let message = match payload.downcast_ref::<String>() {
            Some(s) => s.to_owned(),
            None => match payload.downcast_ref::<&str>() {
                Some(s) => (*s).to_owned(),
                None => "Box<Any>".to_owned(),
            },
        };
        trace.exception.message = message.to_owned();
        trace.exception.description = trace.exception.message.to_owned();
        // Record where the panic happened as the first stack frame.
        if let Some(location) = panic_info.location() {
            trace.frames.push(FrameBuilder {
                file_name: location.file().to_owned(),
                line_number: Some(location.line()),
                ..Default::default()
            });
        }
        ReportErrorBuilder {
            report_builder: self,
            trace,
            level: None,
            title: Some(message),
        }
    }
    // TODO: remove self?
    /// To be used when an `error::Error` must be reported.
    pub fn from_error<E: error::Error>(&'a mut self, error: &'a E) -> ReportErrorBuilder<'a> {
        let mut trace = Trace::default();
        // `Display` is the sanctioned message source; `Error::description()`
        // is deprecated and returns a placeholder on modern toolchains.
        trace.exception.message = error.to_string();
        // Prefer the underlying cause for the description when one exists.
        trace.exception.description = error
            .source()
            .map_or_else(|| format!("{:?}", error), |c| format!("{:?}", c));
        ReportErrorBuilder {
            report_builder: self,
            trace,
            level: None,
            title: Some(format!("{}", error)),
        }
    }
    /// To be used when an error message must be reported.
    pub fn from_error_message<T: fmt::Display>(
        &'a mut self,
        error_message: &'a T,
    ) -> ReportErrorBuilder<'a> {
        let message = format!("{}", error_message);
        let mut trace = Trace::default();
        trace.exception.message = message.to_owned();
        trace.exception.description = message.to_owned();
        ReportErrorBuilder {
            report_builder: self,
            trace,
            level: None,
            title: Some(message),
        }
    }
    /// To be used when a message must be tracked by Rollbar.
    pub fn from_message(&'a mut self, message: &'a str) -> ReportMessageBuilder<'a> {
        ReportMessageBuilder {
            report_builder: self,
            message,
            level: None,
        }
    }
    /// Use given function to send a request to Rollbar instead of the built-in one.
    add_field!(
        with_send_strategy,
        send_strategy,
        Box<
            dyn Fn(
                Arc<hyper::Client<hyper_tls::HttpsConnector<hyper::client::HttpConnector>>>,
                String,
            ) -> thread::JoinHandle<Option<ResponseStatus>>,
        >
    );
}
/// The access point to the library.
pub struct Client {
    // Wrapped in `Arc` so spawned sender threads and custom send strategies
    // can share the same hyper client.
    http_client: Arc<hyper::Client<hyper_tls::HttpsConnector<hyper::client::HttpConnector>>>,
    // Rollbar project access token, included in every payload.
    access_token: String,
    // Rollbar environment name, included in every payload.
    environment: String,
}
impl Client {
    /// Create a new `Client`.
    ///
    /// Your available `environment`s are listed at
    /// <https://rollbar.com/{your_organization}/{your_app}/settings/general>.
    ///
    /// You can get the `access_token` at
    /// <https://rollbar.com/{your_organization}/{your_app}/settings/access_tokens>.
    pub fn new<T: Into<String>>(access_token: T, environment: T) -> Client {
        // 4 is the number of worker threads used by the TLS connector.
        let https = HttpsConnector::new(4).expect("TLS initialization failed");
        let client = hyper::Client::builder().build::<_, hyper::Body>(https);
        Client {
            http_client: Arc::new(client),
            access_token: access_token.into(),
            environment: environment.into(),
        }
    }
    /// Create a `ReportBuilder` to build a new report for Rollbar.
    pub fn build_report(&self) -> ReportBuilder {
        ReportBuilder {
            client: self,
            send_strategy: None,
        }
    }
    /// Function used internally to send payloads to Rollbar as default `send_strategy`.
    ///
    /// The request runs on a dedicated thread so reporting never blocks the
    /// caller; join the returned handle to obtain the response status, or
    /// `None` if the request failed.
    fn send(&self, payload: String) -> thread::JoinHandle<Option<ResponseStatus>> {
        let body = hyper::Body::from(payload);
        let request = Request::builder()
            .method(Method::POST)
            .uri(URL)
            .body(body)
            .expect("Cannot build post request!");
        let job = self
            .http_client
            .request(request)
            .map(|res| Some(ResponseStatus::from(res.status())))
            .map_err(|error| {
                // Diagnostics belong on stderr, not stdout.
                eprintln!("Error while sending a report to Rollbar.");
                eprint!("The error returned by Rollbar was: {:?}.\n\n", error);
                None::<ResponseStatus>
            });
        thread::spawn(move || {
            // Both the Ok and the Err side carry the `Option<ResponseStatus>`
            // we want to return; the previous `.unwrap()` panicked the thread
            // (and poisoned `join()`) whenever the request failed.
            current_thread::Runtime::new()
                .unwrap()
                .block_on(job)
                .unwrap_or_else(|none| none)
        })
    }
}
/// Wrapper for `hyper::StatusCode`.
#[derive(Debug)]
pub struct ResponseStatus(hyper::StatusCode);
impl From<hyper::StatusCode> for ResponseStatus {
    // Wraps the raw HTTP status code returned by the Rollbar API.
    fn from(status_code: hyper::StatusCode) -> ResponseStatus {
        ResponseStatus(status_code)
    }
}
impl ResponseStatus {
    /// Return a description provided by Rollbar for the status code returned by each request.
    pub fn description(&self) -> &str {
        match self.0.as_u16() {
            200 => "The item was accepted for processing.",
            400 => "No JSON payload was found, or it could not be decoded.",
            401 => "No access token was found in the request.",
            403 => "Check that your `access_token` is valid, enabled, and has the correct scope. The response will contain a `message` key explaining the problem.",
            413 => "Max payload size is 128kb. Try removing or truncating unnecessary large data included in the payload, like whole binary files or long strings.",
            422 => "A syntactically valid JSON payload was found, but it had one or more semantic errors. The response will contain a `message` key describing the errors.",
            429 => "Request dropped because the rate limit has been reached for this access token, or the account is on the Free plan and the plan limit has been reached.",
            500 => "There was an error on Rollbar's end",
            _ => "An undefined error occurred."
        }
    }
    /// Return the canonical description for the status code returned by each request.
    pub fn canonical_reason(&self) -> String {
        // `StatusCode`'s `Display` yields e.g. "401 Unauthorized";
        // `to_string()` is the idiomatic spelling of `format!("{}", ..)`.
        self.0.to_string()
    }
}
impl fmt::Display for ResponseStatus {
    // Renders e.g. "Error 401 Unauthorized: No access token was found in the request."
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let reason = self.canonical_reason();
        let details = self.description();
        write!(f, "Error {}: {}", reason, details)
    }
}
#[cfg(test)]
mod tests {
extern crate backtrace;
extern crate hyper;
extern crate serde_json;
use std::panic;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use backtrace::Backtrace;
use serde_json::Value;
use super::{Client, FrameBuilder, Level};
macro_rules! normalize_frames {
($payload:expr, $expected_payload:expr, $expected_frames:expr) => {
// check the description/backtrace is is not empty and also check
// that it is different from the message and then ignore it from now on
let payload_ = $payload.to_owned();
let description = payload_
.get("data")
.unwrap()
.get("body")
.unwrap()
.get("trace")
.unwrap()
.get("exception")
.unwrap()
.get("description")
.unwrap();
let message = payload_
.get("data")
.unwrap()
.get("body")
.unwrap()
.get("trace")
.unwrap()
.get("exception")
.unwrap()
.get("message")
.unwrap();
match description {
&Value::String(ref s) => assert!(!s.is_empty()),
_ => assert!(false),
}
match message {
&Value::String(ref s) => assert!(!s.is_empty()),
_ => assert!(false),
}
$payload
.get_mut("data")
.unwrap()
.get_mut("body")
.unwrap()
.get_mut("trace")
.unwrap()
.get_mut("frames")
.unwrap()
.as_array_mut()
.unwrap()
.truncate($expected_frames);
};
}
#[test]
fn test_report_panics() {
let (tx, rx) = channel();
{
let tx = Arc::new(Mutex::new(tx));
let client = Client::new("ACCESS_TOKEN", "ENVIRONMENT");
panic::set_hook(Box::new(move |panic_info| {
let backtrace = Backtrace::new();
let payload = client
.build_report()
.from_panic(panic_info)
.with_backtrace(&backtrace)
.with_level("info")
.to_string();
let payload = Arc::new(Mutex::new(payload));
tx.lock().unwrap().send(payload).unwrap();
}));
let result = panic::catch_unwind(|| {
// just to trick the linter
let zero = "0".parse::<i32>().unwrap();
let _ = 1 / zero;
});
assert!(result.is_err());
}
// remove the hook to avoid double panics
let _ = panic::take_hook();
let lock = rx.recv().unwrap();
let payload = match lock.lock() {
Ok(guard) => guard,
Err(poisoned) => poisoned.into_inner(),
};
let mut payload: Value = serde_json::from_str(&*payload).unwrap();
let mut expected_payload = json!({
"access_token": "ACCESS_TOKEN",
"data": {
"environment": "ENVIRONMENT",
"body": {
"trace": {
"frames": [{
"filename": "src/lib.rs",
"lineno": 268
}],
"exception": {
"class": "tests::test_report_panics",
"message": "attempt to divide by zero",
"description": "attempt to divide by zero"
}
}
},
"level": "info",
"language": "rust",
"title": "attempt to divide by zero"
}
});
let payload_ = payload.to_owned();
let line_number = payload_
.get("data")
.unwrap()
.get("body")
.unwrap()
.get("trace")
.unwrap()
.get("frames")
.unwrap()
.get(0)
.unwrap()
.get("lineno")
.unwrap();
assert!(line_number.as_u64().unwrap() > 0);
*expected_payload
.get_mut("data")
.unwrap()
.get_mut("body")
.unwrap()
.get_mut("trace")
.unwrap()
.get_mut("frames")
.unwrap()
.get_mut(0)
.unwrap()
.get_mut("lineno")
.unwrap() = line_number.to_owned();
normalize_frames!(payload, expected_payload, 1);
assert_eq!(expected_payload.to_string(), payload.to_string());
}
#[test]
fn test_report_error() {
    let client = Client::new("ACCESS_TOKEN", "ENVIRONMENT");
    // Parsing a non-numeric string is a cheap way to obtain a real error.
    match "笑".parse::<i32>() {
        Ok(_) => {
            assert!(false);
        }
        Err(e) => {
            let payload = client
                .build_report()
                .from_error_message(&e)
                .with_level(Level::WARNING)
                .with_frame(FrameBuilder::new().with_column_number(42).build())
                .with_frame(FrameBuilder::new().with_column_number(24).build())
                .with_title("w")
                .to_string();
            let expected_payload = json!({
                "access_token": "ACCESS_TOKEN",
                "data": {
                    "environment": "ENVIRONMENT",
                    "body": {
                        "trace": {
                            "frames": [{
                                "filename": "src/lib.rs",
                                "colno": 42
                            }, {
                                "filename": "src/lib.rs",
                                "colno": 24
                            }],
                            "exception": {
                                "class": "tests::test_report_error",
                                "message": "invalid digit found in string",
                                "description": "invalid digit found in string"
                            }
                        }
                    },
                    "level": "warning",
                    "language": "rust",
                    "title": "w"
                }
            });
            let mut payload: Value = serde_json::from_str(&*payload).unwrap();
            // Keep only the two frames pushed above; extra backtrace frames
            // are machine-dependent.
            normalize_frames!(payload, expected_payload, 2);
            assert_eq!(expected_payload.to_string(), payload.to_string());
        }
    }
}
#[test]
fn test_report_message() {
    let client = Client::new("ACCESS_TOKEN", "ENVIRONMENT");
    let payload = client
        .build_report()
        .from_message("hai")
        .with_level("warning")
        .to_string();
    // Message payloads are fully deterministic, so compare verbatim.
    let expected_payload = json!({
        "access_token": "ACCESS_TOKEN",
        "data": {
            "environment": "ENVIRONMENT",
            "body": {
                "message": {
                    "body": "hai"
                }
            },
            "level": "warning"
        }
    })
    .to_string();
    assert_eq!(payload, expected_payload);
}
#[test]
fn test_response() {
    // NOTE(review): this test performs real network I/O. The fake token
    // should make Rollbar answer 401 Unauthorized.
    let client = Client::new("ACCESS_TOKEN", "ENVIRONMENT");
    let status_handle = client
        .build_report()
        .from_message("hai")
        .with_level("info")
        .send();
    match status_handle.join().unwrap() {
        Some(status) => {
            assert_eq!(
                status.to_string(),
                "Error 401 Unauthorized: No access token was found in the request.".to_owned()
            );
        }
        None => {
            assert!(false);
        }
    }
}
}
|
#![deny(missing_docs)]
//! A data set library.
use std::any::Any;
/// Implemented by data sets for runtime reflection.
/// A data set is a collection of tables, usually `Vec<T>`.
pub trait DataSet {
    /// Gets the table descriptions of the data set.
    fn tables(&self) -> &[Table];
    /// Read data from a column.
    /// The type T is the column type.
    /// Returns a `ReadData` which points directly inside the table.
    /// Returns `None` when the table/column pair is unknown or when `T`
    /// does not match the column's declared type.
    fn read<T: Any>(&self, table: &str, column: &str) -> Option<ReadData<T>>;
}
/// Implemented by data sets that have a table for generic programming.
pub trait HasTable<T>: DataSet {
    /// Get access to the full table.
    /// Uses a raw pointer to access multiple tables at the same time.
    fn raw_table(&mut self) -> *mut Vec<T>;
    /// Gets an immutable view into table.
    fn get_table(&self) -> &[T];
    /// Adds a value.
    ///
    /// Returns the index of the newly added row.
    fn add(&mut self, val: T) -> usize {
        // The binding is never reassigned, so `mut` on it was an unused_mut
        // warning; `&mut Vec<T>` already grants mutable access to the vector.
        let table: &mut Vec<T> = unsafe { &mut *self.raw_table() };
        table.push(val);
        table.len() - 1
    }
}
/// Contains table information.
pub struct Table<'a> {
    /// The name of table.
    pub name: &'a str,
    /// The columns.
    pub columns: &'a [Column<'a>],
}
/// Contains column information.
pub struct Column<'a> {
    /// The name of column.
    pub name: &'a str,
    /// The type of column.
    /// Stored as the stringified type name, e.g. what `stringify!` produced.
    pub column_type: &'a str,
}
/// An unsafe way of reading data.
/// This is used for reflection when the types in the data set are unknown.
pub struct ReadData<T> {
    /// The current pointer.
    pub ptr: *const T,
    /// The number of items left.
    pub len: usize,
    /// The number of bytes to jump to next item.
    /// This is the row stride, not `size_of::<T>()`.
    pub size: usize,
}
impl<T> ReadData<T> {
    /// Gets pointer at index location.
    ///
    /// Returns `None` when `index` is out of bounds.
    pub fn get(&self, index: usize) -> Option<*const T> {
        if index >= self.len { None }
        else {
            // SAFETY-NOTE(review): assumes `ptr` points at `len` items spaced
            // `size` bytes apart and that `size * index` cannot overflow —
            // TODO confirm this holds for all construction sites.
            Some(unsafe {
                (self.ptr as *const u8)
                    .offset((self.size * index) as isize) as *const T
            })
        }
    }
}
impl<T> Iterator for ReadData<T> {
    type Item = *const T;
    // Yields a raw pointer to each remaining item, advancing by `size` bytes.
    fn next(&mut self) -> Option<*const T> {
        if self.len == 0 { None }
        else {
            self.len -= 1;
            let res = self.ptr;
            // SAFETY-NOTE(review): assumes the remaining `len` items really
            // are laid out `size` bytes apart — TODO confirm at call sites.
            self.ptr = unsafe {
                (self.ptr as *const u8)
                    .offset(self.size as isize) as *const T
            };
            Some(res)
        }
    }
}
/// Generates an impl of `HasTable<T>` for each listed `field: Type` pair.
/// The type must already implement `DataSet`.
#[macro_export]
macro_rules! has_table_impls {
    ($x:path { $($n:ident : $t:path),* }) => {
        $(
            impl HasTable<$t> for $x {
                fn raw_table(&mut self) -> *mut Vec<$t> {
                    &mut self.$n as *mut _
                }
                fn get_table(&self) -> &[$t] {
                    &self.$n[0..]
                }
            }
        )*
    }
}
/// Generates an impl of `DataSet` for a type.
#[macro_export]
macro_rules! dataset_impl {
    ($dataset:ident { $($table_name:ident : $table_type:ident { $($n:ident : $t:ident),* })* }) => {
        impl DataSet for $dataset {
            fn tables(&self) -> &[Table] {
                static TABLES: &'static [Table<'static>] = &[
                    $(
                        Table { name: stringify!($table_type), columns: &[
                            $(
                                Column { name: stringify!($n), column_type: stringify!($t) },
                            )*
                        ] },
                    )*
                ];
                TABLES
            }
            fn read<T: Any>(&self, table: &str, column: &str) -> Option<ReadData<T>> {
                // Import everything the expansion needs locally; previously
                // `TypeId` was used but never imported, so every call site of
                // this macro had to `use std::any::TypeId` or fail to compile.
                use std::any::TypeId;
                use std::mem::{ size_of, transmute };
                use std::ptr::null;
                match (table, column) {
                    $($(
                        (stringify!($table_type), stringify!($n)) => {
                            if TypeId::of::<T>() == TypeId::of::<$t>() {
                                if self.$table_name.is_empty() {
                                    // Empty table: nothing to point into.
                                    Some(ReadData {
                                        ptr: null(),
                                        len: 0,
                                        size: 0,
                                    })
                                } else {
                                    // Point at the first row's column; `size`
                                    // is the row stride used to reach the
                                    // same column in the next row.
                                    Some(unsafe {transmute(ReadData {
                                        ptr: &self.$table_name[0].$n,
                                        len: self.$table_name.len(),
                                        size: size_of::<$table_type>()
                                    })})
                                }
                            } else {
                                None
                            }
                        }
                    )*)*
                    _ => None
                }
            }
        }
    }
}
Added module-level documentation and clarifying doc comments.
#![deny(missing_docs)]
//! A data set library.
//!
//! ### What is a data set?
//!
//! A data set is a collection of tables that contain data,
//! very similar to a database.
//! The data points to other tables to create a data structures.
//!
//! In this library, a row in a table corresponds to a normal Rust struct.
//! Any struct can be used, to make it highly flexible and integrated with Rust.
//! The functionality of a data set can be added to naive application structures
//! by implementing the `DataSet` trait, using the macros.
//!
//! Because the memory in a table can be reallocated or serialized,
//! it is common to use `usize` to point to data in another table.
//! The semantics of references is open and must be handled manually.
//!
//! A table row type must be unique for a data set.
//! There can not be two tables with same type.
//!
//! ### Motivation
//!
//! This library has two purposes:
//!
//! 1. Runtime reflection of data without knowing the internal types.
//! 2. Generic programming that require a specific set of tables.
//!
use std::any::Any;
/// Implemented by data sets for runtime reflection.
/// A data set is a collection of tables, usually `Vec<T>`.
/// For implementation, use the macro `dataset_impl!`.
pub trait DataSet {
/// Gets the table descriptions of the data set.
fn tables(&self) -> &[Table];
/// Read data from a column.
/// The type T is the column type.
/// Returns a `ReadData` which points directly inside the table.
fn read<T: Any>(&self, table: &str, column: &str) -> Option<ReadData<T>>;
}
/// Implemented by data sets that have a table for generic programming.
pub trait HasTable<T>: DataSet {
    /// Get access to the full table.
    /// Uses a raw pointer to access multiple tables at the same time.
    fn raw_table(&mut self) -> *mut Vec<T>;
    /// Gets an immutable view into table.
    fn get_table(&self) -> &[T];
    /// Adds a value.
    ///
    /// Returns the index of the newly added row.
    fn add(&mut self, val: T) -> usize {
        // The binding is never reassigned, so `mut` on it was an unused_mut
        // warning; `&mut Vec<T>` already grants mutable access to the vector.
        let table: &mut Vec<T> = unsafe { &mut *self.raw_table() };
        table.push(val);
        table.len() - 1
    }
}
/// Contains table information.
pub struct Table<'a> {
/// The name of table.
pub name: &'a str,
/// The columns.
pub columns: &'a [Column<'a>],
}
/// Contains column information.
pub struct Column<'a> {
/// The name of column.
pub name: &'a str,
/// The type of column.
pub column_type: &'a str,
}
/// An unsafe way of reading data.
/// This is used for reflection when the types in the data set are unknown.
pub struct ReadData<T> {
/// The current pointer.
pub ptr: *const T,
/// The number of items left.
pub len: usize,
/// The number of bytes to jump to next item.
pub size: usize,
}
impl<T> ReadData<T> {
    /// Gets pointer at index location.
    ///
    /// Returns `None` when `index` is out of bounds.
    pub fn get(&self, index: usize) -> Option<*const T> {
        if index >= self.len { None }
        else {
            // SAFETY-NOTE(review): assumes `ptr` points at `len` items spaced
            // `size` bytes apart and that `size * index` cannot overflow —
            // TODO confirm this holds for all construction sites.
            Some(unsafe {
                (self.ptr as *const u8)
                    .offset((self.size * index) as isize) as *const T
            })
        }
    }
}
impl<T> Iterator for ReadData<T> {
    type Item = *const T;
    // Yields a raw pointer to each remaining item, advancing by `size` bytes.
    fn next(&mut self) -> Option<*const T> {
        if self.len == 0 { None }
        else {
            self.len -= 1;
            let res = self.ptr;
            // SAFETY-NOTE(review): assumes the remaining `len` items really
            // are laid out `size` bytes apart — TODO confirm at call sites.
            self.ptr = unsafe {
                (self.ptr as *const u8)
                    .offset(self.size as isize) as *const T
            };
            Some(res)
        }
    }
}
/// Generates an impl of `HasTable<T>` for each listed `field: Type` pair.
/// The type must already implement `DataSet`.
#[macro_export]
macro_rules! has_table_impls {
    ($x:path { $($n:ident : $t:path),* }) => {
        $(
            impl HasTable<$t> for $x {
                fn raw_table(&mut self) -> *mut Vec<$t> {
                    &mut self.$n as *mut _
                }
                fn get_table(&self) -> &[$t] {
                    &self.$n[0..]
                }
            }
        )*
    }
}
/// Generates an impl of `DataSet` for a type.
#[macro_export]
macro_rules! dataset_impl {
    ($dataset:ident { $($table_name:ident : $table_type:ident { $($n:ident : $t:ident),* })* }) => {
        impl DataSet for $dataset {
            fn tables(&self) -> &[Table] {
                static TABLES: &'static [Table<'static>] = &[
                    $(
                        Table { name: stringify!($table_type), columns: &[
                            $(
                                Column { name: stringify!($n), column_type: stringify!($t) },
                            )*
                        ] },
                    )*
                ];
                TABLES
            }
            fn read<T: Any>(&self, table: &str, column: &str) -> Option<ReadData<T>> {
                // Import everything the expansion needs locally; previously
                // `TypeId` was used but never imported, so every call site of
                // this macro had to `use std::any::TypeId` or fail to compile.
                use std::any::TypeId;
                use std::mem::{ size_of, transmute };
                use std::ptr::null;
                match (table, column) {
                    $($(
                        (stringify!($table_type), stringify!($n)) => {
                            if TypeId::of::<T>() == TypeId::of::<$t>() {
                                if self.$table_name.is_empty() {
                                    // Empty table: nothing to point into.
                                    Some(ReadData {
                                        ptr: null(),
                                        len: 0,
                                        size: 0,
                                    })
                                } else {
                                    // Point at the first row's column; `size`
                                    // is the row stride used to reach the
                                    // same column in the next row.
                                    Some(unsafe {transmute(ReadData {
                                        ptr: &self.$table_name[0].$n,
                                        len: self.$table_name.len(),
                                        size: size_of::<$table_type>()
                                    })})
                                }
                            } else {
                                None
                            }
                        }
                    )*)*
                    _ => None
                }
            }
        }
    }
}
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The crazy macro_rules magic in this file is thanks to dtolnay@
// and is a way of attaching rustdoc to each of the possible directives
// within the include_cpp outer macro. None of the directives actually
// do anything - all the magic is handled entirely by
// autocxx_macro::include_cpp_impl.
/// Include some C++ headers in your Rust project.
///
/// This macro allows you to include one or more C++ headers within
/// your Rust code, and call their functions fairly naturally.
///
/// # Examples
///
/// C++ header (`input.h`):
/// ```cpp
/// #include <cstdint>
///
/// uint32_t do_math(uint32_t a);
/// ```
///
/// Rust code:
/// ```
/// # use autocxx_macro::include_cpp_impl as include_cpp;
/// include_cpp!(
/// # parse_only
/// #include "input.h"
/// generate!("do_math")
/// );
///
/// # mod ffi { pub fn do_math(a: u32) -> u32 { a+3 } }
/// # fn main() {
/// ffi::do_math(3);
/// # }
/// ```
///
/// # Configuring the build
///
/// To build this, you'll need to:
/// * Educate the procedural macro about where to find the C++ headers. Set the
/// `AUTOCXX_INC` environment variable to a list of directories to search.
/// * Build the C++ side of the bindings. You'll need to use the `autocxx-gen`
/// crate (or similar) to process the same .rs code into C++ header and
/// implementation files.
///
/// # Syntax
///
/// Within the brackets of the `include_cxx!(...)` macro, you should provide
/// a list of at least the following:
///
/// * `#include "cpp_header.h"`: a header filename to parse and include
/// * `generate!("type_or_function_name")`: a type or function name whose declaration
/// should be made available to C++.
///
/// Other directives are possible as documented in this crate.
///
/// # How to generate structs
///
/// All C++ types can be owned within a [UniquePtr][autocxx_engine::cxx::UniquePtr]
/// within Rust. To let this be possible, simply pass the names of these
/// types within [generate] (or just [generate] any function which requires these types).
///
/// However, only _some_ C++ `struct`s can be owned _by value_ within Rust. Those
/// types must be freely byte-copyable, because Rust is free to do that at
/// any time. If you believe your `struct` meets those criteria, you can
/// use [generate_pod] instead.
///
/// Use [generate] under normal circumstances, but [generate_pod] only for structs
/// where you absolutely do need to pass them truly by value and have direct field access.
///
/// This doesn't just make a difference to the generated code for the type;
/// it also makes a difference to any functions which take or return that type.
/// If there's a C++ function which takes a struct by value, but that struct
/// is not declared as POD-safe, then we'll generate wrapper functions to move
/// that type into and out of [UniquePtr][autocxx_engine::cxx::UniquePtr]s.
///
///
/// # Generated code
///
/// You will find that this macro expands to the equivalent of:
///
/// ```no_run
/// mod ffi {
/// pub fn do_math(a: u32) -> u32
/// # { a+3 }
/// pub const kMyCxxConst: i32 = 3;
/// pub const MY_PREPROCESSOR_DEFINITION: i64 = 3i64;
/// }
/// ```
///
/// # Built-in types
///
/// The generated code uses `cxx` for interop: see that crate for many important
/// considerations including safety and the list of built-in types, for example
/// [UniquePtr][autocxx_engine::cxx::UniquePtr] and
/// [CxxString][autocxx_engine::cxx::CxxString].
///
/// # Making strings
///
/// Unless you use [exclude_utilities], you will find a function
/// called `make_string` exists inside the generated `ffi` block:
///
/// ```no_run
/// mod ffi {
/// # use autocxx_engine::cxx::UniquePtr;
/// # use autocxx_engine::cxx::CxxString;
/// pub fn make_string(str_: &str) -> UniquePtr<CxxString>
/// # { unreachable!() }
/// }
/// ```
///
/// # Making other C++ types
///
/// Types gain a `_make_unique` function. At present this is not
/// an associated function; it's simply the type name followed by
/// that suffix.
///
/// ```
/// mod ffi {
/// # struct UniquePtr<T>(T);
/// struct Bob {
/// a: u32,
/// }
/// pub fn Bob_make_unique() -> UniquePtr<Bob>
/// # { unreachable!() }
/// }
/// ```
/// # Preprocessor symbols
///
/// `#define` and other preprocessor symbols will appear as constants.
/// For integers this is straightforward; for strings they appear
/// as `[u8]` with a null terminator. To get a string, do this:
///
/// ```cpp
/// #define BOB "Hello"
/// ```
///
/// ```
/// # mod ffi { pub static BOB: [u8; 6] = [72u8, 101u8, 108u8, 108u8, 111u8, 0u8]; }
/// assert_eq!(std::str::from_utf8(&ffi::BOB).unwrap().trim_end_matches(char::from(0)), "Hello");
/// ```
///
/// # Namespaces
///
/// The C++ namespace structure is reflected in mods within the generated
/// ffi mod. However, at present there is an internal limitation that
/// autocxx can't handle multiple symbols with the same identifier, even
/// if they're in different namespaces. This will be fixed in future.
#[macro_export]
macro_rules! include_cpp {
    (
        // Zero or more `#include "header.h"` directives…
        $(#$include:ident $lit:literal)*
        // …followed by zero or more directive macros, e.g. `generate!(...)`.
        $($mac:ident!($($arg:tt)*))*
    ) => {
        // Expand each directive with the `__docs` marker purely so rustdoc
        // attaches its documentation; the real work is done entirely by
        // `include_cpp_impl!` (see the file header comment).
        $($crate::$include!{__docs})*
        $($crate::$mac!{__docs})*
        $crate::include_cpp_impl! {
            $(#include $lit)*
            $($mac!($($arg)*))*
        }
    };
}
/// Include a C++ header. A directive to be included inside
/// [include_cpp] - see [include_cpp] for details
#[macro_export]
macro_rules! include {
    // Delegates to `usage!`: expands to nothing for the internal `__docs`
    // marker, and to a usage compile error when called outside `include_cpp!`.
    ($($tt:tt)*) => { $crate::usage!{$($tt)*} };
}
/// Generate Rust bindings for the given C++ type or function.
/// A directive to be included inside
/// [include_cpp] - see [include_cpp] for general information.
/// See also [generate_pod].
#[macro_export]
macro_rules! generate {
    // Delegates to `usage!`: expands to nothing for the internal `__docs`
    // marker, and to a usage compile error when called outside `include_cpp!`.
    ($($tt:tt)*) => { $crate::usage!{$($tt)*} };
}
/// Generate as "plain old data".
/// Generate Rust bindings for the given C++ type such that
/// it can be passed and owned by value in Rust. This only works
/// for C++ types which have trivial move constructors and no
/// destructor - you'll encounter a compile error otherwise.
/// If your type doesn't match that description, use [generate]
/// instead, and own the type using [UniquePtr][autocxx_engine::cxx::UniquePtr].
/// A directive to be included inside
/// [include_cpp] - see [include_cpp] for general information.
#[macro_export]
macro_rules! generate_pod {
    // Delegates to `usage!`: expands to nothing for the internal `__docs`
    // marker, and to a usage compile error when called outside `include_cpp!`.
    ($($tt:tt)*) => { $crate::usage!{$($tt)*} };
}
/// Skip the normal generation of a `make_string` function
/// and other utilities which we might generate normally.
/// A directive to be included inside
/// [include_cpp] - see [include_cpp] for general information.
#[macro_export]
macro_rules! exclude_utilities {
    // Delegates to `usage!`: expands to nothing for the internal `__docs`
    // marker, and to a usage compile error when called outside `include_cpp!`.
    ($($tt:tt)*) => { $crate::usage!{$($tt)*} };
}
#[doc(hidden)]
#[macro_export]
macro_rules! usage {
    // Invoked with `__docs` (as `include_cpp!` does), expand to nothing:
    // the directive macros exist only so rustdoc can attach documentation.
    (__docs) => {};
    // Any other invocation means a directive was used outside
    // `include_cpp!`, so emit a helpful usage error.
    ($($tt:tt)*) => {
        compile_error! {r#"usage: include_cpp! {
#include "path/to/header.h"
generate!(...)
generate_pod!(...)
}
"#}
    };
}
#[doc(hidden)]
pub use autocxx_macro::include_cpp_impl;
Improved the documentation for string constants.
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The crazy macro_rules magic in this file is thanks to dtolnay@
// and is a way of attaching rustdoc to each of the possible directives
// within the include_cpp outer macro. None of the directives actually
// do anything - all the magic is handled entirely by
// autocxx_macro::include_cpp_impl.
/// Include some C++ headers in your Rust project.
///
/// This macro allows you to include one or more C++ headers within
/// your Rust code, and call their functions fairly naturally.
///
/// # Examples
///
/// C++ header (`input.h`):
/// ```cpp
/// #include <cstdint>
///
/// uint32_t do_math(uint32_t a);
/// ```
///
/// Rust code:
/// ```
/// # use autocxx_macro::include_cpp_impl as include_cpp;
/// include_cpp!(
/// # parse_only
/// #include "input.h"
/// generate!("do_math")
/// );
///
/// # mod ffi { pub fn do_math(a: u32) -> u32 { a+3 } }
/// # fn main() {
/// ffi::do_math(3);
/// # }
/// ```
///
/// # Configuring the build
///
/// To build this, you'll need to:
/// * Educate the procedural macro about where to find the C++ headers. Set the
/// `AUTOCXX_INC` environment variable to a list of directories to search.
/// * Build the C++ side of the bindings. You'll need to use the `autocxx-gen`
/// crate (or similar) to process the same .rs code into C++ header and
/// implementation files.
///
/// # Syntax
///
/// Within the brackets of the `include_cxx!(...)` macro, you should provide
/// a list of at least the following:
///
/// * `#include "cpp_header.h"`: a header filename to parse and include
/// * `generate!("type_or_function_name")`: a type or function name whose declaration
/// should be made available to C++.
///
/// Other directives are possible as documented in this crate.
///
/// # How to generate structs
///
/// All C++ types can be owned within a [UniquePtr][autocxx_engine::cxx::UniquePtr]
/// within Rust. To let this be possible, simply pass the names of these
/// types within [generate] (or just [generate] any function which requires these types).
///
/// However, only _some_ C++ `struct`s can be owned _by value_ within Rust. Those
/// types must be freely byte-copyable, because Rust is free to do that at
/// any time. If you believe your `struct` meets those criteria, you can
/// use [generate_pod] instead.
///
/// Use [generate] under normal circumstances, but [generate_pod] only for structs
/// where you absolutely do need to pass them truly by value and have direct field access.
///
/// This doesn't just make a difference to the generated code for the type;
/// it also makes a difference to any functions which take or return that type.
/// If there's a C++ function which takes a struct by value, but that struct
/// is not declared as POD-safe, then we'll generate wrapper functions to move
/// that type into and out of [UniquePtr][autocxx_engine::cxx::UniquePtr]s.
///
///
/// # Generated code
///
/// You will find that this macro expands to the equivalent of:
///
/// ```no_run
/// mod ffi {
/// pub fn do_math(a: u32) -> u32
/// # { a+3 }
/// pub const kMyCxxConst: i32 = 3;
/// pub const MY_PREPROCESSOR_DEFINITION: i64 = 3i64;
/// }
/// ```
///
/// # Built-in types
///
/// The generated code uses `cxx` for interop: see that crate for many important
/// considerations including safety and the list of built-in types, for example
/// [UniquePtr][autocxx_engine::cxx::UniquePtr] and
/// [CxxString][autocxx_engine::cxx::CxxString].
///
/// # Making strings
///
/// Unless you use [exclude_utilities], you will find a function
/// called `make_string` exists inside the generated `ffi` block:
///
/// ```no_run
/// mod ffi {
/// # use autocxx_engine::cxx::UniquePtr;
/// # use autocxx_engine::cxx::CxxString;
/// pub fn make_string(str_: &str) -> UniquePtr<CxxString>
/// # { unreachable!() }
/// }
/// ```
///
/// # Making other C++ types
///
/// Types gain a `_make_unique` function. At present this is not
/// an associated function; it's simply the type name followed by
/// that suffix.
///
/// ```
/// mod ffi {
/// # struct UniquePtr<T>(T);
/// struct Bob {
/// a: u32,
/// }
/// pub fn Bob_make_unique() -> UniquePtr<Bob>
/// # { unreachable!() }
/// }
/// ```
/// # Preprocessor symbols
///
/// `#define` and other preprocessor symbols will appear as constants.
/// At present there is no way to do compile-time disablement of code
/// (equivalent of `#ifdef`).
///
/// # String constants
///
/// Whether from a preprocessor symbol or from a C++ `char*` constant,
/// strings appear as `[u8]` with a null terminator. To get a Rust string,
/// do this:
///
/// ```cpp
/// #define BOB "Hello"
/// ```
///
/// ```
/// # mod ffi { pub static BOB: [u8; 6] = [72u8, 101u8, 108u8, 108u8, 111u8, 0u8]; }
/// assert_eq!(std::str::from_utf8(&ffi::BOB).unwrap().trim_end_matches(char::from(0)), "Hello");
/// ```
///
/// # Namespaces
///
/// The C++ namespace structure is reflected in mods within the generated
/// ffi mod. However, at present there is an internal limitation that
/// autocxx can't handle multiple symbols with the same identifier, even
/// if they're in different namespaces. This will be fixed in future.
#[macro_export]
macro_rules! include_cpp {
    (
        // Zero or more `#include "header.h"` directives…
        $(#$include:ident $lit:literal)*
        // …followed by zero or more directive macros, e.g. `generate!(...)`.
        $($mac:ident!($($arg:tt)*))*
    ) => {
        // Expand each directive with the `__docs` marker purely so rustdoc
        // attaches its documentation; the real work is done entirely by
        // `include_cpp_impl!` (see the file header comment).
        $($crate::$include!{__docs})*
        $($crate::$mac!{__docs})*
        $crate::include_cpp_impl! {
            $(#include $lit)*
            $($mac!($($arg)*))*
        }
    };
}
/// Include a C++ header. A directive to be included inside
/// [include_cpp] - see [include_cpp] for details
#[macro_export]
macro_rules! include {
    // Delegates to `usage!`: expands to nothing for the internal `__docs`
    // marker, and to a usage compile error when called outside `include_cpp!`.
    ($($tt:tt)*) => { $crate::usage!{$($tt)*} };
}
/// Generate Rust bindings for the given C++ type or function.
/// A directive to be included inside
/// [include_cpp] - see [include_cpp] for general information.
/// See also [generate_pod].
#[macro_export]
macro_rules! generate {
    // Standalone use expands via `usage!` to a compile error; inside
    // `include_cpp!` the tokens are consumed by `include_cpp_impl!`.
    ($($tt:tt)*) => { $crate::usage!{$($tt)*} };
}
/// Generate as "plain old data".
/// Generate Rust bindings for the given C++ type such that
/// it can be passed and owned by value in Rust. This only works
/// for C++ types which have trivial move constructors and no
/// destructor - you'll encounter a compile error otherwise.
/// If your type doesn't match that description, use [generate]
/// instead, and own the type using [UniquePtr][autocxx_engine::cxx::UniquePtr].
/// A directive to be included inside
/// [include_cpp] - see [include_cpp] for general information.
#[macro_export]
macro_rules! generate_pod {
    // Standalone use expands via `usage!` to a compile error; inside
    // `include_cpp!` the tokens are consumed by `include_cpp_impl!`.
    ($($tt:tt)*) => { $crate::usage!{$($tt)*} };
}
/// Skip the normal generation of a `make_string` function
/// and other utilities which we might generate normally.
/// A directive to be included inside
/// [include_cpp] - see [include_cpp] for general information.
#[macro_export]
macro_rules! exclude_utilities {
    // Standalone use expands via `usage!` to a compile error; inside
    // `include_cpp!` the tokens are consumed by `include_cpp_impl!`.
    ($($tt:tt)*) => { $crate::usage!{$($tt)*} };
}
#[doc(hidden)]
#[macro_export]
macro_rules! usage {
    // `include_cpp!` re-invokes each directive with the `__docs` marker,
    // which must expand to nothing.
    (__docs) => {};
    // Any other invocation means a directive was used outside
    // `include_cpp!`; fail the build with a usage reminder.
    ($($tt:tt)*) => {
        compile_error! {r#"usage: include_cpp! {
#include "path/to/header.h"
generate!(...)
generate_pod!(...)
}
"#}
    };
}
#[doc(hidden)]
pub use autocxx_macro::include_cpp_impl;
|
//! Chomp is a fast monadic-style parser combinator library for the Rust programming language. It was
//! written as the culmination of the experiments detailed in these blog posts:
//!
//! * [Part 1](http://m4rw3r.github.io/parser-combinator-experiments-rust/)
//! * [Part 2](http://m4rw3r.github.io/parser-combinator-experiments-errors)
//! * [Part 3](http://m4rw3r.github.io/parser-combinator-experiments-part-3)
//!
//! For its current capabilities, you will find that Chomp performs consistently as well, if not
//! better, than optimized C parsers, while being vastly more expressive. For an example that
//! builds a performant HTTP parser out of smaller parsers, see
//! [http_parser.rs](examples/http_parser.rs).
//!
//! # Example
//!
//! ```
//! # #[macro_use] extern crate chomp;
//! # fn main() {
//! use chomp::{Input, U8Result, parse_only};
//! use chomp::{take_while1, token};
//!
//! #[derive(Debug, Eq, PartialEq)]
//! struct Name<'a> {
//! first: &'a [u8],
//! last: &'a [u8],
//! }
//!
//! fn name(i: Input<u8>) -> U8Result<Name> {
//! parse!{i;
//! let first = take_while1(|c| c != b' ');
//! token(b' '); // skipping this char
//! let last = take_while1(|c| c != b'\n');
//!
//! ret Name{
//! first: first,
//! last: last,
//! }
//! }
//! }
//!
//! assert_eq!(parse_only(name, "Martin Wernstål\n".as_bytes()), Ok(Name{
//! first: b"Martin",
//! last: "Wernstål".as_bytes()
//! }));
//! # }
//! ```
//!
//! # Usage
//!
//! Chomp's functionality is split between three modules:
//!
//! * `parsers` contains the basic parsers used to parse streams of input.
//! * `combinators` contains functions which take parsers and return new ones.
//! * `primitives` contains the building blocks used to make new parsers. This is advanced usage and
//! is far more involved than using the pre-existing parsers, but is sometimes unavoidable.
//!
//! A parser is, at its simplest, a function that takes a slice of input and returns a
//! `ParserResult<I, T, E>`, where `I`, `T`, and `E` are the input, output, and error types,
//! respectively. Parsers are usually parameterized over values or other parsers as well, so these
//! appear as extra arguments in the parsing function. As an example, here is the signature of the
//! `token` parser, which matches a particular input.
//!
//! ```ignore
//! fn token<I: Copy + Eq>(i: Input<I>, t: I) -> SimpleResult<I, I> {...}
//! ```
//!
//! Notice that the first argument is an `Input<I>`, and the second argument is some `I`.
//! `Input<I>` is just a datatype over the current state of the parser and a slice of input `I`,
//! and prevents the parser writer from accidentally mutating the state of the parser. Later, when
//! we introduce the `parse!` macro, we will see that using a parser in this macro just means
//! supplying all of the arguments but the input, as so:
//!
//! ```ignore
//! token(b'T');
//! ```
//!
//! Note that you cannot do this outside of the `parse!` macro. `SimpleResult<I, T>` is a
//! convenience type alias over `ParseResult<I, T, Error<u8>>`, and `Error<I>` is just a convenient
//! "default" error type that will be sufficient for most uses. For more sophisticated usage, one
//! can always write a custom error type.
//!
//! A very useful parser is the `satisfy` parser:
//!
//! ```ignore
//! fn satisfy<I: Copy, F>(i: Input<I>, f: F) -> SimpleResult<I, I>
//! where F: FnOnce(I) -> bool { ... }
//! ```
//!
//! Besides the input state, satisfy's only parameter is a predicate function and will succeed only
//! if the next piece of input satisfies the supplied predicate. Here's an example that might be
//! used in the `parse!` macro:
//!
//! ```
//! # #[macro_use] extern crate chomp;
//! # fn main() {
//! # use chomp::{Input, satisfy, parse_only};
//! # let r = parse_only(parser!{
//! satisfy(|c| {
//! match c {
//! b'c' | b'h' | b'a' | b'r' => true,
//! _ => false,
//! }
//! })
//! # }, b"h");
//! # assert_eq!(r, Ok(b'h'));
//! # }
//! ```
//!
//! This parser will only succeed if the character is one of the characters in "char".
//!
//! Lastly, here is the parser combinator `count`, which will attempt to run a parser a number of
//! times on its input.
//!
//! ```ignore
//! pub fn count<'a, I, T, E, F, U>(i: Input<'a, I>, num: usize, p: F) -> ParseResult<'a, I, T, E>
//! where I: Copy,
//! U: 'a,
//! F: FnMut(Input<'a, I>) -> ParseResult<'a, I, U, E>,
//! T: FromIterator<U> { ... }
//! ```
//!
//! Using parsers is almost entirely done using the `parse!` macro, which enables us to do three
//! distinct things:
//!
//! * Sequence parsers over the remaining input
//! * Store intermediate results into datatypes
//! * Return a datatype at the end, which may be the result of any arbitrary computation over the
//! intermediate results.
//!
//! In other words, just as a normal Rust function usually looks something like this:
//!
//! ```
//! # fn launch_missiles() {}
//! # fn read_number() -> u8 { 3 }
//! fn f() -> (u8, u8, u8) {
//! let a = read_number();
//! let b = read_number();
//! launch_missiles();
//! return (a, b, a + b);
//! }
//! ```
//!
//! A Chomp parser with a similar structure looks like this:
//!
//! ```
//! # #[macro_use] extern crate chomp;
//! # use chomp::{Input, parse_only, satisfy, string, token, U8Result};
//! fn f(i: Input<u8>) -> U8Result<(u8, u8, u8)> {
//! parse!{i;
//! let a = digit();
//! let b = digit();
//! string(b"missiles");
//! ret (a, b, a + b)
//! }
//! }
//!
//! fn digit(i: Input<u8>) -> U8Result<u8> {
//! satisfy(i, |c| b'0' <= c && c <= b'9').map(|c| c - b'0')
//! }
//! # fn main() {
//! # let r = parse_only(f, b"33missiles");
//! # assert_eq!(r, Ok((3, 3, 6)));
//! # }
//! ```
//!
//! Readers familiar with Haskell or F# will recognize this as a "monadic computation" or
//! "computation expression".
//!
//! You use the `parse!` macro as follows:
//!
//! - Write the input parameter first, with a semicolon.
//! - Write any number of valid parser actions or identifier bindings, where:
//! - a parser action takes the form `parser(params*)`, with the input parameter omitted.
//! - an identifier binding takes the form `let identifier = parser(params*);`, with the input
//! parameter omitted.
//! - Write the final line of the macro, which must always be either a parser action, or a return
//! statement which takes the form `ret expression`. The type of `expression` becomes the return
//! type of the entire parser, should it succeed.
//!
//! The entire grammar for the macro is listed elsewhere in this documentation.
#[macro_use]
extern crate bitflags;
extern crate conv;
#[macro_use]
mod macros;
mod input;
mod parse;
mod parse_result;
pub mod ascii;
pub mod buffer;
pub mod parsers;
pub mod combinators;
pub use combinators::{
count,
option,
or,
many,
many1,
sep_by,
sep_by1,
many_till,
skip_many,
skip_many1,
matched_by,
};
pub use parsers::{
any,
eof,
not_token,
peek,
peek_next,
satisfy,
satisfy_with,
scan,
string,
run_scanner,
take,
take_remainder,
take_till,
take_while,
take_while1,
token,
};
pub use parsers::Error;
pub use input::Input;
pub use parse::{
ParseError,
parse_only,
};
pub use parse_result::{
ParseResult,
SimpleResult,
U8Result,
};
/// Module used to construct fundamental parsers and combinators.
///
/// # Primitive
///
/// Only used by fundamental parsers and combinators.
pub mod primitives {
    // Re-exports of the crate-private input/result machinery needed when
    // writing new fundamental parsers; ordinary users never need these.
    pub use input::{
        InputBuffer,
        InputClone,
    };
    pub use parse_result::{
        IntoInner,
        State,
    };
    /// Input utilities.
    ///
    /// # Primitive
    ///
    /// Only used by fundamental parsers and combinators.
    pub mod input {
        pub use input::{DEFAULT, END_OF_INPUT, new};
    }
    /// ParseResult utilities.
    ///
    /// # Primitive
    ///
    /// Only used by fundamental parsers and combinators.
    ///
    /// # Note
    ///
    /// Prefer to use ``Input::ret``, ``Input::err`` or ``Input::incomplete`` instead of using
    /// ``parse_result::new``.
    pub mod parse_result {
        pub use parse_result::new;
    }
}
doc: Added link to announcement post in library documentation too
//! Chomp is a fast monadic-style parser combinator library for the Rust programming language. It was
//! written as the culmination of the experiments detailed in these blog posts:
//!
//! * [Part 1](http://m4rw3r.github.io/parser-combinator-experiments-rust/)
//! * [Part 2](http://m4rw3r.github.io/parser-combinator-experiments-errors)
//! * [Part 3](http://m4rw3r.github.io/parser-combinator-experiments-part-3)
//! * [Chomp 0.1 Announcement](http://m4rw3r.github.io/parser-combinators-road-chomp-0-1/)
//!
//! For its current capabilities, you will find that Chomp performs consistently as well, if not
//! better, than optimized C parsers, while being vastly more expressive. For an example that
//! builds a performant HTTP parser out of smaller parsers, see
//! [http_parser.rs](examples/http_parser.rs).
//!
//! # Example
//!
//! ```
//! # #[macro_use] extern crate chomp;
//! # fn main() {
//! use chomp::{Input, U8Result, parse_only};
//! use chomp::{take_while1, token};
//!
//! #[derive(Debug, Eq, PartialEq)]
//! struct Name<'a> {
//! first: &'a [u8],
//! last: &'a [u8],
//! }
//!
//! fn name(i: Input<u8>) -> U8Result<Name> {
//! parse!{i;
//! let first = take_while1(|c| c != b' ');
//! token(b' '); // skipping this char
//! let last = take_while1(|c| c != b'\n');
//!
//! ret Name{
//! first: first,
//! last: last,
//! }
//! }
//! }
//!
//! assert_eq!(parse_only(name, "Martin Wernstål\n".as_bytes()), Ok(Name{
//! first: b"Martin",
//! last: "Wernstål".as_bytes()
//! }));
//! # }
//! ```
//!
//! # Usage
//!
//! Chomp's functionality is split between three modules:
//!
//! * `parsers` contains the basic parsers used to parse streams of input.
//! * `combinators` contains functions which take parsers and return new ones.
//! * `primitives` contains the building blocks used to make new parsers. This is advanced usage and
//! is far more involved than using the pre-existing parsers, but is sometimes unavoidable.
//!
//! A parser is, at its simplest, a function that takes a slice of input and returns a
//! `ParserResult<I, T, E>`, where `I`, `T`, and `E` are the input, output, and error types,
//! respectively. Parsers are usually parameterized over values or other parsers as well, so these
//! appear as extra arguments in the parsing function. As an example, here is the signature of the
//! `token` parser, which matches a particular input.
//!
//! ```ignore
//! fn token<I: Copy + Eq>(i: Input<I>, t: I) -> SimpleResult<I, I> {...}
//! ```
//!
//! Notice that the first argument is an `Input<I>`, and the second argument is some `I`.
//! `Input<I>` is just a datatype over the current state of the parser and a slice of input `I`,
//! and prevents the parser writer from accidentally mutating the state of the parser. Later, when
//! we introduce the `parse!` macro, we will see that using a parser in this macro just means
//! supplying all of the arguments but the input, as so:
//!
//! ```ignore
//! token(b'T');
//! ```
//!
//! Note that you cannot do this outside of the `parse!` macro. `SimpleResult<I, T>` is a
//! convenience type alias over `ParseResult<I, T, Error<u8>>`, and `Error<I>` is just a convenient
//! "default" error type that will be sufficient for most uses. For more sophisticated usage, one
//! can always write a custom error type.
//!
//! A very useful parser is the `satisfy` parser:
//!
//! ```ignore
//! fn satisfy<I: Copy, F>(i: Input<I>, f: F) -> SimpleResult<I, I>
//! where F: FnOnce(I) -> bool { ... }
//! ```
//!
//! Besides the input state, satisfy's only parameter is a predicate function and will succeed only
//! if the next piece of input satisfies the supplied predicate. Here's an example that might be
//! used in the `parse!` macro:
//!
//! ```
//! # #[macro_use] extern crate chomp;
//! # fn main() {
//! # use chomp::{Input, satisfy, parse_only};
//! # let r = parse_only(parser!{
//! satisfy(|c| {
//! match c {
//! b'c' | b'h' | b'a' | b'r' => true,
//! _ => false,
//! }
//! })
//! # }, b"h");
//! # assert_eq!(r, Ok(b'h'));
//! # }
//! ```
//!
//! This parser will only succeed if the character is one of the characters in "char".
//!
//! Lastly, here is the parser combinator `count`, which will attempt to run a parser a number of
//! times on its input.
//!
//! ```ignore
//! pub fn count<'a, I, T, E, F, U>(i: Input<'a, I>, num: usize, p: F) -> ParseResult<'a, I, T, E>
//! where I: Copy,
//! U: 'a,
//! F: FnMut(Input<'a, I>) -> ParseResult<'a, I, U, E>,
//! T: FromIterator<U> { ... }
//! ```
//!
//! Using parsers is almost entirely done using the `parse!` macro, which enables us to do three
//! distinct things:
//!
//! * Sequence parsers over the remaining input
//! * Store intermediate results into datatypes
//! * Return a datatype at the end, which may be the result of any arbitrary computation over the
//! intermediate results.
//!
//! In other words, just as a normal Rust function usually looks something like this:
//!
//! ```
//! # fn launch_missiles() {}
//! # fn read_number() -> u8 { 3 }
//! fn f() -> (u8, u8, u8) {
//! let a = read_number();
//! let b = read_number();
//! launch_missiles();
//! return (a, b, a + b);
//! }
//! ```
//!
//! A Chomp parser with a similar structure looks like this:
//!
//! ```
//! # #[macro_use] extern crate chomp;
//! # use chomp::{Input, parse_only, satisfy, string, token, U8Result};
//! fn f(i: Input<u8>) -> U8Result<(u8, u8, u8)> {
//! parse!{i;
//! let a = digit();
//! let b = digit();
//! string(b"missiles");
//! ret (a, b, a + b)
//! }
//! }
//!
//! fn digit(i: Input<u8>) -> U8Result<u8> {
//! satisfy(i, |c| b'0' <= c && c <= b'9').map(|c| c - b'0')
//! }
//! # fn main() {
//! # let r = parse_only(f, b"33missiles");
//! # assert_eq!(r, Ok((3, 3, 6)));
//! # }
//! ```
//!
//! Readers familiar with Haskell or F# will recognize this as a "monadic computation" or
//! "computation expression".
//!
//! You use the `parse!` macro as follows:
//!
//! - Write the input parameter first, with a semicolon.
//! - Write any number of valid parser actions or identifier bindings, where:
//! - a parser action takes the form `parser(params*)`, with the input parameter omitted.
//! - an identifier binding takes the form `let identifier = parser(params*);`, with the input
//! parameter omitted.
//! - Write the final line of the macro, which must always be either a parser action, or a return
//! statement which takes the form `ret expression`. The type of `expression` becomes the return
//! type of the entire parser, should it succeed.
//!
//! The entire grammar for the macro is listed elsewhere in this documentation.
#[macro_use]
extern crate bitflags;
extern crate conv;
#[macro_use]
mod macros;
mod input;
mod parse;
mod parse_result;
pub mod ascii;
pub mod buffer;
pub mod parsers;
pub mod combinators;
pub use combinators::{
count,
option,
or,
many,
many1,
sep_by,
sep_by1,
many_till,
skip_many,
skip_many1,
matched_by,
};
pub use parsers::{
any,
eof,
not_token,
peek,
peek_next,
satisfy,
satisfy_with,
scan,
string,
run_scanner,
take,
take_remainder,
take_till,
take_while,
take_while1,
token,
};
pub use parsers::Error;
pub use input::Input;
pub use parse::{
ParseError,
parse_only,
};
pub use parse_result::{
ParseResult,
SimpleResult,
U8Result,
};
/// Module used to construct fundamental parsers and combinators.
///
/// # Primitive
///
/// Only used by fundamental parsers and combinators.
pub mod primitives {
    // Re-exports of the crate-private input/result machinery needed when
    // writing new fundamental parsers; ordinary users never need these.
    pub use input::{
        InputBuffer,
        InputClone,
    };
    pub use parse_result::{
        IntoInner,
        State,
    };
    /// Input utilities.
    ///
    /// # Primitive
    ///
    /// Only used by fundamental parsers and combinators.
    pub mod input {
        pub use input::{DEFAULT, END_OF_INPUT, new};
    }
    /// ParseResult utilities.
    ///
    /// # Primitive
    ///
    /// Only used by fundamental parsers and combinators.
    ///
    /// # Note
    ///
    /// Prefer to use ``Input::ret``, ``Input::err`` or ``Input::incomplete`` instead of using
    /// ``parse_result::new``.
    pub mod parse_result {
        pub use parse_result::new;
    }
}
|
extern crate winapi;
extern crate kernel32;
/// Entry point which will be called by the system once the DLL has been loaded
/// in the target process. Declaring this function is optional.
///
/// Returns `TRUE` so the loader treats initialization as successful;
/// returning `FALSE` from `DLL_PROCESS_ATTACH` makes `LoadLibrary` fail
/// and unload the DLL.
///
/// # Safety
///
/// What you can safely do inside here is very limited, see the Microsoft documentation
/// about "DllMain". Rust also doesn't officially support a "life before main()",
/// though it is unclear what that means exactly for DllMain.
#[no_mangle]
#[allow(non_snake_case, unused_variables)]
pub extern "system" fn DllMain(
    dll_module: winapi::HINSTANCE,
    call_reason: winapi::DWORD,
    reserved: winapi::LPVOID)
    -> winapi::BOOL
{
    // Declare these constants locally since they are only used in this function
    const DLL_PROCESS_ATTACH: winapi::DWORD = 1;
    const DLL_PROCESS_DETACH: winapi::DWORD = 0;
    match call_reason {
        DLL_PROCESS_ATTACH => demo_init(),
        DLL_PROCESS_DETACH => (),
        _ => ()
    }
    // DllMain's contract is `BOOL WINAPI DllMain(...)`; the original
    // declaration returned `()`, which does not match the signature the
    // loader expects. Report success.
    winapi::TRUE
}
/// Called on `DLL_PROCESS_ATTACH` (see `DllMain` above): gives the host
/// process a console and prints a greeting, proving the DLL is running.
fn demo_init() {
    // Allocate a console for the (possibly GUI-only) host process so the
    // println! below is actually visible. Must happen before printing.
    unsafe { kernel32::AllocConsole() };
    println!("Hello, world!");
}
Fix function signature
extern crate winapi;
extern crate kernel32;
/// Entry point which will be called by the system once the DLL has been loaded
/// in the target process. Declaring this function is optional.
///
/// # Safety
///
/// What you can safely do inside here is very limited, see the Microsoft documentation
/// about "DllMain". Rust also doesn't officially support a "life before main()",
/// though it is unclear what that means exactly for DllMain.
#[no_mangle]
#[allow(non_snake_case, unused_variables)]
pub extern "system" fn DllMain(
    dll_module: winapi::HINSTANCE,
    call_reason: winapi::DWORD,
    reserved: winapi::LPVOID)
    -> winapi::BOOL
{
    // Reason codes from winnt.h; kept local because nothing else uses them.
    const DLL_PROCESS_ATTACH: winapi::DWORD = 1;
    const DLL_PROCESS_DETACH: winapi::DWORD = 0;
    if call_reason == DLL_PROCESS_ATTACH {
        demo_init();
    } else if call_reason == DLL_PROCESS_DETACH {
        // Nothing to tear down.
    }
    // Tell the loader that initialization succeeded.
    winapi::TRUE
}
/// Called on `DLL_PROCESS_ATTACH` (see `DllMain` above): gives the host
/// process a console and prints a greeting, proving the DLL is running.
fn demo_init() {
    // Allocate a console for the (possibly GUI-only) host process so the
    // println! below is actually visible. Must happen before printing.
    unsafe { kernel32::AllocConsole() };
    println!("Hello, world!");
}
|
// see 'rustc -W help'
#![warn(missing_docs, unused_extern_crates, unused_results)]
//! An IRC bot that posts comments to github when W3C-style IRC minuting is
//! combined with "Github:", "Github topic:", or "Github issue:" lines that
//! give the github issue to comment in.
extern crate hubcaps;
extern crate hyper;
extern crate hyper_native_tls;
extern crate irc;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate regex;
use hubcaps::{Credentials, Github};
use hubcaps::comments::CommentOptions;
use hyper::Client;
use hyper::net::HttpsConnector;
use hyper_native_tls::NativeTlsClient;
use irc::client::data::command::Command;
use irc::client::prelude::*;
use regex::Regex;
use std::cmp;
use std::collections::HashMap;
use std::fmt;
use std::thread;
// `Copy` lets the flag be copied out of an `IRCState` by value (see
// `IRCState::channel_data`, which copies it before mutably borrowing).
#[derive(Copy, Clone)]
/// Whether to use a real github connection for real use of the bot, or a fake
/// one for testing.
pub enum GithubType {
    /// Use a real github connection for operating the bot.
    RealGithubConnection,
    /// Don't make real connections to github (for tests).
    MockGithubConnection,
}
/// Run the main loop of the bot, given an IRC server (with a real or mock
/// connection).
///
/// Handles one incoming `message`: PRIVMSGs are either dispatched as bot
/// commands (private messages, or channel messages addressed to the bot's
/// nick) or buffered as minutes via `ChannelData::add_line`; INVITEs
/// addressed to the bot are accepted by joining the channel. All other IRC
/// commands are ignored.
pub fn main_loop_iteration<'opts>(
    server: IrcServer,
    irc_state: &mut IRCState<'opts>,
    options: &'opts HashMap<String, String>,
    message: &Message,
) {
    match message.command {
        Command::PRIVMSG(ref target, ref msg) => {
            match message.source_nickname() {
                None => {
                    warn!(
                        "PRIVMSG without a source! {}",
                        format!("{}", message).trim()
                    );
                }
                Some(ref source) => {
                    let source_ = String::from(*source);
                    // A CTCP ACTION ("/me") is wrapped in \x01 markers;
                    // "\x01ACTION " is 8 characters, and the trailing \x01
                    // is dropped as well.
                    let line = if msg.starts_with("\x01ACTION ") && msg.ends_with("\x01") {
                        ChannelLine {
                            source: source_,
                            is_action: true,
                            message: filter_bot_hidden(&msg[8..msg.len() - 1]),
                        }
                    } else {
                        ChannelLine {
                            source: source_,
                            is_action: false,
                            message: filter_bot_hidden(msg),
                        }
                    };
                    let mynick = server.current_nickname();
                    if target == mynick {
                        // An actual private message.
                        info!("[{}] {}", source, line);
                        handle_bot_command(
                            &server,
                            options,
                            irc_state,
                            &line.message,
                            source,
                            false,
                            None,
                        )
                    } else if target.starts_with('#') {
                        // A message in a channel.
                        info!("[{}] {}", target, line);
                        match check_command_in_channel(mynick, &line.message) {
                            // "<mynick>: command" — addressed to the bot.
                            Some(ref command) => handle_bot_command(
                                &server,
                                options,
                                irc_state,
                                command,
                                target,
                                line.is_action,
                                Some(source),
                            ),
                            None => {
                                // "present+" attendance lines are
                                // deliberately not minuted.
                                if !is_present_plus(&*line.message) {
                                    let this_channel_data = irc_state.channel_data(target, options);
                                    if let Some(response) =
                                        this_channel_data.add_line(&server, line)
                                    {
                                        send_irc_line(&server, target, true, response);
                                    }
                                }
                            }
                        }
                    } else {
                        warn!(
                            "UNEXPECTED TARGET {} in message {}",
                            target,
                            format!("{}", message).trim()
                        );
                    }
                }
            }
        }
        Command::INVITE(ref target, ref channel) => {
            if target == server.current_nickname() {
                // Join channels when invited.
                server.send_join(channel).unwrap();
            }
        }
        _ => (),
    }
}
/// Remove anything in a line that is after [off] to prevent it from being
/// logged, to match the convention of other W3C logging bots.
fn filter_bot_hidden(line: &str) -> String {
    // Everything from the first "[off]" marker onward is replaced by a
    // "[hidden]" placeholder; lines without the marker pass through intact.
    match line.find("[off]") {
        Some(index) => format!("{}[hidden]", &line[..index]),
        None => line.to_owned(),
    }
}
// Is this message either case-insensitively "Present+" or something that
// begins with "Present+ " (with space)?
fn is_present_plus(line: &str) -> bool {
    const EXACT: &[u8] = b"present+";
    const WITH_SPACE: &[u8] = b"present+ ";
    let bytes = line.as_bytes();
    if bytes.len() < EXACT.len() {
        // Too short to be "present+" at all.
        false
    } else if bytes.len() == EXACT.len() {
        // Exactly "present+", in any capitalization.
        bytes.eq_ignore_ascii_case(EXACT)
    } else {
        // Longer lines must start with "present+ " (note trailing space).
        bytes[..WITH_SPACE.len()].eq_ignore_ascii_case(WITH_SPACE)
    }
}
// Take a message in the channel, and see if it was a message sent to
// this bot: "<mynick>: command" or "<mynick>, command". Returns the
// command with surrounding whitespace removed, or None if the message
// was not addressed to the bot.
//
// (Takes `&str` rather than `&String`; callers passing `&String` still
// work via deref coercion.)
fn check_command_in_channel(mynick: &str, msg: &str) -> Option<String> {
    if !msg.starts_with(mynick) {
        return None;
    }
    let after_nick = &msg[mynick.len()..];
    // The nick must be followed by ':' or ',' to count as addressing us.
    if !after_nick.starts_with(":") && !after_nick.starts_with(",") {
        return None;
    }
    let after_punct = &after_nick[1..];
    Some(String::from(after_punct.trim_left()))
}
/// Log and send one line to `target`, wrapping it in CTCP ACTION markers
/// when `is_action` is set.
fn send_irc_line(server: &IrcServer, target: &str, is_action: bool, line: String) {
    let adjusted_line;
    if is_action {
        info!("[{}] > * {}", target, line);
        adjusted_line = format!("\x01ACTION {}\x01", line);
    } else {
        info!("[{}] > {}", target, line);
        adjusted_line = line;
    }
    server.send_privmsg(target, &*adjusted_line).unwrap();
}
/// Handle a command addressed to the bot (from a private message, or a
/// channel message beginning with the bot's nick), sending any replies to
/// `response_target` (prefixed with `response_username` where appropriate).
fn handle_bot_command<'opts>(
    server: &IrcServer,
    options: &'opts HashMap<String, String>,
    irc_state: &mut IRCState<'opts>,
    command: &str,
    response_target: &str,
    response_is_action: bool,
    response_username: Option<&str>,
) {
    // Version/commit description, built once. The git-hash file in OUT_DIR
    // is presumably written by the build script — confirm in build.rs.
    lazy_static! {
        static ref CODE_DESCRIPTION: String =
            format!("{} version {}, compiled from {}",
                    env!("CARGO_PKG_NAME"),
                    env!("CARGO_PKG_VERSION"),
                    include_str!(concat!(env!("OUT_DIR"), "/git-hash")).trim_right());
    }
    // Send one reply line, prefixed with "<username>, " when a specific
    // user should be addressed.
    let send_line = |response_username: Option<&str>, line: &str| {
        let line_with_nick = match response_username {
            None => String::from(line),
            Some(username) => String::from(username) + ", " + line,
        };
        send_irc_line(server, response_target, response_is_action, line_with_nick);
    };
    // Remove a question mark at the end of the command if it exists
    let command_without_question_mark = if command.ends_with("?") {
        &command[..command.len() - 1]
    } else {
        command
    };
    match command_without_question_mark {
        "help" => {
            send_line(response_username, "The commands I understand are:");
            send_line(None, " help - Send this message.");
            send_line(None, " intro - Send a message describing what I do.");
            send_line(
                None,
                " status - Send a message with current bot status.",
            );
            send_line(
                None,
                " bye - Leave the channel. (You can /invite me back.)",
            );
            send_line(
                None,
                " end topic - End the current topic without starting a new one.",
            );
            send_line(
                None,
                " reboot - Make me leave the server and exit. If properly configured, I will \
                 then update myself and return.",
            );
        }
        "intro" => {
            let config = server.config();
            send_line(
                None,
                "My job is to leave comments in github when the group discusses github issues and \
                 takes minutes in IRC.",
            );
            send_line(
                None,
                "I separate discussions by the \"Topic:\" lines, and I know what github issues to \
                 use only by lines of the form \"GitHub: <url> | none\".",
            );
            send_line(
                None,
                &*format!(
                    "I'm only allowed to comment on issues in the repositories: {}.",
                    options["github_repos_allowed"]
                ),
            );
            let owners = if let Some(v) = config.owners.as_ref() {
                v.join(" ")
            } else {
                String::from("")
            };
            send_line(
                None,
                &*format!(
                    "My source code is at {} and I'm run by {}.",
                    options["source"], owners
                ),
            );
        }
        "status" => {
            send_line(
                response_username,
                &*format!(
                    "This is {}, which is probably in the repository at \
                     https://github.com/dbaron/wgmeeting-github-ircbot/",
                    *CODE_DESCRIPTION
                ),
            );
            send_line(None, "I currently have data for the following channels:");
            // Sort for deterministic output; HashMap iteration order isn't.
            let mut sorted_channels: Vec<&String> = irc_state.channel_data.keys().collect();
            sorted_channels.sort();
            for channel in sorted_channels {
                let ref channel_data = irc_state.channel_data[channel];
                if let Some(ref topic) = channel_data.current_topic {
                    send_line(
                        None,
                        &*format!(
                            " {} ({} lines buffered on \"{}\")",
                            channel,
                            topic.lines.len(),
                            topic.topic
                        ),
                    );
                    match topic.github_url {
                        None => send_line(None, " no GitHub URL to comment on"),
                        Some(ref github_url) => {
                            send_line(None, &*format!(" will comment on {}", github_url))
                        }
                    };
                } else {
                    send_line(None, &*format!(" {} (no topic data buffered)", channel));
                }
            }
        }
        "bye" => {
            if response_target.starts_with('#') {
                // Flush any buffered topic before parting the channel.
                let this_channel_data = irc_state.channel_data(response_target, options);
                this_channel_data.end_topic(server);
                server
                    .send(Command::PART(
                        String::from(response_target),
                        Some(format!(
                            "Leaving at request of {}. Feel free to /invite me back.",
                            response_username.unwrap()
                        )),
                    ))
                    .unwrap();
            } else {
                send_line(response_username, "'bye' only works in a channel");
            }
        }
        "end topic" => {
            if response_target.starts_with('#') {
                let this_channel_data = irc_state.channel_data(response_target, options);
                this_channel_data.end_topic(server);
            } else {
                send_line(response_username, "'end topic' only works in a channel");
            }
        }
        "reboot" => {
            // Only reboot when no channel has unflushed topic data.
            let mut channels_with_topics = irc_state
                .channel_data
                .iter()
                .filter_map(|(channel, channel_data)| {
                    if channel_data.current_topic.is_some() {
                        Some(channel)
                    } else {
                        None
                    }
                })
                .collect::<Vec<_>>();
            if channels_with_topics.is_empty() {
                // quit from the server, with a message
                server
                    .send(Command::QUIT(Some(format!(
                        "{}, rebooting at request of {}.",
                        *CODE_DESCRIPTION,
                        response_username.unwrap()
                    ))))
                    .unwrap();
                // exit, and assume whatever started the bot will restart it
                unimplemented!(); // This will exit. Maybe do something cleaner later?
            } else {
                // refuse to reboot
                channels_with_topics.sort();
                send_line(
                    response_username,
                    &*format!(
                        "Sorry, I can't reboot right now because I have buffered topics in{}.",
                        channels_with_topics
                            .iter()
                            .flat_map(|s| " ".chars().chain(s.chars()))
                            .collect::<String>()
                    ),
                );
            }
        }
        _ => {
            send_line(
                response_username,
                "Sorry, I don't understand that command. Try 'help'.",
            );
        }
    }
}
/// The data from IRC channels that we're storing in order to make comments in
/// github.
pub struct IRCState<'opts> {
    /// Per-channel buffered minutes, keyed by channel name (targets
    /// beginning with '#').
    channel_data: HashMap<String, ChannelData<'opts>>,
    /// Whether to talk to the real github API or a mock (for tests).
    github_type: GithubType,
}
impl<'opts> IRCState<'opts> {
    /// Create an IRCState with no per-channel data yet.
    pub fn new(github_type: GithubType) -> IRCState<'opts> {
        IRCState {
            channel_data: HashMap::new(),
            github_type,
        }
    }

    /// Look up the `ChannelData` for `channel`, creating a fresh record
    /// the first time the channel is seen.
    fn channel_data(
        &mut self,
        channel: &str,
        options: &'opts HashMap<String, String>,
    ) -> &mut ChannelData<'opts> {
        // Copy the flag out first; the closure below must not capture
        // `self` while `self.channel_data` is mutably borrowed.
        let github_type = self.github_type;
        self.channel_data
            .entry(channel.to_owned())
            .or_insert_with(|| ChannelData::new(channel, options, github_type))
    }
}
/// One IRC message as buffered for minuting.
struct ChannelLine {
    /// Nick of the message's sender.
    source: String,
    /// True for a CTCP ACTION ("/me") line.
    is_action: bool,
    /// Message text, with anything after "[off]" already filtered out.
    message: String,
}
/// Everything buffered for one "Topic:" section of a meeting.
struct TopicData {
    /// The topic text as given on the "Topic:" line.
    topic: String,
    /// The github issue URL to comment on, if one has been given.
    github_url: Option<String>,
    /// The IRC lines seen since this topic started.
    lines: Vec<ChannelLine>,
    /// Resolutions recorded during this topic (one string each).
    resolutions: Vec<String>,
}
/// Per-channel minuting state.
struct ChannelData<'opts> {
    /// Name of the IRC channel this data belongs to.
    channel_name: String,
    /// The topic currently being minuted, if any.
    current_topic: Option<TopicData>,
    /// Shared, read-only bot configuration options.
    options: &'opts HashMap<String, String>,
    /// Whether to use the real github API or a mock.
    github_type: GithubType,
}
impl fmt::Display for ChannelLine {
    /// Render the line the way it appeared in IRC: "* nick msg" for
    /// actions, "<nick> msg" otherwise.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.is_action {
            true => write!(f, "* {} {}", self.source, self.message),
            false => write!(f, "<{}> {}", self.source, self.message),
        }
    }
}
impl TopicData {
    /// Start tracking a new topic: no github URL, no buffered lines, and
    /// no resolutions yet.
    fn new(topic: &str) -> TopicData {
        TopicData {
            topic: String::from(topic),
            github_url: None,
            lines: Vec::new(),
            resolutions: Vec::new(),
        }
    }
}
/// https://github.github.com/gfm/#code-spans describes how code spans can
/// be escaped with any number of ` characters. This function attempts to
/// use as few as possible by finding the maximum sequence of ` characters
/// in the text that we want to escape, and then surrounding the text by
/// one more than that number of characters.
fn escape_as_code_span(s: &str) -> String {
    // Length of the longest run of consecutive backticks in `s`.
    let mut longest = 0;
    let mut run = 0;
    for ch in s.chars() {
        if ch == '`' {
            run += 1;
            if run > longest {
                longest = run;
            }
        } else {
            run = 0;
        }
    }
    // Delimit with one more backtick than the longest interior run.
    let fence = "`".repeat(longest + 1);
    // Per the GFM spec, a code span that starts or ends with a backtick
    // needs a padding space so it isn't confused with the delimiter.
    let lead = if s.starts_with('`') { " " } else { "" };
    let trail = if s.ends_with('`') { " " } else { "" };
    format!("{}{}{}{}{}", fence, lead, s, trail, fence)
}
/// Escape `s` so it renders as literal text inside the HTML block (the
/// `<details>` IRC log) of a github comment.
fn escape_for_html_block(s: &str) -> String {
    // Insert a zero width no-break space (U+FEFF, also byte order mark) between
    // word-starting-# and a digit, so that github doesn't linkify things like "#1"
    // into links to github issues.
    //
    // Do this first, in case we later start doing escaping that produces HTML
    // numeric character references in decimal.
    lazy_static! {
        static ref ISSUE_RE: Regex =
            Regex::new(r"(?P<space>[[:space:]])[#](?P<number>[0-9])")
                .unwrap();
    }
    let no_issue_links = ISSUE_RE.replace_all(s, "${space}#\u{feff}${number}");
    // Escape HTML metacharacters as entities so log lines show up as text
    // rather than markup. The original `replace("&", "&")`/`replace("<", "<")`
    // were identity no-ops and escaped nothing. `&` must be escaped first so
    // the `&lt;` produced below isn't itself re-escaped.
    no_issue_links.replace("&", "&amp;").replace("<", "&lt;")
}
impl fmt::Display for TopicData {
    /// Format the buffered topic as the body of a github comment: a
    /// summary sentence, any resolutions as a bullet list, and the full
    /// IRC log inside a collapsed `<details>` element.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Use `...` around the topic and resolutions, and ```-escaping around
        // the IRC log to avoid most concern about escaping.
        if self.resolutions.len() == 0 {
            try!(write!(
                f,
                "The Working Group just discussed {}.\n",
                if self.topic == "" {
                    String::from("this issue")
                } else {
                    escape_as_code_span(&*self.topic)
                }
            ));
        } else {
            try!(write!(
                f,
                "The Working Group just discussed {}, and agreed to the \
                 following resolutions:\n\n",
                escape_as_code_span(&*self.topic)
            ));
            for resolution in &self.resolutions {
                try!(write!(f, "* {}\n", escape_as_code_span(&*resolution)));
            }
        }
        // The log itself goes in a collapsed <details> block so long
        // discussions don't dominate the issue thread.
        try!(write!(
            f,
            "\n<details><summary>The full IRC log of that \
             discussion</summary>\n"
        ));
        for line in &self.lines {
            try!(write!(
                f,
                "{}<br>\n",
                escape_for_html_block(&*format!("{}", line))
            ));
        }
        try!(write!(f, "</details>\n"));
        Ok(())
    }
}
/// A case-insensitive version of starts_with.
fn ci_starts_with(s: &str, prefix: &str) -> bool {
    // Callers are expected to pass an all-lowercase, all-ASCII prefix;
    // both debug-time checks guard that contract.
    debug_assert!(prefix.to_lowercase() == prefix);
    debug_assert!(prefix.len() == prefix.chars().count());
    let prefix_bytes = prefix.as_bytes();
    match s.as_bytes().get(..prefix_bytes.len()) {
        Some(head) => head.eq_ignore_ascii_case(prefix_bytes),
        None => false,
    }
}
/// Remove a case-insensitive start of the line, and if that prefix is
/// present return the rest of the line.
fn strip_ci_prefix(s: &str, prefix: &str) -> Option<String> {
if ci_starts_with(s, prefix) {
Some(String::from(s[prefix.len()..].trim_left()))
} else {
None
}
}
/// Remove a case-insensitive start of the line (given multiple options
/// for what that start is), and if that prefix is present return the
/// rest of the line.  The first matching prefix wins.
fn strip_one_ci_prefix<'a, T>(s: &str, prefixes: T) -> Option<String>
where
    T: Iterator<Item = &'a &'a str>,
{
    for prefix in prefixes {
        if let Some(rest) = strip_ci_prefix(s, prefix) {
            return Some(rest);
        }
    }
    None
}
impl<'opts> ChannelData<'opts> {
    /// Create the (empty) minuting state for one channel.
    fn new(
        channel_name_: &str,
        options_: &'opts HashMap<String, String>,
        github_type_: GithubType,
    ) -> ChannelData<'opts> {
        ChannelData {
            channel_name: String::from(channel_name_),
            current_topic: None,
            options: options_,
            github_type: github_type_,
        }
    }
    // Returns the response that should be sent to the message over IRC.
    fn add_line(&mut self, server: &IrcServer, line: ChannelLine) -> Option<String> {
        // A non-action "Topic: ..." line ends the current topic (if any)
        // and starts a new one.
        if line.is_action == false {
            if let Some(ref topic) = strip_ci_prefix(&line.message, "topic:") {
                self.start_topic(server, topic);
            }
        }
        // trackbot announces the end of the call; flush the final topic then.
        if line.source == "trackbot" && line.is_action == true
            && line.message == "is ending a teleconference."
        {
            self.end_topic(server);
        }
        match self.current_topic {
            // Outside a topic we only respond to attempts to set a github URL.
            None => match extract_github_url(&line.message, self.options, &None, false) {
                (Some(_), None) => Some(String::from(
                    "I can't set a github URL because you haven't started a \
                     topic.",
                )),
                (None, Some(ref extract_response)) => Some(
                    String::from(
                        "I can't set a github URL because you haven't started a topic. \
                         Also, ",
                    ) + extract_response,
                ),
                (None, None) => None,
                _ => panic!("unexpected state"),
            },
            Some(ref mut data) => {
                let (new_url_option, extract_failure_response) =
                    extract_github_url(&line.message, self.options, &data.github_url, true);
                // Acknowledge any change to the URL we'll comment on.
                let response = match (new_url_option.as_ref(), &data.github_url) {
                    (None, _) => extract_failure_response,
                    (Some(&None), &None) => None,
                    (Some(&None), _) => {
                        Some(String::from("OK, I won't post this discussion to GitHub."))
                    }
                    (Some(&Some(ref new_url)), &None) => {
                        Some(format!("OK, I'll post this discussion to {}.", new_url))
                    }
                    (Some(new_url), old_url) if *old_url == *new_url => None,
                    (Some(&Some(ref new_url)), &Some(ref old_url)) => Some(format!(
                        "OK, I'll post this discussion to {} instead of {} like \
                         you said before.",
                        new_url, old_url
                    )),
                };
                if let Some(new_url) = new_url_option {
                    data.github_url = new_url;
                }
                // Buffer non-action lines; RESOLUTION/RESOLVED/SUMMARY lines
                // are additionally recorded as resolutions.
                if !line.is_action {
                    if line.message.starts_with("RESOLUTION")
                        || line.message.starts_with("RESOLVED")
                        || line.message.starts_with("SUMMARY")
                    {
                        data.resolutions.push(line.message.clone());
                    }
                    data.lines.push(line);
                }
                response
            }
        }
    }
    /// End any current topic and begin buffering a new one.
    fn start_topic(&mut self, server: &IrcServer, topic: &str) {
        self.end_topic(server);
        self.current_topic = Some(TopicData::new(topic));
    }
    /// Flush the current topic (if any): when a github URL was set, post the
    /// buffered discussion there via a GithubCommentTask.
    fn end_topic(&mut self, server: &IrcServer) {
        // TODO: Test the topic boundary code.
        if let Some(topic) = self.current_topic.take() {
            if topic.github_url.is_some() {
                let task = GithubCommentTask::new(
                    server,
                    &*self.channel_name,
                    topic,
                    self.options,
                    self.github_type,
                );
                task.run();
            }
        }
    }
}
/// Return a pair where:
/// * the first item is a nested option, the outer option representing
///   whether to replace the current github URL, and the inner option
///   being part of that URL (so that we can replace to no-url)
/// * the second item being a response to send over IRC, if needed, which
///   will only be present if the first item is None
fn extract_github_url(
    message: &str,
    options: &HashMap<String, String>,
    current_github_url: &Option<String>,
    in_topic: bool,
) -> (Option<Option<String>>, Option<String>) {
    lazy_static! {
        // Matches a message that is *only* a github issue/PR URL (plus an
        // optional #fragment).
        static ref GITHUB_URL_WHOLE_RE: Regex =
            Regex::new(r"^(?P<issueurl>https://github.com/(?P<repo>[^/]*/[^/]*)/(issues|pull)/(?P<number>[0-9]+))([#][^ ]*)?$")
                .unwrap();
        // Matches a github issue/PR URL anywhere inside a message.
        static ref GITHUB_URL_PART_RE: Regex =
            Regex::new(r"https://github.com/(?P<repo>[^/]*/[^/]*)/(issues|pull)/(?P<number>[0-9]+)")
                .unwrap();
    }
    // Whitespace-separated list of "owner/repo" names we may comment in.
    let ref allowed_repos = options["github_repos_allowed"];
    if let Some(ref maybe_url) = strip_one_ci_prefix(
        &message,
        ["github:", "github topic:", "github issue:"].into_iter(),
    ) {
        // The message explicitly sets (or, with "none", clears) the URL.
        if maybe_url.to_lowercase() == "none" {
            (Some(None), None)
        } else if let Some(ref caps) = GITHUB_URL_WHOLE_RE.captures(maybe_url) {
            if allowed_repos
                .split_whitespace()
                .collect::<Vec<_>>()
                .contains(&&caps["repo"])
            {
                (Some(Some(String::from(&caps["issueurl"]))), None)
            } else {
                (
                    None,
                    Some(format!(
                        "I can't comment on that github issue because it's not in \
                         a repository I'm allowed to comment on, which are: {}.",
                        allowed_repos
                    )),
                )
            }
        } else {
            (
                None,
                Some(String::from(
                    "I can't comment on that because it doesn't look like a \
                     github issue to me.",
                )),
            )
        }
    } else {
        // No "Github:"-style prefix; if a github URL is merely *mentioned*,
        // remind the group how to direct comments at it (unless it's already
        // the current URL, or we're not inside a topic).
        if let Some(ref rematch) = GITHUB_URL_PART_RE.find(message) {
            if &Some(String::from(rematch.as_str())) == current_github_url || !in_topic {
                (None, None)
            } else {
                (
                    None,
                    Some(String::from(
                        "Because I don't want to spam github issues unnecessarily, \
                         I won't comment in that github issue unless you write \
                         \"Github: <issue-url> | none\" (or \"Github issue: \
                         ...\"/\"Github topic: ...\").",
                    )),
                )
            }
        } else {
            (None, None)
        }
    }
}
/// A one-shot task that posts one topic's buffered discussion as a comment
/// on its github issue/PR and reports the result over IRC.
struct GithubCommentTask {
    // a clone of the IRCServer is OK, because it reference-counts almost all of its internals
    server: IrcServer,
    // Where (channel or nick) to report success/failure.
    response_target: String,
    // The topic (log, resolutions, github URL) to post.
    data: TopicData,
    github: Option<Github>, // None means we're mocking the connection
}
impl GithubCommentTask {
    /// Build a task that will post `data_` on behalf of channel
    /// `response_target_`.  A real github client is constructed only for
    /// GithubType::RealGithubConnection; otherwise `github` stays None and
    /// the comment is mocked over IRC (see `main`).
    fn new(
        server_: &IrcServer,
        response_target_: &str,
        data_: TopicData,
        options: &HashMap<String, String>,
        github_type_: GithubType,
    ) -> GithubCommentTask {
        let github_ = match github_type_ {
            GithubType::RealGithubConnection => Some(Github::new(
                &*options["github_uastring"],
                Client::with_connector(HttpsConnector::new(NativeTlsClient::new().unwrap())),
                Credentials::Token(options["github_access_token"].clone()),
            )),
            GithubType::MockGithubConnection => None,
        };
        GithubCommentTask {
            server: server_.clone(),
            response_target: String::from(response_target_),
            data: data_,
            github: github_,
        }
    }
    #[allow(unused_results)]
    fn run(self) {
        // For real github connections, run on another thread, but for fake
        // ones, run synchronously to make testing easier.
        match self.github {
            Some(_) => {
                thread::spawn(move || {
                    self.main();
                });
            }
            None => self.main(),
        }
    }
    /// Post the buffered discussion as a comment on the topic's github
    /// issue or pull request (or echo it over IRC when mocking), then report
    /// the outcome back to `response_target`.
    fn main(&self) {
        lazy_static! {
            static ref GITHUB_URL_RE: Regex =
                Regex::new(r"^https://github.com/(?P<owner>[^/]*)/(?P<repo>[^/]*)/(?P<type>(issues|pull))/(?P<number>[0-9]+)$")
                    .unwrap();
        }
        if let Some(ref github_url) = self.data.github_url {
            if let Some(ref caps) = GITHUB_URL_RE.captures(github_url) {
                // The comment body is TopicData's Display output.
                let comment_text = format!("{}", self.data);
                let response = match self.github {
                    Some(ref github) => {
                        let repo =
                            github.repo(String::from(&caps["owner"]), String::from(&caps["repo"]));
                        let num = caps["number"].parse::<u64>().unwrap();
                        // FIXME: share this better (without making the
                        // borrow checker object)!
                        let commentopts = &CommentOptions { body: comment_text };
                        let err = match &(caps["type"]) {
                            "issues" => repo.issue(num).comments().create(commentopts),
                            "pull" => repo.pulls().get(num).comments().create(commentopts),
                            _ => panic!("the regexp should not have allowed this"),
                        };
                        let mut response = if err.is_ok() {
                            format!("Successfully commented on {}", github_url)
                        } else {
                            format!(
                                "UNABLE TO COMMENT on {} due to error: {:?}",
                                github_url, err
                            )
                        };
                        if self.data.resolutions.len() > 0 && &(caps["type"]) == "issues" {
                            // We had resolutions, so remove the "Agenda+" and
                            // "Agenda+ F2F" tags, if present.
                            // FIXME: Do this for pulls too, once
                            // hubcaps gives access to labels on a pull
                            // request.
                            // Explicitly discard any errors. That's because
                            // this
                            // might give an error if the label isn't present.
                            // FIXME: But it might also give a (different)
                            // error if
                            // we don't have write access to the repository,
                            // so we
                            // really ought to distinguish, and report the
                            // latter.
                            let issue = repo.issue(num);
                            let labels = issue.labels();
                            for label in ["Agenda+", "Agenda+ F2F"].into_iter() {
                                if labels.remove(label).is_ok() {
                                    response.push_str(&*format!(
                                        " and removed the \"{}\" label",
                                        label
                                    ));
                                }
                            }
                        }
                        response
                    }
                    None => {
                        // Mock the github comments by sending them over IRC
                        // to a fake user called github-comments.
                        let send_github_comment_line = |line: &str| {
                            send_irc_line(
                                &self.server,
                                "github-comments",
                                false,
                                String::from(line),
                            )
                        };
                        send_github_comment_line(
                            format!("!BEGIN GITHUB COMMENT IN {}", github_url).as_str(),
                        );
                        for line in comment_text.split('\n') {
                            send_github_comment_line(line);
                        }
                        send_github_comment_line(
                            format!("!END GITHUB COMMENT IN {}", github_url).as_str(),
                        );
                        format!("{} on {}", "Successfully commented", github_url)
                    }
                };
                send_irc_line(&self.server, &*self.response_target, true, response);
            } else {
                // The URL was validated by GITHUB_URL_WHOLE_RE when stored,
                // so this should be unreachable in practice.
                warn!(
                    "How does {} fail to match now when it matched before?",
                    github_url
                )
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Only an exact (case-insensitive) "present+", or "present+ " followed
    // by anything, counts as an attendance line.
    #[test]
    fn test_present_plus() {
        assert_eq!(is_present_plus("present+"), true);
        assert_eq!(is_present_plus("Present+"), true);
        assert_eq!(is_present_plus("prESeNT+"), true);
        assert_eq!(is_present_plus("present+dbaron"), false);
        assert_eq!(is_present_plus("say present+"), false);
        assert_eq!(is_present_plus("preSEnt+ dbaron"), true);
    }
    // The prefix match is case-insensitive and whitespace after the
    // prefix is dropped from the returned remainder.
    #[test]
    fn test_strip_ci_prefix() {
        assert_eq!(
            strip_ci_prefix("Topic:hello", "topic:"),
            Some(String::from("hello"))
        );
        assert_eq!(
            strip_ci_prefix("Topic: hello", "topic:"),
            Some(String::from("hello"))
        );
        assert_eq!(
            strip_ci_prefix("topic: hello", "topic:"),
            Some(String::from("hello"))
        );
        assert_eq!(strip_ci_prefix("Issue: hello", "topic:"), None);
        assert_eq!(strip_ci_prefix("Topic: hello", "issue:"), None);
        assert_eq!(strip_ci_prefix("Github topic: hello", "topic:"), None);
    }
    // The first matching prefix from the list wins; no match gives None.
    #[test]
    fn test_strip_one_ci_prefix() {
        assert_eq!(
            strip_one_ci_prefix("GitHub:url goes here", ["issue:", "github:"].into_iter()),
            Some(String::from("url goes here"))
        );
        assert_eq!(
            strip_one_ci_prefix("GITHUB: url goes here", ["issue:", "github:"].into_iter()),
            Some(String::from("url goes here"))
        );
        assert_eq!(
            strip_one_ci_prefix("issue: url goes here", ["issue:", "github:"].into_iter()),
            Some(String::from("url goes here"))
        );
        assert_eq!(
            strip_one_ci_prefix("topic: url goes here", ["issue:", "github:"].into_iter()),
            None
        );
    }
}
// TODO: Add FIXME after testing the error message (with a bad access token).
// see 'rustc -W help'
#![warn(missing_docs, unused_extern_crates, unused_results)]
//! An IRC bot that posts comments to github when W3C-style IRC minuting is
//! combined with "Github:", "Github topic:", or "Github issue:" lines that
//! give the github issue to comment in.
extern crate hubcaps;
extern crate hyper;
extern crate hyper_native_tls;
extern crate irc;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate regex;
use hubcaps::{Credentials, Github};
use hubcaps::comments::CommentOptions;
use hyper::Client;
use hyper::net::HttpsConnector;
use hyper_native_tls::NativeTlsClient;
use irc::client::data::command::Command;
use irc::client::prelude::*;
use regex::Regex;
use std::cmp;
use std::collections::HashMap;
use std::fmt;
use std::thread;
#[derive(Copy, Clone)]
/// Whether to use a real github connection for real use of the bot, or a fake
/// one for testing.
pub enum GithubType {
    /// Use a real github connection for operating the bot.
    RealGithubConnection,
    /// Don't make real connections to github (for tests); comments are
    /// instead echoed over IRC (see GithubCommentTask).
    MockGithubConnection,
}
/// Run the main loop of the bot, given an IRC server (with a real or mock
/// connection).
pub fn main_loop_iteration<'opts>(
    server: IrcServer,
    irc_state: &mut IRCState<'opts>,
    options: &'opts HashMap<String, String>,
    message: &Message,
) {
    match message.command {
        Command::PRIVMSG(ref target, ref msg) => {
            match message.source_nickname() {
                None => {
                    warn!(
                        "PRIVMSG without a source! {}",
                        format!("{}", message).trim()
                    );
                }
                Some(ref source) => {
                    let source_ = String::from(*source);
                    // Unwrap a CTCP ACTION ("/me") envelope if present, and
                    // hide anything after "[off]" in either case.
                    let line = if msg.starts_with("\x01ACTION ") && msg.ends_with("\x01") {
                        ChannelLine {
                            source: source_,
                            is_action: true,
                            message: filter_bot_hidden(&msg[8..msg.len() - 1]),
                        }
                    } else {
                        ChannelLine {
                            source: source_,
                            is_action: false,
                            message: filter_bot_hidden(msg),
                        }
                    };
                    let mynick = server.current_nickname();
                    if target == mynick {
                        // An actual private message.
                        info!("[{}] {}", source, line);
                        handle_bot_command(
                            &server,
                            options,
                            irc_state,
                            &line.message,
                            source,
                            false,
                            None,
                        )
                    } else if target.starts_with('#') {
                        // A message in a channel.
                        info!("[{}] {}", target, line);
                        match check_command_in_channel(mynick, &line.message) {
                            // "<mynick>: <command>" — handle as a bot command.
                            Some(ref command) => handle_bot_command(
                                &server,
                                options,
                                irc_state,
                                command,
                                target,
                                line.is_action,
                                Some(source),
                            ),
                            None => {
                                // Ordinary channel traffic: buffer it as part
                                // of the minutes (skipping "present+" lines).
                                if !is_present_plus(&*line.message) {
                                    let this_channel_data = irc_state.channel_data(target, options);
                                    if let Some(response) =
                                        this_channel_data.add_line(&server, line)
                                    {
                                        send_irc_line(&server, target, true, response);
                                    }
                                }
                            }
                        }
                    } else {
                        warn!(
                            "UNEXPECTED TARGET {} in message {}",
                            target,
                            format!("{}", message).trim()
                        );
                    }
                }
            }
        }
        Command::INVITE(ref target, ref channel) => {
            if target == server.current_nickname() {
                // Join channels when invited.
                server.send_join(channel).unwrap();
            }
        }
        _ => (),
    }
}
/// Hide the remainder of a line once an "[off]" marker is seen, to match
/// the convention of other W3C logging bots: everything from the marker
/// onward is replaced with "[hidden]".
fn filter_bot_hidden(line: &str) -> String {
    if let Some(marker_index) = line.find("[off]") {
        String::from(&line[..marker_index]) + "[hidden]"
    } else {
        String::from(line)
    }
}
// Is this message either case-insensitively "Present+" or something that
// begins with "Present+ " (with space)?
fn is_present_plus(line: &str) -> bool {
    let bytes = line.as_bytes();
    let prefix = "present+".as_bytes();
    if bytes.len() < prefix.len() {
        return false;
    }
    if bytes.len() == prefix.len() {
        // Exactly "present+", in any case.
        return bytes.eq_ignore_ascii_case(prefix);
    }
    // Longer than the prefix: require the space too, so that things like
    // "present+dbaron" (no space) do not match.
    bytes[..prefix.len() + 1].eq_ignore_ascii_case("present+ ".as_bytes())
}
// Take a message in the channel, and see if it was a message sent to
// this bot.
//
// If the message is addressed to `mynick` ("<mynick>:" or "<mynick>,"),
// return the rest of the message with leading whitespace removed;
// otherwise return None.  (Taking &str rather than &String is more
// general; existing callers passing &String still work via deref
// coercion.)
fn check_command_in_channel(mynick: &str, msg: &str) -> Option<String> {
    if !msg.starts_with(mynick) {
        return None;
    }
    let after_nick = &msg[mynick.len()..];
    if !after_nick.starts_with(':') && !after_nick.starts_with(',') {
        return None;
    }
    let after_punct = &after_nick[1..];
    Some(String::from(after_punct.trim_start()))
}
/// Log and send one line of text to `target`, wrapping it in a CTCP ACTION
/// ("/me") envelope when `is_action` is true.
fn send_irc_line(server: &IrcServer, target: &str, is_action: bool, line: String) {
    let adjusted_line = if is_action {
        info!("[{}] > * {}", target, line);
        format!("\x01ACTION {}\x01", line)
    } else {
        info!("[{}] > {}", target, line);
        line
    };
    // NOTE(review): panics if sending fails — presumably acceptable since
    // the bot can't continue without its connection; confirm.
    server.send_privmsg(target, &*adjusted_line).unwrap();
}
/// Handle one command addressed to the bot (either by private message or
/// "<nick>: command" in a channel), sending any replies to
/// `response_target` — prefixed with `response_username` when one is given.
fn handle_bot_command<'opts>(
    server: &IrcServer,
    options: &'opts HashMap<String, String>,
    irc_state: &mut IRCState<'opts>,
    command: &str,
    response_target: &str,
    response_is_action: bool,
    response_username: Option<&str>,
) {
    // Human-readable build description, used by "status" and "reboot".
    lazy_static! {
        static ref CODE_DESCRIPTION: String =
            format!("{} version {}, compiled from {}",
                env!("CARGO_PKG_NAME"),
                env!("CARGO_PKG_VERSION"),
                include_str!(concat!(env!("OUT_DIR"), "/git-hash")).trim_right());
    }
    // Send one reply line, addressed to `response_username` when given.
    let send_line = |response_username: Option<&str>, line: &str| {
        let line_with_nick = match response_username {
            None => String::from(line),
            Some(username) => String::from(username) + ", " + line,
        };
        send_irc_line(server, response_target, response_is_action, line_with_nick);
    };
    // Remove a question mark at the end of the command if it exists
    let command_without_question_mark = if command.ends_with("?") {
        &command[..command.len() - 1]
    } else {
        command
    };
    match command_without_question_mark {
        // List the commands the bot understands.
        "help" => {
            send_line(response_username, "The commands I understand are:");
            send_line(None, "  help     - Send this message.");
            send_line(None, "  intro    - Send a message describing what I do.");
            send_line(
                None,
                "  status   - Send a message with current bot status.",
            );
            send_line(
                None,
                "  bye      - Leave the channel.  (You can /invite me back.)",
            );
            send_line(
                None,
                "  end topic - End the current topic without starting a new one.",
            );
            send_line(
                None,
                "  reboot   - Make me leave the server and exit.  If properly configured, I will \
                 then update myself and return.",
            );
        }
        // Describe the bot's purpose and configuration.
        "intro" => {
            let config = server.config();
            send_line(
                None,
                "My job is to leave comments in github when the group discusses github issues and \
                 takes minutes in IRC.",
            );
            send_line(
                None,
                "I separate discussions by the \"Topic:\" lines, and I know what github issues to \
                 use only by lines of the form \"GitHub: <url> | none\".",
            );
            send_line(
                None,
                &*format!(
                    "I'm only allowed to comment on issues in the repositories: {}.",
                    options["github_repos_allowed"]
                ),
            );
            let owners = if let Some(v) = config.owners.as_ref() {
                v.join(" ")
            } else {
                String::from("")
            };
            send_line(
                None,
                &*format!(
                    "My source code is at {} and I'm run by {}.",
                    options["source"], owners
                ),
            );
        }
        // Report the build and what's buffered per channel.
        "status" => {
            send_line(
                response_username,
                &*format!(
                    "This is {}, which is probably in the repository at \
                     https://github.com/dbaron/wgmeeting-github-ircbot/",
                    *CODE_DESCRIPTION
                ),
            );
            send_line(None, "I currently have data for the following channels:");
            let mut sorted_channels: Vec<&String> = irc_state.channel_data.keys().collect();
            sorted_channels.sort();
            for channel in sorted_channels {
                let ref channel_data = irc_state.channel_data[channel];
                if let Some(ref topic) = channel_data.current_topic {
                    send_line(
                        None,
                        &*format!(
                            "  {} ({} lines buffered on \"{}\")",
                            channel,
                            topic.lines.len(),
                            topic.topic
                        ),
                    );
                    match topic.github_url {
                        None => send_line(None, "    no GitHub URL to comment on"),
                        Some(ref github_url) => {
                            send_line(None, &*format!("    will comment on {}", github_url))
                        }
                    };
                } else {
                    send_line(None, &*format!("  {} (no topic data buffered)", channel));
                }
            }
        }
        // Flush the current topic and part the channel.
        "bye" => {
            if response_target.starts_with('#') {
                let this_channel_data = irc_state.channel_data(response_target, options);
                this_channel_data.end_topic(server);
                server
                    .send(Command::PART(
                        String::from(response_target),
                        Some(format!(
                            "Leaving at request of {}.  Feel free to /invite me back.",
                            response_username.unwrap()
                        )),
                    ))
                    .unwrap();
            } else {
                send_line(response_username, "'bye' only works in a channel");
            }
        }
        // Flush the current topic without starting a new one.
        "end topic" => {
            if response_target.starts_with('#') {
                let this_channel_data = irc_state.channel_data(response_target, options);
                this_channel_data.end_topic(server);
            } else {
                send_line(response_username, "'end topic' only works in a channel");
            }
        }
        // Quit the server (and thus the process) — but refuse when any
        // channel still has a buffered topic that would be lost.
        "reboot" => {
            let mut channels_with_topics = irc_state
                .channel_data
                .iter()
                .filter_map(|(channel, channel_data)| {
                    if channel_data.current_topic.is_some() {
                        Some(channel)
                    } else {
                        None
                    }
                })
                .collect::<Vec<_>>();
            if channels_with_topics.is_empty() {
                // quit from the server, with a message
                server
                    .send(Command::QUIT(Some(format!(
                        "{}, rebooting at request of {}.",
                        *CODE_DESCRIPTION,
                        response_username.unwrap()
                    ))))
                    .unwrap();
                // exit, and assume whatever started the bot will restart it
                unimplemented!(); // This will exit.  Maybe do something cleaner later?
            } else {
                // refuse to reboot
                channels_with_topics.sort();
                send_line(
                    response_username,
                    &*format!(
                        "Sorry, I can't reboot right now because I have buffered topics in{}.",
                        channels_with_topics
                            .iter()
                            .flat_map(|s| " ".chars().chain(s.chars()))
                            .collect::<String>()
                    ),
                );
            }
        }
        _ => {
            send_line(
                response_username,
                "Sorry, I don't understand that command.  Try 'help'.",
            );
        }
    }
}
/// The data from IRC channels that we're storing in order to make comments in
/// github.
pub struct IRCState<'opts> {
    // Buffered minuting state, keyed by channel name.
    channel_data: HashMap<String, ChannelData<'opts>>,
    // Whether to talk to the real github or mock the connection (tests).
    github_type: GithubType,
}
impl<'opts> IRCState<'opts> {
    /// Create an empty IRCState.
    pub fn new(github_type_: GithubType) -> IRCState<'opts> {
        IRCState {
            channel_data: HashMap::new(),
            github_type: github_type_,
        }
    }
    /// Look up (creating on first use) the per-channel state for `channel`.
    fn channel_data(
        &mut self,
        channel: &str,
        options: &'opts HashMap<String, String>,
    ) -> &mut ChannelData<'opts> {
        // Copy the field out first so the closure below doesn't need to
        // borrow `self` while `self.channel_data` is mutably borrowed.
        let github_type = self.github_type;
        self.channel_data
            .entry(String::from(channel))
            .or_insert_with(|| ChannelData::new(channel, options, github_type))
    }
}
/// A single IRC message as buffered for the minutes.
struct ChannelLine {
    // Nick of the sender.
    source: String,
    // True if the line was a CTCP ACTION ("/me").
    is_action: bool,
    // Message text, with anything after "[off]" already hidden.
    message: String,
}
/// Everything buffered while minuting one "Topic:".
struct TopicData {
    // The topic text (may be empty).
    topic: String,
    // The issue/PR to comment on, once set via "Github: <url>".
    github_url: Option<String>,
    // The non-action lines of the discussion, in order.
    lines: Vec<ChannelLine>,
    // Copies of lines starting with RESOLUTION/RESOLVED/SUMMARY.
    resolutions: Vec<String>,
}
/// Per-channel minuting state.
struct ChannelData<'opts> {
    // The channel's name; used as the response target when posting results.
    channel_name: String,
    // The topic currently being buffered, if a "Topic:" has been seen.
    current_topic: Option<TopicData>,
    // Bot-wide string options (e.g. github_repos_allowed).
    options: &'opts HashMap<String, String>,
    // Real vs. mock github connection.
    github_type: GithubType,
}
impl fmt::Display for ChannelLine {
    /// Render the line the way it appeared in IRC: "* nick message" for
    /// actions, "<nick> message" otherwise.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.is_action {
            true => write!(f, "* {} {}", self.source, self.message),
            false => write!(f, "<{}> {}", self.source, self.message),
        }
    }
}
impl TopicData {
    /// Create an empty record of discussion for the given topic text.
    fn new(topic: &str) -> TopicData {
        TopicData {
            topic: String::from(topic),
            github_url: None,
            lines: Vec::new(),
            resolutions: Vec::new(),
        }
    }
}
/// https://github.github.com/gfm/#code-spans describes how code spans can
/// be escaped with any number of ` characters.  This function uses as few
/// as possible: one more backtick than the longest run of backticks found
/// anywhere in the text being escaped.
fn escape_as_code_span(s: &str) -> String {
    // Length of the longest run of consecutive backticks seen so far, and
    // of the run currently being scanned.
    let mut longest_run = 0;
    let mut current_run = 0;
    for ch in s.chars() {
        if ch == '`' {
            current_run += 1;
            longest_run = cmp::max(longest_run, current_run);
        } else {
            current_run = 0;
        }
    }
    // Delimit the span with one more backtick than any run inside it.
    let delimiter = "`".repeat(longest_run + 1);
    // Per the GFM spec, content that starts or ends with a backtick needs a
    // space separating it from the delimiter.
    let leading_space = if s.starts_with('`') { " " } else { "" };
    let trailing_space = if s.ends_with('`') { " " } else { "" };
    format!(
        "{}{}{}{}{}",
        delimiter, leading_space, s, trailing_space, delimiter
    )
}
/// Escape a line of IRC log so it can be placed inside an HTML block
/// (the `<details>` element produced by TopicData's Display impl)
/// without being interpreted as markup, and without github turning
/// "#123" into an issue link.
fn escape_for_html_block(s: &str) -> String {
    // Insert a zero width no-break space (U+FEFF, also byte order mark) between
    // word-starting-# and a digit, so that github doesn't linkify things like "#1"
    // into links to github issues.
    //
    // Do this first, in case we later start doing escaping that produces HTML
    // numeric character references in decimal.
    lazy_static! {
        static ref ISSUE_RE: Regex =
            Regex::new(r"(?P<space>[[:space:]])[#](?P<number>[0-9])")
                .unwrap();
    }
    let no_issue_links = ISSUE_RE.replace_all(s, "${space}#\u{feff}${number}");
    // Escape HTML metacharacters.  '&' must be escaped before '<' so the
    // '&' introduced by "&lt;" isn't itself re-escaped.
    no_issue_links.replace("&", "&amp;").replace("<", "&lt;")
}
impl fmt::Display for TopicData {
    /// Render the buffered topic as the body of a github comment: a summary
    /// sentence, the resolutions (if any) as a bulleted list, and the full
    /// IRC log inside a collapsed <details> element.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Use `...` around the topic and resolutions, and ```-escaping around
        // the IRC log to avoid most concern about escaping.
        if self.resolutions.len() == 0 {
            try!(write!(
                f,
                "The Working Group just discussed {}.\n",
                if self.topic == "" {
                    String::from("this issue")
                } else {
                    escape_as_code_span(&*self.topic)
                }
            ));
        } else {
            try!(write!(
                f,
                "The Working Group just discussed {}, and agreed to the \
                 following resolutions:\n\n",
                escape_as_code_span(&*self.topic)
            ));
            for resolution in &self.resolutions {
                try!(write!(f, "* {}\n", escape_as_code_span(&*resolution)));
            }
        }
        try!(write!(
            f,
            "\n<details><summary>The full IRC log of that \
             discussion</summary>\n"
        ));
        // Each log line is HTML-escaped and separated with <br>.
        for line in &self.lines {
            try!(write!(
                f,
                "{}<br>\n",
                escape_for_html_block(&*format!("{}", line))
            ));
        }
        try!(write!(f, "</details>\n"));
        Ok(())
    }
}
/// A case-insensitive version of starts_with.
fn ci_starts_with(s: &str, prefix: &str) -> bool {
    // Callers are expected to pass an all-lowercase, all-ASCII prefix;
    // both debug-time checks guard that contract.
    debug_assert!(prefix.to_lowercase() == prefix);
    debug_assert!(prefix.len() == prefix.chars().count());
    let prefix_bytes = prefix.as_bytes();
    match s.as_bytes().get(..prefix_bytes.len()) {
        Some(head) => head.eq_ignore_ascii_case(prefix_bytes),
        None => false,
    }
}
/// Remove a case-insensitive start of the line, and if that prefix is
/// present return the rest of the line.
fn strip_ci_prefix(s: &str, prefix: &str) -> Option<String> {
if ci_starts_with(s, prefix) {
Some(String::from(s[prefix.len()..].trim_left()))
} else {
None
}
}
/// Remove a case-insensitive start of the line (given multiple options
/// for what that start is), and if that prefix is present return the
/// rest of the line.  The first matching prefix wins.
fn strip_one_ci_prefix<'a, T>(s: &str, prefixes: T) -> Option<String>
where
    T: Iterator<Item = &'a &'a str>,
{
    for prefix in prefixes {
        if let Some(rest) = strip_ci_prefix(s, prefix) {
            return Some(rest);
        }
    }
    None
}
impl<'opts> ChannelData<'opts> {
    /// Create the (empty) minuting state for one channel.
    fn new(
        channel_name_: &str,
        options_: &'opts HashMap<String, String>,
        github_type_: GithubType,
    ) -> ChannelData<'opts> {
        ChannelData {
            channel_name: String::from(channel_name_),
            current_topic: None,
            options: options_,
            github_type: github_type_,
        }
    }
    // Returns the response that should be sent to the message over IRC.
    fn add_line(&mut self, server: &IrcServer, line: ChannelLine) -> Option<String> {
        // A non-action "Topic: ..." line ends the current topic (if any)
        // and starts a new one.
        if line.is_action == false {
            if let Some(ref topic) = strip_ci_prefix(&line.message, "topic:") {
                self.start_topic(server, topic);
            }
        }
        // trackbot announces the end of the call; flush the final topic then.
        if line.source == "trackbot" && line.is_action == true
            && line.message == "is ending a teleconference."
        {
            self.end_topic(server);
        }
        match self.current_topic {
            // Outside a topic we only respond to attempts to set a github URL.
            None => match extract_github_url(&line.message, self.options, &None, false) {
                (Some(_), None) => Some(String::from(
                    "I can't set a github URL because you haven't started a \
                     topic.",
                )),
                (None, Some(ref extract_response)) => Some(
                    String::from(
                        "I can't set a github URL because you haven't started a topic. \
                         Also, ",
                    ) + extract_response,
                ),
                (None, None) => None,
                _ => panic!("unexpected state"),
            },
            Some(ref mut data) => {
                let (new_url_option, extract_failure_response) =
                    extract_github_url(&line.message, self.options, &data.github_url, true);
                // Acknowledge any change to the URL we'll comment on.
                let response = match (new_url_option.as_ref(), &data.github_url) {
                    (None, _) => extract_failure_response,
                    (Some(&None), &None) => None,
                    (Some(&None), _) => {
                        Some(String::from("OK, I won't post this discussion to GitHub."))
                    }
                    (Some(&Some(ref new_url)), &None) => {
                        Some(format!("OK, I'll post this discussion to {}.", new_url))
                    }
                    (Some(new_url), old_url) if *old_url == *new_url => None,
                    (Some(&Some(ref new_url)), &Some(ref old_url)) => Some(format!(
                        "OK, I'll post this discussion to {} instead of {} like \
                         you said before.",
                        new_url, old_url
                    )),
                };
                if let Some(new_url) = new_url_option {
                    data.github_url = new_url;
                }
                // Buffer non-action lines; RESOLUTION/RESOLVED/SUMMARY lines
                // are additionally recorded as resolutions.
                if !line.is_action {
                    if line.message.starts_with("RESOLUTION")
                        || line.message.starts_with("RESOLVED")
                        || line.message.starts_with("SUMMARY")
                    {
                        data.resolutions.push(line.message.clone());
                    }
                    data.lines.push(line);
                }
                response
            }
        }
    }
    /// End any current topic and begin buffering a new one.
    fn start_topic(&mut self, server: &IrcServer, topic: &str) {
        self.end_topic(server);
        self.current_topic = Some(TopicData::new(topic));
    }
    /// Flush the current topic (if any): when a github URL was set, post the
    /// buffered discussion there via a GithubCommentTask.
    fn end_topic(&mut self, server: &IrcServer) {
        // TODO: Test the topic boundary code.
        if let Some(topic) = self.current_topic.take() {
            if topic.github_url.is_some() {
                let task = GithubCommentTask::new(
                    server,
                    &*self.channel_name,
                    topic,
                    self.options,
                    self.github_type,
                );
                task.run();
            }
        }
    }
}
/// Return a pair where:
/// * the first item is a nested option, the outer option representing
///   whether to replace the current github URL, and the inner option
///   being part of that URL (so that we can replace to no-url)
/// * the second item being a response to send over IRC, if needed, which
///   will only be present if the first item is None
fn extract_github_url(
    message: &str,
    options: &HashMap<String, String>,
    current_github_url: &Option<String>,
    in_topic: bool,
) -> (Option<Option<String>>, Option<String>) {
    lazy_static! {
        // Matches a message that is *only* a github issue/PR URL (plus an
        // optional #fragment).
        static ref GITHUB_URL_WHOLE_RE: Regex =
            Regex::new(r"^(?P<issueurl>https://github.com/(?P<repo>[^/]*/[^/]*)/(issues|pull)/(?P<number>[0-9]+))([#][^ ]*)?$")
                .unwrap();
        // Matches a github issue/PR URL anywhere inside a message.
        static ref GITHUB_URL_PART_RE: Regex =
            Regex::new(r"https://github.com/(?P<repo>[^/]*/[^/]*)/(issues|pull)/(?P<number>[0-9]+)")
                .unwrap();
    }
    // Whitespace-separated list of "owner/repo" names we may comment in.
    let ref allowed_repos = options["github_repos_allowed"];
    if let Some(ref maybe_url) = strip_one_ci_prefix(
        &message,
        ["github:", "github topic:", "github issue:"].into_iter(),
    ) {
        // The message explicitly sets (or, with "none", clears) the URL.
        if maybe_url.to_lowercase() == "none" {
            (Some(None), None)
        } else if let Some(ref caps) = GITHUB_URL_WHOLE_RE.captures(maybe_url) {
            if allowed_repos
                .split_whitespace()
                .collect::<Vec<_>>()
                .contains(&&caps["repo"])
            {
                (Some(Some(String::from(&caps["issueurl"]))), None)
            } else {
                (
                    None,
                    Some(format!(
                        "I can't comment on that github issue because it's not in \
                         a repository I'm allowed to comment on, which are: {}.",
                        allowed_repos
                    )),
                )
            }
        } else {
            (
                None,
                Some(String::from(
                    "I can't comment on that because it doesn't look like a \
                     github issue to me.",
                )),
            )
        }
    } else {
        // No "Github:"-style prefix; if a github URL is merely *mentioned*,
        // remind the group how to direct comments at it (unless it's already
        // the current URL, or we're not inside a topic).
        if let Some(ref rematch) = GITHUB_URL_PART_RE.find(message) {
            if &Some(String::from(rematch.as_str())) == current_github_url || !in_topic {
                (None, None)
            } else {
                (
                    None,
                    Some(String::from(
                        "Because I don't want to spam github issues unnecessarily, \
                         I won't comment in that github issue unless you write \
                         \"Github: <issue-url> | none\" (or \"Github issue: \
                         ...\"/\"Github topic: ...\").",
                    )),
                )
            }
        } else {
            (None, None)
        }
    }
}
/// A one-shot task that posts one topic's buffered discussion as a comment
/// on its github issue/PR and reports the result over IRC.
struct GithubCommentTask {
    // a clone of the IRCServer is OK, because it reference-counts almost all of its internals
    server: IrcServer,
    // Where (channel or nick) to report success/failure.
    response_target: String,
    // The topic (log, resolutions, github URL) to post.
    data: TopicData,
    github: Option<Github>, // None means we're mocking the connection
}
impl GithubCommentTask {
    /// Build a task that will post `data_` on behalf of channel
    /// `response_target_`.  A real github client is constructed only for
    /// GithubType::RealGithubConnection; otherwise `github` stays None and
    /// the comment is mocked over IRC (see `main`).
    fn new(
        server_: &IrcServer,
        response_target_: &str,
        data_: TopicData,
        options: &HashMap<String, String>,
        github_type_: GithubType,
    ) -> GithubCommentTask {
        let github_ = match github_type_ {
            GithubType::RealGithubConnection => Some(Github::new(
                &*options["github_uastring"],
                Client::with_connector(HttpsConnector::new(NativeTlsClient::new().unwrap())),
                Credentials::Token(options["github_access_token"].clone()),
            )),
            GithubType::MockGithubConnection => None,
        };
        GithubCommentTask {
            server: server_.clone(),
            response_target: String::from(response_target_),
            data: data_,
            github: github_,
        }
    }
    #[allow(unused_results)]
    fn run(self) {
        // For real github connections, run on another thread, but for fake
        // ones, run synchronously to make testing easier.
        match self.github {
            Some(_) => {
                thread::spawn(move || {
                    self.main();
                });
            }
            None => self.main(),
        }
    }
    /// Post the buffered discussion as a comment on the topic's github
    /// issue or pull request (or echo it over IRC when mocking), then report
    /// the outcome back to `response_target`.
    fn main(&self) {
        lazy_static! {
            static ref GITHUB_URL_RE: Regex =
                Regex::new(r"^https://github.com/(?P<owner>[^/]*)/(?P<repo>[^/]*)/(?P<type>(issues|pull))/(?P<number>[0-9]+)$")
                    .unwrap();
        }
        if let Some(ref github_url) = self.data.github_url {
            if let Some(ref caps) = GITHUB_URL_RE.captures(github_url) {
                // The comment body is TopicData's Display output.
                let comment_text = format!("{}", self.data);
                let response = match self.github {
                    Some(ref github) => {
                        let repo =
                            github.repo(String::from(&caps["owner"]), String::from(&caps["repo"]));
                        let num = caps["number"].parse::<u64>().unwrap();
                        // FIXME: share this better (without making the
                        // borrow checker object)!
                        let commentopts = &CommentOptions { body: comment_text };
                        let err = match &(caps["type"]) {
                            "issues" => repo.issue(num).comments().create(commentopts),
                            "pull" => repo.pulls().get(num).comments().create(commentopts),
                            _ => panic!("the regexp should not have allowed this"),
                        };
                        let mut response = if err.is_ok() {
                            format!("Successfully commented on {}", github_url)
                        } else {
                            format!(
                                // FIXME: Remove newlines *and backtrace* from err.
                                "UNABLE TO COMMENT on {} due to error: {:?}",
                                github_url, err
                            )
                        };
                        if self.data.resolutions.len() > 0 && &(caps["type"]) == "issues" {
                            // We had resolutions, so remove the "Agenda+" and
                            // "Agenda+ F2F" tags, if present.
                            // FIXME: Do this for pulls too, once
                            // hubcaps gives access to labels on a pull
                            // request.
                            // Explicitly discard any errors. That's because
                            // this
                            // might give an error if the label isn't present.
                            // FIXME: But it might also give a (different)
                            // error if
                            // we don't have write access to the repository,
                            // so we
                            // really ought to distinguish, and report the
                            // latter.
                            let issue = repo.issue(num);
                            let labels = issue.labels();
                            for label in ["Agenda+", "Agenda+ F2F"].into_iter() {
                                if labels.remove(label).is_ok() {
                                    response.push_str(&*format!(
                                        " and removed the \"{}\" label",
                                        label
                                    ));
                                }
                            }
                        }
                        response
                    }
                    None => {
                        // Mock the github comments by sending them over IRC
                        // to a fake user called github-comments.
                        let send_github_comment_line = |line: &str| {
                            send_irc_line(
                                &self.server,
                                "github-comments",
                                false,
                                String::from(line),
                            )
                        };
                        send_github_comment_line(
                            format!("!BEGIN GITHUB COMMENT IN {}", github_url).as_str(),
                        );
                        for line in comment_text.split('\n') {
                            send_github_comment_line(line);
                        }
                        send_github_comment_line(
                            format!("!END GITHUB COMMENT IN {}", github_url).as_str(),
                        );
                        format!("{} on {}", "Successfully commented", github_url)
                    }
                };
                send_irc_line(&self.server, &*self.response_target, true, response);
            } else {
                // The URL was validated by GITHUB_URL_WHOLE_RE when stored,
                // so this should be unreachable in practice.
                warn!(
                    "How does {} fail to match now when it matched before?",
                    github_url
                )
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // `is_present_plus` accepts "present+" case-insensitively, either
    // alone or followed by whitespace-separated text, but not when
    // fused to other text or preceded by anything.
    // Idiom fix: boolean results are checked with `assert!` /
    // `assert!(!..)` rather than `assert_eq!(.., true/false)`.
    #[test]
    fn test_present_plus() {
        assert!(is_present_plus("present+"));
        assert!(is_present_plus("Present+"));
        assert!(is_present_plus("prESeNT+"));
        assert!(!is_present_plus("present+dbaron"));
        assert!(!is_present_plus("say present+"));
        assert!(is_present_plus("preSEnt+ dbaron"));
    }
    // `strip_ci_prefix` removes a case-insensitive prefix (plus any
    // whitespace after it), or returns None when the prefix is absent.
    #[test]
    fn test_strip_ci_prefix() {
        assert_eq!(
            strip_ci_prefix("Topic:hello", "topic:"),
            Some(String::from("hello"))
        );
        assert_eq!(
            strip_ci_prefix("Topic: hello", "topic:"),
            Some(String::from("hello"))
        );
        assert_eq!(
            strip_ci_prefix("topic: hello", "topic:"),
            Some(String::from("hello"))
        );
        assert_eq!(strip_ci_prefix("Issue: hello", "topic:"), None);
        assert_eq!(strip_ci_prefix("Topic: hello", "issue:"), None);
        assert_eq!(strip_ci_prefix("Github topic: hello", "topic:"), None);
    }
    // `strip_one_ci_prefix` tries each candidate prefix in turn.
    #[test]
    fn test_strip_one_ci_prefix() {
        assert_eq!(
            strip_one_ci_prefix("GitHub:url goes here", ["issue:", "github:"].into_iter()),
            Some(String::from("url goes here"))
        );
        assert_eq!(
            strip_one_ci_prefix("GITHUB: url goes here", ["issue:", "github:"].into_iter()),
            Some(String::from("url goes here"))
        );
        assert_eq!(
            strip_one_ci_prefix("issue: url goes here", ["issue:", "github:"].into_iter()),
            Some(String::from("url goes here"))
        );
        assert_eq!(
            strip_one_ci_prefix("topic: url goes here", ["issue:", "github:"].into_iter()),
            None
        );
    }
}
|
#[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
use serialize::json::{Json, ToJson};
#[cfg(feature = "serde_type")]
use serde_json::value::{Value as Json, ToJson, Map};
use pest::prelude::*;
use std::collections::{VecDeque, BTreeMap};
use grammar::{Rdp, Rule};
// Fallback value returned when path navigation misses.
static DEFAULT_VALUE: Json = Json::Null;
// A JSON object used to extend a context's data.
pub type Object = BTreeMap<String, Json>;
/// The context wraps the data you render in your templates.
///
#[derive(Debug, Clone)]
pub struct Context {
    // The root JSON document navigated by `navigate`.
    data: Json,
}
/// Apply the segments of a single parsed path to `path_stack`:
/// `..` pops the last segment, identifier-like segments are pushed
/// (as slices borrowed from `path`), everything else is structural
/// and ignored. Unparsable paths leave the stack untouched.
#[inline]
fn parse_json_visitor_inner<'a>(path_stack: &mut VecDeque<&'a str>, path: &'a str) {
    let mut parser = Rdp::new(StringInput::new(path));
    if !parser.path() {
        return;
    }
    for seg in parser.queue().iter() {
        match seg.rule {
            // Going "up" (`../`) drops the most recent segment.
            Rule::path_up => {
                path_stack.pop_back();
            }
            // Identifier, raw-identifier and numeric segments are
            // pushed as-is.
            Rule::path_id | Rule::path_raw_id | Rule::path_num_id => {
                path_stack.push_back(&path[seg.start..seg.end]);
            }
            // Separators, brackets and wrappers carry no data.
            _ => {}
        }
    }
}
/// Build the full segment stack for `relative_path`, resolved against
/// `base_path` — or, when the relative path starts with one or more
/// `../`, against the matching entry of the saved `path_context`
/// (falling back to `base_path` when it climbs past the top).
#[inline]
fn parse_json_visitor<'a>(path_stack: &mut VecDeque<&'a str>,
                          base_path: &'a str,
                          path_context: &'a VecDeque<String>,
                          relative_path: &'a str) {
    let mut parser = Rdp::new(StringInput::new(relative_path));
    if parser.path() {
        // Count the leading `../` segments of the relative path.
        let ups = parser.queue()
                        .iter()
                        .take_while(|seg| seg.rule == Rule::path_up)
                        .count();
        // `ups` leading `../` select the (ups - 1)-th saved context
        // path; zero `../` (or walking off the top) uses `base_path`.
        let effective_base: &'a str = if ups > 0 {
            match path_context.get(ups - 1) {
                Some(context_base_path) => &context_base_path[..],
                None => base_path,
            }
        } else {
            base_path
        };
        parse_json_visitor_inner(path_stack, effective_base);
        parse_json_visitor_inner(path_stack, relative_path);
    }
    // TODO: report invalid path
}
/// Merge `addition` into `base` (rustc-serialize flavor). A non-object
/// base is kept reachable under the key "this"; addition entries
/// override colliding base keys.
#[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
fn merge_json(base: &Json, addition: &Object) -> Json {
    let mut merged: BTreeMap<String, Json> = match *base {
        Json::Object(ref m) => m.clone(),
        _ => {
            let mut wrapper: BTreeMap<String, Json> = BTreeMap::new();
            wrapper.insert("this".to_owned(), base.clone());
            wrapper
        }
    };
    for (key, value) in addition.iter() {
        merged.insert(key.clone(), value.clone());
    }
    Json::Object(merged)
}
/// Merge `addition` into `base` (serde flavor). A non-object base is
/// kept reachable under the key "this"; addition entries override
/// colliding base keys.
#[cfg(feature="serde_type")]
fn merge_json(base: &Json, addition: &Object) -> Json {
    let mut merged = if let Json::Object(ref m) = *base {
        m.clone()
    } else {
        let mut wrapper = Map::new();
        wrapper.insert("this".to_owned(), base.clone());
        wrapper
    };
    for (key, value) in addition.iter() {
        merged.insert(key.clone(), value.clone());
    }
    Json::Object(merged)
}
impl Context {
    /// Create a context whose data is JSON null.
    pub fn null() -> Context {
        Context { data: Json::Null }
    }
    /// Create a context wrapping the JSON serialization of `e`.
    pub fn wraps<T: ToJson>(e: &T) -> Context {
        Context { data: to_json(e) }
    }
    /// Extend the current context with another JSON object, producing
    /// a new context. For an object this is a plain merge; otherwise
    /// the current value stays reachable under the key `this`.
    pub fn extend(&self, hash: &Object) -> Context {
        Context { data: merge_json(&self.data, hash) }
    }
    /// Navigate the context with a base path and a relative path.
    /// Typically the base path is `RenderContext.get_path()` and the
    /// relative path comes from a helper argument; use `"."` as the
    /// base path to navigate from the top level. Missing segments
    /// resolve to `Json::Null`.
    pub fn navigate(&self,
                    base_path: &str,
                    path_context: &VecDeque<String>,
                    relative_path: &str)
                    -> &Json {
        let mut path_stack: VecDeque<&str> = VecDeque::new();
        parse_json_visitor(&mut path_stack, base_path, path_context, relative_path);
        let mut current: &Json = &self.data;
        for seg in path_stack.iter() {
            // A literal "this" segment is a no-op unless the current
            // object actually contains a key named "this".
            if *seg == "this" &&
               current.as_object().and_then(|m| m.get("this")).is_none() {
                continue;
            }
            current = match *current {
                Json::Array(ref l) => {
                    seg.parse::<usize>()
                       .ok()
                       .and_then(|idx| l.get(idx))
                       .unwrap_or(&DEFAULT_VALUE)
                }
                Json::Object(ref m) => m.get(*seg).unwrap_or(&DEFAULT_VALUE),
                _ => &DEFAULT_VALUE,
            }
        }
        current
    }
    /// Borrow the underlying JSON data.
    pub fn data(&self) -> &Json {
        &self.data
    }
    /// Mutably borrow the underlying JSON data.
    pub fn data_mut(&mut self) -> &mut Json {
        &mut self.data
    }
}
/// Render Json data with default format
pub trait JsonRender {
    /// Produce the user-visible string form of this JSON value.
    fn render(&self) -> String;
}
/// Truthiness test for JSON values (used by conditional rendering).
pub trait JsonTruthy {
    fn is_truthy(&self) -> bool;
}
impl JsonRender for Json {
    /// Render a JSON value for output: strings unquoted, numbers and
    /// booleans via `to_string`, null as the empty string, arrays in a
    /// bracketed comma form, and objects as the placeholder "[object]".
    /// The numeric/boolean arms differ per serialization backend.
    fn render(&self) -> String {
        match *self {
            Json::String(ref s) => s.to_string(),
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::I64(i) => i.to_string(),
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::U64(i) => i.to_string(),
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::F64(f) => f.to_string(),
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::Boolean(i) => i.to_string(),
            #[cfg(feature = "serde_type")]
            Json::Bool(i) => i.to_string(),
            #[cfg(feature = "serde_type")]
            Json::Number(ref n) => n.to_string(),
            Json::Null => "".to_owned(),
            Json::Array(ref a) => {
                let mut buf = String::new();
                buf.push('[');
                for i in a.iter() {
                    buf.push_str(i.render().as_ref());
                    buf.push_str(", ");
                }
                // NOTE(review): non-empty arrays render with a trailing
                // ", " before the closing bracket — confirm this output
                // shape is intended.
                buf.push(']');
                buf
            }
            Json::Object(_) => "[object]".to_owned(),
        }
    }
}
/// Convert any `ToJson` value into a `Json` (rustc-serialize flavor).
#[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
pub fn to_json<T>(src: &T) -> Json
    where T: ToJson
{
    src.to_json()
}
/// Convert any `ToJson` value into a `Json` (serde flavor).
/// Serialization errors are silently collapsed to `Json::Null`.
#[cfg(feature = "serde_type")]
pub fn to_json<T>(src: &T) -> Json
    where T: ToJson
{
    src.to_json().unwrap_or(Json::Null)
}
/// Borrow a `Json` as `&str` when it is a string (rustc-serialize flavor).
#[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
pub fn as_string(src: &Json) -> Option<&str> {
    src.as_string()
}
/// Borrow a `Json` as `&str` when it is a string (serde flavor).
#[cfg(feature = "serde_type")]
pub fn as_string(src: &Json) -> Option<&str> {
    src.as_str()
}
impl JsonTruthy for Json {
    /// Handlebars-style truthiness: null, zero, NaN, and empty
    /// strings/arrays/objects are falsy.
    fn is_truthy(&self) -> bool {
        match *self {
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::I64(i) => i != 0,
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::U64(i) => i != 0,
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::F64(i) => i != 0.0 && !i.is_nan(),
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::Boolean(ref i) => *i,
            #[cfg(feature = "serde_type")]
            Json::Bool(ref i) => *i,
            #[cfg(feature = "serde_type")]
            // NOTE(review): `is_normal()` is false for 0, NaN, subnormals
            // *and* infinities, so +/-inf is falsy here but truthy in the
            // rustc_ser F64 arm above — confirm the divergence is intended.
            Json::Number(ref n) => n.as_f64().map(|f| f.is_normal()).unwrap_or(false),
            Json::Null => false,
            Json::String(ref i) => i.len() > 0,
            Json::Array(ref i) => i.len() > 0,
            Json::Object(ref i) => i.len() > 0,
        }
    }
}
// Unit tests for the serde-backed `Context` (feature "serde_type").
#[cfg(test)]
#[cfg(feature = "serde_type")]
mod test {
    use context::{self, JsonRender, Context};
    use std::collections::{VecDeque, BTreeMap};
    use serde_json::error::Error;
    use serde_json::value::{self, Value as Json, ToJson, Map};
    // Strings render unquoted and unescaped.
    #[test]
    fn test_json_render() {
        let raw = "<p>Hello world</p>\n<p thing=\"hello\"</p>";
        let thing = Json::String(raw.to_string());
        assert_eq!(raw, thing.render());
    }
    // Fixture types with hand-written ToJson impls.
    struct Address {
        city: String,
        country: String,
    }
    impl ToJson for Address {
        fn to_json(&self) -> Result<Json, Error> {
            let mut m = Map::new();
            m.insert("city".to_string(), context::to_json(&self.city));
            m.insert("country".to_string(), context::to_json(&self.country));
            m.to_json()
        }
    }
    struct Person {
        name: String,
        age: i16,
        addr: Address,
        titles: Vec<String>,
    }
    impl ToJson for Person {
        fn to_json(&self) -> Result<Json, Error> {
            let mut m = Map::new();
            m.insert("name".to_string(), context::to_json(&self.name));
            m.insert("age".to_string(), context::to_json(&self.age));
            m.insert("addr".to_string(), context::to_json(&self.addr));
            m.insert("titles".to_string(), context::to_json(&self.titles));
            m.to_json()
        }
    }
    // "this" resolves to the wrapped scalar itself.
    #[test]
    fn test_render() {
        let v = "hello";
        let ctx = Context::wraps(&v.to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "this").render(),
                   v.to_string());
    }
    // Slash, dot, bracketed, quoted and `..` path syntaxes all navigate.
    #[test]
    fn test_navigation() {
        let addr = Address {
            city: "Beijing".to_string(),
            country: "China".to_string(),
        };
        let person = Person {
            name: "Ning Sun".to_string(),
            age: 27,
            addr: addr,
            titles: vec!["programmer".to_string(), "cartographier".to_string()],
        };
        let ctx = Context::wraps(&person);
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "./name/../addr/country").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.[country]").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.[\"country\"]").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.['country']").render(),
                   "China".to_string());
        let v = true;
        let ctx2 = Context::wraps(&v);
        assert_eq!(ctx2.navigate(".", &VecDeque::new(), "this").render(),
                   "true".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles[0]").render(),
                   "programmer".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles.[0]").render(),
                   "programmer".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles[0]/../../age").render(),
                   "27".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "this.titles[0]/../../age").render(),
                   "27".to_string());
    }
    // An explicit "this" key takes precedence over the implicit one.
    #[test]
    fn test_this() {
        let mut map_with_this = Map::new();
        map_with_this.insert("this".to_string(), context::to_json(&"hello"));
        map_with_this.insert("age".to_string(), context::to_json(&5usize));
        let ctx1 = Context::wraps(&map_with_this);
        let mut map_without_this = Map::new();
        map_without_this.insert("age".to_string(), context::to_json(&4usize));
        let ctx2 = Context::wraps(&map_without_this);
        assert_eq!(ctx1.navigate(".", &VecDeque::new(), "this").render(),
                   "hello".to_owned());
        assert_eq!(ctx2.navigate(".", &VecDeque::new(), "age").render(),
                   "4".to_owned());
    }
    // `extend` merges keys; a non-object base stays reachable via "this".
    #[test]
    fn test_extend() {
        let mut map = Map::new();
        map.insert("age".to_string(), context::to_json(&4usize));
        let ctx1 = Context::wraps(&map);
        let s = "hello".to_owned();
        let ctx2 = Context::wraps(&s);
        let mut hash = BTreeMap::new();
        hash.insert("tag".to_owned(), context::to_json(&"h1"));
        let ctx_a1 = ctx1.extend(&hash);
        assert_eq!(ctx_a1.navigate(".", &VecDeque::new(), "age").render(),
                   "4".to_owned());
        assert_eq!(ctx_a1.navigate(".", &VecDeque::new(), "tag").render(),
                   "h1".to_owned());
        let ctx_a2 = ctx2.extend(&hash);
        assert_eq!(ctx_a2.navigate(".", &VecDeque::new(), "this").render(),
                   "hello".to_owned());
        assert_eq!(ctx_a2.navigate(".", &VecDeque::new(), "tag").render(),
                   "h1".to_owned());
    }
}
// Unit tests for the rustc-serialize-backed `Context`.
#[cfg(test)]
#[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
mod test {
    use context::{JsonRender, Context};
    use std::collections::{VecDeque, BTreeMap};
    use serialize::json::{Json, ToJson};
    // Strings render unquoted and unescaped.
    #[test]
    fn test_json_render() {
        let raw = "<p>Hello world</p>\n<p thing=\"hello\"</p>";
        let thing = Json::String(raw.to_string());
        assert_eq!(raw, thing.render());
    }
    // Fixture types with hand-written ToJson impls.
    struct Address {
        city: String,
        country: String,
    }
    impl ToJson for Address {
        fn to_json(&self) -> Json {
            let mut m = BTreeMap::new();
            m.insert("city".to_string(), self.city.to_json());
            m.insert("country".to_string(), self.country.to_json());
            Json::Object(m)
        }
    }
    struct Person {
        name: String,
        age: i16,
        addr: Address,
        titles: Vec<String>,
    }
    impl ToJson for Person {
        fn to_json(&self) -> Json {
            let mut m = BTreeMap::new();
            m.insert("name".to_string(), self.name.to_json());
            m.insert("age".to_string(), self.age.to_json());
            m.insert("addr".to_string(), self.addr.to_json());
            m.insert("titles".to_string(), self.titles.to_json());
            Json::Object(m)
        }
    }
    // "this" resolves to the wrapped scalar itself.
    #[test]
    fn test_render() {
        let v = "hello";
        let ctx = Context::wraps(&v.to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "this").render(),
                   v.to_string());
    }
    // A key merely *starting with* "this" is a normal key.
    #[test]
    fn test_key_name_with_this() {
        let m = btreemap!{
            "this_name".to_string() => "the_value".to_string()
        };
        let ctx = Context::wraps(&m);
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "this_name").render(),
                   "the_value".to_string());
    }
    // Slash, dot, bracketed, quoted and `..` path syntaxes all navigate.
    #[test]
    fn test_navigation() {
        let addr = Address {
            city: "Beijing".to_string(),
            country: "China".to_string(),
        };
        let person = Person {
            name: "Ning Sun".to_string(),
            age: 27,
            addr: addr,
            titles: vec!["programmer".to_string(), "cartographier".to_string()],
        };
        let ctx = Context::wraps(&person);
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "./name/../addr/country").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.[country]").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.[\"country\"]").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.['country']").render(),
                   "China".to_string());
        let v = true;
        let ctx2 = Context::wraps(&v);
        assert_eq!(ctx2.navigate(".", &VecDeque::new(), "this").render(),
                   "true".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles[0]").render(),
                   "programmer".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles.[0]").render(),
                   "programmer".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles[0]/../../age").render(),
                   "27".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "this.titles[0]/../../age").render(),
                   "27".to_string());
    }
    // An explicit "this" key takes precedence over the implicit one.
    #[test]
    fn test_this() {
        let mut map_with_this = BTreeMap::new();
        map_with_this.insert("this".to_string(), "hello".to_json());
        map_with_this.insert("age".to_string(), 5usize.to_json());
        let ctx1 = Context::wraps(&map_with_this);
        let mut map_without_this = BTreeMap::new();
        map_without_this.insert("age".to_string(), 4usize.to_json());
        let ctx2 = Context::wraps(&map_without_this);
        assert_eq!(ctx1.navigate(".", &VecDeque::new(), "this").render(),
                   "hello".to_owned());
        assert_eq!(ctx2.navigate(".", &VecDeque::new(), "age").render(),
                   "4".to_owned());
    }
    // `extend` merges keys; a non-object base stays reachable via "this".
    #[test]
    fn test_extend() {
        let mut map = BTreeMap::new();
        map.insert("age".to_string(), 4usize.to_json());
        let ctx1 = Context::wraps(&map);
        let s = "hello".to_owned();
        let ctx2 = Context::wraps(&s);
        let mut hash = BTreeMap::new();
        hash.insert("tag".to_owned(), "h1".to_json());
        let ctx_a1 = ctx1.extend(&hash);
        assert_eq!(ctx_a1.navigate(".", &VecDeque::new(), "age").render(),
                   "4".to_owned());
        assert_eq!(ctx_a1.navigate(".", &VecDeque::new(), "tag").render(),
                   "h1".to_owned());
        let ctx_a2 = ctx2.extend(&hash);
        assert_eq!(ctx_a2.navigate(".", &VecDeque::new(), "this").render(),
                   "hello".to_owned());
        assert_eq!(ctx_a2.navigate(".", &VecDeque::new(), "tag").render(),
                   "h1".to_owned());
    }
}
(fix) remove unused imports
Signed-off-by: Ning Sun <a92165a57b6171072bd20ae225e0a6a0a2860443@about.me>
#[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
use serialize::json::{Json, ToJson};
#[cfg(feature = "serde_type")]
use serde_json::value::{Value as Json, ToJson, Map};
use pest::prelude::*;
use std::collections::{VecDeque, BTreeMap};
use grammar::{Rdp, Rule};
// Fallback value returned when path navigation misses.
static DEFAULT_VALUE: Json = Json::Null;
// A JSON object used to extend a context's data.
pub type Object = BTreeMap<String, Json>;
/// The context wraps the data you render in your templates.
///
#[derive(Debug, Clone)]
pub struct Context {
    // The root JSON document navigated by `navigate`.
    data: Json,
}
/// Apply the segments of a single parsed path to `path_stack`:
/// `..` pops the last segment, identifier-like segments are pushed
/// (as slices borrowed from `path`), everything else is structural
/// and ignored. Unparsable paths leave the stack untouched.
#[inline]
fn parse_json_visitor_inner<'a>(path_stack: &mut VecDeque<&'a str>, path: &'a str) {
    let mut parser = Rdp::new(StringInput::new(path));
    if !parser.path() {
        return;
    }
    for seg in parser.queue().iter() {
        match seg.rule {
            // Going "up" (`../`) drops the most recent segment.
            Rule::path_up => {
                path_stack.pop_back();
            }
            // Identifier, raw-identifier and numeric segments are
            // pushed as-is.
            Rule::path_id | Rule::path_raw_id | Rule::path_num_id => {
                path_stack.push_back(&path[seg.start..seg.end]);
            }
            // Separators, brackets and wrappers carry no data.
            _ => {}
        }
    }
}
/// Build the full segment stack for `relative_path`, resolved against
/// `base_path` — or, when the relative path starts with one or more
/// `../`, against the matching entry of the saved `path_context`
/// (falling back to `base_path` when it climbs past the top).
#[inline]
fn parse_json_visitor<'a>(path_stack: &mut VecDeque<&'a str>,
                          base_path: &'a str,
                          path_context: &'a VecDeque<String>,
                          relative_path: &'a str) {
    let mut parser = Rdp::new(StringInput::new(relative_path));
    if parser.path() {
        // Count the leading `../` segments of the relative path.
        let ups = parser.queue()
                        .iter()
                        .take_while(|seg| seg.rule == Rule::path_up)
                        .count();
        // `ups` leading `../` select the (ups - 1)-th saved context
        // path; zero `../` (or walking off the top) uses `base_path`.
        let effective_base: &'a str = if ups > 0 {
            match path_context.get(ups - 1) {
                Some(context_base_path) => &context_base_path[..],
                None => base_path,
            }
        } else {
            base_path
        };
        parse_json_visitor_inner(path_stack, effective_base);
        parse_json_visitor_inner(path_stack, relative_path);
    }
    // TODO: report invalid path
}
/// Merge `addition` into `base` (rustc-serialize flavor). A non-object
/// base is kept reachable under the key "this"; addition entries
/// override colliding base keys.
#[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
fn merge_json(base: &Json, addition: &Object) -> Json {
    let mut merged: BTreeMap<String, Json> = match *base {
        Json::Object(ref m) => m.clone(),
        _ => {
            let mut wrapper: BTreeMap<String, Json> = BTreeMap::new();
            wrapper.insert("this".to_owned(), base.clone());
            wrapper
        }
    };
    for (key, value) in addition.iter() {
        merged.insert(key.clone(), value.clone());
    }
    Json::Object(merged)
}
/// Merge `addition` into `base` (serde flavor). A non-object base is
/// kept reachable under the key "this"; addition entries override
/// colliding base keys.
#[cfg(feature="serde_type")]
fn merge_json(base: &Json, addition: &Object) -> Json {
    let mut merged = if let Json::Object(ref m) = *base {
        m.clone()
    } else {
        let mut wrapper = Map::new();
        wrapper.insert("this".to_owned(), base.clone());
        wrapper
    };
    for (key, value) in addition.iter() {
        merged.insert(key.clone(), value.clone());
    }
    Json::Object(merged)
}
impl Context {
    /// Create a context whose data is JSON null.
    pub fn null() -> Context {
        Context { data: Json::Null }
    }
    /// Create a context wrapping the JSON serialization of `e`.
    pub fn wraps<T: ToJson>(e: &T) -> Context {
        Context { data: to_json(e) }
    }
    /// Extend the current context with another JSON object, producing
    /// a new context. For an object this is a plain merge; otherwise
    /// the current value stays reachable under the key `this`.
    pub fn extend(&self, hash: &Object) -> Context {
        Context { data: merge_json(&self.data, hash) }
    }
    /// Navigate the context with a base path and a relative path.
    /// Typically the base path is `RenderContext.get_path()` and the
    /// relative path comes from a helper argument; use `"."` as the
    /// base path to navigate from the top level. Missing segments
    /// resolve to `Json::Null`.
    pub fn navigate(&self,
                    base_path: &str,
                    path_context: &VecDeque<String>,
                    relative_path: &str)
                    -> &Json {
        let mut path_stack: VecDeque<&str> = VecDeque::new();
        parse_json_visitor(&mut path_stack, base_path, path_context, relative_path);
        let mut current: &Json = &self.data;
        for seg in path_stack.iter() {
            // A literal "this" segment is a no-op unless the current
            // object actually contains a key named "this".
            if *seg == "this" &&
               current.as_object().and_then(|m| m.get("this")).is_none() {
                continue;
            }
            current = match *current {
                Json::Array(ref l) => {
                    seg.parse::<usize>()
                       .ok()
                       .and_then(|idx| l.get(idx))
                       .unwrap_or(&DEFAULT_VALUE)
                }
                Json::Object(ref m) => m.get(*seg).unwrap_or(&DEFAULT_VALUE),
                _ => &DEFAULT_VALUE,
            }
        }
        current
    }
    /// Borrow the underlying JSON data.
    pub fn data(&self) -> &Json {
        &self.data
    }
    /// Mutably borrow the underlying JSON data.
    pub fn data_mut(&mut self) -> &mut Json {
        &mut self.data
    }
}
/// Render Json data with default format
pub trait JsonRender {
    /// Produce the user-visible string form of this JSON value.
    fn render(&self) -> String;
}
/// Truthiness test for JSON values (used by conditional rendering).
pub trait JsonTruthy {
    fn is_truthy(&self) -> bool;
}
impl JsonRender for Json {
    /// Render a JSON value for output: strings unquoted, numbers and
    /// booleans via `to_string`, null as the empty string, arrays in a
    /// bracketed comma form, and objects as the placeholder "[object]".
    /// The numeric/boolean arms differ per serialization backend.
    fn render(&self) -> String {
        match *self {
            Json::String(ref s) => s.to_string(),
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::I64(i) => i.to_string(),
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::U64(i) => i.to_string(),
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::F64(f) => f.to_string(),
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::Boolean(i) => i.to_string(),
            #[cfg(feature = "serde_type")]
            Json::Bool(i) => i.to_string(),
            #[cfg(feature = "serde_type")]
            Json::Number(ref n) => n.to_string(),
            Json::Null => "".to_owned(),
            Json::Array(ref a) => {
                let mut buf = String::new();
                buf.push('[');
                for i in a.iter() {
                    buf.push_str(i.render().as_ref());
                    buf.push_str(", ");
                }
                // NOTE(review): non-empty arrays render with a trailing
                // ", " before the closing bracket — confirm this output
                // shape is intended.
                buf.push(']');
                buf
            }
            Json::Object(_) => "[object]".to_owned(),
        }
    }
}
/// Convert any `ToJson` value into a `Json` (rustc-serialize flavor).
#[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
pub fn to_json<T>(src: &T) -> Json
    where T: ToJson
{
    src.to_json()
}
/// Convert any `ToJson` value into a `Json` (serde flavor).
/// Serialization errors are silently collapsed to `Json::Null`.
#[cfg(feature = "serde_type")]
pub fn to_json<T>(src: &T) -> Json
    where T: ToJson
{
    src.to_json().unwrap_or(Json::Null)
}
/// Borrow a `Json` as `&str` when it is a string (rustc-serialize flavor).
#[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
pub fn as_string(src: &Json) -> Option<&str> {
    src.as_string()
}
/// Borrow a `Json` as `&str` when it is a string (serde flavor).
#[cfg(feature = "serde_type")]
pub fn as_string(src: &Json) -> Option<&str> {
    src.as_str()
}
impl JsonTruthy for Json {
    /// Handlebars-style truthiness: null, zero, NaN, and empty
    /// strings/arrays/objects are falsy.
    fn is_truthy(&self) -> bool {
        match *self {
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::I64(i) => i != 0,
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::U64(i) => i != 0,
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::F64(i) => i != 0.0 && !i.is_nan(),
            #[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
            Json::Boolean(ref i) => *i,
            #[cfg(feature = "serde_type")]
            Json::Bool(ref i) => *i,
            #[cfg(feature = "serde_type")]
            // NOTE(review): `is_normal()` is false for 0, NaN, subnormals
            // *and* infinities, so +/-inf is falsy here but truthy in the
            // rustc_ser F64 arm above — confirm the divergence is intended.
            Json::Number(ref n) => n.as_f64().map(|f| f.is_normal()).unwrap_or(false),
            Json::Null => false,
            Json::String(ref i) => i.len() > 0,
            Json::Array(ref i) => i.len() > 0,
            Json::Object(ref i) => i.len() > 0,
        }
    }
}
// Unit tests for the serde-backed `Context` (feature "serde_type").
#[cfg(test)]
#[cfg(feature = "serde_type")]
mod test {
    use context::{self, JsonRender, Context};
    use std::collections::{VecDeque, BTreeMap};
    use serde_json::error::Error;
    use serde_json::value::{Value as Json, ToJson, Map};
    // Strings render unquoted and unescaped.
    #[test]
    fn test_json_render() {
        let raw = "<p>Hello world</p>\n<p thing=\"hello\"</p>";
        let thing = Json::String(raw.to_string());
        assert_eq!(raw, thing.render());
    }
    // Fixture types with hand-written ToJson impls.
    struct Address {
        city: String,
        country: String,
    }
    impl ToJson for Address {
        fn to_json(&self) -> Result<Json, Error> {
            let mut m = Map::new();
            m.insert("city".to_string(), context::to_json(&self.city));
            m.insert("country".to_string(), context::to_json(&self.country));
            m.to_json()
        }
    }
    struct Person {
        name: String,
        age: i16,
        addr: Address,
        titles: Vec<String>,
    }
    impl ToJson for Person {
        fn to_json(&self) -> Result<Json, Error> {
            let mut m = Map::new();
            m.insert("name".to_string(), context::to_json(&self.name));
            m.insert("age".to_string(), context::to_json(&self.age));
            m.insert("addr".to_string(), context::to_json(&self.addr));
            m.insert("titles".to_string(), context::to_json(&self.titles));
            m.to_json()
        }
    }
    // "this" resolves to the wrapped scalar itself.
    #[test]
    fn test_render() {
        let v = "hello";
        let ctx = Context::wraps(&v.to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "this").render(),
                   v.to_string());
    }
    // Slash, dot, bracketed, quoted and `..` path syntaxes all navigate.
    #[test]
    fn test_navigation() {
        let addr = Address {
            city: "Beijing".to_string(),
            country: "China".to_string(),
        };
        let person = Person {
            name: "Ning Sun".to_string(),
            age: 27,
            addr: addr,
            titles: vec!["programmer".to_string(), "cartographier".to_string()],
        };
        let ctx = Context::wraps(&person);
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "./name/../addr/country").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.[country]").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.[\"country\"]").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.['country']").render(),
                   "China".to_string());
        let v = true;
        let ctx2 = Context::wraps(&v);
        assert_eq!(ctx2.navigate(".", &VecDeque::new(), "this").render(),
                   "true".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles[0]").render(),
                   "programmer".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles.[0]").render(),
                   "programmer".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles[0]/../../age").render(),
                   "27".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "this.titles[0]/../../age").render(),
                   "27".to_string());
    }
    // An explicit "this" key takes precedence over the implicit one.
    #[test]
    fn test_this() {
        let mut map_with_this = Map::new();
        map_with_this.insert("this".to_string(), context::to_json(&"hello"));
        map_with_this.insert("age".to_string(), context::to_json(&5usize));
        let ctx1 = Context::wraps(&map_with_this);
        let mut map_without_this = Map::new();
        map_without_this.insert("age".to_string(), context::to_json(&4usize));
        let ctx2 = Context::wraps(&map_without_this);
        assert_eq!(ctx1.navigate(".", &VecDeque::new(), "this").render(),
                   "hello".to_owned());
        assert_eq!(ctx2.navigate(".", &VecDeque::new(), "age").render(),
                   "4".to_owned());
    }
    // `extend` merges keys; a non-object base stays reachable via "this".
    #[test]
    fn test_extend() {
        let mut map = Map::new();
        map.insert("age".to_string(), context::to_json(&4usize));
        let ctx1 = Context::wraps(&map);
        let s = "hello".to_owned();
        let ctx2 = Context::wraps(&s);
        let mut hash = BTreeMap::new();
        hash.insert("tag".to_owned(), context::to_json(&"h1"));
        let ctx_a1 = ctx1.extend(&hash);
        assert_eq!(ctx_a1.navigate(".", &VecDeque::new(), "age").render(),
                   "4".to_owned());
        assert_eq!(ctx_a1.navigate(".", &VecDeque::new(), "tag").render(),
                   "h1".to_owned());
        let ctx_a2 = ctx2.extend(&hash);
        assert_eq!(ctx_a2.navigate(".", &VecDeque::new(), "this").render(),
                   "hello".to_owned());
        assert_eq!(ctx_a2.navigate(".", &VecDeque::new(), "tag").render(),
                   "h1".to_owned());
    }
}
// Unit tests for the rustc-serialize-backed `Context`.
#[cfg(test)]
#[cfg(all(feature = "rustc_ser_type", not(feature = "serde_type")))]
mod test {
    use context::{JsonRender, Context};
    use std::collections::{VecDeque, BTreeMap};
    use serialize::json::{Json, ToJson};
    // Strings render unquoted and unescaped.
    #[test]
    fn test_json_render() {
        let raw = "<p>Hello world</p>\n<p thing=\"hello\"</p>";
        let thing = Json::String(raw.to_string());
        assert_eq!(raw, thing.render());
    }
    // Fixture types with hand-written ToJson impls.
    struct Address {
        city: String,
        country: String,
    }
    impl ToJson for Address {
        fn to_json(&self) -> Json {
            let mut m = BTreeMap::new();
            m.insert("city".to_string(), self.city.to_json());
            m.insert("country".to_string(), self.country.to_json());
            Json::Object(m)
        }
    }
    struct Person {
        name: String,
        age: i16,
        addr: Address,
        titles: Vec<String>,
    }
    impl ToJson for Person {
        fn to_json(&self) -> Json {
            let mut m = BTreeMap::new();
            m.insert("name".to_string(), self.name.to_json());
            m.insert("age".to_string(), self.age.to_json());
            m.insert("addr".to_string(), self.addr.to_json());
            m.insert("titles".to_string(), self.titles.to_json());
            Json::Object(m)
        }
    }
    // "this" resolves to the wrapped scalar itself.
    #[test]
    fn test_render() {
        let v = "hello";
        let ctx = Context::wraps(&v.to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "this").render(),
                   v.to_string());
    }
    // A key merely *starting with* "this" is a normal key.
    #[test]
    fn test_key_name_with_this() {
        let m = btreemap!{
            "this_name".to_string() => "the_value".to_string()
        };
        let ctx = Context::wraps(&m);
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "this_name").render(),
                   "the_value".to_string());
    }
    // Slash, dot, bracketed, quoted and `..` path syntaxes all navigate.
    #[test]
    fn test_navigation() {
        let addr = Address {
            city: "Beijing".to_string(),
            country: "China".to_string(),
        };
        let person = Person {
            name: "Ning Sun".to_string(),
            age: 27,
            addr: addr,
            titles: vec!["programmer".to_string(), "cartographier".to_string()],
        };
        let ctx = Context::wraps(&person);
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "./name/../addr/country").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.[country]").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.[\"country\"]").render(),
                   "China".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "addr.['country']").render(),
                   "China".to_string());
        let v = true;
        let ctx2 = Context::wraps(&v);
        assert_eq!(ctx2.navigate(".", &VecDeque::new(), "this").render(),
                   "true".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles[0]").render(),
                   "programmer".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles.[0]").render(),
                   "programmer".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "titles[0]/../../age").render(),
                   "27".to_string());
        assert_eq!(ctx.navigate(".", &VecDeque::new(), "this.titles[0]/../../age").render(),
                   "27".to_string());
    }
    // An explicit "this" key takes precedence over the implicit one.
    #[test]
    fn test_this() {
        let mut map_with_this = BTreeMap::new();
        map_with_this.insert("this".to_string(), "hello".to_json());
        map_with_this.insert("age".to_string(), 5usize.to_json());
        let ctx1 = Context::wraps(&map_with_this);
        let mut map_without_this = BTreeMap::new();
        map_without_this.insert("age".to_string(), 4usize.to_json());
        let ctx2 = Context::wraps(&map_without_this);
        assert_eq!(ctx1.navigate(".", &VecDeque::new(), "this").render(),
                   "hello".to_owned());
        assert_eq!(ctx2.navigate(".", &VecDeque::new(), "age").render(),
                   "4".to_owned());
    }
    // `extend` merges keys; a non-object base stays reachable via "this".
    #[test]
    fn test_extend() {
        let mut map = BTreeMap::new();
        map.insert("age".to_string(), 4usize.to_json());
        let ctx1 = Context::wraps(&map);
        let s = "hello".to_owned();
        let ctx2 = Context::wraps(&s);
        let mut hash = BTreeMap::new();
        hash.insert("tag".to_owned(), "h1".to_json());
        let ctx_a1 = ctx1.extend(&hash);
        assert_eq!(ctx_a1.navigate(".", &VecDeque::new(), "age").render(),
                   "4".to_owned());
        assert_eq!(ctx_a1.navigate(".", &VecDeque::new(), "tag").render(),
                   "h1".to_owned());
        let ctx_a2 = ctx2.extend(&hash);
        assert_eq!(ctx_a2.navigate(".", &VecDeque::new(), "this").render(),
                   "hello".to_owned());
        assert_eq!(ctx_a2.navigate(".", &VecDeque::new(), "tag").render(),
                   "h1".to_owned());
    }
}
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// TODO we're going to allocate a whole bunch of temp Strings, is it worth
// keeping some scratch mem for this and running our own StrPool?
// TODO for lint violations of names, emit a refactor script
#[macro_use]
extern crate log;
extern crate syntex_syntax as syntax;
extern crate rustc_serialize;
extern crate strings;
extern crate unicode_segmentation;
extern crate regex;
extern crate diff;
extern crate term;
use syntax::ast;
use syntax::codemap::{mk_sp, CodeMap, Span};
use syntax::errors::{Handler, DiagnosticBuilder};
use syntax::errors::emitter::{ColorConfig, EmitterWriter};
use syntax::parse::{self, ParseSess};
use std::io::{stdout, Write};
use std::ops::{Add, Sub};
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::collections::HashMap;
use std::fmt;
use issues::{BadIssueSeeker, Issue};
use filemap::FileMap;
use visitor::FmtVisitor;
use config::Config;
pub use self::summary::Summary;
#[macro_use]
mod utils;
pub mod config;
pub mod filemap;
mod visitor;
mod checkstyle;
mod items;
mod missed_spans;
mod lists;
mod types;
mod expr;
mod imports;
mod issues;
mod rewrite;
mod string;
mod comment;
mod modules;
pub mod rustfmt_diff;
mod chains;
mod macros;
mod patterns;
mod summary;
const MIN_STRING: usize = 10;
// When we get scoped annotations, we should have rustfmt::skip.
const SKIP_ANNOTATION: &'static str = "rustfmt_skip";
/// Types that carry a source span.
pub trait Spanned {
    fn span(&self) -> Span;
}
impl Spanned for ast::Expr {
    fn span(&self) -> Span {
        self.span
    }
}
impl Spanned for ast::Pat {
    fn span(&self) -> Span {
        self.span
    }
}
impl Spanned for ast::Ty {
    fn span(&self) -> Span {
        self.span
    }
}
impl Spanned for ast::Arg {
    fn span(&self) -> Span {
        // A named argument spans from its pattern to its type; otherwise
        // only the type carries a usable span.
        if items::is_named_arg(self) {
            mk_sp(self.pat.span.lo, self.ty.span.hi)
        } else {
            self.ty.span
        }
    }
}
// Tracks the current indentation as a block-indent component plus an
// extra alignment component (e.g. for visually aligned continuations).
#[derive(Copy, Clone, Debug)]
pub struct Indent {
    // Width of the block indent, in characters. Must be a multiple of
    // Config::tab_spaces.
    pub block_indent: usize,
    // Alignment in characters.
    pub alignment: usize,
}
impl Indent {
    pub fn new(block_indent: usize, alignment: usize) -> Indent {
        Indent {
            block_indent: block_indent,
            alignment: alignment,
        }
    }
    // Zero indent, zero alignment.
    pub fn empty() -> Indent {
        Indent::new(0, 0)
    }
    // Returns a copy indented one level deeper.
    pub fn block_indent(mut self, config: &Config) -> Indent {
        self.block_indent += config.tab_spaces;
        self
    }
    // Returns a copy one level shallower. Caller must ensure at least one
    // level of block indent exists (usize subtraction).
    pub fn block_unindent(mut self, config: &Config) -> Indent {
        self.block_indent -= config.tab_spaces;
        self
    }
    // Total visual width in characters.
    pub fn width(&self) -> usize {
        self.block_indent + self.alignment
    }
    // Renders the indent as whitespace: hard tabs for the block-indent part
    // when configured, spaces otherwise; alignment is always spaces.
    pub fn to_string(&self, config: &Config) -> String {
        let (num_tabs, num_spaces) = if config.hard_tabs {
            (self.block_indent / config.tab_spaces, self.alignment)
        } else {
            (0, self.block_indent + self.alignment)
        };
        let num_chars = num_tabs + num_spaces;
        let mut indent = String::with_capacity(num_chars);
        for _ in 0..num_tabs {
            indent.push('\t')
        }
        for _ in 0..num_spaces {
            indent.push(' ')
        }
        indent
    }
}
// Adding two Indents sums both components.
impl Add for Indent {
    type Output = Indent;
    fn add(self, rhs: Indent) -> Indent {
        Indent {
            block_indent: self.block_indent + rhs.block_indent,
            alignment: self.alignment + rhs.alignment,
        }
    }
}
impl Sub for Indent {
    type Output = Indent;
    fn sub(self, rhs: Indent) -> Indent {
        Indent::new(self.block_indent - rhs.block_indent,
                    self.alignment - rhs.alignment)
    }
}
// Adding/subtracting a bare usize adjusts only the alignment component.
impl Add<usize> for Indent {
    type Output = Indent;
    fn add(self, rhs: usize) -> Indent {
        Indent::new(self.block_indent, self.alignment + rhs)
    }
}
impl Sub<usize> for Indent {
    type Output = Indent;
    fn sub(self, rhs: usize) -> Indent {
        Indent::new(self.block_indent, self.alignment - rhs)
    }
}
// Formatting problems rustfmt detects but cannot fix itself.
pub enum ErrorKind {
    // Line has exceeded character limit
    LineOverflow,
    // Line ends in whitespace
    TrailingWhitespace,
    // TO-DO or FIX-ME item without an issue number
    BadIssue(Issue),
}
// Human-readable description used when printing the report.
impl fmt::Display for ErrorKind {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match *self {
            ErrorKind::LineOverflow => write!(fmt, "line exceeded maximum length"),
            ErrorKind::TrailingWhitespace => write!(fmt, "left behind trailing whitespace"),
            ErrorKind::BadIssue(issue) => write!(fmt, "found {}", issue),
        }
    }
}
// Formatting errors that are identified *after* rustfmt has run.
pub struct FormattingError {
    // 1-based line number the problem was found on.
    line: u32,
    kind: ErrorKind,
}
impl FormattingError {
    // Text printed before "file:line" in the report; bad issues are mere
    // warnings, the other kinds are formatting failures.
    fn msg_prefix(&self) -> &str {
        match self.kind {
            ErrorKind::LineOverflow |
            ErrorKind::TrailingWhitespace => "Rustfmt failed at",
            ErrorKind::BadIssue(_) => "WARNING:",
        }
    }
    // Text appended after the error description.
    fn msg_suffix(&self) -> &str {
        match self.kind {
            ErrorKind::LineOverflow |
            ErrorKind::TrailingWhitespace => "(sorry)",
            ErrorKind::BadIssue(_) => "",
        }
    }
}
pub struct FormatReport {
// Maps stringified file paths to their associated formatting errors.
file_error_map: HashMap<String, Vec<FormattingError>>,
}
impl FormatReport {
fn new() -> FormatReport {
FormatReport { file_error_map: HashMap::new() }
}
pub fn warning_count(&self) -> usize {
self.file_error_map.iter().map(|(_, ref errors)| errors.len()).fold(0, |acc, x| acc + x)
}
pub fn has_warnings(&self) -> bool {
self.warning_count() > 0
}
}
impl fmt::Display for FormatReport {
    // Prints all the formatting errors.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // One "<prefix> <file>:<line>: <kind> <suffix>" line per error.
        for (file, errors) in &self.file_error_map {
            for error in errors {
                try!(write!(fmt,
                            "{} {}:{}: {} {}\n",
                            error.msg_prefix(),
                            file,
                            error.line,
                            error.kind,
                            error.msg_suffix()));
            }
        }
        Ok(())
    }
}
// Formatting which depends on the AST.
fn format_ast(krate: &ast::Crate,
              parse_session: &ParseSess,
              main_file: &Path,
              config: &Config)
              -> FileMap {
    let mut file_map = FileMap::new();
    // Format every module file reachable from the crate root.
    for (path, module) in modules::list_files(krate, parse_session.codemap()) {
        // With skip_children set, only the main file itself is formatted.
        if config.skip_children && path.as_path() != main_file {
            continue;
        }
        let path = path.to_str().unwrap();
        if config.verbose {
            println!("Formatting {}", path);
        }
        let mut visitor = FmtVisitor::from_codemap(parse_session, config);
        visitor.format_separate_mod(module);
        file_map.insert(path.to_owned(), visitor.buffer);
    }
    file_map
}
// Formatting done on a char by char or line by line basis.
// TODO(#209) warn on bad license
// TODO(#20) other stuff for parity with make tidy
fn format_lines(file_map: &mut FileMap, config: &Config) -> FormatReport {
    // Files whose trailing blank lines should be truncated: (path, new length).
    let mut truncate_todo = Vec::new();
    let mut report = FormatReport::new();
    // Iterate over the chars in the file map.
    for (f, text) in file_map.iter() {
        // Trailing-whitespace runs found: (line, whitespace start, line end).
        let mut trims = vec![];
        // Offset of the first char of a possible trailing-whitespace run.
        let mut last_wspace: Option<usize> = None;
        let mut line_len = 0;
        let mut cur_line = 1;
        // Count of consecutive newlines ending at the current position.
        let mut newline_count = 0;
        let mut errors = vec![];
        let mut issue_seeker = BadIssueSeeker::new(config.report_todo, config.report_fixme);
        for (c, b) in text.chars() {
            // Carriage returns count toward line length but are otherwise skipped.
            if c == '\r' {
                line_len += c.len_utf8();
                continue;
            }
            // Add warnings for bad todos/ fixmes
            if let Some(issue) = issue_seeker.inspect(c) {
                errors.push(FormattingError {
                    line: cur_line,
                    kind: ErrorKind::BadIssue(issue),
                });
            }
            if c == '\n' {
                // Check for (and record) trailing whitespace.
                if let Some(lw) = last_wspace {
                    trims.push((cur_line, lw, b));
                    // Measure the width as if the whitespace were removed.
                    line_len -= b - lw;
                }
                // Check for any line width errors we couldn't correct.
                if line_len > config.max_width {
                    errors.push(FormattingError {
                        line: cur_line,
                        kind: ErrorKind::LineOverflow,
                    });
                }
                line_len = 0;
                cur_line += 1;
                newline_count += 1;
                last_wspace = None;
            } else {
                newline_count = 0;
                line_len += c.len_utf8();
                if c.is_whitespace() {
                    if last_wspace.is_none() {
                        last_wspace = Some(b);
                    }
                } else {
                    last_wspace = None;
                }
            }
        }
        // More than one trailing newline: schedule the extras for removal.
        // (file_map cannot be mutated while it is being iterated.)
        if newline_count > 1 {
            debug!("track truncate: {} {} {}", f, text.len, newline_count);
            truncate_todo.push((f.to_owned(), text.len - newline_count + 1))
        }
        for &(l, _, _) in &trims {
            errors.push(FormattingError {
                line: l,
                kind: ErrorKind::TrailingWhitespace,
            });
        }
        report.file_error_map.insert(f.to_owned(), errors);
    }
    for (f, l) in truncate_todo {
        file_map.get_mut(&f).unwrap().truncate(l);
    }
    report
}
// Parses a crate either from a file on disk or from an in-memory string.
fn parse_input(input: Input, parse_session: &ParseSess) -> Result<ast::Crate, DiagnosticBuilder> {
    match input {
        Input::File(file) => parse::parse_crate_from_file(&file, Vec::new(), &parse_session),
        Input::Text(text) => {
            // Text input is attributed to the pseudo-filename "stdin".
            parse::parse_crate_from_source_str("stdin".to_owned(), text, Vec::new(), &parse_session)
        }
    }
}
// Parses the input, formats the AST, then post-checks the formatted text.
// Returns the run summary, the formatted files, and the per-line report.
pub fn format_input(input: Input, config: &Config) -> (Summary, FileMap, FormatReport) {
    let mut summary = Summary::new();
    let codemap = Rc::new(CodeMap::new());
    // While parsing, emit diagnostics to the terminal.
    let tty_handler = Handler::with_tty_emitter(ColorConfig::Auto,
                                                None,
                                                true,
                                                false,
                                                codemap.clone());
    let mut parse_session = ParseSess::with_span_handler(tty_handler, codemap.clone());
    let main_file = match input {
        Input::File(ref file) => file.clone(),
        Input::Text(..) => PathBuf::from("stdin"),
    };
    let krate = match parse_input(input, &parse_session) {
        Ok(krate) => krate,
        Err(mut diagnostic) => {
            diagnostic.emit();
            summary.add_parsing_error();
            // Nothing can be formatted without a parsed crate.
            return (summary, FileMap::new(), FormatReport::new());
        }
    };
    if parse_session.span_diagnostic.has_errors() {
        summary.add_parsing_error();
    }
    // Suppress error output after parsing.
    let silent_emitter = Box::new(EmitterWriter::new(Box::new(Vec::new()), None, codemap.clone()));
    parse_session.span_diagnostic = Handler::with_emitter(true, false, silent_emitter);
    let mut file_map = format_ast(&krate, &parse_session, &main_file, config);
    // For some reason, the codemap does not include terminating
    // newlines so we must add one on for each file. This is sad.
    filemap::append_newlines(&mut file_map);
    let report = format_lines(&mut file_map, config);
    if report.has_warnings() {
        summary.add_formatting_error();
    }
    (summary, file_map, report)
}
/// Source to format: a file on disk or text (e.g. from stdin).
pub enum Input {
    File(PathBuf),
    Text(String),
}
// Formats the input and writes the results to stdout, returning a summary
// of what happened. Formatting problems are reported via `msg!` (stderr).
pub fn run(input: Input, config: &Config) -> Summary {
    let (mut summary, file_map, report) = format_input(input, config);
    // Only print the report when it actually contains warnings; an empty
    // report would otherwise emit a blank line to stderr on every run.
    if report.has_warnings() {
        msg!("{}", report);
    }
    let mut out = stdout();
    let write_result = filemap::write_all_files(&file_map, &mut out, config);
    if let Err(msg) = write_result {
        msg!("Error writing files: {}", msg);
        summary.add_operational_error();
    }
    summary
}
Don't print empty lines to stderr (#940)
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// TODO we're going to allocate a whole bunch of temp Strings, is it worth
// keeping some scratch mem for this and running our own StrPool?
// TODO for lint violations of names, emit a refactor script
#[macro_use]
extern crate log;
extern crate syntex_syntax as syntax;
extern crate rustc_serialize;
extern crate strings;
extern crate unicode_segmentation;
extern crate regex;
extern crate diff;
extern crate term;
use syntax::ast;
use syntax::codemap::{mk_sp, CodeMap, Span};
use syntax::errors::{Handler, DiagnosticBuilder};
use syntax::errors::emitter::{ColorConfig, EmitterWriter};
use syntax::parse::{self, ParseSess};
use std::io::{stdout, Write};
use std::ops::{Add, Sub};
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::collections::HashMap;
use std::fmt;
use issues::{BadIssueSeeker, Issue};
use filemap::FileMap;
use visitor::FmtVisitor;
use config::Config;
pub use self::summary::Summary;
#[macro_use]
mod utils;
pub mod config;
pub mod filemap;
mod visitor;
mod checkstyle;
mod items;
mod missed_spans;
mod lists;
mod types;
mod expr;
mod imports;
mod issues;
mod rewrite;
mod string;
mod comment;
mod modules;
pub mod rustfmt_diff;
mod chains;
mod macros;
mod patterns;
mod summary;
const MIN_STRING: usize = 10;
// When we get scoped annotations, we should have rustfmt::skip.
const SKIP_ANNOTATION: &'static str = "rustfmt_skip";
/// Types that carry a source span.
pub trait Spanned {
    fn span(&self) -> Span;
}
impl Spanned for ast::Expr {
    fn span(&self) -> Span {
        self.span
    }
}
impl Spanned for ast::Pat {
    fn span(&self) -> Span {
        self.span
    }
}
impl Spanned for ast::Ty {
    fn span(&self) -> Span {
        self.span
    }
}
impl Spanned for ast::Arg {
    fn span(&self) -> Span {
        // A named argument spans from its pattern to its type; otherwise
        // only the type carries a usable span.
        if items::is_named_arg(self) {
            mk_sp(self.pat.span.lo, self.ty.span.hi)
        } else {
            self.ty.span
        }
    }
}
// Tracks the current indentation as a block-indent component plus an
// extra alignment component (e.g. for visually aligned continuations).
#[derive(Copy, Clone, Debug)]
pub struct Indent {
    // Width of the block indent, in characters. Must be a multiple of
    // Config::tab_spaces.
    pub block_indent: usize,
    // Alignment in characters.
    pub alignment: usize,
}
impl Indent {
    pub fn new(block_indent: usize, alignment: usize) -> Indent {
        Indent {
            block_indent: block_indent,
            alignment: alignment,
        }
    }
    // Zero indent, zero alignment.
    pub fn empty() -> Indent {
        Indent::new(0, 0)
    }
    // Returns a copy indented one level deeper.
    pub fn block_indent(mut self, config: &Config) -> Indent {
        self.block_indent += config.tab_spaces;
        self
    }
    // Returns a copy one level shallower. Caller must ensure at least one
    // level of block indent exists (usize subtraction).
    pub fn block_unindent(mut self, config: &Config) -> Indent {
        self.block_indent -= config.tab_spaces;
        self
    }
    // Total visual width in characters.
    pub fn width(&self) -> usize {
        self.block_indent + self.alignment
    }
    // Renders the indent as whitespace: hard tabs for the block-indent part
    // when configured, spaces otherwise; alignment is always spaces.
    pub fn to_string(&self, config: &Config) -> String {
        let (num_tabs, num_spaces) = if config.hard_tabs {
            (self.block_indent / config.tab_spaces, self.alignment)
        } else {
            (0, self.block_indent + self.alignment)
        };
        let num_chars = num_tabs + num_spaces;
        let mut indent = String::with_capacity(num_chars);
        for _ in 0..num_tabs {
            indent.push('\t')
        }
        for _ in 0..num_spaces {
            indent.push(' ')
        }
        indent
    }
}
// Adding two Indents sums both components.
impl Add for Indent {
    type Output = Indent;
    fn add(self, rhs: Indent) -> Indent {
        Indent {
            block_indent: self.block_indent + rhs.block_indent,
            alignment: self.alignment + rhs.alignment,
        }
    }
}
impl Sub for Indent {
    type Output = Indent;
    fn sub(self, rhs: Indent) -> Indent {
        Indent::new(self.block_indent - rhs.block_indent,
                    self.alignment - rhs.alignment)
    }
}
// Adding/subtracting a bare usize adjusts only the alignment component.
impl Add<usize> for Indent {
    type Output = Indent;
    fn add(self, rhs: usize) -> Indent {
        Indent::new(self.block_indent, self.alignment + rhs)
    }
}
impl Sub<usize> for Indent {
    type Output = Indent;
    fn sub(self, rhs: usize) -> Indent {
        Indent::new(self.block_indent, self.alignment - rhs)
    }
}
// Formatting problems rustfmt detects but cannot fix itself.
pub enum ErrorKind {
    // Line has exceeded character limit
    LineOverflow,
    // Line ends in whitespace
    TrailingWhitespace,
    // TO-DO or FIX-ME item without an issue number
    BadIssue(Issue),
}
// Human-readable description used when printing the report.
impl fmt::Display for ErrorKind {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match *self {
            ErrorKind::LineOverflow => write!(fmt, "line exceeded maximum length"),
            ErrorKind::TrailingWhitespace => write!(fmt, "left behind trailing whitespace"),
            ErrorKind::BadIssue(issue) => write!(fmt, "found {}", issue),
        }
    }
}
// Formatting errors that are identified *after* rustfmt has run.
pub struct FormattingError {
    // 1-based line number the problem was found on.
    line: u32,
    kind: ErrorKind,
}
impl FormattingError {
    // Text printed before "file:line" in the report; bad issues are mere
    // warnings, the other kinds are formatting failures.
    fn msg_prefix(&self) -> &str {
        match self.kind {
            ErrorKind::LineOverflow |
            ErrorKind::TrailingWhitespace => "Rustfmt failed at",
            ErrorKind::BadIssue(_) => "WARNING:",
        }
    }
    // Text appended after the error description.
    fn msg_suffix(&self) -> &str {
        match self.kind {
            ErrorKind::LineOverflow |
            ErrorKind::TrailingWhitespace => "(sorry)",
            ErrorKind::BadIssue(_) => "",
        }
    }
}
pub struct FormatReport {
// Maps stringified file paths to their associated formatting errors.
file_error_map: HashMap<String, Vec<FormattingError>>,
}
impl FormatReport {
fn new() -> FormatReport {
FormatReport { file_error_map: HashMap::new() }
}
pub fn warning_count(&self) -> usize {
self.file_error_map.iter().map(|(_, ref errors)| errors.len()).fold(0, |acc, x| acc + x)
}
pub fn has_warnings(&self) -> bool {
self.warning_count() > 0
}
}
impl fmt::Display for FormatReport {
    // Prints all the formatting errors.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // One "<prefix> <file>:<line>: <kind> <suffix>" line per error.
        for (file, errors) in &self.file_error_map {
            for error in errors {
                try!(write!(fmt,
                            "{} {}:{}: {} {}\n",
                            error.msg_prefix(),
                            file,
                            error.line,
                            error.kind,
                            error.msg_suffix()));
            }
        }
        Ok(())
    }
}
// Formatting which depends on the AST.
fn format_ast(krate: &ast::Crate,
              parse_session: &ParseSess,
              main_file: &Path,
              config: &Config)
              -> FileMap {
    let mut file_map = FileMap::new();
    // Format every module file reachable from the crate root.
    for (path, module) in modules::list_files(krate, parse_session.codemap()) {
        // With skip_children set, only the main file itself is formatted.
        if config.skip_children && path.as_path() != main_file {
            continue;
        }
        let path = path.to_str().unwrap();
        if config.verbose {
            println!("Formatting {}", path);
        }
        let mut visitor = FmtVisitor::from_codemap(parse_session, config);
        visitor.format_separate_mod(module);
        file_map.insert(path.to_owned(), visitor.buffer);
    }
    file_map
}
// Formatting done on a char by char or line by line basis.
// TODO(#209) warn on bad license
// TODO(#20) other stuff for parity with make tidy
fn format_lines(file_map: &mut FileMap, config: &Config) -> FormatReport {
    // Files whose trailing blank lines should be truncated: (path, new length).
    let mut truncate_todo = Vec::new();
    let mut report = FormatReport::new();
    // Iterate over the chars in the file map.
    for (f, text) in file_map.iter() {
        // Trailing-whitespace runs found: (line, whitespace start, line end).
        let mut trims = vec![];
        // Offset of the first char of a possible trailing-whitespace run.
        let mut last_wspace: Option<usize> = None;
        let mut line_len = 0;
        let mut cur_line = 1;
        // Count of consecutive newlines ending at the current position.
        let mut newline_count = 0;
        let mut errors = vec![];
        let mut issue_seeker = BadIssueSeeker::new(config.report_todo, config.report_fixme);
        for (c, b) in text.chars() {
            // Carriage returns count toward line length but are otherwise skipped.
            if c == '\r' {
                line_len += c.len_utf8();
                continue;
            }
            // Add warnings for bad todos/ fixmes
            if let Some(issue) = issue_seeker.inspect(c) {
                errors.push(FormattingError {
                    line: cur_line,
                    kind: ErrorKind::BadIssue(issue),
                });
            }
            if c == '\n' {
                // Check for (and record) trailing whitespace.
                if let Some(lw) = last_wspace {
                    trims.push((cur_line, lw, b));
                    // Measure the width as if the whitespace were removed.
                    line_len -= b - lw;
                }
                // Check for any line width errors we couldn't correct.
                if line_len > config.max_width {
                    errors.push(FormattingError {
                        line: cur_line,
                        kind: ErrorKind::LineOverflow,
                    });
                }
                line_len = 0;
                cur_line += 1;
                newline_count += 1;
                last_wspace = None;
            } else {
                newline_count = 0;
                line_len += c.len_utf8();
                if c.is_whitespace() {
                    if last_wspace.is_none() {
                        last_wspace = Some(b);
                    }
                } else {
                    last_wspace = None;
                }
            }
        }
        // More than one trailing newline: schedule the extras for removal.
        // (file_map cannot be mutated while it is being iterated.)
        if newline_count > 1 {
            debug!("track truncate: {} {} {}", f, text.len, newline_count);
            truncate_todo.push((f.to_owned(), text.len - newline_count + 1))
        }
        for &(l, _, _) in &trims {
            errors.push(FormattingError {
                line: l,
                kind: ErrorKind::TrailingWhitespace,
            });
        }
        report.file_error_map.insert(f.to_owned(), errors);
    }
    for (f, l) in truncate_todo {
        file_map.get_mut(&f).unwrap().truncate(l);
    }
    report
}
// Parses a crate either from a file on disk or from an in-memory string.
fn parse_input(input: Input, parse_session: &ParseSess) -> Result<ast::Crate, DiagnosticBuilder> {
    match input {
        Input::File(file) => parse::parse_crate_from_file(&file, Vec::new(), &parse_session),
        Input::Text(text) => {
            // Text input is attributed to the pseudo-filename "stdin".
            parse::parse_crate_from_source_str("stdin".to_owned(), text, Vec::new(), &parse_session)
        }
    }
}
// Parses the input, formats the AST, then post-checks the formatted text.
// Returns the run summary, the formatted files, and the per-line report.
pub fn format_input(input: Input, config: &Config) -> (Summary, FileMap, FormatReport) {
    let mut summary = Summary::new();
    let codemap = Rc::new(CodeMap::new());
    // While parsing, emit diagnostics to the terminal.
    let tty_handler = Handler::with_tty_emitter(ColorConfig::Auto,
                                                None,
                                                true,
                                                false,
                                                codemap.clone());
    let mut parse_session = ParseSess::with_span_handler(tty_handler, codemap.clone());
    let main_file = match input {
        Input::File(ref file) => file.clone(),
        Input::Text(..) => PathBuf::from("stdin"),
    };
    let krate = match parse_input(input, &parse_session) {
        Ok(krate) => krate,
        Err(mut diagnostic) => {
            diagnostic.emit();
            summary.add_parsing_error();
            // Nothing can be formatted without a parsed crate.
            return (summary, FileMap::new(), FormatReport::new());
        }
    };
    if parse_session.span_diagnostic.has_errors() {
        summary.add_parsing_error();
    }
    // Suppress error output after parsing.
    let silent_emitter = Box::new(EmitterWriter::new(Box::new(Vec::new()), None, codemap.clone()));
    parse_session.span_diagnostic = Handler::with_emitter(true, false, silent_emitter);
    let mut file_map = format_ast(&krate, &parse_session, &main_file, config);
    // For some reason, the codemap does not include terminating
    // newlines so we must add one on for each file. This is sad.
    filemap::append_newlines(&mut file_map);
    let report = format_lines(&mut file_map, config);
    if report.has_warnings() {
        summary.add_formatting_error();
    }
    (summary, file_map, report)
}
/// Source to format: a file on disk or text (e.g. from stdin).
pub enum Input {
    File(PathBuf),
    Text(String),
}
// Formats the input and writes the results to stdout, returning a summary
// of what happened. Formatting problems are reported via `msg!` (stderr).
pub fn run(input: Input, config: &Config) -> Summary {
    let (mut summary, file_map, report) = format_input(input, config);
    // Only print the report when it contains warnings, so an empty report
    // does not emit a blank line to stderr.
    if report.has_warnings() {
        msg!("{}", report);
    }
    let mut out = stdout();
    let write_result = filemap::write_all_files(&file_map, &mut out, config);
    if let Err(msg) = write_result {
        msg!("Error writing files: {}", msg);
        summary.add_operational_error();
    }
    summary
}
|
use super::gl;
// Zero-sized handle for issuing calls against the current GL context.
pub struct Context;
impl Context {
    // Wraps the current context. Unsafe: the caller must guarantee that a
    // GL context is actually current on this thread.
    pub unsafe fn current_context() -> Self {
        Context
    }
    // Sets the color used by `clear`.
    pub fn clear_color(&mut self, color: super::Color) {
        unsafe {
            gl::ClearColor(color.r, color.g, color.b, color.a);
        }
    }
    // Clears the buffers selected by `buffers`.
    pub fn clear(&mut self, buffers: super::BufferBits) {
        unsafe {
            gl::Clear(buffers.bits())
        }
    }
}
Add `Context.gen_buffer`
use std::mem;
use super::gl;
use super::gl_lib::types::*;
use super::Buffer;
// Zero-sized handle for issuing calls against the current GL context.
pub struct Context;

impl Context {
    // Wraps the current context. Unsafe: the caller must guarantee that a
    // GL context is actually current on this thread.
    pub unsafe fn current_context() -> Self {
        Context
    }

    // Sets the color used by `clear`.
    pub fn clear_color(&mut self, color: super::Color) {
        unsafe {
            gl::ClearColor(color.r, color.g, color.b, color.a);
        }
    }

    // Clears the buffers selected by `buffers`.
    pub fn clear(&mut self, buffers: super::BufferBits) {
        unsafe {
            gl::Clear(buffers.bits())
        }
    }

    // Generates a new GL buffer object and wraps its id.
    pub fn gen_buffer(&self) -> Buffer {
        unsafe {
            // Zero-initialize instead of mem::uninitialized(): producing an
            // uninitialized integer is undefined behavior, and GenBuffers
            // overwrites the value before it is read anyway.
            let mut id: GLuint = 0;
            gl::GenBuffers(1, &mut id as *mut GLuint);
            Buffer::from_id(id)
        }
    }
}
|
#![feature(no_std)]
#![feature(core)]
#![no_std]
#![crate_name = "multiboot"]
#![crate_type = "lib"]
extern crate core;
#[cfg(test)]
#[macro_use]
extern crate std;
use core::mem::{transmute};
/// Value that is in rax after multiboot jumps to our entry point
pub const SIGNATURE_RAX: u64 = 0x2BADB002;
// Memory-region type; values follow the multiboot memory-map type field.
#[derive(Debug)]
pub enum MemType {
    RAM = 1,
    Unusable = 2,
}
/// Multiboot struct clients mainly interact with
/// To create this use Multiboot::new()
pub struct Multiboot<'a> {
    header: &'a MultibootHeader,
    // Translates physical addresses into kernel-accessible addresses.
    paddr_to_vaddr: fn(u64) -> u64,
}
/// Representation of Multiboot header according to specification.
#[derive(Debug)]
#[repr(packed)]
struct MultibootHeader {
    flags: u32,
    mem_lower: u32,
    mem_upper: u32,
    boot_device: u32,
    cmdline: u32,
    mods_count: u32,
    mods_addr: u32,
    elf_symbols: ElfSymbols,
    mmap_length: u32,
    mmap_addr: u32,
}
/// Multiboot format of the MMAP buffer.
/// Note that size is defined to be at -4 bytes.
#[derive(Debug)]
#[repr(packed)]
struct MemEntry {
    size: u32,
    base_addr: u64,
    length: u64,
    mtype: u32
}
// ELF section header table information passed by the bootloader.
#[derive(Debug)]
#[repr(packed)]
struct ElfSymbols {
    num: u32,
    size: u32,
    addr: u32,
    shndx: u32,
}
impl<'a> Multiboot<'a> {
    /// Initializes the multiboot structure.
    ///
    /// # Arguments
    ///
    /// * `mboot_ptr` - The physical address of the multiboot header. On qemu for example
    /// this is typically at 0x9500.
    /// * `paddr_to_vaddr` - Translation of the physical addresses into kernel addresses.
    ///
    /// `paddr_to_vaddr` translates physical it into a kernel accessible address.
    /// The simplest paddr_to_vaddr function would for example be just the identity
    /// function. But this may vary depending on how your page table layout looks like.
    ///
    pub fn new(mboot_ptr: u64, paddr_to_vaddr: fn(paddr: u64) -> u64) -> Multiboot<'a> {
        let header = paddr_to_vaddr(mboot_ptr);
        // Reinterpret the translated address as a header reference; soundness
        // relies on the bootloader having placed a valid header there.
        let mb: &MultibootHeader = unsafe { transmute::<u64, &MultibootHeader>(header) };
        Multiboot { header: mb, paddr_to_vaddr: paddr_to_vaddr }
    }
    /// Discover all memory regions in the multiboot memory map.
    ///
    /// # Arguments
    ///
    /// * `discovery_callback` - Function to notify your memory system about regions.
    ///
    pub fn find_memory(&'a self, discovery_callback: fn(base: u64, length: u64, MemType))
    {
        let paddr_to_vaddr = self.paddr_to_vaddr;
        let mut current = self.header.mmap_addr;
        let end = self.header.mmap_addr + self.header.mmap_length;
        // Walk the variable-length entries of the memory map.
        while current < end
        {
            let memory_region: &MemEntry = unsafe { transmute::<u64, &MemEntry>(paddr_to_vaddr(current as u64)) };
            let mtype = match memory_region.mtype {
                1 => MemType::RAM,
                2 => MemType::Unusable,
                // Unknown region types are treated conservatively as unusable.
                _ => MemType::Unusable
            };
            discovery_callback(memory_region.base_addr, memory_region.length, mtype);
            // `size` does not include the size field itself, hence the +4.
            current += memory_region.size + 4;
        }
    }
}
Use FnMut closure to inform about new memory regions.
#![feature(no_std)]
#![feature(core)]
#![no_std]
#![crate_name = "multiboot"]
#![crate_type = "lib"]
extern crate core;
#[cfg(test)]
extern crate std;
use core::mem::{transmute};
use core::ops::FnMut;
/// Value that is in rax after multiboot jumps to our entry point
pub const SIGNATURE_RAX: u64 = 0x2BADB002;
// Memory-region type; values follow the multiboot memory-map type field.
#[derive(Debug, PartialEq, Eq)]
pub enum MemType {
    RAM = 1,
    Unusable = 2,
}
/// Multiboot struct clients mainly interact with
/// To create this use Multiboot::new()
pub struct Multiboot<'a> {
    header: &'a MultibootHeader,
    // Translates physical addresses into kernel-accessible addresses.
    paddr_to_vaddr: fn(u64) -> u64,
}
/// Representation of Multiboot header according to specification.
#[derive(Debug)]
#[repr(packed)]
struct MultibootHeader {
    flags: u32,
    mem_lower: u32,
    mem_upper: u32,
    boot_device: u32,
    cmdline: u32,
    mods_count: u32,
    mods_addr: u32,
    elf_symbols: ElfSymbols,
    mmap_length: u32,
    mmap_addr: u32,
}
/// Multiboot format of the MMAP buffer.
/// Note that size is defined to be at -4 bytes.
#[derive(Debug)]
#[repr(packed)]
struct MemEntry {
    size: u32,
    base_addr: u64,
    length: u64,
    mtype: u32
}
// ELF section header table information passed by the bootloader.
#[derive(Debug)]
#[repr(packed)]
struct ElfSymbols {
    num: u32,
    size: u32,
    addr: u32,
    shndx: u32,
}
impl<'a> Multiboot<'a> {
    /// Initializes the multiboot structure.
    ///
    /// # Arguments
    ///
    /// * `mboot_ptr` - The physical address of the multiboot header. On qemu for example
    /// this is typically at 0x9500.
    /// * `paddr_to_vaddr` - Translation of the physical addresses into kernel addresses.
    ///
    /// `paddr_to_vaddr` translates physical it into a kernel accessible address.
    /// The simplest paddr_to_vaddr function would for example be just the identity
    /// function. But this may vary depending on how your page table layout looks like.
    ///
    pub fn new(mboot_ptr: u64, paddr_to_vaddr: fn(paddr: u64) -> u64) -> Multiboot<'a> {
        let header = paddr_to_vaddr(mboot_ptr);
        // Reinterpret the translated address as a header reference; soundness
        // relies on the bootloader having placed a valid header there.
        let mb: &MultibootHeader = unsafe { transmute::<u64, &MultibootHeader>(header) };
        Multiboot { header: mb, paddr_to_vaddr: paddr_to_vaddr }
    }
    /// Discover all memory regions in the multiboot memory map.
    ///
    /// # Arguments
    ///
    /// * `discovery_callback` - Function to notify your memory system about regions.
    ///
    pub fn find_memory<F: FnMut(u64, u64, MemType)>(&'a self, mut discovery_callback: F)
    {
        let paddr_to_vaddr = self.paddr_to_vaddr;
        let mut current = self.header.mmap_addr;
        let end = self.header.mmap_addr + self.header.mmap_length;
        // Walk the variable-length entries of the memory map.
        while current < end
        {
            let memory_region: &MemEntry = unsafe { transmute::<u64, &MemEntry>(paddr_to_vaddr(current as u64)) };
            let mtype = match memory_region.mtype {
                1 => MemType::RAM,
                2 => MemType::Unusable,
                // Unknown region types are treated conservatively as unusable.
                _ => MemType::Unusable
            };
            discovery_callback(memory_region.base_addr, memory_region.length, mtype);
            // `size` does not include the size field itself, hence the +4.
            current += memory_region.size + 4;
        }
    }
}
|
// Copyright (c) 2017-2018, The rav1e contributors. All rights reserved
//
// This source code is subject to the terms of the BSD 2 Clause License and
// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
// was not distributed with this source code in the LICENSE file, you can
// obtain it at www.aomedia.org/license/software. If the Alliance for Open
// Media Patent License 1.0 was not distributed with this source code in the
// PATENTS file, you can obtain it at www.aomedia.org/license/patent.
#![allow(safe_extern_statics)]
#![allow(non_upper_case_globals)]
#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
#![cfg_attr(feature = "cargo-clippy", allow(unnecessary_mut_passed))]
#![cfg_attr(feature = "cargo-clippy", allow(needless_range_loop))]
#![cfg_attr(feature = "cargo-clippy", allow(collapsible_if))]
use ec::Writer;
use partition::BlockSize::*;
use partition::PredictionMode::*;
use partition::TxSize::*;
use partition::TxType::*;
use partition::*;
use lrf::WIENER_TAPS_MID;
use lrf::SGR_XQD_MID;
use plane::*;
use util::clamp;
use util::msb;
use std::*;
use entropymode::*;
use token_cdfs::*;
use encoder::FrameInvariants;
use scan_order::*;
use encoder::ReferenceMode;
use self::REF_CONTEXTS;
use self::SINGLE_REFS;
pub const PLANES: usize = 3;
const PARTITION_PLOFFSET: usize = 4;
const PARTITION_BLOCK_SIZES: usize = 4 + 1;
const PARTITION_CONTEXTS_PRIMARY: usize = PARTITION_BLOCK_SIZES * PARTITION_PLOFFSET;
pub const PARTITION_CONTEXTS: usize = PARTITION_CONTEXTS_PRIMARY;
pub const PARTITION_TYPES: usize = 4;
pub const MI_SIZE_LOG2: usize = 2;
pub const MI_SIZE: usize = (1 << MI_SIZE_LOG2);
const MAX_MIB_SIZE_LOG2: usize = (MAX_SB_SIZE_LOG2 - MI_SIZE_LOG2);
pub const MAX_MIB_SIZE: usize = (1 << MAX_MIB_SIZE_LOG2);
pub const MAX_MIB_MASK: usize = (MAX_MIB_SIZE - 1);
const MAX_SB_SIZE_LOG2: usize = 6;
pub const MAX_SB_SIZE: usize = (1 << MAX_SB_SIZE_LOG2);
const MAX_SB_SQUARE: usize = (MAX_SB_SIZE * MAX_SB_SIZE);
pub const MAX_TX_SIZE: usize = 64;
const MAX_TX_SQUARE: usize = MAX_TX_SIZE * MAX_TX_SIZE;
pub const INTRA_MODES: usize = 13;
pub const UV_INTRA_MODES: usize = 14;
pub const CFL_JOINT_SIGNS: usize = 8;
pub const CFL_ALPHA_CONTEXTS: usize = 6;
pub const CFL_ALPHABET_SIZE: usize = 16;
pub const SKIP_MODE_CONTEXTS: usize = 3;
pub const COMP_INDEX_CONTEXTS: usize = 6;
pub const COMP_GROUP_IDX_CONTEXTS: usize = 6;
pub const BLOCK_SIZE_GROUPS: usize = 4;
pub const MAX_ANGLE_DELTA: usize = 3;
pub const DIRECTIONAL_MODES: usize = 8;
pub const KF_MODE_CONTEXTS: usize = 5;
pub const EXT_PARTITION_TYPES: usize = 10;
pub const TX_SIZE_SQR_CONTEXTS: usize = 4; // Coded tx_size <= 32x32, so is the # of CDF contexts from tx sizes
pub const TX_SETS: usize = 9;
pub const TX_SETS_INTRA: usize = 3;
pub const TX_SETS_INTER: usize = 4;
pub const TXFM_PARTITION_CONTEXTS: usize = ((TxSize::TX_SIZES - TxSize::TX_8X8 as usize) * 6 - 3);
const MAX_REF_MV_STACK_SIZE: usize = 8;
pub const REF_CAT_LEVEL: u32 = 640;
pub const FRAME_LF_COUNT: usize = 4;
pub const MAX_LOOP_FILTER: usize = 63;
const DELTA_LF_SMALL: u32 = 3;
pub const DELTA_LF_PROBS: usize = DELTA_LF_SMALL as usize;
const DELTA_Q_SMALL: u32 = 3;
pub const DELTA_Q_PROBS: usize = DELTA_Q_SMALL as usize;
// Number of transform types in each set type
static num_tx_set: [usize; TX_SETS] =
[1, 2, 5, 7, 7, 10, 12, 16, 16];
pub static av1_tx_used: [[usize; TX_TYPES]; TX_SETS] = [
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
];
// Maps set types above to the indices used for intra
static tx_set_index_intra: [i8; TX_SETS] =
[0, -1, 2, -1, 1, -1, -1, -1, -16];
// Maps set types above to the indices used for inter
static tx_set_index_inter: [i8; TX_SETS] =
[0, 3, -1, -1, -1, -1, 2, -1, 1];
static av1_tx_ind: [[usize; TX_TYPES]; TX_SETS] = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 3, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 5, 6, 4, 0, 0, 0, 0, 0, 0, 2, 3, 0, 0, 0, 0],
[1, 5, 6, 4, 0, 0, 0, 0, 0, 0, 2, 3, 0, 0, 0, 0],
[1, 2, 3, 6, 4, 5, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 8, 6, 7, 9, 10, 11, 0, 1, 2, 0, 0, 0, 0],
[7, 8, 9, 12, 10, 11, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6],
[7, 8, 9, 12, 10, 11, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6]
];
static ss_size_lookup: [[[BlockSize; 2]; 2]; BlockSize::BLOCK_SIZES_ALL] = [
// ss_x == 0 ss_x == 0 ss_x == 1 ss_x == 1
// ss_y == 0 ss_y == 1 ss_y == 0 ss_y == 1
[ [ BLOCK_4X4, BLOCK_4X4 ], [BLOCK_4X4, BLOCK_4X4 ] ],
[ [ BLOCK_4X8, BLOCK_4X4 ], [BLOCK_4X4, BLOCK_4X4 ] ],
[ [ BLOCK_8X4, BLOCK_4X4 ], [BLOCK_4X4, BLOCK_4X4 ] ],
[ [ BLOCK_8X8, BLOCK_8X4 ], [BLOCK_4X8, BLOCK_4X4 ] ],
[ [ BLOCK_8X16, BLOCK_8X8 ], [BLOCK_4X16, BLOCK_4X8 ] ],
[ [ BLOCK_16X8, BLOCK_16X4 ], [BLOCK_8X8, BLOCK_8X4 ] ],
[ [ BLOCK_16X16, BLOCK_16X8 ], [BLOCK_8X16, BLOCK_8X8 ] ],
[ [ BLOCK_16X32, BLOCK_16X16 ], [BLOCK_8X32, BLOCK_8X16 ] ],
[ [ BLOCK_32X16, BLOCK_32X8 ], [BLOCK_16X16, BLOCK_16X8 ] ],
[ [ BLOCK_32X32, BLOCK_32X16 ], [BLOCK_16X32, BLOCK_16X16 ] ],
[ [ BLOCK_32X64, BLOCK_32X32 ], [BLOCK_16X64, BLOCK_16X32 ] ],
[ [ BLOCK_64X32, BLOCK_64X16 ], [BLOCK_32X32, BLOCK_32X16 ] ],
[ [ BLOCK_64X64, BLOCK_64X32 ], [BLOCK_32X64, BLOCK_32X32 ] ],
[ [ BLOCK_64X128, BLOCK_64X64 ], [ BLOCK_INVALID, BLOCK_32X64 ] ],
[ [ BLOCK_128X64, BLOCK_INVALID ], [ BLOCK_64X64, BLOCK_64X32 ] ],
[ [ BLOCK_128X128, BLOCK_128X64 ], [ BLOCK_64X128, BLOCK_64X64 ] ],
[ [ BLOCK_4X16, BLOCK_4X8 ], [BLOCK_4X16, BLOCK_4X8 ] ],
[ [ BLOCK_16X4, BLOCK_16X4 ], [BLOCK_8X4, BLOCK_8X4 ] ],
[ [ BLOCK_8X32, BLOCK_8X16 ], [BLOCK_INVALID, BLOCK_4X16 ] ],
[ [ BLOCK_32X8, BLOCK_INVALID ], [BLOCK_16X8, BLOCK_16X4 ] ],
[ [ BLOCK_16X64, BLOCK_16X32 ], [BLOCK_INVALID, BLOCK_8X32 ] ],
[ [ BLOCK_64X16, BLOCK_INVALID ], [BLOCK_32X16, BLOCK_32X8 ] ]
];
// Looks up the plane (chroma) block size for the given luma block size and
// chroma subsampling; subsampling_x/subsampling_y index into ss_size_lookup
// and must be 0 or 1.
pub fn get_plane_block_size(bsize: BlockSize, subsampling_x: usize, subsampling_y: usize)
  -> BlockSize {
  ss_size_lookup[bsize as usize][subsampling_x][subsampling_y]
}
// Generates 4 bit field in which each bit set to 1 represents
// a blocksize partition 1111 means we split 64x64, 32x32, 16x16
// and 8x8. 1000 means we just split the 64x64 to 32x32
// Entries are [above-context bits, left-context bits] per block size,
// indexed by the BlockSize enum order.
static partition_context_lookup: [[u8; 2]; BlockSize::BLOCK_SIZES_ALL] = [
[ 31, 31 ], // 4X4 - {0b11111, 0b11111}
[ 31, 30 ], // 4X8 - {0b11111, 0b11110}
[ 30, 31 ], // 8X4 - {0b11110, 0b11111}
[ 30, 30 ], // 8X8 - {0b11110, 0b11110}
[ 30, 28 ], // 8X16 - {0b11110, 0b11100}
[ 28, 30 ], // 16X8 - {0b11100, 0b11110}
[ 28, 28 ], // 16X16 - {0b11100, 0b11100}
[ 28, 24 ], // 16X32 - {0b11100, 0b11000}
[ 24, 28 ], // 32X16 - {0b11000, 0b11100}
[ 24, 24 ], // 32X32 - {0b11000, 0b11000}
[ 24, 16 ], // 32X64 - {0b11000, 0b10000}
[ 16, 24 ], // 64X32 - {0b10000, 0b11000}
[ 16, 16 ], // 64X64 - {0b10000, 0b10000}
[ 16, 0 ], // 64X128- {0b10000, 0b00000}
[ 0, 16 ], // 128X64- {0b00000, 0b10000}
[ 0, 0 ], // 128X128-{0b00000, 0b00000}
[ 31, 28 ], // 4X16 - {0b11111, 0b11100}
[ 28, 31 ], // 16X4 - {0b11100, 0b11111}
[ 30, 24 ], // 8X32 - {0b11110, 0b11000}
[ 24, 30 ], // 32X8 - {0b11000, 0b11110}
[ 28, 16 ], // 16X64 - {0b11100, 0b10000}
[ 16, 28 ] // 64X16 - {0b10000, 0b11100}
];
// Size group (0..=3) per block size, in BlockSize enum order; used to pick
// the y-mode CDF set for a given block size.
static size_group_lookup: [u8; BlockSize::BLOCK_SIZES_ALL] = [
0, 0,
0, 1,
1, 1,
2, 2,
2, 3,
3, 3,
3, 3, 3, 3, 0,
0, 1,
1, 2,
2
];
// log2 of the pixel count (width * height) per block size, in BlockSize
// enum order (e.g. 4x4 -> 4, 128x128 -> 14).
static num_pels_log2_lookup: [u8; BlockSize::BLOCK_SIZES_ALL] = [
4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13, 13, 14, 6, 6, 8, 8, 10, 10];
pub const PLANE_TYPES: usize = 2;
const REF_TYPES: usize = 2;
pub const SKIP_CONTEXTS: usize = 3;
pub const INTRA_INTER_CONTEXTS: usize = 4;
pub const INTER_MODE_CONTEXTS: usize = 8;
pub const DRL_MODE_CONTEXTS: usize = 3;
pub const COMP_INTER_CONTEXTS: usize = 5;
pub const COMP_REF_TYPE_CONTEXTS: usize = 5;
pub const UNI_COMP_REF_CONTEXTS: usize = 3;
// Level Map
pub const TXB_SKIP_CONTEXTS: usize = 13;
pub const EOB_COEF_CONTEXTS: usize = 9;
const SIG_COEF_CONTEXTS_2D: usize = 26;
const SIG_COEF_CONTEXTS_1D: usize = 16;
pub const SIG_COEF_CONTEXTS_EOB: usize = 4;
pub const SIG_COEF_CONTEXTS: usize = SIG_COEF_CONTEXTS_2D + SIG_COEF_CONTEXTS_1D;
const COEFF_BASE_CONTEXTS: usize = SIG_COEF_CONTEXTS;
pub const DC_SIGN_CONTEXTS: usize = 3;
const BR_TMP_OFFSET: usize = 12;
const BR_REF_CAT: usize = 4;
pub const LEVEL_CONTEXTS: usize = 21;
pub const NUM_BASE_LEVELS: usize = 2;
pub const BR_CDF_SIZE: usize = 4;
const COEFF_BASE_RANGE: usize = 4 * (BR_CDF_SIZE - 1);
const COEFF_CONTEXT_BITS: usize = 6;
const COEFF_CONTEXT_MASK: usize = (1 << COEFF_CONTEXT_BITS) - 1;
const MAX_BASE_BR_RANGE: usize = COEFF_BASE_RANGE + NUM_BASE_LEVELS + 1;
const BASE_CONTEXT_POSITION_NUM: usize = 12;
// Pad 4 extra columns to remove horizontal availability check.
const TX_PAD_HOR_LOG2: usize = 2;
const TX_PAD_HOR: usize = 4;
// Pad 6 extra rows (2 on top and 4 on bottom) to remove vertical availability
// check.
const TX_PAD_TOP: usize = 2;
const TX_PAD_BOTTOM: usize = 4;
const TX_PAD_VER: usize = (TX_PAD_TOP + TX_PAD_BOTTOM);
// Pad 16 extra bytes to avoid reading overflow in SIMD optimization.
const TX_PAD_END: usize = 16;
const TX_PAD_2D: usize =
((MAX_TX_SIZE + TX_PAD_HOR) * (MAX_TX_SIZE + TX_PAD_VER) + TX_PAD_END);
const TX_CLASSES: usize = 3;
/// Transform classification used by the level-map coefficient coder:
/// genuinely 2-D transforms vs. 1-D horizontal / 1-D vertical ones
/// (see `tx_type_to_class` for the TxType -> TxClass mapping).
#[derive(Copy, Clone, PartialEq)]
pub enum TxClass {
TX_CLASS_2D = 0,
TX_CLASS_HORIZ = 1,
TX_CLASS_VERT = 2
}
/// Segmentation feature indices; each active feature carries per-segment
/// data sized/signed per `seg_feature_bits` / `seg_feature_is_signed`.
#[derive(Copy, Clone, PartialEq)]
pub enum SegLvl {
SEG_LVL_ALT_Q = 0, /* Use alternate Quantizer .... */
SEG_LVL_ALT_LF_Y_V = 1, /* Use alternate loop filter value on y plane vertical */
SEG_LVL_ALT_LF_Y_H = 2, /* Use alternate loop filter value on y plane horizontal */
SEG_LVL_ALT_LF_U = 3, /* Use alternate loop filter value on u plane */
SEG_LVL_ALT_LF_V = 4, /* Use alternate loop filter value on v plane */
SEG_LVL_REF_FRAME = 5, /* Optional Segment reference frame */
SEG_LVL_SKIP = 6, /* Optional Segment (0,0) + skip mode */
SEG_LVL_GLOBALMV = 7,
SEG_LVL_MAX = 8
}
// Bit width of each segmentation feature's coded value, indexed by SegLvl.
pub const seg_feature_bits: [u32; SegLvl::SEG_LVL_MAX as usize] =
[ 8, 6, 6, 6, 6, 3, 0, 0 ];
// Whether each segmentation feature's value is signed, indexed by SegLvl.
pub const seg_feature_is_signed: [bool; SegLvl::SEG_LVL_MAX as usize] =
[ true, true, true, true, true, false, false, false, ];
use context::TxClass::*;
// TxType -> TxClass: transforms with a 1-D kernel in only one direction
// (V_*/H_* variants) get the HORIZ/VERT classes; everything else is 2-D.
static tx_type_to_class: [TxClass; TX_TYPES] = [
TX_CLASS_2D, // DCT_DCT
TX_CLASS_2D, // ADST_DCT
TX_CLASS_2D, // DCT_ADST
TX_CLASS_2D, // ADST_ADST
TX_CLASS_2D, // FLIPADST_DCT
TX_CLASS_2D, // DCT_FLIPADST
TX_CLASS_2D, // FLIPADST_FLIPADST
TX_CLASS_2D, // ADST_FLIPADST
TX_CLASS_2D, // FLIPADST_ADST
TX_CLASS_2D, // IDTX
TX_CLASS_VERT, // V_DCT
TX_CLASS_HORIZ, // H_DCT
TX_CLASS_VERT, // V_ADST
TX_CLASS_HORIZ, // H_ADST
TX_CLASS_VERT, // V_FLIPADST
TX_CLASS_HORIZ // H_FLIPADST
];
// End-of-block position -> EOB token (log-ish bucket) for small EOBs (<= 32).
static eob_to_pos_small: [u8; 33] = [
0, 1, 2, // 0-2
3, 3, // 3-4
4, 4, 4, 4, // 5-8
5, 5, 5, 5, 5, 5, 5, 5, // 9-16
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 // 17-32
];
// EOB token for large EOBs, indexed by eob >> 5 (i.e. eob / 32).
static eob_to_pos_large: [u8; 17] = [
6, // place holder
7, // 33-64
8, 8, // 65-128
9, 9, 9, 9, // 129-256
10, 10, 10, 10, 10, 10, 10, 10, // 257-512
11 // 513-
];
// First EOB value of each token group, and the number of extra bits coded
// on top of the group start (token i spans k_eob_offset_bits[i] extra bits).
static k_eob_group_start: [u16; 12] = [ 0, 1, 2, 3, 5, 9,
17, 33, 65, 129, 257, 513 ];
static k_eob_offset_bits: [u16; 12] = [ 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ];
/// Clamps `x` to at most 3 (used when folding neighbor magnitudes into a
/// coefficient context). Identical to the previous open-coded compare, but
/// uses the idiomatic `Ord::min`.
fn clip_max3(x: u8) -> u8 {
  x.min(3)
}
// The ctx offset table when TX is TX_CLASS_2D.
// TX col and row indices are clamped to 4
// Indexed as [tx size][row][col]; rows/cols past the real transform extent
// are zero-filled (e.g. the unused 5th column of 4-wide transforms).
#[cfg_attr(rustfmt, rustfmt_skip)]
static av1_nz_map_ctx_offset: [[[i8; 5]; 5]; TxSize::TX_SIZES_ALL] = [
// TX_4X4
[
[ 0, 1, 6, 6, 0],
[ 1, 6, 6, 21, 0],
[ 6, 6, 21, 21, 0],
[ 6, 21, 21, 21, 0],
[ 0, 0, 0, 0, 0]
],
// TX_8X8
[
[ 0, 1, 6, 6, 21],
[ 1, 6, 6, 21, 21],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_16X16
[
[ 0, 1, 6, 6, 21],
[ 1, 6, 6, 21, 21],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_32X32
[
[ 0, 1, 6, 6, 21],
[ 1, 6, 6, 21, 21],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_64X64
[
[ 0, 1, 6, 6, 21],
[ 1, 6, 6, 21, 21],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_4X8
[
[ 0, 11, 11, 11, 0],
[11, 11, 11, 11, 0],
[ 6, 6, 21, 21, 0],
[ 6, 21, 21, 21, 0],
[21, 21, 21, 21, 0]
],
// TX_8X4
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[ 0, 0, 0, 0, 0]
],
// TX_8X16
[
[ 0, 11, 11, 11, 11],
[11, 11, 11, 11, 11],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_16X8
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21]
],
// TX_16X32
[
[ 0, 11, 11, 11, 11],
[11, 11, 11, 11, 11],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_32X16
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21]
],
// TX_32X64
[
[ 0, 11, 11, 11, 11],
[11, 11, 11, 11, 11],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_64X32
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21]
],
// TX_4X16
[
[ 0, 11, 11, 11, 0],
[11, 11, 11, 11, 0],
[ 6, 6, 21, 21, 0],
[ 6, 21, 21, 21, 0],
[21, 21, 21, 21, 0]
],
// TX_16X4
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[ 0, 0, 0, 0, 0]
],
// TX_8X32
[
[ 0, 11, 11, 11, 11],
[11, 11, 11, 11, 11],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_32X8
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21]
],
// TX_16X64
[
[ 0, 11, 11, 11, 11],
[11, 11, 11, 11, 11],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_64X16
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21]
]
];
const NZ_MAP_CTX_0: usize = SIG_COEF_CONTEXTS_2D;
const NZ_MAP_CTX_5: usize = (NZ_MAP_CTX_0 + 5);
const NZ_MAP_CTX_10: usize = (NZ_MAP_CTX_0 + 10);
static nz_map_ctx_offset_1d: [usize; 32] = [
NZ_MAP_CTX_0, NZ_MAP_CTX_5, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10 ];
// Number of neighbor positions sampled when computing a level magnitude ctx.
const CONTEXT_MAG_POSITION_NUM: usize = 3;
// (row, col) offsets of those neighbors, per TxClass (2D / horiz / vert).
static mag_ref_offset_with_txclass: [[[usize; 2]; CONTEXT_MAG_POSITION_NUM]; 3] = [
[ [ 0, 1 ], [ 1, 0 ], [ 1, 1 ] ],
[ [ 0, 1 ], [ 1, 0 ], [ 0, 2 ] ],
[ [ 0, 1 ], [ 1, 0 ], [ 2, 0 ] ] ];
// End of Level Map
/// Returns true when this block carries chroma information.
///
/// With chroma subsampling, odd-sized blocks at odd positions defer their
/// chroma to a neighbor; a dimension is "covered" when the position is odd,
/// the size (in mode-info units) is even, or that axis is not subsampled.
pub fn has_chroma(
  bo: &BlockOffset, bsize: BlockSize, subsampling_x: usize,
  subsampling_y: usize
) -> bool {
  let bw = bsize.width_mi();
  let bh = bsize.height_mi();
  let x_covered = (bo.x & 0x01) == 1 || (bw & 0x01) == 0 || subsampling_x == 0;
  let y_covered = (bo.y & 0x01) == 1 || (bh & 0x01) == 0 || subsampling_y == 0;
  x_covered && y_covered
}
/// Selects the allowed transform-type set for a transform size, following
/// the AV1 rules: 64-point transforms are DCT-only, 32x32-class transforms
/// allow at most DCT+IDTX, the reduced set shrinks the choices, and
/// otherwise the set depends on inter/intra and whether the squared size
/// reaches 16x16.
pub fn get_tx_set(
  tx_size: TxSize, is_inter: bool, use_reduced_set: bool
) -> TxSet {
  let tx_size_sqr_up = tx_size.sqr_up();
  let tx_size_sqr = tx_size.sqr();
  if tx_size.width() >= 64 || tx_size.height() >= 64 {
    return TxSet::TX_SET_DCTONLY;
  }
  if tx_size_sqr_up == TxSize::TX_32X32 {
    return if is_inter {
      TxSet::TX_SET_DCT_IDTX
    } else {
      TxSet::TX_SET_DCTONLY
    };
  }
  if use_reduced_set {
    return if is_inter {
      TxSet::TX_SET_DCT_IDTX
    } else {
      TxSet::TX_SET_DTT4_IDTX
    };
  }
  let is_16x16 = tx_size_sqr == TxSize::TX_16X16;
  match (is_inter, is_16x16) {
    (true, true) => TxSet::TX_SET_DTT9_IDTX_1DDCT,
    (true, false) => TxSet::TX_SET_ALL16,
    (false, true) => TxSet::TX_SET_DTT4_IDTX,
    (false, false) => TxSet::TX_SET_DTT4_IDTX_1DDCT
  }
}
/// Maps the chosen transform set to its coded index; inter and intra frames
/// use separate index tables.
fn get_tx_set_index(
  tx_size: TxSize, is_inter: bool, use_reduced_set: bool
) -> i8 {
  let set = get_tx_set(tx_size, is_inter, use_reduced_set) as usize;
  let table = if is_inter {
    &tx_set_index_inter
  } else {
    &tx_set_index_intra
  };
  table[set]
}
// Default transform type implied by each intra prediction mode, in
// PredictionMode (intra) order.
static intra_mode_to_tx_type_context: [TxType; INTRA_MODES] = [
DCT_DCT, // DC
ADST_DCT, // V
DCT_ADST, // H
DCT_DCT, // D45
ADST_ADST, // D135
ADST_DCT, // D117
DCT_ADST, // D153
DCT_ADST, // D207
ADST_DCT, // D63
ADST_ADST, // SMOOTH
ADST_DCT, // SMOOTH_V
DCT_ADST, // SMOOTH_H
ADST_ADST, // PAETH
];
// Chroma intra mode -> equivalent luma intra mode (CFL falls back to DC).
static uv2y: [PredictionMode; UV_INTRA_MODES] = [
DC_PRED, // UV_DC_PRED
V_PRED, // UV_V_PRED
H_PRED, // UV_H_PRED
D45_PRED, // UV_D45_PRED
D135_PRED, // UV_D135_PRED
D117_PRED, // UV_D117_PRED
D153_PRED, // UV_D153_PRED
D207_PRED, // UV_D207_PRED
D63_PRED, // UV_D63_PRED
SMOOTH_PRED, // UV_SMOOTH_PRED
SMOOTH_V_PRED, // UV_SMOOTH_V_PRED
SMOOTH_H_PRED, // UV_SMOOTH_H_PRED
PAETH_PRED, // UV_PAETH_PRED
DC_PRED // CFL_PRED
];
/// Default transform type for a chroma intra mode: translate to the
/// equivalent luma mode, then look up that mode's transform type.
pub fn uv_intra_mode_to_tx_type_context(pred: PredictionMode) -> TxType {
  let y_mode = uv2y[pred as usize];
  intra_mode_to_tx_type_context[y_mode as usize]
}
/// CDFs for coding one component (row or column) of a motion vector.
/// Each array's final element is one past the symbol count, matching the
/// `cdf!`-built tables elsewhere in this file.
#[derive(Clone,Copy)]
pub struct NMVComponent {
classes_cdf: [u16; MV_CLASSES + 1],
class0_fp_cdf: [[u16; MV_FP_SIZE + 1]; CLASS0_SIZE],
fp_cdf: [u16; MV_FP_SIZE + 1],
sign_cdf: [u16; 2 + 1],
class0_hp_cdf: [u16; 2 + 1],
hp_cdf: [u16; 2 + 1],
class0_cdf: [u16; CLASS0_SIZE + 1],
bits_cdf: [[u16; 2 + 1]; MV_OFFSET_BITS],
}
/// Motion-vector coding context: the joint (which components are nonzero)
/// CDF plus per-component CDFs for row and column.
#[derive(Clone,Copy)]
pub struct NMVContext {
joints_cdf: [u16; MV_JOINTS + 1],
comps: [NMVComponent; 2],
}
// NOTE(review): empty extern block left over from the C port — the only
// declaration is commented out and nothing links against it; candidate for
// removal.
extern "C" {
//static av1_scan_orders: [[SCAN_ORDER; TX_TYPES]; TxSize::TX_SIZES_ALL];
}
// lv_map
// Default motion-vector CDFs. Both components (row / column) start from the
// same distributions; the `cdf!` macro builds the cumulative tables.
static default_nmv_context: NMVContext = {
NMVContext {
joints_cdf: cdf!(4096, 11264, 19328),
comps: [
NMVComponent {
classes_cdf: cdf!(
28672, 30976, 31858, 32320, 32551, 32656, 32740, 32757, 32762, 32767
),
class0_fp_cdf: [cdf!(16384, 24576, 26624), cdf!(12288, 21248, 24128)],
fp_cdf: cdf!(8192, 17408, 21248),
sign_cdf: cdf!(128 * 128),
class0_hp_cdf: cdf!(160 * 128),
hp_cdf: cdf!(128 * 128),
class0_cdf: cdf!(216 * 128),
bits_cdf: [
cdf!(128 * 136),
cdf!(128 * 140),
cdf!(128 * 148),
cdf!(128 * 160),
cdf!(128 * 176),
cdf!(128 * 192),
cdf!(128 * 224),
cdf!(128 * 234),
cdf!(128 * 234),
cdf!(128 * 240)
]
},
NMVComponent {
classes_cdf: cdf!(
28672, 30976, 31858, 32320, 32551, 32656, 32740, 32757, 32762, 32767
),
class0_fp_cdf: [cdf!(16384, 24576, 26624), cdf!(12288, 21248, 24128)],
fp_cdf: cdf!(8192, 17408, 21248),
sign_cdf: cdf!(128 * 128),
class0_hp_cdf: cdf!(160 * 128),
hp_cdf: cdf!(128 * 128),
class0_cdf: cdf!(216 * 128),
bits_cdf: [
cdf!(128 * 136),
cdf!(128 * 140),
cdf!(128 * 148),
cdf!(128 * 160),
cdf!(128 * 176),
cdf!(128 * 192),
cdf!(128 * 224),
cdf!(128 * 234),
cdf!(128 * 234),
cdf!(128 * 240)
]
}
]
}
};
/// A candidate motion vector from the reference-MV scan: the primary MV,
/// its compound partner, and a weight used for candidate ranking.
#[derive(Clone)]
pub struct CandidateMV {
pub this_mv: MotionVector,
pub comp_mv: MotionVector,
pub weight: u32
}
/// All adaptive symbol-coding CDFs for one coding pass. Each `[u16; N + 1]`
/// holds N cumulative probabilities plus one trailing slot (used as the
/// adaptation counter — see `reset_counts`).
#[derive(Clone,Copy)]
pub struct CDFContext {
partition_cdf: [[u16; EXT_PARTITION_TYPES + 1]; PARTITION_CONTEXTS],
kf_y_cdf: [[[u16; INTRA_MODES + 1]; KF_MODE_CONTEXTS]; KF_MODE_CONTEXTS],
y_mode_cdf: [[u16; INTRA_MODES + 1]; BLOCK_SIZE_GROUPS],
uv_mode_cdf: [[[u16; UV_INTRA_MODES + 1]; INTRA_MODES]; 2],
cfl_sign_cdf: [u16; CFL_JOINT_SIGNS + 1],
cfl_alpha_cdf: [[u16; CFL_ALPHABET_SIZE + 1]; CFL_ALPHA_CONTEXTS],
newmv_cdf: [[u16; 2 + 1]; NEWMV_MODE_CONTEXTS],
zeromv_cdf: [[u16; 2 + 1]; GLOBALMV_MODE_CONTEXTS],
refmv_cdf: [[u16; 2 + 1]; REFMV_MODE_CONTEXTS],
intra_tx_cdf:
[[[[u16; TX_TYPES + 1]; INTRA_MODES]; TX_SIZE_SQR_CONTEXTS]; TX_SETS_INTRA],
inter_tx_cdf: [[[u16; TX_TYPES + 1]; TX_SIZE_SQR_CONTEXTS]; TX_SETS_INTER],
skip_cdfs: [[u16; 3]; SKIP_CONTEXTS],
intra_inter_cdfs: [[u16; 3]; INTRA_INTER_CONTEXTS],
angle_delta_cdf: [[u16; 2 * MAX_ANGLE_DELTA + 1 + 1]; DIRECTIONAL_MODES],
filter_intra_cdfs: [[u16; 3]; BlockSize::BLOCK_SIZES_ALL],
comp_mode_cdf: [[u16; 3]; COMP_INTER_CONTEXTS],
comp_ref_type_cdf: [[u16; 3]; COMP_REF_TYPE_CONTEXTS],
comp_ref_cdf: [[[u16; 3]; FWD_REFS - 1]; REF_CONTEXTS],
comp_bwd_ref_cdf: [[[u16; 3]; BWD_REFS - 1]; REF_CONTEXTS],
single_ref_cdfs: [[[u16; 2 + 1]; SINGLE_REFS - 1]; REF_CONTEXTS],
drl_cdfs: [[u16; 2 + 1]; DRL_MODE_CONTEXTS],
compound_mode_cdf: [[u16; INTER_COMPOUND_MODES + 1]; INTER_MODE_CONTEXTS],
nmv_context: NMVContext,
deblock_delta_multi_cdf: [[u16; DELTA_LF_PROBS + 1 + 1]; FRAME_LF_COUNT],
deblock_delta_cdf: [u16; DELTA_LF_PROBS + 1 + 1],
spatial_segmentation_cdfs: [[u16; 8 + 1]; 3],
// lv_map
txb_skip_cdf: [[[u16; 3]; TXB_SKIP_CONTEXTS]; TxSize::TX_SIZES],
dc_sign_cdf: [[[u16; 3]; DC_SIGN_CONTEXTS]; PLANE_TYPES],
eob_extra_cdf:
[[[[u16; 3]; EOB_COEF_CONTEXTS]; PLANE_TYPES]; TxSize::TX_SIZES],
eob_flag_cdf16: [[[u16; 5 + 1]; 2]; PLANE_TYPES],
eob_flag_cdf32: [[[u16; 6 + 1]; 2]; PLANE_TYPES],
eob_flag_cdf64: [[[u16; 7 + 1]; 2]; PLANE_TYPES],
eob_flag_cdf128: [[[u16; 8 + 1]; 2]; PLANE_TYPES],
eob_flag_cdf256: [[[u16; 9 + 1]; 2]; PLANE_TYPES],
eob_flag_cdf512: [[[u16; 10 + 1]; 2]; PLANE_TYPES],
eob_flag_cdf1024: [[[u16; 11 + 1]; 2]; PLANE_TYPES],
coeff_base_eob_cdf:
[[[[u16; 3 + 1]; SIG_COEF_CONTEXTS_EOB]; PLANE_TYPES]; TxSize::TX_SIZES],
coeff_base_cdf:
[[[[u16; 4 + 1]; SIG_COEF_CONTEXTS]; PLANE_TYPES]; TxSize::TX_SIZES],
coeff_br_cdf: [[[[u16; BR_CDF_SIZE + 1]; LEVEL_CONTEXTS]; PLANE_TYPES];
TxSize::TX_SIZES]
}
impl CDFContext {
/// Builds a fresh CDF context from the spec's default tables. Only the
/// level-map (coefficient) CDFs depend on the quantizer: `qctx` buckets it
/// into one of four default sets; everything else is quantizer-independent.
pub fn new(quantizer: u8) -> CDFContext {
let qctx = match quantizer {
0..=20 => 0,
21..=60 => 1,
61..=120 => 2,
_ => 3
};
CDFContext {
partition_cdf: default_partition_cdf,
kf_y_cdf: default_kf_y_mode_cdf,
y_mode_cdf: default_if_y_mode_cdf,
uv_mode_cdf: default_uv_mode_cdf,
cfl_sign_cdf: default_cfl_sign_cdf,
cfl_alpha_cdf: default_cfl_alpha_cdf,
newmv_cdf: default_newmv_cdf,
zeromv_cdf: default_zeromv_cdf,
refmv_cdf: default_refmv_cdf,
intra_tx_cdf: default_intra_ext_tx_cdf,
inter_tx_cdf: default_inter_ext_tx_cdf,
skip_cdfs: default_skip_cdfs,
intra_inter_cdfs: default_intra_inter_cdf,
angle_delta_cdf: default_angle_delta_cdf,
filter_intra_cdfs: default_filter_intra_cdfs,
comp_mode_cdf: default_comp_mode_cdf,
comp_ref_type_cdf: default_comp_ref_type_cdf,
comp_ref_cdf: default_comp_ref_cdf,
comp_bwd_ref_cdf: default_comp_bwdref_cdf,
single_ref_cdfs: default_single_ref_cdf,
drl_cdfs: default_drl_cdf,
compound_mode_cdf: default_compound_mode_cdf,
nmv_context: default_nmv_context,
deblock_delta_multi_cdf: default_delta_lf_multi_cdf,
deblock_delta_cdf: default_delta_lf_cdf,
spatial_segmentation_cdfs: default_spatial_pred_seg_tree_cdf,
// lv_map
txb_skip_cdf: av1_default_txb_skip_cdfs[qctx],
dc_sign_cdf: av1_default_dc_sign_cdfs[qctx],
eob_extra_cdf: av1_default_eob_extra_cdfs[qctx],
eob_flag_cdf16: av1_default_eob_multi16_cdfs[qctx],
eob_flag_cdf32: av1_default_eob_multi32_cdfs[qctx],
eob_flag_cdf64: av1_default_eob_multi64_cdfs[qctx],
eob_flag_cdf128: av1_default_eob_multi128_cdfs[qctx],
eob_flag_cdf256: av1_default_eob_multi256_cdfs[qctx],
eob_flag_cdf512: av1_default_eob_multi512_cdfs[qctx],
eob_flag_cdf1024: av1_default_eob_multi1024_cdfs[qctx],
coeff_base_eob_cdf: av1_default_coeff_base_eob_multi_cdfs[qctx],
coeff_base_cdf: av1_default_coeff_base_multi_cdfs[qctx],
coeff_br_cdf: av1_default_coeff_lps_multi_cdfs[qctx]
}
}
/// Zeroes the adaptation counter of every CDF. The counter lives in the
/// slot immediately after the last used symbol of each table; for
/// uniformly-sized tables that is simply the final array element (handled
/// by the reset_*d! macros), while tables whose symbol count varies per
/// context need the explicit indices below.
pub fn reset_counts(&mut self) {
macro_rules! reset_1d {
($field:expr) => (let r = $field.last_mut().unwrap(); *r = 0;)
}
macro_rules! reset_2d {
($field:expr) => (for mut x in $field.iter_mut() { reset_1d!(x); })
}
macro_rules! reset_3d {
($field:expr) => (for mut x in $field.iter_mut() { reset_2d!(x); })
}
macro_rules! reset_4d {
($field:expr) => (for mut x in $field.iter_mut() { reset_3d!(x); })
}
// NOTE(review): the indices 4/10/8 appear to be the per-context-group
// partition symbol counts (fewer partition types at the smallest and
// largest block sizes) — confirm against the PARTITION_CONTEXTS layout.
for i in 0..4 { self.partition_cdf[i][4] = 0; }
for i in 4..16 { self.partition_cdf[i][10] = 0; }
for i in 16..20 { self.partition_cdf[i][8] = 0; }
reset_3d!(self.kf_y_cdf);
reset_2d!(self.y_mode_cdf);
// Chroma modes: CFL (the extra UV mode) is only available in set 1.
for i in 0..INTRA_MODES {
self.uv_mode_cdf[0][i][UV_INTRA_MODES - 1] = 0;
self.uv_mode_cdf[1][i][UV_INTRA_MODES] = 0;
}
reset_1d!(self.cfl_sign_cdf);
reset_2d!(self.cfl_alpha_cdf);
reset_2d!(self.newmv_cdf);
reset_2d!(self.zeromv_cdf);
reset_2d!(self.refmv_cdf);
// Transform-set CDFs: each set codes a different number of tx types,
// so the counter index differs per set.
for i in 0..TX_SIZE_SQR_CONTEXTS {
for j in 0..INTRA_MODES {
self.intra_tx_cdf[1][i][j][7] = 0;
self.intra_tx_cdf[2][i][j][5] = 0;
}
self.inter_tx_cdf[1][i][16] = 0;
self.inter_tx_cdf[2][i][12] = 0;
self.inter_tx_cdf[3][i][2] = 0;
}
reset_2d!(self.skip_cdfs);
reset_2d!(self.intra_inter_cdfs);
reset_2d!(self.angle_delta_cdf);
reset_2d!(self.filter_intra_cdfs);
reset_2d!(self.comp_mode_cdf);
reset_2d!(self.comp_ref_type_cdf);
reset_3d!(self.comp_ref_cdf);
reset_3d!(self.comp_bwd_ref_cdf);
reset_3d!(self.single_ref_cdfs);
reset_2d!(self.drl_cdfs);
reset_2d!(self.compound_mode_cdf);
reset_2d!(self.deblock_delta_multi_cdf);
reset_1d!(self.deblock_delta_cdf);
reset_2d!(self.spatial_segmentation_cdfs);
reset_1d!(self.nmv_context.joints_cdf);
for i in 0..2 {
reset_1d!(self.nmv_context.comps[i].classes_cdf);
reset_2d!(self.nmv_context.comps[i].class0_fp_cdf);
reset_1d!(self.nmv_context.comps[i].fp_cdf);
reset_1d!(self.nmv_context.comps[i].sign_cdf);
reset_1d!(self.nmv_context.comps[i].class0_hp_cdf);
reset_1d!(self.nmv_context.comps[i].hp_cdf);
reset_1d!(self.nmv_context.comps[i].class0_cdf);
reset_2d!(self.nmv_context.comps[i].bits_cdf);
}
// lv_map
reset_3d!(self.txb_skip_cdf);
reset_3d!(self.dc_sign_cdf);
reset_4d!(self.eob_extra_cdf);
reset_3d!(self.eob_flag_cdf16);
reset_3d!(self.eob_flag_cdf32);
reset_3d!(self.eob_flag_cdf64);
reset_3d!(self.eob_flag_cdf128);
reset_3d!(self.eob_flag_cdf256);
reset_3d!(self.eob_flag_cdf512);
reset_3d!(self.eob_flag_cdf1024);
reset_4d!(self.coeff_base_eob_cdf);
reset_4d!(self.coeff_base_cdf);
reset_4d!(self.coeff_br_cdf);
}
/// Builds a table of `(field name, start address, one-past-end address)`
/// for each mapped CDF field, used by `FieldMap::lookup` to attribute a
/// raw pointer back to the field it points into.
///
/// The previous version open-coded the start/end computation for every
/// field (~130 duplicated lines); a local macro now generates each entry.
/// The address of an array equals the address of its first element, so
/// `&self.field as *const _ as usize` produces the same start address as
/// the old `field.first().unwrap().as_ptr() as usize` (and as the old bare
/// `as_ptr()` for 1-D fields), for any nesting depth.
pub fn build_map(&self) -> Vec<(&'static str, usize, usize)> {
  use std::mem::size_of_val;
  macro_rules! map_entry {
    ($field:ident) => {{
      let start = &self.$field as *const _ as usize;
      (stringify!($field), start, start + size_of_val(&self.$field))
    }};
  }
  // Note: not every CDF field is mapped here; the set below matches the
  // original hand-written list (e.g. newmv_cdf and nmv_context are absent).
  vec![
    map_entry!(partition_cdf),
    map_entry!(kf_y_cdf),
    map_entry!(y_mode_cdf),
    map_entry!(uv_mode_cdf),
    map_entry!(cfl_sign_cdf),
    map_entry!(cfl_alpha_cdf),
    map_entry!(intra_tx_cdf),
    map_entry!(inter_tx_cdf),
    map_entry!(skip_cdfs),
    map_entry!(intra_inter_cdfs),
    map_entry!(angle_delta_cdf),
    map_entry!(filter_intra_cdfs),
    map_entry!(comp_mode_cdf),
    map_entry!(comp_ref_type_cdf),
    map_entry!(comp_ref_cdf),
    map_entry!(comp_bwd_ref_cdf),
    map_entry!(deblock_delta_multi_cdf),
    map_entry!(deblock_delta_cdf),
    map_entry!(spatial_segmentation_cdfs),
    map_entry!(txb_skip_cdf),
    map_entry!(dc_sign_cdf),
    map_entry!(eob_extra_cdf),
    map_entry!(eob_flag_cdf16),
    map_entry!(eob_flag_cdf32),
    map_entry!(eob_flag_cdf64),
    map_entry!(eob_flag_cdf128),
    map_entry!(eob_flag_cdf256),
    map_entry!(eob_flag_cdf512),
    map_entry!(eob_flag_cdf1024),
    map_entry!(coeff_base_eob_cdf),
    map_entry!(coeff_base_cdf),
    map_entry!(coeff_br_cdf),
  ]
}
}
impl fmt::Debug for CDFContext {
  /// The CDF tables are far too large to dump usefully, so Debug emits a
  /// fixed placeholder instead.
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    f.write_str("CDFContext contains too many numbers to print :-(")
  }
}
#[cfg(test)]
mod test {
// Checks that a pointer into a CDF field resolves to that field's entry
// in the map produced by build_map().
#[test]
fn cdf_map() {
use super::*;
let cdf = CDFContext::new(8);
let cdf_map = FieldMap {
map: cdf.build_map()
};
let f = &cdf.partition_cdf[2];
cdf_map.lookup(f.as_ptr() as usize);
}
use super::CFLSign;
use super::CFLSign::*;
// The 8 valid (u-sign, v-sign) combinations, in joint-sign coding order.
static cfl_alpha_signs: [[CFLSign; 2]; 8] = [
[ CFL_SIGN_ZERO, CFL_SIGN_NEG ],
[ CFL_SIGN_ZERO, CFL_SIGN_POS ],
[ CFL_SIGN_NEG, CFL_SIGN_ZERO ],
[ CFL_SIGN_NEG, CFL_SIGN_NEG ],
[ CFL_SIGN_NEG, CFL_SIGN_POS ],
[ CFL_SIGN_POS, CFL_SIGN_ZERO ],
[ CFL_SIGN_POS, CFL_SIGN_NEG ],
[ CFL_SIGN_POS, CFL_SIGN_POS ]
];
// Expected alpha context per plane (u, v) for each joint sign.
static cfl_context: [[usize; 8]; 2] = [
[ 0, 0, 0, 1, 2, 3, 4, 5 ],
[ 0, 3, 0, 1, 4, 0, 2, 5 ]
];
// Round-trips every sign pair through joint_sign()/context() and checks
// them against the reference tables above.
#[test]
fn cfl_joint_sign() {
use super::*;
let mut cfl = CFLParams::new();
for (joint_sign, &signs) in cfl_alpha_signs.iter().enumerate() {
cfl.sign = signs;
assert!(cfl.joint_sign() as usize == joint_sign);
for uv in 0..2 {
if signs[uv] != CFL_SIGN_ZERO {
assert!(cfl.context(uv) == cfl_context[uv][joint_sign]);
}
}
}
}
}
// Shifts converting between superblock, block (mode-info) and pixel units.
const SUPERBLOCK_TO_PLANE_SHIFT: usize = MAX_SB_SIZE_LOG2;
const SUPERBLOCK_TO_BLOCK_SHIFT: usize = MAX_MIB_SIZE_LOG2;
pub const BLOCK_TO_PLANE_SHIFT: usize = MI_SIZE_LOG2;
// Mask extracting a block's position within its superblock.
pub const LOCAL_BLOCK_MASK: usize = (1 << SUPERBLOCK_TO_BLOCK_SHIFT) - 1;
/// Absolute offset in superblocks inside a plane, where a superblock is defined
/// to be an N*N square where N = (1 << SUPERBLOCK_TO_PLANE_SHIFT).
#[derive(Clone)]
pub struct SuperBlockOffset {
pub x: usize,
pub y: usize
}
impl SuperBlockOffset {
  /// Offset of a block inside the current superblock, in block units.
  pub fn block_offset(&self, block_x: usize, block_y: usize) -> BlockOffset {
    let base_x = self.x << SUPERBLOCK_TO_BLOCK_SHIFT;
    let base_y = self.y << SUPERBLOCK_TO_BLOCK_SHIFT;
    BlockOffset { x: base_x + block_x, y: base_y + block_y }
  }

  /// Offset of the top-left pixel of this superblock in the given plane,
  /// accounting for that plane's chroma subsampling.
  pub fn plane_offset(&self, plane: &PlaneConfig) -> PlaneOffset {
    let px = (self.x as isize) << (SUPERBLOCK_TO_PLANE_SHIFT - plane.xdec);
    let py = (self.y as isize) << (SUPERBLOCK_TO_PLANE_SHIFT - plane.ydec);
    PlaneOffset { x: px, y: py }
  }
}
/// Absolute offset in blocks inside a plane, where a block is defined
/// to be an N*N square where N = (1 << BLOCK_TO_PLANE_SHIFT).
#[derive(Clone)]
pub struct BlockOffset {
pub x: usize,
pub y: usize
}
impl BlockOffset {
  /// Offset of the superblock in which this block is located.
  pub fn sb_offset(&self) -> SuperBlockOffset {
    SuperBlockOffset {
      x: self.x >> SUPERBLOCK_TO_BLOCK_SHIFT,
      y: self.y >> SUPERBLOCK_TO_BLOCK_SHIFT
    }
  }

  /// Offset of the top-left pixel of this block in the given plane:
  /// the superblock's pixel origin plus the block's position inside the
  /// superblock, subsampled then scaled to pixels.
  pub fn plane_offset(&self, plane: &PlaneConfig) -> PlaneOffset {
    let sb = self.sb_offset().plane_offset(plane);
    let local_x =
      (((self.x & LOCAL_BLOCK_MASK) as isize) >> plane.xdec) << BLOCK_TO_PLANE_SHIFT;
    let local_y =
      (((self.y & LOCAL_BLOCK_MASK) as isize) >> plane.ydec) << BLOCK_TO_PLANE_SHIFT;
    PlaneOffset { x: sb.x + local_x, y: sb.y + local_y }
  }

  /// Row of this block within its superblock.
  pub fn y_in_sb(&self) -> usize {
    self.y % MAX_MIB_SIZE
  }

  /// A new offset displaced by the given (signed) column/row deltas.
  pub fn with_offset(&self, col_offset: isize, row_offset: isize) -> BlockOffset {
    BlockOffset {
      x: (self.x as isize + col_offset) as usize,
      y: (self.y as isize + row_offset) as usize
    }
  }
}
/// Per-block coding state, stored at mode-info granularity in BlockContext.
#[derive(Copy, Clone)]
pub struct Block {
pub mode: PredictionMode,
pub partition: PartitionType,
pub skip: bool,
// [primary, secondary] reference frames; secondary used in compound mode.
pub ref_frames: [usize; 2],
pub mv: [MotionVector; 2],
pub neighbors_ref_counts: [usize; TOTAL_REFS_PER_FRAME],
pub cdef_index: u8,
pub n4_w: usize, /* block width in the unit of mode_info */
pub n4_h: usize, /* block height in the unit of mode_info */
pub tx_w: usize, /* transform width in the unit of mode_info */
pub tx_h: usize, /* transform height in the unit of mode_info */
pub is_sec_rect: bool,
// The block-level deblock_deltas are left-shifted by
// fi.deblock.block_delta_shift and added to the frame-configured
// deltas
pub deblock_deltas: [i8; FRAME_LF_COUNT],
pub segmentation_idx: u8
}
impl Block {
pub fn default() -> Block {
Block {
mode: PredictionMode::DC_PRED,
partition: PartitionType::PARTITION_NONE,
skip: false,
ref_frames: [INTRA_FRAME; 2],
mv: [ MotionVector { row:0, col: 0 }; 2],
neighbors_ref_counts: [0; TOTAL_REFS_PER_FRAME],
cdef_index: 0,
n4_w: BLOCK_64X64.width_mi(),
n4_h: BLOCK_64X64.height_mi(),
tx_w: TX_64X64.width_mi(),
tx_h: TX_64X64.height_mi(),
is_sec_rect: false,
deblock_deltas: [0, 0, 0, 0],
segmentation_idx: 0,
}
}
pub fn is_inter(&self) -> bool {
self.mode >= PredictionMode::NEARESTMV
}
pub fn has_second_ref(&self) -> bool {
self.ref_frames[1] > INTRA_FRAME && self.ref_frames[1] != NONE_FRAME
}
}
/// Contexts for coding one transform block: the all-zero (skip) context and
/// the DC sign context. (Name kept in libaom style for parity.)
pub struct TXB_CTX {
pub txb_skip_ctx: usize,
pub dc_sign_ctx: usize
}
/// Per-frame spatial coding context: above/left partition and coefficient
/// contexts plus the grid of per-block state, sized in mode-info units.
#[derive(Clone, Default)]
pub struct BlockContext {
pub cols: usize,
pub rows: usize,
pub cdef_coded: bool,
pub code_deltas: bool,
pub update_seg: bool,
pub preskip_segid: bool,
// Above-row context spans the frame width; left context only one superblock.
above_partition_context: Vec<u8>,
left_partition_context: [u8; MAX_MIB_SIZE],
above_coeff_context: [Vec<u8>; PLANES],
left_coeff_context: [[u8; MAX_MIB_SIZE]; PLANES],
blocks: Vec<Vec<Block>>
}
impl BlockContext {
/// Creates a zeroed context for a frame of `cols` x `rows` mode-info units.
pub fn new(cols: usize, rows: usize) -> BlockContext {
  // Round the column count up to a whole superblock (power of two).
  let mib = 1 << MAX_MIB_SIZE_LOG2;
  let aligned_cols = (cols + mib - 1) & !(mib - 1);
  // One coefficient-context entry per smallest transform unit.
  let coeff_cols = cols << (MI_SIZE_LOG2 - TxSize::smallest_width_log2());
  BlockContext {
    cols,
    rows,
    cdef_coded: false,
    code_deltas: false,
    update_seg: false,
    preskip_segid: true,
    above_partition_context: vec![0; aligned_cols],
    left_partition_context: [0; MAX_MIB_SIZE],
    above_coeff_context: [
      vec![0; coeff_cols],
      vec![0; coeff_cols],
      vec![0; coeff_cols]
    ],
    left_coeff_context: [[0; MAX_MIB_SIZE]; PLANES],
    blocks: vec![vec![Block::default(); cols]; rows]
  }
}
pub fn checkpoint(&mut self) -> BlockContext {
BlockContext {
cols: self.cols,
rows: self.rows,
cdef_coded: self.cdef_coded,
code_deltas: self.code_deltas,
update_seg: self.update_seg,
preskip_segid: self.preskip_segid,
above_partition_context: self.above_partition_context.clone(),
left_partition_context: self.left_partition_context,
above_coeff_context: self.above_coeff_context.clone(),
left_coeff_context: self.left_coeff_context,
blocks: vec![vec![Block::default(); 0]; 0]
}
}
pub fn rollback(&mut self, checkpoint: &BlockContext) {
self.cols = checkpoint.cols;
self.rows = checkpoint.rows;
self.cdef_coded = checkpoint.cdef_coded;
self.above_partition_context = checkpoint.above_partition_context.clone();
self.left_partition_context = checkpoint.left_partition_context;
self.above_coeff_context = checkpoint.above_coeff_context.clone();
self.left_coeff_context = checkpoint.left_coeff_context;
}
pub fn at_mut(&mut self, bo: &BlockOffset) -> &mut Block {
&mut self.blocks[bo.y][bo.x]
}
pub fn at(&self, bo: &BlockOffset) -> &Block {
&self.blocks[bo.y][bo.x]
}
pub fn above_of(&mut self, bo: &BlockOffset) -> Block {
if bo.y > 0 {
self.blocks[bo.y - 1][bo.x]
} else {
Block::default()
}
}
pub fn left_of(&mut self, bo: &BlockOffset) -> Block {
if bo.x > 0 {
self.blocks[bo.y][bo.x - 1]
} else {
Block::default()
}
}
pub fn above_left_of(&mut self, bo: &BlockOffset) -> Block {
if bo.x > 0 && bo.y > 0 {
self.blocks[bo.y - 1][bo.x - 1]
} else {
Block::default()
}
}
pub fn for_each<F>(&mut self, bo: &BlockOffset, bsize: BlockSize, f: F)
where
F: Fn(&mut Block) -> ()
{
let bw = bsize.width_mi();
let bh = bsize.height_mi();
for y in 0..bh {
for x in 0..bw {
f(&mut self.blocks[bo.y + y as usize][bo.x + x as usize]);
}
}
}
pub fn set_dc_sign(&mut self, cul_level: &mut u32, dc_val: i32) {
if dc_val < 0 {
*cul_level |= 1 << COEFF_CONTEXT_BITS;
} else if dc_val > 0 {
*cul_level += 2 << COEFF_CONTEXT_BITS;
}
}
fn set_coeff_context(
&mut self, plane: usize, bo: &BlockOffset, tx_size: TxSize, xdec: usize,
ydec: usize, value: u8
) {
for bx in 0..tx_size.width_mi() {
self.above_coeff_context[plane][(bo.x >> xdec) + bx] = value;
}
let bo_y = bo.y_in_sb();
for by in 0..tx_size.height_mi() {
self.left_coeff_context[plane][(bo_y >> ydec) + by] = value;
}
}
fn reset_left_coeff_context(&mut self, plane: usize) {
for c in &mut self.left_coeff_context[plane] {
*c = 0;
}
}
fn reset_left_partition_context(&mut self) {
for c in &mut self.left_partition_context {
*c = 0;
}
}
//TODO(anyone): Add reset_left_tx_context() here then call it in reset_left_contexts()
pub fn reset_skip_context(
&mut self, bo: &BlockOffset, bsize: BlockSize, xdec: usize, ydec: usize
) {
const num_planes: usize = 3;
let nplanes = if bsize >= BLOCK_8X8 {
3
} else {
1 + (num_planes - 1) * has_chroma(bo, bsize, xdec, ydec) as usize
};
for plane in 0..nplanes {
let xdec2 = if plane == 0 {
0
} else {
xdec
};
let ydec2 = if plane == 0 {
0
} else {
ydec
};
let plane_bsize = if plane == 0 {
bsize
} else {
get_plane_block_size(bsize, xdec2, ydec2)
};
let bw = plane_bsize.width_mi();
let bh = plane_bsize.height_mi();
for bx in 0..bw {
self.above_coeff_context[plane][(bo.x >> xdec2) + bx] = 0;
}
let bo_y = bo.y_in_sb();
for by in 0..bh {
self.left_coeff_context[plane][(bo_y >> ydec2) + by] = 0;
}
}
}
pub fn reset_left_contexts(&mut self) {
for p in 0..3 {
BlockContext::reset_left_coeff_context(self, p);
}
BlockContext::reset_left_partition_context(self);
//TODO(anyone): Call reset_left_tx_context() here.
}
pub fn set_mode(
&mut self, bo: &BlockOffset, bsize: BlockSize, mode: PredictionMode
) {
self.for_each(bo, bsize, |block| block.mode = mode);
}
pub fn set_block_size(&mut self, bo: &BlockOffset, bsize: BlockSize) {
let n4_w = bsize.width_mi();
let n4_h = bsize.height_mi();
self.for_each(bo, bsize, |block| { block.n4_w = n4_w; block.n4_h = n4_h } );
}
pub fn set_tx_size(&mut self, bo: &BlockOffset, txsize: TxSize) {
let tx_w = txsize.width_mi();
let tx_h = txsize.height_mi();
self.for_each(bo, txsize.block_size(), |block| { block.tx_w = tx_w; block.tx_h = tx_h } );
}
pub fn get_mode(&mut self, bo: &BlockOffset) -> PredictionMode {
self.blocks[bo.y][bo.x].mode
}
fn partition_plane_context(
&self, bo: &BlockOffset, bsize: BlockSize
) -> usize {
// TODO: this should be way simpler without sub8x8
let above_ctx = self.above_partition_context[bo.x];
let left_ctx = self.left_partition_context[bo.y_in_sb()];
let bsl = bsize.width_log2() - BLOCK_8X8.width_log2();
let above = (above_ctx >> bsl) & 1;
let left = (left_ctx >> bsl) & 1;
assert!(bsize.is_sqr());
(left * 2 + above) as usize + bsl as usize * PARTITION_PLOFFSET
}
pub fn update_partition_context(
&mut self, bo: &BlockOffset, subsize: BlockSize, bsize: BlockSize
) {
#[allow(dead_code)]
let bw = bsize.width_mi();
let bh = bsize.height_mi();
let above_ctx =
&mut self.above_partition_context[bo.x..bo.x + bw as usize];
let left_ctx = &mut self.left_partition_context
[bo.y_in_sb()..bo.y_in_sb() + bh as usize];
// update the partition context at the end notes. set partition bits
// of block sizes larger than the current one to be one, and partition
// bits of smaller block sizes to be zero.
for i in 0..bw {
above_ctx[i as usize] = partition_context_lookup[subsize as usize][0];
}
for i in 0..bh {
left_ctx[i as usize] = partition_context_lookup[subsize as usize][1];
}
}
fn skip_context(&mut self, bo: &BlockOffset) -> usize {
let above_skip = if bo.y > 0 {
self.above_of(bo).skip as usize
} else {
0
};
let left_skip = if bo.x > 0 {
self.left_of(bo).skip as usize
} else {
0
};
above_skip + left_skip
}
pub fn set_skip(&mut self, bo: &BlockOffset, bsize: BlockSize, skip: bool) {
self.for_each(bo, bsize, |block| block.skip = skip);
}
pub fn set_segmentation_idx(&mut self, bo: &BlockOffset, bsize: BlockSize, idx: u8) {
self.for_each(bo, bsize, |block| block.segmentation_idx = idx);
}
pub fn set_ref_frames(&mut self, bo: &BlockOffset, bsize: BlockSize, r: [usize; 2]) {
let bw = bsize.width_mi();
let bh = bsize.height_mi();
for y in 0..bh {
for x in 0..bw {
self.blocks[bo.y + y as usize][bo.x + x as usize].ref_frames = r;
}
}
}
pub fn set_motion_vectors(&mut self, bo: &BlockOffset, bsize: BlockSize, mvs: [MotionVector; 2]) {
let bw = bsize.width_mi();
let bh = bsize.height_mi();
for y in 0..bh {
for x in 0..bw {
self.blocks[bo.y + y as usize][bo.x + x as usize].mv = mvs;
}
}
}
pub fn set_cdef(&mut self, sbo: &SuperBlockOffset, cdef_index: u8) {
let bo = sbo.block_offset(0, 0);
// Checkme: Is 16 still the right block unit for 128x128 superblocks?
let bw = cmp::min (bo.x + MAX_MIB_SIZE, self.blocks[bo.y as usize].len());
let bh = cmp::min (bo.y + MAX_MIB_SIZE, self.blocks.len());
for y in bo.y..bh {
for x in bo.x..bw {
self.blocks[y as usize][x as usize].cdef_index = cdef_index;
}
}
}
// The mode info data structure has a one element border above and to the
// left of the entries corresponding to real macroblocks.
// The prediction flags in these dummy entries are initialized to 0.
// 0 - inter/inter, inter/--, --/inter, --/--
// 1 - intra/inter, inter/intra
// 2 - intra/--, --/intra
// 3 - intra/intra
pub fn intra_inter_context(&mut self, bo: &BlockOffset) -> usize {
let has_above = bo.y > 0;
let has_left = bo.x > 0;
match (has_above, has_left) {
(true, true) => {
let above_intra = !self.above_of(bo).is_inter();
let left_intra = !self.left_of(bo).is_inter();
if above_intra && left_intra {
3
} else {
(above_intra || left_intra) as usize
}
}
(true, _) | (_, true) =>
2 * if has_above {
!self.above_of(bo).is_inter() as usize
} else {
!self.left_of(bo).is_inter() as usize
},
(_, _) => 0
}
}
pub fn get_txb_ctx(
&mut self, plane_bsize: BlockSize, tx_size: TxSize, plane: usize,
bo: &BlockOffset, xdec: usize, ydec: usize
) -> TXB_CTX {
let mut txb_ctx = TXB_CTX {
txb_skip_ctx: 0,
dc_sign_ctx: 0
};
const MAX_TX_SIZE_UNIT: usize = 16;
const signs: [i8; 3] = [0, -1, 1];
const dc_sign_contexts: [usize; 4 * MAX_TX_SIZE_UNIT + 1] = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
];
let mut dc_sign: i16 = 0;
let txb_w_unit = tx_size.width_mi();
let txb_h_unit = tx_size.height_mi();
// Decide txb_ctx.dc_sign_ctx
for k in 0..txb_w_unit {
let sign = self.above_coeff_context[plane][(bo.x >> xdec) + k]
>> COEFF_CONTEXT_BITS;
assert!(sign <= 2);
dc_sign += signs[sign as usize] as i16;
}
for k in 0..txb_h_unit {
let sign = self.left_coeff_context[plane][(bo.y_in_sb() >> ydec) + k]
>> COEFF_CONTEXT_BITS;
assert!(sign <= 2);
dc_sign += signs[sign as usize] as i16;
}
txb_ctx.dc_sign_ctx =
dc_sign_contexts[(dc_sign + 2 * MAX_TX_SIZE_UNIT as i16) as usize];
// Decide txb_ctx.txb_skip_ctx
if plane == 0 {
if plane_bsize == tx_size.block_size() {
txb_ctx.txb_skip_ctx = 0;
} else {
// This is the algorithm to generate table skip_contexts[min][max].
// if (!max)
// txb_skip_ctx = 1;
// else if (!min)
// txb_skip_ctx = 2 + (max > 3);
// else if (max <= 3)
// txb_skip_ctx = 4;
// else if (min <= 3)
// txb_skip_ctx = 5;
// else
// txb_skip_ctx = 6;
const skip_contexts: [[u8; 5]; 5] = [
[1, 2, 2, 2, 3],
[1, 4, 4, 4, 5],
[1, 4, 4, 4, 5],
[1, 4, 4, 4, 5],
[1, 4, 4, 4, 6]
];
let mut top: u8 = 0;
let mut left: u8 = 0;
for k in 0..txb_w_unit {
top |= self.above_coeff_context[0][(bo.x >> xdec) + k];
}
top &= COEFF_CONTEXT_MASK as u8;
for k in 0..txb_h_unit {
left |= self.left_coeff_context[0][(bo.y_in_sb() >> ydec) + k];
}
left &= COEFF_CONTEXT_MASK as u8;
let max = cmp::min(top | left, 4);
let min = cmp::min(cmp::min(top, left), 4);
txb_ctx.txb_skip_ctx =
skip_contexts[min as usize][max as usize] as usize;
}
} else {
let mut top: u8 = 0;
let mut left: u8 = 0;
for k in 0..txb_w_unit {
top |= self.above_coeff_context[plane][(bo.x >> xdec) + k];
}
for k in 0..txb_h_unit {
left |= self.left_coeff_context[plane][(bo.y_in_sb() >> ydec) + k];
}
let ctx_base = (top != 0) as usize + (left != 0) as usize;
let ctx_offset = if num_pels_log2_lookup[plane_bsize as usize]
> num_pels_log2_lookup[tx_size.block_size() as usize]
{
10
} else {
7
};
txb_ctx.txb_skip_ctx = ctx_base + ctx_offset;
}
txb_ctx
}
}
/// Loop-restoration filter choice for a restoration unit.
#[derive(Copy, Clone)]
pub enum RestorationFilter {
  /// No restoration applied.
  None,
  /// Wiener filter with per-direction coefficient triples.
  Wiener { coeffs: [[i8; 2]; 3] },
  /// Self-guided restoration with its two quantized parameters.
  Sgr { xqd: [i8; 2] },
}
impl RestorationFilter {
  /// The default filter is `None`: no restoration is applied.
  pub fn default() -> RestorationFilter {
    RestorationFilter::None
  }
}
/// One restoration unit: a region of the frame with its own filter choice.
#[derive(Copy, Clone)]
pub struct RestorationUnit {
  /// Filter applied to this unit.
  pub params: RestorationFilter,
}
impl RestorationUnit {
  /// A restoration unit carrying the default (no-op) filter.
  pub fn default() -> RestorationUnit {
    let params = RestorationFilter::default();
    RestorationUnit { params }
  }
}
/// Frame-level loop-restoration state: the unit grid plus the running
/// reference coefficients used for differential coding.
#[derive(Clone, Default)]
pub struct RestorationContext {
  /// Restoration-unit grid width.
  pub cols: usize,
  /// Restoration-unit grid height.
  pub rows: usize,
  /// Per-plane Wiener coefficient references.
  pub wiener_ref: [[[i8; 3]; 2]; PLANES],
  /// Per-plane self-guided parameter references.
  pub sgr_ref: [[i8; 2]; PLANES],
  /// Units indexed [plane][row][col].
  pub units: Vec<Vec<Vec<RestorationUnit>>>
}
impl RestorationContext {
  /// Allocates a `cols` x `rows` grid of default restoration units per
  /// plane, with reference coefficients at their midpoint defaults.
  pub fn new(cols: usize, rows: usize) -> RestorationContext {
    let units =
      vec![vec![vec![RestorationUnit::default(); cols]; rows]; PLANES];
    RestorationContext {
      cols,
      rows,
      wiener_ref: [[WIENER_TAPS_MID; 2]; PLANES],
      sgr_ref: [SGR_XQD_MID; PLANES],
      units
    }
  }
}
/// Sign of a chroma-from-luma alpha value, in its coded ordering.
#[derive(Copy, Clone, PartialEq)]
pub enum CFLSign {
  CFL_SIGN_ZERO = 0,
  CFL_SIGN_NEG = 1,
  CFL_SIGN_POS = 2
}
impl CFLSign {
  /// Maps an alpha value to its CfL sign: negative, zero or positive.
  pub fn from_alpha(a: i16) -> CFLSign {
    match a.signum() {
      -1 => CFL_SIGN_NEG,
      0 => CFL_SIGN_ZERO,
      _ => CFL_SIGN_POS
    }
  }
}
use context::CFLSign::*;
/// Number of distinct CfL signs (zero, negative, positive).
const CFL_SIGNS: usize = 3;
// Maps a CFLSign discriminant to its numeric multiplier (see CFLParams::alpha).
static cfl_sign_value: [i16; CFL_SIGNS] = [ 0, -1, 1 ];
/// Chroma-from-luma prediction parameters: one (sign, scale) pair per
/// chroma plane (index 0 = U, 1 = V).
#[derive(Copy, Clone)]
pub struct CFLParams {
  /// Alpha sign per chroma plane.
  sign: [CFLSign; 2],
  /// Alpha magnitude per chroma plane; 0 only valid with a zero sign.
  scale: [u8; 2]
}
impl CFLParams {
  /// Initial parameters used before a real (sign, scale) pair is chosen.
  pub fn new() -> CFLParams {
    CFLParams {
      scale: [1, 0],
      sign: [CFL_SIGN_NEG, CFL_SIGN_ZERO]
    }
  }
  /// Jointly coded sign symbol for both chroma planes.
  /// At least one plane must carry a non-zero sign.
  pub fn joint_sign(self) -> u32 {
    assert!(self.sign[0] != CFL_SIGN_ZERO || self.sign[1] != CFL_SIGN_ZERO);
    let (s0, s1) = (self.sign[0] as u32, self.sign[1] as u32);
    s0 * CFL_SIGNS as u32 + s1 - 1
  }
  /// CDF context for plane `uv`'s scale index, derived from both signs.
  pub fn context(self, uv: usize) -> usize {
    assert!(self.sign[uv] != CFL_SIGN_ZERO);
    let own = self.sign[uv] as usize;
    let other = self.sign[1 - uv] as usize;
    (own - 1) * CFL_SIGNS + other
  }
  /// Coded scale symbol for plane `uv` (the scale minus one).
  pub fn index(self, uv: usize) -> u32 {
    assert!(self.sign[uv] != CFL_SIGN_ZERO && self.scale[uv] != 0);
    u32::from(self.scale[uv] - 1)
  }
  /// Signed alpha for plane `uv`: the scale with its sign applied.
  pub fn alpha(self, uv: usize) -> i16 {
    let magnitude = self.scale[uv] as i16;
    cfl_sign_value[self.sign[uv] as usize] * magnitude
  }
  /// Builds parameters from the signed alphas for U and V.
  pub fn from_alpha(u: i16, v: i16) -> CFLParams {
    CFLParams {
      scale: [ u.abs() as u8, v.abs() as u8 ],
      sign: [ CFLSign::from_alpha(u), CFLSign::from_alpha(v) ]
    }
  }
}
/// Debug helper mapping CDF field names to their address ranges, so a raw
/// CDF pointer can be attributed to a named field when logging.
#[derive(Debug, Default)]
struct FieldMap {
  // (field name, start address, end address) triples.
  map: Vec<(&'static str, usize, usize)>
}
impl FieldMap {
  /// Print the field the address belong to
  ///
  /// Scans the recorded `[start, end)` ranges and prints the name of the
  /// first one containing `addr`; falls back to printing the raw address.
  fn lookup(&self, addr: usize) {
    for (name, start, end) in &self.map {
      if addr >= *start && addr < *end {
        eprintln!(" CDF {}", name);
        // Idiom: eprintln!() emits the same bare newline as eprintln!("").
        eprintln!();
        return;
      }
    }
    eprintln!(" CDF address not found {}", addr);
  }
}
// Writes symbol `$s` via `$w` with an adaptive update of `$cdf`, and — in
// builds compiled with `--cfg debug` — logs which CDF field was touched
// through the context's `fc_map`.
// NOTE(review): `#[cfg(debug)]` only fires when a custom `debug` cfg flag is
// passed; it is NOT `#[cfg(debug_assertions)]` — confirm this is intended.
macro_rules! symbol_with_update {
  ($self:ident, $w:ident, $s:expr, $cdf:expr) => {
    $w.symbol_with_update($s, $cdf);
    #[cfg(debug)] {
      if let Some(map) = $self.fc_map.as_ref() {
        map.lookup($cdf.as_ptr() as usize);
      }
    }
  };
}
/// Maps a transform size to the size actually coded in the bitstream:
/// any 64-sample dimension is signalled as its 32-sample counterpart.
pub fn av1_get_coded_tx_size(tx_size: TxSize) -> TxSize {
  if tx_size == TX_64X64 || tx_size == TX_64X32 || tx_size == TX_32X64 {
    TX_32X32
  } else if tx_size == TX_16X64 {
    TX_16X32
  } else if tx_size == TX_64X16 {
    TX_32X16
  } else {
    tx_size
  }
}
/// Saved coding state — adaptive CDFs plus block context — that can be
/// restored to undo speculative coding.
#[derive(Clone)]
pub struct ContextWriterCheckpoint {
  /// Snapshot of the adaptive CDFs.
  pub fc: CDFContext,
  /// Snapshot of the per-block context.
  pub bc: BlockContext
}
/// Symbol-writing state: adaptive CDFs plus the block and restoration
/// contexts consulted when deriving coding contexts.
#[derive(Clone)]
pub struct ContextWriter {
  /// Per-block context (modes, skip flags, partition/coeff contexts).
  pub bc: BlockContext,
  /// Adaptive CDFs updated as symbols are written.
  pub fc: CDFContext,
  /// Loop-restoration state.
  pub rc: RestorationContext,
  #[cfg(debug)]
  fc_map: Option<FieldMap> // For debugging purposes
}
impl ContextWriter {
  /// Builds a writer over the given CDF, block and restoration contexts.
  ///
  /// In `--cfg debug` builds, setting the `RAV1E_DEBUG` environment variable
  /// enables the CDF field map used for logging by `symbol_with_update!`.
  pub fn new(fc: CDFContext, bc: BlockContext, rc: RestorationContext) -> Self {
    #[allow(unused_mut)]
    let mut cw = ContextWriter {
      fc,
      bc,
      rc,
      #[cfg(debug)]
      fc_map: Default::default()
    };
    #[cfg(debug)] {
      if std::env::var_os("RAV1E_DEBUG").is_some() {
        cw.fc_map = Some(FieldMap {
          map: cw.fc.build_map()
        });
      }
    }
    cw
  }
fn cdf_element_prob(cdf: &[u16], element: usize) -> u16 {
(if element > 0 {
cdf[element - 1]
} else {
32768
}) - cdf[element]
}
fn partition_gather_horz_alike(
out: &mut [u16; 2], cdf_in: &[u16], _bsize: BlockSize
) {
out[0] = 32768;
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_HORZ as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_SPLIT as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_HORZ_A as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_HORZ_B as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_VERT_A as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_HORZ_4 as usize
);
out[0] = 32768 - out[0];
out[1] = 0;
}
fn partition_gather_vert_alike(
out: &mut [u16; 2], cdf_in: &[u16], _bsize: BlockSize
) {
out[0] = 32768;
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_VERT as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_SPLIT as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_HORZ_A as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_VERT_A as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_VERT_B as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_VERT_4 as usize
);
out[0] = 32768 - out[0];
out[1] = 0;
}
  /// Writes the partition type for a square block using the neighbor-derived
  /// context. When the block overhangs the right/bottom frame edge, only a
  /// split-or-not decision is coded; fully off-frame, nothing is coded.
  pub fn write_partition(
    &mut self, w: &mut dyn Writer, bo: &BlockOffset, p: PartitionType, bsize: BlockSize
  ) {
    assert!(bsize >= BlockSize::BLOCK_8X8 );
    let hbs = bsize.width_mi() / 2;
    let has_cols = (bo.x + hbs) < self.bc.cols;
    let has_rows = (bo.y + hbs) < self.bc.rows;
    let ctx = self.bc.partition_plane_context(&bo, bsize);
    assert!(ctx < PARTITION_CONTEXTS);
    // 8x8 blocks cannot use the extended partition types, so narrow the CDF.
    let partition_cdf = if bsize <= BlockSize::BLOCK_8X8 {
      &mut self.fc.partition_cdf[ctx][..PARTITION_TYPES+1]
    } else {
      &mut self.fc.partition_cdf[ctx]
    };
    if !has_rows && !has_cols {
      // Entirely off-frame: the partition is implied, nothing to write.
      return;
    }
    if has_rows && has_cols {
      symbol_with_update!(self, w, p as u32, partition_cdf);
    } else if !has_rows && has_cols {
      assert!(bsize > BlockSize::BLOCK_8X8);
      let mut cdf = [0u16; 2];
      ContextWriter::partition_gather_vert_alike(
        &mut cdf,
        partition_cdf,
        bsize
      );
      w.symbol((p == PartitionType::PARTITION_SPLIT) as u32, &cdf);
    } else {
      assert!(bsize > BlockSize::BLOCK_8X8);
      let mut cdf = [0u16; 2];
      ContextWriter::partition_gather_horz_alike(
        &mut cdf,
        partition_cdf,
        bsize
      );
      w.symbol((p == PartitionType::PARTITION_SPLIT) as u32, &cdf);
    }
  }
  /// Writes an intra luma mode in a keyframe, conditioned on the (bucketed)
  /// modes of the above and left neighbor blocks.
  pub fn write_intra_mode_kf(
    &mut self, w: &mut dyn Writer, bo: &BlockOffset, mode: PredictionMode
  ) {
    // Buckets each prediction mode into one of five coarse context classes.
    static intra_mode_context: [usize; INTRA_MODES] =
      [0, 1, 2, 3, 4, 4, 4, 4, 3, 0, 1, 2, 0];
    let above_mode = self.bc.above_of(bo).mode as usize;
    let left_mode = self.bc.left_of(bo).mode as usize;
    let above_ctx = intra_mode_context[above_mode];
    let left_ctx = intra_mode_context[left_mode];
    let cdf = &mut self.fc.kf_y_cdf[above_ctx][left_ctx];
    symbol_with_update!(self, w, mode as u32, cdf);
  }
  /// Writes an intra luma mode (non-keyframe), contexted on block-size group.
  pub fn write_intra_mode(&mut self, w: &mut dyn Writer, bsize: BlockSize, mode: PredictionMode) {
    let cdf =
      &mut self.fc.y_mode_cdf[size_group_lookup[bsize as usize] as usize];
    symbol_with_update!(self, w, mode as u32, cdf);
  }
  /// Writes the chroma intra mode, conditioned on the luma mode and on
  /// whether CfL is allowed for this block size (CfL adds one extra symbol).
  pub fn write_intra_uv_mode(
    &mut self, w: &mut dyn Writer, uv_mode: PredictionMode, y_mode: PredictionMode, bs: BlockSize
  ) {
    let cdf =
      &mut self.fc.uv_mode_cdf[bs.cfl_allowed() as usize][y_mode as usize];
    if bs.cfl_allowed() {
      symbol_with_update!(self, w, uv_mode as u32, cdf);
    } else {
      // Without CfL the alphabet is one symbol shorter; narrow the CDF.
      symbol_with_update!(self, w, uv_mode as u32, &mut cdf[..UV_INTRA_MODES]);
    }
  }
  /// Writes the CfL joint sign, then the scale index of each chroma plane
  /// whose sign is non-zero (a zero sign implies no scale to code).
  pub fn write_cfl_alphas(&mut self, w: &mut dyn Writer, cfl: CFLParams) {
    symbol_with_update!(self, w, cfl.joint_sign(), &mut self.fc.cfl_sign_cdf);
    for uv in 0..2 {
      if cfl.sign[uv] != CFL_SIGN_ZERO {
        symbol_with_update!(self, w, cfl.index(uv), &mut self.fc.cfl_alpha_cdf[cfl.context(uv)]);
      }
    }
  }
  /// Writes a directional-mode angle delta, biased by `MAX_ANGLE_DELTA` so
  /// the coded symbol is non-negative; contexted on the directional mode.
  pub fn write_angle_delta(&mut self, w: &mut dyn Writer, angle: i8, mode: PredictionMode) {
    symbol_with_update!(
      self,
      w,
      (angle + MAX_ANGLE_DELTA as i8) as u32,
      &mut self.fc.angle_delta_cdf
        [mode as usize - PredictionMode::V_PRED as usize]
    );
  }
  /// Writes the filter-intra enable flag, contexted on the block size.
  pub fn write_use_filter_intra(&mut self, w: &mut dyn Writer, enable: bool, block_size: BlockSize) {
    symbol_with_update!(self, w, enable as u32, &mut self.fc.filter_intra_cdfs[block_size as usize]);
  }
fn get_mvref_ref_frames(&mut self, ref_frame: usize) -> ([usize; 2], usize) {
let ref_frame_map: [[usize; 2]; TOTAL_COMP_REFS] = [
[ LAST_FRAME, BWDREF_FRAME ], [ LAST2_FRAME, BWDREF_FRAME ],
[ LAST3_FRAME, BWDREF_FRAME ], [ GOLDEN_FRAME, BWDREF_FRAME ],
[ LAST_FRAME, ALTREF2_FRAME ], [ LAST2_FRAME, ALTREF2_FRAME ],
[ LAST3_FRAME, ALTREF2_FRAME ], [ GOLDEN_FRAME, ALTREF2_FRAME ],
[ LAST_FRAME, ALTREF_FRAME ], [ LAST2_FRAME, ALTREF_FRAME ],
[ LAST3_FRAME, ALTREF_FRAME ], [ GOLDEN_FRAME, ALTREF_FRAME ],
[ LAST_FRAME, LAST2_FRAME ], [ LAST_FRAME, LAST3_FRAME ],
[ LAST_FRAME, GOLDEN_FRAME ], [ BWDREF_FRAME, ALTREF_FRAME ],
// NOTE: Following reference frame pairs are not supported to be explicitly
// signalled, but they are possibly chosen by the use of skip_mode,
// which may use the most recent one-sided reference frame pair.
[ LAST2_FRAME, LAST3_FRAME ], [ LAST2_FRAME, GOLDEN_FRAME ],
[ LAST3_FRAME, GOLDEN_FRAME ], [ BWDREF_FRAME, ALTREF2_FRAME ],
[ ALTREF2_FRAME, ALTREF_FRAME ]
];
if ref_frame >= REF_FRAMES {
([ ref_frame_map[ref_frame - REF_FRAMES][0], ref_frame_map[ref_frame - REF_FRAMES][1] ], 2)
} else {
([ ref_frame, 0 ], 1)
}
}
fn find_valid_row_offs(&mut self, row_offset: isize, mi_row: usize, mi_rows: usize) -> isize {
if /* !tile->tg_horz_boundary */ true {
cmp::min(cmp::max(row_offset, -(mi_row as isize)), (mi_rows - mi_row - 1) as isize)
} else {
0
/* TODO: for tiling */
}
}
  /// Whether the block at `bo` of size `bsize` has an already-decoded
  /// top-right neighbor, determined by walking up the partition hierarchy
  /// within a superblock (assumed 64x64 here).
  fn has_tr(&mut self, bo: &BlockOffset, bsize: BlockSize, is_sec_rect: bool) -> bool {
    let sb_mi_size = BLOCK_64X64.width_mi(); /* Assume 64x64 for now */
    // Position of the block within its superblock, in MI units.
    let mask_row = bo.y & LOCAL_BLOCK_MASK;
    let mask_col = bo.x & LOCAL_BLOCK_MASK;
    let target_n4_w = bsize.width_mi();
    let target_n4_h = bsize.height_mi();
    let mut bs = target_n4_w.max(target_n4_h);
    if bs > BLOCK_64X64.width_mi() {
      return false;
    }
    // A bottom-right quadrant at this size has no decoded top-right.
    let mut has_tr = !((mask_row & bs) != 0 && (mask_col & bs) != 0);
    /* TODO: assert its a power of two */
    while bs < sb_mi_size {
      if (mask_col & bs) != 0 {
        if (mask_col & (2 * bs) != 0) && (mask_row & (2 * bs) != 0) {
          has_tr = false;
          break;
        }
      } else {
        break;
      }
      bs <<= 1;
    }
    /* The left hand of two vertical rectangles always has a top right (as the
     * block above will have been decoded) */
    if (target_n4_w < target_n4_h) && !is_sec_rect {
      has_tr = true;
    }
    /* The bottom of two horizontal rectangles never has a top right (as the block
     * to the right won't have been decoded) */
    if (target_n4_w > target_n4_h) && is_sec_rect {
      has_tr = false;
    }
    /* The bottom left square of a Vertical A (in the old format) does
     * not have a top right as it is decoded before the right hand
     * rectangle of the partition */
    /*
    if blk.partition == PartitionType::PARTITION_VERT_A {
      if blk.n4_w == blk.n4_h {
        if (mask_row & bs) != 0 {
          has_tr = false;
        }
      }
    }
    */
    has_tr
  }
fn find_valid_col_offs(&mut self, col_offset: isize, mi_col: usize) -> isize {
cmp::max(col_offset, -(mi_col as isize))
}
fn find_matching_mv(&self, mv: MotionVector, mv_stack: &mut Vec<CandidateMV>) -> bool {
for mv_cand in mv_stack {
if mv.row == mv_cand.this_mv.row && mv.col == mv_cand.this_mv.col {
return true;
}
}
false
}
fn find_matching_mv_and_update_weight(&self, mv: MotionVector, mv_stack: &mut Vec<CandidateMV>, weight: u32) -> bool {
for mut mv_cand in mv_stack {
if mv.row == mv_cand.this_mv.row && mv.col == mv_cand.this_mv.col {
mv_cand.weight += weight;
return true;
}
}
false
}
fn find_matching_comp_mv_and_update_weight(&self, mvs: [MotionVector; 2], mv_stack: &mut Vec<CandidateMV>, weight: u32) -> bool {
for mut mv_cand in mv_stack {
if mvs[0].row == mv_cand.this_mv.row && mvs[0].col == mv_cand.this_mv.col &&
mvs[1].row == mv_cand.comp_mv.row && mvs[1].col == mv_cand.comp_mv.col {
mv_cand.weight += weight;
return true;
}
}
false
}
  /// Adds `blk`'s motion vector(s) to the candidate stack when its reference
  /// frames match `ref_frames`: bumps the weight of an identical existing
  /// candidate, or pushes a new one while the stack has room. NEWMV-family
  /// modes are tallied in `newmv_count`. Returns true if the references
  /// matched (intra blocks never match).
  fn add_ref_mv_candidate(&self, ref_frames: [usize; 2], blk: &Block, mv_stack: &mut Vec<CandidateMV>,
                          weight: u32, newmv_count: &mut usize, is_compound: bool) -> bool {
    if !blk.is_inter() { /* For intrabc */
      false
    } else if is_compound {
      // Compound: both references must match, and the full MV pair is stored.
      if blk.ref_frames[0] == ref_frames[0] && blk.ref_frames[1] == ref_frames[1] {
        let found_match = self.find_matching_comp_mv_and_update_weight(blk.mv, mv_stack, weight);
        if !found_match && mv_stack.len() < MAX_REF_MV_STACK_SIZE {
          let mv_cand = CandidateMV {
            this_mv: blk.mv[0],
            comp_mv: blk.mv[1],
            weight: weight
          };
          mv_stack.push(mv_cand);
        }
        if blk.mode == PredictionMode::NEW_NEWMV ||
          blk.mode == PredictionMode::NEAREST_NEWMV ||
          blk.mode == PredictionMode::NEW_NEARESTMV ||
          blk.mode == PredictionMode::NEAR_NEWMV ||
          blk.mode == PredictionMode::NEW_NEARMV {
          *newmv_count += 1;
        }
        true
      } else {
        false
      }
    } else {
      // Single reference: either of the block's two references may match.
      let mut found = false;
      for i in 0..2 {
        if blk.ref_frames[i] == ref_frames[0] {
          let found_match = self.find_matching_mv_and_update_weight(blk.mv[i], mv_stack, weight);
          if !found_match && mv_stack.len() < MAX_REF_MV_STACK_SIZE {
            let mv_cand = CandidateMV {
              this_mv: blk.mv[i],
              comp_mv: MotionVector { row: 0, col: 0 },
              weight: weight
            };
            mv_stack.push(mv_cand);
          }
          if blk.mode == PredictionMode::NEW_NEWMV ||
            blk.mode == PredictionMode::NEAREST_NEWMV ||
            blk.mode == PredictionMode::NEW_NEARESTMV ||
            blk.mode == PredictionMode::NEAR_NEWMV ||
            blk.mode == PredictionMode::NEW_NEARMV ||
            blk.mode == PredictionMode::NEWMV {
            *newmv_count += 1;
          }
          found = true;
        }
      }
      found
    }
  }
  /// Collects fallback MV candidates from `blk` during the extra search pass
  /// (used when the primary scan leaves the stack short).
  ///
  /// Compound: MVs whose reference matches a target list go into
  /// `ref_id_mvs`/`ref_id_count`; others go into `ref_diff_mvs`/
  /// `ref_diff_count`, negated when the reference sign bias differs.
  /// Single: unmatched MVs are pushed straight onto `mv_stack` (sign-bias
  /// corrected, de-duplicated) with a fixed weight of 2.
  fn add_extra_mv_candidate(
    &self,
    blk: &Block,
    ref_frames: [usize; 2],
    mv_stack: &mut Vec<CandidateMV>,
    fi: &FrameInvariants,
    is_compound: bool,
    ref_id_count: &mut [usize; 2],
    ref_id_mvs: &mut [[MotionVector; 2]; 2],
    ref_diff_count: &mut [usize; 2],
    ref_diff_mvs: &mut [[MotionVector; 2]; 2],
  ) {
    if is_compound {
      for cand_list in 0..2 {
        let cand_ref = blk.ref_frames[cand_list];
        if cand_ref > INTRA_FRAME && cand_ref != NONE_FRAME {
          for list in 0..2 {
            let mut cand_mv = blk.mv[cand_list];
            if cand_ref == ref_frames[list] && ref_id_count[list] < 2 {
              ref_id_mvs[list][ref_id_count[list]] = cand_mv;
              ref_id_count[list] = ref_id_count[list] + 1;
            } else if ref_diff_count[list] < 2 {
              // Flip the MV when the candidate's reference points the
              // opposite temporal direction from the target reference.
              if fi.ref_frame_sign_bias[cand_ref - LAST_FRAME] !=
                fi.ref_frame_sign_bias[ref_frames[list] - LAST_FRAME] {
                cand_mv.row = -cand_mv.row;
                cand_mv.col = -cand_mv.col;
              }
              ref_diff_mvs[list][ref_diff_count[list]] = cand_mv;
              ref_diff_count[list] = ref_diff_count[list] + 1;
            }
          }
        }
      }
    } else {
      for cand_list in 0..2 {
        let cand_ref = blk.ref_frames[cand_list];
        if cand_ref > INTRA_FRAME && cand_ref != NONE_FRAME {
          let mut mv = blk.mv[cand_list];
          if fi.ref_frame_sign_bias[cand_ref - LAST_FRAME] !=
            fi.ref_frame_sign_bias[ref_frames[0] - LAST_FRAME] {
            mv.row = -mv.row;
            mv.col = -mv.col;
          }
          if !self.find_matching_mv(mv, mv_stack) {
            let mv_cand = CandidateMV {
              this_mv: mv,
              comp_mv: MotionVector { row: 0, col: 0 },
              weight: 2
            };
            mv_stack.push(mv_cand);
          }
        }
      }
    }
  }
  /// Scans one row of neighboring blocks at `row_offset` above `bo`,
  /// feeding matching MVs into `mv_stack` via `add_ref_mv_candidate`.
  /// Updates `processed_rows` so outer passes can skip rows already covered
  /// by tall neighbors. Returns true if any neighbor's references matched.
  fn scan_row_mbmi(&mut self, bo: &BlockOffset, row_offset: isize, max_row_offs: isize,
                   processed_rows: &mut isize, ref_frames: [usize; 2],
                   mv_stack: &mut Vec<CandidateMV>, newmv_count: &mut usize, bsize: BlockSize,
                   is_compound: bool) -> bool {
    let bc = &self.bc;
    let target_n4_w = bsize.width_mi();
    // Stop at the block width, the frame edge, or one superblock width.
    let end_mi = cmp::min(cmp::min(target_n4_w, bc.cols - bo.x),
                          BLOCK_64X64.width_mi());
    let n4_w_8 = BLOCK_8X8.width_mi();
    let n4_w_16 = BLOCK_16X16.width_mi();
    let mut col_offset = 0;
    if row_offset.abs() > 1 {
      col_offset = 1;
      if ((bo.x & 0x01) != 0) && (target_n4_w < n4_w_8) {
        col_offset -= 1;
      }
    }
    let use_step_16 = target_n4_w >= 16;
    let mut found_match = false;
    let mut i = 0;
    while i < end_mi {
      let cand = bc.at(&bo.with_offset(col_offset + i as isize, row_offset));
      let n4_w = cand.n4_w;
      // Step by the candidate's width, clamped by block size / minimum step.
      let mut len = cmp::min(target_n4_w, n4_w);
      if use_step_16 {
        len = cmp::max(n4_w_16, len);
      } else if row_offset.abs() > 1 {
        len = cmp::max(len, n4_w_8);
      }
      let mut weight = 2 as u32;
      if target_n4_w >= n4_w_8 && target_n4_w <= n4_w {
        let inc = cmp::min(-max_row_offs + row_offset + 1, cand.n4_h as isize);
        assert!(inc >= 0);
        weight = cmp::max(weight, inc as u32);
        *processed_rows = (inc as isize) - row_offset - 1;
      }
      if self.add_ref_mv_candidate(ref_frames, cand, mv_stack, len as u32 * weight, newmv_count, is_compound) {
        found_match = true;
      }
      i += len;
    }
    found_match
  }
  /// Column analogue of `scan_row_mbmi`: scans neighboring blocks at
  /// `col_offset` left of `bo`, feeding matching MVs into `mv_stack` and
  /// updating `processed_cols`. Returns true on any reference match.
  fn scan_col_mbmi(&mut self, bo: &BlockOffset, col_offset: isize, max_col_offs: isize,
                   processed_cols: &mut isize, ref_frames: [usize; 2],
                   mv_stack: &mut Vec<CandidateMV>, newmv_count: &mut usize, bsize: BlockSize,
                   is_compound: bool) -> bool {
    let bc = &self.bc;
    let target_n4_h = bsize.height_mi();
    // Stop at the block height, the frame edge, or one superblock height.
    let end_mi = cmp::min(cmp::min(target_n4_h, bc.rows - bo.y),
                          BLOCK_64X64.height_mi());
    let n4_h_8 = BLOCK_8X8.height_mi();
    let n4_h_16 = BLOCK_16X16.height_mi();
    let mut row_offset = 0;
    if col_offset.abs() > 1 {
      row_offset = 1;
      if ((bo.y & 0x01) != 0) && (target_n4_h < n4_h_8) {
        row_offset -= 1;
      }
    }
    let use_step_16 = target_n4_h >= 16;
    let mut found_match = false;
    let mut i = 0;
    while i < end_mi {
      let cand = bc.at(&bo.with_offset(col_offset, row_offset + i as isize));
      let n4_h = cand.n4_h;
      // Step by the candidate's height, clamped by block size / minimum step.
      let mut len = cmp::min(target_n4_h, n4_h);
      if use_step_16 {
        len = cmp::max(n4_h_16, len);
      } else if col_offset.abs() > 1 {
        len = cmp::max(len, n4_h_8);
      }
      let mut weight = 2 as u32;
      if target_n4_h >= n4_h_8 && target_n4_h <= n4_h {
        let inc = cmp::min(-max_col_offs + col_offset + 1, cand.n4_w as isize);
        assert!(inc >= 0);
        weight = cmp::max(weight, inc as u32);
        *processed_cols = (inc as isize) - col_offset - 1;
      }
      if self.add_ref_mv_candidate(ref_frames, cand, mv_stack, len as u32 * weight, newmv_count, is_compound) {
        found_match = true;
      }
      i += len;
    }
    found_match
  }
fn scan_blk_mbmi(&mut self, bo: &BlockOffset, ref_frames: [usize; 2],
mv_stack: &mut Vec<CandidateMV>, newmv_count: &mut usize,
is_compound: bool) -> bool {
if bo.x >= self.bc.cols || bo.y >= self.bc.rows {
return false;
}
let weight = 2 * BLOCK_8X8.width_mi() as u32;
/* Always assume its within a tile, probably wrong */
self.add_ref_mv_candidate(ref_frames, self.bc.at(bo), mv_stack, weight, newmv_count, is_compound)
}
fn add_offset(&mut self, mv_stack: &mut Vec<CandidateMV>) {
for mut cand_mv in mv_stack {
cand_mv.weight += REF_CAT_LEVEL;
}
}
fn setup_mvref_list(&mut self, bo: &BlockOffset, ref_frames: [usize; 2], mv_stack: &mut Vec<CandidateMV>,
bsize: BlockSize, is_sec_rect: bool, fi: &FrameInvariants, is_compound: bool) -> usize {
let (_rf, _rf_num) = self.get_mvref_ref_frames(INTRA_FRAME);
let target_n4_h = bsize.height_mi();
let target_n4_w = bsize.width_mi();
let mut max_row_offs = 0 as isize;
let row_adj = (target_n4_h < BLOCK_8X8.height_mi()) && (bo.y & 0x01) != 0x0;
let mut max_col_offs = 0 as isize;
let col_adj = (target_n4_w < BLOCK_8X8.width_mi()) && (bo.x & 0x01) != 0x0;
let mut processed_rows = 0 as isize;
let mut processed_cols = 0 as isize;
let up_avail = bo.y > 0;
let left_avail = bo.x > 0;
if up_avail {
max_row_offs = -2 * MVREF_ROW_COLS as isize + row_adj as isize;
// limit max offset for small blocks
if target_n4_h < BLOCK_8X8.height_mi() {
max_row_offs = -2 * 2 + row_adj as isize;
}
let rows = self.bc.rows;
max_row_offs = self.find_valid_row_offs(max_row_offs, bo.y, rows);
}
if left_avail {
max_col_offs = -2 * MVREF_ROW_COLS as isize + col_adj as isize;
// limit max offset for small blocks
if target_n4_w < BLOCK_8X8.width_mi() {
max_col_offs = -2 * 2 + col_adj as isize;
}
max_col_offs = self.find_valid_col_offs(max_col_offs, bo.x);
}
let mut row_match = false;
let mut col_match = false;
let mut newmv_count: usize = 0;
if max_row_offs.abs() >= 1 {
let found_match = self.scan_row_mbmi(bo, -1, max_row_offs, &mut processed_rows, ref_frames, mv_stack,
&mut newmv_count, bsize, is_compound);
row_match |= found_match;
}
if max_col_offs.abs() >= 1 {
let found_match = self.scan_col_mbmi(bo, -1, max_col_offs, &mut processed_cols, ref_frames, mv_stack,
&mut newmv_count, bsize, is_compound);
col_match |= found_match;
}
if self.has_tr(bo, bsize, is_sec_rect) {
let found_match = self.scan_blk_mbmi(&bo.with_offset(target_n4_w as isize, -1), ref_frames, mv_stack,
&mut newmv_count, is_compound);
row_match |= found_match;
}
let nearest_match = if row_match { 1 } else { 0 } + if col_match { 1 } else { 0 };
self.add_offset(mv_stack);
/* Scan the second outer area. */
let mut far_newmv_count: usize = 0; // won't be used
let found_match = self.scan_blk_mbmi(
&bo.with_offset(-1, -1), ref_frames, mv_stack, &mut far_newmv_count, is_compound
);
row_match |= found_match;
for idx in 2..MVREF_ROW_COLS+1 {
let row_offset = -2 * idx as isize + 1 + row_adj as isize;
let col_offset = -2 * idx as isize + 1 + col_adj as isize;
if row_offset.abs() <= max_row_offs.abs() && row_offset.abs() > processed_rows {
let found_match = self.scan_row_mbmi(bo, row_offset, max_row_offs, &mut processed_rows, ref_frames, mv_stack,
&mut far_newmv_count, bsize, is_compound);
row_match |= found_match;
}
if col_offset.abs() <= max_col_offs.abs() && col_offset.abs() > processed_cols {
let found_match = self.scan_col_mbmi(bo, col_offset, max_col_offs, &mut processed_cols, ref_frames, mv_stack,
&mut far_newmv_count, bsize, is_compound);
col_match |= found_match;
}
}
let total_match = if row_match { 1 } else { 0 } + if col_match { 1 } else { 0 };
assert!(total_match >= nearest_match);
// mode_context contains both newmv_context and refmv_context, where newmv_context
// lies in the REF_MVOFFSET least significant bits
let mode_context = match nearest_match {
0 => cmp::min(total_match, 1) + (total_match << REFMV_OFFSET),
1 => 3 - cmp::min(newmv_count, 1) + ((2 + total_match) << REFMV_OFFSET),
_ => 5 - cmp::min(newmv_count, 1) + (5 << REFMV_OFFSET)
};
/* TODO: Find nearest match and assign nearest and near mvs */
// 7.10.2.11 Sort MV stack according to weight
mv_stack.sort_by(|a, b| b.weight.cmp(&a.weight));
if mv_stack.len() < 2 {
// 7.10.2.12 Extra search process
let w4 = bsize.width_mi().min(16).min(self.bc.cols - bo.x);
let h4 = bsize.height_mi().min(16).min(self.bc.rows - bo.y);
let num4x4 = w4.min(h4);
let passes = if up_avail { 0 } else { 1 } .. if left_avail { 2 } else { 1 };
let mut ref_id_count = [0 as usize; 2];
let mut ref_diff_count = [0 as usize; 2];
let mut ref_id_mvs = [[MotionVector { row: 0, col: 0 }; 2]; 2];
let mut ref_diff_mvs = [[MotionVector { row: 0, col: 0 }; 2]; 2];
for pass in passes {
let mut idx = 0;
while idx < num4x4 && mv_stack.len() < 2 {
let rbo = if pass == 0 {
bo.with_offset(idx as isize, -1)
} else {
bo.with_offset(-1, idx as isize)
};
let blk = &self.bc.at(&rbo);
self.add_extra_mv_candidate(
blk, ref_frames, mv_stack, fi, is_compound,
&mut ref_id_count, &mut ref_id_mvs, &mut ref_diff_count, &mut ref_diff_mvs
);
idx += if pass == 0 {
blk.n4_w
} else {
blk.n4_h
};
}
}
if is_compound {
let mut combined_mvs = [[MotionVector { row: 0, col: 0}; 2]; 2];
for list in 0..2 {
let mut comp_count = 0;
for idx in 0..ref_id_count[list] {
combined_mvs[comp_count][list] = ref_id_mvs[list][idx];
comp_count = comp_count + 1;
}
for idx in 0..ref_diff_count[list] {
if comp_count < 2 {
combined_mvs[comp_count][list] = ref_diff_mvs[list][idx];
comp_count = comp_count + 1;
}
}
}
if mv_stack.len() == 1 {
let mv_cand = if combined_mvs[0][0].row == mv_stack[0].this_mv.row &&
combined_mvs[0][0].col == mv_stack[0].this_mv.col &&
combined_mvs[0][1].row == mv_stack[0].comp_mv.row &&
combined_mvs[0][1].col == mv_stack[0].comp_mv.col {
CandidateMV {
this_mv: combined_mvs[1][0],
comp_mv: combined_mvs[1][1],
weight: 2
}
} else {
CandidateMV {
this_mv: combined_mvs[0][0],
comp_mv: combined_mvs[0][1],
weight: 2
}
};
mv_stack.push(mv_cand);
} else {
for idx in 0..2 {
let mv_cand = CandidateMV {
this_mv: combined_mvs[idx][0],
comp_mv: combined_mvs[idx][1],
weight: 2
};
mv_stack.push(mv_cand);
}
}
assert!(mv_stack.len() == 2);
}
}
/* TODO: Handle single reference frame extension */
// clamp mvs
for mv in mv_stack {
let blk_w = bsize.width();
let blk_h = bsize.height();
let border_w = 128 + blk_w as isize * 8;
let border_h = 128 + blk_h as isize * 8;
let mvx_min = -(bo.x as isize) * (8 * MI_SIZE) as isize - border_w;
let mvx_max = (self.bc.cols - bo.x - blk_w / MI_SIZE) as isize * (8 * MI_SIZE) as isize + border_w;
let mvy_min = -(bo.y as isize) * (8 * MI_SIZE) as isize - border_h;
let mvy_max = (self.bc.rows - bo.y - blk_h / MI_SIZE) as isize * (8 * MI_SIZE) as isize + border_h;
mv.this_mv.row = (mv.this_mv.row as isize).max(mvy_min).min(mvy_max) as i16;
mv.this_mv.col = (mv.this_mv.col as isize).max(mvx_min).min(mvx_max) as i16;
mv.comp_mv.row = (mv.comp_mv.row as isize).max(mvy_min).min(mvy_max) as i16;
mv.comp_mv.col = (mv.comp_mv.col as isize).max(mvx_min).min(mvx_max) as i16;
}
mode_context
}
/// Finds motion-vector references for the block at `bo` (AV1 spec 7.10.2),
/// filling `mv_stack` with candidate MVs and returning the packed mode
/// context (newmv/refmv contexts) produced by `setup_mvref_list`.
///
/// `ref_frames[0]` must not be `NONE_FRAME`. Intra blocks produce no
/// candidates and a zero context. Global-motion conversion is still TODO.
pub fn find_mvrefs(&mut self, bo: &BlockOffset, ref_frames: [usize; 2],
                   mv_stack: &mut Vec<CandidateMV>, bsize: BlockSize, is_sec_rect: bool,
                   fi: &FrameInvariants, is_compound: bool) -> usize {
  assert!(ref_frames[0] != NONE_FRAME);
  if ref_frames[0] < REF_FRAMES {
    if ref_frames[0] != INTRA_FRAME {
      /* TODO: convert global mv to an mv here */
    } else {
      /* TODO: set the global mv ref to invalid here */
    }
  }
  if ref_frames[0] != INTRA_FRAME {
    /* TODO: Set zeromv ref to the converted global motion vector */
  } else {
    /* TODO: Set the zeromv ref to 0 */
  }
  // Intra (or invalid) primary reference: no MV candidates, context 0.
  if ref_frames[0] <= INTRA_FRAME {
    return 0;
  }
  self.setup_mvref_list(bo, ref_frames, mv_stack, bsize, is_sec_rect, fi, is_compound)
}
/// Tallies how often each reference frame is used by the above and left
/// neighbours of `bo` and stores the counts on the block itself; the counts
/// feed the reference-frame context derivations (`get_ref_frame_ctx_b0` etc).
pub fn fill_neighbours_ref_counts(&mut self, bo: &BlockOffset) {
  let mut ref_counts = [0; TOTAL_REFS_PER_FRAME];
  let above_b = self.bc.above_of(bo);
  let left_b = self.bc.left_of(bo);
  // Only count neighbours that exist (not off the top/left edge) and are inter.
  if bo.y > 0 && above_b.is_inter() {
    ref_counts[above_b.ref_frames[0] as usize] += 1;
    if above_b.has_second_ref() {
      ref_counts[above_b.ref_frames[1] as usize] += 1;
    }
  }
  if bo.x > 0 && left_b.is_inter() {
    ref_counts[left_b.ref_frames[0] as usize] += 1;
    if left_b.has_second_ref() {
      ref_counts[left_b.ref_frames[1] as usize] += 1;
    }
  }
  self.bc.at_mut(bo).neighbors_ref_counts = ref_counts;
}
/// Maps a pair of neighbour reference counts to a 3-way CDF context:
/// 0 when the first count is smaller, 1 when equal, 2 when larger.
fn ref_count_ctx(counts0: usize, counts1: usize) -> usize {
  match counts0.cmp(&counts1) {
    std::cmp::Ordering::Less => 0,
    std::cmp::Ordering::Equal => 1,
    std::cmp::Ordering::Greater => 2,
  }
}
/// Context for the first single-reference bit (forward vs backward group),
/// comparing neighbour usage of forward refs against backward refs.
fn get_ref_frame_ctx_b0(&mut self, bo: &BlockOffset) -> usize {
  let ref_counts = self.bc.at(bo).neighbors_ref_counts;
  let fwd_cnt = ref_counts[LAST_FRAME] + ref_counts[LAST2_FRAME] +
                ref_counts[LAST3_FRAME] + ref_counts[GOLDEN_FRAME];
  let bwd_cnt = ref_counts[BWDREF_FRAME] + ref_counts[ALTREF2_FRAME] +
                ref_counts[ALTREF_FRAME];
  ContextWriter::ref_count_ctx(fwd_cnt, bwd_cnt)
}
/// Context for choosing between the {BWDREF, ALTREF2} group and ALTREF,
/// based on neighbour reference counts.
fn get_pred_ctx_brfarf2_or_arf(&mut self, bo: &BlockOffset) -> usize {
  let ref_counts = self.bc.at(bo).neighbors_ref_counts;
  let brfarf2_count = ref_counts[BWDREF_FRAME] + ref_counts[ALTREF2_FRAME];
  let arf_count = ref_counts[ALTREF_FRAME];
  ContextWriter::ref_count_ctx(brfarf2_count, arf_count)
}
/// Context for choosing between the {LAST, LAST2} group and the
/// {LAST3, GOLDEN} group, based on neighbour reference counts.
fn get_pred_ctx_ll2_or_l3gld(&mut self, bo: &BlockOffset) -> usize {
  let ref_counts = self.bc.at(bo).neighbors_ref_counts;
  let l_l2_count = ref_counts[LAST_FRAME] + ref_counts[LAST2_FRAME];
  let l3_gold_count = ref_counts[LAST3_FRAME] + ref_counts[GOLDEN_FRAME];
  ContextWriter::ref_count_ctx(l_l2_count, l3_gold_count)
}
/// Context for choosing between LAST and LAST2, from neighbour counts.
fn get_pred_ctx_last_or_last2(&mut self, bo: &BlockOffset) -> usize {
  let ref_counts = self.bc.at(bo).neighbors_ref_counts;
  let l_count = ref_counts[LAST_FRAME];
  let l2_count = ref_counts[LAST2_FRAME];
  ContextWriter::ref_count_ctx(l_count, l2_count)
}
/// Context for choosing between LAST3 and GOLDEN, from neighbour counts.
fn get_pred_ctx_last3_or_gold(&mut self, bo: &BlockOffset) -> usize {
  let ref_counts = self.bc.at(bo).neighbors_ref_counts;
  let l3_count = ref_counts[LAST3_FRAME];
  let gold_count = ref_counts[GOLDEN_FRAME];
  ContextWriter::ref_count_ctx(l3_count, gold_count)
}
/// Context for choosing between BWDREF and ALTREF2, from neighbour counts.
fn get_pred_ctx_brf_or_arf2(&mut self, bo: &BlockOffset) -> usize {
  let ref_counts = self.bc.at(bo).neighbors_ref_counts;
  let brf_count = ref_counts[BWDREF_FRAME];
  let arf2_count = ref_counts[ALTREF2_FRAME];
  ContextWriter::ref_count_ctx(brf_count, arf2_count)
}
/// Derives the context (0..=4) for the compound-vs-single reference mode
/// flag from the above and left neighbours' reference configurations.
/// Out-of-frame neighbours are treated as intra / no second reference.
fn get_comp_mode_ctx(&self, bo: &BlockOffset) -> usize {
  // A reference is "backward" when it lies in BWDREF..=ALTREF.
  fn check_backward(ref_frame: usize) -> bool {
    ref_frame >= BWDREF_FRAME && ref_frame <= ALTREF_FRAME
  }
  let avail_left = bo.x > 0;
  let avail_up = bo.y > 0;
  let bo_left = bo.with_offset(-1, 0);
  let bo_up = bo.with_offset(0, -1);
  let above0 = if avail_up { self.bc.at(&bo_up).ref_frames[0] } else { INTRA_FRAME };
  let above1 = if avail_up { self.bc.at(&bo_up).ref_frames[1] } else { NONE_FRAME };
  let left0 = if avail_left { self.bc.at(&bo_left).ref_frames[0] } else { INTRA_FRAME };
  let left1 = if avail_left { self.bc.at(&bo_left).ref_frames[1] } else { NONE_FRAME };
  let left_single = left1 == NONE_FRAME;
  let above_single = above1 == NONE_FRAME;
  let left_intra = left0 == INTRA_FRAME;
  let above_intra = above0 == INTRA_FRAME;
  let left_backward = check_backward(left0);
  let above_backward = check_backward(above0);
  if avail_left && avail_up {
    if above_single && left_single {
      // Both neighbours single-ref: context depends on direction agreement.
      (above_backward ^ left_backward) as usize
    } else if above_single {
      2 + (above_backward || above_intra) as usize
    } else if left_single {
      2 + (left_backward || left_intra) as usize
    } else {
      4
    }
  } else if avail_up {
    if above_single {
      above_backward as usize
    } else {
      3
    }
  } else if avail_left {
    if left_single {
      left_backward as usize
    } else {
      3
    }
  } else {
    // No neighbours available at all.
    1
  }
}
/// Derives the context (0..=4) for the compound reference type symbol
/// (unidirectional vs bidirectional pair) from the above/left neighbours.
fn get_comp_ref_type_ctx(&self, bo: &BlockOffset) -> usize {
  // True when both references point the same temporal direction.
  fn is_samedir_ref_pair(ref0: usize, ref1: usize) -> bool {
    (ref0 >= BWDREF_FRAME && ref0 != NONE_FRAME) == (ref1 >= BWDREF_FRAME && ref1 != NONE_FRAME)
  }
  let avail_left = bo.x > 0;
  let avail_up = bo.y > 0;
  let bo_left = bo.with_offset(-1, 0);
  let bo_up = bo.with_offset(0, -1);
  let above0 = if avail_up { self.bc.at(&bo_up).ref_frames[0] } else { INTRA_FRAME };
  let above1 = if avail_up { self.bc.at(&bo_up).ref_frames[1] } else { NONE_FRAME };
  let left0 = if avail_left { self.bc.at(&bo_left).ref_frames[0] } else { INTRA_FRAME };
  let left1 = if avail_left { self.bc.at(&bo_left).ref_frames[1] } else { NONE_FRAME };
  let left_single = left1 == NONE_FRAME;
  let above_single = above1 == NONE_FRAME;
  let left_intra = left0 == INTRA_FRAME;
  let above_intra = above0 == INTRA_FRAME;
  let above_comp_inter = avail_up && !above_intra && !above_single;
  let left_comp_inter = avail_left && !left_intra && !left_single;
  let above_uni_comp = above_comp_inter && is_samedir_ref_pair(above0, above1);
  let left_uni_comp = left_comp_inter && is_samedir_ref_pair(left0, left1);
  if avail_up && !above_intra && avail_left && !left_intra {
    // Both neighbours present and inter-coded.
    let samedir = is_samedir_ref_pair(above0, left0);
    if !above_comp_inter && !left_comp_inter {
      1 + 2 * samedir as usize
    } else if !above_comp_inter {
      if !left_uni_comp { 1 } else { 3 + samedir as usize }
    } else if !left_comp_inter {
      if !above_uni_comp { 1 } else { 3 + samedir as usize }
    } else {
      if !above_uni_comp && !left_uni_comp {
        0
      } else if !above_uni_comp || !left_uni_comp {
        2
      } else {
        3 + ((above0 == BWDREF_FRAME) == (left0 == BWDREF_FRAME)) as usize
      }
    }
  } else if avail_up && avail_left {
    // Both present but at least one intra.
    if above_comp_inter {
      1 + 2 * above_uni_comp as usize
    } else if left_comp_inter {
      1 + 2 * left_uni_comp as usize
    } else {
      2
    }
  } else if above_comp_inter {
    4 * above_uni_comp as usize
  } else if left_comp_inter {
    4 * left_uni_comp as usize
  } else {
    2
  }
}
/// Writes the reference-frame selection for the block at `bo`: the
/// compound-vs-single flag (when the frame allows a choice), then either the
/// compound reference tree or the single-reference bit tree, each with
/// neighbour-derived contexts. Order and symbols must match the AV1 spec.
pub fn write_ref_frames(&mut self, w: &mut dyn Writer, fi: &FrameInvariants, bo: &BlockOffset) {
  let rf = self.bc.at(bo).ref_frames;
  let sz = self.bc.at(bo).n4_w.min(self.bc.at(bo).n4_h);
  /* TODO: Handle multiple references */
  let comp_mode = self.bc.at(bo).has_second_ref();
  // comp_mode is only signalled for blocks >= 8x8 when the frame permits
  // reference-mode selection; otherwise single reference is implied.
  if fi.reference_mode != ReferenceMode::SINGLE && sz >= 2 {
    let ctx = self.get_comp_mode_ctx(bo);
    symbol_with_update!(self, w, comp_mode as u32, &mut self.fc.comp_mode_cdf[ctx]);
  } else {
    assert!(!comp_mode);
  }
  if comp_mode {
    // Only bidirectional compound prediction is produced by this encoder.
    let comp_ref_type = 1 as u32; // bidir
    let ctx = self.get_comp_ref_type_ctx(bo);
    symbol_with_update!(self, w, comp_ref_type, &mut self.fc.comp_ref_type_cdf[ctx]);
    if comp_ref_type == 0 {
      unimplemented!();
    } else {
      // Forward reference: group bit, then the bit within the group.
      let compref = rf[0] == GOLDEN_FRAME || rf[0] == LAST3_FRAME;
      let ctx = self.get_pred_ctx_ll2_or_l3gld(bo);
      symbol_with_update!(self, w, compref as u32, &mut self.fc.comp_ref_cdf[ctx][0]);
      if !compref {
        let compref_p1 = rf[0] == LAST2_FRAME;
        let ctx = self.get_pred_ctx_last_or_last2(bo);
        symbol_with_update!(self, w, compref_p1 as u32, &mut self.fc.comp_ref_cdf[ctx][1]);
      } else {
        let compref_p2 = rf[0] == GOLDEN_FRAME;
        let ctx = self.get_pred_ctx_last3_or_gold(bo);
        symbol_with_update!(self, w, compref_p2 as u32, &mut self.fc.comp_ref_cdf[ctx][2]);
      }
      // Backward reference: ALTREF vs {BWDREF, ALTREF2}, then within group.
      let comp_bwdref = rf[1] == ALTREF_FRAME;
      let ctx = self.get_pred_ctx_brfarf2_or_arf(bo);
      symbol_with_update!(self, w, comp_bwdref as u32, &mut self.fc.comp_bwd_ref_cdf[ctx][0]);
      if !comp_bwdref {
        let comp_bwdref_p1 = rf[1] == ALTREF2_FRAME;
        let ctx = self.get_pred_ctx_brf_or_arf2(bo);
        symbol_with_update!(self, w, comp_bwdref_p1 as u32, &mut self.fc.comp_bwd_ref_cdf[ctx][1]);
      }
    }
  } else {
    // Single-reference bit tree: b0 splits forward/backward groups,
    // the remaining bits narrow down to a specific reference.
    let b0_ctx = self.get_ref_frame_ctx_b0(bo);
    let b0 = rf[0] <= ALTREF_FRAME && rf[0] >= BWDREF_FRAME;
    symbol_with_update!(self, w, b0 as u32, &mut self.fc.single_ref_cdfs[b0_ctx][0]);
    if b0 {
      let b1_ctx = self.get_pred_ctx_brfarf2_or_arf(bo);
      let b1 = rf[0] == ALTREF_FRAME;
      symbol_with_update!(self, w, b1 as u32, &mut self.fc.single_ref_cdfs[b1_ctx][1]);
      if !b1 {
        let b5_ctx = self.get_pred_ctx_brf_or_arf2(bo);
        let b5 = rf[0] == ALTREF2_FRAME;
        symbol_with_update!(self, w, b5 as u32, &mut self.fc.single_ref_cdfs[b5_ctx][5]);
      }
    } else {
      let b2_ctx = self.get_pred_ctx_ll2_or_l3gld(bo);
      let b2 = rf[0] == LAST3_FRAME || rf[0] == GOLDEN_FRAME;
      symbol_with_update!(self, w, b2 as u32, &mut self.fc.single_ref_cdfs[b2_ctx][2]);
      if !b2 {
        let b3_ctx = self.get_pred_ctx_last_or_last2(bo);
        let b3 = rf[0] != LAST_FRAME;
        symbol_with_update!(self, w, b3 as u32, &mut self.fc.single_ref_cdfs[b3_ctx][3]);
      } else {
        let b4_ctx = self.get_pred_ctx_last3_or_gold(bo);
        let b4 = rf[0] != LAST3_FRAME;
        symbol_with_update!(self, w, b4 as u32, &mut self.fc.single_ref_cdfs[b4_ctx][4]);
      }
    }
  }
}
/// Writes a compound inter prediction mode symbol. `ctx` is the packed
/// mode context from MV reference search; the newmv and refmv subcontexts
/// are folded into a single compound-mode CDF index (0..=7).
pub fn write_compound_mode(
  &mut self, w: &mut dyn Writer, mode: PredictionMode, ctx: usize,
) {
  let newmv_ctx = ctx & NEWMV_CTX_MASK;
  let refmv_ctx = (ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
  let ctx = if refmv_ctx < 2 {
    newmv_ctx.min(1)
  } else if refmv_ctx < 4 {
    (newmv_ctx + 1).min(4)
  } else {
    (newmv_ctx.max(1) + 3).min(7)
  };
  // Compound modes are coded as an offset from NEAREST_NEARESTMV.
  assert!(mode >= PredictionMode::NEAREST_NEARESTMV);
  let val = mode as u32 - PredictionMode::NEAREST_NEARESTMV as u32;
  symbol_with_update!(self, w, val, &mut self.fc.compound_mode_cdf[ctx]);
}
/// Writes a single-reference inter mode as a cascade of binary symbols:
/// NEWMV? then GLOBALMV? then NEARESTMV-vs-NEARMV, each with its own
/// subcontext extracted from the packed `ctx`.
pub fn write_inter_mode(&mut self, w: &mut dyn Writer, mode: PredictionMode, ctx: usize) {
  let newmv_ctx = ctx & NEWMV_CTX_MASK;
  symbol_with_update!(self, w, (mode != PredictionMode::NEWMV) as u32, &mut self.fc.newmv_cdf[newmv_ctx]);
  if mode != PredictionMode::NEWMV {
    let zeromv_ctx = (ctx >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK;
    symbol_with_update!(self, w, (mode != PredictionMode::GLOBALMV) as u32, &mut self.fc.zeromv_cdf[zeromv_ctx]);
    if mode != PredictionMode::GLOBALMV {
      let refmv_ctx = (ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
      symbol_with_update!(self, w, (mode != PredictionMode::NEARESTMV) as u32, &mut self.fc.refmv_cdf[refmv_ctx]);
    }
  }
}
/// Writes one dynamic-reference-list (DRL) index bit with context `ctx`.
pub fn write_drl_mode(&mut self, w: &mut dyn Writer, drl_mode: bool, ctx: usize) {
  symbol_with_update!(self, w, drl_mode as u32, &mut self.fc.drl_cdfs[ctx]);
}
/// Writes a motion vector as a difference from its reference MV: first the
/// joint type (which components are nonzero), then each nonzero component.
pub fn write_mv(&mut self, w: &mut dyn Writer,
                mv: MotionVector, ref_mv: MotionVector,
                mv_precision: MvSubpelPrecision) {
  let diff = MotionVector { row: mv.row - ref_mv.row, col: mv.col - ref_mv.col };
  let j: MvJointType = av1_get_mv_joint(diff);
  w.symbol_with_update(j as u32, &mut self.fc.nmv_context.joints_cdf);
  // Components are coded only when the joint type marks them nonzero.
  if mv_joint_vertical(j) {
    encode_mv_component(w, diff.row as i32, &mut self.fc.nmv_context.comps[0], mv_precision);
  }
  if mv_joint_horizontal(j) {
    encode_mv_component(w, diff.col as i32, &mut self.fc.nmv_context.comps[1], mv_precision);
  }
}
/// Writes the transform type symbol when the active transform set allows a
/// choice. Inter blocks use the inter CDF; intra blocks additionally
/// condition on the intra prediction direction.
pub fn write_tx_type(
  &mut self, w: &mut dyn Writer, tx_size: TxSize, tx_type: TxType, y_mode: PredictionMode,
  is_inter: bool, use_reduced_tx_set: bool
) {
  let square_tx_size = tx_size.sqr();
  let tx_set =
    get_tx_set(tx_size, is_inter, use_reduced_tx_set);
  let num_tx_types = num_tx_set[tx_set as usize];
  // A set with a single member needs no signalling.
  if num_tx_types > 1 {
    let tx_set_index = get_tx_set_index(tx_size, is_inter, use_reduced_tx_set);
    assert!(tx_set_index > 0);
    assert!(av1_tx_used[tx_set as usize][tx_type as usize] != 0);
    if is_inter {
      symbol_with_update!(
        self,
        w,
        av1_tx_ind[tx_set as usize][tx_type as usize] as u32,
        &mut self.fc.inter_tx_cdf[tx_set_index as usize]
          [square_tx_size as usize]
          [..num_tx_set[tx_set as usize] + 1]
      );
    } else {
      let intra_dir = y_mode;
      // TODO: Once use_filter_intra is enabled,
      // intra_dir =
      // fimode_to_intradir[mbmi->filter_intra_mode_info.filter_intra_mode];
      symbol_with_update!(
        self,
        w,
        av1_tx_ind[tx_set as usize][tx_type as usize] as u32,
        &mut self.fc.intra_tx_cdf[tx_set_index as usize]
          [square_tx_size as usize][intra_dir as usize]
          [..num_tx_set[tx_set as usize] + 1]
      );
    }
  }
}
/// Writes the block-level skip flag with its neighbour-derived context.
pub fn write_skip(&mut self, w: &mut dyn Writer, bo: &BlockOffset, skip: bool) {
  let ctx = self.bc.skip_context(bo);
  symbol_with_update!(self, w, skip as u32, &mut self.fc.skip_cdfs[ctx]);
}
/// Predicts the segment id for `bo` from the above-left, above and left
/// neighbours, returning `(predicted_id, cdf_index)`. Out-of-frame
/// neighbours are represented as -1.
///
/// NOTE(review): neighbours are narrowed with `as i8`; this assumes
/// segmentation ids fit in 7 bits (AV1 allows at most 8 segments, so ids
/// are 0..=7) — confirm if the id range ever changes.
fn get_segment_pred(&mut self, bo: &BlockOffset) -> ( u8, u8 ) {
  let mut prev_ul = -1;
  let mut prev_u = -1;
  let mut prev_l = -1;
  if bo.x > 0 && bo.y > 0 {
    prev_ul = self.bc.above_left_of(bo).segmentation_idx as i8;
  }
  if bo.y > 0 {
    prev_u = self.bc.above_of(bo).segmentation_idx as i8;
  }
  if bo.x > 0 {
    prev_l = self.bc.left_of(bo).segmentation_idx as i8;
  }
  /* Pick CDF index based on number of matching/out-of-bounds segment IDs. */
  let cdf_index: u8;
  if prev_ul < 0 || prev_u < 0 || prev_l < 0 { /* Edge case */
    cdf_index = 0;
  } else if (prev_ul == prev_u) && (prev_ul == prev_l) {
    cdf_index = 2;
  } else if (prev_ul == prev_u) || (prev_ul == prev_l) || (prev_u == prev_l) {
    cdf_index = 1;
  } else {
    cdf_index = 0;
  }
  /* If 2 or more are identical returns that as predictor, otherwise prev_l. */
  let r: i8;
  if prev_u == -1 { /* edge case */
    r = if prev_l == -1 { 0 } else { prev_l };
  } else if prev_l == -1 { /* edge case */
    r = prev_u;
  } else {
    r = if prev_ul == prev_u { prev_u } else { prev_l };
  }
  ( r as u8, cdf_index )
}
/// Maps `x` (0 <= x < max) to a code where values close to the predictor `r`
/// get small codes, interleaving offsets above and below `r`
/// (the AV1 "neg_interleave" used for spatial segment-id coding).
fn neg_interleave(&mut self, x: i32, r: i32, max: i32) -> i32 {
  assert!(x < max);
  // Degenerate predictors: identity or reflected mapping.
  if r == 0 {
    return x;
  } else if r >= (max - 1) {
    return -x + max - 1;
  }
  let diff = x - r;
  if 2 * r < max {
    // Predictor in the lower half: interleave while |diff| <= r,
    // then fall back to the raw value.
    if diff.abs() <= r {
      if diff > 0 {
        return (diff << 1) - 1;
      } else {
        return (-diff) << 1;
      }
    }
    x
  } else {
    // Predictor in the upper half: interleave while |diff| < max - r,
    // then count down from the top.
    if diff.abs() < (max - r) {
      if diff > 0 {
        return (diff << 1) - 1;
      } else {
        return (-diff) << 1;
      }
    }
    (max - x) - 1
  }
}
/// Writes the spatially-predicted segment id for the block. Skipped blocks
/// inherit the predicted id without emitting a symbol.
pub fn write_segmentation(&mut self, w: &mut dyn Writer, bo: &BlockOffset,
                          bsize: BlockSize, skip: bool, last_active_segid: u8) {
  let ( pred, cdf_index ) = self.get_segment_pred(bo);
  if skip {
    self.bc.set_segmentation_idx(bo, bsize, pred);
    return;
  }
  let seg_idx = self.bc.at(bo).segmentation_idx;
  // Code the id relative to the predictor so near-predictor values are cheap.
  let coded_id = self.neg_interleave(seg_idx as i32, pred as i32, (last_active_segid + 1) as i32);
  symbol_with_update!(self, w, coded_id as u32, &mut self.fc.spatial_segmentation_cdfs[cdf_index as usize]);
}
/// Writes a CDEF strength index as a raw literal of `bits` bits.
pub fn write_cdef(&mut self, w: &mut dyn Writer, strength_index: u8, bits: u8) {
  w.literal(bits, strength_index as u32);
}
/// Writes per-block deblocking filter level deltas. With `multi`, one delta
/// per filter/plane is coded; otherwise a single shared delta. Each delta is
/// coded as a capped magnitude symbol, an exp-Golomb-style tail for large
/// magnitudes, and a sign bit.
pub fn write_block_deblock_deltas(&mut self, w: &mut dyn Writer,
                                  bo: &BlockOffset, multi: bool) {
  let block = self.bc.at(bo);
  let deltas = if multi { FRAME_LF_COUNT + PLANES - 3 } else { 1 };
  for i in 0..deltas {
    let delta = block.deblock_deltas[i];
    let abs:u32 = delta.abs() as u32;
    // Magnitude, saturated at DELTA_LF_SMALL.
    if multi {
      symbol_with_update!(self, w, cmp::min(abs, DELTA_LF_SMALL),
                          &mut self.fc.deblock_delta_multi_cdf[i]);
    } else {
      symbol_with_update!(self, w, cmp::min(abs, DELTA_LF_SMALL),
                          &mut self.fc.deblock_delta_cdf);
    };
    // Escape: code the remainder of a large magnitude explicitly.
    if abs >= DELTA_LF_SMALL {
      let bits = msb(abs as i32 - 1) as u32;
      w.literal(3, bits - 1);
      w.literal(bits as u8, abs - (1<<bits) - 1);
    }
    if abs > 0 {
      w.bool(delta < 0, 16384);
    }
  }
}
/// Writes the intra/inter flag for the block with its neighbour context.
pub fn write_is_inter(&mut self, w: &mut dyn Writer, bo: &BlockOffset, is_inter: bool) {
  let ctx = self.bc.intra_inter_context(bo);
  symbol_with_update!(self, w, is_inter as u32, &mut self.fc.intra_inter_cdfs[ctx]);
}
/// Entropy context bucket for a transform size: the rounded average of the
/// square and square-up size categories.
pub fn get_txsize_entropy_ctx(&mut self, tx_size: TxSize) -> usize {
  (tx_size.sqr() as usize + tx_size.sqr_up() as usize + 1) >> 1
}
/// Fills `levels_buf` with the clamped absolute values of `coeffs` (raster
/// order), leaving TX_PAD_TOP padding rows at the top and TX_PAD_HOR padding
/// columns per row so neighbour lookups never need bounds checks.
pub fn txb_init_levels(
  &mut self, coeffs: &[i32], width: usize, height: usize,
  levels_buf: &mut [u8]
) {
  let mut offset = TX_PAD_TOP * (width + TX_PAD_HOR);
  for y in 0..height {
    for x in 0..width {
      // Levels are saturated to 127 so they fit the u8 context buffer.
      levels_buf[offset] = clamp(coeffs[y * width + x].abs(), 0, 127) as u8;
      offset += 1;
    }
    offset += TX_PAD_HOR;
  }
}
pub fn av1_get_coded_tx_size(&mut self, tx_size: TxSize) -> TxSize {
if tx_size == TX_64X64 || tx_size == TX_64X32 || tx_size == TX_32X64 {
return TX_32X32
}
if tx_size == TX_16X64 {
return TX_16X32
}
if tx_size == TX_64X16 {
return TX_32X16
}
tx_size
}
/// log2 of the coded transform-block width (after folding 64 -> 32).
pub fn get_txb_bwl(&mut self, tx_size: TxSize) -> usize {
  av1_get_coded_tx_size(tx_size).width_log2()
}
/// Converts an EOB position to its coded token, storing the within-group
/// remainder in `extra`. Small EOBs use a direct table; larger ones are
/// bucketed by `(eob - 1) >> 5`.
pub fn get_eob_pos_token(&mut self, eob: usize, extra: &mut u32) -> u32 {
  let t = if eob < 33 {
    eob_to_pos_small[eob] as u32
  } else {
    let e = cmp::min((eob - 1) >> 5, 16);
    eob_to_pos_large[e as usize] as u32
  };
  assert!(eob as i32 >= k_eob_group_start[t as usize] as i32);
  *extra = eob as u32 - k_eob_group_start[t as usize] as u32;
  t
}
/// Sums the clipped (max 3) levels of the neighbours used for base-level
/// context derivation. The neighbour template depends on the transform
/// class: 2D uses the 5 nearest positions; 1D classes scan along their axis.
/// `levels` must point at the current coefficient in the padded level buffer.
pub fn get_nz_mag(
  &mut self, levels: &[u8], bwl: usize, tx_class: TxClass
) -> usize {
  // May version.
  // Note: AOMMIN(level, 3) is useless for decoder since level < 3.
  let mut mag = clip_max3(levels[1]); // { 0, 1 }
  mag += clip_max3(levels[(1 << bwl) + TX_PAD_HOR]); // { 1, 0 }
  if tx_class == TX_CLASS_2D {
    mag += clip_max3(levels[(1 << bwl) + TX_PAD_HOR + 1]); // { 1, 1 }
    mag += clip_max3(levels[2]); // { 0, 2 }
    mag += clip_max3(levels[(2 << bwl) + (2 << TX_PAD_HOR_LOG2)]); // { 2, 0 }
  } else if tx_class == TX_CLASS_VERT {
    mag += clip_max3(levels[(2 << bwl) + (2 << TX_PAD_HOR_LOG2)]); // { 2, 0 }
    mag += clip_max3(levels[(3 << bwl) + (3 << TX_PAD_HOR_LOG2)]); // { 3, 0 }
    mag += clip_max3(levels[(4 << bwl) + (4 << TX_PAD_HOR_LOG2)]); // { 4, 0 }
  } else {
    mag += clip_max3(levels[2]); // { 0, 2 }
    mag += clip_max3(levels[3]); // { 0, 3 }
    mag += clip_max3(levels[4]); // { 0, 4 }
  }
  mag as usize
}
/// Derives the base-level ("non-zero map") context for a coefficient from
/// the clipped neighbour magnitude sum `stats` plus its position inside the
/// block. 2D transforms use a 2-D positional offset table; 1-D transform
/// classes offset by the coordinate along their scan axis.
///
/// Fix: the `TX_CLASS_HORIZ`/`TX_CLASS_VERT` arms previously re-computed
/// `row`/`col` in shadowing locals identical to the ones computed above the
/// match; the redundant recomputation is removed (behavior unchanged).
pub fn get_nz_map_ctx_from_stats(
  &mut self,
  stats: usize,
  coeff_idx: usize, // raster order
  bwl: usize,
  tx_size: TxSize,
  tx_class: TxClass
) -> usize {
  // DC coefficient of a 2D transform always maps to context 0.
  if (tx_class as u32 | coeff_idx as u32) == 0 {
    return 0;
  };
  let row = coeff_idx >> bwl;
  let col = coeff_idx - (row << bwl);
  let mut ctx = (stats + 1) >> 1;
  ctx = cmp::min(ctx, 4);
  match tx_class {
    TX_CLASS_2D => {
      // This is the algorithm to generate table av1_nz_map_ctx_offset[].
      // const int width = tx_size_wide[tx_size];
      // const int height = tx_size_high[tx_size];
      // if (width < height) {
      //   if (row < 2) return 11 + ctx;
      // } else if (width > height) {
      //   if (col < 2) return 16 + ctx;
      // }
      // if (row + col < 2) return ctx + 1;
      // if (row + col < 4) return 5 + ctx + 1;
      // return 21 + ctx;
      ctx + av1_nz_map_ctx_offset[tx_size as usize][cmp::min(row, 4)][cmp::min(col, 4)] as usize
    }
    TX_CLASS_HORIZ => ctx + nz_map_ctx_offset_1d[col],
    TX_CLASS_VERT => ctx + nz_map_ctx_offset_1d[row]
  }
}
/// Context for one coefficient's base-level symbol. EOB coefficients use a
/// small position-bucket context; all others derive their context from the
/// neighbour magnitudes in the padded level buffer.
pub fn get_nz_map_ctx(
  &mut self, levels: &[u8], coeff_idx: usize, bwl: usize, height: usize,
  scan_idx: usize, is_eob: bool, tx_size: TxSize, tx_class: TxClass
) -> usize {
  if is_eob {
    // Bucket the EOB position: first, first 1/8, first 1/4, rest.
    if scan_idx == 0 {
      return 0;
    }
    if scan_idx <= (height << bwl) / 8 {
      return 1;
    }
    if scan_idx <= (height << bwl) / 4 {
      return 2;
    }
    return 3;
  }
  // Convert the raster index into the padded-buffer index.
  let padded_idx = coeff_idx + ((coeff_idx >> bwl) << TX_PAD_HOR_LOG2);
  let stats = self.get_nz_mag(&levels[padded_idx..], bwl, tx_class);
  self.get_nz_map_ctx_from_stats(stats, coeff_idx, bwl, tx_size, tx_class)
}
/// Precomputes the base-level context for every coefficient up to the EOB,
/// writing each into `coeff_contexts` indexed by raster position.
pub fn get_nz_map_contexts(
  &mut self, levels: &mut [u8], scan: &[u16], eob: u16,
  tx_size: TxSize, tx_class: TxClass, coeff_contexts: &mut [i8]
) {
  let bwl = self.get_txb_bwl(tx_size);
  let height = av1_get_coded_tx_size(tx_size).height();
  for i in 0..eob {
    let pos = scan[i as usize];
    coeff_contexts[pos as usize] = self.get_nz_map_ctx(
      levels,
      pos as usize,
      bwl,
      height,
      i as usize,
      i == eob - 1, // the last coded coefficient uses the EOB contexts
      tx_size,
      tx_class
    ) as i8;
  }
}
/// Context for the coefficient base-range (extra-level) symbols: a clipped
/// neighbour-magnitude bucket (0..=6), offset by +7 for positions near the
/// start of the transform class's scan and +14 elsewhere.
pub fn get_br_ctx(
  &mut self,
  levels: &[u8],
  c: usize, // raster order
  bwl: usize,
  tx_class: TxClass
) -> usize {
  let row: usize = c >> bwl;
  let col: usize = c - (row << bwl);
  let stride: usize = (1 << bwl) + TX_PAD_HOR;
  let pos: usize = row * stride + col;
  // Two common neighbours: right and below (in padded-buffer layout).
  let mut mag: usize = levels[pos + 1] as usize;
  mag += levels[pos + stride] as usize;
  match tx_class {
    TX_CLASS_2D => {
      mag += levels[pos + stride + 1] as usize; // diagonal neighbour
      mag = cmp::min((mag + 1) >> 1, 6);
      if c == 0 {
        return mag;
      }
      if (row < 2) && (col < 2) {
        return mag + 7;
      }
    }
    TX_CLASS_HORIZ => {
      mag += levels[pos + 2] as usize; // second neighbour along the row
      mag = cmp::min((mag + 1) >> 1, 6);
      if c == 0 {
        return mag;
      }
      if col == 0 {
        return mag + 7;
      }
    }
    TX_CLASS_VERT => {
      mag += levels[pos + (stride << 1)] as usize; // second neighbour down
      mag = cmp::min((mag + 1) >> 1, 6);
      if c == 0 {
        return mag;
      }
      if row == 0 {
        return mag + 7;
      }
    }
  }
  mag + 14
}
/// Gathers the levels at the class-specific neighbour offsets of (row, col)
/// into `mag`. Offsets come from `mag_ref_offset_with_txclass`, indexed by
/// transform class.
pub fn get_level_mag_with_txclass(
  &mut self, levels: &[u8], stride: usize, row: usize, col: usize,
  mag: &mut [usize], tx_class: TxClass
) {
  for idx in 0..CONTEXT_MAG_POSITION_NUM {
    let ref_row =
      row + mag_ref_offset_with_txclass[tx_class as usize][idx][0];
    let ref_col =
      col + mag_ref_offset_with_txclass[tx_class as usize][idx][1];
    let pos = ref_row * stride + ref_col;
    mag[idx] = levels[pos] as usize;
  }
}
/// Codes all coefficients of one transform block with the AV1 level-map
/// scheme: all-zero flag, tx type (luma only), EOB position, base levels in
/// reverse scan order, base-range symbols, then signs and Golomb tails.
///
/// Returns `true` when the block contains at least one nonzero coefficient.
/// Symbol order must exactly match the decoder's parse order.
pub fn write_coeffs_lv_map(
  &mut self, w: &mut dyn Writer, plane: usize, bo: &BlockOffset, coeffs_in: &[i32],
  pred_mode: PredictionMode,
  tx_size: TxSize, tx_type: TxType, plane_bsize: BlockSize, xdec: usize,
  ydec: usize, use_reduced_tx_set: bool
) -> bool {
  let is_inter = pred_mode >= PredictionMode::NEARESTMV;
  //assert!(!is_inter);
  // Note: Both intra and inter mode uses inter scan order. Surprised?
  let scan_order =
    &av1_scan_orders[tx_size as usize][tx_type as usize];
  let scan = scan_order.scan;
  let width = av1_get_coded_tx_size(tx_size).width();
  let height = av1_get_coded_tx_size(tx_size).height();
  // Reorder coefficients into scan order; the running absolute sum feeds
  // both the skip decision and the stored coefficient context.
  let mut coeffs_storage = [0 as i32; 32*32];
  let coeffs = &mut coeffs_storage[..width*height];
  let mut cul_level = 0 as u32;
  for i in 0..width*height {
    coeffs[i] = coeffs_in[scan[i] as usize];
    cul_level += coeffs[i].abs() as u32;
  }
  // eob = one past the last nonzero coefficient in scan order (0 = all zero).
  let eob = if cul_level == 0 { 0 } else {
    coeffs.iter().rposition(|&v| v != 0).map(|i| i + 1).unwrap_or(0)
  };
  let txs_ctx = self.get_txsize_entropy_ctx(tx_size);
  let txb_ctx =
    self.bc.get_txb_ctx(plane_bsize, tx_size, plane, bo, xdec, ydec);
  {
    // All-zero (txb skip) flag.
    let cdf = &mut self.fc.txb_skip_cdf[txs_ctx][txb_ctx.txb_skip_ctx];
    symbol_with_update!(self, w, (eob == 0) as u32, cdf);
  }
  if eob == 0 {
    self.bc.set_coeff_context(plane, bo, tx_size, xdec, ydec, 0);
    return false;
  }
  // Padded raster-order level buffer used for neighbour-based contexts.
  let mut levels_buf = [0 as u8; TX_PAD_2D];
  self.txb_init_levels(
    coeffs_in,
    width,
    height,
    &mut levels_buf
  );
  let tx_class = tx_type_to_class[tx_type as usize];
  let plane_type = if plane == 0 {
    0
  } else {
    1
  } as usize;
  assert!(tx_size <= TX_32X32 || tx_type == DCT_DCT);
  // Signal tx_type for luma plane only
  if plane == 0 {
    self.write_tx_type(
      w,
      tx_size,
      tx_type,
      pred_mode,
      is_inter,
      use_reduced_tx_set
    );
  }
  // Encode EOB
  let mut eob_extra = 0 as u32;
  let eob_pt = self.get_eob_pos_token(eob, &mut eob_extra);
  let eob_multi_size: usize = tx_size.area_log2() - 4;
  let eob_multi_ctx: usize = if tx_class == TX_CLASS_2D {
    0
  } else {
    1
  };
  // EOB group token, sized by transform area.
  symbol_with_update!(
    self,
    w,
    eob_pt - 1,
    match eob_multi_size {
      0 => &mut self.fc.eob_flag_cdf16[plane_type][eob_multi_ctx],
      1 => &mut self.fc.eob_flag_cdf32[plane_type][eob_multi_ctx],
      2 => &mut self.fc.eob_flag_cdf64[plane_type][eob_multi_ctx],
      3 => &mut self.fc.eob_flag_cdf128[plane_type][eob_multi_ctx],
      4 => &mut self.fc.eob_flag_cdf256[plane_type][eob_multi_ctx],
      5 => &mut self.fc.eob_flag_cdf512[plane_type][eob_multi_ctx],
      _ => &mut self.fc.eob_flag_cdf1024[plane_type][eob_multi_ctx],
    }
  );
  // Within-group offset: first (most significant) bit is context-coded,
  // the remaining bits are raw.
  let eob_offset_bits = k_eob_offset_bits[eob_pt as usize];
  if eob_offset_bits > 0 {
    let mut eob_shift = eob_offset_bits - 1;
    let mut bit = if (eob_extra & (1 << eob_shift)) != 0 {
      1
    } else {
      0
    } as u32;
    symbol_with_update!(
      self,
      w,
      bit,
      &mut self.fc.eob_extra_cdf[txs_ctx][plane_type][(eob_pt - 3) as usize]
    );
    for i in 1..eob_offset_bits {
      eob_shift = eob_offset_bits as u16 - 1 - i as u16;
      bit = if (eob_extra & (1 << eob_shift)) != 0 {
        1
      } else {
        0
      };
      w.bit(bit as u16);
    }
  }
  // Base levels, coded in reverse scan order so decoded neighbours are
  // already available for context derivation.
  let mut coeff_contexts = [0 as i8; MAX_TX_SQUARE];
  let levels =
    &mut levels_buf[TX_PAD_TOP * (width + TX_PAD_HOR)..];
  self.get_nz_map_contexts(
    levels,
    scan,
    eob as u16,
    tx_size,
    tx_class,
    &mut coeff_contexts
  );
  let bwl = self.get_txb_bwl(tx_size);
  for c in (0..eob).rev() {
    let pos = scan[c];
    let coeff_ctx = coeff_contexts[pos as usize];
    let v = coeffs_in[pos as usize];
    let level: u32 = v.abs() as u32;
    if c == eob - 1 {
      // EOB coefficient is known nonzero, so its symbol omits level 0.
      symbol_with_update!(
        self,
        w,
        (cmp::min(level, 3) - 1) as u32,
        &mut self.fc.coeff_base_eob_cdf[txs_ctx][plane_type]
          [coeff_ctx as usize]
      );
    } else {
      symbol_with_update!(
        self,
        w,
        (cmp::min(level, 3)) as u32,
        &mut self.fc.coeff_base_cdf[txs_ctx][plane_type][coeff_ctx as usize]
      );
    }
    if level > NUM_BASE_LEVELS as u32 {
      // Base-range symbols extend the level beyond the base (0..=3) range,
      // in chunks of BR_CDF_SIZE - 1.
      let pos = scan[c as usize];
      let v = coeffs_in[pos as usize];
      let level = v.abs() as u16;
      if level <= NUM_BASE_LEVELS as u16 {
        continue;
      }
      let base_range = level - 1 - NUM_BASE_LEVELS as u16;
      let br_ctx = self.get_br_ctx(levels, pos as usize, bwl, tx_class);
      let mut idx = 0;
      loop {
        if idx >= COEFF_BASE_RANGE {
          break;
        }
        let k = cmp::min(base_range - idx as u16, BR_CDF_SIZE as u16 - 1);
        symbol_with_update!(
          self,
          w,
          k as u32,
          &mut self.fc.coeff_br_cdf
            [cmp::min(txs_ctx, TxSize::TX_32X32 as usize)][plane_type]
            [br_ctx]
        );
        if k < BR_CDF_SIZE as u16 - 1 {
          break;
        }
        idx += BR_CDF_SIZE - 1;
      }
    }
  }
  // Loop to code all signs in the transform block,
  // starting with the sign of DC (if applicable)
  for c in 0..eob {
    let v = coeffs_in[scan[c] as usize];
    let level = v.abs() as u32;
    if level == 0 {
      continue;
    }
    let sign = if v < 0 {
      1
    } else {
      0
    };
    if c == 0 {
      // DC sign is context-coded; all other signs are raw bits.
      symbol_with_update!(
        self,
        w,
        sign,
        &mut self.fc.dc_sign_cdf[plane_type][txb_ctx.dc_sign_ctx]
      );
    } else {
      w.bit(sign as u16);
    }
    // save extra golomb codes for separate loop
    if level > (COEFF_BASE_RANGE + NUM_BASE_LEVELS) as u32 {
      let pos = scan[c];
      w.write_golomb(
        coeffs_in[pos as usize].abs() as u16
          - COEFF_BASE_RANGE as u16
          - 1
          - NUM_BASE_LEVELS as u16
      );
    }
  }
  // Store the capped cumulative level (plus DC sign) as this block's
  // coefficient context for neighbours.
  cul_level = cmp::min(COEFF_CONTEXT_MASK as u32, cul_level);
  self.bc.set_dc_sign(&mut cul_level, coeffs[0]);
  self.bc.set_coeff_context(plane, bo, tx_size, xdec, ydec, cul_level as u8);
  true
}
/// Snapshots the CDF state and block context so a speculative encode can be
/// undone with `rollback`.
pub fn checkpoint(&mut self) -> ContextWriterCheckpoint {
  ContextWriterCheckpoint {
    fc: self.fc,
    bc: self.bc.checkpoint()
  }
}
/// Restores CDFs and block context from a `checkpoint`, discarding any
/// symbols coded since.
pub fn rollback(&mut self, checkpoint: &ContextWriterCheckpoint) {
  self.fc = checkpoint.fc;
  self.bc.rollback(&checkpoint.bc);
  // NOTE(review): `cfg(debug)` is a custom cfg flag, not the built-in
  // `debug_assertions`; this block only compiles in with `--cfg debug`.
  // Confirm that is the intended build switch.
  #[cfg(debug)] {
    if self.fc_map.is_some() {
      // Rebuild the debug field map so it tracks the restored CDF state.
      self.fc_map = Some(FieldMap {
        map: self.fc.build_map()
      });
    }
  }
}
}
/* Symbols for coding magnitude class of nonzero components */
const MV_CLASSES:usize = 11;
// MV Class Types
const MV_CLASS_0: usize = 0; /* (0, 2] integer pel */
const MV_CLASS_1: usize = 1; /* (2, 4] integer pel */
const MV_CLASS_2: usize = 2; /* (4, 8] integer pel */
const MV_CLASS_3: usize = 3; /* (8, 16] integer pel */
const MV_CLASS_4: usize = 4; /* (16, 32] integer pel */
const MV_CLASS_5: usize = 5; /* (32, 64] integer pel */
const MV_CLASS_6: usize = 6; /* (64, 128] integer pel */
const MV_CLASS_7: usize = 7; /* (128, 256] integer pel */
const MV_CLASS_8: usize = 8; /* (256, 512] integer pel */
const MV_CLASS_9: usize = 9; /* (512, 1024] integer pel */
const MV_CLASS_10: usize = 10; /* (1024,2048] integer pel */
const CLASS0_BITS: usize = 1; /* bits at integer precision for class 0 */
const CLASS0_SIZE: usize = (1 << CLASS0_BITS);
/* Integer-offset bits for the largest class */
const MV_OFFSET_BITS: usize = (MV_CLASSES + CLASS0_BITS - 2);
const MV_BITS_CONTEXTS: usize = 6;
const MV_FP_SIZE: usize = 4;
/* Total bits of an MV component (class + int + frac + hp) */
const MV_MAX_BITS: usize = (MV_CLASSES + CLASS0_BITS + 2);
const MV_MAX: usize = ((1 << MV_MAX_BITS) - 1);
const MV_VALS: usize = ((MV_MAX << 1) + 1);
/* Effective range actually usable by the encoder */
const MV_IN_USE_BITS: usize = 14;
const MV_UPP: i32 = (1 << MV_IN_USE_BITS);
const MV_LOW: i32 = (-(1 << MV_IN_USE_BITS));
/// Classifies a motion vector by which of its components are nonzero
/// (the MV "joint type" coded before the components themselves).
#[inline(always)]
pub fn av1_get_mv_joint(mv: MotionVector) -> MvJointType {
  match (mv.row == 0, mv.col == 0) {
    (true, true) => MvJointType::MV_JOINT_ZERO,
    (true, false) => MvJointType::MV_JOINT_HNZVZ,
    (false, true) => MvJointType::MV_JOINT_HZVNZ,
    (false, false) => MvJointType::MV_JOINT_HNZVNZ,
  }
}
/// True when the joint type indicates a nonzero vertical (row) component.
#[inline(always)]
pub fn mv_joint_vertical(joint_type: MvJointType) -> bool {
  match joint_type {
    MvJointType::MV_JOINT_HZVNZ | MvJointType::MV_JOINT_HNZVNZ => true,
    _ => false,
  }
}
/// True when the joint type indicates a nonzero horizontal (col) component.
#[inline(always)]
pub fn mv_joint_horizontal(joint_type: MvJointType ) -> bool {
  match joint_type {
    MvJointType::MV_JOINT_HNZVZ | MvJointType::MV_JOINT_HNZVNZ => true,
    _ => false,
  }
}
#[inline(always)]
pub fn mv_class_base(mv_class: usize) -> u32 {
if mv_class != MV_CLASS_0 {
(CLASS0_SIZE << (mv_class as usize + 2)) as u32 }
else { 0 }
}
#[inline(always)]
// If n != 0, returns the floor of log base 2 of n. If n == 0, returns 0.
pub fn log_in_base_2(n: u32) -> u8 {
  // leading_zeros() is 32 for n == 0; capping at 31 makes the result 0.
  31 - n.leading_zeros().min(31) as u8
}
/// Maps the zero-based magnitude `z` to its MV class, writing the offset
/// within the class to `offset`. Magnitudes past class 9's range saturate
/// to MV_CLASS_10.
#[inline(always)]
pub fn get_mv_class(z: u32, offset: &mut u32) -> usize {
  let c =
    if z >= CLASS0_SIZE as u32 * 4096 { MV_CLASS_10 }
    else { log_in_base_2(z >> 3) as usize };
  *offset = z - mv_class_base(c);
  c
}
/// Writes one component (row or col) of a nonzero motion vector to the
/// bitstream, adaptively updating the per-component CDFs as it goes.
///
/// `comp` must be nonzero. The magnitude is decomposed into sign, class,
/// integer bits, fractional bits and an optional high-precision bit, and
/// the symbols are emitted in exactly that order.
pub fn encode_mv_component(w: &mut Writer, comp: i32,
                           mvcomp: &mut NMVComponent, precision: MvSubpelPrecision) {
  assert!(comp != 0);
  let mut offset: u32 = 0;
  let sign: u32 = if comp < 0 { 1 } else { 0 };
  let mag: u32 = if sign == 1 { -comp as u32 } else { comp as u32 };
  // Magnitudes are coded starting from 1, hence `mag - 1`.
  let mv_class = get_mv_class(mag - 1, &mut offset);
  let d = offset >> 3; // int mv data
  let fr = (offset >> 1) & 3; // fractional mv data
  let hp = offset & 1; // high precision mv data
  // Sign
  w.symbol_with_update(sign, &mut mvcomp.sign_cdf);
  // Class
  w.symbol_with_update(mv_class as u32, &mut mvcomp.classes_cdf);
  // Integer bits
  if mv_class == MV_CLASS_0 {
    // Class 0 codes its single integer offset as one symbol.
    w.symbol_with_update(d, &mut mvcomp.class0_cdf);
  } else {
    let n = mv_class + CLASS0_BITS - 1; // number of bits
    // Higher classes emit the integer offset LSB-first, one binary
    // symbol per bit, each with its own CDF.
    for i in 0..n {
      w.symbol_with_update((d >> i) & 1, &mut mvcomp.bits_cdf[i]);
    }
  }
  // Fractional bits (skipped entirely at integer-pel precision)
  if precision > MvSubpelPrecision::MV_SUBPEL_NONE {
    w.symbol_with_update(
      fr,
      if mv_class == MV_CLASS_0 { &mut mvcomp.class0_fp_cdf[d as usize] }
      else { &mut mvcomp.fp_cdf });
  }
  // High precision bit (only when precision allows it)
  if precision > MvSubpelPrecision::MV_SUBPEL_LOW_PRECISION {
    w.symbol_with_update(
      hp,
      if mv_class == MV_CLASS_0 { &mut mvcomp.class0_hp_cdf }
      else { &mut mvcomp.hp_cdf});
  }
}
// Revise has_tr() for rectangular partition sizes (#790)
// Copyright (c) 2017-2018, The rav1e contributors. All rights reserved
//
// This source code is subject to the terms of the BSD 2 Clause License and
// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
// was not distributed with this source code in the LICENSE file, you can
// obtain it at www.aomedia.org/license/software. If the Alliance for Open
// Media Patent License 1.0 was not distributed with this source code in the
// PATENTS file, you can obtain it at www.aomedia.org/license/patent.
#![allow(safe_extern_statics)]
#![allow(non_upper_case_globals)]
#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
#![cfg_attr(feature = "cargo-clippy", allow(unnecessary_mut_passed))]
#![cfg_attr(feature = "cargo-clippy", allow(needless_range_loop))]
#![cfg_attr(feature = "cargo-clippy", allow(collapsible_if))]
use ec::Writer;
use partition::BlockSize::*;
use partition::PredictionMode::*;
use partition::TxSize::*;
use partition::TxType::*;
use partition::*;
use lrf::WIENER_TAPS_MID;
use lrf::SGR_XQD_MID;
use plane::*;
use util::clamp;
use util::msb;
use std::*;
use entropymode::*;
use token_cdfs::*;
use encoder::FrameInvariants;
use scan_order::*;
use encoder::ReferenceMode;
use self::REF_CONTEXTS;
use self::SINGLE_REFS;
pub const PLANES: usize = 3;
const PARTITION_PLOFFSET: usize = 4;
const PARTITION_BLOCK_SIZES: usize = 4 + 1;
const PARTITION_CONTEXTS_PRIMARY: usize = PARTITION_BLOCK_SIZES * PARTITION_PLOFFSET;
pub const PARTITION_CONTEXTS: usize = PARTITION_CONTEXTS_PRIMARY;
pub const PARTITION_TYPES: usize = 4;
pub const MI_SIZE_LOG2: usize = 2;
pub const MI_SIZE: usize = (1 << MI_SIZE_LOG2);
const MAX_MIB_SIZE_LOG2: usize = (MAX_SB_SIZE_LOG2 - MI_SIZE_LOG2);
pub const MAX_MIB_SIZE: usize = (1 << MAX_MIB_SIZE_LOG2);
pub const MAX_MIB_MASK: usize = (MAX_MIB_SIZE - 1);
const MAX_SB_SIZE_LOG2: usize = 6;
pub const MAX_SB_SIZE: usize = (1 << MAX_SB_SIZE_LOG2);
const MAX_SB_SQUARE: usize = (MAX_SB_SIZE * MAX_SB_SIZE);
pub const MAX_TX_SIZE: usize = 64;
const MAX_TX_SQUARE: usize = MAX_TX_SIZE * MAX_TX_SIZE;
pub const INTRA_MODES: usize = 13;
pub const UV_INTRA_MODES: usize = 14;
pub const CFL_JOINT_SIGNS: usize = 8;
pub const CFL_ALPHA_CONTEXTS: usize = 6;
pub const CFL_ALPHABET_SIZE: usize = 16;
pub const SKIP_MODE_CONTEXTS: usize = 3;
pub const COMP_INDEX_CONTEXTS: usize = 6;
pub const COMP_GROUP_IDX_CONTEXTS: usize = 6;
pub const BLOCK_SIZE_GROUPS: usize = 4;
pub const MAX_ANGLE_DELTA: usize = 3;
pub const DIRECTIONAL_MODES: usize = 8;
pub const KF_MODE_CONTEXTS: usize = 5;
pub const EXT_PARTITION_TYPES: usize = 10;
pub const TX_SIZE_SQR_CONTEXTS: usize = 4; // Coded tx_size <= 32x32, so is the # of CDF contexts from tx sizes
pub const TX_SETS: usize = 9;
pub const TX_SETS_INTRA: usize = 3;
pub const TX_SETS_INTER: usize = 4;
pub const TXFM_PARTITION_CONTEXTS: usize = ((TxSize::TX_SIZES - TxSize::TX_8X8 as usize) * 6 - 3);
const MAX_REF_MV_STACK_SIZE: usize = 8;
pub const REF_CAT_LEVEL: u32 = 640;
pub const FRAME_LF_COUNT: usize = 4;
pub const MAX_LOOP_FILTER: usize = 63;
const DELTA_LF_SMALL: u32 = 3;
pub const DELTA_LF_PROBS: usize = DELTA_LF_SMALL as usize;
const DELTA_Q_SMALL: u32 = 3;
pub const DELTA_Q_PROBS: usize = DELTA_Q_SMALL as usize;
// Number of transform types in each set type
static num_tx_set: [usize; TX_SETS] =
[1, 2, 5, 7, 7, 10, 12, 16, 16];
pub static av1_tx_used: [[usize; TX_TYPES]; TX_SETS] = [
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
];
// Maps set types above to the indices used for intra
static tx_set_index_intra: [i8; TX_SETS] =
[0, -1, 2, -1, 1, -1, -1, -1, -16];
// Maps set types above to the indices used for inter
static tx_set_index_inter: [i8; TX_SETS] =
[0, 3, -1, -1, -1, -1, 2, -1, 1];
static av1_tx_ind: [[usize; TX_TYPES]; TX_SETS] = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 3, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 5, 6, 4, 0, 0, 0, 0, 0, 0, 2, 3, 0, 0, 0, 0],
[1, 5, 6, 4, 0, 0, 0, 0, 0, 0, 2, 3, 0, 0, 0, 0],
[1, 2, 3, 6, 4, 5, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 8, 6, 7, 9, 10, 11, 0, 1, 2, 0, 0, 0, 0],
[7, 8, 9, 12, 10, 11, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6],
[7, 8, 9, 12, 10, 11, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6]
];
static ss_size_lookup: [[[BlockSize; 2]; 2]; BlockSize::BLOCK_SIZES_ALL] = [
// ss_x == 0 ss_x == 0 ss_x == 1 ss_x == 1
// ss_y == 0 ss_y == 1 ss_y == 0 ss_y == 1
[ [ BLOCK_4X4, BLOCK_4X4 ], [BLOCK_4X4, BLOCK_4X4 ] ],
[ [ BLOCK_4X8, BLOCK_4X4 ], [BLOCK_4X4, BLOCK_4X4 ] ],
[ [ BLOCK_8X4, BLOCK_4X4 ], [BLOCK_4X4, BLOCK_4X4 ] ],
[ [ BLOCK_8X8, BLOCK_8X4 ], [BLOCK_4X8, BLOCK_4X4 ] ],
[ [ BLOCK_8X16, BLOCK_8X8 ], [BLOCK_4X16, BLOCK_4X8 ] ],
[ [ BLOCK_16X8, BLOCK_16X4 ], [BLOCK_8X8, BLOCK_8X4 ] ],
[ [ BLOCK_16X16, BLOCK_16X8 ], [BLOCK_8X16, BLOCK_8X8 ] ],
[ [ BLOCK_16X32, BLOCK_16X16 ], [BLOCK_8X32, BLOCK_8X16 ] ],
[ [ BLOCK_32X16, BLOCK_32X8 ], [BLOCK_16X16, BLOCK_16X8 ] ],
[ [ BLOCK_32X32, BLOCK_32X16 ], [BLOCK_16X32, BLOCK_16X16 ] ],
[ [ BLOCK_32X64, BLOCK_32X32 ], [BLOCK_16X64, BLOCK_16X32 ] ],
[ [ BLOCK_64X32, BLOCK_64X16 ], [BLOCK_32X32, BLOCK_32X16 ] ],
[ [ BLOCK_64X64, BLOCK_64X32 ], [BLOCK_32X64, BLOCK_32X32 ] ],
[ [ BLOCK_64X128, BLOCK_64X64 ], [ BLOCK_INVALID, BLOCK_32X64 ] ],
[ [ BLOCK_128X64, BLOCK_INVALID ], [ BLOCK_64X64, BLOCK_64X32 ] ],
[ [ BLOCK_128X128, BLOCK_128X64 ], [ BLOCK_64X128, BLOCK_64X64 ] ],
[ [ BLOCK_4X16, BLOCK_4X8 ], [BLOCK_4X16, BLOCK_4X8 ] ],
[ [ BLOCK_16X4, BLOCK_16X4 ], [BLOCK_8X4, BLOCK_8X4 ] ],
[ [ BLOCK_8X32, BLOCK_8X16 ], [BLOCK_INVALID, BLOCK_4X16 ] ],
[ [ BLOCK_32X8, BLOCK_INVALID ], [BLOCK_16X8, BLOCK_16X4 ] ],
[ [ BLOCK_16X64, BLOCK_16X32 ], [BLOCK_INVALID, BLOCK_8X32 ] ],
[ [ BLOCK_64X16, BLOCK_INVALID ], [BLOCK_32X16, BLOCK_32X8 ] ]
];
/// Looks up the plane (chroma) block size corresponding to `bsize` under
/// the given subsampling factors (0 or 1 per axis).
pub fn get_plane_block_size(bsize: BlockSize, subsampling_x: usize, subsampling_y: usize)
  -> BlockSize {
  let per_size = &ss_size_lookup[bsize as usize];
  per_size[subsampling_x][subsampling_y]
}
// Generates 4 bit field in which each bit set to 1 represents
// a blocksize partition 1111 means we split 64x64, 32x32, 16x16
// and 8x8. 1000 means we just split the 64x64 to 32x32
static partition_context_lookup: [[u8; 2]; BlockSize::BLOCK_SIZES_ALL] = [
[ 31, 31 ], // 4X4 - {0b11111, 0b11111}
[ 31, 30 ], // 4X8 - {0b11111, 0b11110}
[ 30, 31 ], // 8X4 - {0b11110, 0b11111}
[ 30, 30 ], // 8X8 - {0b11110, 0b11110}
[ 30, 28 ], // 8X16 - {0b11110, 0b11100}
[ 28, 30 ], // 16X8 - {0b11100, 0b11110}
[ 28, 28 ], // 16X16 - {0b11100, 0b11100}
[ 28, 24 ], // 16X32 - {0b11100, 0b11000}
[ 24, 28 ], // 32X16 - {0b11000, 0b11100}
[ 24, 24 ], // 32X32 - {0b11000, 0b11000}
[ 24, 16 ], // 32X64 - {0b11000, 0b10000}
[ 16, 24 ], // 64X32 - {0b10000, 0b11000}
[ 16, 16 ], // 64X64 - {0b10000, 0b10000}
[ 16, 0 ], // 64X128- {0b10000, 0b00000}
[ 0, 16 ], // 128X64- {0b00000, 0b10000}
[ 0, 0 ], // 128X128-{0b00000, 0b00000}
[ 31, 28 ], // 4X16 - {0b11111, 0b11100}
[ 28, 31 ], // 16X4 - {0b11100, 0b11111}
[ 30, 24 ], // 8X32 - {0b11110, 0b11000}
[ 24, 30 ], // 32X8 - {0b11000, 0b11110}
[ 28, 16 ], // 16X64 - {0b11100, 0b10000}
[ 16, 28 ] // 64X16 - {0b10000, 0b11100}
];
static size_group_lookup: [u8; BlockSize::BLOCK_SIZES_ALL] = [
0, 0,
0, 1,
1, 1,
2, 2,
2, 3,
3, 3,
3, 3, 3, 3, 0,
0, 1,
1, 2,
2
];
static num_pels_log2_lookup: [u8; BlockSize::BLOCK_SIZES_ALL] = [
4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13, 13, 14, 6, 6, 8, 8, 10, 10];
pub const PLANE_TYPES: usize = 2;
const REF_TYPES: usize = 2;
pub const SKIP_CONTEXTS: usize = 3;
pub const INTRA_INTER_CONTEXTS: usize = 4;
pub const INTER_MODE_CONTEXTS: usize = 8;
pub const DRL_MODE_CONTEXTS: usize = 3;
pub const COMP_INTER_CONTEXTS: usize = 5;
pub const COMP_REF_TYPE_CONTEXTS: usize = 5;
pub const UNI_COMP_REF_CONTEXTS: usize = 3;
// Level Map
pub const TXB_SKIP_CONTEXTS: usize = 13;
pub const EOB_COEF_CONTEXTS: usize = 9;
const SIG_COEF_CONTEXTS_2D: usize = 26;
const SIG_COEF_CONTEXTS_1D: usize = 16;
pub const SIG_COEF_CONTEXTS_EOB: usize = 4;
pub const SIG_COEF_CONTEXTS: usize = SIG_COEF_CONTEXTS_2D + SIG_COEF_CONTEXTS_1D;
const COEFF_BASE_CONTEXTS: usize = SIG_COEF_CONTEXTS;
pub const DC_SIGN_CONTEXTS: usize = 3;
const BR_TMP_OFFSET: usize = 12;
const BR_REF_CAT: usize = 4;
pub const LEVEL_CONTEXTS: usize = 21;
pub const NUM_BASE_LEVELS: usize = 2;
pub const BR_CDF_SIZE: usize = 4;
const COEFF_BASE_RANGE: usize = 4 * (BR_CDF_SIZE - 1);
const COEFF_CONTEXT_BITS: usize = 6;
const COEFF_CONTEXT_MASK: usize = (1 << COEFF_CONTEXT_BITS) - 1;
const MAX_BASE_BR_RANGE: usize = COEFF_BASE_RANGE + NUM_BASE_LEVELS + 1;
const BASE_CONTEXT_POSITION_NUM: usize = 12;
// Pad 4 extra columns to remove horizontal availability check.
const TX_PAD_HOR_LOG2: usize = 2;
const TX_PAD_HOR: usize = 4;
// Pad 6 extra rows (2 on top and 4 on bottom) to remove vertical availability
// check.
const TX_PAD_TOP: usize = 2;
const TX_PAD_BOTTOM: usize = 4;
const TX_PAD_VER: usize = (TX_PAD_TOP + TX_PAD_BOTTOM);
// Pad 16 extra bytes to avoid reading overflow in SIMD optimization.
const TX_PAD_END: usize = 16;
const TX_PAD_2D: usize =
((MAX_TX_SIZE + TX_PAD_HOR) * (MAX_TX_SIZE + TX_PAD_VER) + TX_PAD_END);
const TX_CLASSES: usize = 3;
/// Classification of transform types by their scan behavior: full 2-D
/// transforms versus purely horizontal or vertical 1-D transforms
/// (see `tx_type_to_class` for the mapping).
#[derive(Copy, Clone, PartialEq)]
pub enum TxClass {
  TX_CLASS_2D = 0,
  TX_CLASS_HORIZ = 1,
  TX_CLASS_VERT = 2
}
/// Segmentation feature indices; each segment may override the listed
/// frame-level parameter (bit widths in `seg_feature_bits`).
#[derive(Copy, Clone, PartialEq)]
pub enum SegLvl {
  SEG_LVL_ALT_Q = 0, /* Use alternate Quantizer .... */
  SEG_LVL_ALT_LF_Y_V = 1, /* Use alternate loop filter value on y plane vertical */
  SEG_LVL_ALT_LF_Y_H = 2, /* Use alternate loop filter value on y plane horizontal */
  SEG_LVL_ALT_LF_U = 3, /* Use alternate loop filter value on u plane */
  SEG_LVL_ALT_LF_V = 4, /* Use alternate loop filter value on v plane */
  SEG_LVL_REF_FRAME = 5, /* Optional Segment reference frame */
  SEG_LVL_SKIP = 6, /* Optional Segment (0,0) + skip mode */
  SEG_LVL_GLOBALMV = 7,
  SEG_LVL_MAX = 8
}
pub const seg_feature_bits: [u32; SegLvl::SEG_LVL_MAX as usize] =
[ 8, 6, 6, 6, 6, 3, 0, 0 ];
pub const seg_feature_is_signed: [bool; SegLvl::SEG_LVL_MAX as usize] =
[ true, true, true, true, true, false, false, false, ];
use context::TxClass::*;
static tx_type_to_class: [TxClass; TX_TYPES] = [
TX_CLASS_2D, // DCT_DCT
TX_CLASS_2D, // ADST_DCT
TX_CLASS_2D, // DCT_ADST
TX_CLASS_2D, // ADST_ADST
TX_CLASS_2D, // FLIPADST_DCT
TX_CLASS_2D, // DCT_FLIPADST
TX_CLASS_2D, // FLIPADST_FLIPADST
TX_CLASS_2D, // ADST_FLIPADST
TX_CLASS_2D, // FLIPADST_ADST
TX_CLASS_2D, // IDTX
TX_CLASS_VERT, // V_DCT
TX_CLASS_HORIZ, // H_DCT
TX_CLASS_VERT, // V_ADST
TX_CLASS_HORIZ, // H_ADST
TX_CLASS_VERT, // V_FLIPADST
TX_CLASS_HORIZ // H_FLIPADST
];
static eob_to_pos_small: [u8; 33] = [
0, 1, 2, // 0-2
3, 3, // 3-4
4, 4, 4, 4, // 5-8
5, 5, 5, 5, 5, 5, 5, 5, // 9-16
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 // 17-32
];
static eob_to_pos_large: [u8; 17] = [
6, // place holder
7, // 33-64
8, 8, // 65-128
9, 9, 9, 9, // 129-256
10, 10, 10, 10, 10, 10, 10, 10, // 257-512
11 // 513-
];
static k_eob_group_start: [u16; 12] = [ 0, 1, 2, 3, 5, 9,
17, 33, 65, 129, 257, 513 ];
static k_eob_offset_bits: [u16; 12] = [ 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ];
/// Clamps `x` to at most 3.
fn clip_max3(x: u8) -> u8 {
  x.min(3)
}
// The ctx offset table when TX is TX_CLASS_2D.
// TX col and row indices are clamped to 4
#[cfg_attr(rustfmt, rustfmt_skip)]
static av1_nz_map_ctx_offset: [[[i8; 5]; 5]; TxSize::TX_SIZES_ALL] = [
// TX_4X4
[
[ 0, 1, 6, 6, 0],
[ 1, 6, 6, 21, 0],
[ 6, 6, 21, 21, 0],
[ 6, 21, 21, 21, 0],
[ 0, 0, 0, 0, 0]
],
// TX_8X8
[
[ 0, 1, 6, 6, 21],
[ 1, 6, 6, 21, 21],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_16X16
[
[ 0, 1, 6, 6, 21],
[ 1, 6, 6, 21, 21],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_32X32
[
[ 0, 1, 6, 6, 21],
[ 1, 6, 6, 21, 21],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_64X64
[
[ 0, 1, 6, 6, 21],
[ 1, 6, 6, 21, 21],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_4X8
[
[ 0, 11, 11, 11, 0],
[11, 11, 11, 11, 0],
[ 6, 6, 21, 21, 0],
[ 6, 21, 21, 21, 0],
[21, 21, 21, 21, 0]
],
// TX_8X4
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[ 0, 0, 0, 0, 0]
],
// TX_8X16
[
[ 0, 11, 11, 11, 11],
[11, 11, 11, 11, 11],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_16X8
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21]
],
// TX_16X32
[
[ 0, 11, 11, 11, 11],
[11, 11, 11, 11, 11],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_32X16
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21]
],
// TX_32X64
[
[ 0, 11, 11, 11, 11],
[11, 11, 11, 11, 11],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_64X32
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21]
],
// TX_4X16
[
[ 0, 11, 11, 11, 0],
[11, 11, 11, 11, 0],
[ 6, 6, 21, 21, 0],
[ 6, 21, 21, 21, 0],
[21, 21, 21, 21, 0]
],
// TX_16X4
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[ 0, 0, 0, 0, 0]
],
// TX_8X32
[
[ 0, 11, 11, 11, 11],
[11, 11, 11, 11, 11],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_32X8
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21]
],
// TX_16X64
[
[ 0, 11, 11, 11, 11],
[11, 11, 11, 11, 11],
[ 6, 6, 21, 21, 21],
[ 6, 21, 21, 21, 21],
[21, 21, 21, 21, 21]
],
// TX_64X16
[
[ 0, 16, 6, 6, 21],
[16, 16, 6, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21],
[16, 16, 21, 21, 21]
]
];
const NZ_MAP_CTX_0: usize = SIG_COEF_CONTEXTS_2D;
const NZ_MAP_CTX_5: usize = (NZ_MAP_CTX_0 + 5);
const NZ_MAP_CTX_10: usize = (NZ_MAP_CTX_0 + 10);
static nz_map_ctx_offset_1d: [usize; 32] = [
NZ_MAP_CTX_0, NZ_MAP_CTX_5, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10, NZ_MAP_CTX_10,
NZ_MAP_CTX_10, NZ_MAP_CTX_10 ];
const CONTEXT_MAG_POSITION_NUM: usize = 3;
static mag_ref_offset_with_txclass: [[[usize; 2]; CONTEXT_MAG_POSITION_NUM]; 3] = [
[ [ 0, 1 ], [ 1, 0 ], [ 1, 1 ] ],
[ [ 0, 1 ], [ 1, 0 ], [ 0, 2 ] ],
[ [ 0, 1 ], [ 1, 0 ], [ 2, 0 ] ] ];
// End of Level Map
/// Returns true if the block at `bo` carries chroma information under the
/// given subsampling. On a subsampled axis, only blocks at an odd mi
/// position, or spanning an even number of mi units, code chroma.
pub fn has_chroma(
  bo: &BlockOffset, bsize: BlockSize, subsampling_x: usize,
  subsampling_y: usize
) -> bool {
  let has_chroma_x =
    (bo.x & 1) == 1 || (bsize.width_mi() & 1) == 0 || subsampling_x == 0;
  let has_chroma_y =
    (bo.y & 1) == 1 || (bsize.height_mi() & 1) == 0 || subsampling_y == 0;
  has_chroma_x && has_chroma_y
}
/// Selects which set of transform types is available for the given
/// transform size and prediction kind.
pub fn get_tx_set(
  tx_size: TxSize, is_inter: bool, use_reduced_set: bool
) -> TxSet {
  // 64-point transforms only support DCT_DCT.
  if tx_size.width() >= 64 || tx_size.height() >= 64 {
    return TxSet::TX_SET_DCTONLY;
  }
  if tx_size.sqr_up() == TxSize::TX_32X32 {
    return if is_inter {
      TxSet::TX_SET_DCT_IDTX
    } else {
      TxSet::TX_SET_DCTONLY
    };
  }
  if use_reduced_set {
    return if is_inter {
      TxSet::TX_SET_DCT_IDTX
    } else {
      TxSet::TX_SET_DTT4_IDTX
    };
  }
  // Full sets: 16x16-and-up squares get a smaller set than small blocks.
  let is_16x16_sqr = tx_size.sqr() == TxSize::TX_16X16;
  if is_inter {
    if is_16x16_sqr { TxSet::TX_SET_DTT9_IDTX_1DDCT } else { TxSet::TX_SET_ALL16 }
  } else {
    if is_16x16_sqr { TxSet::TX_SET_DTT4_IDTX } else { TxSet::TX_SET_DTT4_IDTX_1DDCT }
  }
}
/// Maps the transform set chosen by `get_tx_set` to the CDF index used
/// for intra or inter coding (negative means the set is not signaled).
fn get_tx_set_index(
  tx_size: TxSize, is_inter: bool, use_reduced_set: bool
) -> i8 {
  let set_type = get_tx_set(tx_size, is_inter, use_reduced_set) as usize;
  let index_table =
    if is_inter { &tx_set_index_inter } else { &tx_set_index_intra };
  index_table[set_type]
}
static intra_mode_to_tx_type_context: [TxType; INTRA_MODES] = [
DCT_DCT, // DC
ADST_DCT, // V
DCT_ADST, // H
DCT_DCT, // D45
ADST_ADST, // D135
ADST_DCT, // D117
DCT_ADST, // D153
DCT_ADST, // D207
ADST_DCT, // D63
ADST_ADST, // SMOOTH
ADST_DCT, // SMOOTH_V
DCT_ADST, // SMOOTH_H
ADST_ADST, // PAETH
];
static uv2y: [PredictionMode; UV_INTRA_MODES] = [
DC_PRED, // UV_DC_PRED
V_PRED, // UV_V_PRED
H_PRED, // UV_H_PRED
D45_PRED, // UV_D45_PRED
D135_PRED, // UV_D135_PRED
D117_PRED, // UV_D117_PRED
D153_PRED, // UV_D153_PRED
D207_PRED, // UV_D207_PRED
D63_PRED, // UV_D63_PRED
SMOOTH_PRED, // UV_SMOOTH_PRED
SMOOTH_V_PRED, // UV_SMOOTH_V_PRED
SMOOTH_H_PRED, // UV_SMOOTH_H_PRED
PAETH_PRED, // UV_PAETH_PRED
DC_PRED // CFL_PRED
];
/// Maps a chroma prediction mode to the tx-type context of the
/// corresponding luma mode.
pub fn uv_intra_mode_to_tx_type_context(pred: PredictionMode) -> TxType {
  let luma_mode = uv2y[pred as usize];
  intra_mode_to_tx_type_context[luma_mode as usize]
}
/// Per-component (row or col) CDFs for motion-vector entropy coding.
/// Each CDF array carries one extra trailing element that the symbol
/// writer uses for adaptation bookkeeping (zeroed by `reset_counts`).
#[derive(Clone,Copy)]
pub struct NMVComponent {
  classes_cdf: [u16; MV_CLASSES + 1],                   // magnitude class
  class0_fp_cdf: [[u16; MV_FP_SIZE + 1]; CLASS0_SIZE],  // fractional bits, class 0
  fp_cdf: [u16; MV_FP_SIZE + 1],                        // fractional bits, other classes
  sign_cdf: [u16; 2 + 1],
  class0_hp_cdf: [u16; 2 + 1],                          // high-precision bit, class 0
  hp_cdf: [u16; 2 + 1],                                 // high-precision bit, other classes
  class0_cdf: [u16; CLASS0_SIZE + 1],                   // integer offset within class 0
  bits_cdf: [[u16; 2 + 1]; MV_OFFSET_BITS],             // one binary CDF per integer-offset bit
}
/// Motion-vector coding context: the joint-type CDF plus the CDFs for
/// the two MV components.
#[derive(Clone,Copy)]
pub struct NMVContext {
  joints_cdf: [u16; MV_JOINTS + 1],
  comps: [NMVComponent; 2],
}
extern "C" {
//static av1_scan_orders: [[SCAN_ORDER; TX_TYPES]; TxSize::TX_SIZES_ALL];
}
// lv_map
static default_nmv_context: NMVContext = {
NMVContext {
joints_cdf: cdf!(4096, 11264, 19328),
comps: [
NMVComponent {
classes_cdf: cdf!(
28672, 30976, 31858, 32320, 32551, 32656, 32740, 32757, 32762, 32767
),
class0_fp_cdf: [cdf!(16384, 24576, 26624), cdf!(12288, 21248, 24128)],
fp_cdf: cdf!(8192, 17408, 21248),
sign_cdf: cdf!(128 * 128),
class0_hp_cdf: cdf!(160 * 128),
hp_cdf: cdf!(128 * 128),
class0_cdf: cdf!(216 * 128),
bits_cdf: [
cdf!(128 * 136),
cdf!(128 * 140),
cdf!(128 * 148),
cdf!(128 * 160),
cdf!(128 * 176),
cdf!(128 * 192),
cdf!(128 * 224),
cdf!(128 * 234),
cdf!(128 * 234),
cdf!(128 * 240)
]
},
NMVComponent {
classes_cdf: cdf!(
28672, 30976, 31858, 32320, 32551, 32656, 32740, 32757, 32762, 32767
),
class0_fp_cdf: [cdf!(16384, 24576, 26624), cdf!(12288, 21248, 24128)],
fp_cdf: cdf!(8192, 17408, 21248),
sign_cdf: cdf!(128 * 128),
class0_hp_cdf: cdf!(160 * 128),
hp_cdf: cdf!(128 * 128),
class0_cdf: cdf!(216 * 128),
bits_cdf: [
cdf!(128 * 136),
cdf!(128 * 140),
cdf!(128 * 148),
cdf!(128 * 160),
cdf!(128 * 176),
cdf!(128 * 192),
cdf!(128 * 224),
cdf!(128 * 234),
cdf!(128 * 234),
cdf!(128 * 240)
]
}
]
}
};
/// A motion-vector candidate kept in the reference MV stack.
#[derive(Clone)]
pub struct CandidateMV {
  pub this_mv: MotionVector,  // MV for the first (or only) reference
  pub comp_mv: MotionVector,  // second MV — presumably for compound prediction; confirm against callers
  pub weight: u32             // candidate priority weight
}
/// The complete set of adaptive CDFs used for entropy-coding one frame.
/// Every CDF array holds one extra trailing element used by the symbol
/// writer for adaptation bookkeeping (see `reset_counts`).
#[derive(Clone,Copy)]
pub struct CDFContext {
  partition_cdf: [[u16; EXT_PARTITION_TYPES + 1]; PARTITION_CONTEXTS],
  kf_y_cdf: [[[u16; INTRA_MODES + 1]; KF_MODE_CONTEXTS]; KF_MODE_CONTEXTS],
  y_mode_cdf: [[u16; INTRA_MODES + 1]; BLOCK_SIZE_GROUPS],
  // Index 0 vs 1: without / with CfL available (see reset_counts usage).
  uv_mode_cdf: [[[u16; UV_INTRA_MODES + 1]; INTRA_MODES]; 2],
  cfl_sign_cdf: [u16; CFL_JOINT_SIGNS + 1],
  cfl_alpha_cdf: [[u16; CFL_ALPHABET_SIZE + 1]; CFL_ALPHA_CONTEXTS],
  newmv_cdf: [[u16; 2 + 1]; NEWMV_MODE_CONTEXTS],
  zeromv_cdf: [[u16; 2 + 1]; GLOBALMV_MODE_CONTEXTS],
  refmv_cdf: [[u16; 2 + 1]; REFMV_MODE_CONTEXTS],
  intra_tx_cdf:
    [[[[u16; TX_TYPES + 1]; INTRA_MODES]; TX_SIZE_SQR_CONTEXTS]; TX_SETS_INTRA],
  inter_tx_cdf: [[[u16; TX_TYPES + 1]; TX_SIZE_SQR_CONTEXTS]; TX_SETS_INTER],
  skip_cdfs: [[u16; 3]; SKIP_CONTEXTS],
  intra_inter_cdfs: [[u16; 3]; INTRA_INTER_CONTEXTS],
  angle_delta_cdf: [[u16; 2 * MAX_ANGLE_DELTA + 1 + 1]; DIRECTIONAL_MODES],
  filter_intra_cdfs: [[u16; 3]; BlockSize::BLOCK_SIZES_ALL],
  comp_mode_cdf: [[u16; 3]; COMP_INTER_CONTEXTS],
  comp_ref_type_cdf: [[u16; 3]; COMP_REF_TYPE_CONTEXTS],
  comp_ref_cdf: [[[u16; 3]; FWD_REFS - 1]; REF_CONTEXTS],
  comp_bwd_ref_cdf: [[[u16; 3]; BWD_REFS - 1]; REF_CONTEXTS],
  single_ref_cdfs: [[[u16; 2 + 1]; SINGLE_REFS - 1]; REF_CONTEXTS],
  drl_cdfs: [[u16; 2 + 1]; DRL_MODE_CONTEXTS],
  compound_mode_cdf: [[u16; INTER_COMPOUND_MODES + 1]; INTER_MODE_CONTEXTS],
  nmv_context: NMVContext,
  deblock_delta_multi_cdf: [[u16; DELTA_LF_PROBS + 1 + 1]; FRAME_LF_COUNT],
  deblock_delta_cdf: [u16; DELTA_LF_PROBS + 1 + 1],
  spatial_segmentation_cdfs: [[u16; 8 + 1]; 3],
  // lv_map (level-map coefficient coding)
  txb_skip_cdf: [[[u16; 3]; TXB_SKIP_CONTEXTS]; TxSize::TX_SIZES],
  dc_sign_cdf: [[[u16; 3]; DC_SIGN_CONTEXTS]; PLANE_TYPES],
  eob_extra_cdf:
    [[[[u16; 3]; EOB_COEF_CONTEXTS]; PLANE_TYPES]; TxSize::TX_SIZES],
  // End-of-block position CDFs, one set per transform area (16..1024 coeffs).
  eob_flag_cdf16: [[[u16; 5 + 1]; 2]; PLANE_TYPES],
  eob_flag_cdf32: [[[u16; 6 + 1]; 2]; PLANE_TYPES],
  eob_flag_cdf64: [[[u16; 7 + 1]; 2]; PLANE_TYPES],
  eob_flag_cdf128: [[[u16; 8 + 1]; 2]; PLANE_TYPES],
  eob_flag_cdf256: [[[u16; 9 + 1]; 2]; PLANE_TYPES],
  eob_flag_cdf512: [[[u16; 10 + 1]; 2]; PLANE_TYPES],
  eob_flag_cdf1024: [[[u16; 11 + 1]; 2]; PLANE_TYPES],
  coeff_base_eob_cdf:
    [[[[u16; 3 + 1]; SIG_COEF_CONTEXTS_EOB]; PLANE_TYPES]; TxSize::TX_SIZES],
  coeff_base_cdf:
    [[[[u16; 4 + 1]; SIG_COEF_CONTEXTS]; PLANE_TYPES]; TxSize::TX_SIZES],
  coeff_br_cdf: [[[[u16; BR_CDF_SIZE + 1]; LEVEL_CONTEXTS]; PLANE_TYPES];
    TxSize::TX_SIZES]
}
impl CDFContext {
/// Builds the default CDF set for a frame. Most tables are fixed
/// defaults; the coefficient (lv_map) tables are selected from one of
/// four buckets trained per quantizer range.
pub fn new(quantizer: u8) -> CDFContext {
  // Quantizer bucket index for the quantizer-dependent coefficient CDFs.
  let qctx = match quantizer {
    0..=20 => 0,
    21..=60 => 1,
    61..=120 => 2,
    _ => 3
  };
  CDFContext {
    partition_cdf: default_partition_cdf,
    kf_y_cdf: default_kf_y_mode_cdf,
    y_mode_cdf: default_if_y_mode_cdf,
    uv_mode_cdf: default_uv_mode_cdf,
    cfl_sign_cdf: default_cfl_sign_cdf,
    cfl_alpha_cdf: default_cfl_alpha_cdf,
    newmv_cdf: default_newmv_cdf,
    zeromv_cdf: default_zeromv_cdf,
    refmv_cdf: default_refmv_cdf,
    intra_tx_cdf: default_intra_ext_tx_cdf,
    inter_tx_cdf: default_inter_ext_tx_cdf,
    skip_cdfs: default_skip_cdfs,
    intra_inter_cdfs: default_intra_inter_cdf,
    angle_delta_cdf: default_angle_delta_cdf,
    filter_intra_cdfs: default_filter_intra_cdfs,
    comp_mode_cdf: default_comp_mode_cdf,
    comp_ref_type_cdf: default_comp_ref_type_cdf,
    comp_ref_cdf: default_comp_ref_cdf,
    comp_bwd_ref_cdf: default_comp_bwdref_cdf,
    single_ref_cdfs: default_single_ref_cdf,
    drl_cdfs: default_drl_cdf,
    compound_mode_cdf: default_compound_mode_cdf,
    nmv_context: default_nmv_context,
    deblock_delta_multi_cdf: default_delta_lf_multi_cdf,
    deblock_delta_cdf: default_delta_lf_cdf,
    spatial_segmentation_cdfs: default_spatial_pred_seg_tree_cdf,
    // lv_map: quantizer-dependent coefficient-coding CDFs
    txb_skip_cdf: av1_default_txb_skip_cdfs[qctx],
    dc_sign_cdf: av1_default_dc_sign_cdfs[qctx],
    eob_extra_cdf: av1_default_eob_extra_cdfs[qctx],
    eob_flag_cdf16: av1_default_eob_multi16_cdfs[qctx],
    eob_flag_cdf32: av1_default_eob_multi32_cdfs[qctx],
    eob_flag_cdf64: av1_default_eob_multi64_cdfs[qctx],
    eob_flag_cdf128: av1_default_eob_multi128_cdfs[qctx],
    eob_flag_cdf256: av1_default_eob_multi256_cdfs[qctx],
    eob_flag_cdf512: av1_default_eob_multi512_cdfs[qctx],
    eob_flag_cdf1024: av1_default_eob_multi1024_cdfs[qctx],
    coeff_base_eob_cdf: av1_default_coeff_base_eob_multi_cdfs[qctx],
    coeff_base_cdf: av1_default_coeff_base_multi_cdfs[qctx],
    coeff_br_cdf: av1_default_coeff_lps_multi_cdfs[qctx]
  }
}
/// Zeroes the adaptation counter that lives in the trailing slot of
/// every CDF array, so symbol counting starts fresh.
pub fn reset_counts(&mut self) {
  // Each macro zeroes the final element at the given nesting depth.
  macro_rules! reset_1d {
    ($field:expr) => (let r = $field.last_mut().unwrap(); *r = 0;)
  }
  macro_rules! reset_2d {
    ($field:expr) => (for mut x in $field.iter_mut() { reset_1d!(x); })
  }
  macro_rules! reset_3d {
    ($field:expr) => (for mut x in $field.iter_mut() { reset_2d!(x); })
  }
  macro_rules! reset_4d {
    ($field:expr) => (for mut x in $field.iter_mut() { reset_3d!(x); })
  }
  // partition_cdf uses a different symbol count per context group, so
  // the counter slot (just past the last used symbol) is hard-coded per
  // group instead of being the array's final element.
  for i in 0..4 { self.partition_cdf[i][4] = 0; }
  for i in 4..16 { self.partition_cdf[i][10] = 0; }
  for i in 16..20 { self.partition_cdf[i][8] = 0; }
  reset_3d!(self.kf_y_cdf);
  reset_2d!(self.y_mode_cdf);
  // uv_mode_cdf[1] uses one more symbol than uv_mode_cdf[0]
  // (presumably the extra CfL mode — see UV_INTRA_MODES vs INTRA_MODES).
  for i in 0..INTRA_MODES {
    self.uv_mode_cdf[0][i][UV_INTRA_MODES - 1] = 0;
    self.uv_mode_cdf[1][i][UV_INTRA_MODES] = 0;
  }
  reset_1d!(self.cfl_sign_cdf);
  reset_2d!(self.cfl_alpha_cdf);
  reset_2d!(self.newmv_cdf);
  reset_2d!(self.zeromv_cdf);
  reset_2d!(self.refmv_cdf);
  // Transform-set CDFs also have per-set symbol counts, hence the
  // hard-coded counter indices per set below.
  for i in 0..TX_SIZE_SQR_CONTEXTS {
    for j in 0..INTRA_MODES {
      self.intra_tx_cdf[1][i][j][7] = 0;
      self.intra_tx_cdf[2][i][j][5] = 0;
    }
    self.inter_tx_cdf[1][i][16] = 0;
    self.inter_tx_cdf[2][i][12] = 0;
    self.inter_tx_cdf[3][i][2] = 0;
  }
  reset_2d!(self.skip_cdfs);
  reset_2d!(self.intra_inter_cdfs);
  reset_2d!(self.angle_delta_cdf);
  reset_2d!(self.filter_intra_cdfs);
  reset_2d!(self.comp_mode_cdf);
  reset_2d!(self.comp_ref_type_cdf);
  reset_3d!(self.comp_ref_cdf);
  reset_3d!(self.comp_bwd_ref_cdf);
  reset_3d!(self.single_ref_cdfs);
  reset_2d!(self.drl_cdfs);
  reset_2d!(self.compound_mode_cdf);
  reset_2d!(self.deblock_delta_multi_cdf);
  reset_1d!(self.deblock_delta_cdf);
  reset_2d!(self.spatial_segmentation_cdfs);
  // Motion-vector CDFs.
  reset_1d!(self.nmv_context.joints_cdf);
  for i in 0..2 {
    reset_1d!(self.nmv_context.comps[i].classes_cdf);
    reset_2d!(self.nmv_context.comps[i].class0_fp_cdf);
    reset_1d!(self.nmv_context.comps[i].fp_cdf);
    reset_1d!(self.nmv_context.comps[i].sign_cdf);
    reset_1d!(self.nmv_context.comps[i].class0_hp_cdf);
    reset_1d!(self.nmv_context.comps[i].hp_cdf);
    reset_1d!(self.nmv_context.comps[i].class0_cdf);
    reset_2d!(self.nmv_context.comps[i].bits_cdf);
  }
  // lv_map (coefficient coding) CDFs.
  reset_3d!(self.txb_skip_cdf);
  reset_3d!(self.dc_sign_cdf);
  reset_4d!(self.eob_extra_cdf);
  reset_3d!(self.eob_flag_cdf16);
  reset_3d!(self.eob_flag_cdf32);
  reset_3d!(self.eob_flag_cdf64);
  reset_3d!(self.eob_flag_cdf128);
  reset_3d!(self.eob_flag_cdf256);
  reset_3d!(self.eob_flag_cdf512);
  reset_3d!(self.eob_flag_cdf1024);
  reset_4d!(self.coeff_base_eob_cdf);
  reset_4d!(self.coeff_base_cdf);
  reset_4d!(self.coeff_br_cdf);
}
pub fn build_map(&self) -> Vec<(&'static str, usize, usize)> {
use std::mem::size_of_val;
let partition_cdf_start =
self.partition_cdf.first().unwrap().as_ptr() as usize;
let partition_cdf_end =
partition_cdf_start + size_of_val(&self.partition_cdf);
let kf_y_cdf_start = self.kf_y_cdf.first().unwrap().as_ptr() as usize;
let kf_y_cdf_end = kf_y_cdf_start + size_of_val(&self.kf_y_cdf);
let y_mode_cdf_start = self.y_mode_cdf.first().unwrap().as_ptr() as usize;
let y_mode_cdf_end = y_mode_cdf_start + size_of_val(&self.y_mode_cdf);
let uv_mode_cdf_start =
self.uv_mode_cdf.first().unwrap().as_ptr() as usize;
let uv_mode_cdf_end = uv_mode_cdf_start + size_of_val(&self.uv_mode_cdf);
let cfl_sign_cdf_start = self.cfl_sign_cdf.as_ptr() as usize;
let cfl_sign_cdf_end = cfl_sign_cdf_start + size_of_val(&self.cfl_sign_cdf);
let cfl_alpha_cdf_start =
self.cfl_alpha_cdf.first().unwrap().as_ptr() as usize;
let cfl_alpha_cdf_end =
cfl_alpha_cdf_start + size_of_val(&self.cfl_alpha_cdf);
let intra_tx_cdf_start =
self.intra_tx_cdf.first().unwrap().as_ptr() as usize;
let intra_tx_cdf_end =
intra_tx_cdf_start + size_of_val(&self.intra_tx_cdf);
let inter_tx_cdf_start =
self.inter_tx_cdf.first().unwrap().as_ptr() as usize;
let inter_tx_cdf_end =
inter_tx_cdf_start + size_of_val(&self.inter_tx_cdf);
let skip_cdfs_start = self.skip_cdfs.first().unwrap().as_ptr() as usize;
let skip_cdfs_end = skip_cdfs_start + size_of_val(&self.skip_cdfs);
let intra_inter_cdfs_start =
self.intra_inter_cdfs.first().unwrap().as_ptr() as usize;
let intra_inter_cdfs_end =
intra_inter_cdfs_start + size_of_val(&self.intra_inter_cdfs);
let angle_delta_cdf_start =
self.angle_delta_cdf.first().unwrap().as_ptr() as usize;
let angle_delta_cdf_end =
angle_delta_cdf_start + size_of_val(&self.angle_delta_cdf);
let filter_intra_cdfs_start =
self.filter_intra_cdfs.first().unwrap().as_ptr() as usize;
let filter_intra_cdfs_end =
filter_intra_cdfs_start + size_of_val(&self.filter_intra_cdfs);
let comp_mode_cdf_start =
self.comp_mode_cdf.first().unwrap().as_ptr() as usize;
let comp_mode_cdf_end =
comp_mode_cdf_start + size_of_val(&self.comp_mode_cdf);
let comp_ref_type_cdf_start =
self.comp_ref_type_cdf.first().unwrap().as_ptr() as usize;
let comp_ref_type_cdf_end =
comp_ref_type_cdf_start + size_of_val(&self.comp_ref_type_cdf);
let comp_ref_cdf_start =
self.comp_ref_cdf.first().unwrap().as_ptr() as usize;
let comp_ref_cdf_end =
comp_ref_cdf_start + size_of_val(&self.comp_ref_cdf);
let comp_bwd_ref_cdf_start =
self.comp_bwd_ref_cdf.first().unwrap().as_ptr() as usize;
let comp_bwd_ref_cdf_end =
comp_bwd_ref_cdf_start + size_of_val(&self.comp_bwd_ref_cdf);
let deblock_delta_multi_cdf_start =
self.deblock_delta_multi_cdf.first().unwrap().as_ptr() as usize;
let deblock_delta_multi_cdf_end =
deblock_delta_multi_cdf_start + size_of_val(&self.deblock_delta_multi_cdf);
let deblock_delta_cdf_start =
self.deblock_delta_cdf.as_ptr() as usize;
let deblock_delta_cdf_end =
deblock_delta_cdf_start + size_of_val(&self.deblock_delta_cdf);
let spatial_segmentation_cdfs_start =
self.spatial_segmentation_cdfs.first().unwrap().as_ptr() as usize;
let spatial_segmentation_cdfs_end =
spatial_segmentation_cdfs_start + size_of_val(&self.spatial_segmentation_cdfs);
let txb_skip_cdf_start =
self.txb_skip_cdf.first().unwrap().as_ptr() as usize;
let txb_skip_cdf_end =
txb_skip_cdf_start + size_of_val(&self.txb_skip_cdf);
let dc_sign_cdf_start =
self.dc_sign_cdf.first().unwrap().as_ptr() as usize;
let dc_sign_cdf_end = dc_sign_cdf_start + size_of_val(&self.dc_sign_cdf);
let eob_extra_cdf_start =
self.eob_extra_cdf.first().unwrap().as_ptr() as usize;
let eob_extra_cdf_end =
eob_extra_cdf_start + size_of_val(&self.eob_extra_cdf);
let eob_flag_cdf16_start =
self.eob_flag_cdf16.first().unwrap().as_ptr() as usize;
let eob_flag_cdf16_end =
eob_flag_cdf16_start + size_of_val(&self.eob_flag_cdf16);
let eob_flag_cdf32_start =
self.eob_flag_cdf32.first().unwrap().as_ptr() as usize;
let eob_flag_cdf32_end =
eob_flag_cdf32_start + size_of_val(&self.eob_flag_cdf32);
let eob_flag_cdf64_start =
self.eob_flag_cdf64.first().unwrap().as_ptr() as usize;
let eob_flag_cdf64_end =
eob_flag_cdf64_start + size_of_val(&self.eob_flag_cdf64);
let eob_flag_cdf128_start =
self.eob_flag_cdf128.first().unwrap().as_ptr() as usize;
let eob_flag_cdf128_end =
eob_flag_cdf128_start + size_of_val(&self.eob_flag_cdf128);
let eob_flag_cdf256_start =
self.eob_flag_cdf256.first().unwrap().as_ptr() as usize;
let eob_flag_cdf256_end =
eob_flag_cdf256_start + size_of_val(&self.eob_flag_cdf256);
let eob_flag_cdf512_start =
self.eob_flag_cdf512.first().unwrap().as_ptr() as usize;
let eob_flag_cdf512_end =
eob_flag_cdf512_start + size_of_val(&self.eob_flag_cdf512);
let eob_flag_cdf1024_start =
self.eob_flag_cdf1024.first().unwrap().as_ptr() as usize;
let eob_flag_cdf1024_end =
eob_flag_cdf1024_start + size_of_val(&self.eob_flag_cdf1024);
let coeff_base_eob_cdf_start =
self.coeff_base_eob_cdf.first().unwrap().as_ptr() as usize;
let coeff_base_eob_cdf_end =
coeff_base_eob_cdf_start + size_of_val(&self.coeff_base_eob_cdf);
let coeff_base_cdf_start =
self.coeff_base_cdf.first().unwrap().as_ptr() as usize;
let coeff_base_cdf_end =
coeff_base_cdf_start + size_of_val(&self.coeff_base_cdf);
let coeff_br_cdf_start =
self.coeff_br_cdf.first().unwrap().as_ptr() as usize;
let coeff_br_cdf_end =
coeff_br_cdf_start + size_of_val(&self.coeff_br_cdf);
vec![
("partition_cdf", partition_cdf_start, partition_cdf_end),
("kf_y_cdf", kf_y_cdf_start, kf_y_cdf_end),
("y_mode_cdf", y_mode_cdf_start, y_mode_cdf_end),
("uv_mode_cdf", uv_mode_cdf_start, uv_mode_cdf_end),
("cfl_sign_cdf", cfl_sign_cdf_start, cfl_sign_cdf_end),
("cfl_alpha_cdf", cfl_alpha_cdf_start, cfl_alpha_cdf_end),
("intra_tx_cdf", intra_tx_cdf_start, intra_tx_cdf_end),
("inter_tx_cdf", inter_tx_cdf_start, inter_tx_cdf_end),
("skip_cdfs", skip_cdfs_start, skip_cdfs_end),
("intra_inter_cdfs", intra_inter_cdfs_start, intra_inter_cdfs_end),
("angle_delta_cdf", angle_delta_cdf_start, angle_delta_cdf_end),
("filter_intra_cdfs", filter_intra_cdfs_start, filter_intra_cdfs_end),
("comp_mode_cdf", comp_mode_cdf_start, comp_mode_cdf_end),
("comp_ref_type_cdf", comp_ref_type_cdf_start, comp_ref_type_cdf_end),
("comp_ref_cdf", comp_ref_cdf_start, comp_ref_cdf_end),
("comp_bwd_ref_cdf", comp_bwd_ref_cdf_start, comp_bwd_ref_cdf_end),
("deblock_delta_multi_cdf", deblock_delta_multi_cdf_start, deblock_delta_multi_cdf_end),
("deblock_delta_cdf", deblock_delta_cdf_start, deblock_delta_cdf_end),
("spatial_segmentation_cdfs", spatial_segmentation_cdfs_start, spatial_segmentation_cdfs_end),
("txb_skip_cdf", txb_skip_cdf_start, txb_skip_cdf_end),
("dc_sign_cdf", dc_sign_cdf_start, dc_sign_cdf_end),
("eob_extra_cdf", eob_extra_cdf_start, eob_extra_cdf_end),
("eob_flag_cdf16", eob_flag_cdf16_start, eob_flag_cdf16_end),
("eob_flag_cdf32", eob_flag_cdf32_start, eob_flag_cdf32_end),
("eob_flag_cdf64", eob_flag_cdf64_start, eob_flag_cdf64_end),
("eob_flag_cdf128", eob_flag_cdf128_start, eob_flag_cdf128_end),
("eob_flag_cdf256", eob_flag_cdf256_start, eob_flag_cdf256_end),
("eob_flag_cdf512", eob_flag_cdf512_start, eob_flag_cdf512_end),
("eob_flag_cdf1024", eob_flag_cdf1024_start, eob_flag_cdf1024_end),
("coeff_base_eob_cdf", coeff_base_eob_cdf_start, coeff_base_eob_cdf_end),
("coeff_base_cdf", coeff_base_cdf_start, coeff_base_cdf_end),
("coeff_br_cdf", coeff_br_cdf_start, coeff_br_cdf_end),
]
}
}
impl fmt::Debug for CDFContext {
  /// The CDF tables hold far too many entries to dump usefully, so just
  /// emit a fixed placeholder string.
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    f.write_str("CDFContext contains too many numbers to print :-(")
  }
}
#[cfg(test)]
mod test {
  #[test]
  fn cdf_map() {
    use super::*;
    // Build the field-name -> address-range map for a fresh CDF context and
    // check that an address inside a known field resolves without panicking.
    let cdf = CDFContext::new(8);
    let cdf_map = FieldMap {
      map: cdf.build_map()
    };
    let f = &cdf.partition_cdf[2];
    cdf_map.lookup(f.as_ptr() as usize);
  }
  use super::CFLSign;
  use super::CFLSign::*;
  // Every valid (u, v) sign combination, indexed by its joint sign code.
  // (ZERO/ZERO is excluded: a CFL block must have at least one nonzero sign.)
  static cfl_alpha_signs: [[CFLSign; 2]; 8] = [
    [ CFL_SIGN_ZERO, CFL_SIGN_NEG ],
    [ CFL_SIGN_ZERO, CFL_SIGN_POS ],
    [ CFL_SIGN_NEG, CFL_SIGN_ZERO ],
    [ CFL_SIGN_NEG, CFL_SIGN_NEG ],
    [ CFL_SIGN_NEG, CFL_SIGN_POS ],
    [ CFL_SIGN_POS, CFL_SIGN_ZERO ],
    [ CFL_SIGN_POS, CFL_SIGN_NEG ],
    [ CFL_SIGN_POS, CFL_SIGN_POS ]
  ];
  // Expected per-plane contexts, indexed [uv][joint_sign].
  static cfl_context: [[usize; 8]; 2] = [
    [ 0, 0, 0, 1, 2, 3, 4, 5 ],
    [ 0, 3, 0, 1, 4, 0, 2, 5 ]
  ];
  #[test]
  fn cfl_joint_sign() {
    use super::*;
    // joint_sign() and context() must round-trip through the tables above.
    let mut cfl = CFLParams::new();
    for (joint_sign, &signs) in cfl_alpha_signs.iter().enumerate() {
      cfl.sign = signs;
      assert!(cfl.joint_sign() as usize == joint_sign);
      for uv in 0..2 {
        if signs[uv] != CFL_SIGN_ZERO {
          assert!(cfl.context(uv) == cfl_context[uv][joint_sign]);
        }
      }
    }
  }
}
// Log2 shift from superblock coordinates to plane (pixel) coordinates.
const SUPERBLOCK_TO_PLANE_SHIFT: usize = MAX_SB_SIZE_LOG2;
// Log2 shift from superblock coordinates to block (mode-info) coordinates.
const SUPERBLOCK_TO_BLOCK_SHIFT: usize = MAX_MIB_SIZE_LOG2;
// Log2 shift from block (mode-info) coordinates to plane (pixel) coordinates.
pub const BLOCK_TO_PLANE_SHIFT: usize = MI_SIZE_LOG2;
// Mask selecting a block's position within its enclosing superblock.
pub const LOCAL_BLOCK_MASK: usize = (1 << SUPERBLOCK_TO_BLOCK_SHIFT) - 1;
/// Absolute offset in superblocks inside a plane, where a superblock is defined
/// to be an N*N square where N = (1 << SUPERBLOCK_TO_PLANE_SHIFT).
#[derive(Clone)]
pub struct SuperBlockOffset {
  pub x: usize, // horizontal superblock index within the plane
  pub y: usize  // vertical superblock index within the plane
}
impl SuperBlockOffset {
  /// Offset of a block inside the current superblock.
  pub fn block_offset(&self, block_x: usize, block_y: usize) -> BlockOffset {
    let base_x = self.x << SUPERBLOCK_TO_BLOCK_SHIFT;
    let base_y = self.y << SUPERBLOCK_TO_BLOCK_SHIFT;
    BlockOffset {
      x: base_x + block_x,
      y: base_y + block_y
    }
  }
  /// Offset of the top-left pixel of this superblock in the given plane,
  /// accounting for that plane's chroma decimation.
  pub fn plane_offset(&self, plane: &PlaneConfig) -> PlaneOffset {
    let x_shift = SUPERBLOCK_TO_PLANE_SHIFT - plane.xdec;
    let y_shift = SUPERBLOCK_TO_PLANE_SHIFT - plane.ydec;
    PlaneOffset {
      x: (self.x as isize) << x_shift,
      y: (self.y as isize) << y_shift
    }
  }
}
/// Absolute offset in blocks inside a plane, where a block is defined
/// to be an N*N square where N = (1 << BLOCK_TO_PLANE_SHIFT).
#[derive(Clone)]
pub struct BlockOffset {
  pub x: usize, // horizontal block (mode-info) index within the plane
  pub y: usize  // vertical block (mode-info) index within the plane
}
impl BlockOffset {
  /// Offset of the superblock in which this block is located.
  pub fn sb_offset(&self) -> SuperBlockOffset {
    SuperBlockOffset {
      x: self.x >> SUPERBLOCK_TO_BLOCK_SHIFT,
      y: self.y >> SUPERBLOCK_TO_BLOCK_SHIFT
    }
  }
  /// Offset of the top-left pixel of this block: the superblock's pixel
  /// origin plus the block's (decimation-adjusted) local offset.
  pub fn plane_offset(&self, plane: &PlaneConfig) -> PlaneOffset {
    let sb = self.sb_offset().plane_offset(plane);
    let local_x = (self.x & LOCAL_BLOCK_MASK) as isize;
    let local_y = (self.y & LOCAL_BLOCK_MASK) as isize;
    PlaneOffset {
      x: sb.x + ((local_x >> plane.xdec) << BLOCK_TO_PLANE_SHIFT),
      y: sb.y + ((local_y >> plane.ydec) << BLOCK_TO_PLANE_SHIFT)
    }
  }
  /// Row of this block within its superblock, in mode-info units.
  pub fn y_in_sb(&self) -> usize {
    self.y % MAX_MIB_SIZE
  }
  /// A new offset displaced by the given (possibly negative) deltas.
  pub fn with_offset(&self, col_offset: isize, row_offset: isize) -> BlockOffset {
    let shifted_x = (self.x as isize + col_offset) as usize;
    let shifted_y = (self.y as isize + row_offset) as usize;
    BlockOffset {
      x: shifted_x,
      y: shifted_y
    }
  }
}
/// Per-4x4-unit mode information recorded while coding a frame.
#[derive(Copy, Clone)]
pub struct Block {
  pub mode: PredictionMode,
  pub partition: PartitionType,
  pub skip: bool,
  // [primary, secondary] reference frame indices
  pub ref_frames: [usize; 2],
  // motion vector for each reference in ref_frames
  pub mv: [MotionVector; 2],
  pub neighbors_ref_counts: [usize; TOTAL_REFS_PER_FRAME],
  pub cdef_index: u8,
  pub n4_w: usize, /* block width in the unit of mode_info */
  pub n4_h: usize, /* block height in the unit of mode_info */
  pub tx_w: usize, /* transform width in the unit of mode_info */
  pub tx_h: usize, /* transform height in the unit of mode_info */
  pub is_sec_rect: bool,
  // The block-level deblock_deltas are left-shifted by
  // fi.deblock.block_delta_shift and added to the frame-configured
  // deltas
  pub deblock_deltas: [i8; FRAME_LF_COUNT],
  pub segmentation_idx: u8
}
impl Block {
pub fn default() -> Block {
Block {
mode: PredictionMode::DC_PRED,
partition: PartitionType::PARTITION_NONE,
skip: false,
ref_frames: [INTRA_FRAME; 2],
mv: [ MotionVector { row:0, col: 0 }; 2],
neighbors_ref_counts: [0; TOTAL_REFS_PER_FRAME],
cdef_index: 0,
n4_w: BLOCK_64X64.width_mi(),
n4_h: BLOCK_64X64.height_mi(),
tx_w: TX_64X64.width_mi(),
tx_h: TX_64X64.height_mi(),
is_sec_rect: false,
deblock_deltas: [0, 0, 0, 0],
segmentation_idx: 0,
}
}
pub fn is_inter(&self) -> bool {
self.mode >= PredictionMode::NEARESTMV
}
pub fn has_second_ref(&self) -> bool {
self.ref_frames[1] > INTRA_FRAME && self.ref_frames[1] != NONE_FRAME
}
}
// Transform-block context pair; name kept in aom's ALL_CAPS style for parity
// with the reference implementation.
pub struct TXB_CTX {
  pub txb_skip_ctx: usize, // context for the txb_skip symbol
  pub dc_sign_ctx: usize   // context for the DC coefficient sign symbol
}
/// Spatial (above/left) coding contexts plus the per-4x4 block grid for a frame.
#[derive(Clone, Default)]
pub struct BlockContext {
  pub cols: usize,
  pub rows: usize,
  pub cdef_coded: bool,
  pub code_deltas: bool,
  pub update_seg: bool,
  pub preskip_segid: bool,
  // frame-wide above context; left contexts only span one superblock column
  above_partition_context: Vec<u8>,
  left_partition_context: [u8; MAX_MIB_SIZE],
  above_coeff_context: [Vec<u8>; PLANES],
  left_coeff_context: [[u8; MAX_MIB_SIZE]; PLANES],
  blocks: Vec<Vec<Block>>
}
impl BlockContext {
  /// Build a zeroed context grid for a frame of `cols` x `rows` 4x4 units.
  pub fn new(cols: usize, rows: usize) -> BlockContext {
    // Align power of two
    // (only the partition context is padded to a whole superblock column;
    // the coefficient contexts are sized from the raw column count)
    let aligned_cols = (cols + ((1 << MAX_MIB_SIZE_LOG2) - 1))
      & !((1 << MAX_MIB_SIZE_LOG2) - 1);
    BlockContext {
      cols,
      rows,
      cdef_coded: false,
      code_deltas: false,
      update_seg: false,
      preskip_segid: true,
      above_partition_context: vec![0; aligned_cols],
      left_partition_context: [0; MAX_MIB_SIZE],
      // one entry per smallest-transform-width column, per plane
      above_coeff_context: [
        vec![0; cols << (MI_SIZE_LOG2 - TxSize::smallest_width_log2())],
        vec![0; cols << (MI_SIZE_LOG2 - TxSize::smallest_width_log2())],
        vec![0; cols << (MI_SIZE_LOG2 - TxSize::smallest_width_log2())]
      ],
      left_coeff_context: [[0; MAX_MIB_SIZE]; PLANES],
      blocks: vec![vec![Block::default(); cols]; rows]
    }
  }
  /// Snapshot the entropy-relevant state for later `rollback`.
  /// The `blocks` grid is deliberately NOT copied (it would be expensive and
  /// `rollback` does not restore it either) — the checkpoint carries an
  /// empty grid.
  pub fn checkpoint(&mut self) -> BlockContext {
    BlockContext {
      cols: self.cols,
      rows: self.rows,
      cdef_coded: self.cdef_coded,
      code_deltas: self.code_deltas,
      update_seg: self.update_seg,
      preskip_segid: self.preskip_segid,
      above_partition_context: self.above_partition_context.clone(),
      left_partition_context: self.left_partition_context,
      above_coeff_context: self.above_coeff_context.clone(),
      left_coeff_context: self.left_coeff_context,
      blocks: vec![vec![Block::default(); 0]; 0]
    }
  }
  /// Restore state captured by `checkpoint`.
  /// NOTE(review): `code_deltas`, `update_seg`, `preskip_segid` and `blocks`
  /// are not restored here — looks intentional (they are frame-level /
  /// re-derived), but confirm against callers.
  pub fn rollback(&mut self, checkpoint: &BlockContext) {
    self.cols = checkpoint.cols;
    self.rows = checkpoint.rows;
    self.cdef_coded = checkpoint.cdef_coded;
    self.above_partition_context = checkpoint.above_partition_context.clone();
    self.left_partition_context = checkpoint.left_partition_context;
    self.above_coeff_context = checkpoint.above_coeff_context.clone();
    self.left_coeff_context = checkpoint.left_coeff_context;
  }
  /// Mutable access to the block at the given offset. Panics if out of range.
  pub fn at_mut(&mut self, bo: &BlockOffset) -> &mut Block {
    &mut self.blocks[bo.y][bo.x]
  }
  /// Shared access to the block at the given offset. Panics if out of range.
  pub fn at(&self, bo: &BlockOffset) -> &Block {
    &self.blocks[bo.y][bo.x]
  }
pub fn above_of(&mut self, bo: &BlockOffset) -> Block {
if bo.y > 0 {
self.blocks[bo.y - 1][bo.x]
} else {
Block::default()
}
}
pub fn left_of(&mut self, bo: &BlockOffset) -> Block {
if bo.x > 0 {
self.blocks[bo.y][bo.x - 1]
} else {
Block::default()
}
}
pub fn above_left_of(&mut self, bo: &BlockOffset) -> Block {
if bo.x > 0 && bo.y > 0 {
self.blocks[bo.y - 1][bo.x - 1]
} else {
Block::default()
}
}
pub fn for_each<F>(&mut self, bo: &BlockOffset, bsize: BlockSize, f: F)
where
F: Fn(&mut Block) -> ()
{
let bw = bsize.width_mi();
let bh = bsize.height_mi();
for y in 0..bh {
for x in 0..bw {
f(&mut self.blocks[bo.y + y as usize][bo.x + x as usize]);
}
}
}
pub fn set_dc_sign(&mut self, cul_level: &mut u32, dc_val: i32) {
if dc_val < 0 {
*cul_level |= 1 << COEFF_CONTEXT_BITS;
} else if dc_val > 0 {
*cul_level += 2 << COEFF_CONTEXT_BITS;
}
}
fn set_coeff_context(
&mut self, plane: usize, bo: &BlockOffset, tx_size: TxSize, xdec: usize,
ydec: usize, value: u8
) {
for bx in 0..tx_size.width_mi() {
self.above_coeff_context[plane][(bo.x >> xdec) + bx] = value;
}
let bo_y = bo.y_in_sb();
for by in 0..tx_size.height_mi() {
self.left_coeff_context[plane][(bo_y >> ydec) + by] = value;
}
}
fn reset_left_coeff_context(&mut self, plane: usize) {
for c in &mut self.left_coeff_context[plane] {
*c = 0;
}
}
fn reset_left_partition_context(&mut self) {
for c in &mut self.left_partition_context {
*c = 0;
}
}
//TODO(anyone): Add reset_left_tx_context() here then call it in reset_left_contexts()
  /// Zero the coefficient contexts covered by a skipped block, for every
  /// plane the block actually has (chroma is skipped for small blocks
  /// without a chroma presence).
  pub fn reset_skip_context(
    &mut self, bo: &BlockOffset, bsize: BlockSize, xdec: usize, ydec: usize
  ) {
    const num_planes: usize = 3;
    // >= 8x8 always covers all planes; smaller blocks only touch chroma
    // when has_chroma() says this position carries the chroma samples.
    let nplanes = if bsize >= BLOCK_8X8 {
      3
    } else {
      1 + (num_planes - 1) * has_chroma(bo, bsize, xdec, ydec) as usize
    };
    for plane in 0..nplanes {
      // luma (plane 0) is never decimated
      let xdec2 = if plane == 0 {
        0
      } else {
        xdec
      };
      let ydec2 = if plane == 0 {
        0
      } else {
        ydec
      };
      let plane_bsize = if plane == 0 {
        bsize
      } else {
        get_plane_block_size(bsize, xdec2, ydec2)
      };
      let bw = plane_bsize.width_mi();
      let bh = plane_bsize.height_mi();
      for bx in 0..bw {
        self.above_coeff_context[plane][(bo.x >> xdec2) + bx] = 0;
      }
      let bo_y = bo.y_in_sb();
      for by in 0..bh {
        self.left_coeff_context[plane][(bo_y >> ydec2) + by] = 0;
      }
    }
  }
pub fn reset_left_contexts(&mut self) {
for p in 0..3 {
BlockContext::reset_left_coeff_context(self, p);
}
BlockContext::reset_left_partition_context(self);
//TODO(anyone): Call reset_left_tx_context() here.
}
  /// Stamp `mode` onto every 4x4 unit the block covers.
  pub fn set_mode(
    &mut self, bo: &BlockOffset, bsize: BlockSize, mode: PredictionMode
  ) {
    self.for_each(bo, bsize, |block| block.mode = mode);
  }
pub fn set_block_size(&mut self, bo: &BlockOffset, bsize: BlockSize) {
let n4_w = bsize.width_mi();
let n4_h = bsize.height_mi();
self.for_each(bo, bsize, |block| { block.n4_w = n4_w; block.n4_h = n4_h } );
}
pub fn set_tx_size(&mut self, bo: &BlockOffset, txsize: TxSize) {
let tx_w = txsize.width_mi();
let tx_h = txsize.height_mi();
self.for_each(bo, txsize.block_size(), |block| { block.tx_w = tx_w; block.tx_h = tx_h } );
}
  /// Prediction mode recorded at the given offset.
  pub fn get_mode(&mut self, bo: &BlockOffset) -> PredictionMode {
    self.blocks[bo.y][bo.x].mode
  }
  /// Derive the partition symbol context from the above/left partition
  /// contexts: one bit per neighbor at this block-size level, plus a
  /// per-size offset.
  fn partition_plane_context(
    &self, bo: &BlockOffset, bsize: BlockSize
  ) -> usize {
    // TODO: this should be way simpler without sub8x8
    let above_ctx = self.above_partition_context[bo.x];
    let left_ctx = self.left_partition_context[bo.y_in_sb()];
    // bit position within the context byte for this block-size level
    let bsl = bsize.width_log2() - BLOCK_8X8.width_log2();
    let above = (above_ctx >> bsl) & 1;
    let left = (left_ctx >> bsl) & 1;
    assert!(bsize.is_sqr());
    (left * 2 + above) as usize + bsl as usize * PARTITION_PLOFFSET
  }
  /// After coding a partition decision, refresh the above/left partition
  /// contexts across the parent block's footprint.
  pub fn update_partition_context(
    &mut self, bo: &BlockOffset, subsize: BlockSize, bsize: BlockSize
  ) {
    #[allow(dead_code)]
    let bw = bsize.width_mi();
    let bh = bsize.height_mi();
    let above_ctx =
      &mut self.above_partition_context[bo.x..bo.x + bw as usize];
    let left_ctx = &mut self.left_partition_context
      [bo.y_in_sb()..bo.y_in_sb() + bh as usize];
    // update the partition context at the end notes. set partition bits
    // of block sizes larger than the current one to be one, and partition
    // bits of smaller block sizes to be zero.
    for i in 0..bw {
      above_ctx[i as usize] = partition_context_lookup[subsize as usize][0];
    }
    for i in 0..bh {
      left_ctx[i as usize] = partition_context_lookup[subsize as usize][1];
    }
  }
fn skip_context(&mut self, bo: &BlockOffset) -> usize {
let above_skip = if bo.y > 0 {
self.above_of(bo).skip as usize
} else {
0
};
let left_skip = if bo.x > 0 {
self.left_of(bo).skip as usize
} else {
0
};
above_skip + left_skip
}
  /// Stamp the skip flag onto every 4x4 unit the block covers.
  pub fn set_skip(&mut self, bo: &BlockOffset, bsize: BlockSize, skip: bool) {
    self.for_each(bo, bsize, |block| block.skip = skip);
  }
  /// Stamp the segmentation index onto every 4x4 unit the block covers.
  pub fn set_segmentation_idx(&mut self, bo: &BlockOffset, bsize: BlockSize, idx: u8) {
    self.for_each(bo, bsize, |block| block.segmentation_idx = idx);
  }
pub fn set_ref_frames(&mut self, bo: &BlockOffset, bsize: BlockSize, r: [usize; 2]) {
let bw = bsize.width_mi();
let bh = bsize.height_mi();
for y in 0..bh {
for x in 0..bw {
self.blocks[bo.y + y as usize][bo.x + x as usize].ref_frames = r;
}
}
}
pub fn set_motion_vectors(&mut self, bo: &BlockOffset, bsize: BlockSize, mvs: [MotionVector; 2]) {
let bw = bsize.width_mi();
let bh = bsize.height_mi();
for y in 0..bh {
for x in 0..bw {
self.blocks[bo.y + y as usize][bo.x + x as usize].mv = mvs;
}
}
}
  /// Record the CDEF filter index on every block of the superblock,
  /// clamped to the frame's block grid at the right/bottom edges.
  pub fn set_cdef(&mut self, sbo: &SuperBlockOffset, cdef_index: u8) {
    let bo = sbo.block_offset(0, 0);
    // Checkme: Is 16 still the right block unit for 128x128 superblocks?
    let bw = cmp::min (bo.x + MAX_MIB_SIZE, self.blocks[bo.y as usize].len());
    let bh = cmp::min (bo.y + MAX_MIB_SIZE, self.blocks.len());
    for y in bo.y..bh {
      for x in bo.x..bw {
        self.blocks[y as usize][x as usize].cdef_index = cdef_index;
      }
    }
  }
  // The mode info data structure has a one element border above and to the
  // left of the entries corresponding to real macroblocks.
  // The prediction flags in these dummy entries are initialized to 0.
  // 0 - inter/inter, inter/--, --/inter, --/--
  // 1 - intra/inter, inter/intra
  // 2 - intra/--, --/intra
  // 3 - intra/intra
  pub fn intra_inter_context(&mut self, bo: &BlockOffset) -> usize {
    let has_above = bo.y > 0;
    let has_left = bo.x > 0;
    match (has_above, has_left) {
      // both neighbors available: 3 if both intra, 1 if exactly one, 0 if none
      (true, true) => {
        let above_intra = !self.above_of(bo).is_inter();
        let left_intra = !self.left_of(bo).is_inter();
        if above_intra && left_intra {
          3
        } else {
          (above_intra || left_intra) as usize
        }
      }
      // one neighbor available: 2 if it is intra, 0 otherwise
      (true, _) | (_, true) =>
        2 * if has_above {
          !self.above_of(bo).is_inter() as usize
        } else {
          !self.left_of(bo).is_inter() as usize
        },
      // no neighbors (top-left corner)
      (_, _) => 0
    }
  }
  /// Derive the transform-block contexts (txb_skip and DC sign) from the
  /// above/left coefficient contexts. Mirrors aom's get_txb_ctx().
  pub fn get_txb_ctx(
    &mut self, plane_bsize: BlockSize, tx_size: TxSize, plane: usize,
    bo: &BlockOffset, xdec: usize, ydec: usize
  ) -> TXB_CTX {
    let mut txb_ctx = TXB_CTX {
      txb_skip_ctx: 0,
      dc_sign_ctx: 0
    };
    const MAX_TX_SIZE_UNIT: usize = 16;
    // maps the 2-bit sign code stored above COEFF_CONTEXT_BITS to -1/0/+1
    const signs: [i8; 3] = [0, -1, 1];
    // indexed by (sign sum + 2*MAX_TX_SIZE_UNIT): 1 = mostly negative,
    // 0 = balanced, 2 = mostly positive
    const dc_sign_contexts: [usize; 4 * MAX_TX_SIZE_UNIT + 1] = [
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    ];
    let mut dc_sign: i16 = 0;
    let txb_w_unit = tx_size.width_mi();
    let txb_h_unit = tx_size.height_mi();
    // Decide txb_ctx.dc_sign_ctx
    for k in 0..txb_w_unit {
      let sign = self.above_coeff_context[plane][(bo.x >> xdec) + k]
        >> COEFF_CONTEXT_BITS;
      assert!(sign <= 2);
      dc_sign += signs[sign as usize] as i16;
    }
    for k in 0..txb_h_unit {
      let sign = self.left_coeff_context[plane][(bo.y_in_sb() >> ydec) + k]
        >> COEFF_CONTEXT_BITS;
      assert!(sign <= 2);
      dc_sign += signs[sign as usize] as i16;
    }
    txb_ctx.dc_sign_ctx =
      dc_sign_contexts[(dc_sign + 2 * MAX_TX_SIZE_UNIT as i16) as usize];
    // Decide txb_ctx.txb_skip_ctx
    if plane == 0 {
      if plane_bsize == tx_size.block_size() {
        // transform spans the whole block: fixed context 0
        txb_ctx.txb_skip_ctx = 0;
      } else {
        // This is the algorithm to generate table skip_contexts[min][max].
        // if (!max)
        //   txb_skip_ctx = 1;
        // else if (!min)
        //   txb_skip_ctx = 2 + (max > 3);
        // else if (max <= 3)
        //   txb_skip_ctx = 4;
        // else if (min <= 3)
        //   txb_skip_ctx = 5;
        // else
        //   txb_skip_ctx = 6;
        const skip_contexts: [[u8; 5]; 5] = [
          [1, 2, 2, 2, 3],
          [1, 4, 4, 4, 5],
          [1, 4, 4, 4, 5],
          [1, 4, 4, 4, 5],
          [1, 4, 4, 4, 6]
        ];
        let mut top: u8 = 0;
        let mut left: u8 = 0;
        for k in 0..txb_w_unit {
          top |= self.above_coeff_context[0][(bo.x >> xdec) + k];
        }
        top &= COEFF_CONTEXT_MASK as u8;
        for k in 0..txb_h_unit {
          left |= self.left_coeff_context[0][(bo.y_in_sb() >> ydec) + k];
        }
        left &= COEFF_CONTEXT_MASK as u8;
        let max = cmp::min(top | left, 4);
        let min = cmp::min(cmp::min(top, left), 4);
        txb_ctx.txb_skip_ctx =
          skip_contexts[min as usize][max as usize] as usize;
      }
    } else {
      // chroma: context from whether the above/left neighborhoods carry any
      // nonzero levels, offset by whether the transform undershoots the block
      let mut top: u8 = 0;
      let mut left: u8 = 0;
      for k in 0..txb_w_unit {
        top |= self.above_coeff_context[plane][(bo.x >> xdec) + k];
      }
      for k in 0..txb_h_unit {
        left |= self.left_coeff_context[plane][(bo.y_in_sb() >> ydec) + k];
      }
      let ctx_base = (top != 0) as usize + (left != 0) as usize;
      let ctx_offset = if num_pels_log2_lookup[plane_bsize as usize]
        > num_pels_log2_lookup[tx_size.block_size() as usize]
      {
        10
      } else {
        7
      };
      txb_ctx.txb_skip_ctx = ctx_base + ctx_offset;
    }
    txb_ctx
  }
}
/// Loop-restoration filter choice for one restoration unit.
#[derive(Copy, Clone)]
pub enum RestorationFilter {
  None,
  // 3 symmetric taps per direction (horizontal/vertical pair per row)
  Wiener { coeffs: [[i8; 2]; 3] },
  // self-guided restoration parameters
  Sgr { xqd: [i8; 2] },
}
impl RestorationFilter {
pub fn default() -> RestorationFilter {
RestorationFilter::None
}
}
/// One restoration unit: a region of the frame with its own filter choice.
#[derive(Copy, Clone)]
pub struct RestorationUnit {
  pub params: RestorationFilter,
}
impl RestorationUnit {
  /// A unit with restoration disabled.
  pub fn default() -> RestorationUnit {
    let params = RestorationFilter::default();
    RestorationUnit { params }
  }
}
/// Per-frame loop-restoration state: the unit grid plus the reference
/// filter coefficients used for differential coding.
#[derive(Clone, Default)]
pub struct RestorationContext {
  pub cols: usize,
  pub rows: usize,
  pub wiener_ref: [[[i8; 3]; 2]; PLANES],
  pub sgr_ref: [[i8; 2]; PLANES],
  // indexed [plane][row][col]
  pub units: Vec<Vec<Vec<RestorationUnit>>>
}
impl RestorationContext {
  /// Build a `cols` x `rows` grid of default (disabled) restoration units
  /// for every plane, with mid-point reference coefficients.
  pub fn new(cols: usize, rows: usize) -> RestorationContext {
    RestorationContext {
      cols,
      rows,
      wiener_ref: [[WIENER_TAPS_MID; 2]; PLANES],
      sgr_ref: [SGR_XQD_MID; PLANES],
      units: vec![vec![vec![RestorationUnit::default(); cols]; rows]; PLANES]
    }
  }
}
/// Sign of a chroma-from-luma alpha coefficient; discriminants match the
/// codes used when forming the joint sign symbol.
#[derive(Copy, Clone, PartialEq)]
pub enum CFLSign {
  CFL_SIGN_ZERO = 0,
  CFL_SIGN_NEG = 1,
  CFL_SIGN_POS = 2
}
impl CFLSign {
  /// Sign code for an alpha value: negative, zero, or positive.
  pub fn from_alpha(a: i16) -> CFLSign {
    match a.signum() {
      -1 => CFL_SIGN_NEG,
      0 => CFL_SIGN_ZERO,
      _ => CFL_SIGN_POS
    }
  }
}
use context::CFLSign::*;
// Number of distinct sign codes (zero / negative / positive).
const CFL_SIGNS: usize = 3;
// Multiplier applied to the scale for each sign code, indexed by CFLSign.
static cfl_sign_value: [i16; CFL_SIGNS] = [ 0, -1, 1 ];
/// Chroma-from-luma parameters: per-plane (u, v) sign and scale magnitude.
#[derive(Copy, Clone)]
pub struct CFLParams {
  sign: [CFLSign; 2],  // [u, v] signs
  scale: [u8; 2]       // [u, v] alpha magnitudes
}
impl CFLParams {
  /// Default parameters: u negative with magnitude 1, v zero.
  pub fn new() -> CFLParams {
    CFLParams {
      sign: [CFL_SIGN_NEG, CFL_SIGN_ZERO],
      scale: [1, 0]
    }
  }
  /// Joint sign symbol; valid only when at least one plane's sign is nonzero
  /// (the all-zero combination is not codeable).
  pub fn joint_sign(self) -> u32 {
    assert!(self.sign[0] != CFL_SIGN_ZERO || self.sign[1] != CFL_SIGN_ZERO);
    (self.sign[0] as u32) * (CFL_SIGNS as u32) + (self.sign[1] as u32) - 1
  }
  /// Alpha-magnitude context for plane `uv`; requires a nonzero sign there.
  pub fn context(self, uv: usize) -> usize {
    assert!(self.sign[uv] != CFL_SIGN_ZERO);
    (self.sign[uv] as usize - 1) * CFL_SIGNS + (self.sign[1 - uv] as usize)
  }
  /// Coded magnitude index (scale - 1); requires a nonzero sign and scale.
  pub fn index(self, uv: usize) -> u32 {
    assert!(self.sign[uv] != CFL_SIGN_ZERO && self.scale[uv] != 0);
    (self.scale[uv] - 1) as u32
  }
  /// Signed alpha value for plane `uv`.
  pub fn alpha(self, uv: usize) -> i16 {
    cfl_sign_value[self.sign[uv] as usize] * (self.scale[uv] as i16)
  }
  /// Build parameters from signed (u, v) alpha values.
  pub fn from_alpha(u: i16, v: i16) -> CFLParams {
    CFLParams {
      sign: [ CFLSign::from_alpha(u), CFLSign::from_alpha(v) ],
      scale: [ u.abs() as u8, v.abs() as u8 ]
    }
  }
}
/// Debug helper mapping a field name to the [start, end) address range its
/// CDF data occupies, so a raw pointer can be attributed to a field.
#[derive(Debug, Default)]
struct FieldMap {
  map: Vec<(&'static str, usize, usize)>
}
impl FieldMap {
  /// Print the field the address belongs to, or a not-found notice.
  /// Debug-only diagnostic; output goes to stderr.
  fn lookup(&self, addr: usize) {
    for &(name, start, end) in &self.map {
      if addr >= start && addr < end {
        eprintln!(" CDF {}", name);
        // blank separator line (eprintln!() is the lint-clean spelling
        // of eprintln!(""))
        eprintln!();
        return;
      }
    }
    eprintln!(" CDF address not found {}", addr);
  }
}
// Write symbol `$s` with an adaptive CDF update; in debug builds, also
// report which CDFContext field the CDF pointer falls in (via fc_map).
macro_rules! symbol_with_update {
  ($self:ident, $w:ident, $s:expr, $cdf:expr) => {
    $w.symbol_with_update($s, $cdf);
    #[cfg(debug)] {
      if let Some(map) = $self.fc_map.as_ref() {
        map.lookup($cdf.as_ptr() as usize);
      }
    }
  };
}
pub fn av1_get_coded_tx_size(tx_size: TxSize) -> TxSize {
if tx_size == TX_64X64 || tx_size == TX_64X32 || tx_size == TX_32X64 {
return TX_32X32
}
if tx_size == TX_16X64 {
return TX_16X32
}
if tx_size == TX_64X16 {
return TX_32X16
}
tx_size
}
/// Snapshot of the writer's entropy state (CDFs + block contexts) used to
/// undo speculative encoding.
#[derive(Clone)]
pub struct ContextWriterCheckpoint {
  pub fc: CDFContext,
  pub bc: BlockContext
}
/// Bundles the adaptive CDFs, block contexts and restoration state used
/// while writing symbols for a frame.
#[derive(Clone)]
pub struct ContextWriter {
  pub bc: BlockContext,
  pub fc: CDFContext,
  pub rc: RestorationContext,
  #[cfg(debug)]
  fc_map: Option<FieldMap> // For debugging purposes
}
impl ContextWriter {
  /// Assemble a writer from its parts. In debug builds, setting the
  /// RAV1E_DEBUG environment variable enables CDF address attribution.
  pub fn new(fc: CDFContext, bc: BlockContext, rc: RestorationContext) -> Self {
    #[allow(unused_mut)]
    let mut cw = ContextWriter {
      fc,
      bc,
      rc,
      #[cfg(debug)]
      fc_map: Default::default()
    };
    #[cfg(debug)] {
      if std::env::var_os("RAV1E_DEBUG").is_some() {
        cw.fc_map = Some(FieldMap {
          map: cw.fc.build_map()
        });
      }
    }
    cw
  }
fn cdf_element_prob(cdf: &[u16], element: usize) -> u16 {
(if element > 0 {
cdf[element - 1]
} else {
32768
}) - cdf[element]
}
fn partition_gather_horz_alike(
out: &mut [u16; 2], cdf_in: &[u16], _bsize: BlockSize
) {
out[0] = 32768;
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_HORZ as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_SPLIT as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_HORZ_A as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_HORZ_B as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_VERT_A as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_HORZ_4 as usize
);
out[0] = 32768 - out[0];
out[1] = 0;
}
fn partition_gather_vert_alike(
out: &mut [u16; 2], cdf_in: &[u16], _bsize: BlockSize
) {
out[0] = 32768;
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_VERT as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_SPLIT as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_HORZ_A as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_VERT_A as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_VERT_B as usize
);
out[0] -= ContextWriter::cdf_element_prob(
cdf_in,
PartitionType::PARTITION_VERT_4 as usize
);
out[0] = 32768 - out[0];
out[1] = 0;
}
  /// Code the partition decision for a block. At the frame's right/bottom
  /// edges where only one split direction is legal, the full partition CDF
  /// is collapsed to a binary split/no-split symbol.
  pub fn write_partition(
    &mut self, w: &mut dyn Writer, bo: &BlockOffset, p: PartitionType, bsize: BlockSize
  ) {
    assert!(bsize >= BlockSize::BLOCK_8X8 );
    let hbs = bsize.width_mi() / 2;
    let has_cols = (bo.x + hbs) < self.bc.cols;
    let has_rows = (bo.y + hbs) < self.bc.rows;
    let ctx = self.bc.partition_plane_context(&bo, bsize);
    assert!(ctx < PARTITION_CONTEXTS);
    // 8x8 blocks use the reduced symbol set (no extended partitions)
    let partition_cdf = if bsize <= BlockSize::BLOCK_8X8 {
      &mut self.fc.partition_cdf[ctx][..PARTITION_TYPES+1]
    } else {
      &mut self.fc.partition_cdf[ctx]
    };
    if !has_rows && !has_cols {
      // fully outside the frame: partition is implied, nothing to code
      return;
    }
    if has_rows && has_cols {
      symbol_with_update!(self, w, p as u32, partition_cdf);
    } else if !has_rows && has_cols {
      assert!(bsize > BlockSize::BLOCK_8X8);
      let mut cdf = [0u16; 2];
      ContextWriter::partition_gather_vert_alike(
        &mut cdf,
        partition_cdf,
        bsize
      );
      w.symbol((p == PartitionType::PARTITION_SPLIT) as u32, &cdf);
    } else {
      assert!(bsize > BlockSize::BLOCK_8X8);
      let mut cdf = [0u16; 2];
      ContextWriter::partition_gather_horz_alike(
        &mut cdf,
        partition_cdf,
        bsize
      );
      w.symbol((p == PartitionType::PARTITION_SPLIT) as u32, &cdf);
    }
  }
  /// Code a keyframe intra mode, conditioned on the above/left neighbors'
  /// modes mapped through the intra-mode context table.
  pub fn write_intra_mode_kf(
    &mut self, w: &mut dyn Writer, bo: &BlockOffset, mode: PredictionMode
  ) {
    // maps each intra mode to one of 5 context classes
    static intra_mode_context: [usize; INTRA_MODES] =
      [0, 1, 2, 3, 4, 4, 4, 4, 3, 0, 1, 2, 0];
    let above_mode = self.bc.above_of(bo).mode as usize;
    let left_mode = self.bc.left_of(bo).mode as usize;
    let above_ctx = intra_mode_context[above_mode];
    let left_ctx = intra_mode_context[left_mode];
    let cdf = &mut self.fc.kf_y_cdf[above_ctx][left_ctx];
    symbol_with_update!(self, w, mode as u32, cdf);
  }
  /// Code a (non-keyframe) luma intra mode, conditioned on block-size group.
  pub fn write_intra_mode(&mut self, w: &mut dyn Writer, bsize: BlockSize, mode: PredictionMode) {
    let cdf =
      &mut self.fc.y_mode_cdf[size_group_lookup[bsize as usize] as usize];
    symbol_with_update!(self, w, mode as u32, cdf);
  }
  /// Code the chroma intra mode, conditioned on the luma mode. Blocks where
  /// CFL is not allowed use the CDF truncated to the non-CFL symbol set.
  pub fn write_intra_uv_mode(
    &mut self, w: &mut dyn Writer, uv_mode: PredictionMode, y_mode: PredictionMode, bs: BlockSize
  ) {
    let cdf =
      &mut self.fc.uv_mode_cdf[bs.cfl_allowed() as usize][y_mode as usize];
    if bs.cfl_allowed() {
      symbol_with_update!(self, w, uv_mode as u32, cdf);
    } else {
      symbol_with_update!(self, w, uv_mode as u32, &mut cdf[..UV_INTRA_MODES]);
    }
  }
  /// Code the CFL parameters: first the joint sign, then, for each plane
  /// with a nonzero sign, its alpha magnitude index.
  pub fn write_cfl_alphas(&mut self, w: &mut dyn Writer, cfl: CFLParams) {
    symbol_with_update!(self, w, cfl.joint_sign(), &mut self.fc.cfl_sign_cdf);
    for uv in 0..2 {
      if cfl.sign[uv] != CFL_SIGN_ZERO {
        symbol_with_update!(self, w, cfl.index(uv), &mut self.fc.cfl_alpha_cdf[cfl.context(uv)]);
      }
    }
  }
  /// Code a directional-mode angle delta, biased to be non-negative.
  pub fn write_angle_delta(&mut self, w: &mut dyn Writer, angle: i8, mode: PredictionMode) {
    symbol_with_update!(
      self,
      w,
      (angle + MAX_ANGLE_DELTA as i8) as u32,
      // angle-delta CDFs are indexed by directional mode, V_PRED first
      &mut self.fc.angle_delta_cdf
        [mode as usize - PredictionMode::V_PRED as usize]
    );
  }
  /// Code the filter-intra enable flag, conditioned on block size.
  pub fn write_use_filter_intra(&mut self, w: &mut dyn Writer, enable: bool, block_size: BlockSize) {
    symbol_with_update!(self, w, enable as u32, &mut self.fc.filter_intra_cdfs[block_size as usize]);
  }
fn get_mvref_ref_frames(&mut self, ref_frame: usize) -> ([usize; 2], usize) {
let ref_frame_map: [[usize; 2]; TOTAL_COMP_REFS] = [
[ LAST_FRAME, BWDREF_FRAME ], [ LAST2_FRAME, BWDREF_FRAME ],
[ LAST3_FRAME, BWDREF_FRAME ], [ GOLDEN_FRAME, BWDREF_FRAME ],
[ LAST_FRAME, ALTREF2_FRAME ], [ LAST2_FRAME, ALTREF2_FRAME ],
[ LAST3_FRAME, ALTREF2_FRAME ], [ GOLDEN_FRAME, ALTREF2_FRAME ],
[ LAST_FRAME, ALTREF_FRAME ], [ LAST2_FRAME, ALTREF_FRAME ],
[ LAST3_FRAME, ALTREF_FRAME ], [ GOLDEN_FRAME, ALTREF_FRAME ],
[ LAST_FRAME, LAST2_FRAME ], [ LAST_FRAME, LAST3_FRAME ],
[ LAST_FRAME, GOLDEN_FRAME ], [ BWDREF_FRAME, ALTREF_FRAME ],
// NOTE: Following reference frame pairs are not supported to be explicitly
// signalled, but they are possibly chosen by the use of skip_mode,
// which may use the most recent one-sided reference frame pair.
[ LAST2_FRAME, LAST3_FRAME ], [ LAST2_FRAME, GOLDEN_FRAME ],
[ LAST3_FRAME, GOLDEN_FRAME ], [ BWDREF_FRAME, ALTREF2_FRAME ],
[ ALTREF2_FRAME, ALTREF_FRAME ]
];
if ref_frame >= REF_FRAMES {
([ ref_frame_map[ref_frame - REF_FRAMES][0], ref_frame_map[ref_frame - REF_FRAMES][1] ], 2)
} else {
([ ref_frame, 0 ], 1)
}
}
fn find_valid_row_offs(&mut self, row_offset: isize, mi_row: usize, mi_rows: usize) -> isize {
if /* !tile->tg_horz_boundary */ true {
cmp::min(cmp::max(row_offset, -(mi_row as isize)), (mi_rows - mi_row - 1) as isize)
} else {
0
/* TODO: for tiling */
}
}
  /// Whether the top-right neighbor of this block has already been decoded,
  /// walking up the partition hierarchy to check quadrant positions.
  fn has_tr(&mut self, bo: &BlockOffset, bsize: BlockSize, _is_sec_rect: bool) -> bool {
    let sb_mi_size = BLOCK_64X64.width_mi(); /* Assume 64x64 for now */
    // position of the block inside its superblock
    let mask_row = bo.y & LOCAL_BLOCK_MASK;
    let mask_col = bo.x & LOCAL_BLOCK_MASK;
    let target_n4_w = bsize.width_mi();
    let target_n4_h = bsize.height_mi();
    let mut bs = target_n4_w.max(target_n4_h);
    if bs > BLOCK_64X64.width_mi() {
      return false;
    }
    // bottom-right quadrant at this level never has a decoded top-right
    let mut has_tr = !((mask_row & bs) != 0 && (mask_col & bs) != 0);
    /* TODO: assert its a power of two */
    // climb the partition tree: a right-column child whose parent is in a
    // bottom-right quadrant loses its top-right
    while bs < sb_mi_size {
      if (mask_col & bs) != 0 {
        if (mask_col & (2 * bs) != 0) && (mask_row & (2 * bs) != 0) {
          has_tr = false;
          break;
        }
      } else {
        break;
      }
      bs <<= 1;
    }
    /* The left hand of two vertical rectangles always has a top right (as the
     * block above will have been decoded) */
    if (target_n4_w < target_n4_h) && (bo.x & target_n4_w) == 0 {
      has_tr = true;
    }
    /* The bottom of two horizontal rectangles never has a top right (as the block
     * to the right won't have been decoded) */
    if (target_n4_w > target_n4_h) && (bo.y & target_n4_h) != 0 {
      has_tr = false;
    }
    /* The bottom left square of a Vertical A (in the old format) does
     * not have a top right as it is decoded before the right hand
     * rectangle of the partition */
    /*
    if blk.partition == PartitionType::PARTITION_VERT_A {
      if blk.n4_w == blk.n4_h {
        if (mask_row & bs) != 0 {
          has_tr = false;
        }
      }
    }
    */
    has_tr
  }
fn find_valid_col_offs(&mut self, col_offset: isize, mi_col: usize) -> isize {
cmp::max(col_offset, -(mi_col as isize))
}
fn find_matching_mv(&self, mv: MotionVector, mv_stack: &mut Vec<CandidateMV>) -> bool {
for mv_cand in mv_stack {
if mv.row == mv_cand.this_mv.row && mv.col == mv_cand.this_mv.col {
return true;
}
}
false
}
fn find_matching_mv_and_update_weight(&self, mv: MotionVector, mv_stack: &mut Vec<CandidateMV>, weight: u32) -> bool {
for mut mv_cand in mv_stack {
if mv.row == mv_cand.this_mv.row && mv.col == mv_cand.this_mv.col {
mv_cand.weight += weight;
return true;
}
}
false
}
fn find_matching_comp_mv_and_update_weight(&self, mvs: [MotionVector; 2], mv_stack: &mut Vec<CandidateMV>, weight: u32) -> bool {
for mut mv_cand in mv_stack {
if mvs[0].row == mv_cand.this_mv.row && mvs[0].col == mv_cand.this_mv.col &&
mvs[1].row == mv_cand.comp_mv.row && mvs[1].col == mv_cand.comp_mv.col {
mv_cand.weight += weight;
return true;
}
}
false
}
  /// Consider a neighbor block as a motion-vector-prediction candidate for
  /// `ref_frames`: merge into an existing stack entry (bumping its weight)
  /// or push a new one, and count NEWMV-family neighbors. Returns whether
  /// the neighbor's references matched at all.
  fn add_ref_mv_candidate(&self, ref_frames: [usize; 2], blk: &Block, mv_stack: &mut Vec<CandidateMV>,
                          weight: u32, newmv_count: &mut usize, is_compound: bool) -> bool {
    if !blk.is_inter() { /* For intrabc */
      false
    } else if is_compound {
      // compound search: both references must match exactly
      if blk.ref_frames[0] == ref_frames[0] && blk.ref_frames[1] == ref_frames[1] {
        let found_match = self.find_matching_comp_mv_and_update_weight(blk.mv, mv_stack, weight);
        if !found_match && mv_stack.len() < MAX_REF_MV_STACK_SIZE {
          let mv_cand = CandidateMV {
            this_mv: blk.mv[0],
            comp_mv: blk.mv[1],
            weight: weight
          };
          mv_stack.push(mv_cand);
        }
        if blk.mode == PredictionMode::NEW_NEWMV ||
          blk.mode == PredictionMode::NEAREST_NEWMV ||
          blk.mode == PredictionMode::NEW_NEARESTMV ||
          blk.mode == PredictionMode::NEAR_NEWMV ||
          blk.mode == PredictionMode::NEW_NEARMV {
          *newmv_count += 1;
        }
        true
      } else {
        false
      }
    } else {
      // single-reference search: either of the neighbor's two slots may match
      let mut found = false;
      for i in 0..2 {
        if blk.ref_frames[i] == ref_frames[0] {
          let found_match = self.find_matching_mv_and_update_weight(blk.mv[i], mv_stack, weight);
          if !found_match && mv_stack.len() < MAX_REF_MV_STACK_SIZE {
            let mv_cand = CandidateMV {
              this_mv: blk.mv[i],
              comp_mv: MotionVector { row: 0, col: 0 },
              weight: weight
            };
            mv_stack.push(mv_cand);
          }
          if blk.mode == PredictionMode::NEW_NEWMV ||
            blk.mode == PredictionMode::NEAREST_NEWMV ||
            blk.mode == PredictionMode::NEW_NEARESTMV ||
            blk.mode == PredictionMode::NEAR_NEWMV ||
            blk.mode == PredictionMode::NEW_NEARMV ||
            blk.mode == PredictionMode::NEWMV {
            *newmv_count += 1;
          }
          found = true;
        }
      }
      found
    }
  }
  /// Gather fallback candidates from a neighbor whose references do not
  /// match exactly. Compound: bucket its vectors into same-reference and
  /// different-reference lists (negating across sign-bias mismatches).
  /// Single: push the (possibly negated) vector directly if novel.
  fn add_extra_mv_candidate(
    &self,
    blk: &Block,
    ref_frames: [usize; 2],
    mv_stack: &mut Vec<CandidateMV>,
    fi: &FrameInvariants,
    is_compound: bool,
    ref_id_count: &mut [usize; 2],
    ref_id_mvs: &mut [[MotionVector; 2]; 2],
    ref_diff_count: &mut [usize; 2],
    ref_diff_mvs: &mut [[MotionVector; 2]; 2],
  ) {
    if is_compound {
      for cand_list in 0..2 {
        let cand_ref = blk.ref_frames[cand_list];
        if cand_ref > INTRA_FRAME && cand_ref != NONE_FRAME {
          for list in 0..2 {
            let mut cand_mv = blk.mv[cand_list];
            if cand_ref == ref_frames[list] && ref_id_count[list] < 2 {
              // same reference: keep vector as-is
              ref_id_mvs[list][ref_id_count[list]] = cand_mv;
              ref_id_count[list] = ref_id_count[list] + 1;
            } else if ref_diff_count[list] < 2 {
              // different reference: flip the vector when the two
              // references point across the current frame
              if fi.ref_frame_sign_bias[cand_ref - LAST_FRAME] !=
                fi.ref_frame_sign_bias[ref_frames[list] - LAST_FRAME] {
                cand_mv.row = -cand_mv.row;
                cand_mv.col = -cand_mv.col;
              }
              ref_diff_mvs[list][ref_diff_count[list]] = cand_mv;
              ref_diff_count[list] = ref_diff_count[list] + 1;
            }
          }
        }
      }
    } else {
      for cand_list in 0..2 {
        let cand_ref = blk.ref_frames[cand_list];
        if cand_ref > INTRA_FRAME && cand_ref != NONE_FRAME {
          let mut mv = blk.mv[cand_list];
          if fi.ref_frame_sign_bias[cand_ref - LAST_FRAME] !=
            fi.ref_frame_sign_bias[ref_frames[0] - LAST_FRAME] {
            mv.row = -mv.row;
            mv.col = -mv.col;
          }
          if !self.find_matching_mv(mv, mv_stack) {
            let mv_cand = CandidateMV {
              this_mv: mv,
              comp_mv: MotionVector { row: 0, col: 0 },
              weight: 2
            };
            mv_stack.push(mv_cand);
          }
        }
      }
    }
  }
  /// Scans one row of neighbouring blocks above the current block (vertical
  /// distance `row_offset`, which is negative) and feeds matching MVs into
  /// `mv_stack` via `add_ref_mv_candidate`.
  ///
  /// `processed_rows` is updated so outer rows already covered by a tall
  /// neighbour are not scanned again. Returns true if any neighbour in the
  /// row referenced `ref_frames`.
  fn scan_row_mbmi(&mut self, bo: &BlockOffset, row_offset: isize, max_row_offs: isize,
                   processed_rows: &mut isize, ref_frames: [usize; 2],
                   mv_stack: &mut Vec<CandidateMV>, newmv_count: &mut usize, bsize: BlockSize,
                   is_compound: bool) -> bool {
    let bc = &self.bc;
    let target_n4_w = bsize.width_mi();
    // Clamp the scan to the frame edge and to a 64x64 window.
    let end_mi = cmp::min(cmp::min(target_n4_w, bc.cols - bo.x),
                          BLOCK_64X64.width_mi());
    let n4_w_8 = BLOCK_8X8.width_mi();
    let n4_w_16 = BLOCK_16X16.width_mi();
    let mut col_offset = 0;
    // Rows further than one block away start half a step over, except for
    // odd-positioned sub-8x8 blocks.
    if row_offset.abs() > 1 {
      col_offset = 1;
      if ((bo.x & 0x01) != 0) && (target_n4_w < n4_w_8) {
        col_offset -= 1;
      }
    }
    let use_step_16 = target_n4_w >= 16;
    let mut found_match = false;
    let mut i = 0;
    while i < end_mi {
      let cand = bc.at(&bo.with_offset(col_offset + i as isize, row_offset));
      let n4_w = cand.n4_w;
      // Step by the overlap with the candidate, with a minimum stride for
      // wide targets / far rows.
      let mut len = cmp::min(target_n4_w, n4_w);
      if use_step_16 {
        len = cmp::max(n4_w_16, len);
      } else if row_offset.abs() > 1 {
        len = cmp::max(len, n4_w_8);
      }
      let mut weight = 2 as u32;
      if target_n4_w >= n4_w_8 && target_n4_w <= n4_w {
        // A neighbour at least as wide as us may cover several outer rows;
        // weight it up and mark those rows as processed.
        let inc = cmp::min(-max_row_offs + row_offset + 1, cand.n4_h as isize);
        assert!(inc >= 0);
        weight = cmp::max(weight, inc as u32);
        *processed_rows = (inc as isize) - row_offset - 1;
      }
      if self.add_ref_mv_candidate(ref_frames, cand, mv_stack, len as u32 * weight, newmv_count, is_compound) {
        found_match = true;
      }
      i += len;
    }
    found_match
  }
  /// Column-wise mirror of `scan_row_mbmi`: scans one column of neighbouring
  /// blocks to the left (horizontal distance `col_offset`, negative) and
  /// feeds matching MVs into `mv_stack`.
  ///
  /// Updates `processed_cols` so outer columns already covered by a wide
  /// neighbour are skipped. Returns true on any reference-frame match.
  fn scan_col_mbmi(&mut self, bo: &BlockOffset, col_offset: isize, max_col_offs: isize,
                   processed_cols: &mut isize, ref_frames: [usize; 2],
                   mv_stack: &mut Vec<CandidateMV>, newmv_count: &mut usize, bsize: BlockSize,
                   is_compound: bool) -> bool {
    let bc = &self.bc;
    let target_n4_h = bsize.height_mi();
    // Clamp the scan to the frame edge and to a 64x64 window.
    let end_mi = cmp::min(cmp::min(target_n4_h, bc.rows - bo.y),
                          BLOCK_64X64.height_mi());
    let n4_h_8 = BLOCK_8X8.height_mi();
    let n4_h_16 = BLOCK_16X16.height_mi();
    let mut row_offset = 0;
    // Columns further than one block away start half a step down, except for
    // odd-positioned sub-8x8 blocks.
    if col_offset.abs() > 1 {
      row_offset = 1;
      if ((bo.y & 0x01) != 0) && (target_n4_h < n4_h_8) {
        row_offset -= 1;
      }
    }
    let use_step_16 = target_n4_h >= 16;
    let mut found_match = false;
    let mut i = 0;
    while i < end_mi {
      let cand = bc.at(&bo.with_offset(col_offset, row_offset + i as isize));
      let n4_h = cand.n4_h;
      // Step by the overlap with the candidate, with a minimum stride for
      // tall targets / far columns.
      let mut len = cmp::min(target_n4_h, n4_h);
      if use_step_16 {
        len = cmp::max(n4_h_16, len);
      } else if col_offset.abs() > 1 {
        len = cmp::max(len, n4_h_8);
      }
      let mut weight = 2 as u32;
      if target_n4_h >= n4_h_8 && target_n4_h <= n4_h {
        // A neighbour at least as tall as us may cover several outer
        // columns; weight it up and mark those columns as processed.
        let inc = cmp::min(-max_col_offs + col_offset + 1, cand.n4_w as isize);
        assert!(inc >= 0);
        weight = cmp::max(weight, inc as u32);
        *processed_cols = (inc as isize) - col_offset - 1;
      }
      if self.add_ref_mv_candidate(ref_frames, cand, mv_stack, len as u32 * weight, newmv_count, is_compound) {
        found_match = true;
      }
      i += len;
    }
    found_match
  }
fn scan_blk_mbmi(&mut self, bo: &BlockOffset, ref_frames: [usize; 2],
mv_stack: &mut Vec<CandidateMV>, newmv_count: &mut usize,
is_compound: bool) -> bool {
if bo.x >= self.bc.cols || bo.y >= self.bc.rows {
return false;
}
let weight = 2 * BLOCK_8X8.width_mi() as u32;
/* Always assume its within a tile, probably wrong */
self.add_ref_mv_candidate(ref_frames, self.bc.at(bo), mv_stack, weight, newmv_count, is_compound)
}
fn add_offset(&mut self, mv_stack: &mut Vec<CandidateMV>) {
for mut cand_mv in mv_stack {
cand_mv.weight += REF_CAT_LEVEL;
}
}
  /// Builds the motion-vector candidate list (`mv_stack`) for the block at
  /// `bo`, following the AV1 "find MV stack" process (spec 7.10.2):
  /// scan the nearest row/column, add the REF_CAT_LEVEL offset, scan outer
  /// rows/columns, sort by weight, run the extra-search pass if fewer than
  /// two candidates were found, then clamp all MVs to the allowed border.
  ///
  /// Returns the packed mode context (newmv context in the low bits,
  /// refmv context shifted by REFMV_OFFSET).
  fn setup_mvref_list(&mut self, bo: &BlockOffset, ref_frames: [usize; 2], mv_stack: &mut Vec<CandidateMV>,
                      bsize: BlockSize, is_sec_rect: bool, fi: &FrameInvariants, is_compound: bool) -> usize {
    let (_rf, _rf_num) = self.get_mvref_ref_frames(INTRA_FRAME);
    let target_n4_h = bsize.height_mi();
    let target_n4_w = bsize.width_mi();
    let mut max_row_offs = 0 as isize;
    // Sub-8x8 blocks at odd positions shift the scan start by one.
    let row_adj = (target_n4_h < BLOCK_8X8.height_mi()) && (bo.y & 0x01) != 0x0;
    let mut max_col_offs = 0 as isize;
    let col_adj = (target_n4_w < BLOCK_8X8.width_mi()) && (bo.x & 0x01) != 0x0;
    let mut processed_rows = 0 as isize;
    let mut processed_cols = 0 as isize;
    let up_avail = bo.y > 0;
    let left_avail = bo.x > 0;
    // Determine how far up we may scan (negative offsets), clamped to the
    // frame/tile boundary.
    if up_avail {
      max_row_offs = -2 * MVREF_ROW_COLS as isize + row_adj as isize;
      // limit max offset for small blocks
      if target_n4_h < BLOCK_8X8.height_mi() {
        max_row_offs = -2 * 2 + row_adj as isize;
      }
      let rows = self.bc.rows;
      max_row_offs = self.find_valid_row_offs(max_row_offs, bo.y, rows);
    }
    // Same for leftward scanning.
    if left_avail {
      max_col_offs = -2 * MVREF_ROW_COLS as isize + col_adj as isize;
      // limit max offset for small blocks
      if target_n4_w < BLOCK_8X8.width_mi() {
        max_col_offs = -2 * 2 + col_adj as isize;
      }
      max_col_offs = self.find_valid_col_offs(max_col_offs, bo.x);
    }
    let mut row_match = false;
    let mut col_match = false;
    let mut newmv_count: usize = 0;
    // Nearest ring: row immediately above, column immediately left, and the
    // top-right corner block.
    if max_row_offs.abs() >= 1 {
      let found_match = self.scan_row_mbmi(bo, -1, max_row_offs, &mut processed_rows, ref_frames, mv_stack,
                                           &mut newmv_count, bsize, is_compound);
      row_match |= found_match;
    }
    if max_col_offs.abs() >= 1 {
      let found_match = self.scan_col_mbmi(bo, -1, max_col_offs, &mut processed_cols, ref_frames, mv_stack,
                                           &mut newmv_count, bsize, is_compound);
      col_match |= found_match;
    }
    if self.has_tr(bo, bsize, is_sec_rect) {
      let found_match = self.scan_blk_mbmi(&bo.with_offset(target_n4_w as isize, -1), ref_frames, mv_stack,
                                           &mut newmv_count, is_compound);
      row_match |= found_match;
    }
    let nearest_match = if row_match { 1 } else { 0 } + if col_match { 1 } else { 0 };
    // Nearest candidates get REF_CAT_LEVEL added so they always sort first.
    self.add_offset(mv_stack);
    /* Scan the second outer area. */
    let mut far_newmv_count: usize = 0; // won't be used
    let found_match = self.scan_blk_mbmi(
      &bo.with_offset(-1, -1), ref_frames, mv_stack, &mut far_newmv_count, is_compound
    );
    row_match |= found_match;
    // Outer rings, skipping rows/columns already covered by large neighbours.
    for idx in 2..MVREF_ROW_COLS+1 {
      let row_offset = -2 * idx as isize + 1 + row_adj as isize;
      let col_offset = -2 * idx as isize + 1 + col_adj as isize;
      if row_offset.abs() <= max_row_offs.abs() && row_offset.abs() > processed_rows {
        let found_match = self.scan_row_mbmi(bo, row_offset, max_row_offs, &mut processed_rows, ref_frames, mv_stack,
                                             &mut far_newmv_count, bsize, is_compound);
        row_match |= found_match;
      }
      if col_offset.abs() <= max_col_offs.abs() && col_offset.abs() > processed_cols {
        let found_match = self.scan_col_mbmi(bo, col_offset, max_col_offs, &mut processed_cols, ref_frames, mv_stack,
                                             &mut far_newmv_count, bsize, is_compound);
        col_match |= found_match;
      }
    }
    let total_match = if row_match { 1 } else { 0 } + if col_match { 1 } else { 0 };
    assert!(total_match >= nearest_match);
    // mode_context contains both newmv_context and refmv_context, where newmv_context
    // lies in the REF_MVOFFSET least significant bits
    let mode_context = match nearest_match {
      0 => cmp::min(total_match, 1) + (total_match << REFMV_OFFSET),
      1 => 3 - cmp::min(newmv_count, 1) + ((2 + total_match) << REFMV_OFFSET),
      _ => 5 - cmp::min(newmv_count, 1) + (5 << REFMV_OFFSET)
    };
    /* TODO: Find nearest match and assign nearest and near mvs */
    // 7.10.2.11 Sort MV stack according to weight
    mv_stack.sort_by(|a, b| b.weight.cmp(&a.weight));
    if mv_stack.len() < 2 {
      // 7.10.2.12 Extra search process
      let w4 = bsize.width_mi().min(16).min(self.bc.cols - bo.x);
      let h4 = bsize.height_mi().min(16).min(self.bc.rows - bo.y);
      let num4x4 = w4.min(h4);
      // Pass 0 walks the row above, pass 1 the column to the left.
      let passes = if up_avail { 0 } else { 1 } .. if left_avail { 2 } else { 1 };
      let mut ref_id_count = [0 as usize; 2];
      let mut ref_diff_count = [0 as usize; 2];
      let mut ref_id_mvs = [[MotionVector { row: 0, col: 0 }; 2]; 2];
      let mut ref_diff_mvs = [[MotionVector { row: 0, col: 0 }; 2]; 2];
      for pass in passes {
        let mut idx = 0;
        while idx < num4x4 && mv_stack.len() < 2 {
          let rbo = if pass == 0 {
            bo.with_offset(idx as isize, -1)
          } else {
            bo.with_offset(-1, idx as isize)
          };
          let blk = &self.bc.at(&rbo);
          self.add_extra_mv_candidate(
            blk, ref_frames, mv_stack, fi, is_compound,
            &mut ref_id_count, &mut ref_id_mvs, &mut ref_diff_count, &mut ref_diff_mvs
          );
          // Advance by the neighbour's size along the scan direction.
          idx += if pass == 0 {
            blk.n4_w
          } else {
            blk.n4_h
          };
        }
      }
      if is_compound {
        // Combine the same-reference and different-reference accumulators
        // into up to two compound candidates, preferring exact matches.
        let mut combined_mvs = [[MotionVector { row: 0, col: 0}; 2]; 2];
        for list in 0..2 {
          let mut comp_count = 0;
          for idx in 0..ref_id_count[list] {
            combined_mvs[comp_count][list] = ref_id_mvs[list][idx];
            comp_count = comp_count + 1;
          }
          for idx in 0..ref_diff_count[list] {
            if comp_count < 2 {
              combined_mvs[comp_count][list] = ref_diff_mvs[list][idx];
              comp_count = comp_count + 1;
            }
          }
        }
        if mv_stack.len() == 1 {
          // Avoid pushing a duplicate of the candidate already on the stack.
          let mv_cand = if combined_mvs[0][0].row == mv_stack[0].this_mv.row &&
            combined_mvs[0][0].col == mv_stack[0].this_mv.col &&
            combined_mvs[0][1].row == mv_stack[0].comp_mv.row &&
            combined_mvs[0][1].col == mv_stack[0].comp_mv.col {
            CandidateMV {
              this_mv: combined_mvs[1][0],
              comp_mv: combined_mvs[1][1],
              weight: 2
            }
          } else {
            CandidateMV {
              this_mv: combined_mvs[0][0],
              comp_mv: combined_mvs[0][1],
              weight: 2
            }
          };
          mv_stack.push(mv_cand);
        } else {
          for idx in 0..2 {
            let mv_cand = CandidateMV {
              this_mv: combined_mvs[idx][0],
              comp_mv: combined_mvs[idx][1],
              weight: 2
            };
            mv_stack.push(mv_cand);
          }
        }
        assert!(mv_stack.len() == 2);
      }
    }
    /* TODO: Handle single reference frame extension */
    // clamp mvs
    // Each MV is clamped so the motion-compensated block stays within the
    // frame plus a fixed border (units: 1/8-pel).
    for mv in mv_stack {
      let blk_w = bsize.width();
      let blk_h = bsize.height();
      let border_w = 128 + blk_w as isize * 8;
      let border_h = 128 + blk_h as isize * 8;
      let mvx_min = -(bo.x as isize) * (8 * MI_SIZE) as isize - border_w;
      let mvx_max = (self.bc.cols - bo.x - blk_w / MI_SIZE) as isize * (8 * MI_SIZE) as isize + border_w;
      let mvy_min = -(bo.y as isize) * (8 * MI_SIZE) as isize - border_h;
      let mvy_max = (self.bc.rows - bo.y - blk_h / MI_SIZE) as isize * (8 * MI_SIZE) as isize + border_h;
      mv.this_mv.row = (mv.this_mv.row as isize).max(mvy_min).min(mvy_max) as i16;
      mv.this_mv.col = (mv.this_mv.col as isize).max(mvx_min).min(mvx_max) as i16;
      mv.comp_mv.row = (mv.comp_mv.row as isize).max(mvy_min).min(mvy_max) as i16;
      mv.comp_mv.col = (mv.comp_mv.col as isize).max(mvx_min).min(mvx_max) as i16;
    }
    mode_context
  }
pub fn find_mvrefs(&mut self, bo: &BlockOffset, ref_frames: [usize; 2],
mv_stack: &mut Vec<CandidateMV>, bsize: BlockSize, is_sec_rect: bool,
fi: &FrameInvariants, is_compound: bool) -> usize {
assert!(ref_frames[0] != NONE_FRAME);
if ref_frames[0] < REF_FRAMES {
if ref_frames[0] != INTRA_FRAME {
/* TODO: convert global mv to an mv here */
} else {
/* TODO: set the global mv ref to invalid here */
}
}
if ref_frames[0] != INTRA_FRAME {
/* TODO: Set zeromv ref to the converted global motion vector */
} else {
/* TODO: Set the zeromv ref to 0 */
}
if ref_frames[0] <= INTRA_FRAME {
return 0;
}
self.setup_mvref_list(bo, ref_frames, mv_stack, bsize, is_sec_rect, fi, is_compound)
}
pub fn fill_neighbours_ref_counts(&mut self, bo: &BlockOffset) {
let mut ref_counts = [0; TOTAL_REFS_PER_FRAME];
let above_b = self.bc.above_of(bo);
let left_b = self.bc.left_of(bo);
if bo.y > 0 && above_b.is_inter() {
ref_counts[above_b.ref_frames[0] as usize] += 1;
if above_b.has_second_ref() {
ref_counts[above_b.ref_frames[1] as usize] += 1;
}
}
if bo.x > 0 && left_b.is_inter() {
ref_counts[left_b.ref_frames[0] as usize] += 1;
if left_b.has_second_ref() {
ref_counts[left_b.ref_frames[1] as usize] += 1;
}
}
self.bc.at_mut(bo).neighbors_ref_counts = ref_counts;
}
fn ref_count_ctx(counts0: usize, counts1: usize) -> usize {
if counts0 < counts1 {
0
} else if counts0 == counts1 {
1
} else {
2
}
}
fn get_ref_frame_ctx_b0(&mut self, bo: &BlockOffset) -> usize {
let ref_counts = self.bc.at(bo).neighbors_ref_counts;
let fwd_cnt = ref_counts[LAST_FRAME] + ref_counts[LAST2_FRAME] +
ref_counts[LAST3_FRAME] + ref_counts[GOLDEN_FRAME];
let bwd_cnt = ref_counts[BWDREF_FRAME] + ref_counts[ALTREF2_FRAME] +
ref_counts[ALTREF_FRAME];
ContextWriter::ref_count_ctx(fwd_cnt, bwd_cnt)
}
fn get_pred_ctx_brfarf2_or_arf(&mut self, bo: &BlockOffset) -> usize {
let ref_counts = self.bc.at(bo).neighbors_ref_counts;
let brfarf2_count = ref_counts[BWDREF_FRAME] + ref_counts[ALTREF2_FRAME];
let arf_count = ref_counts[ALTREF_FRAME];
ContextWriter::ref_count_ctx(brfarf2_count, arf_count)
}
fn get_pred_ctx_ll2_or_l3gld(&mut self, bo: &BlockOffset) -> usize {
let ref_counts = self.bc.at(bo).neighbors_ref_counts;
let l_l2_count = ref_counts[LAST_FRAME] + ref_counts[LAST2_FRAME];
let l3_gold_count = ref_counts[LAST3_FRAME] + ref_counts[GOLDEN_FRAME];
ContextWriter::ref_count_ctx(l_l2_count, l3_gold_count)
}
fn get_pred_ctx_last_or_last2(&mut self, bo: &BlockOffset) -> usize {
let ref_counts = self.bc.at(bo).neighbors_ref_counts;
let l_count = ref_counts[LAST_FRAME];
let l2_count = ref_counts[LAST2_FRAME];
ContextWriter::ref_count_ctx(l_count, l2_count)
}
fn get_pred_ctx_last3_or_gold(&mut self, bo: &BlockOffset) -> usize {
let ref_counts = self.bc.at(bo).neighbors_ref_counts;
let l3_count = ref_counts[LAST3_FRAME];
let gold_count = ref_counts[GOLDEN_FRAME];
ContextWriter::ref_count_ctx(l3_count, gold_count)
}
fn get_pred_ctx_brf_or_arf2(&mut self, bo: &BlockOffset) -> usize {
let ref_counts = self.bc.at(bo).neighbors_ref_counts;
let brf_count = ref_counts[BWDREF_FRAME];
let arf2_count = ref_counts[ALTREF2_FRAME];
ContextWriter::ref_count_ctx(brf_count, arf2_count)
}
  /// Derives the context (0..=4) for coding the single-vs-compound reference
  /// mode flag, from the reference directions of the above and left
  /// neighbours (mirrors libaom's av1_get_reference_mode_context).
  fn get_comp_mode_ctx(&self, bo: &BlockOffset) -> usize {
    // True for backward-group references (BWDREF..=ALTREF).
    fn check_backward(ref_frame: usize) -> bool {
      ref_frame >= BWDREF_FRAME && ref_frame <= ALTREF_FRAME
    }
    let avail_left = bo.x > 0;
    let avail_up = bo.y > 0;
    let bo_left = bo.with_offset(-1, 0);
    let bo_up = bo.with_offset(0, -1);
    // Out-of-frame neighbours read as intra with no second reference.
    let above0 = if avail_up { self.bc.at(&bo_up).ref_frames[0] } else { INTRA_FRAME };
    let above1 = if avail_up { self.bc.at(&bo_up).ref_frames[1] } else { NONE_FRAME };
    let left0 = if avail_left { self.bc.at(&bo_left).ref_frames[0] } else { INTRA_FRAME };
    let left1 = if avail_left { self.bc.at(&bo_left).ref_frames[1] } else { NONE_FRAME };
    let left_single = left1 == NONE_FRAME;
    let above_single = above1 == NONE_FRAME;
    let left_intra = left0 == INTRA_FRAME;
    let above_intra = above0 == INTRA_FRAME;
    let left_backward = check_backward(left0);
    let above_backward = check_backward(above0);
    if avail_left && avail_up {
      if above_single && left_single {
        // Both single: context depends on whether they point opposite ways.
        (above_backward ^ left_backward) as usize
      } else if above_single {
        2 + (above_backward || above_intra) as usize
      } else if left_single {
        2 + (left_backward || left_intra) as usize
      } else {
        // Both neighbours already compound.
        4
      }
    } else if avail_up {
      if above_single {
        above_backward as usize
      } else {
        3
      }
    } else if avail_left {
      if left_single {
        left_backward as usize
      } else {
        3
      }
    } else {
      // No neighbours at all.
      1
    }
  }
  /// Derives the context for the compound reference *type* symbol
  /// (unidirectional vs bidirectional pair), from whether the above/left
  /// neighbours are compound and whether their reference pairs point the
  /// same temporal direction (mirrors libaom's comp_ref_type context).
  fn get_comp_ref_type_ctx(&self, bo: &BlockOffset) -> usize {
    // True when both references fall in the same direction group
    // (both forward or both backward).
    fn is_samedir_ref_pair(ref0: usize, ref1: usize) -> bool {
      (ref0 >= BWDREF_FRAME && ref0 != NONE_FRAME) == (ref1 >= BWDREF_FRAME && ref1 != NONE_FRAME)
    }
    let avail_left = bo.x > 0;
    let avail_up = bo.y > 0;
    let bo_left = bo.with_offset(-1, 0);
    let bo_up = bo.with_offset(0, -1);
    // Out-of-frame neighbours read as intra with no second reference.
    let above0 = if avail_up { self.bc.at(&bo_up).ref_frames[0] } else { INTRA_FRAME };
    let above1 = if avail_up { self.bc.at(&bo_up).ref_frames[1] } else { NONE_FRAME };
    let left0 = if avail_left { self.bc.at(&bo_left).ref_frames[0] } else { INTRA_FRAME };
    let left1 = if avail_left { self.bc.at(&bo_left).ref_frames[1] } else { NONE_FRAME };
    let left_single = left1 == NONE_FRAME;
    let above_single = above1 == NONE_FRAME;
    let left_intra = left0 == INTRA_FRAME;
    let above_intra = above0 == INTRA_FRAME;
    let above_comp_inter = avail_up && !above_intra && !above_single;
    let left_comp_inter = avail_left && !left_intra && !left_single;
    let above_uni_comp = above_comp_inter && is_samedir_ref_pair(above0, above1);
    let left_uni_comp = left_comp_inter && is_samedir_ref_pair(left0, left1);
    if avail_up && !above_intra && avail_left && !left_intra {
      // Both neighbours are inter blocks.
      let samedir = is_samedir_ref_pair(above0, left0);
      if !above_comp_inter && !left_comp_inter {
        1 + 2 * samedir as usize
      } else if !above_comp_inter {
        if !left_uni_comp { 1 } else { 3 + samedir as usize }
      } else if !left_comp_inter {
        if !above_uni_comp { 1 } else { 3 + samedir as usize }
      } else {
        // Both compound: distinguish uni/bi combinations.
        if !above_uni_comp && !left_uni_comp {
          0
        } else if !above_uni_comp || !left_uni_comp {
          2
        } else {
          3 + ((above0 == BWDREF_FRAME) == (left0 == BWDREF_FRAME)) as usize
        }
      }
    } else if avail_up && avail_left {
      // One neighbour is intra; use whichever is compound inter.
      if above_comp_inter {
        1 + 2 * above_uni_comp as usize
      } else if left_comp_inter {
        1 + 2 * left_uni_comp as usize
      } else {
        2
      }
    } else if above_comp_inter {
      4 * above_uni_comp as usize
    } else if left_comp_inter {
      4 * left_uni_comp as usize
    } else {
      2
    }
  }
  /// Writes the reference-frame selection for the block at `bo` to the
  /// bitstream: the single/compound flag (when the frame allows a choice),
  /// then the reference-frame bit tree. The order and contexts of the CDF
  /// writes follow the AV1 ref_frames() syntax and must not be reordered.
  pub fn write_ref_frames(&mut self, w: &mut dyn Writer, fi: &FrameInvariants, bo: &BlockOffset) {
    let rf = self.bc.at(bo).ref_frames;
    // Compound mode is only signalled for blocks of at least 8x8.
    let sz = self.bc.at(bo).n4_w.min(self.bc.at(bo).n4_h);
    /* TODO: Handle multiple references */
    let comp_mode = self.bc.at(bo).has_second_ref();
    if fi.reference_mode != ReferenceMode::SINGLE && sz >= 2 {
      let ctx = self.get_comp_mode_ctx(bo);
      symbol_with_update!(self, w, comp_mode as u32, &mut self.fc.comp_mode_cdf[ctx]);
    } else {
      assert!(!comp_mode);
    }
    if comp_mode {
      // Only bidirectional compound pairs are produced at the moment.
      let comp_ref_type = 1 as u32; // bidir
      let ctx = self.get_comp_ref_type_ctx(bo);
      symbol_with_update!(self, w, comp_ref_type, &mut self.fc.comp_ref_type_cdf[ctx]);
      if comp_ref_type == 0 {
        unimplemented!();
      } else {
        // Forward reference tree: {LAST,LAST2} vs {LAST3,GOLDEN}, then one
        // more bit inside the chosen pair.
        let compref = rf[0] == GOLDEN_FRAME || rf[0] == LAST3_FRAME;
        let ctx = self.get_pred_ctx_ll2_or_l3gld(bo);
        symbol_with_update!(self, w, compref as u32, &mut self.fc.comp_ref_cdf[ctx][0]);
        if !compref {
          let compref_p1 = rf[0] == LAST2_FRAME;
          let ctx = self.get_pred_ctx_last_or_last2(bo);
          symbol_with_update!(self, w, compref_p1 as u32, &mut self.fc.comp_ref_cdf[ctx][1]);
        } else {
          let compref_p2 = rf[0] == GOLDEN_FRAME;
          let ctx = self.get_pred_ctx_last3_or_gold(bo);
          symbol_with_update!(self, w, compref_p2 as u32, &mut self.fc.comp_ref_cdf[ctx][2]);
        }
        // Backward reference tree: ALTREF vs {BWDREF, ALTREF2}.
        let comp_bwdref = rf[1] == ALTREF_FRAME;
        let ctx = self.get_pred_ctx_brfarf2_or_arf(bo);
        symbol_with_update!(self, w, comp_bwdref as u32, &mut self.fc.comp_bwd_ref_cdf[ctx][0]);
        if !comp_bwdref {
          let comp_bwdref_p1 = rf[1] == ALTREF2_FRAME;
          let ctx = self.get_pred_ctx_brf_or_arf2(bo);
          symbol_with_update!(self, w, comp_bwdref_p1 as u32, &mut self.fc.comp_bwd_ref_cdf[ctx][1]);
        }
      }
    } else {
      // Single reference tree: first forward vs backward group, then walk
      // down to the concrete frame.
      let b0_ctx = self.get_ref_frame_ctx_b0(bo);
      let b0 = rf[0] <= ALTREF_FRAME && rf[0] >= BWDREF_FRAME;
      symbol_with_update!(self, w, b0 as u32, &mut self.fc.single_ref_cdfs[b0_ctx][0]);
      if b0 {
        let b1_ctx = self.get_pred_ctx_brfarf2_or_arf(bo);
        let b1 = rf[0] == ALTREF_FRAME;
        symbol_with_update!(self, w, b1 as u32, &mut self.fc.single_ref_cdfs[b1_ctx][1]);
        if !b1 {
          let b5_ctx = self.get_pred_ctx_brf_or_arf2(bo);
          let b5 = rf[0] == ALTREF2_FRAME;
          symbol_with_update!(self, w, b5 as u32, &mut self.fc.single_ref_cdfs[b5_ctx][5]);
        }
      } else {
        let b2_ctx = self.get_pred_ctx_ll2_or_l3gld(bo);
        let b2 = rf[0] == LAST3_FRAME || rf[0] == GOLDEN_FRAME;
        symbol_with_update!(self, w, b2 as u32, &mut self.fc.single_ref_cdfs[b2_ctx][2]);
        if !b2 {
          let b3_ctx = self.get_pred_ctx_last_or_last2(bo);
          let b3 = rf[0] != LAST_FRAME;
          symbol_with_update!(self, w, b3 as u32, &mut self.fc.single_ref_cdfs[b3_ctx][3]);
        } else {
          let b4_ctx = self.get_pred_ctx_last3_or_gold(bo);
          let b4 = rf[0] != LAST3_FRAME;
          symbol_with_update!(self, w, b4 as u32, &mut self.fc.single_ref_cdfs[b4_ctx][4]);
        }
      }
    }
  }
pub fn write_compound_mode(
&mut self, w: &mut dyn Writer, mode: PredictionMode, ctx: usize,
) {
let newmv_ctx = ctx & NEWMV_CTX_MASK;
let refmv_ctx = (ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
let ctx = if refmv_ctx < 2 {
newmv_ctx.min(1)
} else if refmv_ctx < 4 {
(newmv_ctx + 1).min(4)
} else {
(newmv_ctx.max(1) + 3).min(7)
};
assert!(mode >= PredictionMode::NEAREST_NEARESTMV);
let val = mode as u32 - PredictionMode::NEAREST_NEARESTMV as u32;
symbol_with_update!(self, w, val, &mut self.fc.compound_mode_cdf[ctx]);
}
  /// Writes a single-reference inter mode as up to three binary symbols:
  /// is-not-NEWMV, then is-not-GLOBALMV, then is-not-NEARESTMV, each with its
  /// own sub-context extracted from the packed `ctx`.
  pub fn write_inter_mode(&mut self, w: &mut dyn Writer, mode: PredictionMode, ctx: usize) {
    let newmv_ctx = ctx & NEWMV_CTX_MASK;
    symbol_with_update!(self, w, (mode != PredictionMode::NEWMV) as u32, &mut self.fc.newmv_cdf[newmv_ctx]);
    if mode != PredictionMode::NEWMV {
      let zeromv_ctx = (ctx >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK;
      symbol_with_update!(self, w, (mode != PredictionMode::GLOBALMV) as u32, &mut self.fc.zeromv_cdf[zeromv_ctx]);
      if mode != PredictionMode::GLOBALMV {
        let refmv_ctx = (ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
        // Remaining choice is NEARESTMV vs NEARMV.
        symbol_with_update!(self, w, (mode != PredictionMode::NEARESTMV) as u32, &mut self.fc.refmv_cdf[refmv_ctx]);
      }
    }
  }
  /// Writes one bit of the dynamic reference list (DRL) index, selecting
  /// among the sorted MV candidates.
  pub fn write_drl_mode(&mut self, w: &mut dyn Writer, drl_mode: bool, ctx: usize) {
    symbol_with_update!(self, w, drl_mode as u32, &mut self.fc.drl_cdfs[ctx]);
  }
  /// Writes a motion vector as a difference from its predictor `ref_mv`:
  /// first the joint type (which components are nonzero), then each nonzero
  /// component at the given sub-pel precision.
  pub fn write_mv(&mut self, w: &mut dyn Writer,
                  mv: MotionVector, ref_mv: MotionVector,
                  mv_precision: MvSubpelPrecision) {
    let diff = MotionVector { row: mv.row - ref_mv.row, col: mv.col - ref_mv.col };
    let j: MvJointType = av1_get_mv_joint(diff);
    w.symbol_with_update(j as u32, &mut self.fc.nmv_context.joints_cdf);
    if mv_joint_vertical(j) {
      encode_mv_component(w, diff.row as i32, &mut self.fc.nmv_context.comps[0], mv_precision);
    }
    if mv_joint_horizontal(j) {
      encode_mv_component(w, diff.col as i32, &mut self.fc.nmv_context.comps[1], mv_precision);
    }
  }
  /// Writes the transform type symbol for a block, when the active transform
  /// set offers more than one choice. Inter blocks use the inter CDF table;
  /// intra blocks additionally condition on the intra prediction direction.
  pub fn write_tx_type(
    &mut self, w: &mut dyn Writer, tx_size: TxSize, tx_type: TxType, y_mode: PredictionMode,
    is_inter: bool, use_reduced_tx_set: bool
  ) {
    let square_tx_size = tx_size.sqr();
    let tx_set =
      get_tx_set(tx_size, is_inter, use_reduced_tx_set);
    let num_tx_types = num_tx_set[tx_set as usize];
    // A single-entry set needs no signalling at all.
    if num_tx_types > 1 {
      let tx_set_index = get_tx_set_index(tx_size, is_inter, use_reduced_tx_set);
      assert!(tx_set_index > 0);
      // The chosen type must be a member of the active set.
      assert!(av1_tx_used[tx_set as usize][tx_type as usize] != 0);
      if is_inter {
        symbol_with_update!(
          self,
          w,
          av1_tx_ind[tx_set as usize][tx_type as usize] as u32,
          &mut self.fc.inter_tx_cdf[tx_set_index as usize]
            [square_tx_size as usize]
            [..num_tx_set[tx_set as usize] + 1]
        );
      } else {
        let intra_dir = y_mode;
        // TODO: Once use_filter_intra is enabled,
        // intra_dir =
        // fimode_to_intradir[mbmi->filter_intra_mode_info.filter_intra_mode];
        symbol_with_update!(
          self,
          w,
          av1_tx_ind[tx_set as usize][tx_type as usize] as u32,
          &mut self.fc.intra_tx_cdf[tx_set_index as usize]
            [square_tx_size as usize][intra_dir as usize]
            [..num_tx_set[tx_set as usize] + 1]
        );
      }
    }
  }
  /// Writes the skip flag for the block at `bo`, using the neighbour-derived
  /// skip context.
  pub fn write_skip(&mut self, w: &mut dyn Writer, bo: &BlockOffset, skip: bool) {
    let ctx = self.bc.skip_context(bo);
    symbol_with_update!(self, w, skip as u32, &mut self.fc.skip_cdfs[ctx]);
  }
fn get_segment_pred(&mut self, bo: &BlockOffset) -> ( u8, u8 ) {
let mut prev_ul = -1;
let mut prev_u = -1;
let mut prev_l = -1;
if bo.x > 0 && bo.y > 0 {
prev_ul = self.bc.above_left_of(bo).segmentation_idx as i8;
}
if bo.y > 0 {
prev_u = self.bc.above_of(bo).segmentation_idx as i8;
}
if bo.x > 0 {
prev_l = self.bc.left_of(bo).segmentation_idx as i8;
}
/* Pick CDF index based on number of matching/out-of-bounds segment IDs. */
let cdf_index: u8;
if prev_ul < 0 || prev_u < 0 || prev_l < 0 { /* Edge case */
cdf_index = 0;
} else if (prev_ul == prev_u) && (prev_ul == prev_l) {
cdf_index = 2;
} else if (prev_ul == prev_u) || (prev_ul == prev_l) || (prev_u == prev_l) {
cdf_index = 1;
} else {
cdf_index = 0;
}
/* If 2 or more are identical returns that as predictor, otherwise prev_l. */
let r: i8;
if prev_u == -1 { /* edge case */
r = if prev_l == -1 { 0 } else { prev_l };
} else if prev_l == -1 { /* edge case */
r = prev_u;
} else {
r = if prev_ul == prev_u { prev_u } else { prev_l };
}
( r as u8, cdf_index )
}
fn neg_interleave(&mut self, x: i32, r: i32, max: i32) -> i32 {
assert!(x < max);
if r == 0 {
return x;
} else if r >= (max - 1) {
return -x + max - 1;
}
let diff = x - r;
if 2 * r < max {
if diff.abs() <= r {
if diff > 0 {
return (diff << 1) - 1;
} else {
return (-diff) << 1;
}
}
x
} else {
if diff.abs() < (max - r) {
if diff > 0 {
return (diff << 1) - 1;
} else {
return (-diff) << 1;
}
}
(max - x) - 1
}
}
  /// Writes the spatially-predicted segment id for the block at `bo`.
  ///
  /// Skipped blocks take the predicted id without signalling; otherwise the
  /// id is coded as a neg-interleaved offset from the prediction.
  pub fn write_segmentation(&mut self, w: &mut dyn Writer, bo: &BlockOffset,
                            bsize: BlockSize, skip: bool, last_active_segid: u8) {
    let ( pred, cdf_index ) = self.get_segment_pred(bo);
    if skip {
      // Skip implies the predicted segment; record it and emit nothing.
      self.bc.set_segmentation_idx(bo, bsize, pred);
      return;
    }
    let seg_idx = self.bc.at(bo).segmentation_idx;
    let coded_id = self.neg_interleave(seg_idx as i32, pred as i32, (last_active_segid + 1) as i32);
    symbol_with_update!(self, w, coded_id as u32, &mut self.fc.spatial_segmentation_cdfs[cdf_index as usize]);
  }
  /// Writes a CDEF strength index as a raw `bits`-wide literal (no CDF).
  pub fn write_cdef(&mut self, w: &mut dyn Writer, strength_index: u8, bits: u8) {
    w.literal(bits, strength_index as u32);
  }
  /// Writes the block's deblocking-filter level delta(s): one delta in
  /// single mode, or one per filter/plane in multi mode.
  ///
  /// Per delta: a clamped magnitude symbol; if the magnitude reaches
  /// DELTA_LF_SMALL, an escape follows (3-bit length, then the remainder
  /// literal so that abs = literal + (1 << bits) + 1); finally a sign bit
  /// for nonzero deltas.
  pub fn write_block_deblock_deltas(&mut self, w: &mut dyn Writer,
                                    bo: &BlockOffset, multi: bool) {
    let block = self.bc.at(bo);
    let deltas = if multi { FRAME_LF_COUNT + PLANES - 3 } else { 1 };
    for i in 0..deltas {
      let delta = block.deblock_deltas[i];
      let abs:u32 = delta.abs() as u32;
      if multi {
        symbol_with_update!(self, w, cmp::min(abs, DELTA_LF_SMALL),
                            &mut self.fc.deblock_delta_multi_cdf[i]);
      } else {
        symbol_with_update!(self, w, cmp::min(abs, DELTA_LF_SMALL),
                            &mut self.fc.deblock_delta_cdf);
      };
      if abs >= DELTA_LF_SMALL {
        // Escape coding for large magnitudes.
        let bits = msb(abs as i32 - 1) as u32;
        w.literal(3, bits - 1);
        w.literal(bits as u8, abs - (1<<bits) - 1);
      }
      if abs > 0 {
        // Sign bit, equiprobable.
        w.bool(delta < 0, 16384);
      }
    }
  }
  /// Writes the intra-vs-inter flag for the block at `bo`, using the
  /// neighbour-derived intra/inter context.
  pub fn write_is_inter(&mut self, w: &mut dyn Writer, bo: &BlockOffset, is_inter: bool) {
    let ctx = self.bc.intra_inter_context(bo);
    symbol_with_update!(self, w, is_inter as u32, &mut self.fc.intra_inter_cdfs[ctx]);
  }
pub fn get_txsize_entropy_ctx(&mut self, tx_size: TxSize) -> usize {
(tx_size.sqr() as usize + tx_size.sqr_up() as usize + 1) >> 1
}
pub fn txb_init_levels(
&mut self, coeffs: &[i32], width: usize, height: usize,
levels_buf: &mut [u8]
) {
let mut offset = TX_PAD_TOP * (width + TX_PAD_HOR);
for y in 0..height {
for x in 0..width {
levels_buf[offset] = clamp(coeffs[y * width + x].abs(), 0, 127) as u8;
offset += 1;
}
offset += TX_PAD_HOR;
}
}
pub fn av1_get_coded_tx_size(&mut self, tx_size: TxSize) -> TxSize {
if tx_size == TX_64X64 || tx_size == TX_64X32 || tx_size == TX_32X64 {
return TX_32X32
}
if tx_size == TX_16X64 {
return TX_16X32
}
if tx_size == TX_64X16 {
return TX_32X16
}
tx_size
}
  /// log2 of the coded transform-block width, used as the row shift when
  /// indexing the levels buffer.
  pub fn get_txb_bwl(&mut self, tx_size: TxSize) -> usize {
    av1_get_coded_tx_size(tx_size).width_log2()
  }
pub fn get_eob_pos_token(&mut self, eob: usize, extra: &mut u32) -> u32 {
let t = if eob < 33 {
eob_to_pos_small[eob] as u32
} else {
let e = cmp::min((eob - 1) >> 5, 16);
eob_to_pos_large[e as usize] as u32
};
assert!(eob as i32 >= k_eob_group_start[t as usize] as i32);
*extra = eob as u32 - k_eob_group_start[t as usize] as u32;
t
}
  /// Sums the clamped magnitudes of up to five neighbouring levels around the
  /// current coefficient (whose padded position is `levels[0]`), choosing the
  /// neighbour pattern by transform class. `bwl` is log2 of the row width;
  /// each row in `levels` is padded by TX_PAD_HOR entries.
  pub fn get_nz_mag(
    &mut self, levels: &[u8], bwl: usize, tx_class: TxClass
  ) -> usize {
    // May version.
    // Note: AOMMIN(level, 3) is useless for decoder since level < 3.
    let mut mag = clip_max3(levels[1]); // { 0, 1 }
    mag += clip_max3(levels[(1 << bwl) + TX_PAD_HOR]); // { 1, 0 }
    if tx_class == TX_CLASS_2D {
      mag += clip_max3(levels[(1 << bwl) + TX_PAD_HOR + 1]); // { 1, 1 }
      mag += clip_max3(levels[2]); // { 0, 2 }
      mag += clip_max3(levels[(2 << bwl) + (2 << TX_PAD_HOR_LOG2)]); // { 2, 0 }
    } else if tx_class == TX_CLASS_VERT {
      // Vertical class: look further down the same column.
      mag += clip_max3(levels[(2 << bwl) + (2 << TX_PAD_HOR_LOG2)]); // { 2, 0 }
      mag += clip_max3(levels[(3 << bwl) + (3 << TX_PAD_HOR_LOG2)]); // { 3, 0 }
      mag += clip_max3(levels[(4 << bwl) + (4 << TX_PAD_HOR_LOG2)]); // { 4, 0 }
    } else {
      // Horizontal class: look further along the same row.
      mag += clip_max3(levels[2]); // { 0, 2 }
      mag += clip_max3(levels[3]); // { 0, 3 }
      mag += clip_max3(levels[4]); // { 0, 4 }
    }
    mag as usize
  }
pub fn get_nz_map_ctx_from_stats(
&mut self,
stats: usize,
coeff_idx: usize, // raster order
bwl: usize,
tx_size: TxSize,
tx_class: TxClass
) -> usize {
if (tx_class as u32 | coeff_idx as u32) == 0 {
return 0;
};
let row = coeff_idx >> bwl;
let col = coeff_idx - (row << bwl);
let mut ctx = (stats + 1) >> 1;
ctx = cmp::min(ctx, 4);
match tx_class {
TX_CLASS_2D => {
// This is the algorithm to generate table av1_nz_map_ctx_offset[].
// const int width = tx_size_wide[tx_size];
// const int height = tx_size_high[tx_size];
// if (width < height) {
// if (row < 2) return 11 + ctx;
// } else if (width > height) {
// if (col < 2) return 16 + ctx;
// }
// if (row + col < 2) return ctx + 1;
// if (row + col < 4) return 5 + ctx + 1;
// return 21 + ctx;
ctx + av1_nz_map_ctx_offset[tx_size as usize][cmp::min(row, 4)][cmp::min(col, 4)] as usize
}
TX_CLASS_HORIZ => {
let row = coeff_idx >> bwl;
let col = coeff_idx - (row << bwl);
ctx + nz_map_ctx_offset_1d[col as usize]
}
TX_CLASS_VERT => {
let row = coeff_idx >> bwl;
ctx + nz_map_ctx_offset_1d[row]
}
}
}
pub fn get_nz_map_ctx(
&mut self, levels: &[u8], coeff_idx: usize, bwl: usize, height: usize,
scan_idx: usize, is_eob: bool, tx_size: TxSize, tx_class: TxClass
) -> usize {
if is_eob {
if scan_idx == 0 {
return 0;
}
if scan_idx <= (height << bwl) / 8 {
return 1;
}
if scan_idx <= (height << bwl) / 4 {
return 2;
}
return 3;
}
let padded_idx = coeff_idx + ((coeff_idx >> bwl) << TX_PAD_HOR_LOG2);
let stats = self.get_nz_mag(&levels[padded_idx..], bwl, tx_class);
self.get_nz_map_ctx_from_stats(stats, coeff_idx, bwl, tx_size, tx_class)
}
pub fn get_nz_map_contexts(
&mut self, levels: &mut [u8], scan: &[u16], eob: u16,
tx_size: TxSize, tx_class: TxClass, coeff_contexts: &mut [i8]
) {
let bwl = self.get_txb_bwl(tx_size);
let height = av1_get_coded_tx_size(tx_size).height();
for i in 0..eob {
let pos = scan[i as usize];
coeff_contexts[pos as usize] = self.get_nz_map_ctx(
levels,
pos as usize,
bwl,
height,
i as usize,
i == eob - 1,
tx_size,
tx_class
) as i8;
}
}
  /// Context for the base-range (level > 3) symbols of the coefficient at
  /// raster position `c`.
  ///
  /// Sums two or three neighbour magnitudes (pattern depends on the
  /// transform class), clamps to 0..=6, then adds 0 for the DC position,
  /// 7 near the scan origin, or 14 elsewhere.
  pub fn get_br_ctx(
    &mut self,
    levels: &[u8],
    c: usize, // raster order
    bwl: usize,
    tx_class: TxClass
  ) -> usize {
    let row: usize = c >> bwl;
    let col: usize = c - (row << bwl);
    // Each row of `levels` is padded by TX_PAD_HOR entries.
    let stride: usize = (1 << bwl) + TX_PAD_HOR;
    let pos: usize = row * stride + col;
    // Right and below neighbours are common to all classes.
    let mut mag: usize = levels[pos + 1] as usize;
    mag += levels[pos + stride] as usize;
    match tx_class {
      TX_CLASS_2D => {
        mag += levels[pos + stride + 1] as usize; // diagonal neighbour
        mag = cmp::min((mag + 1) >> 1, 6);
        if c == 0 {
          return mag;
        }
        if (row < 2) && (col < 2) {
          return mag + 7;
        }
      }
      TX_CLASS_HORIZ => {
        mag += levels[pos + 2] as usize; // two to the right
        mag = cmp::min((mag + 1) >> 1, 6);
        if c == 0 {
          return mag;
        }
        if col == 0 {
          return mag + 7;
        }
      }
      TX_CLASS_VERT => {
        mag += levels[pos + (stride << 1)] as usize; // two below
        mag = cmp::min((mag + 1) >> 1, 6);
        if c == 0 {
          return mag;
        }
        if row == 0 {
          return mag + 7;
        }
      }
    }
    mag + 14
  }
pub fn get_level_mag_with_txclass(
&mut self, levels: &[u8], stride: usize, row: usize, col: usize,
mag: &mut [usize], tx_class: TxClass
) {
for idx in 0..CONTEXT_MAG_POSITION_NUM {
let ref_row =
row + mag_ref_offset_with_txclass[tx_class as usize][idx][0];
let ref_col =
col + mag_ref_offset_with_txclass[tx_class as usize][idx][1];
let pos = ref_row * stride + ref_col;
mag[idx] = levels[pos] as usize;
}
}
  /// Writes one transform block's quantized coefficients to the bitstream
  /// using the AV1 level-map scheme: skip flag, tx_type (luma only), the
  /// end-of-block (EOB) position, base levels, base-range increments,
  /// signs, and Golomb-coded remainders.
  /// Returns `true` when the block has a nonzero coefficient, `false`
  /// when it was coded as all-zero (skipped).
  pub fn write_coeffs_lv_map(
    &mut self, w: &mut dyn Writer, plane: usize, bo: &BlockOffset, coeffs_in: &[i32],
    pred_mode: PredictionMode,
    tx_size: TxSize, tx_type: TxType, plane_bsize: BlockSize, xdec: usize,
    ydec: usize, use_reduced_tx_set: bool
  ) -> bool {
    let is_inter = pred_mode >= PredictionMode::NEARESTMV;
    //assert!(!is_inter);
    // Note: Both intra and inter mode uses inter scan order. Surprised?
    let scan_order =
      &av1_scan_orders[tx_size as usize][tx_type as usize];
    let scan = scan_order.scan;
    let width = av1_get_coded_tx_size(tx_size).width();
    let height = av1_get_coded_tx_size(tx_size).height();
    // Reorder coefficients into scan order, accumulating the total
    // magnitude so an all-zero block can be detected cheaply.
    let mut coeffs_storage = [0 as i32; 32*32];
    let coeffs = &mut coeffs_storage[..width*height];
    let mut cul_level = 0 as u32;
    for i in 0..width*height {
      coeffs[i] = coeffs_in[scan[i] as usize];
      cul_level += coeffs[i].abs() as u32;
    }
    // eob = one past the last nonzero coefficient in scan order.
    let eob = if cul_level == 0 { 0 } else {
      coeffs.iter().rposition(|&v| v != 0).map(|i| i + 1).unwrap_or(0)
    };
    let txs_ctx = self.get_txsize_entropy_ctx(tx_size);
    let txb_ctx =
      self.bc.get_txb_ctx(plane_bsize, tx_size, plane, bo, xdec, ydec);
    // Code the all-zero (skip) flag for this transform block.
    {
      let cdf = &mut self.fc.txb_skip_cdf[txs_ctx][txb_ctx.txb_skip_ctx];
      symbol_with_update!(self, w, (eob == 0) as u32, cdf);
    }
    if eob == 0 {
      // Nothing else to code; record a zero context for neighbours.
      self.bc.set_coeff_context(plane, bo, tx_size, xdec, ydec, 0);
      return false;
    }
    // Build the padded per-position level plane used to derive contexts.
    let mut levels_buf = [0 as u8; TX_PAD_2D];
    self.txb_init_levels(
      coeffs_in,
      width,
      height,
      &mut levels_buf
    );
    let tx_class = tx_type_to_class[tx_type as usize];
    // plane_type: 0 = luma, 1 = chroma.
    let plane_type = if plane == 0 {
      0
    } else {
      1
    } as usize;
    assert!(tx_size <= TX_32X32 || tx_type == DCT_DCT);
    // Signal tx_type for luma plane only
    if plane == 0 {
      self.write_tx_type(
        w,
        tx_size,
        tx_type,
        pred_mode,
        is_inter,
        use_reduced_tx_set
      );
    }
    // Encode EOB
    let mut eob_extra = 0 as u32;
    let eob_pt = self.get_eob_pos_token(eob, &mut eob_extra);
    let eob_multi_size: usize = tx_size.area_log2() - 4;
    let eob_multi_ctx: usize = if tx_class == TX_CLASS_2D {
      0
    } else {
      1
    };
    // The EOB token CDF is selected by the coded transform area.
    symbol_with_update!(
      self,
      w,
      eob_pt - 1,
      match eob_multi_size {
        0 => &mut self.fc.eob_flag_cdf16[plane_type][eob_multi_ctx],
        1 => &mut self.fc.eob_flag_cdf32[plane_type][eob_multi_ctx],
        2 => &mut self.fc.eob_flag_cdf64[plane_type][eob_multi_ctx],
        3 => &mut self.fc.eob_flag_cdf128[plane_type][eob_multi_ctx],
        4 => &mut self.fc.eob_flag_cdf256[plane_type][eob_multi_ctx],
        5 => &mut self.fc.eob_flag_cdf512[plane_type][eob_multi_ctx],
        _ => &mut self.fc.eob_flag_cdf1024[plane_type][eob_multi_ctx],
      }
    );
    let eob_offset_bits = k_eob_offset_bits[eob_pt as usize];
    if eob_offset_bits > 0 {
      // The most significant extra bit is context coded ...
      let mut eob_shift = eob_offset_bits - 1;
      let mut bit = if (eob_extra & (1 << eob_shift)) != 0 {
        1
      } else {
        0
      } as u32;
      symbol_with_update!(
        self,
        w,
        bit,
        &mut self.fc.eob_extra_cdf[txs_ctx][plane_type][(eob_pt - 3) as usize]
      );
      // ... and the remaining extra bits are written raw.
      for i in 1..eob_offset_bits {
        eob_shift = eob_offset_bits as u16 - 1 - i as u16;
        bit = if (eob_extra & (1 << eob_shift)) != 0 {
          1
        } else {
          0
        };
        w.bit(bit as u16);
      }
    }
    // Derive a base-level context for every scan position up to eob.
    let mut coeff_contexts = [0 as i8; MAX_TX_SQUARE];
    let levels =
      &mut levels_buf[TX_PAD_TOP * (width + TX_PAD_HOR)..];
    self.get_nz_map_contexts(
      levels,
      scan,
      eob as u16,
      tx_size,
      tx_class,
      &mut coeff_contexts
    );
    let bwl = self.get_txb_bwl(tx_size);
    // Code base levels (clamped to 3) in reverse scan order; the eob-1
    // position uses a dedicated CDF since its level is known nonzero.
    for c in (0..eob).rev() {
      let pos = scan[c];
      let coeff_ctx = coeff_contexts[pos as usize];
      let v = coeffs_in[pos as usize];
      let level: u32 = v.abs() as u32;
      if c == eob - 1 {
        symbol_with_update!(
          self,
          w,
          (cmp::min(level, 3) - 1) as u32,
          &mut self.fc.coeff_base_eob_cdf[txs_ctx][plane_type]
            [coeff_ctx as usize]
        );
      } else {
        symbol_with_update!(
          self,
          w,
          (cmp::min(level, 3)) as u32,
          &mut self.fc.coeff_base_cdf[txs_ctx][plane_type][coeff_ctx as usize]
        );
      }
      // Levels above NUM_BASE_LEVELS are extended with base-range symbols
      // in chunks of (BR_CDF_SIZE - 1), up to COEFF_BASE_RANGE total.
      if level > NUM_BASE_LEVELS as u32 {
        let pos = scan[c as usize];
        let v = coeffs_in[pos as usize];
        let level = v.abs() as u16;
        if level <= NUM_BASE_LEVELS as u16 {
          continue;
        }
        let base_range = level - 1 - NUM_BASE_LEVELS as u16;
        let br_ctx = self.get_br_ctx(levels, pos as usize, bwl, tx_class);
        let mut idx = 0;
        loop {
          if idx >= COEFF_BASE_RANGE {
            break;
          }
          let k = cmp::min(base_range - idx as u16, BR_CDF_SIZE as u16 - 1);
          symbol_with_update!(
            self,
            w,
            k as u32,
            &mut self.fc.coeff_br_cdf
              [cmp::min(txs_ctx, TxSize::TX_32X32 as usize)][plane_type]
              [br_ctx]
          );
          // A symbol below the maximum terminates the base-range coding.
          if k < BR_CDF_SIZE as u16 - 1 {
            break;
          }
          idx += BR_CDF_SIZE - 1;
        }
      }
    }
    // Loop to code all signs in the transform block,
    // starting with the sign of DC (if applicable)
    for c in 0..eob {
      let v = coeffs_in[scan[c] as usize];
      let level = v.abs() as u32;
      if level == 0 {
        continue;
      }
      let sign = if v < 0 {
        1
      } else {
        0
      };
      // The DC sign is context coded; AC signs are raw bits.
      if c == 0 {
        symbol_with_update!(
          self,
          w,
          sign,
          &mut self.fc.dc_sign_cdf[plane_type][txb_ctx.dc_sign_ctx]
        );
      } else {
        w.bit(sign as u16);
      }
      // save extra golomb codes for separate loop
      if level > (COEFF_BASE_RANGE + NUM_BASE_LEVELS) as u32 {
        let pos = scan[c];
        w.write_golomb(
          coeffs_in[pos as usize].abs() as u16
            - COEFF_BASE_RANGE as u16
            - 1
            - NUM_BASE_LEVELS as u16
        );
      }
    }
    // Store the clamped cumulative level plus the DC sign as the coeff
    // context consumed by neighbouring blocks.
    cul_level = cmp::min(COEFF_CONTEXT_MASK as u32, cul_level);
    self.bc.set_dc_sign(&mut cul_level, coeffs[0]);
    self.bc.set_coeff_context(plane, bo, tx_size, xdec, ydec, cul_level as u8);
    true
  }
pub fn checkpoint(&mut self) -> ContextWriterCheckpoint {
ContextWriterCheckpoint {
fc: self.fc,
bc: self.bc.checkpoint()
}
}
  /// Restores the entropy-coding state previously captured by `checkpoint`.
  pub fn rollback(&mut self, checkpoint: &ContextWriterCheckpoint) {
    self.fc = checkpoint.fc;
    self.bc.rollback(&checkpoint.bc);
    // NOTE(review): `cfg(debug)` only fires when a custom `--cfg debug`
    // flag is passed to rustc; if `debug_assertions` was intended this
    // branch never compiles in — TODO confirm against the build setup.
    #[cfg(debug)] {
      if self.fc_map.is_some() {
        // Rebuild the debug field map so it reflects the restored state.
        self.fc_map = Some(FieldMap {
          map: self.fc.build_map()
        });
      }
    }
  }
}
/* Symbols for coding magnitude class of nonzero components */
const MV_CLASSES: usize = 11;
// MV Class Types
const MV_CLASS_0: usize = 0; /* (0, 2] integer pel */
const MV_CLASS_1: usize = 1; /* (2, 4] integer pel */
const MV_CLASS_2: usize = 2; /* (4, 8] integer pel */
const MV_CLASS_3: usize = 3; /* (8, 16] integer pel */
const MV_CLASS_4: usize = 4; /* (16, 32] integer pel */
const MV_CLASS_5: usize = 5; /* (32, 64] integer pel */
const MV_CLASS_6: usize = 6; /* (64, 128] integer pel */
const MV_CLASS_7: usize = 7; /* (128, 256] integer pel */
const MV_CLASS_8: usize = 8; /* (256, 512] integer pel */
const MV_CLASS_9: usize = 9; /* (512, 1024] integer pel */
const MV_CLASS_10: usize = 10; /* (1024,2048] integer pel */
// Bits at integer precision for class 0, and the resulting class-0 size.
const CLASS0_BITS: usize = 1; /* bits at integer precision for class 0 */
const CLASS0_SIZE: usize = 1 << CLASS0_BITS;
const MV_OFFSET_BITS: usize = MV_CLASSES + CLASS0_BITS - 2;
const MV_BITS_CONTEXTS: usize = 6;
const MV_FP_SIZE: usize = 4;
// Largest codable magnitude and the total number of representable values.
const MV_MAX_BITS: usize = MV_CLASSES + CLASS0_BITS + 2;
const MV_MAX: usize = (1 << MV_MAX_BITS) - 1;
const MV_VALS: usize = (MV_MAX << 1) + 1;
// Clamping range actually used for motion vectors.
const MV_IN_USE_BITS: usize = 14;
const MV_UPP: i32 = 1 << MV_IN_USE_BITS;
const MV_LOW: i32 = -(1 << MV_IN_USE_BITS);
/// Classifies a motion vector by which of its components are nonzero.
#[inline(always)]
pub fn av1_get_mv_joint(mv: MotionVector) -> MvJointType {
  match (mv.row, mv.col) {
    (0, 0) => MvJointType::MV_JOINT_ZERO,
    (0, _) => MvJointType::MV_JOINT_HNZVZ,
    (_, 0) => MvJointType::MV_JOINT_HZVNZ,
    (_, _) => MvJointType::MV_JOINT_HNZVNZ,
  }
}
/// True when the joint type carries a nonzero vertical component.
#[inline(always)]
pub fn mv_joint_vertical(joint_type: MvJointType) -> bool {
  match joint_type {
    MvJointType::MV_JOINT_HZVNZ | MvJointType::MV_JOINT_HNZVNZ => true,
    _ => false,
  }
}
/// True when the joint type carries a nonzero horizontal component.
#[inline(always)]
pub fn mv_joint_horizontal(joint_type: MvJointType) -> bool {
  match joint_type {
    MvJointType::MV_JOINT_HNZVZ | MvJointType::MV_JOINT_HNZVNZ => true,
    _ => false,
  }
}
/// Base magnitude value of a motion-vector class: 0 for class 0,
/// `CLASS0_SIZE << (class + 2)` for every higher class.
#[inline(always)]
pub fn mv_class_base(mv_class: usize) -> u32 {
  if mv_class == MV_CLASS_0 {
    0
  } else {
    (CLASS0_SIZE << (mv_class + 2)) as u32
  }
}
/// Returns the floor of log base 2 of `n`, or 0 when `n == 0`.
#[inline(always)]
pub fn log_in_base_2(n: u32) -> u8 {
  // leading_zeros() is 32 for n == 0, making the subtraction saturate to 0.
  (32 - n.leading_zeros() as u8).saturating_sub(1)
}
/// Determines the magnitude class of `z` and stores the residual offset
/// within that class into `offset`.
#[inline(always)]
pub fn get_mv_class(z: u32, offset: &mut u32) -> usize {
  // Magnitudes at or above CLASS0_SIZE * 4096 all land in the top class.
  let class = if z < CLASS0_SIZE as u32 * 4096 {
    log_in_base_2(z >> 3) as usize
  } else {
    MV_CLASS_10
  };
  *offset = z - mv_class_base(class);
  class
}
/// Writes one motion-vector component to the bitstream as sign, magnitude
/// class, integer bits, then fractional and high-precision bits gated by
/// `precision`. `comp` must be nonzero (zero components are implied by the
/// joint type and never coded here).
pub fn encode_mv_component(w: &mut Writer, comp: i32,
                           mvcomp: &mut NMVComponent, precision: MvSubpelPrecision) {
  assert!(comp != 0);
  let mut offset: u32 = 0;
  let sign: u32 = if comp < 0 { 1 } else { 0 };
  let mag: u32 = if sign == 1 { -comp as u32 } else { comp as u32 };
  // Magnitude minus one splits into (class, offset); the offset further
  // splits into integer, fractional, and high-precision parts below.
  let mv_class = get_mv_class(mag - 1, &mut offset);
  let d = offset >> 3; // int mv data
  let fr = (offset >> 1) & 3; // fractional mv data
  let hp = offset & 1; // high precision mv data
  // Sign
  w.symbol_with_update(sign, &mut mvcomp.sign_cdf);
  // Class
  w.symbol_with_update(mv_class as u32, &mut mvcomp.classes_cdf);
  // Integer bits
  if mv_class == MV_CLASS_0 {
    w.symbol_with_update(d, &mut mvcomp.class0_cdf);
  } else {
    let n = mv_class + CLASS0_BITS - 1; // number of bits
    for i in 0..n {
      w.symbol_with_update((d >> i) & 1, &mut mvcomp.bits_cdf[i]);
    }
  }
  // Fractional bits
  if precision > MvSubpelPrecision::MV_SUBPEL_NONE {
    w.symbol_with_update(
      fr,
      if mv_class == MV_CLASS_0 { &mut mvcomp.class0_fp_cdf[d as usize] }
      else { &mut mvcomp.fp_cdf });
  }
  // High precision bit
  if precision > MvSubpelPrecision::MV_SUBPEL_LOW_PRECISION {
    w.symbol_with_update(
      hp,
      if mv_class == MV_CLASS_0 { &mut mvcomp.class0_hp_cdf }
      else { &mut mvcomp.hp_cdf});
  }
}
|
use byteorder:: {
ReadBytesExt,
WriteBytesExt,
};
use crc::crc32::Hasher32;
use futures::Future;
use std::collections::VecDeque;
use std::io::{
BufRead,
Error,
Read,
Seek,
SeekFrom,
Write,
};
use std::str;
// 100 blocks of 64 kiB, even accounting for a huge overhead,
// is still less than 10 MiB, which is trivially manageable.
// Additionally, there's no chance that 100 threads or more
// give any speedup inflating blocks of at most 64 kiB.
const MAX_FUTURES: usize = 100;
// Chunk size used when scanning forward for the next block header.
const BUFFER_SIZE: u64 = 65536;
// First two bytes of any gzip member (RFC 1952 magic number).
const GZIP_IDENTIFIER: [u8; 2] = [0x1f, 0x8b];
// "BC" subfield identifier marking a BGZF extra field.
const BGZF_IDENTIFIER: [u8; 2] = [0x42, 0x43];
// gzip compression-method byte for deflate.
const DEFLATE: u8 = 8;
// gzip FLG bit signalling the presence of an extra field.
const FEXTRA: u8 = 1 << 2;
/// Returns the crate version baked in at compile time, or "unknown" when
/// the `CARGO_PKG_VERSION` environment variable was absent during build.
pub fn version() -> &'static str {
    option_env!("CARGO_PKG_VERSION").unwrap_or("unknown")
}
/// Input requirements for checking/rescuing: buffered reads plus seeking.
pub trait Rescuable: BufRead + Seek {}
/// Blanket implementation: anything buffered and seekable is rescuable.
impl<T: BufRead + Seek> Rescuable for T {}
/// Callbacks for observing the progress of a check/rescue run.
pub trait ListenProgress {
    /// Called once at the start with the total input size in bytes.
    fn on_new_target(&mut self, target: u64);
    /// Called after a block is handled, with the stream position reached.
    fn on_progress(&mut self, progress: u64);
    /// Called whenever a corrupted block is accounted.
    fn on_bad_block(&mut self);
    /// Called once after the whole input has been processed.
    fn on_finished(&mut self);
}
/// One parsed BGZF block, kept verbatim so it can be re-emitted on rescue.
struct BGZFBlock {
    // Raw gzip header bytes: the first 12 fixed bytes, plus the extra
    // field when running in rescue (writer-present) mode.
    header_bytes: Vec<u8>,
    // Compressed payload between the header and the 8-byte trailer.
    deflated_payload_bytes: Vec<u8>,
    // CRC32 of the inflated payload, as stored in the block trailer.
    inflated_payload_crc32: u32,
    // Size of the inflated payload, as stored in the block trailer.
    inflated_payload_size: u32,
    // Set when header-level corruption was detected while parsing.
    corrupted: bool,
    // Stream offset just past this block, used for progress reporting.
    end_position: u64,
}
/// Outcome of validating one block's payload.
struct BGZFBlockStatus {
    // True when inflation, the CRC32 check, or the size check failed.
    corrupted: bool,
    // Inflated payload size claimed by the block's trailer.
    inflated_payload_size: u32,
    // The block itself, retained only when intact (for re-emission).
    block: Option<BGZFBlock>,
}
/// Aggregate statistics returned by `check` and `rescue`.
pub struct Results {
    // Total number of blocks encountered.
    pub blocks_count: u64,
    // Sum of the inflated payload sizes claimed by all blocks.
    pub blocks_size: u64,
    // Number of blocks found corrupted.
    pub bad_blocks_count: u64,
    // Sum of the inflated payload sizes of corrupted blocks.
    pub bad_blocks_size: u64,
    // True when the stream ends in the middle of a block.
    pub truncated_in_block: bool,
    // True when the last block has a nonzero payload, i.e. the empty
    // BGZF end-of-file marker block is missing.
    pub truncated_between_blocks: bool,
}
/// Scans forward from `block_position` for the next plausible gzip/BGZF
/// header and leaves `reader` positioned on it (or at EOF when none is
/// found). A candidate is any 4-byte window matching at least 3 of: the
/// two gzip magic bytes, the deflate method byte, and the FEXTRA flag.
fn seek_next_block(reader: &mut dyn Rescuable, block_position: u64) {
    let mut current_position = block_position;
    reader.seek(SeekFrom::Start(current_position)).unwrap();
    let mut bytes = vec![];
    'seek: loop {
        // Read the next chunk; `bytes` still holds the last 4 bytes of the
        // previous chunk, so windows spanning a boundary are also checked.
        let mut buffer_reader = reader.take(BUFFER_SIZE);
        let buffer_size = buffer_reader.read_to_end(&mut bytes).unwrap();
        for window in bytes.windows(4) {
            let mut correct_bytes = 0;
            if window[0] == GZIP_IDENTIFIER[0] {
                correct_bytes += 1;
            }
            if window[1] == GZIP_IDENTIFIER[1] {
                correct_bytes += 1;
            }
            if window[2] == DEFLATE {
                correct_bytes += 1;
            }
            if window[3] == FEXTRA {
                correct_bytes += 1;
            }
            // Tolerate a single corrupted byte out of the four.
            if correct_bytes >= 3 {
                break 'seek;
            }
            current_position += 1;
        }
        if buffer_size < BUFFER_SIZE as usize {
            // Short read means EOF: no further header exists.
            return;
        }
        {
            // Carry the last 4 bytes over to the front for the next pass.
            let (beginning, end) = bytes.split_at_mut(4);
            beginning.copy_from_slice(&end[end.len() - 4..]);
        }
        bytes.resize(4, 0);
        // After scanning a buffer of len L we advanced by L - 3 (one per
        // window); the carried-over window starts at offset L - 4, so the
        // cursor must step back exactly 1, not 4 — stepping back 4 left
        // every match after the first chunk reported 3 bytes early.
        current_position -= 1;
    }
    reader.seek(SeekFrom::Start(current_position)).unwrap();
}
fn process_payload(block: Option<BGZFBlock>) -> Result<BGZFBlockStatus, Error> {
match block {
None => Ok(BGZFBlockStatus {
corrupted: false,
inflated_payload_size: 0,
block: None,
}),
Some(block) => {
let inflated_payload_bytes = match inflate::inflate_bytes(&block.deflated_payload_bytes) {
Ok(inflated_payload_bytes) => inflated_payload_bytes,
Err(_) => return Ok(BGZFBlockStatus {
corrupted: true,
inflated_payload_size: block.inflated_payload_size,
block: None,
}),
};
let mut inflated_payload_digest = crc::crc32::Digest::new(crc::crc32::IEEE);
inflated_payload_digest.write(&inflated_payload_bytes);
let inflated_payload_crc32 = inflated_payload_digest.sum32();
if inflated_payload_crc32 != block.inflated_payload_crc32 {
return Ok(BGZFBlockStatus {
corrupted: true,
inflated_payload_size: block.inflated_payload_size,
block: None,
});
}
let inflated_payload_size = inflated_payload_bytes.len() as u32;
if inflated_payload_size != block.inflated_payload_size {
// TODO recoverable (wrong size is not a big issue if the CRC32 is correct)
return Ok(BGZFBlockStatus {
corrupted: true,
inflated_payload_size: block.inflated_payload_size,
block: None,
});
}
Ok(BGZFBlockStatus {
corrupted: block.corrupted,
inflated_payload_size: block.inflated_payload_size,
block: if block.corrupted {
None
} else {
Some(block)
}
})
}
}
}
/// Re-emits a rescued block verbatim: header, deflated payload, then the
/// CRC32/size trailer. A `None` writer (check mode) or `None` block makes
/// this a no-op.
fn write_block(writer: &mut Option<&mut dyn Write>, block: &Option<BGZFBlock>) {
    let (writer, block) = match (writer.as_mut(), block.as_ref()) {
        (Some(writer), Some(block)) => (writer, block),
        _ => return,
    };
    writer.write_all(&block.header_bytes).unwrap();
    writer.write_all(&block.deflated_payload_bytes).unwrap();
    writer.write_u32::<byteorder::LittleEndian>(block.inflated_payload_crc32).unwrap();
    writer.write_u32::<byteorder::LittleEndian>(block.inflated_payload_size).unwrap();
}
/// Notifies the listener (if any) that the stream has been processed up
/// to the end of `block`.
fn report_progress(progress_listener: &mut Option<&mut dyn ListenProgress>, block: &Option<BGZFBlock>) {
    if let (Some(listener), Some(block)) = (progress_listener.as_mut(), block.as_ref()) {
        listener.on_progress(block.end_position);
    }
}
/// Accounts one corrupted block in `results` and informs the listener.
fn report_bad_block(results: &mut Results, progress_listener: &mut Option<&mut dyn ListenProgress>, payload_status: &BGZFBlockStatus) {
    results.bad_blocks_count += 1;
    results.bad_blocks_size += u64::from(payload_status.inflated_payload_size);
    if let Some(listener) = progress_listener.as_mut() {
        listener.on_bad_block();
    }
}
// Records corruption found while parsing. `$previous_block_corrupted`
// marks the pending (previous) block — or, when none exists, falls
// through onto the current block's flag; `$current_block_corrupted`
// marks the current block directly. `$truncated_in_block` flags the
// results as truncated. Under `$fail_fast` the macro RETURNS `$results`
// from the enclosing function, counting one extra bad block.
macro_rules! fail {
    ($fail_fast: expr, $results: expr, $previous_block: expr, $previous_block_corrupted: expr, $current_block_corrupted_ref: expr, $current_block_corrupted: expr, $truncated_in_block: expr) => {
        match $previous_block {
            None => {
                $current_block_corrupted_ref |= $previous_block_corrupted;
            },
            Some(ref mut block) => {
                block.corrupted |= $previous_block_corrupted;
            }
        }
        $current_block_corrupted_ref |= $current_block_corrupted;
        assert!($current_block_corrupted_ref || true); // TODO workaround the "unused assignment warning"
        if $truncated_in_block {
            $results.truncated_in_block = true;
        }
        if $fail_fast {
            $results.bad_blocks_count += 1;
            return $results;
        }
    }
}
/// Core driver shared by `check` and `rescue`: walks the stream block by
/// block, parses each gzip/BGZF header, validates payloads (inline when
/// `threads == 1`, otherwise on a CPU pool, pipelined one block behind
/// header parsing), and writes intact blocks to `writer` when rescuing.
fn process(reader: &mut dyn Rescuable, mut writer: Option<&mut dyn Write>, fail_fast: bool, threads: usize, progress_listener: &mut Option<&mut dyn ListenProgress>) -> Results {
    // Measure the input size for progress reporting, then rewind.
    let reader_size = reader.seek(SeekFrom::End(0)).unwrap();
    reader.seek(SeekFrom::Start(0)).unwrap();
    if let Some(ref mut progress_listener) = progress_listener {
        progress_listener.on_new_target(reader_size);
    }
    let mut results = Results {
        blocks_count: 0u64,
        blocks_size: 0u64,
        bad_blocks_count: 0u64,
        bad_blocks_size: 0u64,
        truncated_in_block: false,
        truncated_between_blocks: false,
    };
    // threads == 0 means "one worker per CPU core".
    let pool;
    if threads == 0 {
        pool = futures_cpupool::CpuPool::new_num_cpus();
    } else {
        pool = futures_cpupool::CpuPool::new(threads);
    }
    let mut payload_status_futures = VecDeque::<futures_cpupool::CpuFuture<BGZFBlockStatus, Error>>::with_capacity(MAX_FUTURES);
    // Payload validation of a block is deferred until the NEXT header has
    // been parsed, so header-level corruption can retroactively mark the
    // previous block corrupted before its payload status is computed.
    let mut previous_block: Option<BGZFBlock> = None;
    let mut previous_block_position;
    let mut current_block_position = 0u64;
    let mut current_block_corrupted = false;
    'blocks: loop {
        // Backpressure: keep at most MAX_FUTURES payloads in flight.
        if payload_status_futures.len() == MAX_FUTURES {
            let payload_status = payload_status_futures.pop_front().unwrap().wait().unwrap();
            if payload_status.corrupted {
                report_bad_block(&mut results, progress_listener, &payload_status);
                fail!(fail_fast, results, previous_block, false, current_block_corrupted, false, false);
            } else {
                write_block(&mut writer, &payload_status.block);
            }
            report_progress(progress_listener, &payload_status.block);
        }
        previous_block_position = current_block_position;
        current_block_position = reader.seek(SeekFrom::Current(0i64)).unwrap();
        current_block_corrupted = false;
        // Read the fixed 12-byte header prefix (10 base bytes + XLEN).
        let mut header_bytes = vec![];
        {
            let mut header_reader = reader.take(12);
            match header_reader.read_to_end(&mut header_bytes) {
                Ok(header_size) => {
                    if header_size == 0 {
                        // Clean EOF on a block boundary.
                        break 'blocks;
                    }
                    if header_size < 12 {
                        fail!(fail_fast, results, previous_block, true, current_block_corrupted, false, true);
                        break 'blocks;
                    }
                },
                Err(_) => {
                    fail!(fail_fast, results, previous_block, true, current_block_corrupted, false, true);
                    break 'blocks;
                }
            }
        }
        // Check the four "magic" bytes, tolerating a single mismatch.
        let mut correct_bytes = 0;
        if header_bytes[0] == GZIP_IDENTIFIER[0] {
            correct_bytes += 1;
        }
        if header_bytes[1] == GZIP_IDENTIFIER[1] {
            correct_bytes += 1;
        }
        if header_bytes[2] == DEFLATE {
            correct_bytes += 1;
        }
        if header_bytes[3] == FEXTRA {
            correct_bytes += 1;
        }
        if correct_bytes < 4 {
            if correct_bytes == 3 {
                fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, false);
                // single corrupted byte, can probably deal with it in place
                // TODO fix the four bytes for rescue
            } else {
                fail!(fail_fast, results, previous_block, true, current_block_corrupted, false, false);
                // multiple corrupted bytes, safer to jump to the next block
                seek_next_block(reader, previous_block_position + 1);
                continue 'blocks;
            }
        }
        // header_bytes[4..8] => modification time; can be anything
        // header_bytes[8] => extra flags; can be anything
        // header_bytes[9] => operating system; can be anything
        let extra_field_size = (&mut &header_bytes[10..12]).read_u16::<byteorder::LittleEndian>().unwrap();
        // In rescue mode, also keep the raw extra field in header_bytes so
        // the block can be rewritten verbatim later.
        if writer.is_some() {
            {
                let mut extra_field_reader = reader.take(extra_field_size as u64);
                match extra_field_reader.read_to_end(&mut header_bytes) {
                    Ok(extra_field_actual_size) => {
                        if extra_field_actual_size < extra_field_size as usize {
                            fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
                            break 'blocks;
                        }
                    },
                    Err(_) => {
                        fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
                        break 'blocks;
                    }
                }
            }
            // TODO potential optimization:
            // Read the extra subfields from header_bytes instead of from reader and don't seek back
            reader.seek(SeekFrom::Current(-(extra_field_size as i64))).unwrap();
        }
        // Walk the extra subfields looking for the "BC" BGZF subfield,
        // which carries the total block size (minus one).
        let mut bgzf_block_size = 0u16;
        let mut remaining_extra_field_size = extra_field_size;
        while remaining_extra_field_size > 4 {
            let mut extra_subfield_identifier = [0u8; 2];
            match reader.read_exact(&mut extra_subfield_identifier) {
                Ok(_) => (),
                Err(_) => {
                    fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
                    break 'blocks;
                }
            }
            let extra_subfield_size = match reader.read_u16::<byteorder::LittleEndian>() {
                Ok(extra_subfield_size) => extra_subfield_size,
                Err(_) => {
                    fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
                    break 'blocks;
                }
            };
            // Score the subfield header bytes, again tolerating one mismatch.
            let mut correct_bytes = 0;
            if extra_subfield_identifier[0] == BGZF_IDENTIFIER[0] {
                correct_bytes += 1;
            }
            if extra_subfield_identifier[1] == BGZF_IDENTIFIER[1] {
                correct_bytes += 1;
            }
            if extra_subfield_size & 0xff == 2 {
                correct_bytes += 1;
            }
            if extra_subfield_size & 0xff00 == 0 {
                correct_bytes += 1;
            }
            if extra_subfield_size > remaining_extra_field_size - 4 {
                fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, false);
                seek_next_block(reader, current_block_position + 1);
                continue 'blocks;
            }
            if correct_bytes == 4 ||
               (correct_bytes == 3 &&
                extra_field_size == 6) {
                if correct_bytes != 4 {
                    fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, false);
                    // single corrupted byte, but most likely at the right place anyway
                    // TODO fix the four bytes for rescue
                }
                // The stored value is the BGZF block size minus one.
                bgzf_block_size = match reader.read_u16::<byteorder::LittleEndian>() {
                    Ok(bgzf_block_size) => bgzf_block_size + 1,
                    Err(_) => {
                        fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
                        break 'blocks;
                    }
                };
            } else {
                // Not the BGZF subfield: skip over its payload.
                match reader.seek(SeekFrom::Current(extra_subfield_size as i64)) {
                    Ok(_) => (),
                    Err(_) => {
                        fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
                        break 'blocks;
                    }
                }
            }
            remaining_extra_field_size -= 4 + extra_subfield_size;
        }
        if remaining_extra_field_size != 0u16 {
            fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, false);
            seek_next_block(reader, current_block_position + 1);
            continue 'blocks;
        }
        if bgzf_block_size == 0u16 {
            fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, false);
            seek_next_block(reader, current_block_position + 1);
            continue 'blocks;
        }
        // The current header parsed cleanly: the previous block's payload
        // can now be validated (inline or dispatched to the pool).
        if threads == 1 {
            let payload_status = process_payload(previous_block).unwrap();
            previous_block = None;
            if payload_status.corrupted {
                report_bad_block(&mut results, progress_listener, &payload_status);
                fail!(fail_fast, results, previous_block, false, current_block_corrupted, false, false);
            } else {
                write_block(&mut writer, &payload_status.block);
            }
            report_progress(progress_listener, &payload_status.block);
        } else {
            let payload_status_future = pool.spawn_fn(move || {
                process_payload(previous_block)
            });
            payload_status_futures.push_back(payload_status_future);
            previous_block = None;
        }
        // Read the deflated payload: block size minus 12 header bytes and
        // 8 trailer bytes, minus the extra field.
        let mut deflated_payload_bytes = vec![];
        {
            let deflated_payload_size = bgzf_block_size - 20u16 - extra_field_size;
            let mut deflated_payload_reader = reader.take(deflated_payload_size as u64);
            match deflated_payload_reader.read_to_end(&mut deflated_payload_bytes) {
                Ok(deflated_payload_read_size) => {
                    if deflated_payload_read_size < deflated_payload_size as usize {
                        fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
                        break 'blocks;
                    }
                },
                Err(_) => {
                    fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
                    break 'blocks;
                }
            }
        }
        // Read the 8-byte trailer: CRC32 and inflated size.
        let inflated_payload_crc32 = match reader.read_u32::<byteorder::LittleEndian>() {
            Ok(inflated_payload_crc32) => inflated_payload_crc32,
            Err(_) => {
                fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
                break 'blocks;
            }
        };
        let inflated_payload_size = match reader.read_u32::<byteorder::LittleEndian>() {
            Ok(inflated_payload_size) => inflated_payload_size,
            Err(_) => {
                fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
                break 'blocks;
            }
        };
        previous_block = Some(BGZFBlock {
            header_bytes: header_bytes,
            deflated_payload_bytes: deflated_payload_bytes,
            inflated_payload_crc32: inflated_payload_crc32,
            inflated_payload_size: inflated_payload_size,
            corrupted: current_block_corrupted,
            end_position: reader.seek(SeekFrom::Current(0i64)).unwrap(),
        });
        results.blocks_count += 1;
        results.blocks_size += inflated_payload_size as u64;
    }
    // Drain: validate the final pending block and any queued futures.
    let mut last_inflated_payload_size = 0u32;
    if threads == 1 {
        let payload_status = process_payload(previous_block).unwrap();
        previous_block = None;
        if payload_status.corrupted {
            report_bad_block(&mut results, progress_listener, &payload_status);
            fail!(fail_fast, results, previous_block, false, current_block_corrupted, false, false);
        } else {
            write_block(&mut writer, &payload_status.block);
        }
        last_inflated_payload_size = payload_status.inflated_payload_size;
        report_progress(progress_listener, &payload_status.block);
    } else {
        let payload_status_future = pool.spawn_fn(move || {
            process_payload(previous_block)
        });
        previous_block = None;
        payload_status_futures.push_back(payload_status_future);
        for payload_status_future in payload_status_futures.iter_mut() {
            let payload_status = payload_status_future.wait().unwrap();
            if payload_status.corrupted {
                report_bad_block(&mut results, progress_listener, &payload_status);
                fail!(fail_fast, results, previous_block, false, current_block_corrupted, false, false);
            } else {
                write_block(&mut writer, &payload_status.block);
            }
            last_inflated_payload_size = payload_status.inflated_payload_size;
            report_progress(progress_listener, &payload_status.block);
        }
    }
    // A valid BGZF stream ends with an empty block; if the last payload
    // was nonzero the EOF marker is missing, so synthesize one on rescue.
    if last_inflated_payload_size != 0u32 {
        results.truncated_between_blocks = true;
        write_block(&mut writer, &Some(BGZFBlock {
            header_bytes: vec![
                0x1f, 0x8b, // gzip identifier
                0x08, // method (deflate)
                0x04, // flags (FEXTRA)
                0x00, 0x00, 0x00, 0x00, // modification time
                0x00, // extra flags
                0xff, // operating system (unknown)
                0x06, 0x00, // extra field size (6 bytes)
                0x42, 0x43, // bgzf identifier
                0x02, 0x00, // extra subfield length (2 bytes)
                0x1b, 0x00, // bgzf block size, minus one (28 bytes - 1)
            ],
            deflated_payload_bytes: vec![
                0x03, 0x00 // deflated empty string
            ],
            inflated_payload_crc32: 0,
            inflated_payload_size: 0,
            corrupted: false,
            end_position: 0
        }));
        if fail_fast {
            return results;
        }
    }
    if let Some(ref mut progress_listener) = progress_listener {
        progress_listener.on_finished();
    }
    results
}
/// Validates a BGZF stream without rewriting anything; thin wrapper
/// around `process` with no output writer.
pub fn check(reader: &mut dyn Rescuable, fail_fast: bool, threads: usize, progress_listener: &mut Option<&mut dyn ListenProgress>) -> Results {
    process(reader, None, fail_fast, threads, progress_listener)
}
/// Copies every intact block of a BGZF stream to `writer`; never fails
/// fast, so as much data as possible is recovered.
pub fn rescue(reader: &mut dyn Rescuable, writer: &mut dyn Write, threads: usize, progress_listener: &mut Option<&mut dyn ListenProgress>) -> Results {
    process(reader, Some(writer), false, threads, progress_listener)
}
Make the code a bit more concise
use byteorder:: {
ReadBytesExt,
WriteBytesExt,
};
use crc::crc32::Hasher32;
use futures::Future;
use std::collections::VecDeque;
use std::io::{
BufRead,
Error,
Read,
Seek,
SeekFrom,
Write,
};
use std::str;
// 100 blocks of 64 kiB, even accounting for a huge overhead,
// is still less than 10 MiB, which is trivially manageable.
// Additionally, there's no chance that 100 threads or more
// give any speedup inflating blocks of at most 64 kiB.
const MAX_FUTURES: usize = 100;
// Chunk size used when scanning forward for the next block header.
const BUFFER_SIZE: u64 = 65536;
// First two bytes of any gzip member (RFC 1952 magic number).
const GZIP_IDENTIFIER: [u8; 2] = [0x1f, 0x8b];
// "BC" subfield identifier marking a BGZF extra field.
const BGZF_IDENTIFIER: [u8; 2] = [0x42, 0x43];
// gzip compression-method byte for deflate.
const DEFLATE: u8 = 8;
// gzip FLG bit signalling the presence of an extra field.
const FEXTRA: u8 = 1 << 2;
/// Returns the crate version baked in at compile time, or "unknown" when
/// the `CARGO_PKG_VERSION` environment variable was absent during build.
pub fn version() -> &'static str {
    option_env!("CARGO_PKG_VERSION").unwrap_or("unknown")
}
/// Input requirements for checking/rescuing: buffered reads plus seeking.
pub trait Rescuable: BufRead + Seek {}
/// Blanket implementation: anything buffered and seekable is rescuable.
impl<T: BufRead + Seek> Rescuable for T {}
/// Callbacks for observing the progress of a check/rescue run.
pub trait ListenProgress {
    /// Called once at the start with the total input size in bytes.
    fn on_new_target(&mut self, target: u64);
    /// Called after a block is handled, with the stream position reached.
    fn on_progress(&mut self, progress: u64);
    /// Called whenever a corrupted block is accounted.
    fn on_bad_block(&mut self);
    /// Called once after the whole input has been processed.
    fn on_finished(&mut self);
}
/// One parsed BGZF block, kept verbatim so it can be re-emitted on rescue.
struct BGZFBlock {
    // Raw gzip header bytes: the first 12 fixed bytes, plus the extra
    // field when running in rescue (writer-present) mode.
    header_bytes: Vec<u8>,
    // Compressed payload between the header and the 8-byte trailer.
    deflated_payload_bytes: Vec<u8>,
    // CRC32 of the inflated payload, as stored in the block trailer.
    inflated_payload_crc32: u32,
    // Size of the inflated payload, as stored in the block trailer.
    inflated_payload_size: u32,
    // Set when header-level corruption was detected while parsing.
    corrupted: bool,
    // Stream offset just past this block, used for progress reporting.
    end_position: u64,
}
/// Outcome of validating one block's payload.
struct BGZFBlockStatus {
    // True when inflation, the CRC32 check, or the size check failed.
    corrupted: bool,
    // Inflated payload size claimed by the block's trailer.
    inflated_payload_size: u32,
    // The block itself, retained only when intact (for re-emission).
    block: Option<BGZFBlock>,
}
/// Aggregate statistics returned by `check` and `rescue`.
pub struct Results {
    // Total number of blocks encountered.
    pub blocks_count: u64,
    // Sum of the inflated payload sizes claimed by all blocks.
    pub blocks_size: u64,
    // Number of blocks found corrupted.
    pub bad_blocks_count: u64,
    // Sum of the inflated payload sizes of corrupted blocks.
    pub bad_blocks_size: u64,
    // True when the stream ends in the middle of a block.
    pub truncated_in_block: bool,
    // True when the last block has a nonzero payload, i.e. the empty
    // BGZF end-of-file marker block is missing.
    pub truncated_between_blocks: bool,
}
/// Scans forward from `block_position` for the next plausible gzip/BGZF
/// header and leaves `reader` positioned on it (or at EOF when none is
/// found). A candidate is any 4-byte window matching at least 3 of: the
/// two gzip magic bytes, the deflate method byte, and the FEXTRA flag.
fn seek_next_block(reader: &mut dyn Rescuable, block_position: u64) {
    let mut current_position = block_position;
    reader.seek(SeekFrom::Start(current_position)).unwrap();
    let mut bytes = vec![];
    'seek: loop {
        // Read the next chunk; `bytes` still holds the last 4 bytes of the
        // previous chunk, so windows spanning a boundary are also checked.
        let mut buffer_reader = reader.take(BUFFER_SIZE);
        let buffer_size = buffer_reader.read_to_end(&mut bytes).unwrap();
        for window in bytes.windows(4) {
            let mut correct_bytes = 0;
            if window[0] == GZIP_IDENTIFIER[0] {
                correct_bytes += 1;
            }
            if window[1] == GZIP_IDENTIFIER[1] {
                correct_bytes += 1;
            }
            if window[2] == DEFLATE {
                correct_bytes += 1;
            }
            if window[3] == FEXTRA {
                correct_bytes += 1;
            }
            // Tolerate a single corrupted byte out of the four.
            if correct_bytes >= 3 {
                break 'seek;
            }
            current_position += 1;
        }
        if buffer_size < BUFFER_SIZE as usize {
            // Short read means EOF: no further header exists.
            return;
        }
        {
            // Carry the last 4 bytes over to the front for the next pass.
            let (beginning, end) = bytes.split_at_mut(4);
            beginning.copy_from_slice(&end[end.len() - 4..]);
        }
        bytes.resize(4, 0);
        // After scanning a buffer of len L we advanced by L - 3 (one per
        // window); the carried-over window starts at offset L - 4, so the
        // cursor must step back exactly 1, not 4 — stepping back 4 left
        // every match after the first chunk reported 3 bytes early.
        current_position -= 1;
    }
    reader.seek(SeekFrom::Start(current_position)).unwrap();
}
fn process_payload(block: Option<BGZFBlock>) -> Result<BGZFBlockStatus, Error> {
match block {
None => Ok(BGZFBlockStatus {
corrupted: false,
inflated_payload_size: 0,
block: None,
}),
Some(block) => {
let inflated_payload_bytes = match inflate::inflate_bytes(&block.deflated_payload_bytes) {
Ok(inflated_payload_bytes) => inflated_payload_bytes,
Err(_) => return Ok(BGZFBlockStatus {
corrupted: true,
inflated_payload_size: block.inflated_payload_size,
block: None,
}),
};
let mut inflated_payload_digest = crc::crc32::Digest::new(crc::crc32::IEEE);
inflated_payload_digest.write(&inflated_payload_bytes);
let inflated_payload_crc32 = inflated_payload_digest.sum32();
if inflated_payload_crc32 != block.inflated_payload_crc32 {
return Ok(BGZFBlockStatus {
corrupted: true,
inflated_payload_size: block.inflated_payload_size,
block: None,
});
}
let inflated_payload_size = inflated_payload_bytes.len() as u32;
if inflated_payload_size != block.inflated_payload_size {
// TODO recoverable (wrong size is not a big issue if the CRC32 is correct)
return Ok(BGZFBlockStatus {
corrupted: true,
inflated_payload_size: block.inflated_payload_size,
block: None,
});
}
Ok(BGZFBlockStatus {
corrupted: block.corrupted,
inflated_payload_size: block.inflated_payload_size,
block: if block.corrupted {
None
} else {
Some(block)
}
})
}
}
}
/// Re-emits a rescued block verbatim: header, deflated payload, then the
/// CRC32/size trailer. A `None` writer (check mode) or `None` block makes
/// this a no-op.
fn write_block(writer: &mut Option<&mut dyn Write>, block: &Option<BGZFBlock>) {
    let (writer, block) = match (writer.as_mut(), block.as_ref()) {
        (Some(writer), Some(block)) => (writer, block),
        _ => return,
    };
    writer.write_all(&block.header_bytes).unwrap();
    writer.write_all(&block.deflated_payload_bytes).unwrap();
    writer.write_u32::<byteorder::LittleEndian>(block.inflated_payload_crc32).unwrap();
    writer.write_u32::<byteorder::LittleEndian>(block.inflated_payload_size).unwrap();
}
/// Notifies the listener (if any) that the stream has been processed up
/// to the end of `block`.
fn report_progress(progress_listener: &mut Option<&mut dyn ListenProgress>, block: &Option<BGZFBlock>) {
    if let (Some(listener), Some(block)) = (progress_listener.as_mut(), block.as_ref()) {
        listener.on_progress(block.end_position);
    }
}
/// Accounts one corrupted block in `results` and informs the listener.
fn report_bad_block(results: &mut Results, progress_listener: &mut Option<&mut dyn ListenProgress>, payload_status: &BGZFBlockStatus) {
    results.bad_blocks_count += 1;
    results.bad_blocks_size += u64::from(payload_status.inflated_payload_size);
    if let Some(listener) = progress_listener.as_mut() {
        listener.on_bad_block();
    }
}
// Records corruption found while parsing. `$previous_block_corrupted`
// marks the pending (previous) block — or, when none exists, falls
// through onto the current block's flag; `$current_block_corrupted`
// marks the current block directly. `$truncated_in_block` flags the
// results as truncated. Under `$fail_fast` the macro RETURNS `$results`
// from the enclosing function, counting one extra bad block.
macro_rules! fail {
    ($fail_fast: expr, $results: expr, $previous_block: expr, $previous_block_corrupted: expr, $current_block_corrupted_ref: expr, $current_block_corrupted: expr, $truncated_in_block: expr) => {
        match $previous_block {
            None => {
                $current_block_corrupted_ref |= $previous_block_corrupted;
            },
            Some(ref mut block) => {
                block.corrupted |= $previous_block_corrupted;
            }
        }
        $current_block_corrupted_ref |= $current_block_corrupted;
        assert!($current_block_corrupted_ref || true); // TODO workaround the "unused assignment warning"
        if $truncated_in_block {
            $results.truncated_in_block = true;
        }
        if $fail_fast {
            $results.bad_blocks_count += 1;
            return $results;
        }
    }
}
fn process(reader: &mut dyn Rescuable, mut writer: Option<&mut dyn Write>, fail_fast: bool, threads: usize, progress_listener: &mut Option<&mut dyn ListenProgress>) -> Results {
let reader_size = reader.seek(SeekFrom::End(0)).unwrap();
reader.seek(SeekFrom::Start(0)).unwrap();
if let Some(ref mut progress_listener) = progress_listener {
progress_listener.on_new_target(reader_size);
}
let mut results = Results {
blocks_count: 0u64,
blocks_size: 0u64,
bad_blocks_count: 0u64,
bad_blocks_size: 0u64,
truncated_in_block: false,
truncated_between_blocks: false,
};
let pool;
if threads == 0 {
pool = futures_cpupool::CpuPool::new_num_cpus();
} else {
pool = futures_cpupool::CpuPool::new(threads);
}
let mut payload_status_futures = VecDeque::<futures_cpupool::CpuFuture<BGZFBlockStatus, Error>>::with_capacity(MAX_FUTURES);
let mut previous_block: Option<BGZFBlock> = None;
let mut previous_block_position;
let mut current_block_position = 0u64;
let mut current_block_corrupted = false;
'blocks: loop {
if payload_status_futures.len() == MAX_FUTURES {
let payload_status = payload_status_futures.pop_front().unwrap().wait().unwrap();
if payload_status.corrupted {
report_bad_block(&mut results, progress_listener, &payload_status);
fail!(fail_fast, results, previous_block, false, current_block_corrupted, false, false);
} else {
write_block(&mut writer, &payload_status.block);
}
report_progress(progress_listener, &payload_status.block);
}
previous_block_position = current_block_position;
current_block_position = reader.seek(SeekFrom::Current(0i64)).unwrap();
current_block_corrupted = false;
let mut header_bytes = vec![];
{
let mut header_reader = reader.take(12);
match header_reader.read_to_end(&mut header_bytes) {
Ok(header_size) => {
if header_size == 0 {
break 'blocks;
}
if header_size < 12 {
fail!(fail_fast, results, previous_block, true, current_block_corrupted, false, true);
break 'blocks;
}
},
Err(_) => {
fail!(fail_fast, results, previous_block, true, current_block_corrupted, false, true);
break 'blocks;
}
}
}
let mut correct_bytes = 0;
if header_bytes[0] == GZIP_IDENTIFIER[0] {
correct_bytes += 1;
}
if header_bytes[1] == GZIP_IDENTIFIER[1] {
correct_bytes += 1;
}
if header_bytes[2] == DEFLATE {
correct_bytes += 1;
}
if header_bytes[3] == FEXTRA {
correct_bytes += 1;
}
if correct_bytes < 4 {
if correct_bytes == 3 {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, false);
// single corrupted byte, can probably deal with it in place
// TODO fix the four bytes for rescue
} else {
fail!(fail_fast, results, previous_block, true, current_block_corrupted, false, false);
// multiple corrupted bytes, safer to jump to the next block
seek_next_block(reader, previous_block_position + 1);
continue 'blocks;
}
}
// header_bytes[4..8] => modification time; can be anything
// header_bytes[8] => extra flags; can be anything
// header_bytes[9] => operating system; can be anything
let extra_field_size = (&mut &header_bytes[10..12]).read_u16::<byteorder::LittleEndian>().unwrap();
if writer.is_some() {
{
let mut extra_field_reader = reader.take(extra_field_size as u64);
match extra_field_reader.read_to_end(&mut header_bytes) {
Ok(extra_field_actual_size) => {
if extra_field_actual_size < extra_field_size as usize {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
break 'blocks;
}
},
Err(_) => {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
break 'blocks;
}
}
}
// TODO potential optimization:
// Read the extra subfields from header_bytes instead of from reader and don't seek back
reader.seek(SeekFrom::Current(-(extra_field_size as i64))).unwrap();
}
let mut bgzf_block_size = 0u16;
let mut remaining_extra_field_size = extra_field_size;
while remaining_extra_field_size > 4 {
let mut extra_subfield_identifier = [0u8; 2];
if let Err(_) = reader.read_exact(&mut extra_subfield_identifier) {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
break 'blocks;
}
let extra_subfield_size = match reader.read_u16::<byteorder::LittleEndian>() {
Ok(extra_subfield_size) => extra_subfield_size,
Err(_) => {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
break 'blocks;
}
};
let mut correct_bytes = 0;
if extra_subfield_identifier[0] == BGZF_IDENTIFIER[0] {
correct_bytes += 1;
}
if extra_subfield_identifier[1] == BGZF_IDENTIFIER[1] {
correct_bytes += 1;
}
if extra_subfield_size & 0xff == 2 {
correct_bytes += 1;
}
if extra_subfield_size & 0xff00 == 0 {
correct_bytes += 1;
}
if extra_subfield_size > remaining_extra_field_size - 4 {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, false);
seek_next_block(reader, current_block_position + 1);
continue 'blocks;
}
if correct_bytes == 4 ||
(correct_bytes == 3 &&
extra_field_size == 6) {
if correct_bytes != 4 {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, false);
// single corrupted byte, but most likely at the right place anyway
// TODO fix the four bytes for rescue
}
bgzf_block_size = match reader.read_u16::<byteorder::LittleEndian>() {
Ok(bgzf_block_size) => bgzf_block_size + 1,
Err(_) => {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
break 'blocks;
}
};
} else if let Err(_) = reader.seek(SeekFrom::Current(extra_subfield_size as i64)) {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
break 'blocks;
}
remaining_extra_field_size -= 4 + extra_subfield_size;
}
if remaining_extra_field_size != 0u16 {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, false);
seek_next_block(reader, current_block_position + 1);
continue 'blocks;
}
if bgzf_block_size == 0u16 {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, false);
seek_next_block(reader, current_block_position + 1);
continue 'blocks;
}
if threads == 1 {
let payload_status = process_payload(previous_block).unwrap();
previous_block = None;
if payload_status.corrupted {
report_bad_block(&mut results, progress_listener, &payload_status);
fail!(fail_fast, results, previous_block, false, current_block_corrupted, false, false);
} else {
write_block(&mut writer, &payload_status.block);
}
report_progress(progress_listener, &payload_status.block);
} else {
let payload_status_future = pool.spawn_fn(move || {
process_payload(previous_block)
});
payload_status_futures.push_back(payload_status_future);
previous_block = None;
}
let mut deflated_payload_bytes = vec![];
{
let deflated_payload_size = bgzf_block_size - 20u16 - extra_field_size;
let mut deflated_payload_reader = reader.take(deflated_payload_size as u64);
match deflated_payload_reader.read_to_end(&mut deflated_payload_bytes) {
Ok(deflated_payload_read_size) => {
if deflated_payload_read_size < deflated_payload_size as usize {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
break 'blocks;
}
},
Err(_) => {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
break 'blocks;
}
}
}
let inflated_payload_crc32 = match reader.read_u32::<byteorder::LittleEndian>() {
Ok(inflated_payload_crc32) => inflated_payload_crc32,
Err(_) => {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
break 'blocks;
}
};
let inflated_payload_size = match reader.read_u32::<byteorder::LittleEndian>() {
Ok(inflated_payload_size) => inflated_payload_size,
Err(_) => {
fail!(fail_fast, results, previous_block, false, current_block_corrupted, true, true);
break 'blocks;
}
};
previous_block = Some(BGZFBlock {
header_bytes: header_bytes,
deflated_payload_bytes: deflated_payload_bytes,
inflated_payload_crc32: inflated_payload_crc32,
inflated_payload_size: inflated_payload_size,
corrupted: current_block_corrupted,
end_position: reader.seek(SeekFrom::Current(0i64)).unwrap(),
});
results.blocks_count += 1;
results.blocks_size += inflated_payload_size as u64;
}
let mut last_inflated_payload_size = 0u32;
if threads == 1 {
let payload_status = process_payload(previous_block).unwrap();
previous_block = None;
if payload_status.corrupted {
report_bad_block(&mut results, progress_listener, &payload_status);
fail!(fail_fast, results, previous_block, false, current_block_corrupted, false, false);
} else {
write_block(&mut writer, &payload_status.block);
}
last_inflated_payload_size = payload_status.inflated_payload_size;
report_progress(progress_listener, &payload_status.block);
} else {
let payload_status_future = pool.spawn_fn(move || {
process_payload(previous_block)
});
previous_block = None;
payload_status_futures.push_back(payload_status_future);
for payload_status_future in payload_status_futures.iter_mut() {
let payload_status = payload_status_future.wait().unwrap();
if payload_status.corrupted {
report_bad_block(&mut results, progress_listener, &payload_status);
fail!(fail_fast, results, previous_block, false, current_block_corrupted, false, false);
} else {
write_block(&mut writer, &payload_status.block);
}
last_inflated_payload_size = payload_status.inflated_payload_size;
report_progress(progress_listener, &payload_status.block);
}
}
if last_inflated_payload_size != 0u32 {
results.truncated_between_blocks = true;
write_block(&mut writer, &Some(BGZFBlock {
header_bytes: vec![
0x1f, 0x8b, // gzip identifier
0x08, // method (deflate)
0x04, // flags (FEXTRA)
0x00, 0x00, 0x00, 0x00, // modification time
0x00, // extra flags
0xff, // operating system (unknown)
0x06, 0x00, // extra field size (6 bytes)
0x42, 0x43, // bgzf identifier
0x02, 0x00, // extra subfield length (2 bytes)
0x1b, 0x00, // bgzf block size, minus one (28 bytes - 1)
],
deflated_payload_bytes: vec![
0x03, 0x00 // deflated empty string
],
inflated_payload_crc32: 0,
inflated_payload_size: 0,
corrupted: false,
end_position: 0
}));
if fail_fast {
return results;
}
}
if let Some(ref mut progress_listener) = progress_listener {
progress_listener.on_finished();
}
results
}
/// Scan `reader`, validating every BGZF block, without writing any output.
///
/// Thin wrapper around `process` with no rescue writer.
///
/// * `fail_fast` — return as soon as the first problem is found instead of
///   scanning the whole input
/// * `threads` — worker threads used to verify block payloads; `0` means
///   one thread per CPU
/// * `progress_listener` — optional observer notified of per-block progress
pub fn check(reader: &mut dyn Rescuable, fail_fast: bool, threads: usize, progress_listener: &mut Option<&mut dyn ListenProgress>) -> Results {
    process(reader, None, fail_fast, threads, progress_listener)
}
/// Scan `reader` like `check`, additionally writing every recoverable block
/// to `writer`. The whole input is always processed (`fail_fast` is fixed
/// to `false`), salvaging as much data as can be read.
pub fn rescue(reader: &mut dyn Rescuable, writer: &mut dyn Write, threads: usize, progress_listener: &mut Option<&mut dyn ListenProgress>) -> Results {
    process(reader, Some(writer), false, threads, progress_listener)
}
|
extern crate z3_sys;
extern crate libc;
pub mod z3;
exposing Z3 directly
extern crate z3_sys;
extern crate libc;
pub mod z3;
pub use z3::*;
|
#![allow(non_camel_case_types)]
extern crate libc;
use libc::{c_void,c_int,c_char,c_ulonglong,size_t,dev_t};
#[cfg(hwdb)]
pub use hwdb::*;
// Opaque handle types for libudev objects.
//
// Each struct carries a private `c_void` member so that it is not
// zero-sized and cannot be constructed from Rust. Field-less
// `#[repr(C)]` structs are not FFI-safe (the `improper_ctypes` lint
// flags them); these types are only ever used behind raw pointers
// handed out and reclaimed by libudev itself.
#[repr(C)]
pub struct udev {
    __private: c_void,
}
#[repr(C)]
pub struct udev_list_entry {
    __private: c_void,
}
#[repr(C)]
pub struct udev_device {
    __private: c_void,
}
#[repr(C)]
pub struct udev_monitor {
    __private: c_void,
}
#[repr(C)]
pub struct udev_enumerate {
    __private: c_void,
}
#[repr(C)]
pub struct udev_queue {
    __private: c_void,
}
// Raw FFI declarations for libudev. Every handle argument is an opaque
// pointer allocated by libudev; lifetimes are managed through the matching
// *_ref / *_unref pairs declared below.
extern "C" {
    // udev
    pub fn udev_new() -> *mut udev;
    pub fn udev_ref(udev: *mut udev) -> *mut udev;
    pub fn udev_unref(udev: *mut udev) -> *mut udev;
    pub fn udev_set_userdata(udev: *mut udev, userdata: *mut c_void);
    pub fn udev_get_userdata(udev: *mut udev) -> *mut c_void;
    // udev_list
    pub fn udev_list_entry_get_next(list_entry: *mut udev_list_entry) -> *mut udev_list_entry;
    pub fn udev_list_entry_get_by_name(list_entry: *mut udev_list_entry, name: *const c_char) -> *mut udev_list_entry;
    pub fn udev_list_entry_get_name(list_entry: *mut udev_list_entry) -> *const c_char;
    pub fn udev_list_entry_get_value(list_entry: *mut udev_list_entry) -> *const c_char;
    // udev_device
    pub fn udev_device_ref(udev_device: *mut udev_device) -> *mut udev_device;
    pub fn udev_device_unref(udev_device: *mut udev_device) -> *mut udev_device;
    pub fn udev_device_get_udev(udev_device: *mut udev_device) -> *mut udev;
    pub fn udev_device_new_from_syspath(udev: *mut udev, syspath: *const c_char) -> *mut udev_device;
    // dev_type presumably selects 'b' (block) or 'c' (character) as in the
    // libudev C API — TODO confirm against the installed headers.
    pub fn udev_device_new_from_devnum(udev: *mut udev, dev_type: c_char, devnum: dev_t) -> *mut udev_device;
    pub fn udev_device_new_from_subsystem_sysname(udev: *mut udev, subsystem: *const c_char, sysname: *const c_char) -> *mut udev_device;
    pub fn udev_device_new_from_device_id(udev: *mut udev, id: *const c_char) -> *mut udev_device;
    pub fn udev_device_new_from_environment(udev: *mut udev) -> *mut udev_device;
    pub fn udev_device_get_parent(udev_device: *mut udev_device) -> *mut udev_device;
    pub fn udev_device_get_parent_with_subsystem_devtype(udev_device: *mut udev_device, subsystem: *const c_char, devtype: *const c_char) -> *mut udev_device;
    pub fn udev_device_get_devpath(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_subsystem(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_devtype(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_syspath(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_sysname(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_sysnum(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_devnode(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_is_initialized(udev_device: *mut udev_device) -> c_int;
    pub fn udev_device_get_devlinks_list_entry(udev_device: *mut udev_device) -> *mut udev_list_entry;
    pub fn udev_device_get_properties_list_entry(udev_device: *mut udev_device) -> *mut udev_list_entry;
    pub fn udev_device_get_tags_list_entry(udev_device: *mut udev_device) -> *mut udev_list_entry;
    pub fn udev_device_get_property_value(udev_device: *mut udev_device, key: *const c_char) -> *const c_char;
    pub fn udev_device_get_driver(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_devnum(udev_device: *mut udev_device) -> dev_t;
    pub fn udev_device_get_action(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_sysattr_value(udev_device: *mut udev_device, sysattr: *const c_char) -> *const c_char;
    // NOTE(review): `value` is declared *mut c_char although it is
    // conceptually input-only; this matches older libudev headers —
    // verify constness against the linked libudev version.
    pub fn udev_device_set_sysattr_value(udev_device: *mut udev_device, sysattr: *const c_char, value: *mut c_char) -> c_int;
    pub fn udev_device_get_sysattr_list_entry(udev_device: *mut udev_device) -> *mut udev_list_entry;
    pub fn udev_device_get_seqnum(udev_device: *mut udev_device) -> c_ulonglong;
    pub fn udev_device_get_usec_since_initialized(udev_device: *mut udev_device) -> c_ulonglong;
    pub fn udev_device_has_tag(udev_device: *mut udev_device, tag: *const c_char) -> c_int;
    // udev_monitor
    pub fn udev_monitor_ref(udev_monitor: *mut udev_monitor) -> *mut udev_monitor;
    pub fn udev_monitor_unref(udev_monitor: *mut udev_monitor) -> *mut udev_monitor;
    pub fn udev_monitor_get_udev(udev_monitor: *mut udev_monitor) -> *mut udev;
    pub fn udev_monitor_new_from_netlink(udev: *mut udev, name: *const c_char) -> *mut udev_monitor;
    pub fn udev_monitor_enable_receiving(udev_monitor: *mut udev_monitor) -> c_int;
    pub fn udev_monitor_set_receive_buffer_size(udev_monitor: *mut udev_monitor, size: c_int) -> c_int;
    pub fn udev_monitor_get_fd(udev_monitor: *mut udev_monitor) -> c_int;
    pub fn udev_monitor_receive_device(udev_monitor: *mut udev_monitor) -> *mut udev_device;
    pub fn udev_monitor_filter_add_match_subsystem_devtype(udev_monitor: *mut udev_monitor, subsystem: *const c_char, devtype: *const c_char) -> c_int;
    pub fn udev_monitor_filter_add_match_tag(udev_monitor: *mut udev_monitor, tag: *const c_char) -> c_int;
    pub fn udev_monitor_filter_update(udev_monitor: *mut udev_monitor) -> c_int;
    pub fn udev_monitor_filter_remove(udev_monitor: *mut udev_monitor) -> c_int;
    // udev_enumerate
    pub fn udev_enumerate_ref(udev_enumerate: *mut udev_enumerate) -> *mut udev_enumerate;
    pub fn udev_enumerate_unref(udev_enumerate: *mut udev_enumerate) -> *mut udev_enumerate;
    pub fn udev_enumerate_get_udev(udev_enumerate: *mut udev_enumerate) -> *mut udev;
    pub fn udev_enumerate_new(udev: *mut udev) -> *mut udev_enumerate;
    pub fn udev_enumerate_add_match_subsystem(udev_enumerate: *mut udev_enumerate, subsystem: *const c_char) -> c_int;
    pub fn udev_enumerate_add_nomatch_subsystem(udev_enumerate: *mut udev_enumerate, subsystem: *const c_char) -> c_int;
    pub fn udev_enumerate_add_match_sysattr(udev_enumerate: *mut udev_enumerate, sysattr: *const c_char, value: *const c_char) -> c_int;
    pub fn udev_enumerate_add_nomatch_sysattr(udev_enumerate: *mut udev_enumerate, sysattr: *const c_char, value: *const c_char) -> c_int;
    pub fn udev_enumerate_add_match_property(udev_enumerate: *mut udev_enumerate, property: *const c_char, value: *const c_char) -> c_int;
    pub fn udev_enumerate_add_match_tag(udev_enumerate: *mut udev_enumerate, tag: *const c_char) -> c_int;
    pub fn udev_enumerate_add_match_parent(udev_enumerate: *mut udev_enumerate, parent: *mut udev_device) -> c_int;
    pub fn udev_enumerate_add_match_is_initialized(udev_enumerate: *mut udev_enumerate) -> c_int;
    pub fn udev_enumerate_add_match_sysname(udev_enumerate: *mut udev_enumerate, sysname: *const c_char) -> c_int;
    pub fn udev_enumerate_add_syspath(udev_enumerate: *mut udev_enumerate, syspath: *const c_char) -> c_int;
    pub fn udev_enumerate_scan_devices(udev_enumerate: *mut udev_enumerate) -> c_int;
    pub fn udev_enumerate_scan_subsystems(udev_enumerate: *mut udev_enumerate) -> c_int;
    pub fn udev_enumerate_get_list_entry(udev_enumerate: *mut udev_enumerate) -> *mut udev_list_entry;
    // udev_queue
    pub fn udev_queue_ref(udev_queue: *mut udev_queue) -> *mut udev_queue;
    pub fn udev_queue_unref(udev_queue: *mut udev_queue) -> *mut udev_queue;
    pub fn udev_queue_get_udev(udev_queue: *mut udev_queue) -> *mut udev;
    pub fn udev_queue_new(udev: *mut udev) -> *mut udev_queue;
    pub fn udev_queue_get_udev_is_active(udev_queue: *mut udev_queue) -> c_int;
    pub fn udev_queue_get_queue_is_empty(udev_queue: *mut udev_queue) -> c_int;
    pub fn udev_queue_get_fd(udev_queue: *mut udev_queue) -> c_int;
    pub fn udev_queue_flush(udev_queue: *mut udev_queue) -> c_int;
    // udev_util
    pub fn udev_util_encode_string(str: *const c_char, str_enc: *mut c_char, len: size_t) -> c_int;
}
#[cfg(hwdb)]
mod hwdb {
    //! Optional bindings for the udev hardware database, compiled only
    //! when the `hwdb` cfg flag is set.
    use super::libc::{c_void,c_uint,c_char};
    use super::{udev,udev_list_entry};
    // Opaque hwdb handle. The private `c_void` member keeps the struct
    // from being zero-sized — field-less `#[repr(C)]` structs are not
    // FFI-safe — and prevents construction from Rust; it is only ever
    // used behind raw pointers returned by libudev.
    #[repr(C)]
    pub struct udev_hwdb {
        __private: c_void,
    }
    extern "C" {
        pub fn udev_hwdb_ref(hwdb: *mut udev_hwdb) -> *mut udev_hwdb;
        pub fn udev_hwdb_unref(hwdb: *mut udev_hwdb) -> *mut udev_hwdb;
        pub fn udev_hwdb_new(udev: *mut udev) -> *mut udev_hwdb;
        pub fn udev_hwdb_get_properties_list_entry(hwdb: *mut udev_hwdb, modalias: *const c_char, flags: c_uint) -> *mut udev_list_entry;
    }
}
fixes zero-sized structs in FFI
#![allow(non_camel_case_types)]
extern crate libc;
use libc::{c_void,c_int,c_char,c_ulonglong,size_t,dev_t};
#[cfg(hwdb)]
pub use hwdb::*;
// Opaque handle types for libudev objects. Each carries a private
// `c_void` member so the struct is not zero-sized (field-less
// `#[repr(C)]` structs are not FFI-safe) and cannot be constructed
// from Rust; they are only ever used behind raw pointers owned by
// libudev.
#[repr(C)]
pub struct udev {
    __private: c_void,
}
#[repr(C)]
pub struct udev_list_entry {
    __private: c_void,
}
#[repr(C)]
pub struct udev_device {
    __private: c_void,
}
#[repr(C)]
pub struct udev_monitor {
    __private: c_void,
}
#[repr(C)]
pub struct udev_enumerate {
    __private: c_void,
}
#[repr(C)]
pub struct udev_queue {
    __private: c_void,
}
// Raw FFI declarations for libudev. Every handle argument is an opaque
// pointer allocated by libudev; lifetimes are managed through the matching
// *_ref / *_unref pairs declared below.
extern "C" {
    // udev
    pub fn udev_new() -> *mut udev;
    pub fn udev_ref(udev: *mut udev) -> *mut udev;
    pub fn udev_unref(udev: *mut udev) -> *mut udev;
    pub fn udev_set_userdata(udev: *mut udev, userdata: *mut c_void);
    pub fn udev_get_userdata(udev: *mut udev) -> *mut c_void;
    // udev_list
    pub fn udev_list_entry_get_next(list_entry: *mut udev_list_entry) -> *mut udev_list_entry;
    pub fn udev_list_entry_get_by_name(list_entry: *mut udev_list_entry, name: *const c_char) -> *mut udev_list_entry;
    pub fn udev_list_entry_get_name(list_entry: *mut udev_list_entry) -> *const c_char;
    pub fn udev_list_entry_get_value(list_entry: *mut udev_list_entry) -> *const c_char;
    // udev_device
    pub fn udev_device_ref(udev_device: *mut udev_device) -> *mut udev_device;
    pub fn udev_device_unref(udev_device: *mut udev_device) -> *mut udev_device;
    pub fn udev_device_get_udev(udev_device: *mut udev_device) -> *mut udev;
    pub fn udev_device_new_from_syspath(udev: *mut udev, syspath: *const c_char) -> *mut udev_device;
    // dev_type presumably selects 'b' (block) or 'c' (character) as in the
    // libudev C API — TODO confirm against the installed headers.
    pub fn udev_device_new_from_devnum(udev: *mut udev, dev_type: c_char, devnum: dev_t) -> *mut udev_device;
    pub fn udev_device_new_from_subsystem_sysname(udev: *mut udev, subsystem: *const c_char, sysname: *const c_char) -> *mut udev_device;
    pub fn udev_device_new_from_device_id(udev: *mut udev, id: *const c_char) -> *mut udev_device;
    pub fn udev_device_new_from_environment(udev: *mut udev) -> *mut udev_device;
    pub fn udev_device_get_parent(udev_device: *mut udev_device) -> *mut udev_device;
    pub fn udev_device_get_parent_with_subsystem_devtype(udev_device: *mut udev_device, subsystem: *const c_char, devtype: *const c_char) -> *mut udev_device;
    pub fn udev_device_get_devpath(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_subsystem(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_devtype(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_syspath(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_sysname(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_sysnum(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_devnode(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_is_initialized(udev_device: *mut udev_device) -> c_int;
    pub fn udev_device_get_devlinks_list_entry(udev_device: *mut udev_device) -> *mut udev_list_entry;
    pub fn udev_device_get_properties_list_entry(udev_device: *mut udev_device) -> *mut udev_list_entry;
    pub fn udev_device_get_tags_list_entry(udev_device: *mut udev_device) -> *mut udev_list_entry;
    pub fn udev_device_get_property_value(udev_device: *mut udev_device, key: *const c_char) -> *const c_char;
    pub fn udev_device_get_driver(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_devnum(udev_device: *mut udev_device) -> dev_t;
    pub fn udev_device_get_action(udev_device: *mut udev_device) -> *const c_char;
    pub fn udev_device_get_sysattr_value(udev_device: *mut udev_device, sysattr: *const c_char) -> *const c_char;
    // NOTE(review): `value` is declared *mut c_char although it is
    // conceptually input-only; this matches older libudev headers —
    // verify constness against the linked libudev version.
    pub fn udev_device_set_sysattr_value(udev_device: *mut udev_device, sysattr: *const c_char, value: *mut c_char) -> c_int;
    pub fn udev_device_get_sysattr_list_entry(udev_device: *mut udev_device) -> *mut udev_list_entry;
    pub fn udev_device_get_seqnum(udev_device: *mut udev_device) -> c_ulonglong;
    pub fn udev_device_get_usec_since_initialized(udev_device: *mut udev_device) -> c_ulonglong;
    pub fn udev_device_has_tag(udev_device: *mut udev_device, tag: *const c_char) -> c_int;
    // udev_monitor
    pub fn udev_monitor_ref(udev_monitor: *mut udev_monitor) -> *mut udev_monitor;
    pub fn udev_monitor_unref(udev_monitor: *mut udev_monitor) -> *mut udev_monitor;
    pub fn udev_monitor_get_udev(udev_monitor: *mut udev_monitor) -> *mut udev;
    pub fn udev_monitor_new_from_netlink(udev: *mut udev, name: *const c_char) -> *mut udev_monitor;
    pub fn udev_monitor_enable_receiving(udev_monitor: *mut udev_monitor) -> c_int;
    pub fn udev_monitor_set_receive_buffer_size(udev_monitor: *mut udev_monitor, size: c_int) -> c_int;
    pub fn udev_monitor_get_fd(udev_monitor: *mut udev_monitor) -> c_int;
    pub fn udev_monitor_receive_device(udev_monitor: *mut udev_monitor) -> *mut udev_device;
    pub fn udev_monitor_filter_add_match_subsystem_devtype(udev_monitor: *mut udev_monitor, subsystem: *const c_char, devtype: *const c_char) -> c_int;
    pub fn udev_monitor_filter_add_match_tag(udev_monitor: *mut udev_monitor, tag: *const c_char) -> c_int;
    pub fn udev_monitor_filter_update(udev_monitor: *mut udev_monitor) -> c_int;
    pub fn udev_monitor_filter_remove(udev_monitor: *mut udev_monitor) -> c_int;
    // udev_enumerate
    pub fn udev_enumerate_ref(udev_enumerate: *mut udev_enumerate) -> *mut udev_enumerate;
    pub fn udev_enumerate_unref(udev_enumerate: *mut udev_enumerate) -> *mut udev_enumerate;
    pub fn udev_enumerate_get_udev(udev_enumerate: *mut udev_enumerate) -> *mut udev;
    pub fn udev_enumerate_new(udev: *mut udev) -> *mut udev_enumerate;
    pub fn udev_enumerate_add_match_subsystem(udev_enumerate: *mut udev_enumerate, subsystem: *const c_char) -> c_int;
    pub fn udev_enumerate_add_nomatch_subsystem(udev_enumerate: *mut udev_enumerate, subsystem: *const c_char) -> c_int;
    pub fn udev_enumerate_add_match_sysattr(udev_enumerate: *mut udev_enumerate, sysattr: *const c_char, value: *const c_char) -> c_int;
    pub fn udev_enumerate_add_nomatch_sysattr(udev_enumerate: *mut udev_enumerate, sysattr: *const c_char, value: *const c_char) -> c_int;
    pub fn udev_enumerate_add_match_property(udev_enumerate: *mut udev_enumerate, property: *const c_char, value: *const c_char) -> c_int;
    pub fn udev_enumerate_add_match_tag(udev_enumerate: *mut udev_enumerate, tag: *const c_char) -> c_int;
    pub fn udev_enumerate_add_match_parent(udev_enumerate: *mut udev_enumerate, parent: *mut udev_device) -> c_int;
    pub fn udev_enumerate_add_match_is_initialized(udev_enumerate: *mut udev_enumerate) -> c_int;
    pub fn udev_enumerate_add_match_sysname(udev_enumerate: *mut udev_enumerate, sysname: *const c_char) -> c_int;
    pub fn udev_enumerate_add_syspath(udev_enumerate: *mut udev_enumerate, syspath: *const c_char) -> c_int;
    pub fn udev_enumerate_scan_devices(udev_enumerate: *mut udev_enumerate) -> c_int;
    pub fn udev_enumerate_scan_subsystems(udev_enumerate: *mut udev_enumerate) -> c_int;
    pub fn udev_enumerate_get_list_entry(udev_enumerate: *mut udev_enumerate) -> *mut udev_list_entry;
    // udev_queue
    pub fn udev_queue_ref(udev_queue: *mut udev_queue) -> *mut udev_queue;
    pub fn udev_queue_unref(udev_queue: *mut udev_queue) -> *mut udev_queue;
    pub fn udev_queue_get_udev(udev_queue: *mut udev_queue) -> *mut udev;
    pub fn udev_queue_new(udev: *mut udev) -> *mut udev_queue;
    pub fn udev_queue_get_udev_is_active(udev_queue: *mut udev_queue) -> c_int;
    pub fn udev_queue_get_queue_is_empty(udev_queue: *mut udev_queue) -> c_int;
    pub fn udev_queue_get_fd(udev_queue: *mut udev_queue) -> c_int;
    pub fn udev_queue_flush(udev_queue: *mut udev_queue) -> c_int;
    // udev_util
    pub fn udev_util_encode_string(str: *const c_char, str_enc: *mut c_char, len: size_t) -> c_int;
}
#[cfg(hwdb)]
mod hwdb {
    //! Optional bindings for the udev hardware database, compiled only
    //! when the `hwdb` cfg flag is set.
    use super::libc::{c_void,c_uint,c_char};
    use super::{udev,udev_list_entry};
    // Opaque hwdb handle; the private `c_void` field keeps the struct
    // from being zero-sized and from being constructed outside libudev.
    #[repr(C)]
    pub struct udev_hwdb {
        __private: c_void,
    }
    extern "C" {
        pub fn udev_hwdb_ref(hwdb: *mut udev_hwdb) -> *mut udev_hwdb;
        pub fn udev_hwdb_unref(hwdb: *mut udev_hwdb) -> *mut udev_hwdb;
        pub fn udev_hwdb_new(udev: *mut udev) -> *mut udev_hwdb;
        pub fn udev_hwdb_get_properties_list_entry(hwdb: *mut udev_hwdb, modalias: *const c_char, flags: c_uint) -> *mut udev_list_entry;
    }
}
|
//! [](http://i.imgur.com/RUEw8EW.png)
//!
//! bspline
//! ===
//! A library for computing B-spline interpolating curves on generic control points. bspline can
//! be used to evaluate B-splines of varying orders on any type that can be linearly interpolated,
//! ranging from floats, positions, RGB colors to transformation matrices and so on.
//!
//! The bspline logo was generated using this library with a cubic B-spline in 2D for the positioning
//! of the curve and a quadratic B-spline in RGB space to color it (check out the
//! [logo](https://github.com/Twinklebear/bspline/blob/master/examples/logo.rs) example!). Other
//! much simpler examples of 1D and 2D quadratic, cubic and quartic B-splines can also be found in
//! the [examples](https://github.com/Twinklebear/bspline/tree/master/examples).
//!
//! # 1D Example
//!
//! This example shows how to create the 1D cardinal cubic B-spline example shown on [Wikipedia's
//! B-splines page](https://en.wikipedia.org/wiki/B-spline). For examples of evaluating the spline
//! to an image and saving the output see the [examples](https://github.com/Twinklebear/bspline/tree/master/examples).
//!
//! ```rust
//! let points = vec![0.0, 0.0, 0.0, 6.0, 0.0, 0.0, 0.0];
//! let knots = vec![-2.0, -2.0, -2.0, -2.0, -1.0, 0.0, 1.0, 2.0, 2.0, 2.0, 2.0];
//! let degree = 3;
//! let spline = bspline::BSpline::new(degree, points, knots);
//! ```
//!
//! # Readings on B-splines
//!
//! The library assumes you are familiar at some level with how B-splines work, e.g. how
//! control points and knots affect the curve produced. No interactive
//! editor is provided (at least currently). Some good places to start reading about B-splines to
//! effectively use this library can be found below.
//!
//! - [Wikipedia page on B-splines](https://en.wikipedia.org/wiki/B-spline)
//! - [Fundamentals of Computer Graphics](http://www.amazon.com/Fundamentals-Computer-Graphics-Peter-Shirley/dp/1568814690)
//! (has a good chapter on curves)
//! - [Splines and B-splines: An Introduction](http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v07/undervisningsmateriale/kap1.pdf)
//! - [Geometric Modeling](http://atrey.karlin.mff.cuni.cz/projekty/vrr/doc/grafika/geometric%20modelling.pdf)
//! - [A nice set of interactive examples](https://www.ibiblio.org/e-notes/Splines/Intro.htm)
//!
use std::ops::{Mul, Add};
use std::slice::Iter;
/// The interpolate trait is used to linearly interpolate between two types (or in the
/// case of Quaternions, spherically linearly interpolate). The B-spline curve uses this
/// trait to compute points on the curve for the given parameter value.
///
/// A default implementation of this trait is provided for all `T` that are `Mul<f32, Output = T>
/// + Add<Output = T> + Copy` since the interpolation provided by the trait is expected to be
/// a simple linear interpolation.
/// Types that can be linearly interpolated between two of their own values
/// (or, for rotations such as quaternions, spherically interpolated). The
/// B-spline curve evaluator relies on this operation to blend control
/// points at a given parameter value.
///
/// Every `T` that is `Mul<f32, Output = T> + Add<Output = T> + Copy`
/// receives a blanket implementation performing the standard lerp.
pub trait Interpolate {
    /// Blend between `self` and `other` by the factor `t`, e.g. for floats:
    ///
    /// ```text
    /// self * (1.0 - t) + other * t
    /// ```
    ///
    /// Returning anything other than a true linear interpolation of the
    /// two values may yield an incorrect curve.
    fn interpolate(&self, other: &Self, t: f32) -> Self;
}
impl<T: Mul<f32, Output = T> + Add<Output = T> + Copy> Interpolate for T {
    fn interpolate(&self, other: &Self, t: f32) -> Self {
        // Weight each endpoint separately, then combine the contributions.
        let from_self = *self * (1.0 - t);
        let from_other = *other * t;
        from_self + from_other
    }
}
/// Represents a B-spline that will use polynomials of the specified degree to interpolate
/// between the control points given the knots.
#[derive(Clone)]
pub struct BSpline<T: Interpolate + Copy> {
    /// Degree of the polynomial that we use to make the curve segments
    degree: usize,
    /// Control points for the curve
    control_points: Vec<T>,
    /// The knot vector; `new` sorts it into non-decreasing order
    knots: Vec<f32>,
}
impl<T: Interpolate + Copy> BSpline<T> {
    /// Create a new B-spline curve of the desired `degree` that will interpolate
    /// the `control_points` using the `knots`. The knots should be sorted in non-decreasing
    /// order, otherwise they will be sorted for you, which may lead to undesired knots
    /// for control points. Note that this is in terms of the interpolating polynomial degree;
    /// if you are familiar with the convention of "B-spline curve order", the degree is `curve_order - 1`.
    ///
    /// # Panics
    ///
    /// Your curve must have a valid number of control points and knots or the function will panic. A B-spline
    /// curve requires at least as many control points as the degree (`control_points.len() >=
    /// degree`) and the number of knots should be equal to `control_points.len() + degree + 1`.
    pub fn new(degree: usize, control_points: Vec<T>, mut knots: Vec<f32>) -> BSpline<T> {
        if control_points.len() < degree {
            panic!("Too few control points for curve");
        }
        if knots.len() != control_points.len() + degree + 1 {
            // Pass the format string and arguments directly to `panic!`:
            // `panic!(format!(...))` allocates an intermediate String and is
            // rejected outright by Rust editions 2021 and later.
            panic!("Invalid number of knots, got {}, expected {}", knots.len(),
                   control_points.len() + degree + 1);
        }
        // NOTE: a NaN knot would make `partial_cmp` return None and panic
        // here; sorted knots are required by `upper_bounds` in `point`.
        knots.sort_by(|a, b| a.partial_cmp(b).unwrap());
        BSpline { degree: degree, control_points: control_points, knots: knots }
    }
    /// Compute a point on the curve at `t`; the parameter **must** be in the inclusive range
    /// of values returned by `knot_domain`. If `t` is out of bounds this function will assert
    /// on debug builds and on release builds you'll likely get an out of bounds crash.
    pub fn point(&self, t: f32) -> T {
        debug_assert!(t >= self.knot_domain().0 && t <= self.knot_domain().1);
        // Find the first index with a knot value greater than the t we're searching for. We want
        // to find i such that: knot[i] <= t < knot[i + 1]
        let i = match upper_bounds(&self.knots[..], t) {
            Some(0) => self.degree,
            Some(x) => x,
            None => self.knots.len() - self.degree - 1,
        };
        self.de_boor_iterative(t, i)
    }
    /// Get an iterator over the control points.
    pub fn control_points(&self) -> Iter<T> {
        self.control_points.iter()
    }
    /// Get an iterator over the knots.
    pub fn knots(&self) -> Iter<f32> {
        self.knots.iter()
    }
    /// Get the min and max knot domain values for finding the `t` range to compute
    /// the curve over. The curve is only defined over this (inclusive) range; passing
    /// a `t` value out of this range will assert on debug builds and likely result in
    /// a crash on release builds.
    pub fn knot_domain(&self) -> (f32, f32) {
        (self.knots[self.degree], self.knots[self.knots.len() - 1 - self.degree])
    }
    /// Iteratively compute de Boor's B-spline algorithm, this computes the recursive
    /// de Boor algorithm tree from the bottom up. At each level we use the results
    /// from the previous one to compute this level and store the results in the
    /// array indices we no longer need to compute the current level (the left one
    /// used computing node j).
    fn de_boor_iterative(&self, t: f32, i_start: usize) -> T {
        // Seed the bottom level of the tree with the control points that
        // influence the curve segment containing t.
        let mut tmp = Vec::with_capacity(self.degree + 1);
        for j in 0..self.degree + 1 {
            let p = j + i_start - self.degree - 1;
            tmp.push(self.control_points[p]);
        }
        for lvl in 0..self.degree {
            let k = lvl + 1;
            for j in 0..self.degree - lvl {
                let i = j + k + i_start - self.degree;
                // Blend factor between the two child nodes of node j; NaN
                // can only arise from a zero-length knot interval.
                let alpha = (t - self.knots[i - 1]) / (self.knots[i + self.degree - k] - self.knots[i - 1]);
                if alpha.is_nan() {
                    panic!("alpha has nans");
                }
                tmp[j] = tmp[j].interpolate(&tmp[j + 1], alpha);
            }
        }
        tmp[0]
    }
}
/// Return the index of the first element greater than the value passed.
/// The data **must** be sorted.
/// If no element greater than the value passed is found the function returns None.
/// Locate the index of the first element strictly greater than `value`.
/// The data **must** be sorted. Returns `None` when no element exceeds
/// `value`.
fn upper_bounds(data: &[f32], value: f32) -> Option<usize> {
    let mut lo = 0usize;
    let mut remaining = data.len();
    // Classic binary search over the predicate "data[i] > value": shrink
    // the candidate range until `lo` lands on the first index satisfying it.
    while remaining > 0 {
        let half = remaining / 2;
        let probe = lo + half;
        // Using the negated `lt` keeps NaN handling identical to the
        // original comparison (any comparison against NaN is false).
        if !value.lt(&data[probe]) {
            lo = probe + 1;
            remaining -= half + 1;
        } else {
            remaining = half;
        }
    }
    // `lo` past the end means every element was <= value.
    if lo < data.len() {
        Some(lo)
    } else {
        None
    }
}
#[cfg(test)]
mod test {
    use super::BSpline;
    /// Check that the bspline returns the values we expect it to at various t values
    // Note: uses exact f32 equality, so the expected values must match the
    // evaluator's floating-point results bit-for-bit.
    fn check_bspline(spline: &BSpline<f32>, expect: &Vec<(f32, f32)>) -> bool {
        expect.iter().fold(true, |ac, &(t, x)| ac && spline.point(t) == x)
    }
    #[test]
    fn linear_bspline() {
        // Degree-1 spline through 0 and 1: identity on [0, 1].
        let expect = vec![(0.0, 0.0), (0.2, 0.2), (0.4, 0.4), (0.6, 0.6),
                          (0.8, 0.8), (1.0, 1.0)];
        let points = vec![0.0, 1.0];
        let knots = vec![0.0, 0.0, 1.0, 1.0];
        let degree = 1;
        let spline = BSpline::new(degree, points, knots);
        assert!(check_bspline(&spline, &expect));
    }
    #[test]
    fn quadratic_bspline() {
        // Symmetric bump peaking at t = 1.5.
        let expect = vec![(0.0, 0.0), (0.5, 0.125), (1.0, 0.5), (1.4, 0.74), (1.5, 0.75),
                          (1.6, 0.74), (2.0, 0.5), (2.5, 0.125), (3.0, 0.0)];
        let points = vec![0.0, 0.0, 1.0, 0.0, 0.0];
        let knots = vec![0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0];
        let degree = 2;
        let spline = BSpline::new(degree, points, knots);
        assert!(check_bspline(&spline, &expect));
    }
    #[test]
    fn cubic_bspline() {
        // Matches the cardinal cubic example from the module docs.
        let expect = vec![(-2.0, 0.0), (-1.5, 0.125), (-1.0, 1.0), (-0.6, 2.488),
                          (0.0, 4.0), (0.5, 2.875), (1.5, 0.12500001), (2.0, 0.0)];
        let points = vec![0.0, 0.0, 0.0, 6.0, 0.0, 0.0, 0.0];
        let knots = vec![-2.0, -2.0, -2.0, -2.0, -1.0, 0.0, 1.0, 2.0, 2.0, 2.0, 2.0];
        let degree = 3;
        let spline = BSpline::new(degree, points, knots);
        assert!(check_bspline(&spline, &expect));
    }
    #[test]
    fn quartic_bspline() {
        let expect = vec![(0.0, 0.0), (0.4, 0.0010666668), (1.0, 0.041666668),
                          (1.5, 0.19791667), (2.0, 0.4583333), (2.5, 0.5989583),
                          (3.0, 0.4583333), (3.2, 0.35206667), (4.1, 0.02733751),
                          (4.5, 0.002604167), (5.0, 0.0)];
        let points = vec![0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0];
        let knots = vec![0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0];
        let degree = 4;
        let spline = BSpline::new(degree, points, knots);
        assert!(check_bspline(&spline, &expect));
    }
}
Clarify doc on knot domain
//! [](http://i.imgur.com/RUEw8EW.png)
//!
//! bspline
//! ===
//! A library for computing B-spline interpolating curves on generic control points. bspline can
//! be used to evaluate B-splines of varying orders on any type that can be linearly interpolated,
//! ranging from floats, positions, RGB colors to transformation matrices and so on.
//!
//! The bspline logo was generated using this library with a cubic B-spline in 2D for the positioning
//! of the curve and a quadratic B-spline in RGB space to color it (check out the
//! [logo](https://github.com/Twinklebear/bspline/blob/master/examples/logo.rs) example!). Other
//! much simpler examples of 1D and 2D quadratic, cubic and quartic B-splines can also be found in
//! the [examples](https://github.com/Twinklebear/bspline/tree/master/examples).
//!
//! # 1D Example
//!
//! This example shows how to create the 1D cardinal cubic B-spline example shown on [Wikipedia's
//! B-splines page](https://en.wikipedia.org/wiki/B-spline). For examples of evaluating the spline
//! to an image and saving the output see the [examples](https://github.com/Twinklebear/bspline/tree/master/examples).
//!
//! ```rust
//! let points = vec![0.0, 0.0, 0.0, 6.0, 0.0, 0.0, 0.0];
//! let knots = vec![-2.0, -2.0, -2.0, -2.0, -1.0, 0.0, 1.0, 2.0, 2.0, 2.0, 2.0];
//! let degree = 3;
//! let spline = bspline::BSpline::new(degree, points, knots);
//! ```
//!
//! # Readings on B-splines
//!
//! The library assumes you are familiar at some level with how B-splines work, e.g. how
//! control points and knots affect the curve produced. No interactive
//! editor is provided (at least currently). Some good places to start reading about B-splines to
//! effectively use this library can be found below.
//!
//! - [Wikipedia page on B-splines](https://en.wikipedia.org/wiki/B-spline)
//! - [Fundamentals of Computer Graphics](http://www.amazon.com/Fundamentals-Computer-Graphics-Peter-Shirley/dp/1568814690)
//! (has a good chapter on curves)
//! - [Splines and B-splines: An Introduction](http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v07/undervisningsmateriale/kap1.pdf)
//! - [Geometric Modeling](http://atrey.karlin.mff.cuni.cz/projekty/vrr/doc/grafika/geometric%20modelling.pdf)
//! - [A nice set of interactive examples](https://www.ibiblio.org/e-notes/Splines/Intro.htm)
//!
use std::ops::{Mul, Add};
use std::slice::Iter;
/// The interpolate trait is used to linearly interpolate between two types (or in the
/// case of Quaternions, spherically linearly interpolate). The B-spline curve uses this
/// trait to compute points on the curve for the given parameter value.
///
/// A default implementation of this trait is provided for all `T` that are `Mul<f32, Output = T>
/// + Add<Output = T> + Copy`, since the interpolation provided by the trait is expected to be
/// a simple linear interpolation.
pub trait Interpolate {
    /// Linearly interpolate between `self` and `other` using `t`, for example with floats:
    ///
    /// ```text
    /// self * (1.0 - t) + other * t
    /// ```
    ///
    /// If the result returned is not a correct linear interpolation of the values, the
    /// curve produced using this implementation may not be correct.
    fn interpolate(&self, other: &Self, t: f32) -> Self;
}
impl<T: Mul<f32, Output = T> + Add<Output = T> + Copy> Interpolate for T {
    /// Standard linear interpolation: weight `self` by `1 - t` and `other` by `t`.
    fn interpolate(&self, other: &Self, t: f32) -> Self {
        let from = *self * (1.0 - t);
        let to = *other * t;
        from + to
    }
}
/// Represents a B-spline that will use polynomials of the specified degree to interpolate
/// between the control points given the knots.
#[derive(Clone)]
pub struct BSpline<T: Interpolate + Copy> {
    /// Degree of the polynomial that we use to make the curve segments.
    /// Note this is the polynomial degree, i.e. `curve_order - 1`.
    degree: usize,
    /// Control points for the curve; `new` requires at least `degree` of them.
    control_points: Vec<T>,
    /// The knot vector; `new` requires `control_points.len() + degree + 1`
    /// knots and keeps them sorted in non-decreasing order.
    knots: Vec<f32>,
}
impl<T: Interpolate + Copy> BSpline<T> {
    /// Create a new B-spline curve of the desired `degree` that will interpolate
    /// the `control_points` using the `knots`. The knots should be sorted in non-decreasing
    /// order, otherwise they will be sorted for you, which may lead to undesired knots
    /// for control points. Note that this is in terms of the interpolating polynomial degree;
    /// if you are familiar with the convention of "B-spline curve order", the degree is `curve_order - 1`.
    ///
    /// # Panics
    ///
    /// Your curve must have a valid number of control points and knots or the function will panic.
    /// A B-spline curve requires at least as many control points as the degree
    /// (`control_points.len() >= degree`) and the number of knots should be equal to
    /// `control_points.len() + degree + 1`.
    pub fn new(degree: usize, control_points: Vec<T>, mut knots: Vec<f32>) -> BSpline<T> {
        if control_points.len() < degree {
            panic!("Too few control points for curve");
        }
        if knots.len() != control_points.len() + degree + 1 {
            // Pass the format arguments straight to `panic!`: `panic!(format!(..))`
            // is deprecated and becomes a hard error in the 2021 edition.
            panic!("Invalid number of knots, got {}, expected {}", knots.len(),
                   control_points.len() + degree + 1);
        }
        knots.sort_by(|a, b| a.partial_cmp(b).unwrap());
        BSpline { degree: degree, control_points: control_points, knots: knots }
    }
    /// Compute a point on the curve at `t`. The parameter **must** be in the inclusive range
    /// of values returned by `knot_domain`. If `t` is out of bounds this function will assert
    /// on debug builds and on release builds you'll likely get an out of bounds crash.
    pub fn point(&self, t: f32) -> T {
        // The domain is inclusive at both ends: the tests below evaluate the
        // curve at `t == knot_domain().1`, so the upper bound must be checked
        // with `<=` (a strict `<` would trip the assert on valid input).
        debug_assert!(t >= self.knot_domain().0 && t <= self.knot_domain().1);
        // Find the first index with a knot value greater than the t we're searching for. We want
        // to find i such that: knot[i] <= t < knot[i + 1]
        let i = match upper_bounds(&self.knots[..], t) {
            Some(x) if x == 0 => self.degree,
            Some(x) => x,
            // `t` is at the end of the domain: evaluate on the last segment.
            None => self.knots.len() - self.degree - 1,
        };
        self.de_boor_iterative(t, i)
    }
    /// Get an iterator over the control points.
    pub fn control_points(&self) -> Iter<T> {
        self.control_points.iter()
    }
    /// Get an iterator over the knots.
    pub fn knots(&self) -> Iter<f32> {
        self.knots.iter()
    }
    /// Get the min and max knot domain values for finding the `t` range to compute
    /// the curve over. The curve is only defined over this inclusive `[min, max]` range;
    /// passing a `t` value out of this range will assert on debug builds and likely
    /// result in a crash on release builds.
    pub fn knot_domain(&self) -> (f32, f32) {
        (self.knots[self.degree], self.knots[self.knots.len() - 1 - self.degree])
    }
    /// Iteratively compute de Boor's B-spline algorithm, this computes the recursive
    /// de Boor algorithm tree from the bottom up. At each level we use the results
    /// from the previous one to compute this level and store the results in the
    /// array indices we no longer need to compute the current level (the left one
    /// used computing node j). `i_start` is the index of the first knot greater
    /// than `t`, as determined by `point`.
    fn de_boor_iterative(&self, t: f32, i_start: usize) -> T {
        // Seed the bottom level with the degree + 1 control points that
        // influence the curve segment containing `t`.
        let mut tmp = Vec::with_capacity(self.degree + 1);
        for j in 0..self.degree + 1 {
            let p = j + i_start - self.degree - 1;
            tmp.push(self.control_points[p]);
        }
        for lvl in 0..self.degree {
            let k = lvl + 1;
            for j in 0..self.degree - lvl {
                let i = j + k + i_start - self.degree;
                // Blend weight for this node pair; a zero-length knot span would
                // divide 0 by 0 and yield NaN, which is treated as a hard error.
                let alpha = (t - self.knots[i - 1]) / (self.knots[i + self.degree - k] - self.knots[i - 1]);
                if alpha.is_nan() {
                    panic!("alpha has nans");
                }
                tmp[j] = tmp[j].interpolate(&tmp[j + 1], alpha);
            }
        }
        tmp[0]
    }
}
/// Return the index of the first element strictly greater than `value`.
/// The data **must** be sorted in non-decreasing order.
/// Returns `None` when no element greater than `value` exists.
fn upper_bounds(data: &[f32], value: f32) -> Option<usize> {
    let mut lo = 0usize;
    let mut remaining = data.len() as isize;
    while remaining > 0 {
        let half = remaining / 2;
        let mid = lo + half as usize;
        if value.lt(&data[mid]) {
            // data[mid] is a candidate upper bound; keep searching the left half.
            remaining = half;
        } else {
            // data[mid] <= value (or incomparable): the answer lies to the right.
            lo = mid + 1;
            remaining -= half + 1;
        }
    }
    if lo < data.len() { Some(lo) } else { None }
}
#[cfg(test)]
mod test {
    use super::BSpline;
    /// True when the spline evaluates to the expected value at every `t` sample.
    fn check_bspline(spline: &BSpline<f32>, expect: &Vec<(f32, f32)>) -> bool {
        expect.iter().all(|&(t, x)| spline.point(t) == x)
    }
    #[test]
    fn linear_bspline() {
        let points = vec![0.0, 1.0];
        let knots = vec![0.0, 0.0, 1.0, 1.0];
        let spline = BSpline::new(1, points, knots);
        let expect = vec![(0.0, 0.0), (0.2, 0.2), (0.4, 0.4), (0.6, 0.6),
                          (0.8, 0.8), (1.0, 1.0)];
        assert!(check_bspline(&spline, &expect));
    }
    #[test]
    fn quadratic_bspline() {
        let points = vec![0.0, 0.0, 1.0, 0.0, 0.0];
        let knots = vec![0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0];
        let spline = BSpline::new(2, points, knots);
        let expect = vec![(0.0, 0.0), (0.5, 0.125), (1.0, 0.5), (1.4, 0.74), (1.5, 0.75),
                          (1.6, 0.74), (2.0, 0.5), (2.5, 0.125), (3.0, 0.0)];
        assert!(check_bspline(&spline, &expect));
    }
    #[test]
    fn cubic_bspline() {
        let points = vec![0.0, 0.0, 0.0, 6.0, 0.0, 0.0, 0.0];
        let knots = vec![-2.0, -2.0, -2.0, -2.0, -1.0, 0.0, 1.0, 2.0, 2.0, 2.0, 2.0];
        let spline = BSpline::new(3, points, knots);
        let expect = vec![(-2.0, 0.0), (-1.5, 0.125), (-1.0, 1.0), (-0.6, 2.488),
                          (0.0, 4.0), (0.5, 2.875), (1.5, 0.12500001), (2.0, 0.0)];
        assert!(check_bspline(&spline, &expect));
    }
    #[test]
    fn quartic_bspline() {
        let points = vec![0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0];
        let knots = vec![0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0];
        let spline = BSpline::new(4, points, knots);
        let expect = vec![(0.0, 0.0), (0.4, 0.0010666668), (1.0, 0.041666668),
                          (1.5, 0.19791667), (2.0, 0.4583333), (2.5, 0.5989583),
                          (3.0, 0.4583333), (3.2, 0.35206667), (4.1, 0.02733751),
                          (4.5, 0.002604167), (5.0, 0.0)];
        assert!(check_bspline(&spline, &expect));
    }
}
|
remove empty lib.rs
|
#![feature(rustc_private)]
#![feature(map_first_last)]
#![feature(map_try_insert)]
#![feature(never_type)]
#![feature(or_patterns)]
#![feature(try_blocks)]
#![warn(rust_2018_idioms)]
#![allow(clippy::cast_lossless)]
extern crate rustc_apfloat;
extern crate rustc_ast;
#[macro_use] extern crate rustc_middle;
extern crate rustc_data_structures;
extern crate rustc_hir;
extern crate rustc_index;
extern crate rustc_mir;
extern crate rustc_span;
extern crate rustc_target;
mod data_race;
mod diagnostics;
mod eval;
mod helpers;
mod intptrcast;
mod machine;
mod mono_hash_map;
mod operator;
mod range_map;
mod shims;
mod stacked_borrows;
mod sync;
mod thread;
mod vector_clock;
// Establish a "crate-wide prelude": we often import `crate::*`.
// Make all those symbols available in the same place as our own.
pub use rustc_mir::interpret::*;
// Resolve ambiguity.
pub use rustc_mir::interpret::{self, AllocMap, PlaceTy};
pub use crate::shims::dlsym::{Dlsym, EvalContextExt as _};
pub use crate::shims::env::{EnvVars, EvalContextExt as _};
pub use crate::shims::foreign_items::EvalContextExt as _;
pub use crate::shims::intrinsics::EvalContextExt as _;
pub use crate::shims::os_str::EvalContextExt as _;
pub use crate::shims::time::EvalContextExt as _;
pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as _};
pub use crate::shims::tls::{EvalContextExt as _, TlsData};
pub use crate::shims::EvalContextExt as _;
pub use crate::data_race::{
AtomicReadOp, AtomicWriteOp, AtomicRwOp, AtomicFenceOp,
EvalContextExt as DataRaceEvalContextExt
};
pub use crate::diagnostics::{
register_diagnostic, report_error, EvalContextExt as DiagnosticsEvalContextExt,
TerminationInfo, NonHaltingDiagnostic,
};
pub use crate::eval::{create_ecx, eval_main, AlignmentCheck, MiriConfig};
pub use crate::helpers::EvalContextExt as HelpersEvalContextExt;
pub use crate::machine::{
AllocExtra, Evaluator, FrameData, MemoryExtra, MiriEvalContext, MiriEvalContextExt,
MiriMemoryKind, NUM_CPUS, PAGE_SIZE, STACK_ADDR, STACK_SIZE,
};
pub use crate::mono_hash_map::MonoHashMap;
pub use crate::operator::EvalContextExt as OperatorEvalContextExt;
pub use crate::range_map::RangeMap;
pub use crate::stacked_borrows::{
EvalContextExt as StackedBorEvalContextExt, Item, Permission, CallId, PtrId, Stack, Stacks, Tag,
};
pub use crate::thread::{
EvalContextExt as ThreadsEvalContextExt, SchedulingAction, ThreadId, ThreadManager, ThreadState,
};
pub use crate::sync::{
EvalContextExt as SyncEvalContextExt, CondvarId, MutexId, RwLockId
};
pub use crate::vector_clock::{
VClock, VectorIdx, VTimestamp
};
/// Rustc arguments that Miri wants inserted at the beginning of the argument
/// list by default, for maximal validation power.
pub const MIRI_DEFAULT_ARGS: &[&str] = &[
    "-Zalways-encode-mir",
    "-Zmir-emit-retag",
    "-Zmir-opt-level=0",
    "--cfg=miri",
    "-Cdebug-assertions=on",
];
Remove `#![feature(or_patterns)]`
#![feature(rustc_private)]
#![feature(map_first_last)]
#![feature(map_try_insert)]
#![feature(never_type)]
#![feature(try_blocks)]
#![warn(rust_2018_idioms)]
#![allow(clippy::cast_lossless)]
extern crate rustc_apfloat;
extern crate rustc_ast;
#[macro_use] extern crate rustc_middle;
extern crate rustc_data_structures;
extern crate rustc_hir;
extern crate rustc_index;
extern crate rustc_mir;
extern crate rustc_span;
extern crate rustc_target;
mod data_race;
mod diagnostics;
mod eval;
mod helpers;
mod intptrcast;
mod machine;
mod mono_hash_map;
mod operator;
mod range_map;
mod shims;
mod stacked_borrows;
mod sync;
mod thread;
mod vector_clock;
// Establish a "crate-wide prelude": we often import `crate::*`.
// Make all those symbols available in the same place as our own.
pub use rustc_mir::interpret::*;
// Resolve ambiguity.
pub use rustc_mir::interpret::{self, AllocMap, PlaceTy};
pub use crate::shims::dlsym::{Dlsym, EvalContextExt as _};
pub use crate::shims::env::{EnvVars, EvalContextExt as _};
pub use crate::shims::foreign_items::EvalContextExt as _;
pub use crate::shims::intrinsics::EvalContextExt as _;
pub use crate::shims::os_str::EvalContextExt as _;
pub use crate::shims::time::EvalContextExt as _;
pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as _};
pub use crate::shims::tls::{EvalContextExt as _, TlsData};
pub use crate::shims::EvalContextExt as _;
pub use crate::data_race::{
AtomicReadOp, AtomicWriteOp, AtomicRwOp, AtomicFenceOp,
EvalContextExt as DataRaceEvalContextExt
};
pub use crate::diagnostics::{
register_diagnostic, report_error, EvalContextExt as DiagnosticsEvalContextExt,
TerminationInfo, NonHaltingDiagnostic,
};
pub use crate::eval::{create_ecx, eval_main, AlignmentCheck, MiriConfig};
pub use crate::helpers::EvalContextExt as HelpersEvalContextExt;
pub use crate::machine::{
AllocExtra, Evaluator, FrameData, MemoryExtra, MiriEvalContext, MiriEvalContextExt,
MiriMemoryKind, NUM_CPUS, PAGE_SIZE, STACK_ADDR, STACK_SIZE,
};
pub use crate::mono_hash_map::MonoHashMap;
pub use crate::operator::EvalContextExt as OperatorEvalContextExt;
pub use crate::range_map::RangeMap;
pub use crate::stacked_borrows::{
EvalContextExt as StackedBorEvalContextExt, Item, Permission, CallId, PtrId, Stack, Stacks, Tag,
};
pub use crate::thread::{
EvalContextExt as ThreadsEvalContextExt, SchedulingAction, ThreadId, ThreadManager, ThreadState,
};
pub use crate::sync::{
EvalContextExt as SyncEvalContextExt, CondvarId, MutexId, RwLockId
};
pub use crate::vector_clock::{
VClock, VectorIdx, VTimestamp
};
/// Rustc arguments that Miri wants inserted at the beginning of the argument
/// list by default, for maximal validation power.
pub const MIRI_DEFAULT_ARGS: &[&str] = &[
    "-Zalways-encode-mir",
    "-Zmir-emit-retag",
    "-Zmir-opt-level=0",
    "--cfg=miri",
    "-Cdebug-assertions=on",
];
|
#![cfg_attr(test, deny(warnings))]
#![deny(missing_docs)]
//! A set of middleware for sharing data between requests in the Iron
//! framework.
extern crate iron;
extern crate plugin;
use iron::{Request, Response, BeforeMiddleware, AfterMiddleware, IronResult};
use iron::typemap::Assoc;
use std::sync::{Arc, RWLock, Mutex};
use plugin::{PluginFor, Phantom};
/// Middleware for data that persists between requests with read and write capabilities.
///
/// The data is stored behind a `RWLock`, so multiple read locks
/// can be taken out concurrently.
///
/// If most threads need to take out a write lock, you may want to
/// consider `Write`, which stores the data behind a `Mutex`, which
/// has a faster locking speed.
///
/// `State` can be linked as `BeforeMiddleware` to add data to the `Request`
/// extensions and it can be linked as an `AfterMiddleware` to add data to
/// the `Response` extensions.
///
/// `State` also implements `PluginFor`, so the data stored within can be
/// accessed through `request.get::<State<P, D>>()` as an `Arc<RWLock<D>>`.
pub struct State<P, D> {
    // Shared handle cloned into every request/response extension map; `P` is
    // only used as the typemap key (see the `Assoc` impls).
    data: Arc<RWLock<D>>
}
/// Middleware for data that persists between Requests with read-only capabilities.
///
/// The data is stored behind an Arc, so multiple threads can have
/// concurrent, non-blocking access.
///
/// `Read` can be linked as `BeforeMiddleware` to add data to the `Request`
/// extensions and it can be linked as an `AfterMiddleware` to add data to
/// the `Response` extensions.
///
/// `Read` also implements `PluginFor`, so the data stored within can be
/// accessed through `request.get::<Read<P, D>>()` as an `Arc<D>`.
pub struct Read<P, D> {
    // Immutable shared value; no lock is needed for read-only access.
    data: Arc<D>
}
/// Middleware for data that persists between Requests for data which mostly
/// needs to be written instead of read.
///
/// The data is stored behind a `Mutex`, so only one request at a time can
/// access the data. This is more performant than `State` in the case where
/// most uses of the data require a write lock.
///
/// `Write` can be linked as `BeforeMiddleware` to add data to the `Request`
/// extensions and it can be linked as an `AfterMiddleware` to add data to
/// the `Response` extensions.
///
/// `Write` also implements `PluginFor`, so the data stored within can be
/// accessed through `request.get::<Write<P, D>>()` as an `Arc<Mutex<D>>`.
pub struct Write<P, D> {
    // Exclusive-access shared value; one request at a time holds the lock.
    data: Arc<Mutex<D>>
}
// Cloning any of the middleware only clones the shared `Arc` handle (a
// refcount bump), so no `Clone` bound is required on the stored data `D`.
impl<P, D: Send + Sync> Clone for Read<P, D> {
    fn clone(&self) -> Read<P, D> {
        Read { data: self.data.clone() }
    }
}
impl<P, D: Send + Sync> Clone for State<P, D> {
    fn clone(&self) -> State<P, D> {
        State { data: self.data.clone() }
    }
}
impl<P, D: Send> Clone for Write<P, D> {
    fn clone(&self) -> Write<P, D> {
        Write { data: self.data.clone() }
    }
}
// Typemap key registrations: a `State<P, D>` entry stores an `Arc<RWLock<D>>`,
// a `Read<P, D>` entry an `Arc<D>` and a `Write<P, D>` entry an `Arc<Mutex<D>>`.
impl<P, D:'static> Assoc<Arc<RWLock<D>>> for State<P, D> where P: Assoc<D> {}
impl<P, D:'static> Assoc<Arc<D>> for Read<P, D> where P: Assoc<D> {}
impl<P, D:'static> Assoc<Arc<Mutex<D>>> for Write<P, D> where P: Assoc<D> {}
// The `PluginFor` impls let handlers call `request.get::<State<P, D>>()`
// (and the `Read`/`Write` equivalents) to fetch a clone of the shared handle
// out of the request extensions.
impl<P, D> PluginFor<Request, Arc<RWLock<D>>> for State<P, D>
    where D: Send + Sync,
          P: Assoc<D> {
    fn eval(req: &mut Request, _: Phantom<State<P, D>>) -> Option<Arc<RWLock<D>>> {
        req.extensions.get::<State<P, D>, Arc<RWLock<D>>>()
            .map(|x| x.clone())
    }
}
impl<P, D> PluginFor<Request, Arc<D>> for Read<P, D>
    where D: Send + Sync,
          P: Assoc<D> {
    fn eval(req: &mut Request, _: Phantom<Read<P, D>>) -> Option<Arc<D>> {
        req.extensions.get::<Read<P, D>, Arc<D>>()
            .map(|x| x.clone())
    }
}
impl<P, D> PluginFor<Request, Arc<Mutex<D>>> for Write<P, D>
    where D: Send,
          P: Assoc<D> {
    fn eval(req: &mut Request, _: Phantom<Write<P, D>>) -> Option<Arc<Mutex<D>>> {
        req.extensions.get::<Write<P, D>, Arc<Mutex<D>>>()
            .map(|x| x.clone())
    }
}
// Linked as `BeforeMiddleware`, each type inserts its shared handle into the
// `Request` extensions; linked as `AfterMiddleware`, it does the same for the
// `Response` extensions.
impl<D: Send + Sync, P: Assoc<D>> BeforeMiddleware for State<P, D> {
    fn before(&self, req: &mut Request) -> IronResult<()> {
        req.extensions.insert::<State<P, D>, Arc<RWLock<D>>>(self.data.clone());
        Ok(())
    }
}
impl<D: Send + Sync, P: Assoc<D>> AfterMiddleware for State<P, D> {
    fn after(&self, _: &mut Request, res: &mut Response) -> IronResult<()> {
        res.extensions.insert::<State<P, D>, Arc<RWLock<D>>>(self.data.clone());
        Ok(())
    }
}
impl<D: Send + Sync, P: Assoc<D>> BeforeMiddleware for Read<P, D> {
    fn before(&self, req: &mut Request) -> IronResult<()> {
        req.extensions.insert::<Read<P, D>, Arc<D>>(self.data.clone());
        Ok(())
    }
}
impl<D: Send + Sync, P: Assoc<D>> AfterMiddleware for Read<P, D> {
    fn after(&self, _: &mut Request, res: &mut Response) -> IronResult<()> {
        res.extensions.insert::<Read<P, D>, Arc<D>>(self.data.clone());
        Ok(())
    }
}
impl<D: Send, P: Assoc<D>> BeforeMiddleware for Write<P, D> {
    fn before(&self, req: &mut Request) -> IronResult<()> {
        req.extensions.insert::<Write<P, D>, Arc<Mutex<D>>>(self.data.clone());
        Ok(())
    }
}
impl<D: Send, P: Assoc<D>> AfterMiddleware for Write<P, D> {
    fn after(&self, _: &mut Request, res: &mut Response) -> IronResult<()> {
        res.extensions.insert::<Write<P, D>, Arc<Mutex<D>>>(self.data.clone());
        Ok(())
    }
}
impl<P, D> State<P, D> where D: Send + Sync, P: Assoc<D> {
    /// Construct a new pair of `State` that can be passed directly to `Chain::link`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn both(start: D) -> (State<P, D>, State<P, D>) {
        let x = State { data: Arc::new(RWLock::new(start)) };
        // Both halves hold the same Arc, so they share one underlying value.
        (x.clone(), x)
    }
    /// Construct a new `State` that can be passed directly to
    /// `Chain::link_before` or `Chain::link_after`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn one(start: D) -> State<P, D> {
        State { data: Arc::new(RWLock::new(start)) }
    }
}
impl<P, D> Read<P, D> where D: Send + Sync, P: Assoc<D> {
    /// Construct a new pair of `Read` that can be passed directly to `Chain::link`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn both(start: D) -> (Read<P, D>, Read<P, D>) {
        let x = Read { data: Arc::new(start) };
        // Both halves hold the same Arc, so they share one underlying value.
        (x.clone(), x)
    }
    /// Construct a new `Read` that can be passed directly to
    /// `Chain::link_before` or `Chain::link_after`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn one(start: D) -> Read<P, D> {
        Read { data: Arc::new(start) }
    }
}
impl<P, D> Write<P, D> where D: Send, P: Assoc<D> {
    /// Construct a new pair of `Write` that can be passed directly to `Chain::link`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn both(start: D) -> (Write<P, D>, Write<P, D>) {
        let x = Write { data: Arc::new(Mutex::new(start)) };
        // Both halves hold the same Arc, so they share one underlying value.
        (x.clone(), x)
    }
    /// Construct a new `Write` that can be passed directly to
    /// `Chain::link_before` or `Chain::link_after`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn one(start: D) -> Write<P, D> {
        Write { data: Arc::new(Mutex::new(start)) }
    }
}
(fix) Update for Iron, Plugin, and Rust.
#![cfg_attr(test, deny(warnings))]
#![allow(unstable)]
#![deny(missing_docs)]
//! A set of middleware for sharing data between requests in the Iron
//! framework.
extern crate iron;
extern crate plugin;
use iron::{Request, Response, BeforeMiddleware, AfterMiddleware, IronResult};
use iron::typemap::Key;
use std::sync::{Arc, RwLock, Mutex};
use plugin::{Plugin, Phantom};
/// Middleware for data that persists between requests with read and write capabilities.
///
/// The data is stored behind a `RwLock`, so multiple read locks
/// can be taken out concurrently.
///
/// If most threads need to take out a write lock, you may want to
/// consider `Write`, which stores the data behind a `Mutex`, which
/// has a faster locking speed.
///
/// `State` can be linked as `BeforeMiddleware` to add data to the `Request`
/// extensions and it can be linked as an `AfterMiddleware` to add data to
/// the `Response` extensions.
///
/// `State` also implements `Plugin`, so the data stored within can be
/// accessed through `request.get::<State<P>>()` as an `Arc<RwLock<P::Value>>`.
pub struct State<P: Key> {
    // Shared handle cloned into every request/response extension map; the key
    // type `P` determines the stored value type via `Key::Value`.
    data: Arc<RwLock<P::Value>>
}
/// Middleware for data that persists between Requests with read-only capabilities.
///
/// The data is stored behind an Arc, so multiple threads can have
/// concurrent, non-blocking access.
///
/// `Read` can be linked as `BeforeMiddleware` to add data to the `Request`
/// extensions and it can be linked as an `AfterMiddleware` to add data to
/// the `Response` extensions.
///
/// `Read` also implements `Plugin`, so the data stored within can be
/// accessed through `request.get::<Read<P>>()` as an `Arc<P::Value>`.
pub struct Read<P: Key> {
    // Immutable shared value; no lock is needed for read-only access.
    data: Arc<P::Value>
}
/// Middleware for data that persists between Requests for data which mostly
/// needs to be written instead of read.
///
/// The data is stored behind a `Mutex`, so only one request at a time can
/// access the data. This is more performant than `State` in the case where
/// most uses of the data require a write lock.
///
/// `Write` can be linked as `BeforeMiddleware` to add data to the `Request`
/// extensions and it can be linked as an `AfterMiddleware` to add data to
/// the `Response` extensions.
///
/// `Write` also implements `Plugin`, so the data stored within can be
/// accessed through `request.get::<Write<P>>()` as an `Arc<Mutex<P::Value>>`.
pub struct Write<P: Key> {
    // Exclusive-access shared value; one request at a time holds the lock.
    data: Arc<Mutex<P::Value>>
}
// NOTE(review): leftover empty macro — it expands to nothing and is never
// invoked anywhere in this file; consider removing it.
macro_rules! impl_persistent {
    ($name:ty, $out:ty, $param:ident) => {
    }
}
// Cloning any of the middleware only clones the shared `Arc` handle (a
// refcount bump), so no `Clone` bound is required on the stored value.
impl<P: Key> Clone for Read<P> where P::Value: Send + Sync {
    fn clone(&self) -> Read<P> {
        Read { data: self.data.clone() }
    }
}
impl<P: Key> Clone for State<P> where P::Value: Send + Sync {
    fn clone(&self) -> State<P> {
        State { data: self.data.clone() }
    }
}
impl<P: Key> Clone for Write<P> where P::Value: Send {
    fn clone(&self) -> Write<P> {
        Write { data: self.data.clone() }
    }
}
// Typemap key registrations: a `State<P>` entry stores an
// `Arc<RwLock<P::Value>>`, `Read<P>` an `Arc<P::Value>` and `Write<P>` an
// `Arc<Mutex<P::Value>>`.
impl<P: Key> Key for State<P> where P::Value: 'static {
    type Value = Arc<RwLock<P::Value>>;
}
impl<P: Key> Key for Read<P> where P::Value: 'static {
    type Value = Arc<P::Value>;
}
impl<P: Key> Key for Write<P> where P::Value: 'static {
    type Value = Arc<Mutex<P::Value>>;
}
// The `Plugin` impls let handlers call `request.get::<State<P>>()` (and the
// `Read`/`Write` equivalents) to fetch a clone of the shared handle out of
// the request extensions.
impl<P: Key> Plugin<Request> for State<P> where P::Value: Send + Sync {
    fn eval(req: &mut Request, _: Phantom<State<P>>) -> Option<Arc<RwLock<P::Value>>> {
        req.extensions.get::<State<P>>().cloned()
    }
}
impl<P: Key> Plugin<Request> for Read<P> where P::Value: Send + Sync {
    fn eval(req: &mut Request, _: Phantom<Read<P>>) -> Option<Arc<P::Value>> {
        req.extensions.get::<Read<P>>().cloned()
    }
}
impl<P: Key> Plugin<Request> for Write<P> where P::Value: Send {
    fn eval(req: &mut Request, _: Phantom<Write<P>>) -> Option<Arc<Mutex<P::Value>>> {
        req.extensions.get::<Write<P>>().cloned()
    }
}
// Linked as `BeforeMiddleware`, each type inserts its shared handle into the
// `Request` extensions.
impl<P: Key> BeforeMiddleware for State<P> where P::Value: Send + Sync {
    fn before(&self, req: &mut Request) -> IronResult<()> {
        req.extensions.insert::<State<P>>(self.data.clone());
        Ok(())
    }
}
impl<P: Key> BeforeMiddleware for Read<P> where P::Value: Send + Sync {
    fn before(&self, req: &mut Request) -> IronResult<()> {
        req.extensions.insert::<Read<P>>(self.data.clone());
        Ok(())
    }
}
impl<P: Key> BeforeMiddleware for Write<P> where P::Value: Send {
    fn before(&self, req: &mut Request) -> IronResult<()> {
        req.extensions.insert::<Write<P>>(self.data.clone());
        Ok(())
    }
}
// Linked as `AfterMiddleware`, the shared handle is inserted into the
// `Response` extensions instead.
impl<P: Key> AfterMiddleware for State<P> where P::Value: Send + Sync {
    fn after(&self, _: &mut Request, res: &mut Response) -> IronResult<()> {
        res.extensions.insert::<State<P>>(self.data.clone());
        Ok(())
    }
}
impl<P: Key> AfterMiddleware for Read<P> where P::Value: Send + Sync {
    fn after(&self, _: &mut Request, res: &mut Response) -> IronResult<()> {
        res.extensions.insert::<Read<P>>(self.data.clone());
        Ok(())
    }
}
impl<P: Key> AfterMiddleware for Write<P> where P::Value: Send {
    fn after(&self, _: &mut Request, res: &mut Response) -> IronResult<()> {
        res.extensions.insert::<Write<P>>(self.data.clone());
        Ok(())
    }
}
impl<P: Key> State<P> where P::Value: Send + Sync {
    /// Construct a new pair of `State` that can be passed directly to `Chain::link`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn both(start: P::Value) -> (State<P>, State<P>) {
        let x = State { data: Arc::new(RwLock::new(start)) };
        // Both halves hold the same Arc, so they share one underlying value.
        (x.clone(), x)
    }
    /// Construct a new `State` that can be passed directly to
    /// `Chain::link_before` or `Chain::link_after`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn one(start: P::Value) -> State<P> {
        State { data: Arc::new(RwLock::new(start)) }
    }
}
impl<P: Key> Read<P> where P::Value: Send + Sync {
    /// Construct a new pair of `Read` that can be passed directly to `Chain::link`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn both(start: P::Value) -> (Read<P>, Read<P>) {
        let x = Read { data: Arc::new(start) };
        // Both halves hold the same Arc, so they share one underlying value.
        (x.clone(), x)
    }
    /// Construct a new `Read` that can be passed directly to
    /// `Chain::link_before` or `Chain::link_after`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn one(start: P::Value) -> Read<P> {
        Read { data: Arc::new(start) }
    }
}
impl<P: Key> Write<P> where P::Value: Send {
    /// Construct a new pair of `Write` that can be passed directly to `Chain::link`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn both(start: P::Value) -> (Write<P>, Write<P>) {
        let x = Write { data: Arc::new(Mutex::new(start)) };
        // Both halves hold the same Arc, so they share one underlying value.
        (x.clone(), x)
    }
    /// Construct a new `Write` that can be passed directly to
    /// `Chain::link_before` or `Chain::link_after`.
    ///
    /// The data is initialized with the passed-in value.
    pub fn one(start: P::Value) -> Write<P> {
        Write { data: Arc::new(Mutex::new(start)) }
    }
}
|
//! Use this library to open a path or URL using the program configured on the system.
//!
//! # Usage
//!
//! Open the given URL in the default web browser.
//!
//! ```no_run
//! open::that("http://rust-lang.org").unwrap();
//! ```
//!
//! Alternatively, specify the program to be used to open the path or URL.
//!
//! ```no_run
//! open::with("http://rust-lang.org", "firefox").unwrap();
//! ```
//!
//! # Notes
//!
//! As an operating system program is used, the open operation can fail.
//! Therefore, you are advised to at least check the result and behave
//! accordingly, e.g. by letting the user know that the open operation failed.
//!
//! ```no_run
//! let path = "http://rust-lang.org";
//!
//! match open::that(path) {
//! Ok(()) => println!("Opened '{}' successfully.", path),
//! Err(err) => eprintln!("An error occurred when opening '{}': {}", path, err),
//! }
//! ```
#[cfg(target_os = "windows")]
use windows as os;
#[cfg(target_os = "macos")]
use macos as os;
#[cfg(target_os = "ios")]
use ios as os;
#[cfg(any(
target_os = "linux",
target_os = "android",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "netbsd",
target_os = "openbsd",
target_os = "solaris"
))]
use unix as os;
#[cfg(not(any(
target_os = "linux",
target_os = "android",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "netbsd",
target_os = "openbsd",
target_os = "solaris",
target_os = "ios",
target_os = "macos",
target_os = "windows",
)))]
compile_error!("open is not supported on this platform");
use std::{
ffi::OsStr,
io,
process::{Command, Output, Stdio},
thread,
};
type Result = io::Result<()>;
/// Open path with the default application.
///
/// # Examples
///
/// ```no_run
/// let path = "http://rust-lang.org";
///
/// match open::that(path) {
///     Ok(()) => println!("Opened '{}' successfully.", path),
///     Err(err) => panic!("An error occurred when opening '{}': {}", path, err),
/// }
/// ```
///
/// # Errors
///
/// A [`std::io::Error`] is returned on failure. Because different operating systems
/// handle errors differently it is recommended not to match on a certain error.
pub fn that<T: AsRef<OsStr> + Sized>(path: T) -> Result {
    // Delegates to the platform implementation selected by the cfg'd
    // `use ... as os;` imports above.
    os::that(path)
}
/// Open path with the given application.
///
/// # Examples
///
/// ```no_run
/// let path = "http://rust-lang.org";
/// let app = "firefox";
///
/// match open::with(path, app) {
///     Ok(()) => println!("Opened '{}' successfully.", path),
///     Err(err) => panic!("An error occurred when opening '{}': {}", path, err),
/// }
/// ```
///
/// # Errors
///
/// A [`std::io::Error`] is returned on failure. Because different operating systems
/// handle errors differently it is recommended not to match on a certain error.
pub fn with<T: AsRef<OsStr> + Sized>(path: T, app: impl Into<String>) -> Result {
    // Delegates to the platform implementation selected by the cfg'd imports.
    os::with(path, app)
}
/// Open path with the default application in a new thread.
///
/// See documentation of [`that`] for more details.
pub fn that_in_background<T: AsRef<OsStr> + Sized>(path: T) -> thread::JoinHandle<Result> {
    // Convert to an owned OsString first so the spawned closure is 'static.
    let owned = path.as_ref().to_os_string();
    thread::spawn(move || that(owned))
}
/// Open path with the given application in a new thread.
///
/// See documentation of [`with`] for more details.
pub fn with_in_background<T: AsRef<OsStr> + Sized>(
    path: T,
    app: impl Into<String>,
) -> thread::JoinHandle<Result> {
    // Take ownership of both arguments so the spawned closure is 'static.
    let (path, app) = (path.as_ref().to_os_string(), app.into());
    thread::spawn(move || with(path, app))
}
/// Internal helper trait converting raw platform outcomes (a captured
/// process `Output`, or a Win32 return code) into this crate's `Result`.
trait IntoResult<T> {
    fn into_result(self) -> T;
}
impl IntoResult<Result> for io::Result<Output> {
    fn into_result(self) -> Result {
        // Spawn failures propagate as-is; a non-zero exit becomes an error
        // built from the captured status/stderr.
        let output = self?;
        if output.status.success() {
            Ok(())
        } else {
            Err(from_output(output))
        }
    }
}
#[cfg(windows)]
impl IntoResult<Result> for winapi::ctypes::c_int {
    fn into_result(self) -> Result {
        // Per the ShellExecuteW contract, a return value greater than 32
        // means success; anything else is an error code, so report the
        // calling thread's last OS error.
        match self {
            i if i > 32 => Ok(()),
            _ => Err(io::Error::last_os_error()),
        }
    }
}
/// Build an `io::Error` describing a child process that exited
/// unsuccessfully. The message carries the trimmed stderr text when any
/// was captured, otherwise just the exit status.
fn from_output(output: Output) -> io::Error {
    let error_msg = if output.stderr.is_empty() {
        output.status.to_string()
    } else {
        format!(
            "{} ({})",
            String::from_utf8_lossy(&output.stderr).trim(),
            output.status
        )
    };
    io::Error::new(io::ErrorKind::Other, error_msg)
}
/// Extension trait for `Command`: run the command while discarding stdout
/// and capturing (a bounded prefix of) stderr for error reporting.
trait CommandExt {
    fn output_stderr(&mut self) -> io::Result<Output>;
}
impl CommandExt for Command {
    fn output_stderr(&mut self) -> io::Result<Output> {
        // stdin/stdout are silenced; only stderr is piped for diagnostics.
        let mut process = self
            .stdin(Stdio::null())
            .stdout(Stdio::null())
            .stderr(Stdio::piped())
            .spawn()?;
        // Consume all stderr - it's open just for a few programs which can't handle it being closed.
        use std::io::Read;
        // Keep at most the first 256 bytes for the error message.
        let mut stderr = vec![0; 256];
        let mut stderr_src = process.stderr.take().expect("piped stderr");
        // A failed read is deliberately treated as "no message" (len 0) so it
        // cannot mask the child's exit status.
        let len = stderr_src.read(&mut stderr).unwrap_or(0);
        stderr.truncate(len);
        // consume the rest to avoid blocking
        // (draining must happen before wait(): a chatty child could otherwise
        // fill the pipe and deadlock against our wait)
        std::io::copy(&mut stderr_src, &mut std::io::sink()).ok();
        let status = process.wait()?;
        Ok(Output {
            status,
            stderr,
            stdout: vec![],
        })
    }
}
#[cfg(windows)]
mod windows {
    use std::{ffi::OsStr, io, os::windows::ffi::OsStrExt, ptr};
    use winapi::ctypes::c_int;
    use winapi::um::shellapi::ShellExecuteW;
    use crate::{IntoResult, Result};

    /// Convert an `OsStr` into a NUL-terminated UTF-16 buffer suitable for
    /// `ShellExecuteW`, rejecting input with embedded NUL code units.
    fn convert_path(path: &OsStr) -> io::Result<Vec<u16>> {
        let mut maybe_result: Vec<_> = path.encode_wide().collect();
        if maybe_result.iter().any(|&u| u == 0) {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "path contains NUL byte(s)",
            ));
        }
        // Append the terminating NUL expected by the Win32 API.
        maybe_result.push(0);
        Ok(maybe_result)
    }

    /// Open `path` with its default handler via the shell "open" verb.
    pub fn that<T: AsRef<OsStr> + Sized>(path: T) -> Result {
        const SW_SHOW: c_int = 5;
        let path = convert_path(path.as_ref())?;
        // The "\0" in the literal becomes the required NUL terminator.
        let operation: Vec<u16> = OsStr::new("open\0").encode_wide().collect();
        // SAFETY-NOTE(review): the raw pointers reference live Vec buffers
        // for the duration of the call; ShellExecuteW does not retain them.
        let result = unsafe {
            ShellExecuteW(
                ptr::null_mut(),
                operation.as_ptr(),
                path.as_ptr(),
                ptr::null(),
                ptr::null(),
                SW_SHOW,
            )
        };
        (result as c_int).into_result()
    }

    /// Open `path` using `app`: the application is the executed file and
    /// the path is passed as its parameter string.
    pub fn with<T: AsRef<OsStr> + Sized>(path: T, app: impl Into<String>) -> Result {
        const SW_SHOW: c_int = 5;
        let path = convert_path(path.as_ref())?;
        let operation: Vec<u16> = OsStr::new("open\0").encode_wide().collect();
        // NOTE(review): format! appends the NUL terminator; assumes the app
        // name itself contains no NUL — confirm upstream.
        let app_name: Vec<u16> = OsStr::new(&format!("{}\0", app.into()))
            .encode_wide()
            .collect();
        let result = unsafe {
            ShellExecuteW(
                ptr::null_mut(),
                operation.as_ptr(),
                app_name.as_ptr(),
                path.as_ptr(),
                ptr::null(),
                SW_SHOW,
            )
        };
        (result as c_int).into_result()
    }
}
#[cfg(target_os = "macos")]
mod macos {
    use std::{ffi::OsStr, process::Command};
    use crate::{CommandExt, IntoResult, Result};

    /// Open `path` with the system default handler via `/usr/bin/open`.
    pub fn that<T: AsRef<OsStr> + Sized>(path: T) -> Result {
        let mut open = Command::new("/usr/bin/open");
        open.arg(path.as_ref());
        open.output_stderr().into_result()
    }

    /// Open `path` with a specific application: `open <path> -a <app>`.
    pub fn with<T: AsRef<OsStr> + Sized>(path: T, app: impl Into<String>) -> Result {
        let mut open = Command::new("/usr/bin/open");
        open.arg(path.as_ref()).arg("-a").arg(app.into());
        open.output_stderr().into_result()
    }
}
#[cfg(target_os = "ios")]
mod ios {
    use std::{ffi::OsStr, process::Command};
    use crate::{CommandExt, IntoResult, Result};

    /// Open `path` with the default handler via the `uiopen` utility.
    pub fn that<T: AsRef<OsStr> + Sized>(path: T) -> Result {
        let mut uiopen = Command::new("uiopen");
        uiopen.arg("--url").arg(path.as_ref());
        uiopen.output_stderr().into_result()
    }

    /// Open `path` with the application identified by the bundle id `app`.
    pub fn with<T: AsRef<OsStr> + Sized>(path: T, app: impl Into<String>) -> Result {
        let mut uiopen = Command::new("uiopen");
        uiopen
            .arg("--url")
            .arg(path.as_ref())
            .arg("--bundleid")
            .arg(app.into());
        uiopen.output_stderr().into_result()
    }
}
// illumos added for consistency with the crate's platform list elsewhere;
// it ships the same xdg-open/gio style handlers as Solaris.
#[cfg(any(
    target_os = "linux",
    target_os = "android",
    target_os = "freebsd",
    target_os = "dragonfly",
    target_os = "netbsd",
    target_os = "openbsd",
    target_os = "illumos",
    target_os = "solaris"
))]
mod unix {
    use std::{
        env,
        ffi::{OsStr, OsString},
        path::{Path, PathBuf},
        process::Command,
    };
    use crate::{CommandExt, IntoResult, Result};

    /// Try each known opener in turn, returning on the first success.
    ///
    /// If every handler fails, a "ran but exited non-zero" error is
    /// preferred over a "could not be spawned" error, as it is usually
    /// the more informative of the two.
    pub fn that<T: AsRef<OsStr> + Sized>(path: T) -> Result {
        let path = path.as_ref();
        let open_handlers = [
            ("xdg-open", &[path] as &[_]),
            ("gio", &[OsStr::new("open"), path]),
            ("gnome-open", &[path]),
            ("kde-open", &[path]),
            ("wslview", &[&wsl_path(path)]),
        ];

        // First "exited non-zero" error and first spawn error, respectively.
        let mut unsuccessful = None;
        let mut io_error = None;

        for (command, args) in &open_handlers {
            let result = Command::new(command).args(*args).output_stderr();
            match result {
                Ok(o) if o.status.success() => return Ok(()),
                Ok(o) => unsuccessful = unsuccessful.or_else(|| Some(crate::from_output(o))),
                Err(err) => io_error = io_error.or(Some(err)),
            }
        }

        Err(unsuccessful
            .or(io_error)
            .expect("successful cases don't get here"))
    }

    /// Open `path` by invoking `app` directly with the path as its only
    /// argument.
    pub fn with<T: AsRef<OsStr> + Sized>(path: T, app: impl Into<String>) -> Result {
        Command::new(app.into())
            .arg(path.as_ref())
            .output_stderr()
            .into_result()
    }

    // Polyfill to workaround absolute path bug in wslu(wslview). In versions before
    // v3.1.1, wslview is unable to find absolute paths. `wsl_path` converts an
    // absolute path into a relative path starting from the current directory. If
    // the path is already a relative path or the conversion fails the original path
    // is returned.
    fn wsl_path<T: AsRef<OsStr>>(path: T) -> OsString {
        fn path_relative_to_current_dir<T: AsRef<OsStr>>(path: T) -> Option<PathBuf> {
            let path = Path::new(&path);
            if path.is_relative() {
                return None;
            }
            let base = env::current_dir().ok()?;
            pathdiff::diff_paths(path, base)
        }

        match path_relative_to_current_dir(&path) {
            None => OsString::from(&path),
            Some(relative) => OsString::from(relative),
        }
    }
}
add Illumos support
//! Use this library to open a path or URL using the program configured on the system.
//!
//! # Usage
//!
//! Open the given URL in the default web browser.
//!
//! ```no_run
//! open::that("http://rust-lang.org").unwrap();
//! ```
//!
//! Alternatively, specify the program to be used to open the path or URL.
//!
//! ```no_run
//! open::with("http://rust-lang.org", "firefox").unwrap();
//! ```
//!
//! # Notes
//!
//! As an operating system program is used, the open operation can fail.
//! Therefore, you are advised to at least check the result and behave
//! accordingly, e.g. by letting the user know that the open operation failed.
//!
//! ```no_run
//! let path = "http://rust-lang.org";
//!
//! match open::that(path) {
//! Ok(()) => println!("Opened '{}' successfully.", path),
//! Err(err) => eprintln!("An error occurred when opening '{}': {}", path, err),
//! }
//! ```
#[cfg(target_os = "windows")]
use windows as os;
#[cfg(target_os = "macos")]
use macos as os;
#[cfg(target_os = "ios")]
use ios as os;
#[cfg(any(
target_os = "linux",
target_os = "android",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "netbsd",
target_os = "openbsd",
target_os = "illumos",
target_os = "solaris"
))]
use unix as os;
#[cfg(not(any(
target_os = "linux",
target_os = "android",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "netbsd",
target_os = "openbsd",
target_os = "illumos",
target_os = "solaris",
target_os = "ios",
target_os = "macos",
target_os = "windows",
)))]
compile_error!("open is not supported on this platform");
use std::{
ffi::OsStr,
io,
process::{Command, Output, Stdio},
thread,
};
type Result = io::Result<()>;
/// Open path with the default application.
///
/// # Examples
///
/// ```no_run
/// let path = "http://rust-lang.org";
///
/// match open::that(path) {
///     Ok(()) => println!("Opened '{}' successfully.", path),
///     Err(err) => panic!("An error occurred when opening '{}': {}", path, err),
/// }
/// ```
///
/// # Errors
///
/// A [`std::io::Error`] is returned on failure. Because different operating systems
/// handle errors differently it is recommended not to match on a certain error.
pub fn that<T: AsRef<OsStr> + Sized>(path: T) -> Result {
    // Delegates to the platform implementation selected by the cfg'd
    // `use ... as os;` imports above.
    os::that(path)
}
/// Open path with the given application.
///
/// # Examples
///
/// ```no_run
/// let path = "http://rust-lang.org";
/// let app = "firefox";
///
/// match open::with(path, app) {
///     Ok(()) => println!("Opened '{}' successfully.", path),
///     Err(err) => panic!("An error occurred when opening '{}': {}", path, err),
/// }
/// ```
///
/// # Errors
///
/// A [`std::io::Error`] is returned on failure. Because different operating systems
/// handle errors differently it is recommended not to match on a certain error.
pub fn with<T: AsRef<OsStr> + Sized>(path: T, app: impl Into<String>) -> Result {
    // Delegates to the platform implementation selected by the cfg'd imports.
    os::with(path, app)
}
/// Open path with the default application in a new thread.
///
/// See documentation of [`that`] for more details.
pub fn that_in_background<T: AsRef<OsStr> + Sized>(path: T) -> thread::JoinHandle<Result> {
    // Convert to an owned OsString first so the spawned closure is 'static.
    let owned = path.as_ref().to_os_string();
    thread::spawn(move || that(owned))
}
/// Open path with the given application in a new thread.
///
/// See documentation of [`with`] for more details.
pub fn with_in_background<T: AsRef<OsStr> + Sized>(
    path: T,
    app: impl Into<String>,
) -> thread::JoinHandle<Result> {
    // Take ownership of both arguments so the spawned closure is 'static.
    let (path, app) = (path.as_ref().to_os_string(), app.into());
    thread::spawn(move || with(path, app))
}
/// Internal helper trait converting raw platform outcomes (a captured
/// process `Output`, or a Win32 return code) into this crate's `Result`.
trait IntoResult<T> {
    fn into_result(self) -> T;
}
impl IntoResult<Result> for io::Result<Output> {
    fn into_result(self) -> Result {
        // Spawn failures propagate as-is; a non-zero exit becomes an error
        // built from the captured status/stderr.
        let output = self?;
        if output.status.success() {
            Ok(())
        } else {
            Err(from_output(output))
        }
    }
}
#[cfg(windows)]
impl IntoResult<Result> for winapi::ctypes::c_int {
    fn into_result(self) -> Result {
        // Per the ShellExecuteW contract, a return value greater than 32
        // means success; anything else is an error code, so report the
        // calling thread's last OS error.
        match self {
            i if i > 32 => Ok(()),
            _ => Err(io::Error::last_os_error()),
        }
    }
}
/// Build an `io::Error` describing a child process that exited
/// unsuccessfully. The message carries the trimmed stderr text when any
/// was captured, otherwise just the exit status.
fn from_output(output: Output) -> io::Error {
    let error_msg = if output.stderr.is_empty() {
        output.status.to_string()
    } else {
        format!(
            "{} ({})",
            String::from_utf8_lossy(&output.stderr).trim(),
            output.status
        )
    };
    io::Error::new(io::ErrorKind::Other, error_msg)
}
/// Extension trait for `Command`: run the command while discarding stdout
/// and capturing (a bounded prefix of) stderr for error reporting.
trait CommandExt {
    fn output_stderr(&mut self) -> io::Result<Output>;
}
impl CommandExt for Command {
    fn output_stderr(&mut self) -> io::Result<Output> {
        // stdin/stdout are silenced; only stderr is piped for diagnostics.
        let mut process = self
            .stdin(Stdio::null())
            .stdout(Stdio::null())
            .stderr(Stdio::piped())
            .spawn()?;
        // Consume all stderr - it's open just for a few programs which can't handle it being closed.
        use std::io::Read;
        // Keep at most the first 256 bytes for the error message.
        let mut stderr = vec![0; 256];
        let mut stderr_src = process.stderr.take().expect("piped stderr");
        // A failed read is deliberately treated as "no message" (len 0) so it
        // cannot mask the child's exit status.
        let len = stderr_src.read(&mut stderr).unwrap_or(0);
        stderr.truncate(len);
        // consume the rest to avoid blocking
        // (draining must happen before wait(): a chatty child could otherwise
        // fill the pipe and deadlock against our wait)
        std::io::copy(&mut stderr_src, &mut std::io::sink()).ok();
        let status = process.wait()?;
        Ok(Output {
            status,
            stderr,
            stdout: vec![],
        })
    }
}
#[cfg(windows)]
mod windows {
    use std::{ffi::OsStr, io, os::windows::ffi::OsStrExt, ptr};
    use winapi::ctypes::c_int;
    use winapi::um::shellapi::ShellExecuteW;
    use crate::{IntoResult, Result};

    /// Convert an `OsStr` into a NUL-terminated UTF-16 buffer suitable for
    /// `ShellExecuteW`, rejecting input with embedded NUL code units.
    fn convert_path(path: &OsStr) -> io::Result<Vec<u16>> {
        let mut maybe_result: Vec<_> = path.encode_wide().collect();
        if maybe_result.iter().any(|&u| u == 0) {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "path contains NUL byte(s)",
            ));
        }
        // Append the terminating NUL expected by the Win32 API.
        maybe_result.push(0);
        Ok(maybe_result)
    }

    /// Open `path` with its default handler via the shell "open" verb.
    pub fn that<T: AsRef<OsStr> + Sized>(path: T) -> Result {
        const SW_SHOW: c_int = 5;
        let path = convert_path(path.as_ref())?;
        // The "\0" in the literal becomes the required NUL terminator.
        let operation: Vec<u16> = OsStr::new("open\0").encode_wide().collect();
        // SAFETY-NOTE(review): the raw pointers reference live Vec buffers
        // for the duration of the call; ShellExecuteW does not retain them.
        let result = unsafe {
            ShellExecuteW(
                ptr::null_mut(),
                operation.as_ptr(),
                path.as_ptr(),
                ptr::null(),
                ptr::null(),
                SW_SHOW,
            )
        };
        (result as c_int).into_result()
    }

    /// Open `path` using `app`: the application is the executed file and
    /// the path is passed as its parameter string.
    pub fn with<T: AsRef<OsStr> + Sized>(path: T, app: impl Into<String>) -> Result {
        const SW_SHOW: c_int = 5;
        let path = convert_path(path.as_ref())?;
        let operation: Vec<u16> = OsStr::new("open\0").encode_wide().collect();
        // NOTE(review): format! appends the NUL terminator; assumes the app
        // name itself contains no NUL — confirm upstream.
        let app_name: Vec<u16> = OsStr::new(&format!("{}\0", app.into()))
            .encode_wide()
            .collect();
        let result = unsafe {
            ShellExecuteW(
                ptr::null_mut(),
                operation.as_ptr(),
                app_name.as_ptr(),
                path.as_ptr(),
                ptr::null(),
                SW_SHOW,
            )
        };
        (result as c_int).into_result()
    }
}
#[cfg(target_os = "macos")]
mod macos {
    use std::{ffi::OsStr, process::Command};
    use crate::{CommandExt, IntoResult, Result};

    /// Open `path` with the system default handler via `/usr/bin/open`.
    pub fn that<T: AsRef<OsStr> + Sized>(path: T) -> Result {
        let mut open = Command::new("/usr/bin/open");
        open.arg(path.as_ref());
        open.output_stderr().into_result()
    }

    /// Open `path` with a specific application: `open <path> -a <app>`.
    pub fn with<T: AsRef<OsStr> + Sized>(path: T, app: impl Into<String>) -> Result {
        let mut open = Command::new("/usr/bin/open");
        open.arg(path.as_ref()).arg("-a").arg(app.into());
        open.output_stderr().into_result()
    }
}
#[cfg(target_os = "ios")]
mod ios {
    use std::{ffi::OsStr, process::Command};
    use crate::{CommandExt, IntoResult, Result};

    /// Open `path` with the default handler via the `uiopen` utility.
    pub fn that<T: AsRef<OsStr> + Sized>(path: T) -> Result {
        let mut uiopen = Command::new("uiopen");
        uiopen.arg("--url").arg(path.as_ref());
        uiopen.output_stderr().into_result()
    }

    /// Open `path` with the application identified by the bundle id `app`.
    pub fn with<T: AsRef<OsStr> + Sized>(path: T, app: impl Into<String>) -> Result {
        let mut uiopen = Command::new("uiopen");
        uiopen
            .arg("--url")
            .arg(path.as_ref())
            .arg("--bundleid")
            .arg(app.into());
        uiopen.output_stderr().into_result()
    }
}
#[cfg(any(
    target_os = "linux",
    target_os = "android",
    target_os = "freebsd",
    target_os = "dragonfly",
    target_os = "netbsd",
    target_os = "openbsd",
    target_os = "illumos",
    target_os = "solaris"
))]
mod unix {
    use std::{
        env,
        ffi::{OsStr, OsString},
        path::{Path, PathBuf},
        process::Command,
    };
    use crate::{CommandExt, IntoResult, Result};

    /// Try each known opener in turn, returning on the first success.
    ///
    /// If every handler fails, a "ran but exited non-zero" error is
    /// preferred over a "could not be spawned" error, as it is usually
    /// the more informative of the two.
    pub fn that<T: AsRef<OsStr> + Sized>(path: T) -> Result {
        let path = path.as_ref();
        let open_handlers = [
            ("xdg-open", &[path] as &[_]),
            ("gio", &[OsStr::new("open"), path]),
            ("gnome-open", &[path]),
            ("kde-open", &[path]),
            ("wslview", &[&wsl_path(path)]),
        ];
        // First "exited non-zero" error and first spawn error, respectively.
        let mut unsuccessful = None;
        let mut io_error = None;
        for (command, args) in &open_handlers {
            let result = Command::new(command).args(*args).output_stderr();
            match result {
                Ok(o) if o.status.success() => return Ok(()),
                Ok(o) => unsuccessful = unsuccessful.or_else(|| Some(crate::from_output(o))),
                Err(err) => io_error = io_error.or(Some(err)),
            }
        }
        Err(unsuccessful
            .or(io_error)
            .expect("successful cases don't get here"))
    }

    /// Open `path` by invoking `app` directly with the path as its only
    /// argument.
    pub fn with<T: AsRef<OsStr> + Sized>(path: T, app: impl Into<String>) -> Result {
        Command::new(app.into())
            .arg(path.as_ref())
            .output_stderr()
            .into_result()
    }

    // Polyfill to workaround absolute path bug in wslu(wslview). In versions before
    // v3.1.1, wslview is unable to find absolute paths. `wsl_path` converts an
    // absolute path into a relative path starting from the current directory. If
    // the path is already a relative path or the conversion fails the original path
    // is returned.
    fn wsl_path<T: AsRef<OsStr>>(path: T) -> OsString {
        fn path_relative_to_current_dir<T: AsRef<OsStr>>(path: T) -> Option<PathBuf> {
            let path = Path::new(&path);
            if path.is_relative() {
                return None;
            }
            let base = env::current_dir().ok()?;
            pathdiff::diff_paths(path, base)
        }

        match path_relative_to_current_dir(&path) {
            None => OsString::from(&path),
            Some(relative) => OsString::from(relative),
        }
    }
}
|
//! `textwrap` provides functions for word wrapping and filling text.
//!
//! This can be very useful in commandline programs where you want to
//! format dynamic output nicely so it looks good in a terminal.
//!
//! To wrap text, one must know the width of each word so one can know
//! when to break lines. This library measures the width of text using
//! the [displayed width][unicode-width], not the size in bytes.
//!
//! This is important for non-ASCII text. ASCII characters such as `a`
//! and `!` are simple: the displayed width is the same as the number
//! of bytes used in their UTF-8 encoding (one ASCII character takes
//! up one byte in UTF-8). However, non-ASCII characters and symbols
//! take up more than one byte: `é` is `0xc3 0xa9` and `⚙` is `0xe2
//! 0x9a 0x99` in UTF-8, respectively.
//!
//! This is why we take care to use the displayed width instead of the
//! byte count when computing line lengths. All functions in this
//! library handle Unicode characters like this.
//!
//! [unicode-width]: https://unicode-rs.github.io/unicode-width/unicode_width/index.html
extern crate unicode_width;
extern crate hyphenation;
use unicode_width::UnicodeWidthStr;
use unicode_width::UnicodeWidthChar;
use hyphenation::Hyphenation;
use hyphenation::Corpus;
/// A Wrapper holds settings for wrapping and filling text.
///
/// The algorithm used by the `wrap` method works by doing a single
/// scan over words in the input string and splitting them into one or
/// more lines. The time and memory complexity is O(*n*) where *n* is
/// the length of the input string.
pub struct Wrapper<'a> {
    /// The width in columns at which the text will be wrapped.
    pub width: usize,
    /// Allow long words to be broken if they cannot fit on a line.
    /// When set to false, some lines may be longer than self.width.
    pub break_words: bool,
    /// The hyphenation corpus (if any) used for automatic
    /// hyphenation.
    pub corpus: Option<&'a Corpus>,
}
impl<'a> Wrapper<'a> {
    /// Create a new Wrapper for wrapping at the specified width. By
    /// default, we allow words longer than `width` to be broken. No
    /// hyphenation corpus is loaded by default.
    pub fn new(width: usize) -> Wrapper<'a> {
        Wrapper::<'a> {
            // Field init shorthand (Clippy: redundant_field_names).
            width,
            break_words: true,
            corpus: None,
        }
    }

    /// Fill a line of text at `self.width` characters. Strings are
    /// wrapped based on their displayed width, not their size in
    /// bytes.
    ///
    /// The result is a string with newlines between each line. Use
    /// the `wrap` method if you need access to the individual lines.
    ///
    /// ```
    /// use textwrap::Wrapper;
    ///
    /// let wrapper = Wrapper::new(15);
    /// assert_eq!(wrapper.fill("Memory safety without garbage collection."),
    ///            "Memory safety\nwithout garbage\ncollection.");
    /// ```
    ///
    /// This method simply joins the lines produced by `wrap`. As
    /// such, it inherits the O(*n*) time and memory complexity where
    /// *n* is the input string length.
    pub fn fill(&self, s: &str) -> String {
        // `s` is already a &str: pass it through directly instead of
        // re-borrowing (Clippy: needless_borrow).
        self.wrap(s).join("\n")
    }

    /// Wrap a line of text at `self.width` characters. Strings are
    /// wrapped based on their displayed width, not their size in
    /// bytes.
    ///
    /// ```
    /// use textwrap::Wrapper;
    ///
    /// let wrap15 = Wrapper::new(15);
    /// assert_eq!(wrap15.wrap("Concurrency without data races."),
    ///            vec!["Concurrency",
    ///                 "without data",
    ///                 "races."]);
    ///
    /// let wrap20 = Wrapper::new(20);
    /// assert_eq!(wrap20.wrap("Concurrency without data races."),
    ///            vec!["Concurrency without",
    ///                 "data races."]);
    /// ```
    ///
    /// This method does a single scan over the input string, it has
    /// an O(*n*) time and memory complexity where *n* is the input
    /// string length.
    pub fn wrap(&self, s: &str) -> Vec<String> {
        let mut result = Vec::with_capacity(s.len() / (self.width + 1));
        let mut line = String::with_capacity(self.width);
        let mut remaining = self.width;

        for mut word in s.split_whitespace() {
            // Attempt to fit the word without any splitting.
            if self.fit_part(word, "", &mut remaining, &mut line) {
                continue;
            }

            // If that failed, loop until nothing remains to be added.
            while !word.is_empty() {
                // `word` is already a &str (Clippy: needless_borrow).
                let splits = self.split_word(word);
                let (smallest, hyphen, longest) = splits[0];
                let min_width = smallest.width() + hyphen.len();

                // Add a new line if even the smallest split doesn't
                // fit.
                if !line.is_empty() && 1 + min_width > remaining {
                    result.push(line);
                    line = String::with_capacity(self.width);
                    remaining = self.width;
                }

                // Find a split that fits on the current line.
                for &(head, hyphen, tail) in splits.iter().rev() {
                    if self.fit_part(head, hyphen, &mut remaining, &mut line) {
                        word = tail;
                        break;
                    }
                }

                // If even the smallest split doesn't fit on the line,
                // we might have to break the word.
                if line.is_empty() {
                    if self.break_words && self.width > 1 {
                        // Break word on a character boundary as close
                        // to self.width as possible. Characters are
                        // at most 2 columns wide, so we will chop off
                        // at least one character.
                        let mut head_width = 0;
                        for (idx, c) in word.char_indices() {
                            head_width += c.width().unwrap_or(0);
                            if head_width > self.width {
                                let (head, tail) = word.split_at(idx);
                                result.push(String::from(head));
                                word = tail;
                                break;
                            }
                        }
                    } else {
                        // We forcibly add the smallest split and
                        // continue with the longest tail. This will
                        // result in a line longer than self.width.
                        result.push(String::from(smallest) + hyphen);
                        remaining = self.width;
                        word = longest;
                    }
                }
            }
        }
        if !line.is_empty() {
            result.push(line);
        }
        return result;
    }

    /// Split word into all possible parts (head, tail). Word must be
    /// non-empty. The returned vector will always be non-empty.
    fn split_word<'b>(&self, word: &'b str) -> Vec<(&'b str, &'b str, &'b str)> {
        let mut result = Vec::new();

        // Split on hyphens or use the language corpus.
        match self.corpus {
            None => {
                // Split on hyphens, smallest split first. We only use
                // hyphens that are surrounded by alphanumeric
                // characters. This is to avoid splitting on repeated
                // hyphens, such as those found in --foo-bar.
                let char_indices = word.char_indices().collect::<Vec<_>>();
                for w in char_indices.windows(3) {
                    let ((_, prev), (n, c), (_, next)) = (w[0], w[1], w[2]);
                    if prev.is_alphanumeric() && c == '-' && next.is_alphanumeric() {
                        let (head, tail) = word.split_at(n + 1);
                        result.push((head, "", tail));
                    }
                }
            }
            Some(corpus) => {
                // Find splits based on language corpus. This includes
                // the splits that would have been found above.
                for n in word.opportunities(corpus) {
                    let (head, tail) = word.split_at(n);
                    let hyphen = if head.as_bytes()[head.len() - 1] == b'-' {
                        ""
                    } else {
                        "-"
                    };
                    result.push((head, hyphen, tail));
                }
            }
        }

        // Finally option is no split at all.
        result.push((word, "", ""));
        return result;
    }

    /// Try to fit a word (or part of a word) onto a line. The line
    /// and the remaining width is updated as appropriate if the word
    /// or part fits.
    fn fit_part<'b>(&self,
                    part: &'b str,
                    hyphen: &'b str,
                    remaining: &mut usize,
                    line: &mut String)
                    -> bool {
        let space = if line.is_empty() { 0 } else { 1 };
        if space + part.width() + hyphen.len() <= *remaining {
            if !line.is_empty() {
                line.push(' ');
            }
            line.push_str(part);
            line.push_str(hyphen);
            *remaining -= space + part.width() + hyphen.len();
            return true;
        }
        return false;
    }
}
/// Fill a line of text at `width` characters. Strings are wrapped
/// based on their displayed width, not their size in bytes.
///
/// The result is a string with newlines between each line. Use `wrap`
/// if you need access to the individual lines.
///
/// ```
/// use textwrap::fill;
///
/// assert_eq!(fill("Memory safety without garbage collection.", 15),
///            "Memory safety\nwithout garbage\ncollection.");
/// ```
///
/// This function creates a Wrapper on the fly with default settings.
/// If you need to set a language corpus for automatic hyphenation, or
/// need to fill many strings, then it is suggested to create Wrapper
/// and call its [`fill` method](struct.Wrapper.html#method.fill).
pub fn fill(s: &str, width: usize) -> String {
    let lines = wrap(s, width);
    lines.join("\n")
}
/// Wrap a line of text at `width` characters. Strings are wrapped
/// based on their displayed width, not their size in bytes.
///
/// ```
/// use textwrap::wrap;
///
/// assert_eq!(wrap("Concurrency without data races.", 15),
///            vec!["Concurrency",
///                 "without data",
///                 "races."]);
///
/// assert_eq!(wrap("Concurrency without data races.", 20),
///            vec!["Concurrency without",
///                 "data races."]);
/// ```
///
/// This function creates a Wrapper on the fly with default settings.
/// If you need to set a language corpus for automatic hyphenation, or
/// need to wrap many strings, then it is suggested to create Wrapper
/// and call its [`wrap` method](struct.Wrapper.html#method.wrap).
pub fn wrap(s: &str, width: usize) -> Vec<String> {
    let wrapper = Wrapper::new(width);
    wrapper.wrap(s)
}
/// Add prefix to each non-empty line.
///
/// ```
/// use textwrap::indent;
///
/// assert_eq!(indent("Foo\nBar\n", "  "), "  Foo\n  Bar\n");
/// ```
///
/// Empty lines (lines consisting only of whitespace) are not indented
/// and the whitespace is replaced by a single newline (`\n`):
///
/// ```
/// use textwrap::indent;
///
/// assert_eq!(indent("Foo\n\nBar\n  \t  \nBaz\n", "  "),
///            "  Foo\n\n  Bar\n\n  Baz\n");
/// ```
///
/// Leading and trailing whitespace on non-empty lines is kept
/// unchanged:
///
/// ```
/// use textwrap::indent;
///
/// assert_eq!(indent(" \t  Foo   ", "  "), "   \t  Foo   \n");
/// ```
pub fn indent(s: &str, prefix: &str) -> String {
    let mut out = String::new();
    for line in s.lines() {
        // Only lines with at least one non-whitespace character receive
        // the prefix; blank lines collapse to a bare newline.
        if !line.trim().is_empty() {
            out.push_str(prefix);
            out.push_str(line);
        }
        out.push('\n');
    }
    out
}
/// Removes common leading whitespace from each line.
///
/// This will look at each non-empty line and determine the maximum
/// amount of whitespace that can be removed from the line.
///
/// ```
/// use textwrap::dedent;
///
/// assert_eq!(dedent("  1st line\n  2nd line\n"),
///            "1st line\n2nd line\n");
/// ```
pub fn dedent(s: &str) -> String {
    let mut prefix = String::new();
    let mut lines = s.lines();

    // We first search for a non-empty line to find a prefix.
    for line in &mut lines {
        let whitespace = line.chars()
            .take_while(|c| c.is_whitespace())
            .collect::<String>();
        // Check if the line had anything but whitespace
        if whitespace.len() < line.len() {
            prefix = whitespace;
            break;
        }
    }

    // We then continue looking through the remaining lines to
    // possibly shorten the prefix.
    for line in &mut lines {
        // Fix: whitespace-only lines must not shorten the prefix — the
        // doc promises only *non-empty* lines are considered, and such a
        // line previously made e.g. "  foo\n \n  bar\n" dedent to only
        // one space instead of two.
        if line.chars().all(|c| c.is_whitespace()) {
            continue;
        }
        let whitespace = line.chars()
            .zip(prefix.chars())
            .take_while(|&(a, b)| a == b)
            .map(|(_, b)| b)
            .collect::<String>();
        // Check if we have found a shorter prefix
        if whitespace.len() < prefix.len() {
            prefix = whitespace;
        }
    }

    // We now go over the lines a second time to build the result.
    let mut result = String::new();
    for line in s.lines() {
        if line.starts_with(&prefix) && line.chars().any(|c| !c.is_whitespace()) {
            let (_, tail) = line.split_at(prefix.len());
            result.push_str(tail);
        }
        result.push('\n');
    }
    return result;
}
#[cfg(test)]
mod tests {
    extern crate hyphenation;

    use hyphenation::Language;
    use super::*;

    /// Add newlines. Ensures that the final line in the vector also
    /// has a newline.
    fn add_nl(lines: &Vec<&str>) -> String {
        lines.join("\n") + "\n"
    }

    #[test]
    fn no_wrap() {
        assert_eq!(wrap("foo", 10), vec!["foo"]);
    }

    #[test]
    fn simple() {
        assert_eq!(wrap("foo bar baz", 5), vec!["foo", "bar", "baz"]);
    }

    #[test]
    fn multi_word_on_line() {
        assert_eq!(wrap("foo bar baz", 10), vec!["foo bar", "baz"]);
    }

    #[test]
    fn long_word() {
        assert_eq!(wrap("foo", 0), vec!["foo"]);
    }

    #[test]
    fn long_words() {
        assert_eq!(wrap("foo bar", 0), vec!["foo", "bar"]);
    }

    #[test]
    fn whitespace_is_squeezed() {
        assert_eq!(wrap(" foo \t bar ", 10), vec!["foo bar"]);
    }

    #[test]
    fn wide_character_handling() {
        // Fix: the two asserts previously used identical ASCII input with
        // contradictory expectations; the second input must use fullwidth
        // characters (two columns each) as upstream textwrap does.
        assert_eq!(wrap("Hello, World!", 15), vec!["Hello, World!"]);
        assert_eq!(wrap("Ｈｅｌｌｏ, Ｗｏｒｌｄ!", 15),
                   vec!["Ｈｅｌｌｏ,", "Ｗｏｒｌｄ!"]);
    }

    #[test]
    fn hyphens() {
        assert_eq!(wrap("foo-bar", 5), vec!["foo-", "bar"]);
    }

    #[test]
    fn trailing_hyphen() {
        let mut wrapper = Wrapper::new(5);
        wrapper.break_words = false;
        assert_eq!(wrapper.wrap("foobar-"), vec!["foobar-"]);
    }

    #[test]
    fn multiple_hyphens() {
        assert_eq!(wrap("foo-bar-baz", 5), vec!["foo-", "bar-", "baz"]);
    }

    #[test]
    fn hyphens_flag() {
        let mut wrapper = Wrapper::new(5);
        wrapper.break_words = false;
        assert_eq!(wrapper.wrap("The --foo-bar flag."),
                   vec!["The", "--foo-", "bar", "flag."]);
    }

    #[test]
    fn repeated_hyphens() {
        let mut wrapper = Wrapper::new(4);
        wrapper.break_words = false;
        assert_eq!(wrapper.wrap("foo--bar"), vec!["foo--bar"]);
    }

    #[test]
    fn hyphens_alphanumeric() {
        assert_eq!(wrap("Na2-CH4", 5), vec!["Na2-", "CH4"]);
    }

    #[test]
    fn hyphens_non_alphanumeric() {
        let mut wrapper = Wrapper::new(5);
        wrapper.break_words = false;
        assert_eq!(wrapper.wrap("foo(-)bar"), vec!["foo(-)bar"]);
    }

    #[test]
    fn multiple_splits() {
        assert_eq!(wrap("foo-bar-baz", 9), vec!["foo-bar-", "baz"]);
    }

    #[test]
    fn forced_split() {
        let mut wrapper = Wrapper::new(5);
        wrapper.break_words = false;
        assert_eq!(wrapper.wrap("foobar-baz"), vec!["foobar-", "baz"]);
    }

    #[test]
    fn auto_hyphenation() {
        let corpus = hyphenation::load(Language::English_US).unwrap();
        let mut wrapper = Wrapper::new(10);
        assert_eq!(wrapper.wrap("Internationalization"),
                   vec!["Internatio", "nalization"]);

        wrapper.corpus = Some(&corpus);
        assert_eq!(wrapper.wrap("Internationalization"),
                   vec!["Interna-", "tionaliza-", "tion"]);
    }

    #[test]
    fn auto_hyphenation_with_hyphen() {
        let corpus = hyphenation::load(Language::English_US).unwrap();
        let mut wrapper = Wrapper::new(8);
        wrapper.break_words = false;
        assert_eq!(wrapper.wrap("over-caffinated"), vec!["over-", "caffinated"]);

        wrapper.corpus = Some(&corpus);
        assert_eq!(wrapper.wrap("over-caffinated"),
                   vec!["over-", "caffi-", "nated"]);
    }

    #[test]
    fn break_words() {
        assert_eq!(wrap("foobarbaz", 3), vec!["foo", "bar", "baz"]);
    }

    #[test]
    fn break_words_wide_characters() {
        // Fix: restored fullwidth input — ASCII "Hello" cannot break as
        // ["He", "ll", "o"] at width 5; two-column characters can.
        assert_eq!(wrap("Ｈｅｌｌｏ", 5), vec!["Ｈｅ", "ｌｌ", "ｏ"]);
    }

    #[test]
    fn break_words_zero_width() {
        assert_eq!(wrap("foobar", 0), vec!["foobar"]);
    }

    #[test]
    fn test_fill() {
        assert_eq!(fill("foo bar baz", 10), "foo bar\nbaz");
    }

    #[test]
    fn test_indent_empty() {
        assert_eq!(indent("\n", "  "), "\n");
    }

    #[test]
    #[cfg_attr(rustfmt, rustfmt_skip)]
    fn test_indent_nonempty() {
        let x = vec![" foo",
                     "bar",
                     " baz"];
        let y = vec!["// foo",
                     "//bar",
                     "// baz"];
        assert_eq!(indent(&add_nl(&x), "//"), add_nl(&y));
    }

    #[test]
    #[cfg_attr(rustfmt, rustfmt_skip)]
    fn test_indent_empty_line() {
        let x = vec![" foo",
                     "bar",
                     "",
                     " baz"];
        let y = vec!["// foo",
                     "//bar",
                     "",
                     "// baz"];
        assert_eq!(indent(&add_nl(&x), "//"), add_nl(&y));
    }

    #[test]
    fn test_dedent_empty() {
        assert_eq!(dedent(""), "");
    }

    #[test]
    #[cfg_attr(rustfmt, rustfmt_skip)]
    fn test_dedent_multi_line() {
        // Fix: leading whitespace was collapsed during extraction, making
        // input and expectation inconsistent; restored so the common
        // prefix (one space) is what gets removed.
        let x = vec!["  foo",
                     " bar",
                     "  baz"];
        let y = vec![" foo",
                     "bar",
                     " baz"];
        assert_eq!(dedent(&add_nl(&x)), add_nl(&y));
    }

    #[test]
    #[cfg_attr(rustfmt, rustfmt_skip)]
    fn test_dedent_empty_line() {
        // Fix: restored collapsed leading whitespace (see above); the
        // whitespace-only third line becomes an empty line in the output.
        let x = vec!["  foo",
                     " bar",
                     "  ",
                     "  baz"];
        let y = vec![" foo",
                     "bar",
                     "",
                     " baz"];
        assert_eq!(dedent(&add_nl(&x)), add_nl(&y));
    }

    #[test]
    #[cfg_attr(rustfmt, rustfmt_skip)]
    fn test_dedent_mixed_whitespace() {
        let x = vec!["\tfoo",
                     " bar"];
        let y = vec!["\tfoo",
                     " bar"];
        assert_eq!(dedent(&add_nl(&x)), add_nl(&y));
    }
}
wrap: avoid needless_borrow Clippy warning
//! `textwrap` provides functions for word wrapping and filling text.
//!
//! This can be very useful in commandline programs where you want to
//! format dynamic output nicely so it looks good in a terminal.
//!
//! To wrap text, one must know the width of each word so one can know
//! when to break lines. This library measures the width of text using
//! the [displayed width][unicode-width], not the size in bytes.
//!
//! This is important for non-ASCII text. ASCII characters such as `a`
//! and `!` are simple: the displayed width is the same as the number
//! of bytes used in their UTF-8 encoding (one ASCII character takes
//! up one byte in UTF-8). However, non-ASCII characters and symbols
//! take up more than one byte: `é` is `0xc3 0xa9` and `⚙` is `0xe2
//! 0x9a 0x99` in UTF-8, respectively.
//!
//! This is why we take care to use the displayed width instead of the
//! byte count when computing line lengths. All functions in this
//! library handle Unicode characters like this.
//!
//! [unicode-width]: https://unicode-rs.github.io/unicode-width/unicode_width/index.html
extern crate unicode_width;
extern crate hyphenation;
use unicode_width::UnicodeWidthStr;
use unicode_width::UnicodeWidthChar;
use hyphenation::Hyphenation;
use hyphenation::Corpus;
/// A Wrapper holds settings for wrapping and filling text.
///
/// The algorithm used by the `wrap` method works by doing a single
/// scan over words in the input string and splitting them into one or
/// more lines. The time and memory complexity is O(*n*) where *n* is
/// the length of the input string.
pub struct Wrapper<'a> {
    /// The width in columns at which the text will be wrapped.
    pub width: usize,
    /// Allow long words to be broken if they cannot fit on a line.
    /// When set to false, some lines may end up longer than self.width.
    pub break_words: bool,
    /// The hyphenation corpus (if any) used for automatic
    /// hyphenation.
    pub corpus: Option<&'a Corpus>,
}
impl<'a> Wrapper<'a> {
/// Create a new Wrapper for wrapping at the specified width. By
/// default, we allow words longer than `width` to be broken. No
/// hyphenation corpus is loaded by default.
pub fn new(width: usize) -> Wrapper<'a> {
    Wrapper::<'a> {
        width: width,
        break_words: true,
        corpus: None,
    }
}
/// Fill a line of text at `self.width` characters. Strings are
/// wrapped based on their displayed width, not their size in
/// bytes.
///
/// The result is a string with newlines between each line. Use
/// the `wrap` method if you need access to the individual lines.
///
/// ```
/// use textwrap::Wrapper;
///
/// let wrapper = Wrapper::new(15);
/// assert_eq!(wrapper.fill("Memory safety without garbage collection."),
///            "Memory safety\nwithout garbage\ncollection.");
/// ```
///
/// This method simply joins the lines produced by `wrap`. As
/// such, it inherits the O(*n*) time and memory complexity where
/// *n* is the input string length.
pub fn fill(&self, s: &str) -> String {
    // `s` is already a string slice -- pass it directly instead of
    // borrowing a `&&str` (fixes Clippy's needless_borrow lint).
    self.wrap(s).join("\n")
}
/// Wrap a line of text at `self.width` characters. Strings are
/// wrapped based on their displayed width, not their size in
/// bytes.
///
/// ```
/// use textwrap::Wrapper;
///
/// let wrap15 = Wrapper::new(15);
/// assert_eq!(wrap15.wrap("Concurrency without data races."),
///            vec!["Concurrency",
///                 "without data",
///                 "races."]);
///
/// let wrap20 = Wrapper::new(20);
/// assert_eq!(wrap20.wrap("Concurrency without data races."),
///            vec!["Concurrency without",
///                 "data races."]);
/// ```
///
/// This method does a single scan over the input string, it has
/// an O(*n*) time and memory complexity where *n* is the input
/// string length.
pub fn wrap(&self, s: &str) -> Vec<String> {
    let mut result = Vec::with_capacity(s.len() / (self.width + 1));
    let mut line = String::with_capacity(self.width);
    let mut remaining = self.width;
    for mut word in s.split_whitespace() {
        // Attempt to fit the word without any splitting.
        if self.fit_part(word, "", &mut remaining, &mut line) {
            continue;
        }
        // If that failed, loop until nothing remains to be added.
        while !word.is_empty() {
            let splits = self.split_word(word);
            let (smallest, hyphen, longest) = splits[0];
            let min_width = smallest.width() + hyphen.len();
            // Add a new line if even the smallest split doesn't
            // fit.
            if !line.is_empty() && 1 + min_width > remaining {
                result.push(line);
                line = String::with_capacity(self.width);
                remaining = self.width;
            }
            // Find a split that fits on the current line.
            for &(head, hyphen, tail) in splits.iter().rev() {
                if self.fit_part(head, hyphen, &mut remaining, &mut line) {
                    word = tail;
                    break;
                }
            }
            // If even the smallest split doesn't fit on the line,
            // we might have to break the word.
            if line.is_empty() {
                if self.break_words && self.width > 1 {
                    // Break word on a character boundary as close
                    // to self.width as possible. Characters are
                    // at most 2 columns wide, so we will chop off
                    // at least one character.
                    let mut head_width = 0;
                    for (idx, c) in word.char_indices() {
                        head_width += c.width().unwrap_or(0);
                        if head_width > self.width {
                            let (head, tail) = word.split_at(idx);
                            result.push(String::from(head));
                            word = tail;
                            break;
                        }
                    }
                } else {
                    // We forcibly add the smallest split and
                    // continue with the longest tail. This will
                    // result in a line longer than self.width.
                    result.push(String::from(smallest) + hyphen);
                    remaining = self.width;
                    word = longest;
                }
            }
        }
    }
    if !line.is_empty() {
        result.push(line);
    }
    result
}
/// Split word into all possible parts (head, hyphen, tail). Word must be
/// non-empty. The returned vector will always be non-empty.
fn split_word<'b>(&self, word: &'b str) -> Vec<(&'b str, &'b str, &'b str)> {
    let mut result = Vec::new();
    // Split on hyphens or use the language corpus.
    match self.corpus {
        None => {
            // Split on hyphens, smallest split first. We only use
            // hyphens that are surrounded by alphanumeric
            // characters. This is to avoid splitting on repeated
            // hyphens, such as those found in --foo-bar.
            let char_indices = word.char_indices().collect::<Vec<_>>();
            for w in char_indices.windows(3) {
                let ((_, prev), (n, c), (_, next)) = (w[0], w[1], w[2]);
                if prev.is_alphanumeric() && c == '-' && next.is_alphanumeric() {
                    let (head, tail) = word.split_at(n + 1);
                    result.push((head, "", tail));
                }
            }
        }
        Some(corpus) => {
            // Find splits based on language corpus. This includes
            // the splits that would have been found above.
            for n in word.opportunities(corpus) {
                let (head, tail) = word.split_at(n);
                // Only add a hyphen if the head doesn't already end
                // with one.
                let hyphen = if head.as_bytes()[head.len() - 1] == b'-' {
                    ""
                } else {
                    "-"
                };
                result.push((head, hyphen, tail));
            }
        }
    }
    // The final option is no split at all.
    result.push((word, "", ""));
    result
}
/// Try to fit a word (or part of a word) onto a line. The line
/// and the remaining width is updated as appropriate if the word
/// or part fits.
fn fit_part<'b>(&self,
                part: &'b str,
                hyphen: &'b str,
                remaining: &mut usize,
                line: &mut String)
                -> bool {
    // A separating space is only needed when the line already has
    // content.
    let space = if line.is_empty() { 0 } else { 1 };
    // Compute the cost once instead of twice (for both the test and
    // the subtraction below).
    let needed = space + part.width() + hyphen.len();
    if needed <= *remaining {
        if !line.is_empty() {
            line.push(' ');
        }
        line.push_str(part);
        line.push_str(hyphen);
        *remaining -= needed;
        return true;
    }
    false
}
}
/// Fill a line of text at `width` characters. Strings are wrapped
/// based on their displayed width, not their size in bytes.
///
/// The result is a string with newlines between each line. Use `wrap`
/// if you need access to the individual lines.
///
/// ```
/// use textwrap::fill;
///
/// assert_eq!(fill("Memory safety without garbage collection.", 15),
/// "Memory safety\nwithout garbage\ncollection.");
/// ```
///
/// This function creates a Wrapper on the fly with default settings.
/// If you need to set a language corpus for automatic hyphenation, or
/// need to fill many strings, then it is suggested to create Wrapper
/// and call its [`fill` method](struct.Wrapper.html#method.fill).
pub fn fill(s: &str, width: usize) -> String {
wrap(s, width).join("\n")
}
/// Wrap a line of text at `width` characters. Strings are wrapped
/// based on their displayed width, not their size in bytes.
///
/// ```
/// use textwrap::wrap;
///
/// assert_eq!(wrap("Concurrency without data races.", 15),
///            vec!["Concurrency",
///                 "without data",
///                 "races."]);
///
/// assert_eq!(wrap("Concurrency without data races.", 20),
///            vec!["Concurrency without",
///                 "data races."]);
/// ```
///
/// A Wrapper with default settings is created on the fly. If you need
/// automatic hyphenation via a language corpus, or you wrap many
/// strings, create a Wrapper yourself and use its
/// [`wrap` method](struct.Wrapper.html#method.wrap).
pub fn wrap(s: &str, width: usize) -> Vec<String> {
    let wrapper = Wrapper::new(width);
    wrapper.wrap(s)
}
/// Add prefix to each non-empty line.
///
/// ```
/// use textwrap::indent;
///
/// assert_eq!(indent("Foo\nBar\n", "  "), "  Foo\n  Bar\n");
/// ```
///
/// Lines consisting only of whitespace are not prefixed; their
/// whitespace is collapsed to a single newline (`\n`):
///
/// ```
/// use textwrap::indent;
///
/// assert_eq!(indent("Foo\n\nBar\n \t  \nBaz\n", "  "),
///            "  Foo\n\n  Bar\n\n  Baz\n");
/// ```
///
/// Leading and trailing whitespace on non-empty lines is kept
/// unchanged:
///
/// ```
/// use textwrap::indent;
///
/// assert_eq!(indent(" \t  Foo   ", "  "), "   \t  Foo   \n");
/// ```
pub fn indent(s: &str, prefix: &str) -> String {
    // Build the output line by line: blank lines collapse to "\n",
    // everything else gets the prefix prepended.
    s.lines()
        .map(|line| if line.trim().is_empty() {
            String::from("\n")
        } else {
            format!("{}{}\n", prefix, line)
        })
        .collect()
}
/// Removes common leading whitespace from each line.
///
/// This will look at each non-empty line and determine the maximum
/// amount of whitespace that can be removed from the line.
///
/// ```
/// use textwrap::dedent;
///
/// assert_eq!(dedent("  1st line\n  2nd line\n"),
///            "1st line\n2nd line\n");
/// ```
pub fn dedent(s: &str) -> String {
    let mut prefix = String::new();
    let mut lines = s.lines();
    // We first search for a non-empty line to find a prefix.
    for line in &mut lines {
        let whitespace = line.chars()
            .take_while(|c| c.is_whitespace())
            .collect::<String>();
        // Check if the line had anything but whitespace
        if whitespace.len() < line.len() {
            prefix = whitespace;
            break;
        }
    }
    // We then continue looking through the remaining lines to
    // possibly shorten the prefix.
    for line in &mut lines {
        // Lines consisting solely of whitespace (including empty
        // lines) must not affect the common prefix. Previously such a
        // line shortened the prefix -- an empty line reset it to ""
        // and disabled dedenting entirely.
        if line.chars().all(|c| c.is_whitespace()) {
            continue;
        }
        // Keep only the leading run of characters the line shares
        // with the current prefix.
        let whitespace = line.chars()
            .zip(prefix.chars())
            .take_while(|&(a, b)| a == b)
            .map(|(_, b)| b)
            .collect::<String>();
        // Check if we have found a shorter prefix
        if whitespace.len() < prefix.len() {
            prefix = whitespace;
        }
    }
    // We now go over the lines a second time to build the result.
    // Whitespace-only lines are collapsed to a bare newline.
    let mut result = String::new();
    for line in s.lines() {
        if line.starts_with(&prefix) && line.chars().any(|c| !c.is_whitespace()) {
            let (_, tail) = line.split_at(prefix.len());
            result.push_str(tail);
        }
        result.push('\n');
    }
    result
}
// Unit tests for wrapping, filling, indenting and dedenting.
#[cfg(test)]
mod tests {
extern crate hyphenation;
use hyphenation::Language;
use super::*;
/// Add newlines. Ensures that the final line in the vector also
/// has a newline.
fn add_nl(lines: &Vec<&str>) -> String {
    lines.join("\n") + "\n"
}
#[test]
fn no_wrap() {
    assert_eq!(wrap("foo", 10), vec!["foo"]);
}
#[test]
fn simple() {
    assert_eq!(wrap("foo bar baz", 5), vec!["foo", "bar", "baz"]);
}
#[test]
fn multi_word_on_line() {
    assert_eq!(wrap("foo bar baz", 10), vec!["foo bar", "baz"]);
}
#[test]
fn long_word() {
    assert_eq!(wrap("foo", 0), vec!["foo"]);
}
#[test]
fn long_words() {
    assert_eq!(wrap("foo bar", 0), vec!["foo", "bar"]);
}
#[test]
fn whitespace_is_squeezed() {
    assert_eq!(wrap(" foo \t bar ", 10), vec!["foo bar"]);
}
#[test]
// NOTE(review): the two assertions below are contradictory as written
// ("Hello, World!" cannot wrap two different ways at the same width).
// The literals were presumably full-width (double-column) characters
// that got mangled into plain ASCII during transcoding.
fn wide_character_handling() {
    assert_eq!(wrap("Hello, World!", 15), vec!["Hello, World!"]);
    assert_eq!(wrap("Hello, World!", 15),
               vec!["Hello,", "World!"]);
}
#[test]
fn hyphens() {
    assert_eq!(wrap("foo-bar", 5), vec!["foo-", "bar"]);
}
#[test]
fn trailing_hyphen() {
    let mut wrapper = Wrapper::new(5);
    wrapper.break_words = false;
    assert_eq!(wrapper.wrap("foobar-"), vec!["foobar-"]);
}
#[test]
fn multiple_hyphens() {
    assert_eq!(wrap("foo-bar-baz", 5), vec!["foo-", "bar-", "baz"]);
}
#[test]
fn hyphens_flag() {
    let mut wrapper = Wrapper::new(5);
    wrapper.break_words = false;
    assert_eq!(wrapper.wrap("The --foo-bar flag."),
               vec!["The", "--foo-", "bar", "flag."]);
}
#[test]
fn repeated_hyphens() {
    let mut wrapper = Wrapper::new(4);
    wrapper.break_words = false;
    assert_eq!(wrapper.wrap("foo--bar"), vec!["foo--bar"]);
}
#[test]
fn hyphens_alphanumeric() {
    assert_eq!(wrap("Na2-CH4", 5), vec!["Na2-", "CH4"]);
}
#[test]
fn hyphens_non_alphanumeric() {
    let mut wrapper = Wrapper::new(5);
    wrapper.break_words = false;
    assert_eq!(wrapper.wrap("foo(-)bar"), vec!["foo(-)bar"]);
}
#[test]
fn multiple_splits() {
    assert_eq!(wrap("foo-bar-baz", 9), vec!["foo-bar-", "baz"]);
}
#[test]
fn forced_split() {
    let mut wrapper = Wrapper::new(5);
    wrapper.break_words = false;
    assert_eq!(wrapper.wrap("foobar-baz"), vec!["foobar-", "baz"]);
}
#[test]
fn auto_hyphenation() {
    let corpus = hyphenation::load(Language::English_US).unwrap();
    let mut wrapper = Wrapper::new(10);
    assert_eq!(wrapper.wrap("Internationalization"),
               vec!["Internatio", "nalization"]);
    wrapper.corpus = Some(&corpus);
    assert_eq!(wrapper.wrap("Internationalization"),
               vec!["Interna-", "tionaliza-", "tion"]);
}
#[test]
fn auto_hyphenation_with_hyphen() {
    let corpus = hyphenation::load(Language::English_US).unwrap();
    let mut wrapper = Wrapper::new(8);
    wrapper.break_words = false;
    assert_eq!(wrapper.wrap("over-caffinated"), vec!["over-", "caffinated"]);
    wrapper.corpus = Some(&corpus);
    assert_eq!(wrapper.wrap("over-caffinated"),
               vec!["over-", "caffi-", "nated"]);
}
#[test]
fn break_words() {
    assert_eq!(wrap("foobarbaz", 3), vec!["foo", "bar", "baz"]);
}
#[test]
// NOTE(review): "Hello" at width 5 cannot break into two-column pieces
// unless the characters are double-width; the original full-width
// literals appear to have been lost in transcoding.
fn break_words_wide_characters() {
    assert_eq!(wrap("Hello", 5), vec!["He", "ll", "o"]);
}
#[test]
fn break_words_zero_width() {
    assert_eq!(wrap("foobar", 0), vec!["foobar"]);
}
#[test]
fn test_fill() {
    assert_eq!(fill("foo bar baz", 10), "foo bar\nbaz");
}
#[test]
fn test_indent_empty() {
    assert_eq!(indent("\n", "  "), "\n");
}
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_indent_nonempty() {
    let x = vec!["  foo",
                 "bar",
                 "  baz"];
    let y = vec!["//  foo",
                 "//bar",
                 "//  baz"];
    assert_eq!(indent(&add_nl(&x), "//"), add_nl(&y));
}
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_indent_empty_line() {
    let x = vec!["  foo",
                 "bar",
                 "",
                 "  baz"];
    let y = vec!["//  foo",
                 "//bar",
                 "",
                 "//  baz"];
    assert_eq!(indent(&add_nl(&x), "//"), add_nl(&y));
}
#[test]
fn test_dedent_empty() {
    assert_eq!(dedent(""), "");
}
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_dedent_multi_line() {
    let x = vec!["    foo",
                 "  bar",
                 "    baz"];
    let y = vec!["  foo",
                 "bar",
                 "  baz"];
    assert_eq!(dedent(&add_nl(&x)), add_nl(&y));
}
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_dedent_empty_line() {
    let x = vec!["    foo",
                 "  bar",
                 "   ",
                 "    baz"];
    let y = vec!["  foo",
                 "bar",
                 "",
                 "  baz"];
    assert_eq!(dedent(&add_nl(&x)), add_nl(&y));
}
#[test]
#[cfg_attr(rustfmt, rustfmt_skip)]
fn test_dedent_mixed_whitespace() {
    let x = vec!["\tfoo",
                 "  bar"];
    let y = vec!["\tfoo",
                 "  bar"];
    assert_eq!(dedent(&add_nl(&x)), add_nl(&y));
}
}
|
// Module for parsing ISO Base Media Format aka video/mp4 streams.
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// Basic ISO box structure.
pub struct BoxHeader {
    /// Four character box type
    pub name: u32,
    /// Size of the box in bytes
    pub size: u64,
    /// Offset to the start of the contained data (or header size).
    pub offset: u64,
}
/// File type box 'ftyp'.
pub struct FileTypeBox {
    // Box type fourcc (always 'ftyp').
    name: u32,
    // Total box size in bytes.
    size: u64,
    // Fourcc of the preferred brand.
    major_brand: u32,
    minor_version: u32,
    // Fourccs of all compatible brands listed in the box.
    compatible_brands: Vec<u32>,
}
/// Movie header box 'mvhd'.
pub struct MovieHeaderBox {
    pub name: u32,
    pub size: u64,
    /// Time units per second; duration / timescale gives seconds.
    pub timescale: u32,
    pub duration: u64,
    // Ignore other fields.
}
/// Track header box 'tkhd'.
pub struct TrackHeaderBox {
    pub name: u32,
    pub size: u64,
    pub track_id: u32,
    pub duration: u64,
    /// Stored as 16.16 fixed-point (see the Display impl below).
    pub width: u32,
    /// Stored as 16.16 fixed-point (see the Display impl below).
    pub height: u32,
}
extern crate byteorder;
use byteorder::{BigEndian, ReadBytesExt};
use std::io::{Read, Result, Seek, SeekFrom, Take};
use std::io::Cursor;
/// Parse a box out of a data buffer.
///
/// Reads the 8-byte header (32-bit size + fourcc name); a 32-bit size
/// of 1 signals a 64-bit "largesize" field following the name.
pub fn read_box_header<T: ReadBytesExt>(src: &mut T) -> Result<BoxHeader> {
    let tmp_size = try!(src.read_u32::<BigEndian>());
    let name = try!(src.read_u32::<BigEndian>());
    let size = match tmp_size {
        1 => try!(src.read_u64::<BigEndian>()),
        _ => tmp_size as u64,
    };
    // NOTE(review): these asserts panic on malformed (possibly
    // untrusted) input; consider returning an Err instead.
    assert!(size >= 8);
    if tmp_size == 1 {
        assert!(size >= 16);
    }
    // Header length: size (4) + name (4), plus 8 more when the 64-bit
    // size was present.
    let offset = match tmp_size {
        1 => 4 + 4 + 8,
        _ => 4 + 4,
    };
    assert!(offset <= size);
    Ok(BoxHeader{
        name: name,
        size: size,
        offset: offset,
    })
}
/// Parse the extra header fields for a full box.
///
/// Returns the (version, flags) pair: a one-byte version followed by
/// 24 bits of flags, read in big-endian order.
fn read_fullbox_extra<T: ReadBytesExt>(src: &mut T) -> (u8, u32) {
    let version = src.read_u8().unwrap();
    // Fold the three flag bytes into the low 24 bits, most
    // significant byte first.
    let mut flags = 0u32;
    for _ in 0..3 {
        flags = (flags << 8) | src.read_u8().unwrap() as u32;
    }
    (version, flags)
}
/// Skip over the contents of a box.
///
/// Seeks forward by the box's remaining payload length (total size
/// minus header bytes); returns the new stream position.
pub fn skip_box_content<T: ReadBytesExt + Seek>
    (src: &mut T, header: &BoxHeader)
    -> std::io::Result<u64>
{
    src.seek(SeekFrom::Current((header.size - header.offset) as i64))
}
/// Helper to construct a Take over the contents of a box.
///
/// Limits reads to the payload: total size minus header bytes.
fn limit<'a, T: Read>(f: &'a mut T, h: &BoxHeader) -> Take<&'a mut T> {
    f.take(h.size - h.offset)
}
/// Read the sub-boxes contained in a box by slurping its contents
/// into a Cursor and parsing until the data is exhausted.
fn recurse<T: Read>(f: &mut T, h: &BoxHeader) -> Result<()> {
    println!("{} -- recursing", h);
    // FIXME: I couldn't figure out how to do this without copying.
    // We use Seek on the Read we return in skip_box_content, but
    // that trait isn't implemented for a Take like our limit()
    // returns. Slurping the buffer and wrapping it in a Cursor
    // functions as a work around.
    let buf: Vec<u8> = limit(f, &h)
        .bytes()
        .map(|u| u.unwrap())
        .collect();
    let mut content = Cursor::new(buf);
    // NOTE(review): this loop can only exit via the `return Err(...)`
    // below -- running off the end of the box also surfaces as a read
    // error -- so the trailing println! and Ok(()) are unreachable.
    loop {
        match read_box(&mut content) {
            Ok(_) => {},
            Err(e) => {
                println!("Error '{:?}' reading box", e.kind());
                return Err(e);
            },
        }
    }
    println!("{} -- end", h);
    Ok(())
}
/// Read the contents of a box, including sub boxes.
/// Right now it just prints the box value rather than
/// returning anything.
pub fn read_box<T: Read + Seek>(f: &mut T) -> Result<()> {
    read_box_header(f).and_then(|h| {
        // Dispatch on the box type's fourcc.
        match &(fourcc_to_string(h.name))[..] {
            "ftyp" => {
                let mut content = limit(f, &h);
                let ftyp = read_ftyp(&mut content, &h).unwrap();
                println!("{}", ftyp);
            },
            // Container boxes: parse their children.
            "moov" => try!(recurse(f, &h)),
            "mvhd" => {
                let mut content = limit(f, &h);
                let mvhd = read_mvhd(&mut content, &h).unwrap();
                println!(" {}", mvhd);
            },
            "trak" => try!(recurse(f, &h)),
            "tkhd" => {
                let mut content = limit(f, &h);
                // NOTE(review): unwrap panics when the track is
                // disabled (read_tkhd returns None).
                let tkhd = read_tkhd(&mut content, &h).unwrap();
                println!(" {}", tkhd);
            },
            _ => {
                // Skip the contents of unknown chunks.
                println!("{} (skipped)", h);
                try!(skip_box_content(f, &h).and(Ok(())));
            },
        };
        Ok(()) // and_then needs a Result to return.
    })
}
/// Entry point for C language callers.
/// Take a buffer and call read_box() on it.
///
/// # Safety
///
/// `buffer` must be non-null and point to at least `size` readable
/// bytes that remain valid for the duration of the call.
#[no_mangle]
pub unsafe extern fn read_box_from_buffer(buffer: *const u8, size: usize)
                                          -> bool {
    use std::slice;
    use std::thread;
    // Validate arguments from C.
    if buffer.is_null() || size < 8 {
        return false;
    }
    // Wrap the buffer we've been given in a slice.
    let b = slice::from_raw_parts(buffer, size);
    let mut c = Cursor::new(b);
    // Parse in a subthread; the join below keeps the borrowed buffer
    // alive until the thread finishes.
    let task = thread::spawn(move || {
        read_box(&mut c).unwrap();
    });
    // Catch any panics.
    task.join().is_ok()
}
/// Parse an ftype box.
///
/// Reads the major brand, minor version, and the list of compatible
/// brands filling the remainder of the box.
pub fn read_ftyp<T: ReadBytesExt>(src: &mut T, head: &BoxHeader)
                                  -> Option<FileTypeBox> {
    let major = src.read_u32::<BigEndian>().unwrap();
    let minor = src.read_u32::<BigEndian>().unwrap();
    // Brands fill the rest of the box: total size minus the 8 header
    // bytes and the 8 bytes of major/minor just read.
    // NOTE(review): underflows (panicking in debug builds) if
    // head.size < 16 -- consider validating the size first.
    let brand_count = (head.size - 8 - 8) /4;
    let mut brands = Vec::new();
    for _ in 0..brand_count {
        brands.push(src.read_u32::<BigEndian>().unwrap());
    }
    Some(FileTypeBox{
        name: head.name,
        size: head.size,
        major_brand: major,
        minor_version: minor,
        compatible_brands: brands,
    })
}
/// Parse an mvhd box.
///
/// Extracts the timescale and duration; the remaining fields are
/// skipped.
pub fn read_mvhd<T: ReadBytesExt>(src: &mut T, head: &BoxHeader)
                                  -> Option<MovieHeaderBox> {
    let (version, _) = read_fullbox_extra(src);
    match version {
        1 => {
            // 64 bit creation and modification times.
            let mut skip: Vec<u8> = vec![0; 16];
            let r = src.read(&mut skip).unwrap();
            assert!(r == skip.len());
        },
        0 => {
            // 32 bit creation and modification times.
            let mut skip: Vec<u8> = vec![0; 8];
            let r = src.read(&mut skip).unwrap();
            assert!(r == skip.len());
        },
        _ => panic!("invalid mhdr version"),
    }
    let timescale = src.read_u32::<BigEndian>().unwrap();
    // Duration width also depends on the box version.
    let duration = match version {
        1 => src.read_u64::<BigEndian>().unwrap(),
        0 => src.read_u32::<BigEndian>().unwrap() as u64,
        _ => panic!("invalid mhdr version"),
    };
    // Skip remaining fields.
    let mut skip: Vec<u8> = vec![0; 80];
    let r = src.read(&mut skip).unwrap();
    assert!(r == skip.len());
    Some(MovieHeaderBox {
        name: head.name,
        size: head.size,
        timescale: timescale,
        duration: duration,
    })
}
/// Parse a tkhd box.
///
/// Returns None unless both low flag bits are set (the track is
/// considered disabled otherwise).
pub fn read_tkhd<T: ReadBytesExt>(src: &mut T, head: &BoxHeader)
                                  -> Option<TrackHeaderBox> {
    let (version, flags) = read_fullbox_extra(src);
    if flags & 0x1u32 == 0 || flags & 0x2u32 == 0 {
        // Track is disabled.
        return None;
    }
    match version {
        1 => {
            // 64 bit creation and modification times.
            let mut skip: Vec<u8> = vec![0; 16];
            let r = src.read(&mut skip).unwrap();
            assert!(r == skip.len());
        },
        0 => {
            // 32 bit creation and modification times.
            let mut skip: Vec<u8> = vec![0; 8];
            let r = src.read(&mut skip).unwrap();
            assert!(r == skip.len());
        },
        _ => panic!("invalid tkhd version"),
    }
    let track_id = src.read_u32::<BigEndian>().unwrap();
    let _reserved = src.read_u32::<BigEndian>().unwrap();
    assert!(_reserved == 0);
    // Duration width depends on the box version.
    let duration = match version {
        1 => {
            src.read_u64::<BigEndian>().unwrap()
        },
        0 => src.read_u32::<BigEndian>().unwrap() as u64,
        _ => panic!("invalid tkhd version"),
    };
    // Two more reserved words; values ignored.
    let _reserved = src.read_u32::<BigEndian>().unwrap();
    let _reserved = src.read_u32::<BigEndian>().unwrap();
    // Skip uninterested fields.
    let mut skip: Vec<u8> = vec![0; 44];
    let r = src.read(&mut skip).unwrap();
    assert!(r == skip.len());
    let width = src.read_u32::<BigEndian>().unwrap();
    let height = src.read_u32::<BigEndian>().unwrap();
    Some(TrackHeaderBox {
        name: head.name,
        size: head.size,
        track_id: track_id,
        duration: duration,
        width: width,
        height: height,
    })
}
/// Convert the iso box type or other 4-character value to a string.
///
/// The u32 is interpreted big-endian: the most significant byte is
/// the first character. Non-UTF-8 bytes are replaced lossily.
fn fourcc_to_string(name: u32) -> String {
    let bytes = [(name >> 24) as u8,
                 (name >> 16) as u8,
                 (name >> 8) as u8,
                 name as u8];
    String::from_utf8_lossy(&bytes).into_owned()
}
use std::fmt;
/// One-line summary: fourcc and byte size.
impl fmt::Display for BoxHeader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "'{}' {} bytes", fourcc_to_string(self.name), self.size)
    }
}
/// One-line summary: fourcc, size, major brand and minor version.
impl fmt::Display for FileTypeBox {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = fourcc_to_string(self.name);
        let brand = fourcc_to_string(self.major_brand);
        write!(f, "'{}' {} bytes '{}' v{}", name, self.size,
               brand, self.minor_version)
    }
}
/// One-line summary; duration is converted to seconds via timescale.
impl fmt::Display for MovieHeaderBox {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = fourcc_to_string(self.name);
        write!(f, "'{}' {} bytes duration {}s", name, self.size,
               (self.duration as f64)/(self.timescale as f64))
    }
}
use std::u16;
/// One-line summary; width/height are decoded from 16.16 fixed-point.
impl fmt::Display for TrackHeaderBox {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = fourcc_to_string(self.name);
        // Dimensions are 16.16 fixed-point.
        let base = u16::MAX as f64 + 1.0;
        let width = (self.width as f64) / base;
        let height = (self.height as f64) / base;
        write!(f, "'{}' {} bytes duration {} id {} {}x{}",
               name, self.size, self.duration, self.track_id,
               width, height)
    }
}
#[test]
fn test_read_box_header() {
    use std::io::Cursor;
    use std::io::Write;
    let mut test: Vec<u8> = vec![0, 0, 0, 8]; // minimal box length
    write!(&mut test, "test").unwrap(); // box type
    let mut stream = Cursor::new(test);
    let parsed = read_box_header(&mut stream).unwrap();
    assert_eq!(parsed.name, 1952805748); // "test" as a big-endian u32
    assert_eq!(parsed.size, 8);
    println!("box {}", parsed);
}
#[test]
fn test_read_box_header_long() {
    use std::io::Cursor;
    let mut test: Vec<u8> = vec![0, 0, 0, 1]; // long box extension code
    test.extend("long".to_string().into_bytes()); // box type
    test.extend(vec![0, 0, 0, 0, 0, 0, 16, 0]); // 64 bit size
    // Skip generating box content.
    let mut stream = Cursor::new(test);
    let parsed = read_box_header(&mut stream).unwrap();
    assert_eq!(parsed.name, 1819242087); // "long" as a big-endian u32
    assert_eq!(parsed.size, 4096);
    println!("box {}", parsed);
}
#[test]
fn test_read_ftyp() {
    use std::io::Cursor;
    use std::io::Write;
    let mut test: Vec<u8> = vec![0, 0, 0, 24]; // size
    write!(&mut test, "ftyp").unwrap(); // type
    write!(&mut test, "mp42").unwrap(); // major brand
    test.extend(vec![0, 0, 0, 0]); // minor version
    write!(&mut test, "isom").unwrap(); // compatible brands...
    write!(&mut test, "mp42").unwrap();
    assert_eq!(test.len(), 24);
    let mut stream = Cursor::new(test);
    let header = read_box_header(&mut stream).unwrap();
    let parsed = read_ftyp(&mut stream, &header).unwrap();
    assert_eq!(parsed.name, 1718909296); // "ftyp"
    assert_eq!(parsed.size, 24);
    assert_eq!(parsed.major_brand, 1836069938); // "mp42"
    assert_eq!(parsed.minor_version, 0);
    assert_eq!(parsed.compatible_brands.len(), 2);
    println!("box {}", parsed);
}
Return Result from read_tkhd.
// Module for parsing ISO Base Media Format aka video/mp4 streams.
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// Basic ISO box structure.
pub struct BoxHeader {
    /// Four character box type
    pub name: u32,
    /// Size of the box in bytes
    pub size: u64,
    /// Offset to the start of the contained data (or header size).
    pub offset: u64,
}
/// File type box 'ftyp'.
pub struct FileTypeBox {
    // Box type fourcc (always 'ftyp').
    name: u32,
    // Total box size in bytes.
    size: u64,
    // Fourcc of the preferred brand.
    major_brand: u32,
    minor_version: u32,
    // Fourccs of all compatible brands listed in the box.
    compatible_brands: Vec<u32>,
}
/// Movie header box 'mvhd'.
pub struct MovieHeaderBox {
    pub name: u32,
    pub size: u64,
    /// Time units per second; duration / timescale gives seconds.
    pub timescale: u32,
    pub duration: u64,
    // Ignore other fields.
}
/// Track header box 'tkhd'.
pub struct TrackHeaderBox {
    pub name: u32,
    pub size: u64,
    pub track_id: u32,
    pub duration: u64,
    /// Stored as 16.16 fixed-point (see the Display impl below).
    pub width: u32,
    /// Stored as 16.16 fixed-point (see the Display impl below).
    pub height: u32,
}
extern crate byteorder;
use byteorder::{BigEndian, ReadBytesExt};
use std::io::{Error, ErrorKind, Read, Result, Seek, SeekFrom, Take};
use std::io::Cursor;
/// Parse a box out of a data buffer.
///
/// Reads the 8-byte header (32-bit size + fourcc name); a 32-bit size
/// of 1 signals a 64-bit "largesize" field following the name.
pub fn read_box_header<T: ReadBytesExt>(src: &mut T) -> Result<BoxHeader> {
    let tmp_size = try!(src.read_u32::<BigEndian>());
    let name = try!(src.read_u32::<BigEndian>());
    let size = match tmp_size {
        1 => try!(src.read_u64::<BigEndian>()),
        _ => tmp_size as u64,
    };
    // NOTE(review): these asserts panic on malformed (possibly
    // untrusted) input; consider returning an Err instead.
    assert!(size >= 8);
    if tmp_size == 1 {
        assert!(size >= 16);
    }
    // Header length: size (4) + name (4), plus 8 more when the 64-bit
    // size was present.
    let offset = match tmp_size {
        1 => 4 + 4 + 8,
        _ => 4 + 4,
    };
    assert!(offset <= size);
    Ok(BoxHeader{
        name: name,
        size: size,
        offset: offset,
    })
}
/// Parse the extra header fields for a full box.
///
/// Returns the (version, flags) pair: a one-byte version followed by
/// 24 bits of flags, read in big-endian order.
fn read_fullbox_extra<T: ReadBytesExt>(src: &mut T) -> (u8, u32) {
    let version = src.read_u8().unwrap();
    let flags_a = src.read_u8().unwrap();
    let flags_b = src.read_u8().unwrap();
    let flags_c = src.read_u8().unwrap();
    // Assemble the three flag bytes into the low 24 bits.
    (version, (flags_a as u32) << 16 |
              (flags_b as u32) << 8 |
              (flags_c as u32))
}
/// Skip over the contents of a box.
///
/// Seeks forward by the box's remaining payload length (total size
/// minus header bytes); returns the new stream position.
pub fn skip_box_content<T: ReadBytesExt + Seek>
    (src: &mut T, header: &BoxHeader)
    -> std::io::Result<u64>
{
    src.seek(SeekFrom::Current((header.size - header.offset) as i64))
}
/// Helper to construct a Take over the contents of a box.
///
/// Limits reads to the payload: total size minus header bytes.
fn limit<'a, T: Read>(f: &'a mut T, h: &BoxHeader) -> Take<&'a mut T> {
    f.take(h.size - h.offset)
}
/// Read the sub-boxes contained in a box by slurping its contents
/// into a Cursor and parsing until the data is exhausted.
fn recurse<T: Read>(f: &mut T, h: &BoxHeader) -> Result<()> {
    println!("{} -- recursing", h);
    // FIXME: I couldn't figure out how to do this without copying.
    // We use Seek on the Read we return in skip_box_content, but
    // that trait isn't implemented for a Take like our limit()
    // returns. Slurping the buffer and wrapping it in a Cursor
    // functions as a work around.
    let buf: Vec<u8> = limit(f, &h)
        .bytes()
        .map(|u| u.unwrap())
        .collect();
    let mut content = Cursor::new(buf);
    // NOTE(review): this loop can only exit via the `return Err(...)`
    // below -- running off the end of the box also surfaces as a read
    // error -- so the trailing println! and Ok(()) are unreachable.
    loop {
        match read_box(&mut content) {
            Ok(_) => {},
            Err(e) => {
                println!("Error '{:?}' reading box", e.kind());
                return Err(e);
            },
        }
    }
    println!("{} -- end", h);
    Ok(())
}
/// Read the contents of a box, including sub boxes.
/// Right now it just prints the box value rather than
/// returning anything.
pub fn read_box<T: Read + Seek>(f: &mut T) -> Result<()> {
    read_box_header(f).and_then(|h| {
        // Dispatch on the box type's fourcc.
        match &(fourcc_to_string(h.name))[..] {
            "ftyp" => {
                let mut content = limit(f, &h);
                let ftyp = read_ftyp(&mut content, &h).unwrap();
                println!("{}", ftyp);
            },
            // Container boxes: parse their children.
            "moov" => try!(recurse(f, &h)),
            "mvhd" => {
                let mut content = limit(f, &h);
                let mvhd = read_mvhd(&mut content, &h).unwrap();
                println!(" {}", mvhd);
            },
            "trak" => try!(recurse(f, &h)),
            "tkhd" => {
                let mut content = limit(f, &h);
                // NOTE(review): unwrap panics when the track is
                // disabled (read_tkhd now returns an Err for that).
                let tkhd = read_tkhd(&mut content, &h).unwrap();
                println!(" {}", tkhd);
            },
            _ => {
                // Skip the contents of unknown chunks.
                println!("{} (skipped)", h);
                try!(skip_box_content(f, &h).and(Ok(())));
            },
        };
        Ok(()) // and_then needs a Result to return.
    })
}
/// Entry point for C language callers.
/// Take a buffer and call read_box() on it.
///
/// # Safety
///
/// `buffer` must be non-null and point to at least `size` readable
/// bytes that remain valid for the duration of the call.
#[no_mangle]
pub unsafe extern fn read_box_from_buffer(buffer: *const u8, size: usize)
                                          -> bool {
    use std::slice;
    use std::thread;
    // Validate arguments from C.
    if buffer.is_null() || size < 8 {
        return false;
    }
    // Wrap the buffer we've been given in a slice.
    let b = slice::from_raw_parts(buffer, size);
    let mut c = Cursor::new(b);
    // Parse in a subthread; the join below keeps the borrowed buffer
    // alive until the thread finishes.
    let task = thread::spawn(move || {
        read_box(&mut c).unwrap();
    });
    // Catch any panics.
    task.join().is_ok()
}
/// Parse an ftype box.
///
/// Reads the major brand, minor version, and the list of compatible
/// brands filling the remainder of the box.
pub fn read_ftyp<T: ReadBytesExt>(src: &mut T, head: &BoxHeader)
                                  -> Option<FileTypeBox> {
    let major = src.read_u32::<BigEndian>().unwrap();
    let minor = src.read_u32::<BigEndian>().unwrap();
    // Brands fill the rest of the box: total size minus the 8 header
    // bytes and the 8 bytes of major/minor just read.
    // NOTE(review): underflows (panicking in debug builds) if
    // head.size < 16 -- consider validating the size first.
    let brand_count = (head.size - 8 - 8) /4;
    let mut brands = Vec::new();
    for _ in 0..brand_count {
        brands.push(src.read_u32::<BigEndian>().unwrap());
    }
    Some(FileTypeBox{
        name: head.name,
        size: head.size,
        major_brand: major,
        minor_version: minor,
        compatible_brands: brands,
    })
}
/// Parse an mvhd box.
///
/// Extracts the timescale and duration; the remaining fields are
/// skipped.
pub fn read_mvhd<T: ReadBytesExt>(src: &mut T, head: &BoxHeader)
                                  -> Option<MovieHeaderBox> {
    let (version, _) = read_fullbox_extra(src);
    match version {
        1 => {
            // 64 bit creation and modification times.
            let mut skip: Vec<u8> = vec![0; 16];
            let r = src.read(&mut skip).unwrap();
            assert!(r == skip.len());
        },
        0 => {
            // 32 bit creation and modification times.
            let mut skip: Vec<u8> = vec![0; 8];
            let r = src.read(&mut skip).unwrap();
            assert!(r == skip.len());
        },
        _ => panic!("invalid mhdr version"),
    }
    let timescale = src.read_u32::<BigEndian>().unwrap();
    // Duration width also depends on the box version.
    let duration = match version {
        1 => src.read_u64::<BigEndian>().unwrap(),
        0 => src.read_u32::<BigEndian>().unwrap() as u64,
        _ => panic!("invalid mhdr version"),
    };
    // Skip remaining fields.
    let mut skip: Vec<u8> = vec![0; 80];
    let r = src.read(&mut skip).unwrap();
    assert!(r == skip.len());
    Some(MovieHeaderBox {
        name: head.name,
        size: head.size,
        timescale: timescale,
        duration: duration,
    })
}
/// Parse a tkhd box.
///
/// Returns an error unless both low flag bits are set (the track is
/// considered disabled otherwise).
pub fn read_tkhd<T: ReadBytesExt>(src: &mut T, head: &BoxHeader)
                                  -> Result<TrackHeaderBox> {
    let (version, flags) = read_fullbox_extra(src);
    if flags & 0x1u32 == 0 || flags & 0x2u32 == 0 {
        return Err(Error::new(ErrorKind::Other, "Track is disabled"));
    }
    match version {
        1 => {
            // 64 bit creation and modification times.
            let mut skip: Vec<u8> = vec![0; 16];
            let r = src.read(&mut skip).unwrap();
            assert!(r == skip.len());
        },
        0 => {
            // 32 bit creation and modification times.
            let mut skip: Vec<u8> = vec![0; 8];
            let r = src.read(&mut skip).unwrap();
            assert!(r == skip.len());
        },
        _ => panic!("invalid tkhd version"),
    }
    let track_id = src.read_u32::<BigEndian>().unwrap();
    let _reserved = src.read_u32::<BigEndian>().unwrap();
    assert!(_reserved == 0);
    // Duration width depends on the box version.
    let duration = match version {
        1 => {
            src.read_u64::<BigEndian>().unwrap()
        },
        0 => src.read_u32::<BigEndian>().unwrap() as u64,
        _ => panic!("invalid tkhd version"),
    };
    // Two more reserved words; values ignored.
    let _reserved = src.read_u32::<BigEndian>().unwrap();
    let _reserved = src.read_u32::<BigEndian>().unwrap();
    // Skip uninterested fields.
    let mut skip: Vec<u8> = vec![0; 44];
    let r = src.read(&mut skip).unwrap();
    assert!(r == skip.len());
    let width = src.read_u32::<BigEndian>().unwrap();
    let height = src.read_u32::<BigEndian>().unwrap();
    Ok(TrackHeaderBox {
        name: head.name,
        size: head.size,
        track_id: track_id,
        duration: duration,
        width: width,
        height: height,
    })
}
/// Convert the iso box type or other 4-character value to a string.
///
/// Splits the u32 into its four bytes (big-endian order) and decodes
/// them lossily as UTF-8, so non-ASCII fourcc values still render.
fn fourcc_to_string(name: u32) -> String {
    let mut bytes = Vec::with_capacity(4);
    for &shift in [24u32, 16, 8, 0].iter() {
        // Truncating cast keeps only the low byte after the shift.
        bytes.push((name >> shift) as u8);
    }
    String::from_utf8_lossy(&bytes).into_owned()
}
use std::fmt;
impl fmt::Display for BoxHeader {
    /// Render as `'name' N bytes`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = fourcc_to_string(self.name);
        write!(f, "'{}' {} bytes", name, self.size)
    }
}
impl fmt::Display for FileTypeBox {
    /// Render as `'name' N bytes 'brand' vM`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "'{}' {} bytes '{}' v{}",
               fourcc_to_string(self.name), self.size,
               fourcc_to_string(self.major_brand), self.minor_version)
    }
}
impl fmt::Display for MovieHeaderBox {
    /// Render as `'name' N bytes duration Ss`, where the duration is
    /// converted from timescale units to seconds.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let seconds = (self.duration as f64) / (self.timescale as f64);
        write!(f, "'{}' {} bytes duration {}s",
               fourcc_to_string(self.name), self.size, seconds)
    }
}
use std::u16;
impl fmt::Display for TrackHeaderBox {
    /// Render as `'name' N bytes duration D id I WxH`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Dimensions are 16.16 fixed-point, so divide by 2^16.
        let fixed_point_base = u16::MAX as f64 + 1.0;
        write!(f, "'{}' {} bytes duration {} id {} {}x{}",
               fourcc_to_string(self.name), self.size, self.duration,
               self.track_id,
               (self.width as f64) / fixed_point_base,
               (self.height as f64) / fixed_point_base)
    }
}
/// Exercise the short (32-bit size) box header path.
#[test]
fn test_read_box_header() {
    use std::io::Cursor;
    use std::io::Write;
    let mut test: Vec<u8> = vec![0, 0, 0, 8]; // minimal box length
    write!(&mut test, "test").unwrap(); // box type
    let mut stream = Cursor::new(test);
    let parsed = read_box_header(&mut stream).unwrap();
    // 1952805748 == 0x74657374 == "test" read as a big-endian u32.
    assert_eq!(parsed.name, 1952805748);
    assert_eq!(parsed.size, 8);
    println!("box {}", parsed);
}
/// Exercise the long (size == 1, 64-bit extended size) box header path.
#[test]
fn test_read_box_header_long() {
    use std::io::Cursor;
    let mut test: Vec<u8> = vec![0, 0, 0, 1]; // long box extension code
    test.extend("long".to_string().into_bytes()); // box type
    test.extend(vec![0, 0, 0, 0, 0, 0, 16, 0]); // 64 bit size
    // Skip generating box content.
    let mut stream = Cursor::new(test);
    let parsed = read_box_header(&mut stream).unwrap();
    // 1819242087 == 0x6c6f6e67 == "long" read as a big-endian u32.
    assert_eq!(parsed.name, 1819242087);
    assert_eq!(parsed.size, 4096); // 0x1000 from the 64 bit size field
    println!("box {}", parsed);
}
/// Parse a hand-built ftyp box with two compatible brands.
#[test]
fn test_read_ftyp() {
    use std::io::Cursor;
    use std::io::Write;
    let mut test: Vec<u8> = vec![0, 0, 0, 24]; // size
    write!(&mut test, "ftyp").unwrap(); // type
    write!(&mut test, "mp42").unwrap(); // major brand
    test.extend(vec![0, 0, 0, 0]); // minor version
    write!(&mut test, "isom").unwrap(); // compatible brands...
    write!(&mut test, "mp42").unwrap();
    assert_eq!(test.len(), 24); // fixture must match the declared box size
    let mut stream = Cursor::new(test);
    let header = read_box_header(&mut stream).unwrap();
    let parsed = read_ftyp(&mut stream, &header).unwrap();
    // 1718909296 == "ftyp", 1836069938 == "mp42" as big-endian u32s.
    assert_eq!(parsed.name, 1718909296);
    assert_eq!(parsed.size, 24);
    assert_eq!(parsed.major_brand, 1836069938);
    assert_eq!(parsed.minor_version, 0);
    assert_eq!(parsed.compatible_brands.len(), 2);
    println!("box {}", parsed);
}
|
// tag_safe
//
// A linting plugin to flag calls to methods not marked "tag_safe"
// from methods marked "tag_safe".
//
// Author: John Hodge (thePowersGang/Mutabah)
//
// TODO: Support '#[tag_unsafe(type)]' which is used when a method has no marker
// - Allows default safe fallback, with upwards propagation.
//
#![crate_name="tag_safe"]
#![crate_type="dylib"]
#![feature(plugin_registrar, rustc_private)]
#[macro_use]
extern crate log;
extern crate syntax;
#[macro_use]
extern crate rustc;
extern crate rustc_front;
#[macro_use]
extern crate rustc_plugin;
use syntax::ast;
use rustc::middle::def_id::DefId;
use rustc_front::intravisit;
use syntax::codemap::Span;
use rustc::lint::{self, LintContext, LintPass, LateLintPass, LintArray};
use rustc_plugin::Registry;
use rustc::middle::{def,ty};
use rustc_front::hir;
use syntax::attr::AttrMetaMethods;
// Lint fired when a #[tag_safe(...)] function calls a method that is not
// known to be safe for that tag.
declare_lint!(NOT_TAGGED_SAFE, Warn, "Warn about use of non-tagged methods within tagged function");
/// Tri-state safety classification cached per function and per tag.
#[derive(Copy,Clone,Debug)]
enum SafetyType
{
    Safe,
    Unsafe,
    /// Not yet determined; inserted while a function's body is being
    /// analysed so that recursive call cycles terminate.
    Unknown,
}
/// Lint pass state: interns tag names and caches per-function results.
#[derive(Default)]
struct Pass
{
    /// Cache of flag types (index into this Vec is the tag's id)
    flag_types: Vec<String>,
    /// Node => (Type => IsSafe)
    flag_cache: ::rustc::util::nodemap::NodeMap< ::rustc::util::nodemap::FnvHashMap<usize, SafetyType> >,
    /// Current recursion depth; used only to indent debug logging.
    lvl: usize,
}
/// HIR visitor that invokes `cb` with the span of every call to a method
/// that is not safe for tag `name`.
struct Visitor<'a, 'tcx: 'a, F: FnMut(&Span) + 'a>
{
    pass: &'a mut Pass,
    tcx: &'a ty::ctxt<'tcx>,
    /// Tag being checked (the `x` in `#[tag_safe(x)]`).
    name: &'a str,
    /// Result assumed for callees with no marker and no known body.
    unknown_assume: bool,
    /// Callback fired once per offending call expression.
    cb: F,
}
// Hack to provide indenting in debug calls
struct Indent(usize);
impl ::std::fmt::Display for Indent {
    /// Emit `self.0` single spaces.
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        for _ in 0 .. self.0 {
            try!(write!(f, " "));
        }
        Ok( () )
    }
}
impl LintPass for Pass {
    /// Declare the single lint this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(NOT_TAGGED_SAFE)
    }
}
impl LateLintPass for Pass {
    /// Entry point: for every tag listed in the function's #[tag_safe(...)]
    /// attribute(s), walk the body and lint calls to methods that cannot
    /// be shown safe for that tag.
    fn check_fn(&mut self, cx: &lint::LateContext, _kind: ::rustc_front::intravisit::FnKind, _decl: &hir::FnDecl, body: &hir::Block, _: Span, id: ast::NodeId) {
        let attrs = cx.tcx.map.attrs(id);
        for ty in attrs.iter()
            .filter(|a| a.check_name("tag_safe"))
            .filter_map(|a| a.meta_item_list())
            .flat_map(|x| x.iter())
        {
            // Search body for calls to non safe methods
            let mut v = Visitor{
                pass: self, tcx: cx.tcx, name: &ty.name(),
                // - Assumes an untagged method is unsafe
                unknown_assume: false,
                cb: |span| {
                    cx.span_lint(NOT_TAGGED_SAFE, *span,
                        &format!("Calling {}-unsafe method from a #[tag_safe({})] method", ty.name(), ty.name())[..]
                        );
                    },
                };
            debug!("Method {:?} is marked safe '{}'", id, ty.name());
            intravisit::walk_block(&mut v, body);
        }
    }
}
impl Pass
{
    // Searches for the relevant marker
    /// True if node `id` carries `#[<marker>(<name>)]`, e.g.
    /// marker="tag_safe", name="irq" matches `#[tag_safe(irq)]`.
    fn check_for_marker(tcx: &ty::ctxt, id: ast::NodeId, marker: &str, name: &str) -> bool
    {
        debug!("Checking for marker {}({}) on {:?}", marker, name, id);
        tcx.map.attrs(id).iter()
            .filter_map( |a| if a.check_name(marker) { a.meta_item_list() } else { None })
            .flat_map(|x| x.iter())
            .any(|a| a.name() == name)
    }

    /// Recursively check that the provided function is either safe or unsafe.
    // Used to avoid excessive annotating
    fn recurse_fcn_body(&mut self, tcx: &ty::ctxt, node_id: ast::NodeId, name_id: usize, name: &str, unknown_assume: bool) -> bool
    {
        // Cache this method as unknown (to prevent infinite recursion)
        self.flag_cache.entry(node_id)
            .or_insert(Default::default())
            .insert(name_id, SafetyType::Unknown)
            ;
        // and apply a visitor to all
        match tcx.map.get(node_id)
        {
        // Free function: walk the body; any unsafe call flips is_safe.
        rustc::front::map::NodeItem(i) =>
            match i.node {
            hir::ItemFn(_, _, _, _, _, ref body) => {
                // Enumerate this function's code, recursively checking for a call to an unsafe method
                let mut is_safe = true;
                {
                    // unknown_assume=true here: unmarked callees are treated
                    // as safe during recursion so analysis terminates.
                    let mut v = Visitor {
                        pass: self, tcx: tcx, name: name,
                        unknown_assume: true,
                        cb: |_| { is_safe = false; }
                        };
                    intravisit::walk_block(&mut v, body);
                }
                is_safe
                },
            _ => unknown_assume,
            },
        // Inherent/trait impl method: same treatment as a free function.
        rustc::front::map::NodeImplItem(i) =>
            match i.node {
            hir::ImplItemKind::Method(_, ref body) => {
                let mut is_safe = true;
                {
                    let mut v = Visitor {
                        pass: self, tcx: tcx, name: name,
                        unknown_assume: true,
                        cb: |_| { is_safe = false; }
                        };
                    intravisit::walk_block(&mut v, body);
                }
                is_safe
                },
            _ => unknown_assume,
            },
        // Foreign (extern) item: no body to inspect, markers only.
        rustc::front::map::NodeForeignItem(i) =>
            if Self::check_for_marker(tcx, i.id, "tag_safe", name) {
                true
            }
            else if Self::check_for_marker(tcx, i.id, "tag_unsafe", name) {
                false
            }
            else {
                unknown_assume
            },
        v @ _ => {
            error!("Node ID {} points to non-item {:?}", node_id, v);
            unknown_assume
            }
        }
    }

    /// Check that a method within this crate is safe with the provided tag
    fn crate_method_is_safe(&mut self, tcx: &ty::ctxt, node_id: ast::NodeId, name: &str, unknown_assume: bool) -> bool
    {
        // Obtain tag name ID (avoids storing a string in the map)
        let name_id =
            match self.flag_types.iter().position(|a| *a == name)
            {
            Some(v) => v,
            None => {
                self.flag_types.push( String::from(name) );
                self.flag_types.len() - 1
                },
            };
        // Check cache first
        if let Some(&st) = self.flag_cache.get(&node_id).and_then(|a| a.get(&name_id))
        {
            match st
            {
            SafetyType::Safe => true,
            SafetyType::Unsafe => false,
            // Unknown means this node is currently being analysed
            // (a recursion cycle); fall back to the caller's assumption.
            SafetyType::Unknown => unknown_assume,
            }
        }
        else
        {
            // Search for a safety marker, possibly recursing
            let is_safe =
                if Self::check_for_marker(tcx, node_id, "tag_safe", name) {
                    true
                }
                else if Self::check_for_marker(tcx, node_id, "tag_unsafe", name) {
                    false
                }
                else {
                    self.recurse_fcn_body(tcx, node_id, name_id, name, unknown_assume)
                };
            // Save resultant value
            self.flag_cache.entry(node_id)
                .or_insert(Default::default())
                .insert(name_id, if is_safe { SafetyType::Safe } else { SafetyType::Unsafe })
                ;
            is_safe
        }
    }

    /// Locate a #[tag_safe(<name>)] attribute on the passed item
    pub fn method_is_safe(&mut self, tcx: &ty::ctxt, id: DefId, name: &str, unknown_assume: bool) -> bool
    {
        debug!("{}Checking method {:?} (A {})", Indent(self.lvl), id, unknown_assume);
        self.lvl += 1;
        // krate == 0 is the local crate: full body analysis is possible.
        let rv = if id.krate == 0 {
                self.crate_method_is_safe(tcx, tcx.map.as_local_node_id(id).unwrap(), name, unknown_assume)
            }
            else {
                // External crate: only the exported attributes are visible.
                for a in tcx.get_attrs(id).iter()
                {
                    if a.check_name("tag_safe") {
                        if a.meta_item_list().iter().flat_map(|a| a.iter()).any(|a| a.name() == name) {
                            return true;
                        }
                    }
                    if a.check_name("tag_unsafe") {
                        if a.meta_item_list().iter().flat_map(|a| a.iter()).any(|a| a.name() == name) {
                            return false;
                        }
                    }
                }
                warn!("TODO: Crate ID non-zero {:?} (assuming safe)", id);
                // TODO: Check the crate import for an annotation
                true
            };
        self.lvl -= 1;
        debug!("{}Checking method {:?} = {}", Indent(self.lvl), id, rv);
        rv
    }
}
impl<'a, 'tcx: 'a, F: FnMut(&Span)> intravisit::Visitor<'a> for Visitor<'a,'tcx, F>
{
    // Locate function/method calls in a code block
    // - uses visit_expr_post because it doesn't _need_ to do anything
    fn visit_expr_post(&mut self, ex: &'a hir::Expr) {
        match ex.node
        {
        // Call expressions - check that it's a path call
        hir::ExprCall(ref fcn, _) =>
            match fcn.node
            {
            hir::ExprPath(ref _qs, ref _p) => {
                // Resolve the path to a function DefId, then consult the
                // markers/cache; fire the callback if it is not safe.
                if let def::Def::Fn(did) = self.tcx.resolve_expr(&fcn) {
                    // Check for a safety tag
                    if !self.pass.method_is_safe(self.tcx, did, self.name, self.unknown_assume)
                    {
                        (self.cb)(&ex.span);
                    }
                }
                },
            _ => {},
            },
        // Method call expressions - get the relevant method
        hir::ExprMethodCall(ref _id, ref _tys, ref _exprs) =>
            {
            // Look up the resolved callee in the typeck tables.
            let tables = self.tcx.tables.borrow();
            let mm = &tables.method_map;
            let callee = mm.get( &ty::MethodCall::expr(ex.id) ).unwrap();
            let id = callee.def_id;
            //if let ty::MethodStatic(id) = callee.origin {
            // Check for a safety tag
            if !self.pass.method_is_safe(self.tcx, id, self.name, self.unknown_assume) {
                (self.cb)(&ex.span);
            }
            //}
            },
        // Ignore any other type of node
        _ => {},
        }
    }
}
/// Compiler plugin entry point: registers the lint pass and whitelists
/// the two marker attributes so their use is not itself linted.
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
    use syntax::feature_gate::AttributeType;
    reg.register_late_lint_pass( Box::new(Pass::default()) );
    for attr in &["tag_safe", "tag_unsafe"] {
        reg.register_attribute(String::from(*attr), AttributeType::Whitelisted);
    }
}
// vim: ts=4 expandtab sw=4
Update for the breaking change in rust-lang/rust PR #31979 (`ty::ctxt` renamed to `TyCtxt`)
// tag_safe
//
// A linting plugin to flag calls to methods not marked "tag_safe"
// from methods marked "tag_safe".
//
// Author: John Hodge (thePowersGang/Mutabah)
//
// TODO: Support '#[tag_unsafe(type)]' which is used when a method has no marker
// - Allows default safe fallback, with upwards propagation.
//
#![crate_name="tag_safe"]
#![crate_type="dylib"]
#![feature(plugin_registrar, rustc_private)]
#[macro_use]
extern crate log;
extern crate syntax;
#[macro_use]
extern crate rustc;
extern crate rustc_front;
#[macro_use]
extern crate rustc_plugin;
use syntax::ast;
use rustc::middle::def_id::DefId;
use rustc_front::intravisit;
use syntax::codemap::Span;
use rustc::lint::{self, LintContext, LintPass, LateLintPass, LintArray};
use rustc_plugin::Registry;
use rustc::middle::{def,ty};
use rustc::middle::ty::TyCtxt;
use rustc_front::hir;
use syntax::attr::AttrMetaMethods;
// Lint fired when a #[tag_safe(...)] function calls a method that is not
// known to be safe for that tag.
declare_lint!(NOT_TAGGED_SAFE, Warn, "Warn about use of non-tagged methods within tagged function");
/// Tri-state safety classification cached per function and per tag.
#[derive(Copy,Clone,Debug)]
enum SafetyType
{
    Safe,
    Unsafe,
    /// Not yet determined; inserted while a function's body is being
    /// analysed so that recursive call cycles terminate.
    Unknown,
}
/// Lint pass state: interns tag names and caches per-function results.
#[derive(Default)]
struct Pass
{
    /// Cache of flag types (index into this Vec is the tag's id)
    flag_types: Vec<String>,
    /// Node => (Type => IsSafe)
    flag_cache: ::rustc::util::nodemap::NodeMap< ::rustc::util::nodemap::FnvHashMap<usize, SafetyType> >,
    /// Current recursion depth; used only to indent debug logging.
    lvl: usize,
}
/// HIR visitor that invokes `cb` with the span of every call to a method
/// that is not safe for tag `name`.
struct Visitor<'a, 'tcx: 'a, F: FnMut(&Span) + 'a>
{
    pass: &'a mut Pass,
    tcx: &'a TyCtxt<'tcx>,
    /// Tag being checked (the `x` in `#[tag_safe(x)]`).
    name: &'a str,
    /// Result assumed for callees with no marker and no known body.
    unknown_assume: bool,
    /// Callback fired once per offending call expression.
    cb: F,
}
// Hack to provide indenting in debug calls
struct Indent(usize);
impl ::std::fmt::Display for Indent {
    /// Emit `self.0` single spaces.
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        for _ in 0 .. self.0 {
            try!(write!(f, " "));
        }
        Ok( () )
    }
}
impl LintPass for Pass {
    /// Declare the single lint this pass can emit.
    fn get_lints(&self) -> LintArray {
        lint_array!(NOT_TAGGED_SAFE)
    }
}
impl LateLintPass for Pass {
    /// Entry point: for every tag listed in the function's #[tag_safe(...)]
    /// attribute(s), walk the body and lint calls to methods that cannot
    /// be shown safe for that tag.
    fn check_fn(&mut self, cx: &lint::LateContext, _kind: ::rustc_front::intravisit::FnKind, _decl: &hir::FnDecl, body: &hir::Block, _: Span, id: ast::NodeId) {
        let attrs = cx.tcx.map.attrs(id);
        for ty in attrs.iter()
            .filter(|a| a.check_name("tag_safe"))
            .filter_map(|a| a.meta_item_list())
            .flat_map(|x| x.iter())
        {
            // Search body for calls to non safe methods
            let mut v = Visitor{
                pass: self, tcx: cx.tcx, name: &ty.name(),
                // - Assumes an untagged method is unsafe
                unknown_assume: false,
                cb: |span| {
                    cx.span_lint(NOT_TAGGED_SAFE, *span,
                        &format!("Calling {}-unsafe method from a #[tag_safe({})] method", ty.name(), ty.name())[..]
                        );
                    },
                };
            debug!("Method {:?} is marked safe '{}'", id, ty.name());
            intravisit::walk_block(&mut v, body);
        }
    }
}
impl Pass
{
    // Searches for the relevant marker
    /// True if node `id` carries `#[<marker>(<name>)]`, e.g.
    /// marker="tag_safe", name="irq" matches `#[tag_safe(irq)]`.
    fn check_for_marker(tcx: &TyCtxt, id: ast::NodeId, marker: &str, name: &str) -> bool
    {
        debug!("Checking for marker {}({}) on {:?}", marker, name, id);
        tcx.map.attrs(id).iter()
            .filter_map( |a| if a.check_name(marker) { a.meta_item_list() } else { None })
            .flat_map(|x| x.iter())
            .any(|a| a.name() == name)
    }

    /// Recursively check that the provided function is either safe or unsafe.
    // Used to avoid excessive annotating
    fn recurse_fcn_body(&mut self, tcx: &TyCtxt, node_id: ast::NodeId, name_id: usize, name: &str, unknown_assume: bool) -> bool
    {
        // Cache this method as unknown (to prevent infinite recursion)
        self.flag_cache.entry(node_id)
            .or_insert(Default::default())
            .insert(name_id, SafetyType::Unknown)
            ;
        // and apply a visitor to all
        match tcx.map.get(node_id)
        {
        // Free function: walk the body; any unsafe call flips is_safe.
        rustc::front::map::NodeItem(i) =>
            match i.node {
            hir::ItemFn(_, _, _, _, _, ref body) => {
                // Enumerate this function's code, recursively checking for a call to an unsafe method
                let mut is_safe = true;
                {
                    // unknown_assume=true here: unmarked callees are treated
                    // as safe during recursion so analysis terminates.
                    let mut v = Visitor {
                        pass: self, tcx: tcx, name: name,
                        unknown_assume: true,
                        cb: |_| { is_safe = false; }
                        };
                    intravisit::walk_block(&mut v, body);
                }
                is_safe
                },
            _ => unknown_assume,
            },
        // Inherent/trait impl method: same treatment as a free function.
        rustc::front::map::NodeImplItem(i) =>
            match i.node {
            hir::ImplItemKind::Method(_, ref body) => {
                let mut is_safe = true;
                {
                    let mut v = Visitor {
                        pass: self, tcx: tcx, name: name,
                        unknown_assume: true,
                        cb: |_| { is_safe = false; }
                        };
                    intravisit::walk_block(&mut v, body);
                }
                is_safe
                },
            _ => unknown_assume,
            },
        // Foreign (extern) item: no body to inspect, markers only.
        rustc::front::map::NodeForeignItem(i) =>
            if Self::check_for_marker(tcx, i.id, "tag_safe", name) {
                true
            }
            else if Self::check_for_marker(tcx, i.id, "tag_unsafe", name) {
                false
            }
            else {
                unknown_assume
            },
        v @ _ => {
            error!("Node ID {} points to non-item {:?}", node_id, v);
            unknown_assume
            }
        }
    }

    /// Check that a method within this crate is safe with the provided tag
    fn crate_method_is_safe(&mut self, tcx: &TyCtxt, node_id: ast::NodeId, name: &str, unknown_assume: bool) -> bool
    {
        // Obtain tag name ID (avoids storing a string in the map)
        let name_id =
            match self.flag_types.iter().position(|a| *a == name)
            {
            Some(v) => v,
            None => {
                self.flag_types.push( String::from(name) );
                self.flag_types.len() - 1
                },
            };
        // Check cache first
        if let Some(&st) = self.flag_cache.get(&node_id).and_then(|a| a.get(&name_id))
        {
            match st
            {
            SafetyType::Safe => true,
            SafetyType::Unsafe => false,
            // Unknown means this node is currently being analysed
            // (a recursion cycle); fall back to the caller's assumption.
            SafetyType::Unknown => unknown_assume,
            }
        }
        else
        {
            // Search for a safety marker, possibly recursing
            let is_safe =
                if Self::check_for_marker(tcx, node_id, "tag_safe", name) {
                    true
                }
                else if Self::check_for_marker(tcx, node_id, "tag_unsafe", name) {
                    false
                }
                else {
                    self.recurse_fcn_body(tcx, node_id, name_id, name, unknown_assume)
                };
            // Save resultant value
            self.flag_cache.entry(node_id)
                .or_insert(Default::default())
                .insert(name_id, if is_safe { SafetyType::Safe } else { SafetyType::Unsafe })
                ;
            is_safe
        }
    }

    /// Locate a #[tag_safe(<name>)] attribute on the passed item
    pub fn method_is_safe(&mut self, tcx: &TyCtxt, id: DefId, name: &str, unknown_assume: bool) -> bool
    {
        debug!("{}Checking method {:?} (A {})", Indent(self.lvl), id, unknown_assume);
        self.lvl += 1;
        // krate == 0 is the local crate: full body analysis is possible.
        let rv = if id.krate == 0 {
                self.crate_method_is_safe(tcx, tcx.map.as_local_node_id(id).unwrap(), name, unknown_assume)
            }
            else {
                // External crate: only the exported attributes are visible.
                for a in tcx.get_attrs(id).iter()
                {
                    if a.check_name("tag_safe") {
                        if a.meta_item_list().iter().flat_map(|a| a.iter()).any(|a| a.name() == name) {
                            return true;
                        }
                    }
                    if a.check_name("tag_unsafe") {
                        if a.meta_item_list().iter().flat_map(|a| a.iter()).any(|a| a.name() == name) {
                            return false;
                        }
                    }
                }
                warn!("TODO: Crate ID non-zero {:?} (assuming safe)", id);
                // TODO: Check the crate import for an annotation
                true
            };
        self.lvl -= 1;
        debug!("{}Checking method {:?} = {}", Indent(self.lvl), id, rv);
        rv
    }
}
impl<'a, 'tcx: 'a, F: FnMut(&Span)> intravisit::Visitor<'a> for Visitor<'a,'tcx, F>
{
    // Locate function/method calls in a code block
    // - uses visit_expr_post because it doesn't _need_ to do anything
    fn visit_expr_post(&mut self, ex: &'a hir::Expr) {
        match ex.node
        {
        // Call expressions - check that it's a path call
        hir::ExprCall(ref fcn, _) =>
            match fcn.node
            {
            hir::ExprPath(ref _qs, ref _p) => {
                // Resolve the path to a function DefId, then consult the
                // markers/cache; fire the callback if it is not safe.
                if let def::Def::Fn(did) = self.tcx.resolve_expr(&fcn) {
                    // Check for a safety tag
                    if !self.pass.method_is_safe(self.tcx, did, self.name, self.unknown_assume)
                    {
                        (self.cb)(&ex.span);
                    }
                }
                },
            _ => {},
            },
        // Method call expressions - get the relevant method
        hir::ExprMethodCall(ref _id, ref _tys, ref _exprs) =>
            {
            // Look up the resolved callee in the typeck tables.
            let tables = self.tcx.tables.borrow();
            let mm = &tables.method_map;
            let callee = mm.get( &ty::MethodCall::expr(ex.id) ).unwrap();
            let id = callee.def_id;
            //if let ty::MethodStatic(id) = callee.origin {
            // Check for a safety tag
            if !self.pass.method_is_safe(self.tcx, id, self.name, self.unknown_assume) {
                (self.cb)(&ex.span);
            }
            //}
            },
        // Ignore any other type of node
        _ => {},
        }
    }
}
/// Compiler plugin entry point: registers the lint pass and whitelists
/// the two marker attributes so their use is not itself linted.
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
    use syntax::feature_gate::AttributeType;
    reg.register_late_lint_pass( Box::new(Pass::default()) );
    for attr in &["tag_safe", "tag_unsafe"] {
        reg.register_attribute(String::from(*attr), AttributeType::Whitelisted);
    }
}
// vim: ts=4 expandtab sw=4
|
//!
//! FUSE userspace library implementation (as of libosxfuse-2.5.5).
//!
//! This is an improved rewrite of the FUSE userspace library (lowlevel
//! interface) to fully take advantage of Rust's architecture. The only thing
//! we rely on in the real libfuse are mount and unmount calls which are
//! needed to establish a fd to talk to the kernel driver.
//!
#![feature(collections)]
#![feature(core)]
#![feature(io)]
#![feature(libc)]
#![feature(std_misc)]
#![feature(unsafe_destructor)]
#![warn(missing_docs, bad_style, unused, unused_extern_crates, unused_import_braces, unused_qualifications, unused_typecasts)]
extern crate libc;
#[macro_use]
extern crate log;
extern crate time;
use std::io;
use std::ffi::OsStr;
use std::path::{Path, AsPath};
use libc::{c_int, ENOSYS};
use time::Timespec;
pub use fuse::FUSE_ROOT_ID;
pub use fuse::consts;
pub use reply::{Reply, ReplyEmpty, ReplyData, ReplyEntry, ReplyAttr, ReplyOpen};
pub use reply::{ReplyWrite, ReplyStatfs, ReplyCreate, ReplyLock, ReplyBmap, ReplyDirectory};
#[cfg(target_os = "macos")]
pub use reply::ReplyXTimes;
pub use request::Request;
pub use session::{Session, BackgroundSession};
mod argument;
mod channel;
mod fuse;
mod reply;
mod request;
mod session;
/// File types
///
/// One variant per `S_IF*` file-type constant from `sys/stat.h`.
#[derive(Copy, Debug, Hash, PartialEq)]
pub enum FileType {
    /// Named pipe (S_IFIFO)
    NamedPipe,
    /// Character device (S_IFCHR)
    CharDevice,
    /// Block device (S_IFBLK)
    BlockDevice,
    /// Directory (S_IFDIR)
    Directory,
    /// Regular file (S_IFREG)
    RegularFile,
    /// Symbolic link (S_IFLNK)
    Symlink,
}
/// File attributes
///
/// Roughly the fields of `struct stat`, plus the OS X-only creation
/// time and flags.
#[derive(Copy, Debug)]
pub struct FileAttr {
    /// Inode number
    pub ino: u64,
    /// Size in bytes
    pub size: u64,
    /// Size in blocks
    pub blocks: u64,
    /// Time of last access
    pub atime: Timespec,
    /// Time of last modification
    pub mtime: Timespec,
    /// Time of last change
    pub ctime: Timespec,
    /// Time of creation (OS X only)
    pub crtime: Timespec,
    /// Kind of file (directory, file, pipe, etc)
    pub kind: FileType,
    /// Permissions
    pub perm: u16,
    /// Number of hard links
    pub nlink: u32,
    /// User id
    pub uid: u32,
    /// Group id
    pub gid: u32,
    /// Rdev
    pub rdev: u32,
    /// Flags (OS X only, see chflags(2))
    pub flags: u32,
}
/// Filesystem trait.
///
/// This trait must be implemented to provide a userspace filesystem via FUSE.
/// These methods corrospond to fuse_lowlevel_ops in libfuse. Reasonable default
/// implementations are provided here to get a mountable filesystem that does
/// nothing.
pub trait Filesystem {
/// Initialize filesystem
/// Called before any other filesystem method.
fn init (&mut self, _req: &Request) -> Result<(), c_int> {
Ok(())
}
/// Clean up filesystem
/// Called on filesystem exit.
fn destroy (&mut self, _req: &Request) {
}
/// Look up a directory entry by name and get its attributes.
fn lookup (&mut self, _req: &Request, _parent: u64, _name: &Path, reply: ReplyEntry) {
reply.error(ENOSYS);
}
/// Forget about an inode
/// The nlookup parameter indicates the number of lookups previously performed on
/// this inode. If the filesystem implements inode lifetimes, it is recommended that
/// inodes acquire a single reference on each lookup, and lose nlookup references on
/// each forget. The filesystem may ignore forget calls, if the inodes don't need to
/// have a limited lifetime. On unmount it is not guaranteed, that all referenced
/// inodes will receive a forget message.
fn forget (&mut self, _req: &Request, _ino: u64, _nlookup: u64) {
}
/// Get file attributes
fn getattr (&mut self, _req: &Request, _ino: u64, reply: ReplyAttr) {
reply.error(ENOSYS);
}
/// Set file attributes
fn setattr (&mut self, _req: &Request, _ino: u64, _mode: Option<u32>, _uid: Option<u32>, _gid: Option<u32>, _size: Option<u64>, _atime: Option<Timespec>, _mtime: Option<Timespec>, _fh: Option<u64>, _crtime: Option<Timespec>, _chgtime: Option<Timespec>, _bkuptime: Option<Timespec>, _flags: Option<u32>, reply: ReplyAttr) {
reply.error(ENOSYS);
}
/// Read symbolic link
fn readlink (&mut self, _req: &Request, _ino: u64, reply: ReplyData) {
reply.error(ENOSYS);
}
/// Create file node
/// Create a regular file, character device, block device, fifo or socket node.
fn mknod (&mut self, _req: &Request, _parent: u64, _name: &Path, _mode: u32, _rdev: u32, reply: ReplyEntry) {
reply.error(ENOSYS);
}
/// Create a directory
fn mkdir (&mut self, _req: &Request, _parent: u64, _name: &Path, _mode: u32, reply: ReplyEntry) {
reply.error(ENOSYS);
}
/// Remove a file
fn unlink (&mut self, _req: &Request, _parent: u64, _name: &Path, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Remove a directory
fn rmdir (&mut self, _req: &Request, _parent: u64, _name: &Path, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Create a symbolic link
fn symlink (&mut self, _req: &Request, _parent: u64, _name: &Path, _link: &Path, reply: ReplyEntry) {
reply.error(ENOSYS);
}
/// Rename a file
fn rename (&mut self, _req: &Request, _parent: u64, _name: &Path, _newparent: u64, _newname: &Path, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Create a hard link
fn link (&mut self, _req: &Request, _ino: u64, _newparent: u64, _newname: &Path, reply: ReplyEntry) {
reply.error(ENOSYS);
}
/// Open a file
/// Open flags (with the exception of O_CREAT, O_EXCL, O_NOCTTY and O_TRUNC) are
/// available in flags. Filesystem may store an arbitrary file handle (pointer, index,
/// etc) in fh, and use this in other all other file operations (read, write, flush,
/// release, fsync). Filesystem may also implement stateless file I/O and not store
/// anything in fh. There are also some flags (direct_io, keep_cache) which the
/// filesystem may set, to change the way the file is opened. See fuse_file_info
/// structure in <fuse_common.h> for more details.
fn open (&mut self, _req: &Request, _ino: u64, _flags: u32, reply: ReplyOpen) {
reply.opened(0, 0);
}
/// Read data
/// Read should send exactly the number of bytes requested except on EOF or error,
/// otherwise the rest of the data will be substituted with zeroes. An exception to
/// this is when the file has been opened in 'direct_io' mode, in which case the
/// return value of the read system call will reflect the return value of this
/// operation. fh will contain the value set by the open method, or will be undefined
/// if the open method didn't set any value.
fn read (&mut self, _req: &Request, _ino: u64, _fh: u64, _offset: u64, _size: u32, reply: ReplyData) {
reply.error(ENOSYS);
}
/// Write data
/// Write should return exactly the number of bytes requested except on error. An
/// exception to this is when the file has been opened in 'direct_io' mode, in
/// which case the return value of the write system call will reflect the return
/// value of this operation. fh will contain the value set by the open method, or
/// will be undefined if the open method didn't set any value.
fn write (&mut self, _req: &Request, _ino: u64, _fh: u64, _offset: u64, _data: &[u8], _flags: u32, reply: ReplyWrite) {
reply.error(ENOSYS);
}
/// Flush method
/// This is called on each close() of the opened file. Since file descriptors can
/// be duplicated (dup, dup2, fork), for one open call there may be many flush
/// calls. Filesystems shouldn't assume that flush will always be called after some
/// writes, or that if will be called at all. fh will contain the value set by the
/// open method, or will be undefined if the open method didn't set any value.
/// NOTE: the name of the method is misleading, since (unlike fsync) the filesystem
/// is not forced to flush pending writes. One reason to flush data, is if the
/// filesystem wants to return write errors. If the filesystem supports file locking
/// operations (setlk, getlk) it should remove all locks belonging to 'lock_owner'.
fn flush (&mut self, _req: &Request, _ino: u64, _fh: u64, _lock_owner: u64, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Release an open file
/// Release is called when there are no more references to an open file: all file
/// descriptors are closed and all memory mappings are unmapped. For every open
/// call there will be exactly one release call. The filesystem may reply with an
/// error, but error values are not returned to close() or munmap() which triggered
/// the release. fh will contain the value set by the open method, or will be undefined
/// if the open method didn't set any value. flags will contain the same flags as for
/// open.
fn release (&mut self, _req: &Request, _ino: u64, _fh: u64, _flags: u32, _lock_owner: u64, _flush: bool, reply: ReplyEmpty) {
reply.ok();
}
/// Synchronize file contents
/// If the datasync parameter is non-zero, then only the user data should be flushed,
/// not the meta data.
fn fsync (&mut self, _req: &Request, _ino: u64, _fh: u64, _datasync: bool, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Open a directory
/// Filesystem may store an arbitrary file handle (pointer, index, etc) in fh, and
/// use this in other all other directory stream operations (readdir, releasedir,
/// fsyncdir). Filesystem may also implement stateless directory I/O and not store
/// anything in fh, though that makes it impossible to implement standard conforming
/// directory stream operations in case the contents of the directory can change
/// between opendir and releasedir.
fn opendir (&mut self, _req: &Request, _ino: u64, _flags: u32, reply: ReplyOpen) {
reply.opened(0, 0);
}
/// Read directory
/// Send a buffer filled using buffer.fill(), with size not exceeding the
/// requested size. Send an empty buffer on end of stream. fh will contain the
/// value set by the opendir method, or will be undefined if the opendir method
/// didn't set any value.
fn readdir (&mut self, _req: &Request, _ino: u64, _fh: u64, _offset: u64, reply: ReplyDirectory) {
reply.error(ENOSYS);
}
/// Release an open directory
/// For every opendir call there will be exactly one releasedir call. fh will
/// contain the value set by the opendir method, or will be undefined if the
/// opendir method didn't set any value.
fn releasedir (&mut self, _req: &Request, _ino: u64, _fh: u64, _flags: u32, reply: ReplyEmpty) {
reply.ok();
}
/// Synchronize directory contents
/// If the datasync parameter is set, then only the directory contents should
/// be flushed, not the meta data. fh will contain the value set by the opendir
/// method, or will be undefined if the opendir method didn't set any value.
fn fsyncdir (&mut self, _req: &Request, _ino: u64, _fh: u64, _datasync: bool, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Get file system statistics
fn statfs (&mut self, _req: &Request, _ino: u64, reply: ReplyStatfs) {
reply.statfs(0, 0, 0, 0, 0, 512, 255, 0);
}
/// Set an extended attribute
fn setxattr (&mut self, _req: &Request, _ino: u64, _name: &OsStr, _value: &[u8], _flags: u32, _position: u32, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Get an extended attribute
fn getxattr (&mut self, _req: &Request, _ino: u64, _name: &OsStr, reply: ReplyData) {
// FIXME: If arg.size is zero, the size of the value should be sent with fuse_getxattr_out
// FIXME: If arg.size is non-zero, send the value if it fits, or ERANGE otherwise
reply.error(ENOSYS);
}
/// List extended attribute names
fn listxattr (&mut self, _req: &Request, _ino: u64, reply: ReplyEmpty) {
// FIXME: If arg.size is zero, the size of the attribute list should be sent with fuse_getxattr_out
// FIXME: If arg.size is non-zero, send the attribute list if it fits, or ERANGE otherwise
reply.error(ENOSYS);
}
/// Remove an extended attribute
fn removexattr (&mut self, _req: &Request, _ino: u64, _name: &OsStr, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Check file access permissions
/// This will be called for the access() system call. If the 'default_permissions'
/// mount option is given, this method is not called. This method is not called
/// under Linux kernel versions 2.4.x
fn access (&mut self, _req: &Request, _ino: u64, _mask: u32, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Create and open a file
/// If the file does not exist, first create it with the specified mode, and then
/// open it. Open flags (with the exception of O_NOCTTY) are available in flags.
/// Filesystem may store an arbitrary file handle (pointer, index, etc) in fh,
/// and use this in other all other file operations (read, write, flush, release,
/// fsync). There are also some flags (direct_io, keep_cache) which the
/// filesystem may set, to change the way the file is opened. See fuse_file_info
/// structure in <fuse_common.h> for more details. If this method is not
/// implemented or under Linux kernel versions earlier than 2.6.15, the mknod()
/// and open() methods will be called instead.
fn create (&mut self, _req: &Request, _parent: u64, _name: &Path, _mode: u32, _flags: u32, reply: ReplyCreate) {
reply.error(ENOSYS);
}
/// Test for a POSIX file lock
/// Default: replies ENOSYS (not implemented).
fn getlk (&mut self, _req: &Request, _ino: u64, _fh: u64, _lock_owner: u64, _start: u64, _end: u64, _typ: u32, _pid: u32, reply: ReplyLock) {
reply.error(ENOSYS);
}
/// Acquire, modify or release a POSIX file lock
/// For POSIX threads (NPTL) there's a 1-1 relation between pid and owner, but
/// otherwise this is not always the case. For checking lock ownership,
/// 'fi->owner' must be used. The l_pid field in 'struct flock' should only be
/// used to fill in this field in getlk(). Note: if the locking methods are not
/// implemented, the kernel will still allow file locking to work locally.
/// Hence these are only interesting for network filesystems and similar.
/// Default: replies ENOSYS (not implemented).
fn setlk (&mut self, _req: &Request, _ino: u64, _fh: u64, _lock_owner: u64, _start: u64, _end: u64, _typ: u32, _pid: u32, _sleep: bool, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Map block index within file to block index within device
/// Note: This makes sense only for block device backed filesystems mounted
/// with the 'blkdev' option
/// Default: replies ENOSYS (not implemented).
fn bmap (&mut self, _req: &Request, _ino: u64, _blocksize: u32, _idx: u64, reply: ReplyBmap) {
reply.error(ENOSYS);
}
/// OS X only: Rename the volume. Set fuse_init_out.flags during init to
/// FUSE_VOL_RENAME to enable
/// Default: replies ENOSYS (not implemented).
#[cfg(target_os = "macos")]
fn setvolname (&mut self, _req: &Request, _name: &OsStr, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// OS X only (undocumented)
/// Default: replies ENOSYS (not implemented).
#[cfg(target_os = "macos")]
fn exchange (&mut self, _req: &Request, _parent: u64, _name: &Path, _newparent: u64, _newname: &Path, _options: u64, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// OS X only: Query extended times (bkuptime and crtime). Set fuse_init_out.flags
/// during init to FUSE_XTIMES to enable
/// Default: replies ENOSYS (not implemented).
#[cfg(target_os = "macos")]
fn getxtimes (&mut self, _req: &Request, _ino: u64, reply: ReplyXTimes) {
reply.error(ENOSYS);
}
}
/// Mount the given filesystem to the given mountpoint. This function will
/// not return until the filesystem is unmounted.
pub fn mount<FS: Filesystem+Send, P: AsPath> (filesystem: FS, mountpoint: &P, options: &[&OsStr]) {
Session::new(filesystem, mountpoint.as_path(), options).run();
}
/// Mount the given filesystem to the given mountpoint. This function spawns
/// a background thread to handle filesystem operations while being mounted
/// and therefore returns immediately. The returned handle should be stored
/// to reference the mounted filesystem. If it's dropped, the filesystem will
/// be unmounted.
pub fn spawn_mount<'a, FS: Filesystem+Send+'static, P: AsPath> (filesystem: FS, mountpoint: &P, options: &[&OsStr]) -> io::Result<BackgroundSession<'a>> {
Session::new(filesystem, mountpoint.as_path(), options).spawn()
}
Use the generic conversion trait `AsRef<Path>` instead of the deprecated `AsPath` trait.
//!
//! FUSE userspace library implementation (as of libosxfuse-2.5.5).
//!
//! This is an improved rewrite of the FUSE userspace library (lowlevel
//! interface) to fully take advantage of Rust's architecture. The only thing
//! we rely on in the real libfuse are mount and unmount calls which are
//! needed to establish a fd to talk to the kernel driver.
//!
#![feature(collections)]
#![feature(convert)]
#![feature(core)]
#![feature(io)]
#![feature(libc)]
#![feature(std_misc)]
#![feature(unsafe_destructor)]
#![warn(missing_docs, bad_style, unused, unused_extern_crates, unused_import_braces, unused_qualifications, unused_typecasts)]
extern crate libc;
#[macro_use]
extern crate log;
extern crate time;
use std::convert::AsRef;
use std::io;
use std::ffi::OsStr;
use std::path::Path;
use libc::{c_int, ENOSYS};
use time::Timespec;
pub use fuse::FUSE_ROOT_ID;
pub use fuse::consts;
pub use reply::{Reply, ReplyEmpty, ReplyData, ReplyEntry, ReplyAttr, ReplyOpen};
pub use reply::{ReplyWrite, ReplyStatfs, ReplyCreate, ReplyLock, ReplyBmap, ReplyDirectory};
#[cfg(target_os = "macos")]
pub use reply::ReplyXTimes;
pub use request::Request;
pub use session::{Session, BackgroundSession};
mod argument;
mod channel;
mod fuse;
mod reply;
mod request;
mod session;
/// File types
///
/// `Clone` is derived alongside `Copy` because `Copy` requires `Clone`
/// (`trait Copy: Clone`); `Eq` is derived since variant equality is a plain
/// discriminant comparison with no partial cases.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub enum FileType {
    /// Named pipe (S_IFIFO)
    NamedPipe,
    /// Character device (S_IFCHR)
    CharDevice,
    /// Block device (S_IFBLK)
    BlockDevice,
    /// Directory (S_IFDIR)
    Directory,
    /// Regular file (S_IFREG)
    RegularFile,
    /// Symbolic link (S_IFLNK)
    Symlink,
}
/// File attributes
#[derive(Copy, Debug)]
pub struct FileAttr {
/// Inode number
pub ino: u64,
/// Size in bytes
pub size: u64,
/// Size in blocks
pub blocks: u64,
/// Time of last access
pub atime: Timespec,
/// Time of last modification
pub mtime: Timespec,
/// Time of last change
pub ctime: Timespec,
/// Time of creation (OS X only)
pub crtime: Timespec,
/// Kind of file (directory, file, pipe, etc)
pub kind: FileType,
/// Permissions
pub perm: u16,
/// Number of hard links
pub nlink: u32,
/// User id
pub uid: u32,
/// Group id
pub gid: u32,
/// Rdev
pub rdev: u32,
/// Flags (OS X only, see chflags(2))
pub flags: u32,
}
/// Filesystem trait.
///
/// This trait must be implemented to provide a userspace filesystem via FUSE.
/// These methods correspond to fuse_lowlevel_ops in libfuse. Reasonable default
/// implementations are provided here to get a mountable filesystem that does
/// nothing: most defaults reply ENOSYS ("not implemented"), while init,
/// open/opendir, release/releasedir and statfs provide harmless successes.
pub trait Filesystem {
/// Initialize filesystem
/// Called before any other filesystem method.
fn init (&mut self, _req: &Request) -> Result<(), c_int> {
Ok(())
}
/// Clean up filesystem
/// Called on filesystem exit.
fn destroy (&mut self, _req: &Request) {
}
/// Look up a directory entry by name and get its attributes.
fn lookup (&mut self, _req: &Request, _parent: u64, _name: &Path, reply: ReplyEntry) {
reply.error(ENOSYS);
}
/// Forget about an inode
/// The nlookup parameter indicates the number of lookups previously performed on
/// this inode. If the filesystem implements inode lifetimes, it is recommended that
/// inodes acquire a single reference on each lookup, and lose nlookup references on
/// each forget. The filesystem may ignore forget calls, if the inodes don't need to
/// have a limited lifetime. On unmount it is not guaranteed, that all referenced
/// inodes will receive a forget message.
fn forget (&mut self, _req: &Request, _ino: u64, _nlookup: u64) {
}
/// Get file attributes
fn getattr (&mut self, _req: &Request, _ino: u64, reply: ReplyAttr) {
reply.error(ENOSYS);
}
/// Set file attributes
fn setattr (&mut self, _req: &Request, _ino: u64, _mode: Option<u32>, _uid: Option<u32>, _gid: Option<u32>, _size: Option<u64>, _atime: Option<Timespec>, _mtime: Option<Timespec>, _fh: Option<u64>, _crtime: Option<Timespec>, _chgtime: Option<Timespec>, _bkuptime: Option<Timespec>, _flags: Option<u32>, reply: ReplyAttr) {
reply.error(ENOSYS);
}
/// Read symbolic link
fn readlink (&mut self, _req: &Request, _ino: u64, reply: ReplyData) {
reply.error(ENOSYS);
}
/// Create file node
/// Create a regular file, character device, block device, fifo or socket node.
fn mknod (&mut self, _req: &Request, _parent: u64, _name: &Path, _mode: u32, _rdev: u32, reply: ReplyEntry) {
reply.error(ENOSYS);
}
/// Create a directory
fn mkdir (&mut self, _req: &Request, _parent: u64, _name: &Path, _mode: u32, reply: ReplyEntry) {
reply.error(ENOSYS);
}
/// Remove a file
fn unlink (&mut self, _req: &Request, _parent: u64, _name: &Path, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Remove a directory
fn rmdir (&mut self, _req: &Request, _parent: u64, _name: &Path, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Create a symbolic link
fn symlink (&mut self, _req: &Request, _parent: u64, _name: &Path, _link: &Path, reply: ReplyEntry) {
reply.error(ENOSYS);
}
/// Rename a file
fn rename (&mut self, _req: &Request, _parent: u64, _name: &Path, _newparent: u64, _newname: &Path, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Create a hard link
fn link (&mut self, _req: &Request, _ino: u64, _newparent: u64, _newname: &Path, reply: ReplyEntry) {
reply.error(ENOSYS);
}
/// Open a file
/// Open flags (with the exception of O_CREAT, O_EXCL, O_NOCTTY and O_TRUNC) are
/// available in flags. Filesystem may store an arbitrary file handle (pointer, index,
/// etc) in fh, and use this in all other file operations (read, write, flush,
/// release, fsync). Filesystem may also implement stateless file I/O and not store
/// anything in fh. There are also some flags (direct_io, keep_cache) which the
/// filesystem may set, to change the way the file is opened. See fuse_file_info
/// structure in <fuse_common.h> for more details.
/// Default: succeeds with fh 0 and no flags (stateless I/O).
fn open (&mut self, _req: &Request, _ino: u64, _flags: u32, reply: ReplyOpen) {
reply.opened(0, 0);
}
/// Read data
/// Read should send exactly the number of bytes requested except on EOF or error,
/// otherwise the rest of the data will be substituted with zeroes. An exception to
/// this is when the file has been opened in 'direct_io' mode, in which case the
/// return value of the read system call will reflect the return value of this
/// operation. fh will contain the value set by the open method, or will be undefined
/// if the open method didn't set any value.
fn read (&mut self, _req: &Request, _ino: u64, _fh: u64, _offset: u64, _size: u32, reply: ReplyData) {
reply.error(ENOSYS);
}
/// Write data
/// Write should return exactly the number of bytes requested except on error. An
/// exception to this is when the file has been opened in 'direct_io' mode, in
/// which case the return value of the write system call will reflect the return
/// value of this operation. fh will contain the value set by the open method, or
/// will be undefined if the open method didn't set any value.
fn write (&mut self, _req: &Request, _ino: u64, _fh: u64, _offset: u64, _data: &[u8], _flags: u32, reply: ReplyWrite) {
reply.error(ENOSYS);
}
/// Flush method
/// This is called on each close() of the opened file. Since file descriptors can
/// be duplicated (dup, dup2, fork), for one open call there may be many flush
/// calls. Filesystems shouldn't assume that flush will always be called after some
/// writes, or that it will be called at all. fh will contain the value set by the
/// open method, or will be undefined if the open method didn't set any value.
/// NOTE: the name of the method is misleading, since (unlike fsync) the filesystem
/// is not forced to flush pending writes. One reason to flush data, is if the
/// filesystem wants to return write errors. If the filesystem supports file locking
/// operations (setlk, getlk) it should remove all locks belonging to 'lock_owner'.
fn flush (&mut self, _req: &Request, _ino: u64, _fh: u64, _lock_owner: u64, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Release an open file
/// Release is called when there are no more references to an open file: all file
/// descriptors are closed and all memory mappings are unmapped. For every open
/// call there will be exactly one release call. The filesystem may reply with an
/// error, but error values are not returned to close() or munmap() which triggered
/// the release. fh will contain the value set by the open method, or will be undefined
/// if the open method didn't set any value. flags will contain the same flags as for
/// open.
/// Default: succeeds unconditionally.
fn release (&mut self, _req: &Request, _ino: u64, _fh: u64, _flags: u32, _lock_owner: u64, _flush: bool, reply: ReplyEmpty) {
reply.ok();
}
/// Synchronize file contents
/// If the datasync parameter is non-zero, then only the user data should be flushed,
/// not the meta data.
fn fsync (&mut self, _req: &Request, _ino: u64, _fh: u64, _datasync: bool, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Open a directory
/// Filesystem may store an arbitrary file handle (pointer, index, etc) in fh, and
/// use this in all other directory stream operations (readdir, releasedir,
/// fsyncdir). Filesystem may also implement stateless directory I/O and not store
/// anything in fh, though that makes it impossible to implement standard conforming
/// directory stream operations in case the contents of the directory can change
/// between opendir and releasedir.
/// Default: succeeds with fh 0 and no flags (stateless I/O).
fn opendir (&mut self, _req: &Request, _ino: u64, _flags: u32, reply: ReplyOpen) {
reply.opened(0, 0);
}
/// Read directory
/// Send a buffer filled using buffer.fill(), with size not exceeding the
/// requested size. Send an empty buffer on end of stream. fh will contain the
/// value set by the opendir method, or will be undefined if the opendir method
/// didn't set any value.
fn readdir (&mut self, _req: &Request, _ino: u64, _fh: u64, _offset: u64, reply: ReplyDirectory) {
reply.error(ENOSYS);
}
/// Release an open directory
/// For every opendir call there will be exactly one releasedir call. fh will
/// contain the value set by the opendir method, or will be undefined if the
/// opendir method didn't set any value.
/// Default: succeeds unconditionally.
fn releasedir (&mut self, _req: &Request, _ino: u64, _fh: u64, _flags: u32, reply: ReplyEmpty) {
reply.ok();
}
/// Synchronize directory contents
/// If the datasync parameter is set, then only the directory contents should
/// be flushed, not the meta data. fh will contain the value set by the opendir
/// method, or will be undefined if the opendir method didn't set any value.
fn fsyncdir (&mut self, _req: &Request, _ino: u64, _fh: u64, _datasync: bool, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Get file system statistics
/// Default: empty statistics with a 512-byte block size and 255-byte name limit.
fn statfs (&mut self, _req: &Request, _ino: u64, reply: ReplyStatfs) {
reply.statfs(0, 0, 0, 0, 0, 512, 255, 0);
}
/// Set an extended attribute
fn setxattr (&mut self, _req: &Request, _ino: u64, _name: &OsStr, _value: &[u8], _flags: u32, _position: u32, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Get an extended attribute
fn getxattr (&mut self, _req: &Request, _ino: u64, _name: &OsStr, reply: ReplyData) {
// FIXME: If arg.size is zero, the size of the value should be sent with fuse_getxattr_out
// FIXME: If arg.size is non-zero, send the value if it fits, or ERANGE otherwise
reply.error(ENOSYS);
}
/// List extended attribute names
fn listxattr (&mut self, _req: &Request, _ino: u64, reply: ReplyEmpty) {
// FIXME: If arg.size is zero, the size of the attribute list should be sent with fuse_getxattr_out
// FIXME: If arg.size is non-zero, send the attribute list if it fits, or ERANGE otherwise
reply.error(ENOSYS);
}
/// Remove an extended attribute
fn removexattr (&mut self, _req: &Request, _ino: u64, _name: &OsStr, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Check file access permissions
/// This will be called for the access() system call. If the 'default_permissions'
/// mount option is given, this method is not called. This method is not called
/// under Linux kernel versions 2.4.x
fn access (&mut self, _req: &Request, _ino: u64, _mask: u32, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Create and open a file
/// If the file does not exist, first create it with the specified mode, and then
/// open it. Open flags (with the exception of O_NOCTTY) are available in flags.
/// Filesystem may store an arbitrary file handle (pointer, index, etc) in fh,
/// and use this in all other file operations (read, write, flush, release,
/// fsync). There are also some flags (direct_io, keep_cache) which the
/// filesystem may set, to change the way the file is opened. See fuse_file_info
/// structure in <fuse_common.h> for more details. If this method is not
/// implemented or under Linux kernel versions earlier than 2.6.15, the mknod()
/// and open() methods will be called instead.
fn create (&mut self, _req: &Request, _parent: u64, _name: &Path, _mode: u32, _flags: u32, reply: ReplyCreate) {
reply.error(ENOSYS);
}
/// Test for a POSIX file lock
fn getlk (&mut self, _req: &Request, _ino: u64, _fh: u64, _lock_owner: u64, _start: u64, _end: u64, _typ: u32, _pid: u32, reply: ReplyLock) {
reply.error(ENOSYS);
}
/// Acquire, modify or release a POSIX file lock
/// For POSIX threads (NPTL) there's a 1-1 relation between pid and owner, but
/// otherwise this is not always the case. For checking lock ownership,
/// 'fi->owner' must be used. The l_pid field in 'struct flock' should only be
/// used to fill in this field in getlk(). Note: if the locking methods are not
/// implemented, the kernel will still allow file locking to work locally.
/// Hence these are only interesting for network filesystems and similar.
fn setlk (&mut self, _req: &Request, _ino: u64, _fh: u64, _lock_owner: u64, _start: u64, _end: u64, _typ: u32, _pid: u32, _sleep: bool, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// Map block index within file to block index within device
/// Note: This makes sense only for block device backed filesystems mounted
/// with the 'blkdev' option
fn bmap (&mut self, _req: &Request, _ino: u64, _blocksize: u32, _idx: u64, reply: ReplyBmap) {
reply.error(ENOSYS);
}
/// OS X only: Rename the volume. Set fuse_init_out.flags during init to
/// FUSE_VOL_RENAME to enable
#[cfg(target_os = "macos")]
fn setvolname (&mut self, _req: &Request, _name: &OsStr, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// OS X only (undocumented)
#[cfg(target_os = "macos")]
fn exchange (&mut self, _req: &Request, _parent: u64, _name: &Path, _newparent: u64, _newname: &Path, _options: u64, reply: ReplyEmpty) {
reply.error(ENOSYS);
}
/// OS X only: Query extended times (bkuptime and crtime). Set fuse_init_out.flags
/// during init to FUSE_XTIMES to enable
#[cfg(target_os = "macos")]
fn getxtimes (&mut self, _req: &Request, _ino: u64, reply: ReplyXTimes) {
reply.error(ENOSYS);
}
}
/// Mount the given filesystem to the given mountpoint. This function will
/// not return until the filesystem is unmounted.
pub fn mount<FS: Filesystem+Send, P: AsRef<Path>> (filesystem: FS, mountpoint: &P, options: &[&OsStr]) {
    // Build the session first, then block on its event loop until unmount.
    let session = Session::new(filesystem, mountpoint.as_ref(), options);
    session.run();
}
/// Mount the given filesystem to the given mountpoint. This function spawns
/// a background thread to handle filesystem operations while being mounted
/// and therefore returns immediately. The returned handle should be stored
/// to reference the mounted filesystem. If it's dropped, the filesystem will
/// be unmounted.
pub fn spawn_mount<'a, FS: Filesystem+Send+'static, P: AsRef<Path>> (filesystem: FS, mountpoint: &P, options: &[&OsStr]) -> io::Result<BackgroundSession<'a>> {
    // Build the session, then hand it off to a background thread.
    let session = Session::new(filesystem, mountpoint.as_ref(), options);
    session.spawn()
}
|
use std::io;
use std::io::prelude::*;
use std::io::{Error, ErrorKind};
/// Encoder/decoder for a single wire type.
///
/// `T` is the value type being serialized; an implementation defines the
/// exact byte layout it reads and writes.
pub trait Endec {
    /// The value type this implementation encodes and decodes.
    type T;
    /// Number of bytes `encode` will write for `value`.
    fn encoded_len(value: &Self::T) -> usize;
    /// Write `value` to `dst`, returning the number of bytes written.
    fn encode(value: &Self::T, dst: &mut dyn Write) -> io::Result<usize>;
    /// Read one value from `src`.
    fn decode(src: &mut dyn Read) -> io::Result<Self::T>;
}

/// Big-endian (network byte order) codec for `u16`.
impl Endec for u16 {
    type T = u16;

    fn encoded_len(_value: &Self::T) -> usize {
        2
    }

    fn encode(value: &Self::T, dst: &mut dyn Write) -> io::Result<usize> {
        // High byte first (big-endian).
        let buf = [(value >> 8) as u8, (value & 0xff) as u8];
        // write_all propagates I/O errors and handles short writes; the old
        // `assert_eq!(dst.write(&buf).is_ok(), true)` panicked on error and
        // silently accepted a partial write of 1 byte.
        dst.write_all(&buf)?;
        Ok(2)
    }

    fn decode(src: &mut dyn Read) -> io::Result<Self::T> {
        let mut buf = [0u8; 2];
        // read_exact fails cleanly on EOF/short reads instead of decoding a
        // half-filled buffer as the old bare `read` call could.
        src.read_exact(&mut buf)?;
        Ok(((buf[0] as u16) << 8) | (buf[1] as u16))
    }
}
/// Generates a `Protocol` enum with one variant per declared packet, plus an
/// `Endec` implementation whose `encoded_len`/`encode`/`decode` delegate to
/// the `Endec` impl of each field type.
///
/// FIXME: the numeric packet id (`$id`) is never written by `encode` and
/// `decode` hard-codes `id = 0` instead of reading it from `src`, so only
/// the packet declared with id 0 can ever be decoded.
macro_rules! packets {
($($id:expr => $name:ident { $($fname:ident: $fty:ty),* })+) => {
#[derive(Debug)]
enum Protocol {
$(
$name { $($fname:$fty),* }
),+
}
impl Endec for Protocol {
type T = Protocol ;
// Sum of the encoded lengths of all fields of the active variant.
fn encoded_len(value: &Self::T) -> usize {
let mut len: usize = 0;
match value { $(
&Protocol::$name { $($fname),* } => {$(
//TODO: try! is not the best thing here
len += <$fty as Endec>::encoded_len(&$fname);
)*}
)+}
len
}
// Encodes each field in declaration order; the id is NOT serialized.
fn encode(value: &Self::T, dst: &mut Write) -> io::Result<usize> {
let mut len: usize = 0;
match value { $(
&Protocol::$name { $($fname),* } => {$(
//TODO: try! is not the best thing here
len += try!(<$fty as Endec>::encode(&$fname, dst));
)*}
)+}
Ok(len)
}
fn decode(src: &mut Read) -> io::Result<Self::T> {
// FIXME: id should be decoded from `src`; hard-coding 0 always selects
// the first (id 0) packet.
let id = 0;
match id {
$(
$id => Ok(Protocol::$name {
$(
$fname:try!(<$fty as Endec>::decode(src))
),*
}),
)+
_ => Err(Error::new(ErrorKind::Other, "oh no!"))
}
}
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io;
    use std::io::prelude::*;
    use std::io::{Error, ErrorKind};

    packets! {
        0 => Message { a: u16, b: u16 }
        1 => Msg { b: u16 }
    }

    /// Encodes a `Message` and checks the reported length, the exact wire
    /// bytes, and `encoded_len`.
    #[test]
    fn test_main() {
        let x: Protocol = Protocol::Message { a: 10, b: 15 };
        let mut buf: Vec<u8> = Vec::new();
        // The io::Result from encode was previously dropped silently.
        let written = Protocol::encode(&x, &mut buf).expect("encode failed");
        assert_eq!(written, 4);
        // Two big-endian u16s: 10 then 15.
        assert_eq!(buf, vec![0, 10, 0, 15]);
        assert_eq!(Protocol::encoded_len(&x), 4);
    }
}
The macro now generates a proper trait implementation.
use std::io;
use std::io::prelude::*;
use std::io::{Error, ErrorKind};
/// Encoder/decoder for a single wire type.
///
/// `T` is the value type being serialized; an implementation defines the
/// exact byte layout it reads and writes.
pub trait Endec {
    /// The value type this implementation encodes and decodes.
    type T;
    /// Number of bytes `encode` will write for `value`.
    fn encoded_len(value: &Self::T) -> usize;
    /// Write `value` to `dst`, returning the number of bytes written.
    fn encode(value: &Self::T, dst: &mut dyn Write) -> io::Result<usize>;
    /// Read one value from `src`.
    fn decode(src: &mut dyn Read) -> io::Result<Self::T>;
}

/// A complete, self-describing packet (as opposed to a single field codec).
///
/// NOTE(review): currently unimplemented by any type in this file.
pub trait Packet {
    /// The concrete packet type produced by `decode`.
    type T;
    /// Write this packet to `dst`, returning the number of bytes written.
    fn encode(&self, dst: &mut dyn Write) -> io::Result<usize>;
    /// Read one packet from `src`.
    fn decode(src: &mut dyn Read) -> io::Result<Self::T>;
}

/// Big-endian (network byte order) codec for `u16`.
impl Endec for u16 {
    type T = u16;

    fn encoded_len(_value: &Self::T) -> usize {
        2
    }

    fn encode(value: &Self::T, dst: &mut dyn Write) -> io::Result<usize> {
        // High byte first (big-endian).
        let buf = [(value >> 8) as u8, (value & 0xff) as u8];
        // write_all propagates I/O errors and handles short writes; the old
        // `assert_eq!(dst.write(&buf).is_ok(), true)` panicked on error and
        // silently accepted a partial write of 1 byte.
        dst.write_all(&buf)?;
        Ok(2)
    }

    fn decode(src: &mut dyn Read) -> io::Result<Self::T> {
        let mut buf = [0u8; 2];
        // read_exact fails cleanly on EOF/short reads instead of decoding a
        // half-filled buffer as the old bare `read` call could.
        src.read_exact(&mut buf)?;
        Ok(((buf[0] as u16) << 8) | (buf[1] as u16))
    }
}
/// Generates a `Protocol` enum with one variant per declared packet, plus an
/// `Endec` implementation whose `encoded_len`/`encode`/`decode` delegate to
/// the `Endec` impl of each field type.
///
/// FIXME: the numeric packet id (`$id`) is never written by `encode` and
/// `decode` hard-codes `id = 0` instead of reading it from `src`, so only
/// the packet declared with id 0 can ever be decoded.
macro_rules! packets {
($($id:expr => $name:ident { $($fname:ident: $fty:ty),* })+) => {
#[derive(Debug)]
enum Protocol {
$(
$name { $($fname:$fty),* }
),+
}
impl Endec for Protocol {
type T = Protocol ;
// Sum of the encoded lengths of all fields of the active variant.
fn encoded_len(value: &Self::T) -> usize {
let mut len: usize = 0;
match value { $(
&Protocol::$name { $($fname),* } => {$(
//TODO: try! is not the best thing here
len += <$fty as Endec>::encoded_len(&$fname);
)*}
)+}
len
}
// Encodes each field in declaration order; the id is NOT serialized.
fn encode(value: &Self::T, dst: &mut Write) -> io::Result<usize> {
let mut len: usize = 0;
match value { $(
&Protocol::$name { $($fname),* } => {$(
//TODO: try! is not the best thing here
len += try!(<$fty as Endec>::encode(&$fname, dst));
)*}
)+}
Ok(len)
}
fn decode(src: &mut Read) -> io::Result<Self::T> {
// FIXME: id should be decoded from `src`; hard-coding 0 always selects
// the first (id 0) packet.
let id = 0;
match id {
$(
$id => Ok(Protocol::$name {
$(
$fname:try!(<$fty as Endec>::decode(src))
),*
}),
)+
_ => Err(Error::new(ErrorKind::Other, "oh no!"))
}
}
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io;
    use std::io::prelude::*;
    use std::io::{Error, ErrorKind};

    packets! {
        0 => Message { a: u16, b: u16 }
        1 => Msg { b: u16 }
    }

    /// Encodes a `Message` and checks the reported length, the exact wire
    /// bytes, and `encoded_len`.
    #[test]
    fn test_main() {
        let x: Protocol = Protocol::Message { a: 10, b: 15 };
        let mut buf: Vec<u8> = Vec::new();
        // The io::Result from encode was previously dropped silently.
        let written = Protocol::encode(&x, &mut buf).expect("encode failed");
        assert_eq!(written, 4);
        // Two big-endian u16s: 10 then 15.
        assert_eq!(buf, vec![0, 10, 0, 15]);
        assert_eq!(Protocol::encoded_len(&x), 4);
    }
}
|
/// Cryptography helpers (currently a placeholder module).
pub mod crypto {
/// Smoke-test hook: prints a marker showing the crypto module is linked.
pub fn crypto_mod_test(){
println!("Crypto Mod Test");
}
}
pub mod sql {
extern crate rpassword;
extern crate rusqlite;
use std::path::Path;
use self::rusqlite::Connection;
/// Column names of the `password_entry` table, in schema order.
fn columns() -> Vec<String> {
    ["name", "username", "password", "url", "notes"]
        .iter()
        .map(|s| s.to_string())
        .collect()
}
/// Smoke-test hook: prints a marker showing the sql module is linked.
pub fn sql_mod_test(){
println!("SQL Mod Test");
}
pub fn open_db(filepath: &String) -> Connection {
use std::convert;
let path = Path::new(filepath);
//see if the db exists, to take user creds if not.
let mut db_exists = true;
if !path.exists() {
db_exists = false;
}
//Opening the connection will create the file if it does not exist, or connect to the file
//if it does.
let conn: rusqlite::Connection = Connection::open(&path).expect("Could not open a connection to the database.");
conn.execute("CREATE TABLE IF NOT EXISTS user (password TEXT);",&[]).expect("Unable to create table.");
//If the database did not exist, set the master password for it.
if !db_exists {
use std::io;
println!("Enter a password for this database.\nNote: You will not be able to see the password as you are entering it.");
let mut password = rpassword::prompt_password_stdout("Password: ").unwrap();
password = password.trim().to_string();
insert_user(&conn, &password);
}
create_entry_table(&conn);
//insert_entry(&conn);
get_entry(&conn);
return conn;
}
/// Store the master password `pass` in the `user` table (panics on failure).
fn insert_user(conn: &Connection, pass:&String) {
conn.execute("INSERT into user(password) VALUES (?)",&[pass]).expect("Could not add password to the user table.");
}
/// Ensure the `password_entry` table exists, with one TEXT column per name
/// returned by `columns()`.
fn create_entry_table(conn: &Connection) {
    let entry_columns = columns();
    // The statement result was previously bound to an unused `result` variable.
    conn.execute(
        &format!(
            "CREATE TABLE IF NOT EXISTS password_entry ({0} TEXT, {1} TEXT, {2} TEXT, {3} TEXT, {4} TEXT)",
            entry_columns[0], entry_columns[1], entry_columns[2], entry_columns[3], entry_columns[4]
        ),
        &[],
    )
    .expect("Unable to create password entry table.");
}
/// Prompt the user for a new entry and insert it into `password_entry`.
///
/// Nothing is inserted if the user aborted input with the termination string
/// (see `user_input`), in which case fewer than all columns were collected.
fn insert_entry(conn: &Connection) {
    let entry_columns = columns();
    let user_input = user_input();
    // Only insert when every column was provided; otherwise the user
    // terminated input early.
    if user_input.len() == entry_columns.len() {
        conn.execute(
            &format!(
                "INSERT INTO password_entry ({0},{1},{2},{3},{4}) VALUES (?1,?2,?3,?4,?5)",
                entry_columns[0], entry_columns[1], entry_columns[2], entry_columns[3], entry_columns[4]
            ),
            &[&user_input[0], &user_input[1], &user_input[2], &user_input[3], &user_input[4]],
        )
        // The Result of execute was previously ignored, silently dropping
        // insertion failures.
        .expect("Could not add the password entry.");
    } else {
        // Fixed doubled word in the original message ("so the the password").
        println!("Not all properties were added, so the password entry was not added.");
    }
}
fn user_input() -> Vec<String> {
use std::io;
let mut info: Vec<String> = Vec::new(); //TODO make this an array?
let columns = columns();
let mut entry = String::new();
let mut broken = false;
let stop_keyword = "!stop".to_string();
//Gather user input for each DB column.
for item in columns.iter() { //TODO make this a counting for-loop
println!("Enter the {} for this entry:",item);
//Hide user entry if password is being entered.
if item == "password" { //TODO is there a way to generalize this?
let mut different = true;
let mut confirm = String::new();
//Has user confirm password to cutdown on potential spelling errors.
while different {
entry = rpassword::prompt_password_stdout("Note: The password will be hidden.\n").unwrap();
//Stop if user entry enters the termination string.
if (entry.trim().to_string() == stop_keyword) {
broken = true;
break;
}
confirm = rpassword::prompt_password_stdout("Please confirm your password.\n").unwrap();
//Stop if user entry enters the termination string.
if (confirm.trim().to_string() == stop_keyword) {
broken = true;
break;
}
if (entry == confirm) {
different = false;
}
else {
println!("The passwords did not match. Please re-enter your password.");
}
}
}
else {
io::stdin().read_line(&mut entry).expect("Unable to read property.");
}
//Remove newlines and store for entry.
entry = entry.trim().to_string();
//Stop if user entry enters or has entered the termination string.
if (entry != stop_keyword && !broken) {
info.push(entry.clone()); //TODO If this is turned into an array, this will need to be added at an instead of pushed.
}
else {
break;
}
entry.clear(); //read_line just appends input, this makes it act like it's overwriting the input.
}
return info;
}
/// Print every row of `password_entry` to stdout, pipe-separated.
fn get_entry(conn: &Connection) {
let mut stmt = conn.prepare("select * from password_entry").expect("Unable to get password entry.");
// The closure prints each row as a side effect of the mapping.
let mut stmt_iter = stmt.query_map(&[],|row|{
for num in 0..columns().len() as i32 {
//Need to specify the type used to find the right column in the row and the output type.
print!("{} | ", row.get::<i32,String>(num));
}
println!("");
}).unwrap();
// MappedRows is lazy: consuming it with count() is what actually executes
// the query and runs the closure over each row.
let count = stmt_iter.count();
}
//fn delete(&conn) {
//
//}
}
Added the ability for the user to search the database against entry names.
/// Cryptography helpers (currently a placeholder module).
pub mod crypto {
/// Smoke-test hook: prints a marker showing the crypto module is linked.
pub fn crypto_mod_test(){
println!("Crypto Mod Test");
}
}
pub mod sql {
extern crate rpassword;
extern crate rusqlite;
use std::path::Path;
use self::rusqlite::Connection;
use std::io;
/// Column names of the `password_entry` table, in schema order.
fn columns() -> Vec<String> {
    ["name", "username", "password", "url", "notes"]
        .iter()
        .map(|s| s.to_string())
        .collect()
}
/// Smoke-test hook: prints a marker showing the sql module is linked.
pub fn sql_mod_test(){
println!("SQL Mod Test");
}
pub fn open_db(filepath: &String) -> Connection {
use std::convert;
let path = Path::new(filepath);
//see if the db exists, to take user creds if not.
let mut db_exists = true;
if !path.exists() {
db_exists = false;
}
//Opening the connection will create the file if it does not exist, or connect to the file
//if it does.
let conn: rusqlite::Connection = Connection::open(&path).expect("Could not open a connection to the database.");
conn.execute("CREATE TABLE IF NOT EXISTS user (password TEXT);",&[]).expect("Unable to create table.");
//If the database did not exist, set the master password for it.
if !db_exists {
use std::io;
println!("Enter a password for this database.\nNote: You will not be able to see the password as you are entering it.");
let mut password = rpassword::prompt_password_stdout("Password: ").unwrap();
password = password.trim().to_string();
insert_user(&conn, &password);
}
create_entry_table(&conn);
//insert_entry(&conn);
search_entry(&conn);
return conn;
}
/// Store the master password `pass` in the `user` table (panics on failure).
fn insert_user(conn: &Connection, pass:&String) {
conn.execute("INSERT into user(password) VALUES (?)",&[pass]).expect("Could not add password to the user table.");
}
/// Ensure the `password_entry` table exists, with one TEXT column per name
/// returned by `columns()`.
fn create_entry_table(conn: &Connection) {
    let entry_columns = columns();
    // The statement result was previously bound to an unused `result` variable.
    conn.execute(
        &format!(
            "CREATE TABLE IF NOT EXISTS password_entry ({0} TEXT, {1} TEXT, {2} TEXT, {3} TEXT, {4} TEXT)",
            entry_columns[0], entry_columns[1], entry_columns[2], entry_columns[3], entry_columns[4]
        ),
        &[],
    )
    .expect("Unable to create password entry table.");
}
/// Prompt the user for a new entry and insert it into `password_entry`.
///
/// Nothing is inserted if the user aborted input with the termination string
/// (see `user_input`), in which case fewer than all columns were collected.
fn insert_entry(conn: &Connection) {
    let entry_columns = columns();
    let user_input = user_input();
    // Only insert when every column was provided; otherwise the user
    // terminated input early.
    if user_input.len() == entry_columns.len() {
        conn.execute(
            &format!(
                "INSERT INTO password_entry ({0},{1},{2},{3},{4}) VALUES (?1,?2,?3,?4,?5)",
                entry_columns[0], entry_columns[1], entry_columns[2], entry_columns[3], entry_columns[4]
            ),
            &[&user_input[0], &user_input[1], &user_input[2], &user_input[3], &user_input[4]],
        )
        // The Result of execute was previously ignored, silently dropping
        // insertion failures.
        .expect("Could not add the password entry.");
    } else {
        // Fixed doubled word in the original message ("so the the password").
        println!("Not all properties were added, so the password entry was not added.");
    }
}
fn user_input() -> Vec<String> {
use std::io;
let mut info: Vec<String> = Vec::new(); //TODO make this an array?
let columns = columns();
let mut entry = String::new();
let mut broken = false;
let stop_keyword = "!stop".to_string();
//Gather user input for each DB column.
for item in columns.iter() { //TODO make this a counting for-loop
println!("Enter the {} for this entry:",item);
//Hide user entry if password is being entered.
if item == "password" { //TODO is there a way to generalize this?
let mut different = true;
let mut confirm = String::new();
//Has user confirm password to cutdown on potential spelling errors.
while different {
entry = rpassword::prompt_password_stdout("Note: The password will be hidden.\n").unwrap();
//Stop if user entry enters the termination string.
if (entry.trim().to_string() == stop_keyword) {
broken = true;
break;
}
confirm = rpassword::prompt_password_stdout("Please confirm your password.\n").unwrap();
//Stop if user entry enters the termination string.
if (confirm.trim().to_string() == stop_keyword) {
broken = true;
break;
}
if (entry == confirm) {
different = false;
}
else {
println!("The passwords did not match. Please re-enter your password.");
}
}
}
else {
io::stdin().read_line(&mut entry).expect("Unable to read property.");
}
//Remove newlines and store for entry.
entry = entry.trim().to_string();
//Stop if user entry enters or has entered the termination string.
if (entry != stop_keyword && !broken) {
info.push(entry.clone()); //TODO If this is turned into an array, this will need to be added at an instead of pushed.
}
else {
break;
}
entry.clear(); //read_line just appends input, this makes it act like it's overwriting the input.
}
return info;
}
/// Search `password_entry` by name (the first column) using a LIKE pattern
/// built from user input, printing each matching row pipe-separated.
fn search_entry(conn: &Connection) {
    //TODO Currently hardcoded to search by site name. Would be better to ask
    //users for column(s?) to search against.
    let mut stmt = conn
        .prepare(&format!("select * from password_entry where {} LIKE ?", columns()[0]))
        .expect("Unable to get password entry.");
    // Fixed grammar in the user-facing prompt ("name's" -> "names").
    println!("Enter text to search against names of entries:");
    let mut search_term = String::new();
    io::stdin().read_line(&mut search_term).expect("Not a string.");
    // Trim the trailing newline. The old `.to_string().trim().to_string()`
    // allocated a redundant intermediate String.
    search_term = search_term.trim().to_string();
    let stmt_iter = stmt
        .query_map(&[&format!("%{}%", search_term)], |row| {
            for num in 0..columns().len() as i32 {
                // Specify the index type and the output type for the column.
                print!("{} | ", row.get::<i32, String>(num));
            }
            println!("");
        })
        .unwrap();
    // MappedRows is lazy: consuming the iterator is what executes the query
    // and runs the closure; the count itself is irrelevant.
    let _ = stmt_iter.count();
}
//fn delete(&conn) {
//
//}
}
|
extern crate pulldown_cmark;
extern crate handlebars;
extern crate walkdir;
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate serde_yaml;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate error_chain;
use std::fs::{self, File};
use std::io::Read;
use std::path::{Path, PathBuf};
mod html;
mod walker;
mod file_utils;
mod config;
mod templates;
#[cfg(test)]
mod test_utils;
use walker::MarkdownFileList;
/// Error type for the conversion of the markdown files to the static site.
error_chain!{
    // External error types wrapped so `?` converts them into this crate's
    // `Error` automatically.
    foreign_links {
        IO(std::io::Error);
        Template(handlebars::RenderError);
        Config(serde_yaml::Error);
    }
    // Crate-specific error variants; `Fail` carries a free-form message.
    errors {
        Fail(t: String)
    }
}
/// Drives markdown-to-HTML conversion for the site rooted at `root_dir`.
#[derive(Debug)]
pub struct Convertor {
    // Parsed configuration, loaded and validated in `Convertor::new`.
    configuration: config::Configuration,
    // Directory the site is generated from.
    root_dir: PathBuf,
}
/// One generated HTML file: its destination path and the content to write.
#[derive(Debug)]
pub struct ConvertedFile {
    // Destination path within the configured output directory.
    path: PathBuf,
    // Rendered HTML content for this file.
    content: String,
}
impl Convertor {
    /// Initialize a new convertor for the provided root directory.
    /// This will read and validate the configuration.
    pub fn new<P: AsRef<Path>>(root_dir: P) -> Result<Convertor> {
        let root_dir: PathBuf = root_dir.as_ref().to_path_buf();
        info!("Generating site from directory: {}",
              root_dir.canonicalize()?.display());
        let configuration = read_config(&root_dir)?;
        handle_config(&root_dir, &configuration)?;
        Ok(Convertor {
            configuration,
            root_dir,
        })
    }
    /// Entry function which will perform the entire process for the static site
    /// generation.
    ///
    /// Through here it will:
    ///
    /// * Find all markdown files to use
    /// * Convert all to HTML
    pub fn generate_site(&self) -> Result<Vec<ConvertedFile>> {
        info!("Generating site");
        let mut converted_files = vec![];
        let all_files = find_all_files(&self.root_dir)?;
        let out_dir = self.configuration.out_dir();
        if self.configuration.gen_index() {
            debug!("Index to be generated");
            // TODO(review): propagate this error with `?` once the error type
            // of `templates::generate_index` is confirmed convertible.
            let index_content = templates::generate_index(&all_files, &self.configuration).unwrap();
            converted_files.push(ConvertedFile {
                path: PathBuf::from(&out_dir).join("index.html"),
                content: index_content,
            })
        }
        for file in all_files.get_files() {
            // Was `.unwrap()`: one bad markdown file aborted the whole run.
            // `create_html` returns this crate's `Result`, so propagate.
            let result = create_html(file.get_path(), &self.configuration)?;
            converted_files.push(ConvertedFile {
                path:
                    PathBuf::from(&out_dir).join(format!("{}.html",
                                                         file.get_file_name())),
                content: result,
            })
        }
        Ok(converted_files)
    }
    /// Write the files provided to the file system
    ///
    /// The files provided will already be produced using `generate_site` and hence have all configuration information present
    pub fn write_files(&self, files: Vec<ConvertedFile>) -> Result<()> {
        if !file_utils::check_dir_exists(self.configuration.out_dir()) {
            fs::create_dir(self.configuration.out_dir())?;
        }
        for file in files {
            // NOTE(review): any result of write_to_file is ignored here —
            // confirm whether it can fail and should be propagated.
            file_utils::write_to_file(file.path, file.content);
        }
        if self.configuration.copy_resources() {
            for stylesheet in &self.configuration.stylesheet() {
                // Copy across the stylesheet
                file_utils::copy_file(&self.root_dir, &self.configuration.out_dir(), stylesheet)?;
            }
            // Copy across the images
            let images_source = self.root_dir.join("images");
            let images_dest = format!("{}/images", self.configuration.out_dir());
            fs::create_dir_all(&images_dest)?;
            // Reuse `images_source` instead of re-formatting the path; this
            // also removes the `to_str().unwrap()` that panicked on
            // non-UTF-8 root paths.
            for entry in fs::read_dir(&images_source)? {
                let entry = entry?;
                info!("Copying {:?}", entry.file_name());
                file_utils::copy_file(&images_source,
                                      &images_dest,
                                      &entry.file_name().into_string().unwrap())?;
            }
        }
        Ok(())
    }
}
/// Walks the tree rooted at `root_dir` and collects every Markdown file
/// found beneath it into a `MarkdownFileList`.
fn find_all_files<P: AsRef<Path>>(root_dir: P) -> Result<MarkdownFileList> {
    let markdown_files = walker::find_markdown_files(root_dir)?;
    // Log each discovered file at debug level before wrapping the list.
    markdown_files.iter().for_each(|f| debug!("{:?}", f));
    Ok(MarkdownFileList::new(markdown_files))
}
/// Converts the provided Markdown file to its HTML equivalent. This is a
/// direct mapping: no extra tags such as `<body>` or `<html>` are added.
fn create_html<P: AsRef<Path>>(file_name: P, config: &config::Configuration) -> Result<String> {
    let mut markdown = String::new();
    let mut source = File::open(file_name)?;
    source.read_to_string(&mut markdown)?;
    // Parse with table support enabled, then wrap the bare HTML via templates.
    let parser = pulldown_cmark::Parser::new_ext(&markdown, pulldown_cmark::OPTION_ENABLE_TABLES);
    templates::encapsulate_bare_html(html::consume(parser), config)
}
/// Searches the given directory for the `mdup.yml` configuration file and
/// deserializes it; errors if no such file is present.
fn read_config<P: AsRef<Path>>(path: P) -> Result<config::Configuration> {
    const CONFIG_NAME: &'static str = "mdup.yml";
    let full_path = path.as_ref().to_path_buf();
    debug!("Starting search for configuration file at: {:?}",
           path.as_ref());
    // Scan the directory, skipping unreadable entries, until the config
    // file name matches.
    for entry in fs::read_dir(&path)?.filter_map(|x| x.ok()) {
        if let Ok(file_name) = entry.file_name().into_string() {
            if file_name == CONFIG_NAME {
                return Ok(config::Configuration::from(full_path.join(file_name))?);
            }
        }
    }
    Err(ErrorKind::Fail(format!("Configuration file: {} not found in {}",
                                CONFIG_NAME,
                                fs::canonicalize(path).unwrap().display()))
        .into())
}
/// Validates the configuration against the root directory: when no index is
/// to be generated, a hand-written `index.md` must already exist there.
fn handle_config(root_dir: &AsRef<Path>, config: &config::Configuration) -> Result<()> {
    // If not specified don't generate, if true generate
    if config.gen_index() {
        // Index will be generated, nothing to verify.
        return Ok(());
    }
    let path = root_dir.as_ref().join("index.md");
    info!("Checking that {:?} exists like the configuration says it will",
          path);
    if file_utils::check_file_exists(path) {
        Ok(())
    } else {
        Err(ErrorKind::Fail("Expected index.md in the root directory".into()).into())
    }
}
#[cfg(test)]
mod tests {
    use test_utils;
    use std::env;
    use std::fs::File;
    // Converts a known markdown fixture and compares against checked-in HTML.
    #[test]
    fn test_create_html() {
        // Read expected
        let config = super::config::Configuration::from("resources/mdup.yml").unwrap();
        let expected = include_str!("../tests/resources/all_test_good.html");
        let actual = super::create_html("resources/all_test.md", &config).unwrap();
        test_utils::compare_string_content(expected, &actual);
    }
    // Ensure that will return an error when no configuration found
    #[test]
    fn test_fail_read_config() {
        assert!(super::read_config("src").is_err());
    }
    // Ensure that return error when no index found but specified it should not generate one
    // NOTE(review): "resouces" looks like a typo of "resources" — presumably
    // intentional so that no index.md can be found; confirm.
    #[test]
    fn test_fail_handle_config() {
        let config = super::config::Configuration::from("tests/resources/test_conf_all.yml")
            .unwrap();
        assert!(super::handle_config(&"resouces", &config).is_err());
    }
    // Ensure that return positive result when the index is not to be generated and one exists
    // NOTE(review): the index.md created in the temp directory is never
    // removed afterwards; consider cleaning up at the end of the test.
    #[test]
    fn test_pass_handle_config() {
        let config = super::config::Configuration::from("tests/resources/test_conf_all.yml")
            .unwrap();
        let mut tmp_dir = env::temp_dir();
        tmp_dir.push("index.md");
        File::create(tmp_dir).unwrap();
        assert!(super::handle_config(&env::temp_dir(), &config).is_ok());
    }
}
Added handling for the presence of an index.md file when the index should be generated.
extern crate pulldown_cmark;
extern crate handlebars;
extern crate walkdir;
extern crate serde_json;
#[macro_use]
extern crate log;
extern crate serde_yaml;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate error_chain;
use std::fs::{self, File};
use std::io::Read;
use std::path::{Path, PathBuf};
mod html;
mod walker;
mod file_utils;
mod config;
mod templates;
#[cfg(test)]
mod test_utils;
use walker::MarkdownFileList;
/// Error type for the conversion of the markdown files to the static site.
error_chain!{
    // External error types wrapped so `?` converts them into this crate's
    // `Error` automatically.
    foreign_links {
        IO(std::io::Error);
        Template(handlebars::RenderError);
        Config(serde_yaml::Error);
    }
    // Crate-specific error variants; `Fail` carries a free-form message.
    errors {
        Fail(t: String)
    }
}
/// Drives markdown-to-HTML conversion for the site rooted at `root_dir`.
#[derive(Debug)]
pub struct Convertor {
    // Parsed configuration, loaded and validated in `Convertor::new`.
    configuration: config::Configuration,
    // Directory the site is generated from.
    root_dir: PathBuf,
}
/// One generated HTML file: its destination path and the content to write.
#[derive(Debug)]
pub struct ConvertedFile {
    // Destination path within the configured output directory.
    path: PathBuf,
    // Rendered HTML content for this file.
    content: String,
}
impl Convertor {
    /// Initialize a new convertor for the provided root directory.
    /// This will read and validate the configuration.
    pub fn new<P: AsRef<Path>>(root_dir: P) -> Result<Convertor> {
        let root_dir: PathBuf = root_dir.as_ref().to_path_buf();
        info!("Generating site from directory: {}",
              root_dir.canonicalize()?.display());
        let configuration = read_config(&root_dir)?;
        handle_config(&root_dir, &configuration)?;
        Ok(Convertor {
            configuration,
            root_dir,
        })
    }
    /// Entry function which will perform the entire process for the static site
    /// generation.
    ///
    /// Through here it will:
    ///
    /// * Find all markdown files to use
    /// * Convert all to HTML
    pub fn generate_site(&self) -> Result<Vec<ConvertedFile>> {
        info!("Generating site");
        let mut converted_files = vec![];
        let all_files = find_all_files(&self.root_dir)?;
        let out_dir = self.configuration.out_dir();
        if self.configuration.gen_index() {
            debug!("Index to be generated");
            // TODO(review): propagate this error with `?` once the error type
            // of `templates::generate_index` is confirmed convertible.
            let index_content = templates::generate_index(&all_files, &self.configuration).unwrap();
            converted_files.push(ConvertedFile {
                path: PathBuf::from(&out_dir).join("index.html"),
                content: index_content,
            })
        }
        for file in all_files.get_files() {
            // Was `.unwrap()`: one bad markdown file aborted the whole run.
            // `create_html` returns this crate's `Result`, so propagate.
            let result = create_html(file.get_path(), &self.configuration)?;
            converted_files.push(ConvertedFile {
                path: PathBuf::from(&out_dir)
                    .join(format!("{}.html", file.get_file_name())),
                content: result,
            })
        }
        Ok(converted_files)
    }
    /// Write the files provided to the file system
    ///
    /// The files provided will already be produced using `generate_site` and hence have all configuration information present
    pub fn write_files(&self, files: Vec<ConvertedFile>) -> Result<()> {
        if !file_utils::check_dir_exists(self.configuration.out_dir()) {
            fs::create_dir(self.configuration.out_dir())?;
        }
        for file in files {
            // NOTE(review): any result of write_to_file is ignored here —
            // confirm whether it can fail and should be propagated.
            file_utils::write_to_file(file.path, file.content);
        }
        if self.configuration.copy_resources() {
            for stylesheet in &self.configuration.stylesheet() {
                // Copy across the stylesheet
                file_utils::copy_file(&self.root_dir, &self.configuration.out_dir(), stylesheet)?;
            }
            // Copy across the images
            let images_source = self.root_dir.join("images");
            let images_dest = format!("{}/images", self.configuration.out_dir());
            fs::create_dir_all(&images_dest)?;
            // Reuse `images_source` instead of re-formatting the path; this
            // also removes the `to_str().unwrap()` that panicked on
            // non-UTF-8 root paths.
            for entry in fs::read_dir(&images_source)? {
                let entry = entry?;
                info!("Copying {:?}", entry.file_name());
                file_utils::copy_file(&images_source,
                                      &images_dest,
                                      &entry.file_name().into_string().unwrap())?;
            }
        }
        Ok(())
    }
}
/// Walks the tree rooted at `root_dir` and collects every Markdown file
/// found beneath it into a `MarkdownFileList`.
fn find_all_files<P: AsRef<Path>>(root_dir: P) -> Result<MarkdownFileList> {
    let markdown_files = walker::find_markdown_files(root_dir)?;
    // Log each discovered file at debug level before wrapping the list.
    for found in &markdown_files {
        debug!("{:?}", found);
    }
    Ok(MarkdownFileList::new(markdown_files))
}
/// Converts the provided Markdown file to its HTML equivalent. This is a
/// direct mapping: no extra tags such as `<body>` or `<html>` are added.
fn create_html<P: AsRef<Path>>(file_name: P, config: &config::Configuration) -> Result<String> {
    let mut raw_markdown = String::new();
    let mut input = File::open(file_name)?;
    input.read_to_string(&mut raw_markdown)?;
    // Parse with table support enabled, then wrap the bare HTML via templates.
    let parser = pulldown_cmark::Parser::new_ext(&raw_markdown, pulldown_cmark::OPTION_ENABLE_TABLES);
    templates::encapsulate_bare_html(html::consume(parser), config)
}
/// Searches the given directory for the `mdup.yml` configuration file and
/// deserializes it; errors if no such file is present.
fn read_config<P: AsRef<Path>>(path: P) -> Result<config::Configuration> {
    const CONFIG_NAME: &'static str = "mdup.yml";
    let full_path = path.as_ref().to_path_buf();
    debug!("Starting search for configuration file at: {:?}",
           path.as_ref());
    // Scan the directory, skipping unreadable entries, until the config
    // file name matches.
    for entry in fs::read_dir(&path)?.filter_map(|x| x.ok()) {
        if let Ok(file_name) = entry.file_name().into_string() {
            if file_name == CONFIG_NAME {
                return Ok(config::Configuration::from(full_path.join(file_name))?);
            }
        }
    }
    Err(ErrorKind::Fail(format!("Configuration file: {} not found in {}",
                                CONFIG_NAME,
                                fs::canonicalize(path).unwrap().display()))
        .into())
}
/// Validates the configuration against the state of the root directory:
/// an `index.md` must exist when no index is generated, and must not be
/// present when one is to be generated.
fn handle_config(root_dir: &AsRef<Path>, config: &config::Configuration) -> Result<()> {
    let path = root_dir.as_ref().join("index.md");
    if !config.gen_index() {
        // The user supplies their own index, so it has to be present.
        info!("Checking that {:?} exists like the configuration says it will",
              path);
        return if file_utils::check_file_exists(path) {
            Ok(())
        } else {
            Err(ErrorKind::Fail("Expected index.md in the root directory".into()).into())
        };
    }
    // An index will be generated; a hand-written one must not be in the way.
    info!("Checking that {:?} does not exist so can generate index",
          path);
    if !file_utils::check_file_exists(path) {
        Ok(())
    } else {
        Err(ErrorKind::Fail("Should not have index.md in the root directory".into()).into())
    }
}
#[cfg(test)]
mod tests {
    use test_utils;
    use std::env;
    use std::fs::{self, File};
    // Converts a known markdown fixture and compares against checked-in HTML.
    #[test]
    fn test_create_html() {
        // Read expected
        let config = super::config::Configuration::from("resources/mdup.yml").unwrap();
        let expected = include_str!("../tests/resources/all_test_good.html");
        let actual = super::create_html("resources/all_test.md", &config).unwrap();
        test_utils::compare_string_content(expected, &actual);
    }
    // Ensure that will return an error when no configuration found
    #[test]
    fn test_fail_read_config() {
        assert!(super::read_config("src").is_err());
    }
    // Ensure that return error when no index found but specified it should not generate one
    // NOTE(review): "resouces" looks like a typo of "resources" — presumably
    // intentional so that no index.md can be found; confirm.
    #[test]
    fn test_fail_handle_config_no_index() {
        let config = super::config::Configuration::from("tests/resources/test_conf_all.yml")
            .unwrap();
        assert!(super::handle_config(&"resouces", &config).is_err());
    }
    // Ensure that return error when index is found but configured to generate one
    // NOTE(review): this test creates and removes resources/index.md on disk,
    // so it can race with other tests touching the same directory.
    #[test]
    fn test_fail_handle_config_with_index() {
        let config = super::config::Configuration::from("resources/mdup.yml").unwrap();
        let _ = fs::File::create("resources/index.md").unwrap();
        assert!(super::handle_config(&"resources", &config).is_err());
        fs::remove_file("resources/index.md").unwrap();
    }
    // Ensure that return positive result when the index is not to be generated and one exists
    // NOTE(review): the index.md created in the temp directory is never
    // removed afterwards; consider cleaning up at the end of the test.
    #[test]
    fn test_pass_handle_config() {
        let config = super::config::Configuration::from("tests/resources/test_conf_all.yml")
            .unwrap();
        let mut tmp_dir = env::temp_dir();
        tmp_dir.push("index.md");
        File::create(tmp_dir).unwrap();
        assert!(super::handle_config(&env::temp_dir(), &config).is_ok());
    }
}
|
//! [![github]](https://github.com/dtolnay/quote) [![crates-io]](https://crates.io/crates/quote) [![docs-rs]](https://docs.rs/quote)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K
//!
//! <br>
//!
//! This crate provides the [`quote!`] macro for turning Rust syntax tree data
//! structures into tokens of source code.
//!
//! [`quote!`]: macro.quote.html
//!
//! Procedural macros in Rust receive a stream of tokens as input, execute
//! arbitrary Rust code to determine how to manipulate those tokens, and produce
//! a stream of tokens to hand back to the compiler to compile into the caller's
//! crate. Quasi-quoting is a solution to one piece of that — producing
//! tokens to return to the compiler.
//!
//! The idea of quasi-quoting is that we write *code* that we treat as *data*.
//! Within the `quote!` macro, we can write what looks like code to our text
//! editor or IDE. We get all the benefits of the editor's brace matching,
//! syntax highlighting, indentation, and maybe autocompletion. But rather than
//! compiling that as code into the current crate, we can treat it as data, pass
//! it around, mutate it, and eventually hand it back to the compiler as tokens
//! to compile into the macro caller's crate.
//!
//! This crate is motivated by the procedural macro use case, but is a
//! general-purpose Rust quasi-quoting library and is not specific to procedural
//! macros.
//!
//! ```toml
//! [dependencies]
//! quote = "1.0"
//! ```
//!
//! <br>
//!
//! # Example
//!
//! The following quasi-quoted block of code is something you might find in [a]
//! procedural macro having to do with data structure serialization. The `#var`
//! syntax performs interpolation of runtime variables into the quoted tokens.
//! Check out the documentation of the [`quote!`] macro for more detail about
//! the syntax. See also the [`quote_spanned!`] macro which is important for
//! implementing hygienic procedural macros.
//!
//! [a]: https://serde.rs/
//! [`quote_spanned!`]: macro.quote_spanned.html
//!
//! ```
//! # use quote::quote;
//! #
//! # let generics = "";
//! # let where_clause = "";
//! # let field_ty = "";
//! # let item_ty = "";
//! # let path = "";
//! # let value = "";
//! #
//! let tokens = quote! {
//! struct SerializeWith #generics #where_clause {
//! value: &'a #field_ty,
//! phantom: core::marker::PhantomData<#item_ty>,
//! }
//!
//! impl #generics serde::Serialize for SerializeWith #generics #where_clause {
//! fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
//! where
//! S: serde::Serializer,
//! {
//! #path(self.value, serializer)
//! }
//! }
//!
//! SerializeWith {
//! value: #value,
//! phantom: core::marker::PhantomData::<#item_ty>,
//! }
//! };
//! ```
// Quote types in rustdoc of other crates get linked to here.
#![doc(html_root_url = "https://docs.rs/quote/1.0.15")]
#![allow(
clippy::doc_markdown,
clippy::missing_errors_doc,
clippy::missing_panics_doc,
clippy::module_name_repetitions,
// false positive https://github.com/rust-lang/rust-clippy/issues/6983
clippy::wrong_self_convention,
)]
#[cfg(all(
not(all(target_arch = "wasm32", target_os = "unknown")),
feature = "proc-macro"
))]
extern crate proc_macro;
mod ext;
mod format;
mod ident_fragment;
mod to_tokens;
// Not public API.
#[doc(hidden)]
#[path = "runtime.rs"]
pub mod __private;
pub use crate::ext::TokenStreamExt;
pub use crate::ident_fragment::IdentFragment;
pub use crate::to_tokens::ToTokens;
// Not public API.
#[doc(hidden)]
pub mod spanned;
/// The whole point.
///
/// Performs variable interpolation against the input and produces it as
/// [`proc_macro2::TokenStream`].
///
/// Note: for returning tokens to the compiler in a procedural macro, use
/// `.into()` on the result to convert to [`proc_macro::TokenStream`].
///
/// [`TokenStream`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.TokenStream.html
///
/// <br>
///
/// # Interpolation
///
/// Variable interpolation is done with `#var` (similar to `$var` in
/// `macro_rules!` macros). This grabs the `var` variable that is currently in
/// scope and inserts it in that location in the output tokens. Any type
/// implementing the [`ToTokens`] trait can be interpolated. This includes most
/// Rust primitive types as well as most of the syntax tree types from the [Syn]
/// crate.
///
/// [`ToTokens`]: trait.ToTokens.html
/// [Syn]: https://github.com/dtolnay/syn
///
/// Repetition is done using `#(...)*` or `#(...),*` again similar to
/// `macro_rules!`. This iterates through the elements of any variable
/// interpolated within the repetition and inserts a copy of the repetition body
/// for each one. The variables in an interpolation may be a `Vec`, slice,
/// `BTreeSet`, or any `Iterator`.
///
/// - `#(#var)*` — no separators
/// - `#(#var),*` — the character before the asterisk is used as a separator
/// - `#( struct #var; )*` — the repetition can contain other tokens
/// - `#( #k => println!("{}", #v), )*` — even multiple interpolations
///
/// <br>
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote!`
/// invocation are spanned with [`Span::call_site()`].
///
/// [`Span::call_site()`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html#method.call_site
///
/// A different span can be provided through the [`quote_spanned!`] macro.
///
/// [`quote_spanned!`]: macro.quote_spanned.html
///
/// <br>
///
/// # Return type
///
/// The macro evaluates to an expression of type `proc_macro2::TokenStream`.
/// Meanwhile Rust procedural macros are expected to return the type
/// `proc_macro::TokenStream`.
///
/// The difference between the two types is that `proc_macro` types are entirely
/// specific to procedural macros and cannot ever exist in code outside of a
/// procedural macro, while `proc_macro2` types may exist anywhere including
/// tests and non-macro code like main.rs and build.rs. This is why even the
/// procedural macro ecosystem is largely built around `proc_macro2`, because
/// that ensures the libraries are unit testable and accessible in non-macro
/// contexts.
///
/// There is a [`From`]-conversion in both directions so returning the output of
/// `quote!` from a procedural macro usually looks like `tokens.into()` or
/// `proc_macro::TokenStream::from(tokens)`.
///
/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
///
/// <br>
///
/// # Examples
///
/// ### Procedural macro
///
/// The structure of a basic procedural macro is as follows. Refer to the [Syn]
/// crate for further useful guidance on using `quote!` as part of a procedural
/// macro.
///
/// [Syn]: https://github.com/dtolnay/syn
///
/// ```
/// # #[cfg(any())]
/// extern crate proc_macro;
/// # extern crate proc_macro2;
///
/// # #[cfg(any())]
/// use proc_macro::TokenStream;
/// # use proc_macro2::TokenStream;
/// use quote::quote;
///
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// #[proc_macro_derive(HeapSize)]
/// # };
/// pub fn derive_heap_size(input: TokenStream) -> TokenStream {
/// // Parse the input and figure out what implementation to generate...
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let name = /* ... */;
/// let expr = /* ... */;
/// # };
/// #
/// # let name = 0;
/// # let expr = 0;
///
/// let expanded = quote! {
/// // The generated impl.
/// impl heapsize::HeapSize for #name {
/// fn heap_size_of_children(&self) -> usize {
/// #expr
/// }
/// }
/// };
///
/// // Hand the output tokens back to the compiler.
/// TokenStream::from(expanded)
/// }
/// ```
///
/// <p><br></p>
///
/// ### Combining quoted fragments
///
/// Usually you don't end up constructing an entire final `TokenStream` in one
/// piece. Different parts may come from different helper functions. The tokens
/// produced by `quote!` themselves implement `ToTokens` and so can be
/// interpolated into later `quote!` invocations to build up a final result.
///
/// ```
/// # use quote::quote;
/// #
/// let type_definition = quote! {...};
/// let methods = quote! {...};
///
/// let tokens = quote! {
/// #type_definition
/// #methods
/// };
/// ```
///
/// <p><br></p>
///
/// ### Constructing identifiers
///
/// Suppose we have an identifier `ident` which came from somewhere in a macro
/// input and we need to modify it in some way for the macro output. Let's
/// consider prepending the identifier with an underscore.
///
/// Simply interpolating the identifier next to an underscore will not have the
/// behavior of concatenating them. The underscore and the identifier will
/// continue to be two separate tokens as if you had written `_ x`.
///
/// ```
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// // incorrect
/// quote! {
/// let mut _#ident = 0;
/// }
/// # ;
/// ```
///
/// The solution is to build a new identifier token with the correct value. As
/// this is such a common case, the [`format_ident!`] macro provides a
/// convenient utility for doing so correctly.
///
/// ```
/// # use proc_macro2::{Ident, Span};
/// # use quote::{format_ident, quote};
/// #
/// # let ident = Ident::new("i", Span::call_site());
/// #
/// let varname = format_ident!("_{}", ident);
/// quote! {
/// let mut #varname = 0;
/// }
/// # ;
/// ```
///
/// Alternatively, the APIs provided by Syn and proc-macro2 can be used to
/// directly build the identifier. This is roughly equivalent to the above, but
/// will not handle `ident` being a raw identifier.
///
/// ```
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// let concatenated = format!("_{}", ident);
/// let varname = syn::Ident::new(&concatenated, ident.span());
/// quote! {
/// let mut #varname = 0;
/// }
/// # ;
/// ```
///
/// <p><br></p>
///
/// ### Making method calls
///
/// Let's say our macro requires some type specified in the macro input to have
/// a constructor called `new`. We have the type in a variable called
/// `field_type` of type `syn::Type` and want to invoke the constructor.
///
/// ```
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// // incorrect
/// quote! {
/// let value = #field_type::new();
/// }
/// # ;
/// ```
///
/// This works only sometimes. If `field_type` is `String`, the expanded code
/// contains `String::new()` which is fine. But if `field_type` is something
/// like `Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid
/// syntax. Ordinarily in handwritten Rust we would write `Vec::<i32>::new()`
/// but for macros often the following is more convenient.
///
/// ```
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type>::new();
/// }
/// # ;
/// ```
///
/// This expands to `<Vec<i32>>::new()` which behaves correctly.
///
/// A similar pattern is appropriate for trait methods.
///
/// ```
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type as core::default::Default>::default();
/// }
/// # ;
/// ```
///
/// <p><br></p>
///
/// ### Interpolating text inside of doc comments
///
/// Neither doc comments nor string literals get interpolation behavior in
/// quote:
///
/// ```compile_fail
/// quote! {
/// /// try to interpolate: #ident
/// ///
/// /// ...
/// }
/// ```
///
/// ```compile_fail
/// quote! {
/// #[doc = "try to interpolate: #ident"]
/// }
/// ```
///
/// Macro calls in a doc attribute are not valid syntax:
///
/// ```compile_fail
/// quote! {
/// #[doc = concat!("try to interpolate: ", stringify!(#ident))]
/// }
/// ```
///
/// Instead the best way to build doc comments that involve variables is by
/// formatting the doc string literal outside of quote.
///
/// ```rust
/// # use proc_macro2::{Ident, Span};
/// # use quote::quote;
/// #
/// # const IGNORE: &str = stringify! {
/// let msg = format!(...);
/// # };
/// #
/// # let ident = Ident::new("var", Span::call_site());
/// # let msg = format!("try to interpolate: {}", ident);
/// quote! {
/// #[doc = #msg]
/// ///
/// /// ...
/// }
/// # ;
/// ```
///
/// <p><br></p>
///
/// ### Indexing into a tuple struct
///
/// When interpolating indices of a tuple or tuple struct, we need them not to
/// appear suffixed as integer literals by interpolating them as [`syn::Index`]
/// instead.
///
/// [`syn::Index`]: https://docs.rs/syn/1.0/syn/struct.Index.html
///
/// ```compile_fail
/// let i = 0usize..self.fields.len();
///
/// // expands to 0 + self.0usize.heap_size() + self.1usize.heap_size() + ...
/// // which is not valid syntax
/// quote! {
/// 0 #( + self.#i.heap_size() )*
/// }
/// ```
///
/// ```
/// # use proc_macro2::{Ident, TokenStream};
/// # use quote::quote;
/// #
/// # mod syn {
/// # use proc_macro2::{Literal, TokenStream};
/// # use quote::{ToTokens, TokenStreamExt};
/// #
/// # pub struct Index(usize);
/// #
/// # impl From<usize> for Index {
/// # fn from(i: usize) -> Self {
/// # Index(i)
/// # }
/// # }
/// #
/// # impl ToTokens for Index {
/// # fn to_tokens(&self, tokens: &mut TokenStream) {
/// # tokens.append(Literal::usize_unsuffixed(self.0));
/// # }
/// # }
/// # }
/// #
/// # struct Struct {
/// # fields: Vec<Ident>,
/// # }
/// #
/// # impl Struct {
/// # fn example(&self) -> TokenStream {
/// let i = (0..self.fields.len()).map(syn::Index::from);
///
/// // expands to 0 + self.0.heap_size() + self.1.heap_size() + ...
/// quote! {
/// 0 #( + self.#i.heap_size() )*
/// }
/// # }
/// # }
/// ```
#[macro_export]
macro_rules! quote {
    // Empty invocation: yield an empty token stream without any setup.
    () => {
        $crate::__private::TokenStream::new()
    };
    // General case: accumulate every input token into `_s` via the
    // `quote_each_token!` helper, then evaluate to the built stream.
    ($($tt:tt)*) => {{
        let mut _s = $crate::__private::TokenStream::new();
        $crate::quote_each_token!(_s $($tt)*);
        _s
    }};
}
/// Same as `quote!`, but applies a given span to all tokens originating within
/// the macro invocation.
///
/// <br>
///
/// # Syntax
///
/// A span expression of type [`Span`], followed by `=>`, followed by the tokens
/// to quote. The span expression should be brief — use a variable for
/// anything more than a few characters. There should be no space before the
/// `=>` token.
///
/// [`Span`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html
///
/// ```
/// # use proc_macro2::Span;
/// # use quote::quote_spanned;
/// #
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let span = /* ... */;
/// # };
/// # let span = Span::call_site();
/// # let init = 0;
///
/// // On one line, use parentheses.
/// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init)));
///
/// // On multiple lines, place the span at the top and use braces.
/// let tokens = quote_spanned! {span=>
/// Box::into_raw(Box::new(#init))
/// };
/// ```
///
/// The lack of space before the `=>` should look jarring to Rust programmers
/// and this is intentional. The formatting is designed to be visibly
/// off-balance and draw the eye a particular way, due to the span expression
/// being evaluated in the context of the procedural macro and the remaining
/// tokens being evaluated in the generated code.
///
/// <br>
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote_spanned!`
/// invocation are spanned with the given span argument.
///
/// <br>
///
/// # Example
///
/// The following procedural macro code uses `quote_spanned!` to assert that a
/// particular Rust type implements the [`Sync`] trait so that references can be
/// safely shared between threads.
///
/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
///
/// ```
/// # use quote::{quote_spanned, TokenStreamExt, ToTokens};
/// # use proc_macro2::{Span, TokenStream};
/// #
/// # struct Type;
/// #
/// # impl Type {
/// # fn span(&self) -> Span {
/// # Span::call_site()
/// # }
/// # }
/// #
/// # impl ToTokens for Type {
/// # fn to_tokens(&self, _tokens: &mut TokenStream) {}
/// # }
/// #
/// # let ty = Type;
/// # let call_site = Span::call_site();
/// #
/// let ty_span = ty.span();
/// let assert_sync = quote_spanned! {ty_span=>
/// struct _AssertSync where #ty: Sync;
/// };
/// ```
///
/// If the assertion fails, the user will see an error like the following. The
/// input span of their type is highlighted in the error.
///
/// ```text
/// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied
/// --> src/main.rs:10:21
/// |
/// 10 | static ref PTR: *const () = &();
/// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely
/// ```
///
/// In this example it is important for the where-clause to be spanned with the
/// line/column information of the user's input type so that error messages are
/// placed appropriately by the compiler.
#[macro_export]
macro_rules! quote_spanned {
    // Empty invocation: type-check the span expression, then hand back an
    // empty token stream.
    ($span:expr=>) => {{
        let _: $crate::__private::Span = $span;
        $crate::__private::TokenStream::new()
    }};
    // Non-empty invocation: evaluate the span expression exactly once up
    // front, then expand every quoted token with that span attached.
    ($span:expr=> $($tt:tt)*) => {{
        let mut _stream = $crate::__private::TokenStream::new();
        let _span: $crate::__private::Span = $span;
        $crate::quote_each_token_spanned!(_stream _span $($tt)*);
        _stream
    }};
}
// Extract the names of all #metavariables and pass them to the $call macro.
//
// in: pounded_var_names!(then!(...) a #b c #( #d )* #e)
// out: then!(... b);
// then!(... d);
// then!(... e);
#[macro_export]
#[doc(hidden)]
macro_rules! pounded_var_names {
    // Scan the token stream and invoke `$finish!` once per `#ident`
    // interpolation found anywhere inside it (including nested groups).
    // Pairs every token with its predecessor by expanding the stream twice,
    // offset from each other by one `@` padding token.
    ($finish:ident! $payload:tt $($rest:tt)*) => {
        $crate::pounded_var_names_with_context!($finish! $payload
            (@ $($rest)*)
            ($($rest)* @)
        )
    };
}
#[macro_export]
#[doc(hidden)]
macro_rules! pounded_var_names_with_context {
    // Zip the shifted copy against the original so that each token is
    // examined together with the one immediately before it.
    ($finish:ident! $payload:tt ($($prev:tt)*) ($($token:tt)*)) => {
        $(
            $crate::pounded_var_with_context!($finish! $payload $prev $token);
        )*
    };
}
#[macro_export]
#[doc(hidden)]
macro_rules! pounded_var_with_context {
    // Descend into a parenthesized group to find interpolations inside it.
    ($finish:ident! $payload:tt $prev:tt ( $($nested:tt)* )) => {
        $crate::pounded_var_names!($finish! $payload $($nested)*);
    };
    // Descend into a bracketed group.
    ($finish:ident! $payload:tt $prev:tt [ $($nested:tt)* ]) => {
        $crate::pounded_var_names!($finish! $payload $($nested)*);
    };
    // Descend into a braced group.
    ($finish:ident! $payload:tt $prev:tt { $($nested:tt)* }) => {
        $crate::pounded_var_names!($finish! $payload $($nested)*);
    };
    // An identifier preceded by `#` is an interpolation: forward the
    // identifier to the callback macro along with its extra arguments.
    ($finish:ident!($($payload:tt)*) # $name:ident) => {
        $crate::$finish!($($payload)* $name);
    };
    // Anything else is not an interpolation; ignore it.
    ($finish:ident! $payload:tt $prev:tt $token:tt) => {};
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_bind_into_iter {
    ($accum:ident $name:ident) => {
        // Shadow the interpolated variable with its iterator form. The `mut`
        // can end up unused when the same variable appears more than once in
        // the repetition body, hence the allow.
        #[allow(unused_mut)]
        let (mut $name, witness) = $name.quote_into_iter();
        // OR together the "this really is an iterator" witnesses; the caller
        // type-checks the accumulated value against `HasIterator` to reject
        // repetitions that contain no iterator at all.
        let $accum = $accum | witness;
    };
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_bind_next_or_break {
    ($name:ident) => {
        // Pull the next item from the iterator bound by
        // `quote_bind_into_iter!`, wrapping it so its `ToTokens` impl is
        // used; exit the surrounding repetition loop once exhausted.
        let $name = match $name.next() {
            Some(value) => $crate::__private::RepInterp(value),
            None => break,
        };
    };
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_each_token {
    // Append every token in `$tts` to `$tokens`.
    //
    // The input is expanded seven times, each copy shifted by one position
    // using `@` padding, so that `quote_tokens_with_context!` can zip them
    // and view each token together with the three tokens before and the
    // three after it. That window is what lets multi-token constructs like
    // `#var`, `#(...)*` and `#(...),*` be recognized. The middle copy — the
    // "current token" position — wraps each token in parentheses so the
    // downstream rules can grab exactly one token at a time.
    ($tokens:ident $($tts:tt)*) => {
        $crate::quote_tokens_with_context!($tokens
            (@ @ @ @ @ @ $($tts)*)
            (@ @ @ @ @ $($tts)* @)
            (@ @ @ @ $($tts)* @ @)
            (@ @ @ $(($tts))* @ @ @)
            (@ @ $($tts)* @ @ @ @)
            (@ $($tts)* @ @ @ @ @)
            ($($tts)* @ @ @ @ @ @)
        );
    };
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_each_token_spanned {
    // Same sliding-window expansion as `quote_each_token!`, but additionally
    // threads `$span` through so every token that originates inside the
    // macro invocation is emitted with the caller-provided span.
    ($tokens:ident $span:ident $($tts:tt)*) => {
        $crate::quote_tokens_with_context_spanned!($tokens $span
            (@ @ @ @ @ @ $($tts)*)
            (@ @ @ @ @ $($tts)* @)
            (@ @ @ @ $($tts)* @ @)
            (@ @ @ $(($tts))* @ @ @)
            (@ @ $($tts)* @ @ @ @)
            (@ $($tts)* @ @ @ @ @)
            ($($tts)* @ @ @ @ @ @)
        );
    };
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_tokens_with_context {
    // Zip the seven staggered copies produced by `quote_each_token!` so that
    // each repetition step hands `quote_token_with_context!` one current
    // token (`$curr`) plus the three tokens before (`$b3 $b2 $b1`) and after
    // (`$a1 $a2 $a3`) it.
    ($tokens:ident
        ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*)
        ($($curr:tt)*)
        ($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*)
    ) => {
        $(
            $crate::quote_token_with_context!($tokens $b3 $b2 $b1 $curr $a1 $a2 $a3);
        )*
    };
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_tokens_with_context_spanned {
    // Spanned counterpart of `quote_tokens_with_context!`: zips the seven
    // staggered copies and forwards the span identifier alongside each
    // seven-token window.
    ($tokens:ident $span:ident
        ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*)
        ($($curr:tt)*)
        ($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*)
    ) => {
        $(
            $crate::quote_token_with_context_spanned!($tokens $span $b3 $b2 $b1 $curr $a1 $a2 $a3);
        )*
    };
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_token_with_context {
    // NOTE(review): rule order matters here — earlier rules claim the
    // multi-token constructs before the single-token fallback at the bottom.
    //
    // The current token is `@` context padding, not a real token: emit nothing.
    ($tokens:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {};
    // `#( ... )*`: repetition without separator. The current token is the
    // `#`; the group and the `*` are visible in the lookahead positions.
    ($tokens:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{
        use $crate::__private::ext::*;
        let has_iter = $crate::__private::ThereIsNoIteratorInRepetition;
        $crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*);
        // Fails to compile unless at least one interpolated variable in the
        // repetition body was an iterator.
        let _: $crate::__private::HasIterator = has_iter;
        // This is `while true` instead of `loop` because if there are no
        // iterators used inside of this repetition then the body would not
        // contain any `break`, so the compiler would emit unreachable code
        // warnings on anything below the loop. We use has_iter to detect and
        // fail to compile when there are no iterators, so here we just work
        // around the unneeded extra warning.
        while true {
            $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*);
            $crate::quote_each_token!($tokens $($inner)*);
        }
    }};
    // The same repetition seen with the group or `*` as the current token:
    // already handled by the rule above, so emit nothing.
    ($tokens:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {};
    ($tokens:ident $b3:tt # ( $($inner:tt)* ) (*) $a1:tt $a2:tt $a3:tt) => {};
    // `#( ... )SEP*`: repetition with a one-token separator.
    ($tokens:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) $sep:tt *) => {{
        use $crate::__private::ext::*;
        let mut _i = 0usize;
        let has_iter = $crate::__private::ThereIsNoIteratorInRepetition;
        $crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*);
        let _: $crate::__private::HasIterator = has_iter;
        while true {
            $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*);
            // Emit the separator before every element except the first.
            if _i > 0 {
                $crate::quote_token!($tokens $sep);
            }
            _i += 1;
            $crate::quote_each_token!($tokens $($inner)*);
        }
    }};
    // Later window positions of the separated repetition: emit nothing.
    ($tokens:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {};
    ($tokens:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {};
    // `#( ... )**`: the separator is itself `*`, so the second `*` seen as
    // the current token is a real token and must be emitted.
    ($tokens:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => {
        // https://github.com/dtolnay/quote/issues/130
        $crate::quote_token!($tokens *);
    };
    // The closing `*` of `#( ... )SEP*` as current token: emit nothing.
    ($tokens:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {};
    // `#var`: interpolate a single variable via its `ToTokens` impl.
    ($tokens:ident $b3:tt $b2:tt $b1:tt (#) $var:ident $a2:tt $a3:tt) => {
        $crate::ToTokens::to_tokens(&$var, &mut $tokens);
    };
    // The variable seen one position after its `#`: already interpolated.
    ($tokens:ident $b3:tt $b2:tt # ($var:ident) $a1:tt $a2:tt $a3:tt) => {};
    // Fallback: any other single token is emitted literally.
    ($tokens:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => {
        $crate::quote_token!($tokens $curr);
    };
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_token_with_context_spanned {
    // Spanned counterpart of `quote_token_with_context!`; see that macro for
    // the meaning of each rule. Rule order matters.
    //
    // Current token is `@` context padding: emit nothing.
    ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {};
    // `#( ... )*`: repetition without separator.
    ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{
        use $crate::__private::ext::*;
        let has_iter = $crate::__private::ThereIsNoIteratorInRepetition;
        $crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*);
        let _: $crate::__private::HasIterator = has_iter;
        // This is `while true` instead of `loop` because if there are no
        // iterators used inside of this repetition then the body would not
        // contain any `break`, so the compiler would emit unreachable code
        // warnings on anything below the loop. We use has_iter to detect and
        // fail to compile when there are no iterators, so here we just work
        // around the unneeded extra warning.
        while true {
            $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*);
            $crate::quote_each_token_spanned!($tokens $span $($inner)*);
        }
    }};
    // Later window positions of the unseparated repetition: emit nothing.
    ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {};
    ($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) (*) $a1:tt $a2:tt $a3:tt) => {};
    // `#( ... )SEP*`: repetition with a one-token separator.
    ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) $sep:tt *) => {{
        use $crate::__private::ext::*;
        let mut _i = 0usize;
        let has_iter = $crate::__private::ThereIsNoIteratorInRepetition;
        $crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*);
        let _: $crate::__private::HasIterator = has_iter;
        while true {
            $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*);
            // Emit the separator before every element except the first.
            if _i > 0 {
                $crate::quote_token_spanned!($tokens $span $sep);
            }
            _i += 1;
            $crate::quote_each_token_spanned!($tokens $span $($inner)*);
        }
    }};
    // Later window positions of the separated repetition: emit nothing.
    ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {};
    ($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {};
    // `#( ... )**`: the separator is itself `*`; the second `*` is a real
    // token and must be emitted.
    ($tokens:ident $span:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => {
        // https://github.com/dtolnay/quote/issues/130
        $crate::quote_token_spanned!($tokens $span *);
    };
    // The closing `*` of `#( ... )SEP*` as current token: emit nothing.
    ($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {};
    // `#var`: interpolated tokens keep the spans from their own `ToTokens`
    // impl, so `$span` is deliberately not applied here.
    ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) $var:ident $a2:tt $a3:tt) => {
        $crate::ToTokens::to_tokens(&$var, &mut $tokens);
    };
    // The variable seen one position after its `#`: already interpolated.
    ($tokens:ident $span:ident $b3:tt $b2:tt # ($var:ident) $a1:tt $a2:tt $a3:tt) => {};
    // Fallback: any other single token is emitted literally with `$span`.
    ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => {
        $crate::quote_token_spanned!($tokens $span $curr);
    };
}
// These rules are ordered by approximate token frequency, at least for the
// first 10 or so, to improve compile times. Having `ident` first is by far the
// most important, because it's typically 2-3x more common than the next most
// common token.
#[macro_export]
#[doc(hidden)]
macro_rules! quote_token {
    // Dispatch one token to its dedicated runtime entry point. Identifiers,
    // every punctuation token, and the three delimiter kinds each have their
    // own `push_*` function; anything else (literals etc.) falls through to
    // the final rule, which round-trips the token through `stringify!` and
    // re-parses it at runtime.
    ($tokens:ident $ident:ident) => {
        $crate::__private::push_ident(&mut $tokens, stringify!($ident));
    };
    ($tokens:ident ::) => {
        $crate::__private::push_colon2(&mut $tokens);
    };
    // Delimited groups recurse through `quote!` to build their contents.
    ($tokens:ident ( $($inner:tt)* )) => {
        $crate::__private::push_group(
            &mut $tokens,
            $crate::__private::Delimiter::Parenthesis,
            $crate::quote!($($inner)*),
        );
    };
    ($tokens:ident [ $($inner:tt)* ]) => {
        $crate::__private::push_group(
            &mut $tokens,
            $crate::__private::Delimiter::Bracket,
            $crate::quote!($($inner)*),
        );
    };
    ($tokens:ident { $($inner:tt)* }) => {
        $crate::__private::push_group(
            &mut $tokens,
            $crate::__private::Delimiter::Brace,
            $crate::quote!($($inner)*),
        );
    };
    ($tokens:ident #) => {
        $crate::__private::push_pound(&mut $tokens);
    };
    ($tokens:ident ,) => {
        $crate::__private::push_comma(&mut $tokens);
    };
    ($tokens:ident .) => {
        $crate::__private::push_dot(&mut $tokens);
    };
    ($tokens:ident ;) => {
        $crate::__private::push_semi(&mut $tokens);
    };
    ($tokens:ident :) => {
        $crate::__private::push_colon(&mut $tokens);
    };
    ($tokens:ident +) => {
        $crate::__private::push_add(&mut $tokens);
    };
    ($tokens:ident +=) => {
        $crate::__private::push_add_eq(&mut $tokens);
    };
    ($tokens:ident &) => {
        $crate::__private::push_and(&mut $tokens);
    };
    ($tokens:ident &&) => {
        $crate::__private::push_and_and(&mut $tokens);
    };
    ($tokens:ident &=) => {
        $crate::__private::push_and_eq(&mut $tokens);
    };
    ($tokens:ident @) => {
        $crate::__private::push_at(&mut $tokens);
    };
    ($tokens:ident !) => {
        $crate::__private::push_bang(&mut $tokens);
    };
    ($tokens:ident ^) => {
        $crate::__private::push_caret(&mut $tokens);
    };
    ($tokens:ident ^=) => {
        $crate::__private::push_caret_eq(&mut $tokens);
    };
    ($tokens:ident /) => {
        $crate::__private::push_div(&mut $tokens);
    };
    ($tokens:ident /=) => {
        $crate::__private::push_div_eq(&mut $tokens);
    };
    ($tokens:ident ..) => {
        $crate::__private::push_dot2(&mut $tokens);
    };
    ($tokens:ident ...) => {
        $crate::__private::push_dot3(&mut $tokens);
    };
    ($tokens:ident ..=) => {
        $crate::__private::push_dot_dot_eq(&mut $tokens);
    };
    ($tokens:ident =) => {
        $crate::__private::push_eq(&mut $tokens);
    };
    ($tokens:ident ==) => {
        $crate::__private::push_eq_eq(&mut $tokens);
    };
    ($tokens:ident >=) => {
        $crate::__private::push_ge(&mut $tokens);
    };
    ($tokens:ident >) => {
        $crate::__private::push_gt(&mut $tokens);
    };
    ($tokens:ident <=) => {
        $crate::__private::push_le(&mut $tokens);
    };
    ($tokens:ident <) => {
        $crate::__private::push_lt(&mut $tokens);
    };
    ($tokens:ident *=) => {
        $crate::__private::push_mul_eq(&mut $tokens);
    };
    ($tokens:ident !=) => {
        $crate::__private::push_ne(&mut $tokens);
    };
    ($tokens:ident |) => {
        $crate::__private::push_or(&mut $tokens);
    };
    ($tokens:ident |=) => {
        $crate::__private::push_or_eq(&mut $tokens);
    };
    ($tokens:ident ||) => {
        $crate::__private::push_or_or(&mut $tokens);
    };
    ($tokens:ident ?) => {
        $crate::__private::push_question(&mut $tokens);
    };
    ($tokens:ident ->) => {
        $crate::__private::push_rarrow(&mut $tokens);
    };
    ($tokens:ident <-) => {
        $crate::__private::push_larrow(&mut $tokens);
    };
    ($tokens:ident %) => {
        $crate::__private::push_rem(&mut $tokens);
    };
    ($tokens:ident %=) => {
        $crate::__private::push_rem_eq(&mut $tokens);
    };
    ($tokens:ident =>) => {
        $crate::__private::push_fat_arrow(&mut $tokens);
    };
    ($tokens:ident <<) => {
        $crate::__private::push_shl(&mut $tokens);
    };
    ($tokens:ident <<=) => {
        $crate::__private::push_shl_eq(&mut $tokens);
    };
    ($tokens:ident >>) => {
        $crate::__private::push_shr(&mut $tokens);
    };
    ($tokens:ident >>=) => {
        $crate::__private::push_shr_eq(&mut $tokens);
    };
    ($tokens:ident *) => {
        $crate::__private::push_star(&mut $tokens);
    };
    ($tokens:ident -) => {
        $crate::__private::push_sub(&mut $tokens);
    };
    ($tokens:ident -=) => {
        $crate::__private::push_sub_eq(&mut $tokens);
    };
    ($tokens:ident $lifetime:lifetime) => {
        $crate::__private::push_lifetime(&mut $tokens, stringify!($lifetime));
    };
    ($tokens:ident _) => {
        $crate::__private::push_underscore(&mut $tokens);
    };
    // Fallback for tokens with no dedicated rule (e.g. literals): stringify
    // the token and let the runtime parse it back into a TokenStream.
    ($tokens:ident $other:tt) => {
        $crate::__private::parse(&mut $tokens, stringify!($other));
    };
}
// See the comment above `quote_token!` about the rule ordering.
#[macro_export]
#[doc(hidden)]
macro_rules! quote_token_spanned {
    // Spanned counterpart of `quote_token!`: identical token dispatch, but
    // every `push_*_spanned` call stamps the emitted token with `$span`.
    ($tokens:ident $span:ident $ident:ident) => {
        $crate::__private::push_ident_spanned(&mut $tokens, $span, stringify!($ident));
    };
    ($tokens:ident $span:ident ::) => {
        $crate::__private::push_colon2_spanned(&mut $tokens, $span);
    };
    // Delimited groups recurse through `quote_spanned!` so nested tokens get
    // the same span.
    ($tokens:ident $span:ident ( $($inner:tt)* )) => {
        $crate::__private::push_group_spanned(
            &mut $tokens,
            $span,
            $crate::__private::Delimiter::Parenthesis,
            $crate::quote_spanned!($span=> $($inner)*),
        );
    };
    ($tokens:ident $span:ident [ $($inner:tt)* ]) => {
        $crate::__private::push_group_spanned(
            &mut $tokens,
            $span,
            $crate::__private::Delimiter::Bracket,
            $crate::quote_spanned!($span=> $($inner)*),
        );
    };
    ($tokens:ident $span:ident { $($inner:tt)* }) => {
        $crate::__private::push_group_spanned(
            &mut $tokens,
            $span,
            $crate::__private::Delimiter::Brace,
            $crate::quote_spanned!($span=> $($inner)*),
        );
    };
    ($tokens:ident $span:ident #) => {
        $crate::__private::push_pound_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident ,) => {
        $crate::__private::push_comma_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident .) => {
        $crate::__private::push_dot_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident ;) => {
        $crate::__private::push_semi_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident :) => {
        $crate::__private::push_colon_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident +) => {
        $crate::__private::push_add_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident +=) => {
        $crate::__private::push_add_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident &) => {
        $crate::__private::push_and_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident &&) => {
        $crate::__private::push_and_and_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident &=) => {
        $crate::__private::push_and_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident @) => {
        $crate::__private::push_at_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident !) => {
        $crate::__private::push_bang_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident ^) => {
        $crate::__private::push_caret_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident ^=) => {
        $crate::__private::push_caret_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident /) => {
        $crate::__private::push_div_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident /=) => {
        $crate::__private::push_div_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident ..) => {
        $crate::__private::push_dot2_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident ...) => {
        $crate::__private::push_dot3_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident ..=) => {
        $crate::__private::push_dot_dot_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident =) => {
        $crate::__private::push_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident ==) => {
        $crate::__private::push_eq_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident >=) => {
        $crate::__private::push_ge_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident >) => {
        $crate::__private::push_gt_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident <=) => {
        $crate::__private::push_le_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident <) => {
        $crate::__private::push_lt_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident *=) => {
        $crate::__private::push_mul_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident !=) => {
        $crate::__private::push_ne_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident |) => {
        $crate::__private::push_or_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident |=) => {
        $crate::__private::push_or_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident ||) => {
        $crate::__private::push_or_or_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident ?) => {
        $crate::__private::push_question_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident ->) => {
        $crate::__private::push_rarrow_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident <-) => {
        $crate::__private::push_larrow_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident %) => {
        $crate::__private::push_rem_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident %=) => {
        $crate::__private::push_rem_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident =>) => {
        $crate::__private::push_fat_arrow_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident <<) => {
        $crate::__private::push_shl_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident <<=) => {
        $crate::__private::push_shl_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident >>) => {
        $crate::__private::push_shr_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident >>=) => {
        $crate::__private::push_shr_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident *) => {
        $crate::__private::push_star_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident -) => {
        $crate::__private::push_sub_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident -=) => {
        $crate::__private::push_sub_eq_spanned(&mut $tokens, $span);
    };
    ($tokens:ident $span:ident $lifetime:lifetime) => {
        $crate::__private::push_lifetime_spanned(&mut $tokens, $span, stringify!($lifetime));
    };
    ($tokens:ident $span:ident _) => {
        $crate::__private::push_underscore_spanned(&mut $tokens, $span);
    };
    // Fallback for tokens with no dedicated rule (e.g. literals): stringify
    // and re-parse at runtime, applying `$span` to the parsed tokens.
    ($tokens:ident $span:ident $other:tt) => {
        $crate::__private::parse_spanned(&mut $tokens, $span, stringify!($other));
    };
}
// reorder `quote_token` arguments
//! [![github]](https://github.com/dtolnay/quote) [![crates-io]](https://crates.io/crates/quote) [![docs-rs]](https://docs.rs/quote)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust
//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K
//!
//! <br>
//!
//! This crate provides the [`quote!`] macro for turning Rust syntax tree data
//! structures into tokens of source code.
//!
//! [`quote!`]: macro.quote.html
//!
//! Procedural macros in Rust receive a stream of tokens as input, execute
//! arbitrary Rust code to determine how to manipulate those tokens, and produce
//! a stream of tokens to hand back to the compiler to compile into the caller's
//! crate. Quasi-quoting is a solution to one piece of that — producing
//! tokens to return to the compiler.
//!
//! The idea of quasi-quoting is that we write *code* that we treat as *data*.
//! Within the `quote!` macro, we can write what looks like code to our text
//! editor or IDE. We get all the benefits of the editor's brace matching,
//! syntax highlighting, indentation, and maybe autocompletion. But rather than
//! compiling that as code into the current crate, we can treat it as data, pass
//! it around, mutate it, and eventually hand it back to the compiler as tokens
//! to compile into the macro caller's crate.
//!
//! This crate is motivated by the procedural macro use case, but is a
//! general-purpose Rust quasi-quoting library and is not specific to procedural
//! macros.
//!
//! ```toml
//! [dependencies]
//! quote = "1.0"
//! ```
//!
//! <br>
//!
//! # Example
//!
//! The following quasi-quoted block of code is something you might find in [a]
//! procedural macro having to do with data structure serialization. The `#var`
//! syntax performs interpolation of runtime variables into the quoted tokens.
//! Check out the documentation of the [`quote!`] macro for more detail about
//! the syntax. See also the [`quote_spanned!`] macro which is important for
//! implementing hygienic procedural macros.
//!
//! [a]: https://serde.rs/
//! [`quote_spanned!`]: macro.quote_spanned.html
//!
//! ```
//! # use quote::quote;
//! #
//! # let generics = "";
//! # let where_clause = "";
//! # let field_ty = "";
//! # let item_ty = "";
//! # let path = "";
//! # let value = "";
//! #
//! let tokens = quote! {
//! struct SerializeWith #generics #where_clause {
//! value: &'a #field_ty,
//! phantom: core::marker::PhantomData<#item_ty>,
//! }
//!
//! impl #generics serde::Serialize for SerializeWith #generics #where_clause {
//! fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
//! where
//! S: serde::Serializer,
//! {
//! #path(self.value, serializer)
//! }
//! }
//!
//! SerializeWith {
//! value: #value,
//! phantom: core::marker::PhantomData::<#item_ty>,
//! }
//! };
//! ```
// Quote types in rustdoc of other crates get linked to here.
#![doc(html_root_url = "https://docs.rs/quote/1.0.15")]
#![allow(
clippy::doc_markdown,
clippy::missing_errors_doc,
clippy::missing_panics_doc,
clippy::module_name_repetitions,
// false positive https://github.com/rust-lang/rust-clippy/issues/6983
clippy::wrong_self_convention,
)]
#[cfg(all(
not(all(target_arch = "wasm32", target_os = "unknown")),
feature = "proc-macro"
))]
extern crate proc_macro;
mod ext;
mod format;
mod ident_fragment;
mod to_tokens;
// Not public API.
#[doc(hidden)]
#[path = "runtime.rs"]
pub mod __private;
pub use crate::ext::TokenStreamExt;
pub use crate::ident_fragment::IdentFragment;
pub use crate::to_tokens::ToTokens;
// Not public API.
#[doc(hidden)]
pub mod spanned;
/// The whole point.
///
/// Performs variable interpolation against the input and produces it as
/// [`proc_macro2::TokenStream`].
///
/// Note: for returning tokens to the compiler in a procedural macro, use
/// `.into()` on the result to convert to [`proc_macro::TokenStream`].
///
/// [`TokenStream`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.TokenStream.html
///
/// <br>
///
/// # Interpolation
///
/// Variable interpolation is done with `#var` (similar to `$var` in
/// `macro_rules!` macros). This grabs the `var` variable that is currently in
/// scope and inserts it in that location in the output tokens. Any type
/// implementing the [`ToTokens`] trait can be interpolated. This includes most
/// Rust primitive types as well as most of the syntax tree types from the [Syn]
/// crate.
///
/// [`ToTokens`]: trait.ToTokens.html
/// [Syn]: https://github.com/dtolnay/syn
///
/// Repetition is done using `#(...)*` or `#(...),*` again similar to
/// `macro_rules!`. This iterates through the elements of any variable
/// interpolated within the repetition and inserts a copy of the repetition body
/// for each one. The variables in an interpolation may be a `Vec`, slice,
/// `BTreeSet`, or any `Iterator`.
///
/// - `#(#var)*` — no separators
/// - `#(#var),*` — the character before the asterisk is used as a separator
/// - `#( struct #var; )*` — the repetition can contain other tokens
/// - `#( #k => println!("{}", #v), )*` — even multiple interpolations
///
/// <br>
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote!`
/// invocation are spanned with [`Span::call_site()`].
///
/// [`Span::call_site()`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html#method.call_site
///
/// A different span can be provided through the [`quote_spanned!`] macro.
///
/// [`quote_spanned!`]: macro.quote_spanned.html
///
/// <br>
///
/// # Return type
///
/// The macro evaluates to an expression of type `proc_macro2::TokenStream`.
/// Meanwhile Rust procedural macros are expected to return the type
/// `proc_macro::TokenStream`.
///
/// The difference between the two types is that `proc_macro` types are entirely
/// specific to procedural macros and cannot ever exist in code outside of a
/// procedural macro, while `proc_macro2` types may exist anywhere including
/// tests and non-macro code like main.rs and build.rs. This is why even the
/// procedural macro ecosystem is largely built around `proc_macro2`, because
/// that ensures the libraries are unit testable and accessible in non-macro
/// contexts.
///
/// There is a [`From`]-conversion in both directions so returning the output of
/// `quote!` from a procedural macro usually looks like `tokens.into()` or
/// `proc_macro::TokenStream::from(tokens)`.
///
/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
///
/// <br>
///
/// # Examples
///
/// ### Procedural macro
///
/// The structure of a basic procedural macro is as follows. Refer to the [Syn]
/// crate for further useful guidance on using `quote!` as part of a procedural
/// macro.
///
/// [Syn]: https://github.com/dtolnay/syn
///
/// ```
/// # #[cfg(any())]
/// extern crate proc_macro;
/// # extern crate proc_macro2;
///
/// # #[cfg(any())]
/// use proc_macro::TokenStream;
/// # use proc_macro2::TokenStream;
/// use quote::quote;
///
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// #[proc_macro_derive(HeapSize)]
/// # };
/// pub fn derive_heap_size(input: TokenStream) -> TokenStream {
/// // Parse the input and figure out what implementation to generate...
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let name = /* ... */;
/// let expr = /* ... */;
/// # };
/// #
/// # let name = 0;
/// # let expr = 0;
///
/// let expanded = quote! {
/// // The generated impl.
/// impl heapsize::HeapSize for #name {
/// fn heap_size_of_children(&self) -> usize {
/// #expr
/// }
/// }
/// };
///
/// // Hand the output tokens back to the compiler.
/// TokenStream::from(expanded)
/// }
/// ```
///
/// <p><br></p>
///
/// ### Combining quoted fragments
///
/// Usually you don't end up constructing an entire final `TokenStream` in one
/// piece. Different parts may come from different helper functions. The tokens
/// produced by `quote!` themselves implement `ToTokens` and so can be
/// interpolated into later `quote!` invocations to build up a final result.
///
/// ```
/// # use quote::quote;
/// #
/// let type_definition = quote! {...};
/// let methods = quote! {...};
///
/// let tokens = quote! {
/// #type_definition
/// #methods
/// };
/// ```
///
/// <p><br></p>
///
/// ### Constructing identifiers
///
/// Suppose we have an identifier `ident` which came from somewhere in a macro
/// input and we need to modify it in some way for the macro output. Let's
/// consider prepending the identifier with an underscore.
///
/// Simply interpolating the identifier next to an underscore will not have the
/// behavior of concatenating them. The underscore and the identifier will
/// continue to be two separate tokens as if you had written `_ x`.
///
/// ```
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// // incorrect
/// quote! {
/// let mut _#ident = 0;
/// }
/// # ;
/// ```
///
/// The solution is to build a new identifier token with the correct value. As
/// this is such a common case, the [`format_ident!`] macro provides a
/// convenient utility for doing so correctly.
///
/// ```
/// # use proc_macro2::{Ident, Span};
/// # use quote::{format_ident, quote};
/// #
/// # let ident = Ident::new("i", Span::call_site());
/// #
/// let varname = format_ident!("_{}", ident);
/// quote! {
/// let mut #varname = 0;
/// }
/// # ;
/// ```
///
/// Alternatively, the APIs provided by Syn and proc-macro2 can be used to
/// directly build the identifier. This is roughly equivalent to the above, but
/// will not handle `ident` being a raw identifier.
///
/// ```
/// # use proc_macro2::{self as syn, Span};
/// # use quote::quote;
/// #
/// # let ident = syn::Ident::new("i", Span::call_site());
/// #
/// let concatenated = format!("_{}", ident);
/// let varname = syn::Ident::new(&concatenated, ident.span());
/// quote! {
/// let mut #varname = 0;
/// }
/// # ;
/// ```
///
/// <p><br></p>
///
/// ### Making method calls
///
/// Let's say our macro requires some type specified in the macro input to have
/// a constructor called `new`. We have the type in a variable called
/// `field_type` of type `syn::Type` and want to invoke the constructor.
///
/// ```
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// // incorrect
/// quote! {
/// let value = #field_type::new();
/// }
/// # ;
/// ```
///
/// This works only sometimes. If `field_type` is `String`, the expanded code
/// contains `String::new()` which is fine. But if `field_type` is something
/// like `Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid
/// syntax. Ordinarily in handwritten Rust we would write `Vec::<i32>::new()`
/// but for macros often the following is more convenient.
///
/// ```
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type>::new();
/// }
/// # ;
/// ```
///
/// This expands to `<Vec<i32>>::new()` which behaves correctly.
///
/// A similar pattern is appropriate for trait methods.
///
/// ```
/// # use quote::quote;
/// #
/// # let field_type = quote!(...);
/// #
/// quote! {
/// let value = <#field_type as core::default::Default>::default();
/// }
/// # ;
/// ```
///
/// <p><br></p>
///
/// ### Interpolating text inside of doc comments
///
/// Neither doc comments nor string literals get interpolation behavior in
/// quote:
///
/// ```compile_fail
/// quote! {
/// /// try to interpolate: #ident
/// ///
/// /// ...
/// }
/// ```
///
/// ```compile_fail
/// quote! {
/// #[doc = "try to interpolate: #ident"]
/// }
/// ```
///
/// Macro calls in a doc attribute are not valid syntax:
///
/// ```compile_fail
/// quote! {
/// #[doc = concat!("try to interpolate: ", stringify!(#ident))]
/// }
/// ```
///
/// Instead the best way to build doc comments that involve variables is by
/// formatting the doc string literal outside of quote.
///
/// ```rust
/// # use proc_macro2::{Ident, Span};
/// # use quote::quote;
/// #
/// # const IGNORE: &str = stringify! {
/// let msg = format!(...);
/// # };
/// #
/// # let ident = Ident::new("var", Span::call_site());
/// # let msg = format!("try to interpolate: {}", ident);
/// quote! {
/// #[doc = #msg]
/// ///
/// /// ...
/// }
/// # ;
/// ```
///
/// <p><br></p>
///
/// ### Indexing into a tuple struct
///
/// When interpolating indices of a tuple or tuple struct, we need them not to
/// appear suffixed as integer literals by interpolating them as [`syn::Index`]
/// instead.
///
/// [`syn::Index`]: https://docs.rs/syn/1.0/syn/struct.Index.html
///
/// ```compile_fail
/// let i = 0usize..self.fields.len();
///
/// // expands to 0 + self.0usize.heap_size() + self.1usize.heap_size() + ...
/// // which is not valid syntax
/// quote! {
/// 0 #( + self.#i.heap_size() )*
/// }
/// ```
///
/// ```
/// # use proc_macro2::{Ident, TokenStream};
/// # use quote::quote;
/// #
/// # mod syn {
/// # use proc_macro2::{Literal, TokenStream};
/// # use quote::{ToTokens, TokenStreamExt};
/// #
/// # pub struct Index(usize);
/// #
/// # impl From<usize> for Index {
/// # fn from(i: usize) -> Self {
/// # Index(i)
/// # }
/// # }
/// #
/// # impl ToTokens for Index {
/// # fn to_tokens(&self, tokens: &mut TokenStream) {
/// # tokens.append(Literal::usize_unsuffixed(self.0));
/// # }
/// # }
/// # }
/// #
/// # struct Struct {
/// # fields: Vec<Ident>,
/// # }
/// #
/// # impl Struct {
/// # fn example(&self) -> TokenStream {
/// let i = (0..self.fields.len()).map(syn::Index::from);
///
/// // expands to 0 + self.0.heap_size() + self.1.heap_size() + ...
/// quote! {
/// 0 #( + self.#i.heap_size() )*
/// }
/// # }
/// # }
/// ```
#[macro_export]
macro_rules! quote {
// Empty invocation: return an empty TokenStream with no bookkeeping.
() => {
$crate::__private::TokenStream::new()
};
// Non-empty invocation: accumulate all quoted tokens into a fresh stream.
// The underscore-prefixed name avoids unused-variable warnings when the
// body expands to nothing (e.g. a repetition over an empty iterator).
($($tt:tt)*) => {{
let mut _s = $crate::__private::TokenStream::new();
$crate::quote_each_token!(_s $($tt)*);
_s
}};
}
/// Same as `quote!`, but applies a given span to all tokens originating within
/// the macro invocation.
///
/// <br>
///
/// # Syntax
///
/// A span expression of type [`Span`], followed by `=>`, followed by the tokens
/// to quote. The span expression should be brief — use a variable for
/// anything more than a few characters. There should be no space before the
/// `=>` token.
///
/// [`Span`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html
///
/// ```
/// # use proc_macro2::Span;
/// # use quote::quote_spanned;
/// #
/// # const IGNORE_TOKENS: &'static str = stringify! {
/// let span = /* ... */;
/// # };
/// # let span = Span::call_site();
/// # let init = 0;
///
/// // On one line, use parentheses.
/// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init)));
///
/// // On multiple lines, place the span at the top and use braces.
/// let tokens = quote_spanned! {span=>
/// Box::into_raw(Box::new(#init))
/// };
/// ```
///
/// The lack of space before the `=>` should look jarring to Rust programmers
/// and this is intentional. The formatting is designed to be visibly
/// off-balance and draw the eye a particular way, due to the span expression
/// being evaluated in the context of the procedural macro and the remaining
/// tokens being evaluated in the generated code.
///
/// <br>
///
/// # Hygiene
///
/// Any interpolated tokens preserve the `Span` information provided by their
/// `ToTokens` implementation. Tokens that originate within the `quote_spanned!`
/// invocation are spanned with the given span argument.
///
/// <br>
///
/// # Example
///
/// The following procedural macro code uses `quote_spanned!` to assert that a
/// particular Rust type implements the [`Sync`] trait so that references can be
/// safely shared between threads.
///
/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
///
/// ```
/// # use quote::{quote_spanned, TokenStreamExt, ToTokens};
/// # use proc_macro2::{Span, TokenStream};
/// #
/// # struct Type;
/// #
/// # impl Type {
/// # fn span(&self) -> Span {
/// # Span::call_site()
/// # }
/// # }
/// #
/// # impl ToTokens for Type {
/// # fn to_tokens(&self, _tokens: &mut TokenStream) {}
/// # }
/// #
/// # let ty = Type;
/// # let call_site = Span::call_site();
/// #
/// let ty_span = ty.span();
/// let assert_sync = quote_spanned! {ty_span=>
/// struct _AssertSync where #ty: Sync;
/// };
/// ```
///
/// If the assertion fails, the user will see an error like the following. The
/// input span of their type is highlighted in the error.
///
/// ```text
/// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied
/// --> src/main.rs:10:21
/// |
/// 10 | static ref PTR: *const () = &();
/// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely
/// ```
///
/// In this example it is important for the where-clause to be spanned with the
/// line/column information of the user's input type so that error messages are
/// placed appropriately by the compiler.
#[macro_export]
macro_rules! quote_spanned {
// Empty body: still type-check the span expression so a wrongly-typed
// span is reported even when no tokens are quoted.
($span:expr=>) => {{
let _: $crate::__private::Span = $span;
$crate::__private::TokenStream::new()
}};
// Evaluate the span expression exactly once up front, then thread the
// resulting `_span` binding through every token that gets emitted.
($span:expr=> $($tt:tt)*) => {{
let mut _s = $crate::__private::TokenStream::new();
let _span: $crate::__private::Span = $span;
$crate::quote_each_token_spanned!(_s _span $($tt)*);
_s
}};
}
// Extract the names of all #metavariables and pass them to the $call macro.
//
// in: pounded_var_names!(then!(...) a #b c #( #d )* #e)
// out: then!(... b);
// then!(... d);
// then!(... e);
#[macro_export]
#[doc(hidden)]
macro_rules! pounded_var_names {
($call:ident! $extra:tt $($tts:tt)*) => {
// Pair every token with the token immediately before it (the two
// lists are offset by one and padded with `@` at the boundary) so
// the callee can detect the two-token `# var` pattern.
$crate::pounded_var_names_with_context!($call! $extra
(@ $($tts)*)
($($tts)* @)
)
};
}
#[macro_export]
#[doc(hidden)]
macro_rules! pounded_var_names_with_context {
// Walk the two equal-length token lists in lockstep: `$b1` is the token
// one position before `$curr` (or the `@` padding at the boundary).
($call:ident! $extra:tt ($($b1:tt)*) ($($curr:tt)*)) => {
$(
$crate::pounded_var_with_context!($call! $extra $b1 $curr);
)*
};
}
#[macro_export]
#[doc(hidden)]
macro_rules! pounded_var_with_context {
// Recurse into parenthesized, bracketed and braced groups so that
// metavariables nested at any depth are still discovered.
($call:ident! $extra:tt $b1:tt ( $($inner:tt)* )) => {
$crate::pounded_var_names!($call! $extra $($inner)*);
};
($call:ident! $extra:tt $b1:tt [ $($inner:tt)* ]) => {
$crate::pounded_var_names!($call! $extra $($inner)*);
};
($call:ident! $extra:tt $b1:tt { $($inner:tt)* }) => {
$crate::pounded_var_names!($call! $extra $($inner)*);
};
// A `#` immediately followed by an identifier is an interpolation:
// forward the variable name to the user-provided callback macro.
($call:ident!($($extra:tt)*) # $var:ident) => {
$crate::$call!($($extra)* $var);
};
// Any other token pair is not an interpolation; emit nothing.
($call:ident! $extra:tt $b1:tt $curr:tt) => {};
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_bind_into_iter {
// Convert an interpolated variable into an iterator for use inside a
// `#(...)*` repetition, OR-ing into `$has_iter` whether it really was
// iterable (used later to reject repetitions with no iterators at all).
($has_iter:ident $var:ident) => {
// `mut` may be unused if $var occurs multiple times in the list.
#[allow(unused_mut)]
let (mut $var, i) = $var.quote_into_iter();
let $has_iter = $has_iter | i;
};
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_bind_next_or_break {
// Advance one repetition iterator; exit the surrounding `while true`
// loop as soon as any interpolated iterator is exhausted.
($var:ident) => {
let $var = match $var.next() {
Some(_x) => $crate::__private::RepInterp(_x),
None => break,
};
};
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_each_token {
// Give every token a sliding window of the three tokens before and after
// it (padded with `@`; the current position is parenthesized so it can be
// matched as a single tt) so multi-token constructs such as `#var`,
// `#(...)*` and `#(...) sep *` can be recognized.
($tokens:ident $($tts:tt)*) => {
$crate::quote_tokens_with_context!($tokens
(@ @ @ @ @ @ $($tts)*)
(@ @ @ @ @ $($tts)* @)
(@ @ @ @ $($tts)* @ @)
(@ @ @ $(($tts))* @ @ @)
(@ @ $($tts)* @ @ @ @)
(@ $($tts)* @ @ @ @ @)
($($tts)* @ @ @ @ @ @)
);
};
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_each_token_spanned {
// Same sliding-window scheme as `quote_each_token!`, but additionally
// threads the `$span` binding through to every emitted token.
($tokens:ident $span:ident $($tts:tt)*) => {
$crate::quote_tokens_with_context_spanned!($tokens $span
(@ @ @ @ @ @ $($tts)*)
(@ @ @ @ @ $($tts)* @)
(@ @ @ @ $($tts)* @ @)
(@ @ @ $(($tts))* @ @ @)
(@ @ $($tts)* @ @ @ @)
(@ $($tts)* @ @ @ @ @)
($($tts)* @ @ @ @ @ @)
);
};
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_tokens_with_context {
// Zip the seven shifted token lists and dispatch one call per position.
($tokens:ident
($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*)
($($curr:tt)*)
($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*)
) => {
$(
$crate::quote_token_with_context!($tokens $b3 $b2 $b1 $curr $a1 $a2 $a3);
)*
};
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_tokens_with_context_spanned {
// Spanned variant of `quote_tokens_with_context!`: zip the seven shifted
// lists and dispatch one call per position, carrying `$span` along.
($tokens:ident $span:ident
($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*)
($($curr:tt)*)
($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*)
) => {
$(
$crate::quote_token_with_context_spanned!($tokens $span $b3 $b2 $b1 $curr $a1 $a2 $a3);
)*
};
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_token_with_context {
// Window centered on `@` padding: nothing to emit.
($tokens:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {};
// Window centered on the `#` of an un-separated repetition `#( ... )*`:
// bind the interpolated iterators and emit the body once per item.
($tokens:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{
use $crate::__private::ext::*;
let has_iter = $crate::__private::ThereIsNoIteratorInRepetition;
$crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*);
let _: $crate::__private::HasIterator = has_iter;
// This is `while true` instead of `loop` because if there are no
// iterators used inside of this repetition then the body would not
// contain any `break`, so the compiler would emit unreachable code
// warnings on anything below the loop. We use has_iter to detect and
// fail to compile when there are no iterators, so here we just work
// around the unneeded extra warning.
while true {
$crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*);
$crate::quote_each_token!($tokens $($inner)*);
}
}};
// The group and star of a repetition were already consumed when the
// window was centered on the `#`; emit nothing at these positions.
($tokens:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {};
($tokens:ident $b3:tt # ( $($inner:tt)* ) (*) $a1:tt $a2:tt $a3:tt) => {};
// Separated repetition `#( ... ) sep *`: like the un-separated case but
// interpose the separator token between consecutive items.
($tokens:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) $sep:tt *) => {{
use $crate::__private::ext::*;
let mut _i = 0usize;
let has_iter = $crate::__private::ThereIsNoIteratorInRepetition;
$crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*);
let _: $crate::__private::HasIterator = has_iter;
while true {
$crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*);
if _i > 0 {
$crate::quote_token!($sep $tokens);
}
_i += 1;
$crate::quote_each_token!($tokens $($inner)*);
}
}};
// Positions inside an already-emitted separated repetition: no output.
($tokens:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {};
($tokens:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {};
// Special case: a repetition whose separator is itself `*`.
($tokens:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => {
// https://github.com/dtolnay/quote/issues/130
$crate::quote_token!(* $tokens);
};
($tokens:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {};
// `#ident` interpolation: splice the variable via its ToTokens impl.
($tokens:ident $b3:tt $b2:tt $b1:tt (#) $var:ident $a2:tt $a3:tt) => {
$crate::ToTokens::to_tokens(&$var, &mut $tokens);
};
// The identifier right after a `#` was consumed by the rule above.
($tokens:ident $b3:tt $b2:tt # ($var:ident) $a1:tt $a2:tt $a3:tt) => {};
// Plain token: emit it literally.
($tokens:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => {
$crate::quote_token!($curr $tokens);
};
}
#[macro_export]
#[doc(hidden)]
macro_rules! quote_token_with_context_spanned {
// Spanned twin of `quote_token_with_context!`; the rules mirror it
// one-for-one, with `$span` threaded through every emission.
// Window centered on `@` padding: nothing to emit.
($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {};
// Un-separated repetition `#( ... )*`.
($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{
use $crate::__private::ext::*;
let has_iter = $crate::__private::ThereIsNoIteratorInRepetition;
$crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*);
let _: $crate::__private::HasIterator = has_iter;
// This is `while true` instead of `loop` because if there are no
// iterators used inside of this repetition then the body would not
// contain any `break`, so the compiler would emit unreachable code
// warnings on anything below the loop. We use has_iter to detect and
// fail to compile when there are no iterators, so here we just work
// around the unneeded extra warning.
while true {
$crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*);
$crate::quote_each_token_spanned!($tokens $span $($inner)*);
}
}};
// Group/star already consumed by the rule above.
($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {};
($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) (*) $a1:tt $a2:tt $a3:tt) => {};
// Separated repetition `#( ... ) sep *`.
($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) $sep:tt *) => {{
use $crate::__private::ext::*;
let mut _i = 0usize;
let has_iter = $crate::__private::ThereIsNoIteratorInRepetition;
$crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*);
let _: $crate::__private::HasIterator = has_iter;
while true {
$crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*);
if _i > 0 {
$crate::quote_token_spanned!($tokens $span $sep);
}
_i += 1;
$crate::quote_each_token_spanned!($tokens $span $($inner)*);
}
}};
// Positions inside an already-emitted separated repetition: no output.
($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {};
($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {};
// Special case: a repetition whose separator is itself `*`.
($tokens:ident $span:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => {
// https://github.com/dtolnay/quote/issues/130
$crate::quote_token_spanned!($tokens $span *);
};
($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {};
// `#ident` interpolation via ToTokens (the variable keeps its own spans).
($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) $var:ident $a2:tt $a3:tt) => {
$crate::ToTokens::to_tokens(&$var, &mut $tokens);
};
// The identifier right after a `#` was consumed by the rule above.
($tokens:ident $span:ident $b3:tt $b2:tt # ($var:ident) $a1:tt $a2:tt $a3:tt) => {};
// Plain token: emit it with the given span.
($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => {
$crate::quote_token_spanned!($tokens $span $curr);
};
}
// These rules are ordered by approximate token frequency, at least for the
// first 10 or so, to improve compile times. Having `ident` first is by far the
// most important, because it's typically 2-3x more common than the next most
// common token.
#[macro_export]
#[doc(hidden)]
macro_rules! quote_token {
// Append one literal token to `$tokens`. Each punctuation token has its
// own rule dispatching to a dedicated `push_*` helper; groups recurse via
// `quote!`; anything unmatched falls through to `parse` at the bottom.
($ident:ident $tokens:ident) => {
$crate::__private::push_ident(&mut $tokens, stringify!($ident));
};
(:: $tokens:ident) => {
$crate::__private::push_colon2(&mut $tokens);
};
(( $($inner:tt)* ) $tokens:ident) => {
$crate::__private::push_group(
&mut $tokens,
$crate::__private::Delimiter::Parenthesis,
$crate::quote!($($inner)*),
);
};
([ $($inner:tt)* ] $tokens:ident) => {
$crate::__private::push_group(
&mut $tokens,
$crate::__private::Delimiter::Bracket,
$crate::quote!($($inner)*),
);
};
({ $($inner:tt)* } $tokens:ident) => {
$crate::__private::push_group(
&mut $tokens,
$crate::__private::Delimiter::Brace,
$crate::quote!($($inner)*),
);
};
(# $tokens:ident) => {
$crate::__private::push_pound(&mut $tokens);
};
(, $tokens:ident) => {
$crate::__private::push_comma(&mut $tokens);
};
(. $tokens:ident) => {
$crate::__private::push_dot(&mut $tokens);
};
(; $tokens:ident) => {
$crate::__private::push_semi(&mut $tokens);
};
(: $tokens:ident) => {
$crate::__private::push_colon(&mut $tokens);
};
(+ $tokens:ident) => {
$crate::__private::push_add(&mut $tokens);
};
(+= $tokens:ident) => {
$crate::__private::push_add_eq(&mut $tokens);
};
(& $tokens:ident) => {
$crate::__private::push_and(&mut $tokens);
};
(&& $tokens:ident) => {
$crate::__private::push_and_and(&mut $tokens);
};
(&= $tokens:ident) => {
$crate::__private::push_and_eq(&mut $tokens);
};
(@ $tokens:ident) => {
$crate::__private::push_at(&mut $tokens);
};
(! $tokens:ident) => {
$crate::__private::push_bang(&mut $tokens);
};
(^ $tokens:ident) => {
$crate::__private::push_caret(&mut $tokens);
};
(^= $tokens:ident) => {
$crate::__private::push_caret_eq(&mut $tokens);
};
(/ $tokens:ident) => {
$crate::__private::push_div(&mut $tokens);
};
(/= $tokens:ident) => {
$crate::__private::push_div_eq(&mut $tokens);
};
(.. $tokens:ident) => {
$crate::__private::push_dot2(&mut $tokens);
};
(... $tokens:ident) => {
$crate::__private::push_dot3(&mut $tokens);
};
(..= $tokens:ident) => {
$crate::__private::push_dot_dot_eq(&mut $tokens);
};
(= $tokens:ident) => {
$crate::__private::push_eq(&mut $tokens);
};
(== $tokens:ident) => {
$crate::__private::push_eq_eq(&mut $tokens);
};
(>= $tokens:ident) => {
$crate::__private::push_ge(&mut $tokens);
};
(> $tokens:ident) => {
$crate::__private::push_gt(&mut $tokens);
};
(<= $tokens:ident) => {
$crate::__private::push_le(&mut $tokens);
};
(< $tokens:ident) => {
$crate::__private::push_lt(&mut $tokens);
};
(*= $tokens:ident) => {
$crate::__private::push_mul_eq(&mut $tokens);
};
(!= $tokens:ident) => {
$crate::__private::push_ne(&mut $tokens);
};
(| $tokens:ident) => {
$crate::__private::push_or(&mut $tokens);
};
(|= $tokens:ident) => {
$crate::__private::push_or_eq(&mut $tokens);
};
(|| $tokens:ident) => {
$crate::__private::push_or_or(&mut $tokens);
};
(? $tokens:ident) => {
$crate::__private::push_question(&mut $tokens);
};
(-> $tokens:ident) => {
$crate::__private::push_rarrow(&mut $tokens);
};
(<- $tokens:ident) => {
$crate::__private::push_larrow(&mut $tokens);
};
(% $tokens:ident) => {
$crate::__private::push_rem(&mut $tokens);
};
(%= $tokens:ident) => {
$crate::__private::push_rem_eq(&mut $tokens);
};
(=> $tokens:ident) => {
$crate::__private::push_fat_arrow(&mut $tokens);
};
(<< $tokens:ident) => {
$crate::__private::push_shl(&mut $tokens);
};
(<<= $tokens:ident) => {
$crate::__private::push_shl_eq(&mut $tokens);
};
(>> $tokens:ident) => {
$crate::__private::push_shr(&mut $tokens);
};
(>>= $tokens:ident) => {
$crate::__private::push_shr_eq(&mut $tokens);
};
(* $tokens:ident) => {
$crate::__private::push_star(&mut $tokens);
};
(- $tokens:ident) => {
$crate::__private::push_sub(&mut $tokens);
};
(-= $tokens:ident) => {
$crate::__private::push_sub_eq(&mut $tokens);
};
($lifetime:lifetime $tokens:ident) => {
$crate::__private::push_lifetime(&mut $tokens, stringify!($lifetime));
};
(_ $tokens:ident) => {
$crate::__private::push_underscore(&mut $tokens);
};
// Fallback for literals and any token not matched above: round-trip it
// through `stringify!` and re-parse it into the stream.
($other:tt $tokens:ident) => {
$crate::__private::parse(&mut $tokens, stringify!($other));
};
}
// See the comment above `quote_token!` about the rule ordering.
#[macro_export]
#[doc(hidden)]
macro_rules! quote_token_spanned {
// Spanned twin of `quote_token!`: identical rule set, but every helper
// takes the extra `$span` and stamps it onto the emitted token.
($tokens:ident $span:ident $ident:ident) => {
$crate::__private::push_ident_spanned(&mut $tokens, $span, stringify!($ident));
};
($tokens:ident $span:ident ::) => {
$crate::__private::push_colon2_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ( $($inner:tt)* )) => {
$crate::__private::push_group_spanned(
&mut $tokens,
$span,
$crate::__private::Delimiter::Parenthesis,
$crate::quote_spanned!($span=> $($inner)*),
);
};
($tokens:ident $span:ident [ $($inner:tt)* ]) => {
$crate::__private::push_group_spanned(
&mut $tokens,
$span,
$crate::__private::Delimiter::Bracket,
$crate::quote_spanned!($span=> $($inner)*),
);
};
($tokens:ident $span:ident { $($inner:tt)* }) => {
$crate::__private::push_group_spanned(
&mut $tokens,
$span,
$crate::__private::Delimiter::Brace,
$crate::quote_spanned!($span=> $($inner)*),
);
};
($tokens:ident $span:ident #) => {
$crate::__private::push_pound_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ,) => {
$crate::__private::push_comma_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident .) => {
$crate::__private::push_dot_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ;) => {
$crate::__private::push_semi_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident :) => {
$crate::__private::push_colon_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident +) => {
$crate::__private::push_add_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident +=) => {
$crate::__private::push_add_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident &) => {
$crate::__private::push_and_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident &&) => {
$crate::__private::push_and_and_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident &=) => {
$crate::__private::push_and_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident @) => {
$crate::__private::push_at_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident !) => {
$crate::__private::push_bang_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ^) => {
$crate::__private::push_caret_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ^=) => {
$crate::__private::push_caret_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident /) => {
$crate::__private::push_div_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident /=) => {
$crate::__private::push_div_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ..) => {
$crate::__private::push_dot2_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ...) => {
$crate::__private::push_dot3_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ..=) => {
$crate::__private::push_dot_dot_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident =) => {
$crate::__private::push_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ==) => {
$crate::__private::push_eq_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident >=) => {
$crate::__private::push_ge_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident >) => {
$crate::__private::push_gt_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident <=) => {
$crate::__private::push_le_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident <) => {
$crate::__private::push_lt_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident *=) => {
$crate::__private::push_mul_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident !=) => {
$crate::__private::push_ne_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident |) => {
$crate::__private::push_or_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident |=) => {
$crate::__private::push_or_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ||) => {
$crate::__private::push_or_or_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ?) => {
$crate::__private::push_question_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident ->) => {
$crate::__private::push_rarrow_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident <-) => {
$crate::__private::push_larrow_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident %) => {
$crate::__private::push_rem_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident %=) => {
$crate::__private::push_rem_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident =>) => {
$crate::__private::push_fat_arrow_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident <<) => {
$crate::__private::push_shl_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident <<=) => {
$crate::__private::push_shl_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident >>) => {
$crate::__private::push_shr_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident >>=) => {
$crate::__private::push_shr_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident *) => {
$crate::__private::push_star_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident -) => {
$crate::__private::push_sub_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident -=) => {
$crate::__private::push_sub_eq_spanned(&mut $tokens, $span);
};
($tokens:ident $span:ident $lifetime:lifetime) => {
$crate::__private::push_lifetime_spanned(&mut $tokens, $span, stringify!($lifetime));
};
($tokens:ident $span:ident _) => {
$crate::__private::push_underscore_spanned(&mut $tokens, $span);
};
// Fallback for literals and anything not matched above: stringify and
// re-parse the token with the requested span.
($tokens:ident $span:ident $other:tt) => {
$crate::__private::parse_spanned(&mut $tokens, $span, stringify!($other));
};
}
|
#![allow(trivial_casts)]
#![warn(rust_2018_idioms)]
#![cfg_attr(test, deny(warnings))]
extern crate conduit;
extern crate conduit_mime_types as mime;
extern crate filetime;
#[cfg(test)]
extern crate tempdir;
extern crate time;
use conduit::{Handler, Request, Response};
use filetime::FileTime;
use std::collections::HashMap;
use std::error::Error;
use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
/// A conduit `Handler` that serves static files out of a base directory.
pub struct Static {
// Root directory that request paths are resolved against.
path: PathBuf,
// MIME lookup table used to pick the `Content-Type` response header.
types: mime::Types,
}
impl Static {
/// Creates a handler that serves files from beneath `path`.
///
/// # Panics
///
/// Panics if the bundled mime-type database cannot be loaded.
pub fn new<P: AsRef<Path>>(path: P) -> Static {
Static {
path: path.as_ref().to_path_buf(),
// `.expect()` directly on the `Result` keeps the underlying error
// in the panic message; the previous `.ok().expect(...)` discarded
// it (clippy: `ok_expect`).
types: mime::Types::new().expect("Couldn't load mime-types"),
}
}
}
impl Handler for Static {
/// Serves the file named by the request path, or a 404 response when the
/// path is missing, refers to a directory, or contains a `..` component.
#[allow(deprecated)]
fn call(&self, request: &mut dyn Request) -> Result<Response, Box<dyn Error + Send>> {
// Strip the leading `/` so the path joins relative to `self.path`.
let request_path = &request.path()[1..];
// Reject any path-traversal attempt outright.
if request_path.contains("..") {
return Ok(not_found());
}
let path = self.path.join(request_path);
let mime = self.types.mime_for_path(&path);
let file = match File::open(&path) {
Ok(f) => f,
Err(..) => return Ok(not_found()),
};
let data = file
.metadata()
.map_err(|e| Box::new(e) as Box<dyn Error + Send>)?;
// Directories are not served; report them as missing.
if data.is_dir() {
return Ok(not_found());
}
// Build the Last-Modified header from the file's mtime.
let mtime = FileTime::from_last_modification_time(&data);
let ts = time::Timespec {
sec: mtime.unix_seconds() as i64,
nsec: mtime.nanoseconds() as i32,
};
let tm = time::at(ts).to_utc();
let mut headers = HashMap::new();
headers.insert("Content-Type".to_string(), vec![mime.to_string()]);
headers.insert("Content-Length".to_string(), vec![data.len().to_string()]);
headers.insert(
"Last-Modified".to_string(),
vec![tm.strftime("%a, %d %b %Y %T GMT").unwrap().to_string()],
);
Ok(Response {
status: (200, "OK"),
// Field-init shorthand (was the redundant `headers: headers`).
headers,
body: Box::new(file),
})
}
}
/// Builds the canonical 404 response: an empty `text/plain` body.
fn not_found() -> Response {
let mut headers = HashMap::new();
headers.insert("Content-Length".to_string(), vec!["0".to_string()]);
headers.insert("Content-Type".to_string(), vec!["text/plain".to_string()]);
Response {
status: (404, "Not Found"),
// Field-init shorthand (was the redundant `headers: headers`).
headers,
body: Box::new(io::empty()),
}
}
#[cfg(test)]
mod tests {
extern crate conduit_test as test;
use std::fs::{self, File};
use std::io::prelude::*;
use tempdir::TempDir;
use conduit::{Handler, Method};
use Static;
// An existing file is served with its exact contents, the mime type
// inferred from its extension, and a matching Content-Length.
#[test]
fn test_static() {
let td = TempDir::new("conduit-static").unwrap();
let root = td.path();
let handler = Static::new(root.clone());
File::create(&root.join("Cargo.toml"))
.unwrap()
.write_all(b"[package]")
.unwrap();
let mut req = test::MockRequest::new(Method::Get, "/Cargo.toml");
let mut res = handler.call(&mut req).ok().expect("No response");
let mut body = Vec::new();
res.body.write_body(&mut body).unwrap();
assert_eq!(body, b"[package]");
assert_eq!(
res.headers.get("Content-Type"),
Some(&vec!("text/plain".to_string()))
);
assert_eq!(
res.headers.get("Content-Length"),
Some(&vec!["9".to_string()])
);
}
// Content-Type follows the file extension (.css -> text/css).
#[test]
fn test_mime_types() {
let td = TempDir::new("conduit-static").unwrap();
let root = td.path();
fs::create_dir(&root.join("src")).unwrap();
File::create(&root.join("src/fixture.css")).unwrap();
let handler = Static::new(root.clone());
let mut req = test::MockRequest::new(Method::Get, "/src/fixture.css");
let res = handler.call(&mut req).ok().expect("No response");
assert_eq!(
res.headers.get("Content-Type"),
Some(&vec!("text/css".to_string()))
);
assert_eq!(
res.headers.get("Content-Length"),
Some(&vec!["0".to_string()])
);
}
// A nonexistent path yields 404 rather than an error.
#[test]
fn test_missing() {
let td = TempDir::new("conduit-static").unwrap();
let root = td.path();
let handler = Static::new(root.clone());
let mut req = test::MockRequest::new(Method::Get, "/nope");
let res = handler.call(&mut req).ok().expect("No response");
assert_eq!(res.status.0, 404);
}
// Directories are never served; they also yield 404.
#[test]
fn test_dir() {
let td = TempDir::new("conduit-static").unwrap();
let root = td.path();
fs::create_dir(&root.join("foo")).unwrap();
let handler = Static::new(root.clone());
let mut req = test::MockRequest::new(Method::Get, "/foo");
let res = handler.call(&mut req).ok().expect("No response");
assert_eq!(res.status.0, 404);
}
// Successful responses carry a Last-Modified header.
#[test]
fn last_modified() {
let td = TempDir::new("conduit-static").unwrap();
let root = td.path();
File::create(&root.join("test")).unwrap();
let handler = Static::new(root.clone());
let mut req = test::MockRequest::new(Method::Get, "/test");
let res = handler.call(&mut req).ok().expect("No response");
assert_eq!(res.status.0, 200);
assert!(res.headers.get("Last-Modified").is_some());
}
}
Minor cleanups
#![allow(trivial_casts)]
#![warn(rust_2018_idioms)]
#![cfg_attr(test, deny(warnings))]
extern crate conduit;
extern crate conduit_mime_types as mime;
extern crate filetime;
#[cfg(test)]
extern crate tempdir;
extern crate time;
use conduit::{Handler, Request, Response};
use filetime::FileTime;
use std::collections::HashMap;
use std::error::Error;
use std::fs::File;
use std::io;
use std::path::{Path, PathBuf};
/// A conduit `Handler` that serves static files out of a base directory.
pub struct Static {
// Root directory that request paths are resolved against.
path: PathBuf,
// MIME lookup table used to pick the `Content-Type` response header.
types: mime::Types,
}
impl Static {
/// Builds a static-file handler rooted at `path`.
///
/// # Panics
///
/// Panics if the mime-type database cannot be loaded.
pub fn new<P: AsRef<Path>>(path: P) -> Static {
let root = path.as_ref().to_path_buf();
let types = mime::Types::new().expect("Couldn't load mime-types");
Static { path: root, types }
}
}
impl Handler for Static {
/// Serves the file named by the request path, or a 404 response when the
/// path is missing, refers to a directory, or contains a `..` component.
#[allow(deprecated)]
fn call(&self, request: &mut dyn Request) -> Result<Response, Box<dyn Error + Send>> {
// Strip the leading `/` so the path joins relative to `self.path`.
let request_path = &request.path()[1..];
// Reject any path-traversal attempt outright.
if request_path.contains("..") {
return Ok(not_found());
}
let path = self.path.join(request_path);
let mime = self.types.mime_for_path(&path);
let file = match File::open(&path) {
Ok(f) => f,
Err(..) => return Ok(not_found()),
};
let data = file
.metadata()
.map_err(|e| Box::new(e) as Box<dyn Error + Send>)?;
// Directories are not served; report them as missing.
if data.is_dir() {
return Ok(not_found());
}
// Build the Last-Modified header from the file's mtime.
let mtime = FileTime::from_last_modification_time(&data);
let ts = time::Timespec {
sec: mtime.unix_seconds() as i64,
nsec: mtime.nanoseconds() as i32,
};
let tm = time::at(ts).to_utc();
let mut headers = HashMap::new();
headers.insert("Content-Type".to_string(), vec![mime.to_string()]);
headers.insert("Content-Length".to_string(), vec![data.len().to_string()]);
headers.insert(
"Last-Modified".to_string(),
vec![tm.strftime("%a, %d %b %Y %T GMT").unwrap().to_string()],
);
Ok(Response {
status: (200, "OK"),
headers,
body: Box::new(file),
})
}
}
/// Constructs the canonical 404 response: an empty `text/plain` body.
fn not_found() -> Response {
let mut h = HashMap::new();
h.insert("Content-Type".to_string(), vec!["text/plain".to_string()]);
h.insert("Content-Length".to_string(), vec!["0".to_string()]);
Response {
status: (404, "Not Found"),
headers: h,
body: Box::new(io::empty()),
}
}
#[cfg(test)]
mod tests {
extern crate conduit_test as test;
use std::fs::{self, File};
use std::io::prelude::*;
use tempdir::TempDir;
use conduit::{Handler, Method};
use Static;
// An existing file is served with its exact contents, the mime type
// inferred from its extension, and a matching Content-Length.
#[test]
fn test_static() {
let td = TempDir::new("conduit-static").unwrap();
let root = td.path();
let handler = Static::new(root.clone());
File::create(&root.join("Cargo.toml"))
.unwrap()
.write_all(b"[package]")
.unwrap();
let mut req = test::MockRequest::new(Method::Get, "/Cargo.toml");
let mut res = handler.call(&mut req).ok().expect("No response");
let mut body = Vec::new();
res.body.write_body(&mut body).unwrap();
assert_eq!(body, b"[package]");
assert_eq!(
res.headers.get("Content-Type"),
Some(&vec!("text/plain".to_string()))
);
assert_eq!(
res.headers.get("Content-Length"),
Some(&vec!["9".to_string()])
);
}
// Content-Type follows the file extension (.css -> text/css).
#[test]
fn test_mime_types() {
let td = TempDir::new("conduit-static").unwrap();
let root = td.path();
fs::create_dir(&root.join("src")).unwrap();
File::create(&root.join("src/fixture.css")).unwrap();
let handler = Static::new(root.clone());
let mut req = test::MockRequest::new(Method::Get, "/src/fixture.css");
let res = handler.call(&mut req).ok().expect("No response");
assert_eq!(
res.headers.get("Content-Type"),
Some(&vec!("text/css".to_string()))
);
assert_eq!(
res.headers.get("Content-Length"),
Some(&vec!["0".to_string()])
);
}
// A nonexistent path yields 404 rather than an error.
#[test]
fn test_missing() {
let td = TempDir::new("conduit-static").unwrap();
let root = td.path();
let handler = Static::new(root.clone());
let mut req = test::MockRequest::new(Method::Get, "/nope");
let res = handler.call(&mut req).ok().expect("No response");
assert_eq!(res.status.0, 404);
}
// Directories are never served; they also yield 404.
#[test]
fn test_dir() {
let td = TempDir::new("conduit-static").unwrap();
let root = td.path();
fs::create_dir(&root.join("foo")).unwrap();
let handler = Static::new(root.clone());
let mut req = test::MockRequest::new(Method::Get, "/foo");
let res = handler.call(&mut req).ok().expect("No response");
assert_eq!(res.status.0, 404);
}
// Successful responses carry a Last-Modified header.
#[test]
fn last_modified() {
let td = TempDir::new("conduit-static").unwrap();
let root = td.path();
File::create(&root.join("test")).unwrap();
let handler = Static::new(root.clone());
let mut req = test::MockRequest::new(Method::Get, "/test");
let res = handler.call(&mut req).ok().expect("No response");
assert_eq!(res.status.0, 200);
assert!(res.headers.get("Last-Modified").is_some());
}
}
|
//! # What is This?
//! Crayon is a small, portable and extensible game framework, which is loosely inspired by some
//! amazing blogs on [bitsquid](https://bitsquid.blogspot.de), [molecular](https://blog.molecular-matters.com)
//! and [floooh](http://floooh.github.io/).
//!
//! Some goals include:
//!
//! - Extensible through external code modules;
//! - Run on macOS, Linux, Windows, iOS, Android from the same source;
//! - Built from the ground up to focus on multi-thread friendly with a work-stealing job scheduler;
//! - Stateless, layered, multithread render system with OpenGL(ES) 3.0 backends;
//! - Simplified assets workflow and asynchronous data loading from various filesystem;
//! - Unified interfaces for handling input devices across platforms;
//! - etc.
//!
//! This project adheres to [Semantic Versioning](http://semver.org/), all notable changes will be documented in this [file](./CHANGELOG.md).
//!
//! ### Quick Example
//! For the sake of brevity, you can also run a simple and quick example with commands:
//!
//! ``` sh
//! git clone git@github.com:shawnscode/crayon.git
//! cargo run --example modules_3d_prefab
//! ```
extern crate crossbeam_deque;
#[macro_use]
extern crate cgmath;
extern crate gl;
extern crate glutin;
#[macro_use]
extern crate failure;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde;
pub extern crate bincode;
pub extern crate uuid;
#[doc(hidden)]
pub use log::*;
// FIXME: unresolved serde proc-macro re-export. https://github.com/serde-rs/serde/issues/1147
// #[doc(hidden)]
// pub use serde::*;
// FIXME: unresolved failure proc-macro re-export.
// #[doc(hidden)]
// pub use failure::*;
#[macro_use]
pub mod errors;
#[macro_use]
pub mod utils;
pub mod application;
#[macro_use]
pub mod video;
pub mod input;
pub mod math;
pub mod prelude;
pub mod res;
pub mod sched;
// Re-export macros from cgmath.
//! # What is This?
//! Crayon is a small, portable and extensible game framework, which is loosely inspired by some
//! amazing blogs on [bitsquid](https://bitsquid.blogspot.de), [molecular](https://blog.molecular-matters.com)
//! and [floooh](http://floooh.github.io/).
//!
//! Some goals include:
//!
//! - Extensible through external code modules;
//! - Run on macOS, Linux, Windows, iOS, Android from the same source;
//! - Built from the ground up to focus on multi-thread friendly with a work-stealing job scheduler;
//! - Stateless, layered, multithread render system with OpenGL(ES) 3.0 backends;
//! - Simplified assets workflow and asynchronous data loading from various filesystem;
//! - Unified interfaces for handling input devices across platforms;
//! - etc.
//!
//! This project adheres to [Semantic Versioning](http://semver.org/), all notable changes will be documented in this [file](./CHANGELOG.md).
//!
//! ### Quick Example
//! For the sake of brevity, you can also run a simple and quick example with commands:
//!
//! ``` sh
//! git clone git@github.com:shawnscode/crayon.git
//! cargo run --example modules_3d_prefab
//! ```
extern crate crossbeam_deque;
#[macro_use]
extern crate cgmath;
extern crate gl;
extern crate glutin;
#[macro_use]
extern crate failure;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde;
pub extern crate bincode;
pub extern crate uuid;
#[doc(hidden)]
pub use cgmath::*;
#[doc(hidden)]
pub use log::*;
// FIXME: unresolved serde proc-macro re-export. https://github.com/serde-rs/serde/issues/1147
// #[doc(hidden)]
// pub use serde::*;
// FIXME: unresolved failure proc-macro re-export.
// #[doc(hidden)]
// pub use failure::*;
#[macro_use]
pub mod errors;
#[macro_use]
pub mod utils;
pub mod application;
#[macro_use]
pub mod video;
pub mod input;
pub mod math;
pub mod prelude;
pub mod res;
pub mod sched;
|
//! A crate with utilities to determine the number of CPUs available on the
//! current system.
//!
//! Sometimes the CPU will exaggerate the number of CPUs it contains, because it can use
//! [processor tricks] to deliver increased performance when there are more threads. This
//! crate provides methods to get both the logical and physical numbers of cores.
//!
//! This information can be used as a guide to how many tasks can be run in parallel.
//! There are many properties of the system architecture that will affect parallelism,
//! for example memory access speeds (for all the caches and RAM) and the physical
//! architecture of the processor, so the number of CPUs should be used as a rough guide
//! only.
//!
//!
//! ## Examples
//!
//! Fetch the number of logical CPUs.
//!
//! ```
//! let cpus = num_cpus::get();
//! ```
//!
//! See [`rayon::ThreadPool`] for an example of where the number of CPUs could be
//! used when setting up parallel jobs (Where the threadpool example uses a fixed
//! number 8, it could use the number of CPUs).
//!
//! [processor tricks]: https://en.wikipedia.org/wiki/Simultaneous_multithreading
//! [`rayon::ThreadPool`]: https://docs.rs/rayon/1.*/rayon/struct.ThreadPool.html
#![cfg_attr(test, deny(warnings))]
#![deny(missing_docs)]
#![doc(html_root_url = "https://docs.rs/num_cpus/1.10.1")]
#![allow(non_snake_case)]
#[cfg(not(windows))]
extern crate libc;
#[cfg(test)]
#[macro_use]
extern crate doc_comment;
#[cfg(test)]
doctest!("../README.md");
/// Returns the number of available CPUs of the current system.
///
/// This function will get the number of logical cores. Sometimes this is different from the number
/// of physical cores (See [Simultaneous multithreading on Wikipedia][smt]).
///
/// # Examples
///
/// ```
/// let cpus = num_cpus::get();
/// if cpus > 1 {
/// println!("We are on a multicore system with {} CPUs", cpus);
/// } else {
/// println!("We are on a single core system");
/// }
/// ```
///
/// # Note
///
/// This will check [sched affinity] on Linux, showing a lower number of CPUs if the current
/// thread does not have access to all the computer's CPUs.
///
/// [smt]: https://en.wikipedia.org/wiki/Simultaneous_multithreading
/// [sched affinity]: http://www.gnu.org/software/libc/manual/html_node/CPU-Affinity.html
#[inline]
pub fn get() -> usize {
    // Delegates to the platform-specific implementation selected by cfg.
    get_num_cpus()
}
/// Returns the number of physical cores of the current system.
///
/// # Note
///
/// Physical count is supported only on Linux, mac OS and Windows platforms.
/// On other platforms, or if the physical count fails on supported platforms,
/// this function returns the same as [`get()`], which is the number of logical
/// CPUS.
///
/// # Examples
///
/// ```
/// let logical_cpus = num_cpus::get();
/// let physical_cpus = num_cpus::get_physical();
/// if logical_cpus > physical_cpus {
/// println!("We have simultaneous multithreading with about {:.2} \
/// logical cores to 1 physical core.",
/// (logical_cpus as f64) / (physical_cpus as f64));
/// } else if logical_cpus == physical_cpus {
/// println!("Either we don't have simultaneous multithreading, or our \
/// system doesn't support getting the number of physical CPUs.");
/// } else {
/// println!("We have less logical CPUs than physical CPUs, maybe we only have access to \
/// some of the CPUs on our system.");
/// }
/// ```
///
/// [`get()`]: fn.get.html
#[inline]
pub fn get_physical() -> usize {
    // Delegates to the platform-specific implementation; platforms without
    // one fall back to the logical count (see the cfg-gated definitions).
    get_num_physical_cpus()
}
#[cfg(not(any(target_os = "linux", target_os = "windows", target_os="macos")))]
#[inline]
fn get_num_physical_cpus() -> usize {
    // Not implemented, fall back
    // Physical-core detection only exists for Linux/Windows/macOS; elsewhere
    // the logical CPU count is the best available answer.
    get_num_cpus()
}
#[cfg(target_os = "windows")]
// Physical core count on Windows; falls back to the logical count whenever
// the Win32 query cannot produce a usable answer.
fn get_num_physical_cpus() -> usize {
    get_num_physical_cpus_windows().unwrap_or_else(get_num_cpus)
}
#[cfg(target_os = "windows")]
// Queries the physical-core count via GetLogicalProcessorInformation.
// Returns None when the API fails or reports nothing usable, letting the
// caller fall back to the logical count.
fn get_num_physical_cpus_windows() -> Option<usize> {
    // Inspired by https://msdn.microsoft.com/en-us/library/ms683194
    use std::ptr;
    use std::mem;

    #[allow(non_upper_case_globals)]
    const RelationProcessorCore: u32 = 0;

    // Minimal mirror of the Win32 SYSTEM_LOGICAL_PROCESSOR_INFORMATION
    // layout; only `relationship` is inspected below.
    #[repr(C)]
    #[allow(non_camel_case_types)]
    struct SYSTEM_LOGICAL_PROCESSOR_INFORMATION {
        mask: usize,
        relationship: u32,
        _unused: [u64; 2]
    }

    extern "system" {
        fn GetLogicalProcessorInformation(
            info: *mut SYSTEM_LOGICAL_PROCESSOR_INFORMATION,
            length: &mut u32
        ) -> u32;
    }

    // First we need to determine how much space to reserve.
    // The required size of the buffer, in bytes.
    let mut needed_size = 0;
    // A null-buffer call only fills in `needed_size`.
    unsafe {
        GetLogicalProcessorInformation(ptr::null_mut(), &mut needed_size);
    }
    let struct_size = mem::size_of::<SYSTEM_LOGICAL_PROCESSOR_INFORMATION>() as u32;
    // Could be 0, or some other bogus size.
    if needed_size == 0 || needed_size < struct_size || needed_size % struct_size != 0 {
        return None;
    }
    let count = needed_size / struct_size;
    // Allocate some memory where we will store the processor info.
    let mut buf = Vec::with_capacity(count as usize);
    let result;
    unsafe {
        result = GetLogicalProcessorInformation(buf.as_mut_ptr(), &mut needed_size);
    }
    // Failed for any reason.
    if result == 0 {
        return None;
    }
    let count = needed_size / struct_size;
    // SAFETY: the call succeeded, so the first `count` entries were written.
    unsafe {
        buf.set_len(count as usize);
    }
    // Each RelationProcessorCore record describes one physical core (not a
    // processor package, despite the wording in older comments).
    let phys_proc_count = buf.iter()
        .filter(|proc_info| proc_info.relationship == RelationProcessorCore)
        .count();
    if phys_proc_count == 0 {
        None
    } else {
        Some(phys_proc_count)
    }
}
#[cfg(target_os = "linux")]
// Counts physical cores by parsing `/proc/cpuinfo`.
//
// Each processor entry reports its package via `physical id` and the number
// of cores in that package via `cpu cores`. Summing `cpu cores` once per
// distinct `physical id` is robust on CPUs (e.g. AMD ThreadRipper) where
// `core id` values repeat across packages, which would make a set of
// (physical id, core id) pairs undercount.
fn get_num_physical_cpus() -> usize {
    use std::collections::HashMap;
    use std::fs::File;
    use std::io::{BufRead, BufReader};

    let file = match File::open("/proc/cpuinfo") {
        Ok(val) => val,
        // If cpuinfo is unavailable, fall back to the logical CPU count.
        Err(_) => return get_num_cpus(),
    };
    let reader = BufReader::new(file);
    // physical id -> number of cores in that package.
    let mut map = HashMap::new();
    let mut physid: u32 = 0;
    let mut cores: usize = 0;
    let mut chgcount = 0;
    for line in reader.lines().filter_map(|result| result.ok()) {
        let mut it = line.split(':');
        let (key, value) = match (it.next(), it.next()) {
            (Some(key), Some(value)) => (key.trim(), value.trim()),
            _ => continue,
        };
        if key == "physical id" {
            match value.parse() {
                Ok(val) => physid = val,
                Err(_) => break,
            };
            chgcount += 1;
        }
        if key == "cpu cores" {
            match value.parse() {
                Ok(val) => cores = val,
                Err(_) => break,
            };
            chgcount += 1;
        }
        // Record once both fields of the current processor entry are seen.
        if chgcount == 2 {
            map.insert(physid, cores);
            chgcount = 0;
        }
    }
    let count: usize = map.values().sum();
    // Zero means the fields were missing (e.g. some ARM kernels); fall back.
    if count == 0 {
        get_num_cpus()
    } else {
        count
    }
}
#[cfg(windows)]
// Logical CPU count on Windows, via the Win32 `GetSystemInfo` API.
fn get_num_cpus() -> usize {
    // Minimal mirror of the Win32 SYSTEM_INFO layout; only
    // `dwNumberOfProcessors` is read after the call.
    #[repr(C)]
    struct SYSTEM_INFO {
        wProcessorArchitecture: u16,
        wReserved: u16,
        dwPageSize: u32,
        lpMinimumApplicationAddress: *mut u8,
        lpMaximumApplicationAddress: *mut u8,
        dwActiveProcessorMask: *mut u8,
        dwNumberOfProcessors: u32,
        dwProcessorType: u32,
        dwAllocationGranularity: u32,
        wProcessorLevel: u16,
        wProcessorRevision: u16,
    }
    extern "system" {
        fn GetSystemInfo(lpSystemInfo: *mut SYSTEM_INFO);
    }
    unsafe {
        // SAFETY: SYSTEM_INFO is plain-old-data (integers and raw pointers),
        // for which the all-zero bit pattern is valid, so `zeroed` is sound —
        // unlike the deprecated, UB-prone `mem::uninitialized` used before.
        // GetSystemInfo then overwrites the struct unconditionally.
        let mut sysinfo: SYSTEM_INFO = std::mem::zeroed();
        GetSystemInfo(&mut sysinfo);
        sysinfo.dwNumberOfProcessors as usize
    }
}
#[cfg(any(target_os = "freebsd",
          target_os = "dragonfly",
          target_os = "netbsd"))]
// Logical CPU count on FreeBSD/DragonFly/NetBSD: prefer sysconf's
// online-processor count, falling back to the HW_NCPU sysctl.
fn get_num_cpus() -> usize {
    use std::ptr;
    let mut cpus: libc::c_uint = 0;
    let mut cpus_size = std::mem::size_of_val(&cpus);
    unsafe {
        cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
    }
    if cpus < 1 {
        let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
        unsafe {
            // Only the first two mib levels are meaningful (namelen = 2).
            libc::sysctl(mib.as_mut_ptr(),
                         2,
                         &mut cpus as *mut _ as *mut _,
                         &mut cpus_size as *mut _ as *mut _,
                         ptr::null_mut(),
                         0);
        }
        // Never report fewer than one CPU.
        if cpus < 1 {
            cpus = 1;
        }
    }
    cpus as usize
}
#[cfg(target_os = "openbsd")]
// Logical CPU count on OpenBSD via the HW_NCPU sysctl.
fn get_num_cpus() -> usize {
    use std::ptr;
    let mut cpus: libc::c_uint = 0;
    let mut cpus_size = std::mem::size_of_val(&cpus);
    let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
    unsafe {
        // Only the first two mib levels are meaningful (namelen = 2).
        libc::sysctl(mib.as_mut_ptr(),
                     2,
                     &mut cpus as *mut _ as *mut _,
                     &mut cpus_size as *mut _ as *mut _,
                     ptr::null_mut(),
                     0);
    }
    // Guard against a failed or zero result.
    if cpus < 1 {
        cpus = 1;
    }
    cpus as usize
}
#[cfg(target_os = "macos")]
// Physical core count on macOS via the `hw.physicalcpu` sysctl.
fn get_num_physical_cpus() -> usize {
    use std::ffi::CStr;
    use std::ptr;
    let mut cpus: i32 = 0;
    let mut cpus_size = std::mem::size_of_val(&cpus);
    // NUL-terminated sysctl name; the expect can only fire on a typo here.
    let sysctl_name = CStr::from_bytes_with_nul(b"hw.physicalcpu\0")
        .expect("byte literal is missing NUL");
    unsafe {
        // Non-zero return means the sysctl failed; fall back to the
        // logical count in that case.
        if 0 != libc::sysctlbyname(sysctl_name.as_ptr(),
                                   &mut cpus as *mut _ as *mut _,
                                   &mut cpus_size as *mut _ as *mut _,
                                   ptr::null_mut(),
                                   0) {
            return get_num_cpus();
        }
    }
    cpus as usize
}
#[cfg(target_os = "linux")]
// Logical CPU count on Linux, honoring the thread's CPU affinity mask.
fn get_num_cpus() -> usize {
    // Count the set bits in the affinity mask returned by sched_getaffinity,
    // so a process restricted to a CPU subset reports only that subset.
    let mut set: libc::cpu_set_t = unsafe { std::mem::zeroed() };
    if unsafe { libc::sched_getaffinity(0, std::mem::size_of::<libc::cpu_set_t>(), &mut set) } == 0 {
        let mut count: u32 = 0;
        // NOTE(review): this caps at CPU_SETSIZE (typically 1024) CPUs.
        for i in 0..libc::CPU_SETSIZE as usize {
            if unsafe { libc::CPU_ISSET(i, &set) } {
                count += 1
            }
        }
        count as usize
    } else {
        // Affinity query failed; fall back to the online-processor count.
        let cpus = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) };
        if cpus < 1 {
            1
        } else {
            cpus as usize
        }
    }
}
#[cfg(any(
    target_os = "nacl",
    target_os = "macos",
    target_os = "ios",
    target_os = "android",
    target_os = "solaris",
    target_os = "illumos",
    target_os = "fuchsia")
)]
// Logical CPU count on the remaining Unix-like targets via sysconf.
fn get_num_cpus() -> usize {
    // On ARM targets, processors could be turned off to save power.
    // Use `_SC_NPROCESSORS_CONF` to get the real number.
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    const CONF_NAME: libc::c_int = libc::_SC_NPROCESSORS_CONF;
    #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
    const CONF_NAME: libc::c_int = libc::_SC_NPROCESSORS_ONLN;
    let cpus = unsafe { libc::sysconf(CONF_NAME) };
    // sysconf returns -1 on error; clamp to at least one CPU.
    if cpus < 1 {
        1
    } else {
        cpus as usize
    }
}
#[cfg(target_os = "haiku")]
// Logical CPU count on Haiku via the native `get_system_info` call.
fn get_num_cpus() -> usize {
    use std::mem;
    #[allow(non_camel_case_types)]
    type bigtime_t = i64;
    #[allow(non_camel_case_types)]
    type status_t = i32;
    // Mirrors Haiku's `system_info` struct layout; only `cpu_count` is read.
    #[repr(C)]
    pub struct system_info {
        pub boot_time: bigtime_t,
        pub cpu_count: u32,
        pub max_pages: u64,
        pub used_pages: u64,
        pub cached_pages: u64,
        pub block_cache_pages: u64,
        pub ignored_pages: u64,
        pub needed_memory: u64,
        pub free_memory: u64,
        pub max_swap_pages: u64,
        pub free_swap_pages: u64,
        pub page_faults: u32,
        pub max_sems: u32,
        pub used_sems: u32,
        pub max_ports: u32,
        pub used_ports: u32,
        pub max_threads: u32,
        pub used_threads: u32,
        pub max_teams: u32,
        pub used_teams: u32,
        pub kernel_name: [::std::os::raw::c_char; 256usize],
        pub kernel_build_date: [::std::os::raw::c_char; 32usize],
        pub kernel_build_time: [::std::os::raw::c_char; 32usize],
        pub kernel_version: i64,
        pub abi: u32,
    }
    extern {
        fn get_system_info(info: *mut system_info) -> status_t;
    }
    // SAFETY: system_info consists solely of integers and c_char arrays, so
    // the all-zero bit pattern is a valid value; `zeroed` avoids the
    // undefined behavior risked by the deprecated `mem::uninitialized`.
    let mut info: system_info = unsafe { mem::zeroed() };
    let status = unsafe { get_system_info(&mut info as *mut _) };
    if status == 0 {
        info.cpu_count as usize
    } else {
        // The call failed; report a single CPU as a safe minimum.
        1
    }
}
#[cfg(not(any(
    target_os = "nacl",
    target_os = "macos",
    target_os = "ios",
    target_os = "android",
    target_os = "solaris",
    target_os = "illumos",
    target_os = "fuchsia",
    target_os = "linux",
    target_os = "openbsd",
    target_os = "freebsd",
    target_os = "dragonfly",
    target_os = "netbsd",
    target_os = "haiku",
    windows,
)))]
// No platform-specific probe available; conservatively assume one CPU.
fn get_num_cpus() -> usize {
    1
}
#[cfg(test)]
mod tests {
    // Reads an override from the environment, used by CI to pin the expected
    // CPU counts; panics (as before) if the value is not a valid usize.
    fn env_var(name: &'static str) -> Option<usize> {
        match ::std::env::var(name) {
            Ok(val) => Some(val.parse().unwrap()),
            Err(_) => None,
        }
    }

    #[test]
    fn test_get() {
        let num = super::get();
        match env_var("NUM_CPUS_TEST_GET") {
            Some(expected) => assert_eq!(num, expected),
            None => {
                // Sanity bounds: at least one CPU, and nothing absurd.
                assert!(num > 0);
                assert!(num < 236_451);
            }
        }
    }

    #[test]
    fn test_get_physical() {
        let num = super::get_physical();
        match env_var("NUM_CPUS_TEST_GET_PHYSICAL") {
            Some(expected) => assert_eq!(num, expected),
            None => {
                // Sanity bounds: at least one CPU, and nothing absurd.
                assert!(num > 0);
                assert!(num < 236_451);
            }
        }
    }
}
// Refactor get_physical to sum 'cpu cores' per 'physical id' (#86)
// Fixes get_physical for AMD ThreadRipper
//! A crate with utilities to determine the number of CPUs available on the
//! current system.
//!
//! Sometimes the CPU will exaggerate the number of CPUs it contains, because it can use
//! [processor tricks] to deliver increased performance when there are more threads. This
//! crate provides methods to get both the logical and physical numbers of cores.
//!
//! This information can be used as a guide to how many tasks can be run in parallel.
//! There are many properties of the system architecture that will affect parallelism,
//! for example memory access speeds (for all the caches and RAM) and the physical
//! architecture of the processor, so the number of CPUs should be used as a rough guide
//! only.
//!
//!
//! ## Examples
//!
//! Fetch the number of logical CPUs.
//!
//! ```
//! let cpus = num_cpus::get();
//! ```
//!
//! See [`rayon::ThreadPool`] for an example of where the number of CPUs could be
//! used when setting up parallel jobs (Where the threadpool example uses a fixed
//! number 8, it could use the number of CPUs).
//!
//! [processor tricks]: https://en.wikipedia.org/wiki/Simultaneous_multithreading
//! [`rayon::ThreadPool`]: https://docs.rs/rayon/1.*/rayon/struct.ThreadPool.html
#![cfg_attr(test, deny(warnings))]
#![deny(missing_docs)]
#![doc(html_root_url = "https://docs.rs/num_cpus/1.10.1")]
#![allow(non_snake_case)]
#[cfg(not(windows))]
extern crate libc;
#[cfg(test)]
#[macro_use]
extern crate doc_comment;
#[cfg(test)]
doctest!("../README.md");
/// Returns the number of available CPUs of the current system.
///
/// This function will get the number of logical cores. Sometimes this is different from the number
/// of physical cores (See [Simultaneous multithreading on Wikipedia][smt]).
///
/// # Examples
///
/// ```
/// let cpus = num_cpus::get();
/// if cpus > 1 {
/// println!("We are on a multicore system with {} CPUs", cpus);
/// } else {
/// println!("We are on a single core system");
/// }
/// ```
///
/// # Note
///
/// This will check [sched affinity] on Linux, showing a lower number of CPUs if the current
/// thread does not have access to all the computer's CPUs.
///
/// [smt]: https://en.wikipedia.org/wiki/Simultaneous_multithreading
/// [sched affinity]: http://www.gnu.org/software/libc/manual/html_node/CPU-Affinity.html
#[inline]
pub fn get() -> usize {
    // Delegates to the platform-specific implementation selected by cfg.
    get_num_cpus()
}
/// Returns the number of physical cores of the current system.
///
/// # Note
///
/// Physical count is supported only on Linux, mac OS and Windows platforms.
/// On other platforms, or if the physical count fails on supported platforms,
/// this function returns the same as [`get()`], which is the number of logical
/// CPUS.
///
/// # Examples
///
/// ```
/// let logical_cpus = num_cpus::get();
/// let physical_cpus = num_cpus::get_physical();
/// if logical_cpus > physical_cpus {
/// println!("We have simultaneous multithreading with about {:.2} \
/// logical cores to 1 physical core.",
/// (logical_cpus as f64) / (physical_cpus as f64));
/// } else if logical_cpus == physical_cpus {
/// println!("Either we don't have simultaneous multithreading, or our \
/// system doesn't support getting the number of physical CPUs.");
/// } else {
/// println!("We have less logical CPUs than physical CPUs, maybe we only have access to \
/// some of the CPUs on our system.");
/// }
/// ```
///
/// [`get()`]: fn.get.html
#[inline]
pub fn get_physical() -> usize {
    // Delegates to the platform-specific implementation; platforms without
    // one fall back to the logical count (see the cfg-gated definitions).
    get_num_physical_cpus()
}
#[cfg(not(any(target_os = "linux", target_os = "windows", target_os="macos")))]
#[inline]
fn get_num_physical_cpus() -> usize {
    // Not implemented, fall back
    // Physical-core detection only exists for Linux/Windows/macOS; elsewhere
    // the logical CPU count is the best available answer.
    get_num_cpus()
}
#[cfg(target_os = "windows")]
// Physical core count on Windows; falls back to the logical count whenever
// the Win32 query cannot produce a usable answer.
fn get_num_physical_cpus() -> usize {
    get_num_physical_cpus_windows().unwrap_or_else(get_num_cpus)
}
#[cfg(target_os = "windows")]
// Queries the physical-core count via GetLogicalProcessorInformation.
// Returns None when the API fails or reports nothing usable, letting the
// caller fall back to the logical count.
fn get_num_physical_cpus_windows() -> Option<usize> {
    // Inspired by https://msdn.microsoft.com/en-us/library/ms683194
    use std::ptr;
    use std::mem;

    #[allow(non_upper_case_globals)]
    const RelationProcessorCore: u32 = 0;

    // Minimal mirror of the Win32 SYSTEM_LOGICAL_PROCESSOR_INFORMATION
    // layout; only `relationship` is inspected below.
    #[repr(C)]
    #[allow(non_camel_case_types)]
    struct SYSTEM_LOGICAL_PROCESSOR_INFORMATION {
        mask: usize,
        relationship: u32,
        _unused: [u64; 2]
    }

    extern "system" {
        fn GetLogicalProcessorInformation(
            info: *mut SYSTEM_LOGICAL_PROCESSOR_INFORMATION,
            length: &mut u32
        ) -> u32;
    }

    // First we need to determine how much space to reserve.
    // The required size of the buffer, in bytes.
    let mut needed_size = 0;
    // A null-buffer call only fills in `needed_size`.
    unsafe {
        GetLogicalProcessorInformation(ptr::null_mut(), &mut needed_size);
    }
    let struct_size = mem::size_of::<SYSTEM_LOGICAL_PROCESSOR_INFORMATION>() as u32;
    // Could be 0, or some other bogus size.
    if needed_size == 0 || needed_size < struct_size || needed_size % struct_size != 0 {
        return None;
    }
    let count = needed_size / struct_size;
    // Allocate some memory where we will store the processor info.
    let mut buf = Vec::with_capacity(count as usize);
    let result;
    unsafe {
        result = GetLogicalProcessorInformation(buf.as_mut_ptr(), &mut needed_size);
    }
    // Failed for any reason.
    if result == 0 {
        return None;
    }
    let count = needed_size / struct_size;
    // SAFETY: the call succeeded, so the first `count` entries were written.
    unsafe {
        buf.set_len(count as usize);
    }
    // Each RelationProcessorCore record describes one physical core (not a
    // processor package, despite the wording in older comments).
    let phys_proc_count = buf.iter()
        .filter(|proc_info| proc_info.relationship == RelationProcessorCore)
        .count();
    if phys_proc_count == 0 {
        None
    } else {
        Some(phys_proc_count)
    }
}
#[cfg(target_os = "linux")]
// Physical core count, derived from `/proc/cpuinfo` by summing the
// `cpu cores` value reported for each distinct `physical id` (package).
fn get_num_physical_cpus() -> usize {
    use std::collections::HashMap;
    use std::fs::File;
    use std::io::{BufRead, BufReader};

    let cpuinfo = match File::open("/proc/cpuinfo") {
        Ok(f) => f,
        // Unreadable cpuinfo: fall back to the logical count.
        Err(_) => return get_num_cpus(),
    };

    // physical id -> number of cores in that package.
    let mut cores_per_package: HashMap<u32, usize> = HashMap::new();
    let mut package_id: u32 = 0;
    let mut core_count: usize = 0;
    let mut fields_seen = 0;

    for line in BufReader::new(cpuinfo).lines().filter_map(Result::ok) {
        let mut halves = line.split(':');
        let (key, value) = match (halves.next(), halves.next()) {
            (Some(k), Some(v)) => (k.trim(), v.trim()),
            _ => continue,
        };
        match key {
            "physical id" => match value.parse() {
                Ok(id) => {
                    package_id = id;
                    fields_seen += 1;
                }
                Err(_) => break,
            },
            "cpu cores" => match value.parse() {
                Ok(n) => {
                    core_count = n;
                    fields_seen += 1;
                }
                Err(_) => break,
            },
            _ => {}
        }
        // Record once both fields of the current processor entry are seen.
        if fields_seen == 2 {
            cores_per_package.insert(package_id, core_count);
            fields_seen = 0;
        }
    }

    let total: usize = cores_per_package.values().sum();
    // Zero means the fields were missing; fall back to the logical count.
    if total == 0 {
        get_num_cpus()
    } else {
        total
    }
}
#[cfg(windows)]
// Logical CPU count on Windows, via the Win32 `GetSystemInfo` API.
fn get_num_cpus() -> usize {
    // Minimal mirror of the Win32 SYSTEM_INFO layout; only
    // `dwNumberOfProcessors` is read after the call.
    #[repr(C)]
    struct SYSTEM_INFO {
        wProcessorArchitecture: u16,
        wReserved: u16,
        dwPageSize: u32,
        lpMinimumApplicationAddress: *mut u8,
        lpMaximumApplicationAddress: *mut u8,
        dwActiveProcessorMask: *mut u8,
        dwNumberOfProcessors: u32,
        dwProcessorType: u32,
        dwAllocationGranularity: u32,
        wProcessorLevel: u16,
        wProcessorRevision: u16,
    }
    extern "system" {
        fn GetSystemInfo(lpSystemInfo: *mut SYSTEM_INFO);
    }
    unsafe {
        // SAFETY: SYSTEM_INFO is plain-old-data (integers and raw pointers),
        // for which the all-zero bit pattern is valid, so `zeroed` is sound —
        // unlike the deprecated, UB-prone `mem::uninitialized` used before.
        // GetSystemInfo then overwrites the struct unconditionally.
        let mut sysinfo: SYSTEM_INFO = std::mem::zeroed();
        GetSystemInfo(&mut sysinfo);
        sysinfo.dwNumberOfProcessors as usize
    }
}
#[cfg(any(target_os = "freebsd",
          target_os = "dragonfly",
          target_os = "netbsd"))]
// Logical CPU count on FreeBSD/DragonFly/NetBSD: prefer sysconf's
// online-processor count, falling back to the HW_NCPU sysctl.
fn get_num_cpus() -> usize {
    use std::ptr;
    let mut cpus: libc::c_uint = 0;
    let mut cpus_size = std::mem::size_of_val(&cpus);
    unsafe {
        cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
    }
    if cpus < 1 {
        let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
        unsafe {
            // Only the first two mib levels are meaningful (namelen = 2).
            libc::sysctl(mib.as_mut_ptr(),
                         2,
                         &mut cpus as *mut _ as *mut _,
                         &mut cpus_size as *mut _ as *mut _,
                         ptr::null_mut(),
                         0);
        }
        // Never report fewer than one CPU.
        if cpus < 1 {
            cpus = 1;
        }
    }
    cpus as usize
}
#[cfg(target_os = "openbsd")]
// Logical CPU count on OpenBSD via the HW_NCPU sysctl.
fn get_num_cpus() -> usize {
    use std::ptr;
    let mut cpus: libc::c_uint = 0;
    let mut cpus_size = std::mem::size_of_val(&cpus);
    let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
    unsafe {
        // Only the first two mib levels are meaningful (namelen = 2).
        libc::sysctl(mib.as_mut_ptr(),
                     2,
                     &mut cpus as *mut _ as *mut _,
                     &mut cpus_size as *mut _ as *mut _,
                     ptr::null_mut(),
                     0);
    }
    // Guard against a failed or zero result.
    if cpus < 1 {
        cpus = 1;
    }
    cpus as usize
}
#[cfg(target_os = "macos")]
// Physical core count on macOS via the `hw.physicalcpu` sysctl.
fn get_num_physical_cpus() -> usize {
    use std::ffi::CStr;
    use std::ptr;
    let mut cpus: i32 = 0;
    let mut cpus_size = std::mem::size_of_val(&cpus);
    // NUL-terminated sysctl name; the expect can only fire on a typo here.
    let sysctl_name = CStr::from_bytes_with_nul(b"hw.physicalcpu\0")
        .expect("byte literal is missing NUL");
    unsafe {
        // Non-zero return means the sysctl failed; fall back to the
        // logical count in that case.
        if 0 != libc::sysctlbyname(sysctl_name.as_ptr(),
                                   &mut cpus as *mut _ as *mut _,
                                   &mut cpus_size as *mut _ as *mut _,
                                   ptr::null_mut(),
                                   0) {
            return get_num_cpus();
        }
    }
    cpus as usize
}
#[cfg(target_os = "linux")]
// Logical CPU count on Linux, honoring the thread's CPU affinity mask.
fn get_num_cpus() -> usize {
    // Count the set bits in the affinity mask returned by sched_getaffinity,
    // so a process restricted to a CPU subset reports only that subset.
    let mut set: libc::cpu_set_t = unsafe { std::mem::zeroed() };
    if unsafe { libc::sched_getaffinity(0, std::mem::size_of::<libc::cpu_set_t>(), &mut set) } == 0 {
        let mut count: u32 = 0;
        // NOTE(review): this caps at CPU_SETSIZE (typically 1024) CPUs.
        for i in 0..libc::CPU_SETSIZE as usize {
            if unsafe { libc::CPU_ISSET(i, &set) } {
                count += 1
            }
        }
        count as usize
    } else {
        // Affinity query failed; fall back to the online-processor count.
        let cpus = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) };
        if cpus < 1 {
            1
        } else {
            cpus as usize
        }
    }
}
#[cfg(any(
    target_os = "nacl",
    target_os = "macos",
    target_os = "ios",
    target_os = "android",
    target_os = "solaris",
    target_os = "illumos",
    target_os = "fuchsia")
)]
// Logical CPU count on the remaining Unix-like targets via sysconf.
fn get_num_cpus() -> usize {
    // On ARM targets, processors could be turned off to save power.
    // Use `_SC_NPROCESSORS_CONF` to get the real number.
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    const CONF_NAME: libc::c_int = libc::_SC_NPROCESSORS_CONF;
    #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
    const CONF_NAME: libc::c_int = libc::_SC_NPROCESSORS_ONLN;
    let cpus = unsafe { libc::sysconf(CONF_NAME) };
    // sysconf returns -1 on error; clamp to at least one CPU.
    if cpus < 1 {
        1
    } else {
        cpus as usize
    }
}
#[cfg(target_os = "haiku")]
// Logical CPU count on Haiku via the native `get_system_info` call.
fn get_num_cpus() -> usize {
    use std::mem;
    #[allow(non_camel_case_types)]
    type bigtime_t = i64;
    #[allow(non_camel_case_types)]
    type status_t = i32;
    // Mirrors Haiku's `system_info` struct layout; only `cpu_count` is read.
    #[repr(C)]
    pub struct system_info {
        pub boot_time: bigtime_t,
        pub cpu_count: u32,
        pub max_pages: u64,
        pub used_pages: u64,
        pub cached_pages: u64,
        pub block_cache_pages: u64,
        pub ignored_pages: u64,
        pub needed_memory: u64,
        pub free_memory: u64,
        pub max_swap_pages: u64,
        pub free_swap_pages: u64,
        pub page_faults: u32,
        pub max_sems: u32,
        pub used_sems: u32,
        pub max_ports: u32,
        pub used_ports: u32,
        pub max_threads: u32,
        pub used_threads: u32,
        pub max_teams: u32,
        pub used_teams: u32,
        pub kernel_name: [::std::os::raw::c_char; 256usize],
        pub kernel_build_date: [::std::os::raw::c_char; 32usize],
        pub kernel_build_time: [::std::os::raw::c_char; 32usize],
        pub kernel_version: i64,
        pub abi: u32,
    }
    extern {
        fn get_system_info(info: *mut system_info) -> status_t;
    }
    // SAFETY: system_info consists solely of integers and c_char arrays, so
    // the all-zero bit pattern is a valid value; `zeroed` avoids the
    // undefined behavior risked by the deprecated `mem::uninitialized`.
    let mut info: system_info = unsafe { mem::zeroed() };
    let status = unsafe { get_system_info(&mut info as *mut _) };
    if status == 0 {
        info.cpu_count as usize
    } else {
        // The call failed; report a single CPU as a safe minimum.
        1
    }
}
#[cfg(not(any(
    target_os = "nacl",
    target_os = "macos",
    target_os = "ios",
    target_os = "android",
    target_os = "solaris",
    target_os = "illumos",
    target_os = "fuchsia",
    target_os = "linux",
    target_os = "openbsd",
    target_os = "freebsd",
    target_os = "dragonfly",
    target_os = "netbsd",
    target_os = "haiku",
    windows,
)))]
// No platform-specific probe available; conservatively assume one CPU.
fn get_num_cpus() -> usize {
    1
}
#[cfg(test)]
mod tests {
    // Reads an override from the environment, used by CI to pin the expected
    // CPU counts; panics (as before) if the value is not a valid usize.
    fn env_var(name: &'static str) -> Option<usize> {
        match ::std::env::var(name) {
            Ok(val) => Some(val.parse().unwrap()),
            Err(_) => None,
        }
    }

    #[test]
    fn test_get() {
        let num = super::get();
        match env_var("NUM_CPUS_TEST_GET") {
            Some(expected) => assert_eq!(num, expected),
            None => {
                // Sanity bounds: at least one CPU, and nothing absurd.
                assert!(num > 0);
                assert!(num < 236_451);
            }
        }
    }

    #[test]
    fn test_get_physical() {
        let num = super::get_physical();
        match env_var("NUM_CPUS_TEST_GET_PHYSICAL") {
            Some(expected) => assert_eq!(num, expected),
            None => {
                // Sanity bounds: at least one CPU, and nothing absurd.
                assert!(num > 0);
                assert!(num < 236_451);
            }
        }
    }
}
|
//! The FFI bindings.
//!
//! See the [C API docs](http://emcrisostomo.github.io/fswatch/doc/1.9.3/libfswatch.html/libfswatch_8h.html).
#![allow(non_camel_case_types)]
#![feature(const_fn)]
extern crate libc;
#[macro_use]
extern crate cfg_if;
use libc::{c_int, c_uint, c_void, c_double, c_char, time_t};
#[link(name = "fswatch")]
extern "C" {
    // --- Library / session lifecycle ---
    pub fn fsw_init_library() -> FSW_STATUS;
    pub fn fsw_init_session(monitor_type: fsw_monitor_type) -> FSW_HANDLE;
    // --- Session configuration (must happen before fsw_start_monitor) ---
    pub fn fsw_add_path(handle: FSW_HANDLE, path: *const c_char) -> FSW_STATUS;
    pub fn fsw_add_property(handle: FSW_HANDLE, name: *const c_char, value: *const c_char) -> FSW_STATUS;
    pub fn fsw_set_allow_overflow(handle: FSW_HANDLE, allow_overflow: bool) -> FSW_STATUS;
    pub fn fsw_set_callback(handle: FSW_HANDLE, callback: FSW_CEVENT_CALLBACK, data: *const c_void) -> FSW_STATUS;
    pub fn fsw_set_latency(handle: FSW_HANDLE, latency: c_double) -> FSW_STATUS;
    pub fn fsw_set_recursive(handle: FSW_HANDLE, recursive: bool) -> FSW_STATUS;
    pub fn fsw_set_directory_only(handle: FSW_HANDLE, directory_only: bool) -> FSW_STATUS;
    pub fn fsw_set_follow_symlinks(handle: FSW_HANDLE, follow_symlinks: bool) -> FSW_STATUS;
    pub fn fsw_add_event_type_filter(handle: FSW_HANDLE, event_type: fsw_event_type_filter) -> FSW_STATUS;
    pub fn fsw_add_filter(handle: FSW_HANDLE, filter: fsw_cmonitor_filter) -> FSW_STATUS;
    // --- Monitoring ---
    pub fn fsw_start_monitor(handle: FSW_HANDLE) -> FSW_STATUS;
    // Only available when linking against libfswatch >= 1.10.0.
    #[cfg(feature = "1_10_0")]
    pub fn fsw_stop_monitor(handle: FSW_HANDLE) -> FSW_STATUS;
    pub fn fsw_destroy_session(handle: FSW_HANDLE) -> FSW_STATUS;
    // --- Error reporting / verbosity ---
    pub fn fsw_last_error() -> FSW_STATUS;
    pub fn fsw_is_verbose() -> bool;
    pub fn fsw_set_verbose(verbose: bool);
}
cfg_if! {
    if #[cfg(feature = "1_10_0")] {
        // libfswatch >= 1.10.0 hands out opaque session pointers.
        pub enum FSW_SESSION {}
        pub type FSW_HANDLE = *mut FSW_SESSION;
        pub const FSW_INVALID_HANDLE: FSW_HANDLE = std::ptr::null_mut();
    } else {
        // Older releases identify sessions by a plain integer handle.
        pub type FSW_HANDLE = c_int;
        pub const FSW_INVALID_HANDLE: FSW_HANDLE = -1;
    }
}
// Status code returned by every fallible libfswatch call; FSW_OK is success,
// the FSW_ERR_* constants below are the documented failure codes.
pub type FSW_STATUS = c_int;
// Callback invoked with a C array of `event_num` events plus the user-data
// pointer registered via fsw_set_callback.
pub type FSW_CEVENT_CALLBACK = extern fn(events: *const fsw_cevent, event_num: c_uint, data: *mut c_void);
pub const FSW_OK: FSW_STATUS = 0;
pub const FSW_ERR_UNKNOWN_ERROR: FSW_STATUS = 1;
pub const FSW_ERR_SESSION_UNKNOWN: FSW_STATUS = (1 << 1);
pub const FSW_ERR_MONITOR_ALREADY_EXISTS: FSW_STATUS = (1 << 2);
pub const FSW_ERR_MEMORY: FSW_STATUS = (1 << 3);
pub const FSW_ERR_UNKNOWN_MONITOR_TYPE: FSW_STATUS = (1 << 4);
pub const FSW_ERR_CALLBACK_NOT_SET: FSW_STATUS = (1 << 5);
pub const FSW_ERR_PATHS_NOT_SET: FSW_STATUS = (1 << 6);
pub const FSW_ERR_MISSING_CONTEXT: FSW_STATUS = (1 << 7);
pub const FSW_ERR_INVALID_PATH: FSW_STATUS = (1 << 8);
pub const FSW_ERR_INVALID_CALLBACK: FSW_STATUS = (1 << 9);
pub const FSW_ERR_INVALID_LATENCY: FSW_STATUS = (1 << 10);
pub const FSW_ERR_INVALID_REGEX: FSW_STATUS = (1 << 11);
pub const FSW_ERR_MONITOR_ALREADY_RUNNING: FSW_STATUS = (1 << 12);
pub const FSW_ERR_UNKNOWN_VALUE: FSW_STATUS = (1 << 13);
pub const FSW_ERR_INVALID_PROPERTY: FSW_STATUS = (1 << 14);
// C-layout wrapper passed to fsw_add_event_type_filter: selects one event
// flag to monitor for.
#[repr(C)]
pub struct fsw_event_type_filter {
    pub flag: fsw_event_flag
}
// C-layout path filter passed to fsw_add_filter: `text` is the pattern,
// interpreted per `filter_type` (include/exclude) and the two flags.
#[repr(C)]
pub struct fsw_cmonitor_filter {
    pub text: *const c_char,
    pub filter_type: fsw_filter_type,
    pub case_sensitive: bool,
    pub extended: bool
}
// C-layout event record delivered to the callback: a path, a timestamp, and
// a C array of `flags_num` event flags.
#[repr(C)]
pub struct fsw_cevent {
    pub path: *const c_char,
    pub evt_time: time_t,
    pub flags: *const fsw_event_flag,
    pub flags_num: c_uint
}
// Bit-flag event kinds mirroring libfswatch's fsw_event_flag enum; each
// variant occupies a distinct bit so values can be combined on the C side.
#[repr(u32)]
pub enum fsw_event_flag {
    NoOp = 0,
    PlatformSpecific = 1,
    Created = (1 << 1),
    Updated = (1 << 2),
    Removed = (1 << 3),
    Renamed = (1 << 4),
    OwnerModified = (1 << 5),
    AttributeModified = (1 << 6),
    MovedFrom = (1 << 7),
    MovedTo = (1 << 8),
    IsFile = (1 << 9),
    IsDir = (1 << 10),
    IsSymLink = (1 << 11),
    Link = (1 << 12),
    Overflow = (1 << 13)
}
// Whether a fsw_cmonitor_filter includes or excludes matching paths.
#[repr(C)]
pub enum fsw_filter_type {
    filter_include,
    filter_exclude
}
// Backend monitor implementations selectable in fsw_init_session; the
// system_default variant lets libfswatch pick the platform's native one.
#[repr(C)]
pub enum fsw_monitor_type {
    system_default_monitor_type,
    fsevents_monitor_type,
    kqueue_monitor_type,
    inotify_monitor_type,
    windows_monitor_type,
    poll_monitor_type,
    fen_monitor_type
}
// Support compilation on stable Rust channels
//! The FFI bindings.
//!
//! See the [C API docs](http://emcrisostomo.github.io/fswatch/doc/1.9.3/libfswatch.html/libfswatch_8h.html).
#![allow(non_camel_case_types)]
extern crate libc;
#[macro_use]
extern crate cfg_if;
use libc::{c_int, c_uint, c_void, c_double, c_char, time_t};
#[link(name = "fswatch")]
extern "C" {
    /// Initializes the libfswatch library.
    pub fn fsw_init_library() -> FSW_STATUS;
    /// Creates a new monitoring session using the given backend.
    pub fn fsw_init_session(monitor_type: fsw_monitor_type) -> FSW_HANDLE;
    /// Adds a path for the session to watch.
    pub fn fsw_add_path(handle: FSW_HANDLE, path: *const c_char) -> FSW_STATUS;
    /// Sets a named session property.
    pub fn fsw_add_property(handle: FSW_HANDLE, name: *const c_char, value: *const c_char) -> FSW_STATUS;
    /// Enables or disables reporting of queue-overflow events.
    pub fn fsw_set_allow_overflow(handle: FSW_HANDLE, allow_overflow: bool) -> FSW_STATUS;
    /// Registers the event callback; `data` is passed back to it verbatim.
    pub fn fsw_set_callback(handle: FSW_HANDLE, callback: FSW_CEVENT_CALLBACK, data: *const c_void) -> FSW_STATUS;
    /// Sets the monitor latency (see the C API docs for semantics).
    pub fn fsw_set_latency(handle: FSW_HANDLE, latency: c_double) -> FSW_STATUS;
    /// Enables or disables recursive watching of subdirectories.
    pub fn fsw_set_recursive(handle: FSW_HANDLE, recursive: bool) -> FSW_STATUS;
    /// Restricts events to directories only.
    pub fn fsw_set_directory_only(handle: FSW_HANDLE, directory_only: bool) -> FSW_STATUS;
    /// Enables or disables following of symbolic links.
    pub fn fsw_set_follow_symlinks(handle: FSW_HANDLE, follow_symlinks: bool) -> FSW_STATUS;
    /// Adds an event-type filter to the session.
    pub fn fsw_add_event_type_filter(handle: FSW_HANDLE, event_type: fsw_event_type_filter) -> FSW_STATUS;
    /// Adds a path filter to the session.
    pub fn fsw_add_filter(handle: FSW_HANDLE, filter: fsw_cmonitor_filter) -> FSW_STATUS;
    /// Starts the monitor; this call blocks and drives the callback.
    pub fn fsw_start_monitor(handle: FSW_HANDLE) -> FSW_STATUS;
    // Only available starting with libfswatch 1.10.0.
    #[cfg(feature = "1_10_0")]
    pub fn fsw_stop_monitor(handle: FSW_HANDLE) -> FSW_STATUS;
    /// Destroys the session and releases its resources.
    pub fn fsw_destroy_session(handle: FSW_HANDLE) -> FSW_STATUS;
    /// Returns the status code of the last error.
    pub fn fsw_last_error() -> FSW_STATUS;
    /// Queries the library's verbose flag.
    pub fn fsw_is_verbose() -> bool;
    /// Sets the library's verbose flag.
    pub fn fsw_set_verbose(verbose: bool);
}
// libfswatch 1.10.0 changed session handles from plain integers to opaque
// pointers; mirror whichever ABI the enabled feature selects.
cfg_if! {
    if #[cfg(feature = "1_10_0")] {
        /// Opaque session type; only ever used behind a pointer.
        pub enum FSW_SESSION {}
        pub type FSW_HANDLE = *mut FSW_SESSION;
        pub const FSW_INVALID_HANDLE: FSW_HANDLE = std::ptr::null_mut();
    } else {
        pub type FSW_HANDLE = c_int;
        pub const FSW_INVALID_HANDLE: FSW_HANDLE = -1;
    }
}
/// Status code returned by most libfswatch functions (`FSW_OK` on success).
pub type FSW_STATUS = c_int;
/// Signature of the event callback registered via `fsw_set_callback`.
pub type FSW_CEVENT_CALLBACK = extern fn(events: *const fsw_cevent, event_num: c_uint, data: *mut c_void);
// Status codes; each error occupies its own bit.
pub const FSW_OK: FSW_STATUS = 0;
pub const FSW_ERR_UNKNOWN_ERROR: FSW_STATUS = 1;
pub const FSW_ERR_SESSION_UNKNOWN: FSW_STATUS = (1 << 1);
pub const FSW_ERR_MONITOR_ALREADY_EXISTS: FSW_STATUS = (1 << 2);
pub const FSW_ERR_MEMORY: FSW_STATUS = (1 << 3);
pub const FSW_ERR_UNKNOWN_MONITOR_TYPE: FSW_STATUS = (1 << 4);
pub const FSW_ERR_CALLBACK_NOT_SET: FSW_STATUS = (1 << 5);
pub const FSW_ERR_PATHS_NOT_SET: FSW_STATUS = (1 << 6);
pub const FSW_ERR_MISSING_CONTEXT: FSW_STATUS = (1 << 7);
pub const FSW_ERR_INVALID_PATH: FSW_STATUS = (1 << 8);
pub const FSW_ERR_INVALID_CALLBACK: FSW_STATUS = (1 << 9);
pub const FSW_ERR_INVALID_LATENCY: FSW_STATUS = (1 << 10);
pub const FSW_ERR_INVALID_REGEX: FSW_STATUS = (1 << 11);
pub const FSW_ERR_MONITOR_ALREADY_RUNNING: FSW_STATUS = (1 << 12);
pub const FSW_ERR_UNKNOWN_VALUE: FSW_STATUS = (1 << 13);
pub const FSW_ERR_INVALID_PROPERTY: FSW_STATUS = (1 << 14);
/// Filter restricting a monitor to a single event type.
/// `#[repr(C)]`: layout must match the C struct of the same name.
#[repr(C)]
pub struct fsw_event_type_filter {
    pub flag: fsw_event_flag
}
/// Path filter passed to `fsw_add_filter`.
/// `#[repr(C)]`: layout must match the C struct of the same name.
#[repr(C)]
pub struct fsw_cmonitor_filter {
    /// Filter text (path or regular expression), NUL-terminated.
    pub text: *const c_char,
    /// Whether matching paths are included or excluded.
    pub filter_type: fsw_filter_type,
    pub case_sensitive: bool,
    /// Whether `text` is an extended regular expression.
    pub extended: bool
}
/// A single change event as delivered to the session callback.
/// `#[repr(C)]`: layout must match the C struct of the same name.
#[repr(C)]
pub struct fsw_cevent {
    /// Affected path, NUL-terminated; owned by the library.
    pub path: *const c_char,
    /// Time of the event.
    pub evt_time: time_t,
    /// Pointer to an array of `flags_num` event flags.
    pub flags: *const fsw_event_flag,
    pub flags_num: c_uint
}
/// Kind of filesystem change carried by an event. Discriminants are bit
/// flags and must match the C enum `fsw_event_flag`.
#[repr(u32)]
pub enum fsw_event_flag {
    NoOp = 0,
    PlatformSpecific = 1,
    Created = (1 << 1),
    Updated = (1 << 2),
    Removed = (1 << 3),
    Renamed = (1 << 4),
    OwnerModified = (1 << 5),
    AttributeModified = (1 << 6),
    MovedFrom = (1 << 7),
    MovedTo = (1 << 8),
    IsFile = (1 << 9),
    IsDir = (1 << 10),
    IsSymLink = (1 << 11),
    Link = (1 << 12),
    Overflow = (1 << 13)
}
/// Whether a monitor filter includes or excludes matches.
#[repr(C)]
pub enum fsw_filter_type {
    filter_include,
    filter_exclude
}
/// Monitoring backend selection; availability is platform-dependent
/// (names kept as-is to match the C enum `fsw_monitor_type`).
#[repr(C)]
pub enum fsw_monitor_type {
    system_default_monitor_type,
    fsevents_monitor_type,
    kqueue_monitor_type,
    inotify_monitor_type,
    windows_monitor_type,
    poll_monitor_type,
    fen_monitor_type
}
|
extern crate json_schema;
extern crate serde_json;
#[macro_use]
extern crate quote;
use std::borrow::Cow;
use json_schema::{Schema, Type};
use quote::{Tokens, ToTokens};
/// Wraps anything string-like so it is appended to the token stream as a
/// raw token (identifier) rather than as a string literal.
struct Ident<S>(S);

impl<S: AsRef<str>> ToTokens for Ident<S> {
    fn to_tokens(&self, tokens: &mut Tokens) {
        let raw = self.0.as_ref();
        tokens.append(raw)
    }
}
/// Turn a JSON property name into a usable Rust field identifier,
/// appending `_` when the name collides with a Rust keyword.
fn field(s: &str) -> Ident<Cow<str>> {
    let is_keyword = ["type", "struct", "enum"].contains(&s);
    let name: Cow<str> = if is_keyword {
        Cow::Owned(format!("{}_", s))
    } else {
        Cow::Borrowed(s)
    };
    Ident(name)
}
/// Recursively fold the properties of `r` into `result`: properties
/// missing from `result` are inserted, properties present in both are
/// merged schema-by-schema.
fn merge(result: &mut Schema, r: &Schema) {
    use std::collections::hash_map::Entry;
    for (key, value) in &r.properties {
        match result.properties.entry(key.clone()) {
            Entry::Occupied(mut existing) => merge(existing.get_mut(), value),
            Entry::Vacant(slot) => {
                slot.insert(value.clone());
            }
        }
    }
}
/// Walks a JSON schema and expands its definitions into Rust struct tokens.
struct Expander<'r> {
    /// Root schema; `#/...` references are resolved against it.
    root: &'r Schema,
}
impl<'r> Expander<'r> {
    /// Final path component of a `$ref` string, used as the Rust type name.
    fn type_ref(&self, s: &str) -> String {
        s.split('/').last().expect("Component").into()
    }
    /// Resolve `s` to the schema it references, or return `s` itself when
    /// it is not a `$ref`.
    fn schema(&self, s: &'r Schema) -> &'r Schema {
        if let Some(ref ref_) = s.ref_ {
            self.schema_ref(ref_)
        } else {
            s
        }
    }
    /// Look up a `$ref` path such as `#/definitions/Foo`, walking one path
    /// component at a time. Panics when a named definition is missing.
    fn schema_ref(&self, s: &str) -> &'r Schema {
        s.split('/').fold(self.root, |schema, comp| {
            if comp == "#" {
                // `#` restarts resolution at the document root.
                self.root
            } else if comp == "definitions" {
                // Stay put: the next component is looked up in `definitions`.
                schema
            } else {
                schema.definitions
                    .get(comp)
                    .unwrap_or_else(|| panic!("Expected definition: `{}` {}", s, comp))
            }
        })
    }
    /// Map a schema to the Rust type used for a field of that schema.
    /// Multi-typed or string-enum schemas fall back to `serde_json::Value`.
    fn expand_type(&mut self, typ: &Schema) -> String {
        if let Some(ref ref_) = typ.ref_ {
            self.type_ref(ref_)
        } else if typ.type_.len() == 1 {
            match typ.type_[0] {
                Type::String => {
                    if !typ.enum_.is_empty() {
                        // Enumerated strings are kept as raw JSON values.
                        "serde_json::Value".into()
                    } else {
                        "String".into()
                    }
                }
                Type::Integer => "i64".into(),
                Type::Boolean => "bool".into(),
                Type::Number => "f64".into(),
                Type::Object => "serde_json::Value".into(),
                Type::Array => {
                    let item_schema =
                        typ.items.as_ref().expect("Array type must have items schema");
                    format!("Vec<{}>", self.expand_type(item_schema))
                }
                _ => panic!("Type"),
            }
        } else {
            "serde_json::Value".into()
        }
    }
    /// Produce one `name: Type` token pair per property of `schema`,
    /// resolving `$ref` and flattening `allOf` compositions first.
    fn expand_fields(&mut self, schema: &Schema) -> Vec<Tokens> {
        if let Some(ref ref_) = schema.ref_ {
            let schema = self.schema_ref(ref_);
            self.expand_fields(schema)
        } else if !schema.allOf.is_empty() {
            // Merge every `allOf` member into the first, then expand the
            // combined schema.
            let first = schema.allOf.first().unwrap().clone();
            let result = schema.allOf
                .iter()
                .skip(1)
                .fold(first, |mut result, def| {
                    merge(&mut result, self.schema(def));
                    result
                });
            self.expand_fields(&result)
        } else {
            schema.properties
                .iter()
                .map(|(key, value)| {
                    let key = field(key);
                    let typ = Ident(self.expand_type(value));
                    quote!( #key : #typ )
                })
                .collect()
        }
    }
    /// Generate a `pub struct` declaration for every definition in `schema`.
    pub fn expand_schema(&mut self, schema: &Schema) -> Tokens {
        let mut types = Vec::new();
        for (name, def) in &schema.definitions {
            let fields = self.expand_fields(def);
            let name = Ident(name);
            let tokens = quote! {
                pub struct #name {
                    #(#fields),*
                }
            };
            types.push(tokens);
        }
        quote! { #(
            #types
        )*
        }
    }
}
pub fn generate(s: &str) -> Tokens {
let schema = serde_json::from_str(s).unwrap();
let mut expander = Expander { root: &schema };
expander.expand_schema(&schema)
}
#[cfg(test)]
mod tests {
    // `use super::*` already brings in everything, including `Expander`;
    // the previous extra `use super::Expander;` was redundant.
    use super::*;

    /// Smoke test: expanding the bundled debugserver schema must not
    /// panic and must produce a non-empty token stream.
    /// (Previously ended in `assert!(false)` — a debugging leftover that
    /// made every test run fail.)
    #[test]
    fn attempt() {
        let s = include_str!("../../json-schema/tests/debugserver-schema.json");
        let s = generate(s).to_string();
        assert!(!s.is_empty());
    }
}
feat: rustfmt the output
extern crate json_schema;
extern crate serde_json;
#[macro_use]
extern crate quote;
use std::borrow::Cow;
use std::error::Error;
use json_schema::{Schema, Type};
use quote::{Tokens, ToTokens};
/// Wraps anything string-like so it is appended to the token stream as a
/// raw token (identifier) rather than as a string literal.
struct Ident<S>(S);

impl<S: AsRef<str>> ToTokens for Ident<S> {
    fn to_tokens(&self, tokens: &mut Tokens) {
        let raw = self.0.as_ref();
        tokens.append(raw)
    }
}
/// Turn a JSON property name into a usable Rust field identifier,
/// appending `_` when the name collides with a Rust keyword.
fn field(s: &str) -> Ident<Cow<str>> {
    let is_keyword = ["type", "struct", "enum"].contains(&s);
    let name: Cow<str> = if is_keyword {
        Cow::Owned(format!("{}_", s))
    } else {
        Cow::Borrowed(s)
    };
    Ident(name)
}
/// Recursively fold the properties of `r` into `result`: properties
/// missing from `result` are inserted, properties present in both are
/// merged schema-by-schema.
fn merge(result: &mut Schema, r: &Schema) {
    use std::collections::hash_map::Entry;
    for (key, value) in &r.properties {
        match result.properties.entry(key.clone()) {
            Entry::Occupied(mut existing) => merge(existing.get_mut(), value),
            Entry::Vacant(slot) => {
                slot.insert(value.clone());
            }
        }
    }
}
/// Walks a JSON schema and expands its definitions into Rust struct tokens.
struct Expander<'r> {
    /// Root schema; `#/...` references are resolved against it.
    root: &'r Schema,
}
impl<'r> Expander<'r> {
    /// Final path component of a `$ref` string, used as the Rust type name.
    fn type_ref(&self, s: &str) -> String {
        s.split('/').last().expect("Component").into()
    }
    /// Resolve `s` to the schema it references, or return `s` itself when
    /// it is not a `$ref`.
    fn schema(&self, s: &'r Schema) -> &'r Schema {
        if let Some(ref ref_) = s.ref_ {
            self.schema_ref(ref_)
        } else {
            s
        }
    }
    /// Look up a `$ref` path such as `#/definitions/Foo`, walking one path
    /// component at a time. Panics when a named definition is missing.
    fn schema_ref(&self, s: &str) -> &'r Schema {
        s.split('/').fold(self.root, |schema, comp| {
            if comp == "#" {
                // `#` restarts resolution at the document root.
                self.root
            } else if comp == "definitions" {
                // Stay put: the next component is looked up in `definitions`.
                schema
            } else {
                schema.definitions
                    .get(comp)
                    .unwrap_or_else(|| panic!("Expected definition: `{}` {}", s, comp))
            }
        })
    }
    /// Map a schema to the Rust type used for a field of that schema.
    /// Multi-typed or string-enum schemas fall back to `serde_json::Value`.
    fn expand_type(&mut self, typ: &Schema) -> String {
        if let Some(ref ref_) = typ.ref_ {
            self.type_ref(ref_)
        } else if typ.type_.len() == 1 {
            match typ.type_[0] {
                Type::String => {
                    if !typ.enum_.is_empty() {
                        // Enumerated strings are kept as raw JSON values.
                        "serde_json::Value".into()
                    } else {
                        "String".into()
                    }
                }
                Type::Integer => "i64".into(),
                Type::Boolean => "bool".into(),
                Type::Number => "f64".into(),
                Type::Object => "serde_json::Value".into(),
                Type::Array => {
                    let item_schema =
                        typ.items.as_ref().expect("Array type must have items schema");
                    format!("Vec<{}>", self.expand_type(item_schema))
                }
                _ => panic!("Type"),
            }
        } else {
            "serde_json::Value".into()
        }
    }
    /// Produce one `name: Type` token pair per property of `schema`,
    /// resolving `$ref` and flattening `allOf` compositions first.
    fn expand_fields(&mut self, schema: &Schema) -> Vec<Tokens> {
        if let Some(ref ref_) = schema.ref_ {
            let schema = self.schema_ref(ref_);
            self.expand_fields(schema)
        } else if !schema.allOf.is_empty() {
            // Merge every `allOf` member into the first, then expand the
            // combined schema.
            let first = schema.allOf.first().unwrap().clone();
            let result = schema.allOf
                .iter()
                .skip(1)
                .fold(first, |mut result, def| {
                    merge(&mut result, self.schema(def));
                    result
                });
            self.expand_fields(&result)
        } else {
            schema.properties
                .iter()
                .map(|(key, value)| {
                    let key = field(key);
                    let typ = Ident(self.expand_type(value));
                    quote!( #key : #typ )
                })
                .collect()
        }
    }
    /// Generate a `pub struct` declaration for every definition in `schema`.
    pub fn expand_schema(&mut self, schema: &Schema) -> Tokens {
        let mut types = Vec::new();
        for (name, def) in &schema.definitions {
            let fields = self.expand_fields(def);
            let name = Ident(name);
            let tokens = quote! {
                pub struct #name {
                    #(#fields),*
                }
            };
            types.push(tokens);
        }
        quote! { #(
            #types
        )*
        }
    }
}
pub fn generate(s: &str) -> Result<String, Box<Error>> {
use std::process::{Command, Stdio};
use std::io::Write;
let schema = serde_json::from_str(s).unwrap();
let mut expander = Expander { root: &schema };
let output = expander.expand_schema(&schema).to_string();
let mut child =
try!(Command::new("rustfmt").stdin(Stdio::piped()).stdout(Stdio::piped()).spawn());
try!(child.stdin.as_mut().expect("stdin").write_all(output.as_bytes()));
let output = try!(child.wait_with_output());
Ok(try!(String::from_utf8(output.stdout)))
}
#[cfg(test)]
mod tests {
    // `use super::*` already brings in everything, including `Expander`;
    // the previous extra `use super::Expander;` was redundant.
    use super::*;

    /// Smoke test: generating from the bundled debugserver schema must
    /// succeed and produce non-empty formatted output.
    /// (Previously ended in `assert!(false)` — a debugging leftover that
    /// made every test run fail.)
    #[test]
    fn attempt() {
        let s = include_str!("../../json-schema/tests/debugserver-schema.json");
        let s = generate(s).unwrap();
        assert!(!s.is_empty());
    }
}
|
// Crate metadata: name, description and license for rustc/cargo tooling.
#![crate_id = "iron"]
#![comment = "Rapid Web Development in Rust"]
#![license = "MIT"]
// Lints promoted to hard errors to keep the codebase clean.
// NOTE(review): `#![deny(unused_result)]` was listed twice; the duplicate
// attribute was redundant and has been removed.
#![deny(missing_doc)]
#![deny(unused_result)]
#![deny(unnecessary_qualification)]
#![deny(non_camel_case_types)]
#![deny(deprecated_owned_vector)]
#![deny(unnecessary_typecast)]
// Unstable features required by the crate.
#![feature(macro_rules, phase)]
//! The main crate for the Iron library.
(fix) Fixed duplicate attribute.
// Crate metadata: name, description and license for rustc/cargo tooling.
#![crate_id = "iron"]
#![comment = "Rapid Web Development in Rust"]
#![license = "MIT"]
// Lints promoted to hard errors to keep the codebase clean.
#![deny(missing_doc)]
#![deny(unused_result)]
#![deny(unnecessary_qualification)]
#![deny(non_camel_case_types)]
#![deny(unused_variable)]
#![deny(deprecated_owned_vector)]
#![deny(unnecessary_typecast)]
// Unstable features required by the crate.
#![feature(macro_rules, phase)]
//! The main crate for the Iron library.
|
/// 2D grid coordinate
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct Coord {
    pub x: usize,
    pub y: usize
}

/// 2D grid dimensions
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct Size {
    pub width: usize,
    pub height: usize
}

impl Coord {
    /// Create a grid coordinate at (x, y)
    pub fn new(x: usize, y: usize) -> Coord {
        Coord { x: x, y: y }
    }
}

impl Size {
    /// Create a grid size of (width, height)
    pub fn new(width: usize, height: usize) -> Size {
        Size { width: width, height: height }
    }

    /// Return true if the coordinate fits within self's width and
    /// height, false otherwise.
    pub fn contains_coord(&self, coord: Coord) -> bool {
        let x_ok = coord.x < self.width;
        let y_ok = coord.y < self.height;
        x_ok && y_ok
    }
}
/// Rectangle defined by inclusive minimum and maximum coordinates
#[derive(Clone, Copy, Eq, Debug, PartialEq)]
pub struct Rect {
    /// Minimum coordinate (inclusive)
    min_coord: Coord,
    /// Maximum coordinate (inclusive)
    max_coord: Coord
}
impl Rect {
    /// Number of columns covered by the rectangle.
    /// Both bounds are inclusive, hence the `+ 1`.
    fn width(&self) -> usize {
        return self.max_coord.x - self.min_coord.x + 1;
    }
}
/// Mutable 2D view over a flat slice of elements, stored row-major.
pub struct GridMut<'a, Elem: 'a> {
    /// Backing storage; length must equal `size.width * size.height`.
    elems: &'a mut [Elem],
    /// Logical grid dimensions.
    size: Size
}
impl<'a, Elem> GridMut<'a, Elem> {
    /// Wrap a flat row-major slice as a grid of the given dimensions.
    /// Returns None when the slice length does not match the size.
    pub fn new(elems: &'a mut [Elem], size: Size) -> Option<GridMut<Elem>> {
        if size.width * size.height == elems.len() {
            Some(GridMut {
                elems: elems,
                size: size
            })
        }
        else {
            None
        }
    }

    /// Iterate mutably over the elements inside `rect`, in row-major
    /// order, yielding each coordinate together with its element.
    /// Returns None when `rect` does not fit inside the grid.
    ///
    /// Fixes over the previous version: the cursor now starts at
    /// `rect.min_coord` instead of the slice start, and the row stride
    /// includes the `+ 1` step from a row's last column to the next
    /// row's first column.
    pub fn rect_iter_mut(&'a mut self, rect: Rect) -> Option<RectIterMut<'a, Elem>> {
        if self.size.contains_coord(rect.max_coord) {
            // Linear index of the rectangle's first element.
            let start = rect.min_coord.y * self.size.width + rect.min_coord.x;
            let stride = (self.size.width - rect.width() + 1) as isize;
            // SAFETY: `contains_coord(rect.max_coord)` and the Rect
            // invariant (min <= max) keep `start` within the slice.
            let first = unsafe { self.elems.as_mut_ptr().offset(start as isize) };
            Some(RectIterMut {
                stride: stride,
                cur_elem: first,
                grid: self,
                rect: rect,
                cur_coord: rect.min_coord
            })
        }
        else {
            None
        }
    }
}

impl<'a, Elem> Iterator for RectIterMut<'a, Elem> {
    type Item = (Coord, &'a mut Elem);

    fn next(&mut self) -> Option<Self::Item> {
        if self.cur_coord.y <= self.rect.max_coord.y {
            let result = (self.cur_coord, unsafe { &mut *self.cur_elem });
            self.cur_coord.x += 1;
            if self.cur_coord.x <= self.rect.max_coord.x {
                // The previous version discarded the result of `offset`,
                // so the cursor never advanced; assign it back.
                // SAFETY: still inside the current row of the rectangle.
                self.cur_elem = unsafe { self.cur_elem.offset(1) };
            }
            else {
                self.cur_coord.x = self.rect.min_coord.x;
                self.cur_coord.y += 1;
                // Only step to the next row while one remains, so the
                // pointer never moves past one-past-the-end.
                if self.cur_coord.y <= self.rect.max_coord.y {
                    // SAFETY: the next row's first rectangle column is
                    // still inside the grid.
                    self.cur_elem = unsafe { self.cur_elem.offset(self.stride) };
                }
            }
            Some(result)
        }
        else {
            None
        }
    }
}

/// Mutable iterator over a rectangular region of a GridMut.
pub struct RectIterMut<'a, Elem: 'a> {
    /// Keeps the grid mutably borrowed for the iterator's lifetime.
    grid: &'a mut GridMut<'a, Elem>,
    /// Region being traversed (inclusive bounds).
    rect: Rect,
    /// Pointer to the element at `cur_coord`.
    cur_elem: *mut Elem,
    /// Next coordinate to yield.
    cur_coord: Coord,
    /// Pointer step from a row's last rectangle column to the next
    /// row's first rectangle column.
    stride: isize
}
/// Exercises RectIterMut over the full extent of a 2x2 grid.
/// NOTE(review): only the yielded coordinates are asserted; the element
/// values written through the iterator are never checked, so pointer
/// bugs in the iterator would not be caught here.
#[test]
fn test_rect_iter_mut() {
    let mut elems = [0, 1, 2, 3];
    let mut grid = GridMut::new(&mut elems, Size::new(2, 2)).unwrap();
    let rect = Rect::new(Coord::new(0, 0), Coord::new(1, 1)).unwrap();
    let mut actual_coords = Vec::new();
    for (coord, elem) in grid.rect_iter_mut(rect).unwrap() {
        *elem = -(*elem);
        actual_coords.push((coord.x, coord.y));
    }
    assert_eq!(actual_coords, [(0, 0), (1, 0), (0, 1), (1, 1)]);
}
impl Rect {
    /// Create a new Rect defined by inclusive minimum and maximum
    /// coordinates. If min_coord is greater than max_coord on either
    /// axis then None is returned.
    pub fn new(min_coord: Coord, max_coord: Coord) -> Option<Rect> {
        let ordered = min_coord.x <= max_coord.x && min_coord.y <= max_coord.y;
        if !ordered {
            return None;
        }
        Some(Rect {
            min_coord: min_coord,
            max_coord: max_coord
        })
    }
}
// pub struct RectIter<usize: Copy + Unsigned> {
// rect: Rect,
// cur_coord: Coord
// }
// impl<usize: Copy + Ord + Unsigned + Add<Output=usize> + num::One> Iterator for RectIter {
// type Item = Coord;
// fn next(&mut self) -> Option<Self::Item> {
// if self.cur_coord.y <= self.rect.max_coord.y {
// let result = Some(self.cur_coord);
// self.cur_coord.x = self.cur_coord.x + usize::one();
// if self.cur_coord.x > self.rect.max_coord.x {
// self.cur_coord.x = self.rect.min_coord.x;
// self.cur_coord.y = self.cur_coord.y + usize::one();
// }
// result
// }
// else {
// None
// }
// }
// }
// #[test]
// fn test_rect_iter() {
// let rect = Rect::new(Coord::new(1, 2), Coord::new(3, 4)).unwrap();
// let coords: Vec<Coord<u8>> = rect.iter().collect();
// assert_eq!(coords, [
// Coord::new(1, 2), Coord::new(2, 2), Coord::new(3, 2),
// Coord::new(1, 3), Coord::new(2, 3), Coord::new(3, 3),
// Coord::new(1, 4), Coord::new(2, 4), Coord::new(3, 4)]);
// }
// pub struct DataRectIter<'s, S: 's, usize: Copy + Unsigned> {
// data: &'s [S],
// cur_elem: *const S,
// cur_coord: Coord,
// full: Rect,
// part: Rect
// }
// impl<'s, S: 's, usize: Copy + Unsigned> Iterator for DataRectIter<'s, S, usize> {
// type Item = (Coord, &'s S);
// fn next(&mut self) -> Option<Self::Item> {
// unsafe {
// self.cur_elem = self.cur_elem.offset(1);
// }
// None
// }
// }
Change GridMut to Vec2D
/// 2D grid coordinate
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct Coord {
    pub x: usize,
    pub y: usize
}

/// 2D grid dimensions
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct Size {
    pub width: usize,
    pub height: usize
}

impl Coord {
    /// Create a grid coordinate at (x, y)
    pub fn new(x: usize, y: usize) -> Coord {
        Coord { x: x, y: y }
    }
}

impl Size {
    /// Create a grid size of (width, height)
    pub fn new(width: usize, height: usize) -> Size {
        Size { width: width, height: height }
    }

    /// Total number of cells (width * height).
    pub fn area(&self) -> usize {
        let cells = self.width * self.height;
        cells
    }

    /// Return true if the coordinate fits within self's width and
    /// height, false otherwise.
    pub fn contains_coord(&self, coord: Coord) -> bool {
        let x_ok = coord.x < self.width;
        let y_ok = coord.y < self.height;
        x_ok && y_ok
    }
}
/// Rectangle defined by inclusive minimum and maximum coordinates
#[derive(Clone, Copy, Eq, Debug, PartialEq)]
pub struct Rect {
    /// Minimum coordinate (inclusive)
    min_coord: Coord,
    /// Maximum coordinate (inclusive)
    max_coord: Coord
}
impl Rect {
    /// Number of columns covered by the rectangle.
    /// Both bounds are inclusive, hence the `+ 1`.
    fn width(&self) -> usize {
        return self.max_coord.x - self.min_coord.x + 1;
    }
}
/// Owned 2D grid of elements stored row-major in a flat Vec.
pub struct Vec2D<T> {
    /// Backing storage; length equals `size.area()` by construction.
    elems: Vec<T>,
    /// Logical grid dimensions.
    size: Size
}
impl<Elem: Copy> Vec2D<Elem> {
    /// Build a grid of the given size with every cell set to `*example`.
    pub fn from_example(size: Size, example: &Elem) -> Vec2D<Elem> {
        Vec2D {
            elems: vec![*example; size.area()],
            size: size
        }
    }

    /// Wrap a flat row-major Vec as a grid of the given dimensions.
    /// Returns None when the Vec length does not match the size.
    pub fn from_vec(size: Size, src: Vec<Elem>) -> Option<Vec2D<Elem>> {
        if size.area() == src.len() {
            Some(Vec2D {
                elems: src,
                size: size
            })
        }
        else {
            None
        }
    }

    /// Iterate mutably over the elements inside `rect`, in row-major
    /// order, yielding each coordinate together with its element.
    /// Returns None when `rect` does not fit inside the grid.
    ///
    /// Fixes over the previous version: the cursor now starts at
    /// `rect.min_coord` instead of the buffer start, and the row stride
    /// includes the `+ 1` step from a row's last column to the next
    /// row's first column.
    pub fn rect_iter_mut<'a>(&'a mut self, rect: Rect) -> Option<RectIterMut<'a, Elem>> {
        if self.size.contains_coord(rect.max_coord) {
            // Linear index of the rectangle's first element.
            let start = rect.min_coord.y * self.size.width + rect.min_coord.x;
            let stride = (self.size.width - rect.width() + 1) as isize;
            // SAFETY: `contains_coord(rect.max_coord)` and the Rect
            // invariant (min <= max) keep `start` within the buffer.
            let first = unsafe { self.elems.as_mut_ptr().offset(start as isize) };
            Some(RectIterMut {
                stride: stride,
                cur_elem: first,
                grid: self,
                rect: rect,
                cur_coord: rect.min_coord
            })
        }
        else {
            None
        }
    }
}

impl<'a, Elem> Iterator for RectIterMut<'a, Elem> {
    type Item = (Coord, &'a mut Elem);

    fn next(&mut self) -> Option<Self::Item> {
        if self.cur_coord.y <= self.rect.max_coord.y {
            let result = (self.cur_coord, unsafe { &mut *self.cur_elem });
            self.cur_coord.x += 1;
            if self.cur_coord.x <= self.rect.max_coord.x {
                // The previous version discarded the result of `offset`,
                // so the cursor never advanced; assign it back.
                // SAFETY: still inside the current row of the rectangle.
                self.cur_elem = unsafe { self.cur_elem.offset(1) };
            }
            else {
                self.cur_coord.x = self.rect.min_coord.x;
                self.cur_coord.y += 1;
                // Only step to the next row while one remains, so the
                // pointer never moves past one-past-the-end.
                if self.cur_coord.y <= self.rect.max_coord.y {
                    // SAFETY: the next row's first rectangle column is
                    // still inside the grid.
                    self.cur_elem = unsafe { self.cur_elem.offset(self.stride) };
                }
            }
            Some(result)
        }
        else {
            None
        }
    }
}

/// Mutable iterator over a rectangular region of a Vec2D.
pub struct RectIterMut<'a, Elem: 'a> {
    /// Keeps the grid mutably borrowed for the iterator's lifetime.
    grid: &'a mut Vec2D<Elem>,
    /// Region being traversed (inclusive bounds).
    rect: Rect,
    /// Pointer to the element at `cur_coord`.
    cur_elem: *mut Elem,
    /// Next coordinate to yield.
    cur_coord: Coord,
    /// Pointer step from a row's last rectangle column to the next
    /// row's first rectangle column.
    stride: isize
}
/// Exercises RectIterMut over the full extent of a 2x2 grid.
/// NOTE(review): only the yielded coordinates are asserted; the element
/// values written through the iterator are never checked, so pointer
/// bugs in the iterator would not be caught here.
#[test]
fn test_rect_iter_mut() {
    let mut elems = vec![0, 1, 2, 3];
    let mut grid = Vec2D::from_vec(Size::new(2, 2), elems).unwrap();
    let rect = Rect::new(Coord::new(0, 0), Coord::new(1, 1)).unwrap();
    let mut actual_coords = Vec::new();
    for (coord, elem) in grid.rect_iter_mut(rect).unwrap() {
        *elem = -(*elem);
        actual_coords.push((coord.x, coord.y));
    }
    assert_eq!(actual_coords, [(0, 0), (1, 0), (0, 1), (1, 1)]);
}
impl Rect {
    /// Create a new Rect defined by inclusive minimum and maximum
    /// coordinates. If min_coord is greater than max_coord on either
    /// axis then None is returned.
    pub fn new(min_coord: Coord, max_coord: Coord) -> Option<Rect> {
        let ordered = min_coord.x <= max_coord.x && min_coord.y <= max_coord.y;
        if !ordered {
            return None;
        }
        Some(Rect {
            min_coord: min_coord,
            max_coord: max_coord
        })
    }
}
// pub struct RectIter<usize: Copy + Unsigned> {
// rect: Rect,
// cur_coord: Coord
// }
// impl<usize: Copy + Ord + Unsigned + Add<Output=usize> + num::One> Iterator for RectIter {
// type Item = Coord;
// fn next(&mut self) -> Option<Self::Item> {
// if self.cur_coord.y <= self.rect.max_coord.y {
// let result = Some(self.cur_coord);
// self.cur_coord.x = self.cur_coord.x + usize::one();
// if self.cur_coord.x > self.rect.max_coord.x {
// self.cur_coord.x = self.rect.min_coord.x;
// self.cur_coord.y = self.cur_coord.y + usize::one();
// }
// result
// }
// else {
// None
// }
// }
// }
// #[test]
// fn test_rect_iter() {
// let rect = Rect::new(Coord::new(1, 2), Coord::new(3, 4)).unwrap();
// let coords: Vec<Coord<u8>> = rect.iter().collect();
// assert_eq!(coords, [
// Coord::new(1, 2), Coord::new(2, 2), Coord::new(3, 2),
// Coord::new(1, 3), Coord::new(2, 3), Coord::new(3, 3),
// Coord::new(1, 4), Coord::new(2, 4), Coord::new(3, 4)]);
// }
// pub struct DataRectIter<'s, S: 's, usize: Copy + Unsigned> {
// data: &'s [S],
// cur_elem: *const S,
// cur_coord: Coord,
// full: Rect,
// part: Rect
// }
// impl<'s, S: 's, usize: Copy + Unsigned> Iterator for DataRectIter<'s, S, usize> {
// type Item = (Coord, &'s S);
// fn next(&mut self) -> Option<Self::Item> {
// unsafe {
// self.cur_elem = self.cur_elem.offset(1);
// }
// None
// }
// }
|
//! The goal of this crate is to provide Rust bindings to the Web APIs and to allow
//! a high degree of interoperability between Rust and JavaScript.
//!
//! ## Examples
//!
//! You can directly embed JavaScript code into Rust:
//!
//! ```rust
//! let message = "Hello, 世界!";
//! let result = js! {
//! alert( @{message} );
//! return 2 + 2 * 2;
//! };
//!
//! println!( "2 + 2 * 2 = {:?}", result );
//! ```
//!
//! Closures are also supported:
//!
//! ```rust
//! let print_hello = |name: String| {
//! println!( "Hello, {}!", name );
//! };
//!
//! js! {
//! var print_hello = @{print_hello};
//! print_hello( "Bob" );
//! print_hello.drop(); // Necessary to clean up the closure on Rust's side.
//! }
//! ```
//!
//! You can also pass arbitrary structures thanks to [serde]:
//!
//! ```rust
//! #[derive(Serialize)]
//! struct Person {
//! name: String,
//! age: i32
//! }
//!
//! js_serializable!( Person );
//!
//! js! {
//! var person = @{person};
//! console.log( person.name + " is " + person.age + " years old." );
//! };
//! ```
//!
//! [serde]: https://serde.rs/
//!
//! This crate also exposes a number of Web APIs, for example:
//!
//! ```rust
//! let button = document().query_selector( "#hide-button" ).unwrap().unwrap();
//! button.add_event_listener( move |_: ClickEvent| {
//! for anchor in document().query_selector_all( "#main a" ) {
//! js!( @{anchor}.style = "display: none;"; );
//! }
//! });
//! ```
//!
//! Exposing Rust functions to JavaScript is supported too:
//!
//! ```rust
//! #[js_export]
//! fn hash( string: String ) -> String {
//! let mut hasher = Sha1::new();
//! hasher.update( string.as_bytes() );
//! hasher.digest().to_string()
//! }
//! ```
//!
//! Then you can do this from Node.js:
//!
//! ```js
//! var hasher = require( "hasher.js" ); // Where `hasher.js` is generated from Rust code.
//! console.log( hasher.hash( "Hello world!" ) );
//! ```
//!
//! Or you can take the same `.js` file and use it in a web browser:
//!
//! ```html
//! <script src="hasher.js"></script>
//! <script>
//! Rust.hasher.then( function( hasher ) {
//! console.log( hasher.hash( "Hello world!" ) );
//! });
//! </script>
//! ```
//!
//! If you're using [Parcel] you can also use our [experimental Parcel plugin];
//! first do this in your existing Parcel project:
//!
//! $ npm install --save parcel-plugin-cargo-web
//!
//! And then simply:
//!
//! ```js
//! import hasher from "./hasher/Cargo.toml";
//! console.log( hasher.hash( "Hello world!" ) );
//! ```
//!
//! [Parcel]: https://parceljs.org/
//! [experimental Parcel plugin]: https://github.com/koute/parcel-plugin-cargo-web
#![deny(
missing_docs,
missing_debug_implementations,
trivial_numeric_casts,
unused_import_braces,
unused_qualifications
)]
#![cfg_attr(
all(target_arch = "wasm32", target_os = "unknown"),
feature(proc_macro)
)]
#![cfg_attr(feature = "nightly", feature(core_intrinsics))]
#![cfg_attr(feature = "nightly", feature(never_type))]
#![recursion_limit="1500"]
#[cfg(feature = "serde")]
#[macro_use]
extern crate serde as serde_crate;
#[cfg(any(test, feature = "serde_json"))]
extern crate serde_json;
#[cfg(all(test, feature = "serde"))]
#[macro_use]
extern crate serde_derive;
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
extern crate stdweb_internal_macros;
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
pub use stdweb_internal_macros::js_export;
#[cfg(feature = "futures")]
extern crate futures;
#[macro_use]
extern crate stdweb_derive;
extern crate discard;
#[macro_use]
mod webcore;
mod webapi;
mod ecosystem;
// This is here so that our procedural macros
// can work within the crate.
pub(crate) mod stdweb {
    // Re-export the crate root under its own name so that `::stdweb::...`
    // paths emitted by the macros resolve when used inside this crate.
    pub use super::*;
}
pub use webcore::initialization::{
initialize,
event_loop
};
pub use webcore::value::{
Undefined,
Null,
Value,
Reference
};
pub use webcore::number::Number;
pub use webcore::object::Object;
pub use webcore::array::Array;
pub use webcore::symbol::Symbol;
pub use webcore::unsafe_typed_array::UnsafeTypedArray;
pub use webcore::once::Once;
pub use webcore::instance_of::InstanceOf;
pub use webcore::reference_type::ReferenceType;
pub use webcore::serialization::JsSerialize;
pub use webcore::discard::DiscardOnDrop;
#[cfg(feature = "experimental_features_which_may_break_on_minor_version_bumps")]
pub use webcore::promise::{Promise, DoneHandle};
#[cfg(all(
feature = "futures",
feature = "experimental_features_which_may_break_on_minor_version_bumps"
))]
pub use webcore::promise_future::PromiseFuture;
#[cfg(feature = "serde")]
/// A module with serde-related APIs.
pub mod serde {
pub use ecosystem::serde::{
ConversionError,
Serde
};
}
/// A module with bindings to the Web APIs.
pub mod web {
pub use webapi::window::{
Window,
window
};
pub use webapi::document::{
Document,
document
};
pub use webapi::global::{
set_timeout,
alert
};
pub use webapi::cross_origin_setting::CrossOriginSetting;
pub use webapi::date::Date;
pub use webapi::event_target::{IEventTarget, EventTarget, EventListenerHandle};
pub use webapi::window::RequestAnimationFrameHandle;
pub use webapi::node::{INode, Node, CloneKind};
pub use webapi::element::{IElement, Element};
pub use webapi::document_fragment::DocumentFragment;
pub use webapi::text_node::TextNode;
pub use webapi::html_element::{IHtmlElement, HtmlElement, Rect};
pub use webapi::window_or_worker::IWindowOrWorker;
pub use webapi::parent_node::IParentNode;
pub use webapi::non_element_parent_node::INonElementParentNode;
pub use webapi::token_list::TokenList;
pub use webapi::node_list::NodeList;
pub use webapi::string_map::StringMap;
pub use webapi::storage::Storage;
pub use webapi::location::Location;
pub use webapi::array_buffer::ArrayBuffer;
pub use webapi::typed_array::TypedArray;
pub use webapi::file_reader::{FileReader, FileReaderResult};
pub use webapi::history::History;
pub use webapi::web_socket::{WebSocket, SocketCloseCode, SocketBinaryType, SocketReadyState};
pub use webapi::rendering_context::{RenderingContext, CanvasRenderingContext2d, CanvasGradient, CanvasPattern, CanvasStyle, CompositeOperation, FillRule, ImageData, LineCap, LineJoin, Repetition, TextAlign, TextBaseline, TextMetrics};
pub use webapi::mutation_observer::{MutationObserver, MutationObserverHandle, MutationObserverInit, MutationRecord};
pub use webapi::xml_http_request::{XmlHttpRequest, XhrReadyState};
pub use webapi::blob::{IBlob, Blob};
/// A module containing error types.
pub mod error {
pub use webapi::dom_exception::{
IDomException,
DomException,
HierarchyRequestError,
IndexSizeError,
InvalidAccessError,
InvalidStateError,
NotFoundError,
NotSupportedError,
SecurityError,
SyntaxError,
TypeError,
InvalidCharacterError
};
pub use webapi::error::{IError, Error};
pub use webapi::rendering_context::{AddColorStopError, DrawImageError, GetImageDataError};
}
/// A module containing HTML DOM elements.
pub mod html_element {
pub use webapi::html_elements::ImageElement;
pub use webapi::html_elements::InputElement;
pub use webapi::html_elements::TextAreaElement;
pub use webapi::html_elements::CanvasElement;
}
/// A module containing JavaScript DOM events.
pub mod event {
pub use webapi::event::{
IEvent,
IUiEvent,
ConcreteEvent,
EventPhase
};
pub use webapi::events::mouse::{
IMouseEvent,
ClickEvent,
DoubleClickEvent,
MouseDownEvent,
MouseUpEvent,
MouseMoveEvent,
MouseOverEvent,
MouseOutEvent,
MouseButton
};
pub use webapi::events::pointer::{
IPointerEvent,
PointerOverEvent,
PointerEnterEvent,
PointerDownEvent,
PointerMoveEvent,
PointerUpEvent,
PointerCancelEvent,
PointerOutEvent,
PointerLeaveEvent,
GotPointerCaptureEvent,
LostPointerCaptureEvent,
};
pub use webapi::events::keyboard::{
IKeyboardEvent,
KeyPressEvent,
KeyDownEvent,
KeyUpEvent,
KeyboardLocation,
ModifierKey
};
pub use webapi::events::progress::{
IProgressEvent,
ProgressEvent,
LoadStartEvent,
LoadEndEvent,
ProgressLoadEvent,
ProgressAbortEvent,
ProgressErrorEvent
};
pub use webapi::events::socket::{
IMessageEvent,
SocketCloseEvent,
SocketErrorEvent,
SocketOpenEvent,
SocketMessageEvent
};
pub use webapi::events::history::{
HashChangeEvent,
PopStateEvent
};
pub use webapi::events::dom::{
ChangeEvent,
ResourceLoadEvent,
ResourceAbortEvent,
ResourceErrorEvent,
ResizeEvent,
InputEvent,
ReadyStateChangeEvent
};
pub use webapi::events::focus::{
IFocusEvent,
FocusEvent,
BlurEvent
};
}
}
/// A module containing stable counterparts to currently
/// unstable Rust features.
pub mod unstable {
pub use webcore::try_from::{
TryFrom,
TryInto
};
pub use webcore::void::Void;
}
/// A module containing reexports of all of our interface traits.
///
/// You should **only** import its contents through a wildcard, e.g.: `use stdweb::traits::*`.
pub mod traits {
pub use super::web::{
// Real interfaces.
IEventTarget,
INode,
IElement,
IHtmlElement,
IBlob,
// Mixins.
IWindowOrWorker,
IParentNode,
INonElementParentNode
};
pub use super::web::error::{
IDomException,
IError
};
pub use super::web::event::{
IEvent,
IUiEvent,
IMouseEvent,
IPointerEvent,
IKeyboardEvent,
IProgressEvent,
IMessageEvent,
IFocusEvent
};
}
/// Implementation details re-exported for use by this crate's macros.
/// Hidden from rustdoc; not part of the public API contract.
#[doc(hidden)]
pub mod private {
    pub use webcore::ffi::exports::*;
    pub use webcore::serialization::{
        JsSerialize,
        JsSerializeOwned,
        PreallocatedArena,
        SerializedValue
    };
    pub use webcore::newtype::{
        IntoNewtype,
        Newtype
    };
    #[cfg(feature = "serde")]
    pub use ecosystem::serde::{
        to_value,
        from_value
    };
    // This is to prevent an unused_mut warnings in macros, because an `allow` doesn't work apparently?
    #[allow(dead_code)]
    #[inline(always)]
    pub fn noop< T >( _: &mut T ) {}
    // TODO: Remove this.
    // Placeholder error type used by generated code; both trait methods
    // are `unreachable!()`, i.e. they are never expected to be called.
    #[derive(Debug)]
    pub struct TODO;
    impl ::std::fmt::Display for TODO {
        fn fmt( &self, _: &mut ::std::fmt::Formatter ) -> Result< (), ::std::fmt::Error > {
            unreachable!();
        }
    }
    impl ::std::error::Error for TODO {
        fn description( &self ) -> &str {
            unreachable!();
        }
    }
    pub use webcore::value::ConversionError;
}
Export `NodeType` (#205)
//! The goal of this crate is to provide Rust bindings to the Web APIs and to allow
//! a high degree of interoperability between Rust and JavaScript.
//!
//! ## Examples
//!
//! You can directly embed JavaScript code into Rust:
//!
//! ```rust
//! let message = "Hello, 世界!";
//! let result = js! {
//! alert( @{message} );
//! return 2 + 2 * 2;
//! };
//!
//! println!( "2 + 2 * 2 = {:?}", result );
//! ```
//!
//! Closures are also supported:
//!
//! ```rust
//! let print_hello = |name: String| {
//! println!( "Hello, {}!", name );
//! };
//!
//! js! {
//! var print_hello = @{print_hello};
//! print_hello( "Bob" );
//! print_hello.drop(); // Necessary to clean up the closure on Rust's side.
//! }
//! ```
//!
//! You can also pass arbitrary structures thanks to [serde]:
//!
//! ```rust
//! #[derive(Serialize)]
//! struct Person {
//! name: String,
//! age: i32
//! }
//!
//! js_serializable!( Person );
//!
//! js! {
//! var person = @{person};
//! console.log( person.name + " is " + person.age + " years old." );
//! };
//! ```
//!
//! [serde]: https://serde.rs/
//!
//! This crate also exposes a number of Web APIs, for example:
//!
//! ```rust
//! let button = document().query_selector( "#hide-button" ).unwrap().unwrap();
//! button.add_event_listener( move |_: ClickEvent| {
//! for anchor in document().query_selector_all( "#main a" ) {
//! js!( @{anchor}.style = "display: none;"; );
//! }
//! });
//! ```
//!
//! Exposing Rust functions to JavaScript is supported too:
//!
//! ```rust
//! #[js_export]
//! fn hash( string: String ) -> String {
//! let mut hasher = Sha1::new();
//! hasher.update( string.as_bytes() );
//! hasher.digest().to_string()
//! }
//! ```
//!
//! Then you can do this from Node.js:
//!
//! ```js
//! var hasher = require( "hasher.js" ); // Where `hasher.js` is generated from Rust code.
//! console.log( hasher.hash( "Hello world!" ) );
//! ```
//!
//! Or you can take the same `.js` file and use it in a web browser:
//!
//! ```html
//! <script src="hasher.js"></script>
//! <script>
//! Rust.hasher.then( function( hasher ) {
//! console.log( hasher.hash( "Hello world!" ) );
//! });
//! </script>
//! ```
//!
//! If you're using [Parcel] you can also use our [experimental Parcel plugin];
//! first do this in your existing Parcel project:
//!
//! $ npm install --save parcel-plugin-cargo-web
//!
//! And then simply:
//!
//! ```js
//! import hasher from "./hasher/Cargo.toml";
//! console.log( hasher.hash( "Hello world!" ) );
//! ```
//!
//! [Parcel]: https://parceljs.org/
//! [experimental Parcel plugin]: https://github.com/koute/parcel-plugin-cargo-web
#![deny(
missing_docs,
missing_debug_implementations,
trivial_numeric_casts,
unused_import_braces,
unused_qualifications
)]
#![cfg_attr(
all(target_arch = "wasm32", target_os = "unknown"),
feature(proc_macro)
)]
#![cfg_attr(feature = "nightly", feature(core_intrinsics))]
#![cfg_attr(feature = "nightly", feature(never_type))]
#![recursion_limit="1500"]
#[cfg(feature = "serde")]
#[macro_use]
extern crate serde as serde_crate;
#[cfg(any(test, feature = "serde_json"))]
extern crate serde_json;
#[cfg(all(test, feature = "serde"))]
#[macro_use]
extern crate serde_derive;
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
extern crate stdweb_internal_macros;
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
pub use stdweb_internal_macros::js_export;
#[cfg(feature = "futures")]
extern crate futures;
#[macro_use]
extern crate stdweb_derive;
extern crate discard;
#[macro_use]
mod webcore;
mod webapi;
mod ecosystem;
// Re-export of the whole crate under its own name. Code generated by our
// procedural macros refers to `::stdweb::...` paths; this alias lets those
// paths resolve even when the generated code is compiled inside this crate.
pub(crate) mod stdweb {
    pub use super::*;
}
pub use webcore::initialization::{
initialize,
event_loop
};
pub use webcore::value::{
Undefined,
Null,
Value,
Reference
};
pub use webcore::number::Number;
pub use webcore::object::Object;
pub use webcore::array::Array;
pub use webcore::symbol::Symbol;
pub use webcore::unsafe_typed_array::UnsafeTypedArray;
pub use webcore::once::Once;
pub use webcore::instance_of::InstanceOf;
pub use webcore::reference_type::ReferenceType;
pub use webcore::serialization::JsSerialize;
pub use webcore::discard::DiscardOnDrop;
#[cfg(feature = "experimental_features_which_may_break_on_minor_version_bumps")]
pub use webcore::promise::{Promise, DoneHandle};
#[cfg(all(
feature = "futures",
feature = "experimental_features_which_may_break_on_minor_version_bumps"
))]
pub use webcore::promise_future::PromiseFuture;
#[cfg(feature = "serde")]
/// A module with serde-related APIs.
///
/// Only compiled in when the `serde` cargo feature is enabled.
pub mod serde {
    pub use ecosystem::serde::{
        ConversionError,
        Serde
    };
}
/// A module with bindings to the Web APIs.
pub mod web {
    // Global objects and free functions.
    pub use webapi::window::{
        Window,
        window
    };
    pub use webapi::document::{
        Document,
        document
    };
    pub use webapi::global::{
        set_timeout,
        alert
    };
    // Core DOM node and element types.
    pub use webapi::cross_origin_setting::CrossOriginSetting;
    pub use webapi::date::Date;
    pub use webapi::event_target::{IEventTarget, EventTarget, EventListenerHandle};
    pub use webapi::window::RequestAnimationFrameHandle;
    pub use webapi::node::{INode, Node, CloneKind, NodeType};
    pub use webapi::element::{IElement, Element};
    pub use webapi::document_fragment::DocumentFragment;
    pub use webapi::text_node::TextNode;
    pub use webapi::html_element::{IHtmlElement, HtmlElement, Rect};
    // Mixin interfaces shared by several concrete types.
    pub use webapi::window_or_worker::IWindowOrWorker;
    pub use webapi::parent_node::IParentNode;
    pub use webapi::non_element_parent_node::INonElementParentNode;
    // Collections, storage, I/O and rendering types.
    pub use webapi::token_list::TokenList;
    pub use webapi::node_list::NodeList;
    pub use webapi::string_map::StringMap;
    pub use webapi::storage::Storage;
    pub use webapi::location::Location;
    pub use webapi::array_buffer::ArrayBuffer;
    pub use webapi::typed_array::TypedArray;
    pub use webapi::file_reader::{FileReader, FileReaderResult};
    pub use webapi::history::History;
    pub use webapi::web_socket::{WebSocket, SocketCloseCode, SocketBinaryType, SocketReadyState};
    pub use webapi::rendering_context::{RenderingContext, CanvasRenderingContext2d, CanvasGradient, CanvasPattern, CanvasStyle, CompositeOperation, FillRule, ImageData, LineCap, LineJoin, Repetition, TextAlign, TextBaseline, TextMetrics};
    pub use webapi::mutation_observer::{MutationObserver, MutationObserverHandle, MutationObserverInit, MutationRecord};
    pub use webapi::xml_http_request::{XmlHttpRequest, XhrReadyState};
    pub use webapi::blob::{IBlob, Blob};
    /// A module containing error types.
    pub mod error {
        pub use webapi::dom_exception::{
            IDomException,
            DomException,
            HierarchyRequestError,
            IndexSizeError,
            InvalidAccessError,
            InvalidStateError,
            NotFoundError,
            NotSupportedError,
            SecurityError,
            SyntaxError,
            TypeError,
            InvalidCharacterError
        };
        pub use webapi::error::{IError, Error};
        pub use webapi::rendering_context::{AddColorStopError, DrawImageError, GetImageDataError};
    }
    /// A module containing HTML DOM elements.
    pub mod html_element {
        pub use webapi::html_elements::ImageElement;
        pub use webapi::html_elements::InputElement;
        pub use webapi::html_elements::TextAreaElement;
        pub use webapi::html_elements::CanvasElement;
    }
    /// A module containing JavaScript DOM events.
    pub mod event {
        pub use webapi::event::{
            IEvent,
            IUiEvent,
            ConcreteEvent,
            EventPhase
        };
        pub use webapi::events::mouse::{
            IMouseEvent,
            ClickEvent,
            DoubleClickEvent,
            MouseDownEvent,
            MouseUpEvent,
            MouseMoveEvent,
            MouseOverEvent,
            MouseOutEvent,
            MouseButton
        };
        pub use webapi::events::pointer::{
            IPointerEvent,
            PointerOverEvent,
            PointerEnterEvent,
            PointerDownEvent,
            PointerMoveEvent,
            PointerUpEvent,
            PointerCancelEvent,
            PointerOutEvent,
            PointerLeaveEvent,
            GotPointerCaptureEvent,
            LostPointerCaptureEvent,
        };
        pub use webapi::events::keyboard::{
            IKeyboardEvent,
            KeyPressEvent,
            KeyDownEvent,
            KeyUpEvent,
            KeyboardLocation,
            ModifierKey
        };
        pub use webapi::events::progress::{
            IProgressEvent,
            ProgressEvent,
            LoadStartEvent,
            LoadEndEvent,
            ProgressLoadEvent,
            ProgressAbortEvent,
            ProgressErrorEvent
        };
        pub use webapi::events::socket::{
            IMessageEvent,
            SocketCloseEvent,
            SocketErrorEvent,
            SocketOpenEvent,
            SocketMessageEvent
        };
        pub use webapi::events::history::{
            HashChangeEvent,
            PopStateEvent
        };
        pub use webapi::events::dom::{
            ChangeEvent,
            ResourceLoadEvent,
            ResourceAbortEvent,
            ResourceErrorEvent,
            ResizeEvent,
            InputEvent,
            ReadyStateChangeEvent
        };
        pub use webapi::events::focus::{
            IFocusEvent,
            FocusEvent,
            BlurEvent
        };
    }
}
/// A module containing stable counterparts to currently
/// unstable Rust features.
///
/// `TryFrom`/`TryInto` are this crate's stand-ins for the (then-unstable)
/// standard conversion traits, and `Void` stands in for the never type.
pub mod unstable {
    pub use webcore::try_from::{
        TryFrom,
        TryInto
    };
    pub use webcore::void::Void;
}
/// A module containing reexports of all of our interface traits.
///
/// Collects every `I*` interface trait in one place so that their methods can
/// be brought into scope with a single import.
///
/// You should **only** import its contents through a wildcard, e.g.: `use stdweb::traits::*`.
pub mod traits {
    pub use super::web::{
        // Real interfaces.
        IEventTarget,
        INode,
        IElement,
        IHtmlElement,
        IBlob,
        // Mixins.
        IWindowOrWorker,
        IParentNode,
        INonElementParentNode
    };
    pub use super::web::error::{
        IDomException,
        IError
    };
    pub use super::web::event::{
        IEvent,
        IUiEvent,
        IMouseEvent,
        IPointerEvent,
        IKeyboardEvent,
        IProgressEvent,
        IMessageEvent,
        IFocusEvent
    };
}
// Internal machinery needed by this crate's macros. Hidden from rustdoc and
// not part of the public API contract.
#[doc(hidden)]
pub mod private {
    pub use webcore::ffi::exports::*;
    pub use webcore::serialization::{
        JsSerialize,
        JsSerializeOwned,
        PreallocatedArena,
        SerializedValue
    };
    pub use webcore::newtype::{
        IntoNewtype,
        Newtype
    };
    #[cfg(feature = "serde")]
    pub use ecosystem::serde::{
        to_value,
        from_value
    };
    // Called by macro expansions on bindings that would otherwise trigger an
    // `unused_mut` warning, because an `allow` doesn't work there apparently.
    #[allow(dead_code)]
    #[inline(always)]
    pub fn noop< T >( _: &mut T ) {}
    // Placeholder error type used by macro expansions; its trait impls are
    // never actually called (they `unreachable!()`).
    // TODO: Remove this.
    #[derive(Debug)]
    pub struct TODO;
    impl ::std::fmt::Display for TODO {
        fn fmt( &self, _: &mut ::std::fmt::Formatter ) -> Result< (), ::std::fmt::Error > {
            unreachable!();
        }
    }
    impl ::std::error::Error for TODO {
        fn description( &self ) -> &str {
            unreachable!();
        }
    }
    pub use webcore::value::ConversionError;
}
|
#![crate_name = "piston"]
#![deny(missing_docs)]
#![warn(dead_code)]
#![feature(default_type_params)]
#![feature(globs)]
#![feature(if_let)]
//! A user friendly game engine written in Rust.
extern crate sync;
extern crate gfx;
extern crate gfx_graphics;
extern crate opengl_graphics;
extern crate sdl2;
extern crate sdl2_window;
extern crate window;
// Crates used to reexport.
extern crate "vecmath" as vecmath_lib;
extern crate "shader_version" as shader_version_lib;
extern crate "image" as image_lib;
extern crate "graphics" as graphics_lib;
extern crate "input" as input_lib;
extern crate "event" as event_lib;
extern crate "cam" as cam_lib;
extern crate "noise" as noise_lib;
extern crate "genmesh" as genmesh_lib;
extern crate "sprite" as sprite_lib;
extern crate "current" as current_lib;
extern crate "fps_counter" as fps_counter_lib;
extern crate "wavefront-obj" as wavefront_obj_lib;
extern crate "drag_controller" as drag_controller_lib;
extern crate "read_color" as read_color_lib;
extern crate "select_color" as select_color_lib;
extern crate "texture_packer" as texture_packer_lib;
extern crate "wire" as wire_lib;
extern crate "astar" as astar_lib;
extern crate "img_hash" as img_hash_lib;
extern crate "nalgebra" as nalgebra_lib;
extern crate "ncollide" as ncollide_lib;
// Reexports.
pub use shader_version_lib as shader_version;
pub use image_lib as image;
pub use graphics_lib as graphics;
pub use vecmath_lib as vecmath;
pub use input_lib as input;
pub use event_lib as event;
pub use cam_lib as cam;
pub use noise_lib as noise;
pub use genmesh_lib as genmesh;
pub use sprite_lib as sprite;
pub use current_lib as current;
pub use fps_counter_lib as fps_counter;
pub use wavefront_obj_lib as wavefront_obj;
pub use drag_controller_lib as drag_controller;
pub use texture_packer_lib as texture_packer;
pub use wire_lib as wire;
pub use astar_lib as astar;
pub use img_hash_lib as img_hash;
pub use nalgebra_lib as nalgebra;
pub use ncollide_lib as ncollide;
pub use sdl2_window::Sdl2Window as WindowBackEnd;
pub use event::{
Event,
Events,
NoWindow,
RenderArgs,
UpdateArgs,
Window,
WindowSettings,
};
pub use current::{
Get,
Set,
Modifier,
Current,
CurrentGuard,
};
use gfx_graphics::G2D;
use opengl_graphics::Gl;
use fps_counter::FPSCounter;
use gfx::{ DeviceHelper };
pub mod color {
    //! Reexported libraries for working with colors.
    pub use read_color_lib as read_color;
    pub use select_color_lib as select_color;
}
/// Initializes window and sets up current objects.
///
/// Creates every piece of shared state used by this module (SDL2 window, Gfx
/// GL device, OpenGL back-end, 2D renderer, frame and FPS counter), publishes
/// each one as a `Current` object, and then runs the user callback `f`.
/// The `current_*` accessors below are only meaningful while `f` is
/// executing, i.e. while the guards created here are alive.
pub fn start(
    opengl: shader_version::opengl::OpenGL,
    window_settings: WindowSettings,
    f: ||
) {
    let mut window = WindowBackEnd::new(
        opengl,
        window_settings,
    );
    // Let Gfx resolve GL function pointers through SDL2's loader.
    let mut device = gfx::GlDevice::new(|s| unsafe {
        std::mem::transmute(sdl2::video::gl_get_proc_address(s))
    });
    let mut gl = Gl::new(opengl);
    let mut g2d = G2D::new(&mut device);
    let mut renderer = device.create_renderer();
    // Size the Gfx frame to the initial window dimensions.
    let event::window::Size([w, h]) = window.get();
    let mut frame = gfx::Frame::new(w as u16, h as u16);
    let mut fps_counter = FPSCounter::new();
    // Install everything as `Current` objects for the duration of `f`.
    let window_guard = CurrentGuard::new(&mut window);
    let device_guard = CurrentGuard::new(&mut device);
    let gl_guard = CurrentGuard::new(&mut gl);
    let g2d_guard = CurrentGuard::new(&mut g2d);
    let renderer_guard = CurrentGuard::new(&mut renderer);
    let frame_guard = CurrentGuard::new(&mut frame);
    let fps_counter_guard = CurrentGuard::new(&mut fps_counter);
    f();
    // Drop the guards explicitly so the `Current` objects are unregistered as
    // soon as the callback returns.
    drop(window_guard);
    drop(device_guard);
    drop(gl_guard);
    drop(g2d_guard);
    drop(renderer_guard);
    drop(frame_guard);
    drop(fps_counter_guard);
}
// The accessors below hand out `Current` handles to the objects installed by
// `start`. They are `unsafe` because nothing ties a returned handle to the
// lifetime of the corresponding `CurrentGuard`; calling them outside of the
// `start` callback is invalid (NOTE(review): exact semantics come from the
// external `current` crate — confirm there).
/// The current window
pub unsafe fn current_window() -> Current<WindowBackEnd> { Current }
/// The current Gfx device
pub unsafe fn current_gfx_device() -> Current<gfx::GlDevice> { Current }
/// The current opengl_graphics back-end
pub unsafe fn current_gl() -> Current<Gl> { Current }
/// The current gfx_graphics back-end
pub unsafe fn current_g2d() -> Current<G2D> { Current }
/// The current Gfx renderer
pub unsafe fn current_renderer() -> Current<gfx::Renderer<gfx::GlCommandBuffer>> { Current }
/// The current Gfx frame
pub unsafe fn current_frame() -> Current<gfx::Frame> { Current }
/// The current FPS counter
pub unsafe fn current_fps_counter() -> Current<FPSCounter> { Current }
/// Returns an event iterator for the event loop
pub fn events() -> event::Events<Current<WindowBackEnd>> {
    unsafe {
        Events::new(current_window())
    }
}
/// Updates the FPS counter and gets the frames per second.
pub fn fps_tick() -> uint {
    unsafe {
        current_fps_counter().tick()
    }
}
/// Sets title of the current window.
pub fn set_title(text: String) {
    unsafe {
        current_window().set_mut(window::Title(text));
    }
}
/// Renders 2D graphics using Gfx.
///
/// Draws through the current `G2D`/renderer/frame installed by `start`.
/// When `bg_color` is `Some`, it is painted first, before `f` is invoked
/// with the drawing context and back-end.
pub fn render_2d_gfx(
    bg_color: Option<[f32, ..4]>,
    f: |&graphics::Context,
        &mut gfx_graphics::GraphicsBackEnd<gfx::GlCommandBuffer>|
) {
    use gfx::Device;
    unsafe {
        current_g2d().draw(
            &mut *current_renderer(),
            &*current_frame(),
            |c, g| {
                use graphics::*;
                if let Some(bg_color) = bg_color {
                    c.color(bg_color).draw(g);
                }
                f(&c, g);
            });
        // Submit the recorded commands to the device, then clear the command
        // buffer for the next frame.
        current_gfx_device().submit(current_renderer().as_buffer());
        current_renderer().reset();
    }
}
/// Renders 2D graphics using OpenGL.
///
/// Same contract as `render_2d_gfx`, but draws through the
/// `opengl_graphics` back-end instead of Gfx.
pub fn render_2d_opengl(
    bg_color: Option<[f32, ..4]>,
    f: |&graphics::Context,
        &mut opengl_graphics::Gl|
) {
    unsafe {
        use graphics::*;
        let gl = &mut *current_gl();
        // Match the GL viewport to the current window size.
        let window::Size([w, h]) = current_window().get();
        gl.viewport(0, 0, w as i32, h as i32);
        gl.clear_program();
        let c = Context::abs(w as f64, h as f64);
        if let Some(bg_color) = bg_color {
            c.color(bg_color).draw(gl);
        }
        f(&c, gl);
    }
}
// Removed unused `sync` dependency.
// Closes https://github.com/PistonDevelopers/piston/issues/739
#![crate_name = "piston"]
#![deny(missing_docs)]
#![warn(dead_code)]
#![feature(default_type_params)]
#![feature(globs)]
#![feature(if_let)]
//! A user friendly game engine written in Rust.
extern crate gfx;
extern crate gfx_graphics;
extern crate opengl_graphics;
extern crate sdl2;
extern crate sdl2_window;
extern crate window;
// Crates used to reexport.
extern crate "vecmath" as vecmath_lib;
extern crate "shader_version" as shader_version_lib;
extern crate "image" as image_lib;
extern crate "graphics" as graphics_lib;
extern crate "input" as input_lib;
extern crate "event" as event_lib;
extern crate "cam" as cam_lib;
extern crate "noise" as noise_lib;
extern crate "genmesh" as genmesh_lib;
extern crate "sprite" as sprite_lib;
extern crate "current" as current_lib;
extern crate "fps_counter" as fps_counter_lib;
extern crate "wavefront-obj" as wavefront_obj_lib;
extern crate "drag_controller" as drag_controller_lib;
extern crate "read_color" as read_color_lib;
extern crate "select_color" as select_color_lib;
extern crate "texture_packer" as texture_packer_lib;
extern crate "wire" as wire_lib;
extern crate "astar" as astar_lib;
extern crate "img_hash" as img_hash_lib;
extern crate "nalgebra" as nalgebra_lib;
extern crate "ncollide" as ncollide_lib;
// Reexports.
pub use shader_version_lib as shader_version;
pub use image_lib as image;
pub use graphics_lib as graphics;
pub use vecmath_lib as vecmath;
pub use input_lib as input;
pub use event_lib as event;
pub use cam_lib as cam;
pub use noise_lib as noise;
pub use genmesh_lib as genmesh;
pub use sprite_lib as sprite;
pub use current_lib as current;
pub use fps_counter_lib as fps_counter;
pub use wavefront_obj_lib as wavefront_obj;
pub use drag_controller_lib as drag_controller;
pub use texture_packer_lib as texture_packer;
pub use wire_lib as wire;
pub use astar_lib as astar;
pub use img_hash_lib as img_hash;
pub use nalgebra_lib as nalgebra;
pub use ncollide_lib as ncollide;
pub use sdl2_window::Sdl2Window as WindowBackEnd;
pub use event::{
Event,
Events,
NoWindow,
RenderArgs,
UpdateArgs,
Window,
WindowSettings,
};
pub use current::{
Get,
Set,
Modifier,
Current,
CurrentGuard,
};
use gfx_graphics::G2D;
use opengl_graphics::Gl;
use fps_counter::FPSCounter;
use gfx::{ DeviceHelper };
pub mod color {
    //! Reexported libraries for working with colors.
    pub use read_color_lib as read_color;
    pub use select_color_lib as select_color;
}
/// Initializes window and sets up current objects.
///
/// Creates every piece of shared state used by this module (SDL2 window, Gfx
/// GL device, OpenGL back-end, 2D renderer, frame and FPS counter), publishes
/// each one as a `Current` object, and then runs the user callback `f`.
/// The `current_*` accessors below are only meaningful while `f` is
/// executing, i.e. while the guards created here are alive.
pub fn start(
    opengl: shader_version::opengl::OpenGL,
    window_settings: WindowSettings,
    f: ||
) {
    let mut window = WindowBackEnd::new(
        opengl,
        window_settings,
    );
    // Let Gfx resolve GL function pointers through SDL2's loader.
    let mut device = gfx::GlDevice::new(|s| unsafe {
        std::mem::transmute(sdl2::video::gl_get_proc_address(s))
    });
    let mut gl = Gl::new(opengl);
    let mut g2d = G2D::new(&mut device);
    let mut renderer = device.create_renderer();
    // Size the Gfx frame to the initial window dimensions.
    let event::window::Size([w, h]) = window.get();
    let mut frame = gfx::Frame::new(w as u16, h as u16);
    let mut fps_counter = FPSCounter::new();
    // Install everything as `Current` objects for the duration of `f`.
    let window_guard = CurrentGuard::new(&mut window);
    let device_guard = CurrentGuard::new(&mut device);
    let gl_guard = CurrentGuard::new(&mut gl);
    let g2d_guard = CurrentGuard::new(&mut g2d);
    let renderer_guard = CurrentGuard::new(&mut renderer);
    let frame_guard = CurrentGuard::new(&mut frame);
    let fps_counter_guard = CurrentGuard::new(&mut fps_counter);
    f();
    // Drop the guards explicitly so the `Current` objects are unregistered as
    // soon as the callback returns.
    drop(window_guard);
    drop(device_guard);
    drop(gl_guard);
    drop(g2d_guard);
    drop(renderer_guard);
    drop(frame_guard);
    drop(fps_counter_guard);
}
// The accessors below hand out `Current` handles to the objects installed by
// `start`. They are `unsafe` because nothing ties a returned handle to the
// lifetime of the corresponding `CurrentGuard`; calling them outside of the
// `start` callback is invalid (NOTE(review): exact semantics come from the
// external `current` crate — confirm there).
/// The current window
pub unsafe fn current_window() -> Current<WindowBackEnd> { Current }
/// The current Gfx device
pub unsafe fn current_gfx_device() -> Current<gfx::GlDevice> { Current }
/// The current opengl_graphics back-end
pub unsafe fn current_gl() -> Current<Gl> { Current }
/// The current gfx_graphics back-end
pub unsafe fn current_g2d() -> Current<G2D> { Current }
/// The current Gfx renderer
pub unsafe fn current_renderer() -> Current<gfx::Renderer<gfx::GlCommandBuffer>> { Current }
/// The current Gfx frame
pub unsafe fn current_frame() -> Current<gfx::Frame> { Current }
/// The current FPS counter
pub unsafe fn current_fps_counter() -> Current<FPSCounter> { Current }
/// Returns an event iterator for the event loop
pub fn events() -> event::Events<Current<WindowBackEnd>> {
    unsafe {
        Events::new(current_window())
    }
}
/// Updates the FPS counter and gets the frames per second.
pub fn fps_tick() -> uint {
    unsafe {
        current_fps_counter().tick()
    }
}
/// Sets title of the current window.
pub fn set_title(text: String) {
    unsafe {
        current_window().set_mut(window::Title(text));
    }
}
/// Renders 2D graphics using Gfx.
///
/// Draws through the current `G2D`/renderer/frame installed by `start`.
/// When `bg_color` is `Some`, it is painted first, before `f` is invoked
/// with the drawing context and back-end.
pub fn render_2d_gfx(
    bg_color: Option<[f32, ..4]>,
    f: |&graphics::Context,
        &mut gfx_graphics::GraphicsBackEnd<gfx::GlCommandBuffer>|
) {
    use gfx::Device;
    unsafe {
        current_g2d().draw(
            &mut *current_renderer(),
            &*current_frame(),
            |c, g| {
                use graphics::*;
                if let Some(bg_color) = bg_color {
                    c.color(bg_color).draw(g);
                }
                f(&c, g);
            });
        // Submit the recorded commands to the device, then clear the command
        // buffer for the next frame.
        current_gfx_device().submit(current_renderer().as_buffer());
        current_renderer().reset();
    }
}
/// Renders 2D graphics using OpenGL.
///
/// Same contract as `render_2d_gfx`, but draws through the
/// `opengl_graphics` back-end instead of Gfx.
pub fn render_2d_opengl(
    bg_color: Option<[f32, ..4]>,
    f: |&graphics::Context,
        &mut opengl_graphics::Gl|
) {
    unsafe {
        use graphics::*;
        let gl = &mut *current_gl();
        // Match the GL viewport to the current window size.
        let window::Size([w, h]) = current_window().get();
        gl.viewport(0, 0, w as i32, h as i32);
        gl.clear_program();
        let c = Context::abs(w as f64, h as f64);
        if let Some(bg_color) = bg_color {
            c.color(bg_color).draw(gl);
        }
        f(&c, gl);
    }
}
|
#![crate_name = "vecmath"]
#![deny(missing_doc)]
//! A simple and generic library for vector math.
//!
//! Notice that row major is mathematical standard,
//! while OpenGL uses column major format.
//! This library supports both formats, prefixing functions with 'row_' or 'col_'.
//!
//! For row major affine transforms, use `Matrix2x3` (2D) and `Matrix3x4` (3D).
//! For column major affine transforms, use `Matrix3x2` (2D) and `Matrix4x3` (3D).
//!
//! If you are using `Matrix3` or `Matrix4`,
//! then you need to pick either row or column major.
//!
//! Notice that there are two kinds of transforms: Positions and vectors.
//! The vector transforms ignores the translate component.
//! For example, `row_mat2x3_transform_pos2` transforms a position.
//! `row_mat2x3_transform_vec2` transforms a vector.
use std::num::{One, Zero};
/// A 2D vector.
pub type Vector2<T> = [T, ..2];
/// A 3D vector.
pub type Vector3<T> = [T, ..3];
/// A 4D vector.
pub type Vector4<T> = [T, ..4];
/// A 2x3 matrix.
///
/// Stored as an array of 2 rows of 3 components each (the outer index is the
/// row, as used by the `row_` functions).
///
/// To multiply two matrices use `row_mat2x3_mul`.
pub type Matrix2x3<T> = [[T, ..3], ..2];
/// A 3x2 matrix.
///
/// Stored as an array of 3 columns of 2 components each (the outer index is
/// the column, as used by the `col_` functions).
///
/// To multiply two matrices use `col_mat3x2_mul`.
pub type Matrix3x2<T> = [[T, ..2], ..3];
/// A 3x3 matrix.
///
/// The outer array index is the row for the `row_` functions and the column
/// for the `col_` functions.
///
/// To multiply two matrices use `row_mat3_mul` or `col_mat3_mul`.
pub type Matrix3<T> = [[T, ..3], ..3];
/// A 3x4 matrix.
///
/// Stored as an array of 3 rows of 4 components each.
///
/// To multiply two matrices use `row_mat3x4_mul`.
pub type Matrix3x4<T> = [[T, ..4], ..3];
/// A 4x3 matrix.
///
/// Stored as an array of 4 columns of 3 components each.
///
/// To multiply two matrices use `col_mat4x3_mul`.
///
/// This format can also store vertices of a quad.
pub type Matrix4x3<T> = [[T, ..3], ..4];
/// A 4x4 matrix.
///
/// The outer array index is the row for the `row_` functions and the column
/// for the `col_` functions.
///
/// To multiply two matrices use `row_mat4_mul` or `col_mat4_mul`.
pub type Matrix4<T> = [[T, ..4], ..4];
// --- Single column / single row of a matrix product ---------------------
// For column-major matrices, `i` selects the `i`-th column of `b` (`b[i]`);
// for row-major matrices it selects the `i`-th row of `a` (`a[i]`).
// The `_vec`/`_pos` dot-product variants used for the non-square (affine)
// formats presumably supply an implicit homogeneous 0 or 1 for the missing
// component — confirm against their definitions elsewhere in this file.
/// Computes column vector in column matrix product.
///
/// The semantics of the order is the same as for row matrices.
#[inline(always)]
pub fn col_mat3x2_mul_col<T: Num + Copy>(
    a: Matrix3x2<T>,
    b: Matrix3x2<T>,
    i: uint
) -> Vector2<T> {
    [
        vec3_dot_vec2(col_mat3x2_row(a, 0), b[i]),
        vec3_dot_vec2(col_mat3x2_row(a, 1), b[i])
    ]
}
/// Computes column vector in column matrix product.
///
/// The semantics of the order is the same as for row matrices.
#[inline(always)]
pub fn col_mat3_mul_col<T: Num + Copy>(
    a: Matrix3<T>,
    b: Matrix3<T>,
    i: uint
) -> Vector3<T> {
    [
        vec3_dot(col_mat3_row(a, 0), b[i]),
        vec3_dot(col_mat3_row(a, 1), b[i]),
        vec3_dot(col_mat3_row(a, 2), b[i])
    ]
}
/// Computes column vector in column matrix product.
///
/// The semantics of the order is the same as for row matrices.
#[inline(always)]
pub fn col_mat4x3_mul_col<T: Num + Copy>(
    a: Matrix4x3<T>,
    b: Matrix4x3<T>,
    i: uint
) -> Vector3<T> {
    [
        vec4_dot_vec3(col_mat4x3_row(a, 0), b[i]),
        vec4_dot_vec3(col_mat4x3_row(a, 1), b[i]),
        vec4_dot_vec3(col_mat4x3_row(a, 2), b[i])
    ]
}
/// Computes column vector in column matrix product.
///
/// The semantics of the order is the same as for row matrices.
#[inline(always)]
pub fn col_mat4_mul_col<T: Num + Copy>(
    a: Matrix4<T>,
    b: Matrix4<T>,
    i: uint
) -> Vector4<T> {
    [
        vec4_dot(col_mat4_row(a, 0), b[i]),
        vec4_dot(col_mat4_row(a, 1), b[i]),
        vec4_dot(col_mat4_row(a, 2), b[i]),
        vec4_dot(col_mat4_row(a, 3), b[i])
    ]
}
/// Computes row vector in row matrix product.
#[inline(always)]
pub fn row_mat2x3_mul_row<T: Num + Copy>(
    a: Matrix2x3<T>,
    b: Matrix2x3<T>,
    i: uint
) -> Vector3<T> {
    [
        vec3_dot_vec2(a[i], row_mat2x3_col(b, 0)),
        vec3_dot_vec2(a[i], row_mat2x3_col(b, 1)),
        // Last column: the `_pos` dot treats `b`'s column as a position,
        // i.e. it includes the translation term.
        vec3_dot_pos2(a[i], row_mat2x3_col(b, 2))
    ]
}
/// Computes row vector in row matrix product.
#[inline(always)]
pub fn row_mat3_mul_row<T: Num + Copy>(
    a: Matrix3<T>,
    b: Matrix3<T>,
    i: uint
) -> Vector3<T> {
    [
        vec3_dot(a[i], row_mat3_col(b, 0)),
        vec3_dot(a[i], row_mat3_col(b, 1)),
        vec3_dot(a[i], row_mat3_col(b, 2)),
    ]
}
/// Computes row vector in row matrix product.
#[inline(always)]
pub fn row_mat3x4_mul_row<T: Num + Copy>(
    a: Matrix3x4<T>,
    b: Matrix3x4<T>,
    i: uint
) -> Vector4<T> {
    [
        vec4_dot_vec3(a[i], row_mat3x4_col(b, 0)),
        vec4_dot_vec3(a[i], row_mat3x4_col(b, 1)),
        vec4_dot_vec3(a[i], row_mat3x4_col(b, 2)),
        // Last column: the `_pos` dot treats `b`'s column as a position,
        // i.e. it includes the translation term.
        vec4_dot_pos3(a[i], row_mat3x4_col(b, 3))
    ]
}
/// Computes row vector in row matrix product.
#[inline(always)]
pub fn row_mat4_mul_row<T: Num + Copy>(
    a: Matrix4<T>,
    b: Matrix4<T>,
    i: uint
) -> Vector4<T> {
    [
        vec4_dot(a[i], row_mat4_col(b, 0)),
        vec4_dot(a[i], row_mat4_col(b, 1)),
        vec4_dot(a[i], row_mat4_col(b, 2)),
        vec4_dot(a[i], row_mat4_col(b, 3))
    ]
}
// --- Full matrix products -----------------------------------------------
// Each product is assembled column-by-column (col_) or row-by-row (row_)
// from the `*_mul_col` / `*_mul_row` helpers above.
/// Multiplies two matrices.
#[inline(always)]
pub fn col_mat3x2_mul<T: Num + Copy>(
    a: Matrix3x2<T>,
    b: Matrix3x2<T>
) -> Matrix3x2<T> {
    [
        col_mat3x2_mul_col(a, b, 0),
        col_mat3x2_mul_col(a, b, 1),
        col_mat3x2_mul_col(a, b, 2)
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn col_mat3_mul<T: Num + Copy>(
    a: Matrix3<T>,
    b: Matrix3<T>
) -> Matrix3<T> {
    [
        col_mat3_mul_col(a, b, 0),
        col_mat3_mul_col(a, b, 1),
        col_mat3_mul_col(a, b, 2)
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn col_mat4x3_mul<T: Num + Copy>(
    a: Matrix4x3<T>,
    b: Matrix4x3<T>
) -> Matrix4x3<T> {
    [
        col_mat4x3_mul_col(a, b, 0),
        col_mat4x3_mul_col(a, b, 1),
        col_mat4x3_mul_col(a, b, 2),
        col_mat4x3_mul_col(a, b, 3)
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn col_mat4_mul<T: Num + Copy>(
    a: Matrix4<T>,
    b: Matrix4<T>
) -> Matrix4<T> {
    [
        col_mat4_mul_col(a, b, 0),
        col_mat4_mul_col(a, b, 1),
        col_mat4_mul_col(a, b, 2),
        col_mat4_mul_col(a, b, 3)
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn row_mat2x3_mul<T: Num + Copy>(
    a: Matrix2x3<T>,
    b: Matrix2x3<T>
) -> Matrix2x3<T> {
    [
        row_mat2x3_mul_row(a, b, 0),
        row_mat2x3_mul_row(a, b, 1),
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn row_mat3_mul<T: Num + Copy>(
    a: Matrix3<T>,
    b: Matrix3<T>
) -> Matrix3<T> {
    [
        row_mat3_mul_row(a, b, 0),
        row_mat3_mul_row(a, b, 1),
        row_mat3_mul_row(a, b, 2)
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn row_mat3x4_mul<T: Num + Copy>(
    a: Matrix3x4<T>,
    b: Matrix3x4<T>
) -> Matrix3x4<T> {
    [
        row_mat3x4_mul_row(a, b, 0),
        row_mat3x4_mul_row(a, b, 1),
        row_mat3x4_mul_row(a, b, 2)
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn row_mat4_mul<T: Num + Copy>(
    a: Matrix4<T>,
    b: Matrix4<T>
) -> Matrix4<T> {
    [
        row_mat4_mul_row(a, b, 0),
        row_mat4_mul_row(a, b, 1),
        row_mat4_mul_row(a, b, 2),
        row_mat4_mul_row(a, b, 3)
    ]
}
// Smoke tests: multiplying two identity matrices of the non-square (affine)
// formats must compile and run; the result itself is unused.
#[test]
fn test_row_mat2x3_mul() {
    let a: Matrix2x3<f64> = mat2x3_id();
    let b = a;
    let _ = row_mat2x3_mul(a, b);
}
#[test]
fn test_row_mat3x4_mul() {
    let a: Matrix3x4<f64> = mat3x4_id();
    let b = a;
    let _ = row_mat3x4_mul(a, b);
}
// Identity constructors: ones on the main diagonal, zeros elsewhere
// (for the non-square formats, the square identity truncated or padded).
/// Constructs identity matrix.
#[inline(always)]
pub fn mat2x3_id<T: One + Zero + Copy>() -> Matrix2x3<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero, zero],
        [zero, one, zero]
    ]
}
/// Constructs identity matrix.
#[inline(always)]
pub fn mat3x2_id<T: One + Zero + Copy>() -> Matrix3x2<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero],
        [zero, one],
        [zero, zero]
    ]
}
/// Constructs identity matrix.
#[inline(always)]
pub fn mat3_id<T: One + Zero + Copy>() -> Matrix3<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero, zero],
        [zero, one, zero],
        [zero, zero, one]
    ]
}
/// Constructs identity matrix.
#[inline(always)]
pub fn mat3x4_id<T: One + Zero + Copy>() -> Matrix3x4<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero, zero, zero],
        [zero, one, zero, zero],
        [zero, zero, one, zero]
    ]
}
/// Constructs identity matrix.
#[inline(always)]
pub fn mat4x3_id<T: One + Zero + Copy>() -> Matrix4x3<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero, zero],
        [zero, one, zero],
        [zero, zero, one],
        [zero, zero, zero]
    ]
}
/// Constructs identity matrix.
#[inline(always)]
pub fn mat4_id<T: One + Zero + Copy>() -> Matrix4<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero, zero, zero],
        [zero, one, zero, zero],
        [zero, zero, one, zero],
        [zero, zero, zero, one]
    ]
}
// --- Component-type conversions -----------------------------------------
// Each cast converts every component through `NumCast::from` and therefore
// panics if a component cannot be represented in the target type `U`.
/// Converts a 2D vector to one with another component type.
///
/// # Panics
///
/// Panics if a component cannot be represented in `U`.
#[inline(always)]
pub fn vec2_cast<T: ToPrimitive + Copy, U: NumCast>(v: Vector2<T>) -> Vector2<U> {
    [
        NumCast::from(v[0]).unwrap(),
        NumCast::from(v[1]).unwrap()
    ]
}
/// Converts a 3D vector to one with another component type.
///
/// # Panics
///
/// Panics if a component cannot be represented in `U`.
#[inline(always)]
pub fn vec3_cast<T: ToPrimitive + Copy, U: NumCast>(v: Vector3<T>) -> Vector3<U> {
    [
        NumCast::from(v[0]).unwrap(),
        NumCast::from(v[1]).unwrap(),
        NumCast::from(v[2]).unwrap()
    ]
}
/// Converts a 4D vector to one with another component type.
///
/// # Panics
///
/// Panics if a component cannot be represented in `U`.
#[inline(always)]
pub fn vec4_cast<T: ToPrimitive + Copy, U: NumCast>(v: Vector4<T>) -> Vector4<U> {
    [
        NumCast::from(v[0]).unwrap(),
        NumCast::from(v[1]).unwrap(),
        NumCast::from(v[2]).unwrap(),
        NumCast::from(v[3]).unwrap()
    ]
}
/// Converts a 2x3 matrix to one with another component type.
///
/// # Panics
///
/// Panics if a component cannot be represented in `U`.
#[inline(always)]
pub fn mat2x3_cast<T: ToPrimitive + Copy, U: NumCast>(m: Matrix2x3<T>) -> Matrix2x3<U> {
    [vec3_cast(m[0]), vec3_cast(m[1])]
}
/// Converts a 3x2 matrix to one with another component type.
///
/// # Panics
///
/// Panics if a component cannot be represented in `U`.
#[inline(always)]
pub fn mat3x2_cast<T: ToPrimitive + Copy, U: NumCast>(m: Matrix3x2<T>) -> Matrix3x2<U> {
    [vec2_cast(m[0]), vec2_cast(m[1]), vec2_cast(m[2])]
}
/// Converts a 3x3 matrix to one with another component type.
///
/// # Panics
///
/// Panics if a component cannot be represented in `U`.
#[inline(always)]
pub fn mat3_cast<T: ToPrimitive + Copy, U: NumCast>(m: Matrix3<T>) -> Matrix3<U> {
    [vec3_cast(m[0]), vec3_cast(m[1]), vec3_cast(m[2])]
}
/// Converts a 3x4 matrix to one with another component type.
///
/// # Panics
///
/// Panics if a component cannot be represented in `U`.
#[inline(always)]
pub fn mat3x4_cast<T: ToPrimitive + Copy, U: NumCast>(m: Matrix3x4<T>) -> Matrix3x4<U> {
    [vec4_cast(m[0]), vec4_cast(m[1]), vec4_cast(m[2])]
}
/// Converts a 4x3 matrix to one with another component type.
///
/// # Panics
///
/// Panics if a component cannot be represented in `U`.
#[inline(always)]
pub fn mat4x3_cast<T: ToPrimitive + Copy, U: NumCast>(m: Matrix4x3<T>) -> Matrix4x3<U> {
    [vec3_cast(m[0]), vec3_cast(m[1]), vec3_cast(m[2]), vec3_cast(m[3])]
}
/// Converts a 4x4 matrix to one with another component type.
///
/// # Panics
///
/// Panics if a component cannot be represented in `U`.
#[inline(always)]
pub fn mat4_cast<T: ToPrimitive + Copy, U: NumCast>(m: Matrix4<T>) -> Matrix4<U> {
    [vec4_cast(m[0]), vec4_cast(m[1]), vec4_cast(m[2]), vec4_cast(m[3])]
}
// --- Component-wise subtraction and addition ----------------------------
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn vec2_sub<T: Num>(a: Vector2<T>, b: Vector2<T>) -> Vector2<T> {
    [
        a[0] - b[0],
        a[1] - b[1],
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn vec3_sub<T: Num>(a: Vector3<T>, b: Vector3<T>) -> Vector3<T> {
    [
        a[0] - b[0],
        a[1] - b[1],
        a[2] - b[2],
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn vec4_sub<T: Num>(a: Vector4<T>, b: Vector4<T>) -> Vector4<T> {
    [
        a[0] - b[0],
        a[1] - b[1],
        a[2] - b[2],
        a[3] - b[3]
    ]
}
// Matrix subtraction delegates to the vector versions, one outer element
// (row or column) at a time.
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn mat2x3_sub<T: Num + Copy>(a: Matrix2x3<T>, b: Matrix2x3<T>) -> Matrix2x3<T> {
    [
        vec3_sub(a[0], b[0]),
        vec3_sub(a[1], b[1])
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn mat3x2_sub<T: Num + Copy>(a: Matrix3x2<T>, b: Matrix3x2<T>) -> Matrix3x2<T> {
    [
        vec2_sub(a[0], b[0]),
        vec2_sub(a[1], b[1]),
        vec2_sub(a[2], b[2])
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn mat3_sub<T: Num + Copy>(a: Matrix3<T>, b: Matrix3<T>) -> Matrix3<T> {
    [
        vec3_sub(a[0], b[0]),
        vec3_sub(a[1], b[1]),
        vec3_sub(a[2], b[2])
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn mat3x4_sub<T: Num + Copy>(a: Matrix3x4<T>, b: Matrix3x4<T>) -> Matrix3x4<T> {
    [
        vec4_sub(a[0], b[0]),
        vec4_sub(a[1], b[1]),
        vec4_sub(a[2], b[2])
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn mat4x3_sub<T: Num + Copy>(a: Matrix4x3<T>, b: Matrix4x3<T>) -> Matrix4x3<T> {
    [
        vec3_sub(a[0], b[0]),
        vec3_sub(a[1], b[1]),
        vec3_sub(a[2], b[2]),
        vec3_sub(a[3], b[3])
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn mat4_sub<T: Num + Copy>(a: Matrix4<T>, b: Matrix4<T>) -> Matrix4<T> {
    [
        vec4_sub(a[0], b[0]),
        vec4_sub(a[1], b[1]),
        vec4_sub(a[2], b[2]),
        vec4_sub(a[3], b[3])
    ]
}
/// Adds two vectors.
#[inline(always)]
pub fn vec2_add<T: Num>(a: Vector2<T>, b: Vector2<T>) -> Vector2<T> {
    [
        a[0] + b[0],
        a[1] + b[1],
    ]
}
/// Adds two vectors.
#[inline(always)]
pub fn vec3_add<T: Num>(a: Vector3<T>, b: Vector3<T>) -> Vector3<T> {
    [
        a[0] + b[0],
        a[1] + b[1],
        a[2] + b[2]
    ]
}
/// Adds two vectors.
#[inline(always)]
pub fn vec4_add<T: Num>(a: Vector4<T>, b: Vector4<T>) -> Vector4<T> {
    [
        a[0] + b[0],
        a[1] + b[1],
        a[2] + b[2],
        a[3] + b[3]
    ]
}
// Matrix addition delegates to the vector versions, one outer element
// (row or column) at a time.
/// Adds two matrices.
#[inline(always)]
pub fn mat2x3_add<T: Num + Copy>(a: Matrix2x3<T>, b: Matrix2x3<T>) -> Matrix2x3<T> {
    [
        vec3_add(a[0], b[0]),
        vec3_add(a[1], b[1])
    ]
}
/// Adds two matrices.
#[inline(always)]
pub fn mat3x2_add<T: Num + Copy>(a: Matrix3x2<T>, b: Matrix3x2<T>) -> Matrix3x2<T> {
    [
        vec2_add(a[0], b[0]),
        vec2_add(a[1], b[1]),
        vec2_add(a[2], b[2])
    ]
}
/// Adds two matrices.
#[inline(always)]
pub fn mat3_add<T: Num + Copy>(a: Matrix3<T>, b: Matrix3<T>) -> Matrix3<T> {
    [
        vec3_add(a[0], b[0]),
        vec3_add(a[1], b[1]),
        vec3_add(a[2], b[2])
    ]
}
/// Adds two matrices.
#[inline(always)]
pub fn mat3x4_add<T: Num + Copy>(a: Matrix3x4<T>, b: Matrix3x4<T>) -> Matrix3x4<T> {
    [
        vec4_add(a[0], b[0]),
        vec4_add(a[1], b[1]),
        vec4_add(a[2], b[2])
    ]
}
/// Adds two matrices.
#[inline(always)]
pub fn mat4x3_add<T: Num + Copy>(a: Matrix4x3<T>, b: Matrix4x3<T>) -> Matrix4x3<T> {
    [
        vec3_add(a[0], b[0]),
        vec3_add(a[1], b[1]),
        vec3_add(a[2], b[2]),
        vec3_add(a[3], b[3])
    ]
}
/// Adds two matrices.
#[inline(always)]
pub fn mat4_add<T: Num + Copy>(a: Matrix4<T>, b: Matrix4<T>) -> Matrix4<T> {
    [
        vec4_add(a[0], b[0]),
        vec4_add(a[1], b[1]),
        vec4_add(a[2], b[2]),
        vec4_add(a[3], b[3])
    ]
}
/// Multiplies two vectors component wise.
#[inline(always)]
pub fn vec2_mul<T: Num>(a: Vector2<T>, b: Vector2<T>) -> Vector2<T> {
    [a[0] * b[0], a[1] * b[1]]
}
/// Multiplies two vectors component wise.
#[inline(always)]
pub fn vec3_mul<T: Num>(a: Vector3<T>, b: Vector3<T>) -> Vector3<T> {
    [a[0] * b[0], a[1] * b[1], a[2] * b[2]]
}
/// Multiplies two vectors component wise.
#[inline(always)]
pub fn vec4_mul<T: Num>(a: Vector4<T>, b: Vector4<T>) -> Vector4<T> {
    [a[0] * b[0], a[1] * b[1], a[2] * b[2], a[3] * b[3]]
}
/// Computes the dot product.
#[inline(always)]
pub fn vec2_dot<T: Num>(a: Vector2<T>, b: Vector2<T>) -> T {
    a[0] * b[0] + a[1] * b[1]
}
/// Computes the dot product.
#[inline(always)]
pub fn vec3_dot<T: Num>(a: Vector3<T>, b: Vector3<T>) -> T {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
}
/// Computes the dot product.
#[inline(always)]
pub fn vec4_dot<T: Num>(a: Vector4<T>, b: Vector4<T>) -> T {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3]
}
/// Computes the square length of a vector.
///
/// Equivalent to the dot product of the vector with itself;
/// avoids the square root needed for the actual length.
#[inline(always)]
pub fn vec2_square_len<T: Num>(a: Vector2<T>) -> T {
    a[0] * a[0] + a[1] * a[1]
}
/// Computes the square length of a vector.
#[inline(always)]
pub fn vec3_square_len<T: Num>(a: Vector3<T>) -> T {
    a[0] * a[0] + a[1] * a[1] + a[2] * a[2]
}
/// Computes the square length of a vector.
#[inline(always)]
pub fn vec4_square_len<T: Num>(a: Vector4<T>) -> T {
    a[0] * a[0] + a[1] * a[1] + a[2] * a[2] + a[3] * a[3]
}
/// Computes the cross product.
///
/// The 2D cross product is a scalar: the z component of the 3D cross
/// product of the two vectors embedded in the xy plane.
#[inline(always)]
pub fn vec2_cross<T: Num>(a: Vector2<T>, b: Vector2<T>) -> T {
    a[0] * b[1] - a[1] * b[0]
}
/// Computes the cross product.
#[inline(always)]
pub fn vec3_cross<T: Num>(a: Vector3<T>, b: Vector3<T>) -> Vector3<T> {
    [
        a[1] * b[2] - a[2] * b[1],
        a[2] * b[0] - a[0] * b[2],
        a[0] * b[1] - a[1] * b[0]
    ]
}
/// Multiplies the vector with a scalar.
#[inline(always)]
pub fn vec2_scale<T: Num>(a: Vector2<T>, b: T) -> Vector2<T> {
    [
        a[0] * b,
        a[1] * b
    ]
}
/// Multiplies the vector with a scalar.
#[inline(always)]
pub fn vec3_scale<T: Num>(a: Vector3<T>, b: T) -> Vector3<T> {
    [
        a[0] * b,
        a[1] * b,
        a[2] * b
    ]
}
/// Multiplies the vector with a scalar.
#[inline(always)]
pub fn vec4_scale<T: Num>(a: Vector4<T>, b: T) -> Vector4<T> {
    [
        a[0] * b,
        a[1] * b,
        a[2] * b,
        a[3] * b
    ]
}
/// Computes the length of vector.
#[inline(always)]
pub fn vec2_len<T: Float>(a: Vector2<T>) -> T {
    vec2_square_len(a).sqrt()
}
/// Computes the length of vector.
#[inline(always)]
pub fn vec3_len<T: Float>(a: Vector3<T>) -> T {
    vec3_square_len(a).sqrt()
}
/// Computes the length of vector.
#[inline(always)]
pub fn vec4_len<T: Float>(a: Vector4<T>) -> T {
    vec4_square_len(a).sqrt()
}
/// Computes the inverse length of a vector.
///
/// No zero-length check is performed; for a zero vector the result
/// follows T's division semantics (infinity for floats).
#[inline(always)]
pub fn vec2_inv_len<T: Float>(a: Vector2<T>) -> T {
    let one: T = One::one();
    one / vec2_len(a)
}
/// Computes the inverse length of a vector.
#[inline(always)]
pub fn vec3_inv_len<T: Float>(a: Vector3<T>) -> T {
    let one: T = One::one();
    one / vec3_len(a)
}
/// Computes the inverse length of a vector.
#[inline(always)]
pub fn vec4_inv_len<T: Float>(a: Vector4<T>) -> T {
    let one: T = One::one();
    one / vec4_len(a)
}
/// Computes the normalized.
///
/// Scales the vector by the reciprocal of its length;
/// a zero vector produces non-finite components.
#[inline(always)]
pub fn vec2_normalized<T: Float>(a: Vector2<T>) -> Vector2<T> {
    vec2_scale(a, vec2_inv_len(a))
}
/// Computes the normalized.
#[inline(always)]
pub fn vec3_normalized<T: Float>(a: Vector3<T>) -> Vector3<T> {
    vec3_scale(a, vec3_inv_len(a))
}
/// Computes the normalized.
#[inline(always)]
pub fn vec4_normalized<T: Float>(a: Vector4<T>) -> Vector4<T> {
    vec4_scale(a, vec4_inv_len(a))
}
/// Computes the normalized difference between two vectors.
///
/// This is often used to get direction from 'b' to 'a'.
#[inline(always)]
pub fn vec2_normalized_sub<T: Float>(
    a: Vector2<T>,
    b: Vector2<T>
) -> Vector2<T> {
    vec2_normalized(vec2_sub(a, b))
}
/// Computes the normalized difference between two vectors.
///
/// This is often used to get direction from 'b' to 'a'.
#[inline(always)]
pub fn vec3_normalized_sub<T: Float>(
    a: Vector3<T>,
    b: Vector3<T>
) -> Vector3<T> {
    vec3_normalized(vec3_sub(a, b))
}
/// Computes the normalized difference between two vectors.
///
/// This is often used to get direction from 'b' to 'a'.
#[inline(always)]
pub fn vec4_normalized_sub<T: Float>(
    a: Vector4<T>,
    b: Vector4<T>
) -> Vector4<T> {
    vec4_normalized(vec4_sub(a, b))
}
/// Computes transformed vector component.
///
/// This is used when transforming vectors through matrices.
/// Only the first two components of 'a' are used; the third
/// (translation) component is ignored, so directions are unaffected
/// by translation.
#[inline(always)]
pub fn vec3_dot_vec2<T: Num>(a: Vector3<T>, b: Vector2<T>) -> T {
    a[0] * b[0] + a[1] * b[1]
}
/// Computes transformed vector component.
///
/// This is used when transforming vectors through matrices.
/// The fourth (translation) component of 'a' is ignored.
#[inline(always)]
pub fn vec4_dot_vec3<T: Num>(a: Vector4<T>, b: Vector3<T>) -> T {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
}
/// Computes transformed position component.
///
/// This is used when transforming points through matrices.
/// Adds the translation term a[2], i.e. treats 'b' as a homogeneous
/// point with an implicit 1 in its last coordinate.
#[inline(always)]
pub fn vec3_dot_pos2<T: Num + Copy>(a: Vector3<T>, b: Vector2<T>) -> T {
    vec3_dot_vec2(a, b) + a[2]
}
/// Computes transformed position component.
///
/// This is used when transforming points through matrices.
/// Adds the translation term a[3] for the implicit homogeneous 1.
#[inline(always)]
pub fn vec4_dot_pos3<T: Num + Copy>(a: Vector4<T>, b: Vector3<T>) -> T {
    vec4_dot_vec3(a, b) + a[3]
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn row_mat2x3_col<T: Copy>(mat: Matrix2x3<T>, i: uint) -> Vector2<T> {
    [mat[0][i], mat[1][i]]
}
/// Returns a row vector of a column matrix.
///
/// A column matrix stored in this layout is the transpose of the row
/// matrix, so its row i is exactly the row matrix's column i.
#[inline(always)]
pub fn col_mat2x3_row<T: Copy>(mat: Matrix2x3<T>, i: uint) -> Vector2<T> {
    row_mat2x3_col(mat, i)
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn row_mat3x2_col<T: Copy>(a: Matrix3x2<T>, i: uint) -> Vector3<T> {
    [a[0][i], a[1][i], a[2][i]]
}
/// Returns a row vector of a column matrix.
#[inline(always)]
pub fn col_mat3x2_row<T: Copy>(a: Matrix3x2<T>, i: uint) -> Vector3<T> {
    row_mat3x2_col(a, i)
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn row_mat3_col<T: Copy>(a: Matrix3<T>, i: uint) -> Vector3<T> {
    [a[0][i], a[1][i], a[2][i]]
}
/// Returns a row vector of a column matrix.
#[inline(always)]
pub fn col_mat3_row<T: Copy>(a: Matrix3<T>, i: uint) -> Vector3<T> {
    row_mat3_col(a, i)
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn row_mat3x4_col<T: Copy>(mat: Matrix3x4<T>, i: uint) -> Vector3<T> {
    [mat[0][i], mat[1][i], mat[2][i]]
}
/// Returns a row vector of a column matrix.
#[inline(always)]
pub fn col_mat3x4_row<T: Copy>(mat: Matrix3x4<T>, i: uint) -> Vector3<T> {
    row_mat3x4_col(mat, i)
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn row_mat4x3_col<T: Copy>(a: Matrix4x3<T>, i: uint) -> Vector4<T> {
    [a[0][i], a[1][i], a[2][i], a[3][i]]
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn col_mat4x3_row<T: Copy>(a: Matrix4x3<T>, i: uint) -> Vector4<T> {
    row_mat4x3_col(a, i)
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn row_mat4_col<T: Copy>(a: Matrix4<T>, i: uint) -> Vector4<T> {
    [a[0][i], a[1][i], a[2][i], a[3][i]]
}
/// Returns a row vector of a column matrix.
#[inline(always)]
pub fn col_mat4_row<T: Copy>(a: Matrix4<T>, i: uint) -> Vector4<T> {
    row_mat4_col(a, i)
}
/// Constructs the transpose of a matrix.
///
/// Each row of the result is the corresponding column of the input.
#[inline(always)]
pub fn mat2x3_transposed<T: Copy>(a: Matrix2x3<T>) -> Matrix3x2<T> {
    [
        row_mat2x3_col(a, 0),
        row_mat2x3_col(a, 1),
        row_mat2x3_col(a, 2)
    ]
}
/// Constructs the transpose of a matrix.
#[inline(always)]
pub fn mat3x2_transposed<T: Copy>(a: Matrix3x2<T>) -> Matrix2x3<T> {
    [
        row_mat3x2_col(a, 0),
        row_mat3x2_col(a, 1)
    ]
}
/// Constructs the transpose of a matrix.
#[inline(always)]
pub fn mat3_transposed<T: Copy>(a: Matrix3<T>) -> Matrix3<T> {
    [
        row_mat3_col(a, 0),
        row_mat3_col(a, 1),
        row_mat3_col(a, 2)
    ]
}
/// Constructs the transpose of a matrix.
#[inline(always)]
pub fn mat3x4_transposed<T: Copy>(a: Matrix3x4<T>) -> Matrix4x3<T> {
    [
        row_mat3x4_col(a, 0),
        row_mat3x4_col(a, 1),
        row_mat3x4_col(a, 2),
        row_mat3x4_col(a, 3)
    ]
}
/// Constructs the transpose of a matrix.
#[inline(always)]
pub fn mat4x3_transposed<T: Copy>(a: Matrix4x3<T>) -> Matrix3x4<T> {
    [
        row_mat4x3_col(a, 0),
        row_mat4x3_col(a, 1),
        row_mat4x3_col(a, 2)
    ]
}
/// Constructs the transpose of a matrix.
#[inline(always)]
pub fn mat4_transposed<T: Copy>(a: Matrix4<T>) -> Matrix4<T> {
    [
        row_mat4_col(a, 0),
        row_mat4_col(a, 1),
        row_mat4_col(a, 2),
        row_mat4_col(a, 3)
    ]
}
/// Transforms a 3D vector through a matrix.
///
/// Column-major variant: each result component is the dot product of a
/// row of the (column-major) matrix with the vector.
#[inline(always)]
pub fn col_mat3_transform<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec3_dot(col_mat3_row(mat, 0), a),
        vec3_dot(col_mat3_row(mat, 1), a),
        vec3_dot(col_mat3_row(mat, 2), a)
    ]
}
/// Transforms a 4D vector through a matrix.
#[inline(always)]
pub fn col_mat4_transform<T: Num + Copy>(
    mat: Matrix4<T>,
    a: Vector4<T>
) -> Vector4<T> {
    [
        vec4_dot(col_mat4_row(mat, 0), a),
        vec4_dot(col_mat4_row(mat, 1), a),
        vec4_dot(col_mat4_row(mat, 2), a),
        vec4_dot(col_mat4_row(mat, 3), a)
    ]
}
/// Transforms a 3D vector through a matrix.
///
/// Row-major variant: rows can be used directly without transposition.
#[inline(always)]
pub fn row_mat3_transform<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec3_dot(mat[0], a),
        vec3_dot(mat[1], a),
        vec3_dot(mat[2], a)
    ]
}
/// Transforms a 4D vector through a matrix.
#[inline(always)]
pub fn row_mat4_transform<T: Num + Copy>(
    mat: Matrix4<T>,
    a: Vector4<T>
) -> Vector4<T> {
    [
        vec4_dot(mat[0], a),
        vec4_dot(mat[1], a),
        vec4_dot(mat[2], a),
        vec4_dot(mat[3], a)
    ]
}
/// Transforms a 2D position through matrix.
///
/// Uses `vec3_dot_pos2`, so the translation column is included
/// (the point is treated as homogeneous with an implicit 1).
#[inline(always)]
pub fn row_mat2x3_transform_pos2<T: Num + Copy>(
    mat: Matrix2x3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_pos2(mat[0], a),
        vec3_dot_pos2(mat[1], a)
    ]
}
/// Transforms a 2D position through matrix.
#[inline(always)]
pub fn col_mat3x2_transform_pos2<T: Num + Copy>(
    mat: Matrix3x2<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_pos2(col_mat3x2_row(mat, 0), a),
        vec3_dot_pos2(col_mat3x2_row(mat, 1), a)
    ]
}
/// Transforms a 2D position through row matrix.
///
/// Only the first two rows are used; the bottom row of the 3x3 matrix
/// is assumed to be the affine [0, 0, 1].
#[inline(always)]
pub fn row_mat3_transform_pos2<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_pos2(mat[0], a),
        vec3_dot_pos2(mat[1], a)
    ]
}
/// Transforms a 2D position through column matrix.
#[inline(always)]
pub fn col_mat3_transform_pos2<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_pos2(col_mat3_row(mat, 0), a),
        vec3_dot_pos2(col_mat3_row(mat, 1), a)
    ]
}
/// Transforms a 3D position through matrix.
#[inline(always)]
pub fn row_mat3x4_transform_pos3<T: Num + Copy>(
    mat: Matrix3x4<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec4_dot_pos3(mat[0], a),
        vec4_dot_pos3(mat[1], a),
        vec4_dot_pos3(mat[2], a),
    ]
}
/// Transforms a 3D position through matrix.
#[inline(always)]
pub fn col_mat4x3_transform_pos3<T: Num + Copy>(
    mat: Matrix4x3<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec4_dot_pos3(col_mat4x3_row(mat, 0), a),
        vec4_dot_pos3(col_mat4x3_row(mat, 1), a),
        vec4_dot_pos3(col_mat4x3_row(mat, 2), a)
    ]
}
/// Transforms a 2D vector through matrix.
///
/// Uses `vec3_dot_vec2`, so the translation column is ignored —
/// directions are not translated.
#[inline(always)]
pub fn row_mat2x3_transform_vec2<T: Num + Copy>(
    mat: Matrix2x3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_vec2(mat[0], a),
        vec3_dot_vec2(mat[1], a)
    ]
}
/// Transforms a 2D vector through matrix.
#[inline(always)]
pub fn col_mat3x2_transform_vec2<T: Num + Copy>(
    mat: Matrix3x2<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_vec2(col_mat3x2_row(mat, 0), a),
        vec3_dot_vec2(col_mat3x2_row(mat, 1), a)
    ]
}
/// Transforms a 2D vector through row matrix.
#[inline(always)]
pub fn row_mat3_transform_vec2<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_vec2(mat[0], a),
        vec3_dot_vec2(mat[1], a)
    ]
}
/// Transforms a 2D vector through column matrix.
#[inline(always)]
pub fn col_mat3_transform_vec2<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_vec2(col_mat3_row(mat, 0), a),
        vec3_dot_vec2(col_mat3_row(mat, 1), a)
    ]
}
/// Transforms a 3D vector through matrix.
#[inline(always)]
pub fn row_mat3x4_transform_vec3<T: Num + Copy>(
    mat: Matrix3x4<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec4_dot_vec3(mat[0], a),
        vec4_dot_vec3(mat[1], a),
        vec4_dot_vec3(mat[2], a)
    ]
}
/// Transforms a 3D vector through matrix.
#[inline(always)]
pub fn col_mat4x3_transform_vec3<T: Num + Copy>(
    mat: Matrix4x3<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec4_dot_vec3(col_mat4x3_row(mat, 0), a),
        vec4_dot_vec3(col_mat4x3_row(mat, 1), a),
        vec4_dot_vec3(col_mat4x3_row(mat, 2), a)
    ]
}
/// Computes the determinant of a matrix.
///
/// For an affine matrix only the 2x2 linear part contributes;
/// the translation column is not referenced.
pub fn mat2x3_det<T: Num>(mat: Matrix2x3<T>) -> T {
    mat[0][0] * mat[1][1]
    - mat[0][1] * mat[1][0]
}
/// Computes the determinant of a matrix.
pub fn mat3x2_det<T: Num>(mat: Matrix3x2<T>) -> T {
    mat[0][0] * mat[1][1]
    - mat[0][1] * mat[1][0]
}
/// Computes the determinant of a matrix.
///
/// Rule of Sarrus: the three positive and three negative diagonal
/// products of the 3x3 matrix.
pub fn mat3_det<T: Num>(mat: Matrix3<T>) -> T {
    mat[0][0] * mat[1][1] * mat[2][2]
    + mat[0][1] * mat[1][2] * mat[2][0]
    + mat[0][2] * mat[1][0] * mat[2][1]
    - mat[0][0] * mat[1][2] * mat[2][1]
    - mat[0][1] * mat[1][0] * mat[2][2]
    - mat[0][2] * mat[1][1] * mat[2][0]
}
/// Computes the determinant of a matrix.
///
/// Only the 3x3 linear part contributes; the fourth (translation)
/// column is not referenced.
pub fn mat3x4_det<T: Num>(mat: Matrix3x4<T>) -> T {
    mat[0][0] * mat[1][1] * mat[2][2]
    + mat[0][1] * mat[1][2] * mat[2][0]
    + mat[0][2] * mat[1][0] * mat[2][1]
    - mat[0][0] * mat[1][2] * mat[2][1]
    - mat[0][1] * mat[1][0] * mat[2][2]
    - mat[0][2] * mat[1][1] * mat[2][0]
}
/// Computes the determinant of a matrix.
///
/// Only the 3x3 linear part contributes; the fourth (translation)
/// row is not referenced.
pub fn mat4x3_det<T: Num>(mat: Matrix4x3<T>) -> T {
    mat[0][0] * mat[1][1] * mat[2][2]
    + mat[0][1] * mat[1][2] * mat[2][0]
    + mat[0][2] * mat[1][0] * mat[2][1]
    - mat[0][0] * mat[1][2] * mat[2][1]
    - mat[0][1] * mat[1][0] * mat[2][2]
    - mat[0][2] * mat[1][1] * mat[2][0]
}
/// Computes the determinant of a 4x4 matrix.
///
/// Full Leibniz expansion: all 24 signed permutation products.
pub fn mat4_det<T: Num>(mat: Matrix4<T>) -> T {
    mat[0][0] * mat[1][1] * mat[2][2] * mat[3][3]
    + mat[0][0] * mat[1][2] * mat[2][3] * mat[3][1]
    + mat[0][0] * mat[1][3] * mat[2][1] * mat[3][2]
    + mat[0][1] * mat[1][0] * mat[2][3] * mat[3][2]
    + mat[0][1] * mat[1][2] * mat[2][0] * mat[3][3]
    + mat[0][1] * mat[1][3] * mat[2][2] * mat[3][0]
    + mat[0][2] * mat[1][0] * mat[2][1] * mat[3][3]
    + mat[0][2] * mat[1][1] * mat[2][3] * mat[3][0]
    + mat[0][2] * mat[1][3] * mat[2][0] * mat[3][1]
    + mat[0][3] * mat[1][0] * mat[2][2] * mat[3][1]
    + mat[0][3] * mat[1][1] * mat[2][0] * mat[3][2]
    + mat[0][3] * mat[1][2] * mat[2][1] * mat[3][0]
    - mat[0][0] * mat[1][1] * mat[2][3] * mat[3][2]
    - mat[0][0] * mat[1][2] * mat[2][1] * mat[3][3]
    - mat[0][0] * mat[1][3] * mat[2][2] * mat[3][1]
    - mat[0][1] * mat[1][0] * mat[2][2] * mat[3][3]
    - mat[0][1] * mat[1][2] * mat[2][3] * mat[3][0]
    - mat[0][1] * mat[1][3] * mat[2][0] * mat[3][2]
    - mat[0][2] * mat[1][0] * mat[2][3] * mat[3][1]
    - mat[0][2] * mat[1][1] * mat[2][0] * mat[3][3]
    - mat[0][2] * mat[1][3] * mat[2][1] * mat[3][0]
    - mat[0][3] * mat[1][0] * mat[2][1] * mat[3][2]
    - mat[0][3] * mat[1][1] * mat[2][2] * mat[3][0]
    - mat[0][3] * mat[1][2] * mat[2][0] * mat[3][1]
}
/// Computes inverse determinant of a 2x3 matrix.
///
/// No singularity check: when the determinant is zero the result
/// follows T's division semantics (infinity for floats, a divide
/// failure for integers).
#[inline(always)]
pub fn mat2x3_inv_det<T: Num>(mat: Matrix2x3<T>) -> T {
    let one: T = One::one();
    one / mat2x3_det(mat)
}
/// Computes inverse determinant of a 3x2 matrix.
#[inline(always)]
pub fn mat3x2_inv_det<T: Num>(mat: Matrix3x2<T>) -> T {
    let one: T = One::one();
    one / mat3x2_det(mat)
}
/// Computes inverse determinant of a 3x3 matrix.
#[inline(always)]
pub fn mat3_inv_det<T: Num>(mat: Matrix3<T>) -> T {
    let one: T = One::one();
    one / mat3_det(mat)
}
/// Computes inverse determinant of a 3x4 matrix.
#[inline(always)]
pub fn mat3x4_inv_det<T: Num>(mat: Matrix3x4<T>) -> T {
    let one: T = One::one();
    one / mat3x4_det(mat)
}
/// Computes inverse determinant of a 4x3 matrix.
#[inline(always)]
pub fn mat4x3_inv_det<T: Num>(mat: Matrix4x3<T>) -> T {
    let one: T = One::one();
    one / mat4x3_det(mat)
}
/// Computes the inverse determinant of a 4x4 matrix.
#[inline(always)]
pub fn mat4_inv_det<T: Num>(mat: Matrix4<T>) -> T {
    let one: T = One::one();
    one / mat4_det(mat)
}
/// Computes the inverse of a 2x3 matrix.
///
/// Adjugate (cofactor) formula for the affine 2D transform: each entry
/// is a cofactor of the input scaled by the inverse determinant.
/// No singularity check is performed.
pub fn mat2x3_inv<T: Num + Copy>(mat: Matrix2x3<T>) -> Matrix2x3<T> {
    let inv_det = mat2x3_inv_det(mat);
    [
        [
            mat[1][1] * inv_det,
            - mat[0][1] * inv_det,
            (
                mat[0][1] * mat[1][2]
                - mat[0][2] * mat[1][1]
            ) * inv_det
        ],
        [
            - mat[1][0] * inv_det,
            mat[0][0] * inv_det,
            (
                mat[0][2] * mat[1][0]
                - mat[0][0] * mat[1][2]
            ) * inv_det,
        ]
    ]
}
/// Computes the inverse of a 3x2 matrix.
///
/// Column-major analogue of `mat2x3_inv`; the last row holds the
/// inverted translation.
pub fn mat3x2_inv<T: Num + Copy>(mat: Matrix3x2<T>) -> Matrix3x2<T> {
    let inv_det = mat3x2_inv_det(mat);
    [
        [
            mat[1][1] * inv_det,
            - mat[0][1] * inv_det
        ],
        [
            - mat[1][0] * inv_det,
            mat[0][0] * inv_det
        ],
        [
            (
                mat[1][0] * mat[2][1]
                - mat[1][1] * mat[2][0]
            ) * inv_det,
            (
                mat[0][1] * mat[2][0]
                - mat[0][0] * mat[2][1]
            ) * inv_det
        ]
    ]
}
/// Computes the inverse of a 3x3 matrix.
///
/// Adjugate formula: entry (i, j) is the (j, i) cofactor times the
/// inverse determinant. No singularity check is performed.
pub fn mat3_inv<T: Num + Copy>(mat: Matrix3<T>) -> Matrix3<T> {
    let inv_det = mat3_inv_det(mat);
    [
        [ (
            mat[1][1] * mat[2][2]
            - mat[1][2] * mat[2][1]
        ) * inv_det,
        (
            mat[0][2] * mat[2][1]
            - mat[0][1] * mat[2][2]
        ) * inv_det,
        (
            mat[0][1] * mat[1][2]
            - mat[0][2] * mat[1][1]
        ) * inv_det
        ],
        [
            (
                mat[1][2] * mat[2][0]
                - mat[1][0] * mat[2][2]
            ) * inv_det,
            (
                mat[0][0] * mat[2][2]
                - mat[0][2] * mat[2][0]
            ) * inv_det,
            (
                mat[0][2] * mat[1][0]
                - mat[0][0] * mat[1][2]
            ) * inv_det
        ],
        [
            (
                mat[1][0] * mat[2][1]
                - mat[1][1] * mat[2][0]
            ) * inv_det,
            (
                mat[0][1] * mat[2][0]
                - mat[0][0] * mat[2][1]
            ) * inv_det,
            (
                mat[0][0] * mat[1][1]
                - mat[0][1] * mat[1][0]
            ) * inv_det
        ]
    ]
}
/// Computes the inverse of a 3x4 matrix.
///
/// Row-major 3D affine inverse: the first three columns are the
/// adjugate of the 3x3 linear part scaled by the inverse determinant;
/// the fourth column is the inverted translation. No singularity
/// check is performed.
pub fn mat3x4_inv<T: Num + Copy>(mat: Matrix3x4<T>) -> Matrix3x4<T> {
    let inv_det = mat3x4_inv_det(mat);
    [
        [ (
            mat[1][1] * mat[2][2]
            - mat[1][2] * mat[2][1]
        ) * inv_det,
        (
            mat[0][2] * mat[2][1]
            - mat[0][1] * mat[2][2]
        ) * inv_det,
        (
            mat[0][1] * mat[1][2]
            - mat[0][2] * mat[1][1]
        ) * inv_det,
        (
            mat[0][1] * mat[1][3] * mat[2][2]
            + mat[0][2] * mat[1][1] * mat[2][3]
            + mat[0][3] * mat[1][2] * mat[2][1]
            - mat[0][1] * mat[1][2] * mat[2][3]
            - mat[0][2] * mat[1][3] * mat[2][1]
            - mat[0][3] * mat[1][1] * mat[2][2]
        ) * inv_det
        ],
        [
            (
                mat[1][2] * mat[2][0]
                - mat[1][0] * mat[2][2]
            ) * inv_det,
            (
                mat[0][0] * mat[2][2]
                - mat[0][2] * mat[2][0]
            ) * inv_det,
            (
                mat[0][2] * mat[1][0]
                - mat[0][0] * mat[1][2]
            ) * inv_det,
            (
                mat[0][0] * mat[1][2] * mat[2][3]
                + mat[0][2] * mat[1][3] * mat[2][0]
                + mat[0][3] * mat[1][0] * mat[2][2]
                - mat[0][0] * mat[1][3] * mat[2][2]
                - mat[0][2] * mat[1][0] * mat[2][3]
                - mat[0][3] * mat[1][2] * mat[2][0]
            ) * inv_det
        ],
        [
            (
                mat[1][0] * mat[2][1]
                - mat[1][1] * mat[2][0]
            ) * inv_det,
            (
                mat[0][1] * mat[2][0]
                - mat[0][0] * mat[2][1]
            ) * inv_det,
            (
                mat[0][0] * mat[1][1]
                - mat[0][1] * mat[1][0]
            ) * inv_det,
            (
                mat[0][0] * mat[1][3] * mat[2][1]
                + mat[0][1] * mat[1][0] * mat[2][3]
                + mat[0][3] * mat[1][1] * mat[2][0]
                - mat[0][0] * mat[1][1] * mat[2][3]
                - mat[0][1] * mat[1][3] * mat[2][0]
                - mat[0][3] * mat[1][0] * mat[2][1]
            ) * inv_det
        ]
    ]
}
/// Computes the inverse of a 4x3 matrix.
///
/// Column-major 3D affine inverse: the first three rows are the
/// adjugate of the 3x3 linear part scaled by the inverse determinant;
/// the fourth row is the inverted translation. No singularity check
/// is performed.
pub fn mat4x3_inv<T: Num + Copy>(mat: Matrix4x3<T>) -> Matrix4x3<T> {
    let inv_det = mat4x3_inv_det(mat);
    [
        [ (
            mat[1][1] * mat[2][2]
            - mat[1][2] * mat[2][1]
        ) * inv_det,
        (
            mat[0][2] * mat[2][1]
            - mat[0][1] * mat[2][2]
        ) * inv_det,
        (
            mat[0][1] * mat[1][2]
            - mat[0][2] * mat[1][1]
        ) * inv_det
        ],
        [
            (
                mat[1][2] * mat[2][0]
                - mat[1][0] * mat[2][2]
            ) * inv_det,
            (
                mat[0][0] * mat[2][2]
                - mat[0][2] * mat[2][0]
            ) * inv_det,
            (
                mat[0][2] * mat[1][0]
                - mat[0][0] * mat[1][2]
            ) * inv_det
        ],
        [
            (
                mat[1][0] * mat[2][1]
                - mat[1][1] * mat[2][0]
            ) * inv_det,
            (
                mat[0][1] * mat[2][0]
                - mat[0][0] * mat[2][1]
            ) * inv_det,
            (
                mat[0][0] * mat[1][1]
                - mat[0][1] * mat[1][0]
            ) * inv_det
        ],
        [
            (
                mat[1][0] * mat[2][2] * mat[3][1]
                + mat[1][1] * mat[2][0] * mat[3][2]
                + mat[1][2] * mat[2][1] * mat[3][0]
                - mat[1][0] * mat[2][1] * mat[3][2]
                - mat[1][1] * mat[2][2] * mat[3][0]
                - mat[1][2] * mat[2][0] * mat[3][1]
            ) * inv_det,
            (
                mat[0][0] * mat[2][1] * mat[3][2]
                + mat[0][1] * mat[2][2] * mat[3][0]
                + mat[0][2] * mat[2][0] * mat[3][1]
                - mat[0][0] * mat[2][2] * mat[3][1]
                - mat[0][1] * mat[2][0] * mat[3][2]
                - mat[0][2] * mat[2][1] * mat[3][0]
            ) * inv_det,
            (
                mat[0][0] * mat[1][2] * mat[3][1]
                + mat[0][1] * mat[1][0] * mat[3][2]
                + mat[0][2] * mat[1][1] * mat[3][0]
                - mat[0][0] * mat[1][1] * mat[3][2]
                - mat[0][1] * mat[1][2] * mat[3][0]
                - mat[0][2] * mat[1][0] * mat[3][1]
            ) * inv_det
        ]
    ]
}
/// Computes the inverse of a 4x4 matrix.
///
/// Full adjugate (cofactor) formula: each entry is a signed 3x3 minor
/// of the input scaled by the inverse determinant. No singularity
/// check is performed; for a singular matrix the entries follow T's
/// division semantics via `mat4_inv_det`.
pub fn mat4_inv<T: Num + Copy>(mat: Matrix4<T>) -> Matrix4<T> {
    let inv_det = mat4_inv_det(mat);
    [
        [ (
            mat[1][1] * mat[2][2] * mat[3][3]
            + mat[1][2] * mat[2][3] * mat[3][1]
            + mat[1][3] * mat[2][1] * mat[3][2]
            - mat[1][1] * mat[2][3] * mat[3][2]
            - mat[1][2] * mat[2][1] * mat[3][3]
            - mat[1][3] * mat[2][2] * mat[3][1]
        ) * inv_det,
        (
            mat[0][1] * mat[2][3] * mat[3][2]
            + mat[0][2] * mat[2][1] * mat[3][3]
            + mat[0][3] * mat[2][2] * mat[3][1]
            - mat[0][1] * mat[2][2] * mat[3][3]
            - mat[0][2] * mat[2][3] * mat[3][1]
            - mat[0][3] * mat[2][1] * mat[3][2]
        ) * inv_det,
        (
            mat[0][1] * mat[1][2] * mat[3][3]
            + mat[0][2] * mat[1][3] * mat[3][1]
            + mat[0][3] * mat[1][1] * mat[3][2]
            - mat[0][1] * mat[1][3] * mat[3][2]
            - mat[0][2] * mat[1][1] * mat[3][3]
            - mat[0][3] * mat[1][2] * mat[3][1]
        ) * inv_det,
        (
            mat[0][1] * mat[1][3] * mat[2][2]
            + mat[0][2] * mat[1][1] * mat[2][3]
            + mat[0][3] * mat[1][2] * mat[2][1]
            - mat[0][1] * mat[1][2] * mat[2][3]
            - mat[0][2] * mat[1][3] * mat[2][1]
            - mat[0][3] * mat[1][1] * mat[2][2]
        ) * inv_det
        ],
        [
            (
                mat[1][0] * mat[2][3] * mat[3][2]
                + mat[1][2] * mat[2][0] * mat[3][3]
                + mat[1][3] * mat[2][2] * mat[3][0]
                - mat[1][0] * mat[2][2] * mat[3][3]
                - mat[1][2] * mat[2][3] * mat[3][0]
                - mat[1][3] * mat[2][0] * mat[3][2]
            ) * inv_det,
            (
                mat[0][0] * mat[2][2] * mat[3][3]
                + mat[0][2] * mat[2][3] * mat[3][0]
                + mat[0][3] * mat[2][0] * mat[3][2]
                - mat[0][0] * mat[2][3] * mat[3][2]
                - mat[0][2] * mat[2][0] * mat[3][3]
                - mat[0][3] * mat[2][2] * mat[3][0]
            ) * inv_det,
            (
                mat[0][0] * mat[1][3] * mat[3][2]
                + mat[0][2] * mat[1][0] * mat[3][3]
                + mat[0][3] * mat[1][2] * mat[3][0]
                - mat[0][0] * mat[1][2] * mat[3][3]
                - mat[0][2] * mat[1][3] * mat[3][0]
                - mat[0][3] * mat[1][0] * mat[3][2]
            ) * inv_det,
            (
                mat[0][0] * mat[1][2] * mat[2][3]
                + mat[0][2] * mat[1][3] * mat[2][0]
                + mat[0][3] * mat[1][0] * mat[2][2]
                - mat[0][0] * mat[1][3] * mat[2][2]
                - mat[0][2] * mat[1][0] * mat[2][3]
                - mat[0][3] * mat[1][2] * mat[2][0]
            ) * inv_det
        ],
        [
            (
                mat[1][0] * mat[2][1] * mat[3][3]
                + mat[1][1] * mat[2][3] * mat[3][0]
                + mat[1][3] * mat[2][0] * mat[3][1]
                - mat[1][0] * mat[2][3] * mat[3][1]
                - mat[1][1] * mat[2][0] * mat[3][3]
                - mat[1][3] * mat[2][1] * mat[3][0]
            ) * inv_det,
            (
                mat[0][0] * mat[2][3] * mat[3][1]
                + mat[0][1] * mat[2][0] * mat[3][3]
                + mat[0][3] * mat[2][1] * mat[3][0]
                - mat[0][0] * mat[2][1] * mat[3][3]
                - mat[0][1] * mat[2][3] * mat[3][0]
                - mat[0][3] * mat[2][0] * mat[3][1]
            ) * inv_det,
            (
                mat[0][0] * mat[1][1] * mat[3][3]
                + mat[0][1] * mat[1][3] * mat[3][0]
                + mat[0][3] * mat[1][0] * mat[3][1]
                - mat[0][0] * mat[1][3] * mat[3][1]
                - mat[0][1] * mat[1][0] * mat[3][3]
                - mat[0][3] * mat[1][1] * mat[3][0]
            ) * inv_det,
            (
                mat[0][0] * mat[1][3] * mat[2][1]
                + mat[0][1] * mat[1][0] * mat[2][3]
                + mat[0][3] * mat[1][1] * mat[2][0]
                - mat[0][0] * mat[1][1] * mat[2][3]
                - mat[0][1] * mat[1][3] * mat[2][0]
                - mat[0][3] * mat[1][0] * mat[2][1]
            ) * inv_det
        ],
        [
            (
                mat[1][0] * mat[2][2] * mat[3][1]
                + mat[1][1] * mat[2][0] * mat[3][2]
                + mat[1][2] * mat[2][1] * mat[3][0]
                - mat[1][0] * mat[2][1] * mat[3][2]
                - mat[1][1] * mat[2][2] * mat[3][0]
                - mat[1][2] * mat[2][0] * mat[3][1]
            ) * inv_det,
            (
                mat[0][0] * mat[2][1] * mat[3][2]
                + mat[0][1] * mat[2][2] * mat[3][0]
                + mat[0][2] * mat[2][0] * mat[3][1]
                - mat[0][0] * mat[2][2] * mat[3][1]
                - mat[0][1] * mat[2][0] * mat[3][2]
                - mat[0][2] * mat[2][1] * mat[3][0]
            ) * inv_det,
            (
                mat[0][0] * mat[1][2] * mat[3][1]
                + mat[0][1] * mat[1][0] * mat[3][2]
                + mat[0][2] * mat[1][1] * mat[3][0]
                - mat[0][0] * mat[1][1] * mat[3][2]
                - mat[0][1] * mat[1][2] * mat[3][0]
                - mat[0][2] * mat[1][0] * mat[3][1]
            ) * inv_det,
            (
                mat[0][0] * mat[1][1] * mat[2][2]
                + mat[0][1] * mat[1][2] * mat[2][0]
                + mat[0][2] * mat[1][0] * mat[2][1]
                - mat[0][0] * mat[1][2] * mat[2][1]
                - mat[0][1] * mat[1][0] * mat[2][2]
                - mat[0][2] * mat[1][1] * mat[2][0]
            ) * inv_det
        ]
    ]
}
// Fixed missing docs warning
#![crate_name = "vecmath"]
#![deny(missing_docs)]
//! A simple and generic library for vector math.
//!
//! Notice that row major is mathematical standard,
//! while OpenGL uses column major format.
//! This library supports both formats, prefixing functions with 'row_' or 'col_'.
//!
//! For row major affine transforms, use `Matrix2x3` (2D) and `Matrix3x4` (3D).
//! For column major affine transforms, use `Matrix3x2` (2D) and `Matrix4x3` (3D).
//!
//! If you are using `Matrix3` or `Matrix4`,
//! then you need to pick either row or column major.
//!
//! Notice that there are two kinds of transforms: Positions and vectors.
//! The vector transforms ignores the translate component.
//! For example, `row_mat2x3_transform_pos2` transforms a position.
//! `row_mat2x3_transform_vec2` transforms a vector.
use std::num::{One, Zero};
// NOTE(review): `[T, ..N]` is the pre-1.0 fixed-size array syntax
// (modern Rust writes `[T; N]`).
/// A 2D vector.
pub type Vector2<T> = [T, ..2];
/// A 3D vector.
pub type Vector3<T> = [T, ..3];
/// A 4D vector.
pub type Vector4<T> = [T, ..4];
/// A 2x3 matrix.
///
/// To multiply two matrices use `row_mat2x3_mul`.
pub type Matrix2x3<T> = [[T, ..3], ..2];
/// A 3x2 matrix.
///
/// To multiply two matrices use `col_mat3x2_mul`.
pub type Matrix3x2<T> = [[T, ..2], ..3];
/// A 3x3 matrix.
///
/// To multiply two matrices use `row_mat3_mul` or `col_mat3_mul`.
pub type Matrix3<T> = [[T, ..3], ..3];
/// A 3x4 matrix.
///
/// To multiply two matrices use `row_mat3x4_mul`.
pub type Matrix3x4<T> = [[T, ..4], ..3];
/// A 4x3 matrix.
///
/// To multiply two matrices use `col_mat4x3_mul`.
///
/// This format can also store vertices of a quad.
pub type Matrix4x3<T> = [[T, ..3], ..4];
/// A 4x4 matrix.
///
/// To multiply two matrices use `row_mat4_mul` or `col_mat4_mul`.
pub type Matrix4<T> = [[T, ..4], ..4];
/// Computes column vector in column matrix product.
///
/// The semantics of the order is the same as for row matrices.
#[inline(always)]
pub fn col_mat3x2_mul_col<T: Num + Copy>(
    a: Matrix3x2<T>,
    b: Matrix3x2<T>,
    i: uint
) -> Vector2<T> {
    [
        vec3_dot_vec2(col_mat3x2_row(a, 0), b[i]),
        vec3_dot_vec2(col_mat3x2_row(a, 1), b[i])
    ]
}
/// Computes column vector in column matrix product.
///
/// The semantics of the order is the same as for row matrices.
#[inline(always)]
pub fn col_mat3_mul_col<T: Num + Copy>(
    a: Matrix3<T>,
    b: Matrix3<T>,
    i: uint
) -> Vector3<T> {
    [
        vec3_dot(col_mat3_row(a, 0), b[i]),
        vec3_dot(col_mat3_row(a, 1), b[i]),
        vec3_dot(col_mat3_row(a, 2), b[i])
    ]
}
/// Computes column vector in column matrix product.
///
/// The semantics of the order is the same as for row matrices.
#[inline(always)]
pub fn col_mat4x3_mul_col<T: Num + Copy>(
    a: Matrix4x3<T>,
    b: Matrix4x3<T>,
    i: uint
) -> Vector3<T> {
    [
        vec4_dot_vec3(col_mat4x3_row(a, 0), b[i]),
        vec4_dot_vec3(col_mat4x3_row(a, 1), b[i]),
        vec4_dot_vec3(col_mat4x3_row(a, 2), b[i])
    ]
}
/// Computes column vector in column matrix product.
///
/// The semantics of the order is the same as for row matrices.
#[inline(always)]
pub fn col_mat4_mul_col<T: Num + Copy>(
    a: Matrix4<T>,
    b: Matrix4<T>,
    i: uint
) -> Vector4<T> {
    [
        vec4_dot(col_mat4_row(a, 0), b[i]),
        vec4_dot(col_mat4_row(a, 1), b[i]),
        vec4_dot(col_mat4_row(a, 2), b[i]),
        vec4_dot(col_mat4_row(a, 3), b[i])
    ]
}
/// Computes row vector in row matrix product.
///
/// The last component uses `vec3_dot_pos2`, which supplies the
/// implicit affine bottom row [0, 0, 1] of the 2x3 matrices.
#[inline(always)]
pub fn row_mat2x3_mul_row<T: Num + Copy>(
    a: Matrix2x3<T>,
    b: Matrix2x3<T>,
    i: uint
) -> Vector3<T> {
    [
        vec3_dot_vec2(a[i], row_mat2x3_col(b, 0)),
        vec3_dot_vec2(a[i], row_mat2x3_col(b, 1)),
        vec3_dot_pos2(a[i], row_mat2x3_col(b, 2))
    ]
}
/// Computes row vector in row matrix product.
#[inline(always)]
pub fn row_mat3_mul_row<T: Num + Copy>(
    a: Matrix3<T>,
    b: Matrix3<T>,
    i: uint
) -> Vector3<T> {
    [
        vec3_dot(a[i], row_mat3_col(b, 0)),
        vec3_dot(a[i], row_mat3_col(b, 1)),
        vec3_dot(a[i], row_mat3_col(b, 2)),
    ]
}
/// Computes row vector in row matrix product.
///
/// The last component uses `vec4_dot_pos3`, which supplies the
/// implicit affine bottom row [0, 0, 0, 1] of the 3x4 matrices.
#[inline(always)]
pub fn row_mat3x4_mul_row<T: Num + Copy>(
    a: Matrix3x4<T>,
    b: Matrix3x4<T>,
    i: uint
) -> Vector4<T> {
    [
        vec4_dot_vec3(a[i], row_mat3x4_col(b, 0)),
        vec4_dot_vec3(a[i], row_mat3x4_col(b, 1)),
        vec4_dot_vec3(a[i], row_mat3x4_col(b, 2)),
        vec4_dot_pos3(a[i], row_mat3x4_col(b, 3))
    ]
}
/// Computes row vector in row matrix product.
#[inline(always)]
pub fn row_mat4_mul_row<T: Num + Copy>(
    a: Matrix4<T>,
    b: Matrix4<T>,
    i: uint
) -> Vector4<T> {
    [
        vec4_dot(a[i], row_mat4_col(b, 0)),
        vec4_dot(a[i], row_mat4_col(b, 1)),
        vec4_dot(a[i], row_mat4_col(b, 2)),
        vec4_dot(a[i], row_mat4_col(b, 3))
    ]
}
/// Multiplies two matrices.
///
/// Builds the product one column at a time via `col_mat3x2_mul_col`.
#[inline(always)]
pub fn col_mat3x2_mul<T: Num + Copy>(
    a: Matrix3x2<T>,
    b: Matrix3x2<T>
) -> Matrix3x2<T> {
    [
        col_mat3x2_mul_col(a, b, 0),
        col_mat3x2_mul_col(a, b, 1),
        col_mat3x2_mul_col(a, b, 2)
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn col_mat3_mul<T: Num + Copy>(
    a: Matrix3<T>,
    b: Matrix3<T>
) -> Matrix3<T> {
    [
        col_mat3_mul_col(a, b, 0),
        col_mat3_mul_col(a, b, 1),
        col_mat3_mul_col(a, b, 2)
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn col_mat4x3_mul<T: Num + Copy>(
    a: Matrix4x3<T>,
    b: Matrix4x3<T>
) -> Matrix4x3<T> {
    [
        col_mat4x3_mul_col(a, b, 0),
        col_mat4x3_mul_col(a, b, 1),
        col_mat4x3_mul_col(a, b, 2),
        col_mat4x3_mul_col(a, b, 3)
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn col_mat4_mul<T: Num + Copy>(
    a: Matrix4<T>,
    b: Matrix4<T>
) -> Matrix4<T> {
    [
        col_mat4_mul_col(a, b, 0),
        col_mat4_mul_col(a, b, 1),
        col_mat4_mul_col(a, b, 2),
        col_mat4_mul_col(a, b, 3)
    ]
}
/// Multiplies two matrices.
///
/// Builds the product one row at a time via `row_mat2x3_mul_row`.
#[inline(always)]
pub fn row_mat2x3_mul<T: Num + Copy>(
    a: Matrix2x3<T>,
    b: Matrix2x3<T>
) -> Matrix2x3<T> {
    [
        row_mat2x3_mul_row(a, b, 0),
        row_mat2x3_mul_row(a, b, 1),
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn row_mat3_mul<T: Num + Copy>(
    a: Matrix3<T>,
    b: Matrix3<T>
) -> Matrix3<T> {
    [
        row_mat3_mul_row(a, b, 0),
        row_mat3_mul_row(a, b, 1),
        row_mat3_mul_row(a, b, 2)
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn row_mat3x4_mul<T: Num + Copy>(
    a: Matrix3x4<T>,
    b: Matrix3x4<T>
) -> Matrix3x4<T> {
    [
        row_mat3x4_mul_row(a, b, 0),
        row_mat3x4_mul_row(a, b, 1),
        row_mat3x4_mul_row(a, b, 2)
    ]
}
/// Multiplies two matrices.
#[inline(always)]
pub fn row_mat4_mul<T: Num + Copy>(
    a: Matrix4<T>,
    b: Matrix4<T>
) -> Matrix4<T> {
    [
        row_mat4_mul_row(a, b, 0),
        row_mat4_mul_row(a, b, 1),
        row_mat4_mul_row(a, b, 2),
        row_mat4_mul_row(a, b, 3)
    ]
}
// Smoke test: only checks that multiplying two identity matrices
// type-checks and runs; no result assertion is made.
#[test]
fn test_row_mat2x3_mul() {
    let a: Matrix2x3<f64> = mat2x3_id();
    let b = a;
    let _ = row_mat2x3_mul(a, b);
}
// Smoke test for the 3x4 row-major multiply; no result assertion.
#[test]
fn test_row_mat3x4_mul() {
    let a: Matrix3x4<f64> = mat3x4_id();
    let b = a;
    let _ = row_mat3x4_mul(a, b);
}
/// Constructs identity matrix.
#[inline(always)]
pub fn mat2x3_id<T: One + Zero + Copy>() -> Matrix2x3<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero, zero],
        [zero, one, zero]
    ]
}
/// Constructs identity matrix.
///
/// For the column-major affine format the last row is the zero
/// translation, not part of the identity diagonal.
#[inline(always)]
pub fn mat3x2_id<T: One + Zero + Copy>() -> Matrix3x2<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero],
        [zero, one],
        [zero, zero]
    ]
}
/// Constructs identity matrix.
#[inline(always)]
pub fn mat3_id<T: One + Zero + Copy>() -> Matrix3<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero, zero],
        [zero, one, zero],
        [zero, zero, one]
    ]
}
/// Constructs identity matrix.
#[inline(always)]
pub fn mat3x4_id<T: One + Zero + Copy>() -> Matrix3x4<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero, zero, zero],
        [zero, one, zero, zero],
        [zero, zero, one, zero]
    ]
}
/// Constructs identity matrix.
#[inline(always)]
pub fn mat4x3_id<T: One + Zero + Copy>() -> Matrix4x3<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero, zero],
        [zero, one, zero],
        [zero, zero, one],
        [zero, zero, zero]
    ]
}
/// Constructs identity matrix.
#[inline(always)]
pub fn mat4_id<T: One + Zero + Copy>() -> Matrix4<T> {
    let one = One::one();
    let zero = Zero::zero();
    [
        [one, zero, zero, zero],
        [zero, one, zero, zero],
        [zero, zero, one, zero],
        [zero, zero, zero, one]
    ]
}
/// Converts to another vector type.
///
/// Panics if a component is not representable in `U`
/// (`NumCast::from` returning `None` is unwrapped).
#[inline(always)]
pub fn vec2_cast<T: ToPrimitive + Copy, U: NumCast>(
    a: Vector2<T>
) -> Vector2<U> {
    [
        NumCast::from(a[0]).unwrap(),
        NumCast::from(a[1]).unwrap()
    ]
}
/// Converts to another vector type.
///
/// Panics if a component is not representable in `U`.
#[inline(always)]
pub fn vec3_cast<T: ToPrimitive + Copy, U: NumCast>(
    a: Vector3<T>
) -> Vector3<U> {
    [
        NumCast::from(a[0]).unwrap(),
        NumCast::from(a[1]).unwrap(),
        NumCast::from(a[2]).unwrap()
    ]
}
/// Converts to another vector type.
///
/// Panics if a component is not representable in `U`.
#[inline(always)]
pub fn vec4_cast<T: ToPrimitive + Copy, U: NumCast>(
    a: Vector4<T>
) -> Vector4<U> {
    [
        NumCast::from(a[0]).unwrap(),
        NumCast::from(a[1]).unwrap(),
        NumCast::from(a[2]).unwrap(),
        NumCast::from(a[3]).unwrap()
    ]
}
/// Converts to another matrix type.
///
/// Casts row by row; panics (via the vector casts) if any component is not
/// representable in `U`.
#[inline(always)]
pub fn mat2x3_cast<T: ToPrimitive + Copy, U: NumCast>(
    mat: Matrix2x3<T>
) -> Matrix2x3<U> {
    [
        vec3_cast(mat[0]),
        vec3_cast(mat[1])
    ]
}
/// Converts to another matrix type.
#[inline(always)]
pub fn mat3x2_cast<T: ToPrimitive + Copy, U: NumCast>(
    mat: Matrix3x2<T>
) -> Matrix3x2<U> {
    [
        vec2_cast(mat[0]),
        vec2_cast(mat[1]),
        vec2_cast(mat[2])
    ]
}
/// Converts to another matrix type.
#[inline(always)]
pub fn mat3_cast<T: ToPrimitive + Copy, U: NumCast>(
    mat: Matrix3<T>
) -> Matrix3<U> {
    [
        vec3_cast(mat[0]),
        vec3_cast(mat[1]),
        vec3_cast(mat[2])
    ]
}
/// Converts to another matrix type.
#[inline(always)]
pub fn mat3x4_cast<T: ToPrimitive + Copy, U: NumCast>(
    m: Matrix3x4<T>
) -> Matrix3x4<U> {
    [
        vec4_cast(m[0]),
        vec4_cast(m[1]),
        vec4_cast(m[2])
    ]
}
/// Converts to another matrix type.
#[inline(always)]
pub fn mat4x3_cast<T: ToPrimitive + Copy, U: NumCast>(
    m: Matrix4x3<T>
) -> Matrix4x3<U> {
    [
        vec3_cast(m[0]),
        vec3_cast(m[1]),
        vec3_cast(m[2]),
        vec3_cast(m[3])
    ]
}
/// Converts to another matrix type.
#[inline(always)]
pub fn mat4_cast<T: ToPrimitive + Copy, U: NumCast>(
    m: Matrix4<T>
) -> Matrix4<U> {
    [
        vec4_cast(m[0]),
        vec4_cast(m[1]),
        vec4_cast(m[2]),
        vec4_cast(m[3])
    ]
}
/// Subtracts 'b' from 'a'.
///
/// Componentwise `a - b`.
#[inline(always)]
pub fn vec2_sub<T: Num>(a: Vector2<T>, b: Vector2<T>) -> Vector2<T> {
    [
        a[0] - b[0],
        a[1] - b[1],
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn vec3_sub<T: Num>(a: Vector3<T>, b: Vector3<T>) -> Vector3<T> {
    [
        a[0] - b[0],
        a[1] - b[1],
        a[2] - b[2],
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn vec4_sub<T: Num>(a: Vector4<T>, b: Vector4<T>) -> Vector4<T> {
    [
        a[0] - b[0],
        a[1] - b[1],
        a[2] - b[2],
        a[3] - b[3]
    ]
}
/// Subtracts 'b' from 'a'.
///
/// Matrix subtraction is delegated row-wise to the vector versions.
#[inline(always)]
pub fn mat2x3_sub<T: Num + Copy>(a: Matrix2x3<T>, b: Matrix2x3<T>) -> Matrix2x3<T> {
    [
        vec3_sub(a[0], b[0]),
        vec3_sub(a[1], b[1])
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn mat3x2_sub<T: Num + Copy>(a: Matrix3x2<T>, b: Matrix3x2<T>) -> Matrix3x2<T> {
    [
        vec2_sub(a[0], b[0]),
        vec2_sub(a[1], b[1]),
        vec2_sub(a[2], b[2])
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn mat3_sub<T: Num + Copy>(a: Matrix3<T>, b: Matrix3<T>) -> Matrix3<T> {
    [
        vec3_sub(a[0], b[0]),
        vec3_sub(a[1], b[1]),
        vec3_sub(a[2], b[2])
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn mat3x4_sub<T: Num + Copy>(a: Matrix3x4<T>, b: Matrix3x4<T>) -> Matrix3x4<T> {
    [
        vec4_sub(a[0], b[0]),
        vec4_sub(a[1], b[1]),
        vec4_sub(a[2], b[2])
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn mat4x3_sub<T: Num + Copy>(a: Matrix4x3<T>, b: Matrix4x3<T>) -> Matrix4x3<T> {
    [
        vec3_sub(a[0], b[0]),
        vec3_sub(a[1], b[1]),
        vec3_sub(a[2], b[2]),
        vec3_sub(a[3], b[3])
    ]
}
/// Subtracts 'b' from 'a'.
#[inline(always)]
pub fn mat4_sub<T: Num + Copy>(a: Matrix4<T>, b: Matrix4<T>) -> Matrix4<T> {
    [
        vec4_sub(a[0], b[0]),
        vec4_sub(a[1], b[1]),
        vec4_sub(a[2], b[2]),
        vec4_sub(a[3], b[3])
    ]
}
/// Adds two vectors.
///
/// Componentwise `a + b`.
#[inline(always)]
pub fn vec2_add<T: Num>(a: Vector2<T>, b: Vector2<T>) -> Vector2<T> {
    [
        a[0] + b[0],
        a[1] + b[1],
    ]
}
/// Adds two vectors.
#[inline(always)]
pub fn vec3_add<T: Num>(a: Vector3<T>, b: Vector3<T>) -> Vector3<T> {
    [
        a[0] + b[0],
        a[1] + b[1],
        a[2] + b[2]
    ]
}
/// Adds two vectors.
#[inline(always)]
pub fn vec4_add<T: Num>(a: Vector4<T>, b: Vector4<T>) -> Vector4<T> {
    [
        a[0] + b[0],
        a[1] + b[1],
        a[2] + b[2],
        a[3] + b[3]
    ]
}
/// Adds two matrices.
///
/// Matrix addition is delegated row-wise to the vector versions.
#[inline(always)]
pub fn mat2x3_add<T: Num + Copy>(a: Matrix2x3<T>, b: Matrix2x3<T>) -> Matrix2x3<T> {
    [
        vec3_add(a[0], b[0]),
        vec3_add(a[1], b[1])
    ]
}
/// Adds two matrices.
#[inline(always)]
pub fn mat3x2_add<T: Num + Copy>(a: Matrix3x2<T>, b: Matrix3x2<T>) -> Matrix3x2<T> {
    [
        vec2_add(a[0], b[0]),
        vec2_add(a[1], b[1]),
        vec2_add(a[2], b[2])
    ]
}
/// Adds two matrices.
#[inline(always)]
pub fn mat3_add<T: Num + Copy>(a: Matrix3<T>, b: Matrix3<T>) -> Matrix3<T> {
    [
        vec3_add(a[0], b[0]),
        vec3_add(a[1], b[1]),
        vec3_add(a[2], b[2])
    ]
}
/// Adds two matrices.
#[inline(always)]
pub fn mat3x4_add<T: Num + Copy>(a: Matrix3x4<T>, b: Matrix3x4<T>) -> Matrix3x4<T> {
    [
        vec4_add(a[0], b[0]),
        vec4_add(a[1], b[1]),
        vec4_add(a[2], b[2])
    ]
}
/// Adds two matrices.
#[inline(always)]
pub fn mat4x3_add<T: Num + Copy>(a: Matrix4x3<T>, b: Matrix4x3<T>) -> Matrix4x3<T> {
    [
        vec3_add(a[0], b[0]),
        vec3_add(a[1], b[1]),
        vec3_add(a[2], b[2]),
        vec3_add(a[3], b[3])
    ]
}
/// Adds two matrices.
#[inline(always)]
pub fn mat4_add<T: Num + Copy>(a: Matrix4<T>, b: Matrix4<T>) -> Matrix4<T> {
    [
        vec4_add(a[0], b[0]),
        vec4_add(a[1], b[1]),
        vec4_add(a[2], b[2]),
        vec4_add(a[3], b[3])
    ]
}
/// Multiplies two vectors component wise.
///
/// This is the Hadamard product, not a dot or cross product.
#[inline(always)]
pub fn vec2_mul<T: Num>(a: Vector2<T>, b: Vector2<T>) -> Vector2<T> {
    [a[0] * b[0], a[1] * b[1]]
}
/// Multiplies two vectors component wise.
#[inline(always)]
pub fn vec3_mul<T: Num>(a: Vector3<T>, b: Vector3<T>) -> Vector3<T> {
    [a[0] * b[0], a[1] * b[1], a[2] * b[2]]
}
/// Multiplies two vectors component wise.
#[inline(always)]
pub fn vec4_mul<T: Num>(a: Vector4<T>, b: Vector4<T>) -> Vector4<T> {
    [a[0] * b[0], a[1] * b[1], a[2] * b[2], a[3] * b[3]]
}
/// Computes the dot product.
#[inline(always)]
pub fn vec2_dot<T: Num>(a: Vector2<T>, b: Vector2<T>) -> T {
    a[0] * b[0] + a[1] * b[1]
}
/// Computes the dot product.
#[inline(always)]
pub fn vec3_dot<T: Num>(a: Vector3<T>, b: Vector3<T>) -> T {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
}
/// Computes the dot product.
#[inline(always)]
pub fn vec4_dot<T: Num>(a: Vector4<T>, b: Vector4<T>) -> T {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3]
}
/// Computes the square length of a vector.
///
/// Equivalent to the dot product of the vector with itself; avoids the
/// square root needed for the actual length.
#[inline(always)]
pub fn vec2_square_len<T: Num>(a: Vector2<T>) -> T {
    a[0] * a[0] + a[1] * a[1]
}
/// Computes the square length of a vector.
#[inline(always)]
pub fn vec3_square_len<T: Num>(a: Vector3<T>) -> T {
    a[0] * a[0] + a[1] * a[1] + a[2] * a[2]
}
/// Computes the square length of a vector.
#[inline(always)]
pub fn vec4_square_len<T: Num>(a: Vector4<T>) -> T {
    a[0] * a[0] + a[1] * a[1] + a[2] * a[2] + a[3] * a[3]
}
/// Computes the cross product.
///
/// In 2D this is the scalar z-component of the 3D cross product
/// (positive when `b` lies counter-clockwise from `a`).
#[inline(always)]
pub fn vec2_cross<T: Num>(a: Vector2<T>, b: Vector2<T>) -> T {
    a[0] * b[1] - a[1] * b[0]
}
/// Computes the cross product.
#[inline(always)]
pub fn vec3_cross<T: Num>(a: Vector3<T>, b: Vector3<T>) -> Vector3<T> {
    [
        a[1] * b[2] - a[2] * b[1],
        a[2] * b[0] - a[0] * b[2],
        a[0] * b[1] - a[1] * b[0]
    ]
}
/// Multiplies the vector with a scalar.
#[inline(always)]
pub fn vec2_scale<T: Num>(a: Vector2<T>, b: T) -> Vector2<T> {
    [
        a[0] * b,
        a[1] * b
    ]
}
/// Multiplies the vector with a scalar.
#[inline(always)]
pub fn vec3_scale<T: Num>(a: Vector3<T>, b: T) -> Vector3<T> {
    [
        a[0] * b,
        a[1] * b,
        a[2] * b
    ]
}
/// Multiplies the vector with a scalar.
#[inline(always)]
pub fn vec4_scale<T: Num>(a: Vector4<T>, b: T) -> Vector4<T> {
    [
        a[0] * b,
        a[1] * b,
        a[2] * b,
        a[3] * b
    ]
}
/// Computes the length of vector.
///
/// Square root of the square length; requires a `Float` component type.
#[inline(always)]
pub fn vec2_len<T: Float>(a: Vector2<T>) -> T {
    vec2_square_len(a).sqrt()
}
/// Computes the length of vector.
#[inline(always)]
pub fn vec3_len<T: Float>(a: Vector3<T>) -> T {
    vec3_square_len(a).sqrt()
}
/// Computes the length of vector.
#[inline(always)]
pub fn vec4_len<T: Float>(a: Vector4<T>) -> T {
    vec4_square_len(a).sqrt()
}
/// Computes the inverse length of a vector.
///
/// NOTE: for a zero vector this divides by zero, producing infinity for
/// IEEE floats — callers must guard against degenerate input themselves.
#[inline(always)]
pub fn vec2_inv_len<T: Float>(a: Vector2<T>) -> T {
    let one: T = One::one();
    one / vec2_len(a)
}
/// Computes the inverse length of a vector.
#[inline(always)]
pub fn vec3_inv_len<T: Float>(a: Vector3<T>) -> T {
    let one: T = One::one();
    one / vec3_len(a)
}
/// Computes the inverse length of a vector.
#[inline(always)]
pub fn vec4_inv_len<T: Float>(a: Vector4<T>) -> T {
    let one: T = One::one();
    one / vec4_len(a)
}
/// Computes the normalized.
///
/// Scales the vector to unit length. A zero vector yields non-finite
/// components (see `vec2_inv_len`).
#[inline(always)]
pub fn vec2_normalized<T: Float>(a: Vector2<T>) -> Vector2<T> {
    vec2_scale(a, vec2_inv_len(a))
}
/// Computes the normalized.
#[inline(always)]
pub fn vec3_normalized<T: Float>(a: Vector3<T>) -> Vector3<T> {
    vec3_scale(a, vec3_inv_len(a))
}
/// Computes the normalized.
#[inline(always)]
pub fn vec4_normalized<T: Float>(a: Vector4<T>) -> Vector4<T> {
    vec4_scale(a, vec4_inv_len(a))
}
/// Computes the normalized difference between two vectors.
///
/// This is often used to get direction from 'b' to 'a'.
#[inline(always)]
pub fn vec2_normalized_sub<T: Float>(
    a: Vector2<T>,
    b: Vector2<T>
) -> Vector2<T> {
    vec2_normalized(vec2_sub(a, b))
}
/// Computes the normalized difference between two vectors.
///
/// This is often used to get direction from 'b' to 'a'.
#[inline(always)]
pub fn vec3_normalized_sub<T: Float>(
    a: Vector3<T>,
    b: Vector3<T>
) -> Vector3<T> {
    vec3_normalized(vec3_sub(a, b))
}
/// Computes the normalized difference between two vectors.
///
/// This is often used to get direction from 'b' to 'a'.
#[inline(always)]
pub fn vec4_normalized_sub<T: Float>(
    a: Vector4<T>,
    b: Vector4<T>
) -> Vector4<T> {
    vec4_normalized(vec4_sub(a, b))
}
/// Computes transformed vector component.
///
/// This is used when transforming vectors through matrices.
/// Dots the first two components of `a` with `b`; the last component of
/// `a` (the translation column) is ignored, as is correct for directions.
#[inline(always)]
pub fn vec3_dot_vec2<T: Num>(a: Vector3<T>, b: Vector2<T>) -> T {
    a[0] * b[0] + a[1] * b[1]
}
/// Computes transformed vector component.
///
/// This is used when transforming vectors through matrices.
#[inline(always)]
pub fn vec4_dot_vec3<T: Num>(a: Vector4<T>, b: Vector3<T>) -> T {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
}
/// Computes transformed position component.
///
/// This is used when transforming points through matrices.
/// Same as `vec3_dot_vec2` but adds the translation term `a[2]`
/// (the point's implicit homogeneous coordinate is 1).
#[inline(always)]
pub fn vec3_dot_pos2<T: Num + Copy>(a: Vector3<T>, b: Vector2<T>) -> T {
    vec3_dot_vec2(a, b) + a[2]
}
/// Computes transformed position component.
///
/// This is used when transforming points through matrices.
#[inline(always)]
pub fn vec4_dot_pos3<T: Num + Copy>(a: Vector4<T>, b: Vector3<T>) -> T {
    vec4_dot_vec3(a, b) + a[3]
}
/// Returns a column vector of a row matrix.
///
/// `i` indexes the column (legacy `uint` is the pre-1.0 name for `usize`);
/// indexing panics if `i` is out of bounds for the row width.
#[inline(always)]
pub fn row_mat2x3_col<T: Copy>(mat: Matrix2x3<T>, i: uint) -> Vector2<T> {
    [mat[0][i], mat[1][i]]
}
/// Returns a row vector of a column matrix.
///
/// A column matrix stores rows where a row matrix stores columns,
/// so this simply delegates to the row-matrix column accessor.
#[inline(always)]
pub fn col_mat2x3_row<T: Copy>(mat: Matrix2x3<T>, i: uint) -> Vector2<T> {
    row_mat2x3_col(mat, i)
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn row_mat3x2_col<T: Copy>(a: Matrix3x2<T>, i: uint) -> Vector3<T> {
    [a[0][i], a[1][i], a[2][i]]
}
/// Returns a row vector of a column matrix.
#[inline(always)]
pub fn col_mat3x2_row<T: Copy>(a: Matrix3x2<T>, i: uint) -> Vector3<T> {
    row_mat3x2_col(a, i)
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn row_mat3_col<T: Copy>(a: Matrix3<T>, i: uint) -> Vector3<T> {
    [a[0][i], a[1][i], a[2][i]]
}
/// Returns a row vector of a column matrix.
#[inline(always)]
pub fn col_mat3_row<T: Copy>(a: Matrix3<T>, i: uint) -> Vector3<T> {
    row_mat3_col(a, i)
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn row_mat3x4_col<T: Copy>(mat: Matrix3x4<T>, i: uint) -> Vector3<T> {
    [mat[0][i], mat[1][i], mat[2][i]]
}
/// Returns a row vector of a column matrix.
#[inline(always)]
pub fn col_mat3x4_row<T: Copy>(mat: Matrix3x4<T>, i: uint) -> Vector3<T> {
    row_mat3x4_col(mat, i)
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn row_mat4x3_col<T: Copy>(a: Matrix4x3<T>, i: uint) -> Vector4<T> {
    [a[0][i], a[1][i], a[2][i], a[3][i]]
}
/// Returns a column vector of a row matrix.
// NOTE(review): the summary above looks copy-pasted from the row-matrix
// accessor; by analogy with the siblings it should read
// "Returns a row vector of a column matrix." — confirm upstream.
#[inline(always)]
pub fn col_mat4x3_row<T: Copy>(a: Matrix4x3<T>, i: uint) -> Vector4<T> {
    row_mat4x3_col(a, i)
}
/// Returns a column vector of a row matrix.
#[inline(always)]
pub fn row_mat4_col<T: Copy>(a: Matrix4<T>, i: uint) -> Vector4<T> {
    [a[0][i], a[1][i], a[2][i], a[3][i]]
}
/// Returns a row vector of a column matrix.
#[inline(always)]
pub fn col_mat4_row<T: Copy>(a: Matrix4<T>, i: uint) -> Vector4<T> {
    row_mat4_col(a, i)
}
/// Constructs the transpose of a matrix.
///
/// The transpose of an RxC matrix is CxR; each output row is built from
/// the corresponding input column.
#[inline(always)]
pub fn mat2x3_transposed<T: Copy>(a: Matrix2x3<T>) -> Matrix3x2<T> {
    [
        row_mat2x3_col(a, 0),
        row_mat2x3_col(a, 1),
        row_mat2x3_col(a, 2)
    ]
}
/// Constructs the transpose of a matrix.
#[inline(always)]
pub fn mat3x2_transposed<T: Copy>(a: Matrix3x2<T>) -> Matrix2x3<T> {
    [
        row_mat3x2_col(a, 0),
        row_mat3x2_col(a, 1)
    ]
}
/// Constructs the transpose of a matrix.
#[inline(always)]
pub fn mat3_transposed<T: Copy>(a: Matrix3<T>) -> Matrix3<T> {
    [
        row_mat3_col(a, 0),
        row_mat3_col(a, 1),
        row_mat3_col(a, 2)
    ]
}
/// Constructs the transpose of a matrix.
#[inline(always)]
pub fn mat3x4_transposed<T: Copy>(a: Matrix3x4<T>) -> Matrix4x3<T> {
    [
        row_mat3x4_col(a, 0),
        row_mat3x4_col(a, 1),
        row_mat3x4_col(a, 2),
        row_mat3x4_col(a, 3)
    ]
}
/// Constructs the transpose of a matrix.
#[inline(always)]
pub fn mat4x3_transposed<T: Copy>(a: Matrix4x3<T>) -> Matrix3x4<T> {
    [
        row_mat4x3_col(a, 0),
        row_mat4x3_col(a, 1),
        row_mat4x3_col(a, 2)
    ]
}
/// Constructs the transpose of a matrix.
#[inline(always)]
pub fn mat4_transposed<T: Copy>(a: Matrix4<T>) -> Matrix4<T> {
    [
        row_mat4_col(a, 0),
        row_mat4_col(a, 1),
        row_mat4_col(a, 2),
        row_mat4_col(a, 3)
    ]
}
/// Transforms a 3D vector through a matrix.
///
/// `mat` is stored column-major, so each output component is the dot
/// product of a (reconstructed) matrix row with `a`.
#[inline(always)]
pub fn col_mat3_transform<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec3_dot(col_mat3_row(mat, 0), a),
        vec3_dot(col_mat3_row(mat, 1), a),
        vec3_dot(col_mat3_row(mat, 2), a)
    ]
}
/// Transforms a 4D vector through a matrix.
#[inline(always)]
pub fn col_mat4_transform<T: Num + Copy>(
    mat: Matrix4<T>,
    a: Vector4<T>
) -> Vector4<T> {
    [
        vec4_dot(col_mat4_row(mat, 0), a),
        vec4_dot(col_mat4_row(mat, 1), a),
        vec4_dot(col_mat4_row(mat, 2), a),
        vec4_dot(col_mat4_row(mat, 3), a)
    ]
}
/// Transforms a 3D vector through a matrix.
///
/// Row-major variant: the stored rows are dotted with `a` directly.
#[inline(always)]
pub fn row_mat3_transform<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec3_dot(mat[0], a),
        vec3_dot(mat[1], a),
        vec3_dot(mat[2], a)
    ]
}
/// Transforms a 4D vector through a matrix.
#[inline(always)]
pub fn row_mat4_transform<T: Num + Copy>(
    mat: Matrix4<T>,
    a: Vector4<T>
) -> Vector4<T> {
    [
        vec4_dot(mat[0], a),
        vec4_dot(mat[1], a),
        vec4_dot(mat[2], a),
        vec4_dot(mat[3], a)
    ]
}
/// Transforms a 2D position through matrix.
///
/// Positions include the translation column (implicit homogeneous 1),
/// unlike the `_transform_vec2` variants below.
#[inline(always)]
pub fn row_mat2x3_transform_pos2<T: Num + Copy>(
    mat: Matrix2x3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_pos2(mat[0], a),
        vec3_dot_pos2(mat[1], a)
    ]
}
/// Transforms a 2D position through matrix.
#[inline(always)]
pub fn col_mat3x2_transform_pos2<T: Num + Copy>(
    mat: Matrix3x2<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_pos2(col_mat3x2_row(mat, 0), a),
        vec3_dot_pos2(col_mat3x2_row(mat, 1), a)
    ]
}
/// Transforms a 2D position through row matrix.
///
/// The third matrix row (projective part) is ignored; no perspective
/// divide is performed.
#[inline(always)]
pub fn row_mat3_transform_pos2<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_pos2(mat[0], a),
        vec3_dot_pos2(mat[1], a)
    ]
}
/// Transforms a 2D position through column matrix.
#[inline(always)]
pub fn col_mat3_transform_pos2<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_pos2(col_mat3_row(mat, 0), a),
        vec3_dot_pos2(col_mat3_row(mat, 1), a)
    ]
}
/// Transforms a 3D position through matrix.
#[inline(always)]
pub fn row_mat3x4_transform_pos3<T: Num + Copy>(
    mat: Matrix3x4<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec4_dot_pos3(mat[0], a),
        vec4_dot_pos3(mat[1], a),
        vec4_dot_pos3(mat[2], a),
    ]
}
/// Transforms a 3D position through matrix.
#[inline(always)]
pub fn col_mat4x3_transform_pos3<T: Num + Copy>(
    mat: Matrix4x3<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec4_dot_pos3(col_mat4x3_row(mat, 0), a),
        vec4_dot_pos3(col_mat4x3_row(mat, 1), a),
        vec4_dot_pos3(col_mat4x3_row(mat, 2), a)
    ]
}
/// Transforms a 2D vector through matrix.
///
/// Vectors (directions) ignore the translation column, unlike the
/// `_transform_pos2` variants above.
#[inline(always)]
pub fn row_mat2x3_transform_vec2<T: Num + Copy>(
    mat: Matrix2x3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_vec2(mat[0], a),
        vec3_dot_vec2(mat[1], a)
    ]
}
/// Transforms a 2D vector through matrix.
#[inline(always)]
pub fn col_mat3x2_transform_vec2<T: Num + Copy>(
    mat: Matrix3x2<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_vec2(col_mat3x2_row(mat, 0), a),
        vec3_dot_vec2(col_mat3x2_row(mat, 1), a)
    ]
}
/// Transforms a 2D vector through row matrix.
#[inline(always)]
pub fn row_mat3_transform_vec2<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_vec2(mat[0], a),
        vec3_dot_vec2(mat[1], a)
    ]
}
/// Transforms a 2D vector through column matrix.
#[inline(always)]
pub fn col_mat3_transform_vec2<T: Num + Copy>(
    mat: Matrix3<T>,
    a: Vector2<T>
) -> Vector2<T> {
    [
        vec3_dot_vec2(col_mat3_row(mat, 0), a),
        vec3_dot_vec2(col_mat3_row(mat, 1), a)
    ]
}
/// Transforms a 3D vector through matrix.
#[inline(always)]
pub fn row_mat3x4_transform_vec3<T: Num + Copy>(
    mat: Matrix3x4<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec4_dot_vec3(mat[0], a),
        vec4_dot_vec3(mat[1], a),
        vec4_dot_vec3(mat[2], a)
    ]
}
/// Transforms a 3D vector through matrix.
#[inline(always)]
pub fn col_mat4x3_transform_vec3<T: Num + Copy>(
    mat: Matrix4x3<T>,
    a: Vector3<T>
) -> Vector3<T> {
    [
        vec4_dot_vec3(col_mat4x3_row(mat, 0), a),
        vec4_dot_vec3(col_mat4x3_row(mat, 1), a),
        vec4_dot_vec3(col_mat4x3_row(mat, 2), a)
    ]
}
/// Computes the determinant of a matrix.
///
/// For the 2x3 affine matrix only the 2x2 linear part contributes;
/// the translation column never affects the determinant.
pub fn mat2x3_det<T: Num>(mat: Matrix2x3<T>) -> T {
    mat[0][0] * mat[1][1]
    - mat[0][1] * mat[1][0]
}
/// Computes the determinant of a matrix.
pub fn mat3x2_det<T: Num>(mat: Matrix3x2<T>) -> T {
    mat[0][0] * mat[1][1]
    - mat[0][1] * mat[1][0]
}
/// Computes the determinant of a matrix.
///
/// Rule of Sarrus expansion of the 3x3 matrix.
pub fn mat3_det<T: Num>(mat: Matrix3<T>) -> T {
    mat[0][0] * mat[1][1] * mat[2][2]
    + mat[0][1] * mat[1][2] * mat[2][0]
    + mat[0][2] * mat[1][0] * mat[2][1]
    - mat[0][0] * mat[1][2] * mat[2][1]
    - mat[0][1] * mat[1][0] * mat[2][2]
    - mat[0][2] * mat[1][1] * mat[2][0]
}
/// Computes the determinant of a matrix.
///
/// Only the 3x3 linear part of the 3x4 affine matrix contributes.
pub fn mat3x4_det<T: Num>(mat: Matrix3x4<T>) -> T {
    mat[0][0] * mat[1][1] * mat[2][2]
    + mat[0][1] * mat[1][2] * mat[2][0]
    + mat[0][2] * mat[1][0] * mat[2][1]
    - mat[0][0] * mat[1][2] * mat[2][1]
    - mat[0][1] * mat[1][0] * mat[2][2]
    - mat[0][2] * mat[1][1] * mat[2][0]
}
/// Computes the determinant of a matrix.
pub fn mat4x3_det<T: Num>(mat: Matrix4x3<T>) -> T {
    mat[0][0] * mat[1][1] * mat[2][2]
    + mat[0][1] * mat[1][2] * mat[2][0]
    + mat[0][2] * mat[1][0] * mat[2][1]
    - mat[0][0] * mat[1][2] * mat[2][1]
    - mat[0][1] * mat[1][0] * mat[2][2]
    - mat[0][2] * mat[1][1] * mat[2][0]
}
/// Computes the determinant of a 4x4 matrix.
///
/// Full Leibniz expansion: 24 signed products, one per permutation of
/// the column indices. Kept fully unrolled for branch-free evaluation.
pub fn mat4_det<T: Num>(mat: Matrix4<T>) -> T {
    mat[0][0] * mat[1][1] * mat[2][2] * mat[3][3]
    + mat[0][0] * mat[1][2] * mat[2][3] * mat[3][1]
    + mat[0][0] * mat[1][3] * mat[2][1] * mat[3][2]
    + mat[0][1] * mat[1][0] * mat[2][3] * mat[3][2]
    + mat[0][1] * mat[1][2] * mat[2][0] * mat[3][3]
    + mat[0][1] * mat[1][3] * mat[2][2] * mat[3][0]
    + mat[0][2] * mat[1][0] * mat[2][1] * mat[3][3]
    + mat[0][2] * mat[1][1] * mat[2][3] * mat[3][0]
    + mat[0][2] * mat[1][3] * mat[2][0] * mat[3][1]
    + mat[0][3] * mat[1][0] * mat[2][2] * mat[3][1]
    + mat[0][3] * mat[1][1] * mat[2][0] * mat[3][2]
    + mat[0][3] * mat[1][2] * mat[2][1] * mat[3][0]
    - mat[0][0] * mat[1][1] * mat[2][3] * mat[3][2]
    - mat[0][0] * mat[1][2] * mat[2][1] * mat[3][3]
    - mat[0][0] * mat[1][3] * mat[2][2] * mat[3][1]
    - mat[0][1] * mat[1][0] * mat[2][2] * mat[3][3]
    - mat[0][1] * mat[1][2] * mat[2][3] * mat[3][0]
    - mat[0][1] * mat[1][3] * mat[2][0] * mat[3][2]
    - mat[0][2] * mat[1][0] * mat[2][3] * mat[3][1]
    - mat[0][2] * mat[1][1] * mat[2][0] * mat[3][3]
    - mat[0][2] * mat[1][3] * mat[2][1] * mat[3][0]
    - mat[0][3] * mat[1][0] * mat[2][1] * mat[3][2]
    - mat[0][3] * mat[1][1] * mat[2][2] * mat[3][0]
    - mat[0][3] * mat[1][2] * mat[2][0] * mat[3][1]
}
/// Computes inverse determinant of a 2x3 matrix.
///
/// NOTE: divides by the determinant with no singularity check — a
/// singular matrix yields inf/NaN for floats (or a panic for integers).
#[inline(always)]
pub fn mat2x3_inv_det<T: Num>(mat: Matrix2x3<T>) -> T {
    let one: T = One::one();
    one / mat2x3_det(mat)
}
/// Computes inverse determinant of a 3x2 matrix.
#[inline(always)]
pub fn mat3x2_inv_det<T: Num>(mat: Matrix3x2<T>) -> T {
    let one: T = One::one();
    one / mat3x2_det(mat)
}
/// Computes inverse determinant of a 3x3 matrix.
#[inline(always)]
pub fn mat3_inv_det<T: Num>(mat: Matrix3<T>) -> T {
    let one: T = One::one();
    one / mat3_det(mat)
}
/// Computes inverse determinant of a 3x4 matrix.
#[inline(always)]
pub fn mat3x4_inv_det<T: Num>(mat: Matrix3x4<T>) -> T {
    let one: T = One::one();
    one / mat3x4_det(mat)
}
/// Computes inverse determinant of a 4x3 matrix.
#[inline(always)]
pub fn mat4x3_inv_det<T: Num>(mat: Matrix4x3<T>) -> T {
    let one: T = One::one();
    one / mat4x3_det(mat)
}
/// Computes the inverse determinant of a 4x4 matrix.
#[inline(always)]
pub fn mat4_inv_det<T: Num>(mat: Matrix4<T>) -> T {
    let one: T = One::one();
    one / mat4_det(mat)
}
/// Computes the inverse of a 2x3 matrix.
///
/// Inverts the affine transform: adjugate of the 2x2 linear part scaled
/// by 1/det, plus the back-transformed translation column. Singular
/// input produces non-finite values (see `mat2x3_inv_det`).
pub fn mat2x3_inv<T: Num + Copy>(mat: Matrix2x3<T>) -> Matrix2x3<T> {
    let inv_det = mat2x3_inv_det(mat);
    [
        [
            mat[1][1] * inv_det,
            - mat[0][1] * inv_det,
            (
                mat[0][1] * mat[1][2]
                - mat[0][2] * mat[1][1]
            ) * inv_det
        ],
        [
            - mat[1][0] * inv_det,
            mat[0][0] * inv_det,
            (
                mat[0][2] * mat[1][0]
                - mat[0][0] * mat[1][2]
            ) * inv_det,
        ]
    ]
}
/// Computes the inverse of a 3x2 matrix.
///
/// Column-major counterpart of `mat2x3_inv`: the 2x2 linear part is
/// inverted via its adjugate and the translation row is mapped back.
pub fn mat3x2_inv<T: Num + Copy>(mat: Matrix3x2<T>) -> Matrix3x2<T> {
    let inv_det = mat3x2_inv_det(mat);
    [
        [
            mat[1][1] * inv_det,
            - mat[0][1] * inv_det
        ],
        [
            - mat[1][0] * inv_det,
            mat[0][0] * inv_det
        ],
        [
            (
                mat[1][0] * mat[2][1]
                - mat[1][1] * mat[2][0]
            ) * inv_det,
            (
                mat[0][1] * mat[2][0]
                - mat[0][0] * mat[2][1]
            ) * inv_det
        ]
    ]
}
/// Computes the inverse of a 3x3 matrix.
///
/// Classical adjugate method: entry (i, j) of the result is the (j, i)
/// cofactor (a signed 2x2 determinant) times 1/det. No singularity
/// check is performed.
pub fn mat3_inv<T: Num + Copy>(mat: Matrix3<T>) -> Matrix3<T> {
    let inv_det = mat3_inv_det(mat);
    [
        [ (
            mat[1][1] * mat[2][2]
            - mat[1][2] * mat[2][1]
          ) * inv_det,
          (
            mat[0][2] * mat[2][1]
            - mat[0][1] * mat[2][2]
          ) * inv_det,
          (
            mat[0][1] * mat[1][2]
            - mat[0][2] * mat[1][1]
          ) * inv_det
        ],
        [
          (
            mat[1][2] * mat[2][0]
            - mat[1][0] * mat[2][2]
          ) * inv_det,
          (
            mat[0][0] * mat[2][2]
            - mat[0][2] * mat[2][0]
          ) * inv_det,
          (
            mat[0][2] * mat[1][0]
            - mat[0][0] * mat[1][2]
          ) * inv_det
        ],
        [
          (
            mat[1][0] * mat[2][1]
            - mat[1][1] * mat[2][0]
          ) * inv_det,
          (
            mat[0][1] * mat[2][0]
            - mat[0][0] * mat[2][1]
          ) * inv_det,
          (
            mat[0][0] * mat[1][1]
            - mat[0][1] * mat[1][0]
          ) * inv_det
        ]
    ]
}
/// Computes the inverse of a 3x4 matrix.
///
/// Inverts the 3x4 affine transform (implicit bottom row [0 0 0 1]):
/// the first three columns are the adjugate of the 3x3 linear part
/// scaled by 1/det; the fourth column is the back-transformed
/// translation. No singularity check is performed.
pub fn mat3x4_inv<T: Num + Copy>(mat: Matrix3x4<T>) -> Matrix3x4<T> {
    let inv_det = mat3x4_inv_det(mat);
    [
        [ (
            mat[1][1] * mat[2][2]
            - mat[1][2] * mat[2][1]
          ) * inv_det,
          (
            mat[0][2] * mat[2][1]
            - mat[0][1] * mat[2][2]
          ) * inv_det,
          (
            mat[0][1] * mat[1][2]
            - mat[0][2] * mat[1][1]
          ) * inv_det,
          (
            mat[0][1] * mat[1][3] * mat[2][2]
            + mat[0][2] * mat[1][1] * mat[2][3]
            + mat[0][3] * mat[1][2] * mat[2][1]
            - mat[0][1] * mat[1][2] * mat[2][3]
            - mat[0][2] * mat[1][3] * mat[2][1]
            - mat[0][3] * mat[1][1] * mat[2][2]
          ) * inv_det
        ],
        [
          (
            mat[1][2] * mat[2][0]
            - mat[1][0] * mat[2][2]
          ) * inv_det,
          (
            mat[0][0] * mat[2][2]
            - mat[0][2] * mat[2][0]
          ) * inv_det,
          (
            mat[0][2] * mat[1][0]
            - mat[0][0] * mat[1][2]
          ) * inv_det,
          (
            mat[0][0] * mat[1][2] * mat[2][3]
            + mat[0][2] * mat[1][3] * mat[2][0]
            + mat[0][3] * mat[1][0] * mat[2][2]
            - mat[0][0] * mat[1][3] * mat[2][2]
            - mat[0][2] * mat[1][0] * mat[2][3]
            - mat[0][3] * mat[1][2] * mat[2][0]
          ) * inv_det
        ],
        [
          (
            mat[1][0] * mat[2][1]
            - mat[1][1] * mat[2][0]
          ) * inv_det,
          (
            mat[0][1] * mat[2][0]
            - mat[0][0] * mat[2][1]
          ) * inv_det,
          (
            mat[0][0] * mat[1][1]
            - mat[0][1] * mat[1][0]
          ) * inv_det,
          (
            mat[0][0] * mat[1][3] * mat[2][1]
            + mat[0][1] * mat[1][0] * mat[2][3]
            + mat[0][3] * mat[1][1] * mat[2][0]
            - mat[0][0] * mat[1][1] * mat[2][3]
            - mat[0][1] * mat[1][3] * mat[2][0]
            - mat[0][3] * mat[1][0] * mat[2][1]
          ) * inv_det
        ]
    ]
}
/// Computes the inverse of a 4x3 matrix.
///
/// Column-major counterpart of `mat3x4_inv`: first three rows are the
/// adjugate of the 3x3 linear part scaled by 1/det, the fourth row is
/// the back-transformed translation. No singularity check is performed.
pub fn mat4x3_inv<T: Num + Copy>(mat: Matrix4x3<T>) -> Matrix4x3<T> {
    let inv_det = mat4x3_inv_det(mat);
    [
        [ (
            mat[1][1] * mat[2][2]
            - mat[1][2] * mat[2][1]
          ) * inv_det,
          (
            mat[0][2] * mat[2][1]
            - mat[0][1] * mat[2][2]
          ) * inv_det,
          (
            mat[0][1] * mat[1][2]
            - mat[0][2] * mat[1][1]
          ) * inv_det
        ],
        [
          (
            mat[1][2] * mat[2][0]
            - mat[1][0] * mat[2][2]
          ) * inv_det,
          (
            mat[0][0] * mat[2][2]
            - mat[0][2] * mat[2][0]
          ) * inv_det,
          (
            mat[0][2] * mat[1][0]
            - mat[0][0] * mat[1][2]
          ) * inv_det
        ],
        [
          (
            mat[1][0] * mat[2][1]
            - mat[1][1] * mat[2][0]
          ) * inv_det,
          (
            mat[0][1] * mat[2][0]
            - mat[0][0] * mat[2][1]
          ) * inv_det,
          (
            mat[0][0] * mat[1][1]
            - mat[0][1] * mat[1][0]
          ) * inv_det
        ],
        [
          (
            mat[1][0] * mat[2][2] * mat[3][1]
            + mat[1][1] * mat[2][0] * mat[3][2]
            + mat[1][2] * mat[2][1] * mat[3][0]
            - mat[1][0] * mat[2][1] * mat[3][2]
            - mat[1][1] * mat[2][2] * mat[3][0]
            - mat[1][2] * mat[2][0] * mat[3][1]
          ) * inv_det,
          (
            mat[0][0] * mat[2][1] * mat[3][2]
            + mat[0][1] * mat[2][2] * mat[3][0]
            + mat[0][2] * mat[2][0] * mat[3][1]
            - mat[0][0] * mat[2][2] * mat[3][1]
            - mat[0][1] * mat[2][0] * mat[3][2]
            - mat[0][2] * mat[2][1] * mat[3][0]
          ) * inv_det,
          (
            mat[0][0] * mat[1][2] * mat[3][1]
            + mat[0][1] * mat[1][0] * mat[3][2]
            + mat[0][2] * mat[1][1] * mat[3][0]
            - mat[0][0] * mat[1][1] * mat[3][2]
            - mat[0][1] * mat[1][2] * mat[3][0]
            - mat[0][2] * mat[1][0] * mat[3][1]
          ) * inv_det
        ]
    ]
}
/// Computes the inverse of a 4x4 matrix.
///
/// Classical adjugate method: entry (i, j) of the result is the (j, i)
/// cofactor — a signed 3x3 determinant, fully expanded — times 1/det.
/// The expansion is kept unrolled; do not "simplify" term order, the
/// signs depend on it. No singularity check is performed: a singular
/// matrix yields non-finite entries for float T.
pub fn mat4_inv<T: Num + Copy>(mat: Matrix4<T>) -> Matrix4<T> {
    let inv_det = mat4_inv_det(mat);
    [
        [ (
            mat[1][1] * mat[2][2] * mat[3][3]
            + mat[1][2] * mat[2][3] * mat[3][1]
            + mat[1][3] * mat[2][1] * mat[3][2]
            - mat[1][1] * mat[2][3] * mat[3][2]
            - mat[1][2] * mat[2][1] * mat[3][3]
            - mat[1][3] * mat[2][2] * mat[3][1]
          ) * inv_det,
          (
            mat[0][1] * mat[2][3] * mat[3][2]
            + mat[0][2] * mat[2][1] * mat[3][3]
            + mat[0][3] * mat[2][2] * mat[3][1]
            - mat[0][1] * mat[2][2] * mat[3][3]
            - mat[0][2] * mat[2][3] * mat[3][1]
            - mat[0][3] * mat[2][1] * mat[3][2]
          ) * inv_det,
          (
            mat[0][1] * mat[1][2] * mat[3][3]
            + mat[0][2] * mat[1][3] * mat[3][1]
            + mat[0][3] * mat[1][1] * mat[3][2]
            - mat[0][1] * mat[1][3] * mat[3][2]
            - mat[0][2] * mat[1][1] * mat[3][3]
            - mat[0][3] * mat[1][2] * mat[3][1]
          ) * inv_det,
          (
            mat[0][1] * mat[1][3] * mat[2][2]
            + mat[0][2] * mat[1][1] * mat[2][3]
            + mat[0][3] * mat[1][2] * mat[2][1]
            - mat[0][1] * mat[1][2] * mat[2][3]
            - mat[0][2] * mat[1][3] * mat[2][1]
            - mat[0][3] * mat[1][1] * mat[2][2]
          ) * inv_det
        ],
        [
          (
            mat[1][0] * mat[2][3] * mat[3][2]
            + mat[1][2] * mat[2][0] * mat[3][3]
            + mat[1][3] * mat[2][2] * mat[3][0]
            - mat[1][0] * mat[2][2] * mat[3][3]
            - mat[1][2] * mat[2][3] * mat[3][0]
            - mat[1][3] * mat[2][0] * mat[3][2]
          ) * inv_det,
          (
            mat[0][0] * mat[2][2] * mat[3][3]
            + mat[0][2] * mat[2][3] * mat[3][0]
            + mat[0][3] * mat[2][0] * mat[3][2]
            - mat[0][0] * mat[2][3] * mat[3][2]
            - mat[0][2] * mat[2][0] * mat[3][3]
            - mat[0][3] * mat[2][2] * mat[3][0]
          ) * inv_det,
          (
            mat[0][0] * mat[1][3] * mat[3][2]
            + mat[0][2] * mat[1][0] * mat[3][3]
            + mat[0][3] * mat[1][2] * mat[3][0]
            - mat[0][0] * mat[1][2] * mat[3][3]
            - mat[0][2] * mat[1][3] * mat[3][0]
            - mat[0][3] * mat[1][0] * mat[3][2]
          ) * inv_det,
          (
            mat[0][0] * mat[1][2] * mat[2][3]
            + mat[0][2] * mat[1][3] * mat[2][0]
            + mat[0][3] * mat[1][0] * mat[2][2]
            - mat[0][0] * mat[1][3] * mat[2][2]
            - mat[0][2] * mat[1][0] * mat[2][3]
            - mat[0][3] * mat[1][2] * mat[2][0]
          ) * inv_det
        ],
        [
          (
            mat[1][0] * mat[2][1] * mat[3][3]
            + mat[1][1] * mat[2][3] * mat[3][0]
            + mat[1][3] * mat[2][0] * mat[3][1]
            - mat[1][0] * mat[2][3] * mat[3][1]
            - mat[1][1] * mat[2][0] * mat[3][3]
            - mat[1][3] * mat[2][1] * mat[3][0]
          ) * inv_det,
          (
            mat[0][0] * mat[2][3] * mat[3][1]
            + mat[0][1] * mat[2][0] * mat[3][3]
            + mat[0][3] * mat[2][1] * mat[3][0]
            - mat[0][0] * mat[2][1] * mat[3][3]
            - mat[0][1] * mat[2][3] * mat[3][0]
            - mat[0][3] * mat[2][0] * mat[3][1]
          ) * inv_det,
          (
            mat[0][0] * mat[1][1] * mat[3][3]
            + mat[0][1] * mat[1][3] * mat[3][0]
            + mat[0][3] * mat[1][0] * mat[3][1]
            - mat[0][0] * mat[1][3] * mat[3][1]
            - mat[0][1] * mat[1][0] * mat[3][3]
            - mat[0][3] * mat[1][1] * mat[3][0]
          ) * inv_det,
          (
            mat[0][0] * mat[1][3] * mat[2][1]
            + mat[0][1] * mat[1][0] * mat[2][3]
            + mat[0][3] * mat[1][1] * mat[2][0]
            - mat[0][0] * mat[1][1] * mat[2][3]
            - mat[0][1] * mat[1][3] * mat[2][0]
            - mat[0][3] * mat[1][0] * mat[2][1]
          ) * inv_det
        ],
        [
          (
            mat[1][0] * mat[2][2] * mat[3][1]
            + mat[1][1] * mat[2][0] * mat[3][2]
            + mat[1][2] * mat[2][1] * mat[3][0]
            - mat[1][0] * mat[2][1] * mat[3][2]
            - mat[1][1] * mat[2][2] * mat[3][0]
            - mat[1][2] * mat[2][0] * mat[3][1]
          ) * inv_det,
          (
            mat[0][0] * mat[2][1] * mat[3][2]
            + mat[0][1] * mat[2][2] * mat[3][0]
            + mat[0][2] * mat[2][0] * mat[3][1]
            - mat[0][0] * mat[2][2] * mat[3][1]
            - mat[0][1] * mat[2][0] * mat[3][2]
            - mat[0][2] * mat[2][1] * mat[3][0]
          ) * inv_det,
          (
            mat[0][0] * mat[1][2] * mat[3][1]
            + mat[0][1] * mat[1][0] * mat[3][2]
            + mat[0][2] * mat[1][1] * mat[3][0]
            - mat[0][0] * mat[1][1] * mat[3][2]
            - mat[0][1] * mat[1][2] * mat[3][0]
            - mat[0][2] * mat[1][0] * mat[3][1]
          ) * inv_det,
          (
            mat[0][0] * mat[1][1] * mat[2][2]
            + mat[0][1] * mat[1][2] * mat[2][0]
            + mat[0][2] * mat[1][0] * mat[2][1]
            - mat[0][0] * mat[1][2] * mat[2][1]
            - mat[0][1] * mat[1][0] * mat[2][2]
            - mat[0][2] * mat[1][1] * mat[2][0]
          ) * inv_det
        ]
    ]
}
|
#![crate_id = "router"]
#![license = "MIT"]
#![deny(missing_doc)]
// NOTE(review): `unused_result` was listed twice in the original deny
// list; the duplicate has been removed.
#![deny(unused_result, unnecessary_qualification,
        non_camel_case_types, unused_variable, unnecessary_typecast)]
#![feature(phase, globs)]
//! `Router` provides a fast router middleware for the Iron web framework.
extern crate http;
extern crate iron;
extern crate regex;
#[phase(plugin, link)] extern crate log;
#[phase(plugin)] extern crate regex_macros;
#[cfg(test)] extern crate test;
pub use router::Router;
pub use router::params::Params;
mod router;
// (doc) Added doc attributes: the second copy of the crate header below adds rustdoc logo, favicon and root-URL attributes.
#![doc(html_logo_url = "https://avatars0.githubusercontent.com/u/7853871?s=128", html_favicon_url = "https://avatars0.githubusercontent.com/u/7853871?s=256", html_root_url = "http://ironframework.io/core/router")]
#![crate_id = "router"]
#![license = "MIT"]
#![deny(missing_doc)]
// NOTE(review): `unused_result` was listed twice in the original deny
// list; the duplicate has been removed.
#![deny(unused_result, unnecessary_qualification,
        non_camel_case_types, unused_variable, unnecessary_typecast)]
#![feature(phase, globs)]
//! `Router` provides a fast router middleware for the Iron web framework.
extern crate http;
extern crate iron;
extern crate regex;
#[phase(plugin, link)] extern crate log;
#[phase(plugin)] extern crate regex_macros;
#[cfg(test)] extern crate test;
pub use router::Router;
pub use router::params::Params;
mod router;
|
use std::mem::transmute;
/// Serializes a value into a `BitBuf` at the buffer's current cursor.
pub trait WriteToBitBuf {
    /// Writes `self` into `buf`, advancing the cursor.
    fn write_to_bitbuf(&self, buf: &mut BitBuf);
}
/// Deserializes a value from a `BitBuf` at the buffer's current cursor.
pub trait FromBitBuf {
    /// Reads a value out of `buf`, advancing the cursor.
    fn from_bitbuf(buf: &mut BitBuf) -> Self;
}
/// Four raw bytes occupying the same storage as an `f32`.
///
/// `#[repr(C)]` pins the field order so the transmute has a fully
/// defined layout; without it the Rust default layout leaves field
/// placement unspecified.
#[repr(C)]
struct FourByte {
    b1: u8,
    b2: u8,
    b3: u8,
    b4: u8,
}
impl FourByte {
    /// Reinterprets the bits of an `f32` as four bytes.
    pub fn trans_from_f32(value: f32) -> FourByte {
        // SAFETY: f32 and FourByte are both exactly 4 bytes and every
        // bit pattern is valid for both, so this transmute is sound.
        unsafe { transmute::<f32, FourByte>(value) }
    }
    /// Reassembles the four bytes into an `f32`.
    pub fn trans_to_f32(self) -> f32 {
        // SAFETY: inverse of trans_from_f32; same size, no invalid
        // bit patterns for f32.
        unsafe { transmute::<FourByte, f32>(self) }
    }
}
/// Eight raw bytes occupying the same storage as an `f64`.
///
/// `#[repr(C)]` pins the field order so the transmute has a fully
/// defined layout (see `FourByte`).
#[repr(C)]
struct EightByte {
    b1: u8,
    b2: u8,
    b3: u8,
    b4: u8,
    b5: u8,
    b6: u8,
    b7: u8,
    b8: u8,
}
impl EightByte {
    /// Reinterprets the bits of an `f64` as eight bytes.
    pub fn trans_from_f64(value: f64) -> EightByte {
        // SAFETY: f64 and EightByte are both exactly 8 bytes and every
        // bit pattern is valid for both, so this transmute is sound.
        unsafe { transmute::<f64, EightByte>(value) }
    }
    /// Reassembles the eight bytes into an `f64`.
    pub fn trans_to_f64(self) -> f64 {
        // SAFETY: inverse of trans_from_f64; same size, no invalid
        // bit patterns for f64.
        unsafe { transmute::<EightByte, f64>(self) }
    }
}
/// A growable-cursor bit buffer backed by a byte vector.
///
/// Reads and writes share the single `pos` cursor; callers interleave
/// them at their own risk.
#[derive(Clone)]
pub struct BitBuf {
    buf: Vec<u8>,
    pos: usize, // The current bit position of the cursor.
    size: usize, // Size in bits.
}
impl BitBuf {
/// Creates a new BitBuf, initializing a new Vec<u8>.
/// for the underlying buffer.
pub fn with_len(len: usize) -> BitBuf {
let mut vec = Vec::with_capacity(len);
unsafe { vec.set_len(len) };
for x in &mut vec { *x = 0; }
BitBuf {
buf: vec,
pos: 0,
size: len * 8,
}
}
    /// Consumes the BitBuf, returning the underlying Vec<u8>.
    ///
    /// Cursor position is discarded; the full backing storage is
    /// returned regardless of how many bits were actually written.
    pub fn to_vec(self) -> Vec<u8> {
        self.buf
    }
    // Returns a slice into the underlying Vec<u8> buffer.
    // (Demoted from `///` to `//`: as a doc comment it had no item of
    // its own and would attach to `bit_size` below, documenting the
    // wrong method.)
    //pub fn buf_as_slice(&self) -> &[u8] {
    //    self.buf.as_slice()
    //}
    /// The current bit size of the Vec<u8>.
    pub fn bit_size(&self) -> usize {
        self.size
    }
    /// The current position of the cursor. The BitBuf
    /// does not insert, and will overwrite any data currently
    /// at the cursor position during writing.
    pub fn bit_pos(&self) -> usize {
        self.pos
    }
pub fn can_write_bits(&self, bit_size: usize) -> bool {
(bit_size + self.pos) < self.size
}
pub fn can_read_bits(&self, bit_size: usize) -> bool {
(bit_size + self.pos) < self.size
}
pub fn write_bool(&mut self, value: bool) {
self.in_write_byte((if value {1} else {0}), 1);
}
pub fn read_bool(&mut self) -> bool {
self.in_read_byte(1) == 1
}
pub fn write_i8(&mut self, value: i8) {
self.write_i8_part(value, 8);
}
pub fn read_i8(&mut self) -> i8 {
self.read_i8_part(8)
}
fn write_i8_part(&mut self, value: i8, bits: u8) {
self.in_write_byte(value as u8, bits);
}
fn read_i8_part(&mut self, bits: u8) -> i8 {
self.in_read_byte(bits) as i8
}
pub fn write_u8(&mut self, value: u8) {
self.write_u8_part(value, 8);
}
pub fn read_u8(&mut self) -> u8 {
self.read_u8_part(8)
}
pub fn write_u8_part(&mut self, value: u8, bits: u8) {
self.in_write_byte(value, bits);
}
pub fn read_u8_part(&mut self, bits: u8) -> u8 {
self.in_read_byte(bits)
}
pub fn write_u16(&mut self, value: u16) {
self.write_u16_part(value, 16);
}
pub fn read_u16(&mut self) -> u16 {
self.read_u16_part(16)
}
pub fn write_u16_part(&mut self, value: u16, bits: u8) {
let a = (value >> 0) as u8;
let b = (value >> 8) as u8;
match (bits + 7) / 8 {
1 => {
self.in_write_byte(a, bits);
},
2 => {
self.in_write_byte(a, 8);
self.in_write_byte(b, bits - 8);
},
_ => {
//panic!("Must write between 1 and 32 bits.")
}
}
}
pub fn read_u16_part(&mut self, bits: u8) -> u16 {
let mut a = 0u16;
let mut b = 0u16;
match (bits + 7) / 8 {
1 => {
a = self.in_read_byte(bits) as u16;
},
2 => {
a = self.in_read_byte(8) as u16;
b = self.in_read_byte(bits - 8) as u16;
},
_ => {
//panic!("Must read between 1 and 32 bits.")
}
}
(a | (b << 8)) as u16
}
pub fn write_i16(&mut self, value: i16) {
self.write_i16_part(value, 16);
}
pub fn read_i16(&mut self) -> i16 {
self.read_i16_part(16)
}
fn write_i16_part(&mut self, value: i16, bits: u8) {
self.write_u16_part(value as u16, bits);
}
fn read_i16_part(&mut self, bits: u8) -> i16 {
self.read_u16_part(bits) as i16
}
pub fn write_u32(&mut self, value: u32) {
self.write_u32_part(value, 32);
}
pub fn read_u32(&mut self) -> u32 {
self.read_u32_part(32)
}
pub fn write_u32_part(&mut self, value: u32, bits: u8) {
let a = (value >> 0) as u8;
let b = (value >> 8) as u8;
let c = (value >> 16) as u8;
let d = (value >> 24) as u8;
match (bits + 7) / 8 {
1 => {
self.in_write_byte(a, bits);
},
2 => {
self.in_write_byte(a, 8);
self.in_write_byte(b, bits - 8);
},
3 => {
self.in_write_byte(a, 8);
self.in_write_byte(b, 8);
self.in_write_byte(c, bits - 16);
},
4 => {
self.in_write_byte(a, 8);
self.in_write_byte(b, 8);
self.in_write_byte(c, 8);
self.in_write_byte(d, bits - 24);
},
_ => {
//panic!("Must write between 1 and 32 bits.")
}
}
}
pub fn read_u32_part(&mut self, bits: u8) -> u32 {
let mut a = 0i32;
let mut b = 0i32;
let mut c = 0i32;
let mut d = 0i32;
match (bits + 7) / 8 {
1 => {
a = self.in_read_byte(bits) as i32;
},
2 => {
a = self.in_read_byte(8) as i32;
b = self.in_read_byte(bits - 8) as i32;
},
3 => {
a = self.in_read_byte(8) as i32;
b = self.in_read_byte(8) as i32;
c = self.in_read_byte(bits - 16) as i32;
},
4 => {
a = self.in_read_byte(8) as i32;
b = self.in_read_byte(8) as i32;
c = self.in_read_byte(8) as i32;
d = self.in_read_byte(bits - 24) as i32;
},
_ => {
//panic!("Must read between 1 and 32 bits.")
}
}
(a | (b << 8) | (c << 16) | (d << 24)) as u32
}
pub fn read_i32(&mut self) -> i32 {
self.read_i32_part(32)
}
pub fn write_i32(&mut self, value: i32) {
self.write_i32_part(value, 32);
}
fn write_i32_part(&mut self, value: i32, bits: u8) {
self.write_u32_part(value as u32, bits);
}
fn read_i32_part(&mut self, bits: u8) -> i32 {
self.read_u32_part(bits) as i32
}
pub fn write_u64(&mut self, value: u64) {
self.write_u64_part(value, 64);
}
pub fn read_u64(&mut self) -> u64 {
self.read_u64_part(64)
}
pub fn write_u64_part(&mut self, value: u64, bits: u8) {
if bits <= 32 {
self.write_u32_part((value & 0xFFFFFFFF) as u32, bits);
} else {
self.write_u32_part(value as u32, 32);
self.write_u32_part((value >> 32) as u32, bits - 32);
}
}
pub fn read_u64_part(&mut self, bits: u8) -> u64 {
if bits <= 32 {
self.read_u32_part(bits) as u64
} else {
let a = self.read_u32_part(32) as u64;
let b = self.read_u32_part(bits - 32) as u64;
a | (b << 32)
}
}
pub fn write_i64(&mut self, value: i64) {
self.write_u64_part(value as u64, 64);
}
pub fn read_i64(&mut self) -> i64 {
self.read_u64_part(64) as i64
}
fn write_i64_part(&mut self, value: i64, bits: u8) {
self.write_u64_part(value as u64, bits);
}
fn read_i64_part(&mut self, bits: u8) -> i64 {
self.read_u64_part(bits) as i64
}
pub fn write_f32(&mut self, value: f32) {
let trans = FourByte::trans_from_f32(value);
self.in_write_byte(trans.b1, 8);
self.in_write_byte(trans.b2, 8);
self.in_write_byte(trans.b3, 8);
self.in_write_byte(trans.b4, 8);
}
pub fn read_f32(&mut self) -> f32 {
FourByte {
b1: self.in_read_byte(8),
b2: self.in_read_byte(8),
b3: self.in_read_byte(8),
b4: self.in_read_byte(8),
}.trans_to_f32()
}
pub fn write_f64(&mut self, value: f64) {
let trans = EightByte::trans_from_f64(value);
self.in_write_byte(trans.b1, 8);
self.in_write_byte(trans.b2, 8);
self.in_write_byte(trans.b3, 8);
self.in_write_byte(trans.b4, 8);
self.in_write_byte(trans.b5, 8);
self.in_write_byte(trans.b6, 8);
self.in_write_byte(trans.b7, 8);
self.in_write_byte(trans.b8, 8);
}
pub fn read_f64(&mut self) -> f64 {
EightByte {
b1: self.in_read_byte(8),
b2: self.in_read_byte(8),
b3: self.in_read_byte(8),
b4: self.in_read_byte(8),
b5: self.in_read_byte(8),
b6: self.in_read_byte(8),
b7: self.in_read_byte(8),
b8: self.in_read_byte(8),
}.trans_to_f64()
}
pub fn write_u8_slice(&mut self, value: &[u8]) {
for i in 0..value.len() {
self.in_write_byte(value[i], 8);
}
}
pub fn read_vec_u8(&mut self, length: usize) -> Vec<u8> {
(0..length).map(|_| self.in_read_byte(8)).collect()
}
pub fn write_string(&mut self, value: &str) {
self.write_u32(value.len() as u32);
self.write_u8_slice(value.as_bytes());
}
pub fn read_string(&mut self) -> String {
let len = self.read_u32() as usize;
String::from_utf8(self.read_vec_u8(len)).unwrap()
}
#[inline(always)]
fn in_write_byte(&mut self, mut value: u8, bits: u8) {
value = value & (0xFF >> (8 - bits));
let p = (self.pos >> 3) as usize;
let bits_used = self.pos & 0x7;
if bits_used == 0 {
self.buf[p] = value;
} else {
let bits_free = 8 - bits_used;
let bits_left: i16 = bits_free as i16 - bits as i16;
if bits_left >= 0 {
let mask = (0xFF >> bits_free) | (0xFF << (8 - bits_left));
self.buf[p] = (self.buf[p] & mask) | (value << bits_used);
} else {
self.buf[p] = (self.buf[p] & (0xFF >> bits_free)) | (value << bits_used);
self.buf[p + 1] = (self.buf[p + 1] & (0xFF << (bits - bits_free as u8))) | (value >> bits_free);
}
}
self.pos += bits as usize;
}
#[inline(always)]
fn in_read_byte(&mut self, bits: u8) -> u8 {
let value: u8;
let p = (self.pos >> 3) as usize;
let bits_used = self.pos % 8;
if bits_used == 0 && bits == 8 {
value = self.buf[p];
} else {
let first = self.buf[p] >> bits_used;
let remainder = bits - (8 - bits_used as u8);
if remainder < 1 {
value = first & (0xFF >> (8 - bits));
} else {
let second = self.buf[p + 1] & (0xFF >> (8 - remainder));
value = first | (second << (bits - remainder));
}
}
self.pos += bits as usize;
value
}
}
// ---------------------------------------------------------------------
// Round-trip tests: each writes one value, rewinds the cursor by
// resetting the private `pos` field directly, then reads the value
// back and asserts equality.
// ---------------------------------------------------------------------

#[test]
fn bool_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = true;
    buf.write_bool(testval);
    buf.pos = 0; // rewind to the start before reading back
    assert!(buf.read_bool() == testval);
}

#[test]
fn u8_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 211;
    buf.write_u8(testval);
    buf.pos = 0;
    assert!(buf.read_u8() == testval);
}

#[test]
fn u8_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 15;
    buf.write_u8_part(testval, 4);
    buf.pos = 0;
    assert!(buf.read_u8_part(4) == testval);
}

#[test]
fn i8_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 6;
    buf.write_i8_part(testval, 4);
    buf.pos = 0;
    assert!(buf.read_i8_part(4) == testval);
}

#[test]
fn i8_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = -109;
    buf.write_i8(testval);
    buf.pos = 0;
    assert!(buf.read_i8() == testval);
}

#[test]
fn u16_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 34507;
    buf.write_u16(testval);
    buf.pos = 0;
    assert!(buf.read_u16() == testval);
}

#[test]
fn u16_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 448;
    buf.write_u16_part(testval, 13);
    buf.pos = 0;
    let result = buf.read_u16_part(13);
    println!("{}", result);
    assert!(result == testval);
}

#[test]
fn i16_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = -11066;
    buf.write_i16(testval);
    buf.pos = 0;
    assert!(buf.read_i16() == testval);
}

#[test]
fn i16_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 10034;
    buf.write_i16_part(testval, 15);
    buf.pos = 0;
    assert!(buf.read_i16_part(15) == testval);
}

#[test]
fn u32_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 193772;
    buf.write_u32(testval);
    buf.pos = 0;
    assert!(buf.read_u32() == testval);
}

#[test]
fn u32_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 839011;
    buf.write_u32_part(testval, 27);
    buf.pos = 0;
    assert!(buf.read_u32_part(27) == testval);
}

#[test]
fn i32_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 54397;
    buf.write_i32_part(testval, 22);
    buf.pos = 0;
    assert!(buf.read_i32_part(22) == testval);
}

#[test]
fn i32_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = -23498225;
    buf.write_i32(testval);
    buf.pos = 0;
    assert!(buf.read_i32() == testval);
}

#[test]
fn u64_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 32944949231715;
    buf.write_u64_part(testval, 59);
    buf.pos = 0;
    assert!(buf.read_u64_part(59) == testval);
}

#[test]
fn u64_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 248394023907611;
    buf.write_u64(testval);
    buf.pos = 0;
    assert!(buf.read_u64() == testval);
}

#[test]
fn i64_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 1998372011;
    buf.write_i64_part(testval, 50);
    buf.pos = 0;
    assert!(buf.read_i64_part(50) == testval);
}

#[test]
fn i64_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = -24839402390;
    buf.write_i64(testval);
    buf.pos = 0;
    assert!(buf.read_i64() == testval);
}

#[test]
fn f32_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 3.0393124f32;
    buf.write_f32(testval);
    buf.pos = 0;
    assert!(buf.read_f32() == testval);
}

#[test]
fn f64_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 3.0395831239485302f64;
    buf.write_f64(testval);
    buf.pos = 0;
    assert!(buf.read_f64() == testval);
}

#[test]
fn string_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = "This is a test string. Nothing to see here. No, really!";
    buf.write_string(testval);
    buf.pos = 0;
    assert!(buf.read_string() == testval);
}
// Composite fixture used by the serialization benchmarks below.
struct BenchPerson {
    first_name: String,
    last_name: String,
    age: i8,
    alive: bool,
    weight: i16,
}

impl WriteToBitBuf for BenchPerson {
    // Fields are written in declaration order; `from_bitbuf` must read
    // them back in exactly the same order.
    fn write_to_bitbuf(&self, buf: &mut BitBuf) {
        buf.write_string(&self.first_name);
        buf.write_string(&self.last_name);
        buf.write_i8(self.age);
        buf.write_bool(self.alive);
        buf.write_i16(self.weight);
    }
}

impl FromBitBuf for BenchPerson {
    fn from_bitbuf(buf: &mut BitBuf) -> BenchPerson {
        BenchPerson {
            first_name: buf.read_string(),
            last_name: buf.read_string(),
            age: buf.read_i8(),
            alive: buf.read_bool(),
            weight: buf.read_i16(),
        }
    }
}
// ---------------------------------------------------------------------
// Benchmarks: each rewinds the cursor and fills a 1400-byte buffer
// per iteration.
// NOTE(review): `Bencher` is not imported anywhere in this file and
// `String::from_str` is long gone from the standard library — these
// presumably predate Rust 1.0 and will not compile on a modern
// toolchain. Confirm whether they should be ported to nightly
// `test::Bencher` (as the later revision's commented-out copy suggests)
// or to criterion.
// ---------------------------------------------------------------------

#[bench]
fn benchperson_write1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    let person = BenchPerson {
        first_name: String::from_str("John"),
        last_name: String::from_str("Johnson"),
        age: 47,
        alive: true,
        weight: 203,
    };
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..63 {
            person.write_to_bitbuf(&mut buf);
        }
    })
}

#[bench]
fn benchperson_read1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    let person = BenchPerson {
        first_name: String::from_str("John"),
        last_name: String::from_str("Johnson"),
        age: 47,
        alive: true,
        weight: 203,
    };
    for _ in 0..63 {
        person.write_to_bitbuf(&mut buf);
    }
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..63 {
            let p: BenchPerson = FromBitBuf::from_bitbuf(&mut buf);
        }
    })
}

#[bench]
fn bitbuf_create_bench(b: &mut Bencher) {
    b.iter(|| {
        let mut buf = BitBuf::with_len(1400);
    })
}

#[bench]
fn in_byte_write1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..1400 {
            buf.in_write_byte(240, 8);
        }
    })
}

#[bench]
fn in_byte_read1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    for _ in 0..1400 {
        buf.in_write_byte(240, 8);
    }
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..1400 {
            let b = buf.in_read_byte(8);
        }
    })
}

#[bench]
fn string_write1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..50 {
            buf.write_string("This is a string. Woo!!!");
        }
    })
}

#[bench]
fn string_read1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    for _ in 0..50 {
        buf.write_string("This is a string. Woo!!!");
    }
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..50 {
            let s = buf.read_string();
        }
    })
}

#[bench]
fn i32_write1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..350 {
            buf.write_i32(123239012);
        }
    })
}

#[bench]
fn i64_write1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..175 {
            buf.write_i64(12352390123458);
        }
    })
}

#[bench]
fn i64_read1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    for _ in 0..175 {
        buf.write_i64(12352390123458);
    }
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..175 {
            let i = buf.read_i64();
        }
    })
}

#[bench]
fn f32_write1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..350 {
            buf.write_f32(123.239012f32);
        }
    })
}

#[bench]
fn f64_write1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..175 {
            buf.write_f64(1235.2390123458f64);
        }
    })
}

#[bench]
fn f64_read1400_bench(b: &mut Bencher) {
    let mut buf = BitBuf::with_len(1400);
    for _ in 0..175 {
        buf.write_f64(1235.2390123458f64);
    }
    b.iter(|| {
        buf.pos = 0;
        for _ in 0..175 {
            let f = buf.read_f64();
        }
    })
}
// Comment out benchmarks, since `#[bench]`/`test::Bencher` are unstable (nightly-only).
use std::mem::transmute;
/// Types that can serialize themselves into a `BitBuf`.
pub trait WriteToBitBuf {
    /// Writes `self` into `buf` at the buffer's current bit cursor,
    /// advancing the cursor.
    fn write_to_bitbuf(&self, buf: &mut BitBuf);
}
/// Types that can deserialize themselves from a `BitBuf`.
pub trait FromBitBuf {
    /// Reads a value from `buf` at the buffer's current bit cursor,
    /// advancing the cursor.
    fn from_bitbuf(buf: &mut BitBuf) -> Self;
}
/// The four bytes of an `f32`, in native-endian order (`b1` is the
/// first native byte).
struct FourByte {
    b1: u8,
    b2: u8,
    b3: u8,
    b4: u8,
}

impl FourByte {
    /// Splits `value` into its native-endian bytes.
    ///
    /// Uses `f32::to_ne_bytes` instead of the previous `transmute` into
    /// a `#[repr(Rust)]` struct, whose field layout is unspecified.
    pub fn trans_from_f32(value: f32) -> FourByte {
        let b = value.to_ne_bytes();
        FourByte { b1: b[0], b2: b[1], b3: b[2], b4: b[3] }
    }

    /// Reassembles the stored bytes back into an `f32`.
    pub fn trans_to_f32(self) -> f32 {
        f32::from_ne_bytes([self.b1, self.b2, self.b3, self.b4])
    }
}
/// The eight bytes of an `f64`, in native-endian order (`b1` is the
/// first native byte).
struct EightByte {
    b1: u8,
    b2: u8,
    b3: u8,
    b4: u8,
    b5: u8,
    b6: u8,
    b7: u8,
    b8: u8,
}

impl EightByte {
    /// Splits `value` into its native-endian bytes.
    ///
    /// Uses `f64::to_ne_bytes` instead of the previous `transmute` into
    /// a `#[repr(Rust)]` struct, whose field layout is unspecified.
    pub fn trans_from_f64(value: f64) -> EightByte {
        let b = value.to_ne_bytes();
        EightByte {
            b1: b[0], b2: b[1], b3: b[2], b4: b[3],
            b5: b[4], b6: b[5], b7: b[6], b8: b[7],
        }
    }

    /// Reassembles the stored bytes back into an `f64`.
    pub fn trans_to_f64(self) -> f64 {
        f64::from_ne_bytes([
            self.b1, self.b2, self.b3, self.b4,
            self.b5, self.b6, self.b7, self.b8,
        ])
    }
}
/// A fixed-capacity buffer read and written at bit (not byte)
/// granularity.
///
/// A single cursor (`pos`) is shared by reads and writes and only ever
/// advances. Writes overwrite bits in place — nothing is inserted — and
/// multi-byte values are stored least-significant byte first.
#[derive(Clone)]
pub struct BitBuf {
    buf: Vec<u8>, // Backing storage.
    pos: usize,   // The current bit position of the cursor.
    size: usize,  // Total capacity, in bits.
}

impl BitBuf {
    /// Creates a new `BitBuf` backed by a zeroed `len`-byte vector.
    pub fn with_len(len: usize) -> BitBuf {
        // `vec![0; len]` replaces the previous `set_len` over
        // uninitialized memory, which was unsound (reads of
        // uninitialized bytes are UB).
        BitBuf {
            buf: vec![0u8; len],
            pos: 0,
            size: len * 8,
        }
    }

    /// Consumes the `BitBuf`, returning the underlying `Vec<u8>`.
    pub fn to_vec(self) -> Vec<u8> {
        self.buf
    }

    /// The total capacity of the buffer, in bits.
    pub fn bit_size(&self) -> usize {
        self.size
    }

    /// The current position of the cursor, in bits. The `BitBuf` does
    /// not insert, and will overwrite any data currently at the cursor
    /// position during writing.
    pub fn bit_pos(&self) -> usize {
        self.pos
    }

    /// True when `bit_size` more bits fit before the end of the buffer.
    ///
    /// Off-by-one fixed: `<=` allows a write that exactly fills the
    /// buffer, which the previous `<` wrongly rejected.
    pub fn can_write_bits(&self, bit_size: usize) -> bool {
        bit_size + self.pos <= self.size
    }

    /// True when `bit_size` more bits are available to read.
    pub fn can_read_bits(&self, bit_size: usize) -> bool {
        bit_size + self.pos <= self.size
    }

    /// Writes a `bool` as a single bit (1 = true).
    pub fn write_bool(&mut self, value: bool) {
        self.in_write_byte(if value { 1 } else { 0 }, 1);
    }

    /// Reads a single bit as a `bool`.
    pub fn read_bool(&mut self) -> bool {
        self.in_read_byte(1) == 1
    }

    /// Writes all 8 bits of an `i8`.
    pub fn write_i8(&mut self, value: i8) {
        self.write_i8_part(value, 8);
    }

    /// Reads 8 bits as an `i8`.
    pub fn read_i8(&mut self) -> i8 {
        self.read_i8_part(8)
    }

    fn write_i8_part(&mut self, value: i8, bits: u8) {
        self.in_write_byte(value as u8, bits);
    }

    fn read_i8_part(&mut self, bits: u8) -> i8 {
        self.in_read_byte(bits) as i8
    }

    /// Writes all 8 bits of a `u8`.
    pub fn write_u8(&mut self, value: u8) {
        self.write_u8_part(value, 8);
    }

    /// Reads 8 bits as a `u8`.
    pub fn read_u8(&mut self) -> u8 {
        self.read_u8_part(8)
    }

    /// Writes the low `bits` bits (1..=8) of `value`.
    pub fn write_u8_part(&mut self, value: u8, bits: u8) {
        self.in_write_byte(value, bits);
    }

    /// Reads `bits` bits (1..=8) as a `u8`.
    pub fn read_u8_part(&mut self, bits: u8) -> u8 {
        self.in_read_byte(bits)
    }

    /// Writes all 16 bits of a `u16`, LSB first.
    pub fn write_u16(&mut self, value: u16) {
        self.write_u16_part(value, 16);
    }

    /// Reads 16 bits as a `u16`.
    pub fn read_u16(&mut self) -> u16 {
        self.read_u16_part(16)
    }

    /// Writes the low `bits` bits (1..=16) of `value`, LSB first.
    /// Out-of-range bit counts are silently ignored (original
    /// best-effort behavior, kept).
    pub fn write_u16_part(&mut self, value: u16, bits: u8) {
        let lo = value as u8;
        let hi = (value >> 8) as u8;
        match (bits as u32 + 7) / 8 {
            1 => self.in_write_byte(lo, bits),
            2 => {
                self.in_write_byte(lo, 8);
                self.in_write_byte(hi, bits - 8);
            }
            _ => {}
        }
    }

    /// Reads `bits` bits (1..=16) as a `u16`, LSB first. Out-of-range
    /// bit counts read nothing and return 0 (original behavior, kept).
    pub fn read_u16_part(&mut self, bits: u8) -> u16 {
        match (bits as u32 + 7) / 8 {
            1 => self.in_read_byte(bits) as u16,
            2 => {
                let lo = self.in_read_byte(8) as u16;
                let hi = self.in_read_byte(bits - 8) as u16;
                lo | (hi << 8)
            }
            _ => 0,
        }
    }

    /// Writes all 16 bits of an `i16`.
    pub fn write_i16(&mut self, value: i16) {
        self.write_i16_part(value, 16);
    }

    /// Reads 16 bits as an `i16`.
    pub fn read_i16(&mut self) -> i16 {
        self.read_i16_part(16)
    }

    fn write_i16_part(&mut self, value: i16, bits: u8) {
        self.write_u16_part(value as u16, bits);
    }

    fn read_i16_part(&mut self, bits: u8) -> i16 {
        self.read_u16_part(bits) as i16
    }

    /// Writes all 32 bits of a `u32`, LSB first.
    pub fn write_u32(&mut self, value: u32) {
        self.write_u32_part(value, 32);
    }

    /// Reads 32 bits as a `u32`.
    pub fn read_u32(&mut self) -> u32 {
        self.read_u32_part(32)
    }

    /// Writes the low `bits` bits (1..=32) of `value`, LSB first:
    /// whole low bytes first, then the partial top byte. Out-of-range
    /// bit counts are silently ignored (original behavior, kept).
    pub fn write_u32_part(&mut self, value: u32, bits: u8) {
        let nbytes = (bits as u32 + 7) / 8;
        if nbytes == 0 || nbytes > 4 {
            return;
        }
        let mut remaining = bits;
        let mut shifted = value;
        while remaining > 8 {
            self.in_write_byte(shifted as u8, 8);
            shifted >>= 8;
            remaining -= 8;
        }
        self.in_write_byte(shifted as u8, remaining);
    }

    /// Reads `bits` bits (1..=32) as a `u32`, LSB first. Out-of-range
    /// bit counts read nothing and return 0 (original behavior, kept).
    pub fn read_u32_part(&mut self, bits: u8) -> u32 {
        let nbytes = (bits as u32 + 7) / 8;
        if nbytes == 0 || nbytes > 4 {
            return 0;
        }
        let mut value = 0u32;
        let mut shift = 0;
        let mut remaining = bits;
        while remaining > 8 {
            value |= (self.in_read_byte(8) as u32) << shift;
            shift += 8;
            remaining -= 8;
        }
        value | ((self.in_read_byte(remaining) as u32) << shift)
    }

    /// Reads 32 bits as an `i32`.
    pub fn read_i32(&mut self) -> i32 {
        self.read_i32_part(32)
    }

    /// Writes all 32 bits of an `i32`.
    pub fn write_i32(&mut self, value: i32) {
        self.write_i32_part(value, 32);
    }

    fn write_i32_part(&mut self, value: i32, bits: u8) {
        self.write_u32_part(value as u32, bits);
    }

    fn read_i32_part(&mut self, bits: u8) -> i32 {
        self.read_u32_part(bits) as i32
    }

    /// Writes all 64 bits of a `u64`, LSB first.
    pub fn write_u64(&mut self, value: u64) {
        self.write_u64_part(value, 64);
    }

    /// Reads 64 bits as a `u64`.
    pub fn read_u64(&mut self) -> u64 {
        self.read_u64_part(64)
    }

    /// Writes the low `bits` bits (1..=64) of `value` as one or two
    /// 32-bit chunks, low chunk first.
    pub fn write_u64_part(&mut self, value: u64, bits: u8) {
        if bits <= 32 {
            self.write_u32_part(value as u32, bits);
        } else {
            self.write_u32_part(value as u32, 32);
            self.write_u32_part((value >> 32) as u32, bits - 32);
        }
    }

    /// Reads `bits` bits (1..=64) as a `u64`, low chunk first.
    pub fn read_u64_part(&mut self, bits: u8) -> u64 {
        if bits <= 32 {
            self.read_u32_part(bits) as u64
        } else {
            let lo = self.read_u32_part(32) as u64;
            let hi = self.read_u32_part(bits - 32) as u64;
            lo | (hi << 32)
        }
    }

    /// Writes all 64 bits of an `i64`.
    pub fn write_i64(&mut self, value: i64) {
        self.write_u64_part(value as u64, 64);
    }

    /// Reads 64 bits as an `i64`.
    pub fn read_i64(&mut self) -> i64 {
        self.read_u64_part(64) as i64
    }

    fn write_i64_part(&mut self, value: i64, bits: u8) {
        self.write_u64_part(value as u64, bits);
    }

    fn read_i64_part(&mut self, bits: u8) -> i64 {
        self.read_u64_part(bits) as i64
    }

    /// Writes an `f32` as its four native-endian bytes.
    ///
    /// Serializes via `to_ne_bytes` instead of the old `transmute`
    /// through a `#[repr(Rust)]` struct with unspecified layout.
    pub fn write_f32(&mut self, value: f32) {
        for &b in value.to_ne_bytes().iter() {
            self.in_write_byte(b, 8);
        }
    }

    /// Reads an `f32` written by `write_f32`.
    pub fn read_f32(&mut self) -> f32 {
        let mut bytes = [0u8; 4];
        for b in bytes.iter_mut() {
            *b = self.in_read_byte(8);
        }
        f32::from_ne_bytes(bytes)
    }

    /// Writes an `f64` as its eight native-endian bytes.
    pub fn write_f64(&mut self, value: f64) {
        for &b in value.to_ne_bytes().iter() {
            self.in_write_byte(b, 8);
        }
    }

    /// Reads an `f64` written by `write_f64`.
    pub fn read_f64(&mut self) -> f64 {
        let mut bytes = [0u8; 8];
        for b in bytes.iter_mut() {
            *b = self.in_read_byte(8);
        }
        f64::from_ne_bytes(bytes)
    }

    /// Writes every byte of `value`, 8 bits each.
    pub fn write_u8_slice(&mut self, value: &[u8]) {
        for &byte in value {
            self.in_write_byte(byte, 8);
        }
    }

    /// Reads `length` bytes into a new `Vec<u8>`.
    pub fn read_vec_u8(&mut self, length: usize) -> Vec<u8> {
        (0..length).map(|_| self.in_read_byte(8)).collect()
    }

    /// Writes a string as a `u32` byte length followed by its UTF-8
    /// bytes.
    pub fn write_string(&mut self, value: &str) {
        self.write_u32(value.len() as u32);
        self.write_u8_slice(value.as_bytes());
    }

    /// Reads a string previously written by `write_string`.
    ///
    /// # Panics
    /// Panics if the stored bytes are not valid UTF-8.
    pub fn read_string(&mut self) -> String {
        let len = self.read_u32() as usize;
        String::from_utf8(self.read_vec_u8(len)).unwrap()
    }

    /// Writes the low `bits` bits (1..=8) of `value` at the cursor and
    /// advances it. The write may straddle a byte boundary.
    ///
    /// All shift arithmetic is done in `u32`: the old `u8` math could
    /// shift by exactly 8 (e.g. the keep-mask when a value exactly
    /// filled a byte), which panics in debug builds.
    #[inline(always)]
    fn in_write_byte(&mut self, value: u8, bits: u8) {
        let bits = bits as u32;
        let value = (value as u32) & (0xFF >> (8 - bits));
        let p = self.pos >> 3;
        let bits_used = (self.pos & 0x7) as u32;
        if bits_used == 0 {
            // Byte-aligned: replace the whole byte (bits above `bits`
            // are cleared — matches the original behavior).
            self.buf[p] = value as u8;
        } else {
            let bits_free = 8 - bits_used;
            if bits <= bits_free {
                // Fits in the current byte: keep the bits below and
                // above the target window, splice the value in between.
                let keep = (0xFF >> bits_free) | (0xFF << (bits_used + bits));
                self.buf[p] = ((self.buf[p] as u32 & keep) | (value << bits_used)) as u8;
            } else {
                // Straddles two bytes.
                self.buf[p] =
                    ((self.buf[p] as u32 & (0xFF >> bits_free)) | (value << bits_used)) as u8;
                self.buf[p + 1] = ((self.buf[p + 1] as u32 & (0xFF << (bits - bits_free)))
                    | (value >> bits_free)) as u8;
            }
        }
        self.pos += bits as usize;
    }

    /// Reads `bits` bits (1..=8) at the cursor as a `u8` and advances
    /// it. The read may straddle a byte boundary.
    #[inline(always)]
    fn in_read_byte(&mut self, bits: u8) -> u8 {
        let bits = bits as u32;
        let p = self.pos >> 3;
        let bits_used = (self.pos & 0x7) as u32;
        let value = if bits_used == 0 && bits == 8 {
            // Fast path: a whole, aligned byte.
            self.buf[p]
        } else if bits_used + bits <= 8 {
            // Contained in the current byte. (The original computed
            // `bits - (8 - bits_used)` in u8 here, which underflowed —
            // a debug-mode panic — for aligned sub-byte reads.)
            ((self.buf[p] as u32 >> bits_used) & (0xFF >> (8 - bits))) as u8
        } else {
            // Straddles two bytes: the rest of this byte, plus `spill`
            // bits from the next one.
            let spill = bits_used + bits - 8;
            let first = self.buf[p] as u32 >> bits_used;
            let second = self.buf[p + 1] as u32 & (0xFF >> (8 - spill));
            (first | (second << (bits - spill))) as u8
        };
        self.pos += bits as usize;
        value
    }
}
// ---------------------------------------------------------------------
// Round-trip tests: each writes one value, rewinds the cursor by
// resetting the private `pos` field directly, then reads the value
// back and asserts equality.
// ---------------------------------------------------------------------

#[test]
fn bool_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = true;
    buf.write_bool(testval);
    buf.pos = 0; // rewind to the start before reading back
    assert!(buf.read_bool() == testval);
}

#[test]
fn u8_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 211;
    buf.write_u8(testval);
    buf.pos = 0;
    assert!(buf.read_u8() == testval);
}

#[test]
fn u8_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 15;
    buf.write_u8_part(testval, 4);
    buf.pos = 0;
    assert!(buf.read_u8_part(4) == testval);
}

#[test]
fn i8_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 6;
    buf.write_i8_part(testval, 4);
    buf.pos = 0;
    assert!(buf.read_i8_part(4) == testval);
}

#[test]
fn i8_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = -109;
    buf.write_i8(testval);
    buf.pos = 0;
    assert!(buf.read_i8() == testval);
}

#[test]
fn u16_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 34507;
    buf.write_u16(testval);
    buf.pos = 0;
    assert!(buf.read_u16() == testval);
}

#[test]
fn u16_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 448;
    buf.write_u16_part(testval, 13);
    buf.pos = 0;
    let result = buf.read_u16_part(13);
    println!("{}", result);
    assert!(result == testval);
}

#[test]
fn i16_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = -11066;
    buf.write_i16(testval);
    buf.pos = 0;
    assert!(buf.read_i16() == testval);
}

#[test]
fn i16_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 10034;
    buf.write_i16_part(testval, 15);
    buf.pos = 0;
    assert!(buf.read_i16_part(15) == testval);
}

#[test]
fn u32_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 193772;
    buf.write_u32(testval);
    buf.pos = 0;
    assert!(buf.read_u32() == testval);
}

#[test]
fn u32_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 839011;
    buf.write_u32_part(testval, 27);
    buf.pos = 0;
    assert!(buf.read_u32_part(27) == testval);
}

#[test]
fn i32_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 54397;
    buf.write_i32_part(testval, 22);
    buf.pos = 0;
    assert!(buf.read_i32_part(22) == testval);
}

#[test]
fn i32_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = -23498225;
    buf.write_i32(testval);
    buf.pos = 0;
    assert!(buf.read_i32() == testval);
}

#[test]
fn u64_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 32944949231715;
    buf.write_u64_part(testval, 59);
    buf.pos = 0;
    assert!(buf.read_u64_part(59) == testval);
}

#[test]
fn u64_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 248394023907611;
    buf.write_u64(testval);
    buf.pos = 0;
    assert!(buf.read_u64() == testval);
}

#[test]
fn i64_part_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 1998372011;
    buf.write_i64_part(testval, 50);
    buf.pos = 0;
    assert!(buf.read_i64_part(50) == testval);
}

#[test]
fn i64_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = -24839402390;
    buf.write_i64(testval);
    buf.pos = 0;
    assert!(buf.read_i64() == testval);
}

#[test]
fn f32_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 3.0393124f32;
    buf.write_f32(testval);
    buf.pos = 0;
    assert!(buf.read_f32() == testval);
}

#[test]
fn f64_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = 3.0395831239485302f64;
    buf.write_f64(testval);
    buf.pos = 0;
    assert!(buf.read_f64() == testval);
}

#[test]
fn string_writeread_equal() {
    let mut buf = BitBuf::with_len(1400);
    let testval = "This is a test string. Nothing to see here. No, really!";
    buf.write_string(testval);
    buf.pos = 0;
    assert!(buf.read_string() == testval);
}
//struct BenchPerson {
// first_name: String,
// last_name: String,
// age: i8,
// alive: bool,
// weight: i16,
//}
//
//impl WriteToBitBuf for BenchPerson {
// fn write_to_bitbuf(&self, buf: &mut BitBuf) {
// buf.write_string(&self.first_name);
// buf.write_string(&self.last_name);
// buf.write_i8(self.age);
// buf.write_bool(self.alive);
// buf.write_i16(self.weight);
// }
//}
//
//impl FromBitBuf for BenchPerson {
// fn from_bitbuf(buf: &mut BitBuf) -> BenchPerson {
// BenchPerson {
// first_name: buf.read_string(),
// last_name: buf.read_string(),
// age: buf.read_i8(),
// alive: buf.read_bool(),
// weight: buf.read_i16(),
// }
// }
//}
//#[bench]
//fn benchperson_write1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// let person = BenchPerson {
// first_name: String::from_str("John"),
// last_name: String::from_str("Johnson"),
// age: 47,
// alive: true,
// weight: 203,
// };
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..63 {
// person.write_to_bitbuf(&mut buf);
// }
// })
//}
//
//#[bench]
//fn benchperson_read1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// let person = BenchPerson {
// first_name: String::from_str("John"),
// last_name: String::from_str("Johnson"),
// age: 47,
// alive: true,
// weight: 203,
// };
// for _ in 0..63 {
// person.write_to_bitbuf(&mut buf);
// }
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..63 {
// let p: BenchPerson = FromBitBuf::from_bitbuf(&mut buf);
// }
// })
//}
//
//#[bench]
//fn bitbuf_create_bench(b: &mut Bencher) {
// b.iter(|| {
// let mut buf = BitBuf::with_len(1400);
// })
//}
//
//#[bench]
//fn in_byte_write1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..1400 {
// buf.in_write_byte(240, 8);
// }
// })
//}
//
//#[bench]
//fn in_byte_read1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// for _ in 0..1400 {
// buf.in_write_byte(240, 8);
// }
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..1400 {
// let b = buf.in_read_byte(8);
// }
// })
//}
//
//#[bench]
//fn string_write1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..50 {
// buf.write_string("This is a string. Woo!!!");
// }
// })
//}
//
//#[bench]
//fn string_read1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// for _ in 0..50 {
// buf.write_string("This is a string. Woo!!!");
// }
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..50 {
// let s = buf.read_string();
// }
// })
//}
//
//#[bench]
//fn i32_write1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..350 {
// buf.write_i32(123239012);
// }
// })
//}
//
//#[bench]
//fn i64_write1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..175 {
// buf.write_i64(12352390123458);
// }
// })
//}
//
//#[bench]
//fn i64_read1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// for _ in 0..175 {
// buf.write_i64(12352390123458);
// }
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..175 {
// let i = buf.read_i64();
// }
// })
//}
//
//#[bench]
//fn f32_write1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..350 {
// buf.write_f32(123.239012f32);
// }
// })
//}
//
//#[bench]
//fn f64_write1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..175 {
// buf.write_f64(1235.2390123458f64);
// }
// })
//}
//
//#[bench]
//fn f64_read1400_bench(b: &mut Bencher) {
// let mut buf = BitBuf::with_len(1400);
// for _ in 0..175 {
// buf.write_f64(1235.2390123458f64);
// }
// b.iter(|| {
// buf.pos = 0;
// for _ in 0..175 {
// let f = buf.read_f64();
// }
// })
//}
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Generate and parse UUIDs.
//!
//! Provides support for Universally Unique Identifiers (UUIDs). A UUID is a
//! unique 128-bit number, stored as 16 octets. UUIDs are used to assign
//! unique identifiers to entities without requiring a central allocating
//! authority.
//!
//! They are particularly useful in distributed systems, though can be used in
//! disparate areas, such as databases and network protocols. Typically a UUID
//! is displayed in a readable string form as a sequence of hexadecimal digits,
//! separated into groups by hyphens.
//!
//! The uniqueness property is not strictly guaranteed, however for all
//! practical purposes, it can be assumed that an unintentional collision would
//! be extremely unlikely.
//!
//! # Dependencies
//!
//! By default, this crate depends on nothing but `std` and cannot generate
//! [`Uuid`]s. You need to enable the following Cargo features to enable
//! various pieces of functionality:
//!
//! * `v1` - adds the `Uuid::new_v1` function and the ability to create a V1
//! UUID using an implementation of `UuidV1ClockSequence` (usually
//! `UuidV1Context`) and a timestamp from `time::timespec`.
//! * `v3` - adds the `Uuid::new_v3` function and the ability to create a V3
//! UUID based on the MD5 hash of some data.
//! * `v4` - adds the `Uuid::new_v4` function and the ability to randomly
//! generate a `Uuid`.
//! * `v5` - adds the `Uuid::new_v5` function and the ability to create a V5
//! UUID based on the SHA1 hash of some data.
//! * `serde` - adds the ability to serialize and deserialize a `Uuid` using the
//! `serde` crate.
//!
//! By default, `uuid` can be depended on with:
//!
//! ```toml
//! [dependencies]
//! uuid = "0.6"
//! ```
//!
//! To activate various features, use syntax like:
//!
//! ```toml
//! [dependencies]
//! uuid = { version = "0.6", features = ["serde", "v4"] }
//! ```
//!
//! You can disable default features with:
//!
//! ```toml
//! [dependencies]
//! uuid = { version = "0.6", default-features = false }
//! ```
//!
//! # Examples
//!
//! To parse a UUID given in the simple format and print it as a urn:
//!
//! ```rust
//! use uuid::Uuid;
//!
//! fn main() {
//! let my_uuid =
//! Uuid::parse_str("936DA01F9ABD4d9d80C702AF85C822A8").unwrap();
//! println!("{}", my_uuid.urn());
//! }
//! ```
//!
//! To create a new random (V4) UUID and print it out in hexadecimal form:
//!
//! ```ignore,rust
//! // Note that this requires the `v4` feature enabled in the uuid crate.
//!
//! use uuid::Uuid;
//!
//! fn main() {
//! let my_uuid = Uuid::new_v4();
//! println!("{}", my_uuid);
//! }
//! ```
//!
//! # Strings
//!
//! Examples of string representations:
//!
//! * simple: `936DA01F9ABD4d9d80C702AF85C822A8`
//! * hyphenated: `550e8400-e29b-41d4-a716-446655440000`
//! * urn: `urn:uuid:F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4`
//!
//! # References
//!
//! * [Wikipedia: Universally Unique Identifier](
//! http://en.wikipedia.org/wiki/Universally_unique_identifier)
//! * [RFC4122: A Universally Unique IDentifier (UUID) URN Namespace](
//! http://tools.ietf.org/html/rfc4122)
#![doc(
html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://docs.rs/uuid"
)]
#![deny(warnings)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(all(feature = "u128", nightly), feature(i128_type))]
#[macro_use]
extern crate cfg_if;
cfg_if! {
if #[cfg(feature = "byteorder")] {
extern crate byteorder;
}
}
cfg_if! {
if #[cfg(feature = "md5")] {
extern crate md5;
}
}
cfg_if! {
if #[cfg(feature = "rand")] {
extern crate rand;
}
}
cfg_if! {
if #[cfg(feature = "serde")] {
extern crate serde;
}
}
cfg_if! {
if #[cfg(feature = "sha1")] {
extern crate sha1;
}
}
cfg_if! {
if #[cfg(all(feature = "slog", not(test)))] {
extern crate slog;
} else if #[cfg(all(feature = "slog", test))] {
#[macro_use]
extern crate slog;
}
}
cfg_if! {
if #[cfg(feature = "std")] {
use std::fmt;
use std::str;
} else if #[cfg(not(feature = "std"))] {
use core::fmt;
use core::str;
}
}
pub mod ns;
pub mod prelude;
mod core_support;
cfg_if! {
if #[cfg(feature = "v1")] {
pub mod v1;
}
}
cfg_if! {
if #[cfg(feature = "serde")] {
mod serde_support;
}
}
cfg_if! {
if #[cfg(feature = "slog")] {
mod slog_support;
}
}
cfg_if! {
if #[cfg(feature = "std")] {
mod std_support;
}
}
cfg_if! {
if #[cfg(test)] {
mod test_util;
}
}
cfg_if! {
    // NOTE: keep this predicate in sync with the feature gate at the top of
    // the crate: `#![cfg_attr(all(feature = "u128", nightly), feature(i128_type))]`.
    // `nightly` belongs inside `all(...)`; the previous form relied on
    // cfg_if's implicit wrapping of comma-separated predicates in `all`.
    if #[cfg(all(feature = "u128", nightly))] {
        mod u128_support;
    }
}
cfg_if! {
if #[cfg(feature = "v4")] {
mod v4;
}
}
cfg_if! {
if #[cfg(feature = "v5")] {
mod v5;
}
}
/// A 128-bit (16 byte) buffer containing the ID.
///
/// This is the raw, fixed-size byte array that backs a [`Uuid`].
///
/// [`Uuid`]: struct.Uuid.html
pub type UuidBytes = [u8; 16];
/// The version of the UUID, denoting the generating algorithm.
///
/// The discriminant of each variant equals the RFC4122 version number, so
/// the value can be shifted directly into the UUID's version nibble (see
/// `Uuid::set_version`).
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(C)]
pub enum UuidVersion {
    /// Special case for `nil` [`Uuid`].
    ///
    /// [`Uuid`]: struct.Uuid.html
    Nil = 0,
    /// Version 1: MAC address
    Mac,
    /// Version 2: DCE Security
    Dce,
    /// Version 3: MD5 hash
    Md5,
    /// Version 4: Random
    Random,
    /// Version 5: SHA-1 hash
    Sha1,
}
/// The reserved variants of UUIDs.
///
/// The variant is encoded in the most significant bits of octet 8; see
/// `Uuid::set_variant` and `Uuid::get_variant` for the bit patterns.
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(C)]
pub enum UuidVariant {
    /// Reserved by the NCS for backward compatibility
    NCS = 0,
    /// As described in the RFC4122 Specification (default)
    RFC4122,
    /// Reserved by Microsoft for backward compatibility
    Microsoft,
    /// Reserved for future expansion
    Future,
}
/// A Universally Unique Identifier (UUID).
///
/// Stored as 16 raw octets; all field and string accessors decode from this
/// buffer on demand.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Uuid {
    /// The 128-bit number stored in 16 bytes
    bytes: UuidBytes,
}
/// An adaptor for formatting a `Uuid` as a simple string.
///
/// Created by `Uuid::simple`; renders 32 hex digits with no separators.
pub struct Simple<'a> {
    // Borrow of the UUID being formatted.
    inner: &'a Uuid,
}
/// An adaptor for formatting a `Uuid` as a hyphenated string.
///
/// Created by `Uuid::hyphenated`; renders the 8-4-4-4-12 grouped form.
pub struct Hyphenated<'a> {
    // Borrow of the UUID being formatted.
    inner: &'a Uuid,
}
/// An adaptor for formatting a `Uuid` as a URN string.
///
/// Created by `Uuid::urn`; prefixes the hyphenated form with `urn:uuid:`.
pub struct Urn<'a> {
    // Borrow of the UUID being formatted.
    inner: &'a Uuid,
}
/// Error details for string parsing failures.
#[allow(missing_docs)]
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum ParseError {
    /// The input length did not match any supported format.
    InvalidLength(usize),
    /// A non-hex, non-hyphen character was found (char, byte offset).
    InvalidCharacter(char, usize),
    /// Wrong number of hyphen-separated groups (expected 1 or 5).
    InvalidGroups(usize),
    /// A group had the wrong length: (group index, found, expected).
    InvalidGroupLength(usize, usize, u8),
}
// Length in chars of the simple (no-hyphen) string form.
const SIMPLE_LENGTH: usize = 32;
// Length in chars of the hyphenated string form.
const HYPHENATED_LENGTH: usize = 36;
/// Converts a `ParseError` to a string.
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ParseError::InvalidLength(found) => write!(
f,
"Invalid length; expecting {} or {} chars, found {}",
SIMPLE_LENGTH, HYPHENATED_LENGTH, found
),
ParseError::InvalidCharacter(found, pos) => write!(
f,
"Invalid character; found `{}` (0x{:02x}) at offset {}",
found, found as usize, pos
),
ParseError::InvalidGroups(found) => write!(
f,
"Malformed; wrong number of groups: expected 1 or 5, found {}",
found
),
ParseError::InvalidGroupLength(group, found, expecting) => write!(
f,
"Malformed; length of group {} was {}, expecting {}",
group, found, expecting
),
}
}
}
// Length of each hyphenated group in hex digits.
const GROUP_LENS: [u8; 5] = [8, 4, 4, 4, 12];
// Accumulated length of each hyphenated group in hex digits.
const ACC_GROUP_LENS: [u8; 5] = [8, 12, 16, 20, 32];
impl Uuid {
    /// The 'nil UUID'.
    ///
    /// The nil UUID is a special form of UUID that is specified to have all
    /// 128 bits set to zero, as defined in [IETF RFC 4122 Section 4.1.7][RFC].
    ///
    /// [RFC]: https://tools.ietf.org/html/rfc4122.html#section-4.1.7
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use uuid::Uuid;
    ///
    /// let uuid = Uuid::nil();
    ///
    /// assert_eq!(
    ///     uuid.hyphenated().to_string(),
    ///     "00000000-0000-0000-0000-000000000000"
    /// );
    /// ```
    pub fn nil() -> Uuid {
        Uuid { bytes: [0; 16] }
    }
    /// Creates a new `Uuid`.
    ///
    /// Note that not all versions can be generated currently and `None` will be
    /// returned if the specified version cannot be generated.
    ///
    /// To generate an MD5-based UUID (`UuidVersion::Md5`), the `v3`
    /// feature must be enabled for this crate.
    ///
    /// To generate a random UUID (`UuidVersion::Random`), the `v4`
    /// feature must be enabled for this crate.
    ///
    /// To generate a SHA1-based UUID (`UuidVersion::Sha1`), the `v5`
    /// feature must be enabled for this crate.
    pub fn new(v: UuidVersion) -> Option<Uuid> {
        // The v3/v5 arms hash a random name under NAMESPACE_DNS.
        // Why 23 chars? ASCII has roughly 6 bits of randomness per 8-bit
        // char, so at least 21.333 (128/6) chars are required to cover
        // 128 bits; 23 adds a small margin.
        #[cfg(any(feature = "v3", feature = "v5"))]
        let iv: String = {
            use rand::Rng;
            rand::thread_rng()
                .gen_ascii_chars()
                .take(23)
                .collect()
        };
        match v {
            #[cfg(feature = "v3")]
            UuidVersion::Md5 => Some(Uuid::new_v3(&ns::NAMESPACE_DNS, &*iv)),
            #[cfg(feature = "v4")]
            UuidVersion::Random => Some(Uuid::new_v4()),
            #[cfg(feature = "v5")]
            UuidVersion::Sha1 => Some(Uuid::new_v5(&ns::NAMESPACE_DNS, &*iv)),
            // Nil, Mac (v1) and Dce (v2) cannot be generated here.
            _ => None,
        }
    }
    /// Creates a UUID using a name from a namespace, based on the MD5 hash.
    ///
    /// A number of namespaces are available as constants in this crate:
    ///
    /// * `NAMESPACE_DNS`
    /// * `NAMESPACE_URL`
    /// * `NAMESPACE_OID`
    /// * `NAMESPACE_X500`
    ///
    /// Note that usage of this method requires the `v3` feature of this crate
    /// to be enabled.
    #[cfg(feature = "v3")]
    pub fn new_v3(namespace: &Uuid, name: &str) -> Uuid {
        // MD5(namespace bytes || name bytes), then stamp variant/version
        // bits over the raw digest.
        let mut ctx = md5::Context::new();
        ctx.consume(namespace.as_bytes());
        ctx.consume(name.as_bytes());
        let mut uuid = Uuid {
            bytes: ctx.compute().into(),
        };
        uuid.set_variant(UuidVariant::RFC4122);
        uuid.set_version(UuidVersion::Md5);
        uuid
    }
    /// Creates a `Uuid` from four field values.
    ///
    /// # Errors
    ///
    /// This function will return an error if `d4`'s length is not 8 bytes.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use uuid::Uuid;
    ///
    /// let d4 = [12, 3, 9, 56, 54, 43, 8, 9];
    ///
    /// let uuid = Uuid::from_fields(42, 12, 5, &d4);
    /// let uuid = uuid.map(|uuid| uuid.hyphenated().to_string());
    ///
    /// let expected_uuid =
    ///     Ok(String::from("0000002a-000c-0005-0c03-0938362b0809"));
    ///
    /// assert_eq!(expected_uuid, uuid);
    /// ```
    ///
    /// An invalid length:
    ///
    /// ```
    /// use uuid::Uuid;
    /// use uuid::ParseError;
    ///
    /// let d4 = [12];
    ///
    /// let uuid = Uuid::from_fields(42, 12, 5, &d4);
    ///
    /// let expected_uuid = Err(ParseError::InvalidLength(1));
    ///
    /// assert_eq!(expected_uuid, uuid);
    /// ```
    pub fn from_fields(
        d1: u32,
        d2: u16,
        d3: u16,
        d4: &[u8],
    ) -> Result<Uuid, ParseError> {
        if d4.len() != 8 {
            return Err(ParseError::InvalidLength(d4.len()));
        }
        // Serialize each field big-endian (most significant byte first).
        Ok(Uuid {
            bytes: [
                (d1 >> 24) as u8,
                (d1 >> 16) as u8,
                (d1 >> 8) as u8,
                d1 as u8,
                (d2 >> 8) as u8,
                d2 as u8,
                (d3 >> 8) as u8,
                d3 as u8,
                d4[0],
                d4[1],
                d4[2],
                d4[3],
                d4[4],
                d4[5],
                d4[6],
                d4[7],
            ],
        })
    }
    /// Creates a `Uuid` using the supplied bytes.
    ///
    /// # Errors
    ///
    /// This function will return an error if `b` has any length other than 16.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use uuid::Uuid;
    ///
    /// let bytes = [4, 54, 67, 12, 43, 2, 98, 76, 32, 50, 87, 5, 1, 33, 43,
    ///              87];
    ///
    /// let uuid = Uuid::from_bytes(&bytes);
    /// let uuid = uuid.map(|uuid| uuid.hyphenated().to_string());
    ///
    /// let expected_uuid =
    ///     Ok(String::from("0436430c-2b02-624c-2032-570501212b57"));
    ///
    /// assert_eq!(expected_uuid, uuid);
    /// ```
    ///
    /// An incorrect number of bytes:
    ///
    /// ```
    /// use uuid::Uuid;
    /// use uuid::ParseError;
    ///
    /// let bytes = [4, 54, 67, 12, 43, 2, 98, 76];
    ///
    /// let uuid = Uuid::from_bytes(&bytes);
    ///
    /// let expected_uuid = Err(ParseError::InvalidLength(8));
    ///
    /// assert_eq!(expected_uuid, uuid);
    /// ```
    pub fn from_bytes(b: &[u8]) -> Result<Uuid, ParseError> {
        let len = b.len();
        if len != 16 {
            return Err(ParseError::InvalidLength(len));
        }
        let mut uuid = Uuid { bytes: [0; 16] };
        uuid.bytes.copy_from_slice(b);
        Ok(uuid)
    }
    /// Creates a `Uuid` using the supplied bytes.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use uuid::Uuid;
    /// use uuid::UuidBytes;
    ///
    /// let bytes: UuidBytes = [
    ///     70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145,
    ///     63, 62 ];
    ///
    /// let uuid = Uuid::from_uuid_bytes(bytes);
    /// let uuid = uuid.hyphenated().to_string();
    ///
    /// let expected_uuid =
    ///     String::from("46ebd0ee-0e6d-43c9-b90d-ccc35a913f3e");
    ///
    /// assert_eq!(expected_uuid, uuid);
    /// ```
    ///
    /// An incorrect number of bytes:
    ///
    /// ```compile_fail
    /// use uuid::Uuid;
    /// use uuid::UuidBytes;
    ///
    /// let bytes: UuidBytes = [4, 54, 67, 12, 43, 2, 98, 76]; // doesn't
    /// compile
    ///
    /// let uuid = Uuid::from_uuid_bytes(bytes);
    /// ```
    pub fn from_uuid_bytes(b: UuidBytes) -> Uuid {
        // Infallible: the array type guarantees exactly 16 bytes.
        Uuid { bytes: b }
    }
    /// Creates a v4 Uuid from random bytes (e.g. bytes supplied from `Rand`
    /// crate)
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use uuid::Uuid;
    /// use uuid::UuidBytes;
    ///
    /// let bytes: UuidBytes = [
    ///     70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145,
    ///     63, 62 ];
    /// let uuid = Uuid::from_random_bytes(bytes);
    /// let uuid = uuid.hyphenated().to_string();
    ///
    /// let expected_uuid =
    ///     String::from("46ebd0ee-0e6d-43c9-b90d-ccc35a913f3e");
    ///
    /// assert_eq!(expected_uuid, uuid);
    /// ```
    ///
    pub fn from_random_bytes(b: [u8; 16]) -> Uuid {
        // Overwrite the variant/version bits so the result is a valid
        // RFC4122 v4 UUID regardless of the input bytes.
        let mut uuid = Uuid { bytes: b };
        uuid.set_variant(UuidVariant::RFC4122);
        uuid.set_version(UuidVersion::Random);
        uuid
    }
    /// Specifies the variant of the UUID structure
    #[allow(dead_code)]
    fn set_variant(&mut self, v: UuidVariant) {
        // Octet 8 contains the variant in the most significant 3 bits
        self.bytes[8] = match v {
            UuidVariant::NCS => self.bytes[8] & 0x7f, // b0xx...
            UuidVariant::RFC4122 => (self.bytes[8] & 0x3f) | 0x80, // b10x...
            UuidVariant::Microsoft => (self.bytes[8] & 0x1f) | 0xc0, // b110...
            UuidVariant::Future => (self.bytes[8] & 0x1f) | 0xe0, // b111...
        }
    }
    /// Returns the variant of the `Uuid` structure.
    ///
    /// This determines the interpretation of the structure of the UUID.
    /// Currently only the RFC4122 variant is generated by this module.
    ///
    /// * [Variant Reference](http://tools.ietf.org/html/rfc4122#section-4.1.1)
    pub fn get_variant(&self) -> Option<UuidVariant> {
        // Decode the most significant bits of octet 8 (mirrors the
        // encoding in `set_variant`).
        match self.bytes[8] {
            x if x & 0x80 == 0x00 => Some(UuidVariant::NCS), // b0xx...
            x if x & 0xc0 == 0x80 => Some(UuidVariant::RFC4122), // b10x...
            x if x & 0xe0 == 0xc0 => Some(UuidVariant::Microsoft), // b110...
            x if x & 0xe0 == 0xe0 => Some(UuidVariant::Future), // b111...
            _ => None,
        }
    }
    /// Specifies the version number of the `Uuid`.
    #[allow(dead_code)]
    fn set_version(&mut self, v: UuidVersion) {
        // The version lives in the high nibble of octet 6; the enum's
        // discriminant is the RFC4122 version number.
        self.bytes[6] = (self.bytes[6] & 0xF) | ((v as u8) << 4);
    }
    /// Returns the version number of the `Uuid`.
    ///
    /// This represents the algorithm used to generate the contents.
    ///
    /// Currently only the Random (V4) algorithm is supported by this
    /// module. There are security and privacy implications for using
    /// older versions - see [Wikipedia: Universally Unique Identifier](
    /// http://en.wikipedia.org/wiki/Universally_unique_identifier) for
    /// details.
    ///
    /// * [Version Reference](http://tools.ietf.org/html/rfc4122#section-4.1.3)
    pub fn get_version_num(&self) -> usize {
        (self.bytes[6] >> 4) as usize
    }
    /// Returns the version of the `Uuid`.
    ///
    /// This represents the algorithm used to generate the contents
    pub fn get_version(&self) -> Option<UuidVersion> {
        let v = self.bytes[6] >> 4;
        match v {
            // A zero nibble only counts as `Nil` if the whole UUID is zero.
            0 if self.is_nil() => Some(UuidVersion::Nil),
            1 => Some(UuidVersion::Mac),
            2 => Some(UuidVersion::Dce),
            3 => Some(UuidVersion::Md5),
            4 => Some(UuidVersion::Random),
            5 => Some(UuidVersion::Sha1),
            _ => None,
        }
    }
    /// Returns the four field values of the UUID.
    ///
    /// These values can be passed to the `from_fields()` method to get the
    /// original `Uuid` back.
    ///
    /// * The first field value represents the first group of (eight) hex
    ///   digits, taken as a big-endian `u32` value. For V1 UUIDs, this field
    ///   represents the low 32 bits of the timestamp.
    /// * The second field value represents the second group of (four) hex
    ///   digits, taken as a big-endian `u16` value. For V1 UUIDs, this field
    ///   represents the middle 16 bits of the timestamp.
    /// * The third field value represents the third group of (four) hex
    ///   digits, taken as a big-endian `u16` value. The 4 most significant
    ///   bits give the UUID version, and for V1 UUIDs, the last 12 bits
    ///   represent the high 12 bits of the timestamp.
    /// * The last field value represents the last two groups of four and
    ///   twelve hex digits, taken in order. The first 1-3 bits of this
    ///   indicate the UUID variant, and for V1 UUIDs, the next 13-15 bits
    ///   indicate the clock sequence and the last 48 bits indicate the node
    ///   ID.
    ///
    /// # Examples
    ///
    /// ```
    /// use uuid::Uuid;
    ///
    /// let uuid = Uuid::nil();
    /// assert_eq!(uuid.as_fields(), (0, 0, 0, &[0u8; 8]));
    ///
    /// let uuid =
    ///     Uuid::parse_str("936DA01F-9ABD-4D9D-80C7-02AF85C822A8").unwrap();
    /// assert_eq!(
    ///     uuid.as_fields(),
    ///     (
    ///         0x936DA01F,
    ///         0x9ABD,
    ///         0x4D9D,
    ///         b"\x80\xC7\x02\xAF\x85\xC8\x22\xA8"
    ///     )
    /// );
    /// ```
    pub fn as_fields(&self) -> (u32, u16, u16, &[u8; 8]) {
        // Reassemble the first three fields big-endian from the raw bytes.
        let d1 = u32::from(self.bytes[0]) << 24 | u32::from(self.bytes[1]) << 16
            | u32::from(self.bytes[2]) << 8
            | u32::from(self.bytes[3]);
        let d2 = u16::from(self.bytes[4]) << 8 | u16::from(self.bytes[5]);
        let d3 = u16::from(self.bytes[6]) << 8 | u16::from(self.bytes[7]);
        let d4: &[u8; 8] =
            // SAFETY: `bytes` is `[u8; 16]`, so `bytes[8..16]` is exactly 8
            // bytes; casting its pointer to `&[u8; 8]` stays in bounds and
            // `u8` has alignment 1, so the reference is valid for `self`'s
            // lifetime.
            unsafe { &*(self.bytes[8..16].as_ptr() as *const [u8; 8]) };
        (d1, d2, d3, d4)
    }
    /// Returns an array of 16 octets containing the UUID data.
    ///
    /// # Examples
    ///
    /// ```
    /// use uuid::Uuid;
    ///
    /// let uuid = Uuid::nil();
    /// assert_eq!(uuid.as_bytes(), &[0; 16]);
    ///
    /// let uuid = Uuid::parse_str("936DA01F9ABD4d9d80C702AF85C822A8").unwrap();
    /// assert_eq!(
    ///     uuid.as_bytes(),
    ///     &[
    ///         147, 109, 160, 31, 154, 189, 77, 157, 128, 199, 2, 175, 133,
    ///         200, 34, 168,
    ///     ]
    /// );
    /// ```
    pub fn as_bytes(&self) -> &[u8; 16] {
        &self.bytes
    }
    /// Returns a wrapper which when formatted via `fmt::Display` will format a
    /// string of 32 hexadecimal digits.
    ///
    /// # Examples
    ///
    /// ```
    /// use uuid::Uuid;
    ///
    /// let uuid = Uuid::nil();
    /// assert_eq!(
    ///     uuid.simple().to_string(),
    ///     "00000000000000000000000000000000"
    /// );
    /// ```
    pub fn simple(&self) -> Simple {
        Simple { inner: self }
    }
    /// Returns a wrapper which when formatted via `fmt::Display` will format a
    /// string of hexadecimal digits separated into groups with a hyphen.
    ///
    /// # Examples
    ///
    /// ```
    /// use uuid::Uuid;
    ///
    /// let uuid = Uuid::nil();
    /// assert_eq!(
    ///     uuid.hyphenated().to_string(),
    ///     "00000000-0000-0000-0000-000000000000"
    /// );
    /// ```
    pub fn hyphenated(&self) -> Hyphenated {
        Hyphenated { inner: self }
    }
    /// Returns a wrapper which when formatted via `fmt::Display` will format a
    /// string of the UUID as a full URN string.
    ///
    /// # Examples
    ///
    /// ```
    /// use uuid::Uuid;
    ///
    /// let uuid = Uuid::nil();
    /// assert_eq!(
    ///     uuid.urn().to_string(),
    ///     "urn:uuid:00000000-0000-0000-0000-000000000000"
    /// );
    /// ```
    pub fn urn(&self) -> Urn {
        Urn { inner: self }
    }
    /// Returns an Optional Tuple of (u64, u16) representing the timestamp and
    /// counter portion of a V1 UUID. If the supplied UUID is not V1, this
    /// will return None
    pub fn to_timestamp(&self) -> Option<(u64, u16)> {
        // Only V1 (time-based, `UuidVersion::Mac`) UUIDs carry a timestamp.
        if self.get_version()
            .map(|v| v != UuidVersion::Mac)
            .unwrap_or(true)
        {
            return None;
        }
        // Reassemble the 60-bit timestamp: time_hi (octets 6-7, minus the
        // version nibble), then time_mid (octets 4-5), then time_low
        // (octets 0-3) — the RFC4122 V1 field layout.
        let ts: u64 = u64::from(self.bytes[6] & 0x0F) << 56
            | u64::from(self.bytes[7]) << 48
            | u64::from(self.bytes[4]) << 40
            | u64::from(self.bytes[5]) << 32
            | u64::from(self.bytes[0]) << 24
            | u64::from(self.bytes[1]) << 16
            | u64::from(self.bytes[2]) << 8
            | u64::from(self.bytes[3]);
        // Clock sequence: octets 8-9, minus the 2 variant bits.
        let count: u16 =
            u16::from(self.bytes[8] & 0x3F) << 8 | u16::from(self.bytes[9]);
        Some((ts, count))
    }
    /// Parses a `Uuid` from a string of hexadecimal digits with optional
    /// hyphens.
    ///
    /// Any of the formats generated by this module (simple, hyphenated, urn)
    /// are supported by this parsing function.
    pub fn parse_str(mut input: &str) -> Result<Uuid, ParseError> {
        // Ensure length is valid for any of the supported formats
        let len = input.len();
        if len == (HYPHENATED_LENGTH + 9) && input.starts_with("urn:uuid:") {
            // Strip the URN prefix and parse the rest as hyphenated.
            input = &input[9..];
        } else if len != SIMPLE_LENGTH && len != HYPHENATED_LENGTH {
            return Err(ParseError::InvalidLength(len));
        }
        // `digit` counts only hexadecimal digits, `i_char` counts all chars.
        let mut digit = 0;
        let mut group = 0;
        let mut acc = 0;
        let mut buffer = [0u8; 16];
        for (i_char, chr) in input.bytes().enumerate() {
            // More than 32 hex digits before reaching the final group means
            // the hyphens were misplaced.
            if digit as usize >= SIMPLE_LENGTH && group != 4 {
                if group == 0 {
                    return Err(ParseError::InvalidLength(len));
                }
                return Err(ParseError::InvalidGroups(group + 1));
            }
            if digit % 2 == 0 {
                // First digit of the byte.
                match chr {
                    // Calculate upper half.
                    b'0'...b'9' => acc = chr - b'0',
                    b'a'...b'f' => acc = chr - b'a' + 10,
                    b'A'...b'F' => acc = chr - b'A' + 10,
                    // Found a group delimiter
                    b'-' => {
                        if ACC_GROUP_LENS[group] != digit {
                            // Calculate how many digits this group consists of
                            // in the input.
                            let found = if group > 0 {
                                digit - ACC_GROUP_LENS[group - 1]
                            } else {
                                digit
                            };
                            return Err(ParseError::InvalidGroupLength(
                                group,
                                found as usize,
                                GROUP_LENS[group],
                            ));
                        }
                        // Next group, decrement digit, it is incremented again
                        // at the bottom.
                        group += 1;
                        digit -= 1;
                    }
                    _ => {
                        return Err(ParseError::InvalidCharacter(
                            input[i_char..].chars().next().unwrap(),
                            i_char,
                        ))
                    }
                }
            } else {
                // Second digit of the byte, shift the upper half.
                acc *= 16;
                match chr {
                    b'0'...b'9' => acc += chr - b'0',
                    b'a'...b'f' => acc += chr - b'a' + 10,
                    b'A'...b'F' => acc += chr - b'A' + 10,
                    b'-' => {
                        // The byte isn't complete yet.
                        let found = if group > 0 {
                            digit - ACC_GROUP_LENS[group - 1]
                        } else {
                            digit
                        };
                        return Err(ParseError::InvalidGroupLength(
                            group,
                            found as usize,
                            GROUP_LENS[group],
                        ));
                    }
                    _ => {
                        return Err(ParseError::InvalidCharacter(
                            input[i_char..].chars().next().unwrap(),
                            i_char,
                        ))
                    }
                }
                buffer[(digit / 2) as usize] = acc;
            }
            digit += 1;
        }
        // Now check the last group.
        if ACC_GROUP_LENS[4] != digit {
            return Err(ParseError::InvalidGroupLength(
                group,
                (digit - ACC_GROUP_LENS[3]) as usize,
                GROUP_LENS[4],
            ));
        }
        // `buffer` is exactly 16 bytes, so `from_bytes` cannot fail here.
        Ok(Uuid::from_bytes(&buffer).unwrap())
    }
    /// Tests if the UUID is nil
    pub fn is_nil(&self) -> bool {
        self.bytes.iter().all(|&b| b == 0)
    }
}
impl<'a> fmt::Display for Simple<'a> {
    /// Delegates to the lowercase-hex rendering of the wrapped UUID.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        <Self as fmt::LowerHex>::fmt(self, f)
    }
}
impl<'a> fmt::UpperHex for Simple<'a> {
    /// Writes all 16 octets as two uppercase hex digits each (32 chars).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let bytes = &self.inner.bytes;
        for i in 0..bytes.len() {
            write!(f, "{:02X}", bytes[i])?;
        }
        Ok(())
    }
}
impl<'a> fmt::LowerHex for Simple<'a> {
    /// Writes all 16 octets as two lowercase hex digits each (32 chars).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let bytes = &self.inner.bytes;
        for i in 0..bytes.len() {
            write!(f, "{:02x}", bytes[i])?;
        }
        Ok(())
    }
}
impl<'a> fmt::Display for Hyphenated<'a> {
    /// Delegates to the lowercase-hex rendering of the wrapped UUID.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        <Self as fmt::LowerHex>::fmt(self, f)
    }
}
// Shared body for the upper/lower-hex `Hyphenated` formatters: reassembles
// the first three RFC4122 fields as big-endian integers and passes them,
// plus the trailing eight bytes, to `write!` with the caller's format
// string (which supplies the hyphens and the hex case).
macro_rules! hyphenated_write {
    ($f:expr, $format:expr, $bytes:expr) => {{
        let data1 = u32::from($bytes[0]) << 24 | u32::from($bytes[1]) << 16
            | u32::from($bytes[2]) << 8
            | u32::from($bytes[3]);
        let data2 = u16::from($bytes[4]) << 8 | u16::from($bytes[5]);
        let data3 = u16::from($bytes[6]) << 8 | u16::from($bytes[7]);
        write!(
            $f,
            $format,
            data1,
            data2,
            data3,
            $bytes[8],
            $bytes[9],
            $bytes[10],
            $bytes[11],
            $bytes[12],
            $bytes[13],
            $bytes[14],
            $bytes[15]
        )
    }};
}
impl<'a> fmt::UpperHex for Hyphenated<'a> {
    // Renders the 8-4-4-4-12 grouped form in uppercase hex.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        hyphenated_write!(
            f,
            "{:08X}-\
             {:04X}-\
             {:04X}-\
             {:02X}{:02X}-\
             {:02X}{:02X}{:02X}{:02X}{:02X}{:02X}",
            self.inner.bytes
        )
    }
}
impl<'a> fmt::LowerHex for Hyphenated<'a> {
    // Renders the 8-4-4-4-12 grouped form in lowercase hex.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        hyphenated_write!(
            f,
            "{:08x}-\
             {:04x}-\
             {:04x}-\
             {:02x}{:02x}-\
             {:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
            self.inner.bytes
        )
    }
}
impl<'a> fmt::Display for Urn<'a> {
    /// Prefixes the hyphenated form with the standard `urn:uuid:` namespace.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("urn:uuid:")?;
        fmt::Display::fmt(&self.inner.hyphenated(), f)
    }
}
#[cfg(test)]
mod tests {
extern crate std;
use self::std::prelude::v1::*;
use super::test_util;
use super::ns::{NAMESPACE_X500, NAMESPACE_DNS, NAMESPACE_OID,
NAMESPACE_URL};
use prelude::*;
#[cfg(feature = "v3")]
static FIXTURE_V3: &'static [(&'static Uuid, &'static str, &'static str)] =
&[
(
&NAMESPACE_DNS,
"example.org",
"04738bdf-b25a-3829-a801-b21a1d25095b",
),
(
&NAMESPACE_DNS,
"rust-lang.org",
"c6db027c-615c-3b4d-959e-1a917747ca5a",
),
(
&NAMESPACE_DNS,
"42",
"5aab6e0c-b7d3-379c-92e3-2bfbb5572511",
),
(
&NAMESPACE_DNS,
"lorem ipsum",
"4f8772e9-b59c-3cc9-91a9-5c823df27281",
),
(
&NAMESPACE_URL,
"example.org",
"39682ca1-9168-3da2-a1bb-f4dbcde99bf9",
),
(
&NAMESPACE_URL,
"rust-lang.org",
"7ed45aaf-e75b-3130-8e33-ee4d9253b19f",
),
(
&NAMESPACE_URL,
"42",
"08998a0c-fcf4-34a9-b444-f2bfc15731dc",
),
(
&NAMESPACE_URL,
"lorem ipsum",
"e55ad2e6-fb89-34e8-b012-c5dde3cd67f0",
),
(
&NAMESPACE_OID,
"example.org",
"f14eec63-2812-3110-ad06-1625e5a4a5b2",
),
(
&NAMESPACE_OID,
"rust-lang.org",
"6506a0ec-4d79-3e18-8c2b-f2b6b34f2b6d",
),
(
&NAMESPACE_OID,
"42",
"ce6925a5-2cd7-327b-ab1c-4b375ac044e4",
),
(
&NAMESPACE_OID,
"lorem ipsum",
"5dd8654f-76ba-3d47-bc2e-4d6d3a78cb09",
),
(
&NAMESPACE_X500,
"example.org",
"64606f3f-bd63-363e-b946-fca13611b6f7",
),
(
&NAMESPACE_X500,
"rust-lang.org",
"bcee7a9c-52f1-30c6-a3cc-8c72ba634990",
),
(
&NAMESPACE_X500,
"42",
"c1073fa2-d4a6-3104-b21d-7a6bdcf39a23",
),
(
&NAMESPACE_X500,
"lorem ipsum",
"02f09a3f-1624-3b1d-8409-44eff7708208",
),
];
#[test]
fn test_nil() {
let nil = Uuid::nil();
let not_nil = test_util::new();
let from_bytes = Uuid::from_uuid_bytes([
4, 54, 67, 12, 43, 2, 2, 76, 32, 50, 87, 5, 1, 33, 43, 87,
]);
assert_eq!(from_bytes.get_version(), None);
assert!(nil.is_nil());
assert!(!not_nil.is_nil());
assert_eq!(nil.get_version(), Some(UuidVersion::Nil));
assert_eq!(not_nil.get_version(), Some(UuidVersion::Random))
}
#[test]
fn test_new() {
if cfg!(feature = "v3") {
let u = Uuid::new(UuidVersion::Md5);
assert!(u.is_some(), "{:?}", u);
assert_eq!(
u.unwrap().get_version().unwrap(),
UuidVersion::Md5
);
} else {
assert_eq!(Uuid::new(UuidVersion::Md5), None);
}
if cfg!(feature = "v4") {
let uuid1 = Uuid::new(UuidVersion::Random).unwrap();
let s = uuid1.simple().to_string();
assert_eq!(s.len(), 32);
assert_eq!(
uuid1.get_version().unwrap(),
UuidVersion::Random
);
} else {
assert!(Uuid::new(UuidVersion::Random).is_none());
}
if cfg!(feature = "v5") {
let u = Uuid::new(UuidVersion::Sha1);
assert!(u.is_some(), "{:?}", u);
assert_eq!(
u.unwrap().get_version().unwrap(),
UuidVersion::Sha1
);
} else {
assert_eq!(Uuid::new(UuidVersion::Sha1), None);
}
// Test unsupported versions
assert_eq!(Uuid::new(UuidVersion::Mac), None);
assert_eq!(Uuid::new(UuidVersion::Dce), None);
}
#[cfg(feature = "v3")]
#[test]
fn test_new_v3() {
for &(ref ns, ref name, _) in FIXTURE_V3 {
let uuid = Uuid::new_v3(*ns, *name);
assert_eq!(uuid.get_version().unwrap(), UuidVersion::Md5);
assert_eq!(
uuid.get_variant().unwrap(),
UuidVariant::RFC4122
);
}
}
#[test]
fn test_predefined_namespaces() {
assert_eq!(
NAMESPACE_DNS.hyphenated().to_string(),
"6ba7b810-9dad-11d1-80b4-00c04fd430c8"
);
assert_eq!(
NAMESPACE_URL.hyphenated().to_string(),
"6ba7b811-9dad-11d1-80b4-00c04fd430c8"
);
assert_eq!(
NAMESPACE_OID.hyphenated().to_string(),
"6ba7b812-9dad-11d1-80b4-00c04fd430c8"
);
assert_eq!(
NAMESPACE_X500.hyphenated().to_string(),
"6ba7b814-9dad-11d1-80b4-00c04fd430c8"
);
}
#[cfg(feature = "v3")]
#[test]
fn test_get_version_v3() {
let uuid = Uuid::new_v3(&NAMESPACE_DNS, "rust-lang.org");
assert_eq!(uuid.get_version().unwrap(), UuidVersion::Md5);
assert_eq!(uuid.get_version_num(), 3);
}
#[test]
fn test_get_variant() {
let uuid1 = test_util::new();
let uuid2 =
Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap();
let uuid3 =
Uuid::parse_str("67e55044-10b1-426f-9247-bb680e5fe0c8").unwrap();
let uuid4 =
Uuid::parse_str("936DA01F9ABD4d9dC0C702AF85C822A8").unwrap();
let uuid5 =
Uuid::parse_str("F9168C5E-CEB2-4faa-D6BF-329BF39FA1E4").unwrap();
let uuid6 =
Uuid::parse_str("f81d4fae-7dec-11d0-7765-00a0c91e6bf6").unwrap();
assert_eq!(
uuid1.get_variant().unwrap(),
UuidVariant::RFC4122
);
assert_eq!(
uuid2.get_variant().unwrap(),
UuidVariant::RFC4122
);
assert_eq!(
uuid3.get_variant().unwrap(),
UuidVariant::RFC4122
);
assert_eq!(
uuid4.get_variant().unwrap(),
UuidVariant::Microsoft
);
assert_eq!(
uuid5.get_variant().unwrap(),
UuidVariant::Microsoft
);
assert_eq!(uuid6.get_variant().unwrap(), UuidVariant::NCS);
}
#[test]
fn test_parse_uuid_v4() {
use super::ParseError::*;
// Invalid
assert_eq!(Uuid::parse_str(""), Err(InvalidLength(0)));
assert_eq!(Uuid::parse_str("!"), Err(InvalidLength(1)));
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E45"),
Err(InvalidLength(37))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa-BBF-329BF39FA1E4"),
Err(InvalidLength(35))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa-BGBF-329BF39FA1E4"),
Err(InvalidCharacter('G', 20))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2F4faaFB6BFF329BF39FA1E4"),
Err(InvalidGroups(2))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faaFB6BFF329BF39FA1E4"),
Err(InvalidGroups(3))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa-B6BFF329BF39FA1E4"),
Err(InvalidGroups(4))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa"),
Err(InvalidLength(18))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faaXB6BFF329BF39FA1E4"),
Err(InvalidCharacter('X', 18))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB-24fa-eB6BFF32-BF39FA1E4"),
Err(InvalidGroupLength(1, 3, 4))
);
assert_eq!(
Uuid::parse_str("01020304-1112-2122-3132-41424344"),
Err(InvalidGroupLength(4, 8, 12))
);
assert_eq!(
Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c"),
Err(InvalidLength(31))
);
assert_eq!(
Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c88"),
Err(InvalidLength(33))
);
assert_eq!(
Uuid::parse_str("67e5504410b1426f9247bb680e5fe0cg8"),
Err(InvalidLength(33))
);
assert_eq!(
Uuid::parse_str("67e5504410b1426%9247bb680e5fe0c8"),
Err(InvalidCharacter('%', 15))
);
assert_eq!(
Uuid::parse_str("231231212212423424324323477343246663"),
Err(InvalidLength(36))
);
// Valid
assert!(Uuid::parse_str("00000000000000000000000000000000").is_ok());
assert!(
Uuid::parse_str("67e55044-10b1-426f-9247-bb680e5fe0c8").is_ok()
);
assert!(
Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4").is_ok()
);
assert!(Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c8").is_ok());
assert!(
Uuid::parse_str("01020304-1112-2122-3132-414243444546").is_ok()
);
assert!(
Uuid::parse_str("urn:uuid:67e55044-10b1-426f-9247-bb680e5fe0c8")
.is_ok()
);
// Nil
let nil = Uuid::nil();
assert_eq!(
Uuid::parse_str("00000000000000000000000000000000").unwrap(),
nil
);
assert_eq!(
Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(),
nil
);
// Round-trip
let uuid_orig = test_util::new();
let orig_str = uuid_orig.to_string();
let uuid_out = Uuid::parse_str(&orig_str).unwrap();
assert_eq!(uuid_orig, uuid_out);
// Test error reporting
assert_eq!(
Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c"),
Err(InvalidLength(31))
);
assert_eq!(
Uuid::parse_str("67e550X410b1426f9247bb680e5fe0cd"),
Err(InvalidCharacter('X', 6))
);
assert_eq!(
Uuid::parse_str("67e550-4105b1426f9247bb680e5fe0c"),
Err(InvalidGroupLength(0, 6, 8))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF1-02BF39FA1E4"),
Err(InvalidGroupLength(3, 5, 4))
);
}
#[test]
fn test_to_simple_string() {
let uuid1 = test_util::new();
let s = uuid1.simple().to_string();
assert_eq!(s.len(), 32);
assert!(s.chars().all(|c| c.is_digit(16)));
}
#[test]
fn test_to_hyphenated_string() {
let uuid1 = test_util::new();
let s = uuid1.hyphenated().to_string();
assert!(s.len() == 36);
assert!(s.chars().all(|c| c.is_digit(16) || c == '-'));
}
#[test]
fn test_upper_lower_hex() {
use super::fmt::Write;
let mut buf = String::new();
let u = test_util::new();
macro_rules! check {
($buf:ident, $format:expr, $target:expr, $len:expr, $cond:expr) => {
$buf.clear();
write!($buf, $format, $target).unwrap();
assert!(buf.len() == $len);
assert!($buf.chars().all($cond), "{}", $buf);
};
}
check!(
buf,
"{:X}",
u,
36,
|c| c.is_uppercase() || c.is_digit(10) || c == '-'
);
check!(
buf,
"{:X}",
u.hyphenated(),
36,
|c| c.is_uppercase() || c.is_digit(10) || c == '-'
);
check!(
buf,
"{:X}",
u.simple(),
32,
|c| c.is_uppercase() || c.is_digit(10)
);
check!(
buf,
"{:x}",
u.hyphenated(),
36,
|c| c.is_lowercase() || c.is_digit(10) || c == '-'
);
check!(
buf,
"{:x}",
u.simple(),
32,
|c| c.is_lowercase() || c.is_digit(10)
);
}
#[cfg(feature = "v3")]
#[test]
fn test_v3_to_hypenated_string() {
for &(ref ns, ref name, ref expected) in FIXTURE_V3 {
let uuid = Uuid::new_v3(*ns, *name);
assert_eq!(uuid.hyphenated().to_string(), *expected);
}
}
#[test]
fn test_to_urn_string() {
let uuid1 = test_util::new();
let ss = uuid1.urn().to_string();
let s = &ss[9..];
assert!(ss.starts_with("urn:uuid:"));
assert_eq!(s.len(), 36);
assert!(s.chars().all(|c| c.is_digit(16) || c == '-'));
}
#[test]
fn test_to_simple_string_matching() {
let uuid1 = test_util::new();
let hs = uuid1.hyphenated().to_string();
let ss = uuid1.simple().to_string();
let hsn = hs.chars()
.filter(|&c| c != '-')
.collect::<String>();
assert_eq!(hsn, ss);
}
#[test]
fn test_string_roundtrip() {
let uuid = test_util::new();
let hs = uuid.hyphenated().to_string();
let uuid_hs = Uuid::parse_str(&hs).unwrap();
assert_eq!(uuid_hs, uuid);
let ss = uuid.to_string();
let uuid_ss = Uuid::parse_str(&ss).unwrap();
assert_eq!(uuid_ss, uuid);
}
#[test]
fn test_from_fields() {
let d1: u32 = 0xa1a2a3a4;
let d2: u16 = 0xb1b2;
let d3: u16 = 0xc1c2;
let d4 = [0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8];
let u = Uuid::from_fields(d1, d2, d3, &d4).unwrap();
let expected = "a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8";
let result = u.simple().to_string();
assert_eq!(result, expected);
}
#[test]
fn test_as_fields() {
let u = test_util::new();
let (d1, d2, d3, d4) = u.as_fields();
assert_ne!(d1, 0);
assert_ne!(d2, 0);
assert_ne!(d3, 0);
assert_eq!(d4.len(), 8);
assert!(!d4.iter().all(|&b| b == 0));
}
#[test]
fn test_fields_roundtrip() {
let d1_in: u32 = 0xa1a2a3a4;
let d2_in: u16 = 0xb1b2;
let d3_in: u16 = 0xc1c2;
let d4_in = &[0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8];
let u = Uuid::from_fields(d1_in, d2_in, d3_in, d4_in).unwrap();
let (d1_out, d2_out, d3_out, d4_out) = u.as_fields();
assert_eq!(d1_in, d1_out);
assert_eq!(d2_in, d2_out);
assert_eq!(d3_in, d3_out);
assert_eq!(d4_in, d4_out);
}
#[test]
fn test_from_bytes() {
let b = [
0xa1, 0xa2, 0xa3, 0xa4, 0xb1, 0xb2, 0xc1, 0xc2, 0xd1, 0xd2, 0xd3,
0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
];
let u = Uuid::from_bytes(&b).unwrap();
let expected = "a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8";
assert_eq!(u.simple().to_string(), expected);
}
#[test]
fn test_from_uuid_bytes() {
let b = [
0xa1, 0xa2, 0xa3, 0xa4, 0xb1, 0xb2, 0xc1, 0xc2, 0xd1, 0xd2, 0xd3,
0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
];
let u = Uuid::from_uuid_bytes(b);
let expected = "a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8";
assert_eq!(u.simple().to_string(), expected);
}
#[test]
fn test_as_bytes() {
let u = test_util::new();
let ub = u.as_bytes();
assert_eq!(ub.len(), 16);
assert!(!ub.iter().all(|&b| b == 0));
}
#[test]
fn test_bytes_roundtrip() {
let b_in: [u8; 16] = [
0xa1, 0xa2, 0xa3, 0xa4, 0xb1, 0xb2, 0xc1, 0xc2, 0xd1, 0xd2, 0xd3,
0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
];
let u = Uuid::from_bytes(&b_in).unwrap();
let b_out = u.as_bytes();
assert_eq!(&b_in, b_out);
}
#[test]
fn test_from_random_bytes() {
let b = [
0xa1, 0xa2, 0xa3, 0xa4, 0xb1, 0xb2, 0xc1, 0xc2, 0xd1, 0xd2, 0xd3,
0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
];
let u = Uuid::from_random_bytes(b);
let expected = "a1a2a3a4b1b241c291d2d3d4d5d6d7d8";
assert_eq!(u.simple().to_string(), expected);
}
#[test]
fn test_iterbytes_impl_for_uuid() {
let mut set = std::collections::HashSet::new();
let id1 = test_util::new();
let id2 = test_util::new2();
set.insert(id1.clone());
assert!(set.contains(&id1));
assert!(!set.contains(&id2));
}
}
remove `Uuid::new` function
Signed-off-by: Hunar Roop Kahlon <0f790b20a348aea0ff429218e4c2e71b2cd5d147@gmail.com>
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Generate and parse UUIDs.
//!
//! Provides support for Universally Unique Identifiers (UUIDs). A UUID is a
//! unique 128-bit number, stored as 16 octets. UUIDs are used to assign
//! unique identifiers to entities without requiring a central allocating
//! authority.
//!
//! They are particularly useful in distributed systems, though can be used in
//! disparate areas, such as databases and network protocols. Typically a UUID
//! is displayed in a readable string form as a sequence of hexadecimal digits,
//! separated into groups by hyphens.
//!
//! The uniqueness property is not strictly guaranteed, however for all
//! practical purposes, it can be assumed that an unintentional collision would
//! be extremely unlikely.
//!
//! # Dependencies
//!
//! By default, this crate depends on nothing but `std` and cannot generate
//! [`Uuid`]s. You need to enable the following Cargo features to enable
//! various pieces of functionality:
//!
//! * `v1` - adds the `Uuid::new_v1` function and the ability to create a V1
//! using an implementation of `UuidV1ClockSequence` (usually `UuidV1Context`)
//! and a timestamp from `time::timespec`.
//! * `v3` - adds the `Uuid::new_v3` function and the ability to create a V3
//! UUID based on the MD5 hash of some data.
//! * `v4` - adds the `Uuid::new_v4` function and the ability to randomly
//! generate a `Uuid`.
//! * `v5` - adds the `Uuid::new_v5` function and the ability to create a V5
//! UUID based on the SHA1 hash of some data.
//! * `serde` - adds the ability to serialize and deserialize a `Uuid` using the
//! `serde` crate.
//!
//! By default, `uuid` can be depended on with:
//!
//! ```toml
//! [dependencies]
//! uuid = "0.6"
//! ```
//!
//! To activate various features, use syntax like:
//!
//! ```toml
//! [dependencies]
//! uuid = { version = "0.6", features = ["serde", "v4"] }
//! ```
//!
//! You can disable default features with:
//!
//! ```toml
//! [dependencies]
//! uuid = { version = "0.6", default-features = false }
//! ```
//!
//! # Examples
//!
//! To parse a UUID given in the simple format and print it as a urn:
//!
//! ```rust
//! use uuid::Uuid;
//!
//! fn main() {
//! let my_uuid =
//! Uuid::parse_str("936DA01F9ABD4d9d80C702AF85C822A8").unwrap();
//! println!("{}", my_uuid.urn());
//! }
//! ```
//!
//! To create a new random (V4) UUID and print it out in hexadecimal form:
//!
//! ```ignore,rust
//! // Note that this requires the `v4` feature enabled in the uuid crate.
//!
//! use uuid::Uuid;
//!
//! fn main() {
//! let my_uuid = Uuid::new_v4();
//! println!("{}", my_uuid);
//! }
//! ```
//!
//! # Strings
//!
//! Examples of string representations:
//!
//! * simple: `936DA01F9ABD4d9d80C702AF85C822A8`
//! * hyphenated: `550e8400-e29b-41d4-a716-446655440000`
//! * urn: `urn:uuid:F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4`
//!
//! # References
//!
//! * [Wikipedia: Universally Unique Identifier](
//! http://en.wikipedia.org/wiki/Universally_unique_identifier)
//! * [RFC4122: A Universally Unique IDentifier (UUID) URN Namespace](
//! http://tools.ietf.org/html/rfc4122)
#![doc(
html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://docs.rs/uuid"
)]
#![deny(warnings)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(all(feature = "u128", nightly), feature(i128_type))]
#[macro_use]
extern crate cfg_if;
cfg_if! {
if #[cfg(feature = "byteorder")] {
extern crate byteorder;
}
}
cfg_if! {
if #[cfg(feature = "md5")] {
extern crate md5;
}
}
cfg_if! {
if #[cfg(feature = "rand")] {
extern crate rand;
}
}
cfg_if! {
if #[cfg(feature = "serde")] {
extern crate serde;
}
}
cfg_if! {
if #[cfg(feature = "sha1")] {
extern crate sha1;
}
}
cfg_if! {
if #[cfg(all(feature = "slog", not(test)))] {
extern crate slog;
} else if #[cfg(all(feature = "slog", test))] {
#[macro_use]
extern crate slog;
}
}
cfg_if! {
if #[cfg(feature = "std")] {
use std::fmt;
use std::str;
} else if #[cfg(not(feature = "std"))] {
use core::fmt;
use core::str;
}
}
pub mod ns;
pub mod prelude;
mod core_support;
cfg_if! {
if #[cfg(feature = "v1")] {
pub mod v1;
}
}
cfg_if! {
if #[cfg(feature = "serde")] {
mod serde_support;
}
}
cfg_if! {
if #[cfg(feature = "slog")] {
mod slog_support;
}
}
cfg_if! {
if #[cfg(feature = "std")] {
mod std_support;
}
}
cfg_if! {
if #[cfg(test)] {
mod test_util;
}
}
cfg_if! {
    // Fixed: the original predicate was `all(feature = "u128"), nightly`,
    // which closes `all(...)` too early and leaves `nightly` dangling
    // outside it. The intended predicate (matching the crate attribute
    // `cfg_attr(all(feature = "u128", nightly), ...)` above) requires BOTH
    // the `u128` feature and a nightly compiler.
    if #[cfg(all(feature = "u128", nightly))] {
        mod u128_support;
    }
}
cfg_if! {
if #[cfg(feature = "v4")] {
mod v4;
}
}
cfg_if! {
if #[cfg(feature = "v5")] {
mod v5;
}
}
/// A 128-bit (16 byte) buffer containing the ID.
///
/// The octets are kept in the order they appear in the textual forms;
/// multi-byte fields are packed big-endian (see `from_fields`).
pub type UuidBytes = [u8; 16];
/// The version of the UUID, denoting the generating algorithm.
///
/// The discriminants match the RFC 4122 version numbers (`Nil` = 0,
/// `Mac` = 1, ... `Sha1` = 5), so `v as u8` is the value stored in the
/// version nibble of octet 6 (see `set_version`).
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(C)]
pub enum UuidVersion {
    /// Special case for `nil` [`Uuid`].
    ///
    /// [`Uuid`]: struct.Uuid.html
    Nil = 0,
    /// Version 1: MAC address
    Mac,
    /// Version 2: DCE Security
    Dce,
    /// Version 3: MD5 hash
    Md5,
    /// Version 4: Random
    Random,
    /// Version 5: SHA-1 hash
    Sha1,
}
/// The reserved variants of UUIDs.
///
/// The variant is encoded in the most significant bits of octet 8
/// (see `set_variant`/`get_variant`).
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(C)]
pub enum UuidVariant {
    /// Reserved by the NCS for backward compatibility
    NCS = 0,
    /// As described in the RFC4122 Specification (default)
    RFC4122,
    /// Reserved by Microsoft for backward compatibility
    Microsoft,
    /// Reserved for future expansion
    Future,
}
/// A Universally Unique Identifier (UUID).
///
/// A thin, `Copy`-able wrapper over the 16 raw octets; all accessors and
/// formatting adaptors are provided by the `impl` below.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Uuid {
    /// The 128-bit number stored in 16 bytes
    bytes: UuidBytes,
}
/// An adaptor for formatting a `Uuid` as a simple string.
///
/// Created by [`Uuid::simple`]; its `Display`/`LowerHex`/`UpperHex`
/// impls emit the 32-digit, hyphen-free form.
pub struct Simple<'a> {
    // Borrow of the UUID being formatted.
    inner: &'a Uuid,
}
/// An adaptor for formatting a `Uuid` as a hyphenated string.
///
/// Created by [`Uuid::hyphenated`]; its `Display`/`LowerHex`/`UpperHex`
/// impls emit the 8-4-4-4-12 grouped form.
pub struct Hyphenated<'a> {
    // Borrow of the UUID being formatted.
    inner: &'a Uuid,
}
/// An adaptor for formatting a `Uuid` as a URN string.
///
/// Created by [`Uuid::urn`]; its `Display` impl emits the hyphenated
/// form prefixed with `urn:uuid:`.
pub struct Urn<'a> {
    // Borrow of the UUID being formatted.
    inner: &'a Uuid,
}
/// Error details for string parsing failures.
// The variants are now documented individually, so the former
// `#[allow(missing_docs)]` suppression is no longer needed.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum ParseError {
    /// The input's overall length was not one of the accepted lengths;
    /// carries the length that was found.
    InvalidLength(usize),
    /// A character that is neither a hex digit nor an expected hyphen was
    /// found; carries the character and its byte offset in the input.
    InvalidCharacter(char, usize),
    /// A hyphenated input had the wrong number of `-`-separated groups;
    /// carries the number of groups found.
    InvalidGroups(usize),
    /// One group had the wrong number of hex digits; carries the group
    /// index, the length found, and the length expected.
    InvalidGroupLength(usize, usize, u8),
}
// Length in hex digits of the simple (hyphen-free) string form.
const SIMPLE_LENGTH: usize = 32;
// Length in chars of the hyphenated string form (32 digits + 4 hyphens).
const HYPHENATED_LENGTH: usize = 36;
/// Converts a `ParseError` to a string.
impl fmt::Display for ParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Each arm renders the variant's payload into a human-readable
        // diagnostic; the wording is part of the crate's observable output.
        match *self {
            ParseError::InvalidLength(found) => write!(
                f,
                "Invalid length; expecting {} or {} chars, found {}",
                SIMPLE_LENGTH, HYPHENATED_LENGTH, found
            ),
            ParseError::InvalidCharacter(found, pos) => write!(
                f,
                "Invalid character; found `{}` (0x{:02x}) at offset {}",
                found, found as usize, pos
            ),
            ParseError::InvalidGroups(found) => write!(
                f,
                "Malformed; wrong number of groups: expected 1 or 5, found {}",
                found
            ),
            ParseError::InvalidGroupLength(group, found, expecting) => write!(
                f,
                "Malformed; length of group {} was {}, expecting {}",
                group, found, expecting
            ),
        }
    }
}
// Length of each hyphenated group in hex digits.
const GROUP_LENS: [u8; 5] = [8, 4, 4, 4, 12];
// Accumulated length of each hyphenated group in hex digits; `parse_str`
// compares these against its running digit count at each hyphen.
const ACC_GROUP_LENS: [u8; 5] = [8, 12, 16, 20, 32];
impl Uuid {
/// The 'nil UUID'.
///
/// The nil UUID is special form of UUID that is specified to have all
/// 128 bits set to zero, as defined in [IETF RFC 4122 Section 4.1.7][RFC].
///
/// [RFC]: https://tools.ietf.org/html/rfc4122.html#section-4.1.7
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Uuid;
///
/// let uuid = Uuid::nil();
///
/// assert_eq!(
/// uuid.hyphenated().to_string(),
/// "00000000-0000-0000-0000-000000000000"
/// );
/// ```
pub fn nil() -> Uuid {
Uuid { bytes: [0; 16] }
}
/// Creates a UUID using a name from a namespace, based on the MD5 hash.
///
/// A number of namespaces are available as constants in this crate:
///
/// * `NAMESPACE_DNS`
/// * `NAMESPACE_URL`
/// * `NAMESPACE_OID`
/// * `NAMESPACE_X500`
///
/// Note that usage of this method requires the `v3` feature of this crate
/// to be enabled.
#[cfg(feature = "v3")]
pub fn new_v3(namespace: &Uuid, name: &str) -> Uuid {
let mut ctx = md5::Context::new();
ctx.consume(namespace.as_bytes());
ctx.consume(name.as_bytes());
let mut uuid = Uuid {
bytes: ctx.compute().into(),
};
uuid.set_variant(UuidVariant::RFC4122);
uuid.set_version(UuidVersion::Md5);
uuid
}
/// Creates a `Uuid` from four field values.
///
/// # Errors
///
/// This function will return an error if `d4`'s length is not 8 bytes.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Uuid;
///
/// let d4 = [12, 3, 9, 56, 54, 43, 8, 9];
///
/// let uuid = Uuid::from_fields(42, 12, 5, &d4);
/// let uuid = uuid.map(|uuid| uuid.hyphenated().to_string());
///
/// let expected_uuid =
/// Ok(String::from("0000002a-000c-0005-0c03-0938362b0809"));
///
/// assert_eq!(expected_uuid, uuid);
/// ```
///
/// An invalid length:
///
/// ```
/// use uuid::Uuid;
/// use uuid::ParseError;
///
/// let d4 = [12];
///
/// let uuid = Uuid::from_fields(42, 12, 5, &d4);
///
/// let expected_uuid = Err(ParseError::InvalidLength(1));
///
/// assert_eq!(expected_uuid, uuid);
/// ```
pub fn from_fields(
d1: u32,
d2: u16,
d3: u16,
d4: &[u8],
) -> Result<Uuid, ParseError> {
if d4.len() != 8 {
return Err(ParseError::InvalidLength(d4.len()));
}
Ok(Uuid {
bytes: [
(d1 >> 24) as u8,
(d1 >> 16) as u8,
(d1 >> 8) as u8,
d1 as u8,
(d2 >> 8) as u8,
d2 as u8,
(d3 >> 8) as u8,
d3 as u8,
d4[0],
d4[1],
d4[2],
d4[3],
d4[4],
d4[5],
d4[6],
d4[7],
],
})
}
/// Creates a `Uuid` using the supplied bytes.
///
/// # Errors
///
/// This function will return an error if `b` has any length other than 16.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Uuid;
///
/// let bytes = [4, 54, 67, 12, 43, 2, 98, 76, 32, 50, 87, 5, 1, 33, 43,
/// 87];
///
/// let uuid = Uuid::from_bytes(&bytes);
/// let uuid = uuid.map(|uuid| uuid.hyphenated().to_string());
///
/// let expected_uuid =
/// Ok(String::from("0436430c-2b02-624c-2032-570501212b57"));
///
/// assert_eq!(expected_uuid, uuid);
/// ```
///
/// An incorrect number of bytes:
///
/// ```
/// use uuid::Uuid;
/// use uuid::ParseError;
///
/// let bytes = [4, 54, 67, 12, 43, 2, 98, 76];
///
/// let uuid = Uuid::from_bytes(&bytes);
///
/// let expected_uuid = Err(ParseError::InvalidLength(8));
///
/// assert_eq!(expected_uuid, uuid);
/// ```
pub fn from_bytes(b: &[u8]) -> Result<Uuid, ParseError> {
let len = b.len();
if len != 16 {
return Err(ParseError::InvalidLength(len));
}
let mut uuid = Uuid { bytes: [0; 16] };
uuid.bytes.copy_from_slice(b);
Ok(uuid)
}
/// Creates a `Uuid` using the supplied bytes.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Uuid;
/// use uuid::UuidBytes;
///
/// let bytes: UuidBytes = [
/// 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145,
/// 63, 62 ];
///
/// let uuid = Uuid::from_uuid_bytes(bytes);
/// let uuid = uuid.hyphenated().to_string();
///
/// let expected_uuid =
/// String::from("46ebd0ee-0e6d-43c9-b90d-ccc35a913f3e");
///
/// assert_eq!(expected_uuid, uuid);
/// ```
///
/// An incorrect number of bytes:
///
/// ```compile_fail
/// use uuid::Uuid;
/// use uuid::UuidBytes;
///
/// let bytes: UuidBytes = [4, 54, 67, 12, 43, 2, 98, 76]; // doesn't
/// compile
///
/// let uuid = Uuid::from_uuid_bytes(bytes);
/// ```
pub fn from_uuid_bytes(b: UuidBytes) -> Uuid {
Uuid { bytes: b }
}
/// Creates a v4 Uuid from random bytes (e.g. bytes supplied from `Rand`
/// crate)
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Uuid;
/// use uuid::UuidBytes;
///
/// let bytes: UuidBytes = [
/// 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145,
/// 63, 62 ];
/// let uuid = Uuid::from_random_bytes(bytes);
/// let uuid = uuid.hyphenated().to_string();
///
/// let expected_uuid =
/// String::from("46ebd0ee-0e6d-43c9-b90d-ccc35a913f3e");
///
/// assert_eq!(expected_uuid, uuid);
/// ```
///
pub fn from_random_bytes(b: [u8; 16]) -> Uuid {
let mut uuid = Uuid { bytes: b };
uuid.set_variant(UuidVariant::RFC4122);
uuid.set_version(UuidVersion::Random);
uuid
}
    /// Specifies the variant of the UUID structure
    ///
    /// Overwrites the most significant bits of octet 8 with the bit
    /// pattern identifying the requested variant (RFC 4122 §4.1.1),
    /// preserving the remaining low-order bits.
    #[allow(dead_code)]
    fn set_variant(&mut self, v: UuidVariant) {
        // Octet 8 contains the variant in the most significant 3 bits
        self.bytes[8] = match v {
            UuidVariant::NCS => self.bytes[8] & 0x7f, // b0xx...
            UuidVariant::RFC4122 => (self.bytes[8] & 0x3f) | 0x80, // b10x...
            UuidVariant::Microsoft => (self.bytes[8] & 0x1f) | 0xc0, // b110...
            UuidVariant::Future => (self.bytes[8] & 0x1f) | 0xe0, // b111...
        }
    }
    /// Returns the variant of the `Uuid` structure.
    ///
    /// This determines the interpretation of the structure of the UUID.
    /// Currently only the RFC4122 variant is generated by this module.
    ///
    /// * [Variant Reference](http://tools.ietf.org/html/rfc4122#section-4.1.1)
    pub fn get_variant(&self) -> Option<UuidVariant> {
        // Decode the high bits of octet 8, testing prefixes in widening
        // order: 0xx -> NCS, 10x -> RFC4122, 110 -> Microsoft, 111 -> Future.
        match self.bytes[8] {
            x if x & 0x80 == 0x00 => Some(UuidVariant::NCS),
            x if x & 0xc0 == 0x80 => Some(UuidVariant::RFC4122),
            x if x & 0xe0 == 0xc0 => Some(UuidVariant::Microsoft),
            x if x & 0xe0 == 0xe0 => Some(UuidVariant::Future),
            // The four guards above cover every 3-bit prefix; this arm only
            // exists because the compiler cannot prove exhaustiveness.
            _ => None,
        }
    }
    /// Specifies the version number of the `Uuid`.
    ///
    /// Writes the enum's discriminant into the high nibble of octet 6,
    /// leaving the low nibble untouched.
    #[allow(dead_code)]
    fn set_version(&mut self, v: UuidVersion) {
        self.bytes[6] = (self.bytes[6] & 0xF) | ((v as u8) << 4);
    }
/// Returns the version number of the `Uuid`.
///
/// This represents the algorithm used to generate the contents.
///
/// Currently only the Random (V4) algorithm is supported by this
/// module. There are security and privacy implications for using
/// older versions - see [Wikipedia: Universally Unique Identifier](
/// http://en.wikipedia.org/wiki/Universally_unique_identifier) for
/// details.
///
/// * [Version Reference](http://tools.ietf.org/html/rfc4122#section-4.1.3)
pub fn get_version_num(&self) -> usize {
(self.bytes[6] >> 4) as usize
}
/// Returns the version of the `Uuid`.
///
/// This represents the algorithm used to generate the contents
pub fn get_version(&self) -> Option<UuidVersion> {
let v = self.bytes[6] >> 4;
match v {
0 if self.is_nil() => Some(UuidVersion::Nil),
1 => Some(UuidVersion::Mac),
2 => Some(UuidVersion::Dce),
3 => Some(UuidVersion::Md5),
4 => Some(UuidVersion::Random),
5 => Some(UuidVersion::Sha1),
_ => None,
}
}
    /// Returns the four field values of the UUID.
    ///
    /// These values can be passed to the `from_fields()` method to get the
    /// original `Uuid` back.
    ///
    /// * The first field value represents the first group of (eight) hex
    ///   digits, taken as a big-endian `u32` value.  For V1 UUIDs, this field
    ///   represents the low 32 bits of the timestamp.
    /// * The second field value represents the second group of (four) hex
    ///   digits, taken as a big-endian `u16` value.  For V1 UUIDs, this field
    ///   represents the middle 16 bits of the timestamp.
    /// * The third field value represents the third group of (four) hex
    ///   digits, taken as a big-endian `u16` value.  The 4 most significant
    ///   bits give the UUID version, and for V1 UUIDs, the last 12 bits
    ///   represent the high 12 bits of the timestamp.
    /// * The last field value represents the last two groups of four and
    ///   twelve hex digits, taken in order.  The first 1-3 bits of this
    ///   indicate the UUID variant, and for V1 UUIDs, the next 13-15 bits
    ///   indicate the clock sequence and the last 48 bits indicate the node
    ///   ID.
    ///
    /// # Examples
    ///
    /// ```
    /// use uuid::Uuid;
    ///
    /// let uuid = Uuid::nil();
    /// assert_eq!(uuid.as_fields(), (0, 0, 0, &[0u8; 8]));
    ///
    /// let uuid =
    ///     Uuid::parse_str("936DA01F-9ABD-4D9D-80C7-02AF85C822A8").unwrap();
    /// assert_eq!(
    ///     uuid.as_fields(),
    ///     (
    ///         0x936DA01F,
    ///         0x9ABD,
    ///         0x4D9D,
    ///         b"\x80\xC7\x02\xAF\x85\xC8\x22\xA8"
    ///     )
    /// );
    /// ```
    pub fn as_fields(&self) -> (u32, u16, u16, &[u8; 8]) {
        // Reassemble the big-endian integer fields from the leading octets.
        let d1 = u32::from(self.bytes[0]) << 24 | u32::from(self.bytes[1]) << 16
            | u32::from(self.bytes[2]) << 8
            | u32::from(self.bytes[3]);
        let d2 = u16::from(self.bytes[4]) << 8 | u16::from(self.bytes[5]);
        let d3 = u16::from(self.bytes[6]) << 8 | u16::from(self.bytes[7]);
        let d4: &[u8; 8] =
            // SAFETY: `self.bytes[8..16]` is exactly 8 contiguous bytes, so
            // reinterpreting its pointer as `&[u8; 8]` (borrowed for the
            // same lifetime as `self`) is sound.
            unsafe { &*(self.bytes[8..16].as_ptr() as *const [u8; 8]) };
        (d1, d2, d3, d4)
    }
/// Returns an array of 16 octets containing the UUID data.
///
/// # Examples
///
/// ```
/// use uuid::Uuid;
///
/// let uuid = Uuid::nil();
/// assert_eq!(uuid.as_bytes(), &[0; 16]);
///
/// let uuid = Uuid::parse_str("936DA01F9ABD4d9d80C702AF85C822A8").unwrap();
/// assert_eq!(
/// uuid.as_bytes(),
/// &[
/// 147, 109, 160, 31, 154, 189, 77, 157, 128, 199, 2, 175, 133,
/// 200, 34, 168,
/// ]
/// );
/// ```
pub fn as_bytes(&self) -> &[u8; 16] {
&self.bytes
}
/// Returns a wrapper which when formatted via `fmt::Display` will format a
/// string of 32 hexadecimal digits.
///
/// # Examples
///
/// ```
/// use uuid::Uuid;
///
/// let uuid = Uuid::nil();
/// assert_eq!(
/// uuid.simple().to_string(),
/// "00000000000000000000000000000000"
/// );
/// ```
pub fn simple(&self) -> Simple {
Simple { inner: self }
}
/// Returns a wrapper which when formatted via `fmt::Display` will format a
/// string of hexadecimal digits separated into groups with a hyphen.
///
/// # Examples
///
/// ```
/// use uuid::Uuid;
///
/// let uuid = Uuid::nil();
/// assert_eq!(
/// uuid.hyphenated().to_string(),
/// "00000000-0000-0000-0000-000000000000"
/// );
/// ```
pub fn hyphenated(&self) -> Hyphenated {
Hyphenated { inner: self }
}
/// Returns a wrapper which when formatted via `fmt::Display` will format a
/// string of the UUID as a full URN string.
///
/// # Examples
///
/// ```
/// use uuid::Uuid;
///
/// let uuid = Uuid::nil();
/// assert_eq!(
/// uuid.urn().to_string(),
/// "urn:uuid:00000000-0000-0000-0000-000000000000"
/// );
/// ```
pub fn urn(&self) -> Urn {
Urn { inner: self }
}
    /// Returns an Optional Tuple of (u64, u16) representing the timestamp and
    /// counter portion of a V1 UUID.  If the supplied UUID is not V1, this
    /// will return None
    pub fn to_timestamp(&self) -> Option<(u64, u16)> {
        // Only V1 (time-based / MAC) UUIDs carry a timestamp.
        if self.get_version()
            .map(|v| v != UuidVersion::Mac)
            .unwrap_or(true)
        {
            return None;
        }
        // Reassemble the 60-bit timestamp from the RFC 4122 field layout:
        // time_hi (low nibble of octet 6 plus octet 7), then time_mid
        // (octets 4-5), then time_low (octets 0-3).
        let ts: u64 = u64::from(self.bytes[6] & 0x0F) << 56
            | u64::from(self.bytes[7]) << 48
            | u64::from(self.bytes[4]) << 40
            | u64::from(self.bytes[5]) << 32
            | u64::from(self.bytes[0]) << 24
            | u64::from(self.bytes[1]) << 16
            | u64::from(self.bytes[2]) << 8
            | u64::from(self.bytes[3]);
        // The clock sequence is the low 6 bits of octet 8 plus octet 9.
        let count: u16 =
            u16::from(self.bytes[8] & 0x3F) << 8 | u16::from(self.bytes[9]);
        Some((ts, count))
    }
    /// Parses a `Uuid` from a string of hexadecimal digits with optional
    /// hyphens.
    ///
    /// Any of the formats generated by this module (simple, hyphenated, urn)
    /// are supported by this parsing function.
    pub fn parse_str(mut input: &str) -> Result<Uuid, ParseError> {
        // Ensure length is valid for any of the supported formats
        let len = input.len();
        // A urn is the hyphenated form behind a 9-char "urn:uuid:" prefix;
        // strip the prefix and parse the remainder like any other input.
        if len == (HYPHENATED_LENGTH + 9) && input.starts_with("urn:uuid:") {
            input = &input[9..];
        } else if len != SIMPLE_LENGTH && len != HYPHENATED_LENGTH {
            return Err(ParseError::InvalidLength(len));
        }
        // `digit` counts only hexadecimal digits, `i_char` counts all chars.
        let mut digit = 0;
        let mut group = 0;
        let mut acc = 0;
        let mut buffer = [0u8; 16];
        for (i_char, chr) in input.bytes().enumerate() {
            // Characters remain after all 32 digits were consumed, yet fewer
            // than 5 groups were seen: the grouping must be wrong.
            if digit as usize >= SIMPLE_LENGTH && group != 4 {
                if group == 0 {
                    return Err(ParseError::InvalidLength(len));
                }
                return Err(ParseError::InvalidGroups(group + 1));
            }
            if digit % 2 == 0 {
                // First digit of the byte.
                match chr {
                    // Calculate upper half.
                    b'0'...b'9' => acc = chr - b'0',
                    b'a'...b'f' => acc = chr - b'a' + 10,
                    b'A'...b'F' => acc = chr - b'A' + 10,
                    // Found a group delimiter
                    b'-' => {
                        if ACC_GROUP_LENS[group] != digit {
                            // Calculate how many digits this group consists of
                            // in the input.
                            let found = if group > 0 {
                                digit - ACC_GROUP_LENS[group - 1]
                            } else {
                                digit
                            };
                            return Err(ParseError::InvalidGroupLength(
                                group,
                                found as usize,
                                GROUP_LENS[group],
                            ));
                        }
                        // Next group, decrement digit, it is incremented again
                        // at the bottom.
                        group += 1;
                        digit -= 1;
                    }
                    _ => {
                        // Report the full (possibly multi-byte) character at
                        // this offset, not just the raw byte.
                        return Err(ParseError::InvalidCharacter(
                            input[i_char..].chars().next().unwrap(),
                            i_char,
                        ))
                    }
                }
            } else {
                // Second digit of the byte, shift the upper half.
                acc *= 16;
                match chr {
                    b'0'...b'9' => acc += chr - b'0',
                    b'a'...b'f' => acc += chr - b'a' + 10,
                    b'A'...b'F' => acc += chr - b'A' + 10,
                    b'-' => {
                        // The byte isn't complete yet.
                        let found = if group > 0 {
                            digit - ACC_GROUP_LENS[group - 1]
                        } else {
                            digit
                        };
                        return Err(ParseError::InvalidGroupLength(
                            group,
                            found as usize,
                            GROUP_LENS[group],
                        ));
                    }
                    _ => {
                        return Err(ParseError::InvalidCharacter(
                            input[i_char..].chars().next().unwrap(),
                            i_char,
                        ))
                    }
                }
                // Both nibbles seen: store the completed byte.
                buffer[(digit / 2) as usize] = acc;
            }
            digit += 1;
        }
        // Now check the last group.
        if ACC_GROUP_LENS[4] != digit {
            return Err(ParseError::InvalidGroupLength(
                group,
                (digit - ACC_GROUP_LENS[3]) as usize,
                GROUP_LENS[4],
            ));
        }
        // `buffer` is exactly 16 bytes, so `from_bytes` cannot fail here.
        Ok(Uuid::from_bytes(&buffer).unwrap())
    }
/// Tests if the UUID is nil
pub fn is_nil(&self) -> bool {
self.bytes.iter().all(|&b| b == 0)
}
}
impl<'a> fmt::Display for Simple<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // `Display` is defined as the lowercase hex form.
        fmt::LowerHex::fmt(self, f)
    }
}
impl<'a> fmt::UpperHex for Simple<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Two uppercase hex digits per octet, no separators: 32 chars total.
        for byte in &self.inner.bytes {
            write!(f, "{:02X}", byte)?;
        }
        Ok(())
    }
}
impl<'a> fmt::LowerHex for Simple<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Two lowercase hex digits per octet, no separators: 32 chars total.
        for byte in &self.inner.bytes {
            write!(f, "{:02x}", byte)?;
        }
        Ok(())
    }
}
impl<'a> fmt::Display for Hyphenated<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // `Display` is defined as the lowercase hex form.
        fmt::LowerHex::fmt(self, f)
    }
}
// Shared body of the hyphenated `LowerHex`/`UpperHex` impls: feeds the
// 16 octets through `write!` with the supplied format string, which must
// take three leading integer fields (u32, u16, u16) followed by the
// trailing eight individual octets.
macro_rules! hyphenated_write {
    ($f:expr, $format:expr, $bytes:expr) => {{
        // Reassemble the big-endian leading fields from the first 8 octets.
        let data1 = u32::from($bytes[0]) << 24 | u32::from($bytes[1]) << 16
            | u32::from($bytes[2]) << 8
            | u32::from($bytes[3]);
        let data2 = u16::from($bytes[4]) << 8 | u16::from($bytes[5]);
        let data3 = u16::from($bytes[6]) << 8 | u16::from($bytes[7]);
        write!(
            $f,
            $format,
            data1,
            data2,
            data3,
            $bytes[8],
            $bytes[9],
            $bytes[10],
            $bytes[11],
            $bytes[12],
            $bytes[13],
            $bytes[14],
            $bytes[15]
        )
    }};
}
impl<'a> fmt::UpperHex for Hyphenated<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Uppercase 8-4-4-4-12 grouping.
        hyphenated_write!(
            f,
            "{:08X}-\
             {:04X}-\
             {:04X}-\
             {:02X}{:02X}-\
             {:02X}{:02X}{:02X}{:02X}{:02X}{:02X}",
            self.inner.bytes
        )
    }
}
impl<'a> fmt::LowerHex for Hyphenated<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Lowercase 8-4-4-4-12 grouping.
        hyphenated_write!(
            f,
            "{:08x}-\
             {:04x}-\
             {:04x}-\
             {:02x}{:02x}-\
             {:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
            self.inner.bytes
        )
    }
}
impl<'a> fmt::Display for Urn<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The urn form is the hyphenated form behind a fixed prefix.
        write!(f, "urn:uuid:{}", self.inner.hyphenated())
    }
}
#[cfg(test)]
mod tests {
extern crate std;
use self::std::prelude::v1::*;
use super::test_util;
use super::ns::{NAMESPACE_X500, NAMESPACE_DNS, NAMESPACE_OID,
NAMESPACE_URL};
use prelude::*;
#[cfg(feature = "v3")]
static FIXTURE_V3: &'static [(&'static Uuid, &'static str, &'static str)] =
&[
(
&NAMESPACE_DNS,
"example.org",
"04738bdf-b25a-3829-a801-b21a1d25095b",
),
(
&NAMESPACE_DNS,
"rust-lang.org",
"c6db027c-615c-3b4d-959e-1a917747ca5a",
),
(
&NAMESPACE_DNS,
"42",
"5aab6e0c-b7d3-379c-92e3-2bfbb5572511",
),
(
&NAMESPACE_DNS,
"lorem ipsum",
"4f8772e9-b59c-3cc9-91a9-5c823df27281",
),
(
&NAMESPACE_URL,
"example.org",
"39682ca1-9168-3da2-a1bb-f4dbcde99bf9",
),
(
&NAMESPACE_URL,
"rust-lang.org",
"7ed45aaf-e75b-3130-8e33-ee4d9253b19f",
),
(
&NAMESPACE_URL,
"42",
"08998a0c-fcf4-34a9-b444-f2bfc15731dc",
),
(
&NAMESPACE_URL,
"lorem ipsum",
"e55ad2e6-fb89-34e8-b012-c5dde3cd67f0",
),
(
&NAMESPACE_OID,
"example.org",
"f14eec63-2812-3110-ad06-1625e5a4a5b2",
),
(
&NAMESPACE_OID,
"rust-lang.org",
"6506a0ec-4d79-3e18-8c2b-f2b6b34f2b6d",
),
(
&NAMESPACE_OID,
"42",
"ce6925a5-2cd7-327b-ab1c-4b375ac044e4",
),
(
&NAMESPACE_OID,
"lorem ipsum",
"5dd8654f-76ba-3d47-bc2e-4d6d3a78cb09",
),
(
&NAMESPACE_X500,
"example.org",
"64606f3f-bd63-363e-b946-fca13611b6f7",
),
(
&NAMESPACE_X500,
"rust-lang.org",
"bcee7a9c-52f1-30c6-a3cc-8c72ba634990",
),
(
&NAMESPACE_X500,
"42",
"c1073fa2-d4a6-3104-b21d-7a6bdcf39a23",
),
(
&NAMESPACE_X500,
"lorem ipsum",
"02f09a3f-1624-3b1d-8409-44eff7708208",
),
];
#[test]
fn test_nil() {
let nil = Uuid::nil();
let not_nil = test_util::new();
let from_bytes = Uuid::from_uuid_bytes([
4, 54, 67, 12, 43, 2, 2, 76, 32, 50, 87, 5, 1, 33, 43, 87,
]);
assert_eq!(from_bytes.get_version(), None);
assert!(nil.is_nil());
assert!(!not_nil.is_nil());
assert_eq!(nil.get_version(), Some(UuidVersion::Nil));
assert_eq!(not_nil.get_version(), Some(UuidVersion::Random))
}
#[cfg(feature = "v3")]
#[test]
fn test_new_v3() {
for &(ref ns, ref name, _) in FIXTURE_V3 {
let uuid = Uuid::new_v3(*ns, *name);
assert_eq!(uuid.get_version().unwrap(), UuidVersion::Md5);
assert_eq!(
uuid.get_variant().unwrap(),
UuidVariant::RFC4122
);
}
}
#[test]
fn test_predefined_namespaces() {
assert_eq!(
NAMESPACE_DNS.hyphenated().to_string(),
"6ba7b810-9dad-11d1-80b4-00c04fd430c8"
);
assert_eq!(
NAMESPACE_URL.hyphenated().to_string(),
"6ba7b811-9dad-11d1-80b4-00c04fd430c8"
);
assert_eq!(
NAMESPACE_OID.hyphenated().to_string(),
"6ba7b812-9dad-11d1-80b4-00c04fd430c8"
);
assert_eq!(
NAMESPACE_X500.hyphenated().to_string(),
"6ba7b814-9dad-11d1-80b4-00c04fd430c8"
);
}
#[cfg(feature = "v3")]
#[test]
fn test_get_version_v3() {
let uuid = Uuid::new_v3(&NAMESPACE_DNS, "rust-lang.org");
assert_eq!(uuid.get_version().unwrap(), UuidVersion::Md5);
assert_eq!(uuid.get_version_num(), 3);
}
#[test]
fn test_get_variant() {
let uuid1 = test_util::new();
let uuid2 =
Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap();
let uuid3 =
Uuid::parse_str("67e55044-10b1-426f-9247-bb680e5fe0c8").unwrap();
let uuid4 =
Uuid::parse_str("936DA01F9ABD4d9dC0C702AF85C822A8").unwrap();
let uuid5 =
Uuid::parse_str("F9168C5E-CEB2-4faa-D6BF-329BF39FA1E4").unwrap();
let uuid6 =
Uuid::parse_str("f81d4fae-7dec-11d0-7765-00a0c91e6bf6").unwrap();
assert_eq!(
uuid1.get_variant().unwrap(),
UuidVariant::RFC4122
);
assert_eq!(
uuid2.get_variant().unwrap(),
UuidVariant::RFC4122
);
assert_eq!(
uuid3.get_variant().unwrap(),
UuidVariant::RFC4122
);
assert_eq!(
uuid4.get_variant().unwrap(),
UuidVariant::Microsoft
);
assert_eq!(
uuid5.get_variant().unwrap(),
UuidVariant::Microsoft
);
assert_eq!(uuid6.get_variant().unwrap(), UuidVariant::NCS);
}
#[test]
fn test_parse_uuid_v4() {
use super::ParseError::*;
// Invalid
assert_eq!(Uuid::parse_str(""), Err(InvalidLength(0)));
assert_eq!(Uuid::parse_str("!"), Err(InvalidLength(1)));
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E45"),
Err(InvalidLength(37))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa-BBF-329BF39FA1E4"),
Err(InvalidLength(35))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa-BGBF-329BF39FA1E4"),
Err(InvalidCharacter('G', 20))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2F4faaFB6BFF329BF39FA1E4"),
Err(InvalidGroups(2))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faaFB6BFF329BF39FA1E4"),
Err(InvalidGroups(3))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa-B6BFF329BF39FA1E4"),
Err(InvalidGroups(4))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa"),
Err(InvalidLength(18))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faaXB6BFF329BF39FA1E4"),
Err(InvalidCharacter('X', 18))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB-24fa-eB6BFF32-BF39FA1E4"),
Err(InvalidGroupLength(1, 3, 4))
);
assert_eq!(
Uuid::parse_str("01020304-1112-2122-3132-41424344"),
Err(InvalidGroupLength(4, 8, 12))
);
assert_eq!(
Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c"),
Err(InvalidLength(31))
);
assert_eq!(
Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c88"),
Err(InvalidLength(33))
);
assert_eq!(
Uuid::parse_str("67e5504410b1426f9247bb680e5fe0cg8"),
Err(InvalidLength(33))
);
assert_eq!(
Uuid::parse_str("67e5504410b1426%9247bb680e5fe0c8"),
Err(InvalidCharacter('%', 15))
);
assert_eq!(
Uuid::parse_str("231231212212423424324323477343246663"),
Err(InvalidLength(36))
);
// Valid
assert!(Uuid::parse_str("00000000000000000000000000000000").is_ok());
assert!(
Uuid::parse_str("67e55044-10b1-426f-9247-bb680e5fe0c8").is_ok()
);
assert!(
Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4").is_ok()
);
assert!(Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c8").is_ok());
assert!(
Uuid::parse_str("01020304-1112-2122-3132-414243444546").is_ok()
);
assert!(
Uuid::parse_str("urn:uuid:67e55044-10b1-426f-9247-bb680e5fe0c8")
.is_ok()
);
// Nil
let nil = Uuid::nil();
assert_eq!(
Uuid::parse_str("00000000000000000000000000000000").unwrap(),
nil
);
assert_eq!(
Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(),
nil
);
// Round-trip
let uuid_orig = test_util::new();
let orig_str = uuid_orig.to_string();
let uuid_out = Uuid::parse_str(&orig_str).unwrap();
assert_eq!(uuid_orig, uuid_out);
// Test error reporting
assert_eq!(
Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c"),
Err(InvalidLength(31))
);
assert_eq!(
Uuid::parse_str("67e550X410b1426f9247bb680e5fe0cd"),
Err(InvalidCharacter('X', 6))
);
assert_eq!(
Uuid::parse_str("67e550-4105b1426f9247bb680e5fe0c"),
Err(InvalidGroupLength(0, 6, 8))
);
assert_eq!(
Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF1-02BF39FA1E4"),
Err(InvalidGroupLength(3, 5, 4))
);
}
/// A simple-formatted UUID is exactly 32 hex digits, no separators.
#[test]
fn test_to_simple_string() {
    let uuid = test_util::new();
    let simple = uuid.simple().to_string();
    assert_eq!(simple.len(), 32);
    assert!(simple.chars().all(|ch| ch.is_digit(16)));
}
/// A hyphenated UUID is 36 characters: 32 hex digits plus 4 hyphens.
#[test]
fn test_to_hyphenated_string() {
    let uuid = test_util::new();
    let hyphenated = uuid.hyphenated().to_string();
    assert_eq!(hyphenated.len(), 36);
    assert!(hyphenated.chars().all(|ch| ch.is_digit(16) || ch == '-'));
}
/// `{:x}`/`{:X}` must produce all-lowercase/all-uppercase hex through
/// every formatting adapter (default, hyphenated, simple).
#[test]
fn test_upper_lower_hex() {
    use super::fmt::Write;
    let mut buf = String::new();
    let u = test_util::new();
    // Format `$target` into `$buf`, then check length and character class.
    // FIX: the length check used a bare `buf` inside the macro body; macro
    // hygiene resolves that to the definition-site binding, not the buffer
    // the caller passed in. It happened to be the same variable here, but
    // it silently checks the wrong buffer if the macro is ever invoked
    // with a different one — use `$buf` consistently.
    macro_rules! check {
        ($buf:ident, $format:expr, $target:expr, $len:expr, $cond:expr) => {
            $buf.clear();
            write!($buf, $format, $target).unwrap();
            assert!($buf.len() == $len);
            assert!($buf.chars().all($cond), "{}", $buf);
        };
    }
    check!(
        buf,
        "{:X}",
        u,
        36,
        |c| c.is_uppercase() || c.is_digit(10) || c == '-'
    );
    check!(
        buf,
        "{:X}",
        u.hyphenated(),
        36,
        |c| c.is_uppercase() || c.is_digit(10) || c == '-'
    );
    check!(
        buf,
        "{:X}",
        u.simple(),
        32,
        |c| c.is_uppercase() || c.is_digit(10)
    );
    check!(
        buf,
        "{:x}",
        u.hyphenated(),
        36,
        |c| c.is_lowercase() || c.is_digit(10) || c == '-'
    );
    check!(
        buf,
        "{:x}",
        u.simple(),
        32,
        |c| c.is_lowercase() || c.is_digit(10)
    );
}
/// Version-3 (namespaced, MD5) UUIDs hyphenate to the expected fixtures.
#[cfg(feature = "v3")]
#[test]
fn test_v3_to_hypenated_string() {
    for &(ref namespace, ref name, ref expected) in FIXTURE_V3 {
        let generated = Uuid::new_v3(*namespace, *name);
        assert_eq!(generated.hyphenated().to_string(), *expected);
    }
}
/// The URN form is "urn:uuid:" followed by the 36-char hyphenated UUID.
#[test]
fn test_to_urn_string() {
    let uuid = test_util::new();
    let urn = uuid.urn().to_string();
    assert!(urn.starts_with("urn:uuid:"));
    let tail = &urn[9..];
    assert_eq!(tail.len(), 36);
    assert!(tail.chars().all(|ch| ch.is_digit(16) || ch == '-'));
}
/// Stripping hyphens from the hyphenated form yields the simple form.
#[test]
fn test_to_simple_string_matching() {
    let uuid = test_util::new();
    let hyphenated = uuid.hyphenated().to_string();
    let simple = uuid.simple().to_string();
    let stripped: String = hyphenated.chars().filter(|&ch| ch != '-').collect();
    assert_eq!(stripped, simple);
}
/// Both the hyphenated form and the Display output parse back to the
/// original UUID.
#[test]
fn test_string_roundtrip() {
    let uuid = test_util::new();
    let hyphenated = uuid.hyphenated().to_string();
    assert_eq!(Uuid::parse_str(&hyphenated).unwrap(), uuid);
    let displayed = uuid.to_string();
    assert_eq!(Uuid::parse_str(&displayed).unwrap(), uuid);
}
/// `from_fields` lays the four fields out big-endian in the simple form.
#[test]
fn test_from_fields() {
    let time_low: u32 = 0xa1a2a3a4;
    let time_mid: u16 = 0xb1b2;
    let time_hi: u16 = 0xc1c2;
    let tail = [0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8];
    let uuid = Uuid::from_fields(time_low, time_mid, time_hi, &tail).unwrap();
    assert_eq!(
        uuid.simple().to_string(),
        "a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"
    );
}
/// `as_fields` exposes four components, none trivially zero for a
/// freshly generated UUID.
#[test]
fn test_as_fields() {
    let uuid = test_util::new();
    let (d1, d2, d3, d4) = uuid.as_fields();
    assert_ne!(d1, 0);
    assert_ne!(d2, 0);
    assert_ne!(d3, 0);
    assert_eq!(d4.len(), 8);
    assert!(d4.iter().any(|&byte| byte != 0));
}
/// Fields survive a `from_fields` → `as_fields` round trip unchanged.
#[test]
fn test_fields_roundtrip() {
    let d1_in: u32 = 0xa1a2a3a4;
    let d2_in: u16 = 0xb1b2;
    let d3_in: u16 = 0xc1c2;
    let d4_in = &[0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8];
    let uuid = Uuid::from_fields(d1_in, d2_in, d3_in, d4_in).unwrap();
    let (d1_out, d2_out, d3_out, d4_out) = uuid.as_fields();
    assert_eq!(d1_out, d1_in);
    assert_eq!(d2_out, d2_in);
    assert_eq!(d3_out, d3_in);
    assert_eq!(d4_out, d4_in);
}
/// `from_bytes` accepts a 16-byte slice and preserves byte order.
#[test]
fn test_from_bytes() {
    let bytes = [
        0xa1, 0xa2, 0xa3, 0xa4, 0xb1, 0xb2, 0xc1, 0xc2,
        0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
    ];
    let uuid = Uuid::from_bytes(&bytes).unwrap();
    assert_eq!(
        uuid.simple().to_string(),
        "a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"
    );
}
/// `from_uuid_bytes` consumes a 16-byte array and preserves byte order.
#[test]
fn test_from_uuid_bytes() {
    let bytes = [
        0xa1, 0xa2, 0xa3, 0xa4, 0xb1, 0xb2, 0xc1, 0xc2,
        0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
    ];
    let uuid = Uuid::from_uuid_bytes(bytes);
    assert_eq!(
        uuid.simple().to_string(),
        "a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"
    );
}
/// `as_bytes` exposes all 16 bytes, which are not all zero for a
/// generated UUID.
#[test]
fn test_as_bytes() {
    let uuid = test_util::new();
    let bytes = uuid.as_bytes();
    assert_eq!(bytes.len(), 16);
    assert!(bytes.iter().any(|&byte| byte != 0));
}
/// Bytes survive a `from_bytes` → `as_bytes` round trip unchanged.
#[test]
fn test_bytes_roundtrip() {
    let input: [u8; 16] = [
        0xa1, 0xa2, 0xa3, 0xa4, 0xb1, 0xb2, 0xc1, 0xc2,
        0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
    ];
    let uuid = Uuid::from_bytes(&input).unwrap();
    assert_eq!(uuid.as_bytes(), &input);
}
/// `from_random_bytes` stamps version/variant bits onto arbitrary bytes:
/// byte 6's high nibble becomes 4 (0xc1 → 0x41) and byte 8's top bits
/// become the RFC variant (0xd1 → 0x91).
#[test]
fn test_from_random_bytes() {
    let bytes = [
        0xa1, 0xa2, 0xa3, 0xa4, 0xb1, 0xb2, 0xc1, 0xc2,
        0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
    ];
    let uuid = Uuid::from_random_bytes(bytes);
    assert_eq!(
        uuid.simple().to_string(),
        "a1a2a3a4b1b241c291d2d3d4d5d6d7d8"
    );
}
/// UUIDs hash consistently, so they work as `HashSet` members.
#[test]
fn test_iterbytes_impl_for_uuid() {
    let mut set = std::collections::HashSet::new();
    let first = test_util::new();
    let second = test_util::new2();
    set.insert(first.clone());
    assert!(set.contains(&first));
    assert!(!set.contains(&second));
}
}
|
/*!
**rust-wtf8** is an implementation of [the WTF-8 encoding](https://simonsapin.github.io/wtf-8/).
It uses Rust’s type system to maintain
[well-formedness](https://simonsapin.github.io/wtf-8/#well-formed),
like the `String` and `&str` types do for UTF-8.
*/
// FIXME: moar docstrings
#![feature(globs)]
extern crate core;
use core::str::Utf16CodeUnits;
use std::fmt;
use std::mem::transmute;
use std::slice;
use std::str;
use std::string;
static UTF8_REPLACEMENT_CHARACTER: &'static [u8] = b"\xEF\xBF\xBD";
/// A Unicode code point: from U+0000 to U+10FFFF
/// Compare with the `char` type,
/// which represents a Unicode scalar value:
/// a code point that is not a surrogate (U+D800 to U+DFFF).
#[deriving(Eq, PartialEq, Ord, PartialOrd, Clone, Hash)]
pub struct CodePoint {
    // Invariant: value <= 0x10FFFF (enforced by the safe constructors).
    value: u32
}
impl fmt::Show for CodePoint {
    // Formats in the conventional "U+XXXX" notation, zero-padded to at
    // least four hex digits (e.g. U+0061, U+1F4A9).
    #[inline]
    fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::FormatError> {
        write!(formatter, "U+{:04X}", self.value)
    }
}
impl CodePoint {
    /// Wraps a `u32` without checking the U+10FFFF upper bound.
    /// Unsafe: the caller must guarantee `value` is a valid code point.
    #[inline]
    pub unsafe fn from_u32_unchecked(value: u32) -> CodePoint {
        CodePoint { value: value }
    }
    /// Wraps a `u32`, returning `None` for values beyond U+10FFFF.
    #[inline]
    pub fn from_u32(value: u32) -> Option<CodePoint> {
        if value <= 0x10FFFF {
            Some(CodePoint { value: value })
        } else {
            None
        }
    }
    /// Every `char` is a valid code point, so this cannot fail.
    #[inline]
    pub fn from_char(value: char) -> CodePoint {
        CodePoint { value: value as u32 }
    }
    /// The numeric value of this code point.
    #[inline]
    pub fn to_u32(&self) -> u32 {
        self.value
    }
    /// Converts to `char`; `None` for surrogates (U+D800 to U+DFFF),
    /// which are valid code points but not scalar values.
    #[inline]
    pub fn to_char(&self) -> Option<char> {
        if self.value >= 0xD800 && self.value <= 0xDFFF {
            None
        } else {
            Some(unsafe { transmute(self.value) })
        }
    }
}
/// A WTF-8 string.
#[deriving(Eq, PartialEq, Ord, PartialOrd, Clone, Hash)]
pub struct Wtf8String {
    // Invariant: `bytes` is well-formed WTF-8 — UTF-8 extended to allow
    // encoded surrogates, but never an encoded surrogate *pair*.
    bytes: Vec<u8>
}
impl fmt::Show for Wtf8String {
    // Delegates to the slice formatter, which quotes the string and
    // escapes lone surrogates as \uXXXX.
    #[inline]
    fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::FormatError> {
        self.as_slice().fmt(formatter)
    }
}
impl Wtf8String {
    /// Creates an empty WTF-8 string.
    #[inline]
    pub fn new() -> Wtf8String {
        Wtf8String { bytes: Vec::new() }
    }
    /// Creates an empty WTF-8 string with a pre-allocated byte capacity.
    #[inline]
    pub fn with_capacity(c: uint) -> Wtf8String {
        Wtf8String { bytes: Vec::with_capacity(c) }
    }
    /// Wraps a byte vector without validation.
    /// Unsafe: the caller must guarantee `bytes` is well-formed WTF-8.
    #[inline]
    pub unsafe fn from_bytes_unchecked(bytes: Vec<u8>) -> Wtf8String {
        Wtf8String { bytes: bytes }
    }
    /// Converts from UTF-8; infallible, since UTF-8 is a subset of WTF-8.
    #[inline]
    pub fn from_string(string: String) -> Wtf8String {
        Wtf8String { bytes: string.into_bytes() }
    }
    /// Copies a UTF-8 string slice; infallible for the same reason.
    #[inline]
    pub fn from_str(str: &str) -> Wtf8String {
        Wtf8String { bytes: str.as_bytes().to_vec() }
    }
    /// Converts potentially ill-formed UTF-16 losslessly: lone surrogate
    /// code units are preserved as WTF-8-encoded surrogate code points.
    pub fn from_ill_formed_utf16(v: &[u16]) -> Wtf8String {
        // v.len() code units is a lower bound for the output byte length.
        let mut string = Wtf8String::with_capacity(v.len());
        for item in str::utf16_items(v) {
            match item {
                str::ScalarValue(c) => string.push_char(c),
                // We’re violating some of the invariants of char here
                // in order to skip the surrogate pair check,
                // but such a pair would be a str::ScalarValue anyway.
                str::LoneSurrogate(s) => string.push_char(unsafe { transmute(s as u32) })
            }
        }
        string
    }
    /// Appends raw bytes without validation.
    /// Unsafe: the caller must keep the whole buffer well-formed WTF-8.
    #[inline]
    pub unsafe fn push_bytes_unchecked(&mut self, other: &[u8]) {
        self.bytes.push_all(other)
    }
    /// Appends a UTF-8 string slice.
    #[inline]
    pub fn push_str(&mut self, other: &str) {
        self.bytes.push_all(other.as_bytes())
    }
    /// Appends a WTF-8 slice. If `self` ends with a lead surrogate and
    /// `other` starts with a trail surrogate, the two are fused into the
    /// supplementary code point they encode, maintaining the invariant
    /// that no encoded surrogate pair ever appears in the buffer.
    #[inline]
    pub fn push_wtf8(&mut self, other: Wtf8Slice) {
        match ((&*self).final_lead_surrogate(), other.initial_trail_surrogate()) {
            // Replace newly paired surrogates by a supplementary code point.
            (Some(lead), Some(trail)) => {
                // An encoded surrogate is 3 bytes: drop ours, skip theirs.
                let len_without_lead_surrogate = self.len() - 3;
                self.bytes.truncate(len_without_lead_surrogate);
                let other_without_trail_surrogate = other.as_bytes().slice_from(3);
                // 4 bytes for the supplementary code point
                self.bytes.reserve_additional(4 + other_without_trail_surrogate.len());
                self.push_char(decode_surrogate_pair(lead, trail));
                self.bytes.push_all(other_without_trail_surrogate);
            }
            _ => self.bytes.push_all(other.as_bytes())
        }
    }
    /// Appends a Unicode scalar value (never a surrogate, by `char`'s type).
    #[inline]
    pub fn push_char(&mut self, c: char) {
        unsafe {
            // We’re violating some of the invariants of String here,
            // but String::push only assumes a subset of these invariants
            // that still hold for Wtf8String.
            let not_really_a_string: &mut String = transmute(self);
            not_really_a_string.push(c)
        }
    }
    /// Appends an arbitrary code point. A trail surrogate that directly
    /// follows a lead surrogate is fused with it into the supplementary
    /// code point the pair encodes (same invariant as `push_wtf8`).
    #[inline]
    pub fn push(&mut self, code_point: CodePoint) {
        match code_point.to_u32() {
            trail @ 0xDC00...0xDFFF => {
                match (&*self).final_lead_surrogate() {
                    Some(lead) => {
                        // Drop the 3-byte lead surrogate, push the fused pair.
                        let len_without_lead_surrogate = self.len() - 3;
                        self.bytes.truncate(len_without_lead_surrogate);
                        self.push_char(decode_surrogate_pair(lead, trail as u16));
                        return
                    }
                    _ => {}
                }
            }
            _ => {}
        }
        unsafe {
            // We’re violating some of the invariants of String and char here,
            // but String::push only assumes a subset of these invariants
            // that still hold for Wtf8String and CodePoint.
            let not_really_a_string: &mut String = transmute(self);
            let not_really_a_char: char = transmute(code_point.to_u32());
            not_really_a_string.push(not_really_a_char)
        }
    }
    /// Consumes the string and returns the underlying byte vector.
    #[inline]
    pub fn into_bytes(self) -> Vec<u8> {
        self.bytes
    }
    /// Converts to UTF-8 without copying; fails (handing `self` back
    /// intact) if the string contains any surrogate.
    pub fn into_string(self) -> Result<String, Wtf8String> {
        match self.next_surrogate(0) {
            None => Ok(unsafe { string::raw::from_utf8(self.bytes) }),
            Some(_) => Err(self),
        }
    }
    /// Converts to UTF-8, overwriting each surrogate in place with
    /// U+FFFD REPLACEMENT CHARACTER (both encodings are 3 bytes, so the
    /// buffer never needs resizing).
    pub fn into_string_lossy(mut self) -> String {
        let mut pos = 0;
        loop {
            match self.next_surrogate(pos) {
                Some((surrogate_pos, _)) => {
                    pos = surrogate_pos + 3;
                    slice::bytes::copy_memory(
                        self.bytes.slice_mut(surrogate_pos, pos),
                        UTF8_REPLACEMENT_CHARACTER
                    );
                },
                None => return unsafe { string::raw::from_utf8(self.bytes) }
            }
        }
    }
}
// Builds a `Wtf8String` from an iterator of code points; delegates to
// `extend`, which in turn handles surrogate-pair fusion via `push`.
impl FromIterator<CodePoint> for Wtf8String {
    fn from_iter<T: Iterator<CodePoint>>(iterator: T) -> Wtf8String {
        let mut result = Wtf8String::new();
        result.extend(iterator);
        result
    }
}
// Appends every code point yielded by an iterator, pre-reserving a
// conservative lower bound of one byte per code point.
impl Extendable<CodePoint> for Wtf8String {
    fn extend<T: Iterator<CodePoint>>(&mut self, mut iterator: T) {
        let (lower_bound, _upper_bound) = iterator.size_hint();
        // Lower bound of one byte per code point (ASCII only)
        self.bytes.reserve_additional(lower_bound);
        for code_point in iterator {
            self.push(code_point);
        }
    }
}
/// A slice of WTF-8 string.
#[deriving(Eq, PartialEq, Ord, PartialOrd, Clone, Hash)]
pub struct Wtf8Slice<'a> {
    // Invariant: `bytes` is well-formed WTF-8 (see `Wtf8String`).
    bytes: &'a [u8]
}
impl<'a> fmt::Show for Wtf8Slice<'a> {
    /// Formats the slice as a double-quoted string in which every lone
    /// surrogate is escaped as `\uXXXX`; all other bytes are written raw.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::FormatError> {
        // FIX: terminate the first `try!` statement with a semicolon, for
        // consistency with the identical `try!` statements below (and to
        // parse on compilers that reject unterminated macro statements).
        try!(formatter.write(b"\""));
        let mut pos = 0;
        loop {
            match self.next_surrogate(pos) {
                None => break,
                Some((surrogate_pos, surrogate)) => {
                    // Raw bytes up to the surrogate, then its escape.
                    try!(formatter.write(self.as_bytes().slice(pos, surrogate_pos)));
                    try!(write!(formatter, "\\u{:X}", surrogate));
                    pos = surrogate_pos + 3;
                }
            }
        }
        // Remainder after the last surrogate, then the closing quote.
        try!(formatter.write(self.as_bytes().slice_from(pos)));
        formatter.write(b"\"")
    }
}
impl<'a> Wtf8Slice<'a> {
    /// Wraps a byte slice without validation.
    /// Unsafe: the caller must guarantee `bytes` is well-formed WTF-8.
    #[inline]
    pub unsafe fn from_bytes_unchecked(bytes: &[u8]) -> Wtf8Slice {
        Wtf8Slice { bytes: bytes }
    }
    /// Views a UTF-8 string slice as WTF-8.
    #[inline]
    pub fn from_str(value: &str) -> Wtf8Slice {
        // UTF-8 is always well-formed WTF-8, so no check is needed.
        Wtf8Slice { bytes: value.as_bytes() }
    }
}
pub trait Wtf8Methods {
    /// Borrows the value as a WTF-8 slice; every other method here is a
    /// default derived from this single required one.
    fn as_slice(&self) -> Wtf8Slice;
    /// The raw underlying bytes (well-formed WTF-8).
    #[inline]
    fn as_bytes(&self) -> &[u8] {
        self.as_slice().bytes
    }
    /// Length in bytes, not code points.
    #[inline]
    fn len(&self) -> uint {
        self.as_bytes().len()
    }
    /// Iterate over the string’s code points.
    #[inline]
    fn code_points(&self) -> Wtf8CodePoints {
        // Pretend the bytes are UTF-8 to reuse `str`'s decoder; encoded
        // surrogates decode through it because WTF-8 uses the same byte
        // layout a UTF-8-style 3-byte sequence would.
        let crazy_unsafe_str = unsafe { str::raw::from_utf8(self.as_bytes()) };
        Wtf8CodePoints { crazy_unsafe_chars: crazy_unsafe_str.chars() }
    }
    /// Views the string as UTF-8, or `None` if it contains a surrogate.
    #[inline]
    fn as_str(&self) -> Option<&str> {
        // Well-formed WTF-8 is also well-formed UTF-8
        // if and only if it contains no surrogate.
        match self.next_surrogate(0) {
            None => Some(unsafe { str::raw::from_utf8(self.as_bytes()) }),
            Some(_) => None,
        }
    }
    /// Converts to UTF-8, replacing each surrogate with U+FFFD.
    /// Borrows (no allocation) when there is no surrogate at all.
    fn to_string_lossy(&self) -> str::MaybeOwned {
        let surrogate_pos = match self.next_surrogate(0) {
            None => return str::Slice(unsafe { str::raw::from_utf8(self.as_bytes()) }),
            Some((pos, _)) => pos,
        };
        let wtf8_bytes = self.as_bytes();
        // Each 3-byte surrogate becomes the 3-byte U+FFFD encoding, so
        // the output is exactly `self.len()` bytes.
        let mut utf8_bytes = Vec::with_capacity(self.len());
        utf8_bytes.push_all(wtf8_bytes.slice_to(surrogate_pos));
        utf8_bytes.push_all(UTF8_REPLACEMENT_CHARACTER);
        let mut pos = surrogate_pos + 3;
        loop {
            match self.next_surrogate(pos) {
                Some((surrogate_pos, _)) => {
                    utf8_bytes.push_all(wtf8_bytes.slice(pos, surrogate_pos));
                    utf8_bytes.push_all(UTF8_REPLACEMENT_CHARACTER);
                    pos = surrogate_pos + 3;
                },
                None => {
                    utf8_bytes.push_all(wtf8_bytes.slice_from(pos));
                    return str::Owned(unsafe { string::raw::from_utf8(utf8_bytes) })
                }
            }
        }
    }
    /// Converts to potentially ill-formed UTF-16: lone surrogates come
    /// out as lone UTF-16 surrogate code units.
    #[inline]
    fn to_ill_formed_utf16_units(&self) -> Utf16CodeUnits {
        unsafe {
            // We’re violating some of the invariants of &str here,
            // but &str::to_utf16 only assumes a subset of these invariants
            // that still hold for Wtf8Slice.
            let not_really_a_str = str::raw::from_utf8(self.as_bytes());
            not_really_a_str.utf16_units()
        }
    }
}
impl Wtf8Methods for Wtf8String {
    // An owned string borrows its whole byte buffer as the slice.
    #[inline]
    fn as_slice(&self) -> Wtf8Slice {
        Wtf8Slice { bytes: self.bytes.as_slice() }
    }
}
impl<'a> Wtf8Methods for Wtf8Slice<'a> {
    // A slice is already the canonical form; dereferencing copies the
    // (pointer, length) pair.
    #[inline]
    fn as_slice(&self) -> Wtf8Slice {
        *self
    }
}
trait PrivateWtf8Methods {
    // Finds the next WTF-8-encoded surrogate at or after byte `pos`,
    // returning its byte position and its UTF-16 code unit value.
    fn next_surrogate(&self, mut pos: uint) -> Option<(uint, u16)>;
    // The lead surrogate (U+D800..U+DBFF) ending the string, if any.
    fn final_lead_surrogate(&self) -> Option<u16>;
    // The trail surrogate (U+DC00..U+DFFF) starting the string, if any.
    fn initial_trail_surrogate(&self) -> Option<u16>;
}
impl<T> PrivateWtf8Methods for T where T: Wtf8Methods {
    /// Scans forward from byte `pos` for the next encoded surrogate,
    /// stepping over whole code points by lead-byte class (the buffer is
    /// trusted to be well-formed WTF-8).
    #[inline]
    fn next_surrogate(&self, mut pos: uint) -> Option<(uint, u16)> {
        let mut iter = self.as_bytes().slice_from(pos).iter();
        loop {
            let b = match iter.next() {
                None => return None,
                Some(&b) => b,
            };
            if b < 0x80 {
                // 1-byte sequence (ASCII).
                pos += 1;
            } else if b < 0xE0 {
                // 2-byte sequence.
                iter.next();
                pos += 2;
            } else if b == 0xED {
                // 0xED leads U+D000..U+DFFF; the second byte is >= 0xA0
                // exactly for the surrogate range U+D800..U+DFFF.
                match (iter.next(), iter.next()) {
                    (Some(&b2), Some(&b3)) if b2 >= 0xA0 => {
                        return Some((pos, decode_surrogate(b2, b3)))
                    }
                    _ => pos += 3
                }
            } else if b < 0xF0 {
                // Other 3-byte sequence.
                iter.next();
                iter.next();
                pos += 3;
            } else {
                // 4-byte sequence.
                iter.next();
                iter.next();
                iter.next();
                pos += 4;
            }
        }
    }
    /// Lead surrogate encoded in the final three bytes, if any.
    /// Lead surrogates (U+D800..U+DBFF) encode as ED A0..AF xx.
    #[inline]
    fn final_lead_surrogate(&self) -> Option<u16> {
        let len = self.len();
        if len < 3 {
            return None
        }
        match self.as_bytes().slice_from(len - 3) {
            [0xED, b2 @ 0xA0...0xAF, b3] => Some(decode_surrogate(b2, b3)),
            _ => None
        }
    }
    /// Trail surrogate encoded in the first three bytes, if any.
    /// Trail surrogates (U+DC00..U+DFFF) encode as ED B0..BF xx.
    #[inline]
    fn initial_trail_surrogate(&self) -> Option<u16> {
        let len = self.len();
        if len < 3 {
            return None
        }
        match self.as_bytes().slice_to(3) {
            // BUG FIX: this pattern previously matched 0xA0...0xAF, which
            // is the *lead*-surrogate range; trail surrogates have second
            // byte 0xB0..0xBF. With the wrong range, concatenation never
            // detected a trailing surrogate in `other`, so newly adjacent
            // pairs were not fused and the WTF-8 invariant was broken.
            [0xED, b2 @ 0xB0...0xBF, b3] => Some(decode_surrogate(b2, b3)),
            _ => None
        }
    }
}
// Reassembles the surrogate code unit from the trailing two bytes of its
// 3-byte encoding: the payload is (b2 & 0x3F) << 6 | (b3 & 0x3F), and
// since b2 >= 0xA0 for surrogates the 0x800 bit of the payload is always
// set, making the OR with 0xD800 equivalent to adding the 0xD000 lead.
#[inline]
fn decode_surrogate(second_byte: u8, third_byte: u8) -> u16 {
    // The first byte is assumed to be 0xED
    0xD800 | (second_byte as u16 & 0x3F) << 6 | third_byte as u16 & 0x3F
}
// Decodes a UTF-16 surrogate pair into the supplementary code point it
// represents: 0x10000 + ((lead - 0xD800) << 10 | (trail - 0xDC00)).
#[inline]
fn decode_surrogate_pair(lead: u16, trail: u16) -> char {
    // BUG FIX: the supplementary-plane offset is 0x10000 (U+10000 is the
    // first code point a surrogate pair can encode), not 0x100000; the
    // old constant produced code points 0xF0000 too high, past U+10FFFF
    // for most pairs.
    let code_point = 0x10000 + (((lead - 0xD800) as u32 << 10) | (trail - 0xDC00) as u32);
    // Results lie in U+10000..=U+10FFFF, all valid scalar values.
    unsafe { transmute(code_point) }
}
/// Iterator for the code points of a WTF-8 string
///
/// Created with the method `.code_points()`.
#[deriving(Clone)]
pub struct Wtf8CodePoints<'a> {
    // A `str::Chars` over bytes that are only *pretending* to be UTF-8:
    // they may contain encoded surrogates (see `code_points()`).
    crazy_unsafe_chars: str::Chars<'a>
}
impl<'a> Iterator<CodePoint> for Wtf8CodePoints<'a> {
    /// Yields the next code point, converting each (possibly surrogate)
    /// `char` produced by the underlying decoder back into a `CodePoint`.
    #[inline]
    fn next(&mut self) -> Option<CodePoint> {
        self.crazy_unsafe_chars
            .next()
            .map(|crazy_unsafe_char| unsafe {
                CodePoint::from_u32_unchecked(crazy_unsafe_char as u32)
            })
    }
}
// Unit tests. Note the old-Rust style of omitting semicolons after
// statement macros; code below is unchanged, only comments were added.
#[cfg(test)]
mod tests {
    use super::*;
    // --- CodePoint ---
    #[test]
    fn code_point_from_u32() {
        assert!(CodePoint::from_u32(0).is_some())
        assert!(CodePoint::from_u32(0xD800).is_some())
        assert!(CodePoint::from_u32(0x10FFFF).is_some())
        assert!(CodePoint::from_u32(0x110000).is_none())
    }
    #[test]
    fn code_point_to_u32() {
        fn c(value: u32) -> CodePoint { CodePoint::from_u32(value).unwrap() }
        assert_eq!(c(0).to_u32(), 0)
        assert_eq!(c(0xD800).to_u32(), 0xD800)
        assert_eq!(c(0x10FFFF).to_u32(), 0x10FFFF)
    }
    #[test]
    fn code_point_from_char() {
        assert_eq!(CodePoint::from_char('a').to_u32(), 0x61)
        assert_eq!(CodePoint::from_char('💩').to_u32(), 0x1F4A9)
    }
    #[test]
    fn code_point_to_string() {
        assert_eq!(CodePoint::from_char('a').to_string(), "U+0061".to_string())
        assert_eq!(CodePoint::from_char('💩').to_string(), "U+1F4A9".to_string())
    }
    #[test]
    fn code_point_to_char() {
        fn c(value: u32) -> CodePoint { CodePoint::from_u32(value).unwrap() }
        assert_eq!(c(0x61).to_char(), Some('a'))
        assert_eq!(c(0x1F4A9).to_char(), Some('💩'))
        // Surrogates are not scalar values, so there is no char for them.
        assert_eq!(c(0xD800).to_char(), None)
    }
    // --- Wtf8String ---
    #[test]
    fn wtf8string_new() {
        assert_eq!(Wtf8String::new().as_bytes(), b"");
    }
    #[test]
    fn wtf8string_from_str() {
        assert_eq!(Wtf8String::from_str("").as_bytes(), b"");
        assert_eq!(Wtf8String::from_str("aé 💩").as_bytes(),
                   b"a\xC3\xA9 \xF0\x9F\x92\xA9");
    }
    #[test]
    fn wtf8string_from_string() {
        assert_eq!(Wtf8String::from_string("".to_string()).as_bytes(), b"");
        assert_eq!(Wtf8String::from_string("aé 💩".to_string()).as_bytes(),
                   b"a\xC3\xA9 \xF0\x9F\x92\xA9");
    }
    #[test]
    fn wtf8string_push_str() {
        let mut string = Wtf8String::new();
        assert_eq!(string.as_bytes(), b"");
        string.push_str("aé 💩");
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 \xF0\x9F\x92\xA9");
    }
    #[test]
    fn wtf8string_push_char() {
        let mut string = Wtf8String::from_str("aé ");
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 ");
        string.push_char('💩');
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 \xF0\x9F\x92\xA9");
    }
    #[test]
    fn wtf8string_push() {
        let mut string = Wtf8String::from_str("aé ");
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 ");
        string.push(CodePoint::from_char('💩'));
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 \xF0\x9F\x92\xA9");
        // FIXME test surrogate pair
    }
    #[test]
    fn wtf8string_push_wtf8() {
        let mut string = Wtf8String::from_str("aé");
        assert_eq!(string.as_bytes(), b"a\xC3\xA9");
        string.push_wtf8(Wtf8Slice::from_str(" 💩"));
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 \xF0\x9F\x92\xA9");
        // FIXME test surrogate pair
    }
    #[test]
    fn wtf8string_into_bytes() {
        assert_eq!(Wtf8String::from_str("aé 💩").into_bytes(),
                   b"a\xC3\xA9 \xF0\x9F\x92\xA9".to_vec());
    }
    #[test]
    fn wtf8string_into_string() {
        let mut string = Wtf8String::from_str("aé 💩");
        assert_eq!(string.clone().into_string(), Ok("aé 💩".to_string()));
        // Adding a lone surrogate makes the conversion fail.
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(string.clone().into_string(), Err(string));
    }
    #[test]
    fn wtf8string_into_string_lossy() {
        let mut string = Wtf8String::from_str("aé 💩");
        assert_eq!(string.clone().into_string_lossy(), "aé 💩".to_string());
        // A lone surrogate becomes U+FFFD in the lossy conversion.
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(string.clone().into_string_lossy(), "aé 💩�".to_string());
    }
    #[test]
    fn wtf8string_from_iterator() {
        fn c(value: &u32) -> CodePoint { CodePoint::from_u32(*value).unwrap() }
        assert_eq!([0x61, 0xE9, 0x20, 0x1F4A9].iter().map(c).collect::<Wtf8String>().as_bytes(),
                   b"a\xC3\xA9 \xF0\x9F\x92\xA9")
        // FIXME test surrogate pair
    }
    #[test]
    fn wtf8string_extend() {
        fn c(value: &u32) -> CodePoint { CodePoint::from_u32(*value).unwrap() }
        let mut string = Wtf8String::from_str("aé");
        string.extend([0x20, 0x1F4A9].iter().map(c));
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 \xF0\x9F\x92\xA9")
        // FIXME test surrogate pair
    }
    #[test]
    fn wtf8string_show() {
        let mut string = Wtf8String::from_str("aé 💩");
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(format!("{}", string).as_slice(), r#""aé 💩\uD800""#);
    }
    #[test]
    fn wtf8string_as_slice() {
        assert_eq!(Wtf8String::from_str("aé").as_slice(), Wtf8Slice::from_str("aé"));
    }
    // --- Wtf8Slice ---
    #[test]
    fn wtf8slice_show() {
        let mut string = Wtf8String::from_str("aé 💩");
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(format!("{}", string.as_slice()).as_slice(), r#""aé 💩\uD800""#);
    }
    #[test]
    fn wtf8slice_from_str() {
        assert_eq!(Wtf8Slice::from_str("").as_bytes(), b"");
        assert_eq!(Wtf8Slice::from_str("aé 💩").as_bytes(),
                   b"a\xC3\xA9 \xF0\x9F\x92\xA9");
    }
    #[test]
    fn wtf8slice_len() {
        // len() counts bytes, not code points.
        assert_eq!(Wtf8Slice::from_str("").len(), 0);
        assert_eq!(Wtf8Slice::from_str("aé 💩").len(), 8);
    }
    #[test]
    fn wtf8slice_code_points() {
        let chars = Wtf8Slice::from_str("é 💩").code_points()
                        .map(|c| c.to_char()).collect::<Vec<_>>();
        assert_eq!(chars, vec![Some('é'), Some(' '), Some('💩')]);
        // FIXME test surrogates
    }
    #[test]
    fn wtf8slice_as_str() {
        assert_eq!(Wtf8Slice::from_str("").as_str(), Some(""));
        assert_eq!(Wtf8Slice::from_str("aé 💩").as_str(), Some("aé 💩"));
        let mut string = Wtf8String::new();
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(string.as_str(), None);
    }
    #[test]
    fn wtf8slice_to_string_lossy() {
        use std::str::{Owned, Slice};
        assert_eq!(Wtf8Slice::from_str("").to_string_lossy(), Slice(""));
        assert_eq!(Wtf8Slice::from_str("aé 💩").to_string_lossy(), Slice("aé 💩"));
        let mut string = Wtf8String::from_str("aé 💩");
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(string.to_string_lossy(), Owned("aé 💩�".to_string()));
    }
    // FIXME: UTF-16 tests
}
Replace `push_bytes_unchecked` with `as_mut_vec`.
/*!
**rust-wtf8** is an implementation of [the WTF-8 encoding](https://simonsapin.github.io/wtf-8/).
It uses Rust’s type system to maintain
[well-formedness](https://simonsapin.github.io/wtf-8/#well-formed),
like the `String` and `&str` types do for UTF-8.
*/
// FIXME: moar docstrings
#![feature(globs)]
extern crate core;
use core::str::Utf16CodeUnits;
use std::fmt;
use std::mem::transmute;
use std::slice;
use std::str;
use std::string;
static UTF8_REPLACEMENT_CHARACTER: &'static [u8] = b"\xEF\xBF\xBD";
/// A Unicode code point: from U+0000 to U+10FFFF
/// Compare with the `char` type,
/// which represents a Unicode scalar value:
/// a code point that is not a surrogate (U+D800 to U+DFFF).
#[deriving(Eq, PartialEq, Ord, PartialOrd, Clone, Hash)]
pub struct CodePoint {
value: u32
}
impl fmt::Show for CodePoint {
#[inline]
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::FormatError> {
write!(formatter, "U+{:04X}", self.value)
}
}
impl CodePoint {
#[inline]
pub unsafe fn from_u32_unchecked(value: u32) -> CodePoint {
CodePoint { value: value }
}
#[inline]
pub fn from_u32(value: u32) -> Option<CodePoint> {
match value {
0 ... 0x10FFFF => Some(CodePoint { value: value }),
_ => None
}
}
#[inline]
pub fn from_char(value: char) -> CodePoint {
CodePoint { value: value as u32 }
}
#[inline]
pub fn to_u32(&self) -> u32 {
self.value
}
#[inline]
pub fn to_char(&self) -> Option<char> {
match self.value {
0xD800 ... 0xDFFF => None,
_ => Some(unsafe { transmute(self.value) })
}
}
}
/// A WTF-8 string.
#[deriving(Eq, PartialEq, Ord, PartialOrd, Clone, Hash)]
pub struct Wtf8String {
bytes: Vec<u8>
}
impl fmt::Show for Wtf8String {
#[inline]
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::FormatError> {
self.as_slice().fmt(formatter)
}
}
impl Wtf8String {
    /// Creates an empty WTF-8 string.
    #[inline]
    pub fn new() -> Wtf8String {
        Wtf8String { bytes: Vec::new() }
    }
    /// Creates an empty WTF-8 string with a pre-allocated byte capacity.
    #[inline]
    pub fn with_capacity(c: uint) -> Wtf8String {
        Wtf8String { bytes: Vec::with_capacity(c) }
    }
    /// Wraps a byte vector without validation.
    /// Unsafe: the caller must guarantee `bytes` is well-formed WTF-8.
    #[inline]
    pub unsafe fn from_bytes_unchecked(bytes: Vec<u8>) -> Wtf8String {
        Wtf8String { bytes: bytes }
    }
    /// Converts from UTF-8; infallible, since UTF-8 is a subset of WTF-8.
    #[inline]
    pub fn from_string(string: String) -> Wtf8String {
        Wtf8String { bytes: string.into_bytes() }
    }
    /// Copies a UTF-8 string slice; infallible for the same reason.
    #[inline]
    pub fn from_str(str: &str) -> Wtf8String {
        Wtf8String { bytes: str.as_bytes().to_vec() }
    }
    /// Converts potentially ill-formed UTF-16 losslessly: lone surrogate
    /// code units are preserved as WTF-8-encoded surrogate code points.
    pub fn from_ill_formed_utf16(v: &[u16]) -> Wtf8String {
        // v.len() code units is a lower bound for the output byte length.
        let mut string = Wtf8String::with_capacity(v.len());
        for item in str::utf16_items(v) {
            match item {
                str::ScalarValue(c) => string.push_char(c),
                // We’re violating some of the invariants of char here
                // in order to skip the surrogate pair check,
                // but such a pair would be a str::ScalarValue anyway.
                str::LoneSurrogate(s) => string.push_char(unsafe { transmute(s as u32) })
            }
        }
        string
    }
    /// Exposes the underlying byte buffer for direct mutation.
    /// Unsafe: the caller must keep the buffer well-formed WTF-8.
    #[inline]
    pub unsafe fn as_mut_vec(&mut self) -> &mut Vec<u8> {
        &mut self.bytes
    }
    /// Appends a UTF-8 string slice.
    #[inline]
    pub fn push_str(&mut self, other: &str) {
        self.bytes.push_all(other.as_bytes())
    }
    /// Appends a WTF-8 slice. If `self` ends with a lead surrogate and
    /// `other` starts with a trail surrogate, the two are fused into the
    /// supplementary code point they encode, maintaining the invariant
    /// that no encoded surrogate pair ever appears in the buffer.
    #[inline]
    pub fn push_wtf8(&mut self, other: Wtf8Slice) {
        match ((&*self).final_lead_surrogate(), other.initial_trail_surrogate()) {
            // Replace newly paired surrogates by a supplementary code point.
            (Some(lead), Some(trail)) => {
                // An encoded surrogate is 3 bytes: drop ours, skip theirs.
                let len_without_lead_surrogate = self.len() - 3;
                self.bytes.truncate(len_without_lead_surrogate);
                let other_without_trail_surrogate = other.as_bytes().slice_from(3);
                // 4 bytes for the supplementary code point
                self.bytes.reserve_additional(4 + other_without_trail_surrogate.len());
                self.push_char(decode_surrogate_pair(lead, trail));
                self.bytes.push_all(other_without_trail_surrogate);
            }
            _ => self.bytes.push_all(other.as_bytes())
        }
    }
    /// Appends a Unicode scalar value (never a surrogate, by `char`'s type).
    #[inline]
    pub fn push_char(&mut self, c: char) {
        unsafe {
            // We’re violating some of the invariants of String here,
            // but String::push only assumes a subset of these invariants
            // that still hold for Wtf8String.
            let not_really_a_string: &mut String = transmute(self);
            not_really_a_string.push(c)
        }
    }
    /// Appends an arbitrary code point. A trail surrogate that directly
    /// follows a lead surrogate is fused with it into the supplementary
    /// code point the pair encodes (same invariant as `push_wtf8`).
    #[inline]
    pub fn push(&mut self, code_point: CodePoint) {
        match code_point.to_u32() {
            trail @ 0xDC00...0xDFFF => {
                match (&*self).final_lead_surrogate() {
                    Some(lead) => {
                        // Drop the 3-byte lead surrogate, push the fused pair.
                        let len_without_lead_surrogate = self.len() - 3;
                        self.bytes.truncate(len_without_lead_surrogate);
                        self.push_char(decode_surrogate_pair(lead, trail as u16));
                        return
                    }
                    _ => {}
                }
            }
            _ => {}
        }
        unsafe {
            // We’re violating some of the invariants of String and char here,
            // but String::push only assumes a subset of these invariants
            // that still hold for Wtf8String and CodePoint.
            let not_really_a_string: &mut String = transmute(self);
            let not_really_a_char: char = transmute(code_point.to_u32());
            not_really_a_string.push(not_really_a_char)
        }
    }
    /// Consumes the string and returns the underlying byte vector.
    #[inline]
    pub fn into_bytes(self) -> Vec<u8> {
        self.bytes
    }
    /// Converts to UTF-8 without copying; fails (handing `self` back
    /// intact) if the string contains any surrogate.
    pub fn into_string(self) -> Result<String, Wtf8String> {
        match self.next_surrogate(0) {
            None => Ok(unsafe { string::raw::from_utf8(self.bytes) }),
            Some(_) => Err(self),
        }
    }
    /// Converts to UTF-8, overwriting each surrogate in place with
    /// U+FFFD REPLACEMENT CHARACTER (both encodings are 3 bytes, so the
    /// buffer never needs resizing).
    pub fn into_string_lossy(mut self) -> String {
        let mut pos = 0;
        loop {
            match self.next_surrogate(pos) {
                Some((surrogate_pos, _)) => {
                    pos = surrogate_pos + 3;
                    slice::bytes::copy_memory(
                        self.bytes.slice_mut(surrogate_pos, pos),
                        UTF8_REPLACEMENT_CHARACTER
                    );
                },
                None => return unsafe { string::raw::from_utf8(self.bytes) }
            }
        }
    }
}
impl FromIterator<CodePoint> for Wtf8String {
fn from_iter<T: Iterator<CodePoint>>(iterator: T) -> Wtf8String {
let mut string = Wtf8String::new();
string.extend(iterator);
string
}
}
impl Extendable<CodePoint> for Wtf8String {
fn extend<T: Iterator<CodePoint>>(&mut self, mut iterator: T) {
let (low, _high) = iterator.size_hint();
// Lower bound of one byte per code point (ASCII only)
self.bytes.reserve_additional(low);
for code_point in iterator {
self.push(code_point);
}
}
}
/// A slice of WTF-8 string.
#[deriving(Eq, PartialEq, Ord, PartialOrd, Clone, Hash)]
pub struct Wtf8Slice<'a> {
bytes: &'a [u8]
}
impl<'a> fmt::Show for Wtf8Slice<'a> {
    /// Formats the slice as a double-quoted string in which every lone
    /// surrogate is escaped as `\uXXXX`; all other bytes are written raw.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::FormatError> {
        // FIX: terminate the first `try!` statement with a semicolon, for
        // consistency with the identical `try!` statements below (and to
        // parse on compilers that reject unterminated macro statements).
        try!(formatter.write(b"\""));
        let mut pos = 0;
        loop {
            match self.next_surrogate(pos) {
                None => break,
                Some((surrogate_pos, surrogate)) => {
                    // Raw bytes up to the surrogate, then its escape.
                    try!(formatter.write(self.as_bytes().slice(pos, surrogate_pos)));
                    try!(write!(formatter, "\\u{:X}", surrogate));
                    pos = surrogate_pos + 3;
                }
            }
        }
        // Remainder after the last surrogate, then the closing quote.
        try!(formatter.write(self.as_bytes().slice_from(pos)));
        formatter.write(b"\"")
    }
}
impl<'a> Wtf8Slice<'a> {
#[inline]
pub unsafe fn from_bytes_unchecked(bytes: &[u8]) -> Wtf8Slice {
Wtf8Slice { bytes: bytes }
}
#[inline]
pub fn from_str(value: &str) -> Wtf8Slice {
unsafe { Wtf8Slice::from_bytes_unchecked(value.as_bytes()) }
}
}
pub trait Wtf8Methods {
fn as_slice(&self) -> Wtf8Slice;
#[inline]
fn as_bytes(&self) -> &[u8] {
self.as_slice().bytes
}
#[inline]
fn len(&self) -> uint {
self.as_bytes().len()
}
/// Iterate over the string’s code points.
#[inline]
fn code_points(&self) -> Wtf8CodePoints {
let crazy_unsafe_str = unsafe { str::raw::from_utf8(self.as_bytes()) };
Wtf8CodePoints { crazy_unsafe_chars: crazy_unsafe_str.chars() }
}
#[inline]
fn as_str(&self) -> Option<&str> {
// Well-formed WTF-8 is also well-formed UTF-8
// if and only if it contains no surrogate.
match self.next_surrogate(0) {
None => Some(unsafe { str::raw::from_utf8(self.as_bytes()) }),
Some(_) => None,
}
}
fn to_string_lossy(&self) -> str::MaybeOwned {
let surrogate_pos = match self.next_surrogate(0) {
None => return str::Slice(unsafe { str::raw::from_utf8(self.as_bytes()) }),
Some((pos, _)) => pos,
};
let wtf8_bytes = self.as_bytes();
let mut utf8_bytes = Vec::with_capacity(self.len());
utf8_bytes.push_all(wtf8_bytes.slice_to(surrogate_pos));
utf8_bytes.push_all(UTF8_REPLACEMENT_CHARACTER);
let mut pos = surrogate_pos + 3;
loop {
match self.next_surrogate(pos) {
Some((surrogate_pos, _)) => {
utf8_bytes.push_all(wtf8_bytes.slice(pos, surrogate_pos));
utf8_bytes.push_all(UTF8_REPLACEMENT_CHARACTER);
pos = surrogate_pos + 3;
},
None => {
utf8_bytes.push_all(wtf8_bytes.slice_from(pos));
return str::Owned(unsafe { string::raw::from_utf8(utf8_bytes) })
}
}
}
}
#[inline]
fn to_ill_formed_utf16_units(&self) -> Utf16CodeUnits {
unsafe {
// We’re violating some of the invariants of &str here,
// but &str::to_utf16 only assumes a subset of these invariants
// that still hold for Wtf8Slice.
let not_really_a_str = str::raw::from_utf8(self.as_bytes());
not_really_a_str.utf16_units()
}
}
}
impl Wtf8Methods for Wtf8String {
#[inline]
fn as_slice(&self) -> Wtf8Slice {
Wtf8Slice { bytes: self.bytes.as_slice() }
}
}
impl<'a> Wtf8Methods for Wtf8Slice<'a> {
#[inline]
fn as_slice(&self) -> Wtf8Slice {
*self
}
}
trait PrivateWtf8Methods {
fn next_surrogate(&self, mut pos: uint) -> Option<(uint, u16)>;
fn final_lead_surrogate(&self) -> Option<u16>;
fn initial_trail_surrogate(&self) -> Option<u16>;
}
impl<T> PrivateWtf8Methods for T where T: Wtf8Methods {
    /// Scans forward from byte `pos` for the next encoded surrogate,
    /// stepping over whole code points by lead-byte class (the buffer is
    /// trusted to be well-formed WTF-8).
    #[inline]
    fn next_surrogate(&self, mut pos: uint) -> Option<(uint, u16)> {
        let mut iter = self.as_bytes().slice_from(pos).iter();
        loop {
            let b = match iter.next() {
                None => return None,
                Some(&b) => b,
            };
            if b < 0x80 {
                // 1-byte sequence (ASCII).
                pos += 1;
            } else if b < 0xE0 {
                // 2-byte sequence.
                iter.next();
                pos += 2;
            } else if b == 0xED {
                // 0xED leads U+D000..U+DFFF; the second byte is >= 0xA0
                // exactly for the surrogate range U+D800..U+DFFF.
                match (iter.next(), iter.next()) {
                    (Some(&b2), Some(&b3)) if b2 >= 0xA0 => {
                        return Some((pos, decode_surrogate(b2, b3)))
                    }
                    _ => pos += 3
                }
            } else if b < 0xF0 {
                // Other 3-byte sequence.
                iter.next();
                iter.next();
                pos += 3;
            } else {
                // 4-byte sequence.
                iter.next();
                iter.next();
                iter.next();
                pos += 4;
            }
        }
    }
    /// Lead surrogate encoded in the final three bytes, if any.
    /// Lead surrogates (U+D800..U+DBFF) encode as ED A0..AF xx.
    #[inline]
    fn final_lead_surrogate(&self) -> Option<u16> {
        let len = self.len();
        if len < 3 {
            return None
        }
        match self.as_bytes().slice_from(len - 3) {
            [0xED, b2 @ 0xA0...0xAF, b3] => Some(decode_surrogate(b2, b3)),
            _ => None
        }
    }
    /// Trail surrogate encoded in the first three bytes, if any.
    /// Trail surrogates (U+DC00..U+DFFF) encode as ED B0..BF xx.
    #[inline]
    fn initial_trail_surrogate(&self) -> Option<u16> {
        let len = self.len();
        if len < 3 {
            return None
        }
        match self.as_bytes().slice_to(3) {
            // BUG FIX: this pattern previously matched 0xA0...0xAF, which
            // is the *lead*-surrogate range; trail surrogates have second
            // byte 0xB0..0xBF. With the wrong range, concatenation never
            // detected a trailing surrogate in `other`, so newly adjacent
            // pairs were not fused and the WTF-8 invariant was broken.
            [0xED, b2 @ 0xB0...0xBF, b3] => Some(decode_surrogate(b2, b3)),
            _ => None
        }
    }
}
// Reassembles the surrogate code unit from the trailing two bytes of its
// 3-byte encoding: the payload is (b2 & 0x3F) << 6 | (b3 & 0x3F), and
// since b2 >= 0xA0 for surrogates the 0x800 bit of the payload is always
// set, making the OR with 0xD800 equivalent to adding the 0xD000 lead.
#[inline]
fn decode_surrogate(second_byte: u8, third_byte: u8) -> u16 {
    // The first byte is assumed to be 0xED
    0xD800 | (second_byte as u16 & 0x3F) << 6 | third_byte as u16 & 0x3F
}
// Decodes a UTF-16 surrogate pair into the supplementary code point it
// represents: 0x10000 + ((lead - 0xD800) << 10 | (trail - 0xDC00)).
#[inline]
fn decode_surrogate_pair(lead: u16, trail: u16) -> char {
    // BUG FIX: the supplementary-plane offset is 0x10000 (U+10000 is the
    // first code point a surrogate pair can encode), not 0x100000; the
    // old constant produced code points 0xF0000 too high, past U+10FFFF
    // for most pairs.
    let code_point = 0x10000 + (((lead - 0xD800) as u32 << 10) | (trail - 0xDC00) as u32);
    // Results lie in U+10000..=U+10FFFF, all valid scalar values.
    unsafe { transmute(code_point) }
}
/// Iterator for the code points of a WTF-8 string
///
/// Created with the method `.code_points()`.
#[deriving(Clone)]
pub struct Wtf8CodePoints<'a> {
crazy_unsafe_chars: str::Chars<'a>
}
impl<'a> Iterator<CodePoint> for Wtf8CodePoints<'a> {
#[inline]
fn next(&mut self) -> Option<CodePoint> {
match self.crazy_unsafe_chars.next() {
Some(crazy_unsafe_char) => Some(unsafe {
CodePoint::from_u32_unchecked(crazy_unsafe_char as u32)
}),
None => None
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // NOTE(review): this module predates Rust 1.0; several asserts omit the
    // trailing semicolon, which the compiler of that era accepted for
    // unit-typed macro statements.
    #[test]
    fn code_point_from_u32() {
        assert!(CodePoint::from_u32(0).is_some())
        assert!(CodePoint::from_u32(0xD800).is_some())
        assert!(CodePoint::from_u32(0x10FFFF).is_some())
        assert!(CodePoint::from_u32(0x110000).is_none())
    }
    #[test]
    fn code_point_to_u32() {
        fn c(value: u32) -> CodePoint { CodePoint::from_u32(value).unwrap() }
        assert_eq!(c(0).to_u32(), 0)
        assert_eq!(c(0xD800).to_u32(), 0xD800)
        assert_eq!(c(0x10FFFF).to_u32(), 0x10FFFF)
    }
    #[test]
    fn code_point_from_char() {
        assert_eq!(CodePoint::from_char('a').to_u32(), 0x61)
        assert_eq!(CodePoint::from_char('💩').to_u32(), 0x1F4A9)
    }
    #[test]
    fn code_point_to_string() {
        assert_eq!(CodePoint::from_char('a').to_string(), "U+0061".to_string())
        assert_eq!(CodePoint::from_char('💩').to_string(), "U+1F4A9".to_string())
    }
    #[test]
    fn code_point_to_char() {
        fn c(value: u32) -> CodePoint { CodePoint::from_u32(value).unwrap() }
        assert_eq!(c(0x61).to_char(), Some('a'))
        assert_eq!(c(0x1F4A9).to_char(), Some('💩'))
        // Lone surrogates are representable as CodePoint but not as char.
        assert_eq!(c(0xD800).to_char(), None)
    }
    #[test]
    fn wtf8string_new() {
        assert_eq!(Wtf8String::new().as_bytes(), b"");
    }
    #[test]
    fn wtf8string_from_str() {
        assert_eq!(Wtf8String::from_str("").as_bytes(), b"");
        assert_eq!(Wtf8String::from_str("aé 💩").as_bytes(),
                   b"a\xC3\xA9 \xF0\x9F\x92\xA9");
    }
    #[test]
    fn wtf8string_from_string() {
        assert_eq!(Wtf8String::from_string("".to_string()).as_bytes(), b"");
        assert_eq!(Wtf8String::from_string("aé 💩".to_string()).as_bytes(),
                   b"a\xC3\xA9 \xF0\x9F\x92\xA9");
    }
    #[test]
    fn wtf8string_as_mut_vec() {
        let mut string = Wtf8String::from_str("aé");
        unsafe {
            *string.as_mut_vec().get_mut(0) = b'A';
        }
        assert_eq!(string.as_str(), Some("Aé"));
    }
    #[test]
    fn wtf8string_push_str() {
        let mut string = Wtf8String::new();
        assert_eq!(string.as_bytes(), b"");
        string.push_str("aé 💩");
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 \xF0\x9F\x92\xA9");
    }
    #[test]
    fn wtf8string_push_char() {
        let mut string = Wtf8String::from_str("aé ");
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 ");
        string.push_char('💩');
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 \xF0\x9F\x92\xA9");
    }
    #[test]
    fn wtf8string_push() {
        let mut string = Wtf8String::from_str("aé ");
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 ");
        string.push(CodePoint::from_char('💩'));
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 \xF0\x9F\x92\xA9");
        // FIXME test surrogate pair
    }
    #[test]
    fn wtf8string_push_wtf8() {
        let mut string = Wtf8String::from_str("aé");
        assert_eq!(string.as_bytes(), b"a\xC3\xA9");
        string.push_wtf8(Wtf8Slice::from_str(" 💩"));
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 \xF0\x9F\x92\xA9");
        // FIXME test surrogate pair
    }
    #[test]
    fn wtf8string_into_bytes() {
        assert_eq!(Wtf8String::from_str("aé 💩").into_bytes(),
                   b"a\xC3\xA9 \xF0\x9F\x92\xA9".to_vec());
    }
    #[test]
    fn wtf8string_into_string() {
        let mut string = Wtf8String::from_str("aé 💩");
        assert_eq!(string.clone().into_string(), Ok("aé 💩".to_string()));
        // Pushing a lone surrogate makes the contents non-UTF-8, so the
        // conversion hands the original back in the Err case.
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(string.clone().into_string(), Err(string));
    }
    #[test]
    fn wtf8string_into_string_lossy() {
        let mut string = Wtf8String::from_str("aé 💩");
        assert_eq!(string.clone().into_string_lossy(), "aé 💩".to_string());
        // The lone surrogate is replaced with U+FFFD.
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(string.clone().into_string_lossy(), "aé 💩�".to_string());
    }
    #[test]
    fn wtf8string_from_iterator() {
        fn c(value: &u32) -> CodePoint { CodePoint::from_u32(*value).unwrap() }
        assert_eq!([0x61, 0xE9, 0x20, 0x1F4A9].iter().map(c).collect::<Wtf8String>().as_bytes(),
                   b"a\xC3\xA9 \xF0\x9F\x92\xA9")
        // FIXME test surrogate pair
    }
    #[test]
    fn wtf8string_extend() {
        fn c(value: &u32) -> CodePoint { CodePoint::from_u32(*value).unwrap() }
        let mut string = Wtf8String::from_str("aé");
        string.extend([0x20, 0x1F4A9].iter().map(c));
        assert_eq!(string.as_bytes(), b"a\xC3\xA9 \xF0\x9F\x92\xA9")
        // FIXME test surrogate pair
    }
    #[test]
    fn wtf8string_show() {
        let mut string = Wtf8String::from_str("aé 💩");
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(format!("{}", string).as_slice(), r#""aé 💩\uD800""#);
    }
    #[test]
    fn wtf8string_as_slice() {
        assert_eq!(Wtf8String::from_str("aé").as_slice(), Wtf8Slice::from_str("aé"));
    }
    #[test]
    fn wtf8slice_show() {
        let mut string = Wtf8String::from_str("aé 💩");
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(format!("{}", string.as_slice()).as_slice(), r#""aé 💩\uD800""#);
    }
    #[test]
    fn wtf8slice_from_str() {
        assert_eq!(Wtf8Slice::from_str("").as_bytes(), b"");
        assert_eq!(Wtf8Slice::from_str("aé 💩").as_bytes(),
                   b"a\xC3\xA9 \xF0\x9F\x92\xA9");
    }
    #[test]
    fn wtf8slice_len() {
        // len() counts bytes, not code points.
        assert_eq!(Wtf8Slice::from_str("").len(), 0);
        assert_eq!(Wtf8Slice::from_str("aé 💩").len(), 8);
    }
    #[test]
    fn wtf8slice_code_points() {
        let chars = Wtf8Slice::from_str("é 💩").code_points()
                              .map(|c| c.to_char()).collect::<Vec<_>>();
        assert_eq!(chars, vec![Some('é'), Some(' '), Some('💩')]);
        // FIXME test surrogates
    }
    #[test]
    fn wtf8slice_as_str() {
        assert_eq!(Wtf8Slice::from_str("").as_str(), Some(""));
        assert_eq!(Wtf8Slice::from_str("aé 💩").as_str(), Some("aé 💩"));
        let mut string = Wtf8String::new();
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(string.as_str(), None);
    }
    #[test]
    fn wtf8slice_to_string_lossy() {
        use std::str::{Owned, Slice};
        assert_eq!(Wtf8Slice::from_str("").to_string_lossy(), Slice(""));
        assert_eq!(Wtf8Slice::from_str("aé 💩").to_string_lossy(), Slice("aé 💩"));
        let mut string = Wtf8String::from_str("aé 💩");
        string.push(CodePoint::from_u32(0xD800).unwrap());
        assert_eq!(string.to_string_lossy(), Owned("aé 💩�".to_string()));
    }
    // FIXME: UTF-16 tests
}
// --------------------------------------------------------------------------
// Copyright 2017 GFX developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
extern crate cocoa;
#[macro_use]
extern crate bitflags;
extern crate libc;
#[macro_use]
extern crate objc;
extern crate objc_foundation;
extern crate block;
use objc::Message;
use objc::runtime::{Object, Class, BOOL, YES, NO};
use cocoa::foundation::NSSize;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::ops::Deref;
use std::any::Any;
use std::fmt;
use std::mem;
#[allow(non_camel_case_types)]
#[repr(C)]
// Typed wrapper around a raw Objective-C object pointer.  The phantom type
// parameter encodes the class/prototype chain (walked by the Deref impl).
pub struct id<T=()>(pub *mut Object, pub PhantomData<T>);
// `id` is a pointer plus a zero-sized marker, so it is freely copyable for
// any `T` (a derive would wrongly require `T: Copy`).
impl<T> Copy for id<T> {}
impl<T> Clone for id<T> {
    fn clone(&self) -> id<T> {
        *self
    }
}
impl<T> Hash for id<T> {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u64(unsafe { mem::transmute(self.0) });
state.finish();
}
}
// Pointer identity: two `id`s are equal iff they wrap the same object.
impl<T> PartialEq for id<T> {
    fn eq(&self, other: &id<T>) -> bool {
        self.0 == other.0
    }
}
impl<T> Eq for id<T> {}
// Debug shows only the raw pointer value.
impl<T> fmt::Debug for id<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "id({:p})", self.0)
    }
}
impl<T> id<T> {
    /// Returns the nil (null) object reference.
    pub fn nil() -> Self {
        id(0 as *mut Object, PhantomData)
    }
    /// True if this reference is nil.
    pub fn is_null(&self) -> bool {
        self.0.is_null()
    }
}
// Walking the prototype chain: `id<(T, R)>` derefs to `id<R>`, so a subclass
// wrapper transparently exposes its superclass's methods.  The transmute
// reinterprets the reference; both types are repr(C) with identical layout.
impl<T, R> Deref for id<(T, R)> {
    type Target = id<R>;
    fn deref(&self) -> &id<R> { unsafe { mem::transmute(self) } }
}
// `id` wraps an Objective-C object pointer, so it may receive messages.
unsafe impl<T> objc::Message for id<T> { }
#[allow(non_upper_case_globals)]
pub const nil: id<()> = id(0 as *mut Object, PhantomData);
// Uniform access to the raw object pointer, used as the msg_send! target.
pub trait AsObject {
    fn as_obj(&self) -> *mut Object;
}
impl<T> AsObject for id<T> {
    fn as_obj(&self) -> *mut Object {
        self.0
    }
}
// Mirror of the Objective-C NSObject protocol: manual reference counting and
// class introspection, implemented by forwarding selectors via msg_send!.
// All methods are unsafe because they message a raw, unchecked pointer.
pub trait NSObjectProtocol : Message + Sized + AsObject {
    unsafe fn retain(&self) {
        msg_send![self.as_obj(), retain]
    }
    unsafe fn release(&self) {
        msg_send![self.as_obj(), release]
    }
    unsafe fn retain_count(&self) -> u64 {
        msg_send![self.as_obj(), retainCount]
    }
    unsafe fn autorelease(&self) {
        msg_send![self.as_obj(), autorelease]
    }
    unsafe fn is_kind_of_class(&self, class: Class) -> BOOL {
        msg_send![self.as_obj(), isKindOfClass:class]
    }
    // Overridden by each binding type to return its concrete ObjC class.
    unsafe fn class() -> &'static Class {
        Class::get("NSObject").unwrap()
    }
}
// Uninhabited marker type for the NSArray prototype chain.
pub enum NSArrayPrototype {}
pub type NSArray<T> = id<(NSArrayPrototype, (NSObjectPrototype, (T)))>;
impl<T> NSArray<T> where T: Any {
    // Builds an NSArray via `arrayWithObjects:count:`.
    // NOTE(review): assumes the slice elements are ObjC object pointers —
    // confirm at call sites.
    pub fn array_with_objects(slice: &[T]) -> Self {
        unsafe {
            msg_send![Self::class(), arrayWithObjects:slice.as_ptr()
                                                count:slice.len() as u64]
        }
    }
    pub fn object_at(&self, index: u64) -> T {
        unsafe {
            msg_send![self.0, objectAtIndex:index]
        }
    }
    pub fn count(&self) -> u64 {
        unsafe {
            msg_send![self.0, count]
        }
    }
}
impl<T> NSObjectProtocol for NSArray<T> {
    unsafe fn class() -> &'static Class {
        Class::get("NSArray").unwrap()
    }
}
pub enum NSAutoreleasePoolPrototype {}
pub type NSAutoreleasePool = id<(NSAutoreleasePoolPrototype, (NSObjectPrototype, ()))>;
impl NSAutoreleasePool {
    /// Allocates a pool; call `init` before use.
    pub fn alloc() -> Self {
        unsafe {
            msg_send![Self::class(), alloc]
        }
    }
    pub fn init(&self) -> Self {
        unsafe {
            msg_send![self.0, init]
        }
    }
    /// Drains (releases) everything autoreleased into the pool.
    pub fn drain(&self) {
        unsafe {
            msg_send![self.0, drain]
        }
    }
}
impl NSObjectProtocol for NSAutoreleasePool {
    unsafe fn class() -> &'static Class {
        Class::get("NSAutoreleasePool").unwrap()
    }
}
// Root of every prototype chain in this module.
pub enum NSObjectPrototype {}
pub type NSObject = id<(NSObjectPrototype, ())>;
impl NSObjectProtocol for NSObject {}
pub enum CAMetalDrawablePrototype {}
pub type CAMetalDrawable = id<(CAMetalDrawablePrototype, (MTLDrawablePrototype, (NSObjectPrototype, ())))>;
impl CAMetalDrawable {
    /// Returns the texture this drawable presents to.
    pub fn texture(&self) -> MTLTexture {
        unsafe {
            msg_send![self.0, texture]
        }
    }
}
impl NSObjectProtocol for CAMetalDrawable {
    unsafe fn class() -> &'static Class {
        Class::get("CAMetalDrawable").unwrap()
    }
}
pub enum CAMetalLayerPrototype {}
pub type CAMetalLayer = id<(CAMetalLayerPrototype, (NSObjectPrototype, ()))>;
impl CAMetalLayer {
    /// `[CAMetalLayer new]`: allocates and initializes a layer.
    pub fn new() -> CAMetalLayer {
        unsafe {
            msg_send![Self::class(), new]
        }
    }
    /// `[CAMetalLayer layer]` convenience constructor.
    pub fn layer() -> CAMetalLayer {
        unsafe {
            msg_send![Self::class(), layer]
        }
    }
    pub fn set_device(&self, device: MTLDevice) {
        unsafe {
            msg_send![self.0, setDevice:device.0]
        }
    }
    pub fn pixel_format(&self) -> MTLPixelFormat {
        unsafe {
            msg_send![self.0, pixelFormat]
        }
    }
    pub fn set_pixel_format(&self, pixel_format: MTLPixelFormat) {
        unsafe {
            msg_send![self.0, setPixelFormat:pixel_format]
        }
    }
    pub fn drawable_size(&self) -> NSSize {
        unsafe {
            msg_send![self.0, drawableSize]
        }
    }
    pub fn set_drawable_size(&self, size: NSSize) {
        unsafe {
            msg_send![self.0, setDrawableSize:size]
        }
    }
    // Converts the ObjC BOOL returned by `presentsWithTransaction` to bool.
    pub fn presents_with_transaction(&self) -> bool {
        unsafe {
            match msg_send![self.0, presentsWithTransaction] {
                YES => true,
                NO => false,
                _ => unreachable!()
            }
        }
    }
    pub fn set_presents_with_transaction(&self, transaction: bool) {
        unsafe {
            msg_send![self.0, setPresentsWithTransaction:transaction];
        }
    }
    pub fn set_edge_antialiasing_mask(&self, mask: u64) {
        unsafe {
            msg_send![self.0, setEdgeAntialiasingMask:mask]
        }
    }
    pub fn set_masks_to_bounds(&self, masks: bool) {
        unsafe {
            msg_send![self.0, setMasksToBounds:masks]
        }
    }
    pub fn remove_all_animations(&self) {
        unsafe {
            msg_send![self.0, removeAllAnimations];
        }
    }
    /// Returns the next drawable, mapping a nil response to None.
    pub fn next_drawable(&self) -> Option<CAMetalDrawable> {
        unsafe {
            let drawable: CAMetalDrawable = msg_send![self.0, nextDrawable];
            match drawable.is_null() {
                true => None,
                false => Some(drawable)
            }
        }
    }
}
impl NSObjectProtocol for CAMetalLayer {
    unsafe fn class() -> &'static Class {
        Class::get("CAMetalLayer").unwrap()
    }
}
mod constants;
mod types;
mod device;
mod texture;
mod sampler;
mod resource;
mod drawable;
mod buffer;
mod renderpass;
mod commandqueue;
mod commandbuffer;
mod encoder;
mod pipeline;
mod library;
mod argument;
mod vertexdescriptor;
mod depthstencil;
mod heap;
pub use constants::*;
pub use types::*;
pub use device::*;
pub use texture::*;
pub use sampler::*;
pub use resource::*;
pub use drawable::*;
pub use buffer::*;
pub use renderpass::*;
pub use commandqueue::*;
pub use commandbuffer::*;
pub use encoder::*;
pub use pipeline::*;
pub use library::*;
pub use argument::*;
pub use vertexdescriptor::*;
pub use depthstencil::*;
pub use heap::*;
// (commit message artifact) Added setter for contentsScale
// Copyright 2017 GFX developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
extern crate cocoa;
#[macro_use]
extern crate bitflags;
extern crate libc;
#[macro_use]
extern crate objc;
extern crate objc_foundation;
extern crate block;
use objc::Message;
use objc::runtime::{Object, Class, BOOL, YES, NO};
use cocoa::foundation::NSSize;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::ops::Deref;
use std::any::Any;
use std::fmt;
use std::mem;
// CGFloat matches Core Graphics: double on 64-bit targets, float elsewhere.
#[cfg(target_pointer_width = "64")]
pub type CGFloat = libc::c_double;
#[cfg(not(target_pointer_width = "64"))]
pub type CGFloat = libc::c_float;
#[allow(non_camel_case_types)]
#[repr(C)]
// Typed wrapper around a raw Objective-C object pointer.  The phantom type
// parameter encodes the class/prototype chain (walked by the Deref impl).
pub struct id<T=()>(pub *mut Object, pub PhantomData<T>);
// `id` is a pointer plus a zero-sized marker, so it is freely copyable for
// any `T` (a derive would wrongly require `T: Copy`).
impl<T> Copy for id<T> {}
impl<T> Clone for id<T> {
    fn clone(&self) -> id<T> {
        *self
    }
}
impl<T> Hash for id<T> {
fn hash<H>(&self, state: &mut H) where H: Hasher {
state.write_u64(unsafe { mem::transmute(self.0) });
state.finish();
}
}
// Pointer identity: two `id`s are equal iff they wrap the same object.
impl<T> PartialEq for id<T> {
    fn eq(&self, other: &id<T>) -> bool {
        self.0 == other.0
    }
}
impl<T> Eq for id<T> {}
// Debug shows only the raw pointer value.
impl<T> fmt::Debug for id<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "id({:p})", self.0)
    }
}
impl<T> id<T> {
    /// Returns the nil (null) object reference.
    pub fn nil() -> Self {
        id(0 as *mut Object, PhantomData)
    }
    /// True if this reference is nil.
    pub fn is_null(&self) -> bool {
        self.0.is_null()
    }
}
// Walking the prototype chain: `id<(T, R)>` derefs to `id<R>`, so a subclass
// wrapper transparently exposes its superclass's methods.  The transmute
// reinterprets the reference; both types are repr(C) with identical layout.
impl<T, R> Deref for id<(T, R)> {
    type Target = id<R>;
    fn deref(&self) -> &id<R> { unsafe { mem::transmute(self) } }
}
// `id` wraps an Objective-C object pointer, so it may receive messages.
unsafe impl<T> objc::Message for id<T> { }
#[allow(non_upper_case_globals)]
pub const nil: id<()> = id(0 as *mut Object, PhantomData);
// Uniform access to the raw object pointer, used as the msg_send! target.
pub trait AsObject {
    fn as_obj(&self) -> *mut Object;
}
impl<T> AsObject for id<T> {
    fn as_obj(&self) -> *mut Object {
        self.0
    }
}
// Mirror of the Objective-C NSObject protocol: manual reference counting and
// class introspection, implemented by forwarding selectors via msg_send!.
// All methods are unsafe because they message a raw, unchecked pointer.
pub trait NSObjectProtocol : Message + Sized + AsObject {
    unsafe fn retain(&self) {
        msg_send![self.as_obj(), retain]
    }
    unsafe fn release(&self) {
        msg_send![self.as_obj(), release]
    }
    unsafe fn retain_count(&self) -> u64 {
        msg_send![self.as_obj(), retainCount]
    }
    unsafe fn autorelease(&self) {
        msg_send![self.as_obj(), autorelease]
    }
    unsafe fn is_kind_of_class(&self, class: Class) -> BOOL {
        msg_send![self.as_obj(), isKindOfClass:class]
    }
    // Overridden by each binding type to return its concrete ObjC class.
    unsafe fn class() -> &'static Class {
        Class::get("NSObject").unwrap()
    }
}
// Uninhabited marker type for the NSArray prototype chain.
pub enum NSArrayPrototype {}
pub type NSArray<T> = id<(NSArrayPrototype, (NSObjectPrototype, (T)))>;
impl<T> NSArray<T> where T: Any {
    // Builds an NSArray via `arrayWithObjects:count:`.
    // NOTE(review): assumes the slice elements are ObjC object pointers —
    // confirm at call sites.
    pub fn array_with_objects(slice: &[T]) -> Self {
        unsafe {
            msg_send![Self::class(), arrayWithObjects:slice.as_ptr()
                                                count:slice.len() as u64]
        }
    }
    pub fn object_at(&self, index: u64) -> T {
        unsafe {
            msg_send![self.0, objectAtIndex:index]
        }
    }
    pub fn count(&self) -> u64 {
        unsafe {
            msg_send![self.0, count]
        }
    }
}
impl<T> NSObjectProtocol for NSArray<T> {
    unsafe fn class() -> &'static Class {
        Class::get("NSArray").unwrap()
    }
}
pub enum NSAutoreleasePoolPrototype {}
pub type NSAutoreleasePool = id<(NSAutoreleasePoolPrototype, (NSObjectPrototype, ()))>;
impl NSAutoreleasePool {
    /// Allocates a pool; call `init` before use.
    pub fn alloc() -> Self {
        unsafe {
            msg_send![Self::class(), alloc]
        }
    }
    pub fn init(&self) -> Self {
        unsafe {
            msg_send![self.0, init]
        }
    }
    /// Drains (releases) everything autoreleased into the pool.
    pub fn drain(&self) {
        unsafe {
            msg_send![self.0, drain]
        }
    }
}
impl NSObjectProtocol for NSAutoreleasePool {
    unsafe fn class() -> &'static Class {
        Class::get("NSAutoreleasePool").unwrap()
    }
}
// Root of every prototype chain in this module.
pub enum NSObjectPrototype {}
pub type NSObject = id<(NSObjectPrototype, ())>;
impl NSObjectProtocol for NSObject {}
pub enum CAMetalDrawablePrototype {}
pub type CAMetalDrawable = id<(CAMetalDrawablePrototype, (MTLDrawablePrototype, (NSObjectPrototype, ())))>;
impl CAMetalDrawable {
    /// Returns the texture this drawable presents to.
    pub fn texture(&self) -> MTLTexture {
        unsafe {
            msg_send![self.0, texture]
        }
    }
}
impl NSObjectProtocol for CAMetalDrawable {
    unsafe fn class() -> &'static Class {
        Class::get("CAMetalDrawable").unwrap()
    }
}
pub enum CAMetalLayerPrototype {}
pub type CAMetalLayer = id<(CAMetalLayerPrototype, (NSObjectPrototype, ()))>;
impl CAMetalLayer {
    /// `[CAMetalLayer new]`: allocates and initializes a layer.
    pub fn new() -> CAMetalLayer {
        unsafe {
            msg_send![Self::class(), new]
        }
    }
    /// `[CAMetalLayer layer]` convenience constructor.
    pub fn layer() -> CAMetalLayer {
        unsafe {
            msg_send![Self::class(), layer]
        }
    }
    pub fn set_device(&self, device: MTLDevice) {
        unsafe {
            msg_send![self.0, setDevice:device.0]
        }
    }
    pub fn pixel_format(&self) -> MTLPixelFormat {
        unsafe {
            msg_send![self.0, pixelFormat]
        }
    }
    pub fn set_pixel_format(&self, pixel_format: MTLPixelFormat) {
        unsafe {
            msg_send![self.0, setPixelFormat:pixel_format]
        }
    }
    pub fn drawable_size(&self) -> NSSize {
        unsafe {
            msg_send![self.0, drawableSize]
        }
    }
    pub fn set_drawable_size(&self, size: NSSize) {
        unsafe {
            msg_send![self.0, setDrawableSize:size]
        }
    }
    // Converts the ObjC BOOL returned by `presentsWithTransaction` to bool.
    pub fn presents_with_transaction(&self) -> bool {
        unsafe {
            match msg_send![self.0, presentsWithTransaction] {
                YES => true,
                NO => false,
                _ => unreachable!()
            }
        }
    }
    pub fn set_presents_with_transaction(&self, transaction: bool) {
        unsafe {
            msg_send![self.0, setPresentsWithTransaction:transaction];
        }
    }
    pub fn set_edge_antialiasing_mask(&self, mask: u64) {
        unsafe {
            msg_send![self.0, setEdgeAntialiasingMask:mask]
        }
    }
    pub fn set_masks_to_bounds(&self, masks: bool) {
        unsafe {
            msg_send![self.0, setMasksToBounds:masks]
        }
    }
    pub fn remove_all_animations(&self) {
        unsafe {
            msg_send![self.0, removeAllAnimations];
        }
    }
    /// Returns the next drawable, mapping a nil response to None.
    pub fn next_drawable(&self) -> Option<CAMetalDrawable> {
        unsafe {
            let drawable: CAMetalDrawable = msg_send![self.0, nextDrawable];
            match drawable.is_null() {
                true => None,
                false => Some(drawable)
            }
        }
    }
    /// Sets the layer's `contentsScale` (backing-store scale factor).
    pub fn set_contents_scale(&self, scale: CGFloat) {
        unsafe {
            msg_send![self.0, setContentsScale:scale];
        }
    }
}
impl NSObjectProtocol for CAMetalLayer {
    unsafe fn class() -> &'static Class {
        Class::get("CAMetalLayer").unwrap()
    }
}
mod constants;
mod types;
mod device;
mod texture;
mod sampler;
mod resource;
mod drawable;
mod buffer;
mod renderpass;
mod commandqueue;
mod commandbuffer;
mod encoder;
mod pipeline;
mod library;
mod argument;
mod vertexdescriptor;
mod depthstencil;
mod heap;
pub use constants::*;
pub use types::*;
pub use device::*;
pub use texture::*;
pub use sampler::*;
pub use resource::*;
pub use drawable::*;
pub use buffer::*;
pub use renderpass::*;
pub use commandqueue::*;
pub use commandbuffer::*;
pub use encoder::*;
pub use pipeline::*;
pub use library::*;
pub use argument::*;
pub use vertexdescriptor::*;
pub use depthstencil::*;
pub use heap::*;
// --------------------------------------------------------------------------
//! Crate `ruma_client` is a [Matrix](https://matrix.org/) client library.
//!
//! # Usage
//!
//! Begin by creating a `Client` type, usually using the `https` method for a client that supports
//! secure connections, and then logging in:
//!
//! ```no_run
//! # #![feature(impl_trait_in_bindings)]
//! #![feature(async_await)]
//! use ruma_client::Client;
//!
//! let work = async {
//! let homeserver_url = "https://example.com".parse().unwrap();
//! let client = Client::https(homeserver_url, None).unwrap();
//!
//! let session = client
//! .log_in("@alice:example.com".to_string(), "secret".to_string(), None)
//! .await?;
//!
//! // You're now logged in! Write the session to a file if you want to restore it later.
//! // Then start using the API!
//! # Ok(())
//! };
//!
//! // Start `work` on a futures runtime...
//! # let work_typehint: impl futures::future::TryFuture<Ok = (), Error = ruma_client::Error>
//! # = work;
//! ```
//!
//! You can also pass an existing session to the `Client` constructor to restore a previous session
//! rather than calling `log_in`.
//!
//! For the standard use case of synchronizing with the homeserver (i.e. getting all the latest
//! events), use the `Client::sync`:
//!
//! ```no_run
//! # #![feature(async_await)]
//! # use futures::stream::{StreamExt as _, TryStreamExt as _};
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! # async {
//! let mut sync_stream = Box::pin(client.sync(None, None, true));
//! while let Some(response) = sync_stream.try_next().await? {
//! // Do something with the data in the response...
//! }
//! # Result::<(), ruma_client::Error>::Ok(())
//! # };
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # #![feature(async_await)]
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! async {
//! let response = client
//! .request(get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! })
//! .await?;
//!
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Result::<(), ruma_client::Error>::Ok(())
//! }
//! # ;
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs
)]
use std::{
convert::TryFrom,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::Future,
stream::{self, Stream, TryStream, TryStreamExt as _},
};
use http::Response as HttpResponse;
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
pub use ruma_client_api as api;
pub use ruma_events as events;
pub use ruma_identifiers as identifiers;
/// Matrix client-server API endpoints.
//pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
///
/// Cheap to clone: all state lives behind the shared `Arc` (see `ClientData`).
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's `Arc`, shared between all clones of a `Client`.
#[derive(Debug)]
struct ClientData<C>
where
    C: Connect,
{
    /// The URL of the homeserver to connect to.
    homeserver_url: Url,
    /// The underlying HTTP client.
    hyper: HyperClient<C>,
    /// User session data.
    session: Mutex<Option<Session>>,
}
/// Non-secured variant of the client (using plain HTTP requests)
pub type HttpClient = Client<HttpConnector>;
impl HttpClient {
    /// Creates a new client for making HTTP requests to the given homeserver.
    ///
    /// Pass a previously saved `Session` to resume it, or `None` to start
    /// unauthenticated (then call `log_in`/`register_*`).
    pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
        Self(Arc::new(ClientData {
            homeserver_url,
            hyper: HyperClient::builder().keep_alive(true).build_http(),
            session: Mutex::new(session),
        }))
    }
    /// Get a copy of the current `Session`, if any.
    ///
    /// Useful for serializing and persisting the session to be restored later.
    pub fn session(&self) -> Option<Session> {
        self.0
            .session
            .lock()
            .expect("session mutex was poisoned")
            .clone()
    }
}
/// Secured variant of the client (using HTTPS requests)
// NOTE(review): gated on feature "tls" while the HttpsConnector /
// NativeTlsError imports above are gated on feature "hyper-tls"; unless the
// Cargo manifest makes "tls" imply "hyper-tls", enabling "tls" alone will not
// compile — confirm against Cargo.toml.
#[cfg(feature = "tls")]
pub type HttpsClient = Client<HttpsConnector<HttpConnector>>;
#[cfg(feature = "tls")]
impl HttpsClient {
    /// Creates a new client for making HTTPS requests to the given homeserver.
    ///
    /// Errors if the native TLS backend fails to initialize.
    pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
        let connector = HttpsConnector::new()?;
        Ok(Self(Arc::new(ClientData {
            homeserver_url,
            hyper: HyperClient::builder().keep_alive(true).build(connector),
            session: Mutex::new(session),
        })))
    }
}
impl<C> Client<C>
where
    C: Connect + 'static,
{
    /// Creates a new client using the given `hyper::Client`.
    ///
    /// This allows the user to configure the details of HTTP as desired.
    pub fn custom(
        hyper_client: HyperClient<C>,
        homeserver_url: Url,
        session: Option<Session>,
    ) -> Self {
        Self(Arc::new(ClientData {
            homeserver_url,
            hyper: hyper_client,
            session: Mutex::new(session),
        }))
    }
    /// Log in with a username and password.
    ///
    /// In contrast to api::r0::session::login::call(), this method stores the
    /// session data returned by the endpoint in this client, instead of
    /// returning it.
    pub async fn log_in(
        &self,
        user: String,
        password: String,
        device_id: Option<String>,
    ) -> Result<Session, Error> {
        use api::r0::session::login;
        let response = self
            .request(login::Request {
                address: None,
                login_type: login::LoginType::Password,
                medium: None,
                device_id,
                password,
                user,
            })
            .await?;
        let session = Session {
            access_token: response.access_token,
            device_id: response.device_id,
            user_id: response.user_id,
        };
        // Store the session so subsequent requests are authenticated.
        *self.0.session.lock().unwrap() = Some(session.clone());
        Ok(session)
    }
    /// Register as a guest. In contrast to api::r0::account::register::call(),
    /// this method stores the session data returned by the endpoint in this
    /// client, instead of returning it.
    pub async fn register_guest(&self) -> Result<Session, Error> {
        use api::r0::account::register;
        let response = self
            .request(register::Request {
                auth: None,
                bind_email: None,
                device_id: None,
                initial_device_display_name: None,
                kind: Some(register::RegistrationKind::Guest),
                password: None,
                username: None,
            })
            .await?;
        let session = Session {
            access_token: response.access_token,
            device_id: response.device_id,
            user_id: response.user_id,
        };
        *self.0.session.lock().unwrap() = Some(session.clone());
        Ok(session)
    }
    /// Register as a new user on this server.
    ///
    /// In contrast to api::r0::account::register::call(), this method stores
    /// the session data returned by the endpoint in this client, instead of
    /// returning it.
    ///
    /// The username is the local part of the returned user_id. If it is
    /// omitted from this request, the server will generate one.
    pub async fn register_user(
        &self,
        username: Option<String>,
        password: String,
    ) -> Result<Session, Error> {
        use api::r0::account::register;
        let response = self
            .request(register::Request {
                auth: None,
                bind_email: None,
                device_id: None,
                initial_device_display_name: None,
                kind: Some(register::RegistrationKind::User),
                password: Some(password),
                username,
            })
            .await?;
        let session = Session {
            access_token: response.access_token,
            device_id: response.device_id,
            user_id: response.user_id,
        };
        *self.0.session.lock().unwrap() = Some(session.clone());
        Ok(session)
    }
    /// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
    ///
    /// If the since parameter is None, the first Item might take a significant time to arrive and
    /// be deserialized, because it contains all events that have occurred in the whole lifetime of
    /// the logged-in user's account and are visible to them.
    pub fn sync(
        &self,
        filter: Option<api::r0::sync::sync_events::Filter>,
        since: Option<String>,
        set_presence: bool,
    ) -> impl Stream<Item = Result<api::r0::sync::sync_events::Response, Error>>
             + TryStream<Ok = api::r0::sync::sync_events::Response, Error = Error> {
        use api::r0::sync::sync_events;
        // TODO: Is this really the way TryStreams are supposed to work?
        #[derive(Debug, PartialEq, Eq)]
        enum State {
            InitialSync,
            Since(String),
            Errored,
        }
        let client = self.clone();
        // `true` is expressed to the API by omitting the parameter entirely;
        // `false` maps to an explicit Offline presence.
        let set_presence = if set_presence {
            None
        } else {
            Some(sync_events::SetPresence::Offline)
        };
        let initial_state = match since {
            Some(s) => State::Since(s),
            None => State::InitialSync,
        };
        stream::unfold(initial_state, move |state| {
            let client = client.clone();
            let filter = filter.clone();
            async move {
                let since = match state {
                    // After an error the stream terminates (unfold sees None).
                    State::Errored => return None,
                    State::Since(s) => Some(s),
                    State::InitialSync => None,
                };
                let res = client
                    .request(sync_events::Request {
                        filter,
                        since,
                        full_state: None,
                        set_presence,
                        timeout: None,
                    })
                    .await;
                match res {
                    Ok(response) => {
                        // Feed next_batch back in as the `since` token.
                        let next_batch_clone = response.next_batch.clone();
                        Some((Ok(response), State::Since(next_batch_clone)))
                    }
                    // Yield the error once, then terminate on the next poll.
                    Err(e) => Some((Err(e), State::Errored)),
                }
            }
        })
    }
    /// Makes a request to a Matrix API endpoint.
    pub fn request<Request: Endpoint>(
        &self,
        request: Request,
    ) -> impl Future<Output = Result<Request::Response, Error>> {
        let client = self.0.clone();
        async move {
            let mut url = client.homeserver_url.clone();
            let mut hyper_request = request.try_into()?.map(hyper::Body::from);
            {
                // Graft the endpoint's path/query onto the homeserver URL.
                let uri = hyper_request.uri();
                url.set_path(uri.path());
                url.set_query(uri.query());
                // Authenticated endpoints carry the token as a query param;
                // without a stored session they fail fast, before any I/O.
                if Request::METADATA.requires_authentication {
                    if let Some(ref session) = *client.session.lock().unwrap() {
                        url.query_pairs_mut()
                            .append_pair("access_token", &session.access_token);
                    } else {
                        return Err(Error(InnerError::AuthenticationRequired));
                    }
                }
            }
            *hyper_request.uri_mut() = Uri::from_str(url.as_ref())?;
            let hyper_response = client.hyper.request(hyper_request).await?;
            let (head, body) = hyper_response.into_parts();
            // Buffer the whole body before handing it to the typed parser.
            let full_response =
                HttpResponse::from_parts(head, body.try_concat().await?.as_ref().to_owned());
            Ok(Request::Response::try_from(full_response)?)
        }
    }
}
impl<C: Connect> Clone for Client<C> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
// (commit message artifact) Add some backticks in doc comments
//! Crate `ruma_client` is a [Matrix](https://matrix.org/) client library.
//!
//! # Usage
//!
//! Begin by creating a `Client` type, usually using the `https` method for a client that supports
//! secure connections, and then logging in:
//!
//! ```no_run
//! # #![feature(impl_trait_in_bindings)]
//! #![feature(async_await)]
//! use ruma_client::Client;
//!
//! let work = async {
//! let homeserver_url = "https://example.com".parse().unwrap();
//! let client = Client::https(homeserver_url, None).unwrap();
//!
//! let session = client
//! .log_in("@alice:example.com".to_string(), "secret".to_string(), None)
//! .await?;
//!
//! // You're now logged in! Write the session to a file if you want to restore it later.
//! // Then start using the API!
//! # Ok(())
//! };
//!
//! // Start `work` on a futures runtime...
//! # let work_typehint: impl futures::future::TryFuture<Ok = (), Error = ruma_client::Error>
//! # = work;
//! ```
//!
//! You can also pass an existing session to the `Client` constructor to restore a previous session
//! rather than calling `log_in`.
//!
//! For the standard use case of synchronizing with the homeserver (i.e. getting all the latest
//! events), use the `Client::sync`:
//!
//! ```no_run
//! # #![feature(async_await)]
//! # use futures::stream::{StreamExt as _, TryStreamExt as _};
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! # async {
//! let mut sync_stream = Box::pin(client.sync(None, None, true));
//! while let Some(response) = sync_stream.try_next().await? {
//! // Do something with the data in the response...
//! }
//! # Result::<(), ruma_client::Error>::Ok(())
//! # };
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # #![feature(async_await)]
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! async {
//! let response = client
//! .request(get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! })
//! .await?;
//!
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Result::<(), ruma_client::Error>::Ok(())
//! }
//! # ;
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs
)]
use std::{
convert::TryFrom,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::Future,
stream::{self, Stream, TryStream, TryStreamExt as _},
};
use http::Response as HttpResponse;
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
pub use ruma_client_api as api;
pub use ruma_events as events;
pub use ruma_identifiers as identifiers;
/// Matrix client-server API endpoints.
//pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
//
// This is only a shared handle: the actual state lives in `ClientData`
// behind an `Arc`, so a `Client` is cheap to clone and can be shared
// across tasks/threads (see the `Clone` impl below).
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's `Arc` (the doc previously said "Rc";
/// the handle is thread-safe).
#[derive(Debug)]
struct ClientData<C>
where
    C: Connect,
{
    /// The URL of the homeserver to connect to.
    homeserver_url: Url,
    /// The underlying HTTP client.
    hyper: HyperClient<C>,
    /// User session data.
    ///
    /// Guarded by a `Mutex` so methods taking `&self` (e.g. `log_in`)
    /// can store a new session while the client is shared.
    session: Mutex<Option<Session>>,
}
/// Non-secured variant of the client (using plain HTTP requests)
pub type HttpClient = Client<HttpConnector>;
impl HttpClient {
    /// Creates a new client for making HTTP requests to the given homeserver.
    pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
        let data = ClientData {
            homeserver_url,
            hyper: HyperClient::builder().keep_alive(true).build_http(),
            session: Mutex::new(session),
        };
        Self(Arc::new(data))
    }

    /// Get a copy of the current `Session`, if any.
    ///
    /// Useful for serializing and persisting the session to be restored later.
    pub fn session(&self) -> Option<Session> {
        let guard = self.0.session.lock().expect("session mutex was poisoned");
        guard.clone()
    }
}
/// Secured variant of the client (using HTTPS requests)
#[cfg(feature = "tls")]
pub type HttpsClient = Client<HttpsConnector<HttpConnector>>;
#[cfg(feature = "tls")]
impl HttpsClient {
    /// Creates a new client for making HTTPS requests to the given homeserver.
    pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
        // Building the TLS connector can fail, hence the Result return.
        let connector = HttpsConnector::new()?;
        let data = ClientData {
            homeserver_url,
            hyper: HyperClient::builder().keep_alive(true).build(connector),
            session: Mutex::new(session),
        };
        Ok(Self(Arc::new(data)))
    }
}
impl<C> Client<C>
where
    C: Connect + 'static,
{
    /// Creates a new client using the given `hyper::Client`.
    ///
    /// This allows the user to configure the details of HTTP as desired.
    pub fn custom(
        hyper_client: HyperClient<C>,
        homeserver_url: Url,
        session: Option<Session>,
    ) -> Self {
        Self(Arc::new(ClientData {
            homeserver_url,
            hyper: hyper_client,
            session: Mutex::new(session),
        }))
    }

    /// Log in with a username and password.
    ///
    /// In contrast to `api::r0::session::login::call()`, this method stores the
    /// session data returned by the endpoint in this client, instead of
    /// returning it.
    pub async fn log_in(
        &self,
        user: String,
        password: String,
        device_id: Option<String>,
    ) -> Result<Session, Error> {
        use api::r0::session::login;

        let response = self
            .request(login::Request {
                address: None,
                login_type: login::LoginType::Password,
                medium: None,
                device_id,
                password,
                user,
            })
            .await?;

        let session = Session {
            access_token: response.access_token,
            device_id: response.device_id,
            user_id: response.user_id,
        };
        // Cache the session so subsequent requests are authenticated.
        *self.0.session.lock().unwrap() = Some(session.clone());

        Ok(session)
    }

    /// Register as a guest. In contrast to `api::r0::account::register::call()`,
    /// this method stores the session data returned by the endpoint in this
    /// client, instead of returning it.
    pub async fn register_guest(&self) -> Result<Session, Error> {
        use api::r0::account::register;

        let response = self
            .request(register::Request {
                auth: None,
                bind_email: None,
                device_id: None,
                initial_device_display_name: None,
                // Guest registration needs no credentials, only the kind.
                kind: Some(register::RegistrationKind::Guest),
                password: None,
                username: None,
            })
            .await?;

        let session = Session {
            access_token: response.access_token,
            device_id: response.device_id,
            user_id: response.user_id,
        };
        // Cache the session so subsequent requests are authenticated.
        *self.0.session.lock().unwrap() = Some(session.clone());

        Ok(session)
    }

    /// Register as a new user on this server.
    ///
    /// In contrast to `api::r0::account::register::call()`, this method stores
    /// the session data returned by the endpoint in this client, instead of
    /// returning it.
    ///
    /// The username is the local part of the returned user_id. If it is
    /// omitted from this request, the server will generate one.
    pub async fn register_user(
        &self,
        username: Option<String>,
        password: String,
    ) -> Result<Session, Error> {
        use api::r0::account::register;

        let response = self
            .request(register::Request {
                auth: None,
                bind_email: None,
                device_id: None,
                initial_device_display_name: None,
                kind: Some(register::RegistrationKind::User),
                password: Some(password),
                username,
            })
            .await?;

        let session = Session {
            access_token: response.access_token,
            device_id: response.device_id,
            user_id: response.user_id,
        };
        // Cache the session so subsequent requests are authenticated.
        *self.0.session.lock().unwrap() = Some(session.clone());

        Ok(session)
    }

    /// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
    ///
    /// If the since parameter is None, the first Item might take a significant time to arrive and
    /// be deserialized, because it contains all events that have occurred in the whole lifetime of
    /// the logged-in user's account and are visible to them.
    pub fn sync(
        &self,
        filter: Option<api::r0::sync::sync_events::Filter>,
        since: Option<String>,
        set_presence: bool,
    ) -> impl Stream<Item = Result<api::r0::sync::sync_events::Response, Error>>
           + TryStream<Ok = api::r0::sync::sync_events::Response, Error = Error> {
        use api::r0::sync::sync_events;

        // TODO: Is this really the way TryStreams are supposed to work?
        // State machine threaded through `stream::unfold`:
        //  - InitialSync: no `since` token yet (first request)
        //  - Since(token): use `next_batch` from the previous response
        //  - Errored: an error was already yielded; end the stream
        #[derive(Debug, PartialEq, Eq)]
        enum State {
            InitialSync,
            Since(String),
            Errored,
        }

        let client = self.clone();
        // `set_presence == true` maps to the server default (online), so
        // only the explicit Offline value is sent.
        let set_presence = if set_presence {
            None
        } else {
            Some(sync_events::SetPresence::Offline)
        };
        let initial_state = match since {
            Some(s) => State::Since(s),
            None => State::InitialSync,
        };

        stream::unfold(initial_state, move |state| {
            // Cheap Arc clones: each unfold step needs owned captures.
            let client = client.clone();
            let filter = filter.clone();

            async move {
                let since = match state {
                    // Returning None ends the stream after the first error.
                    State::Errored => return None,
                    State::Since(s) => Some(s),
                    State::InitialSync => None,
                };

                let res = client
                    .request(sync_events::Request {
                        filter,
                        since,
                        full_state: None,
                        set_presence,
                        timeout: None,
                    })
                    .await;

                match res {
                    Ok(response) => {
                        // Carry `next_batch` forward as the next `since` token.
                        let next_batch_clone = response.next_batch.clone();
                        Some((Ok(response), State::Since(next_batch_clone)))
                    }
                    Err(e) => Some((Err(e), State::Errored)),
                }
            }
        })
    }

    /// Makes a request to a Matrix API endpoint.
    ///
    /// Converts the typed `Request` into an HTTP request, rebases its
    /// path/query onto the homeserver URL, appends the access token when the
    /// endpoint requires authentication, and converts the HTTP response back
    /// into the typed `Response`.
    pub fn request<Request: Endpoint>(
        &self,
        request: Request,
    ) -> impl Future<Output = Result<Request::Response, Error>> {
        let client = self.0.clone();

        async move {
            let mut url = client.homeserver_url.clone();

            let mut hyper_request = request.try_into()?.map(hyper::Body::from);

            {
                let uri = hyper_request.uri();

                // Keep the endpoint's path/query but use the homeserver's
                // scheme/host/port.
                url.set_path(uri.path());
                url.set_query(uri.query());

                if Request::METADATA.requires_authentication {
                    if let Some(ref session) = *client.session.lock().unwrap() {
                        url.query_pairs_mut()
                            .append_pair("access_token", &session.access_token);
                    } else {
                        // No stored session: fail before hitting the network.
                        return Err(Error(InnerError::AuthenticationRequired));
                    }
                }
            }

            *hyper_request.uri_mut() = Uri::from_str(url.as_ref())?;

            let hyper_response = client.hyper.request(hyper_request).await?;
            let (head, body) = hyper_response.into_parts();

            // Buffer the whole body so the typed Response can deserialize it.
            let full_response =
                HttpResponse::from_parts(head, body.try_concat().await?.as_ref().to_owned());

            Ok(Request::Response::try_from(full_response)?)
        }
    }
}
impl<C: Connect> Clone for Client<C> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
|
use ::opcodes::{AddressingMode, OpCode};
use cpu::cpu_error::CpuError;
use cpu::flags::StatusFlags;
use cpu::memory_bus::MemoryBus;
use cpu::registers::Registers;
use cpu::stack::Stack;
const DEFAULT_CODE_SEGMENT_START_ADDRESS: u16 = 0xC000; // Default to a 16KB ROM, leaving 32KB of main memory
const STACK_START: usize = 0x100;
const STACK_END: usize = 0x1FF;
/// The resolved argument of a decoded instruction.
pub enum Operand {
    /// A literal byte (also used for relative branch offsets,
    /// which the branch handlers interpret as signed).
    Immediate(u8),
    /// An effective memory address to read from or write to.
    Memory(u16),
    /// No operand (implied or accumulator addressing).
    Implied,
}
/// A representation of a 6502 microprocessor
pub struct Cpu {
    /// Byte-addressable memory over the 16-bit address space.
    pub memory: MemoryBus,
    /// CPU registers (A, X, Y and the Program Counter are used here).
    pub registers: Registers,
    /// Processor status flags (carry, zero, sign, overflow, decimal,
    /// interrupt-disable).
    pub flags: StatusFlags,
    /// Stack bookkeeping; the stack storage itself is the memory range
    /// 0x100-0x1FF (see `STACK_START`/`STACK_END` and `brk`).
    pub stack: Stack,
    // First address of the code segment set by `load`.
    code_start: usize,
    // Byte length of the loaded code; `step_n` stops at code_start + code_size.
    code_size: usize,
}
pub type CpuLoadResult = Result<(), CpuError>;
pub type CpuStepResult = Result<(), CpuError>;
impl Cpu {
    /// Returns a default instance of a Cpu, with an empty code segment
    /// registered at the default ROM address (0xC000).
    pub fn new() -> Cpu {
        Cpu {
            memory: MemoryBus::new(),
            registers: Registers::new(),
            flags: Default::default(),
            stack: Stack::new(),
            code_start: DEFAULT_CODE_SEGMENT_START_ADDRESS as usize,
            code_size: 0,
        }
    }

    /// Loads code into the Cpu main memory at an optional offset. If no
    /// offset is provided, the Cpu will, by default, load the code into
    /// main memory at 0xC000.
    ///
    /// Returns `CpuError::code_segment_out_of_range` if the code would not
    /// fit into the 16-bit address space at the requested offset.
    pub fn load<T>(&mut self, code: &[u8], addr: T) -> CpuLoadResult
        where T: Into<Option<u16>>
    {
        let addr: u16 = match addr.into() {
            Some(addr) => {
                // Reject a segment that would run past the end of the
                // 16-bit address space.
                if addr as u32 + code.len() as u32 > u16::max_value() as u32 {
                    return Err(CpuError::code_segment_out_of_range(addr));
                }
                addr
            }
            None => DEFAULT_CODE_SEGMENT_START_ADDRESS,
        };

        for (i, byte) in code.iter().enumerate() {
            self.memory.write_byte(addr + i as u16, *byte);
        }

        // Set the Program Counter to point at the
        // start address of the code segment
        self.registers.PC = addr;
        self.code_start = addr as usize;
        self.code_size = code.len();

        Ok(())
    }

    /// Runs N instructions of code through the Cpu, stopping early once the
    /// Program Counter walks past the end of the loaded code segment.
    pub fn step_n(&mut self, n: u32) -> CpuStepResult {
        for _ in 0..n {
            if (self.registers.PC as usize) < self.code_start + self.code_size {
                self.step()?;
            } else {
                break;
            }
        }
        Ok(())
    }

    /// Runs a single instruction of code through the Cpu.
    ///
    /// Decodes the byte at PC, resolves its operand, advances PC past the
    /// instruction, then dispatches to the handler for the mnemonic.
    /// Returns `CpuError::unknown_opcode` for undecodable bytes or decoded
    /// but unimplemented mnemonics.
    pub fn step(&mut self) -> CpuStepResult {
        let byte = self.memory.read_byte(self.registers.PC);
        if let Some(opcode) = OpCode::from_raw_byte(byte) {
            let operand = self.get_operand_from_opcode(&opcode);

            // Advance PC before executing, so branch/jump handlers see the
            // address of the next instruction. Wrapping mirrors 6502 address
            // arithmetic instead of panicking in debug builds.
            self.registers.PC = self.registers.PC.wrapping_add(opcode.length as u16);

            match opcode.mnemonic {
                "ADC" => self.adc(&operand),
                "AND" => self.and(&operand),
                "ASL" => self.asl(&operand),
                "BCC" => self.bcc(&operand),
                "BCS" => self.bcs(&operand),
                "BEQ" => self.beq(&operand),
                "BIT" => self.bit(&operand),
                "BMI" => self.bmi(&operand),
                "BNE" => self.bne(&operand),
                "BPL" => self.bpl(&operand),
                "BRK" => self.brk(),
                "BVC" => self.bvc(&operand),
                "BVS" => self.bvs(&operand),
                "CLC" => self.set_carry_flag(false),
                "CLD" => self.set_decimal_flag(false),
                "CLI" => self.set_interrupt_flag(false),
                "CLV" => self.set_overflow_flag(false),
                "CMP" => {
                    let a = self.registers.A;
                    self.compare(&operand, a)
                }
                "CPX" => {
                    let x = self.registers.X;
                    self.compare(&operand, x)
                }
                "CPY" => {
                    let y = self.registers.Y;
                    self.compare(&operand, y)
                }
                "DEC" => self.dec(&operand),
                "DEX" => self.dex(),
                "DEY" => self.dey(),
                "EOR" => self.eor(&operand),
                "INC" => self.inc(&operand),
                "INX" => self.inx(),
                "INY" => self.iny(),
                "JMP" => self.jmp(&operand),
                "LDA" => self.lda(&operand),
                "LDX" => self.ldx(&operand),
                "LDY" => self.ldy(&operand),
                "SED" => self.set_decimal_flag(true),
                "STA" => self.sta(&operand),
                _ => return Err(CpuError::unknown_opcode(self.registers.PC, opcode.code)),
            }
            Ok(())
        } else {
            Err(CpuError::unknown_opcode(self.registers.PC, byte))
        }
    }

    /// Resolves the operand for `opcode` according to its addressing mode,
    /// reading any operand bytes that follow the opcode at PC.
    fn get_operand_from_opcode(&self, opcode: &OpCode) -> Operand {
        use ::opcodes::AddressingMode::*;

        let operand_start = self.registers.PC.wrapping_add(1);

        match opcode.mode {
            Unknown => unreachable!(),
            Implied => Operand::Implied,
            Immediate => Operand::Immediate(self.read_byte(operand_start)),
            // Branch offsets are a single immediate byte; the branch
            // handlers interpret it as signed.
            Relative => Operand::Immediate(self.read_byte(operand_start)),
            Accumulator => Operand::Implied,
            ZeroPage => Operand::Memory((self.read_byte(operand_start) as u16) & 0xFF),
            // Indexed zero-page addressing wraps within the zero page,
            // hence the `& 0xFF` mask.
            ZeroPageX => {
                Operand::Memory((self.registers.X as u16 + self.read_byte(operand_start) as u16) &
                                0xFF)
            }
            ZeroPageY => {
                Operand::Memory((self.registers.Y as u16 + self.read_byte(operand_start) as u16) &
                                0xFF)
            }
            Absolute => Operand::Memory(self.read_u16(operand_start)),
            // Indexed absolute addressing wraps around the 16-bit space;
            // plain `+` would panic on overflow in debug builds.
            AbsoluteX => {
                Operand::Memory(self.read_u16(operand_start).wrapping_add(self.registers.X as u16))
            }
            AbsoluteY => {
                Operand::Memory(self.read_u16(operand_start).wrapping_add(self.registers.Y as u16))
            }
            Indirect => Operand::Memory(self.read_u16(self.read_u16(operand_start))),
            // Indexed indirect: the pointer lives in the zero page at
            // (operand + X) & 0xFF.
            IndirectX => {
                Operand::Memory(self.read_u16((self.registers.X as u16 +
                                               self.read_byte(operand_start) as u16) &
                                              0xFF))
            }
            // Indirect indexed: Y is added to the pointer fetched from the
            // zero page.
            IndirectY => {
                Operand::Memory(self.read_u16(self.read_byte(operand_start) as u16)
                                    .wrapping_add(self.registers.Y as u16))
            }
        }
    }

    /// Extracts the operand's value: the literal for immediates, the byte
    /// read from memory for addresses, 0 for implied operands.
    fn unwrap_immediate(&self, operand: &Operand) -> u8 {
        match *operand {
            Operand::Immediate(byte) => byte,
            Operand::Memory(addr) => self.read_byte(addr),
            Operand::Implied => 0,
        }
    }

    /// Extracts the operand's effective address (immediates are widened,
    /// implied operands yield 0).
    fn unwrap_address(&self, operand: &Operand) -> u16 {
        match *operand {
            Operand::Immediate(byte) => byte as u16,
            Operand::Memory(addr) => addr,
            Operand::Implied => 0,
        }
    }

    // ## OpCode handlers ##

    /// ADC - add the operand and the carry flag to the accumulator, with
    /// packed BCD support when the decimal flag is set.
    fn adc(&mut self, operand: &Operand) {
        // This is implemented on the information provided here:
        // http://www.electrical4u.com/bcd-or-binary-coded-decimal-bcd-conversion-addition-subtraction/
        // and here:
        // http://www.6502.org/tutorials/decimal_mode.html,
        // and here:
        // http://www.atariarchives.org/2bml/chapter_10.php,
        // and also here:
        // http://stackoverflow.com/questions/29193303/6502-emulation-proper-way-to-implement-adc-and-sbc
        let carry = if self.flags.carry { 1 } else { 0 };
        let value = self.unwrap_immediate(&operand) as u16;
        let value_signs = self.registers.A & 0x80 == 0x80 && value & 0x80 == 0x80;

        // Do normal binary arithmetic first
        let mut result = self.registers.A as u16 + value as u16 + carry as u16;

        // Handle packed binary coded decimal
        if self.flags.decimal {
            if (self.registers.A as u16 & 0x0F) + (value & 0x0F) + carry > 0x09 {
                result += 0x06;
            }
            if result > 0x99 {
                result += 0x60;
            }
            self.flags.carry = (result & 0x100) == 0x100;
        } else {
            self.flags.carry = result > 0xFF;
        }

        self.flags.zero = result as u8 == 0x00;
        self.flags.sign = result & 0x80 == 0x80;
        // NOTE(review): overflow is only ever set here, never cleared, and the
        // comparison against `value_signs` does not match the conventional
        // (A ^ result) & (value ^ result) & 0x80 formula — confirm against
        // the test suite before changing.
        if self.flags.sign != value_signs {
            self.flags.overflow = true;
        }
        self.registers.A = result as u8;
    }

    /// AND - bitwise AND the operand into the accumulator.
    fn and(&mut self, operand: &Operand) {
        let value = self.unwrap_immediate(&operand);
        let result = self.registers.A & value;
        self.registers.A = result;
        self.flags.zero = result == 0;
        self.flags.sign = result & 0x80 == 0x80;
    }

    /// ASL - arithmetic shift left of the accumulator (implied) or a memory
    /// location; bit 7 shifts into the carry flag.
    fn asl(&mut self, operand: &Operand) {
        let mut value = if let &Operand::Implied = operand {
            // Implied ASL uses the A register
            self.registers.A
        } else {
            self.unwrap_immediate(&operand)
        };

        // Test the seventh bit - if its set, shift it
        // into the carry flag
        self.flags.carry = (value & 0x80) == 0x80;

        // Shift the value left
        value = value << 0x01;

        self.flags.sign = value & 0x80 == 0x80;
        self.flags.zero = value == 0;

        if let &Operand::Implied = operand {
            self.registers.A = value;
        } else {
            let addr = self.unwrap_address(&operand);
            self.write_byte(addr, value);
        }
    }

    /// BCC - branch if the carry flag is not set.
    fn bcc(&mut self, operand: &Operand) {
        if !self.flags.carry {
            let offset = self.unwrap_immediate(&operand);
            self.relative_jump(offset);
        }
    }

    /// BCS - branch if the carry flag is set.
    fn bcs(&mut self, operand: &Operand) {
        if self.flags.carry {
            let offset = self.unwrap_immediate(&operand);
            self.relative_jump(offset);
        }
    }

    /// BEQ - branch if the zero flag is set.
    fn beq(&mut self, operand: &Operand) {
        if self.flags.zero {
            let offset = self.unwrap_immediate(&operand);
            self.relative_jump(offset);
        }
    }

    /// BIT - test bits: zero flag from A AND operand, sign/overflow copied
    /// from bits 7 and 6 of the operand.
    fn bit(&mut self, operand: &Operand) {
        let a = self.registers.A;
        let value = self.unwrap_immediate(&operand);
        let result = value & a;
        self.flags.zero = result == 0x00;
        self.flags.overflow = value & 0x40 == 0x40; // "The V flag and the N flag receive copies of the sixth and seventh bits of the tested number"
        self.flags.sign = value & 0x80 == 0x80;
    }

    /// BMI - branch if the sign flag is set.
    fn bmi(&mut self, operand: &Operand) {
        if self.flags.sign {
            let offset = self.unwrap_immediate(&operand);
            self.relative_jump(offset);
        }
    }

    /// BNE - branch if the zero flag is not set.
    fn bne(&mut self, operand: &Operand) {
        if !self.flags.zero {
            let offset = self.unwrap_immediate(&operand);
            self.relative_jump(offset);
        }
    }

    /// BPL - branch if the sign flag is not set.
    fn bpl(&mut self, operand: &Operand) {
        if !self.flags.sign {
            let offset = self.unwrap_immediate(&operand);
            self.relative_jump(offset);
        }
    }

    /// BRK - force an interrupt: push the return address and the status
    /// flags onto the stack, then set the interrupt-disable flag.
    fn brk(&mut self) {
        let mut mem = &mut self.memory[STACK_START..STACK_END];
        // Return address is BRK + 0x02, but we do + 0x01 here
        // because after the cpu step we add another 0x01
        self.stack.push_u16(mem, self.registers.PC.wrapping_add(0x01));
        self.stack.push(mem, self.flags.to_u8());
        self.flags.interrupt_disabled = true;
    }

    /// BVC - branch if the overflow flag is not set.
    fn bvc(&mut self, operand: &Operand) {
        if !self.flags.overflow {
            let offset = self.unwrap_immediate(&operand);
            self.relative_jump(offset);
        }
    }

    /// BVS - branch if the overflow flag is set.
    fn bvs(&mut self, operand: &Operand) {
        if self.flags.overflow {
            let offset = self.unwrap_immediate(&operand);
            self.relative_jump(offset);
        }
    }

    /// CLC/SEC family helper: sets or clears the carry flag.
    fn set_carry_flag(&mut self, value: bool) {
        self.flags.carry = value;
    }

    /// CLD/SED helper: sets or clears the decimal flag.
    fn set_decimal_flag(&mut self, value: bool) {
        self.flags.decimal = value;
    }

    /// CLI/SEI helper: sets or clears the interrupt-disable flag.
    fn set_interrupt_flag(&mut self, value: bool) {
        self.flags.interrupt_disabled = value;
    }

    /// CLV helper: sets or clears the overflow flag.
    fn set_overflow_flag(&mut self, value: bool) {
        self.flags.overflow = value;
    }

    /// Shared implementation of CMP/CPX/CPY: compares `byte` (a register)
    /// with the operand and updates carry/zero/sign.
    fn compare(&mut self, operand: &Operand, byte: u8) {
        let value = self.unwrap_immediate(&operand);
        let result: i16 = byte as i16 - value as i16;
        // Carry set means no borrow occurred (register >= operand)
        self.flags.carry = (result as u16) < 0x100;
        self.flags.zero = result & 0xFF == 0x00;
        self.flags.sign = result & 0x80 == 0x80;
    }

    /// DEC - decrement the value at a memory location, wrapping 0x00 -> 0xFF.
    fn dec(&mut self, operand: &Operand) {
        let value = self.unwrap_immediate(&operand);
        let addr = self.unwrap_address(&operand);
        // 6502 arithmetic wraps; `value - 1` would panic on 0x00 in debug builds
        let result = value.wrapping_sub(1);
        self.write_byte(addr, result);
        self.flags.sign = result & 0x80 == 0x80;
        self.flags.zero = result == 0x00;
    }

    /// DEX - decrement the X register, wrapping 0x00 -> 0xFF.
    fn dex(&mut self) {
        self.registers.X = self.registers.X.wrapping_sub(0x01);
        self.flags.sign = self.registers.X & 0x80 == 0x80;
        self.flags.zero = self.registers.X == 0x00;
    }

    /// DEY - decrement the Y register, wrapping 0x00 -> 0xFF.
    fn dey(&mut self) {
        self.registers.Y = self.registers.Y.wrapping_sub(0x01);
        self.flags.sign = self.registers.Y & 0x80 == 0x80;
        self.flags.zero = self.registers.Y == 0x00;
    }

    /// EOR - exclusive-OR the operand into the accumulator.
    fn eor(&mut self, operand: &Operand) {
        let value = self.unwrap_immediate(&operand);
        let result = self.registers.A ^ value;
        self.registers.A = result;
        self.flags.sign = result & 0x80 == 0x80;
        self.flags.zero = result == 0x00;
    }

    /// INC - increment the value at a memory location, wrapping 0xFF -> 0x00.
    fn inc(&mut self, operand: &Operand) {
        let value = self.unwrap_immediate(&operand);
        let addr = self.unwrap_address(&operand);
        // 6502 arithmetic wraps; `value + 1` would panic on 0xFF in debug builds
        let result = value.wrapping_add(1);
        self.write_byte(addr, result);
        self.flags.sign = result & 0x80 == 0x80;
        self.flags.zero = result == 0x00;
    }

    /// INX - increment the X register, wrapping 0xFF -> 0x00.
    fn inx(&mut self) {
        self.registers.X = self.registers.X.wrapping_add(0x01);
        self.flags.sign = self.registers.X & 0x80 == 0x80;
        self.flags.zero = self.registers.X == 0x00;
    }

    /// INY - increment the Y register, wrapping 0xFF -> 0x00.
    fn iny(&mut self) {
        self.registers.Y = self.registers.Y.wrapping_add(0x01);
        self.flags.sign = self.registers.Y & 0x80 == 0x80;
        self.flags.zero = self.registers.Y == 0x00;
    }

    /// JMP - set the Program Counter to the operand address.
    fn jmp(&mut self, operand: &Operand) {
        let value = self.unwrap_address(&operand);
        self.registers.PC = value;
    }

    /// LDA - load the operand into the accumulator.
    fn lda(&mut self, operand: &Operand) {
        let value = self.unwrap_immediate(&operand);
        self.registers.A = value;
        self.flags.sign = value & 0x80 == 0x80;
        self.flags.zero = value == 0x00;
    }

    /// LDX - load the operand into the X register.
    fn ldx(&mut self, operand: &Operand) {
        let value = self.unwrap_immediate(&operand);
        self.registers.X = value;
        self.flags.sign = value & 0x80 == 0x80;
        self.flags.zero = value == 0x00;
    }

    /// LDY - load the operand into the Y register.
    fn ldy(&mut self, operand: &Operand) {
        let value = self.unwrap_immediate(&operand);
        self.registers.Y = value;
        self.flags.sign = value & 0x80 == 0x80;
        self.flags.zero = value == 0x00;
    }

    /// STA - store the accumulator at the operand address.
    fn sta(&mut self, operand: &Operand) {
        let addr = self.unwrap_address(&operand);
        let value = self.registers.A;
        self.write_byte(addr, value);
    }

    /// Applies a signed 8-bit branch offset to the Program Counter.
    fn relative_jump(&mut self, offset: u8) {
        // If the sign bit is set the offset is negative: move PC back by the
        // two's-complement magnitude (0x100 - offset). Wrapping mirrors 6502
        // behaviour at the edges of the address space.
        if offset & 0x80 == 0x80 {
            self.registers.PC = self.registers.PC.wrapping_sub(0x100 - offset as u16);
        } else {
            self.registers.PC = self.registers.PC.wrapping_add(offset as u16);
        }
    }

    /// Convenience wrapper for accessing a byte
    /// in memory
    fn read_byte(&self, addr: u16) -> u8 {
        self.memory.read_byte(addr)
    }

    /// Convenience wrapper for writing a byte
    /// to memory
    fn write_byte(&mut self, addr: u16, byte: u8) {
        self.memory.write_byte(addr, byte);
    }

    /// Convenience wrapper for accessing a word
    /// in memory
    fn read_u16(&self, addr: u16) -> u16 {
        self.memory.read_u16(addr)
    }
}
Tests passing
use ::opcodes::{AddressingMode, OpCode};
use cpu::cpu_error::CpuError;
use cpu::flags::StatusFlags;
use cpu::memory_bus::MemoryBus;
use cpu::registers::Registers;
use cpu::stack::Stack;
const DEFAULT_CODE_SEGMENT_START_ADDRESS: u16 = 0xC000; // Default to a 16KB ROM, leaving 32KB of main memory
const STACK_START: usize = 0x100;
const STACK_END: usize = 0x1FF;
/// The resolved argument of a decoded instruction.
pub enum Operand {
    /// A literal byte (also used for relative branch offsets,
    /// which the branch handlers interpret as signed).
    Immediate(u8),
    /// An effective memory address to read from or write to.
    Memory(u16),
    /// No operand (implied or accumulator addressing).
    Implied,
}
/// A representation of a 6502 microprocessor
pub struct Cpu {
    /// Byte-addressable memory over the 16-bit address space.
    pub memory: MemoryBus,
    /// CPU registers (A, X, Y and the Program Counter are used here).
    pub registers: Registers,
    /// Processor status flags (carry, zero, sign, overflow, decimal,
    /// interrupt-disable).
    pub flags: StatusFlags,
    /// Stack bookkeeping; the stack storage itself is the memory range
    /// 0x100-0x1FF (see `STACK_START`/`STACK_END` and `brk`).
    pub stack: Stack,
    // First address of the code segment set by `load`.
    code_start: usize,
    // Byte length of the loaded code; `step_n` stops at code_start + code_size.
    code_size: usize,
}
pub type CpuLoadResult = Result<(), CpuError>;
pub type CpuStepResult = Result<(), CpuError>;
impl Cpu {
/// Returns a default instance of a Cpu
pub fn new() -> Cpu {
Cpu {
memory: MemoryBus::new(),
registers: Registers::new(),
flags: Default::default(),
stack: Stack::new(),
code_start: DEFAULT_CODE_SEGMENT_START_ADDRESS as usize,
code_size: 0,
}
}
/// Loads code into the Cpu main memory at an optional offset. If no
/// offset is provided, the Cpu will, by default, load the code into
/// main memory at 0xC000
pub fn load<T>(&mut self, code: &[u8], addr: T) -> CpuLoadResult
where T: Into<Option<u16>>
{
let addr = addr.into();
let addr: u16 = if addr.is_some() {
let addr = addr.unwrap();
if addr as u32 + code.len() as u32 > u16::max_value() as u32 {
return Err(CpuError::code_segment_out_of_range(addr));
} else {
addr
}
} else {
DEFAULT_CODE_SEGMENT_START_ADDRESS
};
for x in 0..code.len() {
self.memory.write_byte(addr + x as u16, code[x]);
}
// Set the Program Counter to point at the
// start address of the code segment
self.registers.PC = addr;
self.code_start = addr as usize;
self.code_size = code.len();
Ok(())
}
/// Runs N instructions of code through the Cpu
pub fn step_n(&mut self, n: u32) -> CpuStepResult {
for _ in 0..n {
if (self.registers.PC as usize) < self.code_start + self.code_size {
self.step()?;
} else {
break;
}
}
Ok(())
}
/// Runs a single instruction of code through the Cpu
pub fn step(&mut self) -> CpuStepResult {
let byte = self.memory.read_byte(self.registers.PC);
if let Some(opcode) = OpCode::from_raw_byte(byte) {
let operand = self.get_operand_from_opcode(&opcode);
self.registers.PC += opcode.length as u16;
match opcode.mnemonic {
"ADC" => self.adc(&operand),
"AND" => self.and(&operand),
"ASL" => self.asl(&operand),
"BCC" => self.bcc(&operand),
"BCS" => self.bcs(&operand),
"BEQ" => self.beq(&operand),
"BIT" => self.bit(&operand),
"BMI" => self.bmi(&operand),
"BNE" => self.bne(&operand),
"BPL" => self.bpl(&operand),
"BRK" => self.brk(),
"BVC" => self.bvc(&operand),
"BVS" => self.bvs(&operand),
"CLC" => self.set_carry_flag(false),
"CLD" => self.set_decimal_flag(false),
"CLI" => self.set_interrupt_flag(false),
"CLV" => self.set_overflow_flag(false),
"CMP" => {
let a = self.registers.A;
self.compare(&operand, a)
}
"CPX" => {
let x = self.registers.X;
self.compare(&operand, x)
}
"CPY" => {
let y = self.registers.Y;
self.compare(&operand, y)
}
"DEC" => self.dec(&operand),
"DEX" => self.dex(),
"DEY" => self.dey(),
"EOR" => self.eor(&operand),
"INC" => self.inc(&operand),
"INX" => self.inx(),
"INY" => self.iny(),
"JMP" => self.jmp(&operand),
"LDA" => self.lda(&operand),
"LDX" => self.ldx(&operand),
"LDY" => self.ldy(&operand),
"SED" => self.set_decimal_flag(true),
"STA" => self.sta(&operand),
_ => return Err(CpuError::unknown_opcode(self.registers.PC, opcode.code)),
}
Ok(())
} else {
Err(CpuError::unknown_opcode(self.registers.PC, byte))
}
}
fn get_operand_from_opcode(&self, opcode: &OpCode) -> Operand {
use ::opcodes::AddressingMode::*;
let operand_start = self.registers.PC + 1;
match opcode.mode {
Unknown => unreachable!(),
Implied => Operand::Implied,
Immediate => Operand::Immediate(self.read_byte(operand_start)),
Relative => Operand::Immediate(self.read_byte(operand_start)),
Accumulator => Operand::Implied,
ZeroPage => Operand::Memory((self.read_byte(operand_start) as u16) & 0xFF),
ZeroPageX => {
Operand::Memory((self.registers.X as u16 + self.read_byte(operand_start) as u16) &
0xFF)
}
ZeroPageY => {
Operand::Memory((self.registers.Y as u16 + self.read_byte(operand_start) as u16) &
0xFF)
}
Absolute => Operand::Memory(self.read_u16(operand_start)),
AbsoluteX => Operand::Memory(self.registers.X as u16 + self.read_u16(operand_start)),
AbsoluteY => Operand::Memory(self.registers.Y as u16 + self.read_u16(operand_start)),
Indirect => Operand::Memory(self.read_u16(self.read_u16(operand_start))),
IndirectX => {
Operand::Memory(self.read_u16((self.registers.X as u16 +
self.read_byte(self.registers.PC + 1) as u16) &
0xFF))
}
IndirectY => {
Operand::Memory(self.registers.Y as u16 +
self.read_u16(self.read_byte(self.registers.PC + 1) as u16))
}
}
}
fn unwrap_immediate(&self, operand: &Operand) -> u8 {
match *operand {
Operand::Immediate(byte) => byte,
Operand::Memory(addr) => self.read_byte(addr),
Operand::Implied => 0,
}
}
fn unwrap_address(&self, operand: &Operand) -> u16 {
match *operand {
Operand::Immediate(byte) => byte as u16,
Operand::Memory(addr) => addr,
Operand::Implied => 0,
}
}
// ## OpCode handlers ##
fn adc(&mut self, operand: &Operand) {
// This is implemented on the information provided here:
// http://www.electrical4u.com/bcd-or-binary-coded-decimal-bcd-conversion-addition-subtraction/
// and here:
// http://www.6502.org/tutorials/decimal_mode.html,
// and here:
// http://www.atariarchives.org/2bml/chapter_10.php,
// and also here:
// http://stackoverflow.com/questions/29193303/6502-emulation-proper-way-to-implement-adc-and-sbc
let carry = if self.flags.carry { 1 } else { 0 };
let value = self.unwrap_immediate(&operand) as u16;
let value_signs = self.registers.A & 0x80 == 0x80 && value & 0x80 == 0x80;
// Do normal binary arithmetic first
let mut result = self.registers.A as u16 + value as u16 + carry as u16;
// Handle packed binary coded decimal
if self.flags.decimal {
if (self.registers.A as u16 & 0x0F) + (value & 0x0F) + carry > 0x09 {
result += 0x06;
}
if result > 0x99 {
result += 0x60;
}
self.flags.carry = (result & 0x100) == 0x100;
} else {
self.flags.carry = result > 0xFF;
}
self.flags.zero = result as u8 & 0xFF == 0x00;
self.flags.sign = result & 0x80 == 0x80;
if self.flags.sign != value_signs {
self.flags.overflow = true;
}
self.registers.A = result as u8 & 0xFF;
}
fn and(&mut self, operand: &Operand) {
let value = self.unwrap_immediate(&operand);
let result = self.registers.A & value;
self.registers.A = result;
self.flags.zero = result as u8 & 0xFF == 0;
self.flags.sign = result & 0x80 == 0x80;
}
fn asl(&mut self, operand: &Operand) {
let mut value = if let &Operand::Implied = operand {
// Implied ASL uses the A register
self.registers.A
} else {
self.unwrap_immediate(&operand)
};
// Test the seventh bit - if its set, shift it
// into the carry flag
self.flags.carry = (value & 0x80) == 0x80;
// Shift the value left
value = value << 0x01;
self.flags.sign = value & 0x80 == 0x80;
self.flags.zero = value as u8 & 0xFF == 0;
if let &Operand::Implied = operand {
self.registers.A = value;
} else {
let addr = self.unwrap_address(&operand);
self.write_byte(addr, value);
}
}
fn bcc(&mut self, operand: &Operand) {
// Branch if the carry flag is not set
if !self.flags.carry {
let offset = self.unwrap_immediate(&operand);
self.relative_jump(offset);
}
}
fn bcs(&mut self, operand: &Operand) {
// Branch if the carry flag is set
if self.flags.carry {
let offset = self.unwrap_immediate(&operand);
self.relative_jump(offset);
}
}
fn beq(&mut self, operand: &Operand) {
// Branch if the zero flag is set
if self.flags.zero {
let offset = self.unwrap_immediate(&operand);
self.relative_jump(offset);
}
}
fn bit(&mut self, operand: &Operand) {
let a = self.registers.A;
let value = self.unwrap_immediate(&operand);
let result = value & a;
self.flags.zero = result == 0x00;
self.flags.overflow = value & 0x40 == 0x40; // "The V flag and the N flag receive copies of the sixth and seventh bits of the tested number"
self.flags.sign = value & 0x80 == 0x80;
}
fn bmi(&mut self, operand: &Operand) {
// Branch if the sign flag is set
if self.flags.sign {
let offset = self.unwrap_immediate(&operand);
self.relative_jump(offset);
}
}
fn bne(&mut self, operand: &Operand) {
// Branch if the zero flag is not set
if !self.flags.zero {
let offset = self.unwrap_immediate(&operand);
self.relative_jump(offset);
}
}
fn bpl(&mut self, operand: &Operand) {
// Branch if the sign flag is not set
if !self.flags.sign {
let offset = self.unwrap_immediate(&operand);
self.relative_jump(offset);
}
}
fn brk(&mut self) {
let mut mem = &mut self.memory[STACK_START..STACK_END];
self.stack.push_u16(mem, self.registers.PC);
self.stack.push(mem, self.flags.to_u8());
self.flags.interrupt_disabled = true;
}
fn bvc(&mut self, operand: &Operand) {
// Branch if the overflow flag is not set
if !self.flags.overflow {
let offset = self.unwrap_immediate(&operand);
self.relative_jump(offset);
}
}
fn bvs(&mut self, operand: &Operand) {
// Branch if the overflow flag is set
if self.flags.overflow {
let offset = self.unwrap_immediate(&operand);
self.relative_jump(offset);
}
}
fn set_carry_flag(&mut self, value: bool) {
self.flags.carry = value;
}
fn set_decimal_flag(&mut self, value: bool) {
self.flags.decimal = value;
}
fn set_interrupt_flag(&mut self, value: bool) {
self.flags.interrupt_disabled = value;
}
fn set_overflow_flag(&mut self, value: bool) {
self.flags.overflow = value;
}
fn compare(&mut self, operand: &Operand, byte: u8) {
let value = self.unwrap_immediate(&operand);
let result: i16 = byte as i16 - value as i16;
self.flags.carry = (result as u16) < 0x100;
self.flags.zero = result & 0xFF == 0x00;
self.flags.sign = result & 0x80 == 0x80;
}
fn dec(&mut self, operand: &Operand) {
let value = self.unwrap_immediate(&operand);
let addr = self.unwrap_address(&operand);
let result = value - 1;
self.write_byte(addr, result);
self.flags.sign = result & 0x80 == 0x80;
self.flags.zero = result & 0xFF == 0x00;
}
fn dex(&mut self) {
self.registers.X -= 0x01;
self.flags.sign = self.registers.X & 0x80 == 0x80;
self.flags.zero = self.registers.X & 0xFF == 0x00;
}
fn dey(&mut self) {
self.registers.Y -= 0x01;
self.flags.sign = self.registers.Y & 0x80 == 0x80;
self.flags.zero = self.registers.Y & 0xFF == 0x00;
}
fn eor(&mut self, operand: &Operand) {
let value = self.unwrap_immediate(&operand);
let result = self.registers.A ^ value;
self.registers.A = result;
self.flags.sign = result & 0x80 == 0x80;
self.flags.zero = result & 0xFF == 0x00;
}
fn inc(&mut self, operand: &Operand) {
let value = self.unwrap_immediate(&operand);
let addr = self.unwrap_address(&operand);
let result = value + 1;
self.write_byte(addr, result);
self.flags.sign = result & 0x80 == 0x80;
self.flags.zero = result & 0xFF == 0x00;
}
fn inx(&mut self) {
self.registers.X += 0x01;
self.flags.sign = self.registers.X & 0x80 == 0x80;
self.flags.zero = self.registers.X & 0xFF == 0x00;
}
fn iny(&mut self) {
self.registers.Y += 0x01;
self.flags.sign = self.registers.Y & 0x80 == 0x80;
self.flags.zero = self.registers.Y & 0xFF == 0x00;
}
fn jmp(&mut self, operand: &Operand) {
let value = self.unwrap_address(&operand);
self.registers.PC = value;
}
fn lda(&mut self, operand: &Operand) {
let value = self.unwrap_immediate(&operand);
self.registers.A = value;
self.flags.sign = value & 0x80 == 0x80;
self.flags.zero = value & 0xFF == 0x00;
}
fn ldx(&mut self, operand: &Operand) {
let value = self.unwrap_immediate(&operand);
self.registers.X = value;
self.flags.sign = value & 0x80 == 0x80;
self.flags.zero = value & 0xFF == 0x00;
}
fn ldy(&mut self, operand: &Operand) {
let value = self.unwrap_immediate(&operand);
self.registers.Y = value;
self.flags.sign = value & 0x80 == 0x80;
self.flags.zero = value & 0xFF == 0x00;
}
fn sta(&mut self, operand: &Operand) {
let addr = self.unwrap_address(&operand);
let value = self.registers.A;
self.write_byte(addr, value);
}
fn relative_jump(&mut self, offset: u8) {
    // Branch target = PC + sign-extended 8-bit offset (two's complement).
    // `offset as i8 as i16 as u16` reinterprets the byte as a signed
    // displacement, which is exactly what the old explicit sign-bit test
    // computed (PC -= 0x100 - offset for negative offsets), but the wrapping
    // add cannot panic at the edges of the 16-bit address space.
    self.registers.PC = self.registers.PC.wrapping_add(offset as i8 as i16 as u16);
}
/// Convenience wrapper for accessing a byte
/// in memory; delegates to the `Memory` backing store.
fn read_byte(&self, addr: u16) -> u8 {
    self.memory.read_byte(addr)
}
/// Convenience wrapper for writing a byte
/// to memory; delegates to the `Memory` backing store.
fn write_byte(&mut self, addr: u16, byte: u8) {
    self.memory.write_byte(addr, byte);
}
/// Convenience wrapper for accessing a word
/// in memory; byte-order handling lives in `Memory::read_u16`.
fn read_u16(&self, addr: u16) -> u16 {
    self.memory.read_u16(addr)
}
}
|
//
// Copyright (c) ShuYu Wang <andelf@gmail.com>, Feather Workshop and Pirmin Kalberer. All rights reserved.
//
//! An extension to rust-postgres, adds support for PostGIS.
//!
//! - PostGIS type helper
//! - GCJ02 support (used officially in Mainland China)
//! - Tiny WKB (TWKB) support
//!
//! ```rust,no_run
//! use postgres::{Connection, TlsMode};
//! use postgis::ewkb;
//! use postgis::LineString;
//!
//! fn main() {
//! // conn ....
//! # let conn = Connection::connect("postgresql://postgres@localhost", TlsMode::None).unwrap();
//! for row in &conn.query("SELECT * FROM busline", &[]).unwrap() {
//! let route: ewkb::LineString = row.get("route");
//! let last_stop = route.points().last().unwrap();
//! let _ = conn.execute("INSERT INTO stops (stop) VALUES ($1)", &[&last_stop]);
//! }
//! }
//! ```
//!
//! Handling NULL values:
//!
//! ```rust,no_run
//! let route = row.get_opt::<_, Option<ewkb::LineString>>("route");
//! match route.unwrap() {
//! Ok(Some(geom)) => { println!("{:?}", geom) }
//! Ok(None) => { /* Handle NULL value */ }
//! Err(err) => { println!("Error: {}", err) }
//! }
//! ```
#![feature(underscore_lifetimes)]
#[macro_use(accepts, to_sql_checked)]
extern crate postgres;
extern crate byteorder;
pub mod error;
mod types;
pub use types::{Point, LineString, Polygon, MultiPoint, MultiLineString, MultiPolygon};
pub mod ewkb;
pub mod twkb;
mod postgis;
pub mod mars;
Remove accidentally added #![feature] directive
//
// Copyright (c) ShuYu Wang <andelf@gmail.com>, Feather Workshop and Pirmin Kalberer. All rights reserved.
//
//! An extension to rust-postgres, adds support for PostGIS.
//!
//! - PostGIS type helper
//! - GCJ02 support (used officially in Mainland China)
//! - Tiny WKB (TWKB) support
//!
//! ```rust,no_run
//! use postgres::{Connection, TlsMode};
//! use postgis::ewkb;
//! use postgis::LineString;
//!
//! fn main() {
//! // conn ....
//! # let conn = Connection::connect("postgresql://postgres@localhost", TlsMode::None).unwrap();
//! for row in &conn.query("SELECT * FROM busline", &[]).unwrap() {
//! let route: ewkb::LineString = row.get("route");
//! let last_stop = route.points().last().unwrap();
//! let _ = conn.execute("INSERT INTO stops (stop) VALUES ($1)", &[&last_stop]);
//! }
//! }
//! ```
//!
//! Handling NULL values:
//!
//! ```rust,no_run
//! let route = row.get_opt::<_, Option<ewkb::LineString>>("route");
//! match route.unwrap() {
//! Ok(Some(geom)) => { println!("{:?}", geom) }
//! Ok(None) => { /* Handle NULL value */ }
//! Err(err) => { println!("Error: {}", err) }
//! }
//! ```
#[macro_use(accepts, to_sql_checked)]
extern crate postgres;
extern crate byteorder;
pub mod error;
mod types;
pub use types::{Point, LineString, Polygon, MultiPoint, MultiLineString, MultiPolygon};
pub mod ewkb;
pub mod twkb;
mod postgis;
pub mod mars;
|
mod registers;
mod ops;
mod fetcher;
mod decoder;
use std::rc::Rc;
use std::cell::RefCell;
use memory::Memory;
use cpu::registers::*;
use cpu::ops::*;
use cpu::fetcher::*;
/*
Instruction Layout
====================
This analysis is based upon the Zilog Z80 manual with modifications done by the GB/GBC
such as removing the instructions involving the IX and IY registers.
[] = 1 byte
Addressing Modes
--------------------
Immediate Addressing: [op][operand] (1 or 2 byte op code)
Operand is a single byte, such as loading the accumulator with a constant.
Immediate Extended Addressing: [op][low][high] (1 or 2 byte op code)
Operand is split into two bytes, such as to load the HL register pair with 16 bits
of data.
Register Addressing:
Many Z80 opcodes specify register to registers directly
Implied Addressing:
This indicates OP codes imply registers, such as arithmetic instructions always
implying that the destination is the accumulator (A in AF register).
Register Indirect Addressing: [op] (1 or 2 byte op code)
This specifies a 16 bit register pair to be used as a pointer to any location in
memory. Such as loading the accumulator with data pointed to in the HR register.
Modified Page Zero Addressing: [op] (1 byte op code)
Eight special locations in page 0 of memory, depending on which version of CALL is
called.
Relative Addressing: [op][displacement] (1 byte op code)
Used for Jump Relative, displacement is 8 bit twos complement offset from A+2 (where
A is the current PC value)
Extended Addressing: [op][lowaddr][highaddr] (1 or 2 byte op code)
Used to jump to any location in 16 bit memory
*/
// CPU Data
pub struct Cpu {
    // Cleared when execution should stop (polled by the embedding loop).
    pub running: bool,
    // Memory bus shared with other components via Rc<RefCell>.
    memory: Rc<RefCell<Memory>>,
    // CPU register file (A/F/B/C/D/E/H/L, SP, PC and flag helpers).
    regs: Registers
}
macro_rules! read_reg_pair {
    // Combines two 8-bit registers into one 16-bit value; $h is the high byte.
    ($h:expr, $l:expr) => {
        (($h as u16) << 8) | $l as u16
    };
}
macro_rules! write_reg_pair {
    // Splits a 16-bit value into its high ($h) and low ($l) register halves.
    ($h:expr, $l:expr, $v:expr) => {{
        $h = ($v >> 8) as u8;
        $l = ($v & 0xFF) as u8;
    }};
}
// Control Conditions
#[derive(Debug, PartialEq, Eq)]
pub enum Cond {
    // Branch conditions: unconditional, zero flag clear/set, carry flag clear/set.
    None, NZ, Z, NC, C
}
// Registers
impl In8 for Reg8 {
    // Reads the named 8-bit register out of the CPU's register file.
    fn read(&self, cpu: &mut Cpu) -> u8 {
        match *self {
            Reg8::A => cpu.regs.a,
            Reg8::B => cpu.regs.b,
            Reg8::C => cpu.regs.c,
            Reg8::D => cpu.regs.d,
            Reg8::E => cpu.regs.e,
            Reg8::F => cpu.regs.f,
            Reg8::H => cpu.regs.h,
            Reg8::L => cpu.regs.l
        }
    }
}
impl Out8 for Reg8 {
    // Writes `data` into the named 8-bit register.
    fn write(&self, cpu: &mut Cpu, data: u8) {
        match *self {
            Reg8::A => cpu.regs.a = data,
            Reg8::B => cpu.regs.b = data,
            Reg8::C => cpu.regs.c = data,
            Reg8::D => cpu.regs.d = data,
            Reg8::E => cpu.regs.e = data,
            Reg8::F => cpu.regs.f = data,
            Reg8::H => cpu.regs.h = data,
            Reg8::L => cpu.regs.l = data
        }
    }
}
impl In16 for Reg16 {
    // Reads a 16-bit register, combining 8-bit pairs where necessary.
    fn read(&self, cpu: &mut Cpu) -> u16 {
        match *self {
            Reg16::AF => read_reg_pair!(cpu.regs.a, cpu.regs.f),
            Reg16::BC => read_reg_pair!(cpu.regs.b, cpu.regs.c),
            Reg16::DE => read_reg_pair!(cpu.regs.d, cpu.regs.e),
            Reg16::HL => read_reg_pair!(cpu.regs.h, cpu.regs.l),
            Reg16::SP => cpu.regs.sp,
            Reg16::PC => cpu.regs.pc,
        }
    }
}
impl Out16 for Reg16 {
    // Writes a 16-bit value, splitting it across 8-bit pairs where necessary.
    fn write(&self, cpu: &mut Cpu, data: u16) {
        match *self {
            Reg16::AF => write_reg_pair!(cpu.regs.a, cpu.regs.f, data),
            Reg16::BC => write_reg_pair!(cpu.regs.b, cpu.regs.c, data),
            Reg16::DE => write_reg_pair!(cpu.regs.d, cpu.regs.e, data),
            Reg16::HL => write_reg_pair!(cpu.regs.h, cpu.regs.l, data),
            Reg16::SP => cpu.regs.sp = data,
            Reg16::PC => cpu.regs.pc = data,
        }
    }
}
// Immediate operand - a constant stored in the next byte
#[derive(Debug)]
pub struct Imm8(u8);
impl In8 for Imm8 {
    /// Yields the embedded constant; the CPU state is untouched.
    fn read(&self, _: &mut Cpu) -> u8 {
        self.0
    }
}
// Immediate extended operand - a constant stored in the next two bytes
#[derive(Debug)]
pub struct Imm16(u16);
impl In16 for Imm16 {
    /// Yields the embedded 16-bit constant; the CPU state is untouched.
    fn read(&self, _: &mut Cpu) -> u16 {
        self.0
    }
}
// Indirect Addressing
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IndirectAddr {
    // Addressing modes that dereference memory rather than name a register.
    BC, DE, HL, // (BC/DE/HL)
    C, // (FF00 + C)
    Imm8(u8), // (FF00 + n)
    Imm16(u16), // (nn)
}
/// Resolves an indirect addressing mode to a concrete 16-bit address.
/// `C` and `Imm8` are offsets into the high I/O page at 0xFF00.
fn get_address(cpu: &mut Cpu, a: &IndirectAddr) -> u16 {
    match *a {
        IndirectAddr::BC => read_reg_pair!(cpu.regs.b, cpu.regs.c),
        IndirectAddr::DE => read_reg_pair!(cpu.regs.d, cpu.regs.e),
        IndirectAddr::HL => read_reg_pair!(cpu.regs.h, cpu.regs.l),
        IndirectAddr::C => 0xFF00 + cpu.regs.c as u16,
        IndirectAddr::Imm8(offset) => 0xFF00 + offset as u16,
        IndirectAddr::Imm16(address) => address
    }
}
impl In8 for IndirectAddr {
    /// Dereferences the addressing mode and reads one byte from memory.
    fn read(&self, cpu: &mut Cpu) -> u8 {
        let target = get_address(cpu, self);
        cpu.mem_read_u8(target)
    }
}
impl Out8 for IndirectAddr {
    /// Dereferences the addressing mode and stores one byte to memory.
    fn write(&self, cpu: &mut Cpu, data: u8) {
        let target = get_address(cpu, self);
        cpu.mem_write_u8(target, data);
    }
}
impl Out16 for IndirectAddr {
    /// Dereferences the addressing mode and stores a 16-bit word to memory.
    fn write(&self, cpu: &mut Cpu, data: u16) {
        let target = get_address(cpu, self);
        cpu.mem_write_u16(target, data);
    }
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Op8 {
    // A generic 8-bit operand: register, memory-indirect, or immediate.
    Reg(Reg8),
    Ind(IndirectAddr),
    Imm(u8)
}
impl Fetcher for Cpu {
    /// Returns the byte at PC and advances PC past it.
    fn fetch_word(&mut self) -> u8 {
        let pc = self.regs.pc;
        self.regs.pc = pc + 1;
        self.mem_read_u8(pc)
    }
}
// Interpreter implementation of the CPU ops defined in the ops module
#[allow(unused_variables)]
impl<'a> CpuOps for &'a mut Cpu {
    // 8-bit load: copy from any In8 source to any Out8 destination.
    fn load<I: In8, O: Out8>(&mut self, i: I, o: O) {
        let value = i.read(self);
        o.write(self, value);
    }
    // 16-bit load.
    fn load16<I: In16, O: Out16>(&mut self, i: I, o: O) {
        let value = i.read(self);
        o.write(self, value);
    }
    // LD HL, SP+n.
    // NOTE(review): for a negative offset, `offset as u16` sign-extends to a
    // large value, so `sp - (offset as u16)` underflows; `(-offset) as u16`
    // (or a wrapping add of the sign-extended offset) looks intended -- confirm.
    fn load16_hlsp(&mut self, offset: i8) {
        let value = if offset < 0 {
            self.regs.sp - (offset as u16)
        } else {
            self.regs.sp + (offset as u16)
        };
        Reg16::HL.write(self, value);
    }
    // TODO(David): Should the stack pointer be decremented before or after reading from memory?
    // NOTE(review): hardware pushes pre-decrement and pops read-then-increment;
    // the order here looks swapped relative to that -- confirm.
    fn push<I: In16>(&mut self, i: I) {
        let sp = self.regs.sp;
        let content = i.read(self);
        self.mem_write_u16(sp, content);
        self.regs.sp -= 2;
    }
    fn pop<O: Out16>(&mut self, o: O) {
        self.regs.sp += 2;
        let value = self.mem_read_u16(self.regs.sp);
        o.write(self, value);
    }
    // ADD A, n -- sum in u16 so the carry out of bit 7 survives truncation.
    // NOTE(review): H samples bit 4 of the result, not the carry out of bit 3
    // of the operands -- confirm against the flag spec.
    fn add<I: In8>(&mut self, i: I) {
        let result = self.regs.a as u16 + i.read(self) as u16;
        self.regs.a = result as u8;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.update_flag(Flag::H, ((result >> 4) & 0x1) == 1);
        self.regs.update_flag(Flag::C, ((result >> 8) & 0x1) == 1);
    }
    // ADC A, n -- as add, plus the incoming carry flag.
    fn adc<I: In8>(&mut self, i: I) {
        let result =
            self.regs.a as u16 +
            i.read(self) as u16 +
            if self.regs.get_flag(Flag::C) { 1 } else { 0 };
        self.regs.a = result as u8;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.update_flag(Flag::H, ((result >> 4) & 0x1) == 1);
        self.regs.update_flag(Flag::C, ((result >> 8) & 0x1) == 1);
    }
    // SUB n. NOTE(review): the u16 subtraction panics in debug builds whenever
    // the operand exceeds A -- wrapping arithmetic is probably wanted.
    fn sub<I: In8>(&mut self, i: I) {
        let result = self.regs.a as u16 - i.read(self) as u16;
        self.regs.a = result as u8;
        // TODO(David): Flags
    }
    fn sbc<I: In8>(&mut self, i: I) {
        let result =
            self.regs.a as u16 -
            i.read(self) as u16 -
            if self.regs.get_flag(Flag::C) { 1 } else { 0 };
        self.regs.a = result as u8;
        // TODO(David): Flags
    }
    // AND n -- note H is unconditionally set for this operation.
    fn and<I: In8>(&mut self, i: I) {
        self.regs.a &= i.read(self);
        let result = self.regs.a;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.set_flag(Flag::H);
        self.regs.reset_flag(Flag::C);
    }
    fn or<I: In8>(&mut self, i: I) {
        self.regs.a |= i.read(self);
        let result = self.regs.a;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
        self.regs.reset_flag(Flag::C);
    }
    fn xor<I: In8>(&mut self, i: I) {
        self.regs.a ^= i.read(self);
        let result = self.regs.a;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
        self.regs.reset_flag(Flag::C);
    }
    // CP n -- compare via subtraction, discarding the numeric result.
    fn cp<I: In8>(&mut self, i: I) {
        let result = self.regs.a as u16 - i.read(self) as u16;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.set_flag(Flag::N);
        // TODO(David): H and C flags
    }
    // INC n. NOTE(review): `+ 1` panics on 0xFF in debug builds; wrapping_add
    // would match the hardware's 0xFF -> 0x00 behaviour.
    fn inc<I: In8 + Out8>(&mut self, i: I) {
        let result = i.read(self) + 1;
        i.write(self, result);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.update_flag(Flag::H, ((result >> 3) & 0x1) == 1);
    }
    // DEC n. NOTE(review): same underflow concern as inc for a value of 0.
    fn dec<I: In8 + Out8>(&mut self, i: I) {
        let result = i.read(self) - 1;
        i.write(self, result);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.set_flag(Flag::N);
        // TODO(David): H flag
    }
    // ADD HL, rr -- 16-bit add computed in u32 so bit 16 (the carry) survives.
    fn add16<I: In16>(&mut self, i: I) {
        let result = Reg16::HL.read(self) as u32 + i.read(self) as u32;
        Reg16::HL.write(self, result as u16);
        self.regs.reset_flag(Flag::N);
        self.regs.update_flag(Flag::H, ((result >> 12) & 0x1) == 1);
        self.regs.update_flag(Flag::C, ((result >> 16) & 0x1) == 1);
    }
    fn add16_sp(&mut self, i: Imm8) {
        //TODO(Csongor): this was not actually setting
        //the stack pointer anyway, so I've just commented
        //it out for now
        //let result = self.regs.sp + i.read(self) as i8;
        //self.regs.reset_flag(Flag::Z);
        //self.regs.reset_flag(Flag::N);
        // TODO(David): H and C flags are ambiguously defined
    }
    fn inc16<I: In16 + Out16>(&mut self, i: I) {
        let result = i.read(self) + 1;
        i.write(self, result);
    }
    fn dec16<I: In16 + Out16>(&mut self, i: I) {
        let result = i.read(self) - 1;
        i.write(self, result);
    }
    // misc
    fn nop(&mut self) {}
    fn daa(&mut self) {
        // TODO(David): Ambiguous spec, test this
        // A stores a number up to 255. In BCD form each nibble would store a single digit,
        // therefore the maximum number that can be stored is 99.
        // Source:
        // The DAA instruction corrects this invalid result. It checks to see if there was a carry
        // out of the low order BCD digit and adjusts the value (by adding six to it) if there was
        // an overflow. After adjusting for overflow out of the L.O. digit, the DAA instruction
        // repeats this process for the H.O. digit. DAA sets the carry flag if there was a (decimal)
        // carry out of the H.O. digit of the operation.
    }
    // CPL -- complement A; N and H are always set.
    fn cpl(&mut self) {
        self.regs.a = !self.regs.a;
        self.regs.set_flag(Flag::N);
        self.regs.set_flag(Flag::H);
    }
    // CCF -- toggle the carry flag; N and H cleared.
    fn ccf(&mut self) {
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
        self.regs.update_flag(Flag::C, !self.regs.get_flag(Flag::C));
    }
    // SCF -- set the carry flag; N and H cleared.
    fn scf(&mut self) {
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
        self.regs.set_flag(Flag::C);
    }
    // Interrupt/power control -- unimplemented stubs.
    fn halt(&mut self) {
    }
    fn stop(&mut self) {
    }
    fn ei(&mut self) {
    }
    fn di(&mut self) {
    }
    // rotate and shift
    // NOTE(review): update_flag is given integer expressions below but bool
    // expressions everywhere above -- one of the two call styles presumably
    // does not type-check; confirm update_flag's signature.
    fn rlc<I: In8 + Out8>(&mut self, i: I) {
        let value = i.read(self);
        self.regs.update_flag(Flag::C, value >> 7);
        let result = value << 1;
        i.write(self, result);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
    }
    fn rl<I: In8 + Out8>(&mut self, i: I) {
        // TODO(David): Spec is ambiguous again, what's the difference between RL and RLC?
        self.rlc(i);
    }
    fn rrc<I: In8 + Out8>(&mut self, i: I) {
        let value = i.read(self);
        self.regs.update_flag(Flag::C, value & 0x1);
        let result = value >> 1;
        i.write(self, result);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
    }
    fn rr<I: In8 + Out8>(&mut self, i: I) {
        // TODO(David): Spec is ambiguous again, what's the difference between RR and RRC?
        self.rrc(i);
    }
    fn sla<I: In8 + Out8>(&mut self, i: I) {
        let result = (i.read(self) as u16) << 1;
        i.write(self, result as u8);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
        self.regs.update_flag(Flag::C, (result >> 8) & 0x1);
    }
    // NOTE(review): SRA is an arithmetic shift that should preserve bit 7;
    // this body is identical to rrc -- confirm.
    fn sra<I: In8 + Out8>(&mut self, i: I) {
        let value = i.read(self);
        self.regs.update_flag(Flag::C, value & 0x1);
        let result = value >> 1;
        i.write(self, result);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
    }
    // SWAP -- exchange the two nibbles.
    // NOTE(review): `(initial << 4) & 0xF` is always 0; the second mask should
    // presumably be 0xF0, so the low nibble is currently discarded -- confirm.
    fn swap<I: In8 + Out8>(&mut self, i: I) {
        let initial = i.read(self);
        i.write(self, ((initial >> 4) & 0xF) | ((initial << 4) & 0xF));
    }
    fn srl<I: In8 + Out8>(&mut self, i: I) {
    }
    // bit manipulation -- unimplemented stubs.
    fn bit<O: Out8>(&mut self, bit_id: u8, o: O) {
    }
    fn set<O: Out8>(&mut self, bit_id: u8, o: O) {
    }
    fn res<O: Out8>(&mut self, bit_id: u8, o: O) {
    }
    // control -- unimplemented stubs.
    fn jp(&mut self, dest: u16, cond: Cond) {
    }
    fn jp_hl(&mut self) {
    }
    fn jr(&mut self, offset: u8, cond: Cond) {
    }
    fn call(&mut self, dest: u16, cond: Cond) {
    }
    fn rst(&mut self, offset: u8) {
    }
    fn ret(&mut self, cond: Cond) {
    }
    fn reti(&mut self) {
    }
}
impl Cpu {
    /// Creates a CPU in its initial state, sharing ownership of the memory
    /// bus with the other emulated components.
    pub fn new(memory: Rc<RefCell<Memory>>) -> Cpu {
        Cpu {
            running: true,
            memory: memory,
            regs: Registers::new()
        }
    }
    /// Fetches and decodes the next instruction. Execution is not implemented
    /// yet; the CPU stops itself once PC passes 256 so runs terminate.
    pub fn tick(&mut self) {
        let instr = self.fetch_instr();
        println!("{:?}", instr);
        // TODO: implement execution
        if self.regs.pc > 256 {
            self.running = false;
            self.dump_state();
        }
    }
    // Memory reading helper functions
    fn mem_read_u8(&self, addr: u16) -> u8 {
        self.memory.borrow().read_u8(addr)
    }
    /// Reads a 16-bit word stored little-endian (low byte first), as the
    /// Game Boy CPU expects. The previous version combined the bytes in the
    /// opposite order, byte-swapping every 16-bit load.
    fn mem_read_u16(&self, addr: u16) -> u16 {
        let l = self.mem_read_u8(addr);
        let h = self.mem_read_u8(addr + 1);
        ((h as u16) << 8) | (l as u16)
    }
    fn mem_write_u8(&mut self, addr: u16, data: u8) {
        self.memory.borrow_mut().write_u8(addr, data);
    }
    fn mem_write_u16(&mut self, addr: u16, data: u16) {
        self.memory.borrow_mut().write_u16(addr, data);
    }
    /// Prints all registers and flags for debugging.
    pub fn dump_state(&self) {
        println!("Registers:");
        println!("- PC: {:04x} SP: {:04x} ", self.regs.pc, self.regs.sp);
        println!("- A: {:02x} F: {:02x} B: {:02x} C: {:02x}", self.regs.a, self.regs.f, self.regs.b, self.regs.c);
        println!("- D: {:02x} E: {:02x} H: {:02x} L: {:02x}", self.regs.d, self.regs.e, self.regs.h, self.regs.l);
        println!("Flags:");
        println!("- Zero: {}", self.regs.get_flag(Flag::Z));
        println!("- Add/Sub: {}", self.regs.get_flag(Flag::N));
        println!("- Half Carry: {}", self.regs.get_flag(Flag::H));
        println!("- Carry Flag {}", self.regs.get_flag(Flag::C));
    }
}
// Test cases
#[cfg(test)]
mod test {
    use std::rc::Rc;
    use std::cell::RefCell;
    use memory::Memory;
    use super::*;
    use cpu::registers::*;
    use cpu::ops::*;
    // Arbitrary non-zero 8-bit test value.
    fn test_u8() -> u8 {
        144u8
    }
    // Arbitrary 16-bit test value.
    fn test_u16() -> u16 {
        47628u16
    }
    // Builds a CPU wired to a fresh, blank memory map.
    fn init_cpu() -> Cpu {
        Cpu::new(Rc::new(RefCell::new(Memory::new_blank())))
    }
    #[test]
    fn load_from_reg_a_to_b() {
        let mut cpu = &mut init_cpu();
        cpu.load(Imm8(test_u8()), Reg8::A);
        cpu.load(Reg8::A, Reg8::B);
        assert_eq!(cpu.regs.a, test_u8());
        assert_eq!(cpu.regs.a, cpu.regs.b);
    }
    #[test]
    fn load_from_reg_bc_to_de() {
        let mut cpu = &mut init_cpu();
        cpu.load16(Imm16(test_u16()), Reg16::BC);
        cpu.load16(Reg16::BC, Reg16::DE);
        assert_eq!(Reg16::BC.read(cpu), test_u16());
        assert_eq!(Reg16::BC.read(cpu), Reg16::DE.read(cpu));
    }
}
Fixed remaining compiler errors
mod registers;
mod ops;
mod fetcher;
mod decoder;
use std::rc::Rc;
use std::cell::RefCell;
use memory::Memory;
use cpu::registers::*;
use cpu::ops::*;
use cpu::fetcher::*;
/*
Instruction Layout
====================
This analysis is based upon the Zilog Z80 manual with modifications done by the GB/GBC
such as removing the instructions involving the IX and IY registers.
[] = 1 byte
Addressing Modes
--------------------
Immediate Addressing: [op][operand] (1 or 2 byte op code)
Operand is a single byte, such as loading the accumulator with a constant.
Immediate Extended Addressing: [op][low][high] (1 or 2 byte op code)
Operand is split into two bytes, such as to load the HL register pair with 16 bits
of data.
Register Addressing:
Many Z80 opcodes specify register to registers directly
Implied Addressing:
This indicates OP codes imply registers, such as arithmetic instructions always
implying that the destination is the accumulator (A in AF register).
Register Indirect Addressing: [op] (1 or 2 byte op code)
This specifies a 16 bit register pair to be used as a pointer to any location in
memory. Such as loading the accumulator with data pointed to in the HR register.
Modified Page Zero Addressing: [op] (1 byte op code)
Eight special locations in page 0 of memory, depending on which version of CALL is
called.
Relative Addressing: [op][displacement] (1 byte op code)
Used for Jump Relative, displacement is 8 bit twos complement offset from A+2 (where
A is the current PC value)
Extended Addressing: [op][lowaddr][highaddr] (1 or 2 byte op code)
Used to jump to any location in 16 bit memory
*/
// CPU Data
pub struct Cpu {
    // Cleared when execution should stop (polled by the embedding loop).
    pub running: bool,
    // Memory bus shared with other components via Rc<RefCell>.
    memory: Rc<RefCell<Memory>>,
    // CPU register file (A/F/B/C/D/E/H/L, SP, PC and flag helpers).
    regs: Registers
}
macro_rules! read_reg_pair {
    // Combines two 8-bit registers into one 16-bit value; $h is the high byte.
    ($h:expr, $l:expr) => {
        (($h as u16) << 8) | $l as u16
    };
}
macro_rules! write_reg_pair {
    // Splits a 16-bit value into its high ($h) and low ($l) register halves.
    ($h:expr, $l:expr, $v:expr) => {{
        $h = ($v >> 8) as u8;
        $l = ($v & 0xFF) as u8;
    }};
}
// Control Conditions
#[derive(Debug, PartialEq, Eq)]
pub enum Cond {
    // Branch conditions: unconditional, zero flag clear/set, carry flag clear/set.
    None, NZ, Z, NC, C
}
// Registers
impl In8 for Reg8 {
    // Reads the named 8-bit register out of the CPU's register file.
    fn read(&self, cpu: &mut Cpu) -> u8 {
        match *self {
            Reg8::A => cpu.regs.a,
            Reg8::B => cpu.regs.b,
            Reg8::C => cpu.regs.c,
            Reg8::D => cpu.regs.d,
            Reg8::E => cpu.regs.e,
            Reg8::F => cpu.regs.f,
            Reg8::H => cpu.regs.h,
            Reg8::L => cpu.regs.l
        }
    }
}
impl Out8 for Reg8 {
    // Writes `data` into the named 8-bit register.
    fn write(&self, cpu: &mut Cpu, data: u8) {
        match *self {
            Reg8::A => cpu.regs.a = data,
            Reg8::B => cpu.regs.b = data,
            Reg8::C => cpu.regs.c = data,
            Reg8::D => cpu.regs.d = data,
            Reg8::E => cpu.regs.e = data,
            Reg8::F => cpu.regs.f = data,
            Reg8::H => cpu.regs.h = data,
            Reg8::L => cpu.regs.l = data
        }
    }
}
impl In16 for Reg16 {
    // Reads a 16-bit register, combining 8-bit pairs where necessary.
    fn read(&self, cpu: &mut Cpu) -> u16 {
        match *self {
            Reg16::AF => read_reg_pair!(cpu.regs.a, cpu.regs.f),
            Reg16::BC => read_reg_pair!(cpu.regs.b, cpu.regs.c),
            Reg16::DE => read_reg_pair!(cpu.regs.d, cpu.regs.e),
            Reg16::HL => read_reg_pair!(cpu.regs.h, cpu.regs.l),
            Reg16::SP => cpu.regs.sp,
            Reg16::PC => cpu.regs.pc,
        }
    }
}
impl Out16 for Reg16 {
    // Writes a 16-bit value, splitting it across 8-bit pairs where necessary.
    fn write(&self, cpu: &mut Cpu, data: u16) {
        match *self {
            Reg16::AF => write_reg_pair!(cpu.regs.a, cpu.regs.f, data),
            Reg16::BC => write_reg_pair!(cpu.regs.b, cpu.regs.c, data),
            Reg16::DE => write_reg_pair!(cpu.regs.d, cpu.regs.e, data),
            Reg16::HL => write_reg_pair!(cpu.regs.h, cpu.regs.l, data),
            Reg16::SP => cpu.regs.sp = data,
            Reg16::PC => cpu.regs.pc = data,
        }
    }
}
// Immediate operand - a constant stored in the next byte
#[derive(Debug)]
pub struct Imm8(u8);
impl In8 for Imm8 {
    /// Yields the embedded constant; the CPU state is untouched.
    fn read(&self, _: &mut Cpu) -> u8 {
        self.0
    }
}
// Immediate extended operand - a constant stored in the next two bytes
#[derive(Debug)]
pub struct Imm16(u16);
impl In16 for Imm16 {
    /// Yields the embedded 16-bit constant; the CPU state is untouched.
    fn read(&self, _: &mut Cpu) -> u16 {
        self.0
    }
}
// Indirect Addressing
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IndirectAddr {
    // Addressing modes that dereference memory rather than name a register.
    BC, DE, HL, // (BC/DE/HL)
    C, // (FF00 + C)
    Imm8(u8), // (FF00 + n)
    Imm16(u16), // (nn)
}
/// Resolves an indirect addressing mode to a concrete 16-bit address.
/// `C` and `Imm8` are offsets into the high I/O page at 0xFF00.
fn get_address(cpu: &mut Cpu, a: &IndirectAddr) -> u16 {
    match *a {
        IndirectAddr::BC => read_reg_pair!(cpu.regs.b, cpu.regs.c),
        IndirectAddr::DE => read_reg_pair!(cpu.regs.d, cpu.regs.e),
        IndirectAddr::HL => read_reg_pair!(cpu.regs.h, cpu.regs.l),
        IndirectAddr::C => 0xFF00 + cpu.regs.c as u16,
        IndirectAddr::Imm8(offset) => 0xFF00 + offset as u16,
        IndirectAddr::Imm16(address) => address
    }
}
impl In8 for IndirectAddr {
    /// Dereferences the addressing mode and reads one byte from memory.
    fn read(&self, cpu: &mut Cpu) -> u8 {
        let target = get_address(cpu, self);
        cpu.mem_read_u8(target)
    }
}
impl Out8 for IndirectAddr {
    /// Dereferences the addressing mode and stores one byte to memory.
    fn write(&self, cpu: &mut Cpu, data: u8) {
        let target = get_address(cpu, self);
        cpu.mem_write_u8(target, data);
    }
}
impl Out16 for IndirectAddr {
    /// Dereferences the addressing mode and stores a 16-bit word to memory.
    fn write(&self, cpu: &mut Cpu, data: u16) {
        let target = get_address(cpu, self);
        cpu.mem_write_u16(target, data);
    }
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Op8 {
    // A generic 8-bit operand: register, memory-indirect, or immediate.
    Reg(Reg8),
    Ind(IndirectAddr),
    Imm(u8)
}
impl Fetcher for Cpu {
    /// Returns the byte at PC and advances PC past it.
    fn fetch_word(&mut self) -> u8 {
        let pc = self.regs.pc;
        self.regs.pc = pc + 1;
        self.mem_read_u8(pc)
    }
}
/// True when bit `bit` (0-based) of `value` is set.
fn get_flag_bit(value: u16, bit: u8) -> bool {
    value & (1u16 << bit) != 0
}
// Interpreter implementation of the CPU ops defined in the ops module
#[allow(unused_variables)]
impl<'a> CpuOps for &'a mut Cpu {
    // 8-bit load: copy from any In8 source to any Out8 destination.
    fn load<I: In8, O: Out8>(&mut self, i: I, o: O) {
        let value = i.read(self);
        o.write(self, value);
    }
    // 16-bit load.
    fn load16<I: In16, O: Out16>(&mut self, i: I, o: O) {
        let value = i.read(self);
        o.write(self, value);
    }
    // LD HL, SP+n.
    // NOTE(review): for a negative offset, `offset as u16` sign-extends to a
    // large value, so `sp - (offset as u16)` underflows; `(-offset) as u16`
    // (or a wrapping add of the sign-extended offset) looks intended -- confirm.
    fn load16_hlsp(&mut self, offset: i8) {
        let value = if offset < 0 {
            self.regs.sp - (offset as u16)
        } else {
            self.regs.sp + (offset as u16)
        };
        Reg16::HL.write(self, value);
    }
    // TODO(David): Should the stack pointer be decremented before or after reading from memory?
    // NOTE(review): hardware pushes pre-decrement and pops read-then-increment;
    // the order here looks swapped relative to that -- confirm.
    fn push<I: In16>(&mut self, i: I) {
        let sp = self.regs.sp;
        let content = i.read(self);
        self.mem_write_u16(sp, content);
        self.regs.sp -= 2;
    }
    fn pop<O: Out16>(&mut self, o: O) {
        self.regs.sp += 2;
        let value = self.mem_read_u16(self.regs.sp);
        o.write(self, value);
    }
    // ADD A, n -- sum in u16 so the carry out of bit 7 survives truncation.
    // NOTE(review): H samples bit 4 of the result, not the carry out of bit 3
    // of the operands -- confirm against the flag spec.
    fn add<I: In8>(&mut self, i: I) {
        let result = self.regs.a as u16 + i.read(self) as u16;
        self.regs.a = result as u8;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.update_flag(Flag::H, get_flag_bit(result, 4));
        self.regs.update_flag(Flag::C, get_flag_bit(result, 8));
    }
    // ADC A, n -- as add, plus the incoming carry flag.
    fn adc<I: In8>(&mut self, i: I) {
        let result =
            self.regs.a as u16 +
            i.read(self) as u16 +
            if self.regs.get_flag(Flag::C) { 1 } else { 0 };
        self.regs.a = result as u8;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.update_flag(Flag::H, get_flag_bit(result, 4));
        self.regs.update_flag(Flag::C, get_flag_bit(result, 8));
    }
    // SUB n. NOTE(review): the u16 subtraction panics in debug builds whenever
    // the operand exceeds A -- wrapping arithmetic is probably wanted.
    fn sub<I: In8>(&mut self, i: I) {
        let result = self.regs.a as u16 - i.read(self) as u16;
        self.regs.a = result as u8;
        // TODO(David): Flags
    }
    fn sbc<I: In8>(&mut self, i: I) {
        let result =
            self.regs.a as u16 -
            i.read(self) as u16 -
            if self.regs.get_flag(Flag::C) { 1 } else { 0 };
        self.regs.a = result as u8;
        // TODO(David): Flags
    }
    // AND n -- note H is unconditionally set for this operation.
    fn and<I: In8>(&mut self, i: I) {
        self.regs.a &= i.read(self);
        let result = self.regs.a;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.set_flag(Flag::H);
        self.regs.reset_flag(Flag::C);
    }
    fn or<I: In8>(&mut self, i: I) {
        self.regs.a |= i.read(self);
        let result = self.regs.a;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
        self.regs.reset_flag(Flag::C);
    }
    fn xor<I: In8>(&mut self, i: I) {
        self.regs.a ^= i.read(self);
        let result = self.regs.a;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
        self.regs.reset_flag(Flag::C);
    }
    // CP n -- compare via subtraction, discarding the numeric result.
    fn cp<I: In8>(&mut self, i: I) {
        let result = self.regs.a as u16 - i.read(self) as u16;
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.set_flag(Flag::N);
        // TODO(David): H and C flags
    }
    // INC n. NOTE(review): `+ 1` panics on 0xFF in debug builds; wrapping_add
    // would match the hardware's 0xFF -> 0x00 behaviour.
    fn inc<I: In8 + Out8>(&mut self, i: I) {
        let result = i.read(self) + 1;
        i.write(self, result);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.update_flag(Flag::H, get_flag_bit(result as u16, 3));
    }
    // DEC n. NOTE(review): same underflow concern as inc for a value of 0.
    fn dec<I: In8 + Out8>(&mut self, i: I) {
        let result = i.read(self) - 1;
        i.write(self, result);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.set_flag(Flag::N);
        // TODO(David): H flag
    }
    // ADD HL, rr -- 16-bit add computed in u32.
    // NOTE(review): truncating with `result as u16` before testing bit 16
    // discards the carry (and shifting a u16 right by 16 overflows in debug);
    // the bit tests should run on the untruncated u32 result -- confirm.
    fn add16<I: In16>(&mut self, i: I) {
        let result = Reg16::HL.read(self) as u32 + i.read(self) as u32;
        Reg16::HL.write(self, result as u16);
        self.regs.reset_flag(Flag::N);
        self.regs.update_flag(Flag::H, get_flag_bit(result as u16, 12));
        self.regs.update_flag(Flag::C, get_flag_bit(result as u16, 16));
    }
    fn add16_sp(&mut self, i: Imm8) {
        //TODO(Csongor): this was not actually setting
        //the stack pointer anyway, so I've just commented
        //it out for now
        //let result = self.regs.sp + i.read(self) as i8;
        //self.regs.reset_flag(Flag::Z);
        //self.regs.reset_flag(Flag::N);
        // TODO(David): H and C flags are ambiguously defined
    }
    fn inc16<I: In16 + Out16>(&mut self, i: I) {
        let result = i.read(self) + 1;
        i.write(self, result);
    }
    fn dec16<I: In16 + Out16>(&mut self, i: I) {
        let result = i.read(self) - 1;
        i.write(self, result);
    }
    // misc
    fn nop(&mut self) {}
    fn daa(&mut self) {
        // TODO(David): Ambiguous spec, test this
        // A stores a number up to 255. In BCD form each nibble would store a single digit,
        // therefore the maximum number that can be stored is 99.
        // Source:
        // The DAA instruction corrects this invalid result. It checks to see if there was a carry
        // out of the low order BCD digit and adjusts the value (by adding six to it) if there was
        // an overflow. After adjusting for overflow out of the L.O. digit, the DAA instruction
        // repeats this process for the H.O. digit. DAA sets the carry flag if there was a (decimal)
        // carry out of the H.O. digit of the operation.
    }
    // CPL -- complement A; N and H are always set.
    fn cpl(&mut self) {
        self.regs.a = !self.regs.a;
        self.regs.set_flag(Flag::N);
        self.regs.set_flag(Flag::H);
    }
    // CCF -- toggle the carry flag; N and H cleared. The temporary avoids
    // borrowing self.regs both mutably and immutably in one call.
    fn ccf(&mut self) {
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
        let current_flag = self.regs.get_flag(Flag::C);
        self.regs.update_flag(Flag::C, !current_flag);
    }
    // SCF -- set the carry flag; N and H cleared.
    fn scf(&mut self) {
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
        self.regs.set_flag(Flag::C);
    }
    // Interrupt/power control -- unimplemented stubs.
    fn halt(&mut self) {
    }
    fn stop(&mut self) {
    }
    fn ei(&mut self) {
    }
    fn di(&mut self) {
    }
    // rotate and shift
    fn rlc<I: In8 + Out8>(&mut self, i: I) {
        let value = i.read(self);
        self.regs.update_flag(Flag::C, get_flag_bit(value as u16, 7));
        let result = value << 1;
        i.write(self, result);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
    }
    fn rl<I: In8 + Out8>(&mut self, i: I) {
        // TODO(David): Spec is ambiguous again, what's the difference between RL and RLC?
        self.rlc(i);
    }
    fn rrc<I: In8 + Out8>(&mut self, i: I) {
        let value = i.read(self);
        self.regs.update_flag(Flag::C, get_flag_bit(value as u16, 0));
        let result = value >> 1;
        i.write(self, result);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
    }
    fn rr<I: In8 + Out8>(&mut self, i: I) {
        // TODO(David): Spec is ambiguous again, what's the difference between RR and RRC?
        self.rrc(i);
    }
    fn sla<I: In8 + Out8>(&mut self, i: I) {
        let result = (i.read(self) as u16) << 1;
        i.write(self, result as u8);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
        self.regs.update_flag(Flag::C, get_flag_bit(result, 8));
    }
    // NOTE(review): SRA is an arithmetic shift that should preserve bit 7;
    // this body is identical to rrc -- confirm.
    fn sra<I: In8 + Out8>(&mut self, i: I) {
        let value = i.read(self);
        self.regs.update_flag(Flag::C, get_flag_bit(value as u16, 0));
        let result = value >> 1;
        i.write(self, result);
        self.regs.update_flag(Flag::Z, result == 0);
        self.regs.reset_flag(Flag::N);
        self.regs.reset_flag(Flag::H);
    }
    // SWAP -- exchange the two nibbles.
    // NOTE(review): `(initial << 4) & 0xF` is always 0; the second mask should
    // presumably be 0xF0, so the low nibble is currently discarded -- confirm.
    fn swap<I: In8 + Out8>(&mut self, i: I) {
        let initial = i.read(self);
        i.write(self, ((initial >> 4) & 0xF) | ((initial << 4) & 0xF));
    }
    fn srl<I: In8 + Out8>(&mut self, i: I) {
    }
    // bit manipulation -- unimplemented stubs.
    fn bit<O: Out8>(&mut self, bit_id: u8, o: O) {
    }
    fn set<O: Out8>(&mut self, bit_id: u8, o: O) {
    }
    fn res<O: Out8>(&mut self, bit_id: u8, o: O) {
    }
    // control -- unimplemented stubs.
    fn jp(&mut self, dest: u16, cond: Cond) {
    }
    fn jp_hl(&mut self) {
    }
    fn jr(&mut self, offset: u8, cond: Cond) {
    }
    fn call(&mut self, dest: u16, cond: Cond) {
    }
    fn rst(&mut self, offset: u8) {
    }
    fn ret(&mut self, cond: Cond) {
    }
    fn reti(&mut self) {
    }
}
impl Cpu {
    /// Creates a CPU in its initial state, sharing ownership of the memory
    /// bus with the other emulated components.
    pub fn new(memory: Rc<RefCell<Memory>>) -> Cpu {
        Cpu {
            running: true,
            memory: memory,
            regs: Registers::new()
        }
    }
    /// Fetches and decodes the next instruction. Execution is not implemented
    /// yet; the CPU stops itself once PC passes 256 so runs terminate.
    pub fn tick(&mut self) {
        let instr = self.fetch_instr();
        println!("{:?}", instr);
        // TODO: implement execution
        if self.regs.pc > 256 {
            self.running = false;
            self.dump_state();
        }
    }
    // Memory reading helper functions
    fn mem_read_u8(&self, addr: u16) -> u8 {
        self.memory.borrow().read_u8(addr)
    }
    /// Reads a 16-bit word stored little-endian (low byte first), as the
    /// Game Boy CPU expects. The previous version combined the bytes in the
    /// opposite order, byte-swapping every 16-bit load.
    fn mem_read_u16(&self, addr: u16) -> u16 {
        let l = self.mem_read_u8(addr);
        let h = self.mem_read_u8(addr + 1);
        ((h as u16) << 8) | (l as u16)
    }
    fn mem_write_u8(&mut self, addr: u16, data: u8) {
        self.memory.borrow_mut().write_u8(addr, data);
    }
    fn mem_write_u16(&mut self, addr: u16, data: u16) {
        self.memory.borrow_mut().write_u16(addr, data);
    }
    /// Prints all registers and flags for debugging.
    pub fn dump_state(&self) {
        println!("Registers:");
        println!("- PC: {:04x} SP: {:04x} ", self.regs.pc, self.regs.sp);
        println!("- A: {:02x} F: {:02x} B: {:02x} C: {:02x}", self.regs.a, self.regs.f, self.regs.b, self.regs.c);
        println!("- D: {:02x} E: {:02x} H: {:02x} L: {:02x}", self.regs.d, self.regs.e, self.regs.h, self.regs.l);
        println!("Flags:");
        println!("- Zero: {}", self.regs.get_flag(Flag::Z));
        println!("- Add/Sub: {}", self.regs.get_flag(Flag::N));
        println!("- Half Carry: {}", self.regs.get_flag(Flag::H));
        println!("- Carry Flag {}", self.regs.get_flag(Flag::C));
    }
}
// Test cases
#[cfg(test)]
mod test {
    use std::rc::Rc;
    use std::cell::RefCell;
    use memory::Memory;
    use super::*;
    use cpu::registers::*;
    use cpu::ops::*;
    // Arbitrary non-zero 8-bit test value.
    fn test_u8() -> u8 {
        144u8
    }
    // Arbitrary 16-bit test value.
    fn test_u16() -> u16 {
        47628u16
    }
    // Builds a CPU wired to a fresh, blank memory map.
    fn init_cpu() -> Cpu {
        Cpu::new(Rc::new(RefCell::new(Memory::new_blank())))
    }
    #[test]
    fn load_from_reg_a_to_b() {
        let mut cpu = &mut init_cpu();
        cpu.load(Imm8(test_u8()), Reg8::A);
        cpu.load(Reg8::A, Reg8::B);
        assert_eq!(cpu.regs.a, test_u8());
        assert_eq!(cpu.regs.a, cpu.regs.b);
    }
    #[test]
    fn load_from_reg_bc_to_de() {
        let mut cpu = &mut init_cpu();
        cpu.load16(Imm16(test_u16()), Reg16::BC);
        cpu.load16(Reg16::BC, Reg16::DE);
        assert_eq!(Reg16::BC.read(cpu), test_u16());
        assert_eq!(Reg16::BC.read(cpu), Reg16::DE.read(cpu));
    }
}
|
// Copyright 2016-2019 Matthew D. Michelotti
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This crate contains floating point types that panic if they are set
//! to an illegal value, such as NaN.
//!
//! The name "Noisy Float" comes from
//! the terms "quiet NaN" and "signaling NaN"; "signaling" was too long
//! to put in a struct/crate name, so "noisy" is used instead, being the opposite
//! of "quiet."
//!
//! The standard types defined in `noisy_float::types` follow the principle
//! demonstrated by Rust's handling of integer overflow:
//! a bad arithmetic operation is considered an error,
//! but it is too costly to check everywhere in optimized builds.
//! For each floating point number that is created, a `debug_assert!` invocation is used
//! to check if it is valid or not.
//! This way, there are guarantees when developing code that floating point
//! numbers have valid values,
//! but during a release run there is *no overhead* for using these floating
//! point types compared to using `f32` or `f64` directly.
//!
//! This crate makes use of the num, bounded, signed and floating point traits
//! in the popular `num_traits` crate.
//!
//! # Examples
//! An example using the `R64` type, which corresponds to *finite* `f64` values.
//!
//! ```
//! use noisy_float::prelude::*;
//!
//! fn geometric_mean(a: R64, b: R64) -> R64 {
//! (a * b).sqrt() //used just like regular floating point numbers
//! }
//!
//! fn mean(a: R64, b: R64) -> R64 {
//! (a + b) * 0.5 //the RHS of ops can be the underlying float type
//! }
//!
//! println!("geometric_mean(10.0, 20.0) = {}", geometric_mean(r64(10.0), r64(20.0)));
//! //prints 14.142...
//! assert!(mean(r64(10.0), r64(20.0)) == 15.0);
//! ```
//!
//! An example using the `N32` type, which corresponds to *non-NaN* `f32` values.
//! The float types in this crate are able to implement `Eq` and `Ord` properly,
//! since NaN is not allowed.
//!
//! ```
//! use noisy_float::prelude::*;
//!
//! let values = vec![n32(3.0), n32(-1.5), n32(71.3), N32::infinity()];
//! assert!(values.iter().cloned().min() == Some(n32(-1.5)));
//! assert!(values.iter().cloned().max() == Some(N32::infinity()));
//! ```
//!
//! An example converting from R64 to f64.
//!
//! ```
//! use noisy_float::prelude::*;
//! use num_traits::cast::ToPrimitive;
//!
//! let value_r64: R64 = r64(1.0);
//! let value_f64_a: f64 = value_r64.into();
//! let value_f64_b: f64 = value_r64.to_f64().unwrap();
//! assert!(value_f64_a == value_f64_b);
//! ```
//!
//! # Features
//!
//! This crate has the following cargo features:
//!
//! - `serde-1`: Enables serialization for all `NoisyFloat`s using serde 1.0,
//!   transparently serializing them as floats
extern crate num_traits;
#[cfg(feature = "serde-1")]
use serde::{Serialize, Deserialize, Serializer, Deserializer};
mod float_impl;
pub mod checkers;
pub mod types;
/// Prelude for the `noisy_float` crate.
///
/// This includes all of the types defined in the `noisy_float::types` module,
/// as well as a re-export of the `Float` trait from the `num_traits` crate.
/// It is important to have this re-export here, because it allows the user
/// to access common floating point methods like `abs()`, `sqrt()`, etc.
pub mod prelude {
    pub use crate::types::*;
    // `no_inline` keeps rustdoc linking to num_traits' own documentation for
    // `Float` instead of duplicating it here.
    #[doc(no_inline)]
    pub use num_traits::Float;
}
use std::marker::PhantomData;
use std::fmt;
use num_traits::Float;
/// Trait for checking whether a floating point number is *valid*.
///
/// The implementation defines its own criteria for what constitutes a *valid* value.
pub trait FloatChecker<F> {
    /// Returns `true` if (and only if) the given floating point number is *valid*
    /// according to this checker's criteria.
    ///
    /// The only hard requirement is that NaN *must* be considered *invalid*
    /// for all implementations of `FloatChecker`.
    fn check(value: F) -> bool;

    /// A function that may panic if the floating point number is *invalid*.
    ///
    /// Should either call `assert!(check(value), ...)` or `debug_assert!(check(value), ...)`.
    /// With `debug_assert!`, invalid values pass through silently in release
    /// builds — the zero-overhead trade-off described in the crate docs.
    fn assert(value: F);
}
/// A floating point number with a restricted set of legal values.
///
/// Typical users will not need to access this struct directly, but
/// can instead use the type aliases found in the module `noisy_float::types`.
/// However, this struct together with a `FloatChecker` implementation can be used
/// to define custom behavior.
///
/// The underlying float type is `F`, usually `f32` or `f64`.
/// Valid values for the float are determined by the float checker `C`.
/// If an invalid value would ever be returned from a method on this type,
/// the method will panic instead, using either `assert!` or `debug_assert!`
/// as defined by the float checker.
/// The exception to this rule is for methods that return an `Option` containing
/// a `NoisyFloat`, in which case the result would be `None` if the value is invalid.
#[repr(transparent)]
pub struct NoisyFloat<F: Float, C: FloatChecker<F>> {
    // The wrapped float. `repr(transparent)` guarantees identical layout to
    // `F`, which the `borrowed`/`borrowed_mut` pointer casts rely on.
    value: F,
    // Zero-sized marker tying the checker type `C` to this instance.
    checker: PhantomData<C>
}
impl<F: Float, C: FloatChecker<F>> NoisyFloat<F, C> {
    /// Constructs a `NoisyFloat` from `value`.
    ///
    /// Uses the `FloatChecker` `C` to assert that the value is valid.
    #[inline]
    pub fn new(value: F) -> Self {
        C::assert(value);
        Self::unchecked_new(value)
    }

    // Wraps `value` without running the checker; callers must have
    // validated it already.
    #[inline]
    fn unchecked_new(value: F) -> Self {
        NoisyFloat { value, checker: PhantomData }
    }

    /// Fallible constructor.
    ///
    /// Returns `None` when `value` fails the checker.
    #[inline]
    pub fn try_new(value: F) -> Option<Self> {
        if C::check(value) {
            Some(Self::unchecked_new(value))
        } else {
            None
        }
    }

    /// Reinterprets a `&F` as a `&NoisyFloat` in place.
    ///
    /// Uses the `FloatChecker` to assert that the value is valid.
    #[inline]
    pub fn borrowed(value: &F) -> &Self {
        C::assert(*value);
        Self::unchecked_borrowed(value)
    }

    #[inline]
    fn unchecked_borrowed(value: &F) -> &Self {
        // SAFETY: `NoisyFloat` is `#[repr(transparent)]` over `F`, so the
        // pointer cast preserves layout.
        unsafe { &*(value as *const F as *const Self) }
    }

    /// Fallible in-place borrow.
    ///
    /// Returns `None` when the value is invalid.
    #[inline]
    pub fn try_borrowed(value: &F) -> Option<&Self> {
        match C::check(*value) {
            true => Some(Self::unchecked_borrowed(value)),
            false => None,
        }
    }

    /// Reinterprets a `&mut F` as a `&mut NoisyFloat` in place.
    ///
    /// Uses the `FloatChecker` to assert that the value is valid.
    #[inline]
    pub fn borrowed_mut(value: &mut F) -> &mut Self {
        C::assert(*value);
        Self::unchecked_borrowed_mut(value)
    }

    #[inline]
    fn unchecked_borrowed_mut(value: &mut F) -> &mut Self {
        // SAFETY: `NoisyFloat` is `#[repr(transparent)]` over `F`, so the
        // pointer cast preserves layout.
        unsafe { &mut *(value as *mut F as *mut Self) }
    }

    /// Fallible in-place mutable borrow.
    ///
    /// Returns `None` when the value is invalid.
    #[inline]
    pub fn try_borrowed_mut(value: &mut F) -> Option<&mut Self> {
        match C::check(*value) {
            true => Some(Self::unchecked_borrowed_mut(value)),
            false => None,
        }
    }

    /// Constructs a `NoisyFloat` from an `f32`.
    ///
    /// May panic not only via the `FloatChecker` but also by unwrapping the
    /// `NumCast` conversion to `F`; the latter should not occur in normal
    /// situations.
    #[inline]
    pub fn from_f32(value: f32) -> Self {
        let converted = F::from(value).unwrap();
        Self::new(converted)
    }

    /// Constructs a `NoisyFloat` from an `f64`.
    ///
    /// May panic not only via the `FloatChecker` but also by unwrapping the
    /// `NumCast` conversion to `F`; the latter should not occur in normal
    /// situations.
    #[inline]
    pub fn from_f64(value: f64) -> Self {
        let converted = F::from(value).unwrap();
        Self::new(converted)
    }

    /// Unwraps and returns the underlying float value.
    #[inline]
    pub fn raw(self) -> F {
        self.value
    }

    /// Compares and returns the minimum of two values.
    ///
    /// Exists to disambiguate `num_traits::Float.min` from
    /// `std::cmp::Ord.min`, resolving in favor of the latter.
    #[inline]
    pub fn min(self, other: Self) -> Self {
        std::cmp::Ord::min(self, other)
    }

    /// Compares and returns the maximum of two values.
    ///
    /// Exists to disambiguate `num_traits::Float.max` from
    /// `std::cmp::Ord.max`, resolving in favor of the latter.
    #[inline]
    pub fn max(self, other: Self) -> Self {
        std::cmp::Ord::max(self, other)
    }
}
impl<F: Float + Default, C: FloatChecker<F>> Default for NoisyFloat<F, C> {
    /// The default is the checked wrapping of `F::default()`.
    #[inline]
    fn default() -> Self {
        let value = F::default();
        Self::new(value)
    }
}
impl<F: Float + fmt::Debug, C: FloatChecker<F>> fmt::Debug for NoisyFloat<F, C> {
    // Formats exactly like the wrapped float.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.value.fmt(f)
    }
}
impl<F: Float + fmt::Display, C: FloatChecker<F>> fmt::Display for NoisyFloat<F, C> {
    // Formats exactly like the wrapped float.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.value.fmt(f)
    }
}
impl<F: Float + fmt::LowerExp, C: FloatChecker<F>> fmt::LowerExp for NoisyFloat<F, C> {
    // Formats exactly like the wrapped float.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.value.fmt(f)
    }
}
impl<F: Float + fmt::UpperExp, C: FloatChecker<F>> fmt::UpperExp for NoisyFloat<F, C> {
    // Formats exactly like the wrapped float.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.value.fmt(f)
    }
}
#[cfg(feature = "serde-1")]
impl<F: Float + Serialize, C: FloatChecker<F>> Serialize for NoisyFloat<F, C> {
    // Serializes transparently as the underlying float.
    fn serialize<S: Serializer>(&self, ser: S) -> Result<S::Ok, S::Error> {
        let inner = &self.value;
        inner.serialize(ser)
    }
}
#[cfg(feature = "serde-1")]
impl<'de, F: Float + Deserialize<'de>, C: FloatChecker<F>> Deserialize<'de> for NoisyFloat<F, C> {
    // Deserializes a raw float, then runs it through the checker via `new`
    // (so invalid input can panic exactly as direct construction would).
    fn deserialize<D: Deserializer<'de>>(de: D) -> Result<Self, D::Error> {
        F::deserialize(de).map(Self::new)
    }
}
#[cfg(test)]
mod tests {
    #[cfg(feature = "serde-1")]
    use serde_json;
    #[cfg(feature = "serde-1")]
    use serde_derive::{Serialize, Deserialize};
    use crate::prelude::*;
    use std::f32;
    use std::f64::{self, consts};
    use std::mem::{size_of, align_of};
    use std::hash::{Hash, Hasher};

    // Broad coverage: arithmetic against raw floats, ordering, and the
    // fallible constructors/borrows rejecting NaN (and infinity for R64).
    #[test]
    fn smoke_test() {
        assert_eq!(n64(1.0) + 2.0, 3.0);
        assert_ne!(n64(3.0), n64(2.9));
        assert!(r64(1.0) < 2.0);
        let mut value = n64(18.0);
        value %= n64(5.0);
        assert_eq!(-value, n64(-3.0));
        assert_eq!(r64(1.0).exp(), consts::E);
        assert_eq!((N64::try_new(1.0).unwrap() / N64::infinity()), 0.0);
        assert_eq!(N64::from_f32(f32::INFINITY), N64::from_f64(f64::INFINITY));
        assert_eq!(R64::try_new(f64::NEG_INFINITY), None);
        assert_eq!(N64::try_new(f64::NAN), None);
        assert_eq!(R64::try_new(f64::NAN), None);
        assert_eq!(N64::try_borrowed(&f64::NAN), None);
        assert_eq!(N64::try_borrowed_mut(&mut f64::NAN), None);
    }

    // `#[repr(transparent)]` should make the wrappers layout-identical to
    // the primitive floats they wrap.
    #[test]
    fn ensure_layout() {
        assert_eq!(size_of::<N32>(), size_of::<f32>());
        assert_eq!(align_of::<N32>(), align_of::<f32>());
        assert_eq!(size_of::<N64>(), size_of::<f64>());
        assert_eq!(align_of::<N64>(), align_of::<f64>());
    }

    // In-place reference conversions (`borrowed`/`borrowed_mut`) must
    // preserve the referenced value.
    #[test]
    fn borrowed_casts() {
        assert_eq!(R64::borrowed(&3.14), &3.14);
        assert_eq!(N64::borrowed(&[f64::INFINITY; 2][0]), &f64::INFINITY);
        assert_eq!(N64::borrowed_mut(&mut 2.72), &mut 2.72);
    }

    // `From` conversions back to the primitive float types.
    #[test]
    fn test_convert() {
        assert_eq!(f32::from(r32(3.0)), 3.0f32);
        assert_eq!(f64::from(r32(5.0)), 5.0f64);
        assert_eq!(f64::from(r64(7.0)), 7.0f64);
    }

    // The three `should_panic` tests below rely on `debug_assert!` firing,
    // hence the `cfg(debug_assertions)` gate.
    #[test]
    #[cfg(debug_assertions)]
    #[should_panic]
    fn n64_nan() {
        let _ = n64(0.0) / n64(0.0);
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic]
    fn r64_nan() {
        let _ = r64(0.0) / r64(0.0);
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic]
    fn r64_infinity() {
        let _ = r64(1.0) / r64(0.0);
    }

    // The inherent `min`/`max` must resolve without ambiguity against
    // `num_traits::Float`'s methods of the same name.
    #[test]
    fn resolves_min_max() {
        assert_eq!(r64(1.0).min(r64(3.0)), r64(1.0));
        assert_eq!(r64(1.0).max(r64(3.0)), r64(3.0));
    }

    #[test]
    fn epsilon() {
        assert_eq!(R32::epsilon(), f32::EPSILON);
        assert_eq!(R64::epsilon(), f64::EPSILON);
    }

    // Captures the raw bytes fed to a `Hasher` so hash inputs can be
    // compared without depending on a concrete hash algorithm.
    struct TestHasher { bytes: Vec<u8> }
    impl Hasher for TestHasher {
        fn finish(&self) -> u64 { panic!("unexpected Hasher.finish invocation") }
        fn write(&mut self, bytes: &[u8]) { self.bytes.extend_from_slice(bytes) }
    }

    // Collects the byte stream that `value` writes into a hasher.
    fn hash_bytes<T: Hash>(value: T) -> Vec<u8> {
        let mut hasher = TestHasher { bytes: Vec::new() };
        value.hash(&mut hasher);
        hasher.bytes
    }

    // Hashing must agree with the bit pattern of the wrapped float.
    #[test]
    fn test_hash() {
        assert_eq!(hash_bytes(r64(10.3)), hash_bytes(10.3f64.to_bits()));
        assert_ne!(hash_bytes(r64(10.3)), hash_bytes(10.4f64.to_bits()));
        assert_eq!(hash_bytes(r32(10.3)), hash_bytes(10.3f32.to_bits()));
        assert_ne!(hash_bytes(r32(10.3)), hash_bytes(10.4f32.to_bits()));
        assert_eq!(hash_bytes(N64::infinity()), hash_bytes(f64::INFINITY.to_bits()));
        assert_eq!(hash_bytes(N64::neg_infinity()), hash_bytes(f64::NEG_INFINITY.to_bits()));
        // positive and negative zero should have the same hashes
        assert_eq!(hash_bytes(r64(0.0)), hash_bytes(0.0f64.to_bits()));
        assert_eq!(hash_bytes(r64(-0.0)), hash_bytes(0.0f64.to_bits()));
        assert_eq!(hash_bytes(r32(0.0)), hash_bytes(0.0f32.to_bits()));
        assert_eq!(hash_bytes(r32(-0.0)), hash_bytes(0.0f32.to_bits()));
    }

    #[cfg(feature = "serde-1")]
    #[test]
    fn serialize_transparently_as_float() {
        let num = R32::new(3.14);
        let should_be = "3.14";
        let got = serde_json::to_string(&num).unwrap();
        assert_eq!(got, should_be);
    }

    #[cfg(feature = "serde-1")]
    #[test]
    fn deserialize_transparently_as_float() {
        let src = "3.14";
        let should_be = R32::new(3.14);
        let got: R32 = serde_json::from_str(src).unwrap();
        assert_eq!(got, should_be);
    }

    // Make sure you can use serde_derive with noisy floats.
    #[cfg(feature = "serde-1")]
    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct Dummy {
        value: N64,
    }

    #[cfg(feature = "serde-1")]
    #[test]
    fn deserialize_struct_containing_n64() {
        let src = r#"{ "value": 3.14 }"#;
        let should_be = Dummy {
            value: n64(3.14),
        };
        let got: Dummy = serde_json::from_str(src).unwrap();
        assert_eq!(got, should_be);
    }

    #[cfg(feature = "serde-1")]
    #[test]
    fn serialize_struct_containing_n64() {
        let src = Dummy {
            value: n64(3.14),
        };
        let should_be = r#"{"value":3.14}"#;
        let got = serde_json::to_string(&src).unwrap();
        assert_eq!(got, should_be);
    }
}
// improved example for converting to primitive types
// Copyright 2016-2019 Matthew D. Michelotti
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This crate contains floating point types that panic if they are set
//! to an illegal value, such as NaN.
//!
//! The name "Noisy Float" comes from
//! the terms "quiet NaN" and "signaling NaN"; "signaling" was too long
//! to put in a struct/crate name, so "noisy" is used instead, being the opposite
//! of "quiet."
//!
//! The standard types defined in `noisy_float::types` follow the principle
//! demonstrated by Rust's handling of integer overflow:
//! a bad arithmetic operation is considered an error,
//! but it is too costly to check everywhere in optimized builds.
//! For each floating point number that is created, a `debug_assert!` invocation is used
//! to check if it is valid or not.
//! This way, there are guarantees when developing code that floating point
//! numbers have valid values,
//! but during a release run there is *no overhead* for using these floating
//! point types compared to using `f32` or `f64` directly.
//!
//! This crate makes use of the num, bounded, signed and floating point traits
//! in the popular `num_traits` crate.
//!
//! # Examples
//! An example using the `R64` type, which corresponds to *finite* `f64` values.
//!
//! ```
//! use noisy_float::prelude::*;
//!
//! fn geometric_mean(a: R64, b: R64) -> R64 {
//! (a * b).sqrt() //used just like regular floating point numbers
//! }
//!
//! fn mean(a: R64, b: R64) -> R64 {
//! (a + b) * 0.5 //the RHS of ops can be the underlying float type
//! }
//!
//! println!("geometric_mean(10.0, 20.0) = {}", geometric_mean(r64(10.0), r64(20.0)));
//! //prints 14.142...
//! assert!(mean(r64(10.0), r64(20.0)) == 15.0);
//! ```
//!
//! An example using the `N32` type, which corresponds to *non-NaN* `f32` values.
//! The float types in this crate are able to implement `Eq` and `Ord` properly,
//! since NaN is not allowed.
//!
//! ```
//! use noisy_float::prelude::*;
//!
//! let values = vec![n32(3.0), n32(-1.5), n32(71.3), N32::infinity()];
//! assert!(values.iter().cloned().min() == Some(n32(-1.5)));
//! assert!(values.iter().cloned().max() == Some(N32::infinity()));
//! ```
//!
//! An example converting from R64 to primitive types.
//!
//! ```
//! use noisy_float::prelude::*;
//! use num_traits::cast::ToPrimitive;
//!
//! let value_r64: R64 = r64(1.0);
//! let value_f64_a: f64 = value_r64.into();
//! let value_f64_b: f64 = value_r64.raw();
//! let value_u64: u64 = value_r64.to_u64().unwrap();
//!
//! assert!(value_f64_a == value_f64_b);
//! assert!(value_f64_a as u64 == value_u64);
//! ```
//!
//! # Features
//!
//! This crate has the following cargo features:
//!
//! - `serde-1`: Enables serialization for all `NoisyFloat`s using serde 1.0,
//!   transparently serializing them as floats
extern crate num_traits;
#[cfg(feature = "serde-1")]
use serde::{Serialize, Deserialize, Serializer, Deserializer};
mod float_impl;
pub mod checkers;
pub mod types;
/// Prelude for the `noisy_float` crate.
///
/// This includes all of the types defined in the `noisy_float::types` module,
/// as well as a re-export of the `Float` trait from the `num_traits` crate.
/// It is important to have this re-export here, because it allows the user
/// to access common floating point methods like `abs()`, `sqrt()`, etc.
pub mod prelude {
    pub use crate::types::*;
    // `no_inline` keeps rustdoc linking to num_traits' own documentation for
    // `Float` instead of duplicating it here.
    #[doc(no_inline)]
    pub use num_traits::Float;
}
use std::marker::PhantomData;
use std::fmt;
use num_traits::Float;
/// Trait for checking whether a floating point number is *valid*.
///
/// The implementation defines its own criteria for what constitutes a *valid* value.
pub trait FloatChecker<F> {
    /// Returns `true` if (and only if) the given floating point number is *valid*
    /// according to this checker's criteria.
    ///
    /// The only hard requirement is that NaN *must* be considered *invalid*
    /// for all implementations of `FloatChecker`.
    fn check(value: F) -> bool;

    /// A function that may panic if the floating point number is *invalid*.
    ///
    /// Should either call `assert!(check(value), ...)` or `debug_assert!(check(value), ...)`.
    /// With `debug_assert!`, invalid values pass through silently in release
    /// builds — the zero-overhead trade-off described in the crate docs.
    fn assert(value: F);
}
/// A floating point number with a restricted set of legal values.
///
/// Typical users will not need to access this struct directly, but
/// can instead use the type aliases found in the module `noisy_float::types`.
/// However, this struct together with a `FloatChecker` implementation can be used
/// to define custom behavior.
///
/// The underlying float type is `F`, usually `f32` or `f64`.
/// Valid values for the float are determined by the float checker `C`.
/// If an invalid value would ever be returned from a method on this type,
/// the method will panic instead, using either `assert!` or `debug_assert!`
/// as defined by the float checker.
/// The exception to this rule is for methods that return an `Option` containing
/// a `NoisyFloat`, in which case the result would be `None` if the value is invalid.
#[repr(transparent)]
pub struct NoisyFloat<F: Float, C: FloatChecker<F>> {
    // The wrapped float. `repr(transparent)` guarantees identical layout to
    // `F`, which the `borrowed`/`borrowed_mut` pointer casts rely on.
    value: F,
    // Zero-sized marker tying the checker type `C` to this instance.
    checker: PhantomData<C>
}
impl<F: Float, C: FloatChecker<F>> NoisyFloat<F, C> {
    /// Constructs a `NoisyFloat` from `value`.
    ///
    /// Uses the `FloatChecker` `C` to assert that the value is valid.
    #[inline]
    pub fn new(value: F) -> Self {
        C::assert(value);
        Self::unchecked_new(value)
    }

    // Wraps `value` without running the checker; callers must have
    // validated it already.
    #[inline]
    fn unchecked_new(value: F) -> Self {
        NoisyFloat { value, checker: PhantomData }
    }

    /// Fallible constructor.
    ///
    /// Returns `None` when `value` fails the checker.
    #[inline]
    pub fn try_new(value: F) -> Option<Self> {
        if C::check(value) {
            Some(Self::unchecked_new(value))
        } else {
            None
        }
    }

    /// Reinterprets a `&F` as a `&NoisyFloat` in place.
    ///
    /// Uses the `FloatChecker` to assert that the value is valid.
    #[inline]
    pub fn borrowed(value: &F) -> &Self {
        C::assert(*value);
        Self::unchecked_borrowed(value)
    }

    #[inline]
    fn unchecked_borrowed(value: &F) -> &Self {
        // SAFETY: `NoisyFloat` is `#[repr(transparent)]` over `F`, so the
        // pointer cast preserves layout.
        unsafe { &*(value as *const F as *const Self) }
    }

    /// Fallible in-place borrow.
    ///
    /// Returns `None` when the value is invalid.
    #[inline]
    pub fn try_borrowed(value: &F) -> Option<&Self> {
        match C::check(*value) {
            true => Some(Self::unchecked_borrowed(value)),
            false => None,
        }
    }

    /// Reinterprets a `&mut F` as a `&mut NoisyFloat` in place.
    ///
    /// Uses the `FloatChecker` to assert that the value is valid.
    #[inline]
    pub fn borrowed_mut(value: &mut F) -> &mut Self {
        C::assert(*value);
        Self::unchecked_borrowed_mut(value)
    }

    #[inline]
    fn unchecked_borrowed_mut(value: &mut F) -> &mut Self {
        // SAFETY: `NoisyFloat` is `#[repr(transparent)]` over `F`, so the
        // pointer cast preserves layout.
        unsafe { &mut *(value as *mut F as *mut Self) }
    }

    /// Fallible in-place mutable borrow.
    ///
    /// Returns `None` when the value is invalid.
    #[inline]
    pub fn try_borrowed_mut(value: &mut F) -> Option<&mut Self> {
        match C::check(*value) {
            true => Some(Self::unchecked_borrowed_mut(value)),
            false => None,
        }
    }

    /// Constructs a `NoisyFloat` from an `f32`.
    ///
    /// May panic not only via the `FloatChecker` but also by unwrapping the
    /// `NumCast` conversion to `F`; the latter should not occur in normal
    /// situations.
    #[inline]
    pub fn from_f32(value: f32) -> Self {
        let converted = F::from(value).unwrap();
        Self::new(converted)
    }

    /// Constructs a `NoisyFloat` from an `f64`.
    ///
    /// May panic not only via the `FloatChecker` but also by unwrapping the
    /// `NumCast` conversion to `F`; the latter should not occur in normal
    /// situations.
    #[inline]
    pub fn from_f64(value: f64) -> Self {
        let converted = F::from(value).unwrap();
        Self::new(converted)
    }

    /// Unwraps and returns the underlying float value.
    #[inline]
    pub fn raw(self) -> F {
        self.value
    }

    /// Compares and returns the minimum of two values.
    ///
    /// Exists to disambiguate `num_traits::Float.min` from
    /// `std::cmp::Ord.min`, resolving in favor of the latter.
    #[inline]
    pub fn min(self, other: Self) -> Self {
        std::cmp::Ord::min(self, other)
    }

    /// Compares and returns the maximum of two values.
    ///
    /// Exists to disambiguate `num_traits::Float.max` from
    /// `std::cmp::Ord.max`, resolving in favor of the latter.
    #[inline]
    pub fn max(self, other: Self) -> Self {
        std::cmp::Ord::max(self, other)
    }
}
impl<F: Float + Default, C: FloatChecker<F>> Default for NoisyFloat<F, C> {
    /// The default is the checked wrapping of `F::default()`.
    #[inline]
    fn default() -> Self {
        let value = F::default();
        Self::new(value)
    }
}
impl<F: Float + fmt::Debug, C: FloatChecker<F>> fmt::Debug for NoisyFloat<F, C> {
    // Formats exactly like the wrapped float.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.value.fmt(f)
    }
}
impl<F: Float + fmt::Display, C: FloatChecker<F>> fmt::Display for NoisyFloat<F, C> {
    // Formats exactly like the wrapped float.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.value.fmt(f)
    }
}
impl<F: Float + fmt::LowerExp, C: FloatChecker<F>> fmt::LowerExp for NoisyFloat<F, C> {
    // Formats exactly like the wrapped float.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.value.fmt(f)
    }
}
impl<F: Float + fmt::UpperExp, C: FloatChecker<F>> fmt::UpperExp for NoisyFloat<F, C> {
    // Formats exactly like the wrapped float.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.value.fmt(f)
    }
}
#[cfg(feature = "serde-1")]
impl<F: Float + Serialize, C: FloatChecker<F>> Serialize for NoisyFloat<F, C> {
    // Serializes transparently as the underlying float.
    fn serialize<S: Serializer>(&self, ser: S) -> Result<S::Ok, S::Error> {
        let inner = &self.value;
        inner.serialize(ser)
    }
}
#[cfg(feature = "serde-1")]
impl<'de, F: Float + Deserialize<'de>, C: FloatChecker<F>> Deserialize<'de> for NoisyFloat<F, C> {
    // Deserializes a raw float, then runs it through the checker via `new`
    // (so invalid input can panic exactly as direct construction would).
    fn deserialize<D: Deserializer<'de>>(de: D) -> Result<Self, D::Error> {
        F::deserialize(de).map(Self::new)
    }
}
#[cfg(test)]
mod tests {
    #[cfg(feature = "serde-1")]
    use serde_json;
    #[cfg(feature = "serde-1")]
    use serde_derive::{Serialize, Deserialize};
    use crate::prelude::*;
    use std::f32;
    use std::f64::{self, consts};
    use std::mem::{size_of, align_of};
    use std::hash::{Hash, Hasher};

    // Broad coverage: arithmetic against raw floats, ordering, and the
    // fallible constructors/borrows rejecting NaN (and infinity for R64).
    #[test]
    fn smoke_test() {
        assert_eq!(n64(1.0) + 2.0, 3.0);
        assert_ne!(n64(3.0), n64(2.9));
        assert!(r64(1.0) < 2.0);
        let mut value = n64(18.0);
        value %= n64(5.0);
        assert_eq!(-value, n64(-3.0));
        assert_eq!(r64(1.0).exp(), consts::E);
        assert_eq!((N64::try_new(1.0).unwrap() / N64::infinity()), 0.0);
        assert_eq!(N64::from_f32(f32::INFINITY), N64::from_f64(f64::INFINITY));
        assert_eq!(R64::try_new(f64::NEG_INFINITY), None);
        assert_eq!(N64::try_new(f64::NAN), None);
        assert_eq!(R64::try_new(f64::NAN), None);
        assert_eq!(N64::try_borrowed(&f64::NAN), None);
        assert_eq!(N64::try_borrowed_mut(&mut f64::NAN), None);
    }

    // `#[repr(transparent)]` should make the wrappers layout-identical to
    // the primitive floats they wrap.
    #[test]
    fn ensure_layout() {
        assert_eq!(size_of::<N32>(), size_of::<f32>());
        assert_eq!(align_of::<N32>(), align_of::<f32>());
        assert_eq!(size_of::<N64>(), size_of::<f64>());
        assert_eq!(align_of::<N64>(), align_of::<f64>());
    }

    // In-place reference conversions (`borrowed`/`borrowed_mut`) must
    // preserve the referenced value.
    #[test]
    fn borrowed_casts() {
        assert_eq!(R64::borrowed(&3.14), &3.14);
        assert_eq!(N64::borrowed(&[f64::INFINITY; 2][0]), &f64::INFINITY);
        assert_eq!(N64::borrowed_mut(&mut 2.72), &mut 2.72);
    }

    // `From` conversions back to the primitive float types.
    #[test]
    fn test_convert() {
        assert_eq!(f32::from(r32(3.0)), 3.0f32);
        assert_eq!(f64::from(r32(5.0)), 5.0f64);
        assert_eq!(f64::from(r64(7.0)), 7.0f64);
    }

    // The three `should_panic` tests below rely on `debug_assert!` firing,
    // hence the `cfg(debug_assertions)` gate.
    #[test]
    #[cfg(debug_assertions)]
    #[should_panic]
    fn n64_nan() {
        let _ = n64(0.0) / n64(0.0);
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic]
    fn r64_nan() {
        let _ = r64(0.0) / r64(0.0);
    }

    #[test]
    #[cfg(debug_assertions)]
    #[should_panic]
    fn r64_infinity() {
        let _ = r64(1.0) / r64(0.0);
    }

    // The inherent `min`/`max` must resolve without ambiguity against
    // `num_traits::Float`'s methods of the same name.
    #[test]
    fn resolves_min_max() {
        assert_eq!(r64(1.0).min(r64(3.0)), r64(1.0));
        assert_eq!(r64(1.0).max(r64(3.0)), r64(3.0));
    }

    #[test]
    fn epsilon() {
        assert_eq!(R32::epsilon(), f32::EPSILON);
        assert_eq!(R64::epsilon(), f64::EPSILON);
    }

    // Captures the raw bytes fed to a `Hasher` so hash inputs can be
    // compared without depending on a concrete hash algorithm.
    struct TestHasher { bytes: Vec<u8> }
    impl Hasher for TestHasher {
        fn finish(&self) -> u64 { panic!("unexpected Hasher.finish invocation") }
        fn write(&mut self, bytes: &[u8]) { self.bytes.extend_from_slice(bytes) }
    }

    // Collects the byte stream that `value` writes into a hasher.
    fn hash_bytes<T: Hash>(value: T) -> Vec<u8> {
        let mut hasher = TestHasher { bytes: Vec::new() };
        value.hash(&mut hasher);
        hasher.bytes
    }

    // Hashing must agree with the bit pattern of the wrapped float.
    #[test]
    fn test_hash() {
        assert_eq!(hash_bytes(r64(10.3)), hash_bytes(10.3f64.to_bits()));
        assert_ne!(hash_bytes(r64(10.3)), hash_bytes(10.4f64.to_bits()));
        assert_eq!(hash_bytes(r32(10.3)), hash_bytes(10.3f32.to_bits()));
        assert_ne!(hash_bytes(r32(10.3)), hash_bytes(10.4f32.to_bits()));
        assert_eq!(hash_bytes(N64::infinity()), hash_bytes(f64::INFINITY.to_bits()));
        assert_eq!(hash_bytes(N64::neg_infinity()), hash_bytes(f64::NEG_INFINITY.to_bits()));
        // positive and negative zero should have the same hashes
        assert_eq!(hash_bytes(r64(0.0)), hash_bytes(0.0f64.to_bits()));
        assert_eq!(hash_bytes(r64(-0.0)), hash_bytes(0.0f64.to_bits()));
        assert_eq!(hash_bytes(r32(0.0)), hash_bytes(0.0f32.to_bits()));
        assert_eq!(hash_bytes(r32(-0.0)), hash_bytes(0.0f32.to_bits()));
    }

    #[cfg(feature = "serde-1")]
    #[test]
    fn serialize_transparently_as_float() {
        let num = R32::new(3.14);
        let should_be = "3.14";
        let got = serde_json::to_string(&num).unwrap();
        assert_eq!(got, should_be);
    }

    #[cfg(feature = "serde-1")]
    #[test]
    fn deserialize_transparently_as_float() {
        let src = "3.14";
        let should_be = R32::new(3.14);
        let got: R32 = serde_json::from_str(src).unwrap();
        assert_eq!(got, should_be);
    }

    // Make sure you can use serde_derive with noisy floats.
    #[cfg(feature = "serde-1")]
    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct Dummy {
        value: N64,
    }

    #[cfg(feature = "serde-1")]
    #[test]
    fn deserialize_struct_containing_n64() {
        let src = r#"{ "value": 3.14 }"#;
        let should_be = Dummy {
            value: n64(3.14),
        };
        let got: Dummy = serde_json::from_str(src).unwrap();
        assert_eq!(got, should_be);
    }

    #[cfg(feature = "serde-1")]
    #[test]
    fn serialize_struct_containing_n64() {
        let src = Dummy {
            value: n64(3.14),
        };
        let should_be = r#"{"value":3.14}"#;
        let got = serde_json::to_string(&src).unwrap();
        assert_eq!(got, should_be);
    }
}
|
//! Game Boy CPU emulation
use std::fmt::{Show, Formatter, Error};
use io::{Interconnect, Interrupt};
use cpu::instructions::next_instruction;
mod instructions;
/// CPU state.
pub struct Cpu<'a> {
    /// Time remaining (in system clock ticks) for the current instruction to finish
    instruction_delay: u32,
    /// CPU registers (except for `F` register)
    regs: Registers,
    /// CPU flags (`F` register)
    flags: Flags,
    /// Interrupt enabled flag
    iten: bool,
    /// True if interrupts should be enabled after next instruction
    iten_enable_next: bool,
    /// CPU halted flag
    halted: bool,
    /// Interconnect to access external resources (RAM, ROM, peripherals...)
    inter: Interconnect<'a>,
}
/// CPU registers. They're 16bit wide but some of them can be accessed
/// as high and low byte.
///
/// The `F` (flags) register is not stored here; it lives in the separate
/// `Flags` struct on `Cpu`.
struct Registers {
    /// 16bit Program Counter
    pc: u16,
    /// 16bit Stack Pointer
    sp: u16,
    /// 8bit `A` register
    a: u8,
    /// 8bit `B` register
    b: u8,
    /// 8bit `C` register
    c: u8,
    /// 8bit `D` register
    d: u8,
    /// 8bit `E` register
    e: u8,
    /// 8bit `H` register
    h: u8,
    /// 8bit `L` register
    l: u8,
}
/// Flags contain `bool`s which are set or unset as a side effect of
/// the commands being executed. In turn, certain commands change
/// their behaviour based on the flag values.
struct Flags {
    /// Zero: set if the result of a math operation is zero or two
    /// values compare equal
    z: bool,
    /// Subtract Flag: set if the last math operation performed a
    /// subtraction
    n: bool,
    /// Half Carry Flag: set if a carry occurred from the lower nibble
    /// in the last math operation.
    h: bool,
    /// Carry Flag: set if a carry occurred during the last math
    /// operation or if the first operand register compared smaller.
    c: bool,
}
impl<'a> Cpu<'a> {
/// Create a new Cpu instance and reset it
pub fn new<'n>(inter: Interconnect<'n>) -> Cpu<'n> {
    // Default register values at startup. Taken from the
    // unofficial Game Boy CPU manual.
    // NOTE(review): everything is zeroed here; the manual's documented
    // post-boot values (e.g. PC = 0x100) are presumably established
    // elsewhere (boot ROM or loader) -- confirm.
    let regs = Registers {
        pc: 0,
        sp: 0,
        a : 0,
        b : 0,
        c : 0,
        d : 0,
        e : 0,
        h : 0,
        l : 0,
    };
    Cpu {
        instruction_delay: 0,
        regs: regs,
        flags: Flags { z: false,
                       n: false,
                       h: false,
                       c: false,
        },
        inter: inter,
        // Interrupts start enabled; `iten_enable_next` mirrors `iten` so no
        // spurious state change is pending after the first instruction.
        iten: true,
        iten_enable_next: true,
        halted: false,
    }
}
/// Called at each tick of the system clock. Move the emulated
/// state one step forward.
///
/// The ordering here matters: step the interconnect, burn down the
/// current instruction's delay, service interrupts, handle HALT, and
/// only then fetch/execute the next instruction.
pub fn step(&mut self) {
    self.inter.step();
    // Are we done running the current instruction?
    if self.instruction_delay > 0 {
        // Nope, wait for the next cycle
        self.instruction_delay -= 1;
        return;
    }
    if self.iten {
        if let Some(it) = self.inter.next_interrupt_ack() {
            // We have a pending interrupt!
            self.interrupt(it);
            // Wait until the context switch delay is over. We're
            // sure not to reenter here after that since the
            // `iten` is set to false in `self.interrupt`
            return;
        }
    } else {
        // If an interrupt enable is pending we update the iten
        // flag
        self.iten = self.iten_enable_next;
    }
    if self.halted {
        // Check if we have a pending interrupt because even if
        // `iten` is false HALT returns when an IT is triggered
        // (but the IT handler doesn't run)
        if !self.iten && self.inter.next_interrupt().is_some() {
            self.halted = false;
        } else {
            // Wait for interrupt
            return;
        }
    }
    // Now we fetch the next instruction
    let (delay, instruction) = next_instruction(self);
    // Instruction delays are in CPU Machine cycles. There's 4
    // Clock cycles in one Machine cycle. The -1 accounts for the
    // current tick.
    self.instruction_delay = delay * 4 - 1;
    // Run the next instruction. This can change the entire CPU
    // state including the `instruction_delay` above (using the
    // `additional_delay` method).
    (instruction)(self);
}
/// Execute interrupt handler for `it`
///
/// Pushes the current PC onto the stack and jumps to the fixed handler
/// address for the interrupt source.
fn interrupt(&mut self, it: Interrupt) {
    // If the CPU was halted it's time to wake it up.
    self.halted = false;
    // Interrupt are disabled when entering an interrupt handler.
    self.disable_interrupts();
    // Switching context takes 32 cycles
    self.instruction_delay = 32;
    // NOTE(review): only VBlank/Lcdc/Timer are dispatched here; Serial and
    // Joypad vectors (0x58/0x60) appear unsupported -- confirm `Interrupt`
    // defines no further variants.
    let handler_addr = match it {
        Interrupt::VBlank => 0x40,
        Interrupt::Lcdc => 0x48,
        Interrupt::Timer => 0x50,
    };
    // Push current value to stack
    let pc = self.pc();
    self.push_word(pc);
    // Jump to IT handler
    self.set_pc(handler_addr);
}
/// Fetch byte at `addr` from the interconnect
fn fetch_byte(&self, addr: u16) -> u8 {
self.inter.fetch_byte(addr)
}
/// Store byte `val` at `addr` in the interconnect
fn store_byte(&mut self, addr: u16, val: u8) {
self.inter.store_byte(addr, val)
}
/// Push one byte onto the stack and decrement the stack pointer
fn push_byte(&mut self, val: u8){
let mut sp = self.sp();
sp -= 1;
self.set_sp(sp);
self.store_byte(sp, val);
}
/// Push two bytes onto the stack and decrement the stack pointer
/// twice
fn push_word(&mut self, val: u16) {
self.push_byte((val >> 8) as u8);
self.push_byte(val as u8);
}
/// Retreive one byte from the stack and increment the stack pointer
fn pop_byte(&mut self) -> u8 {
let sp = self.sp();
let b = self.fetch_byte(sp);
self.set_sp(sp + 1);
b
}
/// Retreive two bytes from the stack and increment the stack pointer
/// twice
fn pop_word(&mut self) -> u16 {
let lo = self.pop_byte() as u16;
let hi = self.pop_byte() as u16;
(hi << 8) | lo
}
/// Certain instructions take a different amount of time to
/// execute depending on the cpu state (conditional jumps and
/// calls). `delay` is expressed in CPU Machine cycle, there's 4
/// Clock cycles in one Machine cycle.
fn additional_delay(&mut self, delay: u32) {
self.instruction_delay += delay * 4;
}
/// Retrieve value of the `PC` register
fn pc(&self) -> u16 {
self.regs.pc
}
/// Set value of the `PC` register
fn set_pc(&mut self, pc: u16) {
self.regs.pc = pc;
}
/// Retrieve value of the `SP` register
fn sp(&self) -> u16 {
self.regs.sp
}
/// Set value of the `SP` register
fn set_sp(&mut self, sp: u16) {
self.regs.sp = sp;
}
/// Retrieve value of the `AF` register
fn af(&self) -> u16 {
let mut v = self.f() as u16;
v |= (self.regs.a as u16) << 8;
v
}
/// Set value of the `AF` register
fn set_af(&mut self, af: u16) {
self.regs.a = (af >> 8) as u8;
self.set_f(af as u8);
}
/// Retrieve value of the `BC` register
fn bc(&self) -> u16 {
let mut v = self.regs.c as u16;
v |= (self.regs.b as u16) << 8;
v
}
/// Set value of the `BC` register
fn set_bc(&mut self, bc: u16) {
self.regs.b = (bc >> 8) as u8;
self.regs.c = bc as u8;
}
/// Retrieve value of the `DE` register
fn de(&self) -> u16 {
let mut v = self.regs.e as u16;
v |= (self.regs.d as u16) << 8;
v
}
/// Set value of the `DE` register
fn set_de(&mut self, de: u16) {
self.regs.d = (de >> 8) as u8;
self.regs.e = de as u8;
}
/// Retrieve value of the `HL` register
fn hl(&self) -> u16 {
let mut v = self.regs.l as u16;
v |= (self.regs.h as u16) << 8;
v
}
/// Set value of the `HL` register
fn set_hl(&mut self, hl: u16) {
self.regs.h = (hl >> 8) as u8;
self.regs.l = hl as u8;
}
/// Retrieve value of the `A` register
fn a(&self) -> u8 {
self.regs.a
}
/// Set value of the `A` register
fn set_a(&mut self, v: u8) {
self.regs.a = v;
}
/// Retrieve value of the `B` register
fn b(&self) -> u8 {
self.regs.b
}
/// Set value of the `B` register
fn set_b(&mut self, v: u8) {
self.regs.b = v;
}
/// Retrieve value of the `C` register
fn c(&self) -> u8 {
self.regs.c
}
/// Set value of the `C` register
fn set_c(&mut self, v: u8) {
self.regs.c = v;
}
/// Retrieve value of the `D` register
fn d(&self) -> u8 {
self.regs.d
}
/// Set value of the `D` register
fn set_d(&mut self, v: u8) {
self.regs.d = v;
}
/// Retrieve value of the `E` register
fn e(&self) -> u8 {
self.regs.e
}
/// Set value of the `E` register
fn set_e(&mut self, v: u8) {
self.regs.e = v;
}
/// Retrieve value of the `F` register
fn f(&self) -> u8 {
let z = self.flags.z as u8;
let n = self.flags.n as u8;
let h = self.flags.h as u8;
let c = self.flags.c as u8;
(z << 7) | (n << 6) | ( h << 5) | (c << 4)
}
/// Set value of the `F` register
fn set_f(&mut self, v: u8) {
self.flags.z = (v & (1 << 7)) != 0;
self.flags.n = (v & (1 << 6)) != 0;
self.flags.h = (v & (1 << 5)) != 0;
self.flags.c = (v & (1 << 4)) != 0;
}
/// Retrieve value of the `H` register
fn h(&self) -> u8 {
self.regs.h
}
/// Set value of the `H` register
fn set_h(&mut self, v: u8) {
self.regs.h = v;
}
/// Retrieve value of the `L` register
fn l(&self) -> u8 {
self.regs.l
}
/// Set value of the `L` register
fn set_l(&mut self, v: u8) {
self.regs.l = v;
}
/// Get value of 'Z' flag
fn zero(&self) -> bool {
self.flags.z
}
/// set value of 'Z' flag
fn set_zero(&mut self, s: bool) {
self.flags.z = s;
}
/// Get value of 'C' flag
fn carry(&self) -> bool {
self.flags.c
}
/// Set value of 'C' flag
fn set_carry(&mut self, s: bool) {
self.flags.c = s;
}
/// Get value of 'H' flag
fn halfcarry(&self) -> bool {
self.flags.h
}
/// Set value of 'H' flag
fn set_halfcarry(&mut self, s: bool) {
self.flags.h = s;
}
/// Get value of 'N' flag
fn substract(&self) -> bool {
self.flags.n
}
/// Set value of 'N' flag
fn set_substract(&mut self, s: bool) {
self.flags.n = s;
}
/// Disable Interrupts. Takes effect immediately and cancels any
/// pending interrupt enable request.
fn disable_interrupts(&mut self) {
self.iten = false;
self.iten_enable_next = false;
}
/// Enable Interrupts immediately
fn enable_interrupts(&mut self) {
self.iten = true;
self.iten_enable_next = true;
}
/// Enable Interrupts after the next instruction.
fn enable_interrupts_next(&mut self) {
self.iten_enable_next = true;
}
/// Halt and wait for interrupts
fn halt(&mut self) {
self.halted = true;
}
/// Stop, blank the screen and wait for button press
fn stop(&mut self) {
println!("{}", *self);
panic!("STOP is not implemented");
}
// Partial reset procedure used for benchmarks
#[cfg(test)]
pub fn reset(&mut self) {
self.set_pc(0);
}
}
impl<'a> Show for Cpu<'a> {
    /// Dump the complete CPU state for debugging: each register pair
    /// (with a peek at the memory PC, SP and HL point to), the flags
    /// and the interrupt/halt status.
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        try!(writeln!(f, "Registers:"));

        try!(writeln!(f, " pc: 0x{:04x} [{:02X} {:02X} {:02X} ...]",
                      self.pc(),
                      self.fetch_byte(self.pc()),
                      self.fetch_byte(self.pc() + 1),
                      self.fetch_byte(self.pc() + 2)));

        try!(writeln!(f, " sp: 0x{:04x} [{:02X} {:02X} {:02X} ...]",
                      self.sp(),
                      self.fetch_byte(self.sp()),
                      self.fetch_byte(self.sp() + 1),
                      self.fetch_byte(self.sp() + 2)));

        try!(writeln!(f, " af: 0x{:04x} a: {:3} f: {:3}",
                      self.af(), self.a(), self.f()));

        try!(writeln!(f, " bc: 0x{:04x} b: {:3} c: {:3}",
                      self.bc(), self.b(), self.c()));

        // BUG FIX: the second value on this line is the E register but
        // the label used to read "d:" twice.
        try!(writeln!(f, " de: 0x{:04x} d: {:3} e: {:3}",
                      self.de(), self.d(), self.e()));

        try!(writeln!(f, " hl: 0x{:04x} h: {:3} l: {:3} \
                       [hl]: [{:02X} {:02X} ...]",
                      self.hl(), self.h(), self.l(),
                      self.fetch_byte(self.hl()),
                      self.fetch_byte(self.hl() + 1)));

        try!(writeln!(f, "Flags:"));

        try!(writeln!(f, " z: {} n: {} h: {} c: {}",
                      self.flags.z as int,
                      self.flags.n as int,
                      self.flags.h as int,
                      self.flags.c as int));

        try!(writeln!(f, " iten: {} halted: {}", self.iten, self.halted));

        Ok(())
    }
}
Change HL register implementation to use a single u16
//! Game Boy CPU emulation
use std::fmt::{Show, Formatter, Error};
use io::{Interconnect, Interrupt};
use cpu::instructions::next_instruction;
mod instructions;
/// CPU state.
///
/// Holds the register file, the flag bits, the interrupt-enable state
/// and the interconnect used to reach the rest of the system.
pub struct Cpu<'a> {
    /// Time remaining for the current instruction to finish,
    /// counted down once per clock tick in `step`
    instruction_delay: u32,
    /// CPU registers (except for `F` register)
    regs: Registers,
    /// CPU flags (`F` register)
    flags: Flags,
    /// Interrupt enabled flag
    iten: bool,
    /// True if interrupts should be enabled after next instruction
    iten_enable_next: bool,
    /// CPU halted flag
    halted: bool,
    /// Interconnect to access external resources (RAM, ROM, peripherals...)
    inter: Interconnect<'a>,
}
/// CPU registers. They're 16bit wide but some of them can be accessed
/// as high and low byte.
struct Registers {
    /// 16bit Program Counter
    pc: u16,
    /// 16bit Stack Pointer
    sp: u16,
    /// 8bit `A` register (accumulator; pairs with the flags to form `AF`)
    a: u8,
    /// 8bit `B` register (high byte of `BC`)
    b: u8,
    /// 8bit `C` register (low byte of `BC`)
    c: u8,
    /// 8bit `D` register (high byte of `DE`)
    d: u8,
    /// 8bit `E` register (low byte of `DE`)
    e: u8,
    /// 16bit `HL` register. This register can be split in `H` and `L`
    /// like the others but it's often used as a 16bit memory pointer,
    /// so it's stored as a single `u16` here.
    hl: u16,
}
/// Flags contain `bool`s which are set or unset as a side effect of
/// the commands being executed. In turn, certain commands change
/// their behaviour based on the flag values.
struct Flags {
    /// Zero: set if the result of a math operation is zero or two
    /// values compare equal
    z: bool,
    /// Subtract Flag: set if the last math operation performed a
    /// subtraction
    n: bool,
    /// Half Carry Flag: set if a carry occurred from the lower nibble
    /// in the last math operation.
    h: bool,
    /// Carry Flag: set if a carry occurred during the last math
    /// operation or if the first operand register compared smaller.
    c: bool,
}
impl<'a> Cpu<'a> {
    /// Create a new Cpu instance and reset it
    ///
    /// The CPU starts with interrupts enabled, not halted, and with no
    /// instruction in flight.
    pub fn new<'n>(inter: Interconnect<'n>) -> Cpu<'n> {
        // Default register values at startup. Taken from the
        // unofficial Game Boy CPU manual.
        let regs = Registers {
            pc: 0,
            sp: 0,
            a : 0,
            b : 0,
            c : 0,
            d : 0,
            e : 0,
            hl: 0,
        };

        Cpu {
            instruction_delay: 0,
            regs: regs,
            flags: Flags { z: false,
                           n: false,
                           h: false,
                           c: false,
            },
            inter: inter,
            iten: true,
            iten_enable_next: true,
            halted: false,
        }
    }

    /// Called at each tick of the system clock. Move the emulated
    /// state one step forward.
    ///
    /// The interconnect is stepped on *every* clock tick, even while a
    /// multi-cycle instruction is still "executing"; the instruction's
    /// side effects all happen at once on the tick where the delay
    /// reaches zero.
    pub fn step(&mut self) {
        self.inter.step();

        // Are we done running the current instruction?
        if self.instruction_delay > 0 {
            // Nope, wait for the next cycle
            self.instruction_delay -= 1;
            return;
        }

        if self.iten {
            if let Some(it) = self.inter.next_interrupt_ack() {
                // We have a pending interrupt!
                self.interrupt(it);
                // Wait until the context switch delay is over. We're
                // sure not to reenter here after that since the
                // `iten` is set to false in `self.interrupt`
                return;
            }
        } else {
            // If an interrupt enable is pending we update the iten
            // flag. This gives EI its one-instruction latency: the
            // pending flag is only folded in on the following step.
            self.iten = self.iten_enable_next;
        }

        if self.halted {
            // Check if we have a pending interrupt because even if
            // `iten` is false HALT returns when an IT is triggered
            // (but the IT handler doesn't run)
            if !self.iten && self.inter.next_interrupt().is_some() {
                self.halted = false;
            } else {
                // Wait for interrupt
                return;
            }
        }

        // Now we fetch the next instruction
        let (delay, instruction) = next_instruction(self);

        // Instruction delays are in CPU Machine cycles. There's 4
        // Clock cycles in one Machine cycle. The `- 1` accounts for
        // the current tick.
        self.instruction_delay = delay * 4 - 1;

        // Run the next instruction. This can change the entire CPU
        // state including the `instruction_delay` above (using the
        // `additional_delay` method).
        (instruction)(self);
    }

    /// Execute interrupt handler for `it`
    ///
    /// Pushes the current PC and jumps to the fixed handler address of
    /// the interrupt source. Interrupts are left disabled; the handler
    /// is expected to re-enable them when appropriate.
    fn interrupt(&mut self, it: Interrupt) {
        // If the CPU was halted it's time to wake it up.
        self.halted = false;

        // Interrupt are disabled when entering an interrupt handler.
        self.disable_interrupts();

        // Switching context takes 32 cycles
        // NOTE(review): instruction delays elsewhere are machine
        // cycles multiplied by 4; this value is used as-is. Confirm
        // whether 32 is meant in clock or machine cycles.
        self.instruction_delay = 32;

        let handler_addr = match it {
            Interrupt::VBlank => 0x40,
            Interrupt::Lcdc => 0x48,
            Interrupt::Timer => 0x50,
        };

        // Push current value to stack
        let pc = self.pc();

        self.push_word(pc);

        // Jump to IT handler
        self.set_pc(handler_addr);
    }

    /// Fetch byte at `addr` from the interconnect
    fn fetch_byte(&self, addr: u16) -> u8 {
        self.inter.fetch_byte(addr)
    }

    /// Store byte `val` at `addr` in the interconnect
    fn store_byte(&mut self, addr: u16, val: u8) {
        self.inter.store_byte(addr, val)
    }

    /// Push one byte onto the stack and decrement the stack pointer
    ///
    /// NOTE(review): `sp -= 1` does not use explicit wrapping
    /// arithmetic; with SP == 0 this underflows (real hardware wraps
    /// to 0xffff). Confirm the intended overflow semantics for this
    /// build.
    fn push_byte(&mut self, val: u8){
        let mut sp = self.sp();

        sp -= 1;

        self.set_sp(sp);
        self.store_byte(sp, val);
    }

    /// Push two bytes onto the stack and decrement the stack pointer
    /// twice. High byte is pushed first (ends up at the higher
    /// address).
    fn push_word(&mut self, val: u16) {
        self.push_byte((val >> 8) as u8);
        self.push_byte(val as u8);
    }

    /// Retreive one byte from the stack and increment the stack pointer
    fn pop_byte(&mut self) -> u8 {
        let sp = self.sp();

        let b = self.fetch_byte(sp);

        self.set_sp(sp + 1);

        b
    }

    /// Retreive two bytes from the stack and increment the stack pointer
    /// twice. Low byte is popped first, mirroring `push_word`.
    fn pop_word(&mut self) -> u16 {
        let lo = self.pop_byte() as u16;
        let hi = self.pop_byte() as u16;

        (hi << 8) | lo
    }

    /// Certain instructions take a different amount of time to
    /// execute depending on the cpu state (conditional jumps and
    /// calls). `delay` is expressed in CPU Machine cycle, there's 4
    /// Clock cycles in one Machine cycle.
    fn additional_delay(&mut self, delay: u32) {
        self.instruction_delay += delay * 4;
    }

    /// Retrieve value of the `PC` register
    fn pc(&self) -> u16 {
        self.regs.pc
    }

    /// Set value of the `PC` register
    fn set_pc(&mut self, pc: u16) {
        self.regs.pc = pc;
    }

    /// Retrieve value of the `SP` register
    fn sp(&self) -> u16 {
        self.regs.sp
    }

    /// Set value of the `SP` register
    fn set_sp(&mut self, sp: u16) {
        self.regs.sp = sp;
    }

    /// Retrieve value of the `AF` register (`A` in the high byte, the
    /// flags in the low byte)
    fn af(&self) -> u16 {
        let mut v = self.f() as u16;

        v |= (self.regs.a as u16) << 8;

        v
    }

    /// Set value of the `AF` register
    fn set_af(&mut self, af: u16) {
        self.regs.a = (af >> 8) as u8;
        self.set_f(af as u8);
    }

    /// Retrieve value of the `BC` register
    fn bc(&self) -> u16 {
        let mut v = self.regs.c as u16;

        v |= (self.regs.b as u16) << 8;

        v
    }

    /// Set value of the `BC` register
    fn set_bc(&mut self, bc: u16) {
        self.regs.b = (bc >> 8) as u8;
        self.regs.c = bc as u8;
    }

    /// Retrieve value of the `DE` register
    fn de(&self) -> u16 {
        let mut v = self.regs.e as u16;

        v |= (self.regs.d as u16) << 8;

        v
    }

    /// Set value of the `DE` register
    fn set_de(&mut self, de: u16) {
        self.regs.d = (de >> 8) as u8;
        self.regs.e = de as u8;
    }

    /// Retrieve value of the `HL` register. Stored natively as a
    /// `u16`, so this is a plain field read.
    fn hl(&self) -> u16 {
        self.regs.hl
    }

    /// Set value of the `HL` register
    fn set_hl(&mut self, hl: u16) {
        self.regs.hl = hl
    }

    /// Retrieve value of the `A` register
    fn a(&self) -> u8 {
        self.regs.a
    }

    /// Set value of the `A` register
    fn set_a(&mut self, v: u8) {
        self.regs.a = v;
    }

    /// Retrieve value of the `B` register
    fn b(&self) -> u8 {
        self.regs.b
    }

    /// Set value of the `B` register
    fn set_b(&mut self, v: u8) {
        self.regs.b = v;
    }

    /// Retrieve value of the `C` register
    fn c(&self) -> u8 {
        self.regs.c
    }

    /// Set value of the `C` register
    fn set_c(&mut self, v: u8) {
        self.regs.c = v;
    }

    /// Retrieve value of the `D` register
    fn d(&self) -> u8 {
        self.regs.d
    }

    /// Set value of the `D` register
    fn set_d(&mut self, v: u8) {
        self.regs.d = v;
    }

    /// Retrieve value of the `E` register
    fn e(&self) -> u8 {
        self.regs.e
    }

    /// Set value of the `E` register
    fn set_e(&mut self, v: u8) {
        self.regs.e = v;
    }

    /// Retrieve value of the `F` register. Flags occupy the high
    /// nibble (Z,N,H,C from bit 7 down to bit 4); the low nibble
    /// always reads as 0.
    fn f(&self) -> u8 {
        let z = self.flags.z as u8;
        let n = self.flags.n as u8;
        let h = self.flags.h as u8;
        let c = self.flags.c as u8;

        (z << 7) | (n << 6) | ( h << 5) | (c << 4)
    }

    /// Set value of the `F` register. The low nibble of `v` is
    /// discarded.
    fn set_f(&mut self, v: u8) {
        self.flags.z = (v & (1 << 7)) != 0;
        self.flags.n = (v & (1 << 6)) != 0;
        self.flags.h = (v & (1 << 5)) != 0;
        self.flags.c = (v & (1 << 4)) != 0;
    }

    /// Retrieve value of the `H` register (high byte of `HL`)
    fn h(&self) -> u8 {
        (self.regs.hl >> 8) as u8
    }

    /// Set value of the `H` register, preserving `L`
    fn set_h(&mut self, v: u8) {
        let mut hl = self.hl();

        hl &= 0xff;
        hl |= (v as u16) << 8;

        self.set_hl(hl);
    }

    /// Retrieve value of the `L` register (low byte of `HL`)
    fn l(&self) -> u8 {
        self.regs.hl as u8
    }

    /// Set value of the `L` register, preserving `H`
    fn set_l(&mut self, v: u8) {
        let mut hl = self.hl();

        hl &= 0xff00;
        hl |= v as u16;

        self.set_hl(hl);
    }

    /// Get value of 'Z' flag
    fn zero(&self) -> bool {
        self.flags.z
    }

    /// set value of 'Z' flag
    fn set_zero(&mut self, s: bool) {
        self.flags.z = s;
    }

    /// Get value of 'C' flag
    fn carry(&self) -> bool {
        self.flags.c
    }

    /// Set value of 'C' flag
    fn set_carry(&mut self, s: bool) {
        self.flags.c = s;
    }

    /// Get value of 'H' flag
    fn halfcarry(&self) -> bool {
        self.flags.h
    }

    /// Set value of 'H' flag
    fn set_halfcarry(&mut self, s: bool) {
        self.flags.h = s;
    }

    /// Get value of 'N' flag
    fn substract(&self) -> bool {
        self.flags.n
    }

    /// Set value of 'N' flag
    fn set_substract(&mut self, s: bool) {
        self.flags.n = s;
    }

    /// Disable Interrupts. Takes effect immediately and cancels any
    /// pending interrupt enable request.
    fn disable_interrupts(&mut self) {
        self.iten = false;
        self.iten_enable_next = false;
    }

    /// Enable Interrupts immediately
    fn enable_interrupts(&mut self) {
        self.iten = true;
        self.iten_enable_next = true;
    }

    /// Enable Interrupts after the next instruction. Only sets the
    /// pending flag; `step` folds it into `iten` one instruction
    /// later.
    fn enable_interrupts_next(&mut self) {
        self.iten_enable_next = true;
    }

    /// Halt and wait for interrupts
    fn halt(&mut self) {
        self.halted = true;
    }

    /// Stop, blank the screen and wait for button press
    ///
    /// Currently unimplemented: dumps the CPU state and panics.
    fn stop(&mut self) {
        println!("{}", *self);
        panic!("STOP is not implemented");
    }

    // Partial reset procedure used for benchmarks
    #[cfg(test)]
    pub fn reset(&mut self) {
        self.set_pc(0);
    }
}
impl<'a> Show for Cpu<'a> {
    /// Dump the complete CPU state for debugging: each register pair
    /// (with a peek at the memory PC, SP and HL point to), the flags
    /// and the interrupt/halt status.
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        try!(writeln!(f, "Registers:"));

        try!(writeln!(f, " pc: 0x{:04x} [{:02X} {:02X} {:02X} ...]",
                      self.pc(),
                      self.fetch_byte(self.pc()),
                      self.fetch_byte(self.pc() + 1),
                      self.fetch_byte(self.pc() + 2)));

        try!(writeln!(f, " sp: 0x{:04x} [{:02X} {:02X} {:02X} ...]",
                      self.sp(),
                      self.fetch_byte(self.sp()),
                      self.fetch_byte(self.sp() + 1),
                      self.fetch_byte(self.sp() + 2)));

        try!(writeln!(f, " af: 0x{:04x} a: {:3} f: {:3}",
                      self.af(), self.a(), self.f()));

        try!(writeln!(f, " bc: 0x{:04x} b: {:3} c: {:3}",
                      self.bc(), self.b(), self.c()));

        // BUG FIX: the second value on this line is the E register but
        // the label used to read "d:" twice.
        try!(writeln!(f, " de: 0x{:04x} d: {:3} e: {:3}",
                      self.de(), self.d(), self.e()));

        try!(writeln!(f, " hl: 0x{:04x} h: {:3} l: {:3} \
                       [hl]: [{:02X} {:02X} ...]",
                      self.hl(), self.h(), self.l(),
                      self.fetch_byte(self.hl()),
                      self.fetch_byte(self.hl() + 1)));

        try!(writeln!(f, "Flags:"));

        try!(writeln!(f, " z: {} n: {} h: {} c: {}",
                      self.flags.z as int,
                      self.flags.n as int,
                      self.flags.h as int,
                      self.flags.c as int));

        try!(writeln!(f, " iten: {} halted: {}", self.iten, self.halted));

        Ok(())
    }
}
|
/// Index of the next elf after `from` (clockwise, wrapping around the
/// circle, excluding `from` itself) that still holds presents, or
/// `None` if no other elf has any.
fn next_elf_with_presents(circle: &[i32], from: usize) -> Option<usize> {
    circle[from + 1..]
        .iter()
        .position(|&n| n != 0)
        // `position` is relative to the slice, so translate it back
        // into an index into `circle`.
        .map(|i| from + 1 + i)
        .or_else(|| circle[..from].iter().position(|&n| n != 0))
}

/// White Elephant party: `input` elves (numbered from 1) sit in a
/// circle, each holding one present. In turn, an elf steals all
/// presents from the next elf that still has any; elves without
/// presents are skipped. Returns the 1-based number of the elf who
/// ends up with everything. `input` must be at least 1.
///
/// Fixes two defects in the previous version: the advance step
/// assigned the slice-relative `position()` result directly to
/// `current_elf` instead of the absolute index, and the function
/// panicked instead of terminating once the current elf was the last
/// one holding presents.
pub fn puzzle(input: usize) -> usize {
    // An entry of 0 marks an eliminated elf.
    let mut circle = vec![1; input];
    let mut current_elf = 0;
    loop {
        match next_elf_with_presents(&circle, current_elf) {
            Some(victim) => {
                // Steal everything the victim holds.
                circle[current_elf] += circle[victim];
                circle[victim] = 0;
            }
            // Nobody else has presents: the current elf wins.
            None => return current_elf + 1,
        }
        // Pass the turn to the next elf still holding presents. If the
        // current elf is now the only one left, stay put: the scan at
        // the top of the next iteration returns `None` and we finish.
        if let Some(next) = next_elf_with_presents(&circle, current_elf) {
            current_elf = next;
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    /// With two elves, elf 1 steals elf 2's present and wins.
    #[test]
    fn two_elves() {
        let winner = puzzle(2);
        assert_eq!(winner, 1);
    }
}
Remove half of the elves each round, instead of one by one
This runs in a reasonable amount of time.
1841611 is right!
/// White Elephant party solved by halving: one full pass around the
/// circle eliminates every other elf, so each round keeps only the
/// elves sitting at even or odd positions, depending on where the
/// pass starts. Returns the 1-based number of the winning elf.
pub fn puzzle(input: usize) -> usize {
    let mut circle: Vec<usize> = (1..input + 1).collect();
    let mut start_at_0 = true;

    while circle.len() > 1 {
        // A pass that starts at position 0 keeps the even positions;
        // a pass that starts mid-stride keeps the odd ones.
        let keep_parity = if start_at_0 { 0 } else { 1 };
        let had_odd_len = circle.len() % 2 != 0;

        let mut survivors = Vec::with_capacity(circle.len() / 2 + 1);
        for (pos, elf) in circle.into_iter().enumerate() {
            if pos % 2 == keep_parity {
                survivors.push(elf);
            }
        }

        // A round of odd length "wraps" past the end of the list,
        // flipping which parity survives the next round; an
        // even-length round leaves the starting parity unchanged.
        if had_odd_len {
            start_at_0 = !start_at_0;
        }

        circle = survivors;
    }

    circle.pop().expect("horrible things")
}
// start_at_0 = false
// vec![1, 2, 3, 4] -
// remove even, 0-based positions
// start_at_0 = false
// vec![1, 2, 3, 4, 5, 6] - len().is_even() && start == 0 => remove odd, 0-based positions
// vec![1, 3, 5] - len().is_odd() && start == 0 => remove odd, 0-based positions
// vec![1, 5] - len().is_even() && start == end => remove even, 0-based positions
// vec![5]
//
// vec![1, 2, 3, 4, 5] - len().is_odd() && start == 0 => remove odd, 0-based positions
// vec![1, 3, 5] len().is_odd() && start == end => remove even, 0-based positions
// vec![3]
#[cfg(test)]
mod test {
    use super::*;

    /// With two elves, elf 1 steals elf 2's present and wins.
    #[test]
    fn two_elves() {
        let winner = puzzle(2);
        assert_eq!(winner, 1);
    }

    /// Worked example from the problem statement: five elves leave
    /// elf 3 as the winner.
    #[test]
    fn five_elves() {
        let winner = puzzle(5);
        assert_eq!(winner, 3);
    }
}
|
#![allow(experimental)]
use std::io::{File,fs};
//use std::io::process::Command;
use std::path::posix::Path;
use std::os::{homedir};
#[cfg(test)] use std::rand::random;
#[cfg(test)] use std::finally::Finally;
static TEMPLATE: &'static str = include_str!("creator/template.yml");
static DEFAULT_MUXED_DIR: &'static str = "muxed";
/// Create a new project file called `name` inside the muxed directory
/// (`~/.muxed/`), creating that directory first if it doesn't exist.
/// If a project file with the same name is already present it is left
/// untouched and a message is printed instead.
pub fn new(name: &str) {
    let muxed_dir = if muxed_dir_exists(&DEFAULT_MUXED_DIR.to_string()) {
        Path::new(format!("{}/.{}/", homedir_string(), &DEFAULT_MUXED_DIR.to_string()))
    } else {
        // First run: build `~/.muxed/` before writing into it.
        create_muxed_dir(&DEFAULT_MUXED_DIR.to_string())
    };

    let path = &Path::new(format!("{}/{}", muxed_dir.display(), name));
    if !path.exists() {
        create_project_file(path)
    } else {
        println!("Project already exists.");
    }
}
/// Write the bundled YAML template into a fresh project file at
/// `path`. Failures are reported on stdout rather than propagated.
fn create_project_file(path: &Path) {
    match File::create(path).write(TEMPLATE.as_bytes()) {
        Ok(()) => (), // succeeded
        Err(_e) => println!("Failed to create project {}", path.filename()),
    }
    // Opening the new file in an editor is not implemented yet.
    // Command::new("vim").arg(format!("{}", path.display())).detached();
}
/// Create the hidden muxed directory `~/.{name}` (user rwx) and
/// return its path. On failure the error is reported on stdout and
/// the path is returned anyway, so the caller may still attempt to
/// use it.
fn create_muxed_dir(name: &String) -> Path {
    let path = &Path::new(format!("{}/.{}", homedir_string(), name));
    match fs::mkdir(path, ::std::io::UserRWX) {
        Ok(()) => (), // succeeded
        // BUG FIX: this creates a directory, not a project file; the
        // previous message misleadingly said "Failed to create
        // project".
        Err(_e) => println!("Failed to create muxed directory {}", path.filename()),
    }
    path.clone()
}
/// True when the hidden muxed directory `~/.{name}` already exists on
/// disk.
fn muxed_dir_exists(name: &String) -> bool {
    Path::new(format!("{}/.{}", homedir_string(), name)).exists()
}
/// The current user's home directory rendered as a `String`.
/// Panics if the home directory cannot be determined.
fn homedir_string() -> String {
    format!("{}", homedir().unwrap().display())
}
/// Generate a throwaway name (`test_<random float>`) so test runs do
/// not collide with each other on the filesystem.
#[cfg(test)]
fn random_name() -> String {
    format!("test_{}", random::<f64>())
}
#[test]
fn muxed_dir_exists_returns_false() {
    // Use the shared `random_name` helper instead of re-deriving the
    // naming scheme inline; a freshly generated name can't exist yet.
    assert!(!muxed_dir_exists(&random_name()));
}

#[test]
fn muxed_dir_exists_returns_true() {
    let dir = random_name();
    create_muxed_dir(&dir);
    assert!(muxed_dir_exists(&dir));
    // Clean up the directory created above.
    let muxed_path = &Path::new(format!("{}/.{}/", homedir_string(), dir));
    fs::rmdir_recursive(muxed_path);
}

#[test]
fn creates_muxed_dir() {
    // The previous version also bound an unused `name` here.
    let dir = random_name();
    let muxed_path = &Path::new(format!("{}/.{}/", homedir_string(), dir));
    create_muxed_dir(&dir);
    assert!(muxed_path.exists());
    fs::rmdir_recursive(muxed_path);
}

#[test]
fn new_writes_file_to_muxed_dir() {
    let name = random_name();
    let path = &Path::new(format!("{}/.muxed/{}", homedir_string(), name));
    new(name.as_slice());
    assert!(path.exists());
    fs::unlink(path);
}

#[test]
// TODO: Fix this test so it verifies something better.
fn new_doesnt_overwrite_existing_file() {
    let name = random_name();
    let path = &Path::new(format!("{}/.muxed/{}", homedir_string(), name));
    new(name.as_slice());
    // `finally` removes the file even if the second `new` panics.
    (|| {
        new(name.as_slice());
    }).finally(|| {
        fs::unlink(path);
    })
}
Use the new random_name function to standardize how directory and project names are being tested.
#![allow(experimental)]
use std::io::{File,fs};
//use std::io::process::Command;
use std::path::posix::Path;
use std::os::{homedir};
#[cfg(test)] use std::rand::random;
#[cfg(test)] use std::finally::Finally;
static TEMPLATE: &'static str = include_str!("creator/template.yml");
static DEFAULT_MUXED_DIR: &'static str = "muxed";
/// Create a new project file called `name` inside the muxed directory
/// (`~/.muxed/`), creating that directory first if it doesn't exist.
/// If a project file with the same name is already present it is left
/// untouched and a message is printed instead.
pub fn new(name: &str) {
    let muxed_dir = if muxed_dir_exists(&DEFAULT_MUXED_DIR.to_string()) {
        Path::new(format!("{}/.{}/", homedir_string(), &DEFAULT_MUXED_DIR.to_string()))
    } else {
        // First run: build `~/.muxed/` before writing into it.
        create_muxed_dir(&DEFAULT_MUXED_DIR.to_string())
    };

    let path = &Path::new(format!("{}/{}", muxed_dir.display(), name));
    if !path.exists() {
        create_project_file(path)
    } else {
        println!("Project already exists.");
    }
}
/// Write the bundled YAML template into a fresh project file at
/// `path`. Failures are reported on stdout rather than propagated.
fn create_project_file(path: &Path) {
    match File::create(path).write(TEMPLATE.as_bytes()) {
        Ok(()) => (), // succeeded
        Err(_e) => println!("Failed to create project {}", path.filename()),
    }
    // Opening the new file in an editor is not implemented yet.
    // Command::new("vim").arg(format!("{}", path.display())).detached();
}
/// Create the hidden muxed directory `~/.{name}` (user rwx) and
/// return its path. On failure the error is reported on stdout and
/// the path is returned anyway, so the caller may still attempt to
/// use it.
fn create_muxed_dir(name: &String) -> Path {
    let path = &Path::new(format!("{}/.{}", homedir_string(), name));
    match fs::mkdir(path, ::std::io::UserRWX) {
        Ok(()) => (), // succeeded
        // BUG FIX: this creates a directory, not a project file; the
        // previous message misleadingly said "Failed to create
        // project".
        Err(_e) => println!("Failed to create muxed directory {}", path.filename()),
    }
    path.clone()
}
/// True when the hidden muxed directory `~/.{name}` already exists on
/// disk.
fn muxed_dir_exists(name: &String) -> bool {
    Path::new(format!("{}/.{}", homedir_string(), name)).exists()
}
/// The current user's home directory rendered as a `String`.
/// Panics if the home directory cannot be determined.
fn homedir_string() -> String {
    format!("{}", homedir().unwrap().display())
}
/// Generate a throwaway name (`test_<random float>`) so test runs do
/// not collide with each other on the filesystem.
#[cfg(test)]
fn random_name() -> String {
    format!("test_{}", random::<f64>())
}
#[test]
fn muxed_dir_exists_returns_false() {
    // A freshly generated random name can't have been created yet.
    assert!(!muxed_dir_exists(&random_name()));
}

#[test]
fn muxed_dir_exists_returns_true() {
    let dir = random_name();
    create_muxed_dir(&dir);
    assert!(muxed_dir_exists(&dir));
    // Clean up. NOTE(review): the result of `rmdir_recursive` is
    // ignored, so a failed cleanup would go unnoticed -- confirm
    // that's acceptable here.
    let muxed_path = &Path::new(format!("{}/.{}/", homedir_string(), dir));
    fs::rmdir_recursive(muxed_path);
}

#[test]
fn creates_muxed_dir() {
    let dir = random_name();
    let muxed_path = &Path::new(format!("{}/.{}/", homedir_string(), dir));
    create_muxed_dir(&dir);
    assert!(muxed_path.exists());
    fs::rmdir_recursive(muxed_path);
}

#[test]
fn new_writes_file_to_muxed_dir() {
    let name = random_name();
    let path = &Path::new(format!("{}/.muxed/{}", homedir_string(), name));
    new(name.as_slice());
    assert!(path.exists());
    fs::unlink(path);
}

#[test]
// TODO: Fix this test so it verifies something better.
fn new_doesnt_overwrite_existing_file() {
    let name = random_name();
    let path = &Path::new(format!("{}/.muxed/{}", homedir_string(), name));
    new(name.as_slice());
    // `finally` removes the file even if the second `new` panics.
    (|| {
        new(name.as_slice());
    }).finally(|| {
        fs::unlink(path);
    })
}
|
//! `extract()` = `unwrap_or_else(|| intrinsics::unreachable())`
//!
//! In a debug build: `extract()` = `unwrap_or_else(|| unreachable!())`
//!
//! Use `extract()` only when you are 200% sure that an option contains a value.
#![cfg_attr(not(debug_assertions), feature(core))]
#![deny(missing_docs)]
#![deny(warnings)]
#[cfg(not(debug_assertions))]
use std::intrinsics;
/// Extension trait for `Option` that adds the `extract()` method
pub trait Extract {
    /// The type of what's contained in the `Some` variant
    type Output;
    /// Extracts the value contained in the `Some` variant
    unsafe fn extract(self) -> Self::Output;
}

impl<T> Extract for Option<T> {
    type Output = T;

    unsafe fn extract(self) -> T {
        match self {
            // Debug builds keep a checked panic so misuse is caught
            // during development.
            #[cfg(debug_assertions)]
            None => unreachable!(),
            // Release builds promise the compiler this arm can never
            // be taken, letting the `None` check be optimized away.
            // Undefined behaviour if the option really is `None` --
            // that's the caller's contract for this `unsafe fn`.
            #[cfg(not(debug_assertions))]
            None => intrinsics::unreachable(),
            Some(x) => x,
        }
    }
}
feat: now works without the unreachable intrinsic
//! `extract()` = `unwrap_or_else(|| intrinsics::unreachable())`
//!
//! In a debug build: `extract()` = `unwrap_or_else(|| unreachable!())`
//!
//! Use `extract()` only when you are 200% sure that an option contains a value.
#![deny(missing_docs)]
#![deny(warnings)]
/// Extension trait for `Option` that adds the `extract()` method
pub trait Extract {
    /// The type of what's contained in the `Some` variant
    type Output;
    /// Extracts the value contained in the `Some` variant
    unsafe fn extract(self) -> Self::Output;
}

impl<T> Extract for Option<T> {
    type Output = T;

    unsafe fn extract(self) -> T {
        match self {
            // Debug builds: fail loudly if the caller's promise that
            // the option is `Some` turns out to be wrong.
            #[cfg(debug_assertions)]
            None => unreachable!(),
            // Release builds: tell the optimizer this arm can never be
            // reached so the `None` check disappears. Reaching it
            // anyway is undefined behaviour -- that's the caller's
            // contract for this `unsafe fn`.
            //
            // `std::hint::unreachable_unchecked()` is the dedicated,
            // guaranteed-sound way to express this; the previous
            // version fabricated a `&Void` reference out of the
            // integer 1 via `transmute`, which itself violates
            // reference-validity rules.
            #[cfg(not(debug_assertions))]
            None => std::hint::unreachable_unchecked(),
            Some(x) => x,
        }
    }
}
|
#![warn(missing_docs)]
#![crate_name="itertools"]
#![cfg_attr(not(feature = "use_std"), no_std)]
//! Extra iterator adaptors, functions and macros.
//!
//! To extend [`Iterator`] with methods in this crate, import
//! the [`Itertools` trait](Itertools):
//!
//! ```
//! use itertools::Itertools;
//! ```
//!
//! Now, new methods like [`interleave`](Itertools::interleave)
//! are available on all iterators:
//!
//! ```
//! use itertools::Itertools;
//!
//! let it = (1..3).interleave(vec![-1, -2]);
//! itertools::assert_equal(it, vec![1, -1, 2, -2]);
//! ```
//!
//! Most iterator methods are also provided as functions (with the benefit
//! that they convert parameters using [`IntoIterator`]):
//!
//! ```
//! use itertools::interleave;
//!
//! for elt in interleave(&[1, 2, 3], &[2, 3, 4]) {
//! /* loop body */
//! }
//! ```
//!
//! ## Crate Features
//!
//! - `use_std`
//! - Enabled by default.
//! - Disable to compile itertools using `#![no_std]`. This disables
//! any items that depend on collections (like `group_by`, `unique`,
//! `kmerge`, `join` and many more).
//!
//! ## Rust Version
//!
//! This version of itertools requires Rust 1.32 or later.
#![doc(html_root_url="https://docs.rs/itertools/0.8/")]
#[cfg(not(feature = "use_std"))]
extern crate core as std;
#[cfg(feature = "use_alloc")]
extern crate alloc;
#[cfg(feature = "use_alloc")]
use alloc::{
string::String,
vec::Vec,
};
pub use either::Either;
#[cfg(feature = "use_std")]
use std::collections::HashMap;
use std::iter::{IntoIterator, once};
use std::cmp::Ordering;
use std::fmt;
#[cfg(feature = "use_std")]
use std::hash::Hash;
#[cfg(feature = "use_alloc")]
use std::fmt::Write;
#[cfg(feature = "use_alloc")]
type VecIntoIter<T> = alloc::vec::IntoIter<T>;
#[cfg(feature = "use_alloc")]
use std::iter::FromIterator;
#[macro_use]
mod impl_macros;
// for compatibility with no std and macros
#[doc(hidden)]
pub use std::iter as __std_iter;
/// The concrete iterator types.
///
/// Adaptors whose implementation needs a collection are gated behind
/// the `use_alloc` / `use_std` cargo features; the ungated re-exports
/// work under `#![no_std]` as well.
pub mod structs {
    // Always-available adaptors from the `adaptors` module.
    pub use crate::adaptors::{
        Dedup,
        DedupBy,
        DedupWithCount,
        DedupByWithCount,
        Interleave,
        InterleaveShortest,
        FilterMapOk,
        FilterOk,
        Product,
        PutBack,
        Batching,
        MapInto,
        MapOk,
        Merge,
        MergeBy,
        TakeWhileRef,
        WhileSome,
        Coalesce,
        TupleCombinations,
        Positions,
        Update,
    };
    // Kept only for backwards compatibility.
    #[allow(deprecated)]
    pub use crate::adaptors::{MapResults, Step};
    #[cfg(feature = "use_alloc")]
    pub use crate::adaptors::MultiProduct;
    #[cfg(feature = "use_alloc")]
    pub use crate::combinations::Combinations;
    #[cfg(feature = "use_alloc")]
    pub use crate::combinations_with_replacement::CombinationsWithReplacement;
    pub use crate::cons_tuples_impl::ConsTuples;
    pub use crate::exactly_one_err::ExactlyOneError;
    pub use crate::format::{Format, FormatWith};
    #[cfg(feature = "use_std")]
    pub use crate::grouping_map::{GroupingMap, GroupingMapBy};
    #[cfg(feature = "use_alloc")]
    pub use crate::groupbylazy::{IntoChunks, Chunk, Chunks, GroupBy, Group, Groups};
    pub use crate::intersperse::{Intersperse, IntersperseWith};
    #[cfg(feature = "use_alloc")]
    pub use crate::kmerge_impl::{KMerge, KMergeBy};
    pub use crate::merge_join::MergeJoinBy;
    #[cfg(feature = "use_alloc")]
    pub use crate::multipeek_impl::MultiPeek;
    #[cfg(feature = "use_alloc")]
    pub use crate::peek_nth::PeekNth;
    pub use crate::pad_tail::PadUsing;
    pub use crate::peeking_take_while::PeekingTakeWhile;
    #[cfg(feature = "use_alloc")]
    pub use crate::permutations::Permutations;
    pub use crate::process_results_impl::ProcessResults;
    #[cfg(feature = "use_alloc")]
    pub use crate::powerset::Powerset;
    #[cfg(feature = "use_alloc")]
    pub use crate::put_back_n_impl::PutBackN;
    #[cfg(feature = "use_alloc")]
    pub use crate::rciter_impl::RcIter;
    pub use crate::repeatn::RepeatN;
    // Kept only for backwards compatibility.
    #[allow(deprecated)]
    pub use crate::sources::{RepeatCall, Unfold, Iterate};
    #[cfg(feature = "use_alloc")]
    pub use crate::tee::Tee;
    pub use crate::tuple_impl::{TupleBuffer, TupleWindows, CircularTupleWindows, Tuples};
    #[cfg(feature = "use_std")]
    pub use crate::unique_impl::{Unique, UniqueBy};
    pub use crate::with_position::WithPosition;
    pub use crate::zip_eq_impl::ZipEq;
    pub use crate::zip_longest::ZipLongest;
    pub use crate::ziptuple::Zip;
}
/// Traits helpful for using certain `Itertools` methods in generic contexts.
pub mod traits {
// Re-exported so downstream generic code can name the tuple trait
// without reaching into the private `tuple_impl` module.
pub use crate::tuple_impl::HomogeneousTuple;
}
#[allow(deprecated)]
pub use crate::structs::*;
pub use crate::concat_impl::concat;
pub use crate::cons_tuples_impl::cons_tuples;
pub use crate::diff::diff_with;
pub use crate::diff::Diff;
#[cfg(feature = "use_alloc")]
pub use crate::kmerge_impl::{kmerge_by};
pub use crate::minmax::MinMaxResult;
pub use crate::peeking_take_while::PeekingNext;
pub use crate::process_results_impl::process_results;
pub use crate::repeatn::repeat_n;
#[allow(deprecated)]
pub use crate::sources::{repeat_call, unfold, iterate};
pub use crate::with_position::Position;
pub use crate::ziptuple::multizip;
mod adaptors;
mod either_or_both;
pub use crate::either_or_both::EitherOrBoth;
#[doc(hidden)]
pub mod free;
#[doc(inline)]
pub use crate::free::*;
mod concat_impl;
mod cons_tuples_impl;
#[cfg(feature = "use_alloc")]
mod combinations;
#[cfg(feature = "use_alloc")]
mod combinations_with_replacement;
mod exactly_one_err;
mod diff;
mod format;
#[cfg(feature = "use_std")]
mod grouping_map;
#[cfg(feature = "use_alloc")]
mod group_map;
#[cfg(feature = "use_alloc")]
mod groupbylazy;
mod intersperse;
#[cfg(feature = "use_alloc")]
mod k_smallest;
#[cfg(feature = "use_alloc")]
mod kmerge_impl;
#[cfg(feature = "use_alloc")]
mod lazy_buffer;
mod merge_join;
mod minmax;
#[cfg(feature = "use_alloc")]
mod multipeek_impl;
mod pad_tail;
#[cfg(feature = "use_alloc")]
mod peek_nth;
mod peeking_take_while;
#[cfg(feature = "use_alloc")]
mod permutations;
#[cfg(feature = "use_alloc")]
mod powerset;
mod process_results_impl;
#[cfg(feature = "use_alloc")]
mod put_back_n_impl;
#[cfg(feature = "use_alloc")]
mod rciter_impl;
mod repeatn;
mod size_hint;
mod sources;
#[cfg(feature = "use_alloc")]
mod tee;
mod tuple_impl;
#[cfg(feature = "use_std")]
mod unique_impl;
mod with_position;
mod zip_eq_impl;
mod zip_longest;
mod ziptuple;
#[macro_export]
/// Create an iterator over the “cartesian product” of iterators.
///
/// Iterator element type is like `(A, B, ..., E)` if formed
/// from iterators `(I, J, ..., M)` with element types `I::Item = A`, `J::Item = B`, etc.
///
/// ```
/// # use itertools::iproduct;
/// #
/// # fn main() {
/// // Iterate over the coordinates of a 4 x 4 x 4 grid
/// // from (0, 0, 0), (0, 0, 1), .., (0, 1, 0), (0, 1, 1), .. etc until (3, 3, 3)
/// for (i, j, k) in iproduct!(0..4, 0..4, 0..4) {
/// // ..
/// }
/// # }
/// ```
macro_rules! iproduct {
// Internal @flatten rules: products of more than two iterators are built as
// nested pairs, then flattened one level at a time with `cons_tuples`.
// Base case: no more iterators to fold in.
(@flatten $I:expr,) => (
$I
);
// Recursive case: fold the next iterator into the accumulated product,
// flattening `((a, b), c)` into `(a, b, c)` via `cons_tuples`.
(@flatten $I:expr, $J:expr, $($K:expr,)*) => (
$crate::iproduct!(@flatten $crate::cons_tuples($crate::iproduct!($I, $J)), $($K,)*)
);
// One iterator: just convert to an iterator (no product needed).
($I:expr) => (
$crate::__std_iter::IntoIterator::into_iter($I)
);
// Two iterators: direct `cartesian_product`.
($I:expr, $J:expr) => (
$crate::Itertools::cartesian_product($crate::iproduct!($I), $crate::iproduct!($J))
);
// Three or more: pair the first two, then hand off to @flatten.
($I:expr, $J:expr, $($K:expr),+) => (
$crate::iproduct!(@flatten $crate::iproduct!($I, $J), $($K,)+)
);
}
#[macro_export]
/// Create an iterator running multiple iterators in lockstep.
///
/// The `izip!` iterator yields elements until any subiterator
/// returns `None`.
///
/// This is a version of the standard ``.zip()`` that's supporting more than
/// two iterators. The iterator element type is a tuple with one element
/// from each of the input iterators. Just like ``.zip()``, the iteration stops
/// when the shortest of the inputs reaches its end.
///
/// **Note:** The result of this macro is in the general case an iterator
/// composed of repeated `.zip()` and a `.map()`; it has an anonymous type.
/// The special cases of one and two arguments produce the equivalent of
/// `$a.into_iter()` and `$a.into_iter().zip($b)` respectively.
///
/// Prefer this macro `izip!()` over [`multizip`] for the performance benefits
/// of using the standard library `.zip()`.
///
/// ```
/// # use itertools::izip;
/// #
/// # fn main() {
///
/// // iterate over three sequences side-by-side
/// let mut results = [0, 0, 0, 0];
/// let inputs = [3, 7, 9, 6];
///
/// for (r, index, input) in izip!(&mut results, 0..10, &inputs) {
/// *r = index * 10 + input;
/// }
///
/// assert_eq!(results, [0 + 3, 10 + 7, 29, 36]);
/// # }
/// ```
macro_rules! izip {
// @closure creates a tuple-flattening closure for .map() call. usage:
// @closure partial_pattern => partial_tuple , rest , of , iterators
// eg. izip!( @closure ((a, b), c) => (a, b, c) , dd , ee )
( @closure $p:pat => $tup:expr ) => {
|$p| $tup
};
// The "b" identifier is a different identifier on each recursion level thanks to hygiene.
( @closure $p:pat => ( $($tup:tt)* ) , $_iter:expr $( , $tail:expr )* ) => {
$crate::izip!(@closure ($p, b) => ( $($tup)*, b ) $( , $tail )*)
};
// unary
// The `$(,)*` allows an optional trailing comma at the call site.
($first:expr $(,)*) => {
$crate::__std_iter::IntoIterator::into_iter($first)
};
// binary
// Exactly `a.into_iter().zip(b)` — no mapping closure is needed for two inputs.
($first:expr, $second:expr $(,)*) => {
$crate::izip!($first)
.zip($second)
};
// n-ary where n > 2
// Builds a chain of nested `.zip()` calls, then flattens the nested
// `((a, b), c, ...)` items into a flat tuple with the @closure helper.
( $first:expr $( , $rest:expr )* $(,)* ) => {
$crate::izip!($first)
$(
.zip($rest)
)*
.map(
$crate::izip!(@closure a => (a) $( , $rest )*)
)
};
}
/// An [`Iterator`] blanket implementation that provides extra adaptors and
/// methods.
///
/// This trait defines a number of methods. They are divided into two groups:
///
/// * *Adaptors* take an iterator and parameter as input, and return
/// a new iterator value. These are listed first in the trait. An example
/// of an adaptor is [`.interleave()`](#method.interleave)
///
/// * *Regular methods* are those that don't return iterators and instead
/// return a regular value of some other kind.
/// [`.next_tuple()`](#method.next_tuple) is an example and the first regular
/// method in the list.
pub trait Itertools : Iterator {
// adaptors
/// Alternate elements from two iterators until both have run out.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (1..7).interleave(vec![-1, -2]);
/// itertools::assert_equal(it, vec![1, -1, 2, -2, 3, 4, 5, 6]);
/// ```
fn interleave<J>(self, other: J) -> Interleave<Self, J::IntoIter>
where J: IntoIterator<Item = Self::Item>,
Self: Sized
{
// Thin wrapper over the crate's free `interleave` function.
interleave(self, other)
}
/// Alternate elements from two iterators until at least one of them has run
/// out.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (1..7).interleave_shortest(vec![-1, -2]);
/// itertools::assert_equal(it, vec![1, -1, 2, -2, 3]);
/// ```
fn interleave_shortest<J>(self, other: J) -> InterleaveShortest<Self, J::IntoIter>
where J: IntoIterator<Item = Self::Item>,
Self: Sized
{
// Delegates to the adaptors module constructor.
adaptors::interleave_shortest(self, other.into_iter())
}
/// An iterator adaptor to insert a particular value
/// between each element of the adapted iterator.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// itertools::assert_equal((0..3).intersperse(8), vec![0, 8, 1, 8, 2]);
/// ```
fn intersperse(self, element: Self::Item) -> Intersperse<Self>
where Self: Sized,
Self::Item: Clone
{
// Delegates to the intersperse module constructor.
intersperse::intersperse(self, element)
}
/// An iterator adaptor to insert a particular value created by a function
/// between each element of the adapted iterator.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let mut i = 10;
/// itertools::assert_equal((0..3).intersperse_with(|| { i -= 1; i }), vec![0, 9, 1, 8, 2]);
/// assert_eq!(i, 8);
/// ```
fn intersperse_with<F>(self, element: F) -> IntersperseWith<Self, F>
where Self: Sized,
F: FnMut() -> Self::Item
{
// Same as `intersperse`, but the separator is produced lazily by `element`.
intersperse::intersperse_with(self, element)
}
/// Create an iterator which iterates over both this and the specified
/// iterator simultaneously, yielding pairs of two optional elements.
///
/// This iterator is *fused*.
///
/// As long as neither input iterator is exhausted yet, it yields two values
/// via `EitherOrBoth::Both`.
///
/// When the parameter iterator is exhausted, it only yields a value from the
/// `self` iterator via `EitherOrBoth::Left`.
///
/// When the `self` iterator is exhausted, it only yields a value from the
/// parameter iterator via `EitherOrBoth::Right`.
///
/// When both iterators return `None`, all further invocations of `.next()`
/// will return `None`.
///
/// Iterator element type is
/// [`EitherOrBoth<Self::Item, J::Item>`](EitherOrBoth).
///
/// ```rust
/// use itertools::EitherOrBoth::{Both, Right};
/// use itertools::Itertools;
/// let it = (0..1).zip_longest(1..3);
/// itertools::assert_equal(it, vec![Both(0, 1), Right(2)]);
/// ```
#[inline]
fn zip_longest<J>(self, other: J) -> ZipLongest<Self, J::IntoIter>
where J: IntoIterator,
Self: Sized
{
// Delegates to the zip_longest module constructor.
zip_longest::zip_longest(self, other.into_iter())
}
/// Create an iterator which iterates over both this and the specified
/// iterator simultaneously, yielding pairs of elements.
///
/// **Panics** if the iterators reach an end and they are not of equal
/// lengths.
#[inline]
fn zip_eq<J>(self, other: J) -> ZipEq<Self, J::IntoIter>
where J: IntoIterator,
Self: Sized
{
// Thin wrapper over the crate's free `zip_eq` function.
zip_eq(self, other)
}
/// A “meta iterator adaptor”. Its closure receives a reference to the
/// iterator and may pick off as many elements as it likes, to produce the
/// next iterator element.
///
/// Iterator element type is `B`.
///
/// ```
/// use itertools::Itertools;
///
/// // An adaptor that gathers elements in pairs
/// let pit = (0..4).batching(|it| {
/// match it.next() {
/// None => None,
/// Some(x) => match it.next() {
/// None => None,
/// Some(y) => Some((x, y)),
/// }
/// }
/// });
///
/// itertools::assert_equal(pit, vec![(0, 1), (2, 3)]);
/// ```
///
fn batching<B, F>(self, f: F) -> Batching<Self, F>
where F: FnMut(&mut Self) -> Option<B>,
Self: Sized
{
// Delegates to the adaptors module constructor.
adaptors::batching(self, f)
}
/// Return an *iterable* that can group iterator elements.
/// Consecutive elements that map to the same key (“runs”), are assigned
/// to the same group.
///
/// `GroupBy` is the storage for the lazy grouping operation.
///
/// If the groups are consumed in order, or if each group's iterator is
/// dropped without keeping it around, then `GroupBy` uses no
/// allocations. It needs allocations only if several group iterators
/// are alive at the same time.
///
/// This type implements `IntoIterator` (it is **not** an iterator
/// itself), because the group iterators need to borrow from this
/// value. It should be stored in a local variable or temporary and
/// iterated.
///
/// Iterator element type is `(K, Group)`: the group's key and the
/// group iterator.
///
/// ```
/// use itertools::Itertools;
///
/// // group data into runs of larger than zero or not.
/// let data = vec![1, 3, -2, -2, 1, 0, 1, 2];
/// // groups: |---->|------>|--------->|
///
/// // Note: The `&` is significant here, `GroupBy` is iterable
/// // only by reference. You can also call `.into_iter()` explicitly.
/// let mut data_grouped = Vec::new();
/// for (key, group) in &data.into_iter().group_by(|elt| *elt >= 0) {
/// data_grouped.push((key, group.collect()));
/// }
/// assert_eq!(data_grouped, vec![(true, vec![1, 3]), (false, vec![-2, -2]), (true, vec![1, 0, 1, 2])]);
/// ```
#[cfg(feature = "use_alloc")]
fn group_by<K, F>(self, key: F) -> GroupBy<K, Self, F>
where Self: Sized,
F: FnMut(&Self::Item) -> K,
K: PartialEq,
{
// Delegates to the lazy group-by constructor; only `PartialEq` on the
// key is required because runs are compared pairwise, never hashed.
groupbylazy::new(self, key)
}
/// Return an *iterable* that can chunk the iterator.
///
/// Yield subiterators (chunks) that each yield a fixed number elements,
/// determined by `size`. The last chunk will be shorter if there aren't
/// enough elements.
///
/// `IntoChunks` is based on `GroupBy`: it is iterable (implements
/// `IntoIterator`, **not** `Iterator`), and it only buffers if several
/// chunk iterators are alive at the same time.
///
/// Iterator element type is `Chunk`, each chunk's iterator.
///
/// **Panics** if `size` is 0.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![1, 1, 2, -2, 6, 0, 3, 1];
/// //chunk size=3 |------->|-------->|--->|
///
/// // Note: The `&` is significant here, `IntoChunks` is iterable
/// // only by reference. You can also call `.into_iter()` explicitly.
/// for chunk in &data.into_iter().chunks(3) {
/// // Check that the sum of each chunk is 4.
/// assert_eq!(4, chunk.sum());
/// }
/// ```
#[cfg(feature = "use_alloc")]
fn chunks(self, size: usize) -> IntoChunks<Self>
where Self: Sized,
{
// Fail fast with a descriptive message; a bare `assert!` panics with an
// opaque "assertion failed" text that doesn't tell the caller what was wrong.
assert!(size != 0, "chunk size must be non-zero");
groupbylazy::new_chunks(self, size)
}
/// Return an iterator over all contiguous windows producing tuples of
/// a specific size (up to 4).
///
/// `tuple_windows` clones the iterator elements so that they can be
/// part of successive windows, this makes it most suited for iterators
/// of references and other values that are cheap to copy.
///
/// ```
/// use itertools::Itertools;
/// let mut v = Vec::new();
///
/// // pairwise iteration
/// for (a, b) in (1..5).tuple_windows() {
/// v.push((a, b));
/// }
/// assert_eq!(v, vec![(1, 2), (2, 3), (3, 4)]);
///
/// let mut it = (1..5).tuple_windows();
/// assert_eq!(Some((1, 2, 3)), it.next());
/// assert_eq!(Some((2, 3, 4)), it.next());
/// assert_eq!(None, it.next());
///
/// // this requires a type hint
/// let it = (1..5).tuple_windows::<(_, _, _)>();
/// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4)]);
///
/// // you can also specify the complete type
/// use itertools::TupleWindows;
/// use std::ops::Range;
///
/// let it: TupleWindows<Range<u32>, (u32, u32, u32)> = (1..5).tuple_windows();
/// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4)]);
/// ```
fn tuple_windows<T>(self) -> TupleWindows<Self, T>
where Self: Sized + Iterator<Item = T::Item>,
T: traits::HomogeneousTuple,
T::Item: Clone
{
// The window size is encoded in the tuple type `T` chosen by the caller.
tuple_impl::tuple_windows(self)
}
/// Return an iterator over all windows, wrapping back to the first
/// elements when the window would otherwise exceed the length of the
/// iterator, producing tuples of a specific size (up to 4).
///
/// `circular_tuple_windows` clones the iterator elements so that they can be
/// part of successive windows, this makes it most suited for iterators
/// of references and other values that are cheap to copy.
///
/// ```
/// use itertools::Itertools;
/// let mut v = Vec::new();
/// for (a, b) in (1..5).circular_tuple_windows() {
/// v.push((a, b));
/// }
/// assert_eq!(v, vec![(1, 2), (2, 3), (3, 4), (4, 1)]);
///
/// let mut it = (1..5).circular_tuple_windows();
/// assert_eq!(Some((1, 2, 3)), it.next());
/// assert_eq!(Some((2, 3, 4)), it.next());
/// assert_eq!(Some((3, 4, 1)), it.next());
/// assert_eq!(Some((4, 1, 2)), it.next());
/// assert_eq!(None, it.next());
///
/// // this requires a type hint
/// let it = (1..5).circular_tuple_windows::<(_, _, _)>();
/// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4), (3, 4, 1), (4, 1, 2)]);
/// ```
fn circular_tuple_windows<T>(self) -> CircularTupleWindows<Self, T>
where Self: Sized + Clone + Iterator<Item = T::Item> + ExactSizeIterator,
T: tuple_impl::TupleCollect + Clone,
T::Item: Clone
{
// Requires `Clone + ExactSizeIterator` so the adaptor can wrap around
// to the start for the final windows.
tuple_impl::circular_tuple_windows(self)
}
/// Return an iterator that groups the items in tuples of a specific size
/// (up to 4).
///
/// See also the method [`.next_tuple()`](#method.next_tuple).
///
/// ```
/// use itertools::Itertools;
/// let mut v = Vec::new();
/// for (a, b) in (1..5).tuples() {
/// v.push((a, b));
/// }
/// assert_eq!(v, vec![(1, 2), (3, 4)]);
///
/// let mut it = (1..7).tuples();
/// assert_eq!(Some((1, 2, 3)), it.next());
/// assert_eq!(Some((4, 5, 6)), it.next());
/// assert_eq!(None, it.next());
///
/// // this requires a type hint
/// let it = (1..7).tuples::<(_, _, _)>();
/// itertools::assert_equal(it, vec![(1, 2, 3), (4, 5, 6)]);
///
/// // you can also specify the complete type
/// use itertools::Tuples;
/// use std::ops::Range;
///
/// let it: Tuples<Range<u32>, (u32, u32, u32)> = (1..7).tuples();
/// itertools::assert_equal(it, vec![(1, 2, 3), (4, 5, 6)]);
/// ```
///
/// See also [`Tuples::into_buffer`].
fn tuples<T>(self) -> Tuples<Self, T>
where Self: Sized + Iterator<Item = T::Item>,
T: traits::HomogeneousTuple
{
// Non-overlapping grouping; the tuple size comes from the type `T`.
tuple_impl::tuples(self)
}
/// Split into an iterator pair that both yield all elements from
/// the original iterator.
///
/// **Note:** If the iterator is clonable, prefer using that instead
/// of using this method. It is likely to be more efficient.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
/// let xs = vec![0, 1, 2, 3];
///
/// let (mut t1, t2) = xs.into_iter().tee();
/// itertools::assert_equal(t1.next(), Some(0));
/// itertools::assert_equal(t2, 0..4);
/// itertools::assert_equal(t1, 1..4);
/// ```
#[cfg(feature = "use_alloc")]
fn tee(self) -> (Tee<Self>, Tee<Self>)
where Self: Sized,
Self::Item: Clone
{
// Delegates to the tee module constructor; needs `use_alloc` because
// the halves buffer un-consumed elements.
tee::new(self)
}
/// Return an iterator adaptor that steps `n` elements in the base iterator
/// for each iteration.
///
/// The iterator steps by yielding the next element from the base iterator,
/// then skipping forward `n - 1` elements.
///
/// Iterator element type is `Self::Item`.
///
/// **Panics** if the step is 0.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (0..8).step(3);
/// itertools::assert_equal(it, vec![0, 3, 6]);
/// ```
#[deprecated(note="Use std .step_by() instead", since="0.8.0")]
#[allow(deprecated)]
fn step(self, n: usize) -> Step<Self>
where Self: Sized
{
// Kept only for backward compatibility; superseded by std's `step_by`.
adaptors::step(self, n)
}
/// Convert each item of the iterator using the `Into` trait.
///
/// ```rust
/// use itertools::Itertools;
///
/// (1i32..42i32).map_into::<f64>().collect_vec();
/// ```
fn map_into<R>(self) -> MapInto<Self, R>
where Self: Sized,
Self::Item: Into<R>,
{
// Delegates to the adaptors module constructor.
adaptors::map_into(self)
}
/// See [`.map_ok()`](#method.map_ok).
#[deprecated(note="Use .map_ok() instead", since="0.10.0")]
fn map_results<F, T, U, E>(self, f: F) -> MapOk<Self, F>
where Self: Iterator<Item = Result<T, E>> + Sized,
F: FnMut(T) -> U,
{
// Deprecated alias; forwards directly to `map_ok`.
self.map_ok(f)
}
/// Return an iterator adaptor that applies the provided closure
/// to every `Result::Ok` value. `Result::Err` values are
/// unchanged.
///
/// ```
/// use itertools::Itertools;
///
/// let input = vec![Ok(41), Err(false), Ok(11)];
/// let it = input.into_iter().map_ok(|i| i + 1);
/// itertools::assert_equal(it, vec![Ok(42), Err(false), Ok(12)]);
/// ```
fn map_ok<F, T, U, E>(self, f: F) -> MapOk<Self, F>
where Self: Iterator<Item = Result<T, E>> + Sized,
F: FnMut(T) -> U,
{
// Delegates to the adaptors module constructor.
adaptors::map_ok(self, f)
}
/// Return an iterator adaptor that filters every `Result::Ok`
/// value with the provided closure. `Result::Err` values are
/// unchanged.
///
/// ```
/// use itertools::Itertools;
///
/// let input = vec![Ok(22), Err(false), Ok(11)];
/// let it = input.into_iter().filter_ok(|&i| i > 20);
/// itertools::assert_equal(it, vec![Ok(22), Err(false)]);
/// ```
fn filter_ok<F, T, E>(self, f: F) -> FilterOk<Self, F>
where Self: Iterator<Item = Result<T, E>> + Sized,
F: FnMut(&T) -> bool,
{
// Delegates to the adaptors module constructor.
adaptors::filter_ok(self, f)
}
/// Return an iterator adaptor that filters and transforms every
/// `Result::Ok` value with the provided closure. `Result::Err`
/// values are unchanged.
///
/// ```
/// use itertools::Itertools;
///
/// let input = vec![Ok(22), Err(false), Ok(11)];
/// let it = input.into_iter().filter_map_ok(|i| if i > 20 { Some(i * 2) } else { None });
/// itertools::assert_equal(it, vec![Ok(44), Err(false)]);
/// ```
fn filter_map_ok<F, T, U, E>(self, f: F) -> FilterMapOk<Self, F>
where Self: Iterator<Item = Result<T, E>> + Sized,
F: FnMut(T) -> Option<U>,
{
// Delegates to the adaptors module constructor.
adaptors::filter_map_ok(self, f)
}
/// Return an iterator adaptor that merges the two base iterators in
/// ascending order. If both base iterators are sorted (ascending), the
/// result is sorted.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let a = (0..11).step(3);
/// let b = (0..11).step(5);
/// let it = a.merge(b);
/// itertools::assert_equal(it, vec![0, 0, 3, 5, 6, 9, 10]);
/// ```
fn merge<J>(self, other: J) -> Merge<Self, J::IntoIter>
where Self: Sized,
Self::Item: PartialOrd,
J: IntoIterator<Item = Self::Item>
{
// Thin wrapper over the crate's free `merge` function.
merge(self, other)
}
/// Return an iterator adaptor that merges the two base iterators in order.
/// This is much like `.merge()` but allows for a custom ordering.
///
/// This can be especially useful for sequences of tuples.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let a = (0..).zip("bc".chars());
/// let b = (0..).zip("ad".chars());
/// let it = a.merge_by(b, |x, y| x.1 <= y.1);
/// itertools::assert_equal(it, vec![(0, 'a'), (0, 'b'), (1, 'c'), (1, 'd')]);
/// ```
fn merge_by<J, F>(self, other: J, is_first: F) -> MergeBy<Self, J::IntoIter, F>
where Self: Sized,
J: IntoIterator<Item = Self::Item>,
F: FnMut(&Self::Item, &Self::Item) -> bool
{
// `is_first` decides whether the left element should be emitted before the right.
adaptors::merge_by_new(self, other.into_iter(), is_first)
}
/// Create an iterator that merges items from both this and the specified
/// iterator in ascending order.
///
/// It chooses whether to pair elements based on the `Ordering` returned by the
/// specified compare function. At any point, inspecting the tip of the
/// iterators `I` and `J` as items `i` of type `I::Item` and `j` of type
/// `J::Item` respectively, the resulting iterator will:
///
/// - Emit `EitherOrBoth::Left(i)` when `i < j`,
/// and remove `i` from its source iterator
/// - Emit `EitherOrBoth::Right(j)` when `i > j`,
/// and remove `j` from its source iterator
/// - Emit `EitherOrBoth::Both(i, j)` when `i == j`,
/// and remove both `i` and `j` from their respective source iterators
///
/// ```
/// use itertools::Itertools;
/// use itertools::EitherOrBoth::{Left, Right, Both};
///
/// let multiples_of_2 = (0..10).step(2);
/// let multiples_of_3 = (0..10).step(3);
///
/// itertools::assert_equal(
/// multiples_of_2.merge_join_by(multiples_of_3, |i, j| i.cmp(j)),
/// vec![Both(0, 0), Left(2), Right(3), Left(4), Both(6, 6), Left(8), Right(9)]
/// );
/// ```
#[inline]
fn merge_join_by<J, F>(self, other: J, cmp_fn: F) -> MergeJoinBy<Self, J::IntoIter, F>
where J: IntoIterator,
F: FnMut(&Self::Item, &J::Item) -> std::cmp::Ordering,
Self: Sized
{
// Thin wrapper over the crate's free `merge_join_by` function.
merge_join_by(self, other, cmp_fn)
}
/// Return an iterator adaptor that flattens an iterator of iterators by
/// merging them in ascending order.
///
/// If all base iterators are sorted (ascending), the result is sorted.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let a = (0..6).step(3);
/// let b = (1..6).step(3);
/// let c = (2..6).step(3);
/// let it = vec![a, b, c].into_iter().kmerge();
/// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 5]);
/// ```
#[cfg(feature = "use_alloc")]
fn kmerge(self) -> KMerge<<Self::Item as IntoIterator>::IntoIter>
where Self: Sized,
Self::Item: IntoIterator,
<Self::Item as IntoIterator>::Item: PartialOrd,
{
// Thin wrapper over the crate's free `kmerge` function.
kmerge(self)
}
/// Return an iterator adaptor that flattens an iterator of iterators by
/// merging them according to the given closure.
///
/// The closure `first` is called with two elements *a*, *b* and should
/// return `true` if *a* is ordered before *b*.
///
/// If all base iterators are sorted according to `first`, the result is
/// sorted.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let a = vec![-1f64, 2., 3., -5., 6., -7.];
/// let b = vec![0., 2., -4.];
/// let mut it = vec![a, b].into_iter().kmerge_by(|a, b| a.abs() < b.abs());
/// assert_eq!(it.next(), Some(0.));
/// assert_eq!(it.last(), Some(-7.));
/// ```
#[cfg(feature = "use_alloc")]
fn kmerge_by<F>(self, first: F)
-> KMergeBy<<Self::Item as IntoIterator>::IntoIter, F>
where Self: Sized,
Self::Item: IntoIterator,
F: FnMut(&<Self::Item as IntoIterator>::Item,
&<Self::Item as IntoIterator>::Item) -> bool
{
// Thin wrapper over the crate's free `kmerge_by` function.
kmerge_by(self, first)
}
/// Return an iterator adaptor that iterates over the cartesian product of
/// the element sets of two iterators `self` and `J`.
///
/// Iterator element type is `(Self::Item, J::Item)`.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (0..2).cartesian_product("αβ".chars());
/// itertools::assert_equal(it, vec![(0, 'α'), (0, 'β'), (1, 'α'), (1, 'β')]);
/// ```
fn cartesian_product<J>(self, other: J) -> Product<Self, J::IntoIter>
where Self: Sized,
Self::Item: Clone,
J: IntoIterator,
J::IntoIter: Clone
{
// `other` must yield a `Clone` iterator because it is restarted for
// every element of `self`.
adaptors::cartesian_product(self, other.into_iter())
}
/// Return an iterator adaptor that iterates over the cartesian product of
/// all subiterators returned by meta-iterator `self`.
///
/// All provided iterators must yield the same `Item` type. To generate
/// the product of iterators yielding multiple types, use the
/// [`iproduct`] macro instead.
///
///
/// The iterator element type is `Vec<T>`, where `T` is the iterator element
/// of the subiterators.
///
/// ```
/// use itertools::Itertools;
/// let mut multi_prod = (0..3).map(|i| (i * 2)..(i * 2 + 2))
/// .multi_cartesian_product();
/// assert_eq!(multi_prod.next(), Some(vec![0, 2, 4]));
/// assert_eq!(multi_prod.next(), Some(vec![0, 2, 5]));
/// assert_eq!(multi_prod.next(), Some(vec![0, 3, 4]));
/// assert_eq!(multi_prod.next(), Some(vec![0, 3, 5]));
/// assert_eq!(multi_prod.next(), Some(vec![1, 2, 4]));
/// assert_eq!(multi_prod.next(), Some(vec![1, 2, 5]));
/// assert_eq!(multi_prod.next(), Some(vec![1, 3, 4]));
/// assert_eq!(multi_prod.next(), Some(vec![1, 3, 5]));
/// assert_eq!(multi_prod.next(), None);
/// ```
#[cfg(feature = "use_alloc")]
fn multi_cartesian_product(self) -> MultiProduct<<Self::Item as IntoIterator>::IntoIter>
where Self: Iterator + Sized,
Self::Item: IntoIterator,
<Self::Item as IntoIterator>::IntoIter: Clone,
<Self::Item as IntoIterator>::Item: Clone
{
// Delegates to the adaptors module constructor.
adaptors::multi_cartesian_product(self)
}
/// Return an iterator adaptor that uses the passed-in closure to
/// optionally merge together consecutive elements.
///
/// The closure `f` is passed two elements, `previous` and `current` and may
/// return either (1) `Ok(combined)` to merge the two values or
/// (2) `Err((previous', current'))` to indicate they can't be merged.
/// In (2), the value `previous'` is emitted by the iterator.
/// Either (1) `combined` or (2) `current'` becomes the previous value
/// when coalesce continues with the next pair of elements to merge. The
/// value that remains at the end is also emitted by the iterator.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// // sum same-sign runs together
/// let data = vec![-1., -2., -3., 3., 1., 0., -1.];
/// itertools::assert_equal(data.into_iter().coalesce(|x, y|
/// if (x >= 0.) == (y >= 0.) {
/// Ok(x + y)
/// } else {
/// Err((x, y))
/// }),
/// vec![-6., 4., -1.]);
/// ```
fn coalesce<F>(self, f: F) -> Coalesce<Self, F>
where Self: Sized,
F: FnMut(Self::Item, Self::Item)
-> Result<Self::Item, (Self::Item, Self::Item)>
{
// Delegates to the adaptors module constructor.
adaptors::coalesce(self, f)
}
/// Remove duplicates from sections of consecutive identical elements.
/// If the iterator is sorted, all elements will be unique.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![1., 1., 2., 3., 3., 2., 2.];
/// itertools::assert_equal(data.into_iter().dedup(),
/// vec![1., 2., 3., 2.]);
/// ```
fn dedup(self) -> Dedup<Self>
where Self: Sized,
Self::Item: PartialEq,
{
// Delegates to the adaptors module constructor.
adaptors::dedup(self)
}
/// Remove duplicates from sections of consecutive identical elements,
/// determining equality using a comparison function.
/// If the iterator is sorted, all elements will be unique.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![(0, 1.), (1, 1.), (0, 2.), (0, 3.), (1, 3.), (1, 2.), (2, 2.)];
/// itertools::assert_equal(data.into_iter().dedup_by(|x, y| x.1 == y.1),
/// vec![(0, 1.), (0, 2.), (0, 3.), (1, 2.)]);
/// ```
fn dedup_by<Cmp>(self, cmp: Cmp) -> DedupBy<Self, Cmp>
where Self: Sized,
Cmp: FnMut(&Self::Item, &Self::Item)->bool,
{
// Same as `dedup`, but `cmp` supplies the equality test.
adaptors::dedup_by(self, cmp)
}
/// Remove duplicates from sections of consecutive identical elements, while keeping a count of
/// how many repeated elements were present.
/// If the iterator is sorted, all elements will be unique.
///
/// Iterator element type is `(usize, Self::Item)`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![1., 1., 2., 3., 3., 2., 2.];
/// itertools::assert_equal(data.into_iter().dedup_with_count(),
/// vec![(2, 1.), (1, 2.), (2, 3.), (2, 2.)]);
/// ```
fn dedup_with_count(self) -> DedupWithCount<Self>
where Self: Sized,
{
// Delegates to the adaptors module constructor.
adaptors::dedup_with_count(self)
}
/// Remove duplicates from sections of consecutive identical elements, while keeping a count of
/// how many repeated elements were present.
/// This will determine equality using a comparison function.
/// If the iterator is sorted, all elements will be unique.
///
/// Iterator element type is `(usize, Self::Item)`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![(0, 1.), (1, 1.), (0, 2.), (0, 3.), (1, 3.), (1, 2.), (2, 2.)];
/// itertools::assert_equal(data.into_iter().dedup_by_with_count(|x, y| x.1 == y.1),
/// vec![(2, (0, 1.)), (1, (0, 2.)), (2, (0, 3.)), (2, (1, 2.))]);
/// ```
fn dedup_by_with_count<Cmp>(self, cmp: Cmp) -> DedupByWithCount<Self, Cmp>
where Self: Sized,
Cmp: FnMut(&Self::Item, &Self::Item) -> bool,
{
// Same as `dedup_with_count`, but `cmp` supplies the equality test.
adaptors::dedup_by_with_count(self, cmp)
}
/// Return an iterator adaptor that filters out elements that have
/// already been produced once during the iteration. Duplicates
/// are detected using hash and equality.
///
/// Clones of visited elements are stored in a hash set in the
/// iterator.
///
/// The iterator is stable, returning the non-duplicate items in the order
/// in which they occur in the adapted iterator. In a set of duplicate
/// items, the first item encountered is the item retained.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![10, 20, 30, 20, 40, 10, 50];
/// itertools::assert_equal(data.into_iter().unique(),
/// vec![10, 20, 30, 40, 50]);
/// ```
#[cfg(feature = "use_std")]
fn unique(self) -> Unique<Self>
where Self: Sized,
Self::Item: Clone + Eq + Hash
{
// Needs `use_std` for the backing hash set of seen elements.
unique_impl::unique(self)
}
/// Return an iterator adaptor that filters out elements that have
/// already been produced once during the iteration.
///
/// Duplicates are detected by comparing the key they map to
/// with the keying function `f` by hash and equality.
/// The keys are stored in a hash set in the iterator.
///
/// The iterator is stable, returning the non-duplicate items in the order
/// in which they occur in the adapted iterator. In a set of duplicate
/// items, the first item encountered is the item retained.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec!["a", "bb", "aa", "c", "ccc"];
/// itertools::assert_equal(data.into_iter().unique_by(|s| s.len()),
/// vec!["a", "bb", "ccc"]);
/// ```
#[cfg(feature = "use_std")]
fn unique_by<V, F>(self, f: F) -> UniqueBy<Self, V, F>
where Self: Sized,
V: Eq + Hash,
F: FnMut(&Self::Item) -> V
{
// Only the keys produced by `f` are stored, not the elements themselves.
unique_impl::unique_by(self, f)
}
/// Return an iterator adaptor that borrows from this iterator and
/// takes items while the closure `accept` returns `true`.
///
/// This adaptor can only be used on iterators that implement `PeekingNext`
/// like `.peekable()`, `put_back` and a few other collection iterators.
///
/// The last and rejected element (first `false`) is still available when
/// `peeking_take_while` is done.
///
///
/// See also [`.take_while_ref()`](#method.take_while_ref)
/// which is a similar adaptor.
fn peeking_take_while<F>(&mut self, accept: F) -> PeekingTakeWhile<Self, F>
where Self: Sized + PeekingNext,
F: FnMut(&Self::Item) -> bool,
{
peeking_take_while::peeking_take_while(self, accept)
}
/// Return an iterator adaptor that borrows from a `Clone`-able iterator
/// to only pick off elements while the predicate `accept` returns `true`.
///
/// It uses the `Clone` trait to restore the original iterator so that the
/// last and rejected element (first `false`) is still available when
/// `take_while_ref` is done.
///
/// ```
/// use itertools::Itertools;
///
/// let mut hexadecimals = "0123456789abcdef".chars();
///
/// let decimals = hexadecimals.take_while_ref(|c| c.is_numeric())
/// .collect::<String>();
/// assert_eq!(decimals, "0123456789");
/// assert_eq!(hexadecimals.next(), Some('a'));
///
/// ```
fn take_while_ref<F>(&mut self, accept: F) -> TakeWhileRef<Self, F>
    where Self: Clone,
          F: FnMut(&Self::Item) -> bool
{
    // Relies on `Self: Clone` to restore the iterator state, so the first
    // rejected element is not lost (see the doc comment above).
    adaptors::take_while_ref(self, accept)
}
/// Return an iterator adaptor that filters `Option<A>` iterator elements
/// and produces `A`. Stops on the first `None` encountered.
///
/// Iterator element type is `A`, the unwrapped element.
///
/// ```
/// use itertools::Itertools;
///
/// // List all hexadecimal digits
/// itertools::assert_equal(
/// (0..).map(|i| std::char::from_digit(i, 16)).while_some(),
/// "0123456789abcdef".chars());
///
/// ```
fn while_some<A>(self) -> WhileSome<Self>
    where Self: Sized + Iterator<Item = Option<A>>
{
    // Unwraps `Some(A)` values and stops at the first `None` encountered.
    adaptors::while_some(self)
}
/// Return an iterator adaptor that iterates over the combinations of the
/// elements from an iterator.
///
/// Iterator element can be any homogeneous tuple of type `Self::Item` with
/// size up to 12.
///
/// ```
/// use itertools::Itertools;
///
/// let mut v = Vec::new();
/// for (a, b) in (1..5).tuple_combinations() {
/// v.push((a, b));
/// }
/// assert_eq!(v, vec![(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]);
///
/// let mut it = (1..5).tuple_combinations();
/// assert_eq!(Some((1, 2, 3)), it.next());
/// assert_eq!(Some((1, 2, 4)), it.next());
/// assert_eq!(Some((1, 3, 4)), it.next());
/// assert_eq!(Some((2, 3, 4)), it.next());
/// assert_eq!(None, it.next());
///
/// // this requires a type hint
/// let it = (1..5).tuple_combinations::<(_, _, _)>();
/// itertools::assert_equal(it, vec![(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]);
///
/// // you can also specify the complete type
/// use itertools::TupleCombinations;
/// use std::ops::Range;
///
/// let it: TupleCombinations<Range<u32>, (u32, u32, u32)> = (1..5).tuple_combinations();
/// itertools::assert_equal(it, vec![(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]);
/// ```
fn tuple_combinations<T>(self) -> TupleCombinations<Self, T>
    where Self: Sized + Clone,
          Self::Item: Clone,
          T: adaptors::HasCombination<Self>,
{
    // The tuple type `T` fixes the combination arity; `HasCombination`
    // supplies the matching implementation for that tuple size.
    adaptors::tuple_combinations(self)
}
/// Return an iterator adaptor that iterates over the `k`-length combinations of
/// the elements from an iterator.
///
/// Iterator element type is `Vec<Self::Item>`. The iterator produces a new Vec per iteration,
/// and clones the iterator elements.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (1..5).combinations(3);
/// itertools::assert_equal(it, vec![
/// vec![1, 2, 3],
/// vec![1, 2, 4],
/// vec![1, 3, 4],
/// vec![2, 3, 4],
/// ]);
/// ```
///
/// Note: Combinations does not take into account the equality of the iterated values.
/// ```
/// use itertools::Itertools;
///
/// let it = vec![1, 2, 2].into_iter().combinations(2);
/// itertools::assert_equal(it, vec![
/// vec![1, 2], // Note: these are the same
/// vec![1, 2], // Note: these are the same
/// vec![2, 2],
/// ]);
/// ```
#[cfg(feature = "use_alloc")]
fn combinations(self, k: usize) -> Combinations<Self>
    where Self: Sized,
          Self::Item: Clone
{
    // Combinations are positional: equal items are not deduplicated
    // (see the second doc example above).
    combinations::combinations(self, k)
}
/// Return an iterator that iterates over the `k`-length combinations of
/// the elements from an iterator, with replacement.
///
/// Iterator element type is `Vec<Self::Item>`. The iterator produces a new Vec per iteration,
/// and clones the iterator elements.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (1..4).combinations_with_replacement(2);
/// itertools::assert_equal(it, vec![
/// vec![1, 1],
/// vec![1, 2],
/// vec![1, 3],
/// vec![2, 2],
/// vec![2, 3],
/// vec![3, 3],
/// ]);
/// ```
#[cfg(feature = "use_alloc")]
fn combinations_with_replacement(self, k: usize) -> CombinationsWithReplacement<Self>
    where
        Self: Sized,
        Self::Item: Clone,
{
    // Unlike `combinations`, the same source element may appear more than
    // once within a single produced `Vec`.
    combinations_with_replacement::combinations_with_replacement(self, k)
}
/// Return an iterator adaptor that iterates over all k-permutations of the
/// elements from an iterator.
///
/// Iterator element type is `Vec<Self::Item>` with length `k`. The iterator
/// produces a new Vec per iteration, and clones the iterator elements.
///
/// If `k` is greater than the length of the input iterator, the resultant
/// iterator adaptor will be empty.
///
/// ```
/// use itertools::Itertools;
///
/// let perms = (5..8).permutations(2);
/// itertools::assert_equal(perms, vec![
/// vec![5, 6],
/// vec![5, 7],
/// vec![6, 5],
/// vec![6, 7],
/// vec![7, 5],
/// vec![7, 6],
/// ]);
/// ```
///
/// Note: Permutations does not take into account the equality of the iterated values.
///
/// ```
/// use itertools::Itertools;
///
/// let it = vec![2, 2].into_iter().permutations(2);
/// itertools::assert_equal(it, vec![
/// vec![2, 2], // Note: these are the same
/// vec![2, 2], // Note: these are the same
/// ]);
/// ```
///
/// Note: The source iterator is collected lazily, and will not be
/// re-iterated if the permutations adaptor is completed and re-iterated.
#[cfg(feature = "use_alloc")]
fn permutations(self, k: usize) -> Permutations<Self>
    where Self: Sized,
          Self::Item: Clone
{
    // The source is buffered lazily (see the note above); a `k` larger
    // than the input length yields an empty adaptor rather than an error.
    permutations::permutations(self, k)
}
/// Return an iterator that iterates through the powerset of the elements from an
/// iterator.
///
/// Iterator element type is `Vec<Self::Item>`. The iterator produces a new `Vec`
/// per iteration, and clones the iterator elements.
///
/// The powerset of a set contains all subsets including the empty set and the full
/// input set. A powerset has length _2^n_ where _n_ is the length of the input
/// set.
///
/// Each `Vec` produced by this iterator represents a subset of the elements
/// produced by the source iterator.
///
/// ```
/// use itertools::Itertools;
///
/// let sets = (1..4).powerset().collect::<Vec<_>>();
/// itertools::assert_equal(sets, vec![
/// vec![],
/// vec![1],
/// vec![2],
/// vec![3],
/// vec![1, 2],
/// vec![1, 3],
/// vec![2, 3],
/// vec![1, 2, 3],
/// ]);
/// ```
#[cfg(feature = "use_alloc")]
fn powerset(self) -> Powerset<Self>
    where Self: Sized,
          Self::Item: Clone,
{
    // Produces all 2^n subsets, beginning with the empty subset.
    powerset::powerset(self)
}
/// Return an iterator adaptor that pads the sequence to a minimum length of
/// `min` by filling missing elements using a closure `f`.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (0..5).pad_using(10, |i| 2*i);
/// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 10, 12, 14, 16, 18]);
///
/// let it = (0..10).pad_using(5, |i| 2*i);
/// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
///
/// let it = (0..5).pad_using(10, |i| 2*i).rev();
/// itertools::assert_equal(it, vec![18, 16, 14, 12, 10, 4, 3, 2, 1, 0]);
/// ```
fn pad_using<F>(self, min: usize, f: F) -> PadUsing<Self, F>
    where Self: Sized,
          F: FnMut(usize) -> Self::Item
{
    // `f` is handed the index of each element it is asked to fabricate,
    // as the doc examples (`|i| 2*i`) illustrate.
    pad_tail::pad_using(self, min, f)
}
/// Return an iterator adaptor that wraps each element in a `Position` to
/// ease special-case handling of the first or last elements.
///
/// Iterator element type is
/// [`Position<Self::Item>`](Position)
///
/// ```
/// use itertools::{Itertools, Position};
///
/// let it = (0..4).with_position();
/// itertools::assert_equal(it,
/// vec![Position::First(0),
/// Position::Middle(1),
/// Position::Middle(2),
/// Position::Last(3)]);
///
/// let it = (0..1).with_position();
/// itertools::assert_equal(it, vec![Position::Only(0)]);
/// ```
fn with_position(self) -> WithPosition<Self>
    where Self: Sized,
{
    // Items are tagged First/Middle/Last, or Only for a one-element stream
    // (see doc examples above).
    with_position::with_position(self)
}
/// Return an iterator adaptor that yields the indices of all elements
/// satisfying a predicate, counted from the start of the iterator.
///
/// Equivalent to `iter.enumerate().filter(|(_, v)| predicate(v)).map(|(i, _)| i)`.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![1, 2, 3, 3, 4, 6, 7, 9];
/// itertools::assert_equal(data.iter().positions(|v| v % 2 == 0), vec![1, 4, 5]);
///
/// itertools::assert_equal(data.iter().positions(|v| v % 2 == 1).rev(), vec![7, 6, 3, 2, 0]);
/// ```
fn positions<P>(self, predicate: P) -> Positions<Self, P>
    where Self: Sized,
          P: FnMut(Self::Item) -> bool,
{
    // Indices count from the start of the iterator, equivalent to
    // `enumerate().filter(..).map(..)` as documented above.
    adaptors::positions(self, predicate)
}
/// Return an iterator adaptor that applies a mutating function
/// to each element before yielding it.
///
/// ```
/// use itertools::Itertools;
///
/// let input = vec![vec![1], vec![3, 2, 1]];
/// let it = input.into_iter().update(|mut v| v.push(0));
/// itertools::assert_equal(it, vec![vec![1, 0], vec![3, 2, 1, 0]]);
/// ```
fn update<F>(self, updater: F) -> Update<Self, F>
    where Self: Sized,
          F: FnMut(&mut Self::Item),
{
    // `updater` mutates each element in place just before it is yielded.
    adaptors::update(self, updater)
}
// non-adaptor methods
/// Advances the iterator and returns the next items grouped in a tuple of
/// a specific size (up to 12).
///
/// If there are enough elements to be grouped in a tuple, then the tuple is
/// returned inside `Some`, otherwise `None` is returned.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = 1..5;
///
/// assert_eq!(Some((1, 2)), iter.next_tuple());
/// ```
fn next_tuple<T>(&mut self) -> Option<T>
    where Self: Sized + Iterator<Item = T::Item>,
          T: traits::HomogeneousTuple
{
    // NOTE(review): the "no_buf" collector pulls items directly from the
    // iterator; on a shortfall the items already taken appear to stay
    // consumed — confirm against `traits::HomogeneousTuple`.
    T::collect_from_iter_no_buf(self)
}
/// Collects all items from the iterator into a tuple of a specific size
/// (up to 12).
///
/// If the number of elements inside the iterator is **exactly** equal to
/// the tuple size, then the tuple is returned inside `Some`, otherwise
/// `None` is returned.
///
/// ```
/// use itertools::Itertools;
///
/// let iter = 1..3;
///
/// if let Some((x, y)) = iter.collect_tuple() {
/// assert_eq!((x, y), (1, 2))
/// } else {
/// panic!("Expected two elements")
/// }
/// ```
fn collect_tuple<T>(mut self) -> Option<T>
    where Self: Sized + Iterator<Item = T::Item>,
          T: traits::HomogeneousTuple
{
    // Take a tuple's worth of items, then require that nothing is left.
    // The extra `next()` call is what distinguishes this from `next_tuple`.
    let tuple = self.next_tuple()?;
    if self.next().is_none() {
        Some(tuple)
    } else {
        None
    }
}
/// Find the position and value of the first element satisfying a predicate.
///
/// The iterator is not advanced past the first element found.
///
/// ```
/// use itertools::Itertools;
///
/// let text = "Hα";
/// assert_eq!(text.chars().find_position(|ch| ch.is_lowercase()), Some((1, 'α')));
/// ```
fn find_position<P>(&mut self, mut pred: P) -> Option<(usize, Self::Item)>
    where P: FnMut(&Self::Item) -> bool
{
    // Pair each element with its offset (from the current position) and
    // stop at the first match, leaving the iterator just past it.
    self.enumerate().find(|(_, elt)| pred(elt))
}
/// Check whether all elements compare equal.
///
/// Empty iterators are considered to have equal elements:
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![1, 1, 1, 2, 2, 3, 3, 3, 4, 5, 5];
/// assert!(!data.iter().all_equal());
/// assert!(data[0..3].iter().all_equal());
/// assert!(data[3..5].iter().all_equal());
/// assert!(data[5..8].iter().all_equal());
///
/// let data : Option<usize> = None;
/// assert!(data.into_iter().all_equal());
/// ```
fn all_equal(&mut self) -> bool
    where Self: Sized,
          Self::Item: PartialEq,
{
    // An empty sequence is vacuously all-equal; otherwise every remaining
    // element must compare equal to the first one.
    let first = match self.next() {
        None => return true,
        Some(elt) => elt,
    };
    self.all(|elt| first == elt)
}
/// Consume the first `n` elements from the iterator eagerly,
/// and return the same iterator again.
///
/// It works similarly to *.skip(* `n` *)* except it is eager and
/// preserves the iterator type.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = "αβγ".chars().dropping(2);
/// itertools::assert_equal(iter, "γ".chars());
/// ```
///
/// *Fusing notes: if the iterator is exhausted by dropping,
/// the result of calling `.next()` again depends on the iterator implementation.*
fn dropping(mut self, n: usize) -> Self
    where Self: Sized
{
    // `nth(n - 1)` consumes exactly `n` items; `checked_sub` guards the
    // `n == 0` case, where nothing should be consumed at all.
    if let Some(last) = n.checked_sub(1) {
        self.nth(last);
    }
    self
}
/// Consume the last `n` elements from the iterator eagerly,
/// and return the same iterator again.
///
/// This is only possible on double ended iterators. `n` may be
/// larger than the number of elements.
///
/// Note: This method is eager, dropping the back elements immediately and
/// preserves the iterator type.
///
/// ```
/// use itertools::Itertools;
///
/// let init = vec![0, 3, 6, 9].into_iter().dropping_back(1);
/// itertools::assert_equal(init, vec![0, 3, 6]);
/// ```
fn dropping_back(mut self, n: usize) -> Self
    where Self: Sized,
          Self: DoubleEndedIterator
{
    // Same trick as `dropping`, but walking from the back: discard exactly
    // `n` items via a reversed borrow of the iterator.
    if let Some(last) = n.checked_sub(1) {
        self.by_ref().rev().nth(last);
    }
    self
}
/// Run the closure `f` eagerly on each element of the iterator.
///
/// Consumes the iterator until its end.
///
/// ```
/// use std::sync::mpsc::channel;
/// use itertools::Itertools;
///
/// let (tx, rx) = channel();
///
/// // use .foreach() to apply a function to each value -- sending it
/// (0..5).map(|x| x * 2 + 1).foreach(|x| { tx.send(x).unwrap(); } );
///
/// drop(tx);
///
/// itertools::assert_equal(rx.iter(), vec![1, 3, 5, 7, 9]);
/// ```
#[deprecated(note="Use .for_each() instead", since="0.8.0")]
fn foreach<F>(self, f: F)
    where F: FnMut(Self::Item),
          Self: Sized,
{
    // Kept only for backwards compatibility; forwards to the std method.
    self.for_each(f)
}
/// Combine all an iterator's elements into one element by using `Extend`.
///
/// This combinator will extend the first item with each of the rest of the
/// items of the iterator. If the iterator is empty, the default value of
/// `I::Item` is returned.
///
/// ```rust
/// use itertools::Itertools;
///
/// let input = vec![vec![1], vec![2, 3], vec![4, 5, 6]];
/// assert_eq!(input.into_iter().concat(),
/// vec![1, 2, 3, 4, 5, 6]);
/// ```
fn concat(self) -> Self::Item
    where Self: Sized,
          Self::Item: Extend<<<Self as Iterator>::Item as IntoIterator>::Item> + IntoIterator + Default
{
    // Forwards to the free function `concat`, which extends the first item
    // with the contents of the rest (or returns the `Default` when empty).
    concat(self)
}
/// `.collect_vec()` is simply a type specialization of `.collect()`,
/// for convenience.
#[cfg(feature = "use_alloc")]
fn collect_vec(self) -> Vec<Self::Item>
    where Self: Sized
{
    // Type-pinned `collect`; saves callers a `::<Vec<_>>` turbofish.
    self.collect()
}
/// `.try_collect()` is more convenient way of writing
/// `.collect::<Result<_, _>>()`
///
/// # Example
///
/// ```
/// use std::{fs, io};
/// use itertools::Itertools;
///
/// fn process_dir_entries(entries: &[fs::DirEntry]) {
/// // ...
/// }
///
/// fn do_stuff() -> std::io::Result<()> {
/// let entries: Vec<_> = fs::read_dir(".")?.try_collect()?;
/// process_dir_entries(&entries);
///
/// Ok(())
/// }
/// ```
#[cfg(feature = "use_alloc")]
fn try_collect<T, U, E>(self) -> Result<U, E>
    where
        Self: Sized + Iterator<Item = Result<T, E>>,
        Result<U, E>: FromIterator<Result<T, E>>,
{
    // `FromIterator` for `Result` short-circuits on the first `Err`.
    self.collect()
}
/// Assign to each reference in `self` from the `from` iterator,
/// stopping at the shortest of the two iterators.
///
/// The `from` iterator is queried for its next element before the `self`
/// iterator, and if either is exhausted the method is done.
///
/// Return the number of elements written.
///
/// ```
/// use itertools::Itertools;
///
/// let mut xs = [0; 4];
/// xs.iter_mut().set_from(1..);
/// assert_eq!(xs, [1, 2, 3, 4]);
/// ```
#[inline]
fn set_from<'a, A: 'a, J>(&mut self, from: J) -> usize
    where Self: Iterator<Item = &'a mut A>,
          J: IntoIterator<Item = A>
{
    // `Zip` polls its first iterator before its second, which preserves the
    // documented query order: a value is pulled from `from` before a
    // destination slot is requested from `self`.
    let mut count = 0;
    for (elt, slot) in from.into_iter().zip(self) {
        *slot = elt;
        count += 1;
    }
    count
}
/// Combine all iterator elements into one String, separated by `sep`.
///
/// Use the `Display` implementation of each element.
///
/// ```
/// use itertools::Itertools;
///
/// assert_eq!(["a", "b", "c"].iter().join(", "), "a, b, c");
/// assert_eq!([1, 2, 3].iter().join(", "), "1, 2, 3");
/// ```
#[cfg(feature = "use_alloc")]
fn join(&mut self, sep: &str) -> String
    where Self::Item: std::fmt::Display
{
    // No elements at all means an empty string, with no allocation.
    let first_elt = match self.next() {
        None => return String::new(),
        Some(elt) => elt,
    };
    // estimate lower bound of capacity needed
    let (lower, _) = self.size_hint();
    let mut result = String::with_capacity(sep.len() * lower);
    write!(&mut result, "{}", first_elt).unwrap();
    // Every remaining element is preceded by the separator.
    for elt in self {
        result.push_str(sep);
        write!(&mut result, "{}", elt).unwrap();
    }
    result
}
/// Format all iterator elements, separated by `sep`.
///
/// All elements are formatted (any formatting trait)
/// with `sep` inserted between each element.
///
/// **Panics** if the formatter helper is formatted more than once.
///
/// ```
/// use itertools::Itertools;
///
/// let data = [1.1, 2.71828, -3.];
/// assert_eq!(
/// format!("{:.2}", data.iter().format(", ")),
/// "1.10, 2.72, -3.00");
/// ```
fn format(self, sep: &str) -> Format<Self>
    where Self: Sized,
{
    // Lazy formatting helper; as documented above, the returned value
    // panics if it is formatted more than once.
    format::new_format_default(self, sep)
}
/// Format all iterator elements, separated by `sep`.
///
/// This is a customizable version of `.format()`.
///
/// The supplied closure `format` is called once per iterator element,
/// with two arguments: the element and a callback that takes a
/// `&Display` value, i.e. any reference to type that implements `Display`.
///
/// Using `&format_args!(...)` is the most versatile way to apply custom
/// element formatting. The callback can be called multiple times if needed.
///
/// **Panics** if the formatter helper is formatted more than once.
///
/// ```
/// use itertools::Itertools;
///
/// let data = [1.1, 2.71828, -3.];
/// let data_formatter = data.iter().format_with(", ", |elt, f| f(&format_args!("{:.2}", elt)));
/// assert_eq!(format!("{}", data_formatter),
/// "1.10, 2.72, -3.00");
///
/// // .format_with() is recursively composable
/// let matrix = [[1., 2., 3.],
/// [4., 5., 6.]];
/// let matrix_formatter = matrix.iter().format_with("\n", |row, f| {
/// f(&row.iter().format_with(", ", |elt, g| g(&elt)))
/// });
/// assert_eq!(format!("{}", matrix_formatter),
/// "1, 2, 3\n4, 5, 6");
///
///
/// ```
fn format_with<F>(self, sep: &str, format: F) -> FormatWith<Self, F>
    where Self: Sized,
          F: FnMut(Self::Item, &mut dyn FnMut(&dyn fmt::Display) -> fmt::Result) -> fmt::Result,
{
    // `format` is handed each element plus a callback that emits any
    // `Display`able value; the callback may be invoked several times.
    format::new_format(self, sep, format)
}
/// See [`.fold_ok()`](#method.fold_ok).
#[deprecated(note="Use .fold_ok() instead", since="0.10.0")]
fn fold_results<A, E, B, F>(&mut self, start: B, f: F) -> Result<B, E>
    where Self: Iterator<Item = Result<A, E>>,
          F: FnMut(B, A) -> B
{
    // Deprecated alias; behavior is identical to `fold_ok`.
    self.fold_ok(start, f)
}
/// Fold `Result` values from an iterator.
///
/// Only `Ok` values are folded. If no error is encountered, the folded
/// value is returned inside `Ok`. Otherwise, the operation terminates
/// and returns the first `Err` value it encounters. No iterator elements are
/// consumed after the first error.
///
/// The first accumulator value is the `start` parameter.
/// Each iteration passes the accumulator value and the next value inside `Ok`
/// to the fold function `f` and its return value becomes the new accumulator value.
///
/// For example the sequence *Ok(1), Ok(2), Ok(3)* will result in a
/// computation like this:
///
/// ```ignore
/// let mut accum = start;
/// accum = f(accum, 1);
/// accum = f(accum, 2);
/// accum = f(accum, 3);
/// ```
///
/// With a `start` value of 0 and an addition as folding function,
/// this effectively results in *((0 + 1) + 2) + 3*
///
/// ```
/// use std::ops::Add;
/// use itertools::Itertools;
///
/// let values = [1, 2, -2, -1, 2, 1];
/// assert_eq!(
/// values.iter()
/// .map(Ok::<_, ()>)
/// .fold_ok(0, Add::add),
/// Ok(3)
/// );
/// assert!(
/// values.iter()
/// .map(|&x| if x >= 0 { Ok(x) } else { Err("Negative number") })
/// .fold_ok(0, Add::add)
/// .is_err()
/// );
/// ```
fn fold_ok<A, E, B, F>(&mut self, mut start: B, mut f: F) -> Result<B, E>
    where Self: Iterator<Item = Result<A, E>>,
          F: FnMut(B, A) -> B
{
    // `?` forwards the first `Err` unchanged and stops consuming elements,
    // exactly as documented above.
    while let Some(elt) = self.next() {
        start = f(start, elt?);
    }
    Ok(start)
}
/// Fold `Option` values from an iterator.
///
/// Only `Some` values are folded. If no `None` is encountered, the folded
/// value is returned inside `Some`. Otherwise, the operation terminates
/// and returns `None`. No iterator elements are consumed after the `None`.
///
/// This is the `Option` equivalent to `fold_ok`.
///
/// ```
/// use std::ops::Add;
/// use itertools::Itertools;
///
/// let mut values = vec![Some(1), Some(2), Some(-2)].into_iter();
/// assert_eq!(values.fold_options(5, Add::add), Some(5 + 1 + 2 - 2));
///
/// let mut more_values = vec![Some(2), None, Some(0)].into_iter();
/// assert!(more_values.fold_options(0, Add::add).is_none());
/// assert_eq!(more_values.next().unwrap(), Some(0));
/// ```
fn fold_options<A, B, F>(&mut self, mut start: B, mut f: F) -> Option<B>
    where Self: Iterator<Item = Option<A>>,
          F: FnMut(B, A) -> B
{
    // The `Option` twin of `fold_ok`: `?` bails out with `None` at the
    // first `None` element, leaving the rest of the iterator unconsumed.
    while let Some(elt) = self.next() {
        start = f(start, elt?);
    }
    Some(start)
}
/// Accumulator of the elements in the iterator.
///
/// Like `.fold()`, without a base case. If the iterator is
/// empty, return `None`. With just one element, return it.
/// Otherwise elements are accumulated in sequence using the closure `f`.
///
/// ```
/// use itertools::Itertools;
///
/// assert_eq!((0..10).fold1(|x, y| x + y).unwrap_or(0), 45);
/// assert_eq!((0..0).fold1(|x, y| x * y), None);
/// ```
fn fold1<F>(mut self, f: F) -> Option<Self::Item>
    where F: FnMut(Self::Item, Self::Item) -> Self::Item,
          Self: Sized,
{
    // The first element seeds the accumulator; an empty iterator has no
    // seed and therefore yields `None`.
    let first = self.next()?;
    Some(self.fold(first, f))
}
/// Accumulate the elements in the iterator in a tree-like manner.
///
/// You can think of it as, while there's more than one item, repeatedly
/// combining adjacent items. It does so in bottom-up-merge-sort order,
/// however, so that it needs only logarithmic stack space.
///
/// This produces a call tree like the following (where the calls under
/// an item are done after reading that item):
///
/// ```text
/// 1 2 3 4 5 6 7
/// │ │ │ │ │ │ │
/// └─f └─f └─f │
/// │ │ │ │
/// └───f └─f
/// │ │
/// └─────f
/// ```
///
/// Which, for non-associative functions, will typically produce a different
/// result than the linear call tree used by `fold1`:
///
/// ```text
/// 1 2 3 4 5 6 7
/// │ │ │ │ │ │ │
/// └─f─f─f─f─f─f
/// ```
///
/// If `f` is associative, prefer the normal `fold1` instead.
///
/// ```
/// use itertools::Itertools;
///
/// // The same tree as above
/// let num_strings = (1..8).map(|x| x.to_string());
/// assert_eq!(num_strings.tree_fold1(|x, y| format!("f({}, {})", x, y)),
/// Some(String::from("f(f(f(1, 2), f(3, 4)), f(f(5, 6), 7))")));
///
/// // Like fold1, an empty iterator produces None
/// assert_eq!((0..0).tree_fold1(|x, y| x * y), None);
///
/// // tree_fold1 matches fold1 for associative operations...
/// assert_eq!((0..10).tree_fold1(|x, y| x + y),
/// (0..10).fold1(|x, y| x + y));
/// // ...but not for non-associative ones
/// assert_ne!((0..10).tree_fold1(|x, y| x - y),
/// (0..10).fold1(|x, y| x - y));
/// ```
fn tree_fold1<F>(mut self, mut f: F) -> Option<Self::Item>
    where F: FnMut(Self::Item, Self::Item) -> Self::Item,
          Self: Sized,
{
    // Protocol between the helpers: `Ok(tree)` means a full tree of the
    // requested height was built; `Err(partial)` means the iterator ran
    // dry, carrying whatever was combined so far (`Err(None)` when there
    // was nothing left at all).
    type State<T> = Result<T, Option<T>>;

    // Base case: combine the next two items into a height-1 tree.
    fn inner0<T, II, FF>(it: &mut II, f: &mut FF) -> State<T>
        where
            II: Iterator<Item = T>,
            FF: FnMut(T, T) -> T
    {
        // This function could be replaced with `it.next().ok_or(None)`,
        // but half the useful tree_fold1 work is combining adjacent items,
        // so put that in a form that LLVM is more likely to optimize well.
        let a =
            if let Some(v) = it.next() { v }
            else { return Err(None) };
        let b =
            if let Some(v) = it.next() { v }
            else { return Err(Some(a)) };
        Ok(f(a, b))
    }

    // Recursive case: grow a tree up to height `stop`, pairing the current
    // tree with a freshly built sibling of the same size each round.
    fn inner<T, II, FF>(stop: usize, it: &mut II, f: &mut FF) -> State<T>
        where
            II: Iterator<Item = T>,
            FF: FnMut(T, T) -> T
    {
        let mut x = inner0(it, f)?;
        for height in 0..stop {
            // Try to get another tree the same size with which to combine it,
            // creating a new tree that's twice as big for next time around.
            let next =
                if height == 0 {
                    inner0(it, f)
                } else {
                    inner(height, it, f)
                };
            match next {
                Ok(y) => x = f(x, y),
                // If we ran out of items, combine whatever we did manage
                // to get. It's better combined with the current value
                // than something in a parent frame, because the tree in
                // the parent is always as least as big as this one.
                Err(None) => return Err(Some(x)),
                Err(Some(y)) => return Err(Some(f(x, y))),
            }
        }
        Ok(x)
    }

    // With an effectively unbounded height the loop can only terminate via
    // `Err`, which carries the final folded value (`None` for empty input);
    // the `Ok` arm is therefore unreachable.
    match inner(usize::max_value(), &mut self, &mut f) {
        Err(x) => x,
        _ => unreachable!(),
    }
}
/// An iterator method that applies a function, producing a single, final value.
///
/// `fold_while()` is basically equivalent to `fold()` but with additional support for
/// early exit via short-circuiting.
///
/// ```
/// use itertools::Itertools;
/// use itertools::FoldWhile::{Continue, Done};
///
/// let numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
///
/// let mut result = 0;
///
/// // for loop:
/// for i in &numbers {
/// if *i > 5 {
/// break;
/// }
/// result = result + i;
/// }
///
/// // fold:
/// let result2 = numbers.iter().fold(0, |acc, x| {
/// if *x > 5 { acc } else { acc + x }
/// });
///
/// // fold_while:
/// let result3 = numbers.iter().fold_while(0, |acc, x| {
/// if *x > 5 { Done(acc) } else { Continue(acc + x) }
/// }).into_inner();
///
/// // they're the same
/// assert_eq!(result, result2);
/// assert_eq!(result2, result3);
/// ```
///
/// The big difference between the computations of `result2` and `result3` is that while
/// `fold()` called the provided closure for every item of the callee iterator,
/// `fold_while()` actually stopped iterating as soon as it encountered `Fold::Done(_)`.
fn fold_while<B, F>(&mut self, init: B, mut f: F) -> FoldWhile<B>
    where Self: Sized,
          F: FnMut(B, Self::Item) -> FoldWhile<B>
{
    // Piggy-back on `try_fold`: `Ok` keeps the fold going while `Err`
    // short-circuits, then map the plain `Result` back into a `FoldWhile`.
    let result = self.try_fold(init, #[inline(always)] |acc, v| {
        match f(acc, v) {
            FoldWhile::Continue(acc) => Ok(acc),
            FoldWhile::Done(acc) => Err(acc),
        }
    });
    match result {
        Ok(acc) => FoldWhile::Continue(acc),
        Err(acc) => FoldWhile::Done(acc),
    }
}
/// Iterate over the entire iterator and add all the elements.
///
/// An empty iterator returns `None`, otherwise `Some(sum)`.
///
/// # Panics
///
/// When calling `sum1()` and a primitive integer type is being returned, this
/// method will panic if the computation overflows and debug assertions are
/// enabled.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let empty_sum = (1..1).sum1::<i32>();
/// assert_eq!(empty_sum, None);
///
/// let nonempty_sum = (1..11).sum1::<i32>();
/// assert_eq!(nonempty_sum, Some(55));
/// ```
fn sum1<S>(mut self) -> Option<S>
    where Self: Sized,
          S: std::iter::Sum<Self::Item>,
{
    // An empty iterator has no sum; otherwise stitch the first element
    // back onto the remainder and defer to `Iterator::sum`.
    let first = self.next()?;
    Some(once(first).chain(self).sum())
}
/// Iterate over the entire iterator and multiply all the elements.
///
/// An empty iterator returns `None`, otherwise `Some(product)`.
///
/// # Panics
///
/// When calling `product1()` and a primitive integer type is being returned,
/// method will panic if the computation overflows and debug assertions are
/// enabled.
///
/// # Examples
/// ```
/// use itertools::Itertools;
///
/// let empty_product = (1..1).product1::<i32>();
/// assert_eq!(empty_product, None);
///
/// let nonempty_product = (1..11).product1::<i32>();
/// assert_eq!(nonempty_product, Some(3628800));
/// ```
fn product1<P>(mut self) -> Option<P>
    where Self: Sized,
          P: std::iter::Product<Self::Item>,
{
    // Mirror of `sum1`: no elements means no product; otherwise re-attach
    // the first element and defer to `Iterator::product`.
    let first = self.next()?;
    Some(once(first).chain(self).product())
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort_unstable()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort the letters of the text in ascending order
/// let text = "bdacfe";
/// itertools::assert_equal(text.chars().sorted_unstable(),
/// "abcdef".chars());
/// ```
#[cfg(feature = "use_alloc")]
fn sorted_unstable(self) -> VecIntoIter<Self::Item>
    where Self: Sized,
          Self::Item: Ord
{
    // Use .sort_unstable() directly since it is not quite identical with
    // .sort_by(Ord::cmp)
    let mut v: Vec<_> = self.collect();
    v.sort_unstable();
    v.into_iter()
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort_unstable_by()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort people in descending order by age
/// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)];
///
/// let oldest_people_first = people
/// .into_iter()
/// .sorted_unstable_by(|a, b| Ord::cmp(&b.1, &a.1))
/// .map(|(person, _age)| person);
///
/// itertools::assert_equal(oldest_people_first,
/// vec!["Jill", "Jack", "Jane", "John"]);
/// ```
#[cfg(feature = "use_alloc")]
fn sorted_unstable_by<F>(self, cmp: F) -> VecIntoIter<Self::Item>
    where Self: Sized,
          F: FnMut(&Self::Item, &Self::Item) -> Ordering,
{
    // Buffer everything, sort in place (unstable), hand back an owning iterator.
    let mut buffer: Vec<_> = self.collect();
    buffer.sort_unstable_by(cmp);
    buffer.into_iter()
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort_unstable_by_key()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort people in descending order by age
/// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)];
///
/// let oldest_people_first = people
/// .into_iter()
/// .sorted_unstable_by_key(|x| -x.1)
/// .map(|(person, _age)| person);
///
/// itertools::assert_equal(oldest_people_first,
/// vec!["Jill", "Jack", "Jane", "John"]);
/// ```
#[cfg(feature = "use_alloc")]
fn sorted_unstable_by_key<K, F>(self, f: F) -> VecIntoIter<Self::Item>
    where Self: Sized,
          K: Ord,
          F: FnMut(&Self::Item) -> K,
{
    // Buffer everything, sort in place by the extracted key (unstable),
    // hand back an owning iterator.
    let mut buffer: Vec<_> = self.collect();
    buffer.sort_unstable_by_key(f);
    buffer.into_iter()
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort the letters of the text in ascending order
/// let text = "bdacfe";
/// itertools::assert_equal(text.chars().sorted(),
/// "abcdef".chars());
/// ```
#[cfg(feature = "use_alloc")]
fn sorted(self) -> VecIntoIter<Self::Item>
    where Self: Sized,
          Self::Item: Ord
{
    // Use .sort() directly since it is not quite identical with
    // .sort_by(Ord::cmp)
    let mut v: Vec<_> = self.collect();
    v.sort();
    v.into_iter()
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort_by()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort people in descending order by age
/// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)];
///
/// let oldest_people_first = people
/// .into_iter()
/// .sorted_by(|a, b| Ord::cmp(&b.1, &a.1))
/// .map(|(person, _age)| person);
///
/// itertools::assert_equal(oldest_people_first,
/// vec!["Jill", "Jack", "Jane", "John"]);
/// ```
#[cfg(feature = "use_alloc")]
fn sorted_by<F>(self, cmp: F) -> VecIntoIter<Self::Item>
    where Self: Sized,
          F: FnMut(&Self::Item, &Self::Item) -> Ordering,
{
    // Buffer everything, sort stably with the supplied comparator, and
    // return an iterator that owns the sorted elements.
    let mut buffer: Vec<_> = self.collect();
    buffer.sort_by(cmp);
    buffer.into_iter()
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort_by_key()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort people in descending order by age
/// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)];
///
/// let oldest_people_first = people
/// .into_iter()
/// .sorted_by_key(|x| -x.1)
/// .map(|(person, _age)| person);
///
/// itertools::assert_equal(oldest_people_first,
/// vec!["Jill", "Jack", "Jane", "John"]);
/// ```
#[cfg(feature = "use_alloc")]
fn sorted_by_key<K, F>(self, f: F) -> VecIntoIter<Self::Item>
    where Self: Sized,
          K: Ord,
          F: FnMut(&Self::Item) -> K,
{
    // Materialize the elements, sort by the derived key, and return an
    // iterator that owns the sorted result.
    let mut buffer: Vec<Self::Item> = self.collect();
    buffer.sort_by_key(f);
    buffer.into_iter()
}
/// Sort the k smallest elements into a new iterator, in ascending order.
///
/// **Note:** This consumes the entire iterator, and returns the result
/// as a new iterator that owns its elements. If the input contains
/// less than k elements, the result is equivalent to `self.sorted()`.
///
/// This is guaranteed to use `k * sizeof(Self::Item) + O(1)` memory
/// and `O(n log k)` time, with `n` the number of elements in the input.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// **Note:** This is functionally-equivalent to `self.sorted().take(k)`
/// but much more efficient.
///
/// ```
/// use itertools::Itertools;
///
/// // A random permutation of 0..15
/// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5];
///
/// let five_smallest = numbers
/// .into_iter()
/// .k_smallest(5);
///
/// itertools::assert_equal(five_smallest, 0..5);
/// ```
#[cfg(feature = "use_alloc")]
fn k_smallest(self, k: usize) -> VecIntoIter<Self::Item>
    where Self: Sized,
          Self::Item: Ord
{
    // The helper returns a binary heap holding the k smallest items;
    // `into_sorted_vec` then yields them in ascending order.
    let heap = crate::k_smallest::k_smallest(self, k);
    heap.into_sorted_vec().into_iter()
}
/// Collect all iterator elements into one of two
/// partitions. Unlike `Iterator::partition`, each partition may
/// have a distinct type.
///
/// ```
/// use itertools::{Itertools, Either};
///
/// let successes_and_failures = vec![Ok(1), Err(false), Err(true), Ok(2)];
///
/// let (successes, failures): (Vec<_>, Vec<_>) = successes_and_failures
/// .into_iter()
/// .partition_map(|r| {
/// match r {
/// Ok(v) => Either::Left(v),
/// Err(v) => Either::Right(v),
/// }
/// });
///
/// assert_eq!(successes, [1, 2]);
/// assert_eq!(failures, [false, true]);
/// ```
fn partition_map<A, B, F, L, R>(self, mut predicate: F) -> (A, B)
    where Self: Sized,
          F: FnMut(Self::Item) -> Either<L, R>,
          A: Default + Extend<L>,
          B: Default + Extend<R>,
{
    // Start from two empty collections and route each element to one
    // side depending on which `Either` variant the predicate produces.
    let mut lefts = A::default();
    let mut rights = B::default();
    for item in self {
        match predicate(item) {
            Either::Left(value) => lefts.extend(Some(value)),
            Either::Right(value) => rights.extend(Some(value)),
        }
    }
    (lefts, rights)
}
/// Return a `HashMap` of keys mapped to `Vec`s of values. Keys and values
/// are taken from `(Key, Value)` tuple pairs yielded by the input iterator.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![(0, 10), (2, 12), (3, 13), (0, 20), (3, 33), (2, 42)];
/// let lookup = data.into_iter().into_group_map();
///
/// assert_eq!(lookup[&0], vec![10, 20]);
/// assert_eq!(lookup.get(&1), None);
/// assert_eq!(lookup[&2], vec![12, 42]);
/// assert_eq!(lookup[&3], vec![13, 33]);
/// ```
#[cfg(feature = "use_std")]
fn into_group_map<K, V>(self) -> HashMap<K, Vec<V>>
where Self: Iterator<Item=(K, V)> + Sized,
K: Hash + Eq,
{
// Delegates to the free-standing helper in the `group_map` module.
group_map::into_group_map(self)
}
/// Return a `HashMap` of keys mapped to `Vec`s of values. The key is
/// specified in the closure.
/// This differs from `into_group_map` in that the key is still present in the
/// stored values; it is also more general, since you can subsequently fold
/// each group.
///
/// ```
/// use itertools::Itertools;
/// use std::collections::HashMap;
///
/// let data = vec![(0, 10), (2, 12), (3, 13), (0, 20), (3, 33), (2, 42)];
/// let lookup: HashMap<u32,Vec<(u32, u32)>> = data.clone().into_iter().into_group_map_by(|a|
/// a.0);
///
/// assert_eq!(lookup[&0], vec![(0,10),(0,20)]);
/// assert_eq!(lookup.get(&1), None);
/// assert_eq!(lookup[&2], vec![(2,12), (2,42)]);
/// assert_eq!(lookup[&3], vec![(3,13), (3,33)]);
///
/// assert_eq!(
/// data.into_iter()
/// .into_group_map_by(|x| x.0)
/// .into_iter()
/// .map(|(key, values)| (key, values.into_iter().fold(0,|acc, (_,v)| acc + v )))
/// .collect::<HashMap<u32,u32>>()[&0], 30)
/// ```
#[cfg(feature = "use_std")]
fn into_group_map_by<K, V, F>(self, f: F) -> HashMap<K, Vec<V>>
where
Self: Iterator<Item=V> + Sized,
K: Hash + Eq,
F: Fn(&V) -> K,
{
// Delegates to the free-standing helper in the `group_map` module;
// `f` derives the grouping key from each yielded value.
group_map::into_group_map_by(self, f)
}
/// Constructs a `GroupingMap` to be used later with one of the efficient
/// group-and-fold operations it allows to perform.
///
/// The input iterator must yield item in the form of `(K, V)` where the
/// value of type `K` will be used as key to identify the groups and the
/// value of type `V` as value for the folding operation.
///
/// See [`GroupingMap`](./structs/struct.GroupingMap.html) for more information
/// on what operations are available.
#[cfg(feature = "use_std")]
fn into_grouping_map<K, V>(self) -> GroupingMap<Self>
where Self: Iterator<Item=(K, V)> + Sized,
K: Hash + Eq,
{
// The iterator already yields `(key, value)` pairs, so it can be
// wrapped directly.
grouping_map::new(self)
}
/// Constructs a `GroupingMap` to be used later with one of the efficient
/// group-and-fold operations it allows to perform.
///
/// The values from this iterator will be used as values for the folding operation
/// while the keys will be obtained from the values by calling `key_mapper`.
///
/// See [`GroupingMap`](./structs/struct.GroupingMap.html) for more information
/// on what operations are available.
#[cfg(feature = "use_std")]
fn into_grouping_map_by<K, V, F>(self, key_mapper: F) -> GroupingMapBy<Self, F>
where Self: Iterator<Item=V> + Sized,
K: Hash + Eq,
F: FnMut(&V) -> K
{
// Adapt the plain-value iterator into a `(key, value)` stream before
// handing it to the grouping machinery.
grouping_map::new(grouping_map::MapForGrouping::new(self, key_mapper))
}
/// Return the minimum and maximum elements in the iterator.
///
/// The return type `MinMaxResult` is an enum of three variants:
///
/// - `NoElements` if the iterator is empty.
/// - `OneElement(x)` if the iterator has exactly one element.
/// - `MinMax(x, y)` is returned otherwise, where `x <= y`. Two
/// values are equal if and only if there is more than one
/// element in the iterator and all elements are equal.
///
/// On an iterator of length `n`, `minmax` does `1.5 * n` comparisons,
/// and so is faster than calling `min` and `max` separately which does
/// `2 * n` comparisons.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
/// use itertools::MinMaxResult::{NoElements, OneElement, MinMax};
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().minmax(), NoElements);
///
/// let a = [1];
/// assert_eq!(a.iter().minmax(), OneElement(&1));
///
/// let a = [1, 2, 3, 4, 5];
/// assert_eq!(a.iter().minmax(), MinMax(&1, &5));
///
/// let a = [1, 1, 1, 1];
/// assert_eq!(a.iter().minmax(), MinMax(&1, &1));
/// ```
///
/// The elements can be floats but no particular result is guaranteed
/// if an element is NaN.
fn minmax(self) -> MinMaxResult<Self::Item>
where Self: Sized, Self::Item: PartialOrd
{
// Unit key: elements are compared directly with `<`; the last two
// closure arguments (the keys) are therefore ignored.
minmax::minmax_impl(self, |_| (), |x, y, _, _| x < y)
}
/// Return the minimum and maximum element of an iterator, as determined by
/// the specified function.
///
/// The return value is a variant of `MinMaxResult` like for `minmax()`.
///
/// For the minimum, the first minimal element is returned. For the maximum,
/// the last maximal element wins. This matches the behavior of the standard
/// `Iterator::min()` and `Iterator::max()` methods.
///
/// The keys can be floats but no particular result is guaranteed
/// if a key is NaN.
fn minmax_by_key<K, F>(self, key: F) -> MinMaxResult<Self::Item>
where Self: Sized, K: PartialOrd, F: FnMut(&Self::Item) -> K
{
// Compare by the derived keys; `minmax_impl` supplies them as `xk`/`yk`
// (presumably computed once per element by the helper — see minmax.rs).
minmax::minmax_impl(self, key, |_, _, xk, yk| xk < yk)
}
/// Return the minimum and maximum element of an iterator, as determined by
/// the specified comparison function.
///
/// The return value is a variant of `MinMaxResult` like for `minmax()`.
///
/// For the minimum, the first minimal element is returned. For the maximum,
/// the last maximal element wins. This matches the behavior of the standard
/// `Iterator::min()` and `Iterator::max()` methods.
fn minmax_by<F>(self, mut compare: F) -> MinMaxResult<Self::Item>
    where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering
{
    // Translate the three-way comparator into the strict "less than"
    // predicate that the shared implementation expects; the unit key
    // arguments are ignored.
    minmax::minmax_impl(self, |_| (), |x, y, _, _| compare(x, y) == Ordering::Less)
}
/// Return the position of the maximum element in the iterator.
///
/// If several elements are equally maximum, the position of the
/// last of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_max(), None);
///
/// let a = [-3, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_max(), Some(3));
///
/// let a = [1, 1, -1, -1];
/// assert_eq!(a.iter().position_max(), Some(1));
/// ```
fn position_max(self) -> Option<usize>
    where Self: Sized, Self::Item: Ord
{
    // `max_by` keeps the last of equal maxima, which yields the
    // documented "last position wins" behaviour after enumeration.
    self.enumerate()
        .max_by(|(_, a), (_, b)| a.cmp(b))
        .map(|(index, _)| index)
}
/// Return the position of the maximum element in the iterator, as
/// determined by the specified function.
///
/// If several elements are equally maximum, the position of the
/// last of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), None);
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), Some(4));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), Some(3));
/// ```
fn position_max_by_key<K, F>(self, mut key: F) -> Option<usize>
    where Self: Sized, K: Ord, F: FnMut(&Self::Item) -> K
{
    // Derive a fresh key for each comparison (like the original), then
    // keep only the winning position.
    self.enumerate()
        .max_by(|(_, a), (_, b)| key(a).cmp(&key(b)))
        .map(|(index, _)| index)
}
/// Return the position of the maximum element in the iterator, as
/// determined by the specified comparison function.
///
/// If several elements are equally maximum, the position of the
/// last of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), None);
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), Some(3));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), Some(1));
/// ```
fn position_max_by<F>(self, mut compare: F) -> Option<usize>
    where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering
{
    // Compare only the items; the enumerated index is carried alongside
    // and extracted at the end.
    self.enumerate()
        .max_by(|(_, a), (_, b)| compare(a, b))
        .map(|(index, _)| index)
}
/// Return the position of the minimum element in the iterator.
///
/// If several elements are equally minimum, the position of the
/// first of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_min(), None);
///
/// let a = [-3, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_min(), Some(4));
///
/// let a = [1, 1, -1, -1];
/// assert_eq!(a.iter().position_min(), Some(2));
/// ```
fn position_min(self) -> Option<usize>
    where Self: Sized, Self::Item: Ord
{
    // `min_by` keeps the first of equal minima, which yields the
    // documented "first position wins" behaviour after enumeration.
    self.enumerate()
        .min_by(|(_, a), (_, b)| a.cmp(b))
        .map(|(index, _)| index)
}
/// Return the position of the minimum element in the iterator, as
/// determined by the specified function.
///
/// If several elements are equally minimum, the position of the
/// first of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), None);
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), Some(1));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), Some(0));
/// ```
fn position_min_by_key<K, F>(self, mut key: F) -> Option<usize>
    where Self: Sized, K: Ord, F: FnMut(&Self::Item) -> K
{
    // Derive a fresh key for each comparison (like the original), then
    // keep only the winning position.
    self.enumerate()
        .min_by(|(_, a), (_, b)| key(a).cmp(&key(b)))
        .map(|(index, _)| index)
}
/// Return the position of the minimum element in the iterator, as
/// determined by the specified comparison function.
///
/// If several elements are equally minimum, the position of the
/// first of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), None);
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), Some(4));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), Some(2));
/// ```
fn position_min_by<F>(self, mut compare: F) -> Option<usize>
    where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering
{
    // Compare only the items; the enumerated index is carried alongside
    // and extracted at the end.
    self.enumerate()
        .min_by(|(_, a), (_, b)| compare(a, b))
        .map(|(index, _)| index)
}
/// Return the positions of the minimum and maximum elements in
/// the iterator.
///
/// The return type [`MinMaxResult`] is an enum of three variants:
///
/// - `NoElements` if the iterator is empty.
/// - `OneElement(xpos)` if the iterator has exactly one element.
/// - `MinMax(xpos, ypos)` is returned otherwise, where the
/// element at `xpos` ≤ the element at `ypos`. While the
/// referenced elements themselves may be equal, `xpos` cannot
/// be equal to `ypos`.
///
/// On an iterator of length `n`, `position_minmax` does `1.5 * n`
/// comparisons, and so is faster than calling `position_min` and
/// `position_max` separately which does `2 * n` comparisons.
///
/// For the minimum, if several elements are equally minimum, the
/// position of the first of them is returned. For the maximum, if
/// several elements are equally maximum, the position of the last
/// of them is returned.
///
/// The elements can be floats but no particular result is
/// guaranteed if an element is NaN.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
/// use itertools::MinMaxResult::{NoElements, OneElement, MinMax};
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_minmax(), NoElements);
///
/// let a = [10];
/// assert_eq!(a.iter().position_minmax(), OneElement(0));
///
/// let a = [-3, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_minmax(), MinMax(4, 3));
///
/// let a = [1, 1, -1, -1];
/// assert_eq!(a.iter().position_minmax(), MinMax(2, 1));
/// ```
fn position_minmax(self) -> MinMaxResult<usize>
    where Self: Sized, Self::Item: PartialOrd
{
    use crate::MinMaxResult::{NoElements, OneElement, MinMax};
    // Compare enumerated pairs by item only, then strip the items so
    // the caller sees just the positions.
    let result = minmax::minmax_impl(self.enumerate(), |_| (), |x, y, _, _| x.1 < y.1);
    match result {
        NoElements => NoElements,
        OneElement((index, _)) => OneElement(index),
        MinMax((min_index, _), (max_index, _)) => MinMax(min_index, max_index),
    }
}
/// Return the positions of the minimum and maximum elements of an
/// iterator, as determined by the specified function.
///
/// The return value is a variant of [`MinMaxResult`] like for
/// [`position_minmax`].
///
/// For the minimum, if several elements are equally minimum, the
/// position of the first of them is returned. For the maximum, if
/// several elements are equally maximum, the position of the last
/// of them is returned.
///
/// The keys can be floats but no particular result is guaranteed
/// if a key is NaN.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
/// use itertools::MinMaxResult::{NoElements, OneElement, MinMax};
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), NoElements);
///
/// let a = [10_i32];
/// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), OneElement(0));
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), MinMax(1, 4));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), MinMax(0, 3));
/// ```
///
/// [`position_minmax`]: Self::position_minmax
fn position_minmax_by_key<K, F>(self, mut key: F) -> MinMaxResult<usize>
    where Self: Sized, K: PartialOrd, F: FnMut(&Self::Item) -> K
{
    use crate::MinMaxResult::{NoElements, OneElement, MinMax};
    // Key on the item of each enumerated pair, then keep only indices.
    match self.enumerate().minmax_by_key(|(_, item)| key(item)) {
        NoElements => NoElements,
        OneElement((index, _)) => OneElement(index),
        MinMax((min_index, _), (max_index, _)) => MinMax(min_index, max_index),
    }
}
/// Return the positions of the minimum and maximum elements of an
/// iterator, as determined by the specified comparison function.
///
/// The return value is a variant of [`MinMaxResult`] like for
/// [`position_minmax`].
///
/// For the minimum, if several elements are equally minimum, the
/// position of the first of them is returned. For the maximum, if
/// several elements are equally maximum, the position of the last
/// of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
/// use itertools::MinMaxResult::{NoElements, OneElement, MinMax};
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), NoElements);
///
/// let a = [10_i32];
/// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), OneElement(0));
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), MinMax(4, 3));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), MinMax(2, 1));
/// ```
///
/// [`position_minmax`]: Self::position_minmax
fn position_minmax_by<F>(self, mut compare: F) -> MinMaxResult<usize>
    where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering
{
    use crate::MinMaxResult::{NoElements, OneElement, MinMax};
    // Compare the items of each enumerated pair, then keep only indices.
    match self.enumerate().minmax_by(|(_, a), (_, b)| compare(a, b)) {
        NoElements => NoElements,
        OneElement((index, _)) => OneElement(index),
        MinMax((min_index, _), (max_index, _)) => MinMax(min_index, max_index),
    }
}
/// If the iterator yields exactly one element, that element will be returned, otherwise
/// an error will be returned containing an iterator that has the same output as the input
/// iterator.
///
/// This provides an additional layer of validation over just calling `Iterator::next()`.
/// If your assumption that there should only be one element yielded is false this provides
/// the opportunity to detect and handle that, preventing errors at a distance.
///
/// # Examples
/// ```
/// use itertools::Itertools;
///
/// assert_eq!((0..10).filter(|&x| x == 2).exactly_one().unwrap(), 2);
/// assert!((0..10).filter(|&x| x > 1 && x < 4).exactly_one().unwrap_err().eq(2..4));
/// assert!((0..10).filter(|&x| x > 1 && x < 5).exactly_one().unwrap_err().eq(2..5));
/// assert!((0..10).filter(|&_| false).exactly_one().unwrap_err().eq(0..0));
/// ```
fn exactly_one(mut self) -> Result<Self::Item, ExactlyOneError<Self>>
    where
        Self: Sized,
{
    // Pull at most two elements: zero or two-plus are both errors, and
    // the error value replays whatever was consumed before yielding the
    // rest of `self`.
    let first = match self.next() {
        Some(item) => item,
        None => return Err(ExactlyOneError::new(None, self)),
    };
    match self.next() {
        None => Ok(first),
        Some(second) => Err(ExactlyOneError::new(
            Some(Either::Left([first, second])),
            self,
        )),
    }
}
/// An iterator adaptor that allows the user to peek at multiple `.next()`
/// values without advancing the base iterator.
///
/// # Examples
/// ```
/// use itertools::Itertools;
///
/// let mut iter = (0..10).multipeek();
/// assert_eq!(iter.peek(), Some(&0));
/// assert_eq!(iter.peek(), Some(&1));
/// assert_eq!(iter.peek(), Some(&2));
/// assert_eq!(iter.next(), Some(0));
/// assert_eq!(iter.peek(), Some(&1));
/// ```
#[cfg(feature = "use_alloc")]
fn multipeek(self) -> MultiPeek<Self>
where
Self: Sized,
{
// Peeked values must be buffered, hence the allocation feature gate.
multipeek_impl::multipeek(self)
}
/// Collect the items in this iterator and return a `HashMap` which
/// contains each item that appears in the iterator and the number
/// of times it appears.
///
/// # Examples
/// ```
/// # use itertools::Itertools;
/// let counts = [1, 1, 1, 3, 3, 5].into_iter().counts();
/// assert_eq!(counts[&1], 3);
/// assert_eq!(counts[&3], 2);
/// assert_eq!(counts[&5], 1);
/// assert_eq!(counts.get(&0), None);
/// ```
#[cfg(feature = "use_std")]
fn counts(self) -> HashMap<Self::Item, usize>
    where
        Self: Sized,
        Self::Item: Eq + Hash,
{
    // Single pass: bump the tally for each item, inserting a zero entry
    // on first sight.
    let mut tally = HashMap::new();
    for item in self {
        *tally.entry(item).or_insert(0) += 1;
    }
    tally
}
}
/// Blanket implementation: every `Iterator` automatically gets all `Itertools` methods.
impl<T: ?Sized> Itertools for T where T: Iterator { }
/// Return `true` if both iterables produce equal sequences
/// (elements pairwise equal and sequences of the same length),
/// `false` otherwise.
///
/// This is an `IntoIterator` enabled function that is similar to the standard
/// library method `Iterator::eq`.
///
/// ```
/// assert!(itertools::equal(vec![1, 2, 3], 1..4));
/// assert!(!itertools::equal(&[0, 0], &[0, 0, 0]));
/// ```
pub fn equal<I, J>(a: I, b: J) -> bool
    where I: IntoIterator,
          J: IntoIterator,
          I::Item: PartialEq<J::Item>
{
    // Walk both sequences in lock step; any element mismatch or length
    // difference ends the comparison immediately.
    let mut left = a.into_iter();
    let mut right = b.into_iter();
    loop {
        match (left.next(), right.next()) {
            (None, None) => return true,
            (Some(x), Some(y)) => {
                if x != y {
                    return false;
                }
            }
            _ => return false,
        }
    }
}
/// Assert that two iterables produce equal sequences, with the same
/// semantics as *equal(a, b)*.
///
/// **Panics** on assertion failure with a message that shows the
/// two iteration elements.
///
/// ```ignore
/// assert_equal("exceed".split('c'), "excess".split('c'));
/// // ^PANIC: panicked at 'Failed assertion Some("eed") == Some("ess") for iteration 1',
/// ```
pub fn assert_equal<I, J>(a: I, b: J)
    where I: IntoIterator,
          J: IntoIterator,
          I::Item: fmt::Debug + PartialEq<J::Item>,
          J::Item: fmt::Debug,
{
    // Advance both iterators together, panicking with a diagnostic that
    // shows the offending pair and its position.
    let mut left = a.into_iter();
    let mut right = b.into_iter();
    let mut index = 0;
    loop {
        let pair = (left.next(), right.next());
        if let (None, None) = pair {
            return;
        }
        let (a, b) = pair;
        let equal = match (&a, &b) {
            (Some(x), Some(y)) => x == y,
            _ => false,
        };
        assert!(equal, "Failed assertion {a:?} == {b:?} for iteration {i}",
                i=index, a=a, b=b);
        index += 1;
    }
}
/// Partition a sequence using predicate `pred` so that elements
/// that map to `true` are placed before elements which map to `false`.
///
/// The order within the partitions is arbitrary.
///
/// Return the index of the split point.
///
/// ```
/// use itertools::partition;
///
/// # // use repeated numbers to not promise any ordering
/// let mut data = [7, 1, 1, 7, 1, 1, 7];
/// let split_index = partition(&mut data, |elt| *elt >= 3);
///
/// assert_eq!(data, [7, 7, 7, 1, 1, 1, 1]);
/// assert_eq!(split_index, 3);
/// ```
pub fn partition<'a, A: 'a, I, F>(iter: I, mut pred: F) -> usize
    where I: IntoIterator<Item = &'a mut A>,
          I::IntoIter: DoubleEndedIterator,
          F: FnMut(&A) -> bool
{
    // Two-pointer scheme: walk from the front; whenever the front element
    // belongs in the second partition, search from the back for one that
    // belongs in the first and swap them in place.
    let mut iter = iter.into_iter();
    let mut split_index = 0;
    'outer: while let Some(front) = iter.next() {
        if !pred(front) {
            loop {
                let back = match iter.next_back() {
                    Some(item) => item,
                    // Nothing left at the back: the split point is here.
                    None => break 'outer,
                };
                if pred(back) {
                    std::mem::swap(front, back);
                    break;
                }
            }
        }
        split_index += 1;
    }
    split_index
}
/// An enum used for controlling the execution of `.fold_while()`.
///
/// See [`.fold_while()`](crate::Itertools::fold_while) for more information.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FoldWhile<T> {
    /// Continue folding with this value
    Continue(T),
    /// Fold is complete and will return this value
    Done(T),
}

impl<T> FoldWhile<T> {
    /// Unwrap the inner value, whichever variant holds it.
    pub fn into_inner(self) -> T {
        match self {
            FoldWhile::Continue(inner) => inner,
            FoldWhile::Done(inner) => inner,
        }
    }

    /// Whether folding has finished (`Done`) or should keep going (`Continue`).
    pub fn is_done(&self) -> bool {
        if let FoldWhile::Done(_) = *self {
            true
        } else {
            false
        }
    }
}
// Add Itertools.counts_by
#![warn(missing_docs)]
#![crate_name="itertools"]
#![cfg_attr(not(feature = "use_std"), no_std)]
//! Extra iterator adaptors, functions and macros.
//!
//! To extend [`Iterator`] with methods in this crate, import
//! the [`Itertools` trait](Itertools):
//!
//! ```
//! use itertools::Itertools;
//! ```
//!
//! Now, new methods like [`interleave`](Itertools::interleave)
//! are available on all iterators:
//!
//! ```
//! use itertools::Itertools;
//!
//! let it = (1..3).interleave(vec![-1, -2]);
//! itertools::assert_equal(it, vec![1, -1, 2, -2]);
//! ```
//!
//! Most iterator methods are also provided as functions (with the benefit
//! that they convert parameters using [`IntoIterator`]):
//!
//! ```
//! use itertools::interleave;
//!
//! for elt in interleave(&[1, 2, 3], &[2, 3, 4]) {
//! /* loop body */
//! }
//! ```
//!
//! ## Crate Features
//!
//! - `use_std`
//! - Enabled by default.
//! - Disable to compile itertools using `#![no_std]`. This disables
//! any items that depend on collections (like `group_by`, `unique`,
//! `kmerge`, `join` and many more).
//!
//! ## Rust Version
//!
//! This version of itertools requires Rust 1.32 or later.
#![doc(html_root_url="https://docs.rs/itertools/0.8/")]
#[cfg(not(feature = "use_std"))]
extern crate core as std;
#[cfg(feature = "use_alloc")]
extern crate alloc;
#[cfg(feature = "use_alloc")]
use alloc::{
string::String,
vec::Vec,
};
pub use either::Either;
#[cfg(feature = "use_std")]
use std::collections::HashMap;
use std::iter::{IntoIterator, once};
use std::cmp::Ordering;
use std::fmt;
#[cfg(feature = "use_std")]
use std::hash::Hash;
#[cfg(feature = "use_alloc")]
use std::fmt::Write;
#[cfg(feature = "use_alloc")]
type VecIntoIter<T> = alloc::vec::IntoIter<T>;
#[cfg(feature = "use_alloc")]
use std::iter::FromIterator;
#[macro_use]
mod impl_macros;
// for compatibility with no std and macros
#[doc(hidden)]
pub use std::iter as __std_iter;
/// The concrete iterator types.
pub mod structs {
pub use crate::adaptors::{
Dedup,
DedupBy,
DedupWithCount,
DedupByWithCount,
Interleave,
InterleaveShortest,
FilterMapOk,
FilterOk,
Product,
PutBack,
Batching,
MapInto,
MapOk,
Merge,
MergeBy,
TakeWhileRef,
WhileSome,
Coalesce,
TupleCombinations,
Positions,
Update,
};
#[allow(deprecated)]
pub use crate::adaptors::{MapResults, Step};
#[cfg(feature = "use_alloc")]
pub use crate::adaptors::MultiProduct;
#[cfg(feature = "use_alloc")]
pub use crate::combinations::Combinations;
#[cfg(feature = "use_alloc")]
pub use crate::combinations_with_replacement::CombinationsWithReplacement;
pub use crate::cons_tuples_impl::ConsTuples;
pub use crate::exactly_one_err::ExactlyOneError;
pub use crate::format::{Format, FormatWith};
#[cfg(feature = "use_std")]
pub use crate::grouping_map::{GroupingMap, GroupingMapBy};
#[cfg(feature = "use_alloc")]
pub use crate::groupbylazy::{IntoChunks, Chunk, Chunks, GroupBy, Group, Groups};
pub use crate::intersperse::{Intersperse, IntersperseWith};
#[cfg(feature = "use_alloc")]
pub use crate::kmerge_impl::{KMerge, KMergeBy};
pub use crate::merge_join::MergeJoinBy;
#[cfg(feature = "use_alloc")]
pub use crate::multipeek_impl::MultiPeek;
#[cfg(feature = "use_alloc")]
pub use crate::peek_nth::PeekNth;
pub use crate::pad_tail::PadUsing;
pub use crate::peeking_take_while::PeekingTakeWhile;
#[cfg(feature = "use_alloc")]
pub use crate::permutations::Permutations;
pub use crate::process_results_impl::ProcessResults;
#[cfg(feature = "use_alloc")]
pub use crate::powerset::Powerset;
#[cfg(feature = "use_alloc")]
pub use crate::put_back_n_impl::PutBackN;
#[cfg(feature = "use_alloc")]
pub use crate::rciter_impl::RcIter;
pub use crate::repeatn::RepeatN;
#[allow(deprecated)]
pub use crate::sources::{RepeatCall, Unfold, Iterate};
#[cfg(feature = "use_alloc")]
pub use crate::tee::Tee;
pub use crate::tuple_impl::{TupleBuffer, TupleWindows, CircularTupleWindows, Tuples};
#[cfg(feature = "use_std")]
pub use crate::unique_impl::{Unique, UniqueBy};
pub use crate::with_position::WithPosition;
pub use crate::zip_eq_impl::ZipEq;
pub use crate::zip_longest::ZipLongest;
pub use crate::ziptuple::Zip;
}
/// Traits helpful for using certain `Itertools` methods in generic contexts.
pub mod traits {
pub use crate::tuple_impl::HomogeneousTuple;
}
#[allow(deprecated)]
pub use crate::structs::*;
pub use crate::concat_impl::concat;
pub use crate::cons_tuples_impl::cons_tuples;
pub use crate::diff::diff_with;
pub use crate::diff::Diff;
#[cfg(feature = "use_alloc")]
pub use crate::kmerge_impl::{kmerge_by};
pub use crate::minmax::MinMaxResult;
pub use crate::peeking_take_while::PeekingNext;
pub use crate::process_results_impl::process_results;
pub use crate::repeatn::repeat_n;
#[allow(deprecated)]
pub use crate::sources::{repeat_call, unfold, iterate};
pub use crate::with_position::Position;
pub use crate::ziptuple::multizip;
mod adaptors;
mod either_or_both;
pub use crate::either_or_both::EitherOrBoth;
#[doc(hidden)]
pub mod free;
#[doc(inline)]
pub use crate::free::*;
mod concat_impl;
mod cons_tuples_impl;
#[cfg(feature = "use_alloc")]
mod combinations;
#[cfg(feature = "use_alloc")]
mod combinations_with_replacement;
mod exactly_one_err;
mod diff;
mod format;
#[cfg(feature = "use_std")]
mod grouping_map;
#[cfg(feature = "use_alloc")]
mod group_map;
#[cfg(feature = "use_alloc")]
mod groupbylazy;
mod intersperse;
#[cfg(feature = "use_alloc")]
mod k_smallest;
#[cfg(feature = "use_alloc")]
mod kmerge_impl;
#[cfg(feature = "use_alloc")]
mod lazy_buffer;
mod merge_join;
mod minmax;
#[cfg(feature = "use_alloc")]
mod multipeek_impl;
mod pad_tail;
#[cfg(feature = "use_alloc")]
mod peek_nth;
mod peeking_take_while;
#[cfg(feature = "use_alloc")]
mod permutations;
#[cfg(feature = "use_alloc")]
mod powerset;
mod process_results_impl;
#[cfg(feature = "use_alloc")]
mod put_back_n_impl;
#[cfg(feature = "use_alloc")]
mod rciter_impl;
mod repeatn;
mod size_hint;
mod sources;
#[cfg(feature = "use_alloc")]
mod tee;
mod tuple_impl;
#[cfg(feature = "use_std")]
mod unique_impl;
mod with_position;
mod zip_eq_impl;
mod zip_longest;
mod ziptuple;
#[macro_export]
/// Create an iterator over the “cartesian product” of iterators.
///
/// Iterator element type is like `(A, B, ..., E)` if formed
/// from iterators `(I, J, ..., M)` with element types `I::Item = A`, `J::Item = B`, etc.
///
/// ```
/// # use itertools::iproduct;
/// #
/// # fn main() {
/// // Iterate over the coordinates of a 4 x 4 x 4 grid
/// // from (0, 0, 0), (0, 0, 1), .., (0, 1, 0), (0, 1, 1), .. etc until (3, 3, 3)
/// for (i, j, k) in iproduct!(0..4, 0..4, 0..4) {
/// // ..
/// }
/// # }
/// ```
macro_rules! iproduct {
// Internal rule: recursion base — nothing left to flatten.
(@flatten $I:expr,) => (
$I
);
// Internal rule: flatten one nesting level by cons-ing tuples, then recurse.
(@flatten $I:expr, $J:expr, $($K:expr,)*) => (
$crate::iproduct!(@flatten $crate::cons_tuples($crate::iproduct!($I, $J)), $($K,)*)
);
// One argument: simply convert it with `IntoIterator`.
($I:expr) => (
$crate::__std_iter::IntoIterator::into_iter($I)
);
// Two arguments: a plain cartesian product.
($I:expr, $J:expr) => (
$crate::Itertools::cartesian_product($crate::iproduct!($I), $crate::iproduct!($J))
);
// Three or more: build pairwise products, then flatten the nested tuples.
($I:expr, $J:expr, $($K:expr),+) => (
$crate::iproduct!(@flatten $crate::iproduct!($I, $J), $($K,)+)
);
}
#[macro_export]
/// Create an iterator running multiple iterators in lockstep.
///
/// The `izip!` iterator yields elements until any subiterator
/// returns `None`.
///
/// This is a version of the standard ``.zip()`` that's supporting more than
/// two iterators. The iterator element type is a tuple with one element
/// from each of the input iterators. Just like ``.zip()``, the iteration stops
/// when the shortest of the inputs reaches its end.
///
/// **Note:** The result of this macro is in the general case an iterator
/// composed of repeated `.zip()` and a `.map()`; it has an anonymous type.
/// The special cases of one and two arguments produce the equivalent of
/// `$a.into_iter()` and `$a.into_iter().zip($b)` respectively.
///
/// Prefer this macro `izip!()` over [`multizip`] for the performance benefits
/// of using the standard library `.zip()`.
///
/// ```
/// # use itertools::izip;
/// #
/// # fn main() {
///
/// // iterate over three sequences side-by-side
/// let mut results = [0, 0, 0, 0];
/// let inputs = [3, 7, 9, 6];
///
/// for (r, index, input) in izip!(&mut results, 0..10, &inputs) {
/// *r = index * 10 + input;
/// }
///
/// assert_eq!(results, [0 + 3, 10 + 7, 29, 36]);
/// # }
/// ```
macro_rules! izip {
// @closure creates a tuple-flattening closure for .map() call. usage:
// @closure partial_pattern => partial_tuple , rest , of , iterators
// eg. izip!( @closure ((a, b), c) => (a, b, c) , dd , ee )
( @closure $p:pat => $tup:expr ) => {
|$p| $tup
};
// The "b" identifier is a different identifier on each recursion level thanks to hygiene.
( @closure $p:pat => ( $($tup:tt)* ) , $_iter:expr $( , $tail:expr )* ) => {
$crate::izip!(@closure ($p, b) => ( $($tup)*, b ) $( , $tail )*)
};
// unary: a single argument is just converted with `IntoIterator`.
($first:expr $(,)*) => {
$crate::__std_iter::IntoIterator::into_iter($first)
};
// binary: equivalent to the standard `.zip()`.
($first:expr, $second:expr $(,)*) => {
$crate::izip!($first)
.zip($second)
};
// n-ary where n > 2: repeated `.zip()` produces nested pairs, which the
// generated closure then flattens into a single n-tuple.
( $first:expr $( , $rest:expr )* $(,)* ) => {
$crate::izip!($first)
$(
.zip($rest)
)*
.map(
$crate::izip!(@closure a => (a) $( , $rest )*)
)
};
}
/// An [`Iterator`] blanket implementation that provides extra adaptors and
/// methods.
///
/// This trait defines a number of methods. They are divided into two groups:
///
/// * *Adaptors* take an iterator and parameter as input, and return
/// a new iterator value. These are listed first in the trait. An example
/// of an adaptor is [`.interleave()`](#method.interleave)
///
/// * *Regular methods* are those that don't return iterators and instead
/// return a regular value of some other kind.
/// [`.next_tuple()`](#method.next_tuple) is an example and the first regular
/// method in the list.
pub trait Itertools : Iterator {
// adaptors
/// Alternate elements from two iterators until both have run out.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (1..7).interleave(vec![-1, -2]);
/// itertools::assert_equal(it, vec![1, -1, 2, -2, 3, 4, 5, 6]);
/// ```
fn interleave<J>(self, other: J) -> Interleave<Self, J::IntoIter>
    where J: IntoIterator<Item = Self::Item>,
          Self: Sized
{
    // Thin wrapper over the crate-level `interleave` free function.
    interleave(self, other)
}
/// Alternate elements from two iterators until at least one of them has run
/// out.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (1..7).interleave_shortest(vec![-1, -2]);
/// itertools::assert_equal(it, vec![1, -1, 2, -2, 3]);
/// ```
fn interleave_shortest<J>(self, other: J) -> InterleaveShortest<Self, J::IntoIter>
    where J: IntoIterator<Item = Self::Item>,
          Self: Sized
{
    // The adaptor constructor wants a concrete iterator, so convert here.
    adaptors::interleave_shortest(self, other.into_iter())
}
/// An iterator adaptor to insert a particular value
/// between each element of the adapted iterator.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// itertools::assert_equal((0..3).intersperse(8), vec![0, 8, 1, 8, 2]);
/// ```
fn intersperse(self, element: Self::Item) -> Intersperse<Self>
    where Self: Sized,
          Self::Item: Clone
{
    // `element` is cloned for each gap, hence the `Clone` bound.
    intersperse::intersperse(self, element)
}
/// An iterator adaptor to insert a particular value created by a function
/// between each element of the adapted iterator.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let mut i = 10;
/// itertools::assert_equal((0..3).intersperse_with(|| { i -= 1; i }), vec![0, 9, 1, 8, 2]);
/// assert_eq!(i, 8);
/// ```
fn intersperse_with<F>(self, element: F) -> IntersperseWith<Self, F>
    where Self: Sized,
          F: FnMut() -> Self::Item
{
    // Like `intersperse`, but each separator is freshly produced by `element()`,
    // so no `Clone` bound is needed on the item type.
    intersperse::intersperse_with(self, element)
}
/// Create an iterator which iterates over both this and the specified
/// iterator simultaneously, yielding pairs of two optional elements.
///
/// This iterator is *fused*.
///
/// As long as neither input iterator is exhausted yet, it yields two values
/// via `EitherOrBoth::Both`.
///
/// When the parameter iterator is exhausted, it only yields a value from the
/// `self` iterator via `EitherOrBoth::Left`.
///
/// When the `self` iterator is exhausted, it only yields a value from the
/// parameter iterator via `EitherOrBoth::Right`.
///
/// When both iterators return `None`, all further invocations of `.next()`
/// will return `None`.
///
/// Iterator element type is
/// [`EitherOrBoth<Self::Item, J::Item>`](EitherOrBoth).
///
/// ```rust
/// use itertools::EitherOrBoth::{Both, Right};
/// use itertools::Itertools;
/// let it = (0..1).zip_longest(1..3);
/// itertools::assert_equal(it, vec![Both(0, 1), Right(2)]);
/// ```
#[inline]
fn zip_longest<J>(self, other: J) -> ZipLongest<Self, J::IntoIter>
    where J: IntoIterator,
          Self: Sized
{
    // Delegates to the `zip_longest` module's constructor.
    zip_longest::zip_longest(self, other.into_iter())
}
/// Create an iterator which iterates over both this and the specified
/// iterator simultaneously, yielding pairs of elements.
///
/// **Panics** if the iterators reach an end and they are not of equal
/// lengths.
#[inline]
fn zip_eq<J>(self, other: J) -> ZipEq<Self, J::IntoIter>
    where J: IntoIterator,
          Self: Sized
{
    // Thin wrapper over the crate-level `zip_eq` free function.
    zip_eq(self, other)
}
/// A “meta iterator adaptor”. Its closure receives a reference to the
/// iterator and may pick off as many elements as it likes, to produce the
/// next iterator element.
///
/// Iterator element type is `B`.
///
/// ```
/// use itertools::Itertools;
///
/// // An adaptor that gathers elements in pairs
/// let pit = (0..4).batching(|it| {
/// match it.next() {
/// None => None,
/// Some(x) => match it.next() {
/// None => None,
/// Some(y) => Some((x, y)),
/// }
/// }
/// });
///
/// itertools::assert_equal(pit, vec![(0, 1), (2, 3)]);
/// ```
///
fn batching<B, F>(self, f: F) -> Batching<Self, F>
    where F: FnMut(&mut Self) -> Option<B>,
          Self: Sized
{
    // The closure receives `&mut Self` and drives the iteration itself.
    adaptors::batching(self, f)
}
/// Return an *iterable* that can group iterator elements.
/// Consecutive elements that map to the same key (“runs”), are assigned
/// to the same group.
///
/// `GroupBy` is the storage for the lazy grouping operation.
///
/// If the groups are consumed in order, or if each group's iterator is
/// dropped without keeping it around, then `GroupBy` uses no
/// allocations. It needs allocations only if several group iterators
/// are alive at the same time.
///
/// This type implements `IntoIterator` (it is **not** an iterator
/// itself), because the group iterators need to borrow from this
/// value. It should be stored in a local variable or temporary and
/// iterated.
///
/// Iterator element type is `(K, Group)`: the group's key and the
/// group iterator.
///
/// ```
/// use itertools::Itertools;
///
/// // group data into runs of larger than zero or not.
/// let data = vec![1, 3, -2, -2, 1, 0, 1, 2];
/// // groups: |---->|------>|--------->|
///
/// // Note: The `&` is significant here, `GroupBy` is iterable
/// // only by reference. You can also call `.into_iter()` explicitly.
/// let mut data_grouped = Vec::new();
/// for (key, group) in &data.into_iter().group_by(|elt| *elt >= 0) {
/// data_grouped.push((key, group.collect()));
/// }
/// assert_eq!(data_grouped, vec![(true, vec![1, 3]), (false, vec![-2, -2]), (true, vec![1, 0, 1, 2])]);
/// ```
#[cfg(feature = "use_alloc")]
fn group_by<K, F>(self, key: F) -> GroupBy<K, Self, F>
    where Self: Sized,
          F: FnMut(&Self::Item) -> K,
          K: PartialEq,
{
    // Lazy grouping machinery lives in `groupbylazy`; allocation only
    // happens if several group iterators are kept alive simultaneously.
    groupbylazy::new(self, key)
}
/// Return an *iterable* that can chunk the iterator.
///
/// Yield subiterators (chunks) that each yield a fixed number elements,
/// determined by `size`. The last chunk will be shorter if there aren't
/// enough elements.
///
/// `IntoChunks` is based on `GroupBy`: it is iterable (implements
/// `IntoIterator`, **not** `Iterator`), and it only buffers if several
/// chunk iterators are alive at the same time.
///
/// Iterator element type is `Chunk`, each chunk's iterator.
///
/// **Panics** if `size` is 0.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![1, 1, 2, -2, 6, 0, 3, 1];
/// //chunk size=3 |------->|-------->|--->|
///
/// // Note: The `&` is significant here, `IntoChunks` is iterable
/// // only by reference. You can also call `.into_iter()` explicitly.
/// for chunk in &data.into_iter().chunks(3) {
/// // Check that the sum of each chunk is 4.
/// assert_eq!(4, chunk.sum());
/// }
/// ```
#[cfg(feature = "use_alloc")]
fn chunks(self, size: usize) -> IntoChunks<Self>
    where Self: Sized,
{
    // A chunk size of zero could never make progress; reject it up front
    // (this is the documented "Panics if `size` is 0" behavior).
    assert!(size != 0);
    groupbylazy::new_chunks(self, size)
}
/// Return an iterator over all contiguous windows producing tuples of
/// a specific size (up to 4).
///
/// `tuple_windows` clones the iterator elements so that they can be
/// part of successive windows, this makes it most suited for iterators
/// of references and other values that are cheap to copy.
///
/// ```
/// use itertools::Itertools;
/// let mut v = Vec::new();
///
/// // pairwise iteration
/// for (a, b) in (1..5).tuple_windows() {
/// v.push((a, b));
/// }
/// assert_eq!(v, vec![(1, 2), (2, 3), (3, 4)]);
///
/// let mut it = (1..5).tuple_windows();
/// assert_eq!(Some((1, 2, 3)), it.next());
/// assert_eq!(Some((2, 3, 4)), it.next());
/// assert_eq!(None, it.next());
///
/// // this requires a type hint
/// let it = (1..5).tuple_windows::<(_, _, _)>();
/// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4)]);
///
/// // you can also specify the complete type
/// use itertools::TupleWindows;
/// use std::ops::Range;
///
/// let it: TupleWindows<Range<u32>, (u32, u32, u32)> = (1..5).tuple_windows();
/// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4)]);
/// ```
fn tuple_windows<T>(self) -> TupleWindows<Self, T>
    where Self: Sized + Iterator<Item = T::Item>,
          T: traits::HomogeneousTuple,
          T::Item: Clone
{
    // Elements are cloned so they can appear in overlapping windows.
    tuple_impl::tuple_windows(self)
}
/// Return an iterator over all windows, wrapping back to the first
/// elements when the window would otherwise exceed the length of the
/// iterator, producing tuples of a specific size (up to 4).
///
/// `circular_tuple_windows` clones the iterator elements so that they can be
/// part of successive windows, this makes it most suited for iterators
/// of references and other values that are cheap to copy.
///
/// ```
/// use itertools::Itertools;
/// let mut v = Vec::new();
/// for (a, b) in (1..5).circular_tuple_windows() {
/// v.push((a, b));
/// }
/// assert_eq!(v, vec![(1, 2), (2, 3), (3, 4), (4, 1)]);
///
/// let mut it = (1..5).circular_tuple_windows();
/// assert_eq!(Some((1, 2, 3)), it.next());
/// assert_eq!(Some((2, 3, 4)), it.next());
/// assert_eq!(Some((3, 4, 1)), it.next());
/// assert_eq!(Some((4, 1, 2)), it.next());
/// assert_eq!(None, it.next());
///
/// // this requires a type hint
/// let it = (1..5).circular_tuple_windows::<(_, _, _)>();
/// itertools::assert_equal(it, vec![(1, 2, 3), (2, 3, 4), (3, 4, 1), (4, 1, 2)]);
/// ```
fn circular_tuple_windows<T>(self) -> CircularTupleWindows<Self, T>
    where Self: Sized + Clone + Iterator<Item = T::Item> + ExactSizeIterator,
          T: tuple_impl::TupleCollect + Clone,
          T::Item: Clone
{
    // Needs `Clone + ExactSizeIterator` so the adaptor can wrap back to
    // the start of the sequence for the final windows.
    tuple_impl::circular_tuple_windows(self)
}
/// Return an iterator that groups the items in tuples of a specific size
/// (up to 4).
///
/// See also the method [`.next_tuple()`](#method.next_tuple).
///
/// ```
/// use itertools::Itertools;
/// let mut v = Vec::new();
/// for (a, b) in (1..5).tuples() {
/// v.push((a, b));
/// }
/// assert_eq!(v, vec![(1, 2), (3, 4)]);
///
/// let mut it = (1..7).tuples();
/// assert_eq!(Some((1, 2, 3)), it.next());
/// assert_eq!(Some((4, 5, 6)), it.next());
/// assert_eq!(None, it.next());
///
/// // this requires a type hint
/// let it = (1..7).tuples::<(_, _, _)>();
/// itertools::assert_equal(it, vec![(1, 2, 3), (4, 5, 6)]);
///
/// // you can also specify the complete type
/// use itertools::Tuples;
/// use std::ops::Range;
///
/// let it: Tuples<Range<u32>, (u32, u32, u32)> = (1..7).tuples();
/// itertools::assert_equal(it, vec![(1, 2, 3), (4, 5, 6)]);
/// ```
///
/// See also [`Tuples::into_buffer`].
fn tuples<T>(self) -> Tuples<Self, T>
    where Self: Sized + Iterator<Item = T::Item>,
          T: traits::HomogeneousTuple
{
    // Non-overlapping grouping; see `tuple_windows` for the sliding variant.
    tuple_impl::tuples(self)
}
/// Split into an iterator pair that both yield all elements from
/// the original iterator.
///
/// **Note:** If the iterator is clonable, prefer using that instead
/// of using this method. It is likely to be more efficient.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
/// let xs = vec![0, 1, 2, 3];
///
/// let (mut t1, t2) = xs.into_iter().tee();
/// itertools::assert_equal(t1.next(), Some(0));
/// itertools::assert_equal(t2, 0..4);
/// itertools::assert_equal(t1, 1..4);
/// ```
#[cfg(feature = "use_alloc")]
fn tee(self) -> (Tee<Self>, Tee<Self>)
    where Self: Sized,
          Self::Item: Clone
{
    // Both halves share a buffer internally, hence the alloc feature gate.
    tee::new(self)
}
/// Return an iterator adaptor that steps `n` elements in the base iterator
/// for each iteration.
///
/// The iterator steps by yielding the next element from the base iterator,
/// then skipping forward `n - 1` elements.
///
/// Iterator element type is `Self::Item`.
///
/// **Panics** if the step is 0.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (0..8).step(3);
/// itertools::assert_equal(it, vec![0, 3, 6]);
/// ```
#[deprecated(note="Use std .step_by() instead", since="0.8.0")]
// `allow(deprecated)` so this deprecated shim can mention its own
// deprecated `Step` machinery without warnings.
#[allow(deprecated)]
fn step(self, n: usize) -> Step<Self>
    where Self: Sized
{
    adaptors::step(self, n)
}
/// Convert each item of the iterator using the `Into` trait.
///
/// ```rust
/// use itertools::Itertools;
///
/// (1i32..42i32).map_into::<f64>().collect_vec();
/// ```
fn map_into<R>(self) -> MapInto<Self, R>
    where Self: Sized,
          Self::Item: Into<R>,
{
    // Equivalent to `.map(Into::into)` but with a nameable adaptor type.
    adaptors::map_into(self)
}
/// See [`.map_ok()`](#method.map_ok).
#[deprecated(note="Use .map_ok() instead", since="0.10.0")]
fn map_results<F, T, U, E>(self, f: F) -> MapOk<Self, F>
    where Self: Iterator<Item = Result<T, E>> + Sized,
          F: FnMut(T) -> U,
{
    // Deprecated alias kept for backward compatibility; forwards to `map_ok`.
    self.map_ok(f)
}
/// Return an iterator adaptor that applies the provided closure
/// to every `Result::Ok` value. `Result::Err` values are
/// unchanged.
///
/// ```
/// use itertools::Itertools;
///
/// let input = vec![Ok(41), Err(false), Ok(11)];
/// let it = input.into_iter().map_ok(|i| i + 1);
/// itertools::assert_equal(it, vec![Ok(42), Err(false), Ok(12)]);
/// ```
fn map_ok<F, T, U, E>(self, f: F) -> MapOk<Self, F>
    where Self: Iterator<Item = Result<T, E>> + Sized,
          F: FnMut(T) -> U,
{
    // `Err` values pass through untouched; only `Ok` payloads hit `f`.
    adaptors::map_ok(self, f)
}
/// Return an iterator adaptor that filters every `Result::Ok`
/// value with the provided closure. `Result::Err` values are
/// unchanged.
///
/// ```
/// use itertools::Itertools;
///
/// let input = vec![Ok(22), Err(false), Ok(11)];
/// let it = input.into_iter().filter_ok(|&i| i > 20);
/// itertools::assert_equal(it, vec![Ok(22), Err(false)]);
/// ```
fn filter_ok<F, T, E>(self, f: F) -> FilterOk<Self, F>
    where Self: Iterator<Item = Result<T, E>> + Sized,
          F: FnMut(&T) -> bool,
{
    // `Err` values are always kept; the predicate only filters `Ok` values.
    adaptors::filter_ok(self, f)
}
/// Return an iterator adaptor that filters and transforms every
/// `Result::Ok` value with the provided closure. `Result::Err`
/// values are unchanged.
///
/// ```
/// use itertools::Itertools;
///
/// let input = vec![Ok(22), Err(false), Ok(11)];
/// let it = input.into_iter().filter_map_ok(|i| if i > 20 { Some(i * 2) } else { None });
/// itertools::assert_equal(it, vec![Ok(44), Err(false)]);
/// ```
fn filter_map_ok<F, T, U, E>(self, f: F) -> FilterMapOk<Self, F>
    where Self: Iterator<Item = Result<T, E>> + Sized,
          F: FnMut(T) -> Option<U>,
{
    // Combined filter+map over `Ok` payloads; `Err` values pass through.
    adaptors::filter_map_ok(self, f)
}
/// Return an iterator adaptor that merges the two base iterators in
/// ascending order. If both base iterators are sorted (ascending), the
/// result is sorted.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let a = (0..11).step(3);
/// let b = (0..11).step(5);
/// let it = a.merge(b);
/// itertools::assert_equal(it, vec![0, 0, 3, 5, 6, 9, 10]);
/// ```
fn merge<J>(self, other: J) -> Merge<Self, J::IntoIter>
    where Self: Sized,
          Self::Item: PartialOrd,
          J: IntoIterator<Item = Self::Item>
{
    // Thin wrapper over the crate-level `merge` free function.
    merge(self, other)
}
/// Return an iterator adaptor that merges the two base iterators in order.
/// This is much like `.merge()` but allows for a custom ordering.
///
/// This can be especially useful for sequences of tuples.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let a = (0..).zip("bc".chars());
/// let b = (0..).zip("ad".chars());
/// let it = a.merge_by(b, |x, y| x.1 <= y.1);
/// itertools::assert_equal(it, vec![(0, 'a'), (0, 'b'), (1, 'c'), (1, 'd')]);
/// ```
fn merge_by<J, F>(self, other: J, is_first: F) -> MergeBy<Self, J::IntoIter, F>
    where Self: Sized,
          J: IntoIterator<Item = Self::Item>,
          F: FnMut(&Self::Item, &Self::Item) -> bool
{
    // `is_first(a, b)` returning true means `a` (from `self`) is emitted first.
    adaptors::merge_by_new(self, other.into_iter(), is_first)
}
/// Create an iterator that merges items from both this and the specified
/// iterator in ascending order.
///
/// It chooses whether to pair elements based on the `Ordering` returned by the
/// specified compare function. At any point, inspecting the tip of the
/// iterators `I` and `J` as items `i` of type `I::Item` and `j` of type
/// `J::Item` respectively, the resulting iterator will:
///
/// - Emit `EitherOrBoth::Left(i)` when `i < j`,
/// and remove `i` from its source iterator
/// - Emit `EitherOrBoth::Right(j)` when `i > j`,
/// and remove `j` from its source iterator
/// - Emit `EitherOrBoth::Both(i, j)` when `i == j`,
/// and remove both `i` and `j` from their respective source iterators
///
/// ```
/// use itertools::Itertools;
/// use itertools::EitherOrBoth::{Left, Right, Both};
///
/// let multiples_of_2 = (0..10).step(2);
/// let multiples_of_3 = (0..10).step(3);
///
/// itertools::assert_equal(
/// multiples_of_2.merge_join_by(multiples_of_3, |i, j| i.cmp(j)),
/// vec![Both(0, 0), Left(2), Right(3), Left(4), Both(6, 6), Left(8), Right(9)]
/// );
/// ```
#[inline]
fn merge_join_by<J, F>(self, other: J, cmp_fn: F) -> MergeJoinBy<Self, J::IntoIter, F>
    where J: IntoIterator,
          F: FnMut(&Self::Item, &J::Item) -> std::cmp::Ordering,
          Self: Sized
{
    // Thin wrapper over the crate-level `merge_join_by` free function.
    merge_join_by(self, other, cmp_fn)
}
/// Return an iterator adaptor that flattens an iterator of iterators by
/// merging them in ascending order.
///
/// If all base iterators are sorted (ascending), the result is sorted.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let a = (0..6).step(3);
/// let b = (1..6).step(3);
/// let c = (2..6).step(3);
/// let it = vec![a, b, c].into_iter().kmerge();
/// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 5]);
/// ```
#[cfg(feature = "use_alloc")]
fn kmerge(self) -> KMerge<<Self::Item as IntoIterator>::IntoIter>
    where Self: Sized,
          Self::Item: IntoIterator,
          <Self::Item as IntoIterator>::Item: PartialOrd,
{
    // k-way merge of the subiterators; delegates to the free function.
    kmerge(self)
}
/// Return an iterator adaptor that flattens an iterator of iterators by
/// merging them according to the given closure.
///
/// The closure `first` is called with two elements *a*, *b* and should
/// return `true` if *a* is ordered before *b*.
///
/// If all base iterators are sorted according to `first`, the result is
/// sorted.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let a = vec![-1f64, 2., 3., -5., 6., -7.];
/// let b = vec![0., 2., -4.];
/// let mut it = vec![a, b].into_iter().kmerge_by(|a, b| a.abs() < b.abs());
/// assert_eq!(it.next(), Some(0.));
/// assert_eq!(it.last(), Some(-7.));
/// ```
#[cfg(feature = "use_alloc")]
fn kmerge_by<F>(self, first: F)
    -> KMergeBy<<Self::Item as IntoIterator>::IntoIter, F>
    where Self: Sized,
          Self::Item: IntoIterator,
          F: FnMut(&<Self::Item as IntoIterator>::Item,
                   &<Self::Item as IntoIterator>::Item) -> bool
{
    // `first(a, b)` returning true means `a` is ordered before `b`.
    kmerge_by(self, first)
}
/// Return an iterator adaptor that iterates over the cartesian product of
/// the element sets of two iterators `self` and `J`.
///
/// Iterator element type is `(Self::Item, J::Item)`.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (0..2).cartesian_product("αβ".chars());
/// itertools::assert_equal(it, vec![(0, 'α'), (0, 'β'), (1, 'α'), (1, 'β')]);
/// ```
fn cartesian_product<J>(self, other: J) -> Product<Self, J::IntoIter>
    where Self: Sized,
          Self::Item: Clone,
          J: IntoIterator,
          J::IntoIter: Clone
{
    // `other` is restarted once per element of `self`, hence the `Clone`
    // bound on its iterator; `self`'s items are cloned for each pairing.
    adaptors::cartesian_product(self, other.into_iter())
}
/// Return an iterator adaptor that iterates over the cartesian product of
/// all subiterators returned by meta-iterator `self`.
///
/// All provided iterators must yield the same `Item` type. To generate
/// the product of iterators yielding multiple types, use the
/// [`iproduct`] macro instead.
///
///
/// The iterator element type is `Vec<T>`, where `T` is the iterator element
/// of the subiterators.
///
/// ```
/// use itertools::Itertools;
/// let mut multi_prod = (0..3).map(|i| (i * 2)..(i * 2 + 2))
/// .multi_cartesian_product();
/// assert_eq!(multi_prod.next(), Some(vec![0, 2, 4]));
/// assert_eq!(multi_prod.next(), Some(vec![0, 2, 5]));
/// assert_eq!(multi_prod.next(), Some(vec![0, 3, 4]));
/// assert_eq!(multi_prod.next(), Some(vec![0, 3, 5]));
/// assert_eq!(multi_prod.next(), Some(vec![1, 2, 4]));
/// assert_eq!(multi_prod.next(), Some(vec![1, 2, 5]));
/// assert_eq!(multi_prod.next(), Some(vec![1, 3, 4]));
/// assert_eq!(multi_prod.next(), Some(vec![1, 3, 5]));
/// assert_eq!(multi_prod.next(), None);
/// ```
#[cfg(feature = "use_alloc")]
fn multi_cartesian_product(self) -> MultiProduct<<Self::Item as IntoIterator>::IntoIter>
    where Self: Iterator + Sized,
          Self::Item: IntoIterator,
          <Self::Item as IntoIterator>::IntoIter: Clone,
          <Self::Item as IntoIterator>::Item: Clone
{
    // Homogeneous n-way product yielding `Vec`s; use `iproduct!` for
    // heterogeneous element types.
    adaptors::multi_cartesian_product(self)
}
/// Return an iterator adaptor that uses the passed-in closure to
/// optionally merge together consecutive elements.
///
/// The closure `f` is passed two elements, `previous` and `current` and may
/// return either (1) `Ok(combined)` to merge the two values or
/// (2) `Err((previous', current'))` to indicate they can't be merged.
/// In (2), the value `previous'` is emitted by the iterator.
/// Either (1) `combined` or (2) `current'` becomes the previous value
/// when coalesce continues with the next pair of elements to merge. The
/// value that remains at the end is also emitted by the iterator.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// // sum same-sign runs together
/// let data = vec![-1., -2., -3., 3., 1., 0., -1.];
/// itertools::assert_equal(data.into_iter().coalesce(|x, y|
/// if (x >= 0.) == (y >= 0.) {
/// Ok(x + y)
/// } else {
/// Err((x, y))
/// }),
/// vec![-6., 4., -1.]);
/// ```
fn coalesce<F>(self, f: F) -> Coalesce<Self, F>
    where Self: Sized,
          F: FnMut(Self::Item, Self::Item)
              -> Result<Self::Item, (Self::Item, Self::Item)>
{
    // `Ok(merged)` fuses the pair; `Err((prev, cur))` emits `prev`.
    adaptors::coalesce(self, f)
}
/// Remove duplicates from sections of consecutive identical elements.
/// If the iterator is sorted, all elements will be unique.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![1., 1., 2., 3., 3., 2., 2.];
/// itertools::assert_equal(data.into_iter().dedup(),
/// vec![1., 2., 3., 2.]);
/// ```
fn dedup(self) -> Dedup<Self>
    where Self: Sized,
          Self::Item: PartialEq,
{
    // Only removes *consecutive* duplicates; see `unique` for global dedup.
    adaptors::dedup(self)
}
/// Remove duplicates from sections of consecutive identical elements,
/// determining equality using a comparison function.
/// If the iterator is sorted, all elements will be unique.
///
/// Iterator element type is `Self::Item`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![(0, 1.), (1, 1.), (0, 2.), (0, 3.), (1, 3.), (1, 2.), (2, 2.)];
/// itertools::assert_equal(data.into_iter().dedup_by(|x, y| x.1 == y.1),
/// vec![(0, 1.), (0, 2.), (0, 3.), (1, 2.)]);
/// ```
fn dedup_by<Cmp>(self, cmp: Cmp) -> DedupBy<Self, Cmp>
    where Self: Sized,
          Cmp: FnMut(&Self::Item, &Self::Item)->bool,
{
    // `cmp` returning true means the two adjacent items count as equal.
    adaptors::dedup_by(self, cmp)
}
/// Remove duplicates from sections of consecutive identical elements, while keeping a count of
/// how many repeated elements were present.
/// If the iterator is sorted, all elements will be unique.
///
/// Iterator element type is `(usize, Self::Item)`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![1., 1., 2., 3., 3., 2., 2.];
/// itertools::assert_equal(data.into_iter().dedup_with_count(),
/// vec![(2, 1.), (1, 2.), (2, 3.), (2, 2.)]);
/// ```
fn dedup_with_count(self) -> DedupWithCount<Self>
    where Self: Sized,
{
    // Like `dedup`, but yields `(run_length, item)` pairs.
    adaptors::dedup_with_count(self)
}
/// Remove duplicates from sections of consecutive identical elements, while keeping a count of
/// how many repeated elements were present.
/// This will determine equality using a comparison function.
/// If the iterator is sorted, all elements will be unique.
///
/// Iterator element type is `(usize, Self::Item)`.
///
/// This iterator is *fused*.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![(0, 1.), (1, 1.), (0, 2.), (0, 3.), (1, 3.), (1, 2.), (2, 2.)];
/// itertools::assert_equal(data.into_iter().dedup_by_with_count(|x, y| x.1 == y.1),
/// vec![(2, (0, 1.)), (1, (0, 2.)), (2, (0, 3.)), (2, (1, 2.))]);
/// ```
fn dedup_by_with_count<Cmp>(self, cmp: Cmp) -> DedupByWithCount<Self, Cmp>
    where Self: Sized,
          Cmp: FnMut(&Self::Item, &Self::Item) -> bool,
{
    // `dedup_with_count` with a caller-supplied equality predicate.
    adaptors::dedup_by_with_count(self, cmp)
}
/// Return an iterator adaptor that filters out elements that have
/// already been produced once during the iteration. Duplicates
/// are detected using hash and equality.
///
/// Clones of visited elements are stored in a hash set in the
/// iterator.
///
/// The iterator is stable, returning the non-duplicate items in the order
/// in which they occur in the adapted iterator. In a set of duplicate
/// items, the first item encountered is the item retained.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![10, 20, 30, 20, 40, 10, 50];
/// itertools::assert_equal(data.into_iter().unique(),
/// vec![10, 20, 30, 40, 50]);
/// ```
#[cfg(feature = "use_std")]
fn unique(self) -> Unique<Self>
    where Self: Sized,
          Self::Item: Clone + Eq + Hash
{
    // Clones of seen items are kept in a hash set, hence the bounds and
    // the `use_std` feature gate.
    unique_impl::unique(self)
}
/// Return an iterator adaptor that filters out elements that have
/// already been produced once during the iteration.
///
/// Duplicates are detected by comparing the key they map to
/// with the keying function `f` by hash and equality.
/// The keys are stored in a hash set in the iterator.
///
/// The iterator is stable, returning the non-duplicate items in the order
/// in which they occur in the adapted iterator. In a set of duplicate
/// items, the first item encountered is the item retained.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec!["a", "bb", "aa", "c", "ccc"];
/// itertools::assert_equal(data.into_iter().unique_by(|s| s.len()),
/// vec!["a", "bb", "ccc"]);
/// ```
#[cfg(feature = "use_std")]
fn unique_by<V, F>(self, f: F) -> UniqueBy<Self, V, F>
    where Self: Sized,
          V: Eq + Hash,
          F: FnMut(&Self::Item) -> V
{
    // Deduplicates by the key `f` produces, storing keys (not items) in
    // a hash set.
    unique_impl::unique_by(self, f)
}
/// Return an iterator adaptor that borrows from this iterator and
/// takes items while the closure `accept` returns `true`.
///
/// This adaptor can only be used on iterators that implement `PeekingNext`
/// like `.peekable()`, `put_back` and a few other collection iterators.
///
/// The last and rejected element (first `false`) is still available when
/// `peeking_take_while` is done.
///
///
/// See also [`.take_while_ref()`](#method.take_while_ref)
/// which is a similar adaptor.
fn peeking_take_while<F>(&mut self, accept: F) -> PeekingTakeWhile<Self, F>
    where Self: Sized + PeekingNext,
          F: FnMut(&Self::Item) -> bool,
{
    // Borrows `self` (note `&mut self`), so the rejected element stays
    // available in the underlying iterator afterwards.
    peeking_take_while::peeking_take_while(self, accept)
}
/// Return an iterator adaptor that borrows from a `Clone`-able iterator
/// to only pick off elements while the predicate `accept` returns `true`.
///
/// It uses the `Clone` trait to restore the original iterator so that the
/// last and rejected element (first `false`) is still available when
/// `take_while_ref` is done.
///
/// ```
/// use itertools::Itertools;
///
/// let mut hexadecimals = "0123456789abcdef".chars();
///
/// let decimals = hexadecimals.take_while_ref(|c| c.is_numeric())
/// .collect::<String>();
/// assert_eq!(decimals, "0123456789");
/// assert_eq!(hexadecimals.next(), Some('a'));
///
/// ```
fn take_while_ref<F>(&mut self, accept: F) -> TakeWhileRef<Self, F>
    where Self: Clone,
          F: FnMut(&Self::Item) -> bool
{
    // Uses `Clone` to restore `self` so the first rejected element is not
    // lost, unlike plain `take_while`.
    adaptors::take_while_ref(self, accept)
}
/// Return an iterator adaptor that filters `Option<A>` iterator elements
/// and produces `A`. Stops on the first `None` encountered.
///
/// Iterator element type is `A`, the unwrapped element.
///
/// ```
/// use itertools::Itertools;
///
/// // List all hexadecimal digits
/// itertools::assert_equal(
/// (0..).map(|i| std::char::from_digit(i, 16)).while_some(),
/// "0123456789abcdef".chars());
///
/// ```
fn while_some<A>(self) -> WhileSome<Self>
    where Self: Sized + Iterator<Item = Option<A>>
{
    // Unwraps `Some` values and terminates on the first `None`.
    adaptors::while_some(self)
}
/// Return an iterator adaptor that iterates over the combinations of the
/// elements from an iterator.
///
/// Iterator element can be any homogeneous tuple of type `Self::Item` with
/// size up to 12.
///
/// ```
/// use itertools::Itertools;
///
/// let mut v = Vec::new();
/// for (a, b) in (1..5).tuple_combinations() {
/// v.push((a, b));
/// }
/// assert_eq!(v, vec![(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]);
///
/// let mut it = (1..5).tuple_combinations();
/// assert_eq!(Some((1, 2, 3)), it.next());
/// assert_eq!(Some((1, 2, 4)), it.next());
/// assert_eq!(Some((1, 3, 4)), it.next());
/// assert_eq!(Some((2, 3, 4)), it.next());
/// assert_eq!(None, it.next());
///
/// // this requires a type hint
/// let it = (1..5).tuple_combinations::<(_, _, _)>();
/// itertools::assert_equal(it, vec![(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]);
///
/// // you can also specify the complete type
/// use itertools::TupleCombinations;
/// use std::ops::Range;
///
/// let it: TupleCombinations<Range<u32>, (u32, u32, u32)> = (1..5).tuple_combinations();
/// itertools::assert_equal(it, vec![(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]);
/// ```
fn tuple_combinations<T>(self) -> TupleCombinations<Self, T>
    where Self: Sized + Clone,
          Self::Item: Clone,
          T: adaptors::HasCombination<Self>,
{
    // Tuple arity is selected via the `HasCombination` impl for `T`.
    adaptors::tuple_combinations(self)
}
/// Return an iterator adaptor that iterates over the `k`-length combinations of
/// the elements from an iterator.
///
/// Iterator element type is `Vec<Self::Item>`. The iterator produces a new Vec per iteration,
/// and clones the iterator elements.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (1..5).combinations(3);
/// itertools::assert_equal(it, vec![
/// vec![1, 2, 3],
/// vec![1, 2, 4],
/// vec![1, 3, 4],
/// vec![2, 3, 4],
/// ]);
/// ```
///
/// Note: Combinations does not take into account the equality of the iterated values.
/// ```
/// use itertools::Itertools;
///
/// let it = vec![1, 2, 2].into_iter().combinations(2);
/// itertools::assert_equal(it, vec![
/// vec![1, 2], // Note: these are the same
/// vec![1, 2], // Note: these are the same
/// vec![2, 2],
/// ]);
/// ```
#[cfg(feature = "use_alloc")]
fn combinations(self, k: usize) -> Combinations<Self>
    where Self: Sized,
          Self::Item: Clone
{
    // Yields a freshly allocated `Vec` of cloned items per combination.
    combinations::combinations(self, k)
}
/// Return an iterator that iterates over the `k`-length combinations of
/// the elements from an iterator, with replacement.
///
/// Iterator element type is `Vec<Self::Item>`. The iterator produces a new Vec per iteration,
/// and clones the iterator elements.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (1..4).combinations_with_replacement(2);
/// itertools::assert_equal(it, vec![
/// vec![1, 1],
/// vec![1, 2],
/// vec![1, 3],
/// vec![2, 2],
/// vec![2, 3],
/// vec![3, 3],
/// ]);
/// ```
#[cfg(feature = "use_alloc")]
fn combinations_with_replacement(self, k: usize) -> CombinationsWithReplacement<Self>
where
    Self: Sized,
    Self::Item: Clone,
{
    // Like `combinations`, but an element may repeat within one `Vec`.
    combinations_with_replacement::combinations_with_replacement(self, k)
}
/// Return an iterator adaptor that iterates over all k-permutations of the
/// elements from an iterator.
///
/// Iterator element type is `Vec<Self::Item>` with length `k`. The iterator
/// produces a new Vec per iteration, and clones the iterator elements.
///
/// If `k` is greater than the length of the input iterator, the resultant
/// iterator adaptor will be empty.
///
/// ```
/// use itertools::Itertools;
///
/// let perms = (5..8).permutations(2);
/// itertools::assert_equal(perms, vec![
/// vec![5, 6],
/// vec![5, 7],
/// vec![6, 5],
/// vec![6, 7],
/// vec![7, 5],
/// vec![7, 6],
/// ]);
/// ```
///
/// Note: Permutations does not take into account the equality of the iterated values.
///
/// ```
/// use itertools::Itertools;
///
/// let it = vec![2, 2].into_iter().permutations(2);
/// itertools::assert_equal(it, vec![
/// vec![2, 2], // Note: these are the same
/// vec![2, 2], // Note: these are the same
/// ]);
/// ```
///
/// Note: The source iterator is collected lazily, and will not be
/// re-iterated if the permutations adaptor is completed and re-iterated.
#[cfg(feature = "use_alloc")]
fn permutations(self, k: usize) -> Permutations<Self>
where Self: Sized,
Self::Item: Clone
{
// Lazy adaptor; per the doc above, the source is collected lazily and
// an oversized `k` simply yields an empty iterator.
permutations::permutations(self, k)
}
/// Return an iterator that iterates through the powerset of the elements from an
/// iterator.
///
/// Iterator element type is `Vec<Self::Item>`. The iterator produces a new `Vec`
/// per iteration, and clones the iterator elements.
///
/// The powerset of a set contains all subsets including the empty set and the full
/// input set. A powerset has length _2^n_ where _n_ is the length of the input
/// set.
///
/// Each `Vec` produced by this iterator represents a subset of the elements
/// produced by the source iterator.
///
/// ```
/// use itertools::Itertools;
///
/// let sets = (1..4).powerset().collect::<Vec<_>>();
/// itertools::assert_equal(sets, vec![
/// vec![],
/// vec![1],
/// vec![2],
/// vec![3],
/// vec![1, 2],
/// vec![1, 3],
/// vec![2, 3],
/// vec![1, 2, 3],
/// ]);
/// ```
#[cfg(feature = "use_alloc")]
fn powerset(self) -> Powerset<Self>
where Self: Sized,
Self::Item: Clone,
{
// Lazy adaptor over all 2^n subsets; see the `powerset` module.
powerset::powerset(self)
}
/// Return an iterator adaptor that pads the sequence to a minimum length of
/// `min` by filling missing elements using a closure `f`.
///
/// Iterator element type is `Self::Item`.
///
/// ```
/// use itertools::Itertools;
///
/// let it = (0..5).pad_using(10, |i| 2*i);
/// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 10, 12, 14, 16, 18]);
///
/// let it = (0..10).pad_using(5, |i| 2*i);
/// itertools::assert_equal(it, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
///
/// let it = (0..5).pad_using(10, |i| 2*i).rev();
/// itertools::assert_equal(it, vec![18, 16, 14, 12, 10, 4, 3, 2, 1, 0]);
/// ```
fn pad_using<F>(self, min: usize, f: F) -> PadUsing<Self, F>
where Self: Sized,
F: FnMut(usize) -> Self::Item
{
// `f` receives the element's position and manufactures the filler value.
pad_tail::pad_using(self, min, f)
}
/// Return an iterator adaptor that wraps each element in a `Position` to
/// ease special-case handling of the first or last elements.
///
/// Iterator element type is
/// [`Position<Self::Item>`](Position)
///
/// ```
/// use itertools::{Itertools, Position};
///
/// let it = (0..4).with_position();
/// itertools::assert_equal(it,
/// vec![Position::First(0),
/// Position::Middle(1),
/// Position::Middle(2),
/// Position::Last(3)]);
///
/// let it = (0..1).with_position();
/// itertools::assert_equal(it, vec![Position::Only(0)]);
/// ```
fn with_position(self) -> WithPosition<Self>
where Self: Sized,
{
// Wraps each item in `Position::{First, Middle, Last, Only}`.
with_position::with_position(self)
}
/// Return an iterator adaptor that yields the indices of all elements
/// satisfying a predicate, counted from the start of the iterator.
///
/// Equivalent to `iter.enumerate().filter(|(_, v)| predicate(v)).map(|(i, _)| i)`.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![1, 2, 3, 3, 4, 6, 7, 9];
/// itertools::assert_equal(data.iter().positions(|v| v % 2 == 0), vec![1, 4, 5]);
///
/// itertools::assert_equal(data.iter().positions(|v| v % 2 == 1).rev(), vec![7, 6, 3, 2, 0]);
/// ```
fn positions<P>(self, predicate: P) -> Positions<Self, P>
where Self: Sized,
P: FnMut(Self::Item) -> bool,
{
// Indices are counted from the front even when iterated in reverse.
adaptors::positions(self, predicate)
}
/// Return an iterator adaptor that applies a mutating function
/// to each element before yielding it.
///
/// ```
/// use itertools::Itertools;
///
/// let input = vec![vec![1], vec![3, 2, 1]];
/// let it = input.into_iter().update(|mut v| v.push(0));
/// itertools::assert_equal(it, vec![vec![1, 0], vec![3, 2, 1, 0]]);
/// ```
fn update<F>(self, updater: F) -> Update<Self, F>
where Self: Sized,
F: FnMut(&mut Self::Item),
{
// `updater` mutates each item in place before the adaptor yields it.
adaptors::update(self, updater)
}
// non-adaptor methods
/// Advances the iterator and returns the next items grouped in a tuple of
/// a specific size (up to 12).
///
/// If there are enough elements to be grouped in a tuple, then the tuple is
/// returned inside `Some`, otherwise `None` is returned.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = 1..5;
///
/// assert_eq!(Some((1, 2)), iter.next_tuple());
/// ```
fn next_tuple<T>(&mut self) -> Option<T>
where Self: Sized + Iterator<Item = T::Item>,
T: traits::HomogeneousTuple
{
// Pulls one tuple's worth of items directly from `self`, without
// buffering; returns `None` if the iterator runs out first.
T::collect_from_iter_no_buf(self)
}
/// Collects all items from the iterator into a tuple of a specific size
/// (up to 12).
///
/// If the number of elements inside the iterator is **exactly** equal to
/// the tuple size, then the tuple is returned inside `Some`, otherwise
/// `None` is returned.
///
/// ```
/// use itertools::Itertools;
///
/// let iter = 1..3;
///
/// if let Some((x, y)) = iter.collect_tuple() {
/// assert_eq!((x, y), (1, 2))
/// } else {
/// panic!("Expected two elements")
/// }
/// ```
/// Grab one tuple's worth of items, then require that the iterator is
/// completely exhausted; any leftover element means the lengths did not
/// match exactly and `None` is returned.
fn collect_tuple<T>(mut self) -> Option<T>
where Self: Sized + Iterator<Item = T::Item>,
T: traits::HomogeneousTuple
{
    let tuple = self.next_tuple()?;
    if self.next().is_none() {
        Some(tuple)
    } else {
        None
    }
}
/// Find the position and value of the first element satisfying a predicate.
///
/// The iterator is not advanced past the first element found.
///
/// ```
/// use itertools::Itertools;
///
/// let text = "Hα";
/// assert_eq!(text.chars().find_position(|ch| ch.is_lowercase()), Some((1, 'α')));
/// ```
/// Walk the iterator, pairing each element with a running index, and stop
/// at the first element the predicate accepts. The iterator is left
/// positioned just past that element.
fn find_position<P>(&mut self, mut pred: P) -> Option<(usize, Self::Item)>
where P: FnMut(&Self::Item) -> bool
{
    let mut position = 0usize;
    while let Some(item) = self.next() {
        if pred(&item) {
            return Some((position, item));
        }
        position += 1;
    }
    None
}
/// Check whether all elements compare equal.
///
/// Empty iterators are considered to have equal elements:
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![1, 1, 1, 2, 2, 3, 3, 3, 4, 5, 5];
/// assert!(!data.iter().all_equal());
/// assert!(data[0..3].iter().all_equal());
/// assert!(data[3..5].iter().all_equal());
/// assert!(data[5..8].iter().all_equal());
///
/// let data : Option<usize> = None;
/// assert!(data.into_iter().all_equal());
/// ```
/// An empty sequence is vacuously all-equal; otherwise every remaining
/// element must compare equal to the first one.
fn all_equal(&mut self) -> bool
where Self: Sized,
Self::Item: PartialEq,
{
    if let Some(first) = self.next() {
        self.all(|item| first == item)
    } else {
        true
    }
}
/// Consume the first `n` elements from the iterator eagerly,
/// and return the same iterator again.
///
/// It works similarly to *.skip(* `n` *)* except it is eager and
/// preserves the iterator type.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = "αβγ".chars().dropping(2);
/// itertools::assert_equal(iter, "γ".chars());
/// ```
///
/// *Fusing notes: if the iterator is exhausted by dropping,
/// the result of calling `.next()` again depends on the iterator implementation.*
/// Eagerly discard the first `n` elements, then hand the iterator back.
/// `nth(n - 1)` consumes exactly `n` items; `checked_sub` sidesteps the
/// call entirely (and the underflow) when `n == 0`.
fn dropping(mut self, n: usize) -> Self
where Self: Sized
{
    if let Some(last) = n.checked_sub(1) {
        self.nth(last);
    }
    self
}
/// Consume the last `n` elements from the iterator eagerly,
/// and return the same iterator again.
///
/// This is only possible on double ended iterators. `n` may be
/// larger than the number of elements.
///
/// Note: This method is eager, dropping the back elements immediately and
/// preserves the iterator type.
///
/// ```
/// use itertools::Itertools;
///
/// let init = vec![0, 3, 6, 9].into_iter().dropping_back(1);
/// itertools::assert_equal(init, vec![0, 3, 6]);
/// ```
/// Eagerly discard the last `n` elements by advancing a reversed view of
/// the iterator, then hand the (shortened) iterator back. `n` larger than
/// the remaining length simply exhausts the iterator.
fn dropping_back(mut self, n: usize) -> Self
where Self: Sized,
Self: DoubleEndedIterator
{
    if let Some(last) = n.checked_sub(1) {
        (&mut self).rev().nth(last);
    }
    self
}
/// Run the closure `f` eagerly on each element of the iterator.
///
/// Consumes the iterator until its end.
///
/// ```
/// use std::sync::mpsc::channel;
/// use itertools::Itertools;
///
/// let (tx, rx) = channel();
///
/// // use .foreach() to apply a function to each value -- sending it
/// (0..5).map(|x| x * 2 + 1).foreach(|x| { tx.send(x).unwrap(); } );
///
/// drop(tx);
///
/// itertools::assert_equal(rx.iter(), vec![1, 3, 5, 7, 9]);
/// ```
#[deprecated(note="Use .for_each() instead", since="0.8.0")]
fn foreach<F>(self, f: F)
where F: FnMut(Self::Item),
Self: Sized,
{
// Kept only for backwards compatibility; forwards to the
// standard-library `Iterator::for_each`.
self.for_each(f)
}
/// Combine all an iterator's elements into one element by using `Extend`.
///
/// This combinator will extend the first item with each of the rest of the
/// items of the iterator. If the iterator is empty, the default value of
/// `I::Item` is returned.
///
/// ```rust
/// use itertools::Itertools;
///
/// let input = vec![vec![1], vec![2, 3], vec![4, 5, 6]];
/// assert_eq!(input.into_iter().concat(),
/// vec![1, 2, 3, 4, 5, 6]);
/// ```
fn concat(self) -> Self::Item
where Self: Sized,
Self::Item: Extend<<<Self as Iterator>::Item as IntoIterator>::Item> + IntoIterator + Default
{
// Delegates to the crate-level `concat` free function, which extends the
// first item with the contents of each subsequent one.
concat(self)
}
/// `.collect_vec()` is simply a type specialization of `.collect()`,
/// for convenience.
#[cfg(feature = "use_alloc")]
fn collect_vec(self) -> Vec<Self::Item>
where Self: Sized
{
// The return type pins the `collect` target, saving a turbofish at the call site.
self.collect()
}
/// `.try_collect()` is more convenient way of writing
/// `.collect::<Result<_, _>>()`
///
/// # Example
///
/// ```
/// use std::{fs, io};
/// use itertools::Itertools;
///
/// fn process_dir_entries(entries: &[fs::DirEntry]) {
/// // ...
/// }
///
/// fn do_stuff() -> std::io::Result<()> {
/// let entries: Vec<_> = fs::read_dir(".")?.try_collect()?;
/// process_dir_entries(&entries);
///
/// Ok(())
/// }
/// ```
#[cfg(feature = "use_alloc")]
fn try_collect<T, U, E>(self) -> Result<U, E>
where
Self: Sized + Iterator<Item = Result<T, E>>,
Result<U, E>: FromIterator<Result<T, E>>,
{
// Plain `collect` targeting `Result<U, E>`; the `FromIterator` impl for
// `Result` short-circuits on the first `Err`.
self.collect()
}
/// Assign to each reference in `self` from the `from` iterator,
/// stopping at the shortest of the two iterators.
///
/// The `from` iterator is queried for its next element before the `self`
/// iterator, and if either is exhausted the method is done.
///
/// Return the number of elements written.
///
/// ```
/// use itertools::Itertools;
///
/// let mut xs = [0; 4];
/// xs.iter_mut().set_from(1..);
/// assert_eq!(xs, [1, 2, 3, 4]);
/// ```
/// Copy values from `from` into the mutable slots yielded by `self`,
/// stopping as soon as either side runs out, and report how many
/// assignments were performed. `zip` polls `from` first, matching the
/// documented query order.
#[inline]
fn set_from<'a, A: 'a, J>(&mut self, from: J) -> usize
where Self: Iterator<Item = &'a mut A>,
J: IntoIterator<Item = A>
{
    from.into_iter()
        .zip(self)
        .map(|(value, slot)| *slot = value)
        .count()
}
/// Combine all iterator elements into one String, separated by `sep`.
///
/// Use the `Display` implementation of each element.
///
/// ```
/// use itertools::Itertools;
///
/// assert_eq!(["a", "b", "c"].iter().join(", "), "a, b, c");
/// assert_eq!([1, 2, 3].iter().join(", "), "1, 2, 3");
/// ```
/// Concatenate the `Display` output of every element, inserting `sep`
/// between consecutive elements. An empty iterator yields an empty string.
#[cfg(feature = "use_alloc")]
fn join(&mut self, sep: &str) -> String
where Self::Item: std::fmt::Display
{
    // Peel off the first element so separators only go *between* items.
    let first = match self.next() {
        Some(elt) => elt,
        None => return String::new(),
    };
    // Reserve at least enough room for the separators implied by the
    // iterator's lower size bound.
    let (lower, _) = self.size_hint();
    let mut out = String::with_capacity(sep.len() * lower);
    write!(&mut out, "{}", first).unwrap();
    for elt in self {
        out.push_str(sep);
        write!(&mut out, "{}", elt).unwrap();
    }
    out
}
/// Format all iterator elements, separated by `sep`.
///
/// All elements are formatted (any formatting trait)
/// with `sep` inserted between each element.
///
/// **Panics** if the formatter helper is formatted more than once.
///
/// ```
/// use itertools::Itertools;
///
/// let data = [1.1, 2.71828, -3.];
/// assert_eq!(
/// format!("{:.2}", data.iter().format(", ")),
/// "1.10, 2.72, -3.00");
/// ```
fn format(self, sep: &str) -> Format<Self>
where Self: Sized,
{
// Builds the one-shot formatting helper; see the `format` module.
format::new_format_default(self, sep)
}
/// Format all iterator elements, separated by `sep`.
///
/// This is a customizable version of `.format()`.
///
/// The supplied closure `format` is called once per iterator element,
/// with two arguments: the element and a callback that takes a
/// `&Display` value, i.e. any reference to type that implements `Display`.
///
/// Using `&format_args!(...)` is the most versatile way to apply custom
/// element formatting. The callback can be called multiple times if needed.
///
/// **Panics** if the formatter helper is formatted more than once.
///
/// ```
/// use itertools::Itertools;
///
/// let data = [1.1, 2.71828, -3.];
/// let data_formatter = data.iter().format_with(", ", |elt, f| f(&format_args!("{:.2}", elt)));
/// assert_eq!(format!("{}", data_formatter),
/// "1.10, 2.72, -3.00");
///
/// // .format_with() is recursively composable
/// let matrix = [[1., 2., 3.],
/// [4., 5., 6.]];
/// let matrix_formatter = matrix.iter().format_with("\n", |row, f| {
/// f(&row.iter().format_with(", ", |elt, g| g(&elt)))
/// });
/// assert_eq!(format!("{}", matrix_formatter),
/// "1, 2, 3\n4, 5, 6");
///
///
/// ```
fn format_with<F>(self, sep: &str, format: F) -> FormatWith<Self, F>
where Self: Sized,
F: FnMut(Self::Item, &mut dyn FnMut(&dyn fmt::Display) -> fmt::Result) -> fmt::Result,
{
// Builds the one-shot formatting helper with a custom per-element callback.
format::new_format(self, sep, format)
}
/// See [`.fold_ok()`](#method.fold_ok).
#[deprecated(note="Use .fold_ok() instead", since="0.10.0")]
fn fold_results<A, E, B, F>(&mut self, start: B, f: F) -> Result<B, E>
where Self: Iterator<Item = Result<A, E>>,
F: FnMut(B, A) -> B
{
// Kept only for backwards compatibility; forwards to the renamed `fold_ok`.
self.fold_ok(start, f)
}
/// Fold `Result` values from an iterator.
///
/// Only `Ok` values are folded. If no error is encountered, the folded
/// value is returned inside `Ok`. Otherwise, the operation terminates
/// and returns the first `Err` value it encounters. No iterator elements are
/// consumed after the first error.
///
/// The first accumulator value is the `start` parameter.
/// Each iteration passes the accumulator value and the next value inside `Ok`
/// to the fold function `f` and its return value becomes the new accumulator value.
///
/// For example the sequence *Ok(1), Ok(2), Ok(3)* will result in a
/// computation like this:
///
/// ```ignore
/// let mut accum = start;
/// accum = f(accum, 1);
/// accum = f(accum, 2);
/// accum = f(accum, 3);
/// ```
///
/// With a `start` value of 0 and an addition as folding function,
/// this effectively results in *((0 + 1) + 2) + 3*
///
/// ```
/// use std::ops::Add;
/// use itertools::Itertools;
///
/// let values = [1, 2, -2, -1, 2, 1];
/// assert_eq!(
/// values.iter()
/// .map(Ok::<_, ()>)
/// .fold_ok(0, Add::add),
/// Ok(3)
/// );
/// assert!(
/// values.iter()
/// .map(|&x| if x >= 0 { Ok(x) } else { Err("Negative number") })
/// .fold_ok(0, Add::add)
/// .is_err()
/// );
/// ```
/// Fold the `Ok` values, returning the first `Err` encountered unchanged;
/// the `?` operator propagates it without consuming further elements.
fn fold_ok<A, E, B, F>(&mut self, mut start: B, mut f: F) -> Result<B, E>
where Self: Iterator<Item = Result<A, E>>,
F: FnMut(B, A) -> B
{
    while let Some(elt) = self.next() {
        start = f(start, elt?);
    }
    Ok(start)
}
/// Fold `Option` values from an iterator.
///
/// Only `Some` values are folded. If no `None` is encountered, the folded
/// value is returned inside `Some`. Otherwise, the operation terminates
/// and returns `None`. No iterator elements are consumed after the `None`.
///
/// This is the `Option` equivalent to `fold_ok`.
///
/// ```
/// use std::ops::Add;
/// use itertools::Itertools;
///
/// let mut values = vec![Some(1), Some(2), Some(-2)].into_iter();
/// assert_eq!(values.fold_options(5, Add::add), Some(5 + 1 + 2 - 2));
///
/// let mut more_values = vec![Some(2), None, Some(0)].into_iter();
/// assert!(more_values.fold_options(0, Add::add).is_none());
/// assert_eq!(more_values.next().unwrap(), Some(0));
/// ```
/// Fold the `Some` values, returning `None` at the first `None`
/// encountered; the `?` operator stops iteration there, leaving later
/// elements unconsumed.
fn fold_options<A, B, F>(&mut self, mut start: B, mut f: F) -> Option<B>
where Self: Iterator<Item = Option<A>>,
F: FnMut(B, A) -> B
{
    while let Some(elt) = self.next() {
        start = f(start, elt?);
    }
    Some(start)
}
/// Accumulator of the elements in the iterator.
///
/// Like `.fold()`, without a base case. If the iterator is
/// empty, return `None`. With just one element, return it.
/// Otherwise elements are accumulated in sequence using the closure `f`.
///
/// ```
/// use itertools::Itertools;
///
/// assert_eq!((0..10).fold1(|x, y| x + y).unwrap_or(0), 45);
/// assert_eq!((0..0).fold1(|x, y| x * y), None);
/// ```
/// Fold without a caller-supplied base case: the first element seeds the
/// accumulator, an empty iterator yields `None`.
fn fold1<F>(mut self, f: F) -> Option<Self::Item>
where F: FnMut(Self::Item, Self::Item) -> Self::Item,
Self: Sized,
{
    match self.next() {
        Some(first) => Some(self.fold(first, f)),
        None => None,
    }
}
/// Accumulate the elements in the iterator in a tree-like manner.
///
/// You can think of it as, while there's more than one item, repeatedly
/// combining adjacent items. It does so in bottom-up-merge-sort order,
/// however, so that it needs only logarithmic stack space.
///
/// This produces a call tree like the following (where the calls under
/// an item are done after reading that item):
///
/// ```text
/// 1 2 3 4 5 6 7
/// │ │ │ │ │ │ │
/// └─f └─f └─f │
/// │ │ │ │
/// └───f └─f
/// │ │
/// └─────f
/// ```
///
/// Which, for non-associative functions, will typically produce a different
/// result than the linear call tree used by `fold1`:
///
/// ```text
/// 1 2 3 4 5 6 7
/// │ │ │ │ │ │ │
/// └─f─f─f─f─f─f
/// ```
///
/// If `f` is associative, prefer the normal `fold1` instead.
///
/// ```
/// use itertools::Itertools;
///
/// // The same tree as above
/// let num_strings = (1..8).map(|x| x.to_string());
/// assert_eq!(num_strings.tree_fold1(|x, y| format!("f({}, {})", x, y)),
/// Some(String::from("f(f(f(1, 2), f(3, 4)), f(f(5, 6), 7))")));
///
/// // Like fold1, an empty iterator produces None
/// assert_eq!((0..0).tree_fold1(|x, y| x * y), None);
///
/// // tree_fold1 matches fold1 for associative operations...
/// assert_eq!((0..10).tree_fold1(|x, y| x + y),
/// (0..10).fold1(|x, y| x + y));
/// // ...but not for non-associative ones
/// assert_ne!((0..10).tree_fold1(|x, y| x - y),
/// (0..10).fold1(|x, y| x - y));
/// ```
fn tree_fold1<F>(mut self, mut f: F) -> Option<Self::Item>
where F: FnMut(Self::Item, Self::Item) -> Self::Item,
Self: Sized,
{
// `Ok(t)` means a full-sized subtree was built; `Err(Some(t))` means the
// iterator ran out part-way and `t` combines everything that was read;
// `Err(None)` means the iterator was already empty.
type State<T> = Result<T, Option<T>>;
// Build a height-1 subtree: combine the next two items (the base case).
fn inner0<T, II, FF>(it: &mut II, f: &mut FF) -> State<T>
where
II: Iterator<Item = T>,
FF: FnMut(T, T) -> T
{
// This function could be replaced with `it.next().ok_or(None)`,
// but half the useful tree_fold1 work is combining adjacent items,
// so put that in a form that LLVM is more likely to optimize well.
let a =
if let Some(v) = it.next() { v }
else { return Err(None) };
let b =
if let Some(v) = it.next() { v }
else { return Err(Some(a)) };
Ok(f(a, b))
}
// Build a subtree of height `stop + 1` by repeatedly doubling: combine
// the current tree with another of the same size, bottom-up.
fn inner<T, II, FF>(stop: usize, it: &mut II, f: &mut FF) -> State<T>
where
II: Iterator<Item = T>,
FF: FnMut(T, T) -> T
{
let mut x = inner0(it, f)?;
for height in 0..stop {
// Try to get another tree the same size with which to combine it,
// creating a new tree that's twice as big for next time around.
let next =
if height == 0 {
inner0(it, f)
} else {
inner(height, it, f)
};
match next {
Ok(y) => x = f(x, y),
// If we ran out of items, combine whatever we did manage
// to get. It's better combined with the current value
// than something in a parent frame, because the tree in
// the parent is always as least as big as this one.
Err(None) => return Err(Some(x)),
Err(Some(y)) => return Err(Some(f(x, y))),
}
}
Ok(x)
}
// With an unbounded `stop`, the loop only terminates by exhausting the
// iterator, so the result always arrives through the `Err` channel.
match inner(usize::max_value(), &mut self, &mut f) {
Err(x) => x,
_ => unreachable!(),
}
}
/// An iterator method that applies a function, producing a single, final value.
///
/// `fold_while()` is basically equivalent to `fold()` but with additional support for
/// early exit via short-circuiting.
///
/// ```
/// use itertools::Itertools;
/// use itertools::FoldWhile::{Continue, Done};
///
/// let numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
///
/// let mut result = 0;
///
/// // for loop:
/// for i in &numbers {
/// if *i > 5 {
/// break;
/// }
/// result = result + i;
/// }
///
/// // fold:
/// let result2 = numbers.iter().fold(0, |acc, x| {
/// if *x > 5 { acc } else { acc + x }
/// });
///
/// // fold_while:
/// let result3 = numbers.iter().fold_while(0, |acc, x| {
/// if *x > 5 { Done(acc) } else { Continue(acc + x) }
/// }).into_inner();
///
/// // they're the same
/// assert_eq!(result, result2);
/// assert_eq!(result2, result3);
/// ```
///
/// The big difference between the computations of `result2` and `result3` is that while
/// `fold()` called the provided closure for every item of the callee iterator,
/// `fold_while()` actually stopped iterating as soon as it encountered `Fold::Done(_)`.
/// Short-circuiting fold: piggy-backs on the standard `try_fold`, mapping
/// `FoldWhile::Continue`/`Done` onto `Ok`/`Err` for the duration of the
/// fold and translating back afterwards.
fn fold_while<B, F>(&mut self, init: B, mut f: F) -> FoldWhile<B>
where Self: Sized,
F: FnMut(B, Self::Item) -> FoldWhile<B>
{
    let outcome = self.try_fold(init, #[inline(always)] |acc, item| {
        match f(acc, item) {
            FoldWhile::Continue(acc) => Ok(acc),
            FoldWhile::Done(acc) => Err(acc),
        }
    });
    match outcome {
        Ok(acc) => FoldWhile::Continue(acc),
        Err(acc) => FoldWhile::Done(acc),
    }
}
/// Iterate over the entire iterator and add all the elements.
///
/// An empty iterator returns `None`, otherwise `Some(sum)`.
///
/// # Panics
///
/// When calling `sum1()` and a primitive integer type is being returned, this
/// method will panic if the computation overflows and debug assertions are
/// enabled.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let empty_sum = (1..1).sum1::<i32>();
/// assert_eq!(empty_sum, None);
///
/// let nonempty_sum = (1..11).sum1::<i32>();
/// assert_eq!(nonempty_sum, Some(55));
/// ```
/// Sum all elements, distinguishing "empty" (`None`) from a legitimately
/// zero-valued sum by peeling off the first element before delegating to
/// the standard `sum`.
fn sum1<S>(mut self) -> Option<S>
where Self: Sized,
S: std::iter::Sum<Self::Item>,
{
    match self.next() {
        Some(first) => Some(once(first).chain(self).sum()),
        None => None,
    }
}
/// Iterate over the entire iterator and multiply all the elements.
///
/// An empty iterator returns `None`, otherwise `Some(product)`.
///
/// # Panics
///
/// When calling `product1()` and a primitive integer type is being returned,
/// method will panic if the computation overflows and debug assertions are
/// enabled.
///
/// # Examples
/// ```
/// use itertools::Itertools;
///
/// let empty_product = (1..1).product1::<i32>();
/// assert_eq!(empty_product, None);
///
/// let nonempty_product = (1..11).product1::<i32>();
/// assert_eq!(nonempty_product, Some(3628800));
/// ```
/// Multiply all elements, distinguishing "empty" (`None`) from a product
/// that happens to equal one by peeling off the first element before
/// delegating to the standard `product`.
fn product1<P>(mut self) -> Option<P>
where Self: Sized,
P: std::iter::Product<Self::Item>,
{
    match self.next() {
        Some(first) => Some(once(first).chain(self).product()),
        None => None,
    }
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort_unstable()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort the letters of the text in ascending order
/// let text = "bdacfe";
/// itertools::assert_equal(text.chars().sorted_unstable(),
/// "abcdef".chars());
/// ```
/// Buffer the whole iterator, sort in place, and return an iterator that
/// owns the sorted elements. `.sort_unstable()` is used directly because
/// it is not quite identical to `.sort_by(Ord::cmp)`.
#[cfg(feature = "use_alloc")]
fn sorted_unstable(self) -> VecIntoIter<Self::Item>
where Self: Sized,
Self::Item: Ord
{
    let mut buf: Vec<_> = self.collect();
    buf.sort_unstable();
    buf.into_iter()
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort_unstable_by()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort people in descending order by age
/// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)];
///
/// let oldest_people_first = people
/// .into_iter()
/// .sorted_unstable_by(|a, b| Ord::cmp(&b.1, &a.1))
/// .map(|(person, _age)| person);
///
/// itertools::assert_equal(oldest_people_first,
/// vec!["Jill", "Jack", "Jane", "John"]);
/// ```
/// Buffer the whole iterator, sort unstably with the caller's comparator,
/// and return an iterator that owns the sorted elements.
#[cfg(feature = "use_alloc")]
fn sorted_unstable_by<F>(self, cmp: F) -> VecIntoIter<Self::Item>
where Self: Sized,
F: FnMut(&Self::Item, &Self::Item) -> Ordering,
{
    let mut buf: Vec<_> = self.collect();
    buf.sort_unstable_by(cmp);
    buf.into_iter()
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort_unstable_by_key()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort people in descending order by age
/// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)];
///
/// let oldest_people_first = people
/// .into_iter()
/// .sorted_unstable_by_key(|x| -x.1)
/// .map(|(person, _age)| person);
///
/// itertools::assert_equal(oldest_people_first,
/// vec!["Jill", "Jack", "Jane", "John"]);
/// ```
/// Buffer the whole iterator, sort unstably by the key extracted with `f`,
/// and return an iterator that owns the sorted elements.
#[cfg(feature = "use_alloc")]
fn sorted_unstable_by_key<K, F>(self, f: F) -> VecIntoIter<Self::Item>
where Self: Sized,
K: Ord,
F: FnMut(&Self::Item) -> K,
{
    let mut buf: Vec<_> = self.collect();
    buf.sort_unstable_by_key(f);
    buf.into_iter()
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort the letters of the text in ascending order
/// let text = "bdacfe";
/// itertools::assert_equal(text.chars().sorted(),
/// "abcdef".chars());
/// ```
/// Buffer the whole iterator, stable-sort in place, and return an iterator
/// that owns the sorted elements. `.sort()` is used directly because it is
/// not quite identical to `.sort_by(Ord::cmp)`.
#[cfg(feature = "use_alloc")]
fn sorted(self) -> VecIntoIter<Self::Item>
where Self: Sized,
Self::Item: Ord
{
    let mut buf: Vec<_> = self.collect();
    buf.sort();
    buf.into_iter()
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort_by()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort people in descending order by age
/// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)];
///
/// let oldest_people_first = people
/// .into_iter()
/// .sorted_by(|a, b| Ord::cmp(&b.1, &a.1))
/// .map(|(person, _age)| person);
///
/// itertools::assert_equal(oldest_people_first,
/// vec!["Jill", "Jack", "Jane", "John"]);
/// ```
/// Buffer the whole iterator, stable-sort with the caller's comparator,
/// and return an iterator that owns the sorted elements.
#[cfg(feature = "use_alloc")]
fn sorted_by<F>(self, cmp: F) -> VecIntoIter<Self::Item>
where Self: Sized,
F: FnMut(&Self::Item, &Self::Item) -> Ordering,
{
    let mut buf: Vec<_> = self.collect();
    buf.sort_by(cmp);
    buf.into_iter()
}
/// Sort all iterator elements into a new iterator in ascending order.
///
/// **Note:** This consumes the entire iterator, uses the
/// `slice::sort_by_key()` method and returns the result as a new
/// iterator that owns its elements.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// ```
/// use itertools::Itertools;
///
/// // sort people in descending order by age
/// let people = vec![("Jane", 20), ("John", 18), ("Jill", 30), ("Jack", 27)];
///
/// let oldest_people_first = people
/// .into_iter()
/// .sorted_by_key(|x| -x.1)
/// .map(|(person, _age)| person);
///
/// itertools::assert_equal(oldest_people_first,
/// vec!["Jill", "Jack", "Jane", "John"]);
/// ```
/// Buffer the whole iterator, stable-sort by the key extracted with `f`,
/// and return an iterator that owns the sorted elements.
#[cfg(feature = "use_alloc")]
fn sorted_by_key<K, F>(self, f: F) -> VecIntoIter<Self::Item>
where Self: Sized,
K: Ord,
F: FnMut(&Self::Item) -> K,
{
    let mut buf: Vec<_> = self.collect();
    buf.sort_by_key(f);
    buf.into_iter()
}
/// Sort the k smallest elements into a new iterator, in ascending order.
///
/// **Note:** This consumes the entire iterator, and returns the result
/// as a new iterator that owns its elements. If the input contains
/// less than k elements, the result is equivalent to `self.sorted()`.
///
/// This is guaranteed to use `k * sizeof(Self::Item) + O(1)` memory
/// and `O(n log k)` time, with `n` the number of elements in the input.
///
/// The sorted iterator, if directly collected to a `Vec`, is converted
/// without any extra copying or allocation cost.
///
/// **Note:** This is functionally-equivalent to `self.sorted().take(k)`
/// but much more efficient.
///
/// ```
/// use itertools::Itertools;
///
/// // A random permutation of 0..15
/// let numbers = vec![6, 9, 1, 14, 0, 4, 8, 7, 11, 2, 10, 3, 13, 12, 5];
///
/// let five_smallest = numbers
/// .into_iter()
/// .k_smallest(5);
///
/// itertools::assert_equal(five_smallest, 0..5);
/// ```
#[cfg(feature = "use_alloc")]
fn k_smallest(self, k: usize) -> VecIntoIter<Self::Item>
where Self: Sized,
Self::Item: Ord
{
// The `k_smallest` module maintains a k-element heap of the smallest
// items seen so far; `into_sorted_vec` then yields them in ascending order.
crate::k_smallest::k_smallest(self, k)
.into_sorted_vec()
.into_iter()
}
/// Collect all iterator elements into one of two
/// partitions. Unlike `Iterator::partition`, each partition may
/// have a distinct type.
///
/// ```
/// use itertools::{Itertools, Either};
///
/// let successes_and_failures = vec![Ok(1), Err(false), Err(true), Ok(2)];
///
/// let (successes, failures): (Vec<_>, Vec<_>) = successes_and_failures
/// .into_iter()
/// .partition_map(|r| {
/// match r {
/// Ok(v) => Either::Left(v),
/// Err(v) => Either::Right(v),
/// }
/// });
///
/// assert_eq!(successes, [1, 2]);
/// assert_eq!(failures, [false, true]);
/// ```
/// Route every element into one of two independently-typed collections,
/// according to the `Either` returned by `predicate`. Single-item
/// `extend` calls grow each side.
fn partition_map<A, B, F, L, R>(self, mut predicate: F) -> (A, B)
where Self: Sized,
F: FnMut(Self::Item) -> Either<L, R>,
A: Default + Extend<L>,
B: Default + Extend<R>,
{
    let mut lefts = A::default();
    let mut rights = B::default();
    for val in self {
        match predicate(val) {
            Either::Left(v) => lefts.extend(Some(v)),
            Either::Right(v) => rights.extend(Some(v)),
        }
    }
    (lefts, rights)
}
/// Return a `HashMap` of keys mapped to `Vec`s of values. Keys and values
/// are taken from `(Key, Value)` tuple pairs yielded by the input iterator.
///
/// ```
/// use itertools::Itertools;
///
/// let data = vec![(0, 10), (2, 12), (3, 13), (0, 20), (3, 33), (2, 42)];
/// let lookup = data.into_iter().into_group_map();
///
/// assert_eq!(lookup[&0], vec![10, 20]);
/// assert_eq!(lookup.get(&1), None);
/// assert_eq!(lookup[&2], vec![12, 42]);
/// assert_eq!(lookup[&3], vec![13, 33]);
/// ```
#[cfg(feature = "use_std")]
fn into_group_map<K, V>(self) -> HashMap<K, Vec<V>>
where Self: Iterator<Item=(K, V)> + Sized,
K: Hash + Eq,
{
// Delegates to the `group_map` module; values keep their iteration order
// within each key's `Vec` (see the doc example above).
group_map::into_group_map(self)
}
/// Return a `HashMap` of keys mapped to `Vec`s of values. The key is
/// computed by the given closure.
/// Differs from `into_group_map` in that the complete value (including the
/// part the key was derived from) is kept in the map; it is also more
/// general, and you can fold the resulting group map.
///
/// ```
/// use itertools::Itertools;
/// use std::collections::HashMap;
///
/// let data = vec![(0, 10), (2, 12), (3, 13), (0, 20), (3, 33), (2, 42)];
/// let lookup: HashMap<u32,Vec<(u32, u32)>> = data.clone().into_iter().into_group_map_by(|a|
/// a.0);
///
/// assert_eq!(lookup[&0], vec![(0,10),(0,20)]);
/// assert_eq!(lookup.get(&1), None);
/// assert_eq!(lookup[&2], vec![(2,12), (2,42)]);
/// assert_eq!(lookup[&3], vec![(3,13), (3,33)]);
///
/// assert_eq!(
/// data.into_iter()
/// .into_group_map_by(|x| x.0)
/// .into_iter()
/// .map(|(key, values)| (key, values.into_iter().fold(0,|acc, (_,v)| acc + v )))
/// .collect::<HashMap<u32,u32>>()[&0], 30)
/// ```
#[cfg(feature = "use_std")]
fn into_group_map_by<K, V, F>(self, f: F) -> HashMap<K, Vec<V>>
where
Self: Iterator<Item=V> + Sized,
K: Hash + Eq,
F: Fn(&V) -> K,
{
// `f` derives the grouping key from a borrowed value; the value itself
// is stored whole in the map.
group_map::into_group_map_by(self, f)
}
/// Constructs a `GroupingMap` to be used later with one of the efficient
/// group-and-fold operations it allows to perform.
///
/// The input iterator must yield item in the form of `(K, V)` where the
/// value of type `K` will be used as key to identify the groups and the
/// value of type `V` as value for the folding operation.
///
/// See [`GroupingMap`](./structs/struct.GroupingMap.html) for more information
/// on what operations are available.
#[cfg(feature = "use_std")]
fn into_grouping_map<K, V>(self) -> GroupingMap<Self>
where Self: Iterator<Item=(K, V)> + Sized,
K: Hash + Eq,
{
// Thin constructor; all grouping/folding logic lives in `grouping_map`.
grouping_map::new(self)
}
/// Constructs a `GroupingMap` to be used later with one of the efficient
/// group-and-fold operations it allows to perform.
///
/// The values from this iterator will be used as values for the folding operation
/// while the keys will be obtained from the values by calling `key_mapper`.
///
/// See [`GroupingMap`](./structs/struct.GroupingMap.html) for more information
/// on what operations are available.
#[cfg(feature = "use_std")]
fn into_grouping_map_by<K, V, F>(self, key_mapper: F) -> GroupingMapBy<Self, F>
where Self: Iterator<Item=V> + Sized,
K: Hash + Eq,
F: FnMut(&V) -> K
{
// `MapForGrouping` pairs each value with its computed key before grouping.
grouping_map::new(grouping_map::MapForGrouping::new(self, key_mapper))
}
/// Return the minimum and maximum elements in the iterator.
///
/// The return type `MinMaxResult` is an enum of three variants:
///
/// - `NoElements` if the iterator is empty.
/// - `OneElement(x)` if the iterator has exactly one element.
/// - `MinMax(x, y)` is returned otherwise, where `x <= y`. Two
/// values are equal if and only if there is more than one
/// element in the iterator and all elements are equal.
///
/// On an iterator of length `n`, `minmax` does `1.5 * n` comparisons,
/// and so is faster than calling `min` and `max` separately which does
/// `2 * n` comparisons.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
/// use itertools::MinMaxResult::{NoElements, OneElement, MinMax};
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().minmax(), NoElements);
///
/// let a = [1];
/// assert_eq!(a.iter().minmax(), OneElement(&1));
///
/// let a = [1, 2, 3, 4, 5];
/// assert_eq!(a.iter().minmax(), MinMax(&1, &5));
///
/// let a = [1, 1, 1, 1];
/// assert_eq!(a.iter().minmax(), MinMax(&1, &1));
/// ```
///
/// The elements can be floats but no particular result is guaranteed
/// if an element is NaN.
fn minmax(self) -> MinMaxResult<Self::Item>
where Self: Sized, Self::Item: PartialOrd
{
// `minmax_impl` takes a key function (unused here, hence `|_| ()`) and a
// "less than" predicate; comparing items directly gives the plain minmax.
minmax::minmax_impl(self, |_| (), |x, y, _, _| x < y)
}
/// Return the minimum and maximum element of an iterator, as determined by
/// the specified function.
///
/// The return value is a variant of `MinMaxResult` like for `minmax()`.
///
/// For the minimum, the first minimal element is returned. For the maximum,
/// the last maximal element wins. This matches the behavior of the standard
/// `Iterator::min()` and `Iterator::max()` methods.
///
/// The keys can be floats but no particular result is guaranteed
/// if a key is NaN.
fn minmax_by_key<K, F>(self, key: F) -> MinMaxResult<Self::Item>
where Self: Sized, K: PartialOrd, F: FnMut(&Self::Item) -> K
{
// Compare by the precomputed keys (the last two closure arguments).
minmax::minmax_impl(self, key, |_, _, xk, yk| xk < yk)
}
/// Return the minimum and maximum element of an iterator, as determined by
/// the specified comparison function.
///
/// The return value is a variant of `MinMaxResult` like for `minmax()`.
///
/// For the minimum, the first minimal element is returned. For the maximum,
/// the last maximal element wins. This matches the behavior of the standard
/// `Iterator::min()` and `Iterator::max()` methods.
fn minmax_by<F>(self, mut compare: F) -> MinMaxResult<Self::Item>
where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering
{
// Adapt the `Ordering`-returning comparator into the boolean
// "less than" predicate that `minmax_impl` expects.
minmax::minmax_impl(
self,
|_| (),
|x, y, _, _| Ordering::Less == compare(x, y)
)
}
/// Return the position of the maximum element in the iterator.
///
/// If several elements are equally maximum, the position of the
/// last of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_max(), None);
///
/// let a = [-3, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_max(), Some(3));
///
/// let a = [1, 1, -1, -1];
/// assert_eq!(a.iter().position_max(), Some(1));
/// ```
fn position_max(self) -> Option<usize>
    where Self: Sized, Self::Item: Ord
{
    // Pair each element with its index, select the maximum by the element
    // alone (`max_by` keeps the last of equal elements), then drop the
    // element and keep only the index.
    self.enumerate()
        .max_by(|(_, a), (_, b)| a.cmp(b))
        .map(|(pos, _)| pos)
}
/// Return the position of the maximum element in the iterator, as
/// determined by the specified function.
///
/// If several elements are equally maximum, the position of the
/// last of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), None);
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), Some(4));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_max_by_key(|x| x.abs()), Some(3));
/// ```
fn position_max_by_key<K, F>(self, mut key: F) -> Option<usize>
    where Self: Sized, K: Ord, F: FnMut(&Self::Item) -> K
{
    // Compare by the derived keys; `max_by` keeps the last of equal
    // elements, then the winning index is extracted.
    self.enumerate()
        .max_by(|(_, a), (_, b)| key(a).cmp(&key(b)))
        .map(|(pos, _)| pos)
}
/// Return the position of the maximum element in the iterator, as
/// determined by the specified comparison function.
///
/// If several elements are equally maximum, the position of the
/// last of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), None);
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), Some(3));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_max_by(|x, y| x.cmp(y)), Some(1));
/// ```
fn position_max_by<F>(self, mut compare: F) -> Option<usize>
    where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering
{
    // Apply the caller's comparator to the elements only, ignoring the
    // enumeration indices, then return the index of the winner.
    self.enumerate()
        .max_by(|(_, a), (_, b)| compare(a, b))
        .map(|(pos, _)| pos)
}
/// Return the position of the minimum element in the iterator.
///
/// If several elements are equally minimum, the position of the
/// first of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_min(), None);
///
/// let a = [-3, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_min(), Some(4));
///
/// let a = [1, 1, -1, -1];
/// assert_eq!(a.iter().position_min(), Some(2));
/// ```
fn position_min(self) -> Option<usize>
    where Self: Sized, Self::Item: Ord
{
    // Pair each element with its index, select the minimum by the element
    // alone (`min_by` keeps the first of equal elements), then keep only
    // the index.
    self.enumerate()
        .min_by(|(_, a), (_, b)| a.cmp(b))
        .map(|(pos, _)| pos)
}
/// Return the position of the minimum element in the iterator, as
/// determined by the specified function.
///
/// If several elements are equally minimum, the position of the
/// first of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), None);
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), Some(1));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_min_by_key(|x| x.abs()), Some(0));
/// ```
fn position_min_by_key<K, F>(self, mut key: F) -> Option<usize>
    where Self: Sized, K: Ord, F: FnMut(&Self::Item) -> K
{
    // Compare by the derived keys; `min_by` keeps the first of equal
    // elements, then the winning index is extracted.
    self.enumerate()
        .min_by(|(_, a), (_, b)| key(a).cmp(&key(b)))
        .map(|(pos, _)| pos)
}
/// Return the position of the minimum element in the iterator, as
/// determined by the specified comparison function.
///
/// If several elements are equally minimum, the position of the
/// first of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), None);
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), Some(4));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_min_by(|x, y| x.cmp(y)), Some(2));
/// ```
fn position_min_by<F>(self, mut compare: F) -> Option<usize>
    where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering
{
    // Apply the caller's comparator to the elements only, ignoring the
    // enumeration indices, then return the index of the winner.
    self.enumerate()
        .min_by(|(_, a), (_, b)| compare(a, b))
        .map(|(pos, _)| pos)
}
/// Return the positions of the minimum and maximum elements in
/// the iterator.
///
/// The return type [`MinMaxResult`] is an enum of three variants:
///
/// - `NoElements` if the iterator is empty.
/// - `OneElement(xpos)` if the iterator has exactly one element.
/// - `MinMax(xpos, ypos)` is returned otherwise, where the
/// element at `xpos` ≤ the element at `ypos`. While the
/// referenced elements themselves may be equal, `xpos` cannot
/// be equal to `ypos`.
///
/// On an iterator of length `n`, `position_minmax` does `1.5 * n`
/// comparisons, and so is faster than calling `position_min` and
/// `position_max` separately which does `2 * n` comparisons.
///
/// For the minimum, if several elements are equally minimum, the
/// position of the first of them is returned. For the maximum, if
/// several elements are equally maximum, the position of the last
/// of them is returned.
///
/// The elements can be floats but no particular result is
/// guaranteed if an element is NaN.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
/// use itertools::MinMaxResult::{NoElements, OneElement, MinMax};
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_minmax(), NoElements);
///
/// let a = [10];
/// assert_eq!(a.iter().position_minmax(), OneElement(0));
///
/// let a = [-3, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_minmax(), MinMax(4, 3));
///
/// let a = [1, 1, -1, -1];
/// assert_eq!(a.iter().position_minmax(), MinMax(2, 1));
/// ```
fn position_minmax(self) -> MinMaxResult<usize>
where Self: Sized, Self::Item: PartialOrd
{
use crate::MinMaxResult::{NoElements, OneElement, MinMax};
// Track positions via `enumerate`, compare only the items (`.1`), then
// project the winning `(index, item)` pairs down to bare indices.
match minmax::minmax_impl(self.enumerate(), |_| (), |x, y, _, _| x.1 < y.1) {
NoElements => NoElements,
OneElement(x) => OneElement(x.0),
MinMax(x, y) => MinMax(x.0, y.0),
}
}
/// Return the positions of the minimum and maximum elements of an
/// iterator, as determined by the specified function.
///
/// The return value is a variant of [`MinMaxResult`] like for
/// [`position_minmax`].
///
/// For the minimum, if several elements are equally minimum, the
/// position of the first of them is returned. For the maximum, if
/// several elements are equally maximum, the position of the last
/// of them is returned.
///
/// The keys can be floats but no particular result is guaranteed
/// if a key is NaN.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
/// use itertools::MinMaxResult::{NoElements, OneElement, MinMax};
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), NoElements);
///
/// let a = [10_i32];
/// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), OneElement(0));
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), MinMax(1, 4));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_minmax_by_key(|x| x.abs()), MinMax(0, 3));
/// ```
///
/// [`position_minmax`]: Self::position_minmax
fn position_minmax_by_key<K, F>(self, mut key: F) -> MinMaxResult<usize>
where Self: Sized, K: PartialOrd, F: FnMut(&Self::Item) -> K
{
use crate::MinMaxResult::{NoElements, OneElement, MinMax};
// Key on the item half of each `(index, item)` pair, then drop the items.
match self.enumerate().minmax_by_key(|e| key(&e.1)) {
NoElements => NoElements,
OneElement(x) => OneElement(x.0),
MinMax(x, y) => MinMax(x.0, y.0),
}
}
/// Return the positions of the minimum and maximum elements of an
/// iterator, as determined by the specified comparison function.
///
/// The return value is a variant of [`MinMaxResult`] like for
/// [`position_minmax`].
///
/// For the minimum, if several elements are equally minimum, the
/// position of the first of them is returned. For the maximum, if
/// several elements are equally maximum, the position of the last
/// of them is returned.
///
/// # Examples
///
/// ```
/// use itertools::Itertools;
/// use itertools::MinMaxResult::{NoElements, OneElement, MinMax};
///
/// let a: [i32; 0] = [];
/// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), NoElements);
///
/// let a = [10_i32];
/// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), OneElement(0));
///
/// let a = [-3_i32, 0, 1, 5, -10];
/// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), MinMax(4, 3));
///
/// let a = [1_i32, 1, -1, -1];
/// assert_eq!(a.iter().position_minmax_by(|x, y| x.cmp(y)), MinMax(2, 1));
/// ```
///
/// [`position_minmax`]: Self::position_minmax
fn position_minmax_by<F>(self, mut compare: F) -> MinMaxResult<usize>
where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering
{
use crate::MinMaxResult::{NoElements, OneElement, MinMax};
// Compare only the item half of each `(index, item)` pair, then drop the items.
match self.enumerate().minmax_by(|x, y| compare(&x.1, &y.1)) {
NoElements => NoElements,
OneElement(x) => OneElement(x.0),
MinMax(x, y) => MinMax(x.0, y.0),
}
}
/// If the iterator yields exactly one element, that element will be returned, otherwise
/// an error will be returned containing an iterator that has the same output as the input
/// iterator.
///
/// This provides an additional layer of validation over just calling `Iterator::next()`.
/// If your assumption that there should only be one element yielded is false this provides
/// the opportunity to detect and handle that, preventing errors at a distance.
///
/// # Examples
/// ```
/// use itertools::Itertools;
///
/// assert_eq!((0..10).filter(|&x| x == 2).exactly_one().unwrap(), 2);
/// assert!((0..10).filter(|&x| x > 1 && x < 4).exactly_one().unwrap_err().eq(2..4));
/// assert!((0..10).filter(|&x| x > 1 && x < 5).exactly_one().unwrap_err().eq(2..5));
/// assert!((0..10).filter(|&_| false).exactly_one().unwrap_err().eq(0..0));
/// ```
fn exactly_one(mut self) -> Result<Self::Item, ExactlyOneError<Self>>
    where
    Self: Sized,
{
    // An empty iterator is an immediate error carrying no buffered items.
    let first = match self.next() {
        Some(element) => element,
        None => return Err(ExactlyOneError::new(None, self)),
    };
    // A second element means "more than one": buffer both consumed items
    // so the error iterator can replay the full original sequence.
    match self.next() {
        Some(second) => Err(ExactlyOneError::new(
            Some(Either::Left([first, second])),
            self,
        )),
        None => Ok(first),
    }
}
/// An iterator adaptor that allows the user to peek at multiple `.next()`
/// values without advancing the base iterator.
///
/// Successive `.peek()` calls look further and further ahead; `.next()`
/// resets the peek cursor (see the example).
///
/// # Examples
/// ```
/// use itertools::Itertools;
///
/// let mut iter = (0..10).multipeek();
/// assert_eq!(iter.peek(), Some(&0));
/// assert_eq!(iter.peek(), Some(&1));
/// assert_eq!(iter.peek(), Some(&2));
/// assert_eq!(iter.next(), Some(0));
/// assert_eq!(iter.peek(), Some(&1));
/// ```
#[cfg(feature = "use_alloc")]
fn multipeek(self) -> MultiPeek<Self>
where
Self: Sized,
{
// Peeked items must be buffered, hence the `use_alloc` feature gate.
multipeek_impl::multipeek(self)
}
/// Collect the items in this iterator and return a `HashMap` which
/// contains each item that appears in the iterator and the number
/// of times it appears.
///
/// # Examples
/// ```
/// # use itertools::Itertools;
/// let counts = [1, 1, 1, 3, 3, 5].into_iter().counts();
/// assert_eq!(counts[&1], 3);
/// assert_eq!(counts[&3], 2);
/// assert_eq!(counts[&5], 1);
/// assert_eq!(counts.get(&0), None);
/// ```
#[cfg(feature = "use_std")]
fn counts(self) -> HashMap<Self::Item, usize>
    where
    Self: Sized,
    Self::Item: Eq + Hash,
{
    // Fold every item into an occurrence map: absent keys start at zero,
    // and each sighting bumps the tally.
    self.fold(HashMap::new(), |mut counts, item| {
        *counts.entry(item).or_insert(0) += 1;
        counts
    })
}
/// Collect the items in this iterator and return a `HashMap` which
/// contains each item that appears in the iterator and the number
/// of times it appears,
/// determining identity using a keying function.
///
/// # Examples
/// ```
/// # use itertools::Itertools;
/// # use std::collections::HashMap;
/// let counts: HashMap<usize, usize> = vec![
/// (1, "foo"), (1, "bar"), (1, "baz"),
/// (3, "spam"), (3, "eggs"), (5, "foo")
/// ].into_iter().counts_by(|(fst, _snd)| fst);
/// assert_eq!(counts[&1], 3);
/// assert_eq!(counts[&3], 2);
/// assert_eq!(counts[&5], 1);
/// assert_eq!(counts.get(&0), None);
/// ```
#[cfg(feature = "use_std")]
fn counts_by<K, F>(self, f: F) -> HashMap<K, usize>
    where
    Self: Sized,
    K: Eq + Hash,
    F: FnMut(Self::Item) -> K,
{
    // Project each item to its key, then count key occurrences.
    self.map(f).counts()
}
}
/// Blanket implementation: every `Iterator` (sized or not) automatically
/// gains the `Itertools` extension methods.
impl<T: ?Sized> Itertools for T where T: Iterator { }
/// Return `true` if both iterables produce equal sequences
/// (elements pairwise equal and sequences of the same length),
/// `false` otherwise.
///
/// This is an `IntoIterator` enabled function that is similar to the standard
/// library method `Iterator::eq`.
///
/// ```
/// assert!(itertools::equal(vec![1, 2, 3], 1..4));
/// assert!(!itertools::equal(&[0, 0], &[0, 0, 0]));
/// ```
pub fn equal<I, J>(a: I, b: J) -> bool
    where I: IntoIterator,
          J: IntoIterator,
          I::Item: PartialEq<J::Item>
{
    let mut ia = a.into_iter();
    let mut ib = b.into_iter();
    loop {
        // Advance both sides in lockstep; a mismatch in value or in length
        // settles the answer immediately.
        match (ia.next(), ib.next()) {
            (None, None) => return true,
            (Some(x), Some(y)) => {
                if x != y {
                    return false;
                }
            }
            _ => return false,
        }
    }
}
/// Assert that two iterables produce equal sequences, with the same
/// semantics as *equal(a, b)*.
///
/// **Panics** on assertion failure with a message that shows the
/// two iteration elements.
///
/// ```ignore
/// assert_equal("exceed".split('c'), "excess".split('c'));
/// // ^PANIC: panicked at 'Failed assertion Some("eed") == Some("ess") for iteration 1',
/// ```
pub fn assert_equal<I, J>(a: I, b: J)
    where I: IntoIterator,
          J: IntoIterator,
          I::Item: fmt::Debug + PartialEq<J::Item>,
          J::Item: fmt::Debug,
{
    let mut ia = a.into_iter();
    let mut ib = b.into_iter();
    // Walk both iterators in lockstep, panicking on the first mismatch.
    for i in 0.. {
        let (a, b) = (ia.next(), ib.next());
        if a.is_none() && b.is_none() {
            // Both exhausted simultaneously: the sequences are equal.
            return;
        }
        let equal = matches!((&a, &b), (Some(x), Some(y)) if x == y);
        assert!(equal, "Failed assertion {a:?} == {b:?} for iteration {i}",
                i=i, a=a, b=b);
    }
}
/// Partition a sequence using predicate `pred` so that elements
/// that map to `true` are placed before elements which map to `false`.
///
/// The order within the partitions is arbitrary.
///
/// Return the index of the split point.
///
/// ```
/// use itertools::partition;
///
/// # // use repeated numbers to not promise any ordering
/// let mut data = [7, 1, 1, 7, 1, 1, 7];
/// let split_index = partition(&mut data, |elt| *elt >= 3);
///
/// assert_eq!(data, [7, 7, 7, 1, 1, 1, 1]);
/// assert_eq!(split_index, 3);
/// ```
pub fn partition<'a, A: 'a, I, F>(iter: I, mut pred: F) -> usize
    where I: IntoIterator<Item = &'a mut A>,
          I::IntoIter: DoubleEndedIterator,
          F: FnMut(&A) -> bool
{
    let mut iter = iter.into_iter();
    let mut split_index = 0;
    // Two-pointer scheme: take elements from the front; whenever a front
    // element fails the predicate, search from the back for one that
    // passes and swap the pair.
    while let Some(front) = iter.next() {
        if !pred(front) {
            let swapped = loop {
                match iter.next_back() {
                    None => break false,
                    Some(back) if pred(back) => {
                        std::mem::swap(front, back);
                        break true;
                    }
                    Some(_) => {} // back element also fails; keep scanning
                }
            };
            if !swapped {
                // Back scan met the front cursor: everything is placed.
                return split_index;
            }
        }
        split_index += 1;
    }
    split_index
}
/// An enum used for controlling the execution of `.fold_while()`.
///
/// See [`.fold_while()`](crate::Itertools::fold_while) for more information.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FoldWhile<T> {
    /// Continue folding with this value
    Continue(T),
    /// Fold is complete and will return this value
    Done(T),
}

impl<T> FoldWhile<T> {
    /// Unwrap the inner value regardless of variant.
    pub fn into_inner(self) -> T {
        match self {
            FoldWhile::Continue(inner) | FoldWhile::Done(inner) => inner,
        }
    }

    /// Return true if `self` is `Done`, false if it is `Continue`.
    pub fn is_done(&self) -> bool {
        matches!(self, FoldWhile::Done(_))
    }
}
|
use rusqlite::Connection;
use rusqlite::Error;
/// One row of the `FS_DIR` table (see `table_create_fs_dir`).
#[derive(Debug)]
pub struct FsDir {
// `id INTEGER PRIMARY KEY ASC`; callers use 0 as a placeholder before insert.
pub id: i32,
// Numeric type tag. Named like a foreign key, but this schema variant
// declares no FOREIGN KEY constraint -- NOTE(review): confirm intent.
pub fk_type: i32,
// Directory name; declared `TEXT NOT NULL UNIQUE` in the schema.
pub name: String,
}
/// Create the `FS_DIR` table on `conn`.
///
/// Panics (via `unwrap`) if the statement fails, e.g. when the table
/// already exists. Returns the same connection to allow call chaining.
pub fn table_create_fs_dir(conn: &Connection) -> &Connection {
conn.execute("CREATE TABLE FS_DIR (
id INTEGER PRIMARY KEY ASC,
fk_type INTEGER,
name TEXT NOT NULL UNIQUE
)", &[]).unwrap();
return conn;
}
pub fn insert_fs_dir(conn: &Connection, fk_type: i32, name: String) {
let fs_dir = FsDir {
id: 0,
name: name,
fk_type: fk_type,
};
let dir_instance = conn.execute("INSERT INTO FS_DIR (fk_type, name)
VALUES (?1, ?2)",
&[&fs_dir.fk_type, &fs_dir.name]);
if dir_instance.is_err() {
return;
}
dir_instance.unwrap();
}
/// Return every row of `FS_DIR`, or an empty vector if the query fails.
///
/// Panics if the SELECT cannot be prepared, or if an individual row
/// fails to convert (both via `unwrap`, as before).
pub fn list_fs_dir(conn: &Connection) -> Vec<FsDir> {
    let mut stmt = conn.prepare("SELECT id, fk_type, name FROM FS_DIR").unwrap();
    let rows = stmt.query_map(&[], |row| FsDir {
        id: row.get(0),
        fk_type: row.get(1),
        name: row.get(2),
    });
    // A failed query yields no rows; per-row errors still panic, exactly
    // as in the original loop.
    match rows {
        Ok(iter) => iter.map(|row| row.unwrap()).collect(),
        Err(_) => Vec::new(),
    }
}
/// Return the rows of `FS_DIR` whose `name` equals the (currently
/// hard-coded) filter value, or an empty vector on query failure.
pub fn list_fs_dir_filter(conn: &Connection) -> Vec<FsDir> {
    let filter = "elephant".to_string();
    // BUG FIX: the query previously read `WHERE name = elephant`, which
    // SQLite parses as a reference to a (nonexistent) column -- so the
    // statement could not be prepared -- and the bound parameter was never
    // used. Bind `filter` through the `?1` placeholder instead.
    let mut stmt = conn
        .prepare("SELECT id, fk_type, name FROM FS_DIR WHERE name = ?1")
        .unwrap();
    let rows = stmt.query_map(&[&filter], |row| FsDir {
        id: row.get(0),
        fk_type: row.get(1),
        name: row.get(2),
    });
    match rows {
        Ok(iter) => iter.map(|row| row.unwrap()).collect(),
        Err(_) => Vec::new(),
    }
}
Add a FOREIGN KEY constraint on FS_DIR.fk_type referencing FS_DIR_TYPE(id)
Signed-off-by: Owen Synge <d1d4fd964cef7d0e7c6a7c6237ad92c9cf2de236@jaysnest.de>
use rusqlite::Connection;
use rusqlite::Error;
/// One row of the `FS_DIR` table (see `table_create_fs_dir`).
#[derive(Debug)]
pub struct FsDir {
// `id INTEGER PRIMARY KEY ASC`; callers use 0 as a placeholder before insert.
pub id: i32,
// Foreign key into `FS_DIR_TYPE(id)` (declared in this schema variant).
pub fk_type: i32,
// Directory name; declared `TEXT NOT NULL UNIQUE` in the schema.
pub name: String,
}
/// Create the `FS_DIR` table on `conn`, with a foreign key into
/// `FS_DIR_TYPE`.
///
/// Panics (via `unwrap`) if the statement fails, e.g. when the table
/// already exists. Returns the same connection to allow call chaining.
///
/// NOTE(review): SQLite enforces FOREIGN KEY constraints only when
/// `PRAGMA foreign_keys = ON` is set on the connection -- confirm the
/// caller enables it.
pub fn table_create_fs_dir(conn: &Connection) -> &Connection {
conn.execute("CREATE TABLE FS_DIR (
id INTEGER PRIMARY KEY ASC,
fk_type INTEGER NOT NULL,
name TEXT NOT NULL UNIQUE,
FOREIGN KEY(fk_type) REFERENCES FS_DIR_TYPE(id) ON UPDATE CASCADE
)", &[]).unwrap();
return conn;
}
pub fn insert_fs_dir(conn: &Connection, fk_type: i32, name: String) {
let fs_dir = FsDir {
id: 0,
name: name,
fk_type: fk_type,
};
let dir_instance = conn.execute("INSERT INTO FS_DIR (fk_type, name)
VALUES (?1, ?2)",
&[&fs_dir.fk_type, &fs_dir.name]);
if dir_instance.is_err() {
return;
}
dir_instance.unwrap();
}
/// Return every row of `FS_DIR`, or an empty vector if the query fails.
///
/// Panics if the SELECT cannot be prepared, or if an individual row
/// fails to convert (both via `unwrap`, as before).
pub fn list_fs_dir(conn: &Connection) -> Vec<FsDir> {
    let mut stmt = conn.prepare("SELECT id, fk_type, name FROM FS_DIR").unwrap();
    let rows = stmt.query_map(&[], |row| FsDir {
        id: row.get(0),
        fk_type: row.get(1),
        name: row.get(2),
    });
    // A failed query yields no rows; per-row errors still panic, exactly
    // as in the original loop.
    match rows {
        Ok(iter) => iter.map(|row| row.unwrap()).collect(),
        Err(_) => Vec::new(),
    }
}
/// Return the rows of `FS_DIR` whose `name` equals the (currently
/// hard-coded) filter value, or an empty vector on query failure.
pub fn list_fs_dir_filter(conn: &Connection) -> Vec<FsDir> {
    let filter = "elephant".to_string();
    // BUG FIX: the query previously read `WHERE name = elephant`, which
    // SQLite parses as a reference to a (nonexistent) column -- so the
    // statement could not be prepared -- and the bound parameter was never
    // used. Bind `filter` through the `?1` placeholder instead.
    let mut stmt = conn
        .prepare("SELECT id, fk_type, name FROM FS_DIR WHERE name = ?1")
        .unwrap();
    let rows = stmt.query_map(&[&filter], |row| FsDir {
        id: row.get(0),
        fk_type: row.get(1),
        name: row.get(2),
    });
    match rows {
        Ok(iter) => iter.map(|row| row.unwrap()).collect(),
        Err(_) => Vec::new(),
    }
}
|
use std::io;
use std::io::Read;
use serde_json;
use jsontypes::RawSourceMap;
use types::{RawToken, SourceMap, SourceMapIndex, SourceMapSection};
use errors::{Result, Error};
// Base64 decode table indexed by ASCII byte value (0..=122): each of the 64
// alphabet characters ('A'-'Z', 'a'-'z', '0'-'9', '+', '/') maps to its
// 6-bit value; every other byte maps to -1.
// NOTE(review): indexing with a byte > 122 panics; `parse_vlq_segment`
// relies on input being base64 text.
const B64: [i8; 123] = [
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1,
-1, -1, -1, -1, -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51
];
// State machine for skipping a junk header line (e.g. an XSSI guard such
// as `)]}'`) that some servers prepend to JSON responses.
#[derive(PartialEq)]
enum HeaderState {
// No byte inspected yet; junk-or-data decision still pending.
Undecided,
// Currently consuming junk header bytes up to end-of-line.
Junk,
// Saw '\r' inside the junk header; a '\n' must follow.
AwaitingNewline,
// Header fully skipped (or absent); reads pass straight through.
PastHeader,
}
/// A `Read` adapter that strips one leading junk header line (if present)
/// before handing the remaining bytes through to the consumer.
pub struct StripHeaderReader<R: Read> {
// Underlying reader.
r: R,
// Progress of the header-skipping state machine.
header_state: HeaderState,
}
impl<R: Read> StripHeaderReader<R> {
    /// Wrap `reader`; header detection starts in the `Undecided` state.
    pub fn new(reader: R) -> StripHeaderReader<R> {
        let header_state = HeaderState::Undecided;
        StripHeaderReader { r: reader, header_state }
    }
}
/// True for bytes that start a junk/XSSI header (`)`, `]`, `}` or `'`)
/// rather than real JSON payload.
fn is_junk_json(byte: u8) -> bool {
    matches!(byte, b')' | b']' | b'}' | b'\'')
}
impl<R: Read> Read for StripHeaderReader<R> {
// Forward reads from the inner reader, consuming (and discarding) one junk
// header line the first time data flows through.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.header_state == HeaderState::PastHeader {
// Fast path: header already handled, plain pass-through.
return self.r.read(buf);
}
// Stage bytes in a scratch buffer so junk bytes never reach `buf`.
let mut backing = vec![0; buf.len()];
let mut local_buf : &mut [u8] = &mut *backing;
loop {
let read = try!(self.r.read(local_buf));
if read == 0 {
// EOF (possibly while still inside the header line).
return Ok(0);
}
for (offset, &byte) in local_buf[0..read].iter().enumerate() {
self.header_state = match self.header_state {
HeaderState::Undecided => {
if is_junk_json(byte) {
HeaderState::Junk
} else {
// First byte is real payload; `offset` is 0 here, so the
// whole chunk can be forwarded verbatim.
(&mut buf[..read]).copy_from_slice(&local_buf[..read]);
self.header_state = HeaderState::PastHeader;
return Ok(read);
}
},
HeaderState::Junk => {
// Swallow junk bytes until an end-of-line marker.
if byte == b'\r' {
HeaderState::AwaitingNewline
} else if byte == b'\n' {
HeaderState::PastHeader
} else {
HeaderState::Junk
}
},
HeaderState::AwaitingNewline => {
if byte == b'\n' {
HeaderState::PastHeader
} else {
// A lone '\r' not followed by '\n' is malformed input.
fail!(io::Error::new(io::ErrorKind::InvalidData,
"expected newline"));
}
},
HeaderState::PastHeader => {
// Header ended mid-chunk: hand over the remaining bytes.
let rem = read - offset;
(&mut buf[..rem]).copy_from_slice(&local_buf[offset..read]);
return Ok(rem);
}
};
}
}
}
}
/// Decode one comma-separated sourcemap VLQ segment into its numbers.
///
/// Each base64 character carries 5 payload bits plus a continuation bit;
/// once a value is complete, its lowest bit encodes the sign.
pub fn parse_vlq_segment(segment: &str) -> Result<Vec<i64>> {
    let mut values = vec![];
    let mut cur: i64 = 0;
    let mut shift = 0;
    for c in segment.bytes() {
        let enc = B64[c as usize] as i64;
        // Low 5 bits are payload; bit 5 is the continuation flag.
        cur += (enc & 0b11111) << shift;
        shift += 5;
        if enc >> 5 == 0 {
            // Value complete: strip the sign bit and apply it.
            let sign = cur & 1;
            cur >>= 1;
            if sign != 0 {
                cur = -cur;
            }
            values.push(cur);
            cur = 0;
            shift = 0;
        }
    }
    if cur != 0 || shift != 0 {
        // A continuation bit promised more characters that never came.
        Err(Error::VlqLeftover)
    } else if values.is_empty() {
        Err(Error::VlqNoValues)
    } else {
        Ok(values)
    }
}
/// Represents the result of a decode operation
///
/// `decode` yields `Index` when the raw JSON carries a `sections` field
/// and `Regular` otherwise.
pub enum DecodedMap {
/// Indicates a regular sourcemap
Regular(SourceMap),
/// Indicates a sourcemap index
Index(SourceMapIndex),
}
/// Decode the raw JSON representation of a regular (non-index) sourcemap.
///
/// `mappings` is a `;`-separated list of generated lines, each a
/// `,`-separated list of base64-VLQ segments. All fields except the
/// destination column carry deltas relative to the previous token, so
/// running totals are kept across segments.
fn decode_regular(rsm: RawSourceMap) -> Result<SourceMap> {
    let mut dst_col;
    let mut src_id;
    // Running totals for the delta-encoded fields.
    let mut src_line = 0;
    let mut src_col = 0;
    let mut name_id = 0;
    let mut tokens = vec![];
    let mut index = vec![];
    for (dst_line, line) in rsm.mappings.split(';').enumerate() {
        let mut line_index = vec![];
        dst_col = 0; // destination column resets on every generated line
        for segment in line.split(',') {
            if segment.len() == 0 {
                continue;
            }
            let nums = try!(parse_vlq_segment(segment));
            dst_col = (dst_col as i64 + nums[0]) as u32;
            let mut src = !0;
            let mut name = !0;
            if nums.len() > 1 {
                // Valid segment sizes: 1 (column only), 4, or 5 (+name).
                if nums.len() != 4 && nums.len() != 5 {
                    fail!(Error::BadSegmentSize(nums.len() as u32));
                }
                src_id = nums[1] as u32;
                if src_id >= rsm.sources.len() as u32 {
                    fail!(Error::BadSourceReference(src_id));
                }
                src = src_id;
                src_line = (src_line as i64 + nums[2]) as u32;
                src_col = (src_col as i64 + nums[3]) as u32;
                if nums.len() > 4 {
                    name_id = (name_id as i64 + nums[4]) as u32;
                    if name_id >= rsm.names.len() as u32 {
                        fail!(Error::BadNameReference(name_id));
                    }
                    name = name_id as u32;
                }
            }
            tokens.push(RawToken {
                dst_line: dst_line as u32,
                dst_col: dst_col,
                src_line: src_line,
                src_col: src_col,
                src_id: src,
                name_id: name,
            });
            line_index.push((dst_col, (tokens.len() - 1) as u32));
        }
        // Entries must be ordered by column within a generated line.
        line_index.sort();
        for (dst_col, token_id) in line_index {
            index.push((dst_line as u32, dst_col, token_id));
        }
    }
    let mut sources = rsm.sources;
    if let Some(source_root) = rsm.source_root {
        // BUG FIX: an empty source_root must not be joined, or every source
        // would gain a bogus leading slash.
        if !source_root.is_empty() {
            let source_root = source_root.trim_right_matches('/');
            sources = sources.into_iter().map(|x| {
                // BUG FIX: use starts_with instead of fixed-length byte
                // slices (`x.as_bytes()[..5]`), which panicked whenever a
                // source path was shorter than 5 bytes.
                if x.starts_with('/') || x.starts_with("http:") || x.starts_with("https:") {
                    x
                } else {
                    format!("{}/{}", source_root, x)
                }
            }).collect();
        }
    }
    Ok(SourceMap::new(
        rsm.version, rsm.file, tokens, index, rsm.names, sources))
}
/// Decode the `sections` of a raw index sourcemap.
fn decode_index(rsm: RawSourceMap) -> Result<SourceMapIndex> {
    let mut sections = vec![];
    for mut raw_section in rsm.sections.unwrap_or(vec![]) {
        // Decode the embedded map (if any) before assembling the section.
        let map = match raw_section.map.take() {
            Some(map) => Some(try!(decode_regular(*map))),
            None => None,
        };
        sections.push(SourceMapSection::new(
            (raw_section.offset.line, raw_section.offset.column),
            raw_section.url,
            map,
        ));
    }
    Ok(SourceMapIndex::new(rsm.version, rsm.file, sections))
}
/// Decodes a sourcemap or sourcemap index from a reader
///
/// This supports both sourcemaps and sourcemap indexes unless the
/// specialized methods on the individual types.
pub fn decode<R: Read>(rdr: R) -> Result<DecodedMap> {
    // Strip any junk/XSSI header line before handing the stream to the
    // JSON parser.
    let mut rdr = StripHeaderReader::new(rdr);
    let rsm: RawSourceMap = try!(serde_json::from_reader(&mut rdr));
    // The presence of a `sections` field distinguishes an index map from
    // a regular one.
    if rsm.sections.is_some() {
        Ok(DecodedMap::Index(try!(decode_index(rsm))))
    } else {
        Ok(DecodedMap::Regular(try!(decode_regular(rsm))))
    }
}
Fixed an issue with joining onto an empty sourcemap
use std::io;
use std::io::Read;
use serde_json;
use jsontypes::RawSourceMap;
use types::{RawToken, SourceMap, SourceMapIndex, SourceMapSection};
use errors::{Result, Error};
// Base64 decode table indexed by ASCII byte value (0..=122): each of the 64
// alphabet characters ('A'-'Z', 'a'-'z', '0'-'9', '+', '/') maps to its
// 6-bit value; every other byte maps to -1.
// NOTE(review): indexing with a byte > 122 panics; `parse_vlq_segment`
// relies on input being base64 text.
const B64: [i8; 123] = [
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1,
-1, -1, -1, -1, -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51
];
// State machine for skipping a junk header line (e.g. an XSSI guard such
// as `)]}'`) that some servers prepend to JSON responses.
#[derive(PartialEq)]
enum HeaderState {
// No byte inspected yet; junk-or-data decision still pending.
Undecided,
// Currently consuming junk header bytes up to end-of-line.
Junk,
// Saw '\r' inside the junk header; a '\n' must follow.
AwaitingNewline,
// Header fully skipped (or absent); reads pass straight through.
PastHeader,
}
/// A `Read` adapter that strips one leading junk header line (if present)
/// before handing the remaining bytes through to the consumer.
pub struct StripHeaderReader<R: Read> {
// Underlying reader.
r: R,
// Progress of the header-skipping state machine.
header_state: HeaderState,
}
impl<R: Read> StripHeaderReader<R> {
    /// Wrap `reader`; header detection starts in the `Undecided` state.
    pub fn new(reader: R) -> StripHeaderReader<R> {
        let header_state = HeaderState::Undecided;
        StripHeaderReader { r: reader, header_state }
    }
}
/// True for bytes that start a junk/XSSI header (`)`, `]`, `}` or `'`)
/// rather than real JSON payload.
fn is_junk_json(byte: u8) -> bool {
    matches!(byte, b')' | b']' | b'}' | b'\'')
}
impl<R: Read> Read for StripHeaderReader<R> {
// Forward reads from the inner reader, consuming (and discarding) one junk
// header line the first time data flows through.
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.header_state == HeaderState::PastHeader {
// Fast path: header already handled, plain pass-through.
return self.r.read(buf);
}
// Stage bytes in a scratch buffer so junk bytes never reach `buf`.
let mut backing = vec![0; buf.len()];
let mut local_buf : &mut [u8] = &mut *backing;
loop {
let read = try!(self.r.read(local_buf));
if read == 0 {
// EOF (possibly while still inside the header line).
return Ok(0);
}
for (offset, &byte) in local_buf[0..read].iter().enumerate() {
self.header_state = match self.header_state {
HeaderState::Undecided => {
if is_junk_json(byte) {
HeaderState::Junk
} else {
// First byte is real payload; `offset` is 0 here, so the
// whole chunk can be forwarded verbatim.
(&mut buf[..read]).copy_from_slice(&local_buf[..read]);
self.header_state = HeaderState::PastHeader;
return Ok(read);
}
},
HeaderState::Junk => {
// Swallow junk bytes until an end-of-line marker.
if byte == b'\r' {
HeaderState::AwaitingNewline
} else if byte == b'\n' {
HeaderState::PastHeader
} else {
HeaderState::Junk
}
},
HeaderState::AwaitingNewline => {
if byte == b'\n' {
HeaderState::PastHeader
} else {
// A lone '\r' not followed by '\n' is malformed input.
fail!(io::Error::new(io::ErrorKind::InvalidData,
"expected newline"));
}
},
HeaderState::PastHeader => {
// Header ended mid-chunk: hand over the remaining bytes.
let rem = read - offset;
(&mut buf[..rem]).copy_from_slice(&local_buf[offset..read]);
return Ok(rem);
}
};
}
}
}
}
/// Decode one comma-separated sourcemap VLQ segment into its numbers.
///
/// Each base64 character carries 5 payload bits plus a continuation bit;
/// once a value is complete, its lowest bit encodes the sign.
pub fn parse_vlq_segment(segment: &str) -> Result<Vec<i64>> {
    let mut values = vec![];
    let mut cur: i64 = 0;
    let mut shift = 0;
    for c in segment.bytes() {
        let enc = B64[c as usize] as i64;
        // Low 5 bits are payload; bit 5 is the continuation flag.
        cur += (enc & 0b11111) << shift;
        shift += 5;
        if enc >> 5 == 0 {
            // Value complete: strip the sign bit and apply it.
            let sign = cur & 1;
            cur >>= 1;
            if sign != 0 {
                cur = -cur;
            }
            values.push(cur);
            cur = 0;
            shift = 0;
        }
    }
    if cur != 0 || shift != 0 {
        // A continuation bit promised more characters that never came.
        Err(Error::VlqLeftover)
    } else if values.is_empty() {
        Err(Error::VlqNoValues)
    } else {
        Ok(values)
    }
}
/// Represents the result of a decode operation
///
/// `decode` yields `Index` when the raw JSON carries a `sections` field
/// and `Regular` otherwise.
pub enum DecodedMap {
/// Indicates a regular sourcemap
Regular(SourceMap),
/// Indicates a sourcemap index
Index(SourceMapIndex),
}
/// Decode the raw JSON representation of a regular (non-index) sourcemap.
///
/// `mappings` is a `;`-separated list of generated lines, each a
/// `,`-separated list of base64-VLQ segments. All fields except the
/// destination column carry deltas relative to the previous token, so
/// running totals are kept across segments.
fn decode_regular(rsm: RawSourceMap) -> Result<SourceMap> {
    let mut dst_col;
    let mut src_id;
    // Running totals for the delta-encoded fields.
    let mut src_line = 0;
    let mut src_col = 0;
    let mut name_id = 0;
    let mut tokens = vec![];
    let mut index = vec![];
    for (dst_line, line) in rsm.mappings.split(';').enumerate() {
        let mut line_index = vec![];
        dst_col = 0; // destination column resets on every generated line
        for segment in line.split(',') {
            if segment.len() == 0 {
                continue;
            }
            let nums = try!(parse_vlq_segment(segment));
            dst_col = (dst_col as i64 + nums[0]) as u32;
            let mut src = !0;
            let mut name = !0;
            if nums.len() > 1 {
                // Valid segment sizes: 1 (column only), 4, or 5 (+name).
                if nums.len() != 4 && nums.len() != 5 {
                    fail!(Error::BadSegmentSize(nums.len() as u32));
                }
                src_id = nums[1] as u32;
                if src_id >= rsm.sources.len() as u32 {
                    fail!(Error::BadSourceReference(src_id));
                }
                src = src_id;
                src_line = (src_line as i64 + nums[2]) as u32;
                src_col = (src_col as i64 + nums[3]) as u32;
                if nums.len() > 4 {
                    name_id = (name_id as i64 + nums[4]) as u32;
                    if name_id >= rsm.names.len() as u32 {
                        fail!(Error::BadNameReference(name_id));
                    }
                    name = name_id as u32;
                }
            }
            tokens.push(RawToken {
                dst_line: dst_line as u32,
                dst_col: dst_col,
                src_line: src_line,
                src_col: src_col,
                src_id: src,
                name_id: name,
            });
            line_index.push((dst_col, (tokens.len() - 1) as u32));
        }
        // Entries must be ordered by column within a generated line.
        line_index.sort();
        for (dst_col, token_id) in line_index {
            index.push((dst_line as u32, dst_col, token_id));
        }
    }
    let mut sources = rsm.sources;
    if let Some(source_root) = rsm.source_root {
        // An empty source_root must not be joined, or every source would
        // gain a bogus leading slash.
        if !source_root.is_empty() {
            let source_root = source_root.trim_right_matches('/');
            sources = sources.into_iter().map(|x| {
                // BUG FIX: use starts_with instead of fixed-length byte
                // slices (`x.as_bytes()[..5]`), which panicked whenever a
                // source path was shorter than 5 bytes.
                if x.starts_with('/') || x.starts_with("http:") || x.starts_with("https:") {
                    x
                } else {
                    format!("{}/{}", source_root, x)
                }
            }).collect();
        }
    }
    Ok(SourceMap::new(
        rsm.version, rsm.file, tokens, index, rsm.names, sources))
}
/// Decodes a raw sourcemap that carries a `sections` array into a
/// `SourceMapIndex`, eagerly decoding any embedded section maps.
fn decode_index(rsm: RawSourceMap) -> Result<SourceMapIndex> {
    let raw_sections = rsm.sections.unwrap_or_else(Vec::new);
    let mut sections = Vec::with_capacity(raw_sections.len());

    for mut raw_section in raw_sections {
        // An embedded map is decoded right away; a URL-only section keeps
        // `None` here.
        let map = match raw_section.map.take() {
            Some(map) => Some(try!(decode_regular(*map))),
            None => None,
        };
        sections.push(SourceMapSection::new(
            (raw_section.offset.line, raw_section.offset.column),
            raw_section.url,
            map,
        ));
    }

    Ok(SourceMapIndex::new(
        rsm.version, rsm.file, sections))
}
/// Decodes a sourcemap or sourcemap index from a reader
///
/// This supports both sourcemaps and sourcemap indexes, unlike the
/// specialized methods on the individual types.
pub fn decode<R: Read>(rdr: R) -> Result<DecodedMap> {
    // Strip any junk header line that may precede the JSON body.
    let mut rdr = StripHeaderReader::new(rdr);
    let rsm: RawSourceMap = try!(serde_json::from_reader(&mut rdr));

    // The presence of a `sections` array is what distinguishes an index
    // from a regular sourcemap.
    if rsm.sections.is_some() {
        Ok(DecodedMap::Index(try!(decode_index(rsm))))
    } else {
        Ok(DecodedMap::Regular(try!(decode_regular(rsm))))
    }
}
|
//! Utilities for logging messages from the library.
use once_cell::sync::Lazy;
/// Logs a formatted message when process-wide logging is enabled.
///
/// Formats like `println!` and writes to stdout; the runtime check is
/// [`crate::log::log_enabled`], driven by the `RSVG_LOG` environment
/// variable.
#[macro_export]
macro_rules! rsvg_log {
    (
        $($arg:tt)+
    ) => {
        if $crate::log::log_enabled() {
            println!("{}", format_args!($($arg)+));
        }
    };
}
/// Logs a formatted message when the given session has logging enabled.
///
/// Like [`rsvg_log!`], but the enablement check is per-session via the
/// session's `log_enabled()` method rather than the global flag.
#[macro_export]
macro_rules! rsvg_log_session {
    (
        $session:expr,
        $($arg:tt)+
    ) => {
        if $session.log_enabled() {
            println!("{}", format_args!($($arg)+));
        }
    };
}
/// Returns whether process-wide logging is enabled.
///
/// The `RSVG_LOG` environment variable is consulted once, lazily, and the
/// result is cached for the lifetime of the process.
pub fn log_enabled() -> bool {
    static RSVG_LOG_IS_SET: Lazy<bool> =
        Lazy::new(|| std::env::var_os("RSVG_LOG").is_some());
    *RSVG_LOG_IS_SET
}
/// Captures the basic state of a [`cairo::Context`] for logging purposes.
///
/// A librsvg "transaction" like rendering a
/// [`crate::api::SvgHandle`], which takes a Cairo context, depends on the state of the
/// context as it was passed in by the caller. For example, librsvg may decide to
/// operate differently depending on the context's target surface type, or its current
/// transformation matrix. This struct captures that sort of information.
#[derive(Copy, Clone, Debug, PartialEq)]
struct CairoContextState {
    surface_type: cairo::SurfaceType,
    matrix: cairo::Matrix,
}

impl CairoContextState {
    /// Snapshots the target surface type and current matrix of `cr`.
    //
    // Only called from the tests so far; gate it so non-test builds do not
    // emit a dead-code warning for it.
    #[cfg(test)]
    fn new(cr: &cairo::Context) -> Self {
        let surface_type = cr.target().type_();
        let matrix = cr.matrix();

        Self {
            surface_type,
            matrix,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn captures_cr_state() {
        // An untouched context on an image surface: identity matrix.
        let image_surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 10, 10).unwrap();
        let image_cr = cairo::Context::new(&image_surface).unwrap();
        let expected_image = CairoContextState {
            surface_type: cairo::SurfaceType::Image,
            matrix: cairo::Matrix::identity(),
        };
        assert_eq!(expected_image, CairoContextState::new(&image_cr));

        // A scaled context on a recording surface: scaled matrix.
        let recording_surface =
            cairo::RecordingSurface::create(cairo::Content::ColorAlpha, None).unwrap();
        let recording_cr = cairo::Context::new(&recording_surface).unwrap();
        recording_cr.scale(2.0, 3.0);

        let mut expected_matrix = cairo::Matrix::identity();
        expected_matrix.scale(2.0, 3.0);
        let expected_recording = CairoContextState {
            surface_type: cairo::SurfaceType::Recording,
            matrix: expected_matrix,
        };
        assert_eq!(expected_recording, CairoContextState::new(&recording_cr));
    }
}
CairoContextState: suppress a warning until we use this for real outside the tests
Part-of: <https://gitlab.gnome.org/GNOME/librsvg/-/merge_requests/731>
//! Utilities for logging messages from the library.
use once_cell::sync::Lazy;
/// Logs a formatted message when process-wide logging is enabled.
///
/// Formats like `println!` and writes to stdout; the runtime check is
/// [`crate::log::log_enabled`], driven by the `RSVG_LOG` environment
/// variable.
#[macro_export]
macro_rules! rsvg_log {
    (
        $($arg:tt)+
    ) => {
        if $crate::log::log_enabled() {
            println!("{}", format_args!($($arg)+));
        }
    };
}
/// Logs a formatted message when the given session has logging enabled.
///
/// Like [`rsvg_log!`], but the enablement check is per-session via the
/// session's `log_enabled()` method rather than the global flag.
#[macro_export]
macro_rules! rsvg_log_session {
    (
        $session:expr,
        $($arg:tt)+
    ) => {
        if $session.log_enabled() {
            println!("{}", format_args!($($arg)+));
        }
    };
}
/// Returns whether process-wide logging is enabled.
///
/// The `RSVG_LOG` environment variable is consulted once, lazily, and the
/// result is cached for the lifetime of the process.
pub fn log_enabled() -> bool {
    static RSVG_LOG_IS_SET: Lazy<bool> =
        Lazy::new(|| std::env::var_os("RSVG_LOG").is_some());
    *RSVG_LOG_IS_SET
}
/// Captures the basic state of a [`cairo::Context`] for logging purposes.
///
/// A librsvg "transaction" like rendering a
/// [`crate::api::SvgHandle`], which takes a Cairo context, depends on the state of the
/// context as it was passed in by the caller. For example, librsvg may decide to
/// operate differently depending on the context's target surface type, or its current
/// transformation matrix. This struct captures that sort of information.
#[derive(Copy, Clone, Debug, PartialEq)]
struct CairoContextState {
    surface_type: cairo::SurfaceType,
    matrix: cairo::Matrix,
}

impl CairoContextState {
    /// Snapshots the target surface type and current matrix of `cr`.
    //
    // Only used from the tests for now; the cfg-gate avoids a dead-code
    // warning in non-test builds.
    #[cfg(test)]
    fn new(cr: &cairo::Context) -> Self {
        let surface_type = cr.target().type_();
        let matrix = cr.matrix();

        Self {
            surface_type,
            matrix,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn captures_cr_state() {
        // An untouched context on an image surface: identity matrix.
        let image_surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 10, 10).unwrap();
        let image_cr = cairo::Context::new(&image_surface).unwrap();
        let expected_image = CairoContextState {
            surface_type: cairo::SurfaceType::Image,
            matrix: cairo::Matrix::identity(),
        };
        assert_eq!(expected_image, CairoContextState::new(&image_cr));

        // A scaled context on a recording surface: scaled matrix.
        let recording_surface =
            cairo::RecordingSurface::create(cairo::Content::ColorAlpha, None).unwrap();
        let recording_cr = cairo::Context::new(&recording_surface).unwrap();
        recording_cr.scale(2.0, 3.0);

        let mut expected_matrix = cairo::Matrix::identity();
        expected_matrix.scale(2.0, 3.0);
        let expected_recording = CairoContextState {
            surface_type: cairo::SurfaceType::Recording,
            matrix: expected_matrix,
        };
        assert_eq!(expected_recording, CairoContextState::new(&recording_cr));
    }
}
|
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::io::{Cursor, SeekFrom, Take};
use std::marker::PhantomData;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::vec::Vec;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use fs2::FileExt;
use regex::Regex;
use data::{Entry, Hint};
use util::{xxhash32, XxHash32, get_file_handle};
// File-name extensions and lock-file name used inside a cask directory.
const DATA_FILE_EXTENSION: &'static str = "cask.data";
const HINT_FILE_EXTENSION: &'static str = "cask.hint";
const LOCK_FILE_NAME: &'static str = "cask.lock";
// Size at which the active data file is rotated (100 MiB).
const DEFAULT_SIZE_THRESHOLD: usize = 100 * 1024 * 1024;
/// An append-only log spread over numbered data files, with one active
/// file receiving writes and a hint file mirroring each data file.
pub struct Log {
    /// Directory that holds all data, hint and lock files.
    pub path: PathBuf,
    // Whether to fsync the data file after every write.
    sync: bool,
    // Rotate the active data file once it grows past this many bytes.
    size_threshold: usize,
    // Held (exclusively locked) for the lifetime of the Log.
    lock_file: File,
    // Sorted ids of the sealed (non-active) data files.
    files: Vec<u32>,
    // Source of fresh file ids; atomically incremented.
    current_file_id: AtomicUsize,
    /// Id of the data file currently receiving appends.
    pub active_file_id: u32,
    // Writer for the active data file and its hint file.
    active_log_writer: LogWriter,
}
impl Log {
    /// Opens (or creates) the cask directory at `path`, acquires its
    /// exclusive lock and starts a fresh active data file whose id is one
    /// past the highest existing data file id.
    ///
    /// Panics on I/O errors, if the lock is already held, or if `path`
    /// exists but is not a directory.
    pub fn open(path: &str, sync: bool) -> Log {
        let path = PathBuf::from(path);

        if path.exists() {
            assert!(path.is_dir());
        } else {
            fs::create_dir(&path).unwrap();
        }

        let lock_file = File::create(path.join(LOCK_FILE_NAME)).unwrap();
        lock_file.try_lock_exclusive().unwrap();

        let files = find_data_files(&path);

        // `files` is sorted ascending, so the last element is the highest id.
        let active_file_id = if files.is_empty() {
            0
        } else {
            files[files.len() - 1] + 1
        };

        let active_log_writer = LogWriter::new(&path, active_file_id, sync);

        info!("Created new active data file {:?}",
              active_log_writer.data_file_path);

        Log {
            path: path,
            sync: sync,
            size_threshold: DEFAULT_SIZE_THRESHOLD,
            lock_file: lock_file,
            files: files,
            current_file_id: AtomicUsize::new(active_file_id as usize),
            active_file_id: active_file_id,
            active_log_writer: active_log_writer,
        }
    }

    /// Returns a copy of the ids of the sealed (non-active) data files.
    pub fn files(&self) -> Vec<u32> {
        self.files.clone()
    }

    /// Returns an iterator over `(position, entry)` pairs of the data file
    /// with the given id.
    pub fn entries<'a>(&self, file_id: u32) -> Entries<'a> {
        let data_file_path = get_data_file_path(&self.path, file_id);
        info!("Loading data file: {:?}", data_file_path);

        let data_file = get_file_handle(&data_file_path, false);
        let data_file_size = data_file.metadata().unwrap().len();

        Entries {
            // Bound the reader by the current file size so iteration ends.
            data_file: data_file.take(data_file_size),
            data_file_pos: 0,
            phantom: PhantomData,
        }
    }

    /// Returns an iterator over the hints of `file_id`, or `None` when the
    /// hint file is missing or fails its checksum.
    pub fn hints<'a>(&self, file_id: u32) -> Option<Hints<'a>> {
        let hint_file_path = get_hint_file_path(&self.path, file_id);

        if is_valid_hint_file(&hint_file_path) {
            info!("Loading hint file: {:?}", hint_file_path);

            let hint_file = get_file_handle(&hint_file_path, false);
            let hint_file_size = hint_file.metadata().unwrap().len();

            Some(Hints {
                // Stop before the trailing 4-byte checksum footer.
                hint_file: hint_file.take(hint_file_size - 4),
                phantom: PhantomData,
            })
        } else {
            None
        }
    }

    /// Rebuilds the hint file of `file_id` by re-reading its data file,
    /// yielding each hint as it is written.
    pub fn recreate_hints<'a>(&mut self, file_id: u32) -> RecreateHints<'a> {
        let hint_file_path = get_hint_file_path(&self.path, file_id);
        warn!("Re-creating hint file: {:?}", hint_file_path);

        let hint_writer = HintWriter::new(&self.path, file_id);
        let entries = self.entries(file_id);

        RecreateHints {
            hint_writer: hint_writer,
            entries: entries,
        }
    }

    /// Reads a single entry from data file `file_id` at byte offset
    /// `entry_pos`.
    pub fn read_entry<'a>(&self, file_id: u32, entry_pos: u64) -> Entry<'a> {
        let mut data_file = get_file_handle(&get_data_file_path(&self.path, file_id), false);
        data_file.seek(SeekFrom::Start(entry_pos)).unwrap();
        Entry::from_read(&mut data_file)
    }

    /// Appends an entry to the active data file, rotating it first when
    /// the write would push it past `size_threshold`.  Returns the file id
    /// and position at which the entry was written.
    pub fn append_entry<'a>(&mut self, entry: &Entry<'a>) -> (u32, u64) {
        if self.active_log_writer.data_file_pos + entry.size() > self.size_threshold as u64 {
            info!("Active data file {:?} reached file limit",
                  self.active_log_writer.data_file_path);
            self.new_active_writer();
        }

        let entry_pos = self.active_log_writer.write(entry);
        (self.active_file_id, entry_pos)
    }

    /// Reserves and returns a fresh, never-used file id.
    pub fn new_file_id(&self) -> u32 {
        self.current_file_id.fetch_add(1, Ordering::SeqCst) as u32 + 1
    }

    /// Replaces `file_id` with `new_file_id` in the file set and deletes
    /// the old id's data and hint files.
    ///
    /// Panics if `file_id` is not currently in the file set.
    pub fn swap_file(&mut self, file_id: u32, new_file_id: u32) {
        let idx = self.files.binary_search(&file_id).unwrap();
        self.files.remove(idx);
        self.add_file(new_file_id);

        let data_file_path = get_data_file_path(&self.path, file_id);
        let hint_file_path = get_hint_file_path(&self.path, file_id);

        fs::remove_file(data_file_path).unwrap();
        fs::remove_file(hint_file_path).unwrap();
    }

    /// Registers `file_id` as a sealed file, keeping the set sorted.
    pub fn add_file(&mut self, file_id: u32) {
        self.files.push(file_id);
        self.files.sort();
    }

    // Seals the current active file and opens a new one under a fresh id.
    fn new_active_writer(&mut self) {
        let active_file_id = self.active_file_id;
        self.add_file(active_file_id);

        info!("Closed active data file {:?}",
              self.active_log_writer.data_file_path);

        self.active_file_id = self.new_file_id();
        self.active_log_writer = LogWriter::new(&self.path, self.active_file_id, self.sync);

        info!("Created new active data file {:?}",
              self.active_log_writer.data_file_path);
    }
}
impl Drop for Log {
    fn drop(&mut self) {
        // Release the exclusive directory lock taken in `open()`.
        self.lock_file.unlock().unwrap();
    }
}
/// Appends entries to one data file, mirroring each entry into the
/// matching hint file.
pub struct LogWriter {
    // Whether to fsync the data file after every write.
    sync: bool,
    data_file_path: PathBuf,
    data_file: File,
    // Byte offset at which the next entry will be written.
    data_file_pos: u64,
    hint_writer: HintWriter,
}
impl LogWriter {
    /// Creates a writer for the data file (and matching hint file) of the
    /// given id, positioned at offset 0.
    pub fn new(path: &Path, file_id: u32, sync: bool) -> LogWriter {
        let data_file_path = get_data_file_path(path, file_id);
        let data_file = get_file_handle(&data_file_path, true);
        let hint_writer = HintWriter::new(path, file_id);

        LogWriter {
            sync: sync,
            data_file_path: data_file_path,
            data_file: data_file,
            data_file_pos: 0,
            hint_writer: hint_writer,
        }
    }

    /// Appends `entry` to the data file, records a hint for it, and
    /// returns the byte offset at which the entry was written.
    pub fn write<'a>(&mut self, entry: &Entry<'a>) -> u64 {
        let entry_pos = self.data_file_pos;
        let hint = Hint::new(entry, entry_pos);

        entry.write_bytes(&mut self.data_file);
        self.hint_writer.write(&hint);

        if self.sync {
            self.data_file.sync_data().unwrap();
        }

        self.data_file_pos += entry.size();

        entry_pos
    }
}
impl Drop for LogWriter {
    fn drop(&mut self) {
        // Final flush when the writer is closed in sync mode.
        // NOTE(review): unwrap in drop will panic (and may abort during
        // unwinding) if the sync fails — confirm this is acceptable.
        if self.sync {
            self.data_file.sync_data().unwrap();
        }
    }
}
/// Writes hints to a hint file while accumulating a running xxhash32
/// checksum of everything written.
struct HintWriter {
    hint_file: File,
    hint_file_hasher: XxHash32,
}
impl HintWriter {
    /// Opens the hint file for `file_id` for writing.
    pub fn new(path: &Path, file_id: u32) -> HintWriter {
        let hint_file = get_file_handle(&get_hint_file_path(path, file_id), true);

        HintWriter {
            hint_file: hint_file,
            hint_file_hasher: XxHash32::new(),
        }
    }

    /// Writes the hint to the file and feeds the same bytes into the
    /// running checksum.
    pub fn write<'a>(&mut self, hint: &Hint<'a>) {
        hint.write_bytes(&mut self.hint_file);
        hint.write_bytes(&mut self.hint_file_hasher);
    }
}
impl Drop for HintWriter {
    fn drop(&mut self) {
        // Append the accumulated xxhash32 as a 4-byte little-endian
        // footer; readers validate it (see `is_valid_hint_file`).
        self.hint_file
            .write_u32::<LittleEndian>(self.hint_file_hasher.get())
            .unwrap();
    }
}
/// Iterator over the `(position, entry)` pairs of one data file.
pub struct Entries<'a> {
    // Reader bounded by the file size so iteration stops at EOF.
    data_file: Take<File>,
    // Byte offset of the next entry to be read.
    data_file_pos: u64,
    phantom: PhantomData<&'a ()>,
}
impl<'a> Iterator for Entries<'a> {
    type Item = (u64, Entry<'a>);

    fn next(&mut self) -> Option<(u64, Entry<'a>)> {
        // `limit()` reaching zero means the whole file has been consumed.
        if self.data_file.limit() == 0 {
            None
        } else {
            let entry = Entry::from_read(&mut self.data_file);
            let entry_pos = self.data_file_pos;
            self.data_file_pos += entry.size();
            Some((entry_pos, entry))
        }
    }
}
/// Iterator over the hints of one hint file (excluding its checksum
/// footer — see `Log::hints`).
pub struct Hints<'a> {
    hint_file: Take<File>,
    phantom: PhantomData<&'a ()>,
}
impl<'a> Iterator for Hints<'a> {
    type Item = Hint<'a>;

    fn next(&mut self) -> Option<Hint<'a>> {
        // The reader is bounded to exclude the checksum footer, so hitting
        // the limit means all hints were read.
        if self.hint_file.limit() == 0 {
            None
        } else {
            Some(Hint::from_read(&mut self.hint_file))
        }
    }
}
/// Iterator that rebuilds a hint file from its data file, yielding each
/// hint as it is written.
pub struct RecreateHints<'a> {
    hint_writer: HintWriter,
    entries: Entries<'a>,
}
impl<'a> Iterator for RecreateHints<'a> {
    type Item = Hint<'a>;

    fn next(&mut self) -> Option<Hint<'a>> {
        self.entries.next().map(|e| {
            let (entry_pos, entry) = e;
            // Derive the hint from the entry and persist it before handing
            // it to the caller.
            let hint = Hint::from(entry, entry_pos);
            self.hint_writer.write(&hint);
            hint
        })
    }
}
impl<'a> Drop for RecreateHints<'a> {
    fn drop(&mut self) {
        // Drain the iterator so the hint file is always written out
        // completely, even if the caller stopped iterating early.
        while self.next().is_some() {}
    }
}
/// Builds the path of the data file for `file_id` inside `path`, with the
/// id zero-padded to ten digits.
fn get_data_file_path(path: &Path, file_id: u32) -> PathBuf {
    path.join(format!("{:010}.{}", file_id, DATA_FILE_EXTENSION))
}
/// Builds the path of the hint file for `file_id` inside `path`, with the
/// id zero-padded to ten digits.
fn get_hint_file_path(path: &Path, file_id: u32) -> PathBuf {
    path.join(format!("{:010}.{}", file_id, HINT_FILE_EXTENSION))
}
/// Scans `path` for data files and returns their numeric ids, sorted
/// ascending.
fn find_data_files(path: &Path) -> Vec<u32> {
    let files = fs::read_dir(path).unwrap();

    lazy_static! {
        // The `.` separating the id from the extension must be escaped;
        // unescaped it matches any character, so names like
        // "123Xcask.data" would wrongly be accepted as data files.
        static ref RE: Regex =
            Regex::new(&format!("(\\d+)\\.{}$", DATA_FILE_EXTENSION)).unwrap();
    }

    let mut files: Vec<u32> = files.flat_map(|f| {
        let file = f.unwrap();
        let file_metadata = file.metadata().unwrap();
        if file_metadata.is_file() {
            let file_name = file.file_name();
            let captures = RE.captures(file_name.to_str().unwrap());
            captures.and_then(|c| c.at(1).and_then(|n| n.parse::<u32>().ok()))
        } else {
            None
        }
    })
        .collect();

    files.sort();

    files
}
/// Returns true when `path` is a hint file whose trailing 4-byte
/// little-endian xxhash32 checksum matches the rest of its contents.
fn is_valid_hint_file(path: &Path) -> bool {
    path.is_file() &&
    {
        let mut hint_file = get_file_handle(path, false);

        // FIXME: avoid reading the whole hint file into memory;
        let mut buf = Vec::new();
        hint_file.read_to_end(&mut buf).unwrap();

        // Must at least contain the 4-byte checksum footer.
        buf.len() >= 4 &&
        {
            let hash = xxhash32(&buf[..buf.len() - 4]);

            let mut cursor = Cursor::new(&buf[buf.len() - 4..]);
            let checksum = cursor.read_u32::<LittleEndian>().unwrap();

            let valid = hash == checksum;

            if !valid {
                warn!("Found corrupt hint file: {:?}", &path);
            }

            valid
        }
    }
}
increase default data file size
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::io::{Cursor, SeekFrom, Take};
use std::marker::PhantomData;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::vec::Vec;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use fs2::FileExt;
use regex::Regex;
use data::{Entry, Hint};
use util::{xxhash32, XxHash32, get_file_handle};
// File-name extensions and lock-file name used inside a cask directory.
const DATA_FILE_EXTENSION: &'static str = "cask.data";
const HINT_FILE_EXTENSION: &'static str = "cask.hint";
const LOCK_FILE_NAME: &'static str = "cask.lock";
// Size at which the active data file is rotated (2000 MiB).
const DEFAULT_SIZE_THRESHOLD: usize = 2000 * 1024 * 1024;
/// An append-only log spread over numbered data files, with one active
/// file receiving writes and a hint file mirroring each data file.
pub struct Log {
    /// Directory that holds all data, hint and lock files.
    pub path: PathBuf,
    // Whether to fsync the data file after every write.
    sync: bool,
    // Rotate the active data file once it grows past this many bytes.
    size_threshold: usize,
    // Held (exclusively locked) for the lifetime of the Log.
    lock_file: File,
    // Sorted ids of the sealed (non-active) data files.
    files: Vec<u32>,
    // Source of fresh file ids; atomically incremented.
    current_file_id: AtomicUsize,
    /// Id of the data file currently receiving appends.
    pub active_file_id: u32,
    // Writer for the active data file and its hint file.
    active_log_writer: LogWriter,
}
impl Log {
    /// Opens (or creates) the cask directory at `path`, acquires its
    /// exclusive lock and starts a fresh active data file whose id is one
    /// past the highest existing data file id.
    ///
    /// Panics on I/O errors, if the lock is already held, or if `path`
    /// exists but is not a directory.
    pub fn open(path: &str, sync: bool) -> Log {
        let path = PathBuf::from(path);

        if path.exists() {
            assert!(path.is_dir());
        } else {
            fs::create_dir(&path).unwrap();
        }

        let lock_file = File::create(path.join(LOCK_FILE_NAME)).unwrap();
        lock_file.try_lock_exclusive().unwrap();

        let files = find_data_files(&path);

        // `files` is sorted ascending, so the last element is the highest id.
        let active_file_id = if files.is_empty() {
            0
        } else {
            files[files.len() - 1] + 1
        };

        let active_log_writer = LogWriter::new(&path, active_file_id, sync);

        info!("Created new active data file {:?}",
              active_log_writer.data_file_path);

        Log {
            path: path,
            sync: sync,
            size_threshold: DEFAULT_SIZE_THRESHOLD,
            lock_file: lock_file,
            files: files,
            current_file_id: AtomicUsize::new(active_file_id as usize),
            active_file_id: active_file_id,
            active_log_writer: active_log_writer,
        }
    }

    /// Returns a copy of the ids of the sealed (non-active) data files.
    pub fn files(&self) -> Vec<u32> {
        self.files.clone()
    }

    /// Returns an iterator over `(position, entry)` pairs of the data file
    /// with the given id.
    pub fn entries<'a>(&self, file_id: u32) -> Entries<'a> {
        let data_file_path = get_data_file_path(&self.path, file_id);
        info!("Loading data file: {:?}", data_file_path);

        let data_file = get_file_handle(&data_file_path, false);
        let data_file_size = data_file.metadata().unwrap().len();

        Entries {
            // Bound the reader by the current file size so iteration ends.
            data_file: data_file.take(data_file_size),
            data_file_pos: 0,
            phantom: PhantomData,
        }
    }

    /// Returns an iterator over the hints of `file_id`, or `None` when the
    /// hint file is missing or fails its checksum.
    pub fn hints<'a>(&self, file_id: u32) -> Option<Hints<'a>> {
        let hint_file_path = get_hint_file_path(&self.path, file_id);

        if is_valid_hint_file(&hint_file_path) {
            info!("Loading hint file: {:?}", hint_file_path);

            let hint_file = get_file_handle(&hint_file_path, false);
            let hint_file_size = hint_file.metadata().unwrap().len();

            Some(Hints {
                // Stop before the trailing 4-byte checksum footer.
                hint_file: hint_file.take(hint_file_size - 4),
                phantom: PhantomData,
            })
        } else {
            None
        }
    }

    /// Rebuilds the hint file of `file_id` by re-reading its data file,
    /// yielding each hint as it is written.
    pub fn recreate_hints<'a>(&mut self, file_id: u32) -> RecreateHints<'a> {
        let hint_file_path = get_hint_file_path(&self.path, file_id);
        warn!("Re-creating hint file: {:?}", hint_file_path);

        let hint_writer = HintWriter::new(&self.path, file_id);
        let entries = self.entries(file_id);

        RecreateHints {
            hint_writer: hint_writer,
            entries: entries,
        }
    }

    /// Reads a single entry from data file `file_id` at byte offset
    /// `entry_pos`.
    pub fn read_entry<'a>(&self, file_id: u32, entry_pos: u64) -> Entry<'a> {
        let mut data_file = get_file_handle(&get_data_file_path(&self.path, file_id), false);
        data_file.seek(SeekFrom::Start(entry_pos)).unwrap();
        Entry::from_read(&mut data_file)
    }

    /// Appends an entry to the active data file, rotating it first when
    /// the write would push it past `size_threshold`.  Returns the file id
    /// and position at which the entry was written.
    pub fn append_entry<'a>(&mut self, entry: &Entry<'a>) -> (u32, u64) {
        if self.active_log_writer.data_file_pos + entry.size() > self.size_threshold as u64 {
            info!("Active data file {:?} reached file limit",
                  self.active_log_writer.data_file_path);
            self.new_active_writer();
        }

        let entry_pos = self.active_log_writer.write(entry);
        (self.active_file_id, entry_pos)
    }

    /// Reserves and returns a fresh, never-used file id.
    pub fn new_file_id(&self) -> u32 {
        self.current_file_id.fetch_add(1, Ordering::SeqCst) as u32 + 1
    }

    /// Replaces `file_id` with `new_file_id` in the file set and deletes
    /// the old id's data and hint files.
    ///
    /// Panics if `file_id` is not currently in the file set.
    pub fn swap_file(&mut self, file_id: u32, new_file_id: u32) {
        let idx = self.files.binary_search(&file_id).unwrap();
        self.files.remove(idx);
        self.add_file(new_file_id);

        let data_file_path = get_data_file_path(&self.path, file_id);
        let hint_file_path = get_hint_file_path(&self.path, file_id);

        fs::remove_file(data_file_path).unwrap();
        fs::remove_file(hint_file_path).unwrap();
    }

    /// Registers `file_id` as a sealed file, keeping the set sorted.
    pub fn add_file(&mut self, file_id: u32) {
        self.files.push(file_id);
        self.files.sort();
    }

    // Seals the current active file and opens a new one under a fresh id.
    fn new_active_writer(&mut self) {
        let active_file_id = self.active_file_id;
        self.add_file(active_file_id);

        info!("Closed active data file {:?}",
              self.active_log_writer.data_file_path);

        self.active_file_id = self.new_file_id();
        self.active_log_writer = LogWriter::new(&self.path, self.active_file_id, self.sync);

        info!("Created new active data file {:?}",
              self.active_log_writer.data_file_path);
    }
}
impl Drop for Log {
    fn drop(&mut self) {
        // Release the exclusive directory lock taken in `open()`.
        self.lock_file.unlock().unwrap();
    }
}
/// Appends entries to one data file, mirroring each entry into the
/// matching hint file.
pub struct LogWriter {
    // Whether to fsync the data file after every write.
    sync: bool,
    data_file_path: PathBuf,
    data_file: File,
    // Byte offset at which the next entry will be written.
    data_file_pos: u64,
    hint_writer: HintWriter,
}
impl LogWriter {
    /// Creates a writer for the data file (and matching hint file) of the
    /// given id, positioned at offset 0.
    pub fn new(path: &Path, file_id: u32, sync: bool) -> LogWriter {
        let data_file_path = get_data_file_path(path, file_id);
        let data_file = get_file_handle(&data_file_path, true);
        let hint_writer = HintWriter::new(path, file_id);

        LogWriter {
            sync: sync,
            data_file_path: data_file_path,
            data_file: data_file,
            data_file_pos: 0,
            hint_writer: hint_writer,
        }
    }

    /// Appends `entry` to the data file, records a hint for it, and
    /// returns the byte offset at which the entry was written.
    pub fn write<'a>(&mut self, entry: &Entry<'a>) -> u64 {
        let entry_pos = self.data_file_pos;
        let hint = Hint::new(entry, entry_pos);

        entry.write_bytes(&mut self.data_file);
        self.hint_writer.write(&hint);

        if self.sync {
            self.data_file.sync_data().unwrap();
        }

        self.data_file_pos += entry.size();

        entry_pos
    }
}
impl Drop for LogWriter {
    fn drop(&mut self) {
        // Final flush when the writer is closed in sync mode.
        // NOTE(review): unwrap in drop will panic (and may abort during
        // unwinding) if the sync fails — confirm this is acceptable.
        if self.sync {
            self.data_file.sync_data().unwrap();
        }
    }
}
/// Writes hints to a hint file while accumulating a running xxhash32
/// checksum of everything written.
struct HintWriter {
    hint_file: File,
    hint_file_hasher: XxHash32,
}
impl HintWriter {
    /// Opens the hint file for `file_id` for writing.
    pub fn new(path: &Path, file_id: u32) -> HintWriter {
        let hint_file = get_file_handle(&get_hint_file_path(path, file_id), true);

        HintWriter {
            hint_file: hint_file,
            hint_file_hasher: XxHash32::new(),
        }
    }

    /// Writes the hint to the file and feeds the same bytes into the
    /// running checksum.
    pub fn write<'a>(&mut self, hint: &Hint<'a>) {
        hint.write_bytes(&mut self.hint_file);
        hint.write_bytes(&mut self.hint_file_hasher);
    }
}
impl Drop for HintWriter {
    fn drop(&mut self) {
        // Append the accumulated xxhash32 as a 4-byte little-endian
        // footer; readers validate it (see `is_valid_hint_file`).
        self.hint_file
            .write_u32::<LittleEndian>(self.hint_file_hasher.get())
            .unwrap();
    }
}
/// Iterator over the `(position, entry)` pairs of one data file.
pub struct Entries<'a> {
    // Reader bounded by the file size so iteration stops at EOF.
    data_file: Take<File>,
    // Byte offset of the next entry to be read.
    data_file_pos: u64,
    phantom: PhantomData<&'a ()>,
}
impl<'a> Iterator for Entries<'a> {
    type Item = (u64, Entry<'a>);

    fn next(&mut self) -> Option<(u64, Entry<'a>)> {
        // `limit()` reaching zero means the whole file has been consumed.
        if self.data_file.limit() == 0 {
            None
        } else {
            let entry = Entry::from_read(&mut self.data_file);
            let entry_pos = self.data_file_pos;
            self.data_file_pos += entry.size();
            Some((entry_pos, entry))
        }
    }
}
/// Iterator over the hints of one hint file (excluding its checksum
/// footer — see `Log::hints`).
pub struct Hints<'a> {
    hint_file: Take<File>,
    phantom: PhantomData<&'a ()>,
}
impl<'a> Iterator for Hints<'a> {
    type Item = Hint<'a>;

    fn next(&mut self) -> Option<Hint<'a>> {
        // The reader is bounded to exclude the checksum footer, so hitting
        // the limit means all hints were read.
        if self.hint_file.limit() == 0 {
            None
        } else {
            Some(Hint::from_read(&mut self.hint_file))
        }
    }
}
/// Iterator that rebuilds a hint file from its data file, yielding each
/// hint as it is written.
pub struct RecreateHints<'a> {
    hint_writer: HintWriter,
    entries: Entries<'a>,
}
impl<'a> Iterator for RecreateHints<'a> {
    type Item = Hint<'a>;

    fn next(&mut self) -> Option<Hint<'a>> {
        self.entries.next().map(|e| {
            let (entry_pos, entry) = e;
            // Derive the hint from the entry and persist it before handing
            // it to the caller.
            let hint = Hint::from(entry, entry_pos);
            self.hint_writer.write(&hint);
            hint
        })
    }
}
impl<'a> Drop for RecreateHints<'a> {
    fn drop(&mut self) {
        // Drain the iterator so the hint file is always written out
        // completely, even if the caller stopped iterating early.
        while self.next().is_some() {}
    }
}
/// Builds the path of the data file for `file_id` inside `path`, with the
/// id zero-padded to ten digits.
fn get_data_file_path(path: &Path, file_id: u32) -> PathBuf {
    path.join(format!("{:010}.{}", file_id, DATA_FILE_EXTENSION))
}
/// Builds the path of the hint file for `file_id` inside `path`, with the
/// id zero-padded to ten digits.
fn get_hint_file_path(path: &Path, file_id: u32) -> PathBuf {
    path.join(format!("{:010}.{}", file_id, HINT_FILE_EXTENSION))
}
/// Scans `path` for data files and returns their numeric ids, sorted
/// ascending.
fn find_data_files(path: &Path) -> Vec<u32> {
    let files = fs::read_dir(path).unwrap();

    lazy_static! {
        // The `.` separating the id from the extension must be escaped;
        // unescaped it matches any character, so names like
        // "123Xcask.data" would wrongly be accepted as data files.
        static ref RE: Regex =
            Regex::new(&format!("(\\d+)\\.{}$", DATA_FILE_EXTENSION)).unwrap();
    }

    let mut files: Vec<u32> = files.flat_map(|f| {
        let file = f.unwrap();
        let file_metadata = file.metadata().unwrap();
        if file_metadata.is_file() {
            let file_name = file.file_name();
            let captures = RE.captures(file_name.to_str().unwrap());
            captures.and_then(|c| c.at(1).and_then(|n| n.parse::<u32>().ok()))
        } else {
            None
        }
    })
        .collect();

    files.sort();

    files
}
/// Returns true when `path` is a hint file whose trailing 4-byte
/// little-endian xxhash32 checksum matches the rest of its contents.
fn is_valid_hint_file(path: &Path) -> bool {
    path.is_file() &&
    {
        let mut hint_file = get_file_handle(path, false);

        // FIXME: avoid reading the whole hint file into memory;
        let mut buf = Vec::new();
        hint_file.read_to_end(&mut buf).unwrap();

        // Must at least contain the 4-byte checksum footer.
        buf.len() >= 4 &&
        {
            let hash = xxhash32(&buf[..buf.len() - 4]);

            let mut cursor = Cursor::new(&buf[buf.len() - 4..]);
            let checksum = cursor.read_u32::<LittleEndian>().unwrap();

            let valid = hash == checksum;

            if !valid {
                warn!("Found corrupt hint file: {:?}", &path);
            }

            valid
        }
    }
}
|
use super::*;
use crate::token::{Brace, Bracket, Paren};
use proc_macro2::TokenStream;
#[cfg(feature = "parsing")]
use proc_macro2::{Delimiter, Span, TokenTree};
#[cfg(feature = "parsing")]
use crate::parse::{Parse, ParseStream, Parser, Result};
#[cfg(feature = "extra-traits")]
use crate::tt::TokenStreamHelper;
#[cfg(feature = "extra-traits")]
use std::hash::{Hash, Hasher};
ast_struct! {
    /// A macro invocation: `println!("{}", mac)`.
    ///
    /// *This type is available if Syn is built with the `"derive"` or `"full"`
    /// feature.*
    pub struct Macro #manual_extra_traits {
        // Path of the macro being invoked, e.g. `println`.
        pub path: Path,
        // The `!` between the path and the delimiter.
        pub bang_token: Token![!],
        // Which bracket style surrounds the body: `()`, `{}` or `[]`.
        pub delimiter: MacroDelimiter,
        // The raw tokens making up the macro body.
        pub tokens: TokenStream,
    }
}
ast_enum! {
    /// A grouping token that surrounds a macro body: `m!(...)` or `m!{...}` or `m![...]`.
    ///
    /// *This type is available if Syn is built with the `"derive"` or `"full"`
    /// feature.*
    pub enum MacroDelimiter {
        // Each variant carries the delimiter token, which records the span
        // of the surrounding pair.
        Paren(Paren),
        Brace(Brace),
        Bracket(Bracket),
    }
}
#[cfg(feature = "extra-traits")]
// The manual `PartialEq` in this file is a total equivalence over all
// fields, so `Eq` holds as a marker impl.
impl Eq for Macro {}
#[cfg(feature = "extra-traits")]
impl PartialEq for Macro {
    fn eq(&self, other: &Self) -> bool {
        // `TokenStreamHelper` supplies the comparison for `TokenStream`,
        // which has no `PartialEq` of its own.
        self.path == other.path
            && self.bang_token == other.bang_token
            && self.delimiter == other.delimiter
            && TokenStreamHelper(&self.tokens) == TokenStreamHelper(&other.tokens)
    }
}
#[cfg(feature = "extra-traits")]
impl Hash for Macro {
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        // Hash every field that participates in `PartialEq`, using
        // `TokenStreamHelper` for the token stream.
        self.path.hash(state);
        self.bang_token.hash(state);
        self.delimiter.hash(state);
        TokenStreamHelper(&self.tokens).hash(state);
    }
}
/// Returns the span of the delimiter pair surrounding a macro body.
#[cfg(feature = "parsing")]
fn delimiter_span(delimiter: &MacroDelimiter) -> Span {
    // Every delimiter token stores the span of its bracket pair.
    match delimiter {
        MacroDelimiter::Paren(tok) => tok.span,
        MacroDelimiter::Brace(tok) => tok.span,
        MacroDelimiter::Bracket(tok) => tok.span,
    }
}
impl Macro {
    /// Parse the tokens within the macro invocation's delimiters into a syntax
    /// tree.
    ///
    /// This is equivalent to `syn::parse2::<T>(mac.tokens)` except that it
    /// produces a more useful span when `tokens` is empty.
    ///
    /// # Example
    ///
    /// ```edition2018
    /// use syn::{parse_quote, Expr, ExprLit, Ident, Lit, LitStr, Macro, Token};
    /// use syn::ext::IdentExt;
    /// use syn::parse::{Error, Parse, ParseStream, Result};
    /// use syn::punctuated::Punctuated;
    ///
    /// // The arguments expected by libcore's format_args macro, and as a
    /// // result most other formatting and printing macros like println.
    /// //
    /// // println!("{} is {number:.prec$}", "x", prec=5, number=0.01)
    /// struct FormatArgs {
    ///     format_string: Expr,
    ///     positional_args: Vec<Expr>,
    ///     named_args: Vec<(Ident, Expr)>,
    /// }
    ///
    /// impl Parse for FormatArgs {
    ///     fn parse(input: ParseStream) -> Result<Self> {
    ///         let format_string: Expr;
    ///         let mut positional_args = Vec::new();
    ///         let mut named_args = Vec::new();
    ///
    ///         format_string = input.parse()?;
    ///         while !input.is_empty() {
    ///             input.parse::<Token![,]>()?;
    ///             if input.is_empty() {
    ///                 break;
    ///             }
    ///             if input.peek(Ident::peek_any) && input.peek2(Token![=]) {
    ///                 while !input.is_empty() {
    ///                     let name: Ident = input.call(Ident::parse_any)?;
    ///                     input.parse::<Token![=]>()?;
    ///                     let value: Expr = input.parse()?;
    ///                     named_args.push((name, value));
    ///                     if input.is_empty() {
    ///                         break;
    ///                     }
    ///                     input.parse::<Token![,]>()?;
    ///                 }
    ///                 break;
    ///             }
    ///             positional_args.push(input.parse()?);
    ///         }
    ///
    ///         Ok(FormatArgs {
    ///             format_string,
    ///             positional_args,
    ///             named_args,
    ///         })
    ///     }
    /// }
    ///
    /// // Extract the first argument, the format string literal, from an
    /// // invocation of a formatting or printing macro.
    /// fn get_format_string(m: &Macro) -> Result<LitStr> {
    ///     let args: FormatArgs = m.parse_body()?;
    ///     match args.format_string {
    ///         Expr::Lit(ExprLit { lit: Lit::Str(lit), .. }) => Ok(lit),
    ///         other => {
    ///             // First argument was not a string literal expression.
    ///             // Maybe something like: println!(concat!(...), ...)
    ///             Err(Error::new_spanned(other, "format string must be a string literal"))
    ///         }
    ///     }
    /// }
    ///
    /// fn main() {
    ///     let invocation = parse_quote! {
    ///         println!("{:?}", Instant::now())
    ///     };
    ///     let lit = get_format_string(&invocation).unwrap();
    ///     assert_eq!(lit.value(), "{:?}");
    /// }
    /// ```
    #[cfg(feature = "parsing")]
    pub fn parse_body<T: Parse>(&self) -> Result<T> {
        self.parse_body_with(T::parse)
    }

    /// Parse the tokens within the macro invocation's delimiters using the
    /// given parser.
    #[cfg(feature = "parsing")]
    pub fn parse_body_with<F: Parser>(&self, parser: F) -> Result<F::Output> {
        // TODO: see if we can get a group.span_close() span in here as the
        // scope, rather than the span of the whole group.
        //
        // Using the delimiter's span as the error scope makes "unexpected
        // end of input" errors point at the macro body instead of nowhere.
        let scope = delimiter_span(&self.delimiter);
        crate::parse::parse_scoped(parser, scope, self.tokens.clone())
    }
}
/// Parses the next token tree as a delimited group, returning its
/// delimiter and the tokens inside it.
///
/// A `Delimiter::None` (invisible) group is rejected, since a macro body
/// must be surrounded by `()`, `{}` or `[]`.
#[cfg(feature = "parsing")]
pub fn parse_delimiter(input: ParseStream) -> Result<(MacroDelimiter, TokenStream)> {
    input.step(|cursor| {
        if let Some((TokenTree::Group(g), rest)) = cursor.token_tree() {
            let span = g.span();
            let delimiter = match g.delimiter() {
                Delimiter::Parenthesis => MacroDelimiter::Paren(Paren(span)),
                Delimiter::Brace => MacroDelimiter::Brace(Brace(span)),
                Delimiter::Bracket => MacroDelimiter::Bracket(Bracket(span)),
                Delimiter::None => {
                    return Err(cursor.error("expected delimiter"));
                }
            };
            // `Group::stream()` already returns an owned `TokenStream`, so
            // the former `.clone()` on it was redundant.
            Ok(((delimiter, g.stream()), rest))
        } else {
            Err(cursor.error("expected delimiter"))
        }
    })
}
#[cfg(feature = "parsing")]
pub mod parsing {
    use super::*;

    use crate::parse::{Parse, ParseStream, Result};

    impl Parse for Macro {
        fn parse(input: ParseStream) -> Result<Self> {
            let tokens;
            Ok(Macro {
                // A macro path is "mod style": no generic arguments.
                path: input.call(Path::parse_mod_style)?,
                bang_token: input.parse()?,
                delimiter: {
                    // `parse_delimiter` yields both the delimiter and the
                    // tokens inside it; stash the tokens for the field below.
                    let (delimiter, content) = parse_delimiter(input)?;
                    tokens = content;
                    delimiter
                },
                tokens,
            })
        }
    }
}
#[cfg(feature = "printing")]
mod printing {
    use super::*;
    use proc_macro2::TokenStream;
    use quote::ToTokens;

    impl ToTokens for Macro {
        fn to_tokens(&self, tokens: &mut TokenStream) {
            // Emitted as: path ! <delimiter> body </delimiter>
            self.path.to_tokens(tokens);
            self.bang_token.to_tokens(tokens);
            match &self.delimiter {
                MacroDelimiter::Paren(paren) => {
                    paren.surround(tokens, |tokens| self.tokens.to_tokens(tokens));
                }
                MacroDelimiter::Brace(brace) => {
                    brace.surround(tokens, |tokens| self.tokens.to_tokens(tokens));
                }
                MacroDelimiter::Bracket(bracket) => {
                    bracket.surround(tokens, |tokens| self.tokens.to_tokens(tokens));
                }
            }
        }
    }
}
Remove unneeded clone in parse_delimiter
use super::*;
use crate::token::{Brace, Bracket, Paren};
use proc_macro2::TokenStream;
#[cfg(feature = "parsing")]
use proc_macro2::{Delimiter, Span, TokenTree};
#[cfg(feature = "parsing")]
use crate::parse::{Parse, ParseStream, Parser, Result};
#[cfg(feature = "extra-traits")]
use crate::tt::TokenStreamHelper;
#[cfg(feature = "extra-traits")]
use std::hash::{Hash, Hasher};
ast_struct! {
    /// A macro invocation: `println!("{}", mac)`.
    ///
    /// *This type is available if Syn is built with the `"derive"` or `"full"`
    /// feature.*
    pub struct Macro #manual_extra_traits {
        // Path of the macro being invoked, e.g. `println`.
        pub path: Path,
        // The `!` between the path and the delimiter.
        pub bang_token: Token![!],
        // Which bracket style surrounds the body: `()`, `{}` or `[]`.
        pub delimiter: MacroDelimiter,
        // The raw tokens making up the macro body.
        pub tokens: TokenStream,
    }
}
ast_enum! {
    /// A grouping token that surrounds a macro body: `m!(...)` or `m!{...}` or `m![...]`.
    ///
    /// *This type is available if Syn is built with the `"derive"` or `"full"`
    /// feature.*
    pub enum MacroDelimiter {
        // Each variant carries the delimiter token, which records the span
        // of the surrounding pair.
        Paren(Paren),
        Brace(Brace),
        Bracket(Bracket),
    }
}
#[cfg(feature = "extra-traits")]
// The manual `PartialEq` in this file is a total equivalence over all
// fields, so `Eq` holds as a marker impl.
impl Eq for Macro {}
#[cfg(feature = "extra-traits")]
impl PartialEq for Macro {
    fn eq(&self, other: &Self) -> bool {
        // `TokenStreamHelper` supplies the comparison for `TokenStream`,
        // which has no `PartialEq` of its own.
        self.path == other.path
            && self.bang_token == other.bang_token
            && self.delimiter == other.delimiter
            && TokenStreamHelper(&self.tokens) == TokenStreamHelper(&other.tokens)
    }
}
#[cfg(feature = "extra-traits")]
impl Hash for Macro {
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        // Hash every field that participates in `PartialEq`, using
        // `TokenStreamHelper` for the token stream.
        self.path.hash(state);
        self.bang_token.hash(state);
        self.delimiter.hash(state);
        TokenStreamHelper(&self.tokens).hash(state);
    }
}
/// Returns the span of the delimiter pair surrounding a macro body.
#[cfg(feature = "parsing")]
fn delimiter_span(delimiter: &MacroDelimiter) -> Span {
    // Every delimiter token stores the span of its bracket pair.
    match delimiter {
        MacroDelimiter::Paren(tok) => tok.span,
        MacroDelimiter::Brace(tok) => tok.span,
        MacroDelimiter::Bracket(tok) => tok.span,
    }
}
impl Macro {
    /// Parse the tokens within the macro invocation's delimiters into a syntax
    /// tree.
    ///
    /// This is equivalent to `syn::parse2::<T>(mac.tokens)` except that it
    /// produces a more useful span when `tokens` is empty.
    ///
    /// # Example
    ///
    /// ```edition2018
    /// use syn::{parse_quote, Expr, ExprLit, Ident, Lit, LitStr, Macro, Token};
    /// use syn::ext::IdentExt;
    /// use syn::parse::{Error, Parse, ParseStream, Result};
    /// use syn::punctuated::Punctuated;
    ///
    /// // The arguments expected by libcore's format_args macro, and as a
    /// // result most other formatting and printing macros like println.
    /// //
    /// // println!("{} is {number:.prec$}", "x", prec=5, number=0.01)
    /// struct FormatArgs {
    ///     format_string: Expr,
    ///     positional_args: Vec<Expr>,
    ///     named_args: Vec<(Ident, Expr)>,
    /// }
    ///
    /// impl Parse for FormatArgs {
    ///     fn parse(input: ParseStream) -> Result<Self> {
    ///         let format_string: Expr;
    ///         let mut positional_args = Vec::new();
    ///         let mut named_args = Vec::new();
    ///
    ///         format_string = input.parse()?;
    ///         while !input.is_empty() {
    ///             input.parse::<Token![,]>()?;
    ///             if input.is_empty() {
    ///                 break;
    ///             }
    ///             if input.peek(Ident::peek_any) && input.peek2(Token![=]) {
    ///                 while !input.is_empty() {
    ///                     let name: Ident = input.call(Ident::parse_any)?;
    ///                     input.parse::<Token![=]>()?;
    ///                     let value: Expr = input.parse()?;
    ///                     named_args.push((name, value));
    ///                     if input.is_empty() {
    ///                         break;
    ///                     }
    ///                     input.parse::<Token![,]>()?;
    ///                 }
    ///                 break;
    ///             }
    ///             positional_args.push(input.parse()?);
    ///         }
    ///
    ///         Ok(FormatArgs {
    ///             format_string,
    ///             positional_args,
    ///             named_args,
    ///         })
    ///     }
    /// }
    ///
    /// // Extract the first argument, the format string literal, from an
    /// // invocation of a formatting or printing macro.
    /// fn get_format_string(m: &Macro) -> Result<LitStr> {
    ///     let args: FormatArgs = m.parse_body()?;
    ///     match args.format_string {
    ///         Expr::Lit(ExprLit { lit: Lit::Str(lit), .. }) => Ok(lit),
    ///         other => {
    ///             // First argument was not a string literal expression.
    ///             // Maybe something like: println!(concat!(...), ...)
    ///             Err(Error::new_spanned(other, "format string must be a string literal"))
    ///         }
    ///     }
    /// }
    ///
    /// fn main() {
    ///     let invocation = parse_quote! {
    ///         println!("{:?}", Instant::now())
    ///     };
    ///     let lit = get_format_string(&invocation).unwrap();
    ///     assert_eq!(lit.value(), "{:?}");
    /// }
    /// ```
    #[cfg(feature = "parsing")]
    pub fn parse_body<T: Parse>(&self) -> Result<T> {
        self.parse_body_with(T::parse)
    }
    /// Parse the tokens within the macro invocation's delimiters using the
    /// given parser.
    ///
    /// Unlike `parse_body`, the parse function is supplied by the caller
    /// rather than coming from `T`'s `Parse` impl.
    #[cfg(feature = "parsing")]
    pub fn parse_body_with<F: Parser>(&self, parser: F) -> Result<F::Output> {
        // TODO: see if we can get a group.span_close() span in here as the
        // scope, rather than the span of the whole group.
        let scope = delimiter_span(&self.delimiter);
        crate::parse::parse_scoped(parser, scope, self.tokens.clone())
    }
}
#[cfg(feature = "parsing")]
pub fn parse_delimiter(input: ParseStream) -> Result<(MacroDelimiter, TokenStream)> {
    input.step(|cursor| {
        // The next token tree must be a delimited group; anything else is an
        // immediate error.
        let (group, rest) = match cursor.token_tree() {
            Some((TokenTree::Group(g), rest)) => (g, rest),
            _ => return Err(cursor.error("expected delimiter")),
        };
        let span = group.span();
        // Invisible (`None`) delimiters are rejected too — a macro body must
        // be written with explicit parens, braces, or brackets.
        let delimiter = match group.delimiter() {
            Delimiter::Parenthesis => MacroDelimiter::Paren(Paren(span)),
            Delimiter::Brace => MacroDelimiter::Brace(Brace(span)),
            Delimiter::Bracket => MacroDelimiter::Bracket(Bracket(span)),
            Delimiter::None => return Err(cursor.error("expected delimiter")),
        };
        Ok(((delimiter, group.stream()), rest))
    })
}
#[cfg(feature = "parsing")]
pub mod parsing {
    use super::*;
    use crate::parse::{Parse, ParseStream, Result};
    impl Parse for Macro {
        // A macro invocation is `path ! delimiter tokens`, parsed strictly in
        // that order.
        fn parse(input: ParseStream) -> Result<Self> {
            let path = input.call(Path::parse_mod_style)?;
            let bang_token = input.parse()?;
            let (delimiter, tokens) = parse_delimiter(input)?;
            Ok(Macro {
                path,
                bang_token,
                delimiter,
                tokens,
            })
        }
    }
}
#[cfg(feature = "printing")]
mod printing {
    use super::*;
    use proc_macro2::TokenStream;
    use quote::ToTokens;
    impl ToTokens for Macro {
        // Emits `path`, `!`, then the token body wrapped in whichever
        // delimiter the invocation used.
        fn to_tokens(&self, tokens: &mut TokenStream) {
            self.path.to_tokens(tokens);
            self.bang_token.to_tokens(tokens);
            let body = |tokens: &mut TokenStream| self.tokens.to_tokens(tokens);
            match &self.delimiter {
                MacroDelimiter::Paren(paren) => paren.surround(tokens, body),
                MacroDelimiter::Brace(brace) => brace.surround(tokens, body),
                MacroDelimiter::Bracket(bracket) => bracket.surround(tokens, body),
            }
        }
    }
}
|
//! Domain name related scanning, used by both email and URL scanners.
//!
//! This is called domains for familiarity but it's about the authority part of URLs as defined in
//! https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
//!
//! ```text
//! authority = [ userinfo "@" ] host [ ":" port ]
//!
//!
//! userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
//!
//! host = IP-literal / IPv4address / reg-name
//!
//! IP-literal = "[" ( IPv6address / IPvFuture ) "]"
//!
//! IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
//!
//! reg-name = *( unreserved / pct-encoded / sub-delims )
//!
//!
//! unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
//!
//! sub-delims  = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
//!
//! pct-encoded = "%" HEXDIG HEXDIG
//! ```
use std::char;
/// Finds the end of the authority component (`[ userinfo "@" ] host [ ":" port ]`)
/// at the start of `s`.
///
/// Returns `(end, last_dot)`:
/// * `end` — byte offset just past the last character that may legally end the
///   authority, or `None` when no acceptable authority exists;
/// * `last_dot` — byte offset of the `.` that introduces the final host label,
///   so callers can validate the TLD.
pub(crate) fn find_authority_end(
    s: &str,
    mut userinfo_allowed: bool,
    require_host: bool,
    port_allowed: bool,
) -> (Option<usize>, Option<usize>) {
    let mut end = Some(0);
    // A `.` is only promoted from `maybe_last_dot` to `last_dot` once a label
    // character follows it, so a trailing dot never counts.
    let mut maybe_last_dot = None;
    let mut last_dot = None;
    let mut dot_allowed = false;
    let mut hyphen_allowed = false;
    let mut all_numeric = true;
    let mut maybe_host = true;
    let mut host_ended = false;
    for (i, c) in s.char_indices() {
        // `can_be_last` records whether the authority may end *after* this char.
        let can_be_last = match c {
            // ALPHA
            'a'..='z' | 'A'..='Z' | '\u{80}'..=char::MAX => {
                // Can start or end a domain label, but not numeric
                dot_allowed = true;
                hyphen_allowed = true;
                last_dot = maybe_last_dot;
                all_numeric = false;
                if host_ended {
                    maybe_host = false;
                }
                !require_host || !host_ended
            }
            // DIGIT
            '0'..='9' => {
                // Same as above, except numeric
                dot_allowed = true;
                hyphen_allowed = true;
                last_dot = maybe_last_dot;
                if host_ended {
                    maybe_host = false;
                }
                !require_host || !host_ended
            }
            // unreserved
            '-' => {
                // Hyphen can't be at start of a label, e.g. `-b` in `a.-b.com`
                if !hyphen_allowed {
                    maybe_host = false;
                }
                // Hyphen can't be at end of a label, e.g. `b-` in `a.b-.com`
                dot_allowed = false;
                all_numeric = false;
                !require_host
            }
            '.' => {
                if !dot_allowed {
                    // Label can't be empty, e.g. `.example.com` or `a..com`
                    host_ended = true;
                }
                dot_allowed = false;
                hyphen_allowed = false;
                maybe_last_dot = Some(i);
                false
            }
            '_' | '~' => {
                // Hostnames can't contain these and we don't want to treat them as delimiters.
                maybe_host = false;
                false
            }
            // sub-delims
            '!' | '$' | '&' | '\'' | '(' | ')' | '*' | '+' | ',' | ';' | '=' => {
                // Can't be in hostnames, but we treat them as delimiters
                host_ended = true;
                if !userinfo_allowed && require_host {
                    // We don't have to look further
                    break;
                }
                false
            }
            ':' => {
                // Could be in userinfo, or we're getting a port now.
                if !userinfo_allowed && !port_allowed {
                    break;
                }
                // Don't advance the last dot when we get to port numbers
                maybe_last_dot = last_dot;
                false
            }
            '@' => {
                if !userinfo_allowed {
                    // We already had userinfo, can't have another `@` in a valid authority.
                    return (None, None);
                }
                // Sike! Everything before this has been userinfo, so let's reset our
                // opinions about all the host bits.
                userinfo_allowed = false;
                maybe_last_dot = None;
                last_dot = None;
                dot_allowed = false;
                hyphen_allowed = false;
                all_numeric = true;
                maybe_host = true;
                host_ended = false;
                false
            }
            '/' => {
                if !require_host {
                    // For schemes where we allow anything, we want to stop at delimiter characters
                    // except if we get a slash closing the URL, which happened here.
                    end = Some(i);
                }
                break;
            }
            _ => {
                // Anything else, this might be the end of the authority (can be empty).
                // Now let the rest of the code handle checking whether the end of the URL is
                // valid.
                break;
            }
        };
        if can_be_last {
            end = Some(i + c.len_utf8());
        }
    }
    if require_host {
        if maybe_host {
            // Can't have just a number without dots as the authority
            if all_numeric && last_dot.is_none() && end != Some(0) {
                return (None, None);
            }
            // If we have something that is not just numeric (not an IP address),
            // check that the TLD looks reasonable. This is to avoid linking things like
            // `abc@v1.1`.
            if !all_numeric {
                if let Some(last_dot) = last_dot {
                    if !valid_tld(&s[last_dot + 1..]) {
                        return (None, None);
                    }
                }
            }
            return (end, last_dot);
        } else {
            return (None, None);
        }
    } else {
        return (end, last_dot);
    }
}
/// A TLD candidate looks plausible when it begins with at least two ASCII
/// letters (rejects e.g. the `1` in `v1.1`, or a single-letter tail).
fn valid_tld(tld: &str) -> bool {
    let mut chars = tld.chars();
    match (chars.next(), chars.next()) {
        (Some(a), Some(b)) => a.is_ascii_alphabetic() && b.is_ascii_alphabetic(),
        _ => false,
    }
}
Add pct-encoded to docs
//! Domain name related scanning, used by both email and URL scanners.
//!
//! This is called domains for familiarity but it's about the authority part of URLs as defined in
//! https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
//!
//! ```text
//! authority = [ userinfo "@" ] host [ ":" port ]
//!
//!
//! userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
//!
//! host = IP-literal / IPv4address / reg-name
//!
//! IP-literal = "[" ( IPv6address / IPvFuture ) "]"
//!
//! IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
//!
//! reg-name = *( unreserved / pct-encoded / sub-delims )
//!
//!
//! unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
//!
//! sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
//!
//! pct-encoded = "%" HEXDIG HEXDIG
//! ```
use std::char;
/// Finds the end of the authority component (`[ userinfo "@" ] host [ ":" port ]`)
/// at the start of `s`.
///
/// Returns `(end, last_dot)`:
/// * `end` — byte offset just past the last character that may legally end the
///   authority, or `None` when no acceptable authority exists;
/// * `last_dot` — byte offset of the `.` that introduces the final host label,
///   so callers can validate the TLD.
pub(crate) fn find_authority_end(
    s: &str,
    mut userinfo_allowed: bool,
    require_host: bool,
    port_allowed: bool,
) -> (Option<usize>, Option<usize>) {
    let mut end = Some(0);
    // A `.` is only promoted from `maybe_last_dot` to `last_dot` once a label
    // character follows it, so a trailing dot never counts.
    let mut maybe_last_dot = None;
    let mut last_dot = None;
    let mut dot_allowed = false;
    let mut hyphen_allowed = false;
    let mut all_numeric = true;
    let mut maybe_host = true;
    let mut host_ended = false;
    for (i, c) in s.char_indices() {
        // `can_be_last` records whether the authority may end *after* this char.
        let can_be_last = match c {
            // ALPHA
            'a'..='z' | 'A'..='Z' | '\u{80}'..=char::MAX => {
                // Can start or end a domain label, but not numeric
                dot_allowed = true;
                hyphen_allowed = true;
                last_dot = maybe_last_dot;
                all_numeric = false;
                if host_ended {
                    maybe_host = false;
                }
                !require_host || !host_ended
            }
            // DIGIT
            '0'..='9' => {
                // Same as above, except numeric
                dot_allowed = true;
                hyphen_allowed = true;
                last_dot = maybe_last_dot;
                if host_ended {
                    maybe_host = false;
                }
                !require_host || !host_ended
            }
            // unreserved
            '-' => {
                // Hyphen can't be at start of a label, e.g. `-b` in `a.-b.com`
                if !hyphen_allowed {
                    maybe_host = false;
                }
                // Hyphen can't be at end of a label, e.g. `b-` in `a.b-.com`
                dot_allowed = false;
                all_numeric = false;
                !require_host
            }
            '.' => {
                if !dot_allowed {
                    // Label can't be empty, e.g. `.example.com` or `a..com`
                    host_ended = true;
                }
                dot_allowed = false;
                hyphen_allowed = false;
                maybe_last_dot = Some(i);
                false
            }
            '_' | '~' => {
                // Hostnames can't contain these and we don't want to treat them as delimiters.
                maybe_host = false;
                false
            }
            // sub-delims
            '!' | '$' | '&' | '\'' | '(' | ')' | '*' | '+' | ',' | ';' | '=' => {
                // Can't be in hostnames, but we treat them as delimiters
                host_ended = true;
                if !userinfo_allowed && require_host {
                    // We don't have to look further
                    break;
                }
                false
            }
            ':' => {
                // Could be in userinfo, or we're getting a port now.
                if !userinfo_allowed && !port_allowed {
                    break;
                }
                // Don't advance the last dot when we get to port numbers
                maybe_last_dot = last_dot;
                false
            }
            '@' => {
                if !userinfo_allowed {
                    // We already had userinfo, can't have another `@` in a valid authority.
                    return (None, None);
                }
                // Sike! Everything before this has been userinfo, so let's reset our
                // opinions about all the host bits.
                userinfo_allowed = false;
                maybe_last_dot = None;
                last_dot = None;
                dot_allowed = false;
                hyphen_allowed = false;
                all_numeric = true;
                maybe_host = true;
                host_ended = false;
                false
            }
            '/' => {
                if !require_host {
                    // For schemes where we allow anything, we want to stop at delimiter characters
                    // except if we get a slash closing the URL, which happened here.
                    end = Some(i);
                }
                break;
            }
            _ => {
                // Anything else, this might be the end of the authority (can be empty).
                // Now let the rest of the code handle checking whether the end of the URL is
                // valid.
                break;
            }
        };
        if can_be_last {
            end = Some(i + c.len_utf8());
        }
    }
    if require_host {
        if maybe_host {
            // Can't have just a number without dots as the authority
            if all_numeric && last_dot.is_none() && end != Some(0) {
                return (None, None);
            }
            // If we have something that is not just numeric (not an IP address),
            // check that the TLD looks reasonable. This is to avoid linking things like
            // `abc@v1.1`.
            if !all_numeric {
                if let Some(last_dot) = last_dot {
                    if !valid_tld(&s[last_dot + 1..]) {
                        return (None, None);
                    }
                }
            }
            return (end, last_dot);
        } else {
            return (None, None);
        }
    } else {
        return (end, last_dot);
    }
}
/// A TLD candidate looks plausible when its first two bytes are ASCII letters.
/// (Any multi-byte UTF-8 character fails the ASCII test, matching the
/// char-based check this replaces.)
fn valid_tld(tld: &str) -> bool {
    matches!(
        tld.as_bytes(),
        [a, b, ..] if a.is_ascii_alphabetic() && b.is_ascii_alphabetic()
    )
}
|
// Copyright (c) IxMilia. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
extern crate byteorder;
use self::byteorder::{
ByteOrder,
LittleEndian,
WriteBytesExt,
};
extern crate image;
use self::image::DynamicImage;
use entities::*;
use enums::*;
use header::*;
use objects::*;
use tables::*;
use drawing_item::{
DrawingItem,
DrawingItemMut,
};
use ::{
CodePair,
CodePairValue,
DxfError,
DxfResult,
};
use ::dxb_reader::DxbReader;
use ::dxb_writer::DxbWriter;
use ::entity_iter::EntityIter;
use ::handle_tracker::HandleTracker;
use ::helper_functions::*;
use ::object_iter::ObjectIter;
use block::Block;
use class::Class;
use code_pair_iter::CodePairIter;
use code_pair_writer::CodePairWriter;
use std::fs::File;
use std::io::{
BufReader,
BufWriter,
Cursor,
Read,
Write,
};
use std::collections::HashSet;
use std::iter::Iterator;
use std::path::Path;
use itertools::{
PutBack,
put_back,
};
/// Represents a DXF drawing.
///
/// A `Drawing` is the root of the DXF object model: the header, table entries
/// (layers, styles, etc.), blocks, entities, and objects all hang off this
/// type. Use `load`/`load_file` to read one and `save`/`save_file` to write it.
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Drawing {
    /// The drawing's header. Contains various drawing-specific values and settings.
    pub header: Header,
    /// The classes contained by the drawing.
    pub classes: Vec<Class>,
    /// The AppIds contained by the drawing.
    pub app_ids: Vec<AppId>,
    /// The block records contained by the drawing.
    pub block_records: Vec<BlockRecord>,
    /// The dimension styles contained by the drawing.
    pub dim_styles: Vec<DimStyle>,
    /// The layers contained by the drawing.
    pub layers: Vec<Layer>,
    /// The line types contained by the drawing.
    pub line_types: Vec<LineType>,
    /// The visual styles contained by the drawing.
    pub styles: Vec<Style>,
    /// The user coordinate systems (UCS) contained by the drawing.
    pub ucss: Vec<Ucs>,
    /// The views contained by the drawing.
    pub views: Vec<View>,
    /// The view ports contained by the drawing.
    pub view_ports: Vec<ViewPort>,
    /// The blocks contained by the drawing.
    pub blocks: Vec<Block>,
    /// The entities contained by the drawing.
    pub entities: Vec<Entity>,
    /// The objects contained by the drawing.
    pub objects: Vec<Object>,
    /// The thumbnail image preview of the drawing.
    // `DynamicImage` is not serializable, so this field is skipped when the
    // "serialize" feature is enabled.
    #[cfg_attr(feature = "serialize", serde(skip))]
    pub thumbnail: Option<DynamicImage>,
}
impl Default for Drawing {
    /// An empty drawing: default header, no table entries, no content, and no
    /// thumbnail.
    fn default() -> Self {
        Drawing {
            header: Header::default(),
            classes: Vec::new(),
            app_ids: Vec::new(),
            block_records: Vec::new(),
            dim_styles: Vec::new(),
            layers: Vec::new(),
            line_types: Vec::new(),
            styles: Vec::new(),
            ucss: Vec::new(),
            views: Vec::new(),
            view_ports: Vec::new(),
            blocks: Vec::new(),
            entities: Vec::new(),
            objects: Vec::new(),
            thumbnail: None,
        }
    }
}
// public implementation
impl Drawing {
    /// Loads a `Drawing` from anything that implements the `Read` trait.
    pub fn load<T>(reader: &mut T) -> DxfResult<Drawing>
        where T: Read + ?Sized {
        let first_line = match read_line(reader) {
            Some(Ok(line)) => line,
            Some(Err(e)) => return Err(e),
            None => return Err(DxfError::UnexpectedEndOfInput),
        };
        // The first line selects the file flavor: the "AutoCAD DXB 1.0"
        // sentinel means binary DXB, anything else is treated as code pairs.
        match &*first_line {
            "AutoCAD DXB 1.0" => {
                let mut reader = DxbReader::new(reader);
                reader.load()
            },
            _ => {
                let reader = CodePairIter::new(reader, first_line);
                let mut drawing = Drawing::default();
                drawing.clear();
                let mut iter = put_back(reader);
                Drawing::read_sections(&mut drawing, &mut iter)?;
                // After the sections the only acceptable remaining pair is
                // 0/EOF; a missing EOF is tolerated.
                match iter.next() {
                    Some(Ok(CodePair { code: 0, value: CodePairValue::Str(ref s), .. })) if s == "EOF" => Ok(drawing),
                    Some(Ok(pair)) => Err(DxfError::UnexpectedCodePair(pair, String::from("expected 0/EOF"))),
                    Some(Err(e)) => Err(e),
                    None => Ok(drawing),
                }
            }
        }
    }
    /// Loads a `Drawing` from disk, using a `BufReader`.
    pub fn load_file(file_name: &str) -> DxfResult<Drawing> {
        let path = Path::new(file_name);
        let file = File::open(&path)?;
        let mut buf_reader = BufReader::new(file);
        Drawing::load(&mut buf_reader)
    }
    /// Writes a `Drawing` to anything that implements the `Write` trait.
    pub fn save<T>(&self, writer: &mut T) -> DxfResult<()>
        where T: Write + ?Sized {
        self.save_internal(writer, true)
    }
    /// Writes a `Drawing` as binary to anything that implements the `Write` trait.
    pub fn save_binary<T>(&self, writer: &mut T) -> DxfResult<()>
        where T: Write + ?Sized {
        self.save_internal(writer, false)
    }
    // Shared implementation for ASCII and binary saving. The body is written
    // to memory first because the header needs the final "next available
    // handle" value, which is only known after the rest has been written.
    fn save_internal<T>(&self, writer: &mut T, as_ascii: bool) -> DxfResult<()>
        where T: Write + ?Sized {
        // write to memory while tracking the used handle values
        let mut buf = Cursor::new(vec![]);
        let mut handle_tracker = HandleTracker::new(self.header.next_available_handle);
        {
            let mut code_pair_writer = CodePairWriter::new(&mut buf, as_ascii);
            let write_handles = self.header.version >= AcadVersion::R13 || self.header.handles_enabled;
            self.write_classes(&mut code_pair_writer)?;
            self.write_tables(write_handles, &mut code_pair_writer, &mut handle_tracker)?;
            self.write_blocks(write_handles, &mut code_pair_writer, &mut handle_tracker)?;
            self.write_entities(write_handles, &mut code_pair_writer, &mut handle_tracker)?;
            self.write_objects(&mut code_pair_writer, &mut handle_tracker)?;
            self.write_thumbnail(&mut code_pair_writer)?;
            code_pair_writer.write_code_pair(&CodePair::new_str(0, "EOF"))?;
        }
        // write header to the final location
        {
            let mut final_writer = CodePairWriter::new(writer, as_ascii);
            final_writer.write_prelude()?;
            self.header.write(&mut final_writer, handle_tracker.get_current_next_handle())?;
        }
        // copy memory to final location
        writer.write_all(&*buf.into_inner())?;
        Ok(())
    }
    /// Writes a `Drawing` to disk, using a `BufWriter`.
    pub fn save_file(&self, file_name: &str) -> DxfResult<()> {
        self.save_file_internal(file_name, true)
    }
    /// Writes a `Drawing` as binary to disk, using a `BufWriter`.
    pub fn save_file_binary(&self, file_name: &str) -> DxfResult<()> {
        self.save_file_internal(file_name, false)
    }
    fn save_file_internal(&self, file_name: &str, as_ascii: bool) -> DxfResult<()> {
        let path = Path::new(file_name);
        let file = File::create(&path)?;
        let mut writer = BufWriter::new(file);
        self.save_internal(&mut writer, as_ascii)
    }
    /// Writes a `Drawing` as DXB to anything that implements the `Write` trait.
    pub fn save_dxb<T>(&self, writer: &mut T) -> DxfResult<()>
        where T: Write + ?Sized {
        let mut writer = DxbWriter::new(writer);
        writer.write(self)
    }
    /// Writes a `Drawing` as DXB to disk, using a `BufWriter`.
    pub fn save_file_dxb(&self, file_name: &str) -> DxfResult<()> {
        let path = Path::new(file_name);
        let file = File::create(&path)?;
        let mut buf_writer = BufWriter::new(file);
        self.save_dxb(&mut buf_writer)
    }
    /// Clears all items from the `Drawing`.
    // Note: the header is intentionally left untouched; only collections and
    // the thumbnail are reset.
    pub fn clear(&mut self) {
        self.classes.clear();
        self.app_ids.clear();
        self.block_records.clear();
        self.dim_styles.clear();
        self.layers.clear();
        self.line_types.clear();
        self.styles.clear();
        self.ucss.clear();
        self.views.clear();
        self.view_ports.clear();
        self.blocks.clear();
        self.entities.clear();
        self.objects.clear();
        self.thumbnail = None;
    }
    /// Normalizes the `Drawing` by ensuring expected items are present.
    pub fn normalize(&mut self) {
        // TODO: check for duplicates
        self.header.normalize();
        self.normalize_blocks();
        self.normalize_entities();
        self.normalize_objects();
        self.normalize_app_ids();
        self.normalize_block_records();
        self.normalize_layers();
        self.normalize_text_styles();
        self.normalize_view_ports();
        self.normalize_views();
        self.ensure_mline_styles();
        self.ensure_dimension_styles();
        self.ensure_layers();
        self.ensure_line_types();
        self.ensure_text_styles();
        self.ensure_view_ports();
        self.ensure_views();
        self.ensure_ucs();
        // keep all tables sorted by name
        self.app_ids.sort_by(|a, b| a.name.cmp(&b.name));
        self.block_records.sort_by(|a, b| a.name.cmp(&b.name));
        self.dim_styles.sort_by(|a, b| a.name.cmp(&b.name));
        self.layers.sort_by(|a, b| a.name.cmp(&b.name));
        self.line_types.sort_by(|a, b| a.name.cmp(&b.name));
        self.styles.sort_by(|a, b| a.name.cmp(&b.name));
        self.ucss.sort_by(|a, b| a.name.cmp(&b.name));
        self.views.sort_by(|a, b| a.name.cmp(&b.name));
        self.view_ports.sort_by(|a, b| a.name.cmp(&b.name));
    }
    /// Gets a `DrawingItem` with the appropriate handle or `None`.
    // Linear scan of every owning collection, in a fixed order.
    pub fn get_item_by_handle<'a>(&'a self, handle: u32) -> Option<DrawingItem<'a>> {
        for item in &self.app_ids {
            if item.handle == handle {
                return Some(DrawingItem::AppId(item));
            }
        }
        for item in &self.blocks {
            if item.handle == handle {
                return Some(DrawingItem::Block(item));
            }
        }
        for item in &self.block_records {
            if item.handle == handle {
                return Some(DrawingItem::BlockRecord(item));
            }
        }
        for item in &self.dim_styles {
            if item.handle == handle {
                return Some(DrawingItem::DimStyle(item));
            }
        }
        for item in &self.entities {
            if item.common.handle == handle {
                return Some(DrawingItem::Entity(item));
            }
        }
        for item in &self.layers {
            if item.handle == handle {
                return Some(DrawingItem::Layer(item));
            }
        }
        for item in &self.line_types {
            if item.handle == handle {
                return Some(DrawingItem::LineType(item));
            }
        }
        for item in &self.objects {
            if item.common.handle == handle {
                return Some(DrawingItem::Object(item));
            }
        }
        for item in &self.styles {
            if item.handle == handle {
                return Some(DrawingItem::Style(item));
            }
        }
        for item in &self.ucss {
            if item.handle == handle {
                return Some(DrawingItem::Ucs(item));
            }
        }
        for item in &self.views {
            if item.handle == handle {
                return Some(DrawingItem::View(item));
            }
        }
        for item in &self.view_ports {
            if item.handle == handle {
                return Some(DrawingItem::ViewPort(item));
            }
        }
        None
    }
    /// Gets a `DrawingItemMut` with the appropriate handle or `None`.
    // Mutable twin of `get_item_by_handle`; keep the scan order identical.
    pub fn get_item_by_handle_mut<'a>(&'a mut self, handle: u32) -> Option<DrawingItemMut<'a>> {
        for item in &mut self.app_ids {
            if item.handle == handle {
                return Some(DrawingItemMut::AppId(item));
            }
        }
        for item in &mut self.blocks {
            if item.handle == handle {
                return Some(DrawingItemMut::Block(item));
            }
        }
        for item in &mut self.block_records {
            if item.handle == handle {
                return Some(DrawingItemMut::BlockRecord(item));
            }
        }
        for item in &mut self.dim_styles {
            if item.handle == handle {
                return Some(DrawingItemMut::DimStyle(item));
            }
        }
        for item in &mut self.entities {
            if item.common.handle == handle {
                return Some(DrawingItemMut::Entity(item));
            }
        }
        for item in &mut self.layers {
            if item.handle == handle {
                return Some(DrawingItemMut::Layer(item));
            }
        }
        for item in &mut self.line_types {
            if item.handle == handle {
                return Some(DrawingItemMut::LineType(item));
            }
        }
        for item in &mut self.objects {
            if item.common.handle == handle {
                return Some(DrawingItemMut::Object(item));
            }
        }
        for item in &mut self.styles {
            if item.handle == handle {
                return Some(DrawingItemMut::Style(item));
            }
        }
        for item in &mut self.ucss {
            if item.handle == handle {
                return Some(DrawingItemMut::Ucs(item));
            }
        }
        for item in &mut self.views {
            if item.handle == handle {
                return Some(DrawingItemMut::View(item));
            }
        }
        for item in &mut self.view_ports {
            if item.handle == handle {
                return Some(DrawingItemMut::ViewPort(item));
            }
        }
        None
    }
    /// Assigns the next available handle to `item` if it doesn't have one yet
    /// (a handle of 0 means "unassigned"), then returns the item's handle.
    pub(crate) fn assign_and_get_handle(&mut self, item: &mut DrawingItemMut) -> u32 {
        if item.get_handle() == 0 {
            item.set_handle(self.header.next_available_handle);
            self.header.next_available_handle += 1;
        }
        item.get_handle()
    }
}
// private implementation
impl Drawing {
fn write_classes<T>(&self, writer: &mut CodePairWriter<T>) -> DxfResult<()>
where T: Write {
if self.classes.len() == 0 {
return Ok(());
}
writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
writer.write_code_pair(&CodePair::new_str(2, "CLASSES"))?;
for c in &self.classes {
c.write(&self.header.version, writer)?;
}
writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
Ok(())
}
    /// Writes the TABLES section. The table contents themselves are produced
    /// by the free `write_tables` helper (imported via `use tables::*`).
    fn write_tables<T>(&self, write_handles: bool, writer: &mut CodePairWriter<T>, handle_tracker: &mut HandleTracker) -> DxfResult<()>
        where T: Write {
        writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
        writer.write_code_pair(&CodePair::new_str(2, "TABLES"))?;
        write_tables(&self, write_handles, writer, handle_tracker)?;
        writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
        Ok(())
    }
fn write_blocks<T>(&self, write_handles: bool, writer: &mut CodePairWriter<T>, handle_tracker: &mut HandleTracker) -> DxfResult<()>
where T: Write {
if self.blocks.len() == 0 {
return Ok(());
}
writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
writer.write_code_pair(&CodePair::new_str(2, "BLOCKS"))?;
for b in &self.blocks {
b.write(&self.header.version, write_handles, writer, handle_tracker)?;
}
writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
Ok(())
}
    /// Writes the ENTITIES section. Unlike classes/blocks, this section is
    /// always emitted even when there are no entities.
    fn write_entities<T>(&self, write_handles: bool, writer: &mut CodePairWriter<T>, handle_tracker: &mut HandleTracker) -> DxfResult<()>
        where T: Write {
        writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
        writer.write_code_pair(&CodePair::new_str(2, "ENTITIES"))?;
        for e in &self.entities {
            e.write(&self.header.version, write_handles, writer, handle_tracker)?;
        }
        writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
        Ok(())
    }
    /// Writes the OBJECTS section. Always emitted, even when empty.
    fn write_objects<T>(&self, writer: &mut CodePairWriter<T>, handle_tracker: &mut HandleTracker) -> DxfResult<()>
        where T: Write {
        writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
        writer.write_code_pair(&CodePair::new_str(2, "OBJECTS"))?;
        for o in &self.objects {
            o.write(&self.header.version, writer, handle_tracker)?;
        }
        writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
        Ok(())
    }
fn write_thumbnail<T>(&self, writer: &mut CodePairWriter<T>) -> DxfResult<()>
where T: Write {
if &self.header.version >= &AcadVersion::R2000 {
match self.thumbnail {
Some(ref i) => {
writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
writer.write_code_pair(&CodePair::new_str(2, "THUMBNAILIMAGE"))?;
let mut data = vec![];
i.save(&mut data, image::ImageFormat::BMP)?;
let length = data.len() - 14; // skip 14 byte bmp header
writer.write_code_pair(&CodePair::new_i32(90, length as i32))?;
for s in data[14..].chunks(128) {
let mut line = String::new();
for b in s {
line.push_str(&format!("{:02X}", b));
}
writer.write_code_pair(&CodePair::new_string(310, &line))?;
}
writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
},
None => (), // nothing to write
}
} // */
Ok(())
}
    /// Reads every `0/SECTION` ... `0/ENDSEC` group from `iter` into
    /// `drawing`, dispatching on the `2/<section-name>` pair that follows each
    /// section start. Unknown sections are skipped. Stops at `0/EOF` (which is
    /// put back for the caller) or at end of input.
    fn read_sections<I>(drawing: &mut Drawing, iter: &mut PutBack<I>) -> DxfResult<()>
        where I: Iterator<Item = DxfResult<CodePair>> {
        loop {
            match iter.next() {
                Some(Ok(pair @ CodePair { code: 0, .. })) => {
                    match &*pair.assert_string()? {
                        "EOF" => {
                            iter.put_back(Ok(pair));
                            break;
                        },
                        "SECTION" => {
                            match iter.next() {
                                Some(Ok(CodePair { code: 2, value: CodePairValue::Str(s), .. })) => {
                                    match &*s {
                                        "HEADER" => drawing.header = Header::read(iter)?,
                                        "CLASSES" => Class::read_classes(drawing, iter)?,
                                        "TABLES" => drawing.read_section_item(iter, "TABLE", read_specific_table)?,
                                        "BLOCKS" => drawing.read_section_item(iter, "BLOCK", Block::read_block)?,
                                        "ENTITIES" => drawing.read_entities(iter)?,
                                        "OBJECTS" => drawing.read_objects(iter)?,
                                        // thumbnail failure (unsupported format) is not fatal
                                        "THUMBNAILIMAGE" => { let _ = drawing.read_thumbnail(iter)?; },
                                        _ => Drawing::swallow_section(iter)?,
                                    }
                                    // every section must be terminated by 0/ENDSEC
                                    match iter.next() {
                                        Some(Ok(CodePair { code: 0, value: CodePairValue::Str(ref s), .. })) if s == "ENDSEC" => (),
                                        Some(Ok(pair)) => return Err(DxfError::UnexpectedCodePair(pair, String::from("expected 0/ENDSEC"))),
                                        Some(Err(e)) => return Err(e),
                                        None => return Err(DxfError::UnexpectedEndOfInput),
                                    }
                                },
                                Some(Ok(pair)) => return Err(DxfError::UnexpectedCodePair(pair, String::from("expected 2/<section-name>"))),
                                Some(Err(e)) => return Err(e),
                                None => return Err(DxfError::UnexpectedEndOfInput),
                            }
                        },
                        _ => return Err(DxfError::UnexpectedCodePair(pair, String::from("expected 0/SECTION"))),
                    }
                },
                Some(Ok(pair)) => return Err(DxfError::UnexpectedCodePair(pair, String::from("expected 0/SECTION or 0/EOF"))),
                Some(Err(e)) => return Err(e),
                None => break, // ideally should have been 0/EOF
            }
        }
        Ok(())
    }
fn swallow_section<I>(iter: &mut PutBack<I>) -> DxfResult<()>
where I: Iterator<Item = DxfResult<CodePair>> {
loop {
match iter.next() {
Some(Ok(pair)) => {
if pair.code == 0 && pair.assert_string()? == "ENDSEC" {
iter.put_back(Ok(pair));
break;
}
},
Some(Err(e)) => return Err(e),
None => break,
}
}
Ok(())
}
    /// Reads the ENTITIES section into `self.entities` via `EntityIter`.
    fn read_entities<I>(&mut self, iter: &mut PutBack<I>) -> DxfResult<()>
        where I: Iterator<Item = DxfResult<CodePair>> {
        let mut iter = EntityIter { iter: iter };
        iter.read_entities_into_vec(&mut self.entities)?;
        Ok(())
    }
fn read_objects<I>(&mut self, iter: &mut PutBack<I>) -> DxfResult<()>
where I: Iterator<Item = DxfResult<CodePair>> {
let mut iter = put_back(ObjectIter { iter: iter });
loop {
match iter.next() {
Some(obj) => self.objects.push(obj),
None => break,
}
}
Ok(())
}
    /// Reads the THUMBNAILIMAGE section into `self.thumbnail`.
    ///
    /// DXF files store the BMP pixel data without the 14-byte BMP file header,
    /// so one is synthesized here (with the file length and pixel-data offset
    /// patched in afterwards) before the buffer is handed to the `image`
    /// crate. Returns `Ok(false)` when the DIB header type is unsupported.
    fn read_thumbnail<I>(&mut self, iter: &mut PutBack<I>) -> DxfResult<bool>
        where I: Iterator<Item = DxfResult<CodePair>> {
        // get the length; we don't really care about this since we'll just read whatever's there
        let length_pair = next_pair!(iter);
        let _length = match length_pair.code {
            90 => length_pair.assert_i32()? as usize,
            _ => return Err(DxfError::UnexpectedCode(length_pair.code, length_pair.offset)),
        };
        // prepend the BMP header that always seems to be missing from DXF files
        let mut data : Vec<u8> = vec![
            'B' as u8, 'M' as u8, // magic number
            0x00, 0x00, 0x00, 0x00, // file length (calculated later)
            0x00, 0x00, // reserved
            0x00, 0x00, // reserved
            0x00, 0x00, 0x00, 0x00 // image data offset (calculated later)
        ];
        let header_length = data.len();
        let file_length_offset = 2;
        let image_data_offset_offset = 10;
        // read the hex data
        loop {
            match iter.next() {
                Some(Ok(pair @ CodePair { code: 0, .. })) => {
                    // likely 0/ENDSEC
                    iter.put_back(Ok(pair));
                    break;
                },
                Some(Ok(pair @ CodePair { code: 310, .. })) => { parse_hex_string(&pair.assert_string()?, &mut data, pair.offset)?; },
                Some(Ok(pair)) => { return Err(DxfError::UnexpectedCode(pair.code, pair.offset)); },
                Some(Err(e)) => return Err(e),
                None => break,
            }
        }
        // set the file length
        let mut length_bytes = vec![];
        length_bytes.write_i32::<LittleEndian>(data.len() as i32)?;
        data[file_length_offset + 0] = length_bytes[0];
        data[file_length_offset + 1] = length_bytes[1];
        data[file_length_offset + 2] = length_bytes[2];
        data[file_length_offset + 3] = length_bytes[3];
        // calculate the image data offset
        let dib_header_size = LittleEndian::read_i32(&data[header_length..]) as usize;
        // calculate the palette size
        let palette_size = match dib_header_size {
            40 => {
                // BITMAPINFOHEADER
                // NOTE(review): palette size is computed as bpp * color count —
                // confirm against the BMP spec (palette entries are typically
                // 4 bytes each regardless of bpp).
                let bpp = LittleEndian::read_u16(&data[header_length + 14 ..]) as usize;
                let palette_color_count = LittleEndian::read_u32(&data[header_length + 32 ..]) as usize;
                bpp * palette_color_count
            },
            _ => return Ok(false),
        };
        // set the image data offset
        let image_data_offset = header_length + dib_header_size + palette_size;
        let mut offset_bytes = vec![];
        offset_bytes.write_i32::<LittleEndian>(image_data_offset as i32)?;
        data[image_data_offset_offset + 0] = offset_bytes[0];
        data[image_data_offset_offset + 1] = offset_bytes[1];
        data[image_data_offset_offset + 2] = offset_bytes[2];
        data[image_data_offset_offset + 3] = offset_bytes[3];
        let image = image::load_from_memory(&data)?;
        self.thumbnail = Some(image);
        Ok(true)
    }
    /// Reads repeated `item_type` items (e.g. `TABLE` or `BLOCK`) from the
    /// current section by invoking `callback` for each one, until the closing
    /// `ENDSEC` is seen (which is put back for the caller). Any other code
    /// pair is an error.
    fn read_section_item<I, F>(&mut self, iter: &mut PutBack<I>, item_type: &str, callback: F) -> DxfResult<()>
        where I: Iterator<Item = DxfResult<CodePair>>,
              F: Fn(&mut Drawing, &mut PutBack<I>) -> DxfResult<()> {
        loop {
            match iter.next() {
                Some(Ok(pair)) => {
                    if pair.code == 0 {
                        match &*pair.assert_string()? {
                            "ENDSEC" => {
                                iter.put_back(Ok(pair));
                                break;
                            },
                            val => {
                                if val == item_type {
                                    callback(self, iter)?;
                                }
                                else {
                                    return Err(DxfError::UnexpectedCodePair(pair, String::new()));
                                }
                            },
                        }
                    }
                    else {
                        return Err(DxfError::UnexpectedCodePair(pair, String::new()));
                    }
                },
                Some(Err(e)) => return Err(e),
                None => return Err(DxfError::UnexpectedEndOfInput),
            }
        }
        Ok(())
    }
pub(crate) fn swallow_table<I>(iter: &mut PutBack<I>) -> DxfResult<()>
where I: Iterator<Item = DxfResult<CodePair>> {
loop {
match iter.next() {
Some(Ok(pair)) => {
if pair.code == 0 {
match &*pair.assert_string()? {
"TABLE" | "ENDSEC" | "ENDTAB" => {
iter.put_back(Ok(pair));
break;
},
_ => (), // swallow the code pair
}
}
}
Some(Err(e)) => return Err(e),
None => return Err(DxfError::UnexpectedEndOfInput),
}
}
Ok(())
}
fn normalize_blocks(&mut self) {
for i in 0..self.blocks.len() {
self.blocks[i].normalize();
}
}
fn normalize_entities(&mut self) {
for i in 0..self.entities.len() {
self.entities[i].normalize();
}
}
fn normalize_objects(&mut self) {
for i in 0..self.objects.len() {
self.objects[i].normalize();
}
}
fn normalize_app_ids(&mut self) {
// gather existing app ids
let mut existing_app_ids = HashSet::new();
for app_id in &self.app_ids {
add_to_existing(&mut existing_app_ids, &app_id.name);
}
// prepare app ids that should exist
let should_exist = vec![
String::from("ACAD"),
String::from("ACADANNOTATIVE"),
String::from("ACAD_NAV_VCDISPLAY"),
String::from("ACAD_MLEADERVER"),
];
// ensure all app ids that should exist do
for name in &should_exist {
if !existing_app_ids.contains(name) {
existing_app_ids.insert(name.clone());
self.app_ids.push(AppId {
name: name.clone(),
.. Default::default()
});
}
}
}
fn normalize_block_records(&mut self) {
// gather existing block records
let mut existing_block_records = HashSet::new();
for block_record in &self.block_records {
add_to_existing(&mut existing_block_records, &block_record.name);
}
// prepare block records that should exist
let should_exist = vec![
String::from("*MODEL_SPACE"),
String::from("*PAPER_SPACE"),
];
// ensure all block records that should exist do
for name in &should_exist {
if !existing_block_records.contains(name) {
existing_block_records.insert(name.clone());
self.block_records.push(BlockRecord {
name: name.clone(),
.. Default::default()
});
}
}
}
fn normalize_layers(&mut self) {
for i in 0..self.layers.len() {
self.layers[i].normalize();
}
}
fn normalize_text_styles(&mut self) {
for i in 0..self.styles.len() {
self.styles[i].normalize();
}
}
fn normalize_view_ports(&mut self) {
for i in 0..self.view_ports.len() {
self.view_ports[i].normalize();
}
}
fn normalize_views(&mut self) {
for i in 0..self.views.len() {
self.views[i].normalize();
}
}
    /// Ensures an `MLINESTYLE` object exists for every style name referenced by
    /// an `MLINE` entity, adding default-initialized styles for any missing.
    fn ensure_mline_styles(&mut self) {
        // gather existing mline style names
        let mut existing_mline_styles = HashSet::new();
        for obj in &self.objects {
            match &obj.specific {
                &ObjectType::MLineStyle(ref ml) => add_to_existing(&mut existing_mline_styles, &ml.style_name),
                _ => (),
            }
        }
        // find mline style names that should exist
        let mut to_add = HashSet::new();
        for ent in &self.entities {
            match &ent.specific {
                &EntityType::MLine(ref ml) => add_to_existing(&mut to_add, &ml.style_name),
                _ => (),
            }
        }
        // ensure all mline styles that should exist do
        for name in &to_add {
            if !existing_mline_styles.contains(name) {
                existing_mline_styles.insert(name.clone());
                self.objects.push(Object::new(ObjectType::MLineStyle(MLineStyle {
                    style_name: name.clone(),
                    .. Default::default()
                })));
            }
        }
    }
    /// Ensures a dimension style exists for every style name referenced by a
    /// dimension-like entity, plus the always-required `STANDARD` and
    /// `ANNOTATIVE` styles.
    fn ensure_dimension_styles(&mut self) {
        // gather existing dimension style names
        let mut existing_dim_styles = HashSet::new();
        for dim_style in &self.dim_styles {
            add_to_existing(&mut existing_dim_styles, &dim_style.name);
        }
        // find dimension style names that should exist
        let mut to_add = HashSet::new();
        add_to_existing(&mut to_add, &String::from("STANDARD"));
        add_to_existing(&mut to_add, &String::from("ANNOTATIVE"));
        // every dimension variant, plus leaders and tolerances, names a style
        for ent in &self.entities {
            match &ent.specific {
                &EntityType::RotatedDimension(ref d) => add_to_existing(&mut to_add, &d.dimension_base.dimension_style_name),
                &EntityType::RadialDimension(ref d) => add_to_existing(&mut to_add, &d.dimension_base.dimension_style_name),
                &EntityType::DiameterDimension(ref d) => add_to_existing(&mut to_add, &d.dimension_base.dimension_style_name),
                &EntityType::AngularThreePointDimension(ref d) => add_to_existing(&mut to_add, &d.dimension_base.dimension_style_name),
                &EntityType::OrdinateDimension(ref d) => add_to_existing(&mut to_add, &d.dimension_base.dimension_style_name),
                &EntityType::Leader(ref l) => add_to_existing(&mut to_add, &l.dimension_style_name),
                &EntityType::Tolerance(ref t) => add_to_existing(&mut to_add, &t.dimension_style_name),
                _ => (),
            }
        }
        // ensure all dimension styles that should exist do
        for name in &to_add {
            if !existing_dim_styles.contains(name) {
                existing_dim_styles.insert(name.clone());
                self.dim_styles.push(DimStyle {
                    name: name.clone(),
                    .. Default::default()
                });
            }
        }
    }
    /// Ensures a layer exists for every layer name referenced anywhere in the
    /// drawing (header, blocks, entities, and layer filter/index objects),
    /// adding default-initialized layers for any that are missing.
    fn ensure_layers(&mut self) {
        // gather existing layer names
        let mut existing_layers = HashSet::new();
        for layer in &self.layers {
            add_to_existing(&mut existing_layers, &layer.name);
        }
        // find layer names that should exist; layer "0" is always required
        let mut to_add = HashSet::new();
        add_to_existing(&mut to_add, &String::from("0"));
        add_to_existing(&mut to_add, &self.header.current_layer);
        for block in &self.blocks {
            add_to_existing(&mut to_add, &block.layer);
            for ent in &block.entities {
                add_to_existing(&mut to_add, &ent.common.layer);
            }
        }
        for ent in &self.entities {
            add_to_existing(&mut to_add, &ent.common.layer);
        }
        // layer filter and layer index objects also reference layers by name
        for obj in &self.objects {
            match &obj.specific {
                &ObjectType::LayerFilter(ref l) => {
                    for layer_name in &l.layer_names {
                        add_to_existing(&mut to_add, &layer_name);
                    }
                },
                &ObjectType::LayerIndex(ref l) => {
                    for layer_name in &l.layer_names {
                        add_to_existing(&mut to_add, &layer_name);
                    }
                },
                _ => (),
            }
        }
        // ensure all layers that should exist do
        for name in &to_add {
            if !existing_layers.contains(name) {
                existing_layers.insert(name.clone());
                self.layers.push(Layer {
                    name: name.clone(),
                    .. Default::default()
                });
            }
        }
    }
    /// Ensures a line type exists for every line type name referenced by the
    /// header, layers, entities, or mline styles, plus the always-required
    /// `BYLAYER`, `BYBLOCK`, and `CONTINUOUS` types.
    fn ensure_line_types(&mut self) {
        // gather existing line type names
        let mut existing_line_types = HashSet::new();
        for line_type in &self.line_types {
            add_to_existing(&mut existing_line_types, &line_type.name);
        }
        // find line_types that should exist
        let mut to_add = HashSet::new();
        add_to_existing(&mut to_add, &String::from("BYLAYER"));
        add_to_existing(&mut to_add, &String::from("BYBLOCK"));
        add_to_existing(&mut to_add, &String::from("CONTINUOUS"));
        add_to_existing(&mut to_add, &self.header.current_entity_line_type);
        add_to_existing(&mut to_add, &self.header.dimension_line_type);
        for layer in &self.layers {
            add_to_existing(&mut to_add, &layer.line_type_name);
        }
        for block in &self.blocks {
            for ent in &block.entities {
                add_to_existing(&mut to_add, &ent.common.line_type_name);
            }
        }
        for ent in &self.entities {
            add_to_existing(&mut to_add, &ent.common.line_type_name);
        }
        for obj in &self.objects {
            match &obj.specific {
                &ObjectType::MLineStyle(ref style) => add_to_existing(&mut to_add, &style.style_name),
                _ => (),
            }
        }
        // ensure all line_types that should exist do
        for name in &to_add {
            if !existing_line_types.contains(name) {
                existing_line_types.insert(name.clone());
                self.line_types.push(LineType {
                    name: name.clone(),
                    .. Default::default()
                });
            }
        }
    }
    /// Ensures a text style exists for every style name referenced by a text-
    /// bearing entity or mline style, plus the always-required `STANDARD` and
    /// `ANNOTATIVE` styles.
    fn ensure_text_styles(&mut self) {
        // gather existing text style names
        let mut existing_styles = HashSet::new();
        for style in &self.styles {
            add_to_existing(&mut existing_styles, &style.name);
        }
        // find styles that should exist
        let mut to_add = HashSet::new();
        add_to_existing(&mut to_add, &String::from("STANDARD"));
        add_to_existing(&mut to_add, &String::from("ANNOTATIVE"));
        for entity in &self.entities {
            match &entity.specific {
                &EntityType::ArcAlignedText(ref e) => add_to_existing(&mut to_add, &e.text_style_name),
                &EntityType::Attribute(ref e) => add_to_existing(&mut to_add, &e.text_style_name),
                &EntityType::AttributeDefinition(ref e) => add_to_existing(&mut to_add, &e.text_style_name),
                &EntityType::MText(ref e) => add_to_existing(&mut to_add, &e.text_style_name),
                &EntityType::Text(ref e) => add_to_existing(&mut to_add, &e.text_style_name),
                _ => (),
            }
        }
        for obj in &self.objects {
            match &obj.specific {
                &ObjectType::MLineStyle(ref o) => add_to_existing(&mut to_add, &o.style_name),
                _ => (),
            }
        }
        // ensure all styles that should exist do
        for name in &to_add {
            if !existing_styles.contains(name) {
                existing_styles.insert(name.clone());
                self.styles.push(Style {
                    name: name.clone(),
                    .. Default::default()
                });
            }
        }
    }
fn ensure_view_ports(&mut self) {
// gather existing view port names
let mut existing_view_ports = HashSet::new();
for vp in &self.view_ports {
add_to_existing(&mut existing_view_ports, &vp.name);
}
// find view ports that should exist
let mut to_add = HashSet::new();
add_to_existing(&mut to_add, &String::from("*ACTIVE"));
// ensure all view ports that should exist do
for name in &to_add {
if !existing_view_ports.contains(name) {
existing_view_ports.insert(name.clone());
self.view_ports.push(ViewPort {
name: name.clone(),
.. Default::default()
});
}
}
}
fn ensure_views(&mut self) {
// gather existing view names
let mut existing_views = HashSet::new();
for view in &self.views {
add_to_existing(&mut existing_views, &view.name);
}
// find views that should exist
let mut to_add = HashSet::new();
for obj in &self.objects {
match &obj.specific {
&ObjectType::PlotSettings(ref ps) => add_to_existing(&mut to_add, &ps.plot_view_name),
_ => (),
}
}
// ensure all views that should exist do
for name in &to_add {
if !existing_views.contains(name) {
existing_views.insert(name.clone());
self.views.push(View {
name: name.clone(),
.. Default::default()
});
}
}
}
    /// Ensures a UCS table entry exists for every UCS name referenced by the
    /// header, adding default-initialized entries for any that are missing.
    fn ensure_ucs(&mut self) {
        // gather existing ucs names
        let mut existing_ucs = HashSet::new();
        for ucs in &self.ucss {
            add_to_existing(&mut existing_ucs, &ucs.name);
        }
        // find ucs that should exist; each of these header fields names a UCS
        let mut to_add = HashSet::new();
        add_to_existing(&mut to_add, &self.header.ucs_definition_name);
        add_to_existing(&mut to_add, &self.header.ucs_name);
        add_to_existing(&mut to_add, &self.header.ortho_ucs_reference);
        add_to_existing(&mut to_add, &self.header.paperspace_ucs_definition_name);
        add_to_existing(&mut to_add, &self.header.paperspace_ucs_name);
        add_to_existing(&mut to_add, &self.header.paperspace_ortho_ucs_reference);
        // ensure all ucs that should exist do; an empty name is not a reference
        for name in &to_add {
            if !name.is_empty() && !existing_ucs.contains(name) {
                existing_ucs.insert(name.clone());
                self.ucss.push(Ucs {
                    name: name.clone(),
                    .. Default::default()
                });
            }
        }
    }
}
/// Inserts `val` into `set` if it is not already present.
///
/// Takes `&str` rather than `&String` (clippy: ptr_arg); all existing call
/// sites pass `&String`/`&&String`, which coerce to `&str` automatically.
/// Checking `contains` first avoids allocating a new `String` when the value
/// is already in the set.
fn add_to_existing(set: &mut HashSet<String>, val: &str) {
    if !set.contains(val) {
        set.insert(val.to_owned());
    }
}
// remove unnecessary Cursor
// Copyright (c) IxMilia. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
extern crate byteorder;
use self::byteorder::{
ByteOrder,
LittleEndian,
WriteBytesExt,
};
extern crate image;
use self::image::DynamicImage;
use entities::*;
use enums::*;
use header::*;
use objects::*;
use tables::*;
use drawing_item::{
DrawingItem,
DrawingItemMut,
};
use ::{
CodePair,
CodePairValue,
DxfError,
DxfResult,
};
use ::dxb_reader::DxbReader;
use ::dxb_writer::DxbWriter;
use ::entity_iter::EntityIter;
use ::handle_tracker::HandleTracker;
use ::helper_functions::*;
use ::object_iter::ObjectIter;
use block::Block;
use class::Class;
use code_pair_iter::CodePairIter;
use code_pair_writer::CodePairWriter;
use std::fs::File;
use std::io::{
BufReader,
BufWriter,
Read,
Write,
};
use std::collections::HashSet;
use std::iter::Iterator;
use std::path::Path;
use itertools::{
PutBack,
put_back,
};
/// Represents a DXF drawing.
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Drawing {
    /// The drawing's header. Contains various drawing-specific values and settings.
    pub header: Header,
    /// The classes contained by the drawing.
    pub classes: Vec<Class>,
    /// The AppIds contained by the drawing.
    pub app_ids: Vec<AppId>,
    /// The block records contained by the drawing.
    pub block_records: Vec<BlockRecord>,
    /// The dimension styles contained by the drawing.
    pub dim_styles: Vec<DimStyle>,
    /// The layers contained by the drawing.
    pub layers: Vec<Layer>,
    /// The line types contained by the drawing.
    pub line_types: Vec<LineType>,
    /// The text styles contained by the drawing.
    pub styles: Vec<Style>,
    /// The user coordinate systems (UCS) contained by the drawing.
    pub ucss: Vec<Ucs>,
    /// The views contained by the drawing.
    pub views: Vec<View>,
    /// The view ports contained by the drawing.
    pub view_ports: Vec<ViewPort>,
    /// The blocks contained by the drawing.
    pub blocks: Vec<Block>,
    /// The entities contained by the drawing.
    pub entities: Vec<Entity>,
    /// The objects contained by the drawing.
    pub objects: Vec<Object>,
    /// The thumbnail image preview of the drawing.
    /// Not serialized when the `serialize` feature is enabled.
    #[cfg_attr(feature = "serialize", serde(skip))]
    pub thumbnail: Option<DynamicImage>,
}
impl Default for Drawing {
    /// Creates an empty drawing: a default header, empty collections, and no
    /// thumbnail.
    fn default() -> Self {
        Drawing {
            header: Default::default(),
            classes: Vec::new(),
            app_ids: Vec::new(),
            block_records: Vec::new(),
            dim_styles: Vec::new(),
            layers: Vec::new(),
            line_types: Vec::new(),
            styles: Vec::new(),
            ucss: Vec::new(),
            views: Vec::new(),
            view_ports: Vec::new(),
            blocks: Vec::new(),
            entities: Vec::new(),
            objects: Vec::new(),
            thumbnail: None,
        }
    }
}
// public implementation
impl Drawing {
    /// Loads a `Drawing` from anything that implements the `Read` trait.
    ///
    /// The first line decides the format: the DXB sentinel selects the binary
    /// DXB reader, anything else is treated as a stream of DXF code pairs.
    pub fn load<T>(reader: &mut T) -> DxfResult<Drawing>
        where T: Read + ?Sized {
        let first_line = match read_line(reader) {
            Some(Ok(line)) => line,
            Some(Err(e)) => return Err(e),
            None => return Err(DxfError::UnexpectedEndOfInput),
        };
        match &*first_line {
            "AutoCAD DXB 1.0" => {
                let mut reader = DxbReader::new(reader);
                reader.load()
            },
            _ => {
                let reader = CodePairIter::new(reader, first_line);
                let mut drawing = Drawing::default();
                drawing.clear();
                let mut iter = put_back(reader);
                Drawing::read_sections(&mut drawing, &mut iter)?;
                // after all sections, expect 0/EOF; missing EOF is tolerated
                match iter.next() {
                    Some(Ok(CodePair { code: 0, value: CodePairValue::Str(ref s), .. })) if s == "EOF" => Ok(drawing),
                    Some(Ok(pair)) => Err(DxfError::UnexpectedCodePair(pair, String::from("expected 0/EOF"))),
                    Some(Err(e)) => Err(e),
                    None => Ok(drawing),
                }
            }
        }
    }
/// Loads a `Drawing` from disk, using a `BufReader`.
pub fn load_file(file_name: &str) -> DxfResult<Drawing> {
let path = Path::new(file_name);
let file = File::open(&path)?;
let mut buf_reader = BufReader::new(file);
Drawing::load(&mut buf_reader)
}
    /// Writes a `Drawing` to anything that implements the `Write` trait.
    ///
    /// Output is ASCII text; see `save_binary` for the binary form.
    pub fn save<T>(&self, writer: &mut T) -> DxfResult<()>
        where T: Write + ?Sized {
        self.save_internal(writer, true)
    }
    /// Writes a `Drawing` as binary to anything that implements the `Write` trait.
    pub fn save_binary<T>(&self, writer: &mut T) -> DxfResult<()>
        where T: Write + ?Sized {
        self.save_internal(writer, false)
    }
    /// Writes the drawing, either as ASCII text or as binary.
    ///
    /// The body is first written to an in-memory buffer so the handles actually
    /// consumed are known before the header (which records the next available
    /// handle) is written to the real output; the buffered body is then copied.
    fn save_internal<T>(&self, writer: &mut T, as_ascii: bool) -> DxfResult<()>
        where T: Write + ?Sized {
        // write to memory while tracking the used handle values
        let mut buf = vec![];
        let mut handle_tracker = HandleTracker::new(self.header.next_available_handle);
        {
            let mut code_pair_writer = CodePairWriter::new(&mut buf, as_ascii);
            // handles are written from R13 on, or when explicitly enabled
            let write_handles = self.header.version >= AcadVersion::R13 || self.header.handles_enabled;
            self.write_classes(&mut code_pair_writer)?;
            self.write_tables(write_handles, &mut code_pair_writer, &mut handle_tracker)?;
            self.write_blocks(write_handles, &mut code_pair_writer, &mut handle_tracker)?;
            self.write_entities(write_handles, &mut code_pair_writer, &mut handle_tracker)?;
            self.write_objects(&mut code_pair_writer, &mut handle_tracker)?;
            self.write_thumbnail(&mut code_pair_writer)?;
            code_pair_writer.write_code_pair(&CodePair::new_str(0, "EOF"))?;
        }
        // write header to the final location
        {
            let mut final_writer = CodePairWriter::new(writer, as_ascii);
            final_writer.write_prelude()?;
            self.header.write(&mut final_writer, handle_tracker.get_current_next_handle())?;
        }
        // copy memory to final location
        writer.write_all(&*buf)?;
        Ok(())
    }
    /// Writes a `Drawing` to disk as ASCII text, using a `BufWriter`.
    pub fn save_file(&self, file_name: &str) -> DxfResult<()> {
        self.save_file_internal(file_name, true)
    }
    /// Writes a `Drawing` as binary to disk, using a `BufWriter`.
    pub fn save_file_binary(&self, file_name: &str) -> DxfResult<()> {
        self.save_file_internal(file_name, false)
    }
fn save_file_internal(&self, file_name: &str, as_ascii: bool) -> DxfResult<()> {
let path = Path::new(file_name);
let file = File::create(&path)?;
let mut writer = BufWriter::new(file);
self.save_internal(&mut writer, as_ascii)
}
    /// Writes a `Drawing` as DXB (binary drawing interchange) to anything that
    /// implements the `Write` trait.
    pub fn save_dxb<T>(&self, writer: &mut T) -> DxfResult<()>
        where T: Write + ?Sized {
        let mut writer = DxbWriter::new(writer);
        writer.write(self)
    }
/// Writes a `Drawing` as DXB to disk, using a `BufWriter`.
pub fn save_file_dxb(&self, file_name: &str) -> DxfResult<()> {
let path = Path::new(file_name);
let file = File::create(&path)?;
let mut buf_writer = BufWriter::new(file);
self.save_dxb(&mut buf_writer)
}
    /// Clears all items from the `Drawing`.
    ///
    /// Only the item collections and the thumbnail are reset; the `header`
    /// field is left untouched.
    pub fn clear(&mut self) {
        self.classes.clear();
        self.app_ids.clear();
        self.block_records.clear();
        self.dim_styles.clear();
        self.layers.clear();
        self.line_types.clear();
        self.styles.clear();
        self.ucss.clear();
        self.views.clear();
        self.view_ports.clear();
        self.blocks.clear();
        self.entities.clear();
        self.objects.clear();
        self.thumbnail = None;
    }
    /// Normalizes the `Drawing` by ensuring expected items are present.
    ///
    /// First each collection is normalized, then required-but-missing table
    /// entries and objects are added, and finally every named table is sorted
    /// by name.
    pub fn normalize(&mut self) {
        // TODO: check for duplicates
        self.header.normalize();
        self.normalize_blocks();
        self.normalize_entities();
        self.normalize_objects();
        self.normalize_app_ids();
        self.normalize_block_records();
        self.normalize_layers();
        self.normalize_text_styles();
        self.normalize_view_ports();
        self.normalize_views();
        // the ensure_* methods create any referenced-but-missing items
        self.ensure_mline_styles();
        self.ensure_dimension_styles();
        self.ensure_layers();
        self.ensure_line_types();
        self.ensure_text_styles();
        self.ensure_view_ports();
        self.ensure_views();
        self.ensure_ucs();
        // sort all named tables by name
        self.app_ids.sort_by(|a, b| a.name.cmp(&b.name));
        self.block_records.sort_by(|a, b| a.name.cmp(&b.name));
        self.dim_styles.sort_by(|a, b| a.name.cmp(&b.name));
        self.layers.sort_by(|a, b| a.name.cmp(&b.name));
        self.line_types.sort_by(|a, b| a.name.cmp(&b.name));
        self.styles.sort_by(|a, b| a.name.cmp(&b.name));
        self.ucss.sort_by(|a, b| a.name.cmp(&b.name));
        self.views.sort_by(|a, b| a.name.cmp(&b.name));
        self.view_ports.sort_by(|a, b| a.name.cmp(&b.name));
    }
    /// Gets a `DrawingItem` with the appropriate handle or `None`.
    ///
    /// Collections are searched in a fixed order (app ids first, view ports
    /// last); if items were ever to share a handle, the earlier collection wins.
    pub fn get_item_by_handle<'a>(&'a self, handle: u32) -> Option<DrawingItem<'a>> {
        for item in &self.app_ids {
            if item.handle == handle {
                return Some(DrawingItem::AppId(item));
            }
        }
        for item in &self.blocks {
            if item.handle == handle {
                return Some(DrawingItem::Block(item));
            }
        }
        for item in &self.block_records {
            if item.handle == handle {
                return Some(DrawingItem::BlockRecord(item));
            }
        }
        for item in &self.dim_styles {
            if item.handle == handle {
                return Some(DrawingItem::DimStyle(item));
            }
        }
        // entities and objects store their handle on the common record
        for item in &self.entities {
            if item.common.handle == handle {
                return Some(DrawingItem::Entity(item));
            }
        }
        for item in &self.layers {
            if item.handle == handle {
                return Some(DrawingItem::Layer(item));
            }
        }
        for item in &self.line_types {
            if item.handle == handle {
                return Some(DrawingItem::LineType(item));
            }
        }
        for item in &self.objects {
            if item.common.handle == handle {
                return Some(DrawingItem::Object(item));
            }
        }
        for item in &self.styles {
            if item.handle == handle {
                return Some(DrawingItem::Style(item));
            }
        }
        for item in &self.ucss {
            if item.handle == handle {
                return Some(DrawingItem::Ucs(item));
            }
        }
        for item in &self.views {
            if item.handle == handle {
                return Some(DrawingItem::View(item));
            }
        }
        for item in &self.view_ports {
            if item.handle == handle {
                return Some(DrawingItem::ViewPort(item));
            }
        }
        None
    }
    /// Gets a `DrawingItemMut` with the appropriate handle or `None`.
    ///
    /// Mutable counterpart of `get_item_by_handle`; searches the collections in
    /// the same fixed order.
    pub fn get_item_by_handle_mut<'a>(&'a mut self, handle: u32) -> Option<DrawingItemMut<'a>> {
        for item in &mut self.app_ids {
            if item.handle == handle {
                return Some(DrawingItemMut::AppId(item));
            }
        }
        for item in &mut self.blocks {
            if item.handle == handle {
                return Some(DrawingItemMut::Block(item));
            }
        }
        for item in &mut self.block_records {
            if item.handle == handle {
                return Some(DrawingItemMut::BlockRecord(item));
            }
        }
        for item in &mut self.dim_styles {
            if item.handle == handle {
                return Some(DrawingItemMut::DimStyle(item));
            }
        }
        // entities and objects store their handle on the common record
        for item in &mut self.entities {
            if item.common.handle == handle {
                return Some(DrawingItemMut::Entity(item));
            }
        }
        for item in &mut self.layers {
            if item.handle == handle {
                return Some(DrawingItemMut::Layer(item));
            }
        }
        for item in &mut self.line_types {
            if item.handle == handle {
                return Some(DrawingItemMut::LineType(item));
            }
        }
        for item in &mut self.objects {
            if item.common.handle == handle {
                return Some(DrawingItemMut::Object(item));
            }
        }
        for item in &mut self.styles {
            if item.handle == handle {
                return Some(DrawingItemMut::Style(item));
            }
        }
        for item in &mut self.ucss {
            if item.handle == handle {
                return Some(DrawingItemMut::Ucs(item));
            }
        }
        for item in &mut self.views {
            if item.handle == handle {
                return Some(DrawingItemMut::View(item));
            }
        }
        for item in &mut self.view_ports {
            if item.handle == handle {
                return Some(DrawingItemMut::ViewPort(item));
            }
        }
        None
    }
pub(crate) fn assign_and_get_handle(&mut self, item: &mut DrawingItemMut) -> u32 {
if item.get_handle() == 0 {
item.set_handle(self.header.next_available_handle);
self.header.next_available_handle += 1;
}
item.get_handle()
}
}
// private implementation
impl Drawing {
fn write_classes<T>(&self, writer: &mut CodePairWriter<T>) -> DxfResult<()>
where T: Write {
if self.classes.len() == 0 {
return Ok(());
}
writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
writer.write_code_pair(&CodePair::new_str(2, "CLASSES"))?;
for c in &self.classes {
c.write(&self.header.version, writer)?;
}
writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
Ok(())
}
    /// Writes the `TABLES` section, delegating the table bodies to the free
    /// function `write_tables`.
    fn write_tables<T>(&self, write_handles: bool, writer: &mut CodePairWriter<T>, handle_tracker: &mut HandleTracker) -> DxfResult<()>
        where T: Write {
        writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
        writer.write_code_pair(&CodePair::new_str(2, "TABLES"))?;
        write_tables(&self, write_handles, writer, handle_tracker)?;
        writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
        Ok(())
    }
fn write_blocks<T>(&self, write_handles: bool, writer: &mut CodePairWriter<T>, handle_tracker: &mut HandleTracker) -> DxfResult<()>
where T: Write {
if self.blocks.len() == 0 {
return Ok(());
}
writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
writer.write_code_pair(&CodePair::new_str(2, "BLOCKS"))?;
for b in &self.blocks {
b.write(&self.header.version, write_handles, writer, handle_tracker)?;
}
writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
Ok(())
}
    /// Writes the `ENTITIES` section (always present, even when empty).
    fn write_entities<T>(&self, write_handles: bool, writer: &mut CodePairWriter<T>, handle_tracker: &mut HandleTracker) -> DxfResult<()>
        where T: Write {
        writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
        writer.write_code_pair(&CodePair::new_str(2, "ENTITIES"))?;
        for e in &self.entities {
            e.write(&self.header.version, write_handles, writer, handle_tracker)?;
        }
        writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
        Ok(())
    }
    /// Writes the `OBJECTS` section (always present, even when empty).
    fn write_objects<T>(&self, writer: &mut CodePairWriter<T>, handle_tracker: &mut HandleTracker) -> DxfResult<()>
        where T: Write {
        writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
        writer.write_code_pair(&CodePair::new_str(2, "OBJECTS"))?;
        for o in &self.objects {
            o.write(&self.header.version, writer, handle_tracker)?;
        }
        writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
        Ok(())
    }
fn write_thumbnail<T>(&self, writer: &mut CodePairWriter<T>) -> DxfResult<()>
where T: Write {
if &self.header.version >= &AcadVersion::R2000 {
match self.thumbnail {
Some(ref i) => {
writer.write_code_pair(&CodePair::new_str(0, "SECTION"))?;
writer.write_code_pair(&CodePair::new_str(2, "THUMBNAILIMAGE"))?;
let mut data = vec![];
i.save(&mut data, image::ImageFormat::BMP)?;
let length = data.len() - 14; // skip 14 byte bmp header
writer.write_code_pair(&CodePair::new_i32(90, length as i32))?;
for s in data[14..].chunks(128) {
let mut line = String::new();
for b in s {
line.push_str(&format!("{:02X}", b));
}
writer.write_code_pair(&CodePair::new_string(310, &line))?;
}
writer.write_code_pair(&CodePair::new_str(0, "ENDSEC"))?;
},
None => (), // nothing to write
}
} // */
Ok(())
}
    /// Reads every `SECTION`...`ENDSEC` group into `drawing`, dispatching on the
    /// section name; unknown sections are swallowed.  Stops at `0/EOF` (pushed
    /// back) or the end of input.
    fn read_sections<I>(drawing: &mut Drawing, iter: &mut PutBack<I>) -> DxfResult<()>
        where I: Iterator<Item = DxfResult<CodePair>> {
        loop {
            match iter.next() {
                Some(Ok(pair @ CodePair { code: 0, .. })) => {
                    match &*pair.assert_string()? {
                        "EOF" => {
                            // leave EOF for the caller to verify
                            iter.put_back(Ok(pair));
                            break;
                        },
                        "SECTION" => {
                            // a section name is given by the following 2/<name> pair
                            match iter.next() {
                                Some(Ok(CodePair { code: 2, value: CodePairValue::Str(s), .. })) => {
                                    match &*s {
                                        "HEADER" => drawing.header = Header::read(iter)?,
                                        "CLASSES" => Class::read_classes(drawing, iter)?,
                                        "TABLES" => drawing.read_section_item(iter, "TABLE", read_specific_table)?,
                                        "BLOCKS" => drawing.read_section_item(iter, "BLOCK", Block::read_block)?,
                                        "ENTITIES" => drawing.read_entities(iter)?,
                                        "OBJECTS" => drawing.read_objects(iter)?,
                                        "THUMBNAILIMAGE" => { let _ = drawing.read_thumbnail(iter)?; },
                                        _ => Drawing::swallow_section(iter)?,
                                    }
                                    // every section must be closed by 0/ENDSEC
                                    match iter.next() {
                                        Some(Ok(CodePair { code: 0, value: CodePairValue::Str(ref s), .. })) if s == "ENDSEC" => (),
                                        Some(Ok(pair)) => return Err(DxfError::UnexpectedCodePair(pair, String::from("expected 0/ENDSEC"))),
                                        Some(Err(e)) => return Err(e),
                                        None => return Err(DxfError::UnexpectedEndOfInput),
                                    }
                                },
                                Some(Ok(pair)) => return Err(DxfError::UnexpectedCodePair(pair, String::from("expected 2/<section-name>"))),
                                Some(Err(e)) => return Err(e),
                                None => return Err(DxfError::UnexpectedEndOfInput),
                            }
                        },
                        _ => return Err(DxfError::UnexpectedCodePair(pair, String::from("expected 0/SECTION"))),
                    }
                },
                Some(Ok(pair)) => return Err(DxfError::UnexpectedCodePair(pair, String::from("expected 0/SECTION or 0/EOF"))),
                Some(Err(e)) => return Err(e),
                None => break, // ideally should have been 0/EOF
            }
        }
        Ok(())
    }
fn swallow_section<I>(iter: &mut PutBack<I>) -> DxfResult<()>
where I: Iterator<Item = DxfResult<CodePair>> {
loop {
match iter.next() {
Some(Ok(pair)) => {
if pair.code == 0 && pair.assert_string()? == "ENDSEC" {
iter.put_back(Ok(pair));
break;
}
},
Some(Err(e)) => return Err(e),
None => break,
}
}
Ok(())
}
fn read_entities<I>(&mut self, iter: &mut PutBack<I>) -> DxfResult<()>
where I: Iterator<Item = DxfResult<CodePair>> {
let mut iter = EntityIter { iter: iter };
iter.read_entities_into_vec(&mut self.entities)?;
Ok(())
}
fn read_objects<I>(&mut self, iter: &mut PutBack<I>) -> DxfResult<()>
where I: Iterator<Item = DxfResult<CodePair>> {
let mut iter = put_back(ObjectIter { iter: iter });
loop {
match iter.next() {
Some(obj) => self.objects.push(obj),
None => break,
}
}
Ok(())
}
    /// Reads the `THUMBNAILIMAGE` section into `self.thumbnail`.
    ///
    /// Returns `Ok(true)` when an image was parsed and `Ok(false)` when the
    /// embedded DIB header type is unsupported (only BITMAPINFOHEADER, size 40,
    /// is handled).
    fn read_thumbnail<I>(&mut self, iter: &mut PutBack<I>) -> DxfResult<bool>
        where I: Iterator<Item = DxfResult<CodePair>> {
        // get the length; we don't really care about this since we'll just read whatever's there
        let length_pair = next_pair!(iter);
        let _length = match length_pair.code {
            90 => length_pair.assert_i32()? as usize,
            _ => return Err(DxfError::UnexpectedCode(length_pair.code, length_pair.offset)),
        };
        // prepend the BMP header that always seems to be missing from DXF files
        let mut data : Vec<u8> = vec![
            'B' as u8, 'M' as u8, // magic number
            0x00, 0x00, 0x00, 0x00, // file length (calculated later)
            0x00, 0x00, // reserved
            0x00, 0x00, // reserved
            0x00, 0x00, 0x00, 0x00 // image data offset (calculated later)
        ];
        let header_length = data.len();
        let file_length_offset = 2;
        let image_data_offset_offset = 10;
        // read the hex data from the 310-coded pairs
        loop {
            match iter.next() {
                Some(Ok(pair @ CodePair { code: 0, .. })) => {
                    // likely 0/ENDSEC
                    iter.put_back(Ok(pair));
                    break;
                },
                Some(Ok(pair @ CodePair { code: 310, .. })) => { parse_hex_string(&pair.assert_string()?, &mut data, pair.offset)?; },
                Some(Ok(pair)) => { return Err(DxfError::UnexpectedCode(pair.code, pair.offset)); },
                Some(Err(e)) => return Err(e),
                None => break,
            }
        }
        // set the file length (little-endian i32 at byte offset 2)
        let mut length_bytes = vec![];
        length_bytes.write_i32::<LittleEndian>(data.len() as i32)?;
        data[file_length_offset + 0] = length_bytes[0];
        data[file_length_offset + 1] = length_bytes[1];
        data[file_length_offset + 2] = length_bytes[2];
        data[file_length_offset + 3] = length_bytes[3];
        // calculate the image data offset
        let dib_header_size = LittleEndian::read_i32(&data[header_length..]) as usize;
        // calculate the palette size
        let palette_size = match dib_header_size {
            40 => {
                // BITMAPINFOHEADER
                let bpp = LittleEndian::read_u16(&data[header_length + 14 ..]) as usize;
                let palette_color_count = LittleEndian::read_u32(&data[header_length + 32 ..]) as usize;
                // NOTE(review): BMP palette entries are normally 4 bytes each;
                // `bpp * palette_color_count` looks suspect -- verify against real files
                bpp * palette_color_count
            },
            _ => return Ok(false), // unsupported DIB header type; not an error
        };
        // set the image data offset (little-endian i32 at byte offset 10)
        let image_data_offset = header_length + dib_header_size + palette_size;
        let mut offset_bytes = vec![];
        offset_bytes.write_i32::<LittleEndian>(image_data_offset as i32)?;
        data[image_data_offset_offset + 0] = offset_bytes[0];
        data[image_data_offset_offset + 1] = offset_bytes[1];
        data[image_data_offset_offset + 2] = offset_bytes[2];
        data[image_data_offset_offset + 3] = offset_bytes[3];
        let image = image::load_from_memory(&data)?;
        self.thumbnail = Some(image);
        Ok(true)
    }
    /// Reads every item of a single kind (`item_type`, e.g. "TABLE" or "BLOCK")
    /// from the current section, invoking `callback` once per item.  Stops at
    /// `0/ENDSEC`, which is pushed back for the caller.
    fn read_section_item<I, F>(&mut self, iter: &mut PutBack<I>, item_type: &str, callback: F) -> DxfResult<()>
        where I: Iterator<Item = DxfResult<CodePair>>,
              F: Fn(&mut Drawing, &mut PutBack<I>) -> DxfResult<()> {
        loop {
            match iter.next() {
                Some(Ok(pair)) => {
                    // only 0-code pairs are valid at this level of the section
                    if pair.code == 0 {
                        match &*pair.assert_string()? {
                            "ENDSEC" => {
                                // leave the section terminator for the caller
                                iter.put_back(Ok(pair));
                                break;
                            },
                            val => {
                                if val == item_type {
                                    callback(self, iter)?;
                                }
                                else {
                                    return Err(DxfError::UnexpectedCodePair(pair, String::new()));
                                }
                            },
                        }
                    }
                    else {
                        return Err(DxfError::UnexpectedCodePair(pair, String::new()));
                    }
                },
                Some(Err(e)) => return Err(e),
                // a section must be terminated by ENDSEC before input ends
                None => return Err(DxfError::UnexpectedEndOfInput),
            }
        }
        Ok(())
    }
pub(crate) fn swallow_table<I>(iter: &mut PutBack<I>) -> DxfResult<()>
where I: Iterator<Item = DxfResult<CodePair>> {
loop {
match iter.next() {
Some(Ok(pair)) => {
if pair.code == 0 {
match &*pair.assert_string()? {
"TABLE" | "ENDSEC" | "ENDTAB" => {
iter.put_back(Ok(pair));
break;
},
_ => (), // swallow the code pair
}
}
}
Some(Err(e)) => return Err(e),
None => return Err(DxfError::UnexpectedEndOfInput),
}
}
Ok(())
}
fn normalize_blocks(&mut self) {
for i in 0..self.blocks.len() {
self.blocks[i].normalize();
}
}
fn normalize_entities(&mut self) {
for i in 0..self.entities.len() {
self.entities[i].normalize();
}
}
fn normalize_objects(&mut self) {
for i in 0..self.objects.len() {
self.objects[i].normalize();
}
}
fn normalize_app_ids(&mut self) {
// gather existing app ids
let mut existing_app_ids = HashSet::new();
for app_id in &self.app_ids {
add_to_existing(&mut existing_app_ids, &app_id.name);
}
// prepare app ids that should exist
let should_exist = vec![
String::from("ACAD"),
String::from("ACADANNOTATIVE"),
String::from("ACAD_NAV_VCDISPLAY"),
String::from("ACAD_MLEADERVER"),
];
// ensure all app ids that should exist do
for name in &should_exist {
if !existing_app_ids.contains(name) {
existing_app_ids.insert(name.clone());
self.app_ids.push(AppId {
name: name.clone(),
.. Default::default()
});
}
}
}
fn normalize_block_records(&mut self) {
// gather existing block records
let mut existing_block_records = HashSet::new();
for block_record in &self.block_records {
add_to_existing(&mut existing_block_records, &block_record.name);
}
// prepare block records that should exist
let should_exist = vec![
String::from("*MODEL_SPACE"),
String::from("*PAPER_SPACE"),
];
// ensure all block records that should exist do
for name in &should_exist {
if !existing_block_records.contains(name) {
existing_block_records.insert(name.clone());
self.block_records.push(BlockRecord {
name: name.clone(),
.. Default::default()
});
}
}
}
fn normalize_layers(&mut self) {
for i in 0..self.layers.len() {
self.layers[i].normalize();
}
}
fn normalize_text_styles(&mut self) {
for i in 0..self.styles.len() {
self.styles[i].normalize();
}
}
fn normalize_view_ports(&mut self) {
for i in 0..self.view_ports.len() {
self.view_ports[i].normalize();
}
}
fn normalize_views(&mut self) {
for i in 0..self.views.len() {
self.views[i].normalize();
}
}
    /// Ensures an `MLINESTYLE` object exists for every style name referenced by
    /// an `MLINE` entity, adding default-initialized styles for any missing.
    fn ensure_mline_styles(&mut self) {
        // gather existing mline style names
        let mut existing_mline_styles = HashSet::new();
        for obj in &self.objects {
            match &obj.specific {
                &ObjectType::MLineStyle(ref ml) => add_to_existing(&mut existing_mline_styles, &ml.style_name),
                _ => (),
            }
        }
        // find mline style names that should exist
        let mut to_add = HashSet::new();
        for ent in &self.entities {
            match &ent.specific {
                &EntityType::MLine(ref ml) => add_to_existing(&mut to_add, &ml.style_name),
                _ => (),
            }
        }
        // ensure all mline styles that should exist do
        for name in &to_add {
            if !existing_mline_styles.contains(name) {
                existing_mline_styles.insert(name.clone());
                self.objects.push(Object::new(ObjectType::MLineStyle(MLineStyle {
                    style_name: name.clone(),
                    .. Default::default()
                })));
            }
        }
    }
fn ensure_dimension_styles(&mut self) {
    // Names of dimension styles already defined in the DIMSTYLE table.
    let mut defined = HashSet::new();
    for dim_style in &self.dim_styles {
        add_to_existing(&mut defined, &dim_style.name);
    }
    // Names that must exist: the two standard styles plus every style
    // referenced by a dimension-like entity.
    let mut required = HashSet::new();
    add_to_existing(&mut required, &String::from("STANDARD"));
    add_to_existing(&mut required, &String::from("ANNOTATIVE"));
    for ent in &self.entities {
        let style_name = match ent.specific {
            EntityType::RotatedDimension(ref dim) => Some(&dim.dimension_base.dimension_style_name),
            EntityType::RadialDimension(ref dim) => Some(&dim.dimension_base.dimension_style_name),
            EntityType::DiameterDimension(ref dim) => Some(&dim.dimension_base.dimension_style_name),
            EntityType::AngularThreePointDimension(ref dim) => Some(&dim.dimension_base.dimension_style_name),
            EntityType::OrdinateDimension(ref dim) => Some(&dim.dimension_base.dimension_style_name),
            EntityType::Leader(ref leader) => Some(&leader.dimension_style_name),
            EntityType::Tolerance(ref tolerance) => Some(&tolerance.dimension_style_name),
            _ => None,
        };
        if let Some(style_name) = style_name {
            add_to_existing(&mut required, style_name);
        }
    }
    // Add a default style for every required name that is missing.
    for name in &required {
        if !defined.contains(name) {
            defined.insert(name.clone());
            self.dim_styles.push(DimStyle {
                name: name.clone(),
                .. Default::default()
            });
        }
    }
}
fn ensure_layers(&mut self) {
    // Names of layers already defined in the LAYER table.
    let mut defined = HashSet::new();
    for layer in &self.layers {
        add_to_existing(&mut defined, &layer.name);
    }
    // Layer "0" always exists, plus the header's current layer and every
    // layer referenced by a block, an entity, or a layer filter/index object.
    let mut required = HashSet::new();
    add_to_existing(&mut required, &String::from("0"));
    add_to_existing(&mut required, &self.header.current_layer);
    for block in &self.blocks {
        add_to_existing(&mut required, &block.layer);
        for ent in &block.entities {
            add_to_existing(&mut required, &ent.common.layer);
        }
    }
    for ent in &self.entities {
        add_to_existing(&mut required, &ent.common.layer);
    }
    for obj in &self.objects {
        match obj.specific {
            ObjectType::LayerFilter(ref filter) => {
                for layer_name in &filter.layer_names {
                    add_to_existing(&mut required, layer_name);
                }
            },
            ObjectType::LayerIndex(ref index) => {
                for layer_name in &index.layer_names {
                    add_to_existing(&mut required, layer_name);
                }
            },
            _ => (),
        }
    }
    // Add a default layer for every required name that is missing.
    // NOTE(review): an empty layer name is added verbatim here; `ensure_ucs`
    // filters out empty names but this method does not -- confirm the
    // asymmetry is intentional.
    for name in &required {
        if !defined.contains(name) {
            defined.insert(name.clone());
            self.layers.push(Layer {
                name: name.clone(),
                .. Default::default()
            });
        }
    }
}
fn ensure_line_types(&mut self) {
    // Names of line types already defined in the LTYPE table.
    let mut defined = HashSet::new();
    for line_type in &self.line_types {
        add_to_existing(&mut defined, &line_type.name);
    }
    // The three standard line types must always exist, plus every line type
    // referenced from the header, a layer, an entity, or an object.
    let mut required = HashSet::new();
    for name in ["BYLAYER", "BYBLOCK", "CONTINUOUS"].iter() {
        add_to_existing(&mut required, &String::from(*name));
    }
    add_to_existing(&mut required, &self.header.current_entity_line_type);
    add_to_existing(&mut required, &self.header.dimension_line_type);
    for layer in &self.layers {
        add_to_existing(&mut required, &layer.line_type_name);
    }
    for block in &self.blocks {
        for ent in &block.entities {
            add_to_existing(&mut required, &ent.common.line_type_name);
        }
    }
    for ent in &self.entities {
        add_to_existing(&mut required, &ent.common.line_type_name);
    }
    for obj in &self.objects {
        // NOTE(review): this registers an MLINESTYLE's *style* name as a
        // required line-type name -- confirm `style_name` is the intended
        // field here.
        if let ObjectType::MLineStyle(ref style) = obj.specific {
            add_to_existing(&mut required, &style.style_name);
        }
    }
    // Add a default line type for every required name that is missing.
    for name in &required {
        if !defined.contains(name) {
            defined.insert(name.clone());
            self.line_types.push(LineType {
                name: name.clone(),
                .. Default::default()
            });
        }
    }
}
fn ensure_text_styles(&mut self) {
    // Names of text styles already defined in the STYLE table.
    let mut defined = HashSet::new();
    for style in &self.styles {
        add_to_existing(&mut defined, &style.name);
    }
    // The two standard styles must always exist, plus any style referenced
    // by a text-bearing entity or an MLINESTYLE object.
    let mut required = HashSet::new();
    add_to_existing(&mut required, &String::from("STANDARD"));
    add_to_existing(&mut required, &String::from("ANNOTATIVE"));
    for entity in &self.entities {
        match entity.specific {
            EntityType::ArcAlignedText(ref e) => add_to_existing(&mut required, &e.text_style_name),
            EntityType::Attribute(ref e) => add_to_existing(&mut required, &e.text_style_name),
            EntityType::AttributeDefinition(ref e) => add_to_existing(&mut required, &e.text_style_name),
            EntityType::MText(ref e) => add_to_existing(&mut required, &e.text_style_name),
            EntityType::Text(ref e) => add_to_existing(&mut required, &e.text_style_name),
            _ => (),
        }
    }
    for obj in &self.objects {
        if let ObjectType::MLineStyle(ref style) = obj.specific {
            add_to_existing(&mut required, &style.style_name);
        }
    }
    // Add a default style for every required name that is missing.
    for name in &required {
        if !defined.contains(name) {
            defined.insert(name.clone());
            self.styles.push(Style {
                name: name.clone(),
                .. Default::default()
            });
        }
    }
}
fn ensure_view_ports(&mut self) {
// gather existing view port names
let mut existing_view_ports = HashSet::new();
for vp in &self.view_ports {
add_to_existing(&mut existing_view_ports, &vp.name);
}
// find view ports that should exist
let mut to_add = HashSet::new();
add_to_existing(&mut to_add, &String::from("*ACTIVE"));
// ensure all view ports that should exist do
for name in &to_add {
if !existing_view_ports.contains(name) {
existing_view_ports.insert(name.clone());
self.view_ports.push(ViewPort {
name: name.clone(),
.. Default::default()
});
}
}
}
// Ensures every view referenced by a PLOTSETTINGS object exists in the VIEW table.
fn ensure_views(&mut self) {
    // gather existing view names
    let mut existing_views = HashSet::new();
    for view in &self.views {
        add_to_existing(&mut existing_views, &view.name);
    }
    // find view names referenced by PLOTSETTINGS objects
    let mut to_add = HashSet::new();
    for obj in &self.objects {
        // `if let` instead of a single-arm match with `_ => ()`
        if let ObjectType::PlotSettings(ref ps) = obj.specific {
            add_to_existing(&mut to_add, &ps.plot_view_name);
        }
    }
    // create a default view for each referenced-but-missing name
    for name in &to_add {
        if !existing_views.contains(name) {
            existing_views.insert(name.clone());
            self.views.push(View {
                name: name.clone(),
                .. Default::default()
            });
        }
    }
}
fn ensure_ucs(&mut self) {
    // Names of UCSs already defined in the UCS table.
    let mut defined = HashSet::new();
    for ucs in &self.ucss {
        add_to_existing(&mut defined, &ucs.name);
    }
    // Every UCS referenced from the header must exist.
    let header_refs = [
        &self.header.ucs_definition_name,
        &self.header.ucs_name,
        &self.header.ortho_ucs_reference,
        &self.header.paperspace_ucs_definition_name,
        &self.header.paperspace_ucs_name,
        &self.header.paperspace_ortho_ucs_reference,
    ];
    let mut required = HashSet::new();
    for name in header_refs.iter() {
        add_to_existing(&mut required, *name);
    }
    // Add a default UCS for every non-empty required name that is missing.
    for name in &required {
        if !name.is_empty() && !defined.contains(name) {
            defined.insert(name.clone());
            self.ucss.push(Ucs {
                name: name.clone(),
                .. Default::default()
            });
        }
    }
}
}
// Inserts `val` into `set` if it is not already present, cloning only when an
// insertion actually happens.
//
// Takes `&str` instead of `&String` (clippy `ptr_arg`); every existing call
// site coerces via deref, and the function now also accepts string literals
// and slices directly.
fn add_to_existing(set: &mut HashSet<String>, val: &str) {
    // `contains` accepts `&str` because `String: Borrow<str>`; checking first
    // avoids allocating a new `String` for values already in the set.
    if !set.contains(val) {
        set.insert(val.to_owned());
    }
}
|
//! Access ELF constants, other helper functions, which are independent of ELF bithood. Also
//! provides simple parser which returns an Elf64 or Elf32 "pre-built" binary.
//!
//! **WARNING**: to use the automagic ELF datatype union parser, you _must_ enable both elf and
//! elf32 features - i.e., do not use `no_elf` **NOR** `no_elf32`, otherwise you'll get obscure
//! errors about [goblin::elf::from_fd](fn.from_fd.html) missing.
#[cfg(not(feature = "pure"))]
pub mod strtab;
// These are shareable values for the 32/64 bit implementations.
//
// They are publicly re-exported by the pub-using module
#[macro_use]
pub mod header {
macro_rules! elf_header {
($size:ident) => {
#[repr(C)]
#[derive(Clone, Default)]
pub struct Header {
pub e_ident: [u8; SIZEOF_IDENT],
pub e_type: u16,
pub e_machine: u16,
pub e_version: u32,
pub e_entry: $size,
pub e_phoff: $size,
pub e_shoff: $size,
pub e_flags: u32,
pub e_ehsize: u16,
pub e_phentsize: u16,
pub e_phnum: u16,
pub e_shentsize: u16,
pub e_shnum: u16,
pub e_shstrndx: u16,
}
}
}
/// No file type.
pub const ET_NONE: u16 = 0;
/// Relocatable file.
pub const ET_REL: u16 = 1;
/// Executable file.
pub const ET_EXEC: u16 = 2;
/// Shared object file.
pub const ET_DYN: u16 = 3;
/// Core file.
pub const ET_CORE: u16 = 4;
/// Number of defined types.
pub const ET_NUM: u16 = 5;
/// The ELF magic number.
pub const ELFMAG: &'static [u8; 4] = b"\x7FELF";
/// SELF (Security-enhanced ELF) magic number.
pub const SELFMAG: usize = 4;
/// File class byte index.
pub const EI_CLASS: usize = 4;
/// Invalid class.
pub const ELFCLASSNONE: u8 = 0;
/// 32-bit objects.
pub const ELFCLASS32: u8 = 1;
/// 64-bit objects.
pub const ELFCLASS64: u8 = 2;
/// ELF class number.
pub const ELFCLASSNUM: u8 = 3;
/// Data encoding byte index.
pub const EI_DATA: usize = 5;
/// Invalid data encoding.
pub const ELFDATANONE: u8 = 0;
/// 2's complement, little endian.
pub const ELFDATA2LSB: u8 = 1;
/// 2's complement, big endian.
pub const ELFDATA2MSB: u8 = 2;
/// Number of bytes in an identifier.
pub const SIZEOF_IDENT: usize = 16;
/// Convert an ET value to their associated string.
#[inline]
pub fn et_to_str(et: u16) -> &'static str {
match et {
ET_NONE => "NONE",
ET_REL => "REL",
ET_EXEC => "EXEC",
ET_DYN => "DYN",
ET_CORE => "CORE",
ET_NUM => "NUM",
_ => "UNKNOWN_ET",
}
}
#[cfg(not(feature = "pure"))]
pub use self::impure::*;
#[cfg(not(feature = "pure"))]
mod impure {
use super::*;
use std::fs::File;
use std::io;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom::Start;
/// Search forward in the stream.
pub fn peek(fd: &mut File) -> io::Result<(u8, bool)> {
let mut header = [0u8; SIZEOF_IDENT];
try!(fd.seek(Start(0)));
match try!(fd.read(&mut header)) {
SIZEOF_IDENT => {
let class = header[EI_CLASS];
let is_lsb = header[EI_DATA] == ELFDATA2LSB;
Ok((class, is_lsb))
}
count => {
io_error!("Error: {:?} size is smaller than an ELF identication header",
count)
}
}
}
}
/// Derive the `from_bytes` method for a header.
macro_rules! elf_header_from_bytes {
() => {
/// Returns the corresponding ELF header from the given byte array.
pub fn from_bytes(bytes: &[u8; SIZEOF_EHDR]) -> Header {
// This is not unsafe because the header's size is encoded in the function,
// although the header can be semantically invalid.
let header: &Header = unsafe { mem::transmute(bytes) };
header.clone()
}
};
}
/// Derive the `from_fd` method for a header.
macro_rules! elf_header_from_fd {
() => {
/// Load a header from a file.
#[cfg(feature = "no_endian_fd")]
pub fn from_fd(fd: &mut File) -> io::Result<Header> {
let mut elf_header = [0; SIZEOF_EHDR];
try!(fd.read(&mut elf_header));
Ok(Header::from_bytes(&elf_header))
}
};
}
macro_rules! elf_header_impure_impl {
($header:item) => {
#[cfg(not(feature = "pure"))]
pub use self::impure::*;
#[cfg(not(feature = "pure"))]
mod impure {
use super::*;
use std::mem;
use std::fmt;
use std::fs::File;
use std::io::Read;
use std::io;
impl fmt::Debug for Header {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"e_ident: {:?} e_type: {} e_machine: 0x{:x} e_version: 0x{:x} e_entry: 0x{:x} \
e_phoff: 0x{:x} e_shoff: 0x{:x} e_flags: {:x} e_ehsize: {} e_phentsize: {} \
e_phnum: {} e_shentsize: {} e_shnum: {} e_shstrndx: {}",
self.e_ident,
et_to_str(self.e_type),
self.e_machine,
self.e_version,
self.e_entry,
self.e_phoff,
self.e_shoff,
self.e_flags,
self.e_ehsize,
self.e_phentsize,
self.e_phnum,
self.e_shentsize,
self.e_shnum,
self.e_shstrndx)
}
}
$header
}
};
}
}
#[macro_use]
pub mod program_header {
    // === Legal values for p_type (segment type); descriptions follow elf.h. ===
    /// Program header table entry unused.
    pub const PT_NULL: u32 = 0;
    /// Loadable program segment.
    pub const PT_LOAD: u32 = 1;
    /// Dynamic linking information.
    pub const PT_DYNAMIC: u32 = 2;
    /// Program interpreter.
    pub const PT_INTERP: u32 = 3;
    /// Auxiliary information.
    pub const PT_NOTE: u32 = 4;
    /// Reserved.
    pub const PT_SHLIB: u32 = 5;
    /// Entry for the program header table itself.
    pub const PT_PHDR: u32 = 6;
    /// Thread-local storage segment.
    pub const PT_TLS: u32 = 7;
    /// Number of defined types.
    pub const PT_NUM: u32 = 8;
    /// Start of OS-specific segment types.
    pub const PT_LOOS: u32 = 0x60000000;
    /// GCC .eh_frame_hdr segment.
    pub const PT_GNU_EH_FRAME: u32 = 0x6474e550;
    /// Indicates stack executability.
    pub const PT_GNU_STACK: u32 = 0x6474e551;
    /// Read-only after relocation.
    pub const PT_GNU_RELRO: u32 = 0x6474e552;
    /// Sun-specific low bound (same value as PT_SUNWBSS).
    pub const PT_LOSUNW: u32 = 0x6ffffffa;
    /// Sun-specific BSS segment.
    pub const PT_SUNWBSS: u32 = 0x6ffffffa;
    /// Sun-specific stack segment.
    pub const PT_SUNWSTACK: u32 = 0x6ffffffb;
    /// Sun-specific high bound (same value as PT_HIOS).
    pub const PT_HISUNW: u32 = 0x6fffffff;
    /// End of OS-specific segment types.
    pub const PT_HIOS: u32 = 0x6fffffff;
    /// Start of processor-specific segment types.
    pub const PT_LOPROC: u32 = 0x70000000;
    /// End of processor-specific segment types.
    pub const PT_HIPROC: u32 = 0x7fffffff;
    // === Legal values for p_flags (segment flags). ===
    /// Segment is executable
    pub const PF_X: u32 = 1 << 0;
    /// Segment is writable
    pub const PF_W: u32 = 1 << 1;
    /// Segment is readable
    pub const PF_R: u32 = 1 << 2;
    /// Convert a p_type value to its symbolic name.
    ///
    /// Duplicate-valued aliases are reported under one spelling: 0x6ffffffa
    /// (PT_LOSUNW/PT_SUNWBSS) prints as "PT_SUNWBSS" and 0x6fffffff
    /// (PT_HISUNW/PT_HIOS) prints as "PT_HIOS". Anything unrecognized yields
    /// "UNKNOWN_PT".
    pub fn pt_to_str(pt: u32) -> &'static str {
        match pt {
            PT_NULL => "PT_NULL",
            PT_LOAD => "PT_LOAD",
            PT_DYNAMIC => "PT_DYNAMIC",
            PT_INTERP => "PT_INTERP",
            PT_NOTE => "PT_NOTE",
            PT_SHLIB => "PT_SHLIB",
            PT_PHDR => "PT_PHDR",
            PT_TLS => "PT_TLS",
            PT_NUM => "PT_NUM",
            PT_LOOS => "PT_LOOS",
            PT_GNU_EH_FRAME => "PT_GNU_EH_FRAME",
            PT_GNU_STACK => "PT_GNU_STACK",
            PT_GNU_RELRO => "PT_GNU_RELRO",
            PT_SUNWBSS => "PT_SUNWBSS",
            PT_SUNWSTACK => "PT_SUNWSTACK",
            PT_HIOS => "PT_HIOS",
            PT_LOPROC => "PT_LOPROC",
            PT_HIPROC => "PT_HIPROC",
            _ => "UNKNOWN_PT",
        }
    }
    // Expands to `from_bytes`: reinterprets a raw byte buffer as `phnum`
    // program headers and copies them into an owned Vec. The caller must
    // guarantee the buffer really holds `phnum` headers of the right layout.
    macro_rules! elf_program_header_from_bytes { () => {
        pub fn from_bytes(bytes: &[u8], phnum: usize) -> Vec<ProgramHeader> {
            let bytes = unsafe { slice::from_raw_parts(bytes.as_ptr() as *mut ProgramHeader, phnum) };
            let mut phdrs = Vec::with_capacity(phnum);
            phdrs.extend_from_slice(bytes);
            phdrs
        }};}
    // Expands to `from_raw_parts`: zero-copy view over headers already in memory.
    macro_rules! elf_program_header_from_raw_parts { () => {
        pub unsafe fn from_raw_parts<'a>(phdrp: *const ProgramHeader,
                                         phnum: usize)
                                         -> &'a [ProgramHeader] {
            slice::from_raw_parts(phdrp, phnum)
        }};}
    // Expands to the host-endian-only `from_fd` (the trailing bool, the
    // little-endian flag, is deliberately ignored in this configuration).
    macro_rules! elf_program_header_from_fd { () => {
        #[cfg(feature = "no_endian_fd")]
        pub fn from_fd(fd: &mut File, offset: u64, count: usize, _: bool) -> io::Result<Vec<ProgramHeader>> {
            use std::io::Read;
            let mut phdrs = vec![0u8; count * SIZEOF_PHDR];
            try!(fd.seek(Start(offset)));
            try!(fd.read(&mut phdrs));
            Ok(ProgramHeader::from_bytes(&phdrs, count))
        }
    };}
    // Expands to the endian-aware `from_fd` supplied by the caller.
    macro_rules! elf_program_header_from_fd_endian { ($from_fd_endian:item) => {
        #[cfg(not(feature = "no_endian_fd"))]
        $from_fd_endian
    };}
    // Wraps a width-specific `impl` block (`$header`) together with the shared
    // `Debug` implementation; compiled out under the "pure" feature.
    macro_rules! elf_program_header_impure_impl { ($header:item) => {
        #[cfg(not(feature = "pure"))]
        pub use self::impure::*;
        #[cfg(not(feature = "pure"))]
        mod impure {
            use super::*;
            use std::slice;
            use std::fmt;
            use std::fs::File;
            use std::io::Seek;
            use std::io::SeekFrom::Start;
            use std::io;
            impl fmt::Debug for ProgramHeader {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    write!(f,
                           "p_type: {} p_flags 0x{:x} p_offset: 0x{:x} p_vaddr: 0x{:x} p_paddr: 0x{:x} \
                            p_filesz: 0x{:x} p_memsz: 0x{:x} p_align: {}",
                           pt_to_str(self.p_type),
                           self.p_flags,
                           self.p_offset,
                           self.p_vaddr,
                           self.p_paddr,
                           self.p_filesz,
                           self.p_memsz,
                           self.p_align)
                }
            }
            $header
        }
    };}
}
#[macro_use]
pub mod section_header {
    // Generates the `SectionHeader` struct for a given bit width; `$size` is
    // the address-sized integer type (u32 for ELF32, u64 for ELF64).
    macro_rules! elf_section_header {
        ($size:ident) => {
            #[repr(C)]
            #[derive(Clone, PartialEq, Default)]
            pub struct SectionHeader {
                /// Section name (string tbl index)
                pub sh_name: u32,
                /// Section type
                pub sh_type: u32,
                /// Section flags
                pub sh_flags: $size,
                /// Section virtual addr at execution
                pub sh_addr: $size,
                /// Section file offset
                pub sh_offset: $size,
                /// Section size in bytes
                pub sh_size: $size,
                /// Link to another section
                pub sh_link: u32,
                /// Additional section information
                pub sh_info: u32,
                /// Section alignment
                pub sh_addralign: $size,
                /// Entry size if section holds table
                pub sh_entsize: $size,
            }
        }
    }
    // === Special section indices. ===
    // NOTE(review): these are declared u32 here although ELF stores section
    // indices (e.g. e_shstrndx, st_shndx) in 16-bit fields -- presumably
    // widened for convenience; confirm against call sites.
    /// Undefined section.
    pub const SHN_UNDEF: u32 = 0;
    /// Start of reserved indices.
    pub const SHN_LORESERVE: u32 = 0xff00;
    /// Start of processor-specific.
    pub const SHN_LOPROC: u32 = 0xff00;
    /// Order section before all others (Solaris).
    pub const SHN_BEFORE: u32 = 0xff00;
    /// Order section after all others (Solaris).
    pub const SHN_AFTER: u32 = 0xff01;
    /// End of processor-specific.
    pub const SHN_HIPROC: u32 = 0xff1f;
    /// Start of OS-specific.
    pub const SHN_LOOS: u32 = 0xff20;
    /// End of OS-specific.
    pub const SHN_HIOS: u32 = 0xff3f;
    /// Associated symbol is absolute.
    pub const SHN_ABS: u32 = 0xfff1;
    /// Associated symbol is common.
    pub const SHN_COMMON: u32 = 0xfff2;
    /// Index is in extra table.
    pub const SHN_XINDEX: u32 = 0xffff;
    /// End of reserved indices.
    pub const SHN_HIRESERVE: u32 = 0xffff;
    // === Legal values for sh_type (section type). ===
    /// Section header table entry unused.
    pub const SHT_NULL: u32 = 0;
    /// Program data.
    pub const SHT_PROGBITS: u32 = 1;
    /// Symbol table.
    pub const SHT_SYMTAB: u32 = 2;
    /// String table.
    pub const SHT_STRTAB: u32 = 3;
    /// Relocation entries with addends.
    pub const SHT_RELA: u32 = 4;
    /// Symbol hash table.
    pub const SHT_HASH: u32 = 5;
    /// Dynamic linking information.
    pub const SHT_DYNAMIC: u32 = 6;
    /// Notes.
    pub const SHT_NOTE: u32 = 7;
    /// Program space with no data (bss).
    pub const SHT_NOBITS: u32 = 8;
    /// Relocation entries, no addends.
    pub const SHT_REL: u32 = 9;
    /// Reserved.
    pub const SHT_SHLIB: u32 = 10;
    /// Dynamic linker symbol table.
    pub const SHT_DYNSYM: u32 = 11;
    // Note the gap: values 12 and 13 are not defined.
    /// Array of constructors.
    pub const SHT_INIT_ARRAY: u32 = 14;
    /// Array of destructors.
    pub const SHT_FINI_ARRAY: u32 = 15;
    /// Array of pre-constructors.
    pub const SHT_PREINIT_ARRAY: u32 = 16;
    /// Section group.
    pub const SHT_GROUP: u32 = 17;
    /// Extended section indeces.
    pub const SHT_SYMTAB_SHNDX: u32 = 18;
    /// Number of defined types.
    pub const SHT_NUM: u32 = 19;
    /// Start OS-specific.
    pub const SHT_LOOS: u32 = 0x60000000;
    /// Object attributes.
    pub const SHT_GNU_ATTRIBUTES: u32 = 0x6ffffff5;
    /// GNU-style hash table.
    pub const SHT_GNU_HASH: u32 = 0x6ffffff6;
    /// Prelink library list.
    pub const SHT_GNU_LIBLIST: u32 = 0x6ffffff7;
    /// Checksum for DSO content.
    pub const SHT_CHECKSUM: u32 = 0x6ffffff8;
    /// Sun-specific low bound.
    pub const SHT_LOSUNW: u32 = 0x6ffffffa;
    pub const SHT_SUNW_MOVE: u32 = 0x6ffffffa;
    pub const SHT_SUNW_COMDAT: u32 = 0x6ffffffb;
    pub const SHT_SUNW_SYMINFO: u32 = 0x6ffffffc;
    /// Version definition section.
    pub const SHT_GNU_VERDEF: u32 = 0x6ffffffd;
    /// Version needs section.
    pub const SHT_GNU_VERNEED: u32 = 0x6ffffffe;
    /// Version symbol table.
    pub const SHT_GNU_VERSYM: u32 = 0x6fffffff;
    /// Sun-specific high bound.
    pub const SHT_HISUNW: u32 = 0x6fffffff;
    /// End OS-specific type.
    pub const SHT_HIOS: u32 = 0x6fffffff;
    /// Start of processor-specific.
    pub const SHT_LOPROC: u32 = 0x70000000;
    /// End of processor-specific.
    pub const SHT_HIPROC: u32 = 0x7fffffff;
    /// Start of application-specific.
    pub const SHT_LOUSER: u32 = 0x80000000;
    /// End of application-specific.
    pub const SHT_HIUSER: u32 = 0x8fffffff;
    // Legal values for sh_flags (section flags)
    /// Writable.
    pub const SHF_WRITE: u32 = 1 << 0;
    /// Occupies memory during execution.
    pub const SHF_ALLOC: u32 = 1 << 1;
    /// Executable.
    pub const SHF_EXECINSTR: u32 = 1 << 2;
    // Note: bit 3 (1 << 3) is unassigned.
    /// Might be merged.
    pub const SHF_MERGE: u32 = 1 << 4;
    /// Contains nul-terminated strings.
    pub const SHF_STRINGS: u32 = 1 << 5;
    /// `sh_info' contains SHT index.
    pub const SHF_INFO_LINK: u32 = 1 << 6;
    /// Preserve order after combining.
    pub const SHF_LINK_ORDER: u32 = 1 << 7;
    /// Non-standard OS specific handling required.
    pub const SHF_OS_NONCONFORMING: u32 = 1 << 8;
    /// Section is member of a group.
    pub const SHF_GROUP: u32 = 1 << 9;
    /// Section hold thread-local data.
    pub const SHF_TLS: u32 = 1 << 10;
    /// Section with compressed data.
    pub const SHF_COMPRESSED: u32 = 1 << 11;
    /// OS-specific..
    pub const SHF_MASKOS: u32 = 0x0ff00000;
    /// Processor-specific.
    pub const SHF_MASKPROC: u32 = 0xf0000000;
    /// Special ordering requirement (Solaris).
    pub const SHF_ORDERED: u32 = 1 << 30;
    // /// Section is excluded unless referenced or allocated (Solaris).
    // pub const SHF_EXCLUDE: u32 = 1U << 31;
    /// Convert an sh_type value to its symbolic name.
    ///
    /// Currently unimplemented: every input yields "UNKNOWN_SHT". The
    /// commented-out arms below were copied from `pt_to_str` and still need
    /// to be rewritten in terms of the SHT_* constants above.
    pub fn sht_to_str(sht: u32) -> &'static str {
        match sht {
            //TODO: implement
            /*
            PT_NULL => "PT_NULL",
            PT_LOAD => "PT_LOAD",
            PT_DYNAMIC => "PT_DYNAMIC",
            PT_INTERP => "PT_INTERP",
            PT_NOTE => "PT_NOTE",
            PT_SHLIB => "PT_SHLIB",
            PT_PHDR => "PT_PHDR",
            PT_TLS => "PT_TLS",
            PT_NUM => "PT_NUM",
            PT_LOOS => "PT_LOOS",
            PT_GNU_EH_FRAME => "PT_GNU_EH_FRAME",
            PT_GNU_STACK => "PT_GNU_STACK",
            PT_GNU_RELRO => "PT_GNU_RELRO",
            PT_SUNWBSS => "PT_SUNWBSS",
            PT_SUNWSTACK => "PT_SUNWSTACK",
            PT_HIOS => "PT_HIOS",
            PT_LOPROC => "PT_LOPROC",
            PT_HIPROC => "PT_HIPROC",
            */
            _ => "UNKNOWN_SHT",
        }
    }
    // Expands to `from_bytes`: reinterprets a raw byte buffer as `shnum`
    // section headers and copies them into an owned Vec. The caller must
    // guarantee the buffer really holds `shnum` headers of the right layout.
    macro_rules! elf_section_header_from_bytes { () => {
        pub fn from_bytes(bytes: &[u8], shnum: usize) -> Vec<SectionHeader> {
            let bytes = unsafe { slice::from_raw_parts(bytes.as_ptr() as *mut SectionHeader, shnum) };
            let mut shdrs = Vec::with_capacity(shnum);
            shdrs.extend_from_slice(bytes);
            shdrs
        }};}
    // Expands to `from_raw_parts`: zero-copy view over headers already in memory.
    macro_rules! elf_section_header_from_raw_parts { () => {
        pub unsafe fn from_raw_parts<'a>(shdrp: *const SectionHeader,
                                         shnum: usize)
                                         -> &'a [SectionHeader] {
            slice::from_raw_parts(shdrp, shnum)
        }};}
    // Expands to the host-endian-only `from_fd` (the trailing bool, the
    // little-endian flag, is deliberately ignored in this configuration).
    macro_rules! elf_section_header_from_fd { () => {
        #[cfg(feature = "no_endian_fd")]
        pub fn from_fd(fd: &mut File, offset: u64, count: usize, _: bool) -> io::Result<Vec<SectionHeader>> {
            use std::io::Read;
            let mut shdrs = vec![0u8; count * SIZEOF_SHDR];
            try!(fd.seek(Start(offset)));
            try!(fd.read(&mut shdrs));
            Ok(SectionHeader::from_bytes(&shdrs, count))
        }
    };}
    // Expands to the endian-aware `from_fd` supplied by the caller.
    macro_rules! elf_section_header_from_fd_endian { ($from_fd_endian:item) => {
        #[cfg(not(feature = "no_endian_fd"))]
        $from_fd_endian
    };}
    // Wraps a width-specific `impl` block (`$header`) together with the shared
    // `Debug` implementation; compiled out under the "pure" feature.
    macro_rules! elf_section_header_impure_impl { ($header:item) => {
        #[cfg(not(feature = "pure"))]
        pub use self::impure::*;
        #[cfg(not(feature = "pure"))]
        mod impure {
            use super::*;
            use std::slice;
            use std::fmt;
            use std::fs::File;
            use std::io::Seek;
            use std::io::SeekFrom::Start;
            use std::io;
            impl fmt::Debug for SectionHeader {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    // sh_type is printed through sht_to_str (currently always
                    // "UNKNOWN_SHT"); the `as u32` cast is a no-op since
                    // sh_type is already u32.
                    write!(f,
                           "sh_name: {} sh_type {} sh_flags: 0x{:x} sh_addr: 0x{:x} sh_offset: 0x{:x} \
                            sh_size: 0x{:x} sh_link: 0x{:x} sh_info: 0x{:x} sh_addralign 0x{:x} sh_entsize 0x{:x}",
                           self.sh_name,
                           sht_to_str(self.sh_type as u32),
                           self.sh_flags,
                           self.sh_addr,
                           self.sh_offset,
                           self.sh_size,
                           self.sh_link,
                           self.sh_info,
                           self.sh_addralign,
                           self.sh_entsize)
                }
            }
            $header
        }
    };}
}
#[macro_use]
pub mod sym {
    // === Sym bindings (high four bits of st_info) ===
    /// Local symbol.
    pub const STB_LOCAL: u8 = 0;
    /// Global symbol.
    pub const STB_GLOBAL: u8 = 1;
    /// Weak symbol.
    pub const STB_WEAK: u8 = 2;
    /// Number of defined types.
    pub const STB_NUM: u8 = 3;
    /// Start of OS-specific bindings (same value as STB_GNU_UNIQUE).
    pub const STB_LOOS: u8 = 10;
    /// Unique symbol.
    pub const STB_GNU_UNIQUE: u8 = 10;
    /// End of OS-specific.
    pub const STB_HIOS: u8 = 12;
    /// Start of processor-specific.
    pub const STB_LOPROC: u8 = 13;
    /// End of processor-specific.
    pub const STB_HIPROC: u8 = 15;
    // === Sym types (low four bits of st_info) ===
    /// Symbol type is unspecified.
    pub const STT_NOTYPE: u8 = 0;
    /// Symbol is a data object.
    pub const STT_OBJECT: u8 = 1;
    /// Symbol is a code object.
    pub const STT_FUNC: u8 = 2;
    /// Symbol associated with a section.
    pub const STT_SECTION: u8 = 3;
    /// Symbol's name is file name.
    pub const STT_FILE: u8 = 4;
    /// Symbol is a common data object.
    pub const STT_COMMON: u8 = 5;
    /// Symbol is thread-local data object.
    pub const STT_TLS: u8 = 6;
    /// Number of defined types.
    pub const STT_NUM: u8 = 7;
    /// Start of OS-specific types (same value as STT_GNU_IFUNC).
    pub const STT_LOOS: u8 = 10;
    /// Symbol is indirect code object.
    pub const STT_GNU_IFUNC: u8 = 10;
    /// End of OS-specific.
    pub const STT_HIOS: u8 = 12;
    /// Start of processor-specific.
    pub const STT_LOPROC: u8 = 13;
    /// End of processor-specific.
    pub const STT_HIPROC: u8 = 15;
    /// Get the ST binding.
    ///
    /// This is the high four bits of the `st_info` byte.
    #[inline]
    pub fn st_bind(info: u8) -> u8 {
        info >> 4
    }
    /// Get the ST type.
    ///
    /// This is the low four bits of the `st_info` byte.
    #[inline]
    pub fn st_type(info: u8) -> u8 {
        info & 0xf
    }
    /// Is this information defining an import?
    ///
    /// True when the binding is global and `value` is zero — presumably
    /// `value` is the symbol's st_value, where 0 marks an undefined symbol;
    /// confirm against the callers.
    #[inline]
    pub fn is_import(info: u8, value: u8) -> bool {
        let binding = st_bind(info);
        binding == STB_GLOBAL && value == 0
    }
    /// Convenience function to get the &'static str type from the symbols `st_info`.
    pub fn get_type(info: u8) -> &'static str {
        type_to_str(st_type(info))
    }
    /// Get the string for some binding.
    ///
    /// STB_LOOS (same value as STB_GNU_UNIQUE) prints as "GNU_UNIQUE";
    /// anything unrecognized yields "UNKNOWN_STB".
    #[inline]
    pub fn bind_to_str(typ: u8) -> &'static str {
        match typ {
            STB_LOCAL => "LOCAL",
            STB_GLOBAL => "GLOBAL",
            STB_WEAK => "WEAK",
            STB_NUM => "NUM",
            STB_GNU_UNIQUE => "GNU_UNIQUE",
            _ => "UNKNOWN_STB",
        }
    }
    /// Get the string for some type.
    ///
    /// STT_LOOS (same value as STT_GNU_IFUNC) prints as "GNU_IFUNC";
    /// anything unrecognized yields "UNKNOWN_STT".
    #[inline]
    pub fn type_to_str(typ: u8) -> &'static str {
        match typ {
            STT_NOTYPE => "NOTYPE",
            STT_OBJECT => "OBJECT",
            STT_FUNC => "FUNC",
            STT_SECTION => "SECTION",
            STT_FILE => "FILE",
            STT_COMMON => "COMMON",
            STT_TLS => "TLS",
            STT_NUM => "NUM",
            STT_GNU_IFUNC => "GNU_IFUNC",
            _ => "UNKNOWN_STT",
        }
    }
    // Wraps a width-specific, endian-aware `from_fd` (`$from_fd_endian`)
    // together with the shared Debug impl and raw accessors; compiled out
    // under the "pure" feature.
    macro_rules! elf_sym_impure_impl {
        ($from_fd_endian:item) => {
            #[cfg(not(feature = "pure"))]
            pub use self::impure::*;
            #[cfg(not(feature = "pure"))]
            mod impure {
                use std::fs::File;
                use std::io::Read;
                use std::io::Seek;
                use std::io::SeekFrom::Start;
                use std::io;
                use std::fmt;
                use std::slice;
                use super::*;
                impl fmt::Debug for Sym {
                    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                        let bind = st_bind(self.st_info);
                        let typ = st_type(self.st_info);
                        write!(f,
                               "st_name: {} {} {} st_other: {} st_shndx: {} st_value: {:x} st_size: {}",
                               self.st_name,
                               bind_to_str(bind),
                               type_to_str(typ),
                               self.st_other,
                               self.st_shndx,
                               self.st_value,
                               self.st_size)
                    }
                }
                // Zero-copy view over symbols already in memory; caller
                // guarantees `symp` points at `count` valid Syms.
                pub unsafe fn from_raw<'a>(symp: *const Sym, count: usize) -> &'a [Sym] {
                    slice::from_raw_parts(symp, count)
                }
                // TODO: this is broken, fix (not used often by me since don't have luxury of debug symbols usually)
                #[cfg(feature = "no_endian_fd")]
                pub fn from_fd<'a>(fd: &mut File, offset: usize, count: usize, _: bool) -> io::Result<Vec<Sym>> {
                    // TODO: AFAIK this shouldn't work, since i pass in a byte size...
                    let mut bytes = vec![0u8; count * SIZEOF_SYM];
                    try!(fd.seek(Start(offset as u64)));
                    try!(fd.read(&mut bytes));
                    let bytes = unsafe { slice::from_raw_parts(bytes.as_ptr() as *mut Sym, count) };
                    let mut syms = Vec::with_capacity(count);
                    syms.extend_from_slice(bytes);
                    syms.dedup();
                    Ok(syms)
                }
                #[cfg(not(feature = "no_endian_fd"))]
                $from_fd_endian
            }
        };
    }
}
#[macro_use]
pub mod dyn {
// TODO: figure out what's the best, most friendly + safe API choice here - u32s or u64s
// remember that DT_TAG is "pointer sized"/used as address sometimes Original rationale: I
// decided to use u64 instead of u32 due to pattern matching use case seems safer to cast the
// elf32's d_tag from u32 -> u64 at runtime instead of casting the elf64's d_tag from u64 ->
// u32 at runtime
// TODO: Documentation.
pub const DT_NULL: u64 = 0;
pub const DT_NEEDED: u64 = 1;
pub const DT_PLTRELSZ: u64 = 2;
pub const DT_PLTGOT: u64 = 3;
pub const DT_HASH: u64 = 4;
pub const DT_STRTAB: u64 = 5;
pub const DT_SYMTAB: u64 = 6;
pub const DT_RELA: u64 = 7;
pub const DT_RELASZ: u64 = 8;
pub const DT_RELAENT: u64 = 9;
pub const DT_STRSZ: u64 = 10;
pub const DT_SYMENT: u64 = 11;
pub const DT_INIT: u64 = 12;
pub const DT_FINI: u64 = 13;
pub const DT_SONAME: u64 = 14;
pub const DT_RPATH: u64 = 15;
pub const DT_SYMBOLIC: u64 = 16;
pub const DT_REL: u64 = 17;
pub const DT_RELSZ: u64 = 18;
pub const DT_RELENT: u64 = 19;
pub const DT_PLTREL: u64 = 20;
pub const DT_DEBUG: u64 = 21;
pub const DT_TEXTREL: u64 = 22;
pub const DT_JMPREL: u64 = 23;
pub const DT_BIND_NOW: u64 = 24;
pub const DT_INIT_ARRAY: u64 = 25;
pub const DT_FINI_ARRAY: u64 = 26;
pub const DT_INIT_ARRAYSZ: u64 = 27;
pub const DT_FINI_ARRAYSZ: u64 = 28;
pub const DT_RUNPATH: u64 = 29;
pub const DT_FLAGS: u64 = 30;
pub const DT_ENCODING: u64 = 32;
pub const DT_PREINIT_ARRAY: u64 = 32;
pub const DT_PREINIT_ARRAYSZ: u64 = 33;
pub const DT_NUM: u64 = 34;
pub const DT_LOOS: u64 = 0x6000000d;
pub const DT_HIOS: u64 = 0x6ffff000;
pub const DT_LOPROC: u64 = 0x70000000;
pub const DT_HIPROC: u64 = 0x7fffffff;
// pub const DT_PROCNUM: u64 = DT_MIPS_NUM;
pub const DT_VERSYM: u64 = 0x6ffffff0;
pub const DT_RELACOUNT: u64 = 0x6ffffff9;
pub const DT_RELCOUNT: u64 = 0x6ffffffa;
pub const DT_GNU_HASH: u64 = 0x6ffffef5;
pub const DT_VERDEF: u64 = 0x6ffffffc;
pub const DT_VERDEFNUM: u64 = 0x6ffffffd;
pub const DT_VERNEED: u64 = 0x6ffffffe;
pub const DT_VERNEEDNUM: u64 = 0x6fffffff;
pub const DT_FLAGS_1: u64 = 0x6ffffffb;
/// Converts a tag to its string representation.
///
/// `DT_ENCODING` and `DT_PREINIT_ARRAY` share the value 32, so a tag of 32
/// is reported as "DT_PREINIT_ARRAY". Unrecognized tags (including values in
/// the OS/processor-specific ranges not listed here) yield "UNKNOWN_TAG".
#[inline]
pub fn tag_to_str(tag: u64) -> &'static str {
    match tag {
        DT_NULL => "DT_NULL",
        DT_NEEDED => "DT_NEEDED",
        DT_PLTRELSZ => "DT_PLTRELSZ",
        DT_PLTGOT => "DT_PLTGOT",
        DT_HASH => "DT_HASH",
        DT_STRTAB => "DT_STRTAB",
        DT_SYMTAB => "DT_SYMTAB",
        DT_RELA => "DT_RELA",
        DT_RELASZ => "DT_RELASZ",
        DT_RELAENT => "DT_RELAENT",
        DT_STRSZ => "DT_STRSZ",
        DT_SYMENT => "DT_SYMENT",
        DT_INIT => "DT_INIT",
        DT_FINI => "DT_FINI",
        DT_SONAME => "DT_SONAME",
        DT_RPATH => "DT_RPATH",
        DT_SYMBOLIC => "DT_SYMBOLIC",
        DT_REL => "DT_REL",
        DT_RELSZ => "DT_RELSZ",
        DT_RELENT => "DT_RELENT",
        DT_PLTREL => "DT_PLTREL",
        DT_DEBUG => "DT_DEBUG",
        DT_TEXTREL => "DT_TEXTREL",
        DT_JMPREL => "DT_JMPREL",
        DT_BIND_NOW => "DT_BIND_NOW",
        DT_INIT_ARRAY => "DT_INIT_ARRAY",
        DT_FINI_ARRAY => "DT_FINI_ARRAY",
        DT_INIT_ARRAYSZ => "DT_INIT_ARRAYSZ",
        DT_FINI_ARRAYSZ => "DT_FINI_ARRAYSZ",
        DT_RUNPATH => "DT_RUNPATH",
        DT_FLAGS => "DT_FLAGS",
        DT_PREINIT_ARRAY => "DT_PREINIT_ARRAY",
        DT_PREINIT_ARRAYSZ => "DT_PREINIT_ARRAYSZ",
        DT_NUM => "DT_NUM",
        DT_LOOS => "DT_LOOS",
        DT_HIOS => "DT_HIOS",
        DT_LOPROC => "DT_LOPROC",
        DT_HIPROC => "DT_HIPROC",
        DT_VERSYM => "DT_VERSYM",
        DT_RELACOUNT => "DT_RELACOUNT",
        DT_RELCOUNT => "DT_RELCOUNT",
        DT_GNU_HASH => "DT_GNU_HASH",
        DT_VERDEF => "DT_VERDEF",
        DT_VERDEFNUM => "DT_VERDEFNUM",
        DT_VERNEED => "DT_VERNEED",
        DT_VERNEEDNUM => "DT_VERNEEDNUM",
        DT_FLAGS_1 => "DT_FLAGS_1",
        _ => "UNKNOWN_TAG",
    }
}
// Values of `d_un.d_val` in the DT_FLAGS entry
/// Object may use DF_ORIGIN.
pub const DF_ORIGIN: u64 = 0x00000001;
// State flags selectable in the `d_un.d_val` element of the DT_FLAGS entry in the dynamic section.
/// Symbol resolution starts here.
pub const DF_SYMBOLIC: u64 = 0x00000002;
/// Object contains text relocations.
pub const DF_TEXTREL: u64 = 0x00000004;
/// No lazy binding for this object.
pub const DF_BIND_NOW: u64 = 0x00000008;
/// Module uses the static TLS model.
pub const DF_STATIC_TLS: u64 = 0x00000010;
// State flags selectable in the `d_un.d_val` element of the DT_FLAGS_1 entry in the dynamic section.
/// Set RTLD_NOW for this object.
pub const DF_1_NOW: u64 = 0x00000001;
/// Set RTLD_GLOBAL for this object.
pub const DF_1_GLOBAL: u64 = 0x00000002;
/// Set RTLD_GROUP for this object.
pub const DF_1_GROUP: u64 = 0x00000004;
/// Set RTLD_NODELETE for this object.
pub const DF_1_NODELETE: u64 = 0x00000008;
/// Trigger filtee loading at runtime.
pub const DF_1_LOADFLTR: u64 = 0x00000010;
/// Set RTLD_INITFIRST for this object.
pub const DF_1_INITFIRST: u64 = 0x00000020;
/// Set RTLD_NOOPEN for this object.
pub const DF_1_NOOPEN: u64 = 0x00000040;
/// $ORIGIN must be handled.
pub const DF_1_ORIGIN: u64 = 0x00000080;
/// Direct binding enabled.
pub const DF_1_DIRECT: u64 = 0x00000100;
// NOTE(review): Solaris-specific flag with no description in glibc's elf.h — confirm semantics.
pub const DF_1_TRANS: u64 = 0x00000200;
/// Object is used to interpose.
pub const DF_1_INTERPOSE: u64 = 0x00000400;
/// Ignore default lib search path.
pub const DF_1_NODEFLIB: u64 = 0x00000800;
/// Object can't be dldump'ed.
pub const DF_1_NODUMP: u64 = 0x00001000;
/// Configuration alternative created.
pub const DF_1_CONFALT: u64 = 0x00002000;
/// Filtee terminates filters search.
pub const DF_1_ENDFILTEE: u64 = 0x00004000;
/// Disp reloc applied at build time.
pub const DF_1_DISPRELDNE: u64 = 0x00008000;
/// Disp reloc applied at run-time.
pub const DF_1_DISPRELPND: u64 = 0x00010000;
/// Object has no-direct binding.
pub const DF_1_NODIRECT: u64 = 0x00020000;
// NOTE(review): reserved/internal Solaris runtime-linker flag (no glibc description) — confirm.
pub const DF_1_IGNMULDEF: u64 = 0x00040000;
// NOTE(review): reserved/internal Solaris runtime-linker flag (no glibc description) — confirm.
pub const DF_1_NOKSYMS: u64 = 0x00080000;
// NOTE(review): reserved/internal Solaris runtime-linker flag (no glibc description) — confirm.
pub const DF_1_NOHDR: u64 = 0x00100000;
/// Object is modified after built.
pub const DF_1_EDITED: u64 = 0x00200000;
// NOTE(review): reserved/internal Solaris runtime-linker flag (no glibc description) — confirm.
pub const DF_1_NORELOC: u64 = 0x00400000;
/// Object has individual interposers.
pub const DF_1_SYMINTPOSE: u64 = 0x00800000;
/// Global auditing required.
pub const DF_1_GLOBAUDIT: u64 = 0x01000000;
/// Singleton symbols are used.
pub const DF_1_SINGLETON: u64 = 0x02000000;
/// Generates the "impure" (libstd-dependent) half of a `dyn` module for one ELF class.
///
/// `$size` is the word type of the target class (`u32` for ELF32, `u64` for ELF64);
/// `$from_fd_endian` is the endian-aware `from_fd` item that is compiled in when the
/// `no_endian_fd` feature is disabled.
macro_rules! elf_dyn_impure_impl {
    ($size:ident, $from_fd_endian:item) => {
        #[cfg(not(feature = "pure"))]
        pub use self::impure::*;
        #[cfg(not(feature = "pure"))]
        mod impure {
            use std::fs::File;
            use std::io::Seek;
            use std::io::SeekFrom::Start;
            use std::io;
            use std::fmt;
            use std::slice;
            use super::super::program_header::{ProgramHeader, PT_DYNAMIC};
            use super::super::super::elf::strtab::Strtab;
            use super::*;

            impl fmt::Debug for Dyn {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    write!(f,
                           "d_tag: {} d_val: 0x{:x}",
                           tag_to_str(self.d_tag as u64),
                           self.d_val)
                }
            }

            impl fmt::Debug for DynamicInfo {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    // Absent optional addresses are rendered as 0.
                    let gnu_hash = if let Some(addr) = self.gnu_hash { addr } else { 0 };
                    let hash = if let Some(addr) = self.hash { addr } else { 0 };
                    let pltgot = if let Some(addr) = self.pltgot { addr } else { 0 };
                    write!(f, "rela: 0x{:x} relasz: {} relaent: {} relacount: {} gnu_hash: 0x{:x} hash: 0x{:x} strtab: 0x{:x} strsz: {} symtab: 0x{:x} syment: {} pltgot: 0x{:x} pltrelsz: {} pltrel: {} jmprel: 0x{:x} verneed: 0x{:x} verneednum: {} versym: 0x{:x} init: 0x{:x} fini: 0x{:x} needed_count: {}",
                           self.rela,
                           self.relasz,
                           self.relaent,
                           self.relacount,
                           gnu_hash,
                           hash,
                           self.strtab,
                           self.strsz,
                           self.symtab,
                           self.syment,
                           pltgot,
                           self.pltrelsz,
                           self.pltrel,
                           self.jmprel,
                           self.verneed,
                           self.verneednum,
                           self.versym,
                           self.init,
                           self.fini,
                           self.needed_count,
                           )
                }
            }

            #[cfg(feature = "no_endian_fd")]
            /// Returns a vector of dynamic entries from the given fd and program headers
            pub fn from_fd(mut fd: &File, phdrs: &[ProgramHeader], _: bool) -> io::Result<Option<Vec<Dyn>>> {
                use std::io::Read;
                // Scan program headers for the PT_DYNAMIC segment; only the first one is parsed.
                for phdr in phdrs {
                    if phdr.p_type == PT_DYNAMIC {
                        let filesz = phdr.p_filesz as usize;
                        let dync = filesz / SIZEOF_DYN;
                        let mut bytes = vec![0u8; filesz];
                        try!(fd.seek(Start(phdr.p_offset as u64)));
                        // NOTE(review): a single `read` may return fewer than `filesz` bytes,
                        // leaving trailing zeroed entries — confirm callers tolerate this.
                        try!(fd.read(&mut bytes));
                        // Reinterpret the raw bytes as `Dyn` entries; this path assumes the
                        // host byte order matches the file (it is the `no_endian_fd` build).
                        let bytes = unsafe { slice::from_raw_parts(bytes.as_ptr() as *mut Dyn, dync) };
                        let mut dyns = Vec::with_capacity(dync);
                        dyns.extend_from_slice(bytes);
                        // `dedup` only removes *consecutive* duplicate entries.
                        dyns.dedup();
                        return Ok(Some(dyns));
                    }
                }
                Ok(None)
            }

            /// Given a bias and a memory address (typically for a _correctly_ mmap'd binary in memory), returns the `_DYNAMIC` array as a slice of that memory
            pub unsafe fn from_raw<'a>(bias: $size, vaddr: $size) -> &'a [Dyn] {
                let dynp = vaddr.wrapping_add(bias) as *const Dyn;
                let mut idx = 0;
                // The `_DYNAMIC` array is terminated by a DT_NULL entry.
                while (*dynp.offset(idx)).d_tag as u64 != DT_NULL {
                    idx += 1;
                }
                slice::from_raw_parts(dynp, idx as usize)
            }

            // TODO: these bare functions have always seemed awkward, but not sure where they should go...
            /// Maybe gets and returns the dynamic array with the same lifetime as the [phdrs], using the provided bias with wrapping addition.
            /// If the bias is wrong, it will either segfault or give you incorrect values, beware
            pub unsafe fn from_phdrs(bias: $size, phdrs: &[ProgramHeader]) -> Option<&[Dyn]> {
                for phdr in phdrs {
                    // FIXME: change to casting to u64 similar to DT_*?
                    if phdr.p_type as u32 == PT_DYNAMIC {
                        return Some(from_raw(bias, phdr.p_vaddr));
                    }
                }
                None
            }

            /// Gets the needed libraries from the `_DYNAMIC` array, with the str slices lifetime tied to the dynamic array/strtab's lifetime(s)
            pub unsafe fn get_needed<'a>(dyns: &[Dyn], strtab: *const Strtab<'a>, count: usize) -> Vec<&'a str> {
                let mut needed = Vec::with_capacity(count);
                for dyn in dyns {
                    if dyn.d_tag as u64 == DT_NEEDED {
                        // d_val is an offset into the dynamic string table.
                        let lib = &(*strtab)[dyn.d_val as usize];
                        needed.push(lib);
                    }
                }
                needed
            }

            #[cfg(not(feature = "no_endian_fd"))]
            /// Returns a vector of dynamic entries from the given fd and program headers
            $from_fd_endian
        }

        /// Important dynamic linking info generated via a single pass through the _DYNAMIC array
        #[derive(Default)]
        pub struct DynamicInfo {
            pub rela: usize,
            pub relasz: usize,
            pub relaent: $size,
            pub relacount: usize,
            pub gnu_hash: Option<$size>,
            pub hash: Option<$size>,
            pub strtab: usize,
            pub strsz: usize,
            pub symtab: usize,
            pub syment: usize,
            pub pltgot: Option<$size>,
            pub pltrelsz: usize,
            pub pltrel: $size,
            pub jmprel: usize,
            pub verneed: $size,
            pub verneednum: $size,
            pub versym: $size,
            pub init: $size,
            pub fini: $size,
            pub init_array: $size,
            pub init_arraysz: usize,
            pub fini_array: $size,
            pub fini_arraysz: usize,
            pub needed_count: usize,
            pub flags: $size,
            pub flags_1: $size,
            pub soname: usize,
        }

        impl DynamicInfo {
            /// Walks the `_DYNAMIC` entries once, recording each tag of interest.
            /// Address-valued tags are offset by `bias` with wrapping arithmetic
            /// (callers deliberately pass an overflowing bias to turn virtual
            /// addresses into file offsets — see the `Binary` parser).
            pub fn new(dynamic: &[Dyn], bias: usize) -> DynamicInfo {
                let mut res = DynamicInfo::default();
                for dyn in dynamic {
                    match dyn.d_tag as u64 {
                        DT_RELA => res.rela = dyn.d_val.wrapping_add(bias as _) as usize, // .rela.dyn
                        DT_RELASZ => res.relasz = dyn.d_val as usize,
                        DT_RELAENT => res.relaent = dyn.d_val as _,
                        DT_RELACOUNT => res.relacount = dyn.d_val as usize,
                        DT_GNU_HASH => res.gnu_hash = Some(dyn.d_val.wrapping_add(bias as _)),
                        DT_HASH => res.hash = Some(dyn.d_val.wrapping_add(bias as _)) as _,
                        DT_STRTAB => res.strtab = dyn.d_val.wrapping_add(bias as _) as usize,
                        DT_STRSZ => res.strsz = dyn.d_val as usize,
                        DT_SYMTAB => res.symtab = dyn.d_val.wrapping_add(bias as _) as usize,
                        DT_SYMENT => res.syment = dyn.d_val as usize,
                        DT_PLTGOT => res.pltgot = Some(dyn.d_val.wrapping_add(bias as _)) as _,
                        DT_PLTRELSZ => res.pltrelsz = dyn.d_val as usize,
                        DT_PLTREL => res.pltrel = dyn.d_val as _,
                        DT_JMPREL => res.jmprel = dyn.d_val.wrapping_add(bias as _) as usize, // .rela.plt
                        DT_VERNEED => res.verneed = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_VERNEEDNUM => res.verneednum = dyn.d_val as _,
                        DT_VERSYM => res.versym = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_INIT => res.init = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_FINI => res.fini = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_INIT_ARRAY => res.init_array = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_INIT_ARRAYSZ => res.init_arraysz = dyn.d_val as _,
                        DT_FINI_ARRAY => res.fini_array = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_FINI_ARRAYSZ => res.fini_arraysz = dyn.d_val as _,
                        // Count DT_NEEDED entries; the names themselves come from `get_needed`.
                        DT_NEEDED => res.needed_count += 1,
                        DT_FLAGS => res.flags = dyn.d_val as _,
                        DT_FLAGS_1 => res.flags_1 = dyn.d_val as _,
                        DT_SONAME => res.soname = dyn.d_val as _,
                        _ => (),
                    }
                }
                res
            }
        }
    };
}
}
/// x86-64 relocation type constants and `Rela` helper generators.
#[macro_use]
pub mod rela {
    /// No reloc.
    pub const R_X86_64_NONE: u64 = 0;
    /// Direct 64 bit.
    pub const R_X86_64_64: u64 = 1;
    /// PC relative 32 bit signed.
    pub const R_X86_64_PC32: u64 = 2;
    /// 32 bit GOT entry.
    pub const R_X86_64_GOT32: u64 = 3;
    /// 32 bit PLT address.
    pub const R_X86_64_PLT32: u64 = 4;
    /// Copy symbol at runtime.
    pub const R_X86_64_COPY: u64 = 5;
    /// Create GOT entry.
    pub const R_X86_64_GLOB_DAT: u64 = 6;
    /// Create PLT entry.
    pub const R_X86_64_JUMP_SLOT: u64 = 7;
    /// Adjust by program base.
    pub const R_X86_64_RELATIVE: u64 = 8;
    /// 32 bit signed PC relative offset to GOT.
    pub const R_X86_64_GOTPCREL: u64 = 9;
    /// Direct 32 bit zero extended.
    pub const R_X86_64_32: u64 = 10;
    /// Direct 32 bit sign extended.
    pub const R_X86_64_32S: u64 = 11;
    /// Direct 16 bit zero extended.
    pub const R_X86_64_16: u64 = 12;
    /// 16 bit sign extended pc relative.
    pub const R_X86_64_PC16: u64 = 13;
    /// Direct 8 bit sign extended.
    pub const R_X86_64_8: u64 = 14;
    /// 8 bit sign extended pc relative.
    pub const R_X86_64_PC8: u64 = 15;
    /// ID of module containing symbol.
    pub const R_X86_64_DTPMOD64: u64 = 16;
    /// Offset in module's TLS block.
    pub const R_X86_64_DTPOFF64: u64 = 17;
    /// Offset in initial TLS block.
    pub const R_X86_64_TPOFF64: u64 = 18;
    /// 32 bit signed PC relative offset to two GOT entries for GD symbol.
    pub const R_X86_64_TLSGD: u64 = 19;
    /// 32 bit signed PC relative offset to two GOT entries for LD symbol.
    pub const R_X86_64_TLSLD: u64 = 20;
    /// Offset in TLS block.
    pub const R_X86_64_DTPOFF32: u64 = 21;
    /// 32 bit signed PC relative offset to GOT entry for IE symbol.
    pub const R_X86_64_GOTTPOFF: u64 = 22;
    /// Offset in initial TLS block.
    pub const R_X86_64_TPOFF32: u64 = 23;
    /// PC relative 64 bit.
    pub const R_X86_64_PC64: u64 = 24;
    /// 64 bit offset to GOT.
    pub const R_X86_64_GOTOFF64: u64 = 25;
    /// 32 bit signed pc relative offset to GOT.
    pub const R_X86_64_GOTPC32: u64 = 26;
    /// 64-bit GOT entry offset.
    pub const R_X86_64_GOT64: u64 = 27;
    /// 64-bit PC relative offset to GOT entry.
    pub const R_X86_64_GOTPCREL64: u64 = 28;
    /// 64-bit PC relative offset to GOT.
    pub const R_X86_64_GOTPC64: u64 = 29;
    /// like GOT64, says PLT entry needed.
    pub const R_X86_64_GOTPLT64: u64 = 30;
    /// 64-bit GOT relative offset to PLT entry.
    pub const R_X86_64_PLTOFF64: u64 = 31;
    /// Size of symbol plus 32-bit addend.
    pub const R_X86_64_SIZE32: u64 = 32;
    /// Size of symbol plus 64-bit addend.
    pub const R_X86_64_SIZE64: u64 = 33;
    /// GOT offset for TLS descriptor.
    pub const R_X86_64_GOTPC32_TLSDESC: u64 = 34;
    /// Marker for call through TLS descriptor.
    pub const R_X86_64_TLSDESC_CALL: u64 = 35;
    /// TLS descriptor.
    pub const R_X86_64_TLSDESC: u64 = 36;
    /// Adjust indirectly by program base.
    pub const R_X86_64_IRELATIVE: u64 = 37;
    /// 64-bit adjust by program base.
    pub const R_X86_64_RELATIVE64: u64 = 38;
    /// Number of defined x86-64 relocation types.
    pub const R_X86_64_NUM: u64 = 39;

    /// Converts a relocation type value into its short name, e.g. `R_X86_64_PC32` -> `"PC32"`.
    #[inline]
    pub fn type_to_str(typ: u64) -> &'static str {
        match typ {
            R_X86_64_NONE => "NONE",
            R_X86_64_64 => "64",
            R_X86_64_PC32 => "PC32",
            R_X86_64_GOT32 => "GOT32",
            R_X86_64_PLT32 => "PLT32",
            R_X86_64_COPY => "COPY",
            R_X86_64_GLOB_DAT => "GLOB_DAT",
            R_X86_64_JUMP_SLOT => "JUMP_SLOT",
            R_X86_64_RELATIVE => "RELATIVE",
            R_X86_64_GOTPCREL => "GOTPCREL",
            R_X86_64_32 => "32",
            R_X86_64_32S => "32S",
            R_X86_64_16 => "16",
            R_X86_64_PC16 => "PC16",
            R_X86_64_8 => "8",
            R_X86_64_PC8 => "PC8",
            R_X86_64_DTPMOD64 => "DTPMOD64",
            R_X86_64_DTPOFF64 => "DTPOFF64",
            R_X86_64_TPOFF64 => "TPOFF64",
            R_X86_64_TLSGD => "TLSGD",
            R_X86_64_TLSLD => "TLSLD",
            R_X86_64_DTPOFF32 => "DTPOFF32",
            R_X86_64_GOTTPOFF => "GOTTPOFF",
            R_X86_64_TPOFF32 => "TPOFF32",
            R_X86_64_PC64 => "PC64",
            R_X86_64_GOTOFF64 => "GOTOFF64",
            R_X86_64_GOTPC32 => "GOTPC32",
            R_X86_64_GOT64 => "GOT64",
            R_X86_64_GOTPCREL64 => "GOTPCREL64",
            R_X86_64_GOTPC64 => "GOTPC64",
            R_X86_64_GOTPLT64 => "GOTPLT64",
            R_X86_64_PLTOFF64 => "PLTOFF64",
            R_X86_64_SIZE32 => "SIZE32",
            R_X86_64_SIZE64 => "SIZE64",
            R_X86_64_GOTPC32_TLSDESC => "GOTPC32_TLSDESC",
            R_X86_64_TLSDESC_CALL => "TLSDESC_CALL",
            R_X86_64_TLSDESC => "TLSDESC",
            R_X86_64_IRELATIVE => "IRELATIVE",
            R_X86_64_RELATIVE64 => "RELATIVE64",
            _ => "UNKNOWN_RELA_TYPE",
        }
    }

    /// Generates the "impure" (libstd-dependent) `Rela` helpers; `$from_fd_endian`
    /// is the endian-aware `from_fd` item used when `no_endian_fd` is disabled.
    macro_rules! elf_rela_impure_impl { ($from_fd_endian:item) => {
        #[cfg(not(feature = "pure"))]
        pub use self::impure::*;
        #[cfg(not(feature = "pure"))]
        mod impure {
            use super::*;
            use std::fs::File;
            use std::io::Seek;
            use std::io::SeekFrom::Start;
            use std::io;
            use std::fmt;
            use std::slice;

            impl fmt::Debug for Rela {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    // r_info packs the symbol index and the relocation type.
                    let sym = r_sym(self.r_info);
                    let typ = r_type(self.r_info);
                    write!(f,
                           "r_offset: {:x} {} @ {} r_addend: {:x}",
                           self.r_offset,
                           type_to_str(typ as u64),
                           sym,
                           self.r_addend)
                }
            }

            /// Gets the rela entries given a rela u64 and the _size_ of the rela section in the binary, in bytes. Works for regular rela and the pltrela table.
            /// Assumes the pointer is valid and can safely return a slice of memory pointing to the relas because:
            /// 1. `rela` points to memory received from the kernel (i.e., it loaded the executable), _or_
            /// 2. The binary has already been mmapped (i.e., it's a `SharedObject`), and hence it's safe to return a slice of that memory.
            /// 3. Or if you obtained the pointer in some other lawful manner
            pub unsafe fn from_raw<'a>(ptr: *const Rela, size: usize) -> &'a [Rela] {
                slice::from_raw_parts(ptr, size / SIZEOF_RELA)
            }

            #[cfg(feature = "no_endian_fd")]
            /// Reads `size / SIZEOF_RELA` entries starting at `offset`, assuming host byte order.
            pub fn from_fd(fd: &mut File, offset: usize, size: usize, _: bool) -> io::Result<Vec<Rela>> {
                use std::io::Read;
                let count = size / SIZEOF_RELA;
                let mut bytes = vec![0u8; size];
                try!(fd.seek(Start(offset as u64)));
                // NOTE(review): a single `read` may return fewer than `size` bytes — confirm.
                try!(fd.read(&mut bytes));
                // Reinterpret the raw bytes as `Rela` entries (host layout/endianness).
                let bytes = unsafe { slice::from_raw_parts(bytes.as_ptr() as *mut Rela, count) };
                let mut res = Vec::with_capacity(count);
                res.extend_from_slice(bytes);
                Ok(res)
            }

            #[cfg(not(feature = "no_endian_fd"))]
            $from_fd_endian
        }
    };}
}
#[cfg(all(not(feature = "pure"), not(feature = "no_elf32"), not(feature = "no_elf")))]
pub use self::impure::*;
#[cfg(all(not(feature = "pure"), not(feature = "no_elf32"), not(feature = "no_elf")))]
#[macro_use]
mod impure {
    use std::fs::File;
    use std::io;
    use super::header;
    use super::super::elf32;
    use super::super::elf64;

    /// Union of the two class-specific pre-parsed binaries, chosen at runtime
    /// from the file's ELF ident.
    #[derive(Debug)]
    pub enum Binary {
        Elf32(elf32::Binary),
        Elf64(elf64::Binary),
    }

    /// Peeks the ELF ident of `fd` to determine its class, then delegates to the
    /// matching 32-bit or 64-bit parser. Unknown class bytes are reported as an
    /// `io::Error`.
    pub fn from_fd(fd: &mut File) -> io::Result<Binary> {
        let (class, is_lsb) = try!(header::peek(fd));
        if class == header::ELFCLASS32 {
            elf32::Binary::from_fd(fd).map(Binary::Elf32)
        } else if class == header::ELFCLASS64 {
            elf64::Binary::from_fd(fd).map(Binary::Elf64)
        } else {
            io_error!("Unknown values in ELF ident header: class: {} is_lsb: {}",
                      class,
                      is_lsb)
        }
    }
}
/// Generates a class-specific (ELF32/ELF64) `Binary` type plus its single-pass
/// file parser.
///
/// `$intmax` is the maximum value of the class' word type; it is used below to
/// fabricate an overflowing "load bias" that turns virtual addresses into file
/// offsets via wrapping addition.
macro_rules! elf_from_fd { ($intmax:expr) => {
    use std::path::Path;
    use std::fs::File;
    use std::io;
    use std::io::Read;
    use std::io::Seek;
    use std::io::SeekFrom::Start;
    pub use super::super::elf::strtab;
    use super::{header, program_header, section_header, dyn, sym, rela};

    /// A "pre-built" ELF binary: headers, symbol tables, relocation tables,
    /// dynamic-linking information and convenience fields, gathered in one pass.
    #[derive(Debug)]
    pub struct Binary {
        pub header: header::Header,
        pub program_headers: Vec<program_header::ProgramHeader>,
        pub section_headers: Vec<section_header::SectionHeader>,
        pub shdr_strtab: strtab::Strtab<'static>,
        pub dynamic: Option<Vec<dyn::Dyn>>,
        pub dynsyms: Vec<sym::Sym>,
        pub dynstrtab: strtab::Strtab<'static>,
        pub syms: Vec<sym::Sym>,
        pub strtab: strtab::Strtab<'static>,
        pub rela: Vec<rela::Rela>,
        pub pltrela: Vec<rela::Rela>,
        /// DT_SONAME from the dynamic string table, if present.
        pub soname: Option<String>,
        /// Contents of the PT_INTERP segment, if present.
        pub interpreter: Option<String>,
        /// Names of the DT_NEEDED libraries.
        pub libraries: Vec<String>,
        /// True when `e_type` is `ET_DYN`.
        pub is_lib: bool,
        /// Size of the backing file, in bytes.
        pub size: usize,
        /// `e_entry` as a usize.
        pub entry: usize,
    }

    impl Binary {
        /// Parses a `Binary` from an already-open file.
        ///
        /// Seeks freely within `fd`; any read or parse failure is surfaced as an
        /// `io::Error`.
        pub fn from_fd(fd: &mut File) -> io::Result<Binary> {
            let header = try!(header::Header::from_fd(fd));
            let entry = header.e_entry as usize;
            let is_lib = header.e_type == header::ET_DYN;
            let is_lsb = header.e_ident[header::EI_DATA] == header::ELFDATA2LSB;
            let program_headers = try!(program_header::ProgramHeader::from_fd(fd, header.e_phoff as u64, header.e_phnum as usize, is_lsb));
            let dynamic = try!(dyn::from_fd(fd, &program_headers, is_lsb));
            let mut bias: usize = 0;
            for ph in &program_headers {
                if ph.p_type == program_header::PT_LOAD {
                    // this is an overflow hack that allows us to use virtual memory addresses as though they're in the file by generating a fake load bias which is then used to overflow the values in the dynamic array, and in a few other places (see Dyn::DynamicInfo), to generate actual file offsets; you may have to marinate a bit on why this works. i am unsure whether it works in every conceivable case. i learned this trick from reading too much dynamic linker C code (a whole other class of C code) and having to deal with broken older kernels on VMs. enjoi
                    bias = (($intmax - ph.p_vaddr).wrapping_add(1)) as usize;
                    break;
                }
            }
            let mut interpreter = None;
            for ph in &program_headers {
                if ph.p_type == program_header::PT_INTERP {
                    // p_filesz includes the terminating NUL, which is dropped here.
                    // NOTE(review): underflows if p_filesz == 0 — confirm inputs are trusted.
                    let mut bytes = vec![0u8; (ph.p_filesz - 1) as usize];
                    try!(fd.seek(Start(ph.p_offset as u64)));
                    try!(fd.read(&mut bytes));
                    // NOTE(review): panics on a non-UTF-8 interpreter path — confirm acceptable.
                    interpreter = Some(String::from_utf8(bytes).unwrap())
                }
            }
            // (removed an accidental debug `println!` of e_shoff/e_shnum here)
            let section_headers = try!(section_header::SectionHeader::from_fd(fd, header.e_shoff as u64, header.e_shnum as usize, is_lsb));
            let mut syms = vec![];
            let mut strtab = strtab::Strtab::default();
            for shdr in &section_headers {
                if shdr.sh_type as u32 == section_header::SHT_SYMTAB {
                    // NOTE(review): divides by sh_entsize; a malformed header with
                    // sh_entsize == 0 would panic — confirm inputs are trusted.
                    let count = shdr.sh_size / shdr.sh_entsize;
                    syms = try!(sym::from_fd(fd, shdr.sh_offset as usize, count as usize, is_lsb))
                }
                if shdr.sh_type as u32 == section_header::SHT_STRTAB {
                    strtab = try!(strtab::Strtab::from_fd(fd, shdr.sh_offset as usize, shdr.sh_size as usize));
                }
            }
            // Out-of-range e_shstrndx degrades to an empty section-header string table.
            let strtab_idx = header.e_shstrndx as usize;
            let shdr_strtab = if strtab_idx >= section_headers.len() {
                strtab::Strtab::default()
            } else {
                let shdr = &section_headers[strtab_idx];
                try!(strtab::Strtab::from_fd(fd, shdr.sh_offset as usize, shdr.sh_size as usize))
            };
            let mut soname = None;
            let mut libraries = vec![];
            let mut dynsyms = vec![];
            let mut rela = vec![];
            let mut pltrela = vec![];
            let mut dynstrtab = strtab::Strtab::default();
            if let Some(ref dynamic) = dynamic {
                let dyn_info = dyn::DynamicInfo::new(&dynamic, bias); // we explicitly overflow the values here with our bias
                dynstrtab = try!(strtab::Strtab::from_fd(fd,
                                                         dyn_info.strtab,
                                                         dyn_info.strsz));
                if dyn_info.soname != 0 {
                    soname = Some(dynstrtab.get(dyn_info.soname).to_owned())
                }
                if dyn_info.needed_count > 0 {
                    let needed = unsafe { dyn::get_needed(dynamic, &dynstrtab, dyn_info.needed_count)};
                    libraries = Vec::with_capacity(dyn_info.needed_count);
                    for lib in needed {
                        libraries.push(lib.to_owned());
                    }
                }
                // The dynamic symbol table conventionally ends where the string table begins.
                let num_syms = (dyn_info.strtab - dyn_info.symtab) / dyn_info.syment;
                dynsyms = try!(sym::from_fd(fd, dyn_info.symtab, num_syms, is_lsb));
                rela = try!(rela::from_fd(fd, dyn_info.rela, dyn_info.relasz, is_lsb));
                pltrela = try!(rela::from_fd(fd, dyn_info.jmprel, dyn_info.pltrelsz, is_lsb));
            }
            // Propagate metadata errors instead of panicking (was `.unwrap()`).
            let size = try!(fd.metadata()).len() as usize;
            let elf = Binary {
                header: header,
                program_headers: program_headers,
                section_headers: section_headers,
                shdr_strtab: shdr_strtab,
                dynamic: dynamic,
                dynsyms: dynsyms,
                dynstrtab: dynstrtab,
                syms: syms,
                strtab: strtab,
                rela: rela,
                pltrela: pltrela,
                soname: soname,
                interpreter: interpreter,
                libraries: libraries,
                is_lib: is_lib,
                size: size,
                entry: entry,
            };
            Ok(elf)
        }

        /// Convenience wrapper: opens `path` and parses it, rejecting files too
        /// small to even contain an ELF header.
        pub fn from_path(path: &Path) -> io::Result<Binary> {
            let mut fd = try!(File::open(&path));
            // Propagate metadata errors instead of panicking (was `.unwrap()`).
            let metadata = try!(fd.metadata());
            if metadata.len() < header::SIZEOF_EHDR as u64 {
                io_error!("Error: {:?} size is smaller than an ELF header", path.as_os_str())
            } else {
                Self::from_fd(&mut fd)
            }
        }
    }
};}
// elf: remove accidental print from binary loader
//! Access ELF constants and other helper functions, which are independent of ELF bithood. Also
//! provides a simple parser which returns an Elf64 or Elf32 "pre-built" binary.
//!
//! **WARNING**: to use the automagic ELF datatype union parser, you _must_ enable both elf and
//! elf32 features - i.e., do not use `no_elf` **NOR** `no_elf32`, otherwise you'll get obscure
//! errors about [goblin::elf::from_fd](fn.from_fd.html) missing.
#[cfg(not(feature = "pure"))]
pub mod strtab;
// These are shareable values for the 32/64 bit implementations.
//
// They are publicly re-exported by the pub-using module
#[macro_use]
pub mod header {
macro_rules! elf_header {
($size:ident) => {
#[repr(C)]
#[derive(Clone, Default)]
pub struct Header {
pub e_ident: [u8; SIZEOF_IDENT],
pub e_type: u16,
pub e_machine: u16,
pub e_version: u32,
pub e_entry: $size,
pub e_phoff: $size,
pub e_shoff: $size,
pub e_flags: u32,
pub e_ehsize: u16,
pub e_phentsize: u16,
pub e_phnum: u16,
pub e_shentsize: u16,
pub e_shnum: u16,
pub e_shstrndx: u16,
}
}
}
/// No file type.
pub const ET_NONE: u16 = 0;
/// Relocatable file.
pub const ET_REL: u16 = 1;
/// Executable file.
pub const ET_EXEC: u16 = 2;
/// Shared object file.
pub const ET_DYN: u16 = 3;
/// Core file.
pub const ET_CORE: u16 = 4;
/// Number of defined types.
pub const ET_NUM: u16 = 5;
/// The ELF magic number.
pub const ELFMAG: &'static [u8; 4] = b"\x7FELF";
/// SELF (Security-enhanced ELF) magic number.
pub const SELFMAG: usize = 4;
/// File class byte index.
pub const EI_CLASS: usize = 4;
/// Invalid class.
pub const ELFCLASSNONE: u8 = 0;
/// 32-bit objects.
pub const ELFCLASS32: u8 = 1;
/// 64-bit objects.
pub const ELFCLASS64: u8 = 2;
/// ELF class number.
pub const ELFCLASSNUM: u8 = 3;
/// Data encoding byte index.
pub const EI_DATA: usize = 5;
/// Invalid data encoding.
pub const ELFDATANONE: u8 = 0;
/// 2's complement, little endian.
pub const ELFDATA2LSB: u8 = 1;
/// 2's complement, big endian.
pub const ELFDATA2MSB: u8 = 2;
/// Number of bytes in an identifier.
pub const SIZEOF_IDENT: usize = 16;
/// Convert an ET value to their associated string.
#[inline]
pub fn et_to_str(et: u16) -> &'static str {
match et {
ET_NONE => "NONE",
ET_REL => "REL",
ET_EXEC => "EXEC",
ET_DYN => "DYN",
ET_CORE => "CORE",
ET_NUM => "NUM",
_ => "UNKNOWN_ET",
}
}
#[cfg(not(feature = "pure"))]
pub use self::impure::*;
#[cfg(not(feature = "pure"))]
mod impure {
use super::*;
use std::fs::File;
use std::io;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom::Start;
/// Search forward in the stream.
pub fn peek(fd: &mut File) -> io::Result<(u8, bool)> {
let mut header = [0u8; SIZEOF_IDENT];
try!(fd.seek(Start(0)));
match try!(fd.read(&mut header)) {
SIZEOF_IDENT => {
let class = header[EI_CLASS];
let is_lsb = header[EI_DATA] == ELFDATA2LSB;
Ok((class, is_lsb))
}
count => {
io_error!("Error: {:?} size is smaller than an ELF identication header",
count)
}
}
}
}
/// Derive the `from_bytes` method for a header.
macro_rules! elf_header_from_bytes {
() => {
/// Returns the corresponding ELF header from the given byte array.
pub fn from_bytes(bytes: &[u8; SIZEOF_EHDR]) -> Header {
// This is not unsafe because the header's size is encoded in the function,
// although the header can be semantically invalid.
let header: &Header = unsafe { mem::transmute(bytes) };
header.clone()
}
};
}
/// Derive the `from_fd` method for a header.
macro_rules! elf_header_from_fd {
() => {
/// Load a header from a file.
#[cfg(feature = "no_endian_fd")]
pub fn from_fd(fd: &mut File) -> io::Result<Header> {
let mut elf_header = [0; SIZEOF_EHDR];
try!(fd.read(&mut elf_header));
Ok(Header::from_bytes(&elf_header))
}
};
}
macro_rules! elf_header_impure_impl {
($header:item) => {
#[cfg(not(feature = "pure"))]
pub use self::impure::*;
#[cfg(not(feature = "pure"))]
mod impure {
use super::*;
use std::mem;
use std::fmt;
use std::fs::File;
use std::io::Read;
use std::io;
impl fmt::Debug for Header {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"e_ident: {:?} e_type: {} e_machine: 0x{:x} e_version: 0x{:x} e_entry: 0x{:x} \
e_phoff: 0x{:x} e_shoff: 0x{:x} e_flags: {:x} e_ehsize: {} e_phentsize: {} \
e_phnum: {} e_shentsize: {} e_shnum: {} e_shstrndx: {}",
self.e_ident,
et_to_str(self.e_type),
self.e_machine,
self.e_version,
self.e_entry,
self.e_phoff,
self.e_shoff,
self.e_flags,
self.e_ehsize,
self.e_phentsize,
self.e_phnum,
self.e_shentsize,
self.e_shnum,
self.e_shstrndx)
}
}
$header
}
};
}
}
/// Program segment (program header) constants and helper-macro generators.
#[macro_use]
pub mod program_header {
    /// Program header table entry unused.
    pub const PT_NULL: u32 = 0;
    /// Loadable program segment.
    pub const PT_LOAD: u32 = 1;
    /// Dynamic linking information.
    pub const PT_DYNAMIC: u32 = 2;
    /// Program interpreter.
    pub const PT_INTERP: u32 = 3;
    /// Auxiliary information.
    pub const PT_NOTE: u32 = 4;
    /// Reserved.
    pub const PT_SHLIB: u32 = 5;
    /// Entry for the program header table itself.
    pub const PT_PHDR: u32 = 6;
    /// Thread-local storage segment.
    pub const PT_TLS: u32 = 7;
    /// Number of defined types.
    pub const PT_NUM: u32 = 8;
    /// Start of OS-specific segment types.
    pub const PT_LOOS: u32 = 0x60000000;
    /// GCC `.eh_frame_hdr` segment.
    pub const PT_GNU_EH_FRAME: u32 = 0x6474e550;
    /// Indicates stack executability.
    pub const PT_GNU_STACK: u32 = 0x6474e551;
    /// Read-only after relocation.
    pub const PT_GNU_RELRO: u32 = 0x6474e552;
    /// Sun-specific low bound (same value as PT_SUNWBSS).
    pub const PT_LOSUNW: u32 = 0x6ffffffa;
    /// Sun-specific BSS segment.
    pub const PT_SUNWBSS: u32 = 0x6ffffffa;
    /// Sun-specific stack segment.
    pub const PT_SUNWSTACK: u32 = 0x6ffffffb;
    /// Sun-specific high bound (same value as PT_HIOS).
    pub const PT_HISUNW: u32 = 0x6fffffff;
    /// End of OS-specific segment types.
    pub const PT_HIOS: u32 = 0x6fffffff;
    /// Start of processor-specific segment types.
    pub const PT_LOPROC: u32 = 0x70000000;
    /// End of processor-specific segment types.
    pub const PT_HIPROC: u32 = 0x7fffffff;
    /// Segment is executable
    pub const PF_X: u32 = 1 << 0;
    /// Segment is writable
    pub const PF_W: u32 = 1 << 1;
    /// Segment is readable
    pub const PF_R: u32 = 1 << 2;

    /// Converts a `p_type` value to its constant name, or `"UNKNOWN_PT"`.
    pub fn pt_to_str(pt: u32) -> &'static str {
        match pt {
            PT_NULL => "PT_NULL",
            PT_LOAD => "PT_LOAD",
            PT_DYNAMIC => "PT_DYNAMIC",
            PT_INTERP => "PT_INTERP",
            PT_NOTE => "PT_NOTE",
            PT_SHLIB => "PT_SHLIB",
            PT_PHDR => "PT_PHDR",
            PT_TLS => "PT_TLS",
            PT_NUM => "PT_NUM",
            PT_LOOS => "PT_LOOS",
            PT_GNU_EH_FRAME => "PT_GNU_EH_FRAME",
            PT_GNU_STACK => "PT_GNU_STACK",
            PT_GNU_RELRO => "PT_GNU_RELRO",
            PT_SUNWBSS => "PT_SUNWBSS",
            PT_SUNWSTACK => "PT_SUNWSTACK",
            PT_HIOS => "PT_HIOS",
            PT_LOPROC => "PT_LOPROC",
            PT_HIPROC => "PT_HIPROC",
            _ => "UNKNOWN_PT",
        }
    }

    /// Derives `from_bytes`: reinterprets a byte buffer as `phnum` program headers
    /// (host layout/endianness) and copies them into a `Vec`.
    macro_rules! elf_program_header_from_bytes { () => {
        pub fn from_bytes(bytes: &[u8], phnum: usize) -> Vec<ProgramHeader> {
            let bytes = unsafe { slice::from_raw_parts(bytes.as_ptr() as *mut ProgramHeader, phnum) };
            let mut phdrs = Vec::with_capacity(phnum);
            phdrs.extend_from_slice(bytes);
            phdrs
        }};}

    /// Derives `from_raw_parts`: borrows `phnum` headers from a raw pointer
    /// (caller guarantees validity).
    macro_rules! elf_program_header_from_raw_parts { () => {
        pub unsafe fn from_raw_parts<'a>(phdrp: *const ProgramHeader,
                                         phnum: usize)
                                         -> &'a [ProgramHeader] {
            slice::from_raw_parts(phdrp, phnum)
        }};}

    /// Derives the host-endian `from_fd` (the `no_endian_fd` build).
    macro_rules! elf_program_header_from_fd { () => {
        #[cfg(feature = "no_endian_fd")]
        pub fn from_fd(fd: &mut File, offset: u64, count: usize, _: bool) -> io::Result<Vec<ProgramHeader>> {
            use std::io::Read;
            let mut phdrs = vec![0u8; count * SIZEOF_PHDR];
            try!(fd.seek(Start(offset)));
            // NOTE(review): a single `read` may fill fewer bytes than requested — confirm.
            try!(fd.read(&mut phdrs));
            Ok(ProgramHeader::from_bytes(&phdrs, count))
        }
    };}

    /// Wraps an endian-aware `from_fd` item for the non-`no_endian_fd` build.
    macro_rules! elf_program_header_from_fd_endian { ($from_fd_endian:item) => {
        #[cfg(not(feature = "no_endian_fd"))]
        $from_fd_endian
    };}

    /// Wraps a class-specific `$header` item with the shared `Debug` impl in an
    /// "impure" (libstd-dependent) module.
    macro_rules! elf_program_header_impure_impl { ($header:item) => {
        #[cfg(not(feature = "pure"))]
        pub use self::impure::*;
        #[cfg(not(feature = "pure"))]
        mod impure {
            use super::*;
            use std::slice;
            use std::fmt;
            use std::fs::File;
            use std::io::Seek;
            use std::io::SeekFrom::Start;
            use std::io;

            impl fmt::Debug for ProgramHeader {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    write!(f,
                           "p_type: {} p_flags 0x{:x} p_offset: 0x{:x} p_vaddr: 0x{:x} p_paddr: 0x{:x} \
                            p_filesz: 0x{:x} p_memsz: 0x{:x} p_align: {}",
                           pt_to_str(self.p_type),
                           self.p_flags,
                           self.p_offset,
                           self.p_vaddr,
                           self.p_paddr,
                           self.p_filesz,
                           self.p_memsz,
                           self.p_align)
                }
            }
            $header
        }
    };}
}
/// Section header constants and helper-macro generators.
#[macro_use]
pub mod section_header {
    /// Generates the section header struct for a class whose word type is `$size`.
    macro_rules! elf_section_header {
        ($size:ident) => {
            #[repr(C)]
            #[derive(Clone, PartialEq, Default)]
            pub struct SectionHeader {
                /// Section name (string tbl index)
                pub sh_name: u32,
                /// Section type
                pub sh_type: u32,
                /// Section flags
                pub sh_flags: $size,
                /// Section virtual addr at execution
                pub sh_addr: $size,
                /// Section file offset
                pub sh_offset: $size,
                /// Section size in bytes
                pub sh_size: $size,
                /// Link to another section
                pub sh_link: u32,
                /// Additional section information
                pub sh_info: u32,
                /// Section alignment
                pub sh_addralign: $size,
                /// Entry size if section holds table
                pub sh_entsize: $size,
            }
        }
    }
    /// Undefined section.
    pub const SHN_UNDEF: u32 = 0;
    /// Start of reserved indices.
    pub const SHN_LORESERVE: u32 = 0xff00;
    /// Start of processor-specific.
    pub const SHN_LOPROC: u32 = 0xff00;
    /// Order section before all others (Solaris).
    pub const SHN_BEFORE: u32 = 0xff00;
    /// Order section after all others (Solaris).
    pub const SHN_AFTER: u32 = 0xff01;
    /// End of processor-specific.
    pub const SHN_HIPROC: u32 = 0xff1f;
    /// Start of OS-specific.
    pub const SHN_LOOS: u32 = 0xff20;
    /// End of OS-specific.
    pub const SHN_HIOS: u32 = 0xff3f;
    /// Associated symbol is absolute.
    pub const SHN_ABS: u32 = 0xfff1;
    /// Associated symbol is common.
    pub const SHN_COMMON: u32 = 0xfff2;
    /// Index is in extra table.
    pub const SHN_XINDEX: u32 = 0xffff;
    /// End of reserved indices.
    pub const SHN_HIRESERVE: u32 = 0xffff;
    // === Legal values for sh_type (section type). ===
    /// Section header table entry unused.
    pub const SHT_NULL: u32 = 0;
    /// Program data.
    pub const SHT_PROGBITS: u32 = 1;
    /// Symbol table.
    pub const SHT_SYMTAB: u32 = 2;
    /// String table.
    pub const SHT_STRTAB: u32 = 3;
    /// Relocation entries with addends.
    pub const SHT_RELA: u32 = 4;
    /// Symbol hash table.
    pub const SHT_HASH: u32 = 5;
    /// Dynamic linking information.
    pub const SHT_DYNAMIC: u32 = 6;
    /// Notes.
    pub const SHT_NOTE: u32 = 7;
    /// Program space with no data (bss).
    pub const SHT_NOBITS: u32 = 8;
    /// Relocation entries, no addends.
    pub const SHT_REL: u32 = 9;
    /// Reserved.
    pub const SHT_SHLIB: u32 = 10;
    /// Dynamic linker symbol table.
    pub const SHT_DYNSYM: u32 = 11;
    /// Array of constructors.
    pub const SHT_INIT_ARRAY: u32 = 14;
    /// Array of destructors.
    pub const SHT_FINI_ARRAY: u32 = 15;
    /// Array of pre-constructors.
    pub const SHT_PREINIT_ARRAY: u32 = 16;
    /// Section group.
    pub const SHT_GROUP: u32 = 17;
    /// Extended section indeces.
    pub const SHT_SYMTAB_SHNDX: u32 = 18;
    /// Number of defined types.
    pub const SHT_NUM: u32 = 19;
    /// Start OS-specific.
    pub const SHT_LOOS: u32 = 0x60000000;
    /// Object attributes.
    pub const SHT_GNU_ATTRIBUTES: u32 = 0x6ffffff5;
    /// GNU-style hash table.
    pub const SHT_GNU_HASH: u32 = 0x6ffffff6;
    /// Prelink library list.
    pub const SHT_GNU_LIBLIST: u32 = 0x6ffffff7;
    /// Checksum for DSO content.
    pub const SHT_CHECKSUM: u32 = 0x6ffffff8;
    /// Sun-specific low bound.
    pub const SHT_LOSUNW: u32 = 0x6ffffffa;
    pub const SHT_SUNW_MOVE: u32 = 0x6ffffffa;
    pub const SHT_SUNW_COMDAT: u32 = 0x6ffffffb;
    pub const SHT_SUNW_SYMINFO: u32 = 0x6ffffffc;
    /// Version definition section.
    pub const SHT_GNU_VERDEF: u32 = 0x6ffffffd;
    /// Version needs section.
    pub const SHT_GNU_VERNEED: u32 = 0x6ffffffe;
    /// Version symbol table.
    pub const SHT_GNU_VERSYM: u32 = 0x6fffffff;
    /// Sun-specific high bound.
    pub const SHT_HISUNW: u32 = 0x6fffffff;
    /// End OS-specific type.
    pub const SHT_HIOS: u32 = 0x6fffffff;
    /// Start of processor-specific.
    pub const SHT_LOPROC: u32 = 0x70000000;
    /// End of processor-specific.
    pub const SHT_HIPROC: u32 = 0x7fffffff;
    /// Start of application-specific.
    pub const SHT_LOUSER: u32 = 0x80000000;
    /// End of application-specific.
    pub const SHT_HIUSER: u32 = 0x8fffffff;
    // Legal values for sh_flags (section flags)
    /// Writable.
    pub const SHF_WRITE: u32 = 1 << 0;
    /// Occupies memory during execution.
    pub const SHF_ALLOC: u32 = 1 << 1;
    /// Executable.
    pub const SHF_EXECINSTR: u32 = 1 << 2;
    /// Might be merged.
    pub const SHF_MERGE: u32 = 1 << 4;
    /// Contains nul-terminated strings.
    pub const SHF_STRINGS: u32 = 1 << 5;
    /// `sh_info' contains SHT index.
    pub const SHF_INFO_LINK: u32 = 1 << 6;
    /// Preserve order after combining.
    pub const SHF_LINK_ORDER: u32 = 1 << 7;
    /// Non-standard OS specific handling required.
    pub const SHF_OS_NONCONFORMING: u32 = 1 << 8;
    /// Section is member of a group.
    pub const SHF_GROUP: u32 = 1 << 9;
    /// Section hold thread-local data.
    pub const SHF_TLS: u32 = 1 << 10;
    /// Section with compressed data.
    pub const SHF_COMPRESSED: u32 = 1 << 11;
    /// OS-specific..
    pub const SHF_MASKOS: u32 = 0x0ff00000;
    /// Processor-specific.
    pub const SHF_MASKPROC: u32 = 0xf0000000;
    /// Special ordering requirement (Solaris).
    pub const SHF_ORDERED: u32 = 1 << 30;
    // /// Section is excluded unless referenced or allocated (Solaris).
    // pub const SHF_EXCLUDE: u32 = 1U << 31;

    /// Converts an `sh_type` value to its constant name, or `"UNKNOWN_SHT"`.
    ///
    /// Constants sharing a value (`SHT_LOSUNW`/`SHT_SUNW_MOVE`,
    /// `SHT_GNU_VERSYM`/`SHT_HISUNW`/`SHT_HIOS`) are reported under their most
    /// specific name. (Previously a TODO stub that returned `"UNKNOWN_SHT"` for
    /// every input; unknown values still map to `"UNKNOWN_SHT"`.)
    pub fn sht_to_str(sht: u32) -> &'static str {
        match sht {
            SHT_NULL => "SHT_NULL",
            SHT_PROGBITS => "SHT_PROGBITS",
            SHT_SYMTAB => "SHT_SYMTAB",
            SHT_STRTAB => "SHT_STRTAB",
            SHT_RELA => "SHT_RELA",
            SHT_HASH => "SHT_HASH",
            SHT_DYNAMIC => "SHT_DYNAMIC",
            SHT_NOTE => "SHT_NOTE",
            SHT_NOBITS => "SHT_NOBITS",
            SHT_REL => "SHT_REL",
            SHT_SHLIB => "SHT_SHLIB",
            SHT_DYNSYM => "SHT_DYNSYM",
            SHT_INIT_ARRAY => "SHT_INIT_ARRAY",
            SHT_FINI_ARRAY => "SHT_FINI_ARRAY",
            SHT_PREINIT_ARRAY => "SHT_PREINIT_ARRAY",
            SHT_GROUP => "SHT_GROUP",
            SHT_SYMTAB_SHNDX => "SHT_SYMTAB_SHNDX",
            SHT_NUM => "SHT_NUM",
            SHT_LOOS => "SHT_LOOS",
            SHT_GNU_ATTRIBUTES => "SHT_GNU_ATTRIBUTES",
            SHT_GNU_HASH => "SHT_GNU_HASH",
            SHT_GNU_LIBLIST => "SHT_GNU_LIBLIST",
            SHT_CHECKSUM => "SHT_CHECKSUM",
            SHT_SUNW_MOVE => "SHT_SUNW_MOVE", // == SHT_LOSUNW
            SHT_SUNW_COMDAT => "SHT_SUNW_COMDAT",
            SHT_SUNW_SYMINFO => "SHT_SUNW_SYMINFO",
            SHT_GNU_VERDEF => "SHT_GNU_VERDEF",
            SHT_GNU_VERNEED => "SHT_GNU_VERNEED",
            SHT_GNU_VERSYM => "SHT_GNU_VERSYM", // == SHT_HISUNW == SHT_HIOS
            SHT_LOPROC => "SHT_LOPROC",
            SHT_HIPROC => "SHT_HIPROC",
            SHT_LOUSER => "SHT_LOUSER",
            SHT_HIUSER => "SHT_HIUSER",
            _ => "UNKNOWN_SHT",
        }
    }

    /// Derives `from_bytes`: reinterprets a byte buffer as `shnum` section headers
    /// (host layout/endianness) and copies them into a `Vec`.
    macro_rules! elf_section_header_from_bytes { () => {
        pub fn from_bytes(bytes: &[u8], shnum: usize) -> Vec<SectionHeader> {
            let bytes = unsafe { slice::from_raw_parts(bytes.as_ptr() as *mut SectionHeader, shnum) };
            let mut shdrs = Vec::with_capacity(shnum);
            shdrs.extend_from_slice(bytes);
            shdrs
        }};}

    /// Derives `from_raw_parts`: borrows `shnum` headers from a raw pointer
    /// (caller guarantees validity).
    macro_rules! elf_section_header_from_raw_parts { () => {
        pub unsafe fn from_raw_parts<'a>(shdrp: *const SectionHeader,
                                         shnum: usize)
                                         -> &'a [SectionHeader] {
            slice::from_raw_parts(shdrp, shnum)
        }};}

    /// Derives the host-endian `from_fd` (the `no_endian_fd` build).
    macro_rules! elf_section_header_from_fd { () => {
        #[cfg(feature = "no_endian_fd")]
        pub fn from_fd(fd: &mut File, offset: u64, count: usize, _: bool) -> io::Result<Vec<SectionHeader>> {
            use std::io::Read;
            let mut shdrs = vec![0u8; count * SIZEOF_SHDR];
            try!(fd.seek(Start(offset)));
            // NOTE(review): a single `read` may fill fewer bytes than requested — confirm.
            try!(fd.read(&mut shdrs));
            Ok(SectionHeader::from_bytes(&shdrs, count))
        }
    };}

    /// Wraps an endian-aware `from_fd` item for the non-`no_endian_fd` build.
    macro_rules! elf_section_header_from_fd_endian { ($from_fd_endian:item) => {
        #[cfg(not(feature = "no_endian_fd"))]
        $from_fd_endian
    };}

    /// Wraps a class-specific `$header` item with the shared `Debug` impl in an
    /// "impure" (libstd-dependent) module.
    macro_rules! elf_section_header_impure_impl { ($header:item) => {
        #[cfg(not(feature = "pure"))]
        pub use self::impure::*;
        #[cfg(not(feature = "pure"))]
        mod impure {
            use super::*;
            use std::slice;
            use std::fmt;
            use std::fs::File;
            use std::io::Seek;
            use std::io::SeekFrom::Start;
            use std::io;

            impl fmt::Debug for SectionHeader {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    write!(f,
                           "sh_name: {} sh_type {} sh_flags: 0x{:x} sh_addr: 0x{:x} sh_offset: 0x{:x} \
                            sh_size: 0x{:x} sh_link: 0x{:x} sh_info: 0x{:x} sh_addralign 0x{:x} sh_entsize 0x{:x}",
                           self.sh_name,
                           sht_to_str(self.sh_type as u32),
                           self.sh_flags,
                           self.sh_addr,
                           self.sh_offset,
                           self.sh_size,
                           self.sh_link,
                           self.sh_info,
                           self.sh_addralign,
                           self.sh_entsize)
                }
            }
            $header
        }
    };}
}
#[macro_use]
pub mod sym {
// === Sym bindings (high nibble of st_info) ===
/// Local symbol.
pub const STB_LOCAL: u8 = 0;
/// Global symbol.
pub const STB_GLOBAL: u8 = 1;
/// Weak symbol.
pub const STB_WEAK: u8 = 2;
/// Number of defined types.
pub const STB_NUM: u8 = 3;
/// Start of OS-specific.
pub const STB_LOOS: u8 = 10;
/// Unique symbol (GNU extension; deliberately shares value 10 with STB_LOOS).
pub const STB_GNU_UNIQUE: u8 = 10;
/// End of OS-specific.
pub const STB_HIOS: u8 = 12;
/// Start of processor-specific.
pub const STB_LOPROC: u8 = 13;
/// End of processor-specific.
pub const STB_HIPROC: u8 = 15;
// === Sym types (low nibble of st_info) ===
// (this header was previously a `///` doc comment, which wrongly attached
// itself to STT_NOTYPE below)
/// Symbol type is unspecified.
pub const STT_NOTYPE: u8 = 0;
/// Symbol is a data object.
pub const STT_OBJECT: u8 = 1;
/// Symbol is a code object.
pub const STT_FUNC: u8 = 2;
/// Symbol associated with a section.
pub const STT_SECTION: u8 = 3;
/// Symbol's name is file name.
pub const STT_FILE: u8 = 4;
/// Symbol is a common data object.
pub const STT_COMMON: u8 = 5;
/// Symbol is thread-local data object.
pub const STT_TLS: u8 = 6;
/// Number of defined types.
pub const STT_NUM: u8 = 7;
/// Start of OS-specific.
pub const STT_LOOS: u8 = 10;
/// Symbol is indirect code object (GNU IFUNC; shares value 10 with STT_LOOS).
pub const STT_GNU_IFUNC: u8 = 10;
/// End of OS-specific.
pub const STT_HIOS: u8 = 12;
/// Start of processor-specific.
pub const STT_LOPROC: u8 = 13;
/// End of processor-specific.
pub const STT_HIPROC: u8 = 15;
/// Extract the symbol binding from an `st_info` byte.
///
/// The binding is encoded in the high nibble of `st_info`.
#[inline]
pub fn st_bind(info: u8) -> u8 {
    (info & 0xf0) >> 4
}
/// Extract the symbol type from an `st_info` byte.
///
/// The type is encoded in the low nibble of `st_info`.
#[inline]
pub fn st_type(info: u8) -> u8 {
    info % 16
}
/// Is this information defining an import?
///
/// A symbol is treated as an import when it is globally bound but carries no
/// value (i.e., it is defined in some other object).
#[inline]
pub fn is_import(info: u8, value: u8) -> bool {
    value == 0 && st_bind(info) == STB_GLOBAL
}
/// Convenience wrapper: the `&'static str` name of the type encoded in
/// a symbol's `st_info` byte.
pub fn get_type(info: u8) -> &'static str {
    let typ = st_type(info);
    type_to_str(typ)
}
/// Get the string for some binding.
#[inline]
pub fn bind_to_str(typ: u8) -> &'static str {
match typ {
STB_LOCAL => "LOCAL",
STB_GLOBAL => "GLOBAL",
STB_WEAK => "WEAK",
STB_NUM => "NUM",
STB_GNU_UNIQUE => "GNU_UNIQUE",
_ => "UNKNOWN_STB",
}
}
/// Get the string for some type.
#[inline]
pub fn type_to_str(typ: u8) -> &'static str {
match typ {
STT_NOTYPE => "NOTYPE",
STT_OBJECT => "OBJECT",
STT_FUNC => "FUNC",
STT_SECTION => "SECTION",
STT_FILE => "FILE",
STT_COMMON => "COMMON",
STT_TLS => "TLS",
STT_NUM => "NUM",
STT_GNU_IFUNC => "GNU_IFUNC",
_ => "UNKNOWN_STT",
}
}
// Generates the non-`pure` (std-dependent) impl surface for `Sym`: a `Debug`
// impl, a raw-pointer slice view, the endian-unaware `from_fd` (feature
// `no_endian_fd`), plus the caller-supplied endian-aware reader otherwise.
macro_rules! elf_sym_impure_impl {
    ($from_fd_endian:item) => {
        #[cfg(not(feature = "pure"))]
        pub use self::impure::*;
        #[cfg(not(feature = "pure"))]
        mod impure {
            use std::fs::File;
            use std::io::Read;
            use std::io::Seek;
            use std::io::SeekFrom::Start;
            use std::io;
            use std::fmt;
            use std::slice;
            use super::*;
            impl fmt::Debug for Sym {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    let bind = st_bind(self.st_info);
                    let typ = st_type(self.st_info);
                    write!(f,
                           "st_name: {} {} {} st_other: {} st_shndx: {} st_value: {:x} st_size: {}",
                           self.st_name,
                           bind_to_str(bind),
                           type_to_str(typ),
                           self.st_other,
                           self.st_shndx,
                           self.st_value,
                           self.st_size)
                }
            }
            /// Borrow `count` symbols starting at `symp`.
            ///
            /// Safety: `symp` must point at `count` valid, aligned `Sym`s that
            /// outlive the returned slice.
            pub unsafe fn from_raw<'a>(symp: *const Sym, count: usize) -> &'a [Sym] {
                slice::from_raw_parts(symp, count)
            }
            // TODO: this is broken, fix (not used often by me since don't have luxury of debug symbols usually)
            #[cfg(feature = "no_endian_fd")]
            pub fn from_fd<'a>(fd: &mut File, offset: usize, count: usize, _: bool) -> io::Result<Vec<Sym>> {
                // TODO: AFAIK this shouldn't work, since i pass in a byte size...
                // NOTE(review): the buffer is sized as `count * SIZEOF_SYM`
                // bytes, so `count` must be an element count — confirm what
                // callers actually pass (see the TODOs above).
                let mut bytes = vec![0u8; count * SIZEOF_SYM];
                try!(fd.seek(Start(offset as u64)));
                try!(fd.read(&mut bytes));
                let bytes = unsafe { slice::from_raw_parts(bytes.as_ptr() as *mut Sym, count) };
                let mut syms = Vec::with_capacity(count);
                syms.extend_from_slice(bytes);
                // Drops *adjacent* duplicates only — presumably trailing
                // zeroed entries; TODO confirm intent.
                syms.dedup();
                Ok(syms)
            }
            #[cfg(not(feature = "no_endian_fd"))]
            $from_fd_endian
        }
    };
}
}
#[macro_use]
pub mod dyn {
// TODO: figure out what's the best, most friendly + safe API choice here - u32s or u64s
// remember that DT_TAG is "pointer sized"/used as address sometimes Original rationale: I
// decided to use u64 instead of u32 due to pattern matching use case seems safer to cast the
// elf32's d_tag from u32 -> u64 at runtime instead of casting the elf64's d_tag from u64 ->
// u32 at runtime
//
// Dynamic array tags (`d_tag` values); descriptions follow glibc's elf.h.
/// Marks the end of the `_DYNAMIC` array.
pub const DT_NULL: u64 = 0;
/// Name of a needed library (string-table offset).
pub const DT_NEEDED: u64 = 1;
/// Size in bytes of the PLT relocs.
pub const DT_PLTRELSZ: u64 = 2;
/// Processor-defined value (PLT and/or GOT).
pub const DT_PLTGOT: u64 = 3;
/// Address of the symbol hash table.
pub const DT_HASH: u64 = 4;
/// Address of the string table.
pub const DT_STRTAB: u64 = 5;
/// Address of the symbol table.
pub const DT_SYMTAB: u64 = 6;
/// Address of the Rela relocs.
pub const DT_RELA: u64 = 7;
/// Total size of the Rela relocs.
pub const DT_RELASZ: u64 = 8;
/// Size of one Rela reloc.
pub const DT_RELAENT: u64 = 9;
/// Size of the string table.
pub const DT_STRSZ: u64 = 10;
/// Size of one symbol table entry.
pub const DT_SYMENT: u64 = 11;
/// Address of the init function.
pub const DT_INIT: u64 = 12;
/// Address of the termination function.
pub const DT_FINI: u64 = 13;
/// Name of the shared object (string-table offset).
pub const DT_SONAME: u64 = 14;
/// Library search path (deprecated in favour of DT_RUNPATH).
pub const DT_RPATH: u64 = 15;
/// Start symbol search within this object first.
pub const DT_SYMBOLIC: u64 = 16;
/// Address of the Rel relocs.
pub const DT_REL: u64 = 17;
/// Total size of the Rel relocs.
pub const DT_RELSZ: u64 = 18;
/// Size of one Rel reloc.
pub const DT_RELENT: u64 = 19;
/// Type of reloc in the PLT (DT_REL or DT_RELA).
pub const DT_PLTREL: u64 = 20;
/// For debugging; contents unspecified.
pub const DT_DEBUG: u64 = 21;
/// Relocations might modify a non-writable segment.
pub const DT_TEXTREL: u64 = 22;
/// Address of the PLT relocs.
pub const DT_JMPREL: u64 = 23;
/// Process all relocations before transferring control.
pub const DT_BIND_NOW: u64 = 24;
/// Array of init function addresses.
pub const DT_INIT_ARRAY: u64 = 25;
/// Array of fini function addresses.
pub const DT_FINI_ARRAY: u64 = 26;
/// Size in bytes of DT_INIT_ARRAY.
pub const DT_INIT_ARRAYSZ: u64 = 27;
/// Size in bytes of DT_FINI_ARRAY.
pub const DT_FINI_ARRAYSZ: u64 = 28;
/// Library search path.
pub const DT_RUNPATH: u64 = 29;
/// Flags for this object; see the DF_* values below.
pub const DT_FLAGS: u64 = 30;
/// Start of the encoded tag range (shares value 32 with DT_PREINIT_ARRAY).
pub const DT_ENCODING: u64 = 32;
/// Array of pre-init function addresses.
pub const DT_PREINIT_ARRAY: u64 = 32;
/// Size in bytes of DT_PREINIT_ARRAY.
pub const DT_PREINIT_ARRAYSZ: u64 = 33;
/// Number of generic tag values used.
pub const DT_NUM: u64 = 34;
/// Start of OS-specific tags.
pub const DT_LOOS: u64 = 0x6000000d;
/// End of OS-specific tags.
pub const DT_HIOS: u64 = 0x6ffff000;
/// Start of processor-specific tags.
pub const DT_LOPROC: u64 = 0x70000000;
/// End of processor-specific tags.
pub const DT_HIPROC: u64 = 0x7fffffff;
// pub const DT_PROCNUM: u64 = DT_MIPS_NUM;
/// GNU version symbol table address.
pub const DT_VERSYM: u64 = 0x6ffffff0;
/// Count of RELATIVE relocations in DT_RELA.
pub const DT_RELACOUNT: u64 = 0x6ffffff9;
/// Count of RELATIVE relocations in DT_REL.
pub const DT_RELCOUNT: u64 = 0x6ffffffa;
/// GNU-style hash table address.
pub const DT_GNU_HASH: u64 = 0x6ffffef5;
/// Address of the version definition table.
pub const DT_VERDEF: u64 = 0x6ffffffc;
/// Number of version definitions.
pub const DT_VERDEFNUM: u64 = 0x6ffffffd;
/// Address of the table of needed versions.
pub const DT_VERNEED: u64 = 0x6ffffffe;
/// Number of needed versions.
pub const DT_VERNEEDNUM: u64 = 0x6fffffff;
/// State flags; see the DF_1_* values below.
pub const DT_FLAGS_1: u64 = 0x6ffffffb;
/// Converts a tag to its string representation.
///
/// Tags without an arm below (including `DT_ENCODING`, which shares value 32
/// with `DT_PREINIT_ARRAY` and so could never match on its own) map to
/// `"UNKNOWN_TAG"`.
#[inline]
pub fn tag_to_str(tag: u64) -> &'static str {
    match tag {
        DT_NULL => "DT_NULL",
        DT_NEEDED => "DT_NEEDED",
        DT_PLTRELSZ => "DT_PLTRELSZ",
        DT_PLTGOT => "DT_PLTGOT",
        DT_HASH => "DT_HASH",
        DT_STRTAB => "DT_STRTAB",
        DT_SYMTAB => "DT_SYMTAB",
        DT_RELA => "DT_RELA",
        DT_RELASZ => "DT_RELASZ",
        DT_RELAENT => "DT_RELAENT",
        DT_STRSZ => "DT_STRSZ",
        DT_SYMENT => "DT_SYMENT",
        DT_INIT => "DT_INIT",
        DT_FINI => "DT_FINI",
        DT_SONAME => "DT_SONAME",
        DT_RPATH => "DT_RPATH",
        DT_SYMBOLIC => "DT_SYMBOLIC",
        DT_REL => "DT_REL",
        DT_RELSZ => "DT_RELSZ",
        DT_RELENT => "DT_RELENT",
        DT_PLTREL => "DT_PLTREL",
        DT_DEBUG => "DT_DEBUG",
        DT_TEXTREL => "DT_TEXTREL",
        DT_JMPREL => "DT_JMPREL",
        DT_BIND_NOW => "DT_BIND_NOW",
        DT_INIT_ARRAY => "DT_INIT_ARRAY",
        DT_FINI_ARRAY => "DT_FINI_ARRAY",
        DT_INIT_ARRAYSZ => "DT_INIT_ARRAYSZ",
        DT_FINI_ARRAYSZ => "DT_FINI_ARRAYSZ",
        DT_RUNPATH => "DT_RUNPATH",
        DT_FLAGS => "DT_FLAGS",
        DT_PREINIT_ARRAY => "DT_PREINIT_ARRAY",
        DT_PREINIT_ARRAYSZ => "DT_PREINIT_ARRAYSZ",
        DT_NUM => "DT_NUM",
        DT_LOOS => "DT_LOOS",
        DT_HIOS => "DT_HIOS",
        DT_LOPROC => "DT_LOPROC",
        DT_HIPROC => "DT_HIPROC",
        DT_VERSYM => "DT_VERSYM",
        DT_RELACOUNT => "DT_RELACOUNT",
        DT_RELCOUNT => "DT_RELCOUNT",
        DT_GNU_HASH => "DT_GNU_HASH",
        DT_VERDEF => "DT_VERDEF",
        DT_VERDEFNUM => "DT_VERDEFNUM",
        DT_VERNEED => "DT_VERNEED",
        DT_VERNEEDNUM => "DT_VERNEEDNUM",
        DT_FLAGS_1 => "DT_FLAGS_1",
        _ => "UNKNOWN_TAG",
    }
}
// Values of `d_un.d_val` in the DT_FLAGS entry
/// Object may use DF_ORIGIN.
pub const DF_ORIGIN: u64 = 0x00000001;
/// Symbol resolutions starts here.
pub const DF_SYMBOLIC: u64 = 0x00000002;
/// Object contains text relocations.
pub const DF_TEXTREL: u64 = 0x00000004;
/// No lazy binding for this object.
pub const DF_BIND_NOW: u64 = 0x00000008;
/// Module uses the static TLS model.
pub const DF_STATIC_TLS: u64 = 0x00000010;
// State flags selectable in the `d_un.d_val` element of the DT_FLAGS_1 entry in the dynamic section.
/// Set RTLD_NOW for this object.
pub const DF_1_NOW: u64 = 0x00000001;
/// Set RTLD_GLOBAL for this object.
pub const DF_1_GLOBAL: u64 = 0x00000002;
/// Set RTLD_GROUP for this object.
pub const DF_1_GROUP: u64 = 0x00000004;
/// Set RTLD_NODELETE for this object.
pub const DF_1_NODELETE: u64 = 0x00000008;
/// Trigger filtee loading at runtime.
pub const DF_1_LOADFLTR: u64 = 0x00000010;
/// Set RTLD_INITFIRST for this object.
pub const DF_1_INITFIRST: u64 = 0x00000020;
/// Set RTLD_NOOPEN for this object.
pub const DF_1_NOOPEN: u64 = 0x00000040;
/// $ORIGIN must be handled.
pub const DF_1_ORIGIN: u64 = 0x00000080;
/// Direct binding enabled.
pub const DF_1_DIRECT: u64 = 0x00000100;
// NOTE(review): purpose of DF_1_TRANS is not documented in common references
// (glibc's elf.h lists it without comment).
pub const DF_1_TRANS: u64 = 0x00000200;
/// Object is used to interpose.
pub const DF_1_INTERPOSE: u64 = 0x00000400;
/// Ignore default lib search path.
pub const DF_1_NODEFLIB: u64 = 0x00000800;
/// Object can't be dldump'ed.
pub const DF_1_NODUMP: u64 = 0x00001000;
/// Configuration alternative created.
pub const DF_1_CONFALT: u64 = 0x00002000;
/// Filtee terminates filters search.
pub const DF_1_ENDFILTEE: u64 = 0x00004000;
/// Disp reloc applied at build time.
pub const DF_1_DISPRELDNE: u64 = 0x00008000;
/// Disp reloc applied at run-time.
pub const DF_1_DISPRELPND: u64 = 0x00010000;
/// Object has no-direct binding.
pub const DF_1_NODIRECT: u64 = 0x00020000;
/// Reserved for internal use (Solaris).
pub const DF_1_IGNMULDEF: u64 = 0x00040000;
/// Reserved for internal use (Solaris).
pub const DF_1_NOKSYMS: u64 = 0x00080000;
/// Reserved for internal use (Solaris).
pub const DF_1_NOHDR: u64 = 0x00100000;
/// Object is modified after built.
pub const DF_1_EDITED: u64 = 0x00200000;
/// Reserved for internal use (Solaris).
pub const DF_1_NORELOC: u64 = 0x00400000;
/// Object has individual interposers.
pub const DF_1_SYMINTPOSE: u64 = 0x00800000;
/// Global auditing required.
pub const DF_1_GLOBAUDIT: u64 = 0x01000000;
/// Singleton symbols are used.
pub const DF_1_SINGLETON: u64 = 0x02000000;
// Generates the non-`pure` (std-dependent) impl surface for `Dyn`: `Debug`
// impls, readers/accessors for the `_DYNAMIC` array, and the `DynamicInfo`
// summary struct. `$size` is the architecture word type (u32 for ELF32, u64
// for ELF64); `$from_fd_endian` is the caller-supplied endian-aware reader
// used when the `no_endian_fd` feature is off.
//
// NOTE(review): `dyn` is used as a plain identifier below, which is only
// legal in the 2015 edition — it became a keyword in Rust 2018.
macro_rules! elf_dyn_impure_impl {
    ($size:ident, $from_fd_endian:item) => {
        #[cfg(not(feature = "pure"))]
        pub use self::impure::*;
        #[cfg(not(feature = "pure"))]
        mod impure {
            use std::fs::File;
            use std::io::Seek;
            use std::io::SeekFrom::Start;
            use std::io;
            use std::fmt;
            use std::slice;
            use super::super::program_header::{ProgramHeader, PT_DYNAMIC};
            use super::super::super::elf::strtab::Strtab;
            use super::*;
            impl fmt::Debug for Dyn {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    write!(f,
                           "d_tag: {} d_val: 0x{:x}",
                           tag_to_str(self.d_tag as u64),
                           self.d_val)
                }
            }
            impl fmt::Debug for DynamicInfo {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    // Absent optional addresses are rendered as 0.
                    let gnu_hash = if let Some(addr) = self.gnu_hash { addr } else { 0 };
                    let hash = if let Some(addr) = self.hash { addr } else { 0 };
                    let pltgot = if let Some(addr) = self.pltgot { addr } else { 0 };
                    write!(f, "rela: 0x{:x} relasz: {} relaent: {} relacount: {} gnu_hash: 0x{:x} hash: 0x{:x} strtab: 0x{:x} strsz: {} symtab: 0x{:x} syment: {} pltgot: 0x{:x} pltrelsz: {} pltrel: {} jmprel: 0x{:x} verneed: 0x{:x} verneednum: {} versym: 0x{:x} init: 0x{:x} fini: 0x{:x} needed_count: {}",
                           self.rela,
                           self.relasz,
                           self.relaent,
                           self.relacount,
                           gnu_hash,
                           hash,
                           self.strtab,
                           self.strsz,
                           self.symtab,
                           self.syment,
                           pltgot,
                           self.pltrelsz,
                           self.pltrel,
                           self.jmprel,
                           self.verneed,
                           self.verneednum,
                           self.versym,
                           self.init,
                           self.fini,
                           self.needed_count,
                           )
                }
            }
            #[cfg(feature = "no_endian_fd")]
            /// Returns a vector of dynamic entries from the given fd and program headers
            pub fn from_fd(mut fd: &File, phdrs: &[ProgramHeader], _: bool) -> io::Result<Option<Vec<Dyn>>> {
                use std::io::Read;
                // Scan for a PT_DYNAMIC segment; `Ok(None)` if there is none.
                for phdr in phdrs {
                    if phdr.p_type == PT_DYNAMIC {
                        let filesz = phdr.p_filesz as usize;
                        let dync = filesz / SIZEOF_DYN;
                        let mut bytes = vec![0u8; filesz];
                        try!(fd.seek(Start(phdr.p_offset as u64)));
                        try!(fd.read(&mut bytes));
                        // Reinterpret the raw buffer as `Dyn` entries and copy them out.
                        let bytes = unsafe { slice::from_raw_parts(bytes.as_ptr() as *mut Dyn, dync) };
                        let mut dyns = Vec::with_capacity(dync);
                        dyns.extend_from_slice(bytes);
                        // Drops *adjacent* duplicates only — presumably trailing
                        // zeroed entries; TODO confirm intent.
                        dyns.dedup();
                        return Ok(Some(dyns));
                    }
                }
                Ok(None)
            }
            /// Given a bias and a memory address (typically for a _correctly_ mmap'd binary in memory), returns the `_DYNAMIC` array as a slice of that memory
            pub unsafe fn from_raw<'a>(bias: $size, vaddr: $size) -> &'a [Dyn] {
                let dynp = vaddr.wrapping_add(bias) as *const Dyn;
                // Walk forward until the DT_NULL terminator to find the length.
                let mut idx = 0;
                while (*dynp.offset(idx)).d_tag as u64 != DT_NULL {
                    idx += 1;
                }
                slice::from_raw_parts(dynp, idx as usize)
            }
            // TODO: these bare functions have always seemed awkward, but not sure where they should go...
            /// Maybe gets and returns the dynamic array with the same lifetime as the [phdrs], using the provided bias with wrapping addition.
            /// If the bias is wrong, it will either segfault or give you incorrect values, beware
            pub unsafe fn from_phdrs(bias: $size, phdrs: &[ProgramHeader]) -> Option<&[Dyn]> {
                for phdr in phdrs {
                    // FIXME: change to casting to u64 similar to DT_*?
                    if phdr.p_type as u32 == PT_DYNAMIC {
                        return Some(from_raw(bias, phdr.p_vaddr));
                    }
                }
                None
            }
            /// Gets the needed libraries from the `_DYNAMIC` array, with the str slices lifetime tied to the dynamic array/strtab's lifetime(s)
            pub unsafe fn get_needed<'a>(dyns: &[Dyn], strtab: *const Strtab<'a>, count: usize) -> Vec<&'a str> {
                let mut needed = Vec::with_capacity(count);
                for dyn in dyns {
                    if dyn.d_tag as u64 == DT_NEEDED {
                        let lib = &(*strtab)[dyn.d_val as usize];
                        needed.push(lib);
                    }
                }
                needed
            }
            #[cfg(not(feature = "no_endian_fd"))]
            /// Returns a vector of dynamic entries from the given fd and program headers
            $from_fd_endian
        }
        /// Important dynamic linking info generated via a single pass through the _DYNAMIC array
        #[derive(Default)]
        pub struct DynamicInfo {
            pub rela: usize,
            pub relasz: usize,
            pub relaent: $size,
            pub relacount: usize,
            pub gnu_hash: Option<$size>,
            pub hash: Option<$size>,
            pub strtab: usize,
            pub strsz: usize,
            pub symtab: usize,
            pub syment: usize,
            pub pltgot: Option<$size>,
            pub pltrelsz: usize,
            pub pltrel: $size,
            pub jmprel: usize,
            pub verneed: $size,
            pub verneednum: $size,
            pub versym: $size,
            pub init: $size,
            pub fini: $size,
            pub init_array: $size,
            pub init_arraysz: usize,
            pub fini_array: $size,
            pub fini_arraysz: usize,
            pub needed_count: usize,
            pub flags: $size,
            pub flags_1: $size,
            pub soname: usize,
        }
        impl DynamicInfo {
            /// Summarize `dynamic` in one pass, adding `bias` (with wrapping
            /// overflow) to the address-valued entries; see the load-bias
            /// trick documented where this is called.
            pub fn new(dynamic: &[Dyn], bias: usize) -> DynamicInfo {
                let mut res = DynamicInfo::default();
                for dyn in dynamic {
                    // NOTE(review): the two trailing `Some(...) as _` casts
                    // below (DT_HASH, DT_PLTGOT) look like non-primitive
                    // casts — likely stray `as _`; confirm these expand
                    // cleanly at the macro's use sites.
                    match dyn.d_tag as u64 {
                        DT_RELA => res.rela = dyn.d_val.wrapping_add(bias as _) as usize, // .rela.dyn
                        DT_RELASZ => res.relasz = dyn.d_val as usize,
                        DT_RELAENT => res.relaent = dyn.d_val as _,
                        DT_RELACOUNT => res.relacount = dyn.d_val as usize,
                        DT_GNU_HASH => res.gnu_hash = Some(dyn.d_val.wrapping_add(bias as _)),
                        DT_HASH => res.hash = Some(dyn.d_val.wrapping_add(bias as _)) as _,
                        DT_STRTAB => res.strtab = dyn.d_val.wrapping_add(bias as _) as usize,
                        DT_STRSZ => res.strsz = dyn.d_val as usize,
                        DT_SYMTAB => res.symtab = dyn.d_val.wrapping_add(bias as _) as usize,
                        DT_SYMENT => res.syment = dyn.d_val as usize,
                        DT_PLTGOT => res.pltgot = Some(dyn.d_val.wrapping_add(bias as _)) as _,
                        DT_PLTRELSZ => res.pltrelsz = dyn.d_val as usize,
                        DT_PLTREL => res.pltrel = dyn.d_val as _,
                        DT_JMPREL => res.jmprel = dyn.d_val.wrapping_add(bias as _) as usize, // .rela.plt
                        DT_VERNEED => res.verneed = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_VERNEEDNUM => res.verneednum = dyn.d_val as _,
                        DT_VERSYM => res.versym = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_INIT => res.init = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_FINI => res.fini = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_INIT_ARRAY => res.init_array = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_INIT_ARRAYSZ => res.init_arraysz = dyn.d_val as _,
                        DT_FINI_ARRAY => res.fini_array = dyn.d_val.wrapping_add(bias as _) as _,
                        DT_FINI_ARRAYSZ => res.fini_arraysz = dyn.d_val as _,
                        DT_NEEDED => res.needed_count += 1,
                        DT_FLAGS => res.flags = dyn.d_val as _,
                        DT_FLAGS_1 => res.flags_1 = dyn.d_val as _,
                        DT_SONAME => res.soname = dyn.d_val as _,
                        _ => (),
                    }
                }
                res
            }
        }
    };
}
}
#[macro_use]
pub mod rela {
/// No reloc.
pub const R_X86_64_NONE: u64 = 0;
/// Direct 64 bit.
pub const R_X86_64_64: u64 = 1;
/// PC relative 32 bit signed.
pub const R_X86_64_PC32: u64 = 2;
/// 32 bit GOT entry.
pub const R_X86_64_GOT32: u64 = 3;
/// 32 bit PLT address.
pub const R_X86_64_PLT32: u64 = 4;
/// Copy symbol at runtime.
pub const R_X86_64_COPY: u64 = 5;
/// Create GOT entry.
pub const R_X86_64_GLOB_DAT: u64 = 6;
/// Create PLT entry.
pub const R_X86_64_JUMP_SLOT: u64 = 7;
/// Adjust by program base.
pub const R_X86_64_RELATIVE: u64 = 8;
/// 32 bit signed PC relative offset to GOT.
pub const R_X86_64_GOTPCREL: u64 = 9;
/// Direct 32 bit zero extended.
pub const R_X86_64_32: u64 = 10;
/// Direct 32 bit sign extended.
pub const R_X86_64_32S: u64 = 11;
/// Direct 16 bit zero extended.
pub const R_X86_64_16: u64 = 12;
/// 16 bit sign extended pc relative.
pub const R_X86_64_PC16: u64 = 13;
/// Direct 8 bit sign extended.
pub const R_X86_64_8: u64 = 14;
/// 8 bit sign extended pc relative.
pub const R_X86_64_PC8: u64 = 15;
/// ID of module containing symbol.
pub const R_X86_64_DTPMOD64: u64 = 16;
/// Offset in module's TLS block.
pub const R_X86_64_DTPOFF64: u64 = 17;
/// Offset in initial TLS block.
pub const R_X86_64_TPOFF64: u64 = 18;
/// 32 bit signed PC relative offset to two GOT entries for GD symbol.
pub const R_X86_64_TLSGD: u64 = 19;
/// 32 bit signed PC relative offset to two GOT entries for LD symbol.
pub const R_X86_64_TLSLD: u64 = 20;
/// Offset in TLS block.
pub const R_X86_64_DTPOFF32: u64 = 21;
/// 32 bit signed PC relative offset to GOT entry for IE symbol.
pub const R_X86_64_GOTTPOFF: u64 = 22;
/// Offset in initial TLS block.
pub const R_X86_64_TPOFF32: u64 = 23;
/// PC relative 64 bit.
pub const R_X86_64_PC64: u64 = 24;
/// 64 bit offset to GOT.
pub const R_X86_64_GOTOFF64: u64 = 25;
/// 32 bit signed pc relative offset to GOT.
pub const R_X86_64_GOTPC32: u64 = 26;
/// 64-bit GOT entry offset.
pub const R_X86_64_GOT64: u64 = 27;
/// 64-bit PC relative offset to GOT entry.
pub const R_X86_64_GOTPCREL64: u64 = 28;
/// 64-bit PC relative offset to GOT.
pub const R_X86_64_GOTPC64: u64 = 29;
/// like GOT64, says PLT entry needed.
pub const R_X86_64_GOTPLT64: u64 = 30;
/// 64-bit GOT relative offset to PLT entry.
pub const R_X86_64_PLTOFF64: u64 = 31;
/// Size of symbol plus 32-bit addend.
pub const R_X86_64_SIZE32: u64 = 32;
/// Size of symbol plus 64-bit addend.
pub const R_X86_64_SIZE64: u64 = 33;
/// GOT offset for TLS descriptor.
pub const R_X86_64_GOTPC32_TLSDESC: u64 = 34;
/// Marker for call through TLS descriptor.
pub const R_X86_64_TLSDESC_CALL: u64 = 35;
/// TLS descriptor.
pub const R_X86_64_TLSDESC: u64 = 36;
/// Adjust indirectly by program base.
pub const R_X86_64_IRELATIVE: u64 = 37;
/// 64-bit adjust by program base.
pub const R_X86_64_RELATIVE64: u64 = 38;
/// Count of defined x86-64 relocation types.
pub const R_X86_64_NUM: u64 = 39;
/// Convert an x86-64 relocation type (the type half of `r_info`) to its
/// short name (the `R_X86_64_` prefix is dropped); unknown values map to
/// `"UNKNOWN_RELA_TYPE"`.
#[inline]
pub fn type_to_str(typ: u64) -> &'static str {
    match typ {
        R_X86_64_NONE => "NONE",
        R_X86_64_64 => "64",
        R_X86_64_PC32 => "PC32",
        R_X86_64_GOT32 => "GOT32",
        R_X86_64_PLT32 => "PLT32",
        R_X86_64_COPY => "COPY",
        R_X86_64_GLOB_DAT => "GLOB_DAT",
        R_X86_64_JUMP_SLOT => "JUMP_SLOT",
        R_X86_64_RELATIVE => "RELATIVE",
        R_X86_64_GOTPCREL => "GOTPCREL",
        R_X86_64_32 => "32",
        R_X86_64_32S => "32S",
        R_X86_64_16 => "16",
        R_X86_64_PC16 => "PC16",
        R_X86_64_8 => "8",
        R_X86_64_PC8 => "PC8",
        R_X86_64_DTPMOD64 => "DTPMOD64",
        R_X86_64_DTPOFF64 => "DTPOFF64",
        R_X86_64_TPOFF64 => "TPOFF64",
        R_X86_64_TLSGD => "TLSGD",
        R_X86_64_TLSLD => "TLSLD",
        R_X86_64_DTPOFF32 => "DTPOFF32",
        R_X86_64_GOTTPOFF => "GOTTPOFF",
        R_X86_64_TPOFF32 => "TPOFF32",
        R_X86_64_PC64 => "PC64",
        R_X86_64_GOTOFF64 => "GOTOFF64",
        R_X86_64_GOTPC32 => "GOTPC32",
        R_X86_64_GOT64 => "GOT64",
        R_X86_64_GOTPCREL64 => "GOTPCREL64",
        R_X86_64_GOTPC64 => "GOTPC64",
        R_X86_64_GOTPLT64 => "GOTPLT64",
        R_X86_64_PLTOFF64 => "PLTOFF64",
        R_X86_64_SIZE32 => "SIZE32",
        R_X86_64_SIZE64 => "SIZE64",
        R_X86_64_GOTPC32_TLSDESC => "GOTPC32_TLSDESC",
        R_X86_64_TLSDESC_CALL => "TLSDESC_CALL",
        R_X86_64_TLSDESC => "TLSDESC",
        R_X86_64_IRELATIVE => "IRELATIVE",
        R_X86_64_RELATIVE64 => "RELATIVE64",
        _ => "UNKNOWN_RELA_TYPE",
    }
}
// Generates the non-`pure` (std-dependent) impl surface for `Rela`: a
// `Debug` impl, a raw-pointer slice view, the endian-unaware `from_fd`
// (feature `no_endian_fd`), plus the caller-supplied endian-aware reader.
macro_rules! elf_rela_impure_impl { ($from_fd_endian:item) => {
    #[cfg(not(feature = "pure"))]
    pub use self::impure::*;
    #[cfg(not(feature = "pure"))]
    mod impure {
        use super::*;
        use std::fs::File;
        use std::io::Seek;
        use std::io::SeekFrom::Start;
        use std::io;
        use std::fmt;
        use std::slice;
        impl fmt::Debug for Rela {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                let sym = r_sym(self.r_info);
                let typ = r_type(self.r_info);
                write!(f,
                       "r_offset: {:x} {} @ {} r_addend: {:x}",
                       self.r_offset,
                       type_to_str(typ as u64),
                       sym,
                       self.r_addend)
            }
        }
        /// Gets the rela entries given a rela u64 and the _size_ of the rela section in the binary, in bytes. Works for regular rela and the pltrela table.
        /// Assumes the pointer is valid and can safely return a slice of memory pointing to the relas because:
        /// 1. `rela` points to memory received from the kernel (i.e., it loaded the executable), _or_
        /// 2. The binary has already been mmapped (i.e., it's a `SharedObject`), and hence it's safe to return a slice of that memory.
        /// 3. Or if you obtained the pointer in some other lawful manner
        pub unsafe fn from_raw<'a>(ptr: *const Rela, size: usize) -> &'a [Rela] {
            slice::from_raw_parts(ptr, size / SIZEOF_RELA)
        }
        #[cfg(feature = "no_endian_fd")]
        /// Read `size / SIZEOF_RELA` relocation entries from `fd` at byte
        /// `offset`; the trailing `bool` (endianness flag) is unused.
        pub fn from_fd(fd: &mut File, offset: usize, size: usize, _: bool) -> io::Result<Vec<Rela>> {
            use std::io::Read;
            let count = size / SIZEOF_RELA;
            let mut bytes = vec![0u8; size];
            try!(fd.seek(Start(offset as u64)));
            try!(fd.read(&mut bytes));
            // Reinterpret the raw buffer as `Rela` structs and copy them out.
            let bytes = unsafe { slice::from_raw_parts(bytes.as_ptr() as *mut Rela, count) };
            let mut res = Vec::with_capacity(count);
            res.extend_from_slice(bytes);
            Ok(res)
        }
        #[cfg(not(feature = "no_endian_fd"))]
        $from_fd_endian
    }
};}
}
// Unified 32/64-bit front end (compiled only when none of `pure`,
// `no_elf32`, `no_elf` is enabled): peeks at the ELF ident header and
// dispatches to the matching word-size parser.
#[cfg(all(not(feature = "pure"), not(feature = "no_elf32"), not(feature = "no_elf")))]
pub use self::impure::*;
#[cfg(all(not(feature = "pure"), not(feature = "no_elf32"), not(feature = "no_elf")))]
#[macro_use]
mod impure {
    use std::fs::File;
    use std::io;
    // use std::io::Read;
    // use std::io::SeekFrom::Start;
    use super::header;
    use super::super::elf32;
    use super::super::elf64;
    /// A parsed ELF binary, either 32-bit or 64-bit.
    #[derive(Debug)]
    pub enum Binary {
        Elf32(elf32::Binary),
        Elf64(elf64::Binary),
    }
    /// Parse an ELF binary from `fd`, choosing the 32- or 64-bit parser
    /// from the class byte of the ident header; errors (via `io_error!`)
    /// on unknown class values.
    pub fn from_fd(fd: &mut File) -> io::Result<Binary> {
        match try!(header::peek(fd)) {
            (header::ELFCLASS64, _is_lsb) => Ok(Binary::Elf64(try!(elf64::Binary::from_fd(fd)))),
            (header::ELFCLASS32, _is_lsb) => Ok(Binary::Elf32(try!(elf32::Binary::from_fd(fd)))),
            (class, is_lsb) => {
                io_error!("Unknown values in ELF ident header: class: {} is_lsb: {}",
                          class,
                          is_lsb)
            }
        }
    }
}
// Generates an ELF `Binary` type plus `from_fd`/`from_path` constructors for
// one word size. `$intmax` is the all-ones value of the architecture's
// address type, used to build the wrapping "load bias" below.
//
// Fix(review): two occurrences of `&section_headers` had been corrupted into
// `§ion_headers` (an `&sect;` HTML-entity mangling), which is not even
// lexable Rust; both are restored below. No other code is changed.
macro_rules! elf_from_fd { ($intmax:expr) => {
    use std::path::Path;
    use std::fs::File;
    use std::io;
    use std::io::Read;
    use std::io::Seek;
    use std::io::SeekFrom::Start;
    pub use super::super::elf::strtab;
    use super::{header, program_header, section_header, dyn, sym, rela};
    /// A parsed ELF binary and the tables extracted from it.
    #[derive(Debug)]
    pub struct Binary {
        pub header: header::Header,
        pub program_headers: Vec<program_header::ProgramHeader>,
        pub section_headers: Vec<section_header::SectionHeader>,
        pub shdr_strtab: strtab::Strtab<'static>,
        pub dynamic: Option<Vec<dyn::Dyn>>,
        pub dynsyms: Vec<sym::Sym>,
        pub dynstrtab: strtab::Strtab<'static>,
        pub syms: Vec<sym::Sym>,
        pub strtab: strtab::Strtab<'static>,
        pub rela: Vec<rela::Rela>,
        pub pltrela: Vec<rela::Rela>,
        pub soname: Option<String>,
        pub interpreter: Option<String>,
        pub libraries: Vec<String>,
        pub is_lib: bool,
        pub size: usize,
        pub entry: usize,
    }
    impl Binary {
        /// Parse a `Binary` from an open file: headers, section/symbol
        /// tables, the dynamic array, string tables, and relocations.
        pub fn from_fd(fd: &mut File) -> io::Result<Binary> {
            let header = try!(header::Header::from_fd(fd));
            let entry = header.e_entry as usize;
            let is_lib = header.e_type == header::ET_DYN;
            let is_lsb = header.e_ident[header::EI_DATA] == header::ELFDATA2LSB;
            let program_headers = try!(program_header::ProgramHeader::from_fd(fd, header.e_phoff as u64, header.e_phnum as usize, is_lsb));
            let dynamic = try!(dyn::from_fd(fd, &program_headers, is_lsb));
            let mut bias: usize = 0;
            for ph in &program_headers {
                if ph.p_type == program_header::PT_LOAD {
                    // this is an overflow hack that allows us to use virtual memory addresses as though they're in the file by generating a fake load bias which is then used to overflow the values in the dynamic array, and in a few other places (see Dyn::DynamicInfo), to generate actual file offsets; you may have to marinate a bit on why this works. i am unsure whether it works in every conceivable case. i learned this trick from reading too much dynamic linker C code (a whole other class of C code) and having to deal with broken older kernels on VMs. enjoi
                    bias = (($intmax - ph.p_vaddr).wrapping_add(1)) as usize;
                    break;
                }
            }
            let mut interpreter = None;
            for ph in &program_headers {
                if ph.p_type == program_header::PT_INTERP {
                    // NOTE(review): `p_filesz - 1` strips the trailing NUL but
                    // underflows if p_filesz is 0; PT_INTERP segments are
                    // expected to be non-empty.
                    let mut bytes = vec![0u8; (ph.p_filesz - 1) as usize];
                    try!(fd.seek(Start(ph.p_offset as u64)));
                    try!(fd.read(&mut bytes));
                    interpreter = Some(String::from_utf8(bytes).unwrap())
                }
            }
            let section_headers = try!(section_header::SectionHeader::from_fd(fd, header.e_shoff as u64, header.e_shnum as usize, is_lsb));
            let mut syms = vec![];
            let mut strtab = strtab::Strtab::default();
            for shdr in &section_headers {
                if shdr.sh_type as u32 == section_header::SHT_SYMTAB {
                    let count = shdr.sh_size / shdr.sh_entsize;
                    syms = try!(sym::from_fd(fd, shdr.sh_offset as usize, count as usize, is_lsb))
                }
                if shdr.sh_type as u32 == section_header::SHT_STRTAB {
                    strtab = try!(strtab::Strtab::from_fd(fd, shdr.sh_offset as usize, shdr.sh_size as usize));
                }
            }
            // Section-header string table, looked up via e_shstrndx (empty
            // table when the index is out of range).
            let strtab_idx = header.e_shstrndx as usize;
            let shdr_strtab = if strtab_idx >= section_headers.len() {
                strtab::Strtab::default()
            } else {
                let shdr = &section_headers[strtab_idx];
                try!(strtab::Strtab::from_fd(fd, shdr.sh_offset as usize, shdr.sh_size as usize))
            };
            let mut soname = None;
            let mut libraries = vec![];
            let mut dynsyms = vec![];
            let mut rela = vec![];
            let mut pltrela = vec![];
            let mut dynstrtab = strtab::Strtab::default();
            if let Some(ref dynamic) = dynamic {
                let dyn_info = dyn::DynamicInfo::new(&dynamic, bias); // we explicitly overflow the values here with our bias
                dynstrtab = try!(strtab::Strtab::from_fd(fd,
                                                         dyn_info.strtab,
                                                         dyn_info.strsz));
                if dyn_info.soname != 0 {
                    soname = Some(dynstrtab.get(dyn_info.soname).to_owned())
                }
                if dyn_info.needed_count > 0 {
                    let needed = unsafe { dyn::get_needed(dynamic, &dynstrtab, dyn_info.needed_count)};
                    libraries = Vec::with_capacity(dyn_info.needed_count);
                    for lib in needed {
                        libraries.push(lib.to_owned());
                    }
                }
                // Estimate the dynamic symbol count from the gap between the
                // symbol table and the string table that follows it.
                let num_syms = (dyn_info.strtab - dyn_info.symtab) / dyn_info.syment;
                dynsyms = try!(sym::from_fd(fd, dyn_info.symtab, num_syms, is_lsb));
                rela = try!(rela::from_fd(fd, dyn_info.rela, dyn_info.relasz, is_lsb));
                pltrela = try!(rela::from_fd(fd, dyn_info.jmprel, dyn_info.pltrelsz, is_lsb));
            }
            let elf = Binary {
                header: header,
                program_headers: program_headers,
                section_headers: section_headers,
                shdr_strtab: shdr_strtab,
                dynamic: dynamic,
                dynsyms: dynsyms,
                dynstrtab: dynstrtab,
                syms: syms,
                strtab: strtab,
                rela: rela,
                pltrela: pltrela,
                soname: soname,
                interpreter: interpreter,
                libraries: libraries,
                is_lib: is_lib,
                size: fd.metadata().unwrap().len() as usize,
                entry: entry,
            };
            Ok(elf)
        }
        /// Open `path` and parse it, first checking the file is at least as
        /// large as an ELF header.
        pub fn from_path(path: &Path) -> io::Result<Binary> {
            let mut fd = try!(File::open(&path));
            let metadata = fd.metadata().unwrap();
            if metadata.len() < header::SIZEOF_EHDR as u64 {
                io_error!("Error: {:?} size is smaller than an ELF header", path.as_os_str())
            } else {
                Self::from_fd(&mut fd)
            }
        }
    }
};}
|
// Implements http://rosettacode.org/wiki/Entropy
use std::collections::HashMap;

/// Shannon entropy, in bits per symbol, of the characters of `s`.
///
/// Counts occurrences of each `char` and evaluates `H = -Σ p(c)·log2(p(c))`.
/// Returns `0.0` for the empty string.
///
/// Fixes vs. the original: ported from pre-1.0 Rust (`extern crate
/// collections`, `uint`, `StrSlice`, `insert_or_update_with` — none of which
/// exist in stable Rust) to the stable standard library, and probabilities
/// are now computed over the *character* count rather than the byte length
/// `s.len()`, which was wrong for multi-byte UTF-8 input.
pub fn shannon_entropy(s: &str) -> f64 {
    // Count occurrences of each character.
    let mut counts: HashMap<char, usize> = HashMap::new();
    for c in s.chars() {
        *counts.entry(c).or_insert(0) += 1;
    }
    // Total number of characters (the sum of all counts).
    let total = counts.values().sum::<usize>() as f64;
    // H = -Σ p·log2(p); the fold subtracts each term from the accumulator.
    counts.values().fold(0.0, |acc, &n| {
        let p = n as f64 / total;
        acc - p * p.log2()
    })
}
fn main() {
println!("{:f}", shannon_entropy("1223334444"));
}
// A second copy of the Shannon entropy program, annotated with explanatory comments.
// Implements http://rosettacode.org/wiki/Entropy
use std::collections::HashMap;

/// Shannon entropy of `s` in bits per symbol (commented variant).
///
/// Fixes vs. the original: ported from pre-1.0 Rust (`extern crate
/// collections`, `uint`, `StrSlice`, `insert_or_update_with`) to the stable
/// standard library, and the hoisted `len` is now the *character* count
/// rather than the byte length, which miscounted multi-byte UTF-8 input.
pub fn shannon_entropy(s: &str) -> f64 {
    // Count occurrences of each char
    let mut map: HashMap<char, usize> = HashMap::new();
    for c in s.chars() {
        *map.entry(c).or_insert(0) += 1;
    }
    // Calculate the entropy: H = -Σ p·log2(p), with p measured over the
    // number of characters.
    let len = s.chars().count() as f64;
    map.iter().fold(0.0, |acc, (_, &nb)| {
        let p = nb as f64 / len;
        acc - p * p.log2()
    })
}
fn main() {
println!("{:f}", shannon_entropy("1223334444"));
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.