CombinedText
stringlengths
4
3.42M
use std::cmp; use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; /// A simple implementation of a [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) pub struct HyperLogLog { registers: Vec<u8>, b: usize, } impl HyperLogLog { /// Creates a new, empty HyperLogLog. /// /// - `b` number of bits used for register selection, number of registers within the /// HyperLogLog will be `2^b`. `b` must be in `[4, 16]` /// /// Panics when `b` is out of bounds. pub fn new(b: usize) -> HyperLogLog { assert!(b >= 4); assert!(b <= 16); let m = (1 as usize) << b; let registers = vec![0; m]; HyperLogLog { registers: registers, b: b, } } /// Adds an element to the HyperLogLog. pub fn add<T>(&mut self, obj: &T) where T: Hash, { let mut hasher = DefaultHasher::new(); obj.hash(&mut hasher); let h: u64 = hasher.finish(); let w = h >> self.b; let j = h - (w << self.b); let p = w.leading_zeros() + 1 - (self.b as u32); let m_old = self.registers[j as usize]; self.registers[j as usize] = cmp::max(m_old, p as u8); } /// Guess the number of unique elements seen by the HyperLogLog. pub fn count(&self) -> usize { let m = self.registers.len() as f64; let z = 1f64 / self.registers .iter() .map(|&x| 2f64.powi(-(x as i32))) .sum::<f64>(); let am = if m >= 128. { 0.7213 / (1. + 1.079 / m) } else if m >= 64. { 0.709 } else if m >= 32. { 0.697 } else { 0.673 }; let e = am * m * m * z; let e_star = if e <= 5. / 2. * m { // small range correction let v = self.registers.iter().filter(|&&x| x == 0).count(); if v != 0 { m * (m / (v as f64)).ln() } else { e } } else if e <= 1. / 30. * 2f64.powi(32) { // intermediate range => no correction e } else { // large range correction -2f64.powi(32) * (1. - e / 2f64.powi(32)).ln() }; e_star as usize } /// Empties the HyperLogLog. pub fn clear(&mut self) { self.registers = vec![0; self.registers.len()]; } /// Checks whether the HyperLogLog has never seen an element. 
pub fn is_empty(&self) -> bool { self.registers.iter().all(|&x| x == 0) } } #[cfg(test)] mod tests { use super::HyperLogLog; #[test] fn empty() { let hll = HyperLogLog::new(8); assert_eq!(hll.count(), 0); assert!(hll.is_empty()); } #[test] fn add_b4_n1k() { let mut hll = HyperLogLog::new(4); for i in 0..1000 { hll.add(&i); } assert_eq!(hll.count(), 571); assert!(!hll.is_empty()); } #[test] fn add_b8_n1k() { let mut hll = HyperLogLog::new(8); for i in 0..1000 { hll.add(&i); } assert_eq!(hll.count(), 966); assert!(!hll.is_empty()); } #[test] fn add_b12_n1k() { let mut hll = HyperLogLog::new(12); for i in 0..1000 { hll.add(&i); } assert_eq!(hll.count(), 984); assert!(!hll.is_empty()); } #[test] fn add_b16_n1k() { let mut hll = HyperLogLog::new(16); for i in 0..1000 { hll.add(&i); } assert_eq!(hll.count(), 998); assert!(!hll.is_empty()); } #[test] fn add_b8_n10k() { let mut hll = HyperLogLog::new(8); for i in 0..10000 { hll.add(&i); } assert_eq!(hll.count(), 10196); assert!(!hll.is_empty()); } #[test] fn add_b12_n10k() { let mut hll = HyperLogLog::new(12); for i in 0..10000 { hll.add(&i); } assert_eq!(hll.count(), 10303); assert!(!hll.is_empty()); } #[test] fn add_b16_n10k() { let mut hll = HyperLogLog::new(16); for i in 0..10000 { hll.add(&i); } assert_eq!(hll.count(), 10055); assert!(!hll.is_empty()); } #[test] fn add_b16_n100k() { let mut hll = HyperLogLog::new(16); for i in 0..100000 { hll.add(&i); } assert_eq!(hll.count(), 100551); assert!(!hll.is_empty()); } #[test] fn add_b16_n1m() { let mut hll = HyperLogLog::new(16); for i in 0..1000000 { hll.add(&i); } assert_eq!(hll.count(), 1000226); assert!(!hll.is_empty()); } #[test] fn clear() { let mut hll = HyperLogLog::new(8); for i in 0..1000 { hll.add(&i); } hll.clear(); assert_eq!(hll.count(), 0); assert!(hll.is_empty()); } } hyperloglog: impl debug use std::cmp; use std::collections::hash_map::DefaultHasher; use std::fmt; use std::hash::{Hash, Hasher}; /// A simple implementation of a 
[HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) pub struct HyperLogLog { registers: Vec<u8>, b: usize, } impl HyperLogLog { /// Creates a new, empty HyperLogLog. /// /// - `b` number of bits used for register selection, number of registers within the /// HyperLogLog will be `2^b`. `b` must be in `[4, 16]` /// /// Panics when `b` is out of bounds. pub fn new(b: usize) -> HyperLogLog { assert!(b >= 4); assert!(b <= 16); let m = (1 as usize) << b; let registers = vec![0; m]; HyperLogLog { registers: registers, b: b, } } /// Adds an element to the HyperLogLog. pub fn add<T>(&mut self, obj: &T) where T: Hash, { let mut hasher = DefaultHasher::new(); obj.hash(&mut hasher); let h: u64 = hasher.finish(); let w = h >> self.b; let j = h - (w << self.b); let p = w.leading_zeros() + 1 - (self.b as u32); let m_old = self.registers[j as usize]; self.registers[j as usize] = cmp::max(m_old, p as u8); } /// Guess the number of unique elements seen by the HyperLogLog. pub fn count(&self) -> usize { let m = self.registers.len() as f64; let z = 1f64 / self.registers .iter() .map(|&x| 2f64.powi(-(x as i32))) .sum::<f64>(); let am = if m >= 128. { 0.7213 / (1. + 1.079 / m) } else if m >= 64. { 0.709 } else if m >= 32. { 0.697 } else { 0.673 }; let e = am * m * m * z; let e_star = if e <= 5. / 2. * m { // small range correction let v = self.registers.iter().filter(|&&x| x == 0).count(); if v != 0 { m * (m / (v as f64)).ln() } else { e } } else if e <= 1. / 30. * 2f64.powi(32) { // intermediate range => no correction e } else { // large range correction -2f64.powi(32) * (1. - e / 2f64.powi(32)).ln() }; e_star as usize } /// Empties the HyperLogLog. pub fn clear(&mut self) { self.registers = vec![0; self.registers.len()]; } /// Checks whether the HyperLogLog has never seen an element. 
pub fn is_empty(&self) -> bool { self.registers.iter().all(|&x| x == 0) } } impl fmt::Debug for HyperLogLog { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "HyperLogLog {{ b: {} }}", self.b) } } #[cfg(test)] mod tests { use super::HyperLogLog; #[test] fn empty() { let hll = HyperLogLog::new(8); assert_eq!(hll.count(), 0); assert!(hll.is_empty()); } #[test] fn add_b4_n1k() { let mut hll = HyperLogLog::new(4); for i in 0..1000 { hll.add(&i); } assert_eq!(hll.count(), 571); assert!(!hll.is_empty()); } #[test] fn add_b8_n1k() { let mut hll = HyperLogLog::new(8); for i in 0..1000 { hll.add(&i); } assert_eq!(hll.count(), 966); assert!(!hll.is_empty()); } #[test] fn add_b12_n1k() { let mut hll = HyperLogLog::new(12); for i in 0..1000 { hll.add(&i); } assert_eq!(hll.count(), 984); assert!(!hll.is_empty()); } #[test] fn add_b16_n1k() { let mut hll = HyperLogLog::new(16); for i in 0..1000 { hll.add(&i); } assert_eq!(hll.count(), 998); assert!(!hll.is_empty()); } #[test] fn add_b8_n10k() { let mut hll = HyperLogLog::new(8); for i in 0..10000 { hll.add(&i); } assert_eq!(hll.count(), 10196); assert!(!hll.is_empty()); } #[test] fn add_b12_n10k() { let mut hll = HyperLogLog::new(12); for i in 0..10000 { hll.add(&i); } assert_eq!(hll.count(), 10303); assert!(!hll.is_empty()); } #[test] fn add_b16_n10k() { let mut hll = HyperLogLog::new(16); for i in 0..10000 { hll.add(&i); } assert_eq!(hll.count(), 10055); assert!(!hll.is_empty()); } #[test] fn add_b16_n100k() { let mut hll = HyperLogLog::new(16); for i in 0..100000 { hll.add(&i); } assert_eq!(hll.count(), 100551); assert!(!hll.is_empty()); } #[test] fn add_b16_n1m() { let mut hll = HyperLogLog::new(16); for i in 0..1000000 { hll.add(&i); } assert_eq!(hll.count(), 1000226); assert!(!hll.is_empty()); } #[test] fn clear() { let mut hll = HyperLogLog::new(8); for i in 0..1000 { hll.add(&i); } hll.clear(); assert_eq!(hll.count(), 0); assert!(hll.is_empty()); } #[test] fn debug() { let hll = HyperLogLog::new(12); 
assert_eq!(format!("{:?}", hll), "HyperLogLog { b: 12 }"); } }
extern crate openssl; use std::io; use std::fmt; use std::error; use self::openssl::pkcs12; use self::openssl::error::ErrorStack; use self::openssl::ssl::{self, SslMethod, SslConnectorBuilder, SslConnector, SslAcceptorBuilder, SslAcceptor, MidHandshakeSslStream, SslContextBuilder, ShutdownResult}; use self::openssl::x509::X509; use Protocol; fn supported_protocols(protocols: &[Protocol], ctx: &mut SslContextBuilder) { // This constant is only defined on OpenSSL 1.0.2 and above, so manually do it. let ssl_op_no_ssl_mask = ssl::SSL_OP_NO_SSLV2 | ssl::SSL_OP_NO_SSLV3 | ssl::SSL_OP_NO_TLSV1 | ssl::SSL_OP_NO_TLSV1_1 | ssl::SSL_OP_NO_TLSV1_2; ctx.clear_options(ssl_op_no_ssl_mask); let mut options = ssl_op_no_ssl_mask; for protocol in protocols { let op = match *protocol { Protocol::Sslv3 => ssl::SSL_OP_NO_SSLV3, Protocol::Tlsv10 => ssl::SSL_OP_NO_TLSV1, Protocol::Tlsv11 => ssl::SSL_OP_NO_TLSV1_1, Protocol::Tlsv12 => ssl::SSL_OP_NO_TLSV1_2, Protocol::__NonExhaustive => unreachable!(), }; options &= !op; } ctx.set_options(options); } pub struct Error(ssl::Error); impl error::Error for Error { fn description(&self) -> &str { error::Error::description(&self.0) } fn cause(&self) -> Option<&error::Error> { error::Error::cause(&self.0) } } impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&self.0, fmt) } } impl fmt::Debug for Error { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&self.0, fmt) } } impl From<ssl::Error> for Error { fn from(err: ssl::Error) -> Error { Error(err) } } impl From<ErrorStack> for Error { fn from(err: ErrorStack) -> Error { ssl::Error::Ssl(err).into() } } pub struct Pkcs12(pkcs12::ParsedPkcs12); impl Pkcs12 { pub fn from_der(buf: &[u8], pass: &str) -> Result<Pkcs12, Error> { let pkcs12 = try!(pkcs12::Pkcs12::from_der(buf)); let parsed = try!(pkcs12.parse(pass)); Ok(Pkcs12(parsed)) } } pub struct Certificate(X509); impl Certificate { pub fn from_der(buf: &[u8]) -> 
Result<Certificate, Error> { let cert = try!(X509::from_der(buf)); Ok(Certificate(cert)) } } pub struct MidHandshakeTlsStream<S>(MidHandshakeSslStream<S>); impl<S> fmt::Debug for MidHandshakeTlsStream<S> where S: fmt::Debug { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&self.0, fmt) } } impl<S> MidHandshakeTlsStream<S> { pub fn get_ref(&self) -> &S { self.0.get_ref() } pub fn get_mut(&mut self) -> &mut S { self.0.get_mut() } } impl<S> MidHandshakeTlsStream<S> where S: io::Read + io::Write { pub fn handshake(self) -> Result<TlsStream<S>, HandshakeError<S>> { match self.0.handshake() { Ok(s) => Ok(TlsStream(s)), Err(e) => Err(e.into()), } } } pub enum HandshakeError<S> { Failure(Error), Interrupted(MidHandshakeTlsStream<S>), } impl<S> From<ssl::HandshakeError<S>> for HandshakeError<S> { fn from(e: ssl::HandshakeError<S>) -> HandshakeError<S> { match e { ssl::HandshakeError::SetupFailure(e) => { HandshakeError::Failure(Error(ssl::Error::Ssl(e))) } ssl::HandshakeError::Failure(e) => HandshakeError::Failure(Error(e.into_error())), ssl::HandshakeError::Interrupted(s) => { HandshakeError::Interrupted(MidHandshakeTlsStream(s)) } } } } impl<S> From<ErrorStack> for HandshakeError<S> { fn from(e: ErrorStack) -> HandshakeError<S> { HandshakeError::Failure(e.into()) } } pub struct TlsConnectorBuilder(SslConnectorBuilder); impl TlsConnectorBuilder { pub fn identity(&mut self, pkcs12: Pkcs12) -> Result<(), Error> { let ctx = self.0.builder_mut(); // FIXME clear chain certs to clean up if called multiple times try!(ctx.set_certificate(&pkcs12.0.cert)); try!(ctx.set_private_key(&pkcs12.0.pkey)); try!(ctx.check_private_key()); for cert in pkcs12.0.chain { try!(ctx.add_extra_chain_cert(cert)); } Ok(()) } pub fn add_root_certificate(&mut self, cert: Certificate) -> Result<(), Error> { try!(self.0 .builder_mut() .cert_store_mut() .add_cert(cert.0)); Ok(()) } pub fn supported_protocols(&mut self, protocols: &[Protocol]) -> Result<(), Error> { 
supported_protocols(protocols, self.0.builder_mut()); Ok(()) } pub fn build(self) -> Result<TlsConnector, Error> { Ok(TlsConnector(self.0.build())) } } pub struct TlsConnector(SslConnector); impl TlsConnector { pub fn builder() -> Result<TlsConnectorBuilder, Error> { let builder = try!(SslConnectorBuilder::new(SslMethod::tls())); Ok(TlsConnectorBuilder(builder)) } pub fn connect<S>(&self, domain: &str, stream: S) -> Result<TlsStream<S>, HandshakeError<S>> where S: io::Read + io::Write { let s = try!(self.0.connect(domain, stream)); Ok(TlsStream(s)) } pub fn connect_no_domain<S>(&self, stream: S) -> Result<TlsStream<S>, HandshakeError<S>> where S: io::Read + io::Write { let s = try!(self.0.danger_connect_without_providing_domain_for_certificate_verification_and_server_name_indication(stream)); Ok(TlsStream(s)) } } /// OpenSSL-specific extensions to `TlsConnectorBuilder`. pub trait TlsConnectorBuilderExt { /// Returns a shared reference to the inner `SslConnectorBuilder`. fn builder(&self) -> &SslConnectorBuilder; /// Returns a mutable reference to the inner `SslConnectorBuilder`. 
fn builder_mut(&mut self) -> &mut SslConnectorBuilder; } impl TlsConnectorBuilderExt for ::TlsConnectorBuilder { fn builder(&self) -> &SslConnectorBuilder { &(self.0).0 } fn builder_mut(&mut self) -> &mut SslConnectorBuilder { &mut (self.0).0 } } pub struct TlsAcceptorBuilder(SslAcceptorBuilder); impl TlsAcceptorBuilder { pub fn supported_protocols(&mut self, protocols: &[Protocol]) -> Result<(), Error> { supported_protocols(protocols, self.0.builder_mut()); Ok(()) } pub fn build(self) -> Result<TlsAcceptor, Error> { Ok(TlsAcceptor(self.0.build())) } } pub struct TlsAcceptor(SslAcceptor); impl TlsAcceptor { pub fn builder(pkcs12: Pkcs12) -> Result<TlsAcceptorBuilder, Error> { let builder = try!(SslAcceptorBuilder::mozilla_intermediate(SslMethod::tls(), &pkcs12.0.pkey, &pkcs12.0.cert, &pkcs12.0.chain)); Ok(TlsAcceptorBuilder(builder)) } pub fn accept<S>(&self, stream: S) -> Result<TlsStream<S>, HandshakeError<S>> where S: io::Read + io::Write { let s = try!(self.0.accept(stream)); Ok(TlsStream(s)) } } /// OpenSSL-specific extensions to `TlsAcceptorBuilder`. pub trait TlsAcceptorBuilderExt { /// Returns a shared reference to the inner `SslAcceptorBuilder`. fn builder(&self) -> &SslAcceptorBuilder; /// Returns a mutable reference to the inner `SslAcceptorBuilder`. 
fn builder_mut(&mut self) -> &mut SslAcceptorBuilder; } impl TlsAcceptorBuilderExt for ::TlsAcceptorBuilder { fn builder(&self) -> &SslAcceptorBuilder { &(self.0).0 } fn builder_mut(&mut self) -> &mut SslAcceptorBuilder { &mut (self.0).0 } } pub struct TlsStream<S>(ssl::SslStream<S>); impl<S: fmt::Debug> fmt::Debug for TlsStream<S> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&self.0, fmt) } } impl<S: io::Read + io::Write> TlsStream<S> { pub fn buffered_read_size(&self) -> Result<usize, Error> { Ok(self.0.ssl().pending()) } pub fn shutdown(&mut self) -> io::Result<()> { match self.0.shutdown() { Ok(_) | Err(ssl::Error::ZeroReturn) => Ok(()), Err(ssl::Error::Stream(e)) | Err(ssl::Error::WantRead(e)) | Err(ssl::Error::WantWrite(e)) => Err(e), Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), } } pub fn get_ref(&self) -> &S { self.0.get_ref() } pub fn get_mut(&mut self) -> &mut S { self.0.get_mut() } } impl<S: io::Read + io::Write> io::Read for TlsStream<S> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.0.read(buf) } } impl<S: io::Read + io::Write> io::Write for TlsStream<S> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.0.write(buf) } fn flush(&mut self) -> io::Result<()> { self.0.flush() } } /// OpenSSL-specific extensions to `TlsStream`. pub trait TlsStreamExt<S> { /// Returns a shared reference to the OpenSSL `SslStream`. fn raw_stream(&self) -> &ssl::SslStream<S>; /// Returns a mutable reference to the OpenSSL `SslStream`. fn raw_stream_mut(&mut self) -> &mut ssl::SslStream<S>; } impl<S> TlsStreamExt<S> for ::TlsStream<S> { fn raw_stream(&self) -> &ssl::SslStream<S> { &(self.0).0 } fn raw_stream_mut(&mut self) -> &mut ssl::SslStream<S> { &mut (self.0).0 } } /// OpenSSL-specific extensions to `Error` pub trait ErrorExt { /// Extract the underlying OpenSSL error for inspection. 
fn openssl_error(&self) -> &ssl::Error; } impl ErrorExt for ::Error { fn openssl_error(&self) -> &ssl::Error { &(self.0).0 } } Remove unused import extern crate openssl; use std::io; use std::fmt; use std::error; use self::openssl::pkcs12; use self::openssl::error::ErrorStack; use self::openssl::ssl::{self, SslMethod, SslConnectorBuilder, SslConnector, SslAcceptorBuilder, SslAcceptor, MidHandshakeSslStream, SslContextBuilder}; use self::openssl::x509::X509; use Protocol; fn supported_protocols(protocols: &[Protocol], ctx: &mut SslContextBuilder) { // This constant is only defined on OpenSSL 1.0.2 and above, so manually do it. let ssl_op_no_ssl_mask = ssl::SSL_OP_NO_SSLV2 | ssl::SSL_OP_NO_SSLV3 | ssl::SSL_OP_NO_TLSV1 | ssl::SSL_OP_NO_TLSV1_1 | ssl::SSL_OP_NO_TLSV1_2; ctx.clear_options(ssl_op_no_ssl_mask); let mut options = ssl_op_no_ssl_mask; for protocol in protocols { let op = match *protocol { Protocol::Sslv3 => ssl::SSL_OP_NO_SSLV3, Protocol::Tlsv10 => ssl::SSL_OP_NO_TLSV1, Protocol::Tlsv11 => ssl::SSL_OP_NO_TLSV1_1, Protocol::Tlsv12 => ssl::SSL_OP_NO_TLSV1_2, Protocol::__NonExhaustive => unreachable!(), }; options &= !op; } ctx.set_options(options); } pub struct Error(ssl::Error); impl error::Error for Error { fn description(&self) -> &str { error::Error::description(&self.0) } fn cause(&self) -> Option<&error::Error> { error::Error::cause(&self.0) } } impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&self.0, fmt) } } impl fmt::Debug for Error { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&self.0, fmt) } } impl From<ssl::Error> for Error { fn from(err: ssl::Error) -> Error { Error(err) } } impl From<ErrorStack> for Error { fn from(err: ErrorStack) -> Error { ssl::Error::Ssl(err).into() } } pub struct Pkcs12(pkcs12::ParsedPkcs12); impl Pkcs12 { pub fn from_der(buf: &[u8], pass: &str) -> Result<Pkcs12, Error> { let pkcs12 = try!(pkcs12::Pkcs12::from_der(buf)); let parsed = 
try!(pkcs12.parse(pass)); Ok(Pkcs12(parsed)) } } pub struct Certificate(X509); impl Certificate { pub fn from_der(buf: &[u8]) -> Result<Certificate, Error> { let cert = try!(X509::from_der(buf)); Ok(Certificate(cert)) } } pub struct MidHandshakeTlsStream<S>(MidHandshakeSslStream<S>); impl<S> fmt::Debug for MidHandshakeTlsStream<S> where S: fmt::Debug { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&self.0, fmt) } } impl<S> MidHandshakeTlsStream<S> { pub fn get_ref(&self) -> &S { self.0.get_ref() } pub fn get_mut(&mut self) -> &mut S { self.0.get_mut() } } impl<S> MidHandshakeTlsStream<S> where S: io::Read + io::Write { pub fn handshake(self) -> Result<TlsStream<S>, HandshakeError<S>> { match self.0.handshake() { Ok(s) => Ok(TlsStream(s)), Err(e) => Err(e.into()), } } } pub enum HandshakeError<S> { Failure(Error), Interrupted(MidHandshakeTlsStream<S>), } impl<S> From<ssl::HandshakeError<S>> for HandshakeError<S> { fn from(e: ssl::HandshakeError<S>) -> HandshakeError<S> { match e { ssl::HandshakeError::SetupFailure(e) => { HandshakeError::Failure(Error(ssl::Error::Ssl(e))) } ssl::HandshakeError::Failure(e) => HandshakeError::Failure(Error(e.into_error())), ssl::HandshakeError::Interrupted(s) => { HandshakeError::Interrupted(MidHandshakeTlsStream(s)) } } } } impl<S> From<ErrorStack> for HandshakeError<S> { fn from(e: ErrorStack) -> HandshakeError<S> { HandshakeError::Failure(e.into()) } } pub struct TlsConnectorBuilder(SslConnectorBuilder); impl TlsConnectorBuilder { pub fn identity(&mut self, pkcs12: Pkcs12) -> Result<(), Error> { let ctx = self.0.builder_mut(); // FIXME clear chain certs to clean up if called multiple times try!(ctx.set_certificate(&pkcs12.0.cert)); try!(ctx.set_private_key(&pkcs12.0.pkey)); try!(ctx.check_private_key()); for cert in pkcs12.0.chain { try!(ctx.add_extra_chain_cert(cert)); } Ok(()) } pub fn add_root_certificate(&mut self, cert: Certificate) -> Result<(), Error> { try!(self.0 .builder_mut() .cert_store_mut() 
.add_cert(cert.0)); Ok(()) } pub fn supported_protocols(&mut self, protocols: &[Protocol]) -> Result<(), Error> { supported_protocols(protocols, self.0.builder_mut()); Ok(()) } pub fn build(self) -> Result<TlsConnector, Error> { Ok(TlsConnector(self.0.build())) } } pub struct TlsConnector(SslConnector); impl TlsConnector { pub fn builder() -> Result<TlsConnectorBuilder, Error> { let builder = try!(SslConnectorBuilder::new(SslMethod::tls())); Ok(TlsConnectorBuilder(builder)) } pub fn connect<S>(&self, domain: &str, stream: S) -> Result<TlsStream<S>, HandshakeError<S>> where S: io::Read + io::Write { let s = try!(self.0.connect(domain, stream)); Ok(TlsStream(s)) } pub fn connect_no_domain<S>(&self, stream: S) -> Result<TlsStream<S>, HandshakeError<S>> where S: io::Read + io::Write { let s = try!(self.0.danger_connect_without_providing_domain_for_certificate_verification_and_server_name_indication(stream)); Ok(TlsStream(s)) } } /// OpenSSL-specific extensions to `TlsConnectorBuilder`. pub trait TlsConnectorBuilderExt { /// Returns a shared reference to the inner `SslConnectorBuilder`. fn builder(&self) -> &SslConnectorBuilder; /// Returns a mutable reference to the inner `SslConnectorBuilder`. 
fn builder_mut(&mut self) -> &mut SslConnectorBuilder; } impl TlsConnectorBuilderExt for ::TlsConnectorBuilder { fn builder(&self) -> &SslConnectorBuilder { &(self.0).0 } fn builder_mut(&mut self) -> &mut SslConnectorBuilder { &mut (self.0).0 } } pub struct TlsAcceptorBuilder(SslAcceptorBuilder); impl TlsAcceptorBuilder { pub fn supported_protocols(&mut self, protocols: &[Protocol]) -> Result<(), Error> { supported_protocols(protocols, self.0.builder_mut()); Ok(()) } pub fn build(self) -> Result<TlsAcceptor, Error> { Ok(TlsAcceptor(self.0.build())) } } pub struct TlsAcceptor(SslAcceptor); impl TlsAcceptor { pub fn builder(pkcs12: Pkcs12) -> Result<TlsAcceptorBuilder, Error> { let builder = try!(SslAcceptorBuilder::mozilla_intermediate(SslMethod::tls(), &pkcs12.0.pkey, &pkcs12.0.cert, &pkcs12.0.chain)); Ok(TlsAcceptorBuilder(builder)) } pub fn accept<S>(&self, stream: S) -> Result<TlsStream<S>, HandshakeError<S>> where S: io::Read + io::Write { let s = try!(self.0.accept(stream)); Ok(TlsStream(s)) } } /// OpenSSL-specific extensions to `TlsAcceptorBuilder`. pub trait TlsAcceptorBuilderExt { /// Returns a shared reference to the inner `SslAcceptorBuilder`. fn builder(&self) -> &SslAcceptorBuilder; /// Returns a mutable reference to the inner `SslAcceptorBuilder`. 
fn builder_mut(&mut self) -> &mut SslAcceptorBuilder; } impl TlsAcceptorBuilderExt for ::TlsAcceptorBuilder { fn builder(&self) -> &SslAcceptorBuilder { &(self.0).0 } fn builder_mut(&mut self) -> &mut SslAcceptorBuilder { &mut (self.0).0 } } pub struct TlsStream<S>(ssl::SslStream<S>); impl<S: fmt::Debug> fmt::Debug for TlsStream<S> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&self.0, fmt) } } impl<S: io::Read + io::Write> TlsStream<S> { pub fn buffered_read_size(&self) -> Result<usize, Error> { Ok(self.0.ssl().pending()) } pub fn shutdown(&mut self) -> io::Result<()> { match self.0.shutdown() { Ok(_) | Err(ssl::Error::ZeroReturn) => Ok(()), Err(ssl::Error::Stream(e)) | Err(ssl::Error::WantRead(e)) | Err(ssl::Error::WantWrite(e)) => Err(e), Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), } } pub fn get_ref(&self) -> &S { self.0.get_ref() } pub fn get_mut(&mut self) -> &mut S { self.0.get_mut() } } impl<S: io::Read + io::Write> io::Read for TlsStream<S> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.0.read(buf) } } impl<S: io::Read + io::Write> io::Write for TlsStream<S> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.0.write(buf) } fn flush(&mut self) -> io::Result<()> { self.0.flush() } } /// OpenSSL-specific extensions to `TlsStream`. pub trait TlsStreamExt<S> { /// Returns a shared reference to the OpenSSL `SslStream`. fn raw_stream(&self) -> &ssl::SslStream<S>; /// Returns a mutable reference to the OpenSSL `SslStream`. fn raw_stream_mut(&mut self) -> &mut ssl::SslStream<S>; } impl<S> TlsStreamExt<S> for ::TlsStream<S> { fn raw_stream(&self) -> &ssl::SslStream<S> { &(self.0).0 } fn raw_stream_mut(&mut self) -> &mut ssl::SslStream<S> { &mut (self.0).0 } } /// OpenSSL-specific extensions to `Error` pub trait ErrorExt { /// Extract the underlying OpenSSL error for inspection. 
fn openssl_error(&self) -> &ssl::Error; } impl ErrorExt for ::Error { fn openssl_error(&self) -> &ssl::Error { &(self.0).0 } }
// Copyright 2014-2016 bluss and ndarray developers. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use libnum; use itertools::free::enumerate; use imp_prelude::*; use numeric_util; use { LinalgScalar, }; #[cfg(feature="blas")] use std::mem::swap; #[cfg(feature="blas")] use std::os::raw::c_int; #[cfg(feature="blas")] use std::any::{Any, TypeId}; #[cfg(feature="blas")] use blas_sys::c::{CblasNoTrans, CblasTrans, CblasRowMajor}; #[cfg(feature="blas")] use blas_sys; /// len of vector before we use blas #[cfg(feature="blas")] const DOT_BLAS_CUTOFF: usize = 32; /// side of matrix before we use blas #[cfg(feature="blas")] const GEMM_BLAS_CUTOFF: usize = 7; impl<A, S> ArrayBase<S, Ix> where S: Data<Elem=A>, { /// Compute the dot product of one-dimensional arrays. /// /// The dot product is a sum of the elementwise products (no conjugation /// of complex operands, and thus not their inner product). /// /// **Panics** if the arrays are not of the same length. 
pub fn dot<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A where S2: Data<Elem=A>, A: LinalgScalar, { self.dot_impl(rhs) } fn dot_generic<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A where S2: Data<Elem=A>, A: LinalgScalar, { debug_assert_eq!(self.len(), rhs.len()); assert!(self.len() == rhs.len()); if let Some(self_s) = self.as_slice() { if let Some(rhs_s) = rhs.as_slice() { return numeric_util::unrolled_dot(self_s, rhs_s); } } let mut sum = A::zero(); for i in 0..self.len() { unsafe { sum = sum.clone() + self.uget(i).clone() * rhs.uget(i).clone(); } } sum } #[cfg(not(feature="blas"))] fn dot_impl<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A where S2: Data<Elem=A>, A: LinalgScalar, { self.dot_generic(rhs) } #[cfg(feature="blas")] fn dot_impl<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A where S2: Data<Elem=A>, A: LinalgScalar, { // Use only if the vector is large enough to be worth it if self.len() >= DOT_BLAS_CUTOFF { debug_assert_eq!(self.len(), rhs.len()); assert!(self.len() == rhs.len()); macro_rules! dot { ($ty:ty, $func:ident) => {{ if blas_compat_1d::<$ty, _>(self) && blas_compat_1d::<$ty, _>(rhs) { let n = self.len() as c_int; let incx = self.strides()[0] as c_int; let incy = rhs.strides()[0] as c_int; let ret = unsafe { blas_sys::c::$func( n, self.ptr as *const $ty, incx, rhs.ptr as *const $ty, incy) }; return cast_as::<$ty, A>(&ret); } }} } dot!{f32, cblas_sdot}; dot!{f64, cblas_ddot}; } self.dot_generic(rhs) } } impl<A, S> ArrayBase<S, (Ix, Ix)> where S: Data<Elem=A>, { /// Return an array view of row `index`. /// /// **Panics** if `index` is out of bounds. pub fn row(&self, index: Ix) -> ArrayView<A, Ix> { self.subview(Axis(0), index) } /// Return a mutable array view of row `index`. /// /// **Panics** if `index` is out of bounds. pub fn row_mut(&mut self, index: Ix) -> ArrayViewMut<A, Ix> where S: DataMut { self.subview_mut(Axis(0), index) } /// Return an array view of column `index`. /// /// **Panics** if `index` is out of bounds. 
pub fn column(&self, index: Ix) -> ArrayView<A, Ix> { self.subview(Axis(1), index) } /// Return a mutable array view of column `index`. /// /// **Panics** if `index` is out of bounds. pub fn column_mut(&mut self, index: Ix) -> ArrayViewMut<A, Ix> where S: DataMut { self.subview_mut(Axis(1), index) } /// Perform matrix multiplication of rectangular arrays `self` and `rhs`. /// /// The array shapes must agree in the way that /// if `self` is *M* × *N*, then `rhs` is *N* × *K*. /// /// Return a result array with shape *M* × *K*. /// /// **Panics** if shapes are incompatible. /// /// ``` /// use ndarray::arr2; /// /// let a = arr2(&[[1., 2.], /// [0., 1.]]); /// let b = arr2(&[[1., 2.], /// [2., 3.]]); /// /// assert!( /// a.mat_mul(&b) == arr2(&[[5., 8.], /// [2., 3.]]) /// ); /// ``` /// pub fn mat_mul(&self, rhs: &ArrayBase<S, (Ix, Ix)>) -> OwnedArray<A, (Ix, Ix)> where A: LinalgScalar, { let ((m, a), (b, n)) = (self.dim, rhs.dim); let (lhs_columns, rhs_rows) = (a, b); assert!(lhs_columns == rhs_rows); assert!(m.checked_mul(n).is_some()); mat_mul_impl(self, rhs) } /// Perform the matrix multiplication of the rectangular array `self` and /// column vector `rhs`. /// /// The array shapes must agree in the way that /// if `self` is *M* × *N*, then `rhs` is *N*. /// /// Return a result array with shape *M*. /// /// **Panics** if shapes are incompatible. 
pub fn mat_mul_col(&self, rhs: &ArrayBase<S, Ix>) -> OwnedArray<A, Ix>
    where A: LinalgScalar,
{
    // Matrix × column-vector product: `self` is M × N, `rhs` has length N.
    let ((m, a), n) = (self.dim, rhs.dim);
    let (self_columns, other_rows) = (a, n);
    assert!(self_columns == other_rows);

    // Avoid initializing the memory in vec -- set it during iteration
    // NOTE(review): sound only if `A` has no Drop (every slot is written
    // exactly once below) -- confirm `LinalgScalar` implies `Copy`.
    let mut res_elems = Vec::<A>::with_capacity(m as usize);
    unsafe {
        res_elems.set_len(m as usize);
    }

    // Output element i is the dot product of row i of `self` with `rhs`.
    for (i, rr) in enumerate(&mut res_elems) {
        unsafe {
            *rr = (0..a).fold(libnum::zero::<A>(),
                              move |s, k| s + *self.uget((i, k)) * *rhs.uget(k));
        }
    }
    unsafe {
        ArrayBase::from_vec_dim_unchecked(m, res_elems)
    }
}
}

// Without the `blas` feature the general fallback is the only implementation.
#[cfg(not(feature="blas"))]
use self::mat_mul_general as mat_mul_impl;

// With `blas` enabled, dispatch large f32/f64 products to gemm and fall back
// to `mat_mul_general` for small or otherwise incompatible operands.
#[cfg(feature="blas")]
fn mat_mul_impl<A, S>(lhs: &ArrayBase<S, (Ix, Ix)>, rhs: &ArrayBase<S, (Ix, Ix)>)
    -> OwnedArray<A, (Ix, Ix)>
    where A: LinalgScalar,
          S: Data<Elem=A>,
{
    // size cutoff for using BLAS
    let cut = GEMM_BLAS_CUTOFF;
    let ((mut m, a), (_, mut n)) = (lhs.dim, rhs.dim);
    // BLAS pays off only above the size cutoff, and only for the two element
    // types the bindings cover.
    if !(m > cut || n > cut || a > cut) || !(same_type::<A, f32>() || same_type::<A, f64>()) {
        return mat_mul_general(lhs, rhs);
    }
    // Use `c` for c-order and `f` for an f-order matrix
    // We can handle c * c, f * f generally and
    // c * f and f * c if the `f` matrix is square.
    let mut lhs_ = lhs.view();
    let mut rhs_ = rhs.view();
    let lhs_s0 = lhs_.strides()[0];
    let rhs_s0 = rhs_.strides()[0];
    // Stride 1 along axis 0 marks a column-major (f-order) matrix.
    let both_f = lhs_s0 == 1 && rhs_s0 == 1;
    let mut lhs_trans = CblasNoTrans;
    let mut rhs_trans = CblasNoTrans;
    if both_f {
        // A^t B^t = C^t => B A = C
        lhs_ = lhs_.reversed_axes();
        rhs_ = rhs_.reversed_axes();
        swap(&mut lhs_, &mut rhs_);
        swap(&mut m, &mut n);
    } else if lhs_s0 == 1 && m == a {
        lhs_ = lhs_.reversed_axes();
        lhs_trans = CblasTrans;
    } else if rhs_s0 == 1 && a == n {
        rhs_ = rhs_.reversed_axes();
        rhs_trans = CblasTrans;
    }

    // Expands to a monomorphic gemm call for `$ty`; falls through without
    // returning when either operand is not BLAS-compatible.
    macro_rules! gemm {
        ($ty:ty, $gemm:ident) => {
            if blas_row_major_2d::<$ty, _>(&lhs_) && blas_row_major_2d::<$ty, _>(&rhs_) {
                // Output left uninitialized: gemm with beta == 0.0 overwrites
                // every element of `c`.
                let mut elems = Vec::<A>::with_capacity(m * n);
                let c;
                unsafe {
                    elems.set_len(m * n);
                    c = OwnedArray::from_vec_dim_unchecked((m, n), elems);
                }
                {
                    // Dimensions of OP(a): swapped when transposed.
                    let (m, k) = match lhs_trans {
                        CblasNoTrans => lhs_.dim(),
                        _ => {
                            let (rows, cols) = lhs_.dim();
                            (cols, rows)
                        }
                    };
                    let n = match rhs_trans {
                        CblasNoTrans => rhs_.dim().1,
                        _ => rhs_.dim().0,
                    };
                    unsafe {
                        blas_sys::c::$gemm(
                            CblasRowMajor,
                            lhs_trans,
                            rhs_trans,
                            m as c_int, // m, rows of OP(a)
                            n as c_int, // n, cols of OP(b)
                            k as c_int, // k, cols of OP(a)
                            1.0, // alpha
                            lhs_.ptr as *const _, // a
                            lhs_.strides()[0] as c_int, // lda
                            rhs_.ptr as *const _, // b
                            rhs_.strides()[0] as c_int, // ldb
                            0.0, // beta
                            c.ptr as *mut _, // c
                            c.strides()[0] as c_int, // ldc
                        );
                    }
                }
                // Undo the f-order swap from above.
                return if both_f {
                    c.reversed_axes()
                } else {
                    c
                };
            }
        }
    }
    gemm!(f32, cblas_sgemm);
    gemm!(f64, cblas_dgemm);
    return mat_mul_general(lhs, rhs);
}

// Straightforward row-by-row matrix product, used when BLAS is unavailable
// or not applicable. Result is always produced in row-major (c) order.
fn mat_mul_general<A, S>(lhs: &ArrayBase<S, (Ix, Ix)>, rhs: &ArrayBase<S, (Ix, Ix)>)
    -> OwnedArray<A, (Ix, Ix)>
    where A: LinalgScalar,
          S: Data<Elem=A>,
{
    let ((m, a), (_, n)) = (lhs.dim, rhs.dim);

    // Avoid initializing the memory in vec -- set it during iteration
    // Panic safe because A: Copy
    let mut res_elems = Vec::<A>::with_capacity(m as usize * n as usize);
    unsafe {
        res_elems.set_len(m as usize * n as usize);
    }

    // (i, j) tracks the output cell being computed, in row-major order.
    let mut i = 0;
    let mut j = 0;
    for rr in &mut res_elems {
        unsafe {
            *rr = (0..a).fold(libnum::zero::<A>(),
                              move |s, k| s + *lhs.uget((i, k)) * *rhs.uget((k, j)));
        }
        j += 1;
        if j == n {
            j = 0;
            i += 1;
        }
    }
    unsafe {
        ArrayBase::from_vec_dim_unchecked((m, n), res_elems)
    }
}

#[cfg(feature="blas")]
#[inline(always)]
/// Return `true` if `A` and `B` are the same type
fn same_type<A: Any, B: Any>() -> bool {
    TypeId::of::<A>() == TypeId::of::<B>()
}

#[cfg(feature="blas")]
// Read pointer to type `A` as type `B`.
//
// **Panics** if `A` and `B` are not the same type
fn cast_as<A: Any + Copy, B: Any + Copy>(a: &A) -> B {
    assert!(same_type::<A, B>());
    unsafe {
        ::std::ptr::read(a as *const _ as *const B)
    }
}

#[cfg(feature="blas")]
// `true` if the 1-d array can be handed to a BLAS vector routine: element
// type matches `A`, and both length and stride fit in `c_int`.
fn blas_compat_1d<A, S>(a: &ArrayBase<S, Ix>) -> bool
    where S: Data,
          A: Any,
          S::Elem: Any,
{
    if !same_type::<A, S::Elem>() {
        return false;
    }
    if a.len() > c_int::max_value() as usize {
        return false;
    }
    let stride = a.strides()[0];
    if stride > c_int::max_value() as isize ||
        stride < c_int::min_value() as isize {
        return false;
    }
    true
}

#[cfg(feature="blas")]
// `true` if the 2-d array can be passed to row-major BLAS: element type
// matches `A`, rows are contiguous (inner stride exactly 1), and all
// dimensions/strides are representable as `c_int`.
fn blas_row_major_2d<A, S>(a: &ArrayBase<S, (Ix, Ix)>) -> bool
    where S: Data,
          A: Any,
          S::Elem: Any,
{
    if !same_type::<A, S::Elem>() {
        return false;
    }
    let s0 = a.strides()[0];
    let s1 = a.strides()[1];
    if s1 != 1 {
        return false;
    }
    if (s0 > c_int::max_value() as isize || s0 < c_int::min_value() as isize) ||
        (s1 > c_int::max_value() as isize || s1 < c_int::min_value() as isize) {
        return false;
    }
    let (m, n) = a.dim();
    if m > c_int::max_value() as usize || n > c_int::max_value() as usize {
        return false;
    }
    true
}

General matrix multiply also returns f-order result from two f-order inputs

// Copyright 2014-2016 bluss and ndarray developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libnum::Zero;
use itertools::free::enumerate;

use imp_prelude::*;
use numeric_util;

use {
    LinalgScalar,
};

#[cfg(feature="blas")]
use std::mem::swap;
#[cfg(feature="blas")]
use std::os::raw::c_int;
#[cfg(feature="blas")]
use std::any::{Any, TypeId};

#[cfg(feature="blas")]
use blas_sys::c::{CblasNoTrans, CblasTrans, CblasRowMajor};
#[cfg(feature="blas")]
use blas_sys;

/// len of vector before we use blas
#[cfg(feature="blas")]
const DOT_BLAS_CUTOFF: usize = 32;
/// side of matrix before we use blas
#[cfg(feature="blas")]
const GEMM_BLAS_CUTOFF: usize = 7;

/// Dot-product methods on one-dimensional arrays.
impl<A, S> ArrayBase<S, Ix>
    where S: Data<Elem=A>,
{
    /// Compute the dot product of one-dimensional arrays.
    ///
    /// The dot product is a sum of the elementwise products (no conjugation
    /// of complex operands, and thus not their inner product).
    ///
    /// **Panics** if the arrays are not of the same length.
    pub fn dot<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A
        where S2: Data<Elem=A>,
              A: LinalgScalar,
    {
        self.dot_impl(rhs)
    }

    /// Non-BLAS dot product: an unrolled slice kernel when both operands are
    /// contiguous, a scalar loop over `uget` otherwise.
    fn dot_generic<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A
        where S2: Data<Elem=A>,
              A: LinalgScalar,
    {
        debug_assert_eq!(self.len(), rhs.len());
        assert!(self.len() == rhs.len());
        if let Some(self_s) = self.as_slice() {
            if let Some(rhs_s) = rhs.as_slice() {
                return numeric_util::unrolled_dot(self_s, rhs_s);
            }
        }
        let mut sum = A::zero();
        for i in 0..self.len() {
            unsafe {
                // In bounds: i < self.len() == rhs.len() (asserted above).
                sum = sum.clone() + self.uget(i).clone() * rhs.uget(i).clone();
            }
        }
        sum
    }

    // Without the `blas` feature, the generic kernel is the implementation.
    #[cfg(not(feature="blas"))]
    fn dot_impl<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A
        where S2: Data<Elem=A>,
              A: LinalgScalar,
    {
        self.dot_generic(rhs)
    }

    // With `blas`, use sdot/ddot for long f32/f64 vectors, otherwise fall
    // back to the generic kernel.
    #[cfg(feature="blas")]
    fn dot_impl<S2>(&self, rhs: &ArrayBase<S2, Ix>) -> A
        where S2: Data<Elem=A>,
              A: LinalgScalar,
    {
        // Use only if the vector is large enough to be worth it
        if self.len() >= DOT_BLAS_CUTOFF {
            debug_assert_eq!(self.len(), rhs.len());
            assert!(self.len() == rhs.len());
            // Monomorphic BLAS dot call for `$ty`; falls through when the
            // operands are not BLAS-compatible.
            macro_rules! dot {
                ($ty:ty, $func:ident) => {{
                    if blas_compat_1d::<$ty, _>(self) && blas_compat_1d::<$ty, _>(rhs) {
                        let n = self.len() as c_int;
                        let incx = self.strides()[0] as c_int;
                        let incy = rhs.strides()[0] as c_int;
                        let ret = unsafe {
                            blas_sys::c::$func(
                                n,
                                self.ptr as *const $ty,
                                incx,
                                rhs.ptr as *const $ty,
                                incy)
                        };
                        return cast_as::<$ty, A>(&ret);
                    }
                }}
            }
            dot!{f32, cblas_sdot};
            dot!{f64, cblas_ddot};
        }
        self.dot_generic(rhs)
    }
}

/// Row/column views and matrix products for two-dimensional arrays.
impl<A, S> ArrayBase<S, (Ix, Ix)>
    where S: Data<Elem=A>,
{
    /// Return an array view of row `index`.
    ///
    /// **Panics** if `index` is out of bounds.
    pub fn row(&self, index: Ix) -> ArrayView<A, Ix> {
        self.subview(Axis(0), index)
    }

    /// Return a mutable array view of row `index`.
    ///
    /// **Panics** if `index` is out of bounds.
    pub fn row_mut(&mut self, index: Ix) -> ArrayViewMut<A, Ix>
        where S: DataMut
    {
        self.subview_mut(Axis(0), index)
    }

    /// Return an array view of column `index`.
    ///
    /// **Panics** if `index` is out of bounds.
    pub fn column(&self, index: Ix) -> ArrayView<A, Ix> {
        self.subview(Axis(1), index)
    }

    /// Return a mutable array view of column `index`.
    ///
    /// **Panics** if `index` is out of bounds.
    pub fn column_mut(&mut self, index: Ix) -> ArrayViewMut<A, Ix>
        where S: DataMut
    {
        self.subview_mut(Axis(1), index)
    }

    /// Perform matrix multiplication of rectangular arrays `self` and `rhs`.
    ///
    /// The array shapes must agree in the way that
    /// if `self` is *M* × *N*, then `rhs` is *N* × *K*.
    ///
    /// Return a result array with shape *M* × *K*.
    ///
    /// **Panics** if shapes are incompatible.
    ///
    /// ```
    /// use ndarray::arr2;
    ///
    /// let a = arr2(&[[1., 2.],
    ///                [0., 1.]]);
    /// let b = arr2(&[[1., 2.],
    ///                [2., 3.]]);
    ///
    /// assert!(
    ///     a.mat_mul(&b) == arr2(&[[5., 8.],
    ///                             [2., 3.]])
    /// );
    /// ```
    ///
    pub fn mat_mul(&self, rhs: &ArrayBase<S, (Ix, Ix)>) -> OwnedArray<A, (Ix, Ix)>
        where A: LinalgScalar,
    {
        let ((m, a), (b, n)) = (self.dim, rhs.dim);
        let (lhs_columns, rhs_rows) = (a, b);
        assert!(lhs_columns == rhs_rows);
        // Guard against overflow when sizing the m × n result buffer.
        assert!(m.checked_mul(n).is_some());
        mat_mul_impl(self, rhs)
    }

    /// Perform the matrix multiplication of the rectangular array `self` and
    /// column vector `rhs`.
    ///
    /// The array shapes must agree in the way that
    /// if `self` is *M* × *N*, then `rhs` is *N*.
    ///
    /// Return a result array with shape *M*.
    ///
    /// **Panics** if shapes are incompatible.
    pub fn mat_mul_col(&self, rhs: &ArrayBase<S, Ix>) -> OwnedArray<A, Ix>
        where A: LinalgScalar,
    {
        let ((m, a), n) = (self.dim, rhs.dim);
        let (self_columns, other_rows) = (a, n);
        assert!(self_columns == other_rows);

        // Avoid initializing the memory in vec -- set it during iteration
        // NOTE(review): sound only if `A` has no Drop (every slot is written
        // exactly once below) -- confirm `LinalgScalar` implies `Copy`.
        let mut res_elems = Vec::<A>::with_capacity(m as usize);
        unsafe {
            res_elems.set_len(m as usize);
        }

        // Output element i is the dot product of row i of `self` with `rhs`.
        for (i, rr) in enumerate(&mut res_elems) {
            unsafe {
                *rr = (0..a).fold(A::zero(),
                                  move |s, k| s + *self.uget((i, k)) * *rhs.uget(k));
            }
        }
        unsafe {
            ArrayBase::from_vec_dim_unchecked(m, res_elems)
        }
    }
}

// Without the `blas` feature the general fallback is the only implementation.
#[cfg(not(feature="blas"))]
use self::mat_mul_general as mat_mul_impl;

// With `blas` enabled, dispatch large f32/f64 products to gemm and fall back
// to `mat_mul_general` for small or otherwise incompatible operands.
#[cfg(feature="blas")]
fn mat_mul_impl<A, S>(lhs: &ArrayBase<S, (Ix, Ix)>, rhs: &ArrayBase<S, (Ix, Ix)>)
    -> OwnedArray<A, (Ix, Ix)>
    where A: LinalgScalar,
          S: Data<Elem=A>,
{
    // size cutoff for using BLAS
    let cut = GEMM_BLAS_CUTOFF;
    let ((mut m, a), (_, mut n)) = (lhs.dim, rhs.dim);
    // BLAS pays off only above the size cutoff, and only for the two element
    // types the bindings cover.
    if !(m > cut || n > cut || a > cut) || !(same_type::<A, f32>() || same_type::<A, f64>()) {
        return mat_mul_general(lhs, rhs);
    }
    // Use `c` for c-order and `f` for an f-order matrix
    // We can handle c * c, f * f generally and
    // c * f and f * c if the `f` matrix is square.
    let mut lhs_ = lhs.view();
    let mut rhs_ = rhs.view();
    let lhs_s0 = lhs_.strides()[0];
    let rhs_s0 = rhs_.strides()[0];
    // Stride 1 along axis 0 marks a column-major (f-order) matrix.
    let both_f = lhs_s0 == 1 && rhs_s0 == 1;
    let mut lhs_trans = CblasNoTrans;
    let mut rhs_trans = CblasNoTrans;
    if both_f {
        // A^t B^t = C^t => B A = C
        lhs_ = lhs_.reversed_axes();
        rhs_ = rhs_.reversed_axes();
        swap(&mut lhs_, &mut rhs_);
        swap(&mut m, &mut n);
    } else if lhs_s0 == 1 && m == a {
        lhs_ = lhs_.reversed_axes();
        lhs_trans = CblasTrans;
    } else if rhs_s0 == 1 && a == n {
        rhs_ = rhs_.reversed_axes();
        rhs_trans = CblasTrans;
    }

    // Expands to a monomorphic gemm call for `$ty`; falls through without
    // returning when either operand is not BLAS-compatible.
    macro_rules! gemm {
        ($ty:ty, $gemm:ident) => {
            if blas_row_major_2d::<$ty, _>(&lhs_) && blas_row_major_2d::<$ty, _>(&rhs_) {
                // Output left uninitialized: gemm with beta == 0.0 overwrites
                // every element of `c`.
                let mut elems = Vec::<A>::with_capacity(m * n);
                let c;
                unsafe {
                    elems.set_len(m * n);
                    c = OwnedArray::from_vec_dim_unchecked((m, n), elems);
                }
                {
                    // Dimensions of OP(a): swapped when transposed.
                    let (m, k) = match lhs_trans {
                        CblasNoTrans => lhs_.dim(),
                        _ => {
                            let (rows, cols) = lhs_.dim();
                            (cols, rows)
                        }
                    };
                    let n = match rhs_trans {
                        CblasNoTrans => rhs_.dim().1,
                        _ => rhs_.dim().0,
                    };
                    unsafe {
                        blas_sys::c::$gemm(
                            CblasRowMajor,
                            lhs_trans,
                            rhs_trans,
                            m as c_int, // m, rows of OP(a)
                            n as c_int, // n, cols of OP(b)
                            k as c_int, // k, cols of OP(a)
                            1.0, // alpha
                            lhs_.ptr as *const _, // a
                            lhs_.strides()[0] as c_int, // lda
                            rhs_.ptr as *const _, // b
                            rhs_.strides()[0] as c_int, // ldb
                            0.0, // beta
                            c.ptr as *mut _, // c
                            c.strides()[0] as c_int, // ldc
                        );
                    }
                }
                // Undo the f-order swap from above.
                return if both_f {
                    c.reversed_axes()
                } else {
                    c
                };
            }
        }
    }
    gemm!(f32, cblas_sgemm);
    gemm!(f64, cblas_dgemm);
    return mat_mul_general(lhs, rhs);
}

// Straightforward matrix product, used when BLAS is unavailable or not
// applicable. When both inputs are f-order (stride 1 along axis 0) the
// result is produced in f-order too; otherwise it is c-order.
fn mat_mul_general<A, S>(lhs: &ArrayBase<S, (Ix, Ix)>, rhs: &ArrayBase<S, (Ix, Ix)>)
    -> OwnedArray<A, (Ix, Ix)>
    where A: LinalgScalar,
          S: Data<Elem=A>,
{
    let ((m, a), (_, n)) = (lhs.dim, rhs.dim);
    let lhs_s0 = lhs.strides()[0];
    let rhs_s0 = rhs.strides()[0];
    let column_major = lhs_s0 == 1 && rhs_s0 == 1;

    // Avoid initializing the memory in vec -- set it during iteration
    // Panic safe because A: Copy
    let mut res_elems = Vec::<A>::with_capacity(m as usize * n as usize);
    unsafe {
        res_elems.set_len(m as usize * n as usize);
    }

    // (i, j) tracks the output cell: rows-first for c-order output,
    // columns-first for f-order output.
    let mut i = 0;
    let mut j = 0;
    for rr in &mut res_elems {
        unsafe {
            *rr = (0..a).fold(A::zero(),
                              move |s, k| s + *lhs.uget((i, k)) * *rhs.uget((k, j)));
        }
        if !column_major {
            j += 1;
            if j == n {
                j = 0;
                i += 1;
            }
        } else {
            i += 1;
            if i == m {
                i = 0;
                j += 1;
            }
        }
    }
    unsafe {
        if !column_major {
            ArrayBase::from_vec_dim_unchecked((m, n), res_elems)
        } else {
            ArrayBase::from_vec_dim_unchecked_f((m, n), res_elems)
        }
    }
}

#[cfg(feature="blas")]
#[inline(always)]
/// Return `true` if `A` and `B` are the same type
fn same_type<A: Any, B: Any>() -> bool {
    TypeId::of::<A>() == TypeId::of::<B>()
}

#[cfg(feature="blas")]
// Read pointer to type `A` as type `B`.
//
// **Panics** if `A` and `B` are not the same type
fn cast_as<A: Any + Copy, B: Any + Copy>(a: &A) -> B {
    assert!(same_type::<A, B>());
    unsafe {
        ::std::ptr::read(a as *const _ as *const B)
    }
}

#[cfg(feature="blas")]
// `true` if the 1-d array can be handed to a BLAS vector routine: element
// type matches `A`, and both length and stride fit in `c_int`.
fn blas_compat_1d<A, S>(a: &ArrayBase<S, Ix>) -> bool
    where S: Data,
          A: Any,
          S::Elem: Any,
{
    if !same_type::<A, S::Elem>() {
        return false;
    }
    if a.len() > c_int::max_value() as usize {
        return false;
    }
    let stride = a.strides()[0];
    if stride > c_int::max_value() as isize ||
        stride < c_int::min_value() as isize {
        return false;
    }
    true
}

#[cfg(feature="blas")]
// `true` if the 2-d array can be passed to row-major BLAS: element type
// matches `A`, rows are contiguous (inner stride exactly 1), and all
// dimensions/strides are representable as `c_int`.
fn blas_row_major_2d<A, S>(a: &ArrayBase<S, (Ix, Ix)>) -> bool
    where S: Data,
          A: Any,
          S::Elem: Any,
{
    if !same_type::<A, S::Elem>() {
        return false;
    }
    let s0 = a.strides()[0];
    let s1 = a.strides()[1];
    if s1 != 1 {
        return false;
    }
    if (s0 > c_int::max_value() as isize || s0 < c_int::min_value() as isize) ||
        (s1 > c_int::max_value() as isize || s1 < c_int::min_value() as isize) {
        return false;
    }
    let (m, n) = a.dim();
    if m > c_int::max_value() as usize || n > c_int::max_value() as usize {
        return false;
    }
    true
}
use frontend::*; use std::cmp; use std::io::{self, Write, stdout}; const SIZE: usize = 30000; struct Brainfuck { code: Vec<FaustCmd>, code_pos: usize, jumps: Vec<usize>, tape: Vec<u8>, tape_pos: usize, } fn get_input_line() -> String { let mut input = String::new(); print!("\ninput> "); stdout().flush().expect("Failed to write buffered output to stdout"); io::stdin().read_line(&mut input).expect("Error reading input."); String::from(input.trim()) } fn clamp(val: isize, min_val: usize, max_val: usize) -> usize { cmp::max(cmp::min(max_val as isize - 1, val), min_val as isize) as usize } impl Brainfuck { fn new(code: Vec<FaustCmd>) -> Brainfuck { let jumps = Brainfuck::make_jumptable(&code); Brainfuck { code: code, code_pos: 0, jumps: jumps, tape: vec![0; SIZE], tape_pos: 0, } } #[inline] fn interpret(&mut self) { let mut in_buffer = String::from(""); while self.code_pos < self.code.len() { if let Some(c) = self.code.iter().nth(self.code_pos) { match c.clone() { // Basic Commands FaustCmd::Repeatable(cmd, n) => match cmd.clone() { BasicCmd::Add => self.tape[self.tape_pos] = (self.tape[self.tape_pos] as u8).wrapping_add(n as u8), BasicCmd::Sub => self.tape[self.tape_pos] = (self.tape[self.tape_pos] as u8).wrapping_sub(n as u8), BasicCmd::Skip => self.tape_pos = clamp(self.tape_pos as isize + n as isize, 0, SIZE), BasicCmd::Rewind => self.tape_pos = clamp(self.tape_pos as isize - n as isize, 0, SIZE), }, FaustCmd::Clear => self.tape[self.tape_pos] = 0, // Input/Output FaustCmd::Input => { if in_buffer.len() <= 0 { in_buffer += &get_input_line(); }; if in_buffer.len() > 0 { if let Some(chr) = in_buffer.chars().nth(0) { in_buffer.remove(0); self.tape[self.tape_pos] = chr as u8; } else { panic!("Exited by user"); } }; }, FaustCmd::Output => { print!("{}", self.tape[self.tape_pos] as u8 as char); stdout().flush().expect("Failed to write buffered output to stdout"); }, // Loop/Branch Commands FaustCmd::JumpEqualZero => if self.tape[self.tape_pos] == 0 { self.code_pos = 
self.jumps[self.code_pos]; }, FaustCmd::JumpNotZero => if self.tape[self.tape_pos] != 0 { self.code_pos = self.jumps[self.code_pos]; }, _ => {}, }; } self.code_pos += 1; } print!("\n"); } fn make_jumptable(code: &Vec<FaustCmd>) -> Vec<usize> { let mut stack = Vec::new(); let mut jumps = vec![0usize; code.len()]; for (i, c) in code.iter().enumerate() { match *c { FaustCmd::JumpEqualZero => stack.push(i), FaustCmd::JumpNotZero => if let Some(left) = stack.pop() { let right = i; jumps[left] = right; jumps[right] = left; }, _ => continue, } } if stack.len() != 0 { panic!("Not enough left brackets in brainfuck code!") } jumps } } pub fn test(code: &String) { let mut bf = Brainfuck::new(brainfuck::vanilla_brainfuck(code)); let exec_time = benchmark! {{ bf.interpret(); }}; println!("Time taken {} ms", exec_time); } Revised decoding order use frontend::*; use std::cmp; use std::io::{self, Write, stdout}; const SIZE: usize = 30000; struct Brainfuck { code: Vec<FaustCmd>, code_pos: usize, jumps: Vec<usize>, tape: Vec<u8>, tape_pos: usize, } fn get_input_line() -> String { let mut input = String::new(); print!("\ninput> "); stdout().flush().expect("Failed to write buffered output to stdout"); io::stdin().read_line(&mut input).expect("Error reading input."); String::from(input.trim()) } fn clamp(val: isize, min_val: usize, max_val: usize) -> usize { cmp::max(cmp::min(max_val as isize - 1, val), min_val as isize) as usize } impl Brainfuck { fn new(code: Vec<FaustCmd>) -> Brainfuck { let jumps = Brainfuck::make_jumptable(&code); Brainfuck { code: code, code_pos: 0, jumps: jumps, tape: vec![0; SIZE], tape_pos: 0, } } #[inline] fn interpret(&mut self) { let mut in_buffer = String::from(""); while self.code_pos < self.code.len() { if let Some(c) = self.code.iter().nth(self.code_pos) { match c.clone() { // Loop/Branch Commands FaustCmd::JumpEqualZero => if self.tape[self.tape_pos] == 0 { self.code_pos = self.jumps[self.code_pos]; }, FaustCmd::JumpNotZero => if 
self.tape[self.tape_pos] != 0 { self.code_pos = self.jumps[self.code_pos]; }, // Basic Commands FaustCmd::Repeatable(cmd, n) => match cmd.clone() { BasicCmd::Add => self.tape[self.tape_pos] = (self.tape[self.tape_pos] as u8).wrapping_add(n as u8), BasicCmd::Sub => self.tape[self.tape_pos] = (self.tape[self.tape_pos] as u8).wrapping_sub(n as u8), BasicCmd::Skip => self.tape_pos = clamp(self.tape_pos as isize + n as isize, 0, SIZE), BasicCmd::Rewind => self.tape_pos = clamp(self.tape_pos as isize - n as isize, 0, SIZE), }, FaustCmd::Clear => self.tape[self.tape_pos] = 0, // Input/Output FaustCmd::Input => { if in_buffer.len() <= 0 { in_buffer += &get_input_line(); }; if in_buffer.len() > 0 { if let Some(chr) = in_buffer.chars().nth(0) { in_buffer.remove(0); self.tape[self.tape_pos] = chr as u8; } else { panic!("Exited by user"); } }; }, FaustCmd::Output => { print!("{}", self.tape[self.tape_pos] as u8 as char); stdout().flush().expect("Failed to write buffered output to stdout"); }, _ => {}, }; } self.code_pos += 1; } print!("\n"); } fn make_jumptable(code: &Vec<FaustCmd>) -> Vec<usize> { let mut stack = Vec::new(); let mut jumps = vec![0usize; code.len()]; for (i, c) in code.iter().enumerate() { match *c { FaustCmd::JumpEqualZero => stack.push(i), FaustCmd::JumpNotZero => if let Some(left) = stack.pop() { let right = i; jumps[left] = right; jumps[right] = left; }, _ => continue, } } if stack.len() != 0 { panic!("Not enough left brackets in brainfuck code!") } jumps } } pub fn test(code: &String) { let mut bf = Brainfuck::new(brainfuck::vanilla_brainfuck(code)); let exec_time = benchmark! {{ bf.interpret(); }}; println!("Time taken {} ms", exec_time); }
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A logger configured via an environment variable which writes to standard //! error. //! //! ## Example //! //! ``` //! #[macro_use] extern crate log; //! extern crate env_logger; //! //! use log::LogLevel; //! //! fn main() { //! env_logger::init().unwrap(); //! //! debug!("this is a debug {}", "message"); //! error!("this is printed by default"); //! //! if log_enabled!(LogLevel::Info) { //! let x = 3 * 4; // expensive computation //! info!("the answer was: {}", x); //! } //! } //! ``` //! //! Assumes the binary is `main`: //! //! ```{.bash} //! $ RUST_LOG=error ./main //! ERROR:main: this is printed by default //! ``` //! //! ```{.bash} //! $ RUST_LOG=info ./main //! ERROR:main: this is printed by default //! INFO:main: the answer was: 12 //! ``` //! //! ```{.bash} //! $ RUST_LOG=debug ./main //! DEBUG:main: this is a debug message //! ERROR:main: this is printed by default //! INFO:main: the answer was: 12 //! ``` //! //! You can also set the log level on a per module basis: //! //! ```{.bash} //! $ RUST_LOG=main=info ./main //! ERROR:main: this is printed by default //! INFO:main: the answer was: 12 //! ``` //! //! And enable all logging: //! //! ```{.bash} //! $ RUST_LOG=main ./main //! DEBUG:main: this is a debug message //! ERROR:main: this is printed by default //! INFO:main: the answer was: 12 //! ``` //! //! See the documentation for the log crate for more information about its API. //! //! ## Enabling logging //! //! 
Log levels are controlled on a per-module basis, and by default all logging //! is disabled except for `error!`. Logging is controlled via the `RUST_LOG` //! environment variable. The value of this environment variable is a //! comma-separated list of logging directives. A logging directive is of the //! form: //! //! ```text //! path::to::module=log_level //! ``` //! //! The path to the module is rooted in the name of the crate it was compiled //! for, so if your program is contained in a file `hello.rs`, for example, to //! turn on logging for this file you would use a value of `RUST_LOG=hello`. //! Furthermore, this path is a prefix-search, so all modules nested in the //! specified module will also have logging enabled. //! //! The actual `log_level` is optional to specify. If omitted, all logging will //! be enabled. If specified, it must be one of the strings `debug`, `error`, //! `info`, `warn`, or `trace`. //! //! As the log level for a module is optional, the module to enable logging for //! is also optional. If only a `log_level` is provided, then the global log //! level for all modules is set to this value. //! //! Some examples of valid values of `RUST_LOG` are: //! //! * `hello` turns on all logging for the 'hello' module //! * `info` turns on all info logging //! * `hello=debug` turns on debug logging for 'hello' //! * `hello,std::option` turns on hello, and std's option logging //! * `error,hello=warn` turn on global error logging and also warn for hello //! //! ## Filtering results //! //! A RUST_LOG directive may include a regex filter. The syntax is to append `/` //! followed by a regex. Each message is checked against the regex, and is only //! logged if it matches. Note that the matching is done after formatting the //! log string but before adding any logging meta-data. There is a single filter //! for all modules. //! //! Some examples: //! //! * `hello/foo` turns on all logging for the 'hello' module where the log //! message includes 'foo'. 
//! * `info/f.o` turns on all info logging where the log message includes 'foo', //! 'f1o', 'fao', etc. //! * `hello=debug/foo*foo` turns on debug logging for 'hello' where the log //! message includes 'foofoo' or 'fofoo' or 'fooooooofoo', etc. //! * `error,hello=warn/[0-9] scopes` turn on global error logging and also //! warn for hello. In both cases the log message must include a single digit //! number followed by 'scopes'. #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "http://www.rust-lang.org/favicon.ico", html_root_url = "http://doc.rust-lang.org/env_logger/")] #![cfg_attr(test, deny(warnings))] extern crate regex; extern crate log; use regex::Regex; use std::env; use std::io::prelude::*; use std::io; use std::mem; use log::{Log, LogLevel, LogLevelFilter, LogRecord, SetLoggerError, LogMetadata}; struct Logger { directives: Vec<LogDirective>, filter: Option<Regex>, format: Box<Fn(&LogRecord) -> String + Sync + Send>, } /// LogBuilder acts as builder for initializing the Logger. /// It can be used to customize the log format, change the enviromental variable used /// to provide the logging directives and also set the default log level filter. 
/// /// ## Example /// /// ``` /// #[macro_use] /// extern crate log; /// extern crate env_logger; /// /// use std::env; /// use log::{LogRecord, LogLevelFilter}; /// use env_logger::LogBuilder; /// /// fn main() { /// let format = |record: &LogRecord| { /// format!("{} - {}", record.level(), record.args()) /// }; /// /// let mut builder = LogBuilder::new(); /// builder.format(format).filter(None, LogLevelFilter::Info); /// /// if env::var("RUST_LOG").is_ok() { /// builder.parse(&env::var("RUST_LOG").unwrap()); /// } /// /// builder.init().unwrap(); /// /// error!("error message"); /// info!("info message"); /// } /// ``` pub struct LogBuilder { directives: Vec<LogDirective>, filter: Option<Regex>, format: Box<Fn(&LogRecord) -> String + Sync + Send>, } impl LogBuilder { /// Initializes the log builder with defaults pub fn new() -> LogBuilder { LogBuilder { directives: Vec::new(), filter: None, format: Box::new(|record: &LogRecord| { format!("{}:{}: {}", record.level(), record.location().module_path(), record.args()) }), } } /// Adds filters to the logger /// /// The given module (if any) will log at most the specified level provided. /// If no module is provided then the filter will apply to all log messages. pub fn filter(&mut self, module: Option<&str>, level: LogLevelFilter) -> &mut Self { self.directives.push(LogDirective { name: module.map(|s| s.to_string()), level: level, }); self } /// Sets the format function for formatting the log output. /// /// This function is called on each record logged to produce a string which /// is actually printed out. pub fn format<F: 'static>(&mut self, format: F) -> &mut Self where F: Fn(&LogRecord) -> String + Sync + Send { self.format = Box::new(format); self } /// Parses the directives string in the same form as the RUST_LOG /// environment variable. /// /// See the module documentation for more details. 
pub fn parse(&mut self, filters: &str) -> &mut Self { let (directives, filter) = parse_logging_spec(filters); self.filter = filter; for directive in directives { self.directives.push(directive); } self } /// Initializes the global logger with an env logger. /// /// This should be called early in the execution of a Rust program, and the /// global logger may only be initialized once. Future initialization /// attempts will return an error. pub fn init(&mut self) -> Result<(), SetLoggerError> { if self.directives.is_empty() { // Adds the default filter if none exist self.directives.push(LogDirective { name: None, level: LogLevelFilter::Error, }); } else { // Sort the directives by length of their name, this allows a // little more efficient lookup at runtime. self.directives.sort_by(|a, b| { let alen = a.name.as_ref().map(|a| a.len()).unwrap_or(0); let blen = b.name.as_ref().map(|b| b.len()).unwrap_or(0); alen.cmp(&blen) }); } log::set_logger(|max_level| { let level = { let max = self.directives.iter().map(|d| d.level).max(); max.unwrap_or(LogLevelFilter::Off) }; max_level.set(level); Box::new(self.build()) }) } fn build(&mut self) -> Logger { Logger { directives: mem::replace(&mut self.directives, Vec::new()), filter: mem::replace(&mut self.filter, None), format: mem::replace(&mut self.format, Box::new(|_| String::new())), } } } impl Logger { fn enabled(&self, level: LogLevel, target: &str) -> bool { // Search for the longest match, the vector is assumed to be pre-sorted. for directive in self.directives.iter().rev() { match directive.name { Some(ref name) if !target.starts_with(&**name) => {}, Some(..) 
| None => { return level <= directive.level } } } false } } impl Log for Logger { fn enabled(&self, metadata: &LogMetadata) -> bool { self.enabled(metadata.level(), metadata.target()) } fn log(&self, record: &LogRecord) { if !Log::enabled(self, record.metadata()) { return; } if let Some(filter) = self.filter.as_ref() { if filter.is_match(&*record.args().to_string()) { return; } } let _ = writeln!(&mut io::stderr(), "{}", (self.format)(record)); } } struct LogDirective { name: Option<String>, level: LogLevelFilter, } /// Initializes the global logger with an env logger. /// /// This should be called early in the execution of a Rust program, and the /// global logger may only be initialized once. Future initialization attempts /// will return an error. pub fn init() -> Result<(), SetLoggerError> { let mut builder = LogBuilder::new(); if let Ok(s) = env::var("RUST_LOG") { builder.parse(&s); } builder.init() } /// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=error/foo") /// and return a vector with log directives. 
fn parse_logging_spec(spec: &str) -> (Vec<LogDirective>, Option<Regex>) { let mut dirs = Vec::new(); let mut parts = spec.split('/'); let mods = parts.next(); let filter = parts.next(); if parts.next().is_some() { println!("warning: invalid logging spec '{}', \ ignoring it (too many '/'s)", spec); return (dirs, None); } mods.map(|m| { for s in m.split(',') { if s.len() == 0 { continue } let mut parts = s.split('='); let (log_level, name) = match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) { (Some(part0), None, None) => { // if the single argument is a log-level string or number, // treat that as a global fallback match part0.parse() { Ok(num) => (num, None), Err(_) => (LogLevelFilter::max(), Some(part0)), } } (Some(part0), Some(""), None) => (LogLevelFilter::max(), Some(part0)), (Some(part0), Some(part1), None) => { match part1.parse() { Ok(num) => (num, Some(part0)), _ => { println!("warning: invalid logging spec '{}', \ ignoring it", part1); continue } } }, _ => { println!("warning: invalid logging spec '{}', \ ignoring it", s); continue } }; dirs.push(LogDirective { name: name.map(|s| s.to_string()), level: log_level, }); }}); let filter = filter.map_or(None, |filter| { match Regex::new(filter) { Ok(re) => Some(re), Err(e) => { println!("warning: invalid regex filter - {}", e); None } } }); return (dirs, filter); } #[cfg(test)] mod tests { use log::{Log, LogLevel, LogLevelFilter}; use super::{LogBuilder, Logger, LogDirective, parse_logging_spec}; fn make_logger(dirs: Vec<LogDirective>) -> Logger { let mut logger = LogBuilder::new().build(); logger.directives = dirs; logger } #[test] fn filter_info() { let logger = LogBuilder::new().filter(None, LogLevelFilter::Info).build(); assert!(logger.enabled(LogLevel::Info, "crate1")); assert!(!logger.enabled(LogLevel::Debug, "crate1")); } #[test] fn filter_beginning_longest_match() { let logger = LogBuilder::new() .filter(Some("crate2"), LogLevelFilter::Info) .filter(Some("crate2::mod"), 
LogLevelFilter::Debug) .filter(Some("crate1::mod1"), LogLevelFilter::Warn) .build(); assert!(logger.enabled(LogLevel::Debug, "crate2::mod1")); assert!(!logger.enabled(LogLevel::Debug, "crate2")); } #[test] fn parse_default() { let logger = LogBuilder::new().parse("info,crate1::mod1=warn").build(); assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); } #[test] fn match_full_path() { let logger = make_logger(vec![ LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } ]); assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); assert!(!logger.enabled(LogLevel::Info, "crate1::mod1")); assert!(logger.enabled(LogLevel::Info, "crate2")); assert!(!logger.enabled(LogLevel::Debug, "crate2")); } #[test] fn no_match() { let logger = make_logger(vec![ LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } ]); assert!(!logger.enabled(LogLevel::Warn, "crate3")); } #[test] fn match_beginning() { let logger = make_logger(vec![ LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } ]); assert!(logger.enabled(LogLevel::Info, "crate2::mod1")); } #[test] fn match_beginning_longest_match() { let logger = make_logger(vec![ LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, LogDirective { name: Some("crate2::mod".to_string()), level: LogLevelFilter::Debug }, LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } ]); assert!(logger.enabled(LogLevel::Debug, "crate2::mod1")); assert!(!logger.enabled(LogLevel::Debug, "crate2")); } #[test] fn match_default() { let logger = make_logger(vec![ LogDirective { name: None, level: LogLevelFilter::Info }, 
LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } ]); assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); } #[test] fn zero_level() { let logger = make_logger(vec![ LogDirective { name: None, level: LogLevelFilter::Info }, LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Off } ]); assert!(!logger.enabled(LogLevel::Error, "crate1::mod1")); assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); } #[test] fn parse_logging_spec_valid() { let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug"); assert_eq!(dirs.len(), 3); assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::Error); assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); assert_eq!(dirs[1].level, LogLevelFilter::max()); assert_eq!(dirs[2].name, Some("crate2".to_string())); assert_eq!(dirs[2].level, LogLevelFilter::Debug); assert!(filter.is_none()); } #[test] fn parse_logging_spec_invalid_crate() { // test parse_logging_spec with multiple = in specification let (dirs, filter) = parse_logging_spec("crate1::mod1=warn=info,crate2=debug"); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate2".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::Debug); assert!(filter.is_none()); } #[test] fn parse_logging_spec_invalid_log_level() { // test parse_logging_spec with 'noNumber' as log level let (dirs, filter) = parse_logging_spec("crate1::mod1=noNumber,crate2=debug"); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate2".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::Debug); assert!(filter.is_none()); } #[test] fn parse_logging_spec_string_log_level() { // test parse_logging_spec with 'warn' as log level let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2=warn"); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate2".to_string())); 
assert_eq!(dirs[0].level, LogLevelFilter::Warn); assert!(filter.is_none()); } #[test] fn parse_logging_spec_empty_log_level() { // test parse_logging_spec with '' as log level let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2="); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate2".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::max()); assert!(filter.is_none()); } #[test] fn parse_logging_spec_global() { // test parse_logging_spec with no crate let (dirs, filter) = parse_logging_spec("warn,crate2=debug"); assert_eq!(dirs.len(), 2); assert_eq!(dirs[0].name, None); assert_eq!(dirs[0].level, LogLevelFilter::Warn); assert_eq!(dirs[1].name, Some("crate2".to_string())); assert_eq!(dirs[1].level, LogLevelFilter::Debug); assert!(filter.is_none()); } #[test] fn parse_logging_spec_valid_filter() { let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug/abc"); assert_eq!(dirs.len(), 3); assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::Error); assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); assert_eq!(dirs[1].level, LogLevelFilter::max()); assert_eq!(dirs[2].name, Some("crate2".to_string())); assert_eq!(dirs[2].level, LogLevelFilter::Debug); assert!(filter.is_some() && filter.unwrap().to_string() == "abc"); } #[test] fn parse_logging_spec_invalid_crate_filter() { let (dirs, filter) = parse_logging_spec("crate1::mod1=error=warn,crate2=debug/a.c"); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate2".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::Debug); assert!(filter.is_some() && filter.unwrap().to_string() == "a.c"); } #[test] fn parse_logging_spec_empty_with_filter() { let (dirs, filter) = parse_logging_spec("crate1/a*c"); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate1".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::max()); assert!(filter.is_some() && filter.unwrap().to_string() == "a*c"); } 
} Fixed handling of filter regexps. Regexps in RUST_LOG as explained in the module documentation should only allow log messages that match the given expression and not the other way around. // Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A logger configured via an environment variable which writes to standard //! error. //! //! ## Example //! //! ``` //! #[macro_use] extern crate log; //! extern crate env_logger; //! //! use log::LogLevel; //! //! fn main() { //! env_logger::init().unwrap(); //! //! debug!("this is a debug {}", "message"); //! error!("this is printed by default"); //! //! if log_enabled!(LogLevel::Info) { //! let x = 3 * 4; // expensive computation //! info!("the answer was: {}", x); //! } //! } //! ``` //! //! Assumes the binary is `main`: //! //! ```{.bash} //! $ RUST_LOG=error ./main //! ERROR:main: this is printed by default //! ``` //! //! ```{.bash} //! $ RUST_LOG=info ./main //! ERROR:main: this is printed by default //! INFO:main: the answer was: 12 //! ``` //! //! ```{.bash} //! $ RUST_LOG=debug ./main //! DEBUG:main: this is a debug message //! ERROR:main: this is printed by default //! INFO:main: the answer was: 12 //! ``` //! //! You can also set the log level on a per module basis: //! //! ```{.bash} //! $ RUST_LOG=main=info ./main //! ERROR:main: this is printed by default //! INFO:main: the answer was: 12 //! ``` //! //! And enable all logging: //! //! ```{.bash} //! $ RUST_LOG=main ./main //! DEBUG:main: this is a debug message //! ERROR:main: this is printed by default //! INFO:main: the answer was: 12 //! 
``` //! //! See the documentation for the log crate for more information about its API. //! //! ## Enabling logging //! //! Log levels are controlled on a per-module basis, and by default all logging //! is disabled except for `error!`. Logging is controlled via the `RUST_LOG` //! environment variable. The value of this environment variable is a //! comma-separated list of logging directives. A logging directive is of the //! form: //! //! ```text //! path::to::module=log_level //! ``` //! //! The path to the module is rooted in the name of the crate it was compiled //! for, so if your program is contained in a file `hello.rs`, for example, to //! turn on logging for this file you would use a value of `RUST_LOG=hello`. //! Furthermore, this path is a prefix-search, so all modules nested in the //! specified module will also have logging enabled. //! //! The actual `log_level` is optional to specify. If omitted, all logging will //! be enabled. If specified, it must be one of the strings `debug`, `error`, //! `info`, `warn`, or `trace`. //! //! As the log level for a module is optional, the module to enable logging for //! is also optional. If only a `log_level` is provided, then the global log //! level for all modules is set to this value. //! //! Some examples of valid values of `RUST_LOG` are: //! //! * `hello` turns on all logging for the 'hello' module //! * `info` turns on all info logging //! * `hello=debug` turns on debug logging for 'hello' //! * `hello,std::option` turns on hello, and std's option logging //! * `error,hello=warn` turn on global error logging and also warn for hello //! //! ## Filtering results //! //! A RUST_LOG directive may include a regex filter. The syntax is to append `/` //! followed by a regex. Each message is checked against the regex, and is only //! logged if it matches. Note that the matching is done after formatting the //! log string but before adding any logging meta-data. There is a single filter //! for all modules. //! 
//! Some examples: //! //! * `hello/foo` turns on all logging for the 'hello' module where the log //! message includes 'foo'. //! * `info/f.o` turns on all info logging where the log message includes 'foo', //! 'f1o', 'fao', etc. //! * `hello=debug/foo*foo` turns on debug logging for 'hello' where the log //! message includes 'foofoo' or 'fofoo' or 'fooooooofoo', etc. //! * `error,hello=warn/[0-9] scopes` turn on global error logging and also //! warn for hello. In both cases the log message must include a single digit //! number followed by 'scopes'. #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "http://www.rust-lang.org/favicon.ico", html_root_url = "http://doc.rust-lang.org/env_logger/")] #![cfg_attr(test, deny(warnings))] extern crate regex; extern crate log; use regex::Regex; use std::env; use std::io::prelude::*; use std::io; use std::mem; use log::{Log, LogLevel, LogLevelFilter, LogRecord, SetLoggerError, LogMetadata}; struct Logger { directives: Vec<LogDirective>, filter: Option<Regex>, format: Box<Fn(&LogRecord) -> String + Sync + Send>, } /// LogBuilder acts as builder for initializing the Logger. /// It can be used to customize the log format, change the enviromental variable used /// to provide the logging directives and also set the default log level filter. 
/// /// ## Example /// /// ``` /// #[macro_use] /// extern crate log; /// extern crate env_logger; /// /// use std::env; /// use log::{LogRecord, LogLevelFilter}; /// use env_logger::LogBuilder; /// /// fn main() { /// let format = |record: &LogRecord| { /// format!("{} - {}", record.level(), record.args()) /// }; /// /// let mut builder = LogBuilder::new(); /// builder.format(format).filter(None, LogLevelFilter::Info); /// /// if env::var("RUST_LOG").is_ok() { /// builder.parse(&env::var("RUST_LOG").unwrap()); /// } /// /// builder.init().unwrap(); /// /// error!("error message"); /// info!("info message"); /// } /// ``` pub struct LogBuilder { directives: Vec<LogDirective>, filter: Option<Regex>, format: Box<Fn(&LogRecord) -> String + Sync + Send>, } impl LogBuilder { /// Initializes the log builder with defaults pub fn new() -> LogBuilder { LogBuilder { directives: Vec::new(), filter: None, format: Box::new(|record: &LogRecord| { format!("{}:{}: {}", record.level(), record.location().module_path(), record.args()) }), } } /// Adds filters to the logger /// /// The given module (if any) will log at most the specified level provided. /// If no module is provided then the filter will apply to all log messages. pub fn filter(&mut self, module: Option<&str>, level: LogLevelFilter) -> &mut Self { self.directives.push(LogDirective { name: module.map(|s| s.to_string()), level: level, }); self } /// Sets the format function for formatting the log output. /// /// This function is called on each record logged to produce a string which /// is actually printed out. pub fn format<F: 'static>(&mut self, format: F) -> &mut Self where F: Fn(&LogRecord) -> String + Sync + Send { self.format = Box::new(format); self } /// Parses the directives string in the same form as the RUST_LOG /// environment variable. /// /// See the module documentation for more details. 
pub fn parse(&mut self, filters: &str) -> &mut Self { let (directives, filter) = parse_logging_spec(filters); self.filter = filter; for directive in directives { self.directives.push(directive); } self } /// Initializes the global logger with an env logger. /// /// This should be called early in the execution of a Rust program, and the /// global logger may only be initialized once. Future initialization /// attempts will return an error. pub fn init(&mut self) -> Result<(), SetLoggerError> { if self.directives.is_empty() { // Adds the default filter if none exist self.directives.push(LogDirective { name: None, level: LogLevelFilter::Error, }); } else { // Sort the directives by length of their name, this allows a // little more efficient lookup at runtime. self.directives.sort_by(|a, b| { let alen = a.name.as_ref().map(|a| a.len()).unwrap_or(0); let blen = b.name.as_ref().map(|b| b.len()).unwrap_or(0); alen.cmp(&blen) }); } log::set_logger(|max_level| { let level = { let max = self.directives.iter().map(|d| d.level).max(); max.unwrap_or(LogLevelFilter::Off) }; max_level.set(level); Box::new(self.build()) }) } fn build(&mut self) -> Logger { Logger { directives: mem::replace(&mut self.directives, Vec::new()), filter: mem::replace(&mut self.filter, None), format: mem::replace(&mut self.format, Box::new(|_| String::new())), } } } impl Logger { fn enabled(&self, level: LogLevel, target: &str) -> bool { // Search for the longest match, the vector is assumed to be pre-sorted. for directive in self.directives.iter().rev() { match directive.name { Some(ref name) if !target.starts_with(&**name) => {}, Some(..) 
| None => { return level <= directive.level } } } false } } impl Log for Logger { fn enabled(&self, metadata: &LogMetadata) -> bool { self.enabled(metadata.level(), metadata.target()) } fn log(&self, record: &LogRecord) { if !Log::enabled(self, record.metadata()) { return; } if let Some(filter) = self.filter.as_ref() { if !filter.is_match(&*record.args().to_string()) { return; } } let _ = writeln!(&mut io::stderr(), "{}", (self.format)(record)); } } struct LogDirective { name: Option<String>, level: LogLevelFilter, } /// Initializes the global logger with an env logger. /// /// This should be called early in the execution of a Rust program, and the /// global logger may only be initialized once. Future initialization attempts /// will return an error. pub fn init() -> Result<(), SetLoggerError> { let mut builder = LogBuilder::new(); if let Ok(s) = env::var("RUST_LOG") { builder.parse(&s); } builder.init() } /// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=error/foo") /// and return a vector with log directives. 
fn parse_logging_spec(spec: &str) -> (Vec<LogDirective>, Option<Regex>) { let mut dirs = Vec::new(); let mut parts = spec.split('/'); let mods = parts.next(); let filter = parts.next(); if parts.next().is_some() { println!("warning: invalid logging spec '{}', \ ignoring it (too many '/'s)", spec); return (dirs, None); } mods.map(|m| { for s in m.split(',') { if s.len() == 0 { continue } let mut parts = s.split('='); let (log_level, name) = match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) { (Some(part0), None, None) => { // if the single argument is a log-level string or number, // treat that as a global fallback match part0.parse() { Ok(num) => (num, None), Err(_) => (LogLevelFilter::max(), Some(part0)), } } (Some(part0), Some(""), None) => (LogLevelFilter::max(), Some(part0)), (Some(part0), Some(part1), None) => { match part1.parse() { Ok(num) => (num, Some(part0)), _ => { println!("warning: invalid logging spec '{}', \ ignoring it", part1); continue } } }, _ => { println!("warning: invalid logging spec '{}', \ ignoring it", s); continue } }; dirs.push(LogDirective { name: name.map(|s| s.to_string()), level: log_level, }); }}); let filter = filter.map_or(None, |filter| { match Regex::new(filter) { Ok(re) => Some(re), Err(e) => { println!("warning: invalid regex filter - {}", e); None } } }); return (dirs, filter); } #[cfg(test)] mod tests { use log::{Log, LogLevel, LogLevelFilter}; use super::{LogBuilder, Logger, LogDirective, parse_logging_spec}; fn make_logger(dirs: Vec<LogDirective>) -> Logger { let mut logger = LogBuilder::new().build(); logger.directives = dirs; logger } #[test] fn filter_info() { let logger = LogBuilder::new().filter(None, LogLevelFilter::Info).build(); assert!(logger.enabled(LogLevel::Info, "crate1")); assert!(!logger.enabled(LogLevel::Debug, "crate1")); } #[test] fn filter_beginning_longest_match() { let logger = LogBuilder::new() .filter(Some("crate2"), LogLevelFilter::Info) .filter(Some("crate2::mod"), 
LogLevelFilter::Debug) .filter(Some("crate1::mod1"), LogLevelFilter::Warn) .build(); assert!(logger.enabled(LogLevel::Debug, "crate2::mod1")); assert!(!logger.enabled(LogLevel::Debug, "crate2")); } #[test] fn parse_default() { let logger = LogBuilder::new().parse("info,crate1::mod1=warn").build(); assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); } #[test] fn match_full_path() { let logger = make_logger(vec![ LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } ]); assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); assert!(!logger.enabled(LogLevel::Info, "crate1::mod1")); assert!(logger.enabled(LogLevel::Info, "crate2")); assert!(!logger.enabled(LogLevel::Debug, "crate2")); } #[test] fn no_match() { let logger = make_logger(vec![ LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } ]); assert!(!logger.enabled(LogLevel::Warn, "crate3")); } #[test] fn match_beginning() { let logger = make_logger(vec![ LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } ]); assert!(logger.enabled(LogLevel::Info, "crate2::mod1")); } #[test] fn match_beginning_longest_match() { let logger = make_logger(vec![ LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, LogDirective { name: Some("crate2::mod".to_string()), level: LogLevelFilter::Debug }, LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } ]); assert!(logger.enabled(LogLevel::Debug, "crate2::mod1")); assert!(!logger.enabled(LogLevel::Debug, "crate2")); } #[test] fn match_default() { let logger = make_logger(vec![ LogDirective { name: None, level: LogLevelFilter::Info }, 
LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } ]); assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); } #[test] fn zero_level() { let logger = make_logger(vec![ LogDirective { name: None, level: LogLevelFilter::Info }, LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Off } ]); assert!(!logger.enabled(LogLevel::Error, "crate1::mod1")); assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); } #[test] fn parse_logging_spec_valid() { let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug"); assert_eq!(dirs.len(), 3); assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::Error); assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); assert_eq!(dirs[1].level, LogLevelFilter::max()); assert_eq!(dirs[2].name, Some("crate2".to_string())); assert_eq!(dirs[2].level, LogLevelFilter::Debug); assert!(filter.is_none()); } #[test] fn parse_logging_spec_invalid_crate() { // test parse_logging_spec with multiple = in specification let (dirs, filter) = parse_logging_spec("crate1::mod1=warn=info,crate2=debug"); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate2".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::Debug); assert!(filter.is_none()); } #[test] fn parse_logging_spec_invalid_log_level() { // test parse_logging_spec with 'noNumber' as log level let (dirs, filter) = parse_logging_spec("crate1::mod1=noNumber,crate2=debug"); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate2".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::Debug); assert!(filter.is_none()); } #[test] fn parse_logging_spec_string_log_level() { // test parse_logging_spec with 'warn' as log level let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2=warn"); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate2".to_string())); 
assert_eq!(dirs[0].level, LogLevelFilter::Warn); assert!(filter.is_none()); } #[test] fn parse_logging_spec_empty_log_level() { // test parse_logging_spec with '' as log level let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2="); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate2".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::max()); assert!(filter.is_none()); } #[test] fn parse_logging_spec_global() { // test parse_logging_spec with no crate let (dirs, filter) = parse_logging_spec("warn,crate2=debug"); assert_eq!(dirs.len(), 2); assert_eq!(dirs[0].name, None); assert_eq!(dirs[0].level, LogLevelFilter::Warn); assert_eq!(dirs[1].name, Some("crate2".to_string())); assert_eq!(dirs[1].level, LogLevelFilter::Debug); assert!(filter.is_none()); } #[test] fn parse_logging_spec_valid_filter() { let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug/abc"); assert_eq!(dirs.len(), 3); assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::Error); assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); assert_eq!(dirs[1].level, LogLevelFilter::max()); assert_eq!(dirs[2].name, Some("crate2".to_string())); assert_eq!(dirs[2].level, LogLevelFilter::Debug); assert!(filter.is_some() && filter.unwrap().to_string() == "abc"); } #[test] fn parse_logging_spec_invalid_crate_filter() { let (dirs, filter) = parse_logging_spec("crate1::mod1=error=warn,crate2=debug/a.c"); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate2".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::Debug); assert!(filter.is_some() && filter.unwrap().to_string() == "a.c"); } #[test] fn parse_logging_spec_empty_with_filter() { let (dirs, filter) = parse_logging_spec("crate1/a*c"); assert_eq!(dirs.len(), 1); assert_eq!(dirs[0].name, Some("crate1".to_string())); assert_eq!(dirs[0].level, LogLevelFilter::max()); assert!(filter.is_some() && filter.unwrap().to_string() == "a*c"); } }
//! Tests for the `cargo fix` command. use cargo::core::Edition; use cargo_test_support::git; use cargo_test_support::paths::CargoPathExt; use cargo_test_support::registry::{Dependency, Package}; use cargo_test_support::tools; use cargo_test_support::{basic_manifest, is_nightly, project}; #[cargo_test] fn do_not_fix_broken_builds() { let p = project() .file( "src/lib.rs", r#" pub fn foo() { let mut x = 3; drop(x); } pub fn foo2() { let _x: u32 = "a"; } "#, ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_status(101) .with_stderr_contains("[ERROR] could not compile `foo`") .run(); assert!(p.read_file("src/lib.rs").contains("let mut x = 3;")); } #[cargo_test] fn fix_broken_if_requested() { let p = project() .file( "src/lib.rs", r#" fn foo(a: &u32) -> u32 { a + 1 } pub fn bar() { foo(1); } "#, ) .build(); p.cargo("fix --allow-no-vcs --broken-code") .env("__CARGO_FIX_YOLO", "1") .run(); } #[cargo_test] fn broken_fixes_backed_out() { // This works as follows: // - Create a `rustc` shim (the "foo" project) which will pretend that the // verification step fails. // - There is an empty build script so `foo` has `OUT_DIR` to track the steps. // - The first "check", `foo` creates a file in OUT_DIR, and it completes // successfully with a warning diagnostic to remove unused `mut`. // - rustfix removes the `mut`. // - The second "check" to verify the changes, `foo` swaps out the content // with something that fails to compile. It creates a second file so it // won't do anything in the third check. // - cargo fix discovers that the fix failed, and it backs out the changes. // - The third "check" is done to display the original diagnostics of the // original code. 
let p = project() .file( "foo/Cargo.toml", r#" [package] name = 'foo' version = '0.1.0' [workspace] "#, ) .file( "foo/src/main.rs", r#" use std::env; use std::fs; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::{self, Command}; fn main() { // Ignore calls to things like --print=file-names and compiling build.rs. let is_lib_rs = env::args_os() .map(PathBuf::from) .any(|l| l == Path::new("src/lib.rs")); if is_lib_rs { let path = PathBuf::from(env::var_os("OUT_DIR").unwrap()); let first = path.join("first"); let second = path.join("second"); if first.exists() && !second.exists() { fs::write("src/lib.rs", b"not rust code").unwrap(); fs::File::create(&second).unwrap(); } else { fs::File::create(&first).unwrap(); } } let status = Command::new("rustc") .args(env::args().skip(1)) .status() .expect("failed to run rustc"); process::exit(status.code().unwrap_or(2)); } "#, ) .file( "bar/Cargo.toml", r#" [package] name = 'bar' version = '0.1.0' [workspace] "#, ) .file("bar/build.rs", "fn main() {}") .file( "bar/src/lib.rs", r#" pub fn foo() { let mut x = 3; drop(x); } "#, ) .build(); // Build our rustc shim p.cargo("build").cwd("foo").run(); // Attempt to fix code, but our shim will always fail the second compile p.cargo("fix --allow-no-vcs --lib") .cwd("bar") .env("__CARGO_FIX_YOLO", "1") .env("RUSTC", p.root().join("foo/target/debug/foo")) .with_stderr_contains( "warning: failed to automatically apply fixes suggested by rustc \ to crate `bar`\n\ \n\ after fixes were automatically applied the compiler reported \ errors within these files:\n\ \n \ * src/lib.rs\n\ \n\ This likely indicates a bug in either rustc or cargo itself,\n\ and we would appreciate a bug report! You're likely to see \n\ a number of compiler warnings after this message which cargo\n\ attempted to fix but failed. 
If you could open an issue at\n\ [..]\n\ quoting the full output of this command we'd be very appreciative!\n\ Note that you may be able to make some more progress in the near-term\n\ fixing code with the `--broken-code` flag\n\ \n\ The following errors were reported:\n\ error: expected one of `!` or `::`, found `rust`\n\ ", ) .with_stderr_contains("Original diagnostics will follow.") .with_stderr_contains("[WARNING] variable does not need to be mutable") .with_stderr_does_not_contain("[..][FIXED][..]") .run(); // Make sure the fix which should have been applied was backed out assert!(p.read_file("bar/src/lib.rs").contains("let mut x = 3;")); } #[cargo_test] fn fix_path_deps() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = { path = 'bar' } [workspace] "#, ) .file( "src/lib.rs", r#" extern crate bar; pub fn foo() -> u32 { let mut x = 3; x } "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file( "bar/src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); p.cargo("fix --allow-no-vcs -p foo -p bar") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr_unordered( "\ [CHECKING] bar v0.1.0 ([..]) [FIXED] bar/src/lib.rs (1 fix) [CHECKING] foo v0.1.0 ([..]) [FIXED] src/lib.rs (1 fix) [FINISHED] [..] 
", ) .run(); } #[cargo_test] fn do_not_fix_non_relevant_deps() { let p = project() .no_manifest() .file( "foo/Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = { path = '../bar' } [workspace] "#, ) .file("foo/src/lib.rs", "") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file( "bar/src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .cwd("foo") .run(); assert!(p.read_file("bar/src/lib.rs").contains("mut")); } #[cargo_test] fn prepare_for_2018() { let p = project() .file( "src/lib.rs", r#" #![allow(unused)] mod foo { pub const FOO: &str = "fooo"; } mod bar { use ::foo::FOO; } fn main() { let x = ::foo::FOO; } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [MIGRATING] src/lib.rs from 2015 edition to 2018 [FIXED] src/lib.rs (2 fixes) [FINISHED] [..] "; p.cargo("fix --edition --allow-no-vcs") .with_stderr(stderr) .with_stdout("") .run(); println!("{}", p.read_file("src/lib.rs")); assert!(p.read_file("src/lib.rs").contains("use crate::foo::FOO;")); assert!(p .read_file("src/lib.rs") .contains("let x = crate::foo::FOO;")); } #[cargo_test] fn local_paths() { let p = project() .file( "src/lib.rs", r#" use test::foo; mod test { pub fn foo() {} } pub fn f() { foo(); } "#, ) .build(); p.cargo("fix --edition --allow-no-vcs") .with_stderr( "\ [CHECKING] foo v0.0.1 ([..]) [MIGRATING] src/lib.rs from 2015 edition to 2018 [FIXED] src/lib.rs (1 fix) [FINISHED] [..] 
", ) .with_stdout("") .run(); println!("{}", p.read_file("src/lib.rs")); assert!(p.read_file("src/lib.rs").contains("use crate::test::foo;")); } #[cargo_test] fn upgrade_extern_crate() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" edition = '2018' [workspace] [dependencies] bar = { path = 'bar' } "#, ) .file( "src/lib.rs", r#" #![warn(rust_2018_idioms)] extern crate bar; use bar::bar; pub fn foo() { ::bar::bar(); bar(); } "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .build(); let stderr = "\ [CHECKING] bar v0.1.0 ([..]) [CHECKING] foo v0.1.0 ([..]) [FIXED] src/lib.rs (1 fix) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); println!("{}", p.read_file("src/lib.rs")); assert!(!p.read_file("src/lib.rs").contains("extern crate")); } #[cargo_test] fn specify_rustflags() { let p = project() .file( "src/lib.rs", r#" #![allow(unused)] mod foo { pub const FOO: &str = "fooo"; } fn main() { let x = ::foo::FOO; } "#, ) .build(); p.cargo("fix --edition --allow-no-vcs") .env("RUSTFLAGS", "-C linker=cc") .with_stderr( "\ [CHECKING] foo v0.0.1 ([..]) [MIGRATING] src/lib.rs from 2015 edition to 2018 [FIXED] src/lib.rs (1 fix) [FINISHED] [..] ", ) .with_stdout("") .run(); } #[cargo_test] fn no_changes_necessary() { let p = project().file("src/lib.rs", "").build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn fixes_extra_mut() { let p = project() .file( "src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXED] src/lib.rs (1 fix) [FINISHED] [..] 
"; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn fixes_two_missing_ampersands() { let p = project() .file( "src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; let mut y = 3; x + y } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXED] src/lib.rs (2 fixes) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn tricky() { let p = project() .file( "src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; let mut y = 3; x + y } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXED] src/lib.rs (2 fixes) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn preserve_line_endings() { let p = project() .file( "src/lib.rs", "fn add(a: &u32) -> u32 { a + 1 }\r\n\ pub fn foo() -> u32 { let mut x = 3; add(&x) }\r\n\ ", ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); assert!(p.read_file("src/lib.rs").contains("\r\n")); } #[cargo_test] fn fix_deny_warnings() { let p = project() .file( "src/lib.rs", "#![deny(warnings)] pub fn foo() { let mut x = 3; drop(x); } ", ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); } #[cargo_test] fn fix_deny_warnings_but_not_others() { let p = project() .file( "src/lib.rs", " #![deny(unused_mut)] pub fn foo() -> u32 { let mut x = 3; x } pub fn bar() { #[allow(unused_mut)] let mut _y = 4; } ", ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); assert!(!p.read_file("src/lib.rs").contains("let mut x = 3;")); assert!(p.read_file("src/lib.rs").contains("let mut _y = 4;")); } #[cargo_test] fn fix_two_files() { let p = project() .file( "src/lib.rs", " pub mod bar; pub fn foo() -> u32 { let mut x = 3; x } ", ) .file( "src/bar.rs", " pub fn foo() -> u32 { let mut x = 3; x } ", ) 
.build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr_contains("[FIXED] src/bar.rs (1 fix)") .with_stderr_contains("[FIXED] src/lib.rs (1 fix)") .run(); assert!(!p.read_file("src/lib.rs").contains("let mut x = 3;")); assert!(!p.read_file("src/bar.rs").contains("let mut x = 3;")); } #[cargo_test] fn fixes_missing_ampersand() { let p = project() .file("src/main.rs", "fn main() { let mut x = 3; drop(x); }") .file( "src/lib.rs", r#" pub fn foo() { let mut x = 3; drop(x); } #[test] pub fn foo2() { let mut x = 3; drop(x); } "#, ) .file( "tests/a.rs", r#" #[test] pub fn foo() { let mut x = 3; drop(x); } "#, ) .file("examples/foo.rs", "fn main() { let mut x = 3; drop(x); }") .file("build.rs", "fn main() { let mut x = 3; drop(x); }") .build(); p.cargo("fix --all-targets --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr_contains("[COMPILING] foo v0.0.1 ([..])") .with_stderr_contains("[FIXED] build.rs (1 fix)") // Don't assert number of fixes for this one, as we don't know if we're // fixing it once or twice! We run this all concurrently, and if we // compile (and fix) in `--test` mode first, we get two fixes. Otherwise // we'll fix one non-test thing, and then fix another one later in // test mode. 
.with_stderr_contains("[FIXED] src/lib.rs[..]") .with_stderr_contains("[FIXED] src/main.rs (1 fix)") .with_stderr_contains("[FIXED] examples/foo.rs (1 fix)") .with_stderr_contains("[FIXED] tests/a.rs (1 fix)") .with_stderr_contains("[FINISHED] [..]") .run(); p.cargo("build").run(); p.cargo("test").run(); } #[cargo_test] fn fix_features() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [features] bar = [] [workspace] "#, ) .file( "src/lib.rs", r#" #[cfg(feature = "bar")] pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); p.cargo("fix --allow-no-vcs").run(); p.cargo("build").run(); p.cargo("fix --features bar --allow-no-vcs").run(); p.cargo("build --features bar").run(); } #[cargo_test] fn shows_warnings() { let p = project() .file( "src/lib.rs", "#[deprecated] fn bar() {} pub fn foo() { let _ = bar(); }", ) .build(); p.cargo("fix --allow-no-vcs") .with_stderr_contains("[..]warning: use of deprecated[..]") .run(); } #[cargo_test] fn warns_if_no_vcs_detected() { let p = project().file("src/lib.rs", "pub fn foo() {}").build(); p.cargo("fix") .with_status(101) .with_stderr( "error: no VCS found for this package and `cargo fix` can potentially perform \ destructive changes; if you'd like to suppress this error pass `--allow-no-vcs`\ ", ) .run(); p.cargo("fix --allow-no-vcs").run(); } #[cargo_test] fn warns_about_dirty_working_directory() { let p = git::new("foo", |p| p.file("src/lib.rs", "pub fn foo() {}")); p.change_file("src/lib.rs", ""); p.cargo("fix") .with_status(101) .with_stderr( "\ error: the working directory of this package has uncommitted changes, \ and `cargo fix` can potentially perform destructive changes; if you'd \ like to suppress this error pass `--allow-dirty`, `--allow-staged`, or \ commit the changes to these files: * src/lib.rs (dirty) ", ) .run(); p.cargo("fix --allow-dirty").run(); } #[cargo_test] fn warns_about_staged_working_directory() { let (p, repo) = git::new_repo("foo", |p| p.file("src/lib.rs", 
"pub fn foo() {}")); p.change_file("src/lib.rs", "pub fn bar() {}"); git::add(&repo); p.cargo("fix") .with_status(101) .with_stderr( "\ error: the working directory of this package has uncommitted changes, \ and `cargo fix` can potentially perform destructive changes; if you'd \ like to suppress this error pass `--allow-dirty`, `--allow-staged`, or \ commit the changes to these files: * src/lib.rs (staged) ", ) .run(); p.cargo("fix --allow-staged").run(); } #[cargo_test] fn does_not_warn_about_clean_working_directory() { let p = git::new("foo", |p| p.file("src/lib.rs", "pub fn foo() {}")); p.cargo("fix").run(); } #[cargo_test] fn does_not_warn_about_dirty_ignored_files() { let p = git::new("foo", |p| { p.file("src/lib.rs", "pub fn foo() {}") .file(".gitignore", "bar\n") }); p.change_file("bar", ""); p.cargo("fix").run(); } #[cargo_test] fn fix_all_targets_by_default() { let p = project() .file("src/lib.rs", "pub fn foo() { let mut x = 3; drop(x); }") .file("tests/foo.rs", "pub fn foo() { let mut x = 3; drop(x); }") .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); assert!(!p.read_file("src/lib.rs").contains("let mut x")); assert!(!p.read_file("tests/foo.rs").contains("let mut x")); } #[cargo_test] fn prepare_for_unstable() { // During the period where a new edition is coming up, but not yet stable, // this test will verify that it cannot be migrated to on stable. If there // is no next edition, it does nothing. let next = match Edition::LATEST_UNSTABLE { Some(next) => next, None => { eprintln!("Next edition is currently not available, skipping test."); return; } }; let latest_stable = Edition::LATEST_STABLE; let p = project() .file( "Cargo.toml", &format!( r#" [package] name = "foo" version = "0.1.0" edition = "{}" "#, latest_stable ), ) .file("src/lib.rs", "") .build(); // -j1 to make the error more deterministic (otherwise there can be // multiple errors since they run in parallel). 
p.cargo("fix --edition --allow-no-vcs -j1") .with_status(101) .with_stderr(&format!("\ [CHECKING] foo [..] [ERROR] cannot migrate src/lib.rs to edition {next} Edition {next} is unstable and not allowed in this release, consider trying the nightly release channel. error: could not compile `foo` To learn more, run the command again with --verbose. ", next=next)) .run(); if !is_nightly() { // The rest of this test is fundamentally always nightly. return; } p.cargo("fix --edition --allow-no-vcs") .masquerade_as_nightly_cargo() .with_stderr(&format!( "\ [CHECKING] foo [..] [MIGRATING] src/lib.rs from {latest_stable} edition to {next} [FINISHED] [..] ", latest_stable = latest_stable, next = next, )) .run(); } #[cargo_test] fn prepare_for_latest_stable() { // This is the stable counterpart of prepare_for_unstable. let latest_stable = Edition::LATEST_STABLE; let previous = latest_stable.previous().unwrap(); let p = project() .file( "Cargo.toml", &format!( r#" [package] name = 'foo' version = '0.1.0' edition = '{}' "#, previous ), ) .file("src/lib.rs", "") .build(); p.cargo("fix --edition --allow-no-vcs") .with_stderr(&format!( "\ [CHECKING] foo [..] [MIGRATING] src/lib.rs from {} edition to {} [FINISHED] [..] ", previous, latest_stable )) .run(); } #[cargo_test] fn prepare_for_already_on_latest_unstable() { // During the period where a new edition is coming up, but not yet stable, // this test will check what happens if you are already on the latest. If // there is no next edition, it does nothing. if !is_nightly() { // This test is fundamentally always nightly. 
return; } let next_edition = match Edition::LATEST_UNSTABLE { Some(next) => next, None => { eprintln!("Next edition is currently not available, skipping test."); return; } }; let p = project() .file( "Cargo.toml", &format!( r#" cargo-features = ["edition{}"] [package] name = 'foo' version = '0.1.0' edition = '{}' "#, next_edition, next_edition ), ) .file("src/lib.rs", "") .build(); p.cargo("fix --edition --allow-no-vcs") .masquerade_as_nightly_cargo() .with_stderr_contains(&format!( "\ [CHECKING] foo [..] [WARNING] `src/lib.rs` is already on the latest edition ({next_edition}), unable to migrate further [FINISHED] [..] ", next_edition = next_edition )) .run(); } #[cargo_test] fn prepare_for_already_on_latest_stable() { // Stable counterpart of prepare_for_already_on_latest_unstable. if Edition::LATEST_UNSTABLE.is_some() { eprintln!("This test cannot run while the latest edition is unstable, skipping."); return; } let latest_stable = Edition::LATEST_STABLE; let p = project() .file( "Cargo.toml", &format!( r#" [package] name = 'foo' version = '0.1.0' edition = '{}' "#, latest_stable ), ) .file("src/lib.rs", "") .build(); p.cargo("fix --edition --allow-no-vcs") .with_stderr_contains(&format!( "\ [CHECKING] foo [..] [WARNING] `src/lib.rs` is already on the latest edition ({latest_stable}), unable to migrate further [FINISHED] [..] ", latest_stable = latest_stable )) .run(); } #[cargo_test] fn fix_overlapping() { let p = project() .file( "src/lib.rs", r#" pub fn foo<T>() {} pub struct A; pub mod bar { pub fn baz() { ::foo::<::A>(); } } "#, ) .build(); p.cargo("fix --allow-no-vcs --edition --lib") .with_stderr( "\ [CHECKING] foo [..] [MIGRATING] src/lib.rs from 2015 edition to 2018 [FIXED] src/lib.rs (2 fixes) [FINISHED] dev [..] 
", ) .run(); let contents = p.read_file("src/lib.rs"); println!("{}", contents); assert!(contents.contains("crate::foo::<crate::A>()")); } #[cargo_test] fn fix_idioms() { let p = project() .file( "Cargo.toml", r#" [package] name = 'foo' version = '0.1.0' edition = '2018' "#, ) .file( "src/lib.rs", r#" use std::any::Any; pub fn foo() { let _x: Box<Any> = Box::new(3); } "#, ) .build(); let stderr = "\ [CHECKING] foo [..] [FIXED] src/lib.rs (1 fix) [FINISHED] [..] "; p.cargo("fix --edition-idioms --allow-no-vcs") .with_stderr(stderr) .run(); assert!(p.read_file("src/lib.rs").contains("Box<dyn Any>")); } #[cargo_test] fn idioms_2015_ok() { let p = project().file("src/lib.rs", "").build(); p.cargo("fix --edition-idioms --allow-no-vcs").run(); } #[cargo_test] fn shows_warnings_on_second_run_without_changes() { let p = project() .file( "src/lib.rs", r#" #[deprecated] fn bar() {} pub fn foo() { let _ = bar(); } "#, ) .build(); p.cargo("fix --allow-no-vcs") .with_stderr_contains("[..]warning: use of deprecated[..]") .run(); p.cargo("fix --allow-no-vcs") .with_stderr_contains("[..]warning: use of deprecated[..]") .run(); } #[cargo_test] fn shows_warnings_on_second_run_without_changes_on_multiple_targets() { let p = project() .file( "src/lib.rs", r#" #[deprecated] fn bar() {} pub fn foo() { let _ = bar(); } "#, ) .file( "src/main.rs", r#" #[deprecated] fn bar() {} fn main() { let _ = bar(); } "#, ) .file( "tests/foo.rs", r#" #[deprecated] fn bar() {} #[test] fn foo_test() { let _ = bar(); } "#, ) .file( "tests/bar.rs", r#" #[deprecated] fn bar() {} #[test] fn foo_test() { let _ = bar(); } "#, ) .file( "examples/fooxample.rs", r#" #[deprecated] fn bar() {} fn main() { let _ = bar(); } "#, ) .build(); p.cargo("fix --allow-no-vcs --all-targets") .with_stderr_contains(" --> examples/fooxample.rs:6:29") .with_stderr_contains(" --> src/lib.rs:6:29") .with_stderr_contains(" --> src/main.rs:6:29") .with_stderr_contains(" --> tests/bar.rs:7:29") .with_stderr_contains(" --> 
tests/foo.rs:7:29") .run(); p.cargo("fix --allow-no-vcs --all-targets") .with_stderr_contains(" --> examples/fooxample.rs:6:29") .with_stderr_contains(" --> src/lib.rs:6:29") .with_stderr_contains(" --> src/main.rs:6:29") .with_stderr_contains(" --> tests/bar.rs:7:29") .with_stderr_contains(" --> tests/foo.rs:7:29") .run(); } #[cargo_test] fn doesnt_rebuild_dependencies() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = { path = 'bar' } [workspace] "#, ) .file("src/lib.rs", "extern crate bar;") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "") .build(); p.cargo("fix --allow-no-vcs -p foo") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr( "\ [CHECKING] bar v0.1.0 ([..]) [CHECKING] foo v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); p.cargo("fix --allow-no-vcs -p foo") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr( "\ [CHECKING] foo v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); } #[cargo_test] fn does_not_crash_with_rustc_wrapper() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" "#, ) .file("src/lib.rs", "") .build(); p.cargo("fix --allow-no-vcs") .env("RUSTC_WRAPPER", tools::echo_wrapper()) .run(); p.build_dir().rm_rf(); p.cargo("fix --allow-no-vcs --verbose") .env("RUSTC_WORKSPACE_WRAPPER", tools::echo_wrapper()) .run(); } #[cargo_test] fn uses_workspace_wrapper_and_primary_wrapper_override() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" "#, ) .file("src/lib.rs", "") .build(); p.cargo("fix --allow-no-vcs --verbose") .env("RUSTC_WORKSPACE_WRAPPER", tools::echo_wrapper()) .with_stderr_contains("WRAPPER CALLED: rustc src/lib.rs --crate-name foo [..]") .run(); } #[cargo_test] fn only_warn_for_relevant_crates() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] a = { path = 'a' } "#, ) .file("src/lib.rs", "") .file( "a/Cargo.toml", r#" [package] name = "a" version = "0.1.0" "#, ) .file( "a/src/lib.rs", " pub fn foo() {} pub mod bar { use foo; pub fn baz() { foo() } } ", ) .build(); p.cargo("fix --allow-no-vcs --edition") .with_stderr( "\ [CHECKING] a v0.1.0 ([..]) [CHECKING] foo v0.1.0 ([..]) [MIGRATING] src/lib.rs from 2015 edition to 2018 [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); } #[cargo_test] fn fix_to_broken_code() { let p = project() .file( "foo/Cargo.toml", r#" [package] name = 'foo' version = '0.1.0' [workspace] "#, ) .file( "foo/src/main.rs", r#" use std::env; use std::fs; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::{self, Command}; fn main() { let is_lib_rs = env::args_os() .map(PathBuf::from) .any(|l| l == Path::new("src/lib.rs")); if is_lib_rs { let path = PathBuf::from(env::var_os("OUT_DIR").unwrap()); let path = path.join("foo"); if path.exists() { panic!() } else { fs::File::create(&path).unwrap(); } } let status = Command::new("rustc") .args(env::args().skip(1)) .status() .expect("failed to run rustc"); process::exit(status.code().unwrap_or(2)); } "#, ) .file( "bar/Cargo.toml", r#" [package] name = 'bar' version = '0.1.0' [workspace] "#, ) .file("bar/build.rs", "fn main() {}") .file("bar/src/lib.rs", "pub fn foo() { let mut x = 3; drop(x); }") .build(); // Build our rustc shim p.cargo("build").cwd("foo").run(); // Attempt to fix code, but our shim will always fail the second compile p.cargo("fix --allow-no-vcs --broken-code") .cwd("bar") .env("RUSTC", p.root().join("foo/target/debug/foo")) .with_status(101) .with_stderr_contains("[WARNING] failed to automatically apply fixes [..]") .run(); assert_eq!( p.read_file("bar/src/lib.rs"), "pub fn foo() { let x = 3; drop(x); }" ); } #[cargo_test] fn fix_with_common() { let p = project() .file("src/lib.rs", "") .file( "tests/t1.rs", "mod common; #[test] fn t1() { common::try(); }", ) .file( "tests/t2.rs", "mod common; #[test] fn t2() { common::try(); }", ) .file("tests/common/mod.rs", "pub fn try() {}") .build(); p.cargo("fix --edition --allow-no-vcs").run(); assert_eq!(p.read_file("tests/common/mod.rs"), "pub fn r#try() {}"); } #[cargo_test] fn fix_in_existing_repo_weird_ignore() { // Check that ignore doesn't ignore the repo itself. 
let p = git::new("foo", |project| { project .file("src/lib.rs", "") .file(".gitignore", "foo\ninner\n") .file("inner/file", "") }); p.cargo("fix").run(); // This is questionable about whether it is the right behavior. It should // probably be checking if any source file for the current project is // ignored. p.cargo("fix") .cwd("inner") .with_stderr_contains("[ERROR] no VCS found[..]") .with_status(101) .run(); p.cargo("fix").cwd("src").run(); } #[cargo_test] fn fix_color_message() { // Check that color appears in diagnostics. let p = project() .file("src/lib.rs", "std::compile_error!{\"color test\"}") .build(); p.cargo("fix --allow-no-vcs --color=always") .with_stderr_contains("[..]\x1b[[..]") .with_status(101) .run(); p.cargo("fix --allow-no-vcs --color=never") .with_stderr_contains("error: color test") .with_stderr_does_not_contain("[..]\x1b[[..]") .with_status(101) .run(); } #[cargo_test] fn edition_v2_resolver_report() { // Show a report if the V2 resolver shows differences. if !is_nightly() { // 2021 is unstable return; } Package::new("common", "1.0.0") .feature("f1", &[]) .file("src/lib.rs", "") .publish(); Package::new("bar", "1.0.0") .add_dep( Dependency::new("common", "1.0") .target("cfg(whatever)") .enable_features(&["f1"]), ) .publish(); let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" edition = "2018" [dependencies] common = "1.0" bar = "1.0" "#, ) .file("src/lib.rs", "") .build(); p.cargo("fix --edition --allow-no-vcs") .masquerade_as_nightly_cargo() .with_stderr_unordered("\ [UPDATING] [..] [DOWNLOADING] crates ... [DOWNLOADED] common v1.0.0 [..] [DOWNLOADED] bar v1.0.0 [..] note: Switching to Edition 2021 will enable the use of the version 2 feature resolver in Cargo. This may cause dependencies to resolve with a different set of features. 
More information about the resolver changes may be found at https://doc.rust-lang.org/cargo/reference/features.html#feature-resolver-version-2 The following differences were detected with the current configuration: common v1.0.0 removed features `f1` [CHECKING] common v1.0.0 [CHECKING] bar v1.0.0 [CHECKING] foo v0.1.0 [..] [MIGRATING] src/lib.rs from 2018 edition to 2021 [FINISHED] [..] ") .run(); } #[cargo_test] fn rustfix_handles_multi_spans() { // Checks that rustfix handles a single diagnostic with multiple // suggestion spans (non_fmt_panic in this case). let p = project() .file("Cargo.toml", &basic_manifest("foo", "0.1.0")) .file( "src/lib.rs", r#" pub fn foo() { panic!(format!("hey")); } "#, ) .build(); p.cargo("fix --allow-no-vcs").run(); assert!(p.read_file("src/lib.rs").contains(r#"panic!("hey");"#)); } #[cargo_test] fn fix_edition_2021() { // Can migrate 2021, even when lints are allowed. if !is_nightly() { // 2021 is unstable return; } let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" edition = "2018" "#, ) .file( "src/lib.rs", r#" #![allow(ellipsis_inclusive_range_patterns)] pub fn f() -> bool { let x = 123; match x { 0...100 => true, _ => false, } } "#, ) .build(); p.cargo("fix --edition --allow-no-vcs") .masquerade_as_nightly_cargo() .with_stderr( "\ [CHECKING] foo v0.1.0 [..] [MIGRATING] src/lib.rs from 2018 edition to 2021 [FIXED] src/lib.rs (1 fix) [FINISHED] [..] ", ) .run(); assert!(p.read_file("src/lib.rs").contains(r#"0..=100 => true,"#)); } Auto merge of #9642 - ehuss:2021-edition-disable, r=alexcrichton Temporarily ignore 2021 edition fix. The latest nightly broke the interaction of `--force-warns` and `--cap-lints`. Since this will likely take at least a few days to fix, I am temporarily disabling this test to get cargo's CI working again. //! Tests for the `cargo fix` command. 
use cargo::core::Edition; use cargo_test_support::git; use cargo_test_support::paths::CargoPathExt; use cargo_test_support::registry::{Dependency, Package}; use cargo_test_support::tools; use cargo_test_support::{basic_manifest, is_nightly, project}; #[cargo_test] fn do_not_fix_broken_builds() { let p = project() .file( "src/lib.rs", r#" pub fn foo() { let mut x = 3; drop(x); } pub fn foo2() { let _x: u32 = "a"; } "#, ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_status(101) .with_stderr_contains("[ERROR] could not compile `foo`") .run(); assert!(p.read_file("src/lib.rs").contains("let mut x = 3;")); } #[cargo_test] fn fix_broken_if_requested() { let p = project() .file( "src/lib.rs", r#" fn foo(a: &u32) -> u32 { a + 1 } pub fn bar() { foo(1); } "#, ) .build(); p.cargo("fix --allow-no-vcs --broken-code") .env("__CARGO_FIX_YOLO", "1") .run(); } #[cargo_test] fn broken_fixes_backed_out() { // This works as follows: // - Create a `rustc` shim (the "foo" project) which will pretend that the // verification step fails. // - There is an empty build script so `foo` has `OUT_DIR` to track the steps. // - The first "check", `foo` creates a file in OUT_DIR, and it completes // successfully with a warning diagnostic to remove unused `mut`. // - rustfix removes the `mut`. // - The second "check" to verify the changes, `foo` swaps out the content // with something that fails to compile. It creates a second file so it // won't do anything in the third check. // - cargo fix discovers that the fix failed, and it backs out the changes. // - The third "check" is done to display the original diagnostics of the // original code. 
let p = project() .file( "foo/Cargo.toml", r#" [package] name = 'foo' version = '0.1.0' [workspace] "#, ) .file( "foo/src/main.rs", r#" use std::env; use std::fs; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::{self, Command}; fn main() { // Ignore calls to things like --print=file-names and compiling build.rs. let is_lib_rs = env::args_os() .map(PathBuf::from) .any(|l| l == Path::new("src/lib.rs")); if is_lib_rs { let path = PathBuf::from(env::var_os("OUT_DIR").unwrap()); let first = path.join("first"); let second = path.join("second"); if first.exists() && !second.exists() { fs::write("src/lib.rs", b"not rust code").unwrap(); fs::File::create(&second).unwrap(); } else { fs::File::create(&first).unwrap(); } } let status = Command::new("rustc") .args(env::args().skip(1)) .status() .expect("failed to run rustc"); process::exit(status.code().unwrap_or(2)); } "#, ) .file( "bar/Cargo.toml", r#" [package] name = 'bar' version = '0.1.0' [workspace] "#, ) .file("bar/build.rs", "fn main() {}") .file( "bar/src/lib.rs", r#" pub fn foo() { let mut x = 3; drop(x); } "#, ) .build(); // Build our rustc shim p.cargo("build").cwd("foo").run(); // Attempt to fix code, but our shim will always fail the second compile p.cargo("fix --allow-no-vcs --lib") .cwd("bar") .env("__CARGO_FIX_YOLO", "1") .env("RUSTC", p.root().join("foo/target/debug/foo")) .with_stderr_contains( "warning: failed to automatically apply fixes suggested by rustc \ to crate `bar`\n\ \n\ after fixes were automatically applied the compiler reported \ errors within these files:\n\ \n \ * src/lib.rs\n\ \n\ This likely indicates a bug in either rustc or cargo itself,\n\ and we would appreciate a bug report! You're likely to see \n\ a number of compiler warnings after this message which cargo\n\ attempted to fix but failed. 
If you could open an issue at\n\ [..]\n\ quoting the full output of this command we'd be very appreciative!\n\ Note that you may be able to make some more progress in the near-term\n\ fixing code with the `--broken-code` flag\n\ \n\ The following errors were reported:\n\ error: expected one of `!` or `::`, found `rust`\n\ ", ) .with_stderr_contains("Original diagnostics will follow.") .with_stderr_contains("[WARNING] variable does not need to be mutable") .with_stderr_does_not_contain("[..][FIXED][..]") .run(); // Make sure the fix which should have been applied was backed out assert!(p.read_file("bar/src/lib.rs").contains("let mut x = 3;")); } #[cargo_test] fn fix_path_deps() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = { path = 'bar' } [workspace] "#, ) .file( "src/lib.rs", r#" extern crate bar; pub fn foo() -> u32 { let mut x = 3; x } "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file( "bar/src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); p.cargo("fix --allow-no-vcs -p foo -p bar") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr_unordered( "\ [CHECKING] bar v0.1.0 ([..]) [FIXED] bar/src/lib.rs (1 fix) [CHECKING] foo v0.1.0 ([..]) [FIXED] src/lib.rs (1 fix) [FINISHED] [..] 
", ) .run(); } #[cargo_test] fn do_not_fix_non_relevant_deps() { let p = project() .no_manifest() .file( "foo/Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = { path = '../bar' } [workspace] "#, ) .file("foo/src/lib.rs", "") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file( "bar/src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .cwd("foo") .run(); assert!(p.read_file("bar/src/lib.rs").contains("mut")); } #[cargo_test] fn prepare_for_2018() { let p = project() .file( "src/lib.rs", r#" #![allow(unused)] mod foo { pub const FOO: &str = "fooo"; } mod bar { use ::foo::FOO; } fn main() { let x = ::foo::FOO; } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [MIGRATING] src/lib.rs from 2015 edition to 2018 [FIXED] src/lib.rs (2 fixes) [FINISHED] [..] "; p.cargo("fix --edition --allow-no-vcs") .with_stderr(stderr) .with_stdout("") .run(); println!("{}", p.read_file("src/lib.rs")); assert!(p.read_file("src/lib.rs").contains("use crate::foo::FOO;")); assert!(p .read_file("src/lib.rs") .contains("let x = crate::foo::FOO;")); } #[cargo_test] fn local_paths() { let p = project() .file( "src/lib.rs", r#" use test::foo; mod test { pub fn foo() {} } pub fn f() { foo(); } "#, ) .build(); p.cargo("fix --edition --allow-no-vcs") .with_stderr( "\ [CHECKING] foo v0.0.1 ([..]) [MIGRATING] src/lib.rs from 2015 edition to 2018 [FIXED] src/lib.rs (1 fix) [FINISHED] [..] 
", ) .with_stdout("") .run(); println!("{}", p.read_file("src/lib.rs")); assert!(p.read_file("src/lib.rs").contains("use crate::test::foo;")); } #[cargo_test] fn upgrade_extern_crate() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" edition = '2018' [workspace] [dependencies] bar = { path = 'bar' } "#, ) .file( "src/lib.rs", r#" #![warn(rust_2018_idioms)] extern crate bar; use bar::bar; pub fn foo() { ::bar::bar(); bar(); } "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .build(); let stderr = "\ [CHECKING] bar v0.1.0 ([..]) [CHECKING] foo v0.1.0 ([..]) [FIXED] src/lib.rs (1 fix) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); println!("{}", p.read_file("src/lib.rs")); assert!(!p.read_file("src/lib.rs").contains("extern crate")); } #[cargo_test] fn specify_rustflags() { let p = project() .file( "src/lib.rs", r#" #![allow(unused)] mod foo { pub const FOO: &str = "fooo"; } fn main() { let x = ::foo::FOO; } "#, ) .build(); p.cargo("fix --edition --allow-no-vcs") .env("RUSTFLAGS", "-C linker=cc") .with_stderr( "\ [CHECKING] foo v0.0.1 ([..]) [MIGRATING] src/lib.rs from 2015 edition to 2018 [FIXED] src/lib.rs (1 fix) [FINISHED] [..] ", ) .with_stdout("") .run(); } #[cargo_test] fn no_changes_necessary() { let p = project().file("src/lib.rs", "").build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn fixes_extra_mut() { let p = project() .file( "src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXED] src/lib.rs (1 fix) [FINISHED] [..] 
"; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn fixes_two_missing_ampersands() { let p = project() .file( "src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; let mut y = 3; x + y } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXED] src/lib.rs (2 fixes) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn tricky() { let p = project() .file( "src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; let mut y = 3; x + y } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXED] src/lib.rs (2 fixes) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn preserve_line_endings() { let p = project() .file( "src/lib.rs", "fn add(a: &u32) -> u32 { a + 1 }\r\n\ pub fn foo() -> u32 { let mut x = 3; add(&x) }\r\n\ ", ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); assert!(p.read_file("src/lib.rs").contains("\r\n")); } #[cargo_test] fn fix_deny_warnings() { let p = project() .file( "src/lib.rs", "#![deny(warnings)] pub fn foo() { let mut x = 3; drop(x); } ", ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); } #[cargo_test] fn fix_deny_warnings_but_not_others() { let p = project() .file( "src/lib.rs", " #![deny(unused_mut)] pub fn foo() -> u32 { let mut x = 3; x } pub fn bar() { #[allow(unused_mut)] let mut _y = 4; } ", ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); assert!(!p.read_file("src/lib.rs").contains("let mut x = 3;")); assert!(p.read_file("src/lib.rs").contains("let mut _y = 4;")); } #[cargo_test] fn fix_two_files() { let p = project() .file( "src/lib.rs", " pub mod bar; pub fn foo() -> u32 { let mut x = 3; x } ", ) .file( "src/bar.rs", " pub fn foo() -> u32 { let mut x = 3; x } ", ) 
.build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr_contains("[FIXED] src/bar.rs (1 fix)") .with_stderr_contains("[FIXED] src/lib.rs (1 fix)") .run(); assert!(!p.read_file("src/lib.rs").contains("let mut x = 3;")); assert!(!p.read_file("src/bar.rs").contains("let mut x = 3;")); } #[cargo_test] fn fixes_missing_ampersand() { let p = project() .file("src/main.rs", "fn main() { let mut x = 3; drop(x); }") .file( "src/lib.rs", r#" pub fn foo() { let mut x = 3; drop(x); } #[test] pub fn foo2() { let mut x = 3; drop(x); } "#, ) .file( "tests/a.rs", r#" #[test] pub fn foo() { let mut x = 3; drop(x); } "#, ) .file("examples/foo.rs", "fn main() { let mut x = 3; drop(x); }") .file("build.rs", "fn main() { let mut x = 3; drop(x); }") .build(); p.cargo("fix --all-targets --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr_contains("[COMPILING] foo v0.0.1 ([..])") .with_stderr_contains("[FIXED] build.rs (1 fix)") // Don't assert number of fixes for this one, as we don't know if we're // fixing it once or twice! We run this all concurrently, and if we // compile (and fix) in `--test` mode first, we get two fixes. Otherwise // we'll fix one non-test thing, and then fix another one later in // test mode. 
.with_stderr_contains("[FIXED] src/lib.rs[..]") .with_stderr_contains("[FIXED] src/main.rs (1 fix)") .with_stderr_contains("[FIXED] examples/foo.rs (1 fix)") .with_stderr_contains("[FIXED] tests/a.rs (1 fix)") .with_stderr_contains("[FINISHED] [..]") .run(); p.cargo("build").run(); p.cargo("test").run(); } #[cargo_test] fn fix_features() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [features] bar = [] [workspace] "#, ) .file( "src/lib.rs", r#" #[cfg(feature = "bar")] pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); p.cargo("fix --allow-no-vcs").run(); p.cargo("build").run(); p.cargo("fix --features bar --allow-no-vcs").run(); p.cargo("build --features bar").run(); } #[cargo_test] fn shows_warnings() { let p = project() .file( "src/lib.rs", "#[deprecated] fn bar() {} pub fn foo() { let _ = bar(); }", ) .build(); p.cargo("fix --allow-no-vcs") .with_stderr_contains("[..]warning: use of deprecated[..]") .run(); } #[cargo_test] fn warns_if_no_vcs_detected() { let p = project().file("src/lib.rs", "pub fn foo() {}").build(); p.cargo("fix") .with_status(101) .with_stderr( "error: no VCS found for this package and `cargo fix` can potentially perform \ destructive changes; if you'd like to suppress this error pass `--allow-no-vcs`\ ", ) .run(); p.cargo("fix --allow-no-vcs").run(); } #[cargo_test] fn warns_about_dirty_working_directory() { let p = git::new("foo", |p| p.file("src/lib.rs", "pub fn foo() {}")); p.change_file("src/lib.rs", ""); p.cargo("fix") .with_status(101) .with_stderr( "\ error: the working directory of this package has uncommitted changes, \ and `cargo fix` can potentially perform destructive changes; if you'd \ like to suppress this error pass `--allow-dirty`, `--allow-staged`, or \ commit the changes to these files: * src/lib.rs (dirty) ", ) .run(); p.cargo("fix --allow-dirty").run(); } #[cargo_test] fn warns_about_staged_working_directory() { let (p, repo) = git::new_repo("foo", |p| p.file("src/lib.rs", 
"pub fn foo() {}")); p.change_file("src/lib.rs", "pub fn bar() {}"); git::add(&repo); p.cargo("fix") .with_status(101) .with_stderr( "\ error: the working directory of this package has uncommitted changes, \ and `cargo fix` can potentially perform destructive changes; if you'd \ like to suppress this error pass `--allow-dirty`, `--allow-staged`, or \ commit the changes to these files: * src/lib.rs (staged) ", ) .run(); p.cargo("fix --allow-staged").run(); } #[cargo_test] fn does_not_warn_about_clean_working_directory() { let p = git::new("foo", |p| p.file("src/lib.rs", "pub fn foo() {}")); p.cargo("fix").run(); } #[cargo_test] fn does_not_warn_about_dirty_ignored_files() { let p = git::new("foo", |p| { p.file("src/lib.rs", "pub fn foo() {}") .file(".gitignore", "bar\n") }); p.change_file("bar", ""); p.cargo("fix").run(); } #[cargo_test] fn fix_all_targets_by_default() { let p = project() .file("src/lib.rs", "pub fn foo() { let mut x = 3; drop(x); }") .file("tests/foo.rs", "pub fn foo() { let mut x = 3; drop(x); }") .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); assert!(!p.read_file("src/lib.rs").contains("let mut x")); assert!(!p.read_file("tests/foo.rs").contains("let mut x")); } #[cargo_test] fn prepare_for_unstable() { // During the period where a new edition is coming up, but not yet stable, // this test will verify that it cannot be migrated to on stable. If there // is no next edition, it does nothing. let next = match Edition::LATEST_UNSTABLE { Some(next) => next, None => { eprintln!("Next edition is currently not available, skipping test."); return; } }; let latest_stable = Edition::LATEST_STABLE; let p = project() .file( "Cargo.toml", &format!( r#" [package] name = "foo" version = "0.1.0" edition = "{}" "#, latest_stable ), ) .file("src/lib.rs", "") .build(); // -j1 to make the error more deterministic (otherwise there can be // multiple errors since they run in parallel). 
p.cargo("fix --edition --allow-no-vcs -j1") .with_status(101) .with_stderr(&format!("\ [CHECKING] foo [..] [ERROR] cannot migrate src/lib.rs to edition {next} Edition {next} is unstable and not allowed in this release, consider trying the nightly release channel. error: could not compile `foo` To learn more, run the command again with --verbose. ", next=next)) .run(); if !is_nightly() { // The rest of this test is fundamentally always nightly. return; } p.cargo("fix --edition --allow-no-vcs") .masquerade_as_nightly_cargo() .with_stderr(&format!( "\ [CHECKING] foo [..] [MIGRATING] src/lib.rs from {latest_stable} edition to {next} [FINISHED] [..] ", latest_stable = latest_stable, next = next, )) .run(); } #[cargo_test] fn prepare_for_latest_stable() { // This is the stable counterpart of prepare_for_unstable. let latest_stable = Edition::LATEST_STABLE; let previous = latest_stable.previous().unwrap(); let p = project() .file( "Cargo.toml", &format!( r#" [package] name = 'foo' version = '0.1.0' edition = '{}' "#, previous ), ) .file("src/lib.rs", "") .build(); p.cargo("fix --edition --allow-no-vcs") .with_stderr(&format!( "\ [CHECKING] foo [..] [MIGRATING] src/lib.rs from {} edition to {} [FINISHED] [..] ", previous, latest_stable )) .run(); } #[cargo_test] fn prepare_for_already_on_latest_unstable() { // During the period where a new edition is coming up, but not yet stable, // this test will check what happens if you are already on the latest. If // there is no next edition, it does nothing. if !is_nightly() { // This test is fundamentally always nightly. 
return; } let next_edition = match Edition::LATEST_UNSTABLE { Some(next) => next, None => { eprintln!("Next edition is currently not available, skipping test."); return; } }; let p = project() .file( "Cargo.toml", &format!( r#" cargo-features = ["edition{}"] [package] name = 'foo' version = '0.1.0' edition = '{}' "#, next_edition, next_edition ), ) .file("src/lib.rs", "") .build(); p.cargo("fix --edition --allow-no-vcs") .masquerade_as_nightly_cargo() .with_stderr_contains(&format!( "\ [CHECKING] foo [..] [WARNING] `src/lib.rs` is already on the latest edition ({next_edition}), unable to migrate further [FINISHED] [..] ", next_edition = next_edition )) .run(); } #[cargo_test] fn prepare_for_already_on_latest_stable() { // Stable counterpart of prepare_for_already_on_latest_unstable. if Edition::LATEST_UNSTABLE.is_some() { eprintln!("This test cannot run while the latest edition is unstable, skipping."); return; } let latest_stable = Edition::LATEST_STABLE; let p = project() .file( "Cargo.toml", &format!( r#" [package] name = 'foo' version = '0.1.0' edition = '{}' "#, latest_stable ), ) .file("src/lib.rs", "") .build(); p.cargo("fix --edition --allow-no-vcs") .with_stderr_contains(&format!( "\ [CHECKING] foo [..] [WARNING] `src/lib.rs` is already on the latest edition ({latest_stable}), unable to migrate further [FINISHED] [..] ", latest_stable = latest_stable )) .run(); } #[cargo_test] fn fix_overlapping() { let p = project() .file( "src/lib.rs", r#" pub fn foo<T>() {} pub struct A; pub mod bar { pub fn baz() { ::foo::<::A>(); } } "#, ) .build(); p.cargo("fix --allow-no-vcs --edition --lib") .with_stderr( "\ [CHECKING] foo [..] [MIGRATING] src/lib.rs from 2015 edition to 2018 [FIXED] src/lib.rs (2 fixes) [FINISHED] dev [..] 
", ) .run(); let contents = p.read_file("src/lib.rs"); println!("{}", contents); assert!(contents.contains("crate::foo::<crate::A>()")); } #[cargo_test] fn fix_idioms() { let p = project() .file( "Cargo.toml", r#" [package] name = 'foo' version = '0.1.0' edition = '2018' "#, ) .file( "src/lib.rs", r#" use std::any::Any; pub fn foo() { let _x: Box<Any> = Box::new(3); } "#, ) .build(); let stderr = "\ [CHECKING] foo [..] [FIXED] src/lib.rs (1 fix) [FINISHED] [..] "; p.cargo("fix --edition-idioms --allow-no-vcs") .with_stderr(stderr) .run(); assert!(p.read_file("src/lib.rs").contains("Box<dyn Any>")); } #[cargo_test] fn idioms_2015_ok() { let p = project().file("src/lib.rs", "").build(); p.cargo("fix --edition-idioms --allow-no-vcs").run(); } #[cargo_test] fn shows_warnings_on_second_run_without_changes() { let p = project() .file( "src/lib.rs", r#" #[deprecated] fn bar() {} pub fn foo() { let _ = bar(); } "#, ) .build(); p.cargo("fix --allow-no-vcs") .with_stderr_contains("[..]warning: use of deprecated[..]") .run(); p.cargo("fix --allow-no-vcs") .with_stderr_contains("[..]warning: use of deprecated[..]") .run(); } #[cargo_test] fn shows_warnings_on_second_run_without_changes_on_multiple_targets() { let p = project() .file( "src/lib.rs", r#" #[deprecated] fn bar() {} pub fn foo() { let _ = bar(); } "#, ) .file( "src/main.rs", r#" #[deprecated] fn bar() {} fn main() { let _ = bar(); } "#, ) .file( "tests/foo.rs", r#" #[deprecated] fn bar() {} #[test] fn foo_test() { let _ = bar(); } "#, ) .file( "tests/bar.rs", r#" #[deprecated] fn bar() {} #[test] fn foo_test() { let _ = bar(); } "#, ) .file( "examples/fooxample.rs", r#" #[deprecated] fn bar() {} fn main() { let _ = bar(); } "#, ) .build(); p.cargo("fix --allow-no-vcs --all-targets") .with_stderr_contains(" --> examples/fooxample.rs:6:29") .with_stderr_contains(" --> src/lib.rs:6:29") .with_stderr_contains(" --> src/main.rs:6:29") .with_stderr_contains(" --> tests/bar.rs:7:29") .with_stderr_contains(" --> 
tests/foo.rs:7:29") .run(); p.cargo("fix --allow-no-vcs --all-targets") .with_stderr_contains(" --> examples/fooxample.rs:6:29") .with_stderr_contains(" --> src/lib.rs:6:29") .with_stderr_contains(" --> src/main.rs:6:29") .with_stderr_contains(" --> tests/bar.rs:7:29") .with_stderr_contains(" --> tests/foo.rs:7:29") .run(); } #[cargo_test] fn doesnt_rebuild_dependencies() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = { path = 'bar' } [workspace] "#, ) .file("src/lib.rs", "extern crate bar;") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "") .build(); p.cargo("fix --allow-no-vcs -p foo") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr( "\ [CHECKING] bar v0.1.0 ([..]) [CHECKING] foo v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); p.cargo("fix --allow-no-vcs -p foo") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr( "\ [CHECKING] foo v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); } #[cargo_test] fn does_not_crash_with_rustc_wrapper() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" "#, ) .file("src/lib.rs", "") .build(); p.cargo("fix --allow-no-vcs") .env("RUSTC_WRAPPER", tools::echo_wrapper()) .run(); p.build_dir().rm_rf(); p.cargo("fix --allow-no-vcs --verbose") .env("RUSTC_WORKSPACE_WRAPPER", tools::echo_wrapper()) .run(); } #[cargo_test] fn uses_workspace_wrapper_and_primary_wrapper_override() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" "#, ) .file("src/lib.rs", "") .build(); p.cargo("fix --allow-no-vcs --verbose") .env("RUSTC_WORKSPACE_WRAPPER", tools::echo_wrapper()) .with_stderr_contains("WRAPPER CALLED: rustc src/lib.rs --crate-name foo [..]") .run(); } #[cargo_test] fn only_warn_for_relevant_crates() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] a = { path = 'a' } "#, ) .file("src/lib.rs", "") .file( "a/Cargo.toml", r#" [package] name = "a" version = "0.1.0" "#, ) .file( "a/src/lib.rs", " pub fn foo() {} pub mod bar { use foo; pub fn baz() { foo() } } ", ) .build(); p.cargo("fix --allow-no-vcs --edition") .with_stderr( "\ [CHECKING] a v0.1.0 ([..]) [CHECKING] foo v0.1.0 ([..]) [MIGRATING] src/lib.rs from 2015 edition to 2018 [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); } #[cargo_test] fn fix_to_broken_code() { let p = project() .file( "foo/Cargo.toml", r#" [package] name = 'foo' version = '0.1.0' [workspace] "#, ) .file( "foo/src/main.rs", r#" use std::env; use std::fs; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::{self, Command}; fn main() { let is_lib_rs = env::args_os() .map(PathBuf::from) .any(|l| l == Path::new("src/lib.rs")); if is_lib_rs { let path = PathBuf::from(env::var_os("OUT_DIR").unwrap()); let path = path.join("foo"); if path.exists() { panic!() } else { fs::File::create(&path).unwrap(); } } let status = Command::new("rustc") .args(env::args().skip(1)) .status() .expect("failed to run rustc"); process::exit(status.code().unwrap_or(2)); } "#, ) .file( "bar/Cargo.toml", r#" [package] name = 'bar' version = '0.1.0' [workspace] "#, ) .file("bar/build.rs", "fn main() {}") .file("bar/src/lib.rs", "pub fn foo() { let mut x = 3; drop(x); }") .build(); // Build our rustc shim p.cargo("build").cwd("foo").run(); // Attempt to fix code, but our shim will always fail the second compile p.cargo("fix --allow-no-vcs --broken-code") .cwd("bar") .env("RUSTC", p.root().join("foo/target/debug/foo")) .with_status(101) .with_stderr_contains("[WARNING] failed to automatically apply fixes [..]") .run(); assert_eq!( p.read_file("bar/src/lib.rs"), "pub fn foo() { let x = 3; drop(x); }" ); } #[cargo_test] fn fix_with_common() { let p = project() .file("src/lib.rs", "") .file( "tests/t1.rs", "mod common; #[test] fn t1() { common::try(); }", ) .file( "tests/t2.rs", "mod common; #[test] fn t2() { common::try(); }", ) .file("tests/common/mod.rs", "pub fn try() {}") .build(); p.cargo("fix --edition --allow-no-vcs").run(); assert_eq!(p.read_file("tests/common/mod.rs"), "pub fn r#try() {}"); } #[cargo_test] fn fix_in_existing_repo_weird_ignore() { // Check that ignore doesn't ignore the repo itself. 
let p = git::new("foo", |project| { project .file("src/lib.rs", "") .file(".gitignore", "foo\ninner\n") .file("inner/file", "") }); p.cargo("fix").run(); // This is questionable about whether it is the right behavior. It should // probably be checking if any source file for the current project is // ignored. p.cargo("fix") .cwd("inner") .with_stderr_contains("[ERROR] no VCS found[..]") .with_status(101) .run(); p.cargo("fix").cwd("src").run(); } #[cargo_test] fn fix_color_message() { // Check that color appears in diagnostics. let p = project() .file("src/lib.rs", "std::compile_error!{\"color test\"}") .build(); p.cargo("fix --allow-no-vcs --color=always") .with_stderr_contains("[..]\x1b[[..]") .with_status(101) .run(); p.cargo("fix --allow-no-vcs --color=never") .with_stderr_contains("error: color test") .with_stderr_does_not_contain("[..]\x1b[[..]") .with_status(101) .run(); } #[cargo_test] fn edition_v2_resolver_report() { // Show a report if the V2 resolver shows differences. if !is_nightly() { // 2021 is unstable return; } Package::new("common", "1.0.0") .feature("f1", &[]) .file("src/lib.rs", "") .publish(); Package::new("bar", "1.0.0") .add_dep( Dependency::new("common", "1.0") .target("cfg(whatever)") .enable_features(&["f1"]), ) .publish(); let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" edition = "2018" [dependencies] common = "1.0" bar = "1.0" "#, ) .file("src/lib.rs", "") .build(); p.cargo("fix --edition --allow-no-vcs") .masquerade_as_nightly_cargo() .with_stderr_unordered("\ [UPDATING] [..] [DOWNLOADING] crates ... [DOWNLOADED] common v1.0.0 [..] [DOWNLOADED] bar v1.0.0 [..] note: Switching to Edition 2021 will enable the use of the version 2 feature resolver in Cargo. This may cause dependencies to resolve with a different set of features. 
More information about the resolver changes may be found at https://doc.rust-lang.org/cargo/reference/features.html#feature-resolver-version-2 The following differences were detected with the current configuration: common v1.0.0 removed features `f1` [CHECKING] common v1.0.0 [CHECKING] bar v1.0.0 [CHECKING] foo v0.1.0 [..] [MIGRATING] src/lib.rs from 2018 edition to 2021 [FINISHED] [..] ") .run(); } #[cargo_test] fn rustfix_handles_multi_spans() { // Checks that rustfix handles a single diagnostic with multiple // suggestion spans (non_fmt_panic in this case). let p = project() .file("Cargo.toml", &basic_manifest("foo", "0.1.0")) .file( "src/lib.rs", r#" pub fn foo() { panic!(format!("hey")); } "#, ) .build(); p.cargo("fix --allow-no-vcs").run(); assert!(p.read_file("src/lib.rs").contains(r#"panic!("hey");"#)); } #[cargo_test] #[ignore] // Broken, see https://github.com/rust-lang/rust/pull/86009 fn fix_edition_2021() { // Can migrate 2021, even when lints are allowed. if !is_nightly() { // 2021 is unstable return; } let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" edition = "2018" "#, ) .file( "src/lib.rs", r#" #![allow(ellipsis_inclusive_range_patterns)] pub fn f() -> bool { let x = 123; match x { 0...100 => true, _ => false, } } "#, ) .build(); p.cargo("fix --edition --allow-no-vcs") .masquerade_as_nightly_cargo() .with_stderr( "\ [CHECKING] foo v0.1.0 [..] [MIGRATING] src/lib.rs from 2018 edition to 2021 [FIXED] src/lib.rs (1 fix) [FINISHED] [..] ", ) .run(); assert!(p.read_file("src/lib.rs").contains(r#"0..=100 => true,"#)); }
// Copyright 2016 Alexander Reece // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![cfg_attr(feature="clippy", feature(plugin))] #![cfg_attr(feature="clippy", plugin(clippy))] #![cfg_attr(not(feature="clippy"), allow(unknown_lints))] extern crate env_logger; #[cfg(feature = "amqp0-codegen")] extern crate amqp0_codegen as codegen; #[cfg(feature = "amqp0-specs")] extern crate amqp0_specs as specs; fn main() { amqp0::build(); } #[cfg(not(feature = "amqp0-specs"))] mod amqp0 { pub fn build() { println!("Skipping build (neither amqp0-build-primitives nor amqp0-pregen-primitives specified)"); } } #[cfg(feature = "amqp0-specs")] mod amqp0 { use std::env; use std::path::{Path, PathBuf}; use env_logger; use codegen; use codegen::primalgen::ModulesWriter; use specs::specs as amqp0_specs; const BUILDER_CRATES: &'static [&'static str] = &["amqp0-codegen"]; const BUILDER_REBUILD: &'static [&'static str] = &["amqp0-build-primitives"]; const BUILDER_PREGEN: &'static [&'static str] = &["amqp0-pregen-primitives"]; struct PrimitivesSource { base_dir: PathBuf, } impl codegen::Source for PrimitivesSource { fn name(&self) -> &str { "amqp0-primitives" } fn crates(&self) -> &[&str] { BUILDER_CRATES } fn rebuild_features(&self) -> &[&str] { BUILDER_REBUILD } fn pregeneration_features(&self) -> &[&str] { BUILDER_PREGEN } fn base_dir(&self) -> &Path { &self.base_dir } fn should_format(&self) -> bool { cfg!(feature = "amqp0-pregen-primitives") } } pub fn build() { let base_dir = if cfg!(feature = "amqp0-pregen-primitives") { PathBuf::from("pregen") } else { env::var_os("OUT_DIR") .map(PathBuf::from) .expect("Error: OUT_DIR not set") }; env_logger::init().unwrap(); println!("Building amqp0-primitives"); let source = PrimitivesSource { 
base_dir: base_dir }; let writer = ModulesWriter::new(&source, amqp0_specs()); writer.write_files().unwrap(); } } [primitives] Add newline before extern crate // Copyright 2016 Alexander Reece // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![cfg_attr(feature="clippy", feature(plugin))] #![cfg_attr(feature="clippy", plugin(clippy))] #![cfg_attr(not(feature="clippy"), allow(unknown_lints))] extern crate env_logger; #[cfg(feature = "amqp0-codegen")] extern crate amqp0_codegen as codegen; #[cfg(feature = "amqp0-specs")] extern crate amqp0_specs as specs; fn main() { amqp0::build(); } #[cfg(not(feature = "amqp0-specs"))] mod amqp0 { pub fn build() { println!("Skipping build (neither amqp0-build-primitives nor amqp0-pregen-primitives specified)"); } } #[cfg(feature = "amqp0-specs")] mod amqp0 { use std::env; use std::path::{Path, PathBuf}; use env_logger; use codegen; use codegen::primalgen::ModulesWriter; use specs::specs as amqp0_specs; const BUILDER_CRATES: &'static [&'static str] = &["amqp0-codegen"]; const BUILDER_REBUILD: &'static [&'static str] = &["amqp0-build-primitives"]; const BUILDER_PREGEN: &'static [&'static str] = &["amqp0-pregen-primitives"]; struct PrimitivesSource { base_dir: PathBuf, } impl codegen::Source for PrimitivesSource { fn name(&self) -> &str { "amqp0-primitives" } fn crates(&self) -> &[&str] { BUILDER_CRATES } fn rebuild_features(&self) -> &[&str] { BUILDER_REBUILD } fn pregeneration_features(&self) -> &[&str] { BUILDER_PREGEN } fn base_dir(&self) -> &Path { &self.base_dir } fn should_format(&self) -> bool { cfg!(feature = "amqp0-pregen-primitives") } } pub fn build() { let base_dir = if cfg!(feature = "amqp0-pregen-primitives") { PathBuf::from("pregen") } else { env::var_os("OUT_DIR") 
.map(PathBuf::from) .expect("Error: OUT_DIR not set") }; env_logger::init().unwrap(); println!("Building amqp0-primitives"); let source = PrimitivesSource { base_dir: base_dir }; let writer = ModulesWriter::new(&source, amqp0_specs()); writer.write_files().unwrap(); } }
//! This module contains paths to types and functions Clippy needs to know //! about. //! //! Whenever possible, please consider diagnostic items over hardcoded paths. //! See <https://github.com/rust-lang/rust-clippy/issues/5393> for more information. pub const ANY_TRAIT: [&str; 3] = ["std", "any", "Any"]; pub const ARC_PTR_EQ: [&str; 4] = ["alloc", "sync", "Arc", "ptr_eq"]; pub const ASMUT_TRAIT: [&str; 3] = ["core", "convert", "AsMut"]; pub const ASREF_TRAIT: [&str; 3] = ["core", "convert", "AsRef"]; pub const BEGIN_PANIC: [&str; 3] = ["std", "panicking", "begin_panic"]; pub const BEGIN_PANIC_FMT: [&str; 3] = ["std", "panicking", "begin_panic_fmt"]; pub const BINARY_HEAP: [&str; 4] = ["alloc", "collections", "binary_heap", "BinaryHeap"]; pub const BORROW_TRAIT: [&str; 3] = ["core", "borrow", "Borrow"]; pub const BOX: [&str; 3] = ["alloc", "boxed", "Box"]; pub const BTREEMAP: [&str; 5] = ["alloc", "collections", "btree", "map", "BTreeMap"]; pub const BTREEMAP_ENTRY: [&str; 5] = ["alloc", "collections", "btree", "map", "Entry"]; pub const BTREESET: [&str; 5] = ["alloc", "collections", "btree", "set", "BTreeSet"]; pub const CLONE_TRAIT: [&str; 3] = ["core", "clone", "Clone"]; pub const CLONE_TRAIT_METHOD: [&str; 4] = ["core", "clone", "Clone", "clone"]; pub const CMP_MAX: [&str; 3] = ["core", "cmp", "max"]; pub const CMP_MIN: [&str; 3] = ["core", "cmp", "min"]; pub const COW: [&str; 3] = ["alloc", "borrow", "Cow"]; pub const CSTRING: [&str; 4] = ["std", "ffi", "c_str", "CString"]; pub const CSTRING_AS_C_STR: [&str; 5] = ["std", "ffi", "c_str", "CString", "as_c_str"]; pub const DEFAULT_TRAIT: [&str; 3] = ["core", "default", "Default"]; pub const DEFAULT_TRAIT_METHOD: [&str; 4] = ["core", "default", "Default", "default"]; pub const DEREF_MUT_TRAIT_METHOD: [&str; 5] = ["core", "ops", "deref", "DerefMut", "deref_mut"]; pub const DEREF_TRAIT_METHOD: [&str; 5] = ["core", "ops", "deref", "Deref", "deref"]; pub const DISPLAY_FMT_METHOD: [&str; 4] = ["core", "fmt", 
"Display", "fmt"]; pub const DISPLAY_TRAIT: [&str; 3] = ["core", "fmt", "Display"]; pub const DOUBLE_ENDED_ITERATOR: [&str; 4] = ["core", "iter", "traits", "DoubleEndedIterator"]; pub const DROP: [&str; 3] = ["core", "mem", "drop"]; pub const DROP_TRAIT: [&str; 4] = ["core", "ops", "drop", "Drop"]; pub const DURATION: [&str; 3] = ["core", "time", "Duration"]; pub const EARLY_CONTEXT: [&str; 4] = ["rustc", "lint", "context", "EarlyContext"]; pub const EXIT: [&str; 3] = ["std", "process", "exit"]; pub const F32_EPSILON: [&str; 2] = ["f32", "EPSILON"]; pub const F64_EPSILON: [&str; 2] = ["f64", "EPSILON"]; pub const FILE: [&str; 3] = ["std", "fs", "File"]; pub const FILE_TYPE: [&str; 3] = ["std", "fs", "FileType"]; pub const FMT_ARGUMENTS_NEW_V1: [&str; 4] = ["core", "fmt", "Arguments", "new_v1"]; pub const FMT_ARGUMENTS_NEW_V1_FORMATTED: [&str; 4] = ["core", "fmt", "Arguments", "new_v1_formatted"]; pub const FMT_ARGUMENTV1_NEW: [&str; 4] = ["core", "fmt", "ArgumentV1", "new"]; pub const FROM_FROM: [&str; 4] = ["core", "convert", "From", "from"]; pub const FROM_TRAIT: [&str; 3] = ["core", "convert", "From"]; pub const FUTURE_FROM_GENERATOR: [&str; 3] = ["core", "future", "from_generator"]; pub const HASH: [&str; 2] = ["hash", "Hash"]; pub const HASHMAP: [&str; 5] = ["std", "collections", "hash", "map", "HashMap"]; pub const HASHMAP_ENTRY: [&str; 5] = ["std", "collections", "hash", "map", "Entry"]; pub const HASHSET: [&str; 5] = ["std", "collections", "hash", "set", "HashSet"]; pub const INDEX: [&str; 3] = ["core", "ops", "Index"]; pub const INDEX_MUT: [&str; 3] = ["core", "ops", "IndexMut"]; pub const INTO: [&str; 3] = ["core", "convert", "Into"]; pub const INTO_ITERATOR: [&str; 5] = ["core", "iter", "traits", "collect", "IntoIterator"]; pub const IO_READ: [&str; 3] = ["std", "io", "Read"]; pub const IO_WRITE: [&str; 3] = ["std", "io", "Write"]; pub const ITERATOR: [&str; 5] = ["core", "iter", "traits", "iterator", "Iterator"]; pub const LATE_CONTEXT: [&str; 4] = 
["rustc", "lint", "context", "LateContext"]; pub const LINKED_LIST: [&str; 4] = ["alloc", "collections", "linked_list", "LinkedList"]; pub const LINT: [&str; 3] = ["rustc_session", "lint", "Lint"]; pub const MEM_DISCRIMINANT: [&str; 3] = ["core", "mem", "discriminant"]; pub const MEM_FORGET: [&str; 3] = ["core", "mem", "forget"]; pub const MEM_MANUALLY_DROP: [&str; 4] = ["core", "mem", "manually_drop", "ManuallyDrop"]; pub const MEM_MAYBEUNINIT: [&str; 4] = ["core", "mem", "maybe_uninit", "MaybeUninit"]; pub const MEM_MAYBEUNINIT_UNINIT: [&str; 5] = ["core", "mem", "maybe_uninit", "MaybeUninit", "uninit"]; pub const MEM_REPLACE: [&str; 3] = ["core", "mem", "replace"]; pub const MUTEX_GUARD: [&str; 4] = ["std", "sync", "mutex", "MutexGuard"]; pub const OPEN_OPTIONS: [&str; 3] = ["std", "fs", "OpenOptions"]; pub const OPS_MODULE: [&str; 2] = ["core", "ops"]; pub const OPTION: [&str; 3] = ["core", "option", "Option"]; pub const OPTION_NONE: [&str; 4] = ["core", "option", "Option", "None"]; pub const OPTION_SOME: [&str; 4] = ["core", "option", "Option", "Some"]; pub const ORD: [&str; 3] = ["core", "cmp", "Ord"]; pub const OS_STRING: [&str; 4] = ["std", "ffi", "os_str", "OsString"]; pub const OS_STRING_AS_OS_STR: [&str; 5] = ["std", "ffi", "os_str", "OsString", "as_os_str"]; pub const OS_STR_TO_OS_STRING: [&str; 5] = ["std", "ffi", "os_str", "OsStr", "to_os_string"]; pub const PARKING_LOT_MUTEX_GUARD: [&str; 2] = ["parking_lot", "MutexGuard"]; pub const PARKING_LOT_RWLOCK_READ_GUARD: [&str; 2] = ["parking_lot", "RwLockReadGuard"]; pub const PARKING_LOT_RWLOCK_WRITE_GUARD: [&str; 2] = ["parking_lot", "RwLockWriteGuard"]; pub const PATH: [&str; 3] = ["std", "path", "Path"]; pub const PATH_BUF: [&str; 3] = ["std", "path", "PathBuf"]; pub const PATH_BUF_AS_PATH: [&str; 4] = ["std", "path", "PathBuf", "as_path"]; pub const PATH_TO_PATH_BUF: [&str; 4] = ["std", "path", "Path", "to_path_buf"]; pub const POLL: [&str; 4] = ["core", "task", "poll", "Poll"]; pub const PTR_EQ: 
[&str; 3] = ["core", "ptr", "eq"]; pub const PTR_NULL: [&str; 2] = ["ptr", "null"]; pub const PTR_NULL_MUT: [&str; 2] = ["ptr", "null_mut"]; pub const PUSH_STR: [&str; 4] = ["alloc", "string", "String", "push_str"]; pub const RANGE_ARGUMENT_TRAIT: [&str; 3] = ["core", "ops", "RangeBounds"]; pub const RC: [&str; 3] = ["alloc", "rc", "Rc"]; pub const RC_PTR_EQ: [&str; 4] = ["alloc", "rc", "Rc", "ptr_eq"]; pub const RECEIVER: [&str; 4] = ["std", "sync", "mpsc", "Receiver"]; pub const REGEX_BUILDER_NEW: [&str; 5] = ["regex", "re_builder", "unicode", "RegexBuilder", "new"]; pub const REGEX_BYTES_BUILDER_NEW: [&str; 5] = ["regex", "re_builder", "bytes", "RegexBuilder", "new"]; pub const REGEX_BYTES_NEW: [&str; 4] = ["regex", "re_bytes", "Regex", "new"]; pub const REGEX_BYTES_SET_NEW: [&str; 5] = ["regex", "re_set", "bytes", "RegexSet", "new"]; pub const REGEX_NEW: [&str; 4] = ["regex", "re_unicode", "Regex", "new"]; pub const REGEX_SET_NEW: [&str; 5] = ["regex", "re_set", "unicode", "RegexSet", "new"]; pub const REPEAT: [&str; 3] = ["core", "iter", "repeat"]; pub const RESULT: [&str; 3] = ["core", "result", "Result"]; pub const RESULT_ERR: [&str; 4] = ["core", "result", "Result", "Err"]; pub const RESULT_OK: [&str; 4] = ["core", "result", "Result", "Ok"]; pub const RWLOCK_READ_GUARD: [&str; 4] = ["std", "sync", "rwlock", "RwLockReadGuard"]; pub const RWLOCK_WRITE_GUARD: [&str; 4] = ["std", "sync", "rwlock", "RwLockWriteGuard"]; pub const SERDE_DESERIALIZE: [&str; 2] = ["_serde", "Deserialize"]; pub const SERDE_DE_VISITOR: [&str; 3] = ["serde", "de", "Visitor"]; pub const SLICE_INTO_VEC: [&str; 4] = ["alloc", "slice", "<impl [T]>", "into_vec"]; pub const SLICE_ITER: [&str; 3] = ["core", "slice", "Iter"]; pub const STDERR: [&str; 4] = ["std", "io", "stdio", "stderr"]; pub const STDOUT: [&str; 4] = ["std", "io", "stdio", "stdout"]; pub const STD_CONVERT_IDENTITY: [&str; 3] = ["std", "convert", "identity"]; pub const STD_FS_CREATE_DIR: [&str; 3] = ["std", "fs", 
"create_dir"]; pub const STD_MEM_TRANSMUTE: [&str; 3] = ["std", "mem", "transmute"]; pub const STD_PTR_NULL: [&str; 3] = ["std", "ptr", "null"]; pub const STRING_AS_MUT_STR: [&str; 4] = ["alloc", "string", "String", "as_mut_str"]; pub const STRING_AS_STR: [&str; 4] = ["alloc", "string", "String", "as_str"]; pub const SYNTAX_CONTEXT: [&str; 3] = ["rustc_span", "hygiene", "SyntaxContext"]; pub const TO_OWNED: [&str; 3] = ["alloc", "borrow", "ToOwned"]; pub const TO_OWNED_METHOD: [&str; 4] = ["alloc", "borrow", "ToOwned", "to_owned"]; pub const TO_STRING: [&str; 3] = ["alloc", "string", "ToString"]; pub const TO_STRING_METHOD: [&str; 4] = ["alloc", "string", "ToString", "to_string"]; pub const TRANSMUTE: [&str; 4] = ["core", "intrinsics", "", "transmute"]; pub const TRY_FROM: [&str; 4] = ["core", "convert", "TryFrom", "try_from"]; pub const TRY_INTO_TRAIT: [&str; 3] = ["core", "convert", "TryInto"]; pub const VEC: [&str; 3] = ["alloc", "vec", "Vec"]; pub const VEC_AS_MUT_SLICE: [&str; 4] = ["alloc", "vec", "Vec", "as_mut_slice"]; pub const VEC_AS_SLICE: [&str; 4] = ["alloc", "vec", "Vec", "as_slice"]; pub const VEC_DEQUE: [&str; 4] = ["alloc", "collections", "vec_deque", "VecDeque"]; pub const VEC_FROM_ELEM: [&str; 3] = ["alloc", "vec", "from_elem"]; pub const VEC_NEW: [&str; 4] = ["alloc", "vec", "Vec", "new"]; pub const VEC_RESIZE: [&str; 4] = ["alloc", "vec", "Vec", "resize"]; pub const WEAK_ARC: [&str; 3] = ["alloc", "sync", "Weak"]; pub const WEAK_RC: [&str; 3] = ["alloc", "rc", "Weak"]; Fix clippy hard-code slice::Iter path //! This module contains paths to types and functions Clippy needs to know //! about. //! //! Whenever possible, please consider diagnostic items over hardcoded paths. //! See <https://github.com/rust-lang/rust-clippy/issues/5393> for more information. 
pub const ANY_TRAIT: [&str; 3] = ["std", "any", "Any"]; pub const ARC_PTR_EQ: [&str; 4] = ["alloc", "sync", "Arc", "ptr_eq"]; pub const ASMUT_TRAIT: [&str; 3] = ["core", "convert", "AsMut"]; pub const ASREF_TRAIT: [&str; 3] = ["core", "convert", "AsRef"]; pub const BEGIN_PANIC: [&str; 3] = ["std", "panicking", "begin_panic"]; pub const BEGIN_PANIC_FMT: [&str; 3] = ["std", "panicking", "begin_panic_fmt"]; pub const BINARY_HEAP: [&str; 4] = ["alloc", "collections", "binary_heap", "BinaryHeap"]; pub const BORROW_TRAIT: [&str; 3] = ["core", "borrow", "Borrow"]; pub const BOX: [&str; 3] = ["alloc", "boxed", "Box"]; pub const BTREEMAP: [&str; 5] = ["alloc", "collections", "btree", "map", "BTreeMap"]; pub const BTREEMAP_ENTRY: [&str; 5] = ["alloc", "collections", "btree", "map", "Entry"]; pub const BTREESET: [&str; 5] = ["alloc", "collections", "btree", "set", "BTreeSet"]; pub const CLONE_TRAIT: [&str; 3] = ["core", "clone", "Clone"]; pub const CLONE_TRAIT_METHOD: [&str; 4] = ["core", "clone", "Clone", "clone"]; pub const CMP_MAX: [&str; 3] = ["core", "cmp", "max"]; pub const CMP_MIN: [&str; 3] = ["core", "cmp", "min"]; pub const COW: [&str; 3] = ["alloc", "borrow", "Cow"]; pub const CSTRING: [&str; 4] = ["std", "ffi", "c_str", "CString"]; pub const CSTRING_AS_C_STR: [&str; 5] = ["std", "ffi", "c_str", "CString", "as_c_str"]; pub const DEFAULT_TRAIT: [&str; 3] = ["core", "default", "Default"]; pub const DEFAULT_TRAIT_METHOD: [&str; 4] = ["core", "default", "Default", "default"]; pub const DEREF_MUT_TRAIT_METHOD: [&str; 5] = ["core", "ops", "deref", "DerefMut", "deref_mut"]; pub const DEREF_TRAIT_METHOD: [&str; 5] = ["core", "ops", "deref", "Deref", "deref"]; pub const DISPLAY_FMT_METHOD: [&str; 4] = ["core", "fmt", "Display", "fmt"]; pub const DISPLAY_TRAIT: [&str; 3] = ["core", "fmt", "Display"]; pub const DOUBLE_ENDED_ITERATOR: [&str; 4] = ["core", "iter", "traits", "DoubleEndedIterator"]; pub const DROP: [&str; 3] = ["core", "mem", "drop"]; pub const DROP_TRAIT: [&str; 
4] = ["core", "ops", "drop", "Drop"]; pub const DURATION: [&str; 3] = ["core", "time", "Duration"]; pub const EARLY_CONTEXT: [&str; 4] = ["rustc", "lint", "context", "EarlyContext"]; pub const EXIT: [&str; 3] = ["std", "process", "exit"]; pub const F32_EPSILON: [&str; 2] = ["f32", "EPSILON"]; pub const F64_EPSILON: [&str; 2] = ["f64", "EPSILON"]; pub const FILE: [&str; 3] = ["std", "fs", "File"]; pub const FILE_TYPE: [&str; 3] = ["std", "fs", "FileType"]; pub const FMT_ARGUMENTS_NEW_V1: [&str; 4] = ["core", "fmt", "Arguments", "new_v1"]; pub const FMT_ARGUMENTS_NEW_V1_FORMATTED: [&str; 4] = ["core", "fmt", "Arguments", "new_v1_formatted"]; pub const FMT_ARGUMENTV1_NEW: [&str; 4] = ["core", "fmt", "ArgumentV1", "new"]; pub const FROM_FROM: [&str; 4] = ["core", "convert", "From", "from"]; pub const FROM_TRAIT: [&str; 3] = ["core", "convert", "From"]; pub const FUTURE_FROM_GENERATOR: [&str; 3] = ["core", "future", "from_generator"]; pub const HASH: [&str; 2] = ["hash", "Hash"]; pub const HASHMAP: [&str; 5] = ["std", "collections", "hash", "map", "HashMap"]; pub const HASHMAP_ENTRY: [&str; 5] = ["std", "collections", "hash", "map", "Entry"]; pub const HASHSET: [&str; 5] = ["std", "collections", "hash", "set", "HashSet"]; pub const INDEX: [&str; 3] = ["core", "ops", "Index"]; pub const INDEX_MUT: [&str; 3] = ["core", "ops", "IndexMut"]; pub const INTO: [&str; 3] = ["core", "convert", "Into"]; pub const INTO_ITERATOR: [&str; 5] = ["core", "iter", "traits", "collect", "IntoIterator"]; pub const IO_READ: [&str; 3] = ["std", "io", "Read"]; pub const IO_WRITE: [&str; 3] = ["std", "io", "Write"]; pub const ITERATOR: [&str; 5] = ["core", "iter", "traits", "iterator", "Iterator"]; pub const LATE_CONTEXT: [&str; 4] = ["rustc", "lint", "context", "LateContext"]; pub const LINKED_LIST: [&str; 4] = ["alloc", "collections", "linked_list", "LinkedList"]; pub const LINT: [&str; 3] = ["rustc_session", "lint", "Lint"]; pub const MEM_DISCRIMINANT: [&str; 3] = ["core", "mem", 
"discriminant"]; pub const MEM_FORGET: [&str; 3] = ["core", "mem", "forget"]; pub const MEM_MANUALLY_DROP: [&str; 4] = ["core", "mem", "manually_drop", "ManuallyDrop"]; pub const MEM_MAYBEUNINIT: [&str; 4] = ["core", "mem", "maybe_uninit", "MaybeUninit"]; pub const MEM_MAYBEUNINIT_UNINIT: [&str; 5] = ["core", "mem", "maybe_uninit", "MaybeUninit", "uninit"]; pub const MEM_REPLACE: [&str; 3] = ["core", "mem", "replace"]; pub const MUTEX_GUARD: [&str; 4] = ["std", "sync", "mutex", "MutexGuard"]; pub const OPEN_OPTIONS: [&str; 3] = ["std", "fs", "OpenOptions"]; pub const OPS_MODULE: [&str; 2] = ["core", "ops"]; pub const OPTION: [&str; 3] = ["core", "option", "Option"]; pub const OPTION_NONE: [&str; 4] = ["core", "option", "Option", "None"]; pub const OPTION_SOME: [&str; 4] = ["core", "option", "Option", "Some"]; pub const ORD: [&str; 3] = ["core", "cmp", "Ord"]; pub const OS_STRING: [&str; 4] = ["std", "ffi", "os_str", "OsString"]; pub const OS_STRING_AS_OS_STR: [&str; 5] = ["std", "ffi", "os_str", "OsString", "as_os_str"]; pub const OS_STR_TO_OS_STRING: [&str; 5] = ["std", "ffi", "os_str", "OsStr", "to_os_string"]; pub const PARKING_LOT_MUTEX_GUARD: [&str; 2] = ["parking_lot", "MutexGuard"]; pub const PARKING_LOT_RWLOCK_READ_GUARD: [&str; 2] = ["parking_lot", "RwLockReadGuard"]; pub const PARKING_LOT_RWLOCK_WRITE_GUARD: [&str; 2] = ["parking_lot", "RwLockWriteGuard"]; pub const PATH: [&str; 3] = ["std", "path", "Path"]; pub const PATH_BUF: [&str; 3] = ["std", "path", "PathBuf"]; pub const PATH_BUF_AS_PATH: [&str; 4] = ["std", "path", "PathBuf", "as_path"]; pub const PATH_TO_PATH_BUF: [&str; 4] = ["std", "path", "Path", "to_path_buf"]; pub const POLL: [&str; 4] = ["core", "task", "poll", "Poll"]; pub const PTR_EQ: [&str; 3] = ["core", "ptr", "eq"]; pub const PTR_NULL: [&str; 2] = ["ptr", "null"]; pub const PTR_NULL_MUT: [&str; 2] = ["ptr", "null_mut"]; pub const PUSH_STR: [&str; 4] = ["alloc", "string", "String", "push_str"]; pub const RANGE_ARGUMENT_TRAIT: [&str; 3] 
= ["core", "ops", "RangeBounds"]; pub const RC: [&str; 3] = ["alloc", "rc", "Rc"]; pub const RC_PTR_EQ: [&str; 4] = ["alloc", "rc", "Rc", "ptr_eq"]; pub const RECEIVER: [&str; 4] = ["std", "sync", "mpsc", "Receiver"]; pub const REGEX_BUILDER_NEW: [&str; 5] = ["regex", "re_builder", "unicode", "RegexBuilder", "new"]; pub const REGEX_BYTES_BUILDER_NEW: [&str; 5] = ["regex", "re_builder", "bytes", "RegexBuilder", "new"]; pub const REGEX_BYTES_NEW: [&str; 4] = ["regex", "re_bytes", "Regex", "new"]; pub const REGEX_BYTES_SET_NEW: [&str; 5] = ["regex", "re_set", "bytes", "RegexSet", "new"]; pub const REGEX_NEW: [&str; 4] = ["regex", "re_unicode", "Regex", "new"]; pub const REGEX_SET_NEW: [&str; 5] = ["regex", "re_set", "unicode", "RegexSet", "new"]; pub const REPEAT: [&str; 3] = ["core", "iter", "repeat"]; pub const RESULT: [&str; 3] = ["core", "result", "Result"]; pub const RESULT_ERR: [&str; 4] = ["core", "result", "Result", "Err"]; pub const RESULT_OK: [&str; 4] = ["core", "result", "Result", "Ok"]; pub const RWLOCK_READ_GUARD: [&str; 4] = ["std", "sync", "rwlock", "RwLockReadGuard"]; pub const RWLOCK_WRITE_GUARD: [&str; 4] = ["std", "sync", "rwlock", "RwLockWriteGuard"]; pub const SERDE_DESERIALIZE: [&str; 2] = ["_serde", "Deserialize"]; pub const SERDE_DE_VISITOR: [&str; 3] = ["serde", "de", "Visitor"]; pub const SLICE_INTO_VEC: [&str; 4] = ["alloc", "slice", "<impl [T]>", "into_vec"]; pub const SLICE_ITER: [&str; 4] = ["core", "slice", "iter", "Iter"]; pub const STDERR: [&str; 4] = ["std", "io", "stdio", "stderr"]; pub const STDOUT: [&str; 4] = ["std", "io", "stdio", "stdout"]; pub const STD_CONVERT_IDENTITY: [&str; 3] = ["std", "convert", "identity"]; pub const STD_FS_CREATE_DIR: [&str; 3] = ["std", "fs", "create_dir"]; pub const STD_MEM_TRANSMUTE: [&str; 3] = ["std", "mem", "transmute"]; pub const STD_PTR_NULL: [&str; 3] = ["std", "ptr", "null"]; pub const STRING_AS_MUT_STR: [&str; 4] = ["alloc", "string", "String", "as_mut_str"]; pub const STRING_AS_STR: [&str; 
4] = ["alloc", "string", "String", "as_str"]; pub const SYNTAX_CONTEXT: [&str; 3] = ["rustc_span", "hygiene", "SyntaxContext"]; pub const TO_OWNED: [&str; 3] = ["alloc", "borrow", "ToOwned"]; pub const TO_OWNED_METHOD: [&str; 4] = ["alloc", "borrow", "ToOwned", "to_owned"]; pub const TO_STRING: [&str; 3] = ["alloc", "string", "ToString"]; pub const TO_STRING_METHOD: [&str; 4] = ["alloc", "string", "ToString", "to_string"]; pub const TRANSMUTE: [&str; 4] = ["core", "intrinsics", "", "transmute"]; pub const TRY_FROM: [&str; 4] = ["core", "convert", "TryFrom", "try_from"]; pub const TRY_INTO_TRAIT: [&str; 3] = ["core", "convert", "TryInto"]; pub const VEC: [&str; 3] = ["alloc", "vec", "Vec"]; pub const VEC_AS_MUT_SLICE: [&str; 4] = ["alloc", "vec", "Vec", "as_mut_slice"]; pub const VEC_AS_SLICE: [&str; 4] = ["alloc", "vec", "Vec", "as_slice"]; pub const VEC_DEQUE: [&str; 4] = ["alloc", "collections", "vec_deque", "VecDeque"]; pub const VEC_FROM_ELEM: [&str; 3] = ["alloc", "vec", "from_elem"]; pub const VEC_NEW: [&str; 4] = ["alloc", "vec", "Vec", "new"]; pub const VEC_RESIZE: [&str; 4] = ["alloc", "vec", "Vec", "resize"]; pub const WEAK_ARC: [&str; 3] = ["alloc", "sync", "Weak"]; pub const WEAK_RC: [&str; 3] = ["alloc", "rc", "Weak"];
use std::collections::HashMap;

use sendgrid::v3::*;

// Example: send a single HTML mail through the SendGrid v3 API.
// The API key is looked up by scanning all process environment variables.
fn main() {
    let mut cool_header = HashMap::with_capacity(2);
    cool_header.insert(String::from("x-cool"), String::from("indeed"));
    cool_header.insert(String::from("x-cooler"), String::from("cold"));

    // One personalization (recipient + custom headers) attached to the message.
    let p = Personalization::new(Email::new("test@example.com")).add_headers(cool_header);

    let m = Message::new(Email::new("g@gmail.com"))
        .set_subject("Subject")
        .add_content(
            Content::new()
                .set_content_type("text/html")
                .set_value("Test"),
        )
        .add_personalization(p);

    // Linear scan over env vars; panics when SG_API_KEY is absent.
    let mut env_vars = ::std::env::vars();
    let api_key = env_vars.find(|v| v.0 == "SG_API_KEY").unwrap();
    let sender = Sender::new(api_key.1);
    // `send` is blocking here; the result (status or error) is just printed.
    let code = sender.send(&m);
    println!("{:?}", code);
}

(#84)

use std::collections::HashMap;

use sendgrid::v3::*;

// Same example after #84: direct `env::var` lookup instead of scanning
// the whole environment; behavior is otherwise identical.
fn main() {
    let mut cool_header = HashMap::with_capacity(2);
    cool_header.insert(String::from("x-cool"), String::from("indeed"));
    cool_header.insert(String::from("x-cooler"), String::from("cold"));

    let p = Personalization::new(Email::new("test@example.com")).add_headers(cool_header);

    let m = Message::new(Email::new("g@gmail.com"))
        .set_subject("Subject")
        .add_content(
            Content::new()
                .set_content_type("text/html")
                .set_value("Test"),
        )
        .add_personalization(p);

    // Panics with a VarError when SG_API_KEY is unset — acceptable for an example.
    let api_key = ::std::env::var("SG_API_KEY").unwrap();
    let sender = Sender::new(api_key);
    let code = sender.send(&m);
    println!("{:?}", code);
}
//! Glium-based backend for the Vitral GUI library. #![deny(missing_docs)] use crate::atlas_cache::AtlasCache; use crate::canvas_zoom::CanvasZoom; use crate::{ Canvas, ImageBuffer, InputEvent, Keycode, MouseButton, Scene, SceneSwitch, TextureIndex, Vertex, }; use euclid::default::{Point2D, Size2D}; use glium::glutin::dpi::{LogicalSize, PhysicalPosition, PhysicalSize}; use glium::glutin::{self, Event, WindowEvent}; use glium::index::PrimitiveType; use glium::{self, Surface}; use std::error::Error; use std::fmt::Debug; use std::hash::Hash; /// Default texture type used by the backend. type GliumTexture = glium::texture::SrgbTexture2d; /// Glium-rendering backend for Vitral. pub struct Backend { display: glium::Display, events: glutin::EventsLoop, program: glium::Program, textures: Vec<GliumTexture>, render_buffer: RenderBuffer, zoom: CanvasZoom, window_size: Size2D<u32>, } impl Backend { /// Create a new Glium backend for Vitral. /// /// The backend requires an user-supplied vertex type as a type parameter and a shader program /// to render data of that type as argument to the constructor. pub fn new( display: glium::Display, events: glutin::EventsLoop, program: glium::Program, width: u32, height: u32, ) -> Backend { let (w, h) = get_size(&display); let render_buffer = RenderBuffer::new(&display, width, height); Backend { display, events, program, textures: Vec::new(), render_buffer, zoom: CanvasZoom::PixelPerfect, window_size: Size2D::new(w, h), } } /// Open a Glium window and start a backend for it. /// /// The custom shader must support a uniform named `tex` for texture data. 
    /// Build a window, size it to fit the primary monitor, and construct a
    /// `Backend` for it.
    ///
    /// `width`/`height` are the logical canvas resolution; the window is
    /// opened at the largest integer multiple of that size that still fits
    /// on the primary monitor.
    ///
    /// # Errors
    /// Returns an error if the display or the shader program cannot be
    /// created.
    pub fn start<S: Into<String>>(
        width: u32,
        height: u32,
        title: S,
    ) -> Result<Backend, Box<dyn Error>> {
        let events = glutin::EventsLoop::new();
        let window = glutin::WindowBuilder::new().with_title(title);
        // Request a specific OpenGL 3.2 context to match the #version 150 shaders.
        let context = glutin::ContextBuilder::new()
            .with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 2)));
        let display = glium::Display::new(window, context, &events)?;
        let program = glium::Program::new(&display, DEFAULT_SHADER)?;

        {
            // Start the window as a good fit on the primary monitor.

            // Don't make it a completely fullscreen window, that might put the window title bar
            // outside the screen.
            const BUFFER: f64 = 8.0;
            let (width, height) = (width as f64, height as f64);

            let monitor_size = display
                .gl_window()
                .window()
                .get_primary_monitor()
                .get_dimensions();
            // Get the most conservative DPI if there's a weird multi-monitor setup.
            let dpi_factor = display
                .gl_window()
                .window()
                .get_available_monitors()
                .map(|m| m.get_hidpi_factor())
                .max_by(|x, y| x.partial_cmp(y).unwrap())
                .expect("No monitors found!");
            info!("Scaling starting size to monitor");
            info!("Monitor size {:?}", monitor_size);
            info!("DPI Factor {}", dpi_factor);

            // Grow the window by whole canvas multiples while it still fits
            // (with an 8 px margin), so pixel-perfect zoom stays integral.
            let mut window_size = PhysicalSize::new(width, height);
            while window_size.width + width <= monitor_size.width - BUFFER
                && window_size.height + height <= monitor_size.height - BUFFER
            {
                window_size.width += width;
                window_size.height += height;
            }
            info!("Adjusted window size: {:?}", window_size);

            // Center the window on the monitor.
            let window_pos = PhysicalPosition::new(
                (monitor_size.width - window_size.width) / 2.0,
                (monitor_size.height - window_size.height) / 2.0,
            );

            display
                .gl_window()
                .window()
                .set_inner_size(window_size.to_logical(dpi_factor));
            display
                .gl_window()
                .window()
                .set_position(window_pos.to_logical(dpi_factor));
        }

        Ok(Backend::new(display, events, program, width, height))
    }

    /// Return the pixel resolution of the backend.
    ///
    /// Note that this is the logical size which will stay the same even when the
    /// desktop window is resized.
    pub fn canvas_size(&self) -> Size2D<u32> { self.render_buffer.size }

    /// Return the current number of textures.
    pub fn texture_count(&self) -> usize { self.textures.len() }

    /// Make a new empty internal texture.
    ///
    /// The new `TextureIndex` must equal the value `self.texture_count()` would have returned
    /// just before calling this.
    pub fn make_empty_texture(&mut self, width: u32, height: u32) -> TextureIndex {
        // Panics if texture allocation fails; acceptable for this backend.
        let tex = glium::texture::SrgbTexture2d::empty(&self.display, width, height).unwrap();
        self.textures.push(tex);
        // TextureIndex is the Vec position of the texture just pushed.
        self.textures.len() - 1
    }

    /// Rewrite an internal texture.
    ///
    /// # Panics
    /// Panics if `texture` does not refer to an existing texture.
    pub fn write_to_texture(&mut self, img: &ImageBuffer, texture: TextureIndex) {
        assert!(
            texture < self.textures.len(),
            "Trying to write nonexistent texture"
        );
        // Write covers the full image area anchored at the lower-left corner.
        let rect = glium::Rect {
            left: 0,
            bottom: 0,
            width: img.size.width,
            height: img.size.height,
        };
        let mut raw = glium::texture::RawImage2d::from_raw_rgba(
            img.pixels.clone(),
            (img.size.width, img.size.height),
        );
        // ImageBuffer stores packed RGBA bytes; tell glium explicitly.
        raw.format = glium::texture::ClientFormat::U8U8U8U8;

        self.textures[texture].write(rect, raw);
    }

    /// Make a new internal texture using image data.
    pub fn make_texture(&mut self, img: ImageBuffer) -> TextureIndex {
        let mut raw = glium::texture::RawImage2d::from_raw_rgba(
            img.pixels,
            (img.size.width, img.size.height),
        );
        raw.format = glium::texture::ClientFormat::U8U8U8U8;

        let tex = glium::texture::SrgbTexture2d::new(&self.display, raw).unwrap();
        self.textures.push(tex);
        self.textures.len() - 1
    }

    /// Update or construct textures based on changes in atlas cache.
    pub fn sync_with_atlas_cache<T: Eq + Hash + Clone + Debug>(
        &mut self,
        atlas_cache: &mut AtlasCache<T>,
    ) {
        for a in atlas_cache.atlases_mut() {
            let idx = a.texture();
            // If there are sheets in the atlas that don't have corresponding textures yet,
            // construct those now.
            while idx >= self.texture_count() {
                self.make_empty_texture(a.size().width, a.size().height);
            }

            // Write the updated texture atlas to internal texture.
            a.update_texture(|buf, idx| self.write_to_texture(buf, idx));
        }
    }

    // Forward an input event to the scene on top of the stack (if any) and
    // return the scene switch it requests.
    fn dispatch<T>(
        &self,
        scene_stack: &mut Vec<Box<dyn Scene<T>>>,
        ctx: &mut T,
        event: InputEvent,
    ) -> Option<SceneSwitch<T>> {
        if !scene_stack.is_empty() {
            let idx = scene_stack.len() - 1;
            scene_stack[idx].input(ctx, event)
        } else {
            None
        }
    }

    // Drain the window event queue, translate events into Vitral input, and
    // collect any scene switch requested by the active scene.
    //
    // Returns Err(()) when the window was closed.
    fn process_events<T>(
        &mut self,
        canvas: &mut Canvas,
        scene_stack: &mut Vec<Box<dyn Scene<T>>>,
        ctx: &mut T,
    ) -> Result<Option<SceneSwitch<T>>, ()> {
        // polling and handling the events received by the window
        let mut event_list = Vec::new();
        self.events.poll_events(|event| event_list.push(event));

        // Accumulated scene switches from processing input
        let mut scene_switches = Vec::new();

        for e in event_list {
            match e {
                // Only handle events for our own window.
                Event::WindowEvent {
                    ref event,
                    window_id,
                } if window_id == self.display.gl_window().window().id() => match *event {
                    WindowEvent::CloseRequested => return Err(()),
                    WindowEvent::CursorMoved { position, .. } => {
                        // Map physical cursor position into logical canvas space.
                        let position = position
                            .to_physical(self.display.gl_window().window().get_hidpi_factor());
                        let pos = self.zoom.screen_to_canvas(
                            self.window_size,
                            self.render_buffer.size(),
                            Point2D::new(position.x as f32, position.y as f32),
                        );
                        canvas.input_mouse_move(pos.x as i32, pos.y as i32);
                    }
                    WindowEvent::MouseInput { state, button, .. } => canvas.input_mouse_button(
                        match button {
                            glutin::MouseButton::Left => MouseButton::Left,
                            glutin::MouseButton::Right => MouseButton::Right,
                            // All other buttons are folded into Middle.
                            _ => MouseButton::Middle,
                        },
                        state == glutin::ElementState::Pressed,
                    ),
                    WindowEvent::ReceivedCharacter(c) => {
                        scene_switches.push(self.dispatch(scene_stack, ctx, InputEvent::Typed(c)));
                    }
                    WindowEvent::KeyboardInput {
                        input:
                            glutin::KeyboardInput {
                                state,
                                scancode,
                                virtual_keycode,
                                ..
                            },
                        ..
                    } => {
                        let is_down = state == glutin::ElementState::Pressed;
                        let key = virtual_keycode
                            .and_then(|virtual_keycode| Keycode::try_from(virtual_keycode).ok());
                        // Glutin adjusts the Linux scancodes, take into account. Don't know if
                        // this belongs here in the glium module or in the Keycode translation
                        // maps...
                        let scancode = if cfg!(target_os = "linux") {
                            scancode + 8
                        } else {
                            scancode
                        };
                        let hardware_key = Keycode::from_scancode(scancode);
                        // Only dispatch when at least one key form was resolved.
                        if key.is_some() || hardware_key.is_some() {
                            scene_switches.push(self.dispatch(
                                scene_stack,
                                ctx,
                                InputEvent::KeyEvent {
                                    is_down,
                                    key,
                                    hardware_key,
                                },
                            ));
                        }
                    }
                    _ => (),
                },
                // Events in other windows, ignore
                Event::WindowEvent { .. } => {}
                Event::Awakened => {
                    // TODO: Suspend/awaken behavior
                }
                Event::DeviceEvent { .. } => {}
                Event::Suspended(_) => {}
            }
        }

        // Take the first scene switch that shows up.
        let scene_switch = scene_switches
            .into_iter()
            .fold(None, |prev, e| match (prev, e) {
                (Some(x), _) => Some(x),
                (None, y) => y,
            });

        Ok(scene_switch)
    }

    // Draw all of this frame's canvas batches into the off-screen render buffer.
    fn render(&mut self, canvas: &mut Canvas) {
        let mut target = self.render_buffer.get_framebuffer_target(&self.display);
        target.clear_color(0.0, 0.0, 0.0, 0.0);
        let (w, h) = target.get_dimensions();

        for batch in canvas.end_frame() {
            // building the uniforms
            let uniforms = uniform! {
                // Orthographic projection: pixel coordinates to clip space
                // with the y axis flipped (top-left origin).
                matrix: [
                    [2.0 / w as f32, 0.0, 0.0, -1.0],
                    [0.0, -2.0 / h as f32, 0.0, 1.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0f32]
                ],
                tex: glium::uniforms::Sampler::new(&self.textures[batch.texture])
                    .magnify_filter(glium::uniforms::MagnifySamplerFilter::Nearest),
            };

            let vertex_buffer =
                { glium::VertexBuffer::new(&self.display, &batch.vertices).unwrap() };

            // building the index buffer
            let index_buffer = glium::IndexBuffer::new(
                &self.display,
                PrimitiveType::TrianglesList,
                &batch.triangle_indices,
            )
            .unwrap();

            let params = glium::draw_parameters::DrawParameters {
                // Clip rect is given in top-left coordinates; convert to GL's
                // bottom-left scissor origin.
                scissor: batch.clip.map(|clip| glium::Rect {
                    left: clip.origin.x as u32,
                    bottom: h - (clip.origin.y + clip.size.height) as u32,
                    width: clip.size.width as u32,
                    height: clip.size.height as u32,
                }),
                blend: glium::Blend::alpha_blending(),
                ..Default::default()
            };

            target
                .draw(
                    &vertex_buffer,
                    &index_buffer,
                    &self.program,
                    &uniforms,
                    &params,
                )
                .unwrap();
        }
    }

    // Refresh the cached physical window size from the display.
    fn update_window_size(&mut self) {
        let (w, h) = get_size(&self.display);
        self.window_size = Size2D::new(w, h);
    }

    /// Display the backend and read input events.
    ///
    /// One frame of the main loop: render the canvas, blit it to the window,
    /// then pump input. Returns Err(()) when the window was closed.
    pub fn update<T>(
        &mut self,
        canvas: &mut Canvas,
        scene_stack: &mut Vec<Box<dyn Scene<T>>>,
        ctx: &mut T,
    ) -> Result<Option<SceneSwitch<T>>, ()> {
        self.update_window_size();
        self.render(canvas);
        self.render_buffer.draw(&self.display, self.zoom);
        self.process_events(canvas, scene_stack, ctx)
    }

    /// Return an image for the current contents of the screen.
    pub fn screenshot(&self) -> ImageBuffer { self.render_buffer.screenshot() }
}

/// Shader for two parametrizable colors and discarding fully transparent pixels
const DEFAULT_SHADER: glium::program::SourceCode<'_> = glium::program::SourceCode {
    vertex_shader: "
        #version 150 core

        uniform mat4 matrix;

        in vec2 pos;
        in vec2 tex_coord;
        in vec4 color;
        in vec4 back_color;

        out vec4 v_color;
        out vec4 v_back_color;
        out vec2 v_tex_coord;

        void main() {
            gl_Position = vec4(pos, 0.0, 1.0) * matrix;
            v_color = color;
            v_back_color = back_color;
            v_tex_coord = tex_coord;
        }
    ",

    fragment_shader: "
        #version 150 core

        uniform sampler2D tex;

        in vec4 v_color;
        in vec2 v_tex_coord;
        in vec4 v_back_color;

        out vec4 f_color;

        void main() {
            vec4 tex_color = texture(tex, v_tex_coord);

            // Discard fully transparent pixels to keep them from
            // writing into the depth buffer.
            if (tex_color.a == 0.0) discard;

            f_color = v_color * tex_color + v_back_color * (vec4(1, 1, 1, 1) - tex_color);
        }
    ",
    tessellation_control_shader: None,
    tessellation_evaluation_shader: None,
    geometry_shader: None,
};

// Wire the Vitral `Vertex` layout to glium's vertex attribute names used by
// the shaders above.
implement_vertex!(Vertex, pos, tex_coord, color, back_color);

/// A deferred rendering buffer for pixel-perfect display.
struct RenderBuffer {
    // Fixed logical resolution of the off-screen canvas.
    size: Size2D<u32>,
    // Color target the canvas is rendered into.
    buffer: glium::texture::SrgbTexture2d,
    depth_buffer: glium::framebuffer::DepthRenderBuffer,
    // Trivial blit shader used by draw() below.
    shader: glium::Program,
}

impl RenderBuffer {
    /// Build an off-screen buffer of the given logical size, with the blit
    /// shader used to scale it onto the window.
    pub fn new(display: &glium::Display, width: u32, height: u32) -> RenderBuffer {
        let shader = program!(
            display,
            150 => {
            vertex: "
                #version 150 core

                in vec2 pos;
                in vec2 tex_coord;

                out vec2 v_tex_coord;

                void main() {
                    v_tex_coord = tex_coord;
                    gl_Position = vec4(pos, 0.0, 1.0);
                }",
            fragment: "
                #version 150 core

                uniform sampler2D tex;
                in vec2 v_tex_coord;

                out vec4 f_color;

                void main() {
                    vec4 tex_color = texture(tex, v_tex_coord);
                    tex_color.a = 1.0;
                    f_color = tex_color;
                }"})
        .unwrap();

        let buffer = glium::texture::SrgbTexture2d::empty(display, width, height).unwrap();

        let depth_buffer = glium::framebuffer::DepthRenderBuffer::new(
            display,
            glium::texture::DepthFormat::F32,
            width,
            height,
        )
        .unwrap();

        RenderBuffer {
            size: Size2D::new(width, height),
            buffer,
            depth_buffer,
            shader,
        }
    }

    /// Get the render target to the pixel-perfect framebuffer.
    pub fn get_framebuffer_target(
        &mut self,
        display: &glium::Display,
    ) -> glium::framebuffer::SimpleFrameBuffer<'_> {
        glium::framebuffer::SimpleFrameBuffer::with_depth_buffer(
            display,
            &self.buffer,
            &self.depth_buffer,
        )
        .unwrap()
    }

    /// Blit the off-screen buffer onto the window, scaled by `zoom`.
    pub fn draw(&mut self, display: &glium::Display, zoom: CanvasZoom) {
        let mut target = display.draw();

        target.clear_color(0.0, 0.0, 0.0, 0.0);

        let (w, h) = get_size(display);

        // Build the geometry for the on-screen rectangle.
        let s_rect = zoom.fit_canvas(Size2D::new(w, h), self.size);

        let (sx, sy) = (s_rect.origin.x, s_rect.origin.y);
        let (sw, sh) = (s_rect.size.width, s_rect.size.height);

        // XXX: This could use glium::Surface::blit_whole_color_to instead of
        // the handmade blitting, but that was buggy on Windows around
        // 2015-03.

        let vertices = {
            // Throwaway vertex type for the full-quad blit below.
            #[derive(Copy, Clone)]
            struct BlitVertex {
                pos: [f32; 2],
                tex_coord: [f32; 2],
            }
            implement_vertex!(BlitVertex, pos, tex_coord);

            glium::VertexBuffer::new(
                display,
                &[
                    BlitVertex {
                        pos: [sx, sy],
                        tex_coord: [0.0, 0.0],
                    },
                    BlitVertex {
                        pos: [sx + sw, sy],
                        tex_coord: [1.0, 0.0],
                    },
                    BlitVertex {
                        pos: [sx + sw, sy + sh],
                        tex_coord: [1.0, 1.0],
                    },
                    BlitVertex {
                        pos: [sx, sy + sh],
                        tex_coord: [0.0, 1.0],
                    },
                ],
            )
            .unwrap()
        };

        // Two triangles covering the destination rectangle.
        let indices = glium::IndexBuffer::new(
            display,
            glium::index::PrimitiveType::TrianglesList,
            &[0u16, 1, 2, 0, 2, 3],
        )
        .unwrap();

        // Set up the rest of the draw parameters.
        let mut params: glium::DrawParameters<'_> = Default::default();
        // Set an explicit viewport to apply the custom resolution that fixes
        // pixel perfect rounding errors.
        params.viewport = Some(glium::Rect {
            left: 0,
            bottom: 0,
            width: w,
            height: h,
        });

        // TODO: Option to use smooth filter & non-pixel-perfect scaling
        let mag_filter = glium::uniforms::MagnifySamplerFilter::Nearest;

        let uniforms = glium::uniforms::UniformsStorage::new(
            "tex",
            glium::uniforms::Sampler(
                &self.buffer,
                glium::uniforms::SamplerBehavior {
                    magnify_filter: mag_filter,
                    minify_filter: glium::uniforms::MinifySamplerFilter::Linear,
                    ..Default::default()
                },
            ),
        );

        // Draw the graphics buffer to the window.
        target
            .draw(&vertices, &indices, &self.shader, &uniforms, &params)
            .unwrap();
        target.finish().unwrap();
    }

    pub fn size(&self) -> Size2D<u32> { self.size }

    /// Read back the color buffer as an ImageBuffer (rows flipped so the
    /// result has a top-left origin; bytes packed little-endian RGBA).
    pub fn screenshot(&self) -> ImageBuffer {
        let image: glium::texture::RawImage2d<'_, u8> = self.buffer.read();

        ImageBuffer::from_fn(image.width, image.height, |x, y| {
            // Flip y: GL stores the buffer bottom-up.
            let i = (x * 4 + (image.height - y - 1) * image.width * 4) as usize;
            image.data[i] as u32
                + ((image.data[i + 1] as u32) << 8)
                + ((image.data[i + 2] as u32) << 16)
                + ((image.data[i + 3] as u32) << 24)
        })
    }
}

// Physical pixel size of the display's window; falls back to 800x600 logical
// when the window has no size (e.g. it has been closed).
fn get_size(display: &glium::Display) -> (u32, u32) {
    let size = display
        .gl_window()
        .window()
        .get_inner_size()
        .unwrap_or_else(|| LogicalSize::new(800.0, 600.0))
        .to_physical(display.gl_window().window().get_hidpi_factor());

    (size.width as u32, size.height as u32)
}

not sure what this comment referred to...

//! Glium-based backend for the Vitral GUI library.

#![deny(missing_docs)]

use crate::atlas_cache::AtlasCache;
use crate::canvas_zoom::CanvasZoom;
use crate::{
    Canvas, ImageBuffer, InputEvent, Keycode, MouseButton, Scene, SceneSwitch, TextureIndex,
    Vertex,
};
use euclid::default::{Point2D, Size2D};
use glium::glutin::dpi::{LogicalSize, PhysicalPosition, PhysicalSize};
use glium::glutin::{self, Event, WindowEvent};
use glium::index::PrimitiveType;
use glium::{self, Surface};
use std::error::Error;
use std::fmt::Debug;
use std::hash::Hash;

/// Default texture type used by the backend.
type GliumTexture = glium::texture::SrgbTexture2d;

/// Glium-rendering backend for Vitral.
pub struct Backend {
    display: glium::Display,
    events: glutin::EventsLoop,
    program: glium::Program,
    textures: Vec<GliumTexture>,
    render_buffer: RenderBuffer,
    zoom: CanvasZoom,
    window_size: Size2D<u32>,
}

impl Backend {
    /// Create a new Glium backend for Vitral.
    ///
    /// The backend requires an user-supplied vertex type as a type parameter and a shader program
    /// to render data of that type as argument to the constructor.
pub fn new( display: glium::Display, events: glutin::EventsLoop, program: glium::Program, width: u32, height: u32, ) -> Backend { let (w, h) = get_size(&display); let render_buffer = RenderBuffer::new(&display, width, height); Backend { display, events, program, textures: Vec::new(), render_buffer, zoom: CanvasZoom::PixelPerfect, window_size: Size2D::new(w, h), } } /// Open a Glium window and start a backend for it. pub fn start<S: Into<String>>( width: u32, height: u32, title: S, ) -> Result<Backend, Box<dyn Error>> { let events = glutin::EventsLoop::new(); let window = glutin::WindowBuilder::new().with_title(title); let context = glutin::ContextBuilder::new() .with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 2))); let display = glium::Display::new(window, context, &events)?; let program = glium::Program::new(&display, DEFAULT_SHADER)?; { // Start the window as a good fit on the primary monitor. // Don't make it a completely fullscreen window, that might put the window title bar // outside the screen. const BUFFER: f64 = 8.0; let (width, height) = (width as f64, height as f64); let monitor_size = display .gl_window() .window() .get_primary_monitor() .get_dimensions(); // Get the most conservative DPI if there's a weird multi-monitor setup. 
let dpi_factor = display .gl_window() .window() .get_available_monitors() .map(|m| m.get_hidpi_factor()) .max_by(|x, y| x.partial_cmp(y).unwrap()) .expect("No monitors found!"); info!("Scaling starting size to monitor"); info!("Monitor size {:?}", monitor_size); info!("DPI Factor {}", dpi_factor); let mut window_size = PhysicalSize::new(width, height); while window_size.width + width <= monitor_size.width - BUFFER && window_size.height + height <= monitor_size.height - BUFFER { window_size.width += width; window_size.height += height; } info!("Adjusted window size: {:?}", window_size); let window_pos = PhysicalPosition::new( (monitor_size.width - window_size.width) / 2.0, (monitor_size.height - window_size.height) / 2.0, ); display .gl_window() .window() .set_inner_size(window_size.to_logical(dpi_factor)); display .gl_window() .window() .set_position(window_pos.to_logical(dpi_factor)); } Ok(Backend::new(display, events, program, width, height)) } /// Return the pixel resolution of the backend. /// /// Note that this is the logical size which will stay the same even when the /// desktop window is resized. pub fn canvas_size(&self) -> Size2D<u32> { self.render_buffer.size } /// Return the current number of textures. pub fn texture_count(&self) -> usize { self.textures.len() } /// Make a new empty internal texture. /// /// The new `TextureIndex` must equal the value `self.texture_count()` would have returned /// just before calling this. pub fn make_empty_texture(&mut self, width: u32, height: u32) -> TextureIndex { let tex = glium::texture::SrgbTexture2d::empty(&self.display, width, height).unwrap(); self.textures.push(tex); self.textures.len() - 1 } /// Rewrite an internal texture. 
pub fn write_to_texture(&mut self, img: &ImageBuffer, texture: TextureIndex) { assert!( texture < self.textures.len(), "Trying to write nonexistent texture" ); let rect = glium::Rect { left: 0, bottom: 0, width: img.size.width, height: img.size.height, }; let mut raw = glium::texture::RawImage2d::from_raw_rgba( img.pixels.clone(), (img.size.width, img.size.height), ); raw.format = glium::texture::ClientFormat::U8U8U8U8; self.textures[texture].write(rect, raw); } /// Make a new internal texture using image data. pub fn make_texture(&mut self, img: ImageBuffer) -> TextureIndex { let mut raw = glium::texture::RawImage2d::from_raw_rgba( img.pixels, (img.size.width, img.size.height), ); raw.format = glium::texture::ClientFormat::U8U8U8U8; let tex = glium::texture::SrgbTexture2d::new(&self.display, raw).unwrap(); self.textures.push(tex); self.textures.len() - 1 } /// Update or construct textures based on changes in atlas cache. pub fn sync_with_atlas_cache<T: Eq + Hash + Clone + Debug>( &mut self, atlas_cache: &mut AtlasCache<T>, ) { for a in atlas_cache.atlases_mut() { let idx = a.texture(); // If there are sheets in the atlas that don't have corresponding textures yet, // construct those now. while idx >= self.texture_count() { self.make_empty_texture(a.size().width, a.size().height); } // Write the updated texture atlas to internal texture. 
a.update_texture(|buf, idx| self.write_to_texture(buf, idx)); } } fn dispatch<T>( &self, scene_stack: &mut Vec<Box<dyn Scene<T>>>, ctx: &mut T, event: InputEvent, ) -> Option<SceneSwitch<T>> { if !scene_stack.is_empty() { let idx = scene_stack.len() - 1; scene_stack[idx].input(ctx, event) } else { None } } fn process_events<T>( &mut self, canvas: &mut Canvas, scene_stack: &mut Vec<Box<dyn Scene<T>>>, ctx: &mut T, ) -> Result<Option<SceneSwitch<T>>, ()> { // polling and handling the events received by the window let mut event_list = Vec::new(); self.events.poll_events(|event| event_list.push(event)); // Accumulated scene switches from processing input let mut scene_switches = Vec::new(); for e in event_list { match e { Event::WindowEvent { ref event, window_id, } if window_id == self.display.gl_window().window().id() => match *event { WindowEvent::CloseRequested => return Err(()), WindowEvent::CursorMoved { position, .. } => { let position = position .to_physical(self.display.gl_window().window().get_hidpi_factor()); let pos = self.zoom.screen_to_canvas( self.window_size, self.render_buffer.size(), Point2D::new(position.x as f32, position.y as f32), ); canvas.input_mouse_move(pos.x as i32, pos.y as i32); } WindowEvent::MouseInput { state, button, .. } => canvas.input_mouse_button( match button { glutin::MouseButton::Left => MouseButton::Left, glutin::MouseButton::Right => MouseButton::Right, _ => MouseButton::Middle, }, state == glutin::ElementState::Pressed, ), WindowEvent::ReceivedCharacter(c) => { scene_switches.push(self.dispatch(scene_stack, ctx, InputEvent::Typed(c))); } WindowEvent::KeyboardInput { input: glutin::KeyboardInput { state, scancode, virtual_keycode, .. }, .. } => { let is_down = state == glutin::ElementState::Pressed; let key = virtual_keycode .and_then(|virtual_keycode| Keycode::try_from(virtual_keycode).ok()); // Glutin adjusts the Linux scancodes, take into account. 
Don't know if // this belongs here in the glium module or in the Keycode translation // maps... let scancode = if cfg!(target_os = "linux") { scancode + 8 } else { scancode }; let hardware_key = Keycode::from_scancode(scancode); if key.is_some() || hardware_key.is_some() { scene_switches.push(self.dispatch( scene_stack, ctx, InputEvent::KeyEvent { is_down, key, hardware_key, }, )); } } _ => (), }, // Events in other windows, ignore Event::WindowEvent { .. } => {} Event::Awakened => { // TODO: Suspend/awaken behavior } Event::DeviceEvent { .. } => {} Event::Suspended(_) => {} } } // Take the first scene switch that shows up. let scene_switch = scene_switches .into_iter() .fold(None, |prev, e| match (prev, e) { (Some(x), _) => Some(x), (None, y) => y, }); Ok(scene_switch) } fn render(&mut self, canvas: &mut Canvas) { let mut target = self.render_buffer.get_framebuffer_target(&self.display); target.clear_color(0.0, 0.0, 0.0, 0.0); let (w, h) = target.get_dimensions(); for batch in canvas.end_frame() { // building the uniforms let uniforms = uniform! 
{ matrix: [ [2.0 / w as f32, 0.0, 0.0, -1.0], [0.0, -2.0 / h as f32, 0.0, 1.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0f32] ], tex: glium::uniforms::Sampler::new(&self.textures[batch.texture]) .magnify_filter(glium::uniforms::MagnifySamplerFilter::Nearest), }; let vertex_buffer = { glium::VertexBuffer::new(&self.display, &batch.vertices).unwrap() }; // building the index buffer let index_buffer = glium::IndexBuffer::new( &self.display, PrimitiveType::TrianglesList, &batch.triangle_indices, ) .unwrap(); let params = glium::draw_parameters::DrawParameters { scissor: batch.clip.map(|clip| glium::Rect { left: clip.origin.x as u32, bottom: h - (clip.origin.y + clip.size.height) as u32, width: clip.size.width as u32, height: clip.size.height as u32, }), blend: glium::Blend::alpha_blending(), ..Default::default() }; target .draw( &vertex_buffer, &index_buffer, &self.program, &uniforms, &params, ) .unwrap(); } } fn update_window_size(&mut self) { let (w, h) = get_size(&self.display); self.window_size = Size2D::new(w, h); } /// Display the backend and read input events. pub fn update<T>( &mut self, canvas: &mut Canvas, scene_stack: &mut Vec<Box<dyn Scene<T>>>, ctx: &mut T, ) -> Result<Option<SceneSwitch<T>>, ()> { self.update_window_size(); self.render(canvas); self.render_buffer.draw(&self.display, self.zoom); self.process_events(canvas, scene_stack, ctx) } /// Return an image for the current contents of the screen. 
pub fn screenshot(&self) -> ImageBuffer { self.render_buffer.screenshot() } } /// Shader for two parametrizable colors and discarding fully transparent pixels const DEFAULT_SHADER: glium::program::SourceCode<'_> = glium::program::SourceCode { vertex_shader: " #version 150 core uniform mat4 matrix; in vec2 pos; in vec2 tex_coord; in vec4 color; in vec4 back_color; out vec4 v_color; out vec4 v_back_color; out vec2 v_tex_coord; void main() { gl_Position = vec4(pos, 0.0, 1.0) * matrix; v_color = color; v_back_color = back_color; v_tex_coord = tex_coord; } ", fragment_shader: " #version 150 core uniform sampler2D tex; in vec4 v_color; in vec2 v_tex_coord; in vec4 v_back_color; out vec4 f_color; void main() { vec4 tex_color = texture(tex, v_tex_coord); // Discard fully transparent pixels to keep them from // writing into the depth buffer. if (tex_color.a == 0.0) discard; f_color = v_color * tex_color + v_back_color * (vec4(1, 1, 1, 1) - tex_color); } ", tessellation_control_shader: None, tessellation_evaluation_shader: None, geometry_shader: None, }; implement_vertex!(Vertex, pos, tex_coord, color, back_color); /// A deferred rendering buffer for pixel-perfect display. 
struct RenderBuffer { size: Size2D<u32>, buffer: glium::texture::SrgbTexture2d, depth_buffer: glium::framebuffer::DepthRenderBuffer, shader: glium::Program, } impl RenderBuffer { pub fn new(display: &glium::Display, width: u32, height: u32) -> RenderBuffer { let shader = program!( display, 150 => { vertex: " #version 150 core in vec2 pos; in vec2 tex_coord; out vec2 v_tex_coord; void main() { v_tex_coord = tex_coord; gl_Position = vec4(pos, 0.0, 1.0); }", fragment: " #version 150 core uniform sampler2D tex; in vec2 v_tex_coord; out vec4 f_color; void main() { vec4 tex_color = texture(tex, v_tex_coord); tex_color.a = 1.0; f_color = tex_color; }"}) .unwrap(); let buffer = glium::texture::SrgbTexture2d::empty(display, width, height).unwrap(); let depth_buffer = glium::framebuffer::DepthRenderBuffer::new( display, glium::texture::DepthFormat::F32, width, height, ) .unwrap(); RenderBuffer { size: Size2D::new(width, height), buffer, depth_buffer, shader, } } /// Get the render target to the pixel-perfect framebuffer. pub fn get_framebuffer_target( &mut self, display: &glium::Display, ) -> glium::framebuffer::SimpleFrameBuffer<'_> { glium::framebuffer::SimpleFrameBuffer::with_depth_buffer( display, &self.buffer, &self.depth_buffer, ) .unwrap() } pub fn draw(&mut self, display: &glium::Display, zoom: CanvasZoom) { let mut target = display.draw(); target.clear_color(0.0, 0.0, 0.0, 0.0); let (w, h) = get_size(display); // Build the geometry for the on-screen rectangle. let s_rect = zoom.fit_canvas(Size2D::new(w, h), self.size); let (sx, sy) = (s_rect.origin.x, s_rect.origin.y); let (sw, sh) = (s_rect.size.width, s_rect.size.height); // XXX: This could use glium::Surface::blit_whole_color_to instead of // the handmade blitting, but that was buggy on Windows around // 2015-03. 
let vertices = { #[derive(Copy, Clone)] struct BlitVertex { pos: [f32; 2], tex_coord: [f32; 2], } implement_vertex!(BlitVertex, pos, tex_coord); glium::VertexBuffer::new( display, &[ BlitVertex { pos: [sx, sy], tex_coord: [0.0, 0.0], }, BlitVertex { pos: [sx + sw, sy], tex_coord: [1.0, 0.0], }, BlitVertex { pos: [sx + sw, sy + sh], tex_coord: [1.0, 1.0], }, BlitVertex { pos: [sx, sy + sh], tex_coord: [0.0, 1.0], }, ], ) .unwrap() }; let indices = glium::IndexBuffer::new( display, glium::index::PrimitiveType::TrianglesList, &[0u16, 1, 2, 0, 2, 3], ) .unwrap(); // Set up the rest of the draw parameters. let mut params: glium::DrawParameters<'_> = Default::default(); // Set an explicit viewport to apply the custom resolution that fixes // pixel perfect rounding errors. params.viewport = Some(glium::Rect { left: 0, bottom: 0, width: w, height: h, }); // TODO: Option to use smooth filter & non-pixel-perfect scaling let mag_filter = glium::uniforms::MagnifySamplerFilter::Nearest; let uniforms = glium::uniforms::UniformsStorage::new( "tex", glium::uniforms::Sampler( &self.buffer, glium::uniforms::SamplerBehavior { magnify_filter: mag_filter, minify_filter: glium::uniforms::MinifySamplerFilter::Linear, ..Default::default() }, ), ); // Draw the graphics buffer to the window. 
target .draw(&vertices, &indices, &self.shader, &uniforms, &params) .unwrap(); target.finish().unwrap(); } pub fn size(&self) -> Size2D<u32> { self.size } pub fn screenshot(&self) -> ImageBuffer { let image: glium::texture::RawImage2d<'_, u8> = self.buffer.read(); ImageBuffer::from_fn(image.width, image.height, |x, y| { let i = (x * 4 + (image.height - y - 1) * image.width * 4) as usize; image.data[i] as u32 + ((image.data[i + 1] as u32) << 8) + ((image.data[i + 2] as u32) << 16) + ((image.data[i + 3] as u32) << 24) }) } } fn get_size(display: &glium::Display) -> (u32, u32) { let size = display .gl_window() .window() .get_inner_size() .unwrap_or_else(|| LogicalSize::new(800.0, 600.0)) .to_physical(display.gl_window().window().get_hidpi_factor()); (size.width as u32, size.height as u32) }
//! Rust rosetta example for normal distribution use rand; use rand_distr::{Normal,Distribution}; use math::{histogram::Histogram, traits::ToIterator}; /// Returns the mean of the provided samples /// /// ## Arguments /// * data -- reference to float32 array fn mean(data: &[f32]) -> Option<f32> { let sum: f32 = data.iter().sum(); Some(sum / data.len() as f32) } /// Returns standard deviation of the provided samples /// /// ## Arguments /// * data -- reference to float32 array fn standard_deviation(data: &[f32]) -> Option<f32> { let mean = mean(data).expect("invalid mean"); let sum = data.iter().fold(0.0, |acc, &x| acc + (x - mean).powi(2)); Some((sum / data.len() as f32).sqrt()) } /// Prints a histogram in the shell /// /// ## Arguments /// * data -- reference to float32 array /// * maxwidth -- the maxwidth of the histogram in # of characters /// * bincount -- number of bins in the histogram /// * ch -- character used to plot the graph fn print_histogram(data: &[f32], maxwidth: usize, bincount: usize, ch: char) { let min_val = data.iter().cloned().fold(f32::NAN, f32::min); let max_val = data.iter().cloned().fold(f32::NAN, f32::max); let histogram = Histogram::new(Some(&data.to_vec()), bincount, min_val, max_val).unwrap(); let max_bin_value = histogram.get_counters().iter().max().unwrap(); println!(); for x in histogram.to_iter() { let (bin_min, bin_max, freq) = x; let bar_width = (((freq as f64)/(*max_bin_value as f64))*(maxwidth as f64)) as u32; let bar_as_string = (1..bar_width).fold(String::new(), |b, _| b + &ch.to_string()); println!("({:>6},{:>6}) |{} {:.2}%", format!("{:.2}", bin_min), format!("{:.2}", bin_max), bar_as_string, (freq as f64)*100.0/(data.len() as f64)); } println!(); } /// Runs the demo to generate normal distribution of three different sample sizes fn main() { let expected_mean: f32 = 0.0; let expected_std_deviation: f32 = 4.0; let normal = Normal::new(expected_mean, expected_std_deviation).unwrap(); let mut rng = rand::thread_rng(); for 
&number_of_samples in &[1000, 10_000, 1_000_000] { let data: Vec<f32> = normal .sample_iter(&mut rng) .take(number_of_samples) .collect(); println!("Statistics for sample size {}:", number_of_samples); println!("\tMean: {:?}", mean(&data).expect("invalid mean")); println!("\tStandard deviation: {:?}", standard_deviation(&data).expect("invalid standard deviation")); print_histogram(&data, 80, 40, '-'); } } #[cfg(test)] mod tests { use super::{mean, standard_deviation, print_histogram}; use std::f32; fn approx(statistics: Option<f32>, value: f32) -> bool { (statistics.unwrap() - value).abs() <= f32::EPSILON } #[test] fn test_mean() { assert!(approx(mean(&[1.0]), 1.0)); assert!(approx(mean(&[1.0, 3.0]), 2.0)); assert!(approx(mean(&[1.0, 2.0, 3.0]), 2.0)); } #[test] fn test_standard_deviation() { assert!(approx(standard_deviation(&[0.0]), 0.0)); assert!(approx(standard_deviation(&[1.0, 1.0, 1.0]), 0.0)); assert!(approx( standard_deviation(&[1.0, 2.0, 3.0]), (2f32 / 3f32).sqrt() )); } #[test] fn test_print_histogram() { print_histogram(&[0.0,1.0,2.0,3.0], 10, 5, '-'); } } After running rustfmt //! 
Rust rosetta example for normal distribution use math::{histogram::Histogram, traits::ToIterator}; use rand; use rand_distr::{Distribution, Normal}; /// Returns the mean of the provided samples /// /// ## Arguments /// * data -- reference to float32 array fn mean(data: &[f32]) -> Option<f32> { let sum: f32 = data.iter().sum(); Some(sum / data.len() as f32) } /// Returns standard deviation of the provided samples /// /// ## Arguments /// * data -- reference to float32 array fn standard_deviation(data: &[f32]) -> Option<f32> { let mean = mean(data).expect("invalid mean"); let sum = data.iter().fold(0.0, |acc, &x| acc + (x - mean).powi(2)); Some((sum / data.len() as f32).sqrt()) } /// Prints a histogram in the shell /// /// ## Arguments /// * data -- reference to float32 array /// * maxwidth -- the maxwidth of the histogram in # of characters /// * bincount -- number of bins in the histogram /// * ch -- character used to plot the graph fn print_histogram(data: &[f32], maxwidth: usize, bincount: usize, ch: char) { let min_val = data.iter().cloned().fold(f32::NAN, f32::min); let max_val = data.iter().cloned().fold(f32::NAN, f32::max); let histogram = Histogram::new(Some(&data.to_vec()), bincount, min_val, max_val).unwrap(); let max_bin_value = histogram.get_counters().iter().max().unwrap(); println!(); for x in histogram.to_iter() { let (bin_min, bin_max, freq) = x; let bar_width = (((freq as f64) / (*max_bin_value as f64)) * (maxwidth as f64)) as u32; let bar_as_string = (1..bar_width).fold(String::new(), |b, _| b + &ch.to_string()); println!( "({:>6},{:>6}) |{} {:.2}%", format!("{:.2}", bin_min), format!("{:.2}", bin_max), bar_as_string, (freq as f64) * 100.0 / (data.len() as f64) ); } println!(); } /// Runs the demo to generate normal distribution of three different sample sizes fn main() { let expected_mean: f32 = 0.0; let expected_std_deviation: f32 = 4.0; let normal = Normal::new(expected_mean, expected_std_deviation).unwrap(); let mut rng = rand::thread_rng(); 
for &number_of_samples in &[1000, 10_000, 1_000_000] { let data: Vec<f32> = normal .sample_iter(&mut rng) .take(number_of_samples) .collect(); println!("Statistics for sample size {}:", number_of_samples); println!("\tMean: {:?}", mean(&data).expect("invalid mean")); println!( "\tStandard deviation: {:?}", standard_deviation(&data).expect("invalid standard deviation") ); print_histogram(&data, 80, 40, '-'); } } #[cfg(test)] mod tests { use super::{mean, print_histogram, standard_deviation}; use std::f32; fn approx(statistics: Option<f32>, value: f32) -> bool { (statistics.unwrap() - value).abs() <= f32::EPSILON } #[test] fn test_mean() { assert!(approx(mean(&[1.0]), 1.0)); assert!(approx(mean(&[1.0, 3.0]), 2.0)); assert!(approx(mean(&[1.0, 2.0, 3.0]), 2.0)); } #[test] fn test_standard_deviation() { assert!(approx(standard_deviation(&[0.0]), 0.0)); assert!(approx(standard_deviation(&[1.0, 1.0, 1.0]), 0.0)); assert!(approx( standard_deviation(&[1.0, 2.0, 3.0]), (2f32 / 3f32).sqrt() )); } #[test] fn test_print_histogram() { print_histogram(&[0.0, 1.0, 2.0, 3.0], 10, 5, '-'); } }
#![feature(libc)]
#![allow(dead_code, non_camel_case_types, non_upper_case_globals)]

#[macro_use] extern crate gstuff;
extern crate libc;

use std::ffi::CStr;
use std::io::Write;
use std::mem::uninitialized;
use std::ptr::null_mut;
use std::str::from_utf8_unchecked;

// C ABI type aliases expected by the generated bindings included below.
pub type uint8_t = u8;
pub type int32_t = i32;
pub type uint32_t = u32;
// Opaque stand-in for C's `FILE` stream type.
pub enum FILE {}

include! ("/usr/local/include/ip2location.rs");

/// The high-level wrapper around the ip2location C library.
pub struct Ip2Location (*mut IP2Location);

impl Ip2Location {
  // Opens the database at `path` and switches the handle to shared-memory
  // access. The trailing "\0" makes the freshly formatted String usable as a
  // NUL-terminated C string for the duration of the `IP2Location_open` call.
  fn open (path: &str) -> Result<Ip2Location, String> {
    let i2l = unsafe {IP2Location_open (format! ("{}\0", path) .as_ptr() as *mut i8)};
    if i2l == null_mut() {return ERR! ("!IP2Location_open ({})", path)}
    let rc = unsafe {IP2Location_open_mem (i2l, IP2LOCATION_SHARED_MEMORY)};
    if rc != 0 {return ERR! ("!IP2Location_open_mem")}
    Ok (Ip2Location (i2l))}
  /// Get a country from the IP.
  // Returns `Ok (None)` when the library reports "-" (unknown country),
  // otherwise the two-letter ISO code as raw bytes.
  pub fn ip2country (&self, ip: &str) -> Result<Option<[u8; 2]>, String> {
    // NOTE(review): `mem::uninitialized` is deprecated and UB-prone; the
    // modern replacement is `MaybeUninit` — confirm before migrating.
    let mut ipz: [u8; 64] = unsafe {uninitialized()};
    // NOTE(review): `gstring!` presumably yields a NUL-terminated &str view of
    // the stack buffer written by `write!` — verify against the gstuff docs.
    let ipz = gstring! (ipz, {try_s! (write! (ipz, "{}\0", ip))});
    assert! (self.0 != null_mut());
    let rec = unsafe {IP2Location_get_country_short (self.0, ipz.as_ptr() as *mut i8)};
    if rec == null_mut() {return ERR! ("!IP2Location_get_country_short")}
    if unsafe {(*rec).country_short} == null_mut() {return ERR! ("!country_short")}
    let country = unsafe {CStr::from_ptr ((*rec).country_short)} .to_bytes();
    // NOTE(review): the early returns below (and the ERR! paths above, once
    // `rec` exists) skip `IP2Location_free_record`, leaking the record —
    // worth confirming and fixing separately.
    if country == b"-" {return Ok (None)}
    if country.len() != 2 {return ERR! ("ip2country] !iso2: '{}'.", unsafe {from_utf8_unchecked (country)})}
    let country = [country[0], country[1]];
    unsafe {IP2Location_free_record (rec)};
    Ok (Some (country))}}

impl Drop for Ip2Location {
  fn drop (&mut self) {
    // Close the C handle and null the pointer so a stray reuse is detectable.
    unsafe {IP2Location_close (self.0);}
    self.0 = null_mut()}}

/// There's a rumor that ip2location is thread-safe, cf. http://stackoverflow.com/questions/13234711/is-ip2location-thread-safe.
/// From what I've seen, it just reads an mmap-ed file, e.g. immutable.
unsafe impl Sync for Ip2Location {}
Open the open.
#![feature(libc)]
#![allow(dead_code, non_camel_case_types, non_upper_case_globals)]

#[macro_use] extern crate gstuff;
extern crate libc;

use std::ffi::CStr;
use std::io::Write;
use std::mem::uninitialized;
use std::ptr::null_mut;
use std::str::from_utf8_unchecked;

// C ABI type aliases expected by the generated bindings included below.
pub type uint8_t = u8;
pub type int32_t = i32;
pub type uint32_t = u32;
// Opaque stand-in for C's `FILE` stream type.
pub enum FILE {}

include! ("/usr/local/include/ip2location.rs");

/// The high-level wrapper around the ip2location C library.
pub struct Ip2Location (*mut IP2Location);

impl Ip2Location {
  /// mmap the BIN version of the ip2location database under the given `path`.
  // The trailing "\0" makes the freshly formatted String usable as a
  // NUL-terminated C string for the duration of the `IP2Location_open` call.
  pub fn open (path: &str) -> Result<Ip2Location, String> {
    let i2l = unsafe {IP2Location_open (format! ("{}\0", path) .as_ptr() as *mut i8)};
    if i2l == null_mut() {return ERR! ("!IP2Location_open ({})", path)}
    let rc = unsafe {IP2Location_open_mem (i2l, IP2LOCATION_SHARED_MEMORY)};
    if rc != 0 {return ERR! ("!IP2Location_open_mem")}
    Ok (Ip2Location (i2l))}
  /// Get a country from the IP.
  // Returns `Ok (None)` when the library reports "-" (unknown country),
  // otherwise the two-letter ISO code as raw bytes.
  pub fn ip2country (&self, ip: &str) -> Result<Option<[u8; 2]>, String> {
    // NOTE(review): `mem::uninitialized` is deprecated and UB-prone; the
    // modern replacement is `MaybeUninit` — confirm before migrating.
    let mut ipz: [u8; 64] = unsafe {uninitialized()};
    let ipz = gstring! (ipz, {try_s! (write! (ipz, "{}\0", ip))});
    assert! (self.0 != null_mut());
    let rec = unsafe {IP2Location_get_country_short (self.0, ipz.as_ptr() as *mut i8)};
    if rec == null_mut() {return ERR! ("!IP2Location_get_country_short")}
    if unsafe {(*rec).country_short} == null_mut() {return ERR! ("!country_short")}
    let country = unsafe {CStr::from_ptr ((*rec).country_short)} .to_bytes();
    // NOTE(review): the early returns below skip `IP2Location_free_record`,
    // leaking the record — worth confirming and fixing separately.
    if country == b"-" {return Ok (None)}
    if country.len() != 2 {return ERR! ("ip2country] !iso2: '{}'.", unsafe {from_utf8_unchecked (country)})}
    let country = [country[0], country[1]];
    unsafe {IP2Location_free_record (rec)};
    Ok (Some (country))}}

impl Drop for Ip2Location {
  fn drop (&mut self) {
    // Close the C handle and null the pointer so a stray reuse is detectable.
    unsafe {IP2Location_close (self.0);}
    self.0 = null_mut()}}

/// There's a rumor that ip2location is thread-safe, cf. http://stackoverflow.com/questions/13234711/is-ip2location-thread-safe.
/// From what I've seen, it just reads an mmap-ed file, e.g. immutable.
unsafe impl Sync for Ip2Location {}
#![stable(feature = "", since = "1.30.0")]
#![allow(non_camel_case_types)]

//! Utilities related to FFI bindings.

use ::fmt;

/// Equivalent to C's `void` type when used as a [pointer].
///
/// In essence, `*const c_void` is equivalent to C's `const void*`
/// and `*mut c_void` is equivalent to C's `void*`. That said, this is
/// *not* the same as C's `void` return type, which is Rust's `()` type.
///
/// To model pointers to opaque types in FFI, until `extern type` is
/// stabilized, it is recommended to use a newtype wrapper around an empty
/// byte array. See the [Nomicon] for details.
///
/// [pointer]: ../../std/primitive.pointer.html
/// [Nomicon]: https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
// N.B., for LLVM to recognize the void pointer type and by extension
// functions like malloc(), we need to have it represented as i8* in
// LLVM bitcode. The enum used here ensures this and prevents misuse
// of the "raw" type by only having private variants. We need two
// variants, because the compiler complains about the repr attribute
// otherwise and we need at least one variant as otherwise the enum
// would be uninhabited and at least dereferencing such pointers would
// be UB.
#[repr(u8)]
#[stable(feature = "raw_os", since = "1.1.0")]
pub enum c_void {
    #[unstable(feature = "c_void_variant", reason = "temporary implementation detail",
               issue = "0")]
    #[doc(hidden)] __variant1,
    #[unstable(feature = "c_void_variant", reason = "temporary implementation detail",
               issue = "0")]
    #[doc(hidden)] __variant2,
}

#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for c_void {
    // Opaque type: always rendered as the literal name "c_void".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.pad("c_void")
    }
}

/// Basic implementation of a `va_list`.
#[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), all(target_arch = "aarch4", target_os = "ios"), windows))] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] extern { type VaListImpl; } #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), windows))] impl fmt::Debug for VaListImpl { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "va_list* {:p}", self) } } /// AArch64 ABI implementation of a `va_list`. See the /// [Aarch64 Procedure Call Standard] for more details. /// /// [AArch64 Procedure Call Standard]: /// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf #[cfg(all(target_arch = "aarch64", not(windows)))] #[repr(C)] #[derive(Debug)] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] struct VaListImpl { stack: *mut (), gr_top: *mut (), vr_top: *mut (), gr_offs: i32, vr_offs: i32, } /// PowerPC ABI implementation of a `va_list`. #[cfg(all(target_arch = "powerpc", not(windows)))] #[repr(C)] #[derive(Debug)] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] struct VaListImpl { gpr: u8, fpr: u8, reserved: u16, overflow_arg_area: *mut (), reg_save_area: *mut (), } /// x86_64 ABI implementation of a `va_list`. 
#[cfg(all(target_arch = "x86_64", not(windows)))] #[repr(C)] #[derive(Debug)] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] struct VaListImpl { gp_offset: i32, fp_offset: i32, overflow_arg_area: *mut (), reg_save_area: *mut (), } /// A wrapper for a `va_list` #[lang = "va_list"] #[derive(Debug)] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] #[repr(transparent)] pub struct VaList<'a>(&'a mut VaListImpl); // The VaArgSafe trait needs to be used in public interfaces, however, the trait // itself must not be allowed to be used outside this module. Allowing users to // implement the trait for a new type (thereby allowing the va_arg intrinsic to // be used on a new type) is likely to cause undefined behavior. // // FIXME(dlrobertson): In order to use the VaArgSafe trait in a public interface // but also ensure it cannot be used elsewhere, the trait needs to be public // within a private module. Once RFC 2145 has been implemented look into // improving this. mod sealed_trait { /// Trait which whitelists the allowed types to be used with [VaList::arg] /// /// [VaList::va_arg]: struct.VaList.html#method.arg #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] pub trait VaArgSafe {} } macro_rules! 
impl_va_arg_safe { ($($t:ty),+) => { $( #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] impl sealed_trait::VaArgSafe for $t {} )+ } } impl_va_arg_safe!{i8, i16, i32, i64, usize} impl_va_arg_safe!{u8, u16, u32, u64, isize} impl_va_arg_safe!{f64} #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] impl<T> sealed_trait::VaArgSafe for *mut T {} #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] impl<T> sealed_trait::VaArgSafe for *const T {} impl<'a> VaList<'a> { /// Advance to the next arg. #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] pub unsafe fn arg<T: sealed_trait::VaArgSafe>(&mut self) -> T { va_arg(self) } /// Copies the `va_list` at the current location. #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] pub unsafe fn copy<F, R>(&self, f: F) -> R where F: for<'copy> FnOnce(VaList<'copy>) -> R { #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), all(target_arch = "aarch4", target_os = "ios"), windows))] let mut ap = va_copy(self); #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), not(windows)))] let mut ap_inner = va_copy(self); #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), not(windows)))] let mut ap = VaList(&mut ap_inner); let ret = f(VaList(ap.0)); va_end(&mut ap); ret } } extern "rust-intrinsic" { /// Destroy the arglist `ap` after initialization with `va_start` or /// `va_copy`. 
fn va_end(ap: &mut VaList); /// Copies the current location of arglist `src` to the arglist `dst`. #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), windows))] fn va_copy<'a>(src: &VaList<'a>) -> VaList<'a>; #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), not(windows)))] fn va_copy(src: &VaList) -> VaListImpl; /// Loads an argument of type `T` from the `va_list` `ap` and increment the /// argument `ap` points to. fn va_arg<T: sealed_trait::VaArgSafe>(ap: &mut VaList) -> T; } core: ensure VaList passes improper_ctypes lint Ensure the core::ffi::VaList structure passes the improper_ctypes lint. #![stable(feature = "", since = "1.30.0")] #![allow(non_camel_case_types)] //! Utilities related to FFI bindings. use ::fmt; /// Equivalent to C's `void` type when used as a [pointer]. /// /// In essence, `*const c_void` is equivalent to C's `const void*` /// and `*mut c_void` is equivalent to C's `void*`. That said, this is /// *not* the same as C's `void` return type, which is Rust's `()` type. /// /// To model pointers to opaque types in FFI, until `extern type` is /// stabilized, it is recommended to use a newtype wrapper around an empty /// byte array. See the [Nomicon] for details. /// /// [pointer]: ../../std/primitive.pointer.html /// [Nomicon]: https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs // N.B., for LLVM to recognize the void pointer type and by extension // functions like malloc(), we need to have it represented as i8* in // LLVM bitcode. The enum used here ensures this and prevents misuse // of the "raw" type by only having private variants. We need two // variants, because the compiler complains about the repr attribute // otherwise and we need at least one variant as otherwise the enum // would be uninhabited and at least dereferencing such pointers would // be UB. 
#[repr(u8)] #[stable(feature = "raw_os", since = "1.1.0")] pub enum c_void { #[unstable(feature = "c_void_variant", reason = "temporary implementation detail", issue = "0")] #[doc(hidden)] __variant1, #[unstable(feature = "c_void_variant", reason = "temporary implementation detail", issue = "0")] #[doc(hidden)] __variant2, } #[stable(feature = "std_debug", since = "1.16.0")] impl fmt::Debug for c_void { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.pad("c_void") } } /// Basic implementation of a `va_list`. #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), all(target_arch = "aarch4", target_os = "ios"), windows))] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] extern { type VaListImpl; } #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), windows))] impl fmt::Debug for VaListImpl { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "va_list* {:p}", self) } } /// AArch64 ABI implementation of a `va_list`. See the /// [Aarch64 Procedure Call Standard] for more details. /// /// [AArch64 Procedure Call Standard]: /// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf #[cfg(all(target_arch = "aarch64", not(windows)))] #[repr(C)] #[derive(Debug)] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] struct VaListImpl { stack: *mut c_void, gr_top: *mut c_void, vr_top: *mut c_void, gr_offs: i32, vr_offs: i32, } /// PowerPC ABI implementation of a `va_list`. 
#[cfg(all(target_arch = "powerpc", not(windows)))] #[repr(C)] #[derive(Debug)] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] struct VaListImpl { gpr: u8, fpr: u8, reserved: u16, overflow_arg_area: *mut c_void, reg_save_area: *mut c_void, } /// x86_64 ABI implementation of a `va_list`. #[cfg(all(target_arch = "x86_64", not(windows)))] #[repr(C)] #[derive(Debug)] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] struct VaListImpl { gp_offset: i32, fp_offset: i32, overflow_arg_area: *mut c_void, reg_save_area: *mut c_void, } /// A wrapper for a `va_list` #[lang = "va_list"] #[derive(Debug)] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] #[repr(transparent)] pub struct VaList<'a>(&'a mut VaListImpl); // The VaArgSafe trait needs to be used in public interfaces, however, the trait // itself must not be allowed to be used outside this module. Allowing users to // implement the trait for a new type (thereby allowing the va_arg intrinsic to // be used on a new type) is likely to cause undefined behavior. // // FIXME(dlrobertson): In order to use the VaArgSafe trait in a public interface // but also ensure it cannot be used elsewhere, the trait needs to be public // within a private module. Once RFC 2145 has been implemented look into // improving this. mod sealed_trait { /// Trait which whitelists the allowed types to be used with [VaList::arg] /// /// [VaList::va_arg]: struct.VaList.html#method.arg #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] pub trait VaArgSafe {} } macro_rules! 
impl_va_arg_safe { ($($t:ty),+) => { $( #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] impl sealed_trait::VaArgSafe for $t {} )+ } } impl_va_arg_safe!{i8, i16, i32, i64, usize} impl_va_arg_safe!{u8, u16, u32, u64, isize} impl_va_arg_safe!{f64} #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] impl<T> sealed_trait::VaArgSafe for *mut T {} #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] impl<T> sealed_trait::VaArgSafe for *const T {} impl<'a> VaList<'a> { /// Advance to the next arg. #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] pub unsafe fn arg<T: sealed_trait::VaArgSafe>(&mut self) -> T { va_arg(self) } /// Copies the `va_list` at the current location. #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", issue = "44930")] pub unsafe fn copy<F, R>(&self, f: F) -> R where F: for<'copy> FnOnce(VaList<'copy>) -> R { #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), all(target_arch = "aarch4", target_os = "ios"), windows))] let mut ap = va_copy(self); #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), not(windows)))] let mut ap_inner = va_copy(self); #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), not(windows)))] let mut ap = VaList(&mut ap_inner); let ret = f(VaList(ap.0)); va_end(&mut ap); ret } } extern "rust-intrinsic" { /// Destroy the arglist `ap` after initialization with `va_start` or /// `va_copy`. 
fn va_end(ap: &mut VaList); /// Copies the current location of arglist `src` to the arglist `dst`. #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), windows))] fn va_copy<'a>(src: &VaList<'a>) -> VaList<'a>; #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), not(windows)))] fn va_copy(src: &VaList) -> VaListImpl; /// Loads an argument of type `T` from the `va_list` `ap` and increment the /// argument `ap` points to. fn va_arg<T: sealed_trait::VaArgSafe>(ap: &mut VaList) -> T; }
//! Basic functions for dealing with memory.
//!
//! This module contains functions for querying the size and alignment of
//! types, initializing and manipulating memory.

#![stable(feature = "rust1", since = "1.0.0")]

use clone;
use cmp;
use fmt;
use hash;
use intrinsics;
use marker::{Copy, PhantomData, Sized};
use ptr;
use ops::{Deref, DerefMut};

#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use intrinsics::transmute;

/// Takes ownership and "forgets" about the value **without running its destructor**.
///
/// Any resources the value manages, such as heap memory or a file handle, will linger
/// forever in an unreachable state. However, it does not guarantee that pointers
/// to this memory will remain valid.
///
/// * If you want to leak memory, see [`Box::leak`][leak].
/// * If you want to obtain a raw pointer to the memory, see [`Box::into_raw`][into_raw].
/// * If you want to dispose of a value properly, running its destructor, see
/// [`mem::drop`][drop].
///
/// # Safety
///
/// `forget` is not marked as `unsafe`, because Rust's safety guarantees
/// do not include a guarantee that destructors will always run. For example,
/// a program can create a reference cycle using [`Rc`][rc], or call
/// [`process::exit`][exit] to exit without running destructors. Thus, allowing
/// `mem::forget` from safe code does not fundamentally change Rust's safety
/// guarantees.
///
/// That said, leaking resources such as memory or I/O objects is usually undesirable,
/// so `forget` is only recommended for specialized use cases like those shown below.
///
/// Because forgetting a value is allowed, any `unsafe` code you write must
/// allow for this possibility. You cannot return a value and expect that the
/// caller will necessarily run the value's destructor.
///
/// [rc]: ../../std/rc/struct.Rc.html
/// [exit]: ../../std/process/fn.exit.html
///
/// # Examples
///
/// Leak an I/O object, never closing the file:
///
/// ```no_run
/// use std::mem;
/// use std::fs::File;
///
/// let file = File::open("foo.txt").unwrap();
/// mem::forget(file);
/// ```
///
/// The practical use cases for `forget` are rather specialized and mainly come
/// up in unsafe or FFI code.
///
/// ## Use case 1
///
/// You have created an uninitialized value using [`mem::uninitialized`][uninit].
/// You must either initialize or `forget` it on every computation path before
/// Rust drops it automatically, like at the end of a scope or after a panic.
/// Running the destructor on an uninitialized value would be [undefined behavior][ub].
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// # let some_condition = false;
/// unsafe {
///     let mut uninit_vec: Vec<u32> = mem::uninitialized();
///
///     if some_condition {
///         // Initialize the variable.
///         ptr::write(&mut uninit_vec, Vec::new());
///     } else {
///         // Forget the uninitialized value so its destructor doesn't run.
///         mem::forget(uninit_vec);
///     }
/// }
/// ```
///
/// ## Use case 2
///
/// You have duplicated the bytes making up a value, without doing a proper
/// [`Clone`][clone]. You need the value's destructor to run only once,
/// because a double `free` is undefined behavior.
///
/// An example is a possible implementation of [`mem::swap`][swap]:
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// # #[allow(dead_code)]
/// fn swap<T>(x: &mut T, y: &mut T) {
///     unsafe {
///         // Give ourselves some scratch space to work with
///         let mut t: T = mem::uninitialized();
///
///         // Perform the swap, `&mut` pointers never alias
///         ptr::copy_nonoverlapping(&*x, &mut t, 1);
///         ptr::copy_nonoverlapping(&*y, x, 1);
///         ptr::copy_nonoverlapping(&t, y, 1);
///
///         // y and t now point to the same thing, but we need to completely
///         // forget `t` because we do not want to run the destructor for `T`
///         // on its value, which is still owned somewhere outside this function.
///         mem::forget(t);
///     }
/// }
/// ```
///
/// [drop]: fn.drop.html
/// [uninit]: fn.uninitialized.html
/// [clone]: ../clone/trait.Clone.html
/// [swap]: fn.swap.html
/// [box]: ../../std/boxed/struct.Box.html
/// [leak]: ../../std/boxed/struct.Box.html#method.leak
/// [into_raw]: ../../std/boxed/struct.Box.html#method.into_raw
/// [ub]: ../../reference/behavior-considered-undefined.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn forget<T>(t: T) {
    // Moving `t` into a `ManuallyDrop` (whose purpose, per its docs below, is
    // to inhibit the automatic destructor call) and immediately discarding
    // the wrapper leaks the value without ever running `T`'s destructor.
    ManuallyDrop::new(t);
}

/// Like [`forget`], but also accepts unsized values.
///
/// This function is just a shim intended to be removed when the `unsized_locals` feature gets
/// stabilized.
///
/// [`forget`]: fn.forget.html
#[inline]
#[unstable(feature = "forget_unsized", issue = "0")]
pub fn forget_unsized<T: ?Sized>(t: T) {
    // `ManuallyDrop::new` requires `T: Sized`, so the unsized case goes
    // through the `forget` intrinsic directly.
    unsafe { intrinsics::forget(t) }
}

/// Returns the size of a type in bytes.
///
/// More specifically, this is the offset in bytes between successive elements
/// in an array with that item type including alignment padding. Thus, for any
/// type `T` and length `n`, `[T; n]` has a size of `n * size_of::<T>()`.
///
/// In general, the size of a type is not stable across compilations, but
/// specific types such as primitives are.
///
/// The following table gives the size for primitives.
///
/// Type | size_of::\<Type>()
/// ---- | ---------------
/// () | 0
/// bool | 1
/// u8 | 1
/// u16 | 2
/// u32 | 4
/// u64 | 8
/// u128 | 16
/// i8 | 1
/// i16 | 2
/// i32 | 4
/// i64 | 8
/// i128 | 16
/// f32 | 4
/// f64 | 8
/// char | 4
///
/// Furthermore, `usize` and `isize` have the same size.
///
/// The types `*const T`, `&T`, `Box<T>`, `Option<&T>`, and `Option<Box<T>>` all have
/// the same size. If `T` is Sized, all of those types have the same size as `usize`.
///
/// The mutability of a pointer does not change its size. As such, `&T` and `&mut T`
/// have the same size. Likewise for `*const T` and `*mut T`.
///
/// # Size of `#[repr(C)]` items
///
/// The `C` representation for items has a defined layout. With this layout,
/// the size of items is also stable as long as all fields have a stable size.
///
/// ## Size of Structs
///
/// For `structs`, the size is determined by the following algorithm.
///
/// For each field in the struct ordered by declaration order:
///
/// 1. Add the size of the field.
/// 2. Round up the current size to the nearest multiple of the next field's [alignment].
///
/// Finally, round the size of the struct to the nearest multiple of its [alignment].
/// The alignment of the struct is usually the largest alignment of all its
/// fields; this can be changed with the use of `repr(align(N))`.
///
/// Unlike `C`, zero sized structs are not rounded up to one byte in size.
///
/// ## Size of Enums
///
/// Enums that carry no data other than the discriminant have the same size as C enums
/// on the platform they are compiled for.
///
/// ## Size of Unions
///
/// The size of a union is the size of its largest field.
///
/// Unlike `C`, zero sized unions are not rounded up to one byte in size.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// // Some primitives
/// assert_eq!(4, mem::size_of::<i32>());
/// assert_eq!(8, mem::size_of::<f64>());
/// assert_eq!(0, mem::size_of::<()>());
///
/// // Some arrays
/// assert_eq!(8, mem::size_of::<[i32; 2]>());
/// assert_eq!(12, mem::size_of::<[i32; 3]>());
/// assert_eq!(0, mem::size_of::<[i32; 0]>());
///
///
/// // Pointer size equality
/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<*const i32>());
/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Box<i32>>());
/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Option<&i32>>());
/// assert_eq!(mem::size_of::<Box<i32>>(), mem::size_of::<Option<Box<i32>>>());
/// ```
///
/// Using `#[repr(C)]`.
///
/// ```
/// use std::mem;
///
/// #[repr(C)]
/// struct FieldStruct {
///     first: u8,
///     second: u16,
///     third: u8
/// }
///
/// // The size of the first field is 1, so add 1 to the size. Size is 1.
/// // The alignment of the second field is 2, so add 1 to the size for padding. Size is 2.
/// // The size of the second field is 2, so add 2 to the size. Size is 4.
/// // The alignment of the third field is 1, so add 0 to the size for padding. Size is 4.
/// // The size of the third field is 1, so add 1 to the size. Size is 5.
/// // Finally, the alignment of the struct is 2 (because the largest alignment amongst its
/// // fields is 2), so add 1 to the size for padding. Size is 6.
/// assert_eq!(6, mem::size_of::<FieldStruct>());
///
/// #[repr(C)]
/// struct TupleStruct(u8, u16, u8);
///
/// // Tuple structs follow the same rules.
/// assert_eq!(6, mem::size_of::<TupleStruct>());
///
/// // Note that reordering the fields can lower the size. We can remove both padding bytes
/// // by putting `third` before `second`.
/// #[repr(C)]
/// struct FieldStructOptimized {
///     first: u8,
///     third: u8,
///     second: u16
/// }
///
/// assert_eq!(4, mem::size_of::<FieldStructOptimized>());
///
/// // Union size is the size of the largest field.
/// #[repr(C)]
/// union ExampleUnion {
///     smaller: u8,
///     larger: u16
/// }
///
/// assert_eq!(2, mem::size_of::<ExampleUnion>());
/// ```
///
/// [alignment]: ./fn.align_of.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
// `rustc_promotable` lets calls be promoted to constants in const contexts.
#[rustc_promotable]
pub const fn size_of<T>() -> usize {
    intrinsics::size_of::<T>()
}

/// Returns the size of the pointed-to value in bytes.
///
/// This is usually the same as `size_of::<T>()`. However, when `T` *has* no
/// statically-known size, e.g., a slice [`[T]`][slice] or a [trait object],
/// then `size_of_val` can be used to get the dynamically-known size.
///
/// [slice]: ../../std/primitive.slice.html
/// [trait object]: ../../book/ch17-02-trait-objects.html
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::size_of_val(&5i32));
///
/// let x: [u8; 13] = [0; 13];
/// let y: &[u8] = &x;
/// assert_eq!(13, mem::size_of_val(y));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn size_of_val<T: ?Sized>(val: &T) -> usize {
    // The intrinsic is unsafe, but `val` is a guaranteed-valid reference.
    unsafe { intrinsics::size_of_val(val) }
}

/// Returns the [ABI]-required minimum alignment of a type.
///
/// Every reference to a value of the type `T` must be a multiple of this number.
///
/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
///
/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
///
/// # Examples
///
/// ```
/// # #![allow(deprecated)]
/// use std::mem;
///
/// assert_eq!(4, mem::min_align_of::<i32>());
/// ```
// Deprecated alias kept for backwards compatibility; `align_of` is the
// current name for the same intrinsic.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(reason = "use `align_of` instead", since = "1.2.0")]
pub fn min_align_of<T>() -> usize {
    intrinsics::min_align_of::<T>()
}

/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to.
///
/// Every reference to a value of the type `T` must be a multiple of this number.
///
/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
///
/// # Examples
///
/// ```
/// # #![allow(deprecated)]
/// use std::mem;
///
/// assert_eq!(4, mem::min_align_of_val(&5i32));
/// ```
// Deprecated alias kept for backwards compatibility; `align_of_val` is the
// current name for the same intrinsic.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(reason = "use `align_of_val` instead", since = "1.2.0")]
pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
    unsafe { intrinsics::min_align_of_val(val) }
}

/// Returns the [ABI]-required minimum alignment of a type.
///
/// Every reference to a value of the type `T` must be a multiple of this number.
///
/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
///
/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_promotable]
pub const fn align_of<T>() -> usize {
    // Same intrinsic as the deprecated `min_align_of`; this is the stable,
    // const-evaluable entry point.
    intrinsics::min_align_of::<T>()
}

/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to.
///
/// Every reference to a value of the type `T` must be a multiple of this number.
///
/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn align_of_val<T: ?Sized>(val: &T) -> usize {
    // The intrinsic is unsafe, but `val` is a guaranteed-valid reference.
    unsafe { intrinsics::min_align_of_val(val) }
}

/// Returns `true` if dropping values of type `T` matters.
///
/// This is purely an optimization hint, and may be implemented conservatively:
/// it may return `true` for types that don't actually need to be dropped.
/// As such always returning `true` would be a valid implementation of
/// this function. However if this function actually returns `false`, then you
/// can be certain dropping `T` has no side effect.
///
/// Low level implementations of things like collections, which need to manually
/// drop their data, should use this function to avoid unnecessarily
/// trying to drop all their contents when they are destroyed. This might not
/// make a difference in release builds (where a loop that has no side-effects
/// is easily detected and eliminated), but is often a big win for debug builds.
///
/// Note that `ptr::drop_in_place` already performs this check, so if your workload
/// can be reduced to some small number of drop_in_place calls, using this is
/// unnecessary. In particular note that you can drop_in_place a slice, and that
/// will do a single needs_drop check for all the values.
///
/// Types like Vec therefore just `drop_in_place(&mut self[..])` without using
/// needs_drop explicitly. Types like HashMap, on the other hand, have to drop
/// values one at a time and should use this API.
///
///
/// # Examples
///
/// Here's an example of how a collection might make use of needs_drop:
///
/// ```
/// use std::{mem, ptr};
///
/// pub struct MyCollection<T> {
/// #   data: [T; 1],
///     /* ... */
/// }
/// # impl<T> MyCollection<T> {
/// #   fn iter_mut(&mut self) -> &mut [T] { &mut self.data }
/// #   fn free_buffer(&mut self) {}
/// # }
///
/// impl<T> Drop for MyCollection<T> {
///     fn drop(&mut self) {
///         unsafe {
///             // drop the data
///             if mem::needs_drop::<T>() {
///                 for x in self.iter_mut() {
///                     ptr::drop_in_place(x);
///                 }
///             }
///             self.free_buffer();
///         }
///     }
/// }
/// ```
#[inline]
#[stable(feature = "needs_drop", since = "1.21.0")]
#[rustc_const_unstable(feature = "const_needs_drop")]
pub const fn needs_drop<T>() -> bool {
    intrinsics::needs_drop::<T>()
}

/// Creates a value whose bytes are all zero.
///
/// This has the same effect as allocating space with
/// [`mem::uninitialized`][uninit] and then zeroing it out. It is useful for
/// FFI sometimes, but should generally be avoided.
///
/// There is no guarantee that an all-zero byte-pattern represents a valid value of
/// some type `T`. If `T` has a destructor and the value is destroyed (due to
/// a panic or the end of a scope) before being initialized, then the destructor
/// will run on zeroed data, likely leading to [undefined behavior][ub].
///
/// See also the documentation for [`mem::uninitialized`][uninit], which has
/// many of the same caveats.
///
/// [uninit]: fn.uninitialized.html
/// [ub]: ../../reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let x: i32 = unsafe { mem::zeroed() };
/// assert_eq!(0, x);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn zeroed<T>() -> T {
    // Panic up front for uninhabited `T`, for which no value (zeroed or
    // otherwise) can soundly be produced.
    intrinsics::panic_if_uninhabited::<T>();
    intrinsics::init()
}

/// Bypasses Rust's normal memory-initialization checks by pretending to
/// produce a value of type `T`, while doing nothing at all.
///
/// **This is incredibly dangerous and should not be done lightly. Deeply
/// consider initializing your memory with a default value instead.**
///
/// This is useful for FFI functions and initializing arrays sometimes,
/// but should generally be avoided.
///
/// # Undefined behavior
///
/// It is [undefined behavior][ub] to read uninitialized memory, even just an
/// uninitialized boolean. For instance, if you branch on the value of such
/// a boolean, your program may take one, both, or neither of the branches.
///
/// Writing to the uninitialized value is similarly dangerous. Rust believes the
/// value is initialized, and will therefore try to [`Drop`] the uninitialized
/// value and its fields if you try to overwrite it in a normal manner. The only way
/// to safely initialize an uninitialized value is with [`ptr::write`][write],
/// [`ptr::copy`][copy], or [`ptr::copy_nonoverlapping`][copy_no].
///
/// If the value does implement [`Drop`], it must be initialized before
/// it goes out of scope (and therefore would be dropped). Note that this
/// includes a `panic` occurring and unwinding the stack suddenly.
///
/// If you partially initialize an array, you may need to use
/// [`ptr::drop_in_place`][drop_in_place] to remove the elements you have fully
/// initialized followed by [`mem::forget`][mem_forget] to prevent drop running
/// on the array. If a partially allocated array is dropped this will lead to
/// undefined behaviour.
///
/// # Examples
///
/// Here's how to safely initialize an array of [`Vec`]s.
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// // Only declare the array. This safely leaves it
/// // uninitialized in a way that Rust will track for us.
/// // However we can't initialize it element-by-element
/// // safely, and we can't use the `[value; 1000]`
/// // constructor because it only works with `Copy` data.
/// let mut data: [Vec<u32>; 1000];
///
/// unsafe {
///     // So we need to do this to initialize it.
///     data = mem::uninitialized();
///
///     // DANGER ZONE: if anything panics or otherwise
///     // incorrectly reads the array here, we will have
///     // Undefined Behavior.
///
///     // It's ok to mutably iterate the data, since this
///     // doesn't involve reading it at all.
///     // (ptr and len are statically known for arrays)
///     for elem in &mut data[..] {
///         // *elem = Vec::new() would try to drop the
///         // uninitialized memory at `elem` -- bad!
///         //
///         // Vec::new doesn't allocate or do really
///         // anything. It's only safe to call here
///         // because we know it won't panic.
///         ptr::write(elem, Vec::new());
///     }
///
///     // SAFE ZONE: everything is initialized.
/// }
///
/// println!("{:?}", &data[0]);
/// ```
///
/// This example emphasizes exactly how delicate and dangerous using `mem::uninitialized`
/// can be. Note that the [`vec!`] macro *does* let you initialize every element with a
/// value that is only [`Clone`], so the following is semantically equivalent and
/// vastly less dangerous, as long as you can live with an extra heap
/// allocation:
///
/// ```
/// let data: Vec<Vec<u32>> = vec![Vec::new(); 1000];
/// println!("{:?}", &data[0]);
/// ```
///
/// This example shows how to handle partially initialized arrays, which could
/// be found in low-level datastructures.
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// // Count the number of elements we have assigned.
/// let mut data_len: usize = 0;
/// let mut data: [String; 1000];
///
/// unsafe {
///     data = mem::uninitialized();
///
///     for elem in &mut data[0..500] {
///         ptr::write(elem, String::from("hello"));
///         data_len += 1;
///     }
///
///     // For each item in the array, drop if we allocated it.
///     for i in &mut data[0..data_len] {
///         ptr::drop_in_place(i);
///     }
/// }
/// // Forget the data. If this is allowed to drop, you may see a crash such as:
/// // 'mem_uninit_test(2457,0x7fffb55dd380) malloc: *** error for object
/// // 0x7ff3b8402920: pointer being freed was not allocated'
/// mem::forget(data);
/// ```
///
/// [`Vec`]: ../../std/vec/struct.Vec.html
/// [`vec!`]: ../../std/macro.vec.html
/// [`Clone`]: ../../std/clone/trait.Clone.html
/// [ub]: ../../reference/behavior-considered-undefined.html
/// [write]: ../ptr/fn.write.html
/// [drop_in_place]: ../ptr/fn.drop_in_place.html
/// [mem_zeroed]: fn.zeroed.html
/// [mem_forget]: fn.forget.html
/// [copy]: ../intrinsics/fn.copy.html
/// [copy_no]: ../intrinsics/fn.copy_nonoverlapping.html
/// [`Drop`]: ../ops/trait.Drop.html
#[inline]
#[rustc_deprecated(since = "2.0.0", reason = "use `mem::MaybeUninit::uninitialized` instead")]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn uninitialized<T>() -> T {
    // As in `zeroed`, reject uninhabited types before conjuring a value.
    intrinsics::panic_if_uninhabited::<T>();
    intrinsics::uninit()
}

/// Swaps the values at two mutable locations, without deinitializing either one.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let mut x = 5;
/// let mut y = 42;
///
/// mem::swap(&mut x, &mut y);
///
/// assert_eq!(42, x);
/// assert_eq!(5, y);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap<T>(x: &mut T, y: &mut T) {
    // `&mut` references never alias, so the "nonoverlapping" requirement of
    // the pointer swap holds by construction.
    unsafe {
        ptr::swap_nonoverlapping_one(x, y);
    }
}

/// Moves `src` into the referenced `dest`, returning the previous `dest` value.
///
/// Neither value is dropped.
///
/// # Examples
///
/// A simple example:
///
/// ```
/// use std::mem;
///
/// let mut v: Vec<i32> = vec![1, 2];
///
/// let old_v = mem::replace(&mut v, vec![3, 4, 5]);
/// assert_eq!(2, old_v.len());
/// assert_eq!(3, v.len());
/// ```
///
/// `replace` allows consumption of a struct field by replacing it with another value.
/// Without `replace` you can run into issues like these:
///
/// ```compile_fail,E0507
/// struct Buffer<T> { buf: Vec<T> }
///
/// impl<T> Buffer<T> {
///     fn get_and_reset(&mut self) -> Vec<T> {
///         // error: cannot move out of dereference of `&mut`-pointer
///         let buf = self.buf;
///         self.buf = Vec::new();
///         buf
///     }
/// }
/// ```
///
/// Note that `T` does not necessarily implement [`Clone`], so it can't even clone and reset
/// `self.buf`. But `replace` can be used to disassociate the original value of `self.buf` from
/// `self`, allowing it to be returned:
///
/// ```
/// # #![allow(dead_code)]
/// use std::mem;
///
/// # struct Buffer<T> { buf: Vec<T> }
/// impl<T> Buffer<T> {
///     fn get_and_reset(&mut self) -> Vec<T> {
///         mem::replace(&mut self.buf, Vec::new())
///     }
/// }
/// ```
///
/// [`Clone`]: ../../std/clone/trait.Clone.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn replace<T>(dest: &mut T, mut src: T) -> T {
    // After the swap, `src` holds the old `dest` value and is returned by
    // move, so neither value is dropped here.
    swap(dest, &mut src);
    src
}

/// Disposes of a value.
///
/// This does call the argument's implementation of [`Drop`][drop].
///
/// This effectively does nothing for types which implement `Copy`, e.g.
/// integers. Such values are copied and _then_ moved into the function, so the
/// value persists after this function call.
///
/// This function is not magic; it is literally defined as
///
/// ```
/// pub fn drop<T>(_x: T) { }
/// ```
///
/// Because `_x` is moved into the function, it is automatically dropped before
/// the function returns.
///
/// [drop]: ../ops/trait.Drop.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let v = vec![1, 2, 3];
///
/// drop(v); // explicitly drop the vector
/// ```
///
/// Since [`RefCell`] enforces the borrow rules at runtime, `drop` can
/// release a [`RefCell`] borrow:
///
/// ```
/// use std::cell::RefCell;
///
/// let x = RefCell::new(1);
///
/// let mut mutable_borrow = x.borrow_mut();
/// *mutable_borrow = 1;
///
/// drop(mutable_borrow); // relinquish the mutable borrow on this slot
///
/// let borrow = x.borrow();
/// println!("{}", *borrow);
/// ```
///
/// Integers and other types implementing [`Copy`] are unaffected by `drop`.
///
/// ```
/// #[derive(Copy, Clone)]
/// struct Foo(u8);
///
/// let x = 1;
/// let y = Foo(2);
/// drop(x); // a copy of `x` is moved and dropped
/// drop(y); // a copy of `y` is moved and dropped
///
/// println!("x: {}, y: {}", x, y.0); // still available
/// ```
///
/// [`RefCell`]: ../../std/cell/struct.RefCell.html
/// [`Copy`]: ../../std/marker/trait.Copy.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn drop<T>(_x: T) { }

/// Interprets `src` as having type `&U`, and then reads `src` without moving
/// the contained value.
///
/// This function will unsafely assume the pointer `src` is valid for
/// [`size_of::<U>`][size_of] bytes by transmuting `&T` to `&U` and then reading
/// the `&U`. It will also unsafely create a copy of the contained value instead of
/// moving out of `src`.
///
/// It is not a compile-time error if `T` and `U` have different sizes, but it
/// is highly encouraged to only invoke this function where `T` and `U` have the
/// same size. This function triggers [undefined behavior][ub] if `U` is larger than
/// `T`.
///
/// [ub]: ../../reference/behavior-considered-undefined.html
/// [size_of]: fn.size_of.html
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// #[repr(packed)]
/// struct Foo {
///     bar: u8,
/// }
///
/// let foo_slice = [10u8];
///
/// unsafe {
///     // Copy the data from 'foo_slice' and treat it as a 'Foo'
///     let mut foo_struct: Foo = mem::transmute_copy(&foo_slice);
///     assert_eq!(foo_struct.bar, 10);
///
///     // Modify the copied data
///     foo_struct.bar = 20;
///     assert_eq!(foo_struct.bar, 20);
/// }
///
/// // The contents of 'foo_slice' should not have changed
/// assert_eq!(foo_slice, [10]);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
    // An unaligned read is used because `&T` only guarantees `T`'s alignment,
    // which may be smaller than `U`'s.
    ptr::read_unaligned(src as *const T as *const U)
}

/// Opaque type representing the discriminant of an enum.
///
/// See the [`discriminant`] function in this module for more information.
///
/// [`discriminant`]: fn.discriminant.html
#[stable(feature = "discriminant_value", since = "1.21.0")]
// `PhantomData<fn() -> T>` ties the discriminant to `T` without storing one.
pub struct Discriminant<T>(u64, PhantomData<fn() -> T>);

// N.B. These trait implementations cannot be derived because we don't want any bounds on T.
#[stable(feature = "discriminant_value", since = "1.21.0")] impl<T> Copy for Discriminant<T> {} #[stable(feature = "discriminant_value", since = "1.21.0")] impl<T> clone::Clone for Discriminant<T> { fn clone(&self) -> Self { *self } } #[stable(feature = "discriminant_value", since = "1.21.0")] impl<T> cmp::PartialEq for Discriminant<T> { fn eq(&self, rhs: &Self) -> bool { self.0 == rhs.0 } } #[stable(feature = "discriminant_value", since = "1.21.0")] impl<T> cmp::Eq for Discriminant<T> {} #[stable(feature = "discriminant_value", since = "1.21.0")] impl<T> hash::Hash for Discriminant<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.0.hash(state); } } #[stable(feature = "discriminant_value", since = "1.21.0")] impl<T> fmt::Debug for Discriminant<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_tuple("Discriminant") .field(&self.0) .finish() } } /// Returns a value uniquely identifying the enum variant in `v`. /// /// If `T` is not an enum, calling this function will not result in undefined behavior, but the /// return value is unspecified. /// /// # Stability /// /// The discriminant of an enum variant may change if the enum definition changes. A discriminant /// of some variant will not change between compilations with the same compiler. 
///
/// # Examples
///
/// This can be used to compare enums that carry data, while disregarding
/// the actual data:
///
/// ```
/// use std::mem;
///
/// enum Foo { A(&'static str), B(i32), C(i32) }
///
/// assert!(mem::discriminant(&Foo::A("bar")) == mem::discriminant(&Foo::A("baz")));
/// assert!(mem::discriminant(&Foo::B(1)) == mem::discriminant(&Foo::B(2)));
/// assert!(mem::discriminant(&Foo::B(3)) != mem::discriminant(&Foo::C(3)));
/// ```
#[stable(feature = "discriminant_value", since = "1.21.0")]
pub fn discriminant<T>(v: &T) -> Discriminant<T> {
    unsafe {
        // The intrinsic is defined for every `T`; for non-enum types the
        // result is merely unspecified, not UB (as documented above).
        Discriminant(intrinsics::discriminant_value(v), PhantomData)
    }
}

/// A wrapper to inhibit compiler from automatically calling `T`’s destructor.
///
/// This wrapper is 0-cost.
///
/// # Examples
///
/// This wrapper helps with explicitly documenting the drop order dependencies between fields of
/// the type:
///
/// ```rust
/// use std::mem::ManuallyDrop;
/// struct Peach;
/// struct Banana;
/// struct Melon;
/// struct FruitBox {
///     // Immediately clear there’s something non-trivial going on with these fields.
///     peach: ManuallyDrop<Peach>,
///     melon: Melon, // Field that’s independent of the other two.
///     banana: ManuallyDrop<Banana>,
/// }
///
/// impl Drop for FruitBox {
///     fn drop(&mut self) {
///         unsafe {
///             // Explicit ordering in which field destructors are run specified in the intuitive
///             // location – the destructor of the structure containing the fields.
///             // Moreover, one can now reorder fields within the struct however much they want.
///             ManuallyDrop::drop(&mut self.peach);
///             ManuallyDrop::drop(&mut self.banana);
///         }
///         // After destructor for `FruitBox` runs (this function), the destructor for Melon gets
///         // invoked in the usual manner, as it is not wrapped in `ManuallyDrop`.
///     }
/// }
/// ```
#[stable(feature = "manually_drop", since = "1.20.0")]
#[lang = "manually_drop"]
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct ManuallyDrop<T: ?Sized> {
    value: T,
}

impl<T> ManuallyDrop<T> {
    /// Wrap a value to be manually dropped.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::mem::ManuallyDrop;
    /// ManuallyDrop::new(Box::new(()));
    /// ```
    #[stable(feature = "manually_drop", since = "1.20.0")]
    #[inline(always)]
    pub const fn new(value: T) -> ManuallyDrop<T> {
        ManuallyDrop { value }
    }

    /// Extracts the value from the `ManuallyDrop` container.
    ///
    /// This allows the value to be dropped again.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::mem::ManuallyDrop;
    /// let x = ManuallyDrop::new(Box::new(()));
    /// let _: Box<()> = ManuallyDrop::into_inner(x); // This drops the `Box`.
    /// ```
    #[stable(feature = "manually_drop", since = "1.20.0")]
    #[inline(always)]
    pub const fn into_inner(slot: ManuallyDrop<T>) -> T {
        slot.value
    }

    /// Takes the contained value out.
    ///
    /// This method is primarily intended for moving out values in drop.
    /// Instead of using [`ManuallyDrop::drop`] to manually drop the value,
    /// you can use this method to take the value and use it however desired.
    /// `Drop` will be invoked on the returned value following normal end-of-scope rules.
    ///
    /// If you have ownership of the container, you can use [`ManuallyDrop::into_inner`] instead.
    ///
    /// # Safety
    ///
    /// This function semantically moves out the contained value without preventing further usage.
    /// It is up to the user of this method to ensure that this container is not used again.
    ///
    /// [`ManuallyDrop::drop`]: #method.drop
    /// [`ManuallyDrop::into_inner`]: #method.into_inner
    #[must_use = "if you don't need the value, you can use `ManuallyDrop::drop` instead"]
    #[unstable(feature = "manually_drop_take", issue = "55422")]
    #[inline]
    pub unsafe fn take(slot: &mut ManuallyDrop<T>) -> T {
        // `ptr::read` makes a bitwise copy of the wrapped value; per the
        // `# Safety` contract the caller must never use `slot` again, which
        // is what rules out a double drop.
        ManuallyDrop::into_inner(ptr::read(slot))
    }
}

impl<T: ?Sized> ManuallyDrop<T> {
    /// Manually drops the contained value.
    ///
    /// If you have ownership of the value, you can use [`ManuallyDrop::into_inner`] instead.
    ///
    /// # Safety
    ///
    /// This function runs the destructor of the contained value and thus the wrapped value
    /// now represents uninitialized data. It is up to the user of this method to ensure the
    /// uninitialized data is not actually used.
    ///
    /// [`ManuallyDrop::into_inner`]: #method.into_inner
    #[stable(feature = "manually_drop", since = "1.20.0")]
    #[inline]
    pub unsafe fn drop(slot: &mut ManuallyDrop<T>) {
        // Run `T`'s destructor in place; the wrapper itself never does this
        // automatically (that is the whole point of the type).
        ptr::drop_in_place(&mut slot.value)
    }
}

#[stable(feature = "manually_drop", since = "1.20.0")]
impl<T: ?Sized> Deref for ManuallyDrop<T> {
    type Target = T;
    #[inline(always)]
    fn deref(&self) -> &T {
        &self.value
    }
}

#[stable(feature = "manually_drop", since = "1.20.0")]
impl<T: ?Sized> DerefMut for ManuallyDrop<T> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

/// A newtype to construct uninitialized instances of `T`.
///
/// The compiler, in general, assumes that variables are properly initialized
/// at their respective type. For example, a variable of reference type must
/// be aligned and non-NULL. This is an invariant that must *always* be upheld,
/// even in unsafe code. As a consequence, zero-initializing a variable of reference
/// type causes instantaneous undefined behavior, no matter whether that reference
/// ever gets used to access memory:
///
/// ```rust,no_run
/// #![feature(maybe_uninit)]
/// use std::mem::{self, MaybeUninit};
///
/// let x: &i32 = unsafe { mem::zeroed() }; // undefined behavior!
/// // equivalent code with `MaybeUninit`
/// let x: &i32 = unsafe { MaybeUninit::zeroed().into_initialized() }; // undefined behavior!
/// ```
///
/// This is exploited by the compiler for various optimizations, such as eliding
/// run-time checks and optimizing `enum` layout.
///
/// Not initializing memory at all (instead of zero-initializing it) causes the same
/// issue: after all, the initial value of the variable might just happen to be
/// one that violates the invariant. Moreover, uninitialized memory is special
/// in that the compiler knows that it does not have a fixed value. This makes
/// it undefined behavior to have uninitialized data in a variable even if that
/// variable has otherwise no restrictions about which values are valid:
///
/// ```rust,no_run
/// #![feature(maybe_uninit)]
/// use std::mem::{self, MaybeUninit};
///
/// let x: i32 = unsafe { mem::uninitialized() }; // undefined behavior!
/// // equivalent code with `MaybeUninit`
/// let x: i32 = unsafe { MaybeUninit::uninitialized().into_initialized() }; // undefined behavior!
/// ```
/// (Notice that the rules around uninitialized integers are not finalized yet, but
/// until they are, it is advisable to avoid them.)
///
/// `MaybeUninit` serves to enable unsafe code to deal with uninitialized data:
/// it is a signal to the compiler indicating that the data here might *not*
/// be initialized:
///
/// ```rust
/// #![feature(maybe_uninit)]
/// use std::mem::MaybeUninit;
///
/// // Create an explicitly uninitialized reference. The compiler knows that data inside
/// // a `MaybeUninit` may be invalid, and hence this is not UB:
/// let mut x = MaybeUninit::<&i32>::uninitialized();
/// // Set it to a valid value.
/// x.set(&0);
/// // Extract the initialized data -- this is only allowed *after* properly
/// // initializing `x`!
/// let x = unsafe { x.into_initialized() };
/// ```
///
/// The compiler then knows to not optimize this code.
// FIXME before stabilizing, explain how to initialize a struct field-by-field.
#[allow(missing_debug_implementations)]
#[unstable(feature = "maybe_uninit", issue = "53491")]
// NOTE after stabilizing `MaybeUninit` proceed to deprecate `mem::{uninitialized,zeroed}`
pub union MaybeUninit<T> {
    // A union: either no initialized data (`uninit`) or a live `T`.
    // `ManuallyDrop` ensures the union itself never runs `T`'s destructor.
    uninit: (),
    value: ManuallyDrop<T>,
}

impl<T> MaybeUninit<T> {
    /// Create a new `MaybeUninit` initialized with the given value.
    ///
    /// Note that dropping a `MaybeUninit` will never call `T`'s drop code.
    /// It is your responsibility to make sure `T` gets dropped if it got initialized.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub const fn new(val: T) -> MaybeUninit<T> {
        MaybeUninit { value: ManuallyDrop::new(val) }
    }

    /// Creates a new `MaybeUninit` in an uninitialized state.
    ///
    /// Note that dropping a `MaybeUninit` will never call `T`'s drop code.
    /// It is your responsibility to make sure `T` gets dropped if it got initialized.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub const fn uninitialized() -> MaybeUninit<T> {
        MaybeUninit { uninit: () }
    }

    /// Creates a new `MaybeUninit` in an uninitialized state, with the memory being
    /// filled with `0` bytes. It depends on `T` whether that already makes for
    /// proper initialization. For example, `MaybeUninit<usize>::zeroed()` is initialized,
    /// but `MaybeUninit<&'static i32>::zeroed()` is not because references must not
    /// be null.
    ///
    /// Note that dropping a `MaybeUninit` will never call `T`'s drop code.
    /// It is your responsibility to make sure `T` gets dropped if it got initialized.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline]
    pub fn zeroed() -> MaybeUninit<T> {
        let mut u = MaybeUninit::<T>::uninitialized();
        unsafe {
            // Zero the storage for exactly one `T`; `write_bytes` counts in
            // units of `T`, not bytes.
            u.as_mut_ptr().write_bytes(0u8, 1);
        }
        u
    }

    /// Sets the value of the `MaybeUninit`. This overwrites any previous value without dropping it.
    /// For your convenience, this also returns a mutable reference to the (now safely initialized)
    /// contents of `self`.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub fn set(&mut self, val: T) -> &mut T {
        unsafe {
            // Overwrite the union field; as documented above, any previous
            // value is NOT dropped here.
            self.value = ManuallyDrop::new(val);
            self.get_mut()
        }
    }

    /// Gets a pointer to the contained value. Reading from this pointer or turning it
    /// into a reference will be undefined behavior unless the `MaybeUninit` is initialized.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub fn as_ptr(&self) -> *const T {
        unsafe { &*self.value as *const T }
    }

    /// Gets a mutable pointer to the contained value. Reading from this pointer or turning it
    /// into a reference will be undefined behavior unless the `MaybeUninit` is initialized.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        unsafe { &mut *self.value as *mut T }
    }

    /// Extracts the value from the `MaybeUninit` container. This is a great way
    /// to ensure that the data will get dropped, because the resulting `T` is
    /// subject to the usual drop handling.
    ///
    /// # Unsafety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized
    /// state. Calling this when the content is not yet fully initialized causes undefined
    /// behavior.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub unsafe fn into_initialized(self) -> T {
        intrinsics::panic_if_uninhabited::<T>();
        ManuallyDrop::into_inner(self.value)
    }

    /// Gets a reference to the contained value.
    ///
    /// # Unsafety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized
    /// state. Calling this when the content is not yet fully initialized causes undefined
    /// behavior.
    #[unstable(feature = "maybe_uninit_ref", issue = "53491")]
    #[inline(always)]
    pub unsafe fn get_ref(&self) -> &T {
        &*self.value
    }

    /// Gets a mutable reference to the contained value.
    ///
    /// # Unsafety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized
    /// state. Calling this when the content is not yet fully initialized causes undefined
    /// behavior.
    // FIXME(#53491): We currently rely on the above being incorrect, i.e., we have references
    // to uninitialized data (e.g., in `libcore/fmt/float.rs`). We should make
    // a final decision about the rules before stabilization.
    #[unstable(feature = "maybe_uninit_ref", issue = "53491")]
    #[inline(always)]
    pub unsafe fn get_mut(&mut self) -> &mut T {
        &mut *self.value
    }

    /// Gets a pointer to the first element of the array.
    #[unstable(feature = "maybe_uninit_slice", issue = "53491")]
    #[inline(always)]
    pub fn first_ptr(this: &[MaybeUninit<T>]) -> *const T {
        // A slice of `MaybeUninit<T>` starts at the same address as its first
        // (possibly uninitialized) `T`, so a raw cast suffices.
        this as *const [MaybeUninit<T>] as *const T
    }

    /// Gets a mutable pointer to the first element of the array.
    #[unstable(feature = "maybe_uninit_slice", issue = "53491")]
    #[inline(always)]
    pub fn first_ptr_mut(this: &mut [MaybeUninit<T>]) -> *mut T {
        this as *mut [MaybeUninit<T>] as *mut T
    }
}
Add MaybeUninit::read_uninitialized
Also remove a no-longer accurate comments
//! Basic functions for dealing with memory.
//!
//! This module contains functions for querying the size and alignment of
//! types, initializing and manipulating memory.

#![stable(feature = "rust1", since = "1.0.0")]

use clone;
use cmp;
use fmt;
use hash;
use intrinsics;
use marker::{Copy, PhantomData, Sized};
use ptr;
use ops::{Deref, DerefMut};

#[stable(feature = "rust1", since = "1.0.0")]
#[doc(inline)]
pub use intrinsics::transmute;

/// Takes ownership and "forgets" about the value **without running its destructor**.
///
/// Any resources the value manages, such as heap memory or a file handle, will linger
/// forever in an unreachable state. However, it does not guarantee that pointers
/// to this memory will remain valid.
///
/// * If you want to leak memory, see [`Box::leak`][leak].
/// * If you want to obtain a raw pointer to the memory, see [`Box::into_raw`][into_raw].
/// * If you want to dispose of a value properly, running its destructor, see
/// [`mem::drop`][drop].
///
/// # Safety
///
/// `forget` is not marked as `unsafe`, because Rust's safety guarantees
/// do not include a guarantee that destructors will always run. For example,
/// a program can create a reference cycle using [`Rc`][rc], or call
/// [`process::exit`][exit] to exit without running destructors. Thus, allowing
/// `mem::forget` from safe code does not fundamentally change Rust's safety
/// guarantees.
///
/// That said, leaking resources such as memory or I/O objects is usually undesirable,
/// so `forget` is only recommended for specialized use cases like those shown below.
///
/// Because forgetting a value is allowed, any `unsafe` code you write must
/// allow for this possibility. You cannot return a value and expect that the
/// caller will necessarily run the value's destructor.
///
/// [rc]: ../../std/rc/struct.Rc.html
/// [exit]: ../../std/process/fn.exit.html
///
/// # Examples
///
/// Leak an I/O object, never closing the file:
///
/// ```no_run
/// use std::mem;
/// use std::fs::File;
///
/// let file = File::open("foo.txt").unwrap();
/// mem::forget(file);
/// ```
///
/// The practical use cases for `forget` are rather specialized and mainly come
/// up in unsafe or FFI code.
///
/// ## Use case 1
///
/// You have created an uninitialized value using [`mem::uninitialized`][uninit].
/// You must either initialize or `forget` it on every computation path before
/// Rust drops it automatically, like at the end of a scope or after a panic.
/// Running the destructor on an uninitialized value would be [undefined behavior][ub].
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// # let some_condition = false;
/// unsafe {
///     let mut uninit_vec: Vec<u32> = mem::uninitialized();
///
///     if some_condition {
///         // Initialize the variable.
///         ptr::write(&mut uninit_vec, Vec::new());
///     } else {
///         // Forget the uninitialized value so its destructor doesn't run.
///         mem::forget(uninit_vec);
///     }
/// }
/// ```
///
/// ## Use case 2
///
/// You have duplicated the bytes making up a value, without doing a proper
/// [`Clone`][clone]. You need the value's destructor to run only once,
/// because a double `free` is undefined behavior.
///
/// An example is a possible implementation of [`mem::swap`][swap]:
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// # #[allow(dead_code)]
/// fn swap<T>(x: &mut T, y: &mut T) {
///     unsafe {
///         // Give ourselves some scratch space to work with
///         let mut t: T = mem::uninitialized();
///
///         // Perform the swap, `&mut` pointers never alias
///         ptr::copy_nonoverlapping(&*x, &mut t, 1);
///         ptr::copy_nonoverlapping(&*y, x, 1);
///         ptr::copy_nonoverlapping(&t, y, 1);
///
///         // y and t now point to the same thing, but we need to completely
///         // forget `t` because we do not want to run the destructor for `T`
///         // on its value, which is still owned somewhere outside this function.
///         mem::forget(t);
///     }
/// }
/// ```
///
/// [drop]: fn.drop.html
/// [uninit]: fn.uninitialized.html
/// [clone]: ../clone/trait.Clone.html
/// [swap]: fn.swap.html
/// [box]: ../../std/boxed/struct.Box.html
/// [leak]: ../../std/boxed/struct.Box.html#method.leak
/// [into_raw]: ../../std/boxed/struct.Box.html#method.into_raw
/// [ub]: ../../reference/behavior-considered-undefined.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn forget<T>(t: T) {
    // Move `t` into a `ManuallyDrop` that is immediately discarded:
    // `ManuallyDrop` never runs the wrapped value's destructor, so `t`
    // is leaked without any `unsafe`.
    ManuallyDrop::new(t);
}

/// Like [`forget`], but also accepts unsized values.
///
/// This function is just a shim intended to be removed when the `unsized_locals` feature gets
/// stabilized.
///
/// [`forget`]: fn.forget.html
#[inline]
#[unstable(feature = "forget_unsized", issue = "0")]
pub fn forget_unsized<T: ?Sized>(t: T) {
    unsafe { intrinsics::forget(t) }
}

/// Returns the size of a type in bytes.
///
/// More specifically, this is the offset in bytes between successive elements
/// in an array with that item type including alignment padding. Thus, for any
/// type `T` and length `n`, `[T; n]` has a size of `n * size_of::<T>()`.
///
/// In general, the size of a type is not stable across compilations, but
/// specific types such as primitives are.
///
/// The following table gives the size for primitives.
///
/// Type | size_of::\<Type>()
/// ---- | ---------------
/// () | 0
/// bool | 1
/// u8 | 1
/// u16 | 2
/// u32 | 4
/// u64 | 8
/// u128 | 16
/// i8 | 1
/// i16 | 2
/// i32 | 4
/// i64 | 8
/// i128 | 16
/// f32 | 4
/// f64 | 8
/// char | 4
///
/// Furthermore, `usize` and `isize` have the same size.
///
/// The types `*const T`, `&T`, `Box<T>`, `Option<&T>`, and `Option<Box<T>>` all have
/// the same size. If `T` is Sized, all of those types have the same size as `usize`.
///
/// The mutability of a pointer does not change its size. As such, `&T` and `&mut T`
/// have the same size. Likewise for `*const T` and `*mut T`.
///
/// # Size of `#[repr(C)]` items
///
/// The `C` representation for items has a defined layout. With this layout,
/// the size of items is also stable as long as all fields have a stable size.
///
/// ## Size of Structs
///
/// For `structs`, the size is determined by the following algorithm.
///
/// For each field in the struct ordered by declaration order:
///
/// 1. Add the size of the field.
/// 2. Round up the current size to the nearest multiple of the next field's [alignment].
///
/// Finally, round the size of the struct to the nearest multiple of its [alignment].
/// The alignment of the struct is usually the largest alignment of all its
/// fields; this can be changed with the use of `repr(align(N))`.
///
/// Unlike `C`, zero sized structs are not rounded up to one byte in size.
///
/// ## Size of Enums
///
/// Enums that carry no data other than the discriminant have the same size as C enums
/// on the platform they are compiled for.
///
/// ## Size of Unions
///
/// The size of a union is the size of its largest field.
///
/// Unlike `C`, zero sized unions are not rounded up to one byte in size.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// // Some primitives
/// assert_eq!(4, mem::size_of::<i32>());
/// assert_eq!(8, mem::size_of::<f64>());
/// assert_eq!(0, mem::size_of::<()>());
///
/// // Some arrays
/// assert_eq!(8, mem::size_of::<[i32; 2]>());
/// assert_eq!(12, mem::size_of::<[i32; 3]>());
/// assert_eq!(0, mem::size_of::<[i32; 0]>());
///
///
/// // Pointer size equality
/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<*const i32>());
/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Box<i32>>());
/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Option<&i32>>());
/// assert_eq!(mem::size_of::<Box<i32>>(), mem::size_of::<Option<Box<i32>>>());
/// ```
///
/// Using `#[repr(C)]`.
///
/// ```
/// use std::mem;
///
/// #[repr(C)]
/// struct FieldStruct {
///     first: u8,
///     second: u16,
///     third: u8
/// }
///
/// // The size of the first field is 1, so add 1 to the size. Size is 1.
/// // The alignment of the second field is 2, so add 1 to the size for padding. Size is 2.
/// // The size of the second field is 2, so add 2 to the size. Size is 4.
/// // The alignment of the third field is 1, so add 0 to the size for padding. Size is 4.
/// // The size of the third field is 1, so add 1 to the size. Size is 5.
/// // Finally, the alignment of the struct is 2 (because the largest alignment amongst its
/// // fields is 2), so add 1 to the size for padding. Size is 6.
/// assert_eq!(6, mem::size_of::<FieldStruct>());
///
/// #[repr(C)]
/// struct TupleStruct(u8, u16, u8);
///
/// // Tuple structs follow the same rules.
/// assert_eq!(6, mem::size_of::<TupleStruct>());
///
/// // Note that reordering the fields can lower the size. We can remove both padding bytes
/// // by putting `third` before `second`.
/// #[repr(C)]
/// struct FieldStructOptimized {
///     first: u8,
///     third: u8,
///     second: u16
/// }
///
/// assert_eq!(4, mem::size_of::<FieldStructOptimized>());
///
/// // Union size is the size of the largest field.
/// #[repr(C)]
/// union ExampleUnion {
///     smaller: u8,
///     larger: u16
/// }
///
/// assert_eq!(2, mem::size_of::<ExampleUnion>());
/// ```
///
/// [alignment]: ./fn.align_of.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_promotable]
pub const fn size_of<T>() -> usize {
    // Pure compiler intrinsic; `const fn` so it can be evaluated at compile time.
    intrinsics::size_of::<T>()
}

/// Returns the size of the pointed-to value in bytes.
///
/// This is usually the same as `size_of::<T>()`. However, when `T` *has* no
/// statically-known size, e.g., a slice [`[T]`][slice] or a [trait object],
/// then `size_of_val` can be used to get the dynamically-known size.
///
/// [slice]: ../../std/primitive.slice.html
/// [trait object]: ../../book/ch17-02-trait-objects.html
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::size_of_val(&5i32));
///
/// let x: [u8; 13] = [0; 13];
/// let y: &[u8] = &x;
/// assert_eq!(13, mem::size_of_val(y));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn size_of_val<T: ?Sized>(val: &T) -> usize {
    unsafe { intrinsics::size_of_val(val) }
}

/// Returns the [ABI]-required minimum alignment of a type.
///
/// Every reference to a value of the type `T` must be a multiple of this number.
///
/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
///
/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
///
/// # Examples
///
/// ```
/// # #![allow(deprecated)]
/// use std::mem;
///
/// assert_eq!(4, mem::min_align_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(reason = "use `align_of` instead", since = "1.2.0")]
pub fn min_align_of<T>() -> usize {
    intrinsics::min_align_of::<T>()
}

/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to.
///
/// Every reference to a value of the type `T` must be a multiple of this number.
///
/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
///
/// # Examples
///
/// ```
/// # #![allow(deprecated)]
/// use std::mem;
///
/// assert_eq!(4, mem::min_align_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(reason = "use `align_of_val` instead", since = "1.2.0")]
pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
    unsafe { intrinsics::min_align_of_val(val) }
}

/// Returns the [ABI]-required minimum alignment of a type.
///
/// Every reference to a value of the type `T` must be a multiple of this number.
///
/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
///
/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_promotable]
pub const fn align_of<T>() -> usize {
    // Same intrinsic as the deprecated `min_align_of` above; this is the
    // non-deprecated, `const`-evaluable replacement.
    intrinsics::min_align_of::<T>()
}

/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to.
///
/// Every reference to a value of the type `T` must be a multiple of this number.
///
/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn align_of_val<T: ?Sized>(val: &T) -> usize {
    unsafe { intrinsics::min_align_of_val(val) }
}

/// Returns `true` if dropping values of type `T` matters.
///
/// This is purely an optimization hint, and may be implemented conservatively:
/// it may return `true` for types that don't actually need to be dropped.
/// As such always returning `true` would be a valid implementation of
/// this function. However if this function actually returns `false`, then you
/// can be certain dropping `T` has no side effect.
///
/// Low level implementations of things like collections, which need to manually
/// drop their data, should use this function to avoid unnecessarily
/// trying to drop all their contents when they are destroyed. This might not
/// make a difference in release builds (where a loop that has no side-effects
/// is easily detected and eliminated), but is often a big win for debug builds.
///
/// Note that `ptr::drop_in_place` already performs this check, so if your workload
/// can be reduced to some small number of drop_in_place calls, using this is
/// unnecessary. In particular note that you can drop_in_place a slice, and that
/// will do a single needs_drop check for all the values.
///
/// Types like Vec therefore just `drop_in_place(&mut self[..])` without using
/// needs_drop explicitly. Types like HashMap, on the other hand, have to drop
/// values one at a time and should use this API.
///
///
/// # Examples
///
/// Here's an example of how a collection might make use of needs_drop:
///
/// ```
/// use std::{mem, ptr};
///
/// pub struct MyCollection<T> {
/// #   data: [T; 1],
///     /* ... */
/// }
/// # impl<T> MyCollection<T> {
/// #   fn iter_mut(&mut self) -> &mut [T] { &mut self.data }
/// #   fn free_buffer(&mut self) {}
/// # }
///
/// impl<T> Drop for MyCollection<T> {
///     fn drop(&mut self) {
///         unsafe {
///             // drop the data
///             if mem::needs_drop::<T>() {
///                 for x in self.iter_mut() {
///                     ptr::drop_in_place(x);
///                 }
///             }
///             self.free_buffer();
///         }
///     }
/// }
/// ```
#[inline]
#[stable(feature = "needs_drop", since = "1.21.0")]
#[rustc_const_unstable(feature = "const_needs_drop")]
pub const fn needs_drop<T>() -> bool {
    intrinsics::needs_drop::<T>()
}

/// Creates a value whose bytes are all zero.
///
/// This has the same effect as allocating space with
/// [`mem::uninitialized`][uninit] and then zeroing it out. It is useful for
/// FFI sometimes, but should generally be avoided.
///
/// There is no guarantee that an all-zero byte-pattern represents a valid value of
/// some type `T`. If `T` has a destructor and the value is destroyed (due to
/// a panic or the end of a scope) before being initialized, then the destructor
/// will run on zeroed data, likely leading to [undefined behavior][ub].
///
/// See also the documentation for [`mem::uninitialized`][uninit], which has
/// many of the same caveats.
///
/// [uninit]: fn.uninitialized.html
/// [ub]: ../../reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let x: i32 = unsafe { mem::zeroed() };
/// assert_eq!(0, x);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn zeroed<T>() -> T {
    // Reject uninhabited `T` up front — no value of such a type may ever be
    // produced, zeroed or otherwise — then materialize the all-zero value.
    intrinsics::panic_if_uninhabited::<T>();
    intrinsics::init()
}

/// Bypasses Rust's normal memory-initialization checks by pretending to
/// produce a value of type `T`, while doing nothing at all.
///
/// **This is incredibly dangerous and should not be done lightly.
Deeply
/// consider initializing your memory with a default value instead.**
///
/// This is useful for FFI functions and initializing arrays sometimes,
/// but should generally be avoided.
///
/// # Undefined behavior
///
/// It is [undefined behavior][ub] to read uninitialized memory, even just an
/// uninitialized boolean. For instance, if you branch on the value of such
/// a boolean, your program may take one, both, or neither of the branches.
///
/// Writing to the uninitialized value is similarly dangerous. Rust believes the
/// value is initialized, and will therefore try to [`Drop`] the uninitialized
/// value and its fields if you try to overwrite it in a normal manner. The only way
/// to safely initialize an uninitialized value is with [`ptr::write`][write],
/// [`ptr::copy`][copy], or [`ptr::copy_nonoverlapping`][copy_no].
///
/// If the value does implement [`Drop`], it must be initialized before
/// it goes out of scope (and therefore would be dropped). Note that this
/// includes a `panic` occurring and unwinding the stack suddenly.
///
/// If you partially initialize an array, you may need to use
/// [`ptr::drop_in_place`][drop_in_place] to remove the elements you have fully
/// initialized followed by [`mem::forget`][mem_forget] to prevent drop running
/// on the array. If a partially allocated array is dropped this will lead to
/// undefined behaviour.
///
/// # Examples
///
/// Here's how to safely initialize an array of [`Vec`]s.
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// // Only declare the array. This safely leaves it
/// // uninitialized in a way that Rust will track for us.
/// // However we can't initialize it element-by-element
/// // safely, and we can't use the `[value; 1000]`
/// // constructor because it only works with `Copy` data.
/// let mut data: [Vec<u32>; 1000];
///
/// unsafe {
///     // So we need to do this to initialize it.
///     data = mem::uninitialized();
///
///     // DANGER ZONE: if anything panics or otherwise
///     // incorrectly reads the array here, we will have
///     // Undefined Behavior.
///
///     // It's ok to mutably iterate the data, since this
///     // doesn't involve reading it at all.
///     // (ptr and len are statically known for arrays)
///     for elem in &mut data[..] {
///         // *elem = Vec::new() would try to drop the
///         // uninitialized memory at `elem` -- bad!
///         //
///         // Vec::new doesn't allocate or do really
///         // anything. It's only safe to call here
///         // because we know it won't panic.
///         ptr::write(elem, Vec::new());
///     }
///
///     // SAFE ZONE: everything is initialized.
/// }
///
/// println!("{:?}", &data[0]);
/// ```
///
/// This example emphasizes exactly how delicate and dangerous using `mem::uninitialized`
/// can be. Note that the [`vec!`] macro *does* let you initialize every element with a
/// value that is only [`Clone`], so the following is semantically equivalent and
/// vastly less dangerous, as long as you can live with an extra heap
/// allocation:
///
/// ```
/// let data: Vec<Vec<u32>> = vec![Vec::new(); 1000];
/// println!("{:?}", &data[0]);
/// ```
///
/// This example shows how to handle partially initialized arrays, which could
/// be found in low-level datastructures.
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// // Count the number of elements we have assigned.
/// let mut data_len: usize = 0;
/// let mut data: [String; 1000];
///
/// unsafe {
///     data = mem::uninitialized();
///
///     for elem in &mut data[0..500] {
///         ptr::write(elem, String::from("hello"));
///         data_len += 1;
///     }
///
///     // For each item in the array, drop if we allocated it.
///     for i in &mut data[0..data_len] {
///         ptr::drop_in_place(i);
///     }
/// }
/// // Forget the data. If this is allowed to drop, you may see a crash such as:
/// // 'mem_uninit_test(2457,0x7fffb55dd380) malloc: *** error for object
/// // 0x7ff3b8402920: pointer being freed was not allocated'
/// mem::forget(data);
/// ```
///
/// [`Vec`]: ../../std/vec/struct.Vec.html
/// [`vec!`]: ../../std/macro.vec.html
/// [`Clone`]: ../../std/clone/trait.Clone.html
/// [ub]: ../../reference/behavior-considered-undefined.html
/// [write]: ../ptr/fn.write.html
/// [drop_in_place]: ../ptr/fn.drop_in_place.html
/// [mem_zeroed]: fn.zeroed.html
/// [mem_forget]: fn.forget.html
/// [copy]: ../intrinsics/fn.copy.html
/// [copy_no]: ../intrinsics/fn.copy_nonoverlapping.html
/// [`Drop`]: ../ops/trait.Drop.html
#[inline]
#[rustc_deprecated(since = "2.0.0", reason = "use `mem::MaybeUninit::uninitialized` instead")]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn uninitialized<T>() -> T {
    // As in `zeroed`: never produce a value of an uninhabited type.
    intrinsics::panic_if_uninhabited::<T>();
    intrinsics::uninit()
}

/// Swaps the values at two mutable locations, without deinitializing either one.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let mut x = 5;
/// let mut y = 42;
///
/// mem::swap(&mut x, &mut y);
///
/// assert_eq!(42, x);
/// assert_eq!(5, y);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap<T>(x: &mut T, y: &mut T) {
    unsafe {
        // Sound because two live `&mut T` can never overlap.
        ptr::swap_nonoverlapping_one(x, y);
    }
}

/// Moves `src` into the referenced `dest`, returning the previous `dest` value.
///
/// Neither value is dropped.
///
/// # Examples
///
/// A simple example:
///
/// ```
/// use std::mem;
///
/// let mut v: Vec<i32> = vec![1, 2];
///
/// let old_v = mem::replace(&mut v, vec![3, 4, 5]);
/// assert_eq!(2, old_v.len());
/// assert_eq!(3, v.len());
/// ```
///
/// `replace` allows consumption of a struct field by replacing it with another value.
/// Without `replace` you can run into issues like these: /// /// ```compile_fail,E0507 /// struct Buffer<T> { buf: Vec<T> } /// /// impl<T> Buffer<T> { /// fn get_and_reset(&mut self) -> Vec<T> { /// // error: cannot move out of dereference of `&mut`-pointer /// let buf = self.buf; /// self.buf = Vec::new(); /// buf /// } /// } /// ``` /// /// Note that `T` does not necessarily implement [`Clone`], so it can't even clone and reset /// `self.buf`. But `replace` can be used to disassociate the original value of `self.buf` from /// `self`, allowing it to be returned: /// /// ``` /// # #![allow(dead_code)] /// use std::mem; /// /// # struct Buffer<T> { buf: Vec<T> } /// impl<T> Buffer<T> { /// fn get_and_reset(&mut self) -> Vec<T> { /// mem::replace(&mut self.buf, Vec::new()) /// } /// } /// ``` /// /// [`Clone`]: ../../std/clone/trait.Clone.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn replace<T>(dest: &mut T, mut src: T) -> T { swap(dest, &mut src); src } /// Disposes of a value. /// /// This does call the argument's implementation of [`Drop`][drop]. /// /// This effectively does nothing for types which implement `Copy`, e.g. /// integers. Such values are copied and _then_ moved into the function, so the /// value persists after this function call. /// /// This function is not magic; it is literally defined as /// /// ``` /// pub fn drop<T>(_x: T) { } /// ``` /// /// Because `_x` is moved into the function, it is automatically dropped before /// the function returns. 
/// /// [drop]: ../ops/trait.Drop.html /// /// # Examples /// /// Basic usage: /// /// ``` /// let v = vec![1, 2, 3]; /// /// drop(v); // explicitly drop the vector /// ``` /// /// Since [`RefCell`] enforces the borrow rules at runtime, `drop` can /// release a [`RefCell`] borrow: /// /// ``` /// use std::cell::RefCell; /// /// let x = RefCell::new(1); /// /// let mut mutable_borrow = x.borrow_mut(); /// *mutable_borrow = 1; /// /// drop(mutable_borrow); // relinquish the mutable borrow on this slot /// /// let borrow = x.borrow(); /// println!("{}", *borrow); /// ``` /// /// Integers and other types implementing [`Copy`] are unaffected by `drop`. /// /// ``` /// #[derive(Copy, Clone)] /// struct Foo(u8); /// /// let x = 1; /// let y = Foo(2); /// drop(x); // a copy of `x` is moved and dropped /// drop(y); // a copy of `y` is moved and dropped /// /// println!("x: {}, y: {}", x, y.0); // still available /// ``` /// /// [`RefCell`]: ../../std/cell/struct.RefCell.html /// [`Copy`]: ../../std/marker/trait.Copy.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn drop<T>(_x: T) { } /// Interprets `src` as having type `&U`, and then reads `src` without moving /// the contained value. /// /// This function will unsafely assume the pointer `src` is valid for /// [`size_of::<U>`][size_of] bytes by transmuting `&T` to `&U` and then reading /// the `&U`. It will also unsafely create a copy of the contained value instead of /// moving out of `src`. /// /// It is not a compile-time error if `T` and `U` have different sizes, but it /// is highly encouraged to only invoke this function where `T` and `U` have the /// same size. This function triggers [undefined behavior][ub] if `U` is larger than /// `T`. 
/// /// [ub]: ../../reference/behavior-considered-undefined.html /// [size_of]: fn.size_of.html /// /// # Examples /// /// ``` /// use std::mem; /// /// #[repr(packed)] /// struct Foo { /// bar: u8, /// } /// /// let foo_slice = [10u8]; /// /// unsafe { /// // Copy the data from 'foo_slice' and treat it as a 'Foo' /// let mut foo_struct: Foo = mem::transmute_copy(&foo_slice); /// assert_eq!(foo_struct.bar, 10); /// /// // Modify the copied data /// foo_struct.bar = 20; /// assert_eq!(foo_struct.bar, 20); /// } /// /// // The contents of 'foo_slice' should not have changed /// assert_eq!(foo_slice, [10]); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn transmute_copy<T, U>(src: &T) -> U { ptr::read_unaligned(src as *const T as *const U) } /// Opaque type representing the discriminant of an enum. /// /// See the [`discriminant`] function in this module for more information. /// /// [`discriminant`]: fn.discriminant.html #[stable(feature = "discriminant_value", since = "1.21.0")] pub struct Discriminant<T>(u64, PhantomData<fn() -> T>); // N.B. These trait implementations cannot be derived because we don't want any bounds on T. 
#[stable(feature = "discriminant_value", since = "1.21.0")]
impl<T> Copy for Discriminant<T> {}

#[stable(feature = "discriminant_value", since = "1.21.0")]
impl<T> clone::Clone for Discriminant<T> {
    fn clone(&self) -> Self {
        *self
    }
}

#[stable(feature = "discriminant_value", since = "1.21.0")]
impl<T> cmp::PartialEq for Discriminant<T> {
    // Only the raw discriminant value (field 0) participates in comparison;
    // the `PhantomData` carries no data.
    fn eq(&self, rhs: &Self) -> bool {
        self.0 == rhs.0
    }
}

#[stable(feature = "discriminant_value", since = "1.21.0")]
impl<T> cmp::Eq for Discriminant<T> {}

#[stable(feature = "discriminant_value", since = "1.21.0")]
impl<T> hash::Hash for Discriminant<T> {
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.0.hash(state);
    }
}

#[stable(feature = "discriminant_value", since = "1.21.0")]
impl<T> fmt::Debug for Discriminant<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_tuple("Discriminant")
            .field(&self.0)
            .finish()
    }
}

/// Returns a value uniquely identifying the enum variant in `v`.
///
/// If `T` is not an enum, calling this function will not result in undefined behavior, but the
/// return value is unspecified.
///
/// # Stability
///
/// The discriminant of an enum variant may change if the enum definition changes. A discriminant
/// of some variant will not change between compilations with the same compiler.
///
/// # Examples
///
/// This can be used to compare enums that carry data, while disregarding
/// the actual data:
///
/// ```
/// use std::mem;
///
/// enum Foo { A(&'static str), B(i32), C(i32) }
///
/// assert!(mem::discriminant(&Foo::A("bar")) == mem::discriminant(&Foo::A("baz")));
/// assert!(mem::discriminant(&Foo::B(1)) == mem::discriminant(&Foo::B(2)));
/// assert!(mem::discriminant(&Foo::B(3)) != mem::discriminant(&Foo::C(3)));
/// ```
#[stable(feature = "discriminant_value", since = "1.21.0")]
pub fn discriminant<T>(v: &T) -> Discriminant<T> {
    unsafe {
        // The intrinsic is callable on any `T`; for non-enums the result is
        // unspecified but not undefined behavior (see doc comment above).
        Discriminant(intrinsics::discriminant_value(v), PhantomData)
    }
}

/// A wrapper to inhibit compiler from automatically calling `T`’s destructor.
///
/// This wrapper is 0-cost.
///
/// # Examples
///
/// This wrapper helps with explicitly documenting the drop order dependencies between fields of
/// the type:
///
/// ```rust
/// use std::mem::ManuallyDrop;
/// struct Peach;
/// struct Banana;
/// struct Melon;
/// struct FruitBox {
///     // Immediately clear there’s something non-trivial going on with these fields.
///     peach: ManuallyDrop<Peach>,
///     melon: Melon, // Field that’s independent of the other two.
///     banana: ManuallyDrop<Banana>,
/// }
///
/// impl Drop for FruitBox {
///     fn drop(&mut self) {
///         unsafe {
///             // Explicit ordering in which field destructors are run specified in the intuitive
///             // location – the destructor of the structure containing the fields.
///             // Moreover, one can now reorder fields within the struct however much they want.
///             ManuallyDrop::drop(&mut self.peach);
///             ManuallyDrop::drop(&mut self.banana);
///         }
///         // After destructor for `FruitBox` runs (this function), the destructor for Melon gets
///         // invoked in the usual manner, as it is not wrapped in `ManuallyDrop`.
///     }
/// }
/// ```
#[stable(feature = "manually_drop", since = "1.20.0")]
#[lang = "manually_drop"]
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct ManuallyDrop<T: ?Sized> {
    value: T,
}

impl<T> ManuallyDrop<T> {
    /// Wrap a value to be manually dropped.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::mem::ManuallyDrop;
    /// ManuallyDrop::new(Box::new(()));
    /// ```
    #[stable(feature = "manually_drop", since = "1.20.0")]
    #[inline(always)]
    pub const fn new(value: T) -> ManuallyDrop<T> {
        ManuallyDrop { value }
    }

    /// Extracts the value from the `ManuallyDrop` container.
    ///
    /// This allows the value to be dropped again.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::mem::ManuallyDrop;
    /// let x = ManuallyDrop::new(Box::new(()));
    /// let _: Box<()> = ManuallyDrop::into_inner(x); // This drops the `Box`.
    /// ```
    #[stable(feature = "manually_drop", since = "1.20.0")]
    #[inline(always)]
    pub const fn into_inner(slot: ManuallyDrop<T>) -> T {
        slot.value
    }

    /// Takes the contained value out.
    ///
    /// This method is primarily intended for moving out values in drop.
    /// Instead of using [`ManuallyDrop::drop`] to manually drop the value,
    /// you can use this method to take the value and use it however desired.
    /// `Drop` will be invoked on the returned value following normal end-of-scope rules.
    ///
    /// If you have ownership of the container, you can use [`ManuallyDrop::into_inner`] instead.
    ///
    /// # Safety
    ///
    /// This function semantically moves out the contained value without preventing further usage.
    /// It is up to the user of this method to ensure that this container is not used again.
    ///
    /// [`ManuallyDrop::drop`]: #method.drop
    /// [`ManuallyDrop::into_inner`]: #method.into_inner
    #[must_use = "if you don't need the value, you can use `ManuallyDrop::drop` instead"]
    #[unstable(feature = "manually_drop_take", issue = "55422")]
    #[inline]
    pub unsafe fn take(slot: &mut ManuallyDrop<T>) -> T {
        // Bitwise-copy the value out; the original bytes stay behind in
        // `slot`, which is why the caller must not use the container again.
        ManuallyDrop::into_inner(ptr::read(slot))
    }
}

impl<T: ?Sized> ManuallyDrop<T> {
    /// Manually drops the contained value.
    ///
    /// If you have ownership of the value, you can use [`ManuallyDrop::into_inner`] instead.
    ///
    /// # Safety
    ///
    /// This function runs the destructor of the contained value and thus the wrapped value
    /// now represents uninitialized data. It is up to the user of this method to ensure the
    /// uninitialized data is not actually used.
    ///
    /// [`ManuallyDrop::into_inner`]: #method.into_inner
    #[stable(feature = "manually_drop", since = "1.20.0")]
    #[inline]
    pub unsafe fn drop(slot: &mut ManuallyDrop<T>) {
        ptr::drop_in_place(&mut slot.value)
    }
}

#[stable(feature = "manually_drop", since = "1.20.0")]
impl<T: ?Sized> Deref for ManuallyDrop<T> {
    type Target = T;
    #[inline(always)]
    fn deref(&self) -> &T {
        &self.value
    }
}

#[stable(feature = "manually_drop", since = "1.20.0")]
impl<T: ?Sized> DerefMut for ManuallyDrop<T> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

/// A newtype to construct uninitialized instances of `T`.
///
/// The compiler, in general, assumes that variables are properly initialized
/// at their respective type. For example, a variable of reference type must
/// be aligned and non-NULL. This is an invariant that must *always* be upheld,
/// even in unsafe code. As a consequence, zero-initializing a variable of reference
/// type causes instantaneous undefined behavior, no matter whether that reference
/// ever gets used to access memory:
///
/// ```rust,no_run
/// #![feature(maybe_uninit)]
/// use std::mem::{self, MaybeUninit};
///
/// let x: &i32 = unsafe { mem::zeroed() }; // undefined behavior!
/// // equivalent code with `MaybeUninit`
/// let x: &i32 = unsafe { MaybeUninit::zeroed().into_initialized() }; // undefined behavior!
/// ```
///
/// This is exploited by the compiler for various optimizations, such as eliding
/// run-time checks and optimizing `enum` layout.
///
/// Not initializing memory at all (instead of zero-initializing it) causes the same
/// issue: after all, the initial value of the variable might just happen to be
/// one that violates the invariant. Moreover, uninitialized memory is special
/// in that the compiler knows that it does not have a fixed value. This makes
/// it undefined behavior to have uninitialized data in a variable even if that
/// variable has otherwise no restrictions about which values are valid:
///
/// ```rust,no_run
/// #![feature(maybe_uninit)]
/// use std::mem::{self, MaybeUninit};
///
/// let x: i32 = unsafe { mem::uninitialized() }; // undefined behavior!
/// // equivalent code with `MaybeUninit`
/// let x: i32 = unsafe { MaybeUninit::uninitialized().into_initialized() }; // undefined behavior!
/// ```
/// (Notice that the rules around uninitialized integers are not finalized yet, but
/// until they are, it is advisable to avoid them.)
///
/// `MaybeUninit` serves to enable unsafe code to deal with uninitialized data:
/// it is a signal to the compiler indicating that the data here might *not*
/// be initialized:
///
/// ```rust
/// #![feature(maybe_uninit)]
/// use std::mem::MaybeUninit;
///
/// // Create an explicitly uninitialized reference. The compiler knows that data inside
/// // a `MaybeUninit` may be invalid, and hence this is not UB:
/// let mut x = MaybeUninit::<&i32>::uninitialized();
/// // Set it to a valid value.
/// x.set(&0);
/// // Extract the initialized data -- this is only allowed *after* properly
/// // initializing `x`!
/// let x = unsafe { x.into_initialized() };
/// ```
///
/// The compiler then knows to not optimize this code.
// FIXME before stabilizing, explain how to initialize a struct field-by-field.
#[allow(missing_debug_implementations)]
#[unstable(feature = "maybe_uninit", issue = "53491")]
// NOTE after stabilizing `MaybeUninit` proceed to deprecate `mem::{uninitialized,zeroed}`
pub union MaybeUninit<T> {
    uninit: (),
    value: ManuallyDrop<T>,
}

impl<T> MaybeUninit<T> {
    /// Create a new `MaybeUninit` initialized with the given value.
    ///
    /// Note that dropping a `MaybeUninit` will never call `T`'s drop code.
    /// It is your responsibility to make sure `T` gets dropped if it got initialized.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub const fn new(val: T) -> MaybeUninit<T> {
        MaybeUninit { value: ManuallyDrop::new(val) }
    }

    /// Creates a new `MaybeUninit` in an uninitialized state.
    ///
    /// Note that dropping a `MaybeUninit` will never call `T`'s drop code.
    /// It is your responsibility to make sure `T` gets dropped if it got initialized.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub const fn uninitialized() -> MaybeUninit<T> {
        MaybeUninit { uninit: () }
    }

    /// Creates a new `MaybeUninit` in an uninitialized state, with the memory being
    /// filled with `0` bytes. It depends on `T` whether that already makes for
    /// proper initialization. For example, `MaybeUninit<usize>::zeroed()` is initialized,
    /// but `MaybeUninit<&'static i32>::zeroed()` is not because references must not
    /// be null.
    ///
    /// Note that dropping a `MaybeUninit` will never call `T`'s drop code.
    /// It is your responsibility to make sure `T` gets dropped if it got initialized.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline]
    pub fn zeroed() -> MaybeUninit<T> {
        let mut u = MaybeUninit::<T>::uninitialized();
        unsafe {
            // `write_bytes` counts in units of `T`, so a count of 1 zeroes
            // all `size_of::<T>()` bytes of the storage.
            u.as_mut_ptr().write_bytes(0u8, 1);
        }
        u
    }

    /// Sets the value of the `MaybeUninit`. This overwrites any previous value without dropping it.
    /// For your convenience, this also returns a mutable reference to the (now safely initialized)
    /// contents of `self`.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub fn set(&mut self, val: T) -> &mut T {
        unsafe {
            // Writing the union field overwrites the previous bytes without
            // running any destructor; afterwards `value` is initialized, so
            // handing out `get_mut` here is fine.
            self.value = ManuallyDrop::new(val);
            self.get_mut()
        }
    }

    /// Gets a pointer to the contained value. Reading from this pointer or turning it
    /// into a reference will be undefined behavior unless the `MaybeUninit` is initialized.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub fn as_ptr(&self) -> *const T {
        unsafe { &*self.value as *const T }
    }

    /// Gets a mutable pointer to the contained value. Reading from this pointer or turning it
    /// into a reference will be undefined behavior unless the `MaybeUninit` is initialized.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        unsafe { &mut *self.value as *mut T }
    }

    /// Extracts the value from the `MaybeUninit` container. This is a great way
    /// to ensure that the data will get dropped, because the resulting `T` is
    /// subject to the usual drop handling.
    ///
    /// # Safety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized
    /// state. Calling this when the content is not yet fully initialized causes undefined
    /// behavior.
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub unsafe fn into_initialized(self) -> T {
        // A value of an uninhabited type can never have been initialized,
        // so panic rather than "produce" one.
        intrinsics::panic_if_uninhabited::<T>();
        ManuallyDrop::into_inner(self.value)
    }

    /// Reads the value from the `MaybeUninit` container. The resulting `T` is subject
    /// to the usual drop handling.
    ///
    /// # Safety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized
    /// state. Calling this when the content is not yet fully initialized causes undefined
    /// behavior.
    ///
    /// Moreover, this leaves a copy of the same data behind in the `MaybeUninit`. When using
    /// multiple copies of the data (by calling `read_initialized` multiple times, or first
    /// calling `read_initialized` and then [`into_initialized`]), it is your responsibility
    /// to ensure that that data may indeed be duplicated.
    ///
    /// # Examples
    ///
    /// Correct usage of this method:
    ///
    /// ```rust
    /// #![feature(maybe_uninit)]
    /// use std::mem::MaybeUninit;
    ///
    /// let mut x = MaybeUninit::<u32>::uninitialized();
    /// x.set(13);
    /// let x1 = unsafe { x.read_initialized() };
    /// // `u32` is `Copy`, so we may read multiple times.
    /// let x2 = unsafe { x.read_initialized() };
    ///
    /// let mut x = MaybeUninit::<Option<Vec<u32>>>::uninitialized();
    /// x.set(None);
    /// let x1 = unsafe { x.read_initialized() };
    /// // Duplicating a `None` value is okay, so we may read multiple times.
    /// let x2 = unsafe { x.read_initialized() };
    /// ```
    ///
    /// *Incorrect* usage of this method:
    ///
    /// ```rust,no_run
    /// #![feature(maybe_uninit)]
    /// use std::mem::MaybeUninit;
    ///
    /// let mut x = MaybeUninit::<Option<Vec<u32>>>::uninitialized();
    /// x.set(Some(vec![0,1,2]));
    /// let x1 = unsafe { x.read_initialized() };
    /// let x2 = unsafe { x.read_initialized() };
    /// // We now created two copies of the same vector, leading to a double-free when
    /// // they both get dropped!
    /// ```
    #[unstable(feature = "maybe_uninit", issue = "53491")]
    #[inline(always)]
    pub unsafe fn read_initialized(&self) -> T {
        intrinsics::panic_if_uninhabited::<T>();
        self.as_ptr().read()
    }

    /// Gets a reference to the contained value.
    ///
    /// # Safety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized
    /// state. Calling this when the content is not yet fully initialized causes undefined
    /// behavior.
    #[unstable(feature = "maybe_uninit_ref", issue = "53491")]
    #[inline(always)]
    pub unsafe fn get_ref(&self) -> &T {
        &*self.value
    }

    /// Gets a mutable reference to the contained value.
    ///
    /// # Safety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized
    /// state. Calling this when the content is not yet fully initialized causes undefined
    /// behavior.
    // FIXME(#53491): We currently rely on the above being incorrect, i.e., we have references
    // to uninitialized data (e.g., in `libcore/fmt/float.rs`). We should make
    // a final decision about the rules before stabilization.
    #[unstable(feature = "maybe_uninit_ref", issue = "53491")]
    #[inline(always)]
    pub unsafe fn get_mut(&mut self) -> &mut T {
        &mut *self.value
    }

    /// Gets a pointer to the first element of the array.
    #[unstable(feature = "maybe_uninit_slice", issue = "53491")]
    #[inline(always)]
    pub fn first_ptr(this: &[MaybeUninit<T>]) -> *const T {
        // The cast relies on `[MaybeUninit<T>]` starting at the same address
        // and having the same layout as `[T]`.
        this as *const [MaybeUninit<T>] as *const T
    }

    /// Gets a mutable pointer to the first element of the array.
    #[unstable(feature = "maybe_uninit_slice", issue = "53491")]
    #[inline(always)]
    pub fn first_ptr_mut(this: &mut [MaybeUninit<T>]) -> *mut T {
        this as *mut [MaybeUninit<T>] as *mut T
    }
}
//! Types that pin data to its location in memory. //! //! It is sometimes useful to have objects that are guaranteed not to move, //! in the sense that their placement in memory does not change, and can thus be relied upon. //! A prime example of such a scenario would be building self-referential structs, //! as moving an object with pointers to itself will invalidate them, which could cause undefined //! behavior. //! //! A [`Pin<P>`] ensures that the pointee of any pointer type `P` has a stable location in memory, //! meaning it cannot be moved elsewhere and its memory cannot be deallocated //! until it gets dropped. We say that the pointee is "pinned". //! //! By default, all types in Rust are movable. Rust allows passing all types by-value, //! and common smart-pointer types such as [`Box<T>`] and `&mut T` allow replacing and //! moving the values they contain: you can move out of a [`Box<T>`], or you can use [`mem::swap`]. //! [`Pin<P>`] wraps a pointer type `P`, so [`Pin`]`<`[`Box`]`<T>>` functions much like a regular //! [`Box<T>`]: when a [`Pin`]`<`[`Box`]`<T>>` gets dropped, so do its contents, and the memory gets //! deallocated. Similarly, [`Pin`]`<&mut T>` is a lot like `&mut T`. However, [`Pin<P>`] does //! not let clients actually obtain a [`Box<T>`] or `&mut T` to pinned data, which implies that you //! cannot use operations such as [`mem::swap`]: //! //! ``` //! use std::pin::Pin; //! fn swap_pins<T>(x: Pin<&mut T>, y: Pin<&mut T>) { //! // `mem::swap` needs `&mut T`, but we cannot get it. //! // We are stuck, we cannot swap the contents of these references. //! // We could use `Pin::get_unchecked_mut`, but that is unsafe for a reason: //! // we are not allowed to use it for moving things out of the `Pin`. //! } //! ``` //! //! It is worth reiterating that [`Pin<P>`] does *not* change the fact that a Rust compiler //! considers all types movable. [`mem::swap`] remains callable for any `T`. Instead, [`Pin<P>`] //! 
prevents certain *values* (pointed to by pointers wrapped in [`Pin<P>`]) from being //! moved by making it impossible to call methods that require `&mut T` on them //! (like [`mem::swap`]). //! //! [`Pin<P>`] can be used to wrap any pointer type `P`, and as such it interacts with //! [`Deref`] and [`DerefMut`]. A [`Pin<P>`] where `P: Deref` should be considered //! as a "`P`-style pointer" to a pinned `P::Target` -- so, a [`Pin`]`<`[`Box`]`<T>>` is //! an owned pointer to a pinned `T`, and a [`Pin`]`<`[`Rc`]`<T>>` is a reference-counted //! pointer to a pinned `T`. //! For correctness, [`Pin<P>`] relies on the implementations of [`Deref`] and //! [`DerefMut`] not to move out of their `self` parameter, and only ever to //! return a pointer to pinned data when they are called on a pinned pointer. //! //! # `Unpin` //! //! Many types are always freely movable, even when pinned, because they do not //! rely on having a stable address. This includes all the basic types (like //! [`bool`], [`i32`], and references) as well as types consisting solely of these //! types. Types that do not care about pinning implement the [`Unpin`] //! auto-trait, which cancels the effect of [`Pin<P>`]. For `T: Unpin`, //! [`Pin`]`<`[`Box`]`<T>>` and [`Box<T>`] function identically, as do [`Pin`]`<&mut T>` and //! `&mut T`. //! //! Note that pinning and [`Unpin`] only affect the pointed-to type `P::Target`, not the pointer //! type `P` itself that got wrapped in [`Pin<P>`]. For example, whether or not [`Box<T>`] is //! [`Unpin`] has no effect on the behavior of [`Pin`]`<`[`Box`]`<T>>` (here, `T` is the //! pointed-to type). //! //! # Example: self-referential struct //! //! ```rust //! use std::pin::Pin; //! use std::marker::PhantomPinned; //! use std::ptr::NonNull; //! //! // This is a self-referential struct because the slice field points to the data field. //! // We cannot inform the compiler about that with a normal reference, //! 
// as this pattern cannot be described with the usual borrowing rules. //! // Instead we use a raw pointer, though one which is known not to be null, //! // as we know it's pointing at the string. //! struct Unmovable { //! data: String, //! slice: NonNull<String>, //! _pin: PhantomPinned, //! } //! //! impl Unmovable { //! // To ensure the data doesn't move when the function returns, //! // we place it in the heap where it will stay for the lifetime of the object, //! // and the only way to access it would be through a pointer to it. //! fn new(data: String) -> Pin<Box<Self>> { //! let res = Unmovable { //! data, //! // we only create the pointer once the data is in place //! // otherwise it will have already moved before we even started //! slice: NonNull::dangling(), //! _pin: PhantomPinned, //! }; //! let mut boxed = Box::pin(res); //! //! let slice = NonNull::from(&boxed.data); //! // we know this is safe because modifying a field doesn't move the whole struct //! unsafe { //! let mut_ref: Pin<&mut Self> = Pin::as_mut(&mut boxed); //! Pin::get_unchecked_mut(mut_ref).slice = slice; //! } //! boxed //! } //! } //! //! let unmoved = Unmovable::new("hello".to_string()); //! // The pointer should point to the correct location, //! // so long as the struct hasn't moved. //! // Meanwhile, we are free to move the pointer around. //! # #[allow(unused_mut)] //! let mut still_unmoved = unmoved; //! assert_eq!(still_unmoved.slice, NonNull::from(&still_unmoved.data)); //! //! // Since our type doesn't implement Unpin, this will fail to compile: //! // let mut new_unmoved = Unmovable::new("world".to_string()); //! // std::mem::swap(&mut *still_unmoved, &mut *new_unmoved); //! ``` //! //! # Example: intrusive doubly-linked list //! //! In an intrusive doubly-linked list, the collection does not actually allocate //! the memory for the elements itself. Allocation is controlled by the clients, //! 
and elements can live on a stack frame that lives shorter than the collection does. //! //! To make this work, every element has pointers to its predecessor and successor in //! the list. Elements can only be added when they are pinned, because moving the elements //! around would invalidate the pointers. Moreover, the [`Drop`] implementation of a linked //! list element will patch the pointers of its predecessor and successor to remove itself //! from the list. //! //! Crucially, we have to be able to rely on [`drop`] being called. If an element //! could be deallocated or otherwise invalidated without calling [`drop`], the pointers into it //! from its neighbouring elements would become invalid, which would break the data structure. //! //! Therefore, pinning also comes with a [`drop`]-related guarantee. //! //! # `Drop` guarantee //! //! The purpose of pinning is to be able to rely on the placement of some data in memory. //! To make this work, not just moving the data is restricted; deallocating, repurposing, or //! otherwise invalidating the memory used to store the data is restricted, too. //! Concretely, for pinned data you have to maintain the invariant //! that *its memory will not get invalidated or repurposed from the moment it gets pinned until //! when [`drop`] is called*. Memory can be invalidated by deallocation, but also by //! replacing a [`Some(v)`] by [`None`], or calling [`Vec::set_len`] to "kill" some elements //! off of a vector. It can be repurposed by using [`ptr::write`] to overwrite it without //! calling the destructor first. //! //! This is exactly the kind of guarantee that the intrusive linked list from the previous //! section needs to function correctly. //! //! Notice that this guarantee does *not* mean that memory does not leak! It is still //! completely okay not ever to call [`drop`] on a pinned element (e.g., you can still //! call [`mem::forget`] on a [`Pin`]`<`[`Box`]`<T>>`). In the example of the doubly-linked //! 
list, that element would just stay in the list. However you may not free or reuse the storage //! *without calling [`drop`]*. //! //! # `Drop` implementation //! //! If your type uses pinning (such as the two examples above), you have to be careful //! when implementing [`Drop`]. The [`drop`] function takes `&mut self`, but this //! is called *even if your type was previously pinned*! It is as if the //! compiler automatically called [`Pin::get_unchecked_mut`]. //! //! This can never cause a problem in safe code because implementing a type that //! relies on pinning requires unsafe code, but be aware that deciding to make //! use of pinning in your type (for example by implementing some operation on //! [`Pin`]`<&Self>` or [`Pin`]`<&mut Self>`) has consequences for your [`Drop`] //! implementation as well: if an element of your type could have been pinned, //! you must treat [`Drop`] as implicitly taking [`Pin`]`<&mut Self>`. //! //! For example, you could implement `Drop` as follows: //! //! ```rust,no_run //! # use std::pin::Pin; //! # struct Type { } //! impl Drop for Type { //! fn drop(&mut self) { //! // `new_unchecked` is okay because we know this value is never used //! // again after being dropped. //! inner_drop(unsafe { Pin::new_unchecked(self)}); //! fn inner_drop(this: Pin<&mut Type>) { //! // Actual drop code goes here. //! } //! } //! } //! ``` //! //! The function `inner_drop` has the type that [`drop`] *should* have, so this makes sure that //! you do not accidentally use `self`/`this` in a way that is in conflict with pinning. //! //! Moreover, if your type is `#[repr(packed)]`, the compiler will automatically //! move fields around to be able to drop them. As a consequence, you cannot use //! pinning with a `#[repr(packed)]` type. //! //! # Projections and Structural Pinning //! //! When working with pinned structs, the question arises how one can access the //! fields of that struct in a method that takes just [`Pin`]`<&mut Struct>`. //! 
The usual approach is to write helper methods (so called *projections*) //! that turn [`Pin`]`<&mut Struct>` into a reference to the field, but what //! type should that reference have? Is it [`Pin`]`<&mut Field>` or `&mut Field`? //! The same question arises with the fields of an `enum`, and also when considering //! container/wrapper types such as [`Vec<T>`], [`Box<T>`], or [`RefCell<T>`]. //! (This question applies to both mutable and shared references, we just //! use the more common case of mutable references here for illustration.) //! //! It turns out that it is actually up to the author of the data structure //! to decide whether the pinned projection for a particular field turns //! [`Pin`]`<&mut Struct>` into [`Pin`]`<&mut Field>` or `&mut Field`. There are some //! constraints though, and the most important constraint is *consistency*: //! every field can be *either* projected to a pinned reference, *or* have //! pinning removed as part of the projection. If both are done for the same field, //! that will likely be unsound! //! //! As the author of a data structure you get to decide for each field whether pinning //! "propagates" to this field or not. Pinning that propagates is also called "structural", //! because it follows the structure of the type. //! In the following subsections, we describe the considerations that have to be made //! for either choice. //! //! ## Pinning *is not* structural for `field` //! //! It may seem counter-intuitive that the field of a pinned struct might not be pinned, //! but that is actually the easiest choice: if a [`Pin`]`<&mut Field>` is never created, //! nothing can go wrong! So, if you decide that some field does not have structural pinning, //! all you have to ensure is that you never create a pinned reference to that field. //! //! Fields without structural pinning may have a projection method that turns //! [`Pin`]`<&mut Struct>` into `&mut Field`: //! //! ```rust,no_run //! # use std::pin::Pin; //! 
# type Field = i32; //! # struct Struct { field: Field } //! impl Struct { //! fn pin_get_field<'a>(self: Pin<&'a mut Self>) -> &'a mut Field { //! // This is okay because `field` is never considered pinned. //! unsafe { &mut self.get_unchecked_mut().field } //! } //! } //! ``` //! //! You may also `impl Unpin for Struct` *even if* the type of `field` //! is not [`Unpin`]. What that type thinks about pinning is not relevant //! when no [`Pin`]`<&mut Field>` is ever created. //! //! ## Pinning *is* structural for `field` //! //! The other option is to decide that pinning is "structural" for `field`, //! meaning that if the struct is pinned then so is the field. //! //! This allows writing a projection that creates a [`Pin`]`<&mut Field>`, thus //! witnessing that the field is pinned: //! //! ```rust,no_run //! # use std::pin::Pin; //! # type Field = i32; //! # struct Struct { field: Field } //! impl Struct { //! fn pin_get_field<'a>(self: Pin<&'a mut Self>) -> Pin<&'a mut Field> { //! // This is okay because `field` is pinned when `self` is. //! unsafe { self.map_unchecked_mut(|s| &mut s.field) } //! } //! } //! ``` //! //! However, structural pinning comes with a few extra requirements: //! //! 1. The struct must only be [`Unpin`] if all the structural fields are //! [`Unpin`]. This is the default, but [`Unpin`] is a safe trait, so as the author of //! the struct it is your responsibility *not* to add something like //! `impl<T> Unpin for Struct<T>`. (Notice that adding a projection operation //! requires unsafe code, so the fact that [`Unpin`] is a safe trait does not break //! the principle that you only have to worry about any of this if you use `unsafe`.) //! 2. The destructor of the struct must not move structural fields out of its argument. This //! is the exact point that was raised in the [previous section][drop-impl]: `drop` takes //! `&mut self`, but the struct (and hence its fields) might have been pinned before. //! 
You have to guarantee that you do not move a field inside your [`Drop`] implementation. //! In particular, as explained previously, this means that your struct must *not* //! be `#[repr(packed)]`. //! See that section for how to write [`drop`] in a way that the compiler can help you //! not accidentally break pinning. //! 3. You must make sure that you uphold the [`Drop` guarantee][drop-guarantee]: //! once your struct is pinned, the memory that contains the //! content is not overwritten or deallocated without calling the content's destructors. //! This can be tricky, as witnessed by [`VecDeque<T>`]: the destructor of [`VecDeque<T>`] //! can fail to call [`drop`] on all elements if one of the destructors panics. This violates //! the [`Drop`] guarantee, because it can lead to elements being deallocated without //! their destructor being called. ([`VecDeque<T>`] has no pinning projections, so this //! does not cause unsoundness.) //! 4. You must not offer any other operations that could lead to data being moved out of //! the structural fields when your type is pinned. For example, if the struct contains an //! [`Option<T>`] and there is a `take`-like operation with type //! `fn(Pin<&mut Struct<T>>) -> Option<T>`, //! that operation can be used to move a `T` out of a pinned `Struct<T>` -- which means //! pinning cannot be structural for the field holding this data. //! //! For a more complex example of moving data out of a pinned type, imagine if [`RefCell<T>`] //! had a method `fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T>`. //! Then we could do the following: //! ```compile_fail //! fn exploit_ref_cell<T>(rc: Pin<&mut RefCell<T>>) { //! { let p = rc.as_mut().get_pin_mut(); } // Here we get pinned access to the `T`. //! let rc_shr: &RefCell<T> = rc.into_ref().get_ref(); //! let b = rc_shr.borrow_mut(); //! let content = &mut *b; // And here we have `&mut T` to the same data. //! } //! ``` //! 
This is catastrophic: it means we can first pin the content of the [`RefCell<T>`]
//! (using `RefCell::get_pin_mut`) and then move that content using the mutable
//! reference we got later.
//!
//! ## Examples
//!
//! For a type like [`Vec<T>`], both possibilities (structural pinning or not) make sense.
//! A [`Vec<T>`] with structural pinning could have `get_pin`/`get_pin_mut` methods to get
//! pinned references to elements. However, it could *not* allow calling
//! [`pop`][Vec::pop] on a pinned [`Vec<T>`] because that would move the (structurally pinned)
//! contents! Nor could it allow [`push`][Vec::push], which might reallocate and thus also move the
//! contents.
//!
//! A [`Vec<T>`] without structural pinning could `impl<T> Unpin for Vec<T>`, because the contents
//! are never pinned and the [`Vec<T>`] itself is fine with being moved as well.
//! At that point pinning just has no effect on the vector at all.
//!
//! In the standard library, pointer types generally do not have structural pinning,
//! and thus they do not offer pinning projections. This is why `Box<T>: Unpin` holds for all `T`.
//! It makes sense to do this for pointer types, because moving the `Box<T>`
//! does not actually move the `T`: the [`Box<T>`] can be freely movable (aka `Unpin`) even if
//! the `T` is not. In fact, even [`Pin`]`<`[`Box`]`<T>>` and [`Pin`]`<&mut T>` are always
//! [`Unpin`] themselves, for the same reason: their contents (the `T`) are pinned, but the
//! pointers themselves can be moved without moving the pinned data. For both [`Box<T>`] and
//! [`Pin`]`<`[`Box`]`<T>>`, whether the content is pinned is entirely independent of whether the
//! pointer is pinned, meaning pinning is *not* structural.
//!
//! When implementing a [`Future`] combinator, you will usually need structural pinning
//! for the nested futures, as you need to get pinned references to them to call [`poll`].
//! But if your combinator contains any other data that does not need to be pinned,
//!
you can make those fields not structural and hence freely access them with a //! mutable reference even when you just have [`Pin`]`<&mut Self>` (such as in your own //! [`poll`] implementation). //! //! [`Pin<P>`]: struct.Pin.html //! [`Unpin`]: ../marker/trait.Unpin.html //! [`Deref`]: ../ops/trait.Deref.html //! [`DerefMut`]: ../ops/trait.DerefMut.html //! [`mem::swap`]: ../mem/fn.swap.html //! [`mem::forget`]: ../mem/fn.forget.html //! [`Box<T>`]: ../../std/boxed/struct.Box.html //! [`Vec<T>`]: ../../std/vec/struct.Vec.html //! [`Vec::set_len`]: ../../std/vec/struct.Vec.html#method.set_len //! [`Pin`]: struct.Pin.html //! [`Box`]: ../../std/boxed/struct.Box.html //! [Vec::pop]: ../../std/vec/struct.Vec.html#method.pop //! [Vec::push]: ../../std/vec/struct.Vec.html#method.push //! [`Rc`]: ../../std/rc/struct.Rc.html //! [`RefCell<T>`]: ../../std/cell/struct.RefCell.html //! [`Drop`]: ../../std/ops/trait.Drop.html //! [`drop`]: ../../std/ops/trait.Drop.html#tymethod.drop //! [`VecDeque<T>`]: ../../std/collections/struct.VecDeque.html //! [`Option<T>`]: ../../std/option/enum.Option.html //! [`VecDeque<T>`]: ../../std/collections/struct.VecDeque.html //! [`RefCell<T>`]: ../cell/struct.RefCell.html //! [`None`]: ../option/enum.Option.html#variant.None //! [`Some(v)`]: ../option/enum.Option.html#variant.Some //! [`ptr::write`]: ../ptr/fn.write.html //! [`Future`]: ../future/trait.Future.html //! [drop-impl]: #drop-implementation //! [drop-guarantee]: #drop-guarantee //! [`poll`]: ../../std/future/trait.Future.html#tymethod.poll //! [`Pin::get_unchecked_mut`]: struct.Pin.html#method.get_unchecked_mut #![stable(feature = "pin", since = "1.33.0")] use crate::fmt; use crate::marker::{Sized, Unpin}; use crate::cmp::{self, PartialEq, PartialOrd}; use crate::ops::{Deref, DerefMut, Receiver, CoerceUnsized, DispatchFromDyn}; /// A pinned pointer. 
///
/// This is a wrapper around a kind of pointer which makes that pointer "pin" its
/// value in place, preventing the value referenced by that pointer from being moved
/// unless it implements [`Unpin`].
///
/// *See the [`pin` module] documentation for an explanation of pinning.*
///
/// [`Unpin`]: ../../std/marker/trait.Unpin.html
/// [`pin` module]: ../../std/pin/index.html
//
// Note: the derives below, and the explicit `PartialEq` and `PartialOrd`
// implementations, are allowed because they all only use `&P`, so they cannot move
// the value behind `pointer`.
#[stable(feature = "pin", since = "1.33.0")]
#[lang = "pin"]
#[fundamental]
#[repr(transparent)]
#[derive(Copy, Clone, Hash, Eq, Ord)]
pub struct Pin<P> {
    // Deliberately private: handing out direct access to the pointer would let
    // safe code replace it (and thus move the pointee), breaking the pinning
    // invariant that all the `unsafe` constructors below rely on.
    pointer: P,
}

#[stable(feature = "pin_partialeq_partialord_impl_applicability", since = "1.34.0")]
impl<P, Q> PartialEq<Pin<Q>> for Pin<P>
where
    P: PartialEq<Q>,
{
    // Comparison is delegated to the pointer type's own `PartialEq`; only `&P`
    // is touched, so the pinned value cannot be moved by comparing.
    fn eq(&self, other: &Pin<Q>) -> bool {
        self.pointer == other.pointer
    }

    // Forwarded explicitly (instead of using the default negation of `eq`) so
    // that a custom `P::ne` implementation is honored.
    fn ne(&self, other: &Pin<Q>) -> bool {
        self.pointer != other.pointer
    }
}

#[stable(feature = "pin_partialeq_partialord_impl_applicability", since = "1.34.0")]
impl<P, Q> PartialOrd<Pin<Q>> for Pin<P>
where
    P: PartialOrd<Q>,
{
    fn partial_cmp(&self, other: &Pin<Q>) -> Option<cmp::Ordering> {
        self.pointer.partial_cmp(&other.pointer)
    }

    // Each operator is forwarded individually (rather than left to the
    // `partial_cmp`-based defaults) so `P`'s own implementations are used.
    fn lt(&self, other: &Pin<Q>) -> bool {
        self.pointer < other.pointer
    }

    fn le(&self, other: &Pin<Q>) -> bool {
        self.pointer <= other.pointer
    }

    fn gt(&self, other: &Pin<Q>) -> bool {
        self.pointer > other.pointer
    }

    fn ge(&self, other: &Pin<Q>) -> bool {
        self.pointer >= other.pointer
    }
}

// Methods that are only available when the pointee is `Unpin`, i.e., when
// pinning has no effect, so it can be safely added and removed.
impl<P: Deref> Pin<P>
where
    P::Target: Unpin,
{
    /// Construct a new `Pin<P>` around a pointer to some data of a type that
    /// implements [`Unpin`].
    ///
    /// Unlike `Pin::new_unchecked`, this method is safe because the pointer
    /// `P` dereferences to an [`Unpin`] type, which cancels the pinning guarantees.
    ///
    /// [`Unpin`]: ../../std/marker/trait.Unpin.html
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn new(pointer: P) -> Pin<P> {
        // Safety: the value pointed to is `Unpin`, and so has no requirements
        // around pinning.
        unsafe { Pin::new_unchecked(pointer) }
    }

    /// Unwraps this `Pin<P>` returning the underlying pointer.
    ///
    /// This requires that the data inside this `Pin` is [`Unpin`] so that we
    /// can ignore the pinning invariants when unwrapping it.
    ///
    /// [`Unpin`]: ../../std/marker/trait.Unpin.html
    #[unstable(feature = "pin_into_inner", issue = "60245")]
    #[inline(always)]
    pub fn into_inner(pin: Pin<P>) -> P {
        pin.pointer
    }
}

impl<P: Deref> Pin<P> {
    /// Construct a new `Pin<P>` around a reference to some data of a type that
    /// may or may not implement `Unpin`.
    ///
    /// If `pointer` dereferences to an `Unpin` type, `Pin::new` should be used
    /// instead.
    ///
    /// # Safety
    ///
    /// This constructor is unsafe because we cannot guarantee that the data
    /// pointed to by `pointer` is pinned, meaning that the data will not be moved or
    /// its storage invalidated until it gets dropped. If the constructed `Pin<P>` does
    /// not guarantee that the data `P` points to is pinned, that is a violation of
    /// the API contract and may lead to undefined behavior in later (safe) operations.
    ///
    /// By using this method, you are making a promise about the `P::Deref` and
    /// `P::DerefMut` implementations, if they exist. Most importantly, they
    /// must not move out of their `self` arguments: `Pin::as_mut` and `Pin::as_ref`
    /// will call `DerefMut::deref_mut` and `Deref::deref` *on the pinned pointer*
    /// and expect these methods to uphold the pinning invariants.
    /// Moreover, by calling this method you promise that the reference `P`
    /// dereferences to will not be moved out of again; in particular, it
    /// must not be possible to obtain a `&mut P::Target` and then
    /// move out of that reference (using, for example [`mem::swap`]).
    ///
    /// For example, calling `Pin::new_unchecked` on an `&'a mut T` is unsafe because
    /// while you are able to pin it for the given lifetime `'a`, you have no control
    /// over whether it is kept pinned once `'a` ends:
    /// ```
    /// use std::mem;
    /// use std::pin::Pin;
    ///
    /// fn move_pinned_ref<T>(mut a: T, mut b: T) {
    ///     unsafe {
    ///         let p: Pin<&mut T> = Pin::new_unchecked(&mut a);
    ///         // This should mean the pointee `a` can never move again.
    ///     }
    ///     mem::swap(&mut a, &mut b);
    ///     // The address of `a` changed to `b`'s stack slot, so `a` got moved even
    ///     // though we have previously pinned it! We have violated the pinning API contract.
    /// }
    /// ```
    /// A value, once pinned, must remain pinned forever (unless its type implements `Unpin`).
    ///
    /// Similarly, calling `Pin::new_unchecked` on an `Rc<T>` is unsafe because there could be
    /// aliases to the same data that are not subject to the pinning restrictions:
    /// ```
    /// use std::rc::Rc;
    /// use std::pin::Pin;
    ///
    /// fn move_pinned_rc<T>(mut x: Rc<T>) {
    ///     let pinned = unsafe { Pin::new_unchecked(x.clone()) };
    ///     {
    ///         let p: Pin<&T> = pinned.as_ref();
    ///         // This should mean the pointee can never move again.
    ///     }
    ///     drop(pinned);
    ///     let content = Rc::get_mut(&mut x).unwrap();
    ///     // Now, if `x` was the only reference, we have a mutable reference to
    ///     // data that we pinned above, which we could use to move it as we have
    ///     // seen in the previous example. We have violated the pinning API contract.
    /// }
    /// ```
    ///
    /// [`mem::swap`]: ../../std/mem/fn.swap.html
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub unsafe fn new_unchecked(pointer: P) -> Pin<P> {
        // From here on, upholding the pinning invariants is the caller's
        // responsibility, per the `# Safety` section above.
        Pin { pointer }
    }

    /// Gets a pinned shared reference from this pinned pointer.
    ///
    /// This is a generic method to go from `&Pin<Pointer<T>>` to `Pin<&T>`.
    /// It is safe because, as part of the contract of `Pin::new_unchecked`,
    /// the pointee cannot move after `Pin<Pointer<T>>` got created.
    /// "Malicious" implementations of `Pointer::Deref` are likewise
    /// ruled out by the contract of `Pin::new_unchecked`.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn as_ref(self: &Pin<P>) -> Pin<&P::Target> {
        // Safety: by the `Pin::new_unchecked` contract (documented above), the
        // pointee is pinned and `Deref::deref` does not move out of it.
        unsafe { Pin::new_unchecked(&*self.pointer) }
    }

    /// Unwraps this `Pin<P>` returning the underlying pointer.
    ///
    /// # Safety
    ///
    /// This function is unsafe. You must guarantee that you will continue to
    /// treat the pointer `P` as pinned after you call this function, so that
    /// the invariants on the `Pin` type can be upheld. If the code using the
    /// resulting `P` does not continue to maintain the pinning invariants that
    /// is a violation of the API contract and may lead to undefined behavior in
    /// later (safe) operations.
    ///
    /// If the underlying data is [`Unpin`], [`Pin::into_inner`] should be used
    /// instead.
    ///
    /// [`Unpin`]: ../../std/marker/trait.Unpin.html
    /// [`Pin::into_inner`]: #method.into_inner
    #[unstable(feature = "pin_into_inner", issue = "60245")]
    #[inline(always)]
    pub unsafe fn into_inner_unchecked(pin: Pin<P>) -> P {
        pin.pointer
    }
}

impl<P: DerefMut> Pin<P> {
    /// Gets a pinned mutable reference from this pinned pointer.
    ///
    /// This is a generic method to go from `&mut Pin<Pointer<T>>` to `Pin<&mut T>`.
    /// It is safe because, as part of the contract of `Pin::new_unchecked`,
    /// the pointee cannot move after `Pin<Pointer<T>>` got created.
    /// "Malicious" implementations of `Pointer::DerefMut` are likewise
    /// ruled out by the contract of `Pin::new_unchecked`.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn as_mut(self: &mut Pin<P>) -> Pin<&mut P::Target> {
        // Safety: mirrors `as_ref`; `DerefMut::deref_mut` is trusted not to
        // move the pointee by the contract of `Pin::new_unchecked`.
        unsafe { Pin::new_unchecked(&mut *self.pointer) }
    }

    /// Assigns a new value to the memory behind the pinned reference.
    ///
    /// This overwrites pinned data, but that is okay: its destructor gets
    /// run before being overwritten, so no pinning guarantee is violated.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn set(self: &mut Pin<P>, value: P::Target)
    where
        P::Target: Sized,
    {
        // The assignment drops the old value in place before writing the new
        // one, so the destructor runs while the memory is still pinned — which
        // is exactly what the pinning drop guarantee requires.
        *(self.pointer) = value;
    }
}

impl<'a, T: ?Sized> Pin<&'a T> {
    /// Constructs a new pin by mapping the interior value.
    ///
    /// For example, if you wanted to get a `Pin` of a field of something,
    /// you could use this to get access to that field in one line of code.
    /// However, there are several gotchas with these "pinning projections";
    /// see the [`pin` module] documentation for further details on that topic.
    ///
    /// # Safety
    ///
    /// This function is unsafe. You must guarantee that the data you return
    /// will not move so long as the argument value does not move (for example,
    /// because it is one of the fields of that value), and also that you do
    /// not move out of the argument you receive to the interior function.
    ///
    /// [`pin` module]: ../../std/pin/index.html#projections-and-structural-pinning
    #[stable(feature = "pin", since = "1.33.0")]
    pub unsafe fn map_unchecked<U, F>(self: Pin<&'a T>, func: F) -> Pin<&'a U>
    where
        F: FnOnce(&T) -> &U,
    {
        let pointer = &*self.pointer;
        let new_pointer = func(pointer);
        // The caller guarantees (see `# Safety`) that the projected reference
        // stays put whenever the original value does, so re-pinning is sound.
        Pin::new_unchecked(new_pointer)
    }

    /// Gets a shared reference out of a pin.
    ///
    /// This is safe because it is not possible to move out of a shared reference.
    /// It may seem like there is an issue here with interior mutability: in fact,
    /// it *is* possible to move a `T` out of a `&RefCell<T>`. However, this is
    /// not a problem as long as there does not also exist a `Pin<&T>` pointing
    /// to the same data, and `RefCell<T>` does not let you create a pinned reference
    /// to its contents. See the discussion on ["pinning projections"] for further
    /// details.
    ///
    /// Note: `Pin` also implements `Deref` to the target, which can be used
    /// to access the inner value. However, `Deref` only provides a reference
    /// that lives for as long as the borrow of the `Pin`, not the lifetime of
    /// the `Pin` itself. This method allows turning the `Pin` into a reference
    /// with the same lifetime as the original `Pin`.
    ///
    /// ["pinning projections"]: ../../std/pin/index.html#projections-and-structural-pinning
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn get_ref(self: Pin<&'a T>) -> &'a T {
        self.pointer
    }
}

impl<'a, T: ?Sized> Pin<&'a mut T> {
    /// Converts this `Pin<&mut T>` into a `Pin<&T>` with the same lifetime.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn into_ref(self: Pin<&'a mut T>) -> Pin<&'a T> {
        Pin { pointer: self.pointer }
    }

    /// Gets a mutable reference to the data inside of this `Pin`.
    ///
    /// This requires that the data inside this `Pin` is `Unpin`.
    ///
    /// Note: `Pin` also implements `DerefMut` to the data, which can be used
    /// to access the inner value. However, `DerefMut` only provides a reference
    /// that lives for as long as the borrow of the `Pin`, not the lifetime of
    /// the `Pin` itself. This method allows turning the `Pin` into a reference
    /// with the same lifetime as the original `Pin`.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn get_mut(self: Pin<&'a mut T>) -> &'a mut T
    where
        T: Unpin,
    {
        self.pointer
    }

    /// Gets a mutable reference to the data inside of this `Pin`.
    ///
    /// # Safety
    ///
    /// This function is unsafe. You must guarantee that you will never move
    /// the data out of the mutable reference you receive when you call this
    /// function, so that the invariants on the `Pin` type can be upheld.
    ///
    /// If the underlying data is `Unpin`, `Pin::get_mut` should be used
    /// instead.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub unsafe fn get_unchecked_mut(self: Pin<&'a mut T>) -> &'a mut T {
        self.pointer
    }

    /// Construct a new pin by mapping the interior value.
    ///
    /// For example, if you wanted to get a `Pin` of a field of something,
    /// you could use this to get access to that field in one line of code.
    /// However, there are several gotchas with these "pinning projections";
    /// see the [`pin` module] documentation for further details on that topic.
    ///
    /// # Safety
    ///
    /// This function is unsafe. You must guarantee that the data you return
    /// will not move so long as the argument value does not move (for example,
    /// because it is one of the fields of that value), and also that you do
    /// not move out of the argument you receive to the interior function.
    ///
    /// [`pin` module]: ../../std/pin/index.html#projections-and-structural-pinning
    #[stable(feature = "pin", since = "1.33.0")]
    pub unsafe fn map_unchecked_mut<U, F>(self: Pin<&'a mut T>, func: F) -> Pin<&'a mut U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        // `get_unchecked_mut` is sound here because the caller promises (see
        // `# Safety`) not to move out of the reference that `func` receives.
        let pointer = Pin::get_unchecked_mut(self);
        let new_pointer = func(pointer);
        Pin::new_unchecked(new_pointer)
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<P: Deref> Deref for Pin<P> {
    type Target = P::Target;
    fn deref(&self) -> &P::Target {
        Pin::get_ref(Pin::as_ref(self))
    }
}

// Mutable access is only offered when the target is `Unpin`: a `&mut P::Target`
// obtained through `DerefMut` could otherwise be used to move pinned data.
#[stable(feature = "pin", since = "1.33.0")]
impl<P: DerefMut> DerefMut for Pin<P>
where
    P::Target: Unpin,
{
    fn deref_mut(&mut self) -> &mut P::Target {
        Pin::get_mut(Pin::as_mut(self))
    }
}

#[unstable(feature = "receiver_trait", issue = "0")]
impl<P: Receiver> Receiver for Pin<P> {}

// The formatting impls below only take `&self.pointer`, so they cannot move
// the value behind the pointer.
#[stable(feature = "pin", since = "1.33.0")]
impl<P: fmt::Debug> fmt::Debug for Pin<P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.pointer, f)
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<P: fmt::Display> fmt::Display for Pin<P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.pointer, f)
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<P: fmt::Pointer> fmt::Pointer for Pin<P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&self.pointer, f)
    }
}

// Note: this means that any impl of `CoerceUnsized` that allows coercing from
// a type that impls `Deref<Target=impl !Unpin>` to a type that impls
// `Deref<Target=Unpin>` is unsound. Any such impl would probably be unsound
// for other reasons, though, so we just need to take care not to allow such
// impls to land in std.
#[stable(feature = "pin", since = "1.33.0")]
impl<P, U> CoerceUnsized<Pin<U>> for Pin<P>
where
    P: CoerceUnsized<U>,
{}

#[stable(feature = "pin", since = "1.33.0")]
impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P>
where
    P: DispatchFromDyn<U>,
{}

move of packed fields might or might not occur when they actually are sufficiently aligned

//! Types that pin data to its location in memory.
//!
//! It is sometimes useful to have objects that are guaranteed not to move,
//! in the sense that their placement in memory does not change, and can thus be relied upon.
//! A prime example of such a scenario would be building self-referential structs,
//! as moving an object with pointers to itself will invalidate them, which could cause undefined
//! behavior.
//!
//! A [`Pin<P>`] ensures that the pointee of any pointer type `P` has a stable location in memory,
//! meaning it cannot be moved elsewhere and its memory cannot be deallocated
//! until it gets dropped. We say that the pointee is "pinned".
//!
//! By default, all types in Rust are movable. Rust allows passing all types by-value,
//! and common smart-pointer types such as [`Box<T>`] and `&mut T` allow replacing and
//! moving the values they contain: you can move out of a [`Box<T>`], or you can use [`mem::swap`].
//! [`Pin<P>`] wraps a pointer type `P`, so [`Pin`]`<`[`Box`]`<T>>` functions much like a regular
//! [`Box<T>`]: when a [`Pin`]`<`[`Box`]`<T>>` gets dropped, so do its contents, and the memory gets
//! deallocated. Similarly, [`Pin`]`<&mut T>` is a lot like `&mut T`. However, [`Pin<P>`] does
//! not let clients actually obtain a [`Box<T>`] or `&mut T` to pinned data, which implies that you
//! cannot use operations such as [`mem::swap`]:
//!
//! ```
//! use std::pin::Pin;
//! fn swap_pins<T>(x: Pin<&mut T>, y: Pin<&mut T>) {
//!
// `mem::swap` needs `&mut T`, but we cannot get it. //! // We are stuck, we cannot swap the contents of these references. //! // We could use `Pin::get_unchecked_mut`, but that is unsafe for a reason: //! // we are not allowed to use it for moving things out of the `Pin`. //! } //! ``` //! //! It is worth reiterating that [`Pin<P>`] does *not* change the fact that a Rust compiler //! considers all types movable. [`mem::swap`] remains callable for any `T`. Instead, [`Pin<P>`] //! prevents certain *values* (pointed to by pointers wrapped in [`Pin<P>`]) from being //! moved by making it impossible to call methods that require `&mut T` on them //! (like [`mem::swap`]). //! //! [`Pin<P>`] can be used to wrap any pointer type `P`, and as such it interacts with //! [`Deref`] and [`DerefMut`]. A [`Pin<P>`] where `P: Deref` should be considered //! as a "`P`-style pointer" to a pinned `P::Target` -- so, a [`Pin`]`<`[`Box`]`<T>>` is //! an owned pointer to a pinned `T`, and a [`Pin`]`<`[`Rc`]`<T>>` is a reference-counted //! pointer to a pinned `T`. //! For correctness, [`Pin<P>`] relies on the implementations of [`Deref`] and //! [`DerefMut`] not to move out of their `self` parameter, and only ever to //! return a pointer to pinned data when they are called on a pinned pointer. //! //! # `Unpin` //! //! Many types are always freely movable, even when pinned, because they do not //! rely on having a stable address. This includes all the basic types (like //! [`bool`], [`i32`], and references) as well as types consisting solely of these //! types. Types that do not care about pinning implement the [`Unpin`] //! auto-trait, which cancels the effect of [`Pin<P>`]. For `T: Unpin`, //! [`Pin`]`<`[`Box`]`<T>>` and [`Box<T>`] function identically, as do [`Pin`]`<&mut T>` and //! `&mut T`. //! //! Note that pinning and [`Unpin`] only affect the pointed-to type `P::Target`, not the pointer //! type `P` itself that got wrapped in [`Pin<P>`]. 
For example, whether or not [`Box<T>`] is //! [`Unpin`] has no effect on the behavior of [`Pin`]`<`[`Box`]`<T>>` (here, `T` is the //! pointed-to type). //! //! # Example: self-referential struct //! //! ```rust //! use std::pin::Pin; //! use std::marker::PhantomPinned; //! use std::ptr::NonNull; //! //! // This is a self-referential struct because the slice field points to the data field. //! // We cannot inform the compiler about that with a normal reference, //! // as this pattern cannot be described with the usual borrowing rules. //! // Instead we use a raw pointer, though one which is known not to be null, //! // as we know it's pointing at the string. //! struct Unmovable { //! data: String, //! slice: NonNull<String>, //! _pin: PhantomPinned, //! } //! //! impl Unmovable { //! // To ensure the data doesn't move when the function returns, //! // we place it in the heap where it will stay for the lifetime of the object, //! // and the only way to access it would be through a pointer to it. //! fn new(data: String) -> Pin<Box<Self>> { //! let res = Unmovable { //! data, //! // we only create the pointer once the data is in place //! // otherwise it will have already moved before we even started //! slice: NonNull::dangling(), //! _pin: PhantomPinned, //! }; //! let mut boxed = Box::pin(res); //! //! let slice = NonNull::from(&boxed.data); //! // we know this is safe because modifying a field doesn't move the whole struct //! unsafe { //! let mut_ref: Pin<&mut Self> = Pin::as_mut(&mut boxed); //! Pin::get_unchecked_mut(mut_ref).slice = slice; //! } //! boxed //! } //! } //! //! let unmoved = Unmovable::new("hello".to_string()); //! // The pointer should point to the correct location, //! // so long as the struct hasn't moved. //! // Meanwhile, we are free to move the pointer around. //! # #[allow(unused_mut)] //! let mut still_unmoved = unmoved; //! assert_eq!(still_unmoved.slice, NonNull::from(&still_unmoved.data)); //! //! 
// Since our type doesn't implement Unpin, this will fail to compile: //! // let mut new_unmoved = Unmovable::new("world".to_string()); //! // std::mem::swap(&mut *still_unmoved, &mut *new_unmoved); //! ``` //! //! # Example: intrusive doubly-linked list //! //! In an intrusive doubly-linked list, the collection does not actually allocate //! the memory for the elements itself. Allocation is controlled by the clients, //! and elements can live on a stack frame that lives shorter than the collection does. //! //! To make this work, every element has pointers to its predecessor and successor in //! the list. Elements can only be added when they are pinned, because moving the elements //! around would invalidate the pointers. Moreover, the [`Drop`] implementation of a linked //! list element will patch the pointers of its predecessor and successor to remove itself //! from the list. //! //! Crucially, we have to be able to rely on [`drop`] being called. If an element //! could be deallocated or otherwise invalidated without calling [`drop`], the pointers into it //! from its neighbouring elements would become invalid, which would break the data structure. //! //! Therefore, pinning also comes with a [`drop`]-related guarantee. //! //! # `Drop` guarantee //! //! The purpose of pinning is to be able to rely on the placement of some data in memory. //! To make this work, not just moving the data is restricted; deallocating, repurposing, or //! otherwise invalidating the memory used to store the data is restricted, too. //! Concretely, for pinned data you have to maintain the invariant //! that *its memory will not get invalidated or repurposed from the moment it gets pinned until //! when [`drop`] is called*. Memory can be invalidated by deallocation, but also by //! replacing a [`Some(v)`] by [`None`], or calling [`Vec::set_len`] to "kill" some elements //! off of a vector. It can be repurposed by using [`ptr::write`] to overwrite it without //! 
calling the destructor first. //! //! This is exactly the kind of guarantee that the intrusive linked list from the previous //! section needs to function correctly. //! //! Notice that this guarantee does *not* mean that memory does not leak! It is still //! completely okay not ever to call [`drop`] on a pinned element (e.g., you can still //! call [`mem::forget`] on a [`Pin`]`<`[`Box`]`<T>>`). In the example of the doubly-linked //! list, that element would just stay in the list. However you may not free or reuse the storage //! *without calling [`drop`]*. //! //! # `Drop` implementation //! //! If your type uses pinning (such as the two examples above), you have to be careful //! when implementing [`Drop`]. The [`drop`] function takes `&mut self`, but this //! is called *even if your type was previously pinned*! It is as if the //! compiler automatically called [`Pin::get_unchecked_mut`]. //! //! This can never cause a problem in safe code because implementing a type that //! relies on pinning requires unsafe code, but be aware that deciding to make //! use of pinning in your type (for example by implementing some operation on //! [`Pin`]`<&Self>` or [`Pin`]`<&mut Self>`) has consequences for your [`Drop`] //! implementation as well: if an element of your type could have been pinned, //! you must treat [`Drop`] as implicitly taking [`Pin`]`<&mut Self>`. //! //! For example, you could implement `Drop` as follows: //! //! ```rust,no_run //! # use std::pin::Pin; //! # struct Type { } //! impl Drop for Type { //! fn drop(&mut self) { //! // `new_unchecked` is okay because we know this value is never used //! // again after being dropped. //! inner_drop(unsafe { Pin::new_unchecked(self)}); //! fn inner_drop(this: Pin<&mut Type>) { //! // Actual drop code goes here. //! } //! } //! } //! ``` //! //! The function `inner_drop` has the type that [`drop`] *should* have, so this makes sure that //! 
you do not accidentally use `self`/`this` in a way that is in conflict with pinning. //! //! Moreover, if your type is `#[repr(packed)]`, the compiler will automatically //! move fields around to be able to drop them. In a packed struct, it might even do //! that for fields that happen to be sufficiently aligned. As a consequence, you cannot use //! pinning with a `#[repr(packed)]` type. //! //! # Projections and Structural Pinning //! //! When working with pinned structs, the question arises how one can access the //! fields of that struct in a method that takes just [`Pin`]`<&mut Struct>`. //! The usual approach is to write helper methods (so called *projections*) //! that turn [`Pin`]`<&mut Struct>` into a reference to the field, but what //! type should that reference have? Is it [`Pin`]`<&mut Field>` or `&mut Field`? //! The same question arises with the fields of an `enum`, and also when considering //! container/wrapper types such as [`Vec<T>`], [`Box<T>`], or [`RefCell<T>`]. //! (This question applies to both mutable and shared references, we just //! use the more common case of mutable references here for illustration.) //! //! It turns out that it is actually up to the author of the data structure //! to decide whether the pinned projection for a particular field turns //! [`Pin`]`<&mut Struct>` into [`Pin`]`<&mut Field>` or `&mut Field`. There are some //! constraints though, and the most important constraint is *consistency*: //! every field can be *either* projected to a pinned reference, *or* have //! pinning removed as part of the projection. If both are done for the same field, //! that will likely be unsound! //! //! As the author of a data structure you get to decide for each field whether pinning //! "propagates" to this field or not. Pinning that propagates is also called "structural", //! because it follows the structure of the type. //! In the following subsections, we describe the considerations that have to be made //! for either choice. //! 
//! ## Pinning *is not* structural for `field` //! //! It may seem counter-intuitive that the field of a pinned struct might not be pinned, //! but that is actually the easiest choice: if a [`Pin`]`<&mut Field>` is never created, //! nothing can go wrong! So, if you decide that some field does not have structural pinning, //! all you have to ensure is that you never create a pinned reference to that field. //! //! Fields without structural pinning may have a projection method that turns //! [`Pin`]`<&mut Struct>` into `&mut Field`: //! //! ```rust,no_run //! # use std::pin::Pin; //! # type Field = i32; //! # struct Struct { field: Field } //! impl Struct { //! fn pin_get_field<'a>(self: Pin<&'a mut Self>) -> &'a mut Field { //! // This is okay because `field` is never considered pinned. //! unsafe { &mut self.get_unchecked_mut().field } //! } //! } //! ``` //! //! You may also `impl Unpin for Struct` *even if* the type of `field` //! is not [`Unpin`]. What that type thinks about pinning is not relevant //! when no [`Pin`]`<&mut Field>` is ever created. //! //! ## Pinning *is* structural for `field` //! //! The other option is to decide that pinning is "structural" for `field`, //! meaning that if the struct is pinned then so is the field. //! //! This allows writing a projection that creates a [`Pin`]`<&mut Field>`, thus //! witnessing that the field is pinned: //! //! ```rust,no_run //! # use std::pin::Pin; //! # type Field = i32; //! # struct Struct { field: Field } //! impl Struct { //! fn pin_get_field<'a>(self: Pin<&'a mut Self>) -> Pin<&'a mut Field> { //! // This is okay because `field` is pinned when `self` is. //! unsafe { self.map_unchecked_mut(|s| &mut s.field) } //! } //! } //! ``` //! //! However, structural pinning comes with a few extra requirements: //! //! 1. The struct must only be [`Unpin`] if all the structural fields are //! [`Unpin`]. This is the default, but [`Unpin`] is a safe trait, so as the author of //! 
the struct it is your responsibility *not* to add something like //! `impl<T> Unpin for Struct<T>`. (Notice that adding a projection operation //! requires unsafe code, so the fact that [`Unpin`] is a safe trait does not break //! the principle that you only have to worry about any of this if you use `unsafe`.) //! 2. The destructor of the struct must not move structural fields out of its argument. This //! is the exact point that was raised in the [previous section][drop-impl]: `drop` takes //! `&mut self`, but the struct (and hence its fields) might have been pinned before. //! You have to guarantee that you do not move a field inside your [`Drop`] implementation. //! In particular, as explained previously, this means that your struct must *not* //! be `#[repr(packed)]`. //! See that section for how to write [`drop`] in a way that the compiler can help you //! not accidentally break pinning. //! 3. You must make sure that you uphold the [`Drop` guarantee][drop-guarantee]: //! once your struct is pinned, the memory that contains the //! content is not overwritten or deallocated without calling the content's destructors. //! This can be tricky, as witnessed by [`VecDeque<T>`]: the destructor of [`VecDeque<T>`] //! can fail to call [`drop`] on all elements if one of the destructors panics. This violates //! the [`Drop`] guarantee, because it can lead to elements being deallocated without //! their destructor being called. ([`VecDeque<T>`] has no pinning projections, so this //! does not cause unsoundness.) //! 4. You must not offer any other operations that could lead to data being moved out of //! the structural fields when your type is pinned. For example, if the struct contains an //! [`Option<T>`] and there is a `take`-like operation with type //! `fn(Pin<&mut Struct<T>>) -> Option<T>`, //! that operation can be used to move a `T` out of a pinned `Struct<T>` -- which means //! pinning cannot be structural for the field holding this data. //! //! 
For a more complex example of moving data out of a pinned type, imagine if [`RefCell<T>`]
//! had a method `fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T>`.
//! Then we could do the following:
//! ```compile_fail
//! fn exploit_ref_cell<T>(rc: Pin<&mut RefCell<T>>) {
//!     { let p = rc.as_mut().get_pin_mut(); } // Here we get pinned access to the `T`.
//!     let rc_shr: &RefCell<T> = rc.into_ref().get_ref();
//!     let b = rc_shr.borrow_mut();
//!     let content = &mut *b; // And here we have `&mut T` to the same data.
//! }
//! ```
//! This is catastrophic: it means we can first pin the content of the [`RefCell<T>`]
//! (using `RefCell::get_pin_mut`) and then move that content using the mutable
//! reference we got later.
//!
//! ## Examples
//!
//! For a type like [`Vec<T>`], both possibilities (structural pinning or not) make sense.
//! A [`Vec<T>`] with structural pinning could have `get_pin`/`get_pin_mut` methods to get
//! pinned references to elements. However, it could *not* allow calling
//! [`pop`][Vec::pop] on a pinned [`Vec<T>`] because that would move the (structurally pinned)
//! contents! Nor could it allow [`push`][Vec::push], which might reallocate and thus also move the
//! contents.
//!
//! A [`Vec<T>`] without structural pinning could `impl<T> Unpin for Vec<T>`, because the contents
//! are never pinned and the [`Vec<T>`] itself is fine with being moved as well.
//! At that point pinning just has no effect on the vector at all.
//!
//! In the standard library, pointer types generally do not have structural pinning,
//! and thus they do not offer pinning projections. This is why `Box<T>: Unpin` holds for all `T`.
//! It makes sense to do this for pointer types, because moving the `Box<T>`
//! does not actually move the `T`: the [`Box<T>`] can be freely movable (aka `Unpin`) even if
//! the `T` is not. In fact, even [`Pin`]`<`[`Box`]`<T>>` and [`Pin`]`<&mut T>` are always
//! [`Unpin`] themselves, for the same reason: their contents (the `T`) are pinned, but the
//! pointers themselves can be moved without moving the pinned data. For both [`Box<T>`] and
//! [`Pin`]`<`[`Box`]`<T>>`, whether the content is pinned is entirely independent of whether the
//! pointer is pinned, meaning pinning is *not* structural.
//!
//! When implementing a [`Future`] combinator, you will usually need structural pinning
//! for the nested futures, as you need to get pinned references to them to call [`poll`].
//! But if your combinator contains any other data that does not need to be pinned,
//! you can make those fields not structural and hence freely access them with a
//! mutable reference even when you just have [`Pin`]`<&mut Self>` (such as in your own
//! [`poll`] implementation).
//!
//! [`Pin<P>`]: struct.Pin.html
//! [`Unpin`]: ../marker/trait.Unpin.html
//! [`Deref`]: ../ops/trait.Deref.html
//! [`DerefMut`]: ../ops/trait.DerefMut.html
//! [`mem::swap`]: ../mem/fn.swap.html
//! [`mem::forget`]: ../mem/fn.forget.html
//! [`Box<T>`]: ../../std/boxed/struct.Box.html
//! [`Vec<T>`]: ../../std/vec/struct.Vec.html
//! [`Vec::set_len`]: ../../std/vec/struct.Vec.html#method.set_len
//! [`Pin`]: struct.Pin.html
//! [`Box`]: ../../std/boxed/struct.Box.html
//! [Vec::pop]: ../../std/vec/struct.Vec.html#method.pop
//! [Vec::push]: ../../std/vec/struct.Vec.html#method.push
//! [`Rc`]: ../../std/rc/struct.Rc.html
//! [`RefCell<T>`]: ../../std/cell/struct.RefCell.html
//! [`Drop`]: ../../std/ops/trait.Drop.html
//! [`drop`]: ../../std/ops/trait.Drop.html#tymethod.drop
//! [`VecDeque<T>`]: ../../std/collections/struct.VecDeque.html
//! [`Option<T>`]: ../../std/option/enum.Option.html
//! [`None`]: ../option/enum.Option.html#variant.None
//! [`Some(v)`]: ../option/enum.Option.html#variant.Some
//! [`ptr::write`]: ../ptr/fn.write.html
//! [`Future`]: ../future/trait.Future.html
//! [drop-impl]: #drop-implementation
//! [drop-guarantee]: #drop-guarantee
//! [`poll`]: ../../std/future/trait.Future.html#tymethod.poll
//! [`Pin::get_unchecked_mut`]: struct.Pin.html#method.get_unchecked_mut

#![stable(feature = "pin", since = "1.33.0")]

use crate::fmt;
use crate::marker::{Sized, Unpin};
use crate::cmp::{self, PartialEq, PartialOrd};
use crate::ops::{Deref, DerefMut, Receiver, CoerceUnsized, DispatchFromDyn};

/// A pinned pointer.
///
/// This is a wrapper around a kind of pointer which makes that pointer "pin" its
/// value in place, preventing the value referenced by that pointer from being moved
/// unless it implements [`Unpin`].
///
/// *See the [`pin` module] documentation for an explanation of pinning.*
///
/// [`Unpin`]: ../../std/marker/trait.Unpin.html
/// [`pin` module]: ../../std/pin/index.html
//
// Note: the derives below, and the explicit `PartialEq` and `PartialOrd`
// implementations, are allowed because they all only use `&P`, so they cannot move
// the value behind `pointer`.
#[stable(feature = "pin", since = "1.33.0")]
#[lang = "pin"]
#[fundamental]
#[repr(transparent)]
#[derive(Copy, Clone, Hash, Eq, Ord)]
pub struct Pin<P> {
    pointer: P,
}

#[stable(feature = "pin_partialeq_partialord_impl_applicability", since = "1.34.0")]
impl<P, Q> PartialEq<Pin<Q>> for Pin<P>
where
    P: PartialEq<Q>,
{
    fn eq(&self, other: &Pin<Q>) -> bool {
        self.pointer == other.pointer
    }

    fn ne(&self, other: &Pin<Q>) -> bool {
        self.pointer != other.pointer
    }
}

#[stable(feature = "pin_partialeq_partialord_impl_applicability", since = "1.34.0")]
impl<P, Q> PartialOrd<Pin<Q>> for Pin<P>
where
    P: PartialOrd<Q>,
{
    fn partial_cmp(&self, other: &Pin<Q>) -> Option<cmp::Ordering> {
        self.pointer.partial_cmp(&other.pointer)
    }

    fn lt(&self, other: &Pin<Q>) -> bool {
        self.pointer < other.pointer
    }

    fn le(&self, other: &Pin<Q>) -> bool {
        self.pointer <= other.pointer
    }

    fn gt(&self, other: &Pin<Q>) -> bool {
        self.pointer > other.pointer
    }

    fn ge(&self, other: &Pin<Q>) -> bool {
        self.pointer >= other.pointer
    }
}

impl<P: Deref> Pin<P>
where
    P::Target: Unpin,
{
    /// Constructs a new `Pin<P>` around a pointer to some data of a type that
    /// implements [`Unpin`].
    ///
    /// Unlike `Pin::new_unchecked`, this method is safe because the pointer
    /// `P` dereferences to an [`Unpin`] type, which cancels the pinning guarantees.
    ///
    /// [`Unpin`]: ../../std/marker/trait.Unpin.html
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn new(pointer: P) -> Pin<P> {
        // Safety: the value pointed to is `Unpin`, and so has no requirements
        // around pinning.
        unsafe { Pin::new_unchecked(pointer) }
    }

    /// Unwraps this `Pin<P>` returning the underlying pointer.
    ///
    /// This requires that the data inside this `Pin` is [`Unpin`] so that we
    /// can ignore the pinning invariants when unwrapping it.
    ///
    /// [`Unpin`]: ../../std/marker/trait.Unpin.html
    #[unstable(feature = "pin_into_inner", issue = "60245")]
    #[inline(always)]
    pub fn into_inner(pin: Pin<P>) -> P {
        pin.pointer
    }
}

impl<P: Deref> Pin<P> {
    /// Constructs a new `Pin<P>` around a reference to some data of a type that
    /// may or may not implement `Unpin`.
    ///
    /// If `pointer` dereferences to an `Unpin` type, `Pin::new` should be used
    /// instead.
    ///
    /// # Safety
    ///
    /// This constructor is unsafe because we cannot guarantee that the data
    /// pointed to by `pointer` is pinned, meaning that the data will not be moved or
    /// its storage invalidated until it gets dropped. If the constructed `Pin<P>` does
    /// not guarantee that the data `P` points to is pinned, that is a violation of
    /// the API contract and may lead to undefined behavior in later (safe) operations.
    ///
    /// By using this method, you are making a promise about the `P::Deref` and
    /// `P::DerefMut` implementations, if they exist. Most importantly, they
    /// must not move out of their `self` arguments: `Pin::as_mut` and `Pin::as_ref`
    /// will call `DerefMut::deref_mut` and `Deref::deref` *on the pinned pointer*
    /// and expect these methods to uphold the pinning invariants.
    /// Moreover, by calling this method you promise that the reference `P`
    /// dereferences to will not be moved out of again; in particular, it
    /// must not be possible to obtain a `&mut P::Target` and then
    /// move out of that reference (using, for example [`mem::swap`]).
    ///
    /// For example, calling `Pin::new_unchecked` on an `&'a mut T` is unsafe because
    /// while you are able to pin it for the given lifetime `'a`, you have no control
    /// over whether it is kept pinned once `'a` ends:
    /// ```
    /// use std::mem;
    /// use std::pin::Pin;
    ///
    /// fn move_pinned_ref<T>(mut a: T, mut b: T) {
    ///     unsafe {
    ///         let p: Pin<&mut T> = Pin::new_unchecked(&mut a);
    ///         // This should mean the pointee `a` can never move again.
    ///     }
    ///     mem::swap(&mut a, &mut b);
    ///     // The address of `a` changed to `b`'s stack slot, so `a` got moved even
    ///     // though we have previously pinned it! We have violated the pinning API contract.
    /// }
    /// ```
    /// A value, once pinned, must remain pinned forever (unless its type implements `Unpin`).
    ///
    /// Similarly, calling `Pin::new_unchecked` on an `Rc<T>` is unsafe because there could be
    /// aliases to the same data that are not subject to the pinning restrictions:
    /// ```
    /// use std::rc::Rc;
    /// use std::pin::Pin;
    ///
    /// fn move_pinned_rc<T>(mut x: Rc<T>) {
    ///     let pinned = unsafe { Pin::new_unchecked(x.clone()) };
    ///     {
    ///         let p: Pin<&T> = pinned.as_ref();
    ///         // This should mean the pointee can never move again.
    ///     }
    ///     drop(pinned);
    ///     let content = Rc::get_mut(&mut x).unwrap();
    ///     // Now, if `x` was the only reference, we have a mutable reference to
    ///     // data that we pinned above, which we could use to move it as we have
    ///     // seen in the previous example. We have violated the pinning API contract.
    /// }
    /// ```
    ///
    /// [`mem::swap`]: ../../std/mem/fn.swap.html
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub unsafe fn new_unchecked(pointer: P) -> Pin<P> {
        Pin { pointer }
    }

    /// Gets a pinned shared reference from this pinned pointer.
    ///
    /// This is a generic method to go from `&Pin<Pointer<T>>` to `Pin<&T>`.
    /// It is safe because, as part of the contract of `Pin::new_unchecked`,
    /// the pointee cannot move after `Pin<Pointer<T>>` got created.
    /// "Malicious" implementations of `Pointer::Deref` are likewise
    /// ruled out by the contract of `Pin::new_unchecked`.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn as_ref(self: &Pin<P>) -> Pin<&P::Target> {
        unsafe { Pin::new_unchecked(&*self.pointer) }
    }

    /// Unwraps this `Pin<P>` returning the underlying pointer.
    ///
    /// # Safety
    ///
    /// This function is unsafe. You must guarantee that you will continue to
    /// treat the pointer `P` as pinned after you call this function, so that
    /// the invariants on the `Pin` type can be upheld. If the code using the
    /// resulting `P` does not continue to maintain the pinning invariants that
    /// is a violation of the API contract and may lead to undefined behavior in
    /// later (safe) operations.
    ///
    /// If the underlying data is [`Unpin`], [`Pin::into_inner`] should be used
    /// instead.
    ///
    /// [`Unpin`]: ../../std/marker/trait.Unpin.html
    /// [`Pin::into_inner`]: #method.into_inner
    #[unstable(feature = "pin_into_inner", issue = "60245")]
    #[inline(always)]
    pub unsafe fn into_inner_unchecked(pin: Pin<P>) -> P {
        pin.pointer
    }
}

impl<P: DerefMut> Pin<P> {
    /// Gets a pinned mutable reference from this pinned pointer.
    ///
    /// This is a generic method to go from `&mut Pin<Pointer<T>>` to `Pin<&mut T>`.
    /// It is safe because, as part of the contract of `Pin::new_unchecked`,
    /// the pointee cannot move after `Pin<Pointer<T>>` got created.
    /// "Malicious" implementations of `Pointer::DerefMut` are likewise
    /// ruled out by the contract of `Pin::new_unchecked`.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn as_mut(self: &mut Pin<P>) -> Pin<&mut P::Target> {
        unsafe { Pin::new_unchecked(&mut *self.pointer) }
    }

    /// Assigns a new value to the memory behind the pinned reference.
    ///
    /// This overwrites pinned data, but that is okay: its destructor gets
    /// run before being overwritten, so no pinning guarantee is violated.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn set(self: &mut Pin<P>, value: P::Target)
    where
        P::Target: Sized,
    {
        *(self.pointer) = value;
    }
}

impl<'a, T: ?Sized> Pin<&'a T> {
    /// Constructs a new pin by mapping the interior value.
    ///
    /// For example, if you wanted to get a `Pin` of a field of something,
    /// you could use this to get access to that field in one line of code.
    /// However, there are several gotchas with these "pinning projections";
    /// see the [`pin` module] documentation for further details on that topic.
    ///
    /// # Safety
    ///
    /// This function is unsafe. You must guarantee that the data you return
    /// will not move so long as the argument value does not move (for example,
    /// because it is one of the fields of that value), and also that you do
    /// not move out of the argument you receive to the interior function.
    ///
    /// [`pin` module]: ../../std/pin/index.html#projections-and-structural-pinning
    #[stable(feature = "pin", since = "1.33.0")]
    pub unsafe fn map_unchecked<U, F>(self: Pin<&'a T>, func: F) -> Pin<&'a U>
    where
        F: FnOnce(&T) -> &U,
    {
        let pointer = &*self.pointer;
        let new_pointer = func(pointer);
        Pin::new_unchecked(new_pointer)
    }

    /// Gets a shared reference out of a pin.
    ///
    /// This is safe because it is not possible to move out of a shared reference.
    /// It may seem like there is an issue here with interior mutability: in fact,
    /// it *is* possible to move a `T` out of a `&RefCell<T>`. However, this is
    /// not a problem as long as there does not also exist a `Pin<&T>` pointing
    /// to the same data, and `RefCell<T>` does not let you create a pinned reference
    /// to its contents. See the discussion on ["pinning projections"] for further
    /// details.
    ///
    /// Note: `Pin` also implements `Deref` to the target, which can be used
    /// to access the inner value. However, `Deref` only provides a reference
    /// that lives for as long as the borrow of the `Pin`, not the lifetime of
    /// the `Pin` itself. This method allows turning the `Pin` into a reference
    /// with the same lifetime as the original `Pin`.
    ///
    /// ["pinning projections"]: ../../std/pin/index.html#projections-and-structural-pinning
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn get_ref(self: Pin<&'a T>) -> &'a T {
        self.pointer
    }
}

impl<'a, T: ?Sized> Pin<&'a mut T> {
    /// Converts this `Pin<&mut T>` into a `Pin<&T>` with the same lifetime.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn into_ref(self: Pin<&'a mut T>) -> Pin<&'a T> {
        Pin { pointer: self.pointer }
    }

    /// Gets a mutable reference to the data inside of this `Pin`.
    ///
    /// This requires that the data inside this `Pin` is `Unpin`.
    ///
    /// Note: `Pin` also implements `DerefMut` to the data, which can be used
    /// to access the inner value. However, `DerefMut` only provides a reference
    /// that lives for as long as the borrow of the `Pin`, not the lifetime of
    /// the `Pin` itself. This method allows turning the `Pin` into a reference
    /// with the same lifetime as the original `Pin`.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub fn get_mut(self: Pin<&'a mut T>) -> &'a mut T
    where
        T: Unpin,
    {
        self.pointer
    }

    /// Gets a mutable reference to the data inside of this `Pin`.
    ///
    /// # Safety
    ///
    /// This function is unsafe. You must guarantee that you will never move
    /// the data out of the mutable reference you receive when you call this
    /// function, so that the invariants on the `Pin` type can be upheld.
    ///
    /// If the underlying data is `Unpin`, `Pin::get_mut` should be used
    /// instead.
    #[stable(feature = "pin", since = "1.33.0")]
    #[inline(always)]
    pub unsafe fn get_unchecked_mut(self: Pin<&'a mut T>) -> &'a mut T {
        self.pointer
    }

    /// Constructs a new pin by mapping the interior value.
    ///
    /// For example, if you wanted to get a `Pin` of a field of something,
    /// you could use this to get access to that field in one line of code.
    /// However, there are several gotchas with these "pinning projections";
    /// see the [`pin` module] documentation for further details on that topic.
    ///
    /// # Safety
    ///
    /// This function is unsafe. You must guarantee that the data you return
    /// will not move so long as the argument value does not move (for example,
    /// because it is one of the fields of that value), and also that you do
    /// not move out of the argument you receive to the interior function.
    ///
    /// [`pin` module]: ../../std/pin/index.html#projections-and-structural-pinning
    #[stable(feature = "pin", since = "1.33.0")]
    pub unsafe fn map_unchecked_mut<U, F>(self: Pin<&'a mut T>, func: F) -> Pin<&'a mut U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let pointer = Pin::get_unchecked_mut(self);
        let new_pointer = func(pointer);
        Pin::new_unchecked(new_pointer)
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<P: Deref> Deref for Pin<P> {
    type Target = P::Target;
    fn deref(&self) -> &P::Target {
        Pin::get_ref(Pin::as_ref(self))
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<P: DerefMut> DerefMut for Pin<P>
where
    P::Target: Unpin
{
    fn deref_mut(&mut self) -> &mut P::Target {
        Pin::get_mut(Pin::as_mut(self))
    }
}

#[unstable(feature = "receiver_trait", issue = "0")]
impl<P: Receiver> Receiver for Pin<P> {}

#[stable(feature = "pin", since = "1.33.0")]
impl<P: fmt::Debug> fmt::Debug for Pin<P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.pointer, f)
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<P: fmt::Display> fmt::Display for Pin<P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.pointer, f)
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<P: fmt::Pointer> fmt::Pointer for Pin<P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&self.pointer, f)
    }
}

// Note: this means that any impl of `CoerceUnsized` that allows coercing from
// a type that impls `Deref<Target=impl !Unpin>` to a type that impls
// `Deref<Target=Unpin>` is unsound. Any such impl would probably be unsound
// for other reasons, though, so we just need to take care not to allow such
// impls to land in std.
#[stable(feature = "pin", since = "1.33.0")]
impl<P, U> CoerceUnsized<Pin<U>> for Pin<P>
where
    P: CoerceUnsized<U>,
{}

#[stable(feature = "pin", since = "1.33.0")]
impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P>
where
    P: DispatchFromDyn<U>,
{}
//! Manually manage memory through raw pointers. //! //! *[See also the pointer primitive types](../../std/primitive.pointer.html).* //! //! # Safety //! //! Many functions in this module take raw pointers as arguments and read from //! or write to them. For this to be safe, these pointers must be *valid*. //! Whether a pointer is valid depends on the operation it is used for //! (read or write), and the extent of the memory that is accessed (i.e., //! how many bytes are read/written). Most functions use `*mut T` and `*const T` //! to access only a single value, in which case the documentation omits the size //! and implicitly assumes it to be `size_of::<T>()` bytes. //! //! The precise rules for validity are not determined yet. The guarantees that are //! provided at this point are very minimal: //! //! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst]. //! * All pointers (except for the null pointer) are valid for all operations of //! [size zero][zst]. //! * All accesses performed by functions in this module are *non-atomic* in the sense //! of [atomic operations] used to synchronize between threads. This means it is //! undefined behavior to perform two concurrent accesses to the same location from different //! threads unless both accesses only read from memory. Notice that this explicitly //! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot //! be used for inter-thread synchronization. //! * The result of casting a reference to a pointer is valid for as long as the //! underlying object is live and no reference (just raw pointers) is used to //! access the same memory. //! //! These axioms, along with careful use of [`offset`] for pointer arithmetic, //! are enough to correctly implement many useful things in unsafe code. Stronger guarantees //! will be provided eventually, as the [aliasing] rules are being determined. For more //! 
information, see the [book] as well as the section in the reference devoted //! to [undefined behavior][ub]. //! //! ## Alignment //! //! Valid raw pointers as defined above are not necessarily properly aligned (where //! "proper" alignment is defined by the pointee type, i.e., `*const T` must be //! aligned to `mem::align_of::<T>()`). However, most functions require their //! arguments to be properly aligned, and will explicitly state //! this requirement in their documentation. Notable exceptions to this are //! [`read_unaligned`] and [`write_unaligned`]. //! //! When a function requires proper alignment, it does so even if the access //! has size 0, i.e., even if memory is not actually touched. Consider using //! [`NonNull::dangling`] in such cases. //! //! [aliasing]: ../../nomicon/aliasing.html //! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer //! [ub]: ../../reference/behavior-considered-undefined.html //! [null]: ./fn.null.html //! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts //! [atomic operations]: ../../std/sync/atomic/index.html //! [`copy`]: ../../std/ptr/fn.copy.html //! [`offset`]: ../../std/primitive.pointer.html#method.offset //! [`read_unaligned`]: ./fn.read_unaligned.html //! [`write_unaligned`]: ./fn.write_unaligned.html //! [`read_volatile`]: ./fn.read_volatile.html //! [`write_volatile`]: ./fn.write_volatile.html //! 
[`NonNull::dangling`]: ./struct.NonNull.html#method.dangling #![stable(feature = "rust1", since = "1.0.0")] use convert::From; use intrinsics; use ops::{CoerceUnsized, DispatchFromDyn}; use fmt; use hash; use marker::{PhantomData, Unsize}; use mem::{self, MaybeUninit}; use cmp::Ordering::{self, Less, Equal, Greater}; #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::copy_nonoverlapping; #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::copy; #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::write_bytes; /// Executes the destructor (if any) of the pointed-to value. /// /// This is semantically equivalent to calling [`ptr::read`] and discarding /// the result, but has the following advantages: /// /// * It is *required* to use `drop_in_place` to drop unsized types like /// trait objects, because they can't be read out onto the stack and /// dropped normally. /// /// * It is friendlier to the optimizer to do this over [`ptr::read`] when /// dropping manually allocated memory (e.g., when writing Box/Rc/Vec), /// as the compiler doesn't need to prove that it's sound to elide the /// copy. /// /// [`ptr::read`]: ../ptr/fn.read.html /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * `to_drop` must be [valid] for reads. /// /// * `to_drop` must be properly aligned. See the example below for how to drop /// an unaligned pointer. /// /// Additionally, if `T` is not [`Copy`], using the pointed-to value after /// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop = /// foo` counts as a use because it will cause the value to be dropped /// again. [`write`] can be used to overwrite data without causing it to be /// dropped. /// /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned. 
/// /// [valid]: ../ptr/index.html#safety /// [`Copy`]: ../marker/trait.Copy.html /// [`write`]: ../ptr/fn.write.html /// /// # Examples /// /// Manually remove the last item from a vector: /// /// ``` /// use std::ptr; /// use std::rc::Rc; /// /// let last = Rc::new(1); /// let weak = Rc::downgrade(&last); /// /// let mut v = vec![Rc::new(0), last]; /// /// unsafe { /// // Get a raw pointer to the last element in `v`. /// let ptr = &mut v[1] as *mut _; /// // Shorten `v` to prevent the last item from being dropped. We do that first, /// // to prevent issues if the `drop_in_place` below panics. /// v.set_len(1); /// // Without a call `drop_in_place`, the last item would never be dropped, /// // and the memory it manages would be leaked. /// ptr::drop_in_place(ptr); /// } /// /// assert_eq!(v, &[0.into()]); /// /// // Ensure that the last item was dropped. /// assert!(weak.upgrade().is_none()); /// ``` /// /// Unaligned values cannot be dropped in place, they must be copied to an aligned /// location first: /// ``` /// use std::ptr; /// use std::mem; /// /// unsafe fn drop_after_copy<T>(to_drop: *mut T) { /// let mut copy: T = mem::uninitialized(); /// ptr::copy(to_drop, &mut copy, 1); /// drop(copy); /// } /// /// #[repr(packed, C)] /// struct Packed { /// _padding: u8, /// unaligned: Vec<i32>, /// } /// /// let mut p = Packed { _padding: 0, unaligned: vec![42] }; /// unsafe { /// drop_after_copy(&mut p.unaligned as *mut _); /// mem::forget(p); /// } /// ``` /// /// Notice that the compiler performs this copy automatically when dropping packed structs, /// i.e., you do not usually have to worry about such issues unless you call `drop_in_place` /// manually. 
#[stable(feature = "drop_in_place", since = "1.8.0")] #[inline(always)] pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { real_drop_in_place(&mut *to_drop) } // The real `drop_in_place` -- the one that gets called implicitly when variables go // out of scope -- should have a safe reference and not a raw pointer as argument // type. When we drop a local variable, we access it with a pointer that behaves // like a safe reference; transmuting that to a raw pointer does not mean we can // actually access it with raw pointers. #[lang = "drop_in_place"] #[allow(unconditional_recursion)] unsafe fn real_drop_in_place<T: ?Sized>(to_drop: &mut T) { // Code here does not matter - this is replaced by the // real drop glue by the compiler. real_drop_in_place(to_drop) } /// Creates a null raw pointer. /// /// # Examples /// /// ``` /// use std::ptr; /// /// let p: *const i32 = ptr::null(); /// assert!(p.is_null()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_promotable] pub const fn null<T>() -> *const T { 0 as *const T } /// Creates a null mutable raw pointer. /// /// # Examples /// /// ``` /// use std::ptr; /// /// let p: *mut i32 = ptr::null_mut(); /// assert!(p.is_null()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_promotable] pub const fn null_mut<T>() -> *mut T { 0 as *mut T } /// Swaps the values at two mutable locations of the same type, without /// deinitializing either. /// /// But for the following two exceptions, this function is semantically /// equivalent to [`mem::swap`]: /// /// * It operates on raw pointers instead of references. When references are /// available, [`mem::swap`] should be preferred. /// /// * The two pointed-to values may overlap. If the values do overlap, then the /// overlapping region of memory from `x` will be used. This is demonstrated /// in the second example below. 
///
/// [`mem::swap`]: ../mem/fn.swap.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * Both `x` and `y` must be [valid] for reads and writes.
///
/// * Both `x` and `y` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointers must be non-NULL and properly aligned.
///
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// Swapping two non-overlapping regions:
///
/// ```
/// use std::ptr;
///
/// let mut array = [0, 1, 2, 3];
///
/// let x = array[0..].as_mut_ptr() as *mut [u32; 2]; // this is `array[0..2]`
/// let y = array[2..].as_mut_ptr() as *mut [u32; 2]; // this is `array[2..4]`
///
/// unsafe {
///     ptr::swap(x, y);
///     assert_eq!([2, 3, 0, 1], array);
/// }
/// ```
///
/// Swapping two overlapping regions:
///
/// ```
/// use std::ptr;
///
/// let mut array = [0, 1, 2, 3];
///
/// let x = array[0..].as_mut_ptr() as *mut [u32; 3]; // this is `array[0..3]`
/// let y = array[1..].as_mut_ptr() as *mut [u32; 3]; // this is `array[1..4]`
///
/// unsafe {
///     ptr::swap(x, y);
///     // The indices `1..3` of the slice overlap between `x` and `y`.
///     // Reasonable results would be for to them be `[2, 3]`, so that indices `0..3` are
///     // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]`
///     // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`).
///     // This implementation is defined to make the latter choice.
///     assert_eq!([1, 0, 1, 2], array);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
    // Give ourselves some scratch space to work with.
    // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
    let mut tmp = MaybeUninit::<T>::uninitialized();

    // Perform the swap
    // Three copies: *x -> tmp, *y -> *x (possibly overlapping, hence `copy`),
    // tmp -> *y. This order is what gives the documented overlap behavior.
    copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
    copy(y, x, 1); // `x` and `y` may overlap
    copy_nonoverlapping(tmp.as_ptr(), y, 1);
}

/// Swaps `count * size_of::<T>()` bytes between the two regions of memory
/// beginning at `x` and `y`. The two regions must *not* overlap.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * Both `x` and `y` must be [valid] for reads and writes of `count *
///   size_of::<T>()` bytes.
///
/// * Both `x` and `y` must be properly aligned.
///
/// * The region of memory beginning at `x` with a size of `count *
///   size_of::<T>()` bytes must *not* overlap with the region of memory
///   beginning at `y` with the same size.
///
/// Note that even if the effectively copied size (`count * size_of::<T>()`) is `0`,
/// the pointers must be non-NULL and properly aligned.
///
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::ptr;
///
/// let mut x = [1, 2, 3, 4];
/// let mut y = [7, 8, 9];
///
/// unsafe {
///     ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
/// }
///
/// assert_eq!(x, [7, 8, 3, 4]);
/// assert_eq!(y, [1, 2, 9]);
/// ```
#[inline]
#[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
    // Erase the element type: the byte-wise helper below only needs the total
    // length in bytes; alignment of `T` is preserved by the block-copy loop.
    let x = x as *mut u8;
    let y = y as *mut u8;
    let len = mem::size_of::<T>() * count;
    swap_nonoverlapping_bytes(x, y, len)
}

// Crate-internal fast path used by `mem::swap` for a single value.
#[inline]
pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
    // For types smaller than the block optimization below,
    // just swap directly to avoid pessimizing codegen.
    if mem::size_of::<T>() < 32 {
        let z = read(x);
        copy_nonoverlapping(y, x, 1);
        write(y, z);
    } else {
        swap_nonoverlapping(x, y, 1);
    }
}

#[inline]
unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
    // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
    // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
    // Haswell E processors. LLVM is more able to optimize if we give a struct a
    // #[repr(simd)], even if we don't actually use this struct directly.
    //
    // FIXME repr(simd) broken on emscripten and redox
    // It's also broken on big-endian powerpc64 and s390x. #42778
    #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
                       target_endian = "big")),
               repr(simd))]
    struct Block(u64, u64, u64, u64);
    struct UnalignedBlock(u64, u64, u64, u64);

    let block_size = mem::size_of::<Block>();

    // Loop through x & y, copying them `Block` at a time
    // The optimizer should unroll the loop fully for most types
    // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
    let mut i = 0;
    while i + block_size <= len {
        // Create some uninitialized memory as scratch space
        // Declaring `t` here avoids aligning the stack when this loop is unused
        let mut t = mem::MaybeUninit::<Block>::uninitialized();
        let t = t.as_mut_ptr() as *mut u8;
        let x = x.add(i);
        let y = y.add(i);

        // Swap a block of bytes of x & y, using t as a temporary buffer
        // This should be optimized into efficient SIMD operations where available
        copy_nonoverlapping(x, t, block_size);
        copy_nonoverlapping(y, x, block_size);
        copy_nonoverlapping(t, y, block_size);
        i += block_size;
    }

    if i < len {
        // Swap any remaining bytes
        // Uses the non-repr(simd) `UnalignedBlock` as scratch since the tail
        // (`rem < block_size` bytes) carries no alignment guarantee.
        let mut t = mem::MaybeUninit::<UnalignedBlock>::uninitialized();
        let rem = len - i;

        let t = t.as_mut_ptr() as *mut u8;
        let x = x.add(i);
        let y = y.add(i);

        copy_nonoverlapping(x, t, rem);
        copy_nonoverlapping(y, x, rem);
        copy_nonoverlapping(t, y, rem);
    }
}

/// Moves `src` into the pointed `dst`, returning the previous `dst` value.
///
/// Neither value is dropped.
///
/// This function is semantically equivalent to [`mem::replace`] except that it
/// operates on raw pointers instead of references. When references are
/// available, [`mem::replace`] should be preferred.
///
/// [`mem::replace`]: ../mem/fn.replace.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let mut rust = vec!['b', 'u', 's', 't'];
///
/// // `mem::replace` would have the same effect without requiring the unsafe
/// // block.
/// let b = unsafe {
///     ptr::replace(&mut rust[0], 'r')
/// };
///
/// assert_eq!(b, 'b');
/// assert_eq!(rust, &['r', 'u', 's', 't']);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
    // After the swap, the local `src` holds the old `*dst`, which is returned.
    mem::swap(&mut *dst, &mut src); // cannot overlap
    src
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the
///   case.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         // Create a bitwise copy of the value at `a` in `tmp`.
///         let tmp = ptr::read(a);
///
///         // Exiting at this point (either by explicitly returning or by
///         // calling a function which panics) would cause the value in `tmp` to
///         // be dropped while the same value is still referenced by `a`. This
///         // could trigger undefined behavior if `T` is not `Copy`.
///
///         // Create a bitwise copy of the value at `b` in `a`.
///         // This is safe because mutable references cannot alias.
///         ptr::copy_nonoverlapping(b, a, 1);
///
///         // As above, exiting here could trigger undefined behavior because
///         // the same value is referenced by `a` and `b`.
///
///         // Move `tmp` into `b`.
///         ptr::write(b, tmp);
///
///         // `tmp` has been moved (`write` takes ownership of its second argument),
///         // so nothing is dropped implicitly here.
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
///
/// ## Ownership of the Returned Value
///
/// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`].
/// If `T` is not [`Copy`], using both the returned value and the value at
/// `*src` can violate memory safety. Note that assigning to `*src` counts as a
/// use because it will attempt to drop the value at `*src`.
///
/// [`write`] can be used to overwrite data without causing it to be dropped.
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("foo");
/// unsafe {
///     // `s2` now points to the same underlying memory as `s`.
///     let mut s2: String = ptr::read(&s);
///
///     assert_eq!(s2, "foo");
///
///     // Assigning to `s2` causes its original value to be dropped. Beyond
///     // this point, `s` must no longer be used, as the underlying memory has
///     // been freed.
///     s2 = String::default();
///     assert_eq!(s2, "");
///
///     // Assigning to `s` would cause the old value to be dropped again,
///     // resulting in undefined behavior.
///     // s = String::from("bar"); // ERROR
///
///     // `ptr::write` can be used to overwrite a value without dropping it.
///     ptr::write(&mut s, String::from("bar"));
/// }
///
/// assert_eq!(s, "bar");
/// ```
///
/// [`mem::swap`]: ../mem/fn.swap.html
/// [valid]: ../ptr/index.html#safety
/// [`Copy`]: ../marker/trait.Copy.html
/// [`read_unaligned`]: ./fn.read_unaligned.html
/// [`write`]: ./fn.write.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn read<T>(src: *const T) -> T {
    // Bitwise copy into uninitialized scratch space, then assert the scratch
    // is initialized. The source is left untouched (no drop, no move).
    let mut tmp = MaybeUninit::<T>::uninitialized();
    copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
    tmp.into_initialized()
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// Unlike [`read`], `read_unaligned` works with unaligned pointers.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL.
///
/// [`Copy`]: ../marker/trait.Copy.html
/// [`read`]: ./fn.read.html
/// [`write_unaligned`]: ./fn.write_unaligned.html
/// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// Access members of a packed struct by reference:
///
/// ```
/// use std::ptr;
///
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let x = Packed {
///     _padding: 0x00,
///     unaligned: 0x01020304,
/// };
///
/// let v = unsafe {
///     // Take the address of a 32-bit integer which is not aligned.
///     // This must be done as a raw pointer; unaligned references are invalid.
///     let unaligned = &x.unaligned as *const u32;
///
///     // Dereferencing normally will emit an aligned load instruction,
///     // causing undefined behavior.
///     // let v = *unaligned; // ERROR
///
///     // Instead, use `read_unaligned` to read improperly aligned values.
///     let v = ptr::read_unaligned(unaligned);
///
///     v
/// };
///
/// // Accessing unaligned values directly is safe.
/// assert!(x.unaligned == v);
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn read_unaligned<T>(src: *const T) -> T {
    // Copy byte-by-byte (via `u8` pointers) so no aligned load of `T` is ever
    // emitted; the scratch `tmp` itself is properly aligned for `T`.
    let mut tmp = MaybeUninit::<T>::uninitialized();
    copy_nonoverlapping(src as *const u8,
                        tmp.as_mut_ptr() as *mut u8,
                        mem::size_of::<T>());
    tmp.into_initialized()
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// `write` does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care should be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been [`read`] from.
///
/// [`read`]: ./fn.read.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the
///   case.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: ../ptr/index.html#safety
/// [`write_unaligned`]: ./fn.write_unaligned.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write(y, z);
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         // Create a bitwise copy of the value at `a` in `tmp`.
///         let tmp = ptr::read(a);
///
///         // Exiting at this point (either by explicitly returning or by
///         // calling a function which panics) would cause the value in `tmp` to
///         // be dropped while the same value is still referenced by `a`. This
///         // could trigger undefined behavior if `T` is not `Copy`.
///
///         // Create a bitwise copy of the value at `b` in `a`.
///         // This is safe because mutable references cannot alias.
///         ptr::copy_nonoverlapping(b, a, 1);
///
///         // As above, exiting here could trigger undefined behavior because
///         // the same value is referenced by `a` and `b`.
///
///         // Move `tmp` into `b`.
///         ptr::write(b, tmp);
///
///         // `tmp` has been moved (`write` takes ownership of its second argument),
///         // so nothing is dropped implicitly here.
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
///
/// [`mem::swap`]: ../mem/fn.swap.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
    // `move_val_init` moves `src` into `*dst` without reading or dropping the
    // old `*dst`, and without running `src`'s destructor.
    intrinsics::move_val_init(&mut *dst, src)
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// Unlike [`write`], the pointer may be unaligned.
///
/// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been read with [`read_unaligned`].
///
/// [`write`]: ./fn.write.html
/// [`read_unaligned`]: ./fn.read_unaligned.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL.
///
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// Access fields in a packed struct:
///
/// ```
/// use std::{mem, ptr};
///
/// #[repr(packed, C)]
/// #[derive(Default)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let v = 0x01020304;
/// let mut x: Packed = unsafe { mem::zeroed() };
///
/// unsafe {
///     // Take a reference to a 32-bit integer which is not aligned.
///     let unaligned = &mut x.unaligned as *mut u32;
///
///     // Dereferencing normally will emit an aligned store instruction,
///     // causing undefined behavior because the pointer is not aligned.
///     // *unaligned = v; // ERROR
///
///     // Instead, use `write_unaligned` to write improperly aligned values.
///     ptr::write_unaligned(unaligned, v);
/// }
///
/// // Accessing unaligned values directly is safe.
/// assert!(x.unaligned == v);
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
    // Byte-wise copy avoids any aligned store of `T`; `mem::forget` then
    // prevents `src`'s destructor from running, since its bytes now live in
    // `*dst` (the value was semantically moved, not copied-and-dropped).
    copy_nonoverlapping(&src as *const T as *const u8,
                        dst as *mut u8,
                        mem::size_of::<T>());
    mem::forget(src);
}

/// Performs a volatile read of the value from `src` without moving it. This
/// leaves the memory in `src` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// Memory accessed with `read_volatile` or [`write_volatile`] should not be
/// accessed with non-volatile operations.
///
/// [`write_volatile`]: ./fn.write_volatile.html
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `read_volatile`) are noops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned.
///
/// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
/// However, storing non-[`Copy`] types in volatile memory is almost certainly
/// incorrect.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: ../ptr/index.html#safety
/// [`Copy`]: ../marker/trait.Copy.html
/// [`read`]: ./fn.read.html
/// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `read_volatile` and any write operation to the same location
/// is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
    // Thin wrapper: the volatile semantics come entirely from the intrinsic.
    intrinsics::volatile_load(src)
}

/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// Memory accessed with [`read_volatile`] or `write_volatile` should not be
/// accessed with non-volatile operations.
///
/// `write_volatile` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// [`read_volatile`]: ./fn.read_volatile.html
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `write_volatile`) are noops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: ../ptr/index.html#safety
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `write_volatile` and any other operation (reading or writing)
/// on the same location is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_volatile(y, z);
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
    // Thin wrapper: the volatile semantics come entirely from the intrinsic.
    intrinsics::volatile_store(dst, src);
}

// Inherent methods on raw `*const T` pointers; the `const_ptr` lang item lets
// the compiler attach this impl to the primitive pointer type.
#[lang = "const_ptr"]
impl<T: ?Sized> *const T {
    /// Returns `true` if the pointer is null.
    ///
    /// Note that unsized types have many possible null pointers, as only the
    /// raw data pointer is considered, not their length, vtable, etc.
    /// Therefore, two pointers that are null may still not compare equal to
    /// each other.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "Follow the rabbit";
    /// let ptr: *const u8 = s.as_ptr();
    /// assert!(!ptr.is_null());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn is_null(self) -> bool {
        // Compare via a cast to a thin pointer, so fat pointers are only
        // considering their "data" part for null-ness.
        (self as *const u8) == null()
    }

    /// Returns `None` if the pointer is null, or else returns a reference to
    /// the value wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// While this method and its mutable counterpart are useful for
    /// null-safety, it is important to note that this is still an unsafe
    /// operation because the returned value could be pointing to invalid
    /// memory.
    ///
    /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
    /// not necessarily reflect the actual lifetime of the data.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     if let Some(val_back) = ptr.as_ref() {
    ///         println!("We got back the value: {}!", val_back);
    ///     }
    /// }
    /// ```
    ///
    /// # Null-unchecked version
    ///
    /// If you are sure the pointer can never be null and are looking for some kind of
    /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
    /// dereference the pointer directly.
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     let val_back = &*ptr;
    ///     println!("We got back the value: {}!", val_back);
    /// }
    /// ```
    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
    #[inline]
    pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
        // Null check guards the reborrow; `'a` is unconstrained and picked by
        // the caller (see the lifetime caveat in the docs above).
        if self.is_null() {
            None
        } else {
            Some(&*self)
        }
    }

    /// Calculates the offset from a pointer.
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum, **in bytes** must fit in a usize.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.offset(1) as char);
    ///     println!("{}", *ptr.offset(2) as char);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
        // In-bounds (GEP-style) offset: UB outside the allocation, which is
        // what allows the stronger optimizations promised in the docs above.
        intrinsics::offset(self, count)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    /// In particular, the resulting pointer may *not* be used to access a
    /// different allocated object than the one `self` points to. In other
    /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is
    /// *not* the same as `y`, and dereferencing it is undefined behavior
    /// unless `x` and `y` point into the same allocated object.
    ///
    /// Always use `.offset(count)` instead when possible, because `offset`
    /// allows the compiler to optimize better. If you need to cross object
    /// boundaries, cast the pointer to an integer and do the arithmetic there.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_offset(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_offset(step);
    /// }
    /// ```
    #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
    #[inline]
    pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
        // `arith_offset` wraps on overflow, so this method is safe to call
        // even for out-of-bounds results (dereferencing remains unsafe).
        unsafe {
            intrinsics::arith_offset(self, count)
        }
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// This function is the inverse of [`offset`].
    ///
    /// [`offset`]: #method.offset
    /// [`wrapping_offset_from`]: #method.wrapping_offset_from
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and other pointer must be either in bounds or one
    ///   byte past the end of the same allocated object.
    ///
    /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The distance between the pointers, in bytes, must be an exact multiple
    ///   of the size of `T`.
    ///
    /// * The distance being in bounds cannot rely on "wrapping around" the address space.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_offset_from`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a Zero-Sized Type ("ZST").
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(ptr_offset_from)]
    ///
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// unsafe {
    ///     assert_eq!(ptr2.offset_from(ptr1), 2);
    ///     assert_eq!(ptr1.offset_from(ptr2), -2);
    ///     assert_eq!(ptr1.offset(2), ptr2);
    ///     assert_eq!(ptr2.offset(-2), ptr1);
    /// }
    /// ```
    #[unstable(feature = "ptr_offset_from", issue = "41079")]
    #[inline]
    pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
        // Rejects ZSTs (division by zero size) and sizes too large for the
        // signed division below.
        let pointee_size = mem::size_of::<T>();
        assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);

        // This is the same sequence that Clang emits for pointer subtraction.
        // It can be neither `nsw` nor `nuw` because the input is treated as
        // unsigned but then the output is treated as signed, so neither works.
        let d = isize::wrapping_sub(self as _, origin as _);
        // `exact_div` makes "byte distance not a multiple of size_of::<T>()"
        // UB, matching the safety contract documented above.
        intrinsics::exact_div(d, pointee_size as _)
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// If the address different between the two pointers is not a multiple of
    /// `mem::size_of::<T>()` then the result of the division is rounded towards
    /// zero.
    ///
    /// Though this method is safe for any two pointers, note that its result
    /// will be mostly useless if the two pointers aren't into the same allocated
    /// object, for example if they point to two different local variables.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a zero-sized type.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(ptr_wrapping_offset_from)]
    ///
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
    /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
    /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
    /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
    ///
    /// let ptr1: *const i32 = 3 as _;
    /// let ptr2: *const i32 = 13 as _;
    /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
    /// ```
    #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
    #[inline]
    pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
        let pointee_size = mem::size_of::<T>();
        assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);

        // `wrapping_div` rounds toward zero, giving the rounding behavior
        // documented above; no exactness requirement, hence safe.
        let d = isize::wrapping_sub(self as _, origin as _);
        d.wrapping_div(pointee_size as _)
    }

    /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.add(1) as char);
    ///     println!("{}", *ptr.add(2) as char);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn add(self, count: usize) -> Self
        where T: Sized,
    {
        self.offset(count as isize)
    }

    /// Calculates the offset from a pointer (convenience for
    /// `.offset((count as isize).wrapping_neg())`).
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same allocated object.
    ///
    /// * The computed offset cannot exceed `isize::MAX` **bytes**.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a usize.
/// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "123"; /// /// unsafe { /// let end: *const u8 = s.as_ptr().add(3); /// println!("{}", *end.sub(1) as char); /// println!("{}", *end.sub(2) as char); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn sub(self, count: usize) -> Self where T: Sized, { self.offset((count as isize).wrapping_neg()) } /// Calculates the offset from a pointer using wrapping arithmetic. /// (convenience for `.wrapping_offset(count as isize)`) /// /// `count` is in units of T; e.g., a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). /// /// Always use `.add(count)` instead when possible, because `add` /// allows the compiler to optimize better. 
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_add(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_add(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub fn wrapping_add(self, count: usize) -> Self
        where T: Sized,
    {
        self.wrapping_offset(count as isize)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.sub(count)` instead when possible, because `sub`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements (backwards)
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let start_rounded_down = ptr.wrapping_sub(2);
    /// ptr = ptr.wrapping_add(4);
    /// let step = 2;
    /// // This loop prints "5, 3, 1, "
    /// while ptr != start_rounded_down {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_sub(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub fn wrapping_sub(self, count: usize) -> Self
        where T: Sized,
    {
        self.wrapping_offset((count as isize).wrapping_neg())
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// See [`ptr::read`] for safety concerns and examples.
/// /// [`ptr::read`]: ./ptr/fn.read.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read(self) -> T where T: Sized, { read(self) } /// Performs a volatile read of the value from `self` without moving it. This /// leaves the memory in `self` unchanged. /// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// See [`ptr::read_volatile`] for safety concerns and examples. /// /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_volatile(self) -> T where T: Sized, { read_volatile(self) } /// Reads the value from `self` without moving it. This leaves the /// memory in `self` unchanged. /// /// Unlike `read`, the pointer may be unaligned. /// /// See [`ptr::read_unaligned`] for safety concerns and examples. /// /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_unaligned(self) -> T where T: Sized, { read_unaligned(self) } /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source /// and destination may overlap. /// /// NOTE: this has the *same* argument order as [`ptr::copy`]. /// /// See [`ptr::copy`] for safety concerns and examples. /// /// [`ptr::copy`]: ./ptr/fn.copy.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_to(self, dest: *mut T, count: usize) where T: Sized, { copy(self, dest, count) } /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source /// and destination may *not* overlap. /// /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`]. /// /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples. 
    ///
    /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
        where T: Sized,
    {
        copy_nonoverlapping(self, dest, count)
    }

    /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
    /// `align`.
    ///
    /// If it is not possible to align the pointer, the implementation returns
    /// `usize::max_value()`.
    ///
    /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
    /// used with the `offset` or `offset_to` methods.
    ///
    /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
    /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
    /// the returned offset is correct in all terms other than alignment.
    ///
    /// # Panics
    ///
    /// The function panics if `align` is not a power-of-two.
    ///
    /// # Examples
    ///
    /// Accessing adjacent `u8` as `u16`
    ///
    /// ```
    /// # #![feature(align_offset)]
    /// # fn foo(n: usize) {
    /// # use std::mem::align_of;
    /// # unsafe {
    /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
    /// let ptr = &x[n] as *const u8;
    /// let offset = ptr.align_offset(align_of::<u16>());
    /// if offset < x.len() - n - 1 {
    ///     let u16_ptr = ptr.add(offset) as *const u16;
    ///     assert_ne!(*u16_ptr, 500);
    /// } else {
    ///     // while the pointer can be aligned via `offset`, it would point
    ///     // outside the allocation
    /// }
    /// # } }
    /// ```
    #[unstable(feature = "align_offset", issue = "44488")]
    pub fn align_offset(self, align: usize) -> usize where T: Sized {
        if !align.is_power_of_two() {
            panic!("align_offset: align is not a power-of-two");
        }
        unsafe {
            align_offset(self, align)
        }
    }
}

#[lang = "mut_ptr"]
impl<T: ?Sized> *mut T {
    /// Returns `true` if the pointer is null.
/// /// Note that unsized types have many possible null pointers, as only the /// raw data pointer is considered, not their length, vtable, etc. /// Therefore, two pointers that are null may still not compare equal to /// each other. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// assert!(!ptr.is_null()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_null(self) -> bool { // Compare via a cast to a thin pointer, so fat pointers are only // considering their "data" part for null-ness. (self as *mut u8) == null_mut() } /// Returns `None` if the pointer is null, or else returns a reference to /// the value wrapped in `Some`. /// /// # Safety /// /// While this method and its mutable counterpart are useful for /// null-safety, it is important to note that this is still an unsafe /// operation because the returned value could be pointing to invalid /// memory. /// /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does /// not necessarily reflect the actual lifetime of the data. /// /// # Examples /// /// Basic usage: /// /// ``` /// let ptr: *mut u8 = &mut 10u8 as *mut u8; /// /// unsafe { /// if let Some(val_back) = ptr.as_ref() { /// println!("We got back the value: {}!", val_back); /// } /// } /// ``` /// /// # Null-unchecked version /// /// If you are sure the pointer can never be null and are looking for some kind of /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can /// dereference the pointer directly. /// /// ``` /// let ptr: *mut u8 = &mut 10u8 as *mut u8; /// /// unsafe { /// let val_back = &*ptr; /// println!("We got back the value: {}!", val_back); /// } /// ``` #[stable(feature = "ptr_as_ref", since = "1.9.0")] #[inline] pub unsafe fn as_ref<'a>(self) -> Option<&'a T> { if self.is_null() { None } else { Some(&*self) } } /// Calculates the offset from a pointer. 
/// /// `count` is in units of T; e.g., a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and resulting pointer must be either in bounds or one /// byte past the end of the same allocated object. /// /// * The computed offset, **in bytes**, cannot overflow an `isize`. /// /// * The offset being in bounds cannot rely on "wrapping around" the address /// space. That is, the infinite-precision sum, **in bytes** must fit in a usize. /// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// /// unsafe { /// println!("{}", *ptr.offset(1)); /// println!("{}", *ptr.offset(2)); /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized { intrinsics::offset(self, count) as *mut T } /// Calculates the offset from a pointer using wrapping arithmetic. /// `count` is in units of T; e.g., a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). /// In particular, the resulting pointer may *not* be used to access a /// different allocated object than the one `self` points to. In other /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is /// *not* the same as `y`, and dereferencing it is undefined behavior /// unless `x` and `y` point into the same allocated object. /// /// Always use `.offset(count)` instead when possible, because `offset` /// allows the compiler to optimize better. If you need to cross object /// boundaries, cast the pointer to an integer and do the arithmetic there. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let mut data = [1u8, 2, 3, 4, 5]; /// let mut ptr: *mut u8 = data.as_mut_ptr(); /// let step = 2; /// let end_rounded_up = ptr.wrapping_offset(6); /// /// while ptr != end_rounded_up { /// unsafe { /// *ptr = 0; /// } /// ptr = ptr.wrapping_offset(step); /// } /// assert_eq!(&data, &[0, 2, 0, 4, 0]); /// ``` #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")] #[inline] pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized { unsafe { intrinsics::arith_offset(self, count) as *mut T } } /// Returns `None` if the pointer is null, or else returns a mutable /// reference to the value wrapped in `Some`. /// /// # Safety /// /// As with `as_ref`, this is unsafe because it cannot verify the validity /// of the returned pointer, nor can it ensure that the lifetime `'a` /// returned is indeed a valid lifetime for the contained data. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// let first_value = unsafe { ptr.as_mut().unwrap() }; /// *first_value = 4; /// println!("{:?}", s); // It'll print: "[4, 2, 3]". /// ``` #[stable(feature = "ptr_as_ref", since = "1.9.0")] #[inline] pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> { if self.is_null() { None } else { Some(&mut *self) } } /// Calculates the distance between two pointers. The returned value is in /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`. /// /// This function is the inverse of [`offset`]. /// /// [`offset`]: #method.offset-1 /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1 /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and other pointer must be either in bounds or one /// byte past the end of the same allocated object. 
    ///
    /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The distance between the pointers, in bytes, must be an exact multiple
    ///   of the size of `T`.
    ///
    /// * The distance being in bounds cannot rely on "wrapping around" the address space.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_offset_from`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a Zero-Sized Type ("ZST").
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(ptr_offset_from)]
    ///
    /// let mut a = [0; 5];
    /// let ptr1: *mut i32 = &mut a[1];
    /// let ptr2: *mut i32 = &mut a[3];
    /// unsafe {
    ///     assert_eq!(ptr2.offset_from(ptr1), 2);
    ///     assert_eq!(ptr1.offset_from(ptr2), -2);
    ///     assert_eq!(ptr1.offset(2), ptr2);
    ///     assert_eq!(ptr2.offset(-2), ptr1);
    /// }
    /// ```
    #[unstable(feature = "ptr_offset_from", issue = "41079")]
    #[inline]
    pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
        (self as *const T).offset_from(origin)
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// If the address difference between the two pointers is not a multiple of
    /// `mem::size_of::<T>()` then the result of the division is rounded towards
    /// zero.
    ///
    /// Though this method is safe for any two pointers, note that its result
    /// will be mostly useless if the two pointers aren't into the same allocated
    /// object, for example if they point to two different local variables.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a zero-sized type.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(ptr_wrapping_offset_from)]
    ///
    /// let mut a = [0; 5];
    /// let ptr1: *mut i32 = &mut a[1];
    /// let ptr2: *mut i32 = &mut a[3];
    /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
    /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
    /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
    /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
    ///
    /// let ptr1: *mut i32 = 3 as _;
    /// let ptr2: *mut i32 = 13 as _;
    /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
    /// ```
    #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
    #[inline]
    pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
        (self as *const T).wrapping_offset_from(origin)
    }

    /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
/// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); /// /// unsafe { /// println!("{}", *ptr.add(1) as char); /// println!("{}", *ptr.add(2) as char); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn add(self, count: usize) -> Self where T: Sized, { self.offset(count as isize) } /// Calculates the offset from a pointer (convenience for /// `.offset((count as isize).wrapping_neg())`). /// /// `count` is in units of T; e.g., a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and resulting pointer must be either in bounds or one /// byte past the end of the same allocated object. /// /// * The computed offset cannot exceed `isize::MAX` **bytes**. 
/// /// * The offset being in bounds cannot rely on "wrapping around" the address /// space. That is, the infinite-precision sum must fit in a usize. /// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "123"; /// /// unsafe { /// let end: *const u8 = s.as_ptr().add(3); /// println!("{}", *end.sub(1) as char); /// println!("{}", *end.sub(2) as char); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn sub(self, count: usize) -> Self where T: Sized, { self.offset((count as isize).wrapping_neg()) } /// Calculates the offset from a pointer using wrapping arithmetic. /// (convenience for `.wrapping_offset(count as isize)`) /// /// `count` is in units of T; e.g., a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). 
    ///
    /// Always use `.add(count)` instead when possible, because `add`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_add(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_add(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub fn wrapping_add(self, count: usize) -> Self
        where T: Sized,
    {
        self.wrapping_offset(count as isize)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.sub(count)` instead when possible, because `sub`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements (backwards)
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let start_rounded_down = ptr.wrapping_sub(2);
    /// ptr = ptr.wrapping_add(4);
    /// let step = 2;
    /// // This loop prints "5, 3, 1, "
    /// while ptr != start_rounded_down {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_sub(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub fn wrapping_sub(self, count: usize) -> Self
        where T: Sized,
    {
        self.wrapping_offset((count as isize).wrapping_neg())
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// See [`ptr::read`] for safety concerns and examples.
    ///
    /// [`ptr::read`]: ./ptr/fn.read.html
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn read(self) -> T
        where T: Sized,
    {
        read(self)
    }

    /// Performs a volatile read of the value from `self` without moving it. This
    /// leaves the memory in `self` unchanged.
    ///
    /// Volatile operations are intended to act on I/O memory, and are guaranteed
    /// to not be elided or reordered by the compiler across other volatile
    /// operations.
    ///
    /// See [`ptr::read_volatile`] for safety concerns and examples.
    ///
    /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn read_volatile(self) -> T
        where T: Sized,
    {
        read_volatile(self)
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// Unlike `read`, the pointer may be unaligned.
    ///
    /// See [`ptr::read_unaligned`] for safety concerns and examples.
    ///
    /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn read_unaligned(self) -> T
        where T: Sized,
    {
        read_unaligned(self)
    }

    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
    /// and destination may overlap.
    ///
    /// NOTE: this has the *same* argument order as [`ptr::copy`].
    ///
    /// See [`ptr::copy`] for safety concerns and examples.
    ///
    /// [`ptr::copy`]: ./ptr/fn.copy.html
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn copy_to(self, dest: *mut T, count: usize)
        where T: Sized,
    {
        copy(self, dest, count)
    }

    /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
    /// and destination may *not* overlap.
    ///
    /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
    ///
    /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
/// /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize) where T: Sized, { copy_nonoverlapping(self, dest, count) } /// Copies `count * size_of<T>` bytes from `src` to `self`. The source /// and destination may overlap. /// /// NOTE: this has the *opposite* argument order of [`ptr::copy`]. /// /// See [`ptr::copy`] for safety concerns and examples. /// /// [`ptr::copy`]: ./ptr/fn.copy.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_from(self, src: *const T, count: usize) where T: Sized, { copy(src, self, count) } /// Copies `count * size_of<T>` bytes from `src` to `self`. The source /// and destination may *not* overlap. /// /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`]. /// /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples. /// /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize) where T: Sized, { copy_nonoverlapping(src, self, count) } /// Executes the destructor (if any) of the pointed-to value. /// /// See [`ptr::drop_in_place`] for safety concerns and examples. /// /// [`ptr::drop_in_place`]: ./ptr/fn.drop_in_place.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn drop_in_place(self) { drop_in_place(self) } /// Overwrites a memory location with the given value without reading or /// dropping the old value. /// /// See [`ptr::write`] for safety concerns and examples. 
/// /// [`ptr::write`]: ./ptr/fn.write.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write(self, val: T) where T: Sized, { write(self, val) } /// Invokes memset on the specified pointer, setting `count * size_of::<T>()` /// bytes of memory starting at `self` to `val`. /// /// See [`ptr::write_bytes`] for safety concerns and examples. /// /// [`ptr::write_bytes`]: ./ptr/fn.write_bytes.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_bytes(self, val: u8, count: usize) where T: Sized, { write_bytes(self, val, count) } /// Performs a volatile write of a memory location with the given value without /// reading or dropping the old value. /// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// See [`ptr::write_volatile`] for safety concerns and examples. /// /// [`ptr::write_volatile`]: ./ptr/fn.write_volatile.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_volatile(self, val: T) where T: Sized, { write_volatile(self, val) } /// Overwrites a memory location with the given value without reading or /// dropping the old value. /// /// Unlike `write`, the pointer may be unaligned. /// /// See [`ptr::write_unaligned`] for safety concerns and examples. /// /// [`ptr::write_unaligned`]: ./ptr/fn.write_unaligned.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_unaligned(self, val: T) where T: Sized, { write_unaligned(self, val) } /// Replaces the value at `self` with `src`, returning the old /// value, without dropping either. /// /// See [`ptr::replace`] for safety concerns and examples. 
    ///
    /// [`ptr::replace`]: ./ptr/fn.replace.html
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn replace(self, src: T) -> T
        where T: Sized,
    {
        replace(self, src)
    }

    /// Swaps the values at two mutable locations of the same type, without
    /// deinitializing either. They may overlap, unlike `mem::swap` which is
    /// otherwise equivalent.
    ///
    /// See [`ptr::swap`] for safety concerns and examples.
    ///
    /// [`ptr::swap`]: ./ptr/fn.swap.html
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn swap(self, with: *mut T)
        where T: Sized,
    {
        swap(self, with)
    }

    /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
    /// `align`.
    ///
    /// If it is not possible to align the pointer, the implementation returns
    /// `usize::max_value()`.
    ///
    /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
    /// used with the `offset` or `offset_to` methods.
    ///
    /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
    /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
    /// the returned offset is correct in all terms other than alignment.
    ///
    /// # Panics
    ///
    /// The function panics if `align` is not a power-of-two.
/// /// # Examples /// /// Accessing adjacent `u8` as `u16` /// /// ``` /// # #![feature(align_offset)] /// # fn foo(n: usize) { /// # use std::mem::align_of; /// # unsafe { /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; /// let ptr = &x[n] as *const u8; /// let offset = ptr.align_offset(align_of::<u16>()); /// if offset < x.len() - n - 1 { /// let u16_ptr = ptr.add(offset) as *const u16; /// assert_ne!(*u16_ptr, 500); /// } else { /// // while the pointer can be aligned via `offset`, it would point /// // outside the allocation /// } /// # } } /// ``` #[unstable(feature = "align_offset", issue = "44488")] pub fn align_offset(self, align: usize) -> usize where T: Sized { if !align.is_power_of_two() { panic!("align_offset: align is not a power-of-two"); } unsafe { align_offset(self, align) } } } /// Align pointer `p`. /// /// Calculate offset (in terms of elements of `stride` stride) that has to be applied /// to pointer `p` so that pointer `p` would get aligned to `a`. /// /// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic. /// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated /// constants. /// /// If we ever decide to make it possible to call the intrinsic with `a` that is not a /// power-of-two, it will probably be more prudent to just change to a naive implementation rather /// than trying to adapt this to accommodate that change. /// /// Any questions go to @nagisa. #[lang="align_offset"] pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize { /// Calculate multiplicative modular inverse of `x` modulo `m`. /// /// This implementation is tailored for align_offset and has following preconditions: /// /// * `m` is a power-of-two; /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead) /// /// Implementation of this function shall not panic. Ever. #[inline] fn mod_inv(x: usize, m: usize) -> usize { /// Multiplicative modular inverse table modulo 2⁴ = 16. 
/// /// Note, that this table does not contain values where inverse does not exist (i.e., for /// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.) const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15]; /// Modulo for which the `INV_TABLE_MOD_16` is intended. const INV_TABLE_MOD: usize = 16; /// INV_TABLE_MOD² const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD; let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize; if m <= INV_TABLE_MOD { table_inverse & (m - 1) } else { // We iterate "up" using the following formula: // // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$ // // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`. let mut inverse = table_inverse; let mut going_mod = INV_TABLE_MOD_SQUARED; loop { // y = y * (2 - xy) mod n // // Note, that we use wrapping operations here intentionally – the original formula // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod // usize::max_value()` instead, because we take the result `mod n` at the end // anyway. inverse = inverse.wrapping_mul( 2usize.wrapping_sub(x.wrapping_mul(inverse)) ) & (going_mod - 1); if going_mod > m { return inverse & (m - 1); } going_mod = going_mod.wrapping_mul(going_mod); } } } let stride = ::mem::size_of::<T>(); let a_minus_one = a.wrapping_sub(1); let pmoda = p as usize & a_minus_one; if pmoda == 0 { // Already aligned. Yay! return 0; } if stride <= 1 { return if stride == 0 { // If the pointer is not aligned, and the element is zero-sized, then no amount of // elements will ever align the pointer. !0 } else { a.wrapping_sub(pmoda) }; } let smoda = stride & a_minus_one; // a is power-of-two so cannot be 0. stride = 0 is handled above. 
let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a)); let gcd = 1usize << gcdpow; if p as usize & (gcd - 1) == 0 { // This branch solves for the following linear congruence equation: // // $$ p + so ≡ 0 mod a $$ // // $p$ here is the pointer value, $s$ – stride of `T`, $o$ offset in `T`s, and $a$ – the // requested alignment. // // g = gcd(a, s) // o = (a - (p mod a))/g * ((s/g)⁻¹ mod a) // // The first term is “the relative alignment of p to a”, the second term is “how does // incrementing p by s bytes change the relative alignment of p”. Division by `g` is // necessary to make this equation well formed if $a$ and $s$ are not co-prime. // // Furthermore, the result produced by this solution is not “minimal”, so it is necessary // to take the result $o mod lcm(s, a)$. We can replace $lcm(s, a)$ with just a $a / g$. let j = a.wrapping_sub(pmoda) >> gcdpow; let k = smoda >> gcdpow; return intrinsics::unchecked_rem(j.wrapping_mul(mod_inv(k, a)), a >> gcdpow); } // Cannot be aligned at all. usize::max_value() } // Equality for pointers #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> PartialEq for *const T { #[inline] fn eq(&self, other: &*const T) -> bool { *self == *other } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Eq for *const T {} #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> PartialEq for *mut T { #[inline] fn eq(&self, other: &*mut T) -> bool { *self == *other } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Eq for *mut T {} /// Compares raw pointers for equality. /// /// This is the same as using the `==` operator, but less generic: /// the arguments have to be `*const T` raw pointers, /// not anything that implements `PartialEq`. /// /// This can be used to compare `&T` references (which coerce to `*const T` implicitly) /// by their address rather than comparing the values they point to /// (which is what the `PartialEq for &T` implementation does). 
/// /// # Examples /// /// ``` /// use std::ptr; /// /// let five = 5; /// let other_five = 5; /// let five_ref = &five; /// let same_five_ref = &five; /// let other_five_ref = &other_five; /// /// assert!(five_ref == same_five_ref); /// assert!(five_ref == other_five_ref); /// /// assert!(ptr::eq(five_ref, same_five_ref)); /// assert!(!ptr::eq(five_ref, other_five_ref)); /// ``` #[stable(feature = "ptr_eq", since = "1.17.0")] #[inline] pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool { a == b } /// Hash a raw pointer. /// /// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly) /// by its address rather than the value it points to /// (which is what the `Hash for &T` implementation does). /// /// # Examples /// /// ``` /// #![feature(ptr_hash)] /// use std::collections::hash_map::DefaultHasher; /// use std::hash::{Hash, Hasher}; /// use std::ptr; /// /// let five = 5; /// let five_ref = &five; /// /// let mut hasher = DefaultHasher::new(); /// ptr::hash(five_ref, &mut hasher); /// let actual = hasher.finish(); /// /// let mut hasher = DefaultHasher::new(); /// (five_ref as *const i32).hash(&mut hasher); /// let expected = hasher.finish(); /// /// assert_eq!(actual, expected); /// ``` #[unstable(feature = "ptr_hash", reason = "newly added", issue = "56286")] pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) { use hash::Hash; hashee.hash(into); } // Impls for function pointers macro_rules! 
fnptr_impls_safety_abi { ($FnTy: ty, $($Arg: ident),*) => { #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> PartialEq for $FnTy { #[inline] fn eq(&self, other: &Self) -> bool { *self as usize == *other as usize } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> Eq for $FnTy {} #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> PartialOrd for $FnTy { #[inline] fn partial_cmp(&self, other: &Self) -> Option<Ordering> { (*self as usize).partial_cmp(&(*other as usize)) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> Ord for $FnTy { #[inline] fn cmp(&self, other: &Self) -> Ordering { (*self as usize).cmp(&(*other as usize)) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> hash::Hash for $FnTy { fn hash<HH: hash::Hasher>(&self, state: &mut HH) { state.write_usize(*self as usize) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> fmt::Pointer for $FnTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&(*self as *const ()), f) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> fmt::Debug for $FnTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&(*self as *const ()), f) } } } } macro_rules! fnptr_impls_args { ($($Arg: ident),+) => { fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* } }; () => { // No variadic functions with 0 parameters fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, } fnptr_impls_safety_abi! 
{ extern "C" fn() -> Ret, } fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, } fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, } }; } fnptr_impls_args! { } fnptr_impls_args! { A } fnptr_impls_args! { A, B } fnptr_impls_args! { A, B, C } fnptr_impls_args! { A, B, C, D } fnptr_impls_args! { A, B, C, D, E } fnptr_impls_args! { A, B, C, D, E, F } fnptr_impls_args! { A, B, C, D, E, F, G } fnptr_impls_args! { A, B, C, D, E, F, G, H } fnptr_impls_args! { A, B, C, D, E, F, G, H, I } fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J } fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K } fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L } // Comparison for pointers #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Ord for *const T { #[inline] fn cmp(&self, other: &*const T) -> Ordering { if self < other { Less } else if self == other { Equal } else { Greater } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> PartialOrd for *const T { #[inline] fn partial_cmp(&self, other: &*const T) -> Option<Ordering> { Some(self.cmp(other)) } #[inline] fn lt(&self, other: &*const T) -> bool { *self < *other } #[inline] fn le(&self, other: &*const T) -> bool { *self <= *other } #[inline] fn gt(&self, other: &*const T) -> bool { *self > *other } #[inline] fn ge(&self, other: &*const T) -> bool { *self >= *other } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Ord for *mut T { #[inline] fn cmp(&self, other: &*mut T) -> Ordering { if self < other { Less } else if self == other { Equal } else { Greater } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> PartialOrd for *mut T { #[inline] fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> { Some(self.cmp(other)) } #[inline] fn lt(&self, other: &*mut T) -> bool { *self < *other } #[inline] fn le(&self, other: &*mut T) -> bool { *self <= *other } #[inline] fn gt(&self, other: &*mut T) -> bool { *self > *other } #[inline] fn ge(&self, other: &*mut T) -> 
bool { *self >= *other } } /// A wrapper around a raw non-null `*mut T` that indicates that the possessor /// of this wrapper owns the referent. Useful for building abstractions like /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`. /// /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`. /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies /// the kind of strong aliasing guarantees an instance of `T` can expect: /// the referent of the pointer should not be modified without a unique path to /// its owning Unique. /// /// If you're uncertain of whether it's correct to use `Unique` for your purposes, /// consider using `NonNull`, which has weaker semantics. /// /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer /// is never dereferenced. This is so that enums may use this forbidden value /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`. /// However the pointer may still dangle if it isn't dereferenced. /// /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct /// for any type which upholds Unique's aliasing requirements. #[unstable(feature = "ptr_internals", issue = "0", reason = "use NonNull instead and consider PhantomData<T> \ (if you also use #[may_dangle]), Send, and/or Sync")] #[doc(hidden)] #[repr(transparent)] #[rustc_layout_scalar_valid_range_start(1)] pub struct Unique<T: ?Sized> { pointer: *const T, // NOTE: this marker has no consequences for variance, but is necessary // for dropck to understand that we logically own a `T`. 
// // For details, see: // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data _marker: PhantomData<T>, } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> fmt::Debug for Unique<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } /// `Unique` pointers are `Send` if `T` is `Send` because the data they /// reference is unaliased. Note that this aliasing invariant is /// unenforced by the type system; the abstraction using the /// `Unique` must enforce it. #[unstable(feature = "ptr_internals", issue = "0")] unsafe impl<T: Send + ?Sized> Send for Unique<T> { } /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they /// reference is unaliased. Note that this aliasing invariant is /// unenforced by the type system; the abstraction using the /// `Unique` must enforce it. #[unstable(feature = "ptr_internals", issue = "0")] unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: Sized> Unique<T> { /// Creates a new `Unique` that is dangling, but well-aligned. /// /// This is useful for initializing types which lazily allocate, like /// `Vec::new` does. /// /// Note that the pointer value may potentially represent a valid pointer to /// a `T`, which means this must not be used as a "not yet initialized" /// sentinel value. Types that lazily allocate must track initialization by /// some other means. // FIXME: rename to dangling() to match NonNull? pub const fn empty() -> Self { unsafe { Unique::new_unchecked(mem::align_of::<T>() as *mut T) } } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> Unique<T> { /// Creates a new `Unique`. /// /// # Safety /// /// `ptr` must be non-null. pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { Unique { pointer: ptr as _, _marker: PhantomData } } /// Creates a new `Unique` if `ptr` is non-null. 
pub fn new(ptr: *mut T) -> Option<Self> { if !ptr.is_null() { Some(unsafe { Unique { pointer: ptr as _, _marker: PhantomData } }) } else { None } } /// Acquires the underlying `*mut` pointer. pub const fn as_ptr(self) -> *mut T { self.pointer as *mut T } /// Dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. pub unsafe fn as_ref(&self) -> &T { &*self.as_ptr() } /// Mutably dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. pub unsafe fn as_mut(&mut self) -> &mut T { &mut *self.as_ptr() } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> Clone for Unique<T> { fn clone(&self) -> Self { *self } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> Copy for Unique<T> { } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> { } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> fmt::Pointer for Unique<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> From<&mut T> for Unique<T> { fn from(reference: &mut T) -> Self { unsafe { Unique { pointer: reference as *mut T, _marker: PhantomData } } } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> From<&T> for Unique<T> { fn from(reference: &T) -> Self { unsafe { Unique { pointer: reference as *const T, _marker: PhantomData } } } } #[unstable(feature = 
"ptr_internals", issue = "0")] impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> { fn from(p: NonNull<T>) -> Self { unsafe { Unique { pointer: p.pointer, _marker: PhantomData } } } } /// `*mut T` but non-zero and covariant. /// /// This is often the correct thing to use when building data structures using /// raw pointers, but is ultimately more dangerous to use because of its additional /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`! /// /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer /// is never dereferenced. This is so that enums may use this forbidden value /// as a discriminant -- `Option<NonNull<T>>` has the same size as `*mut T`. /// However the pointer may still dangle if it isn't dereferenced. /// /// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect /// for your use case, you should include some PhantomData in your type to /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`. /// Usually this won't be necessary; covariance is correct for most safe abstractions, /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they /// provide a public API that follows the normal shared XOR mutable rules of Rust. /// /// Notice that `NonNull<T>` has a `From` instance for `&T`. However, this does /// not change the fact that mutating through a (pointer derived from a) shared /// reference is undefined behavior unless the mutation happens inside an /// [`UnsafeCell<T>`]. The same goes for creating a mutable reference from a shared /// reference. When using this `From` instance without an `UnsafeCell<T>`, /// it is your responsibility to ensure that `as_mut` is never called, and `as_ptr` /// is never used for mutation. 
/// /// [`UnsafeCell<T>`]: ../cell/struct.UnsafeCell.html #[stable(feature = "nonnull", since = "1.25.0")] #[repr(transparent)] #[rustc_layout_scalar_valid_range_start(1)] pub struct NonNull<T: ?Sized> { pointer: *const T, } /// `NonNull` pointers are not `Send` because the data they reference may be aliased. // N.B., this impl is unnecessary, but should provide better error messages. #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> !Send for NonNull<T> { } /// `NonNull` pointers are not `Sync` because the data they reference may be aliased. // N.B., this impl is unnecessary, but should provide better error messages. #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> !Sync for NonNull<T> { } impl<T: Sized> NonNull<T> { /// Creates a new `NonNull` that is dangling, but well-aligned. /// /// This is useful for initializing types which lazily allocate, like /// `Vec::new` does. /// /// Note that the pointer value may potentially represent a valid pointer to /// a `T`, which means this must not be used as a "not yet initialized" /// sentinel value. Types that lazily allocate must track initialization by /// some other means. #[stable(feature = "nonnull", since = "1.25.0")] #[inline] #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_ptr_nonnull"))] pub const fn dangling() -> Self { unsafe { let ptr = mem::align_of::<T>() as *mut T; NonNull::new_unchecked(ptr) } } } impl<T: ?Sized> NonNull<T> { /// Creates a new `NonNull`. /// /// # Safety /// /// `ptr` must be non-null. #[stable(feature = "nonnull", since = "1.25.0")] #[inline] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { NonNull { pointer: ptr as _ } } /// Creates a new `NonNull` if `ptr` is non-null. #[stable(feature = "nonnull", since = "1.25.0")] #[inline] pub fn new(ptr: *mut T) -> Option<Self> { if !ptr.is_null() { Some(unsafe { Self::new_unchecked(ptr) }) } else { None } } /// Acquires the underlying `*mut` pointer. 
#[stable(feature = "nonnull", since = "1.25.0")] #[inline] pub const fn as_ptr(self) -> *mut T { self.pointer as *mut T } /// Dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. #[stable(feature = "nonnull", since = "1.25.0")] #[inline] pub unsafe fn as_ref(&self) -> &T { &*self.as_ptr() } /// Mutably dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. #[stable(feature = "nonnull", since = "1.25.0")] #[inline] pub unsafe fn as_mut(&mut self) -> &mut T { &mut *self.as_ptr() } /// Cast to a pointer of another type #[stable(feature = "nonnull_cast", since = "1.27.0")] #[inline] #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_ptr_nonnull"))] pub const fn cast<U>(self) -> NonNull<U> { unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) } } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Clone for NonNull<T> { fn clone(&self) -> Self { *self } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Copy for NonNull<T> { } #[unstable(feature = "coerce_unsized", issue = "27732")] impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { } #[unstable(feature = "dispatch_from_dyn", issue = "0")] impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> { } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> fmt::Debug for NonNull<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> fmt::Pointer for NonNull<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 
fmt::Pointer::fmt(&self.as_ptr(), f) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Eq for NonNull<T> {} #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> PartialEq for NonNull<T> { #[inline] fn eq(&self, other: &Self) -> bool { self.as_ptr() == other.as_ptr() } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Ord for NonNull<T> { #[inline] fn cmp(&self, other: &Self) -> Ordering { self.as_ptr().cmp(&other.as_ptr()) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> PartialOrd for NonNull<T> { #[inline] fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.as_ptr().partial_cmp(&other.as_ptr()) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> hash::Hash for NonNull<T> { #[inline] fn hash<H: hash::Hasher>(&self, state: &mut H) { self.as_ptr().hash(state) } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> From<Unique<T>> for NonNull<T> { #[inline] fn from(unique: Unique<T>) -> Self { unsafe { NonNull { pointer: unique.pointer } } } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> From<&mut T> for NonNull<T> { #[inline] fn from(reference: &mut T) -> Self { unsafe { NonNull { pointer: reference as *mut T } } } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> From<&T> for NonNull<T> { #[inline] fn from(reference: &T) -> Self { unsafe { NonNull { pointer: reference as *const T } } } } Make `ptr::eq` documentation mention smart-pointer behavior Resolves #59214 //! Manually manage memory through raw pointers. //! //! *[See also the pointer primitive types](../../std/primitive.pointer.html).* //! //! # Safety //! //! Many functions in this module take raw pointers as arguments and read from //! or write to them. For this to be safe, these pointers must be *valid*. //! Whether a pointer is valid depends on the operation it is used for //! (read or write), and the extent of the memory that is accessed (i.e., //! 
how many bytes are read/written). Most functions use `*mut T` and `*const T` //! to access only a single value, in which case the documentation omits the size //! and implicitly assumes it to be `size_of::<T>()` bytes. //! //! The precise rules for validity are not determined yet. The guarantees that are //! provided at this point are very minimal: //! //! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst]. //! * All pointers (except for the null pointer) are valid for all operations of //! [size zero][zst]. //! * All accesses performed by functions in this module are *non-atomic* in the sense //! of [atomic operations] used to synchronize between threads. This means it is //! undefined behavior to perform two concurrent accesses to the same location from different //! threads unless both accesses only read from memory. Notice that this explicitly //! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot //! be used for inter-thread synchronization. //! * The result of casting a reference to a pointer is valid for as long as the //! underlying object is live and no reference (just raw pointers) is used to //! access the same memory. //! //! These axioms, along with careful use of [`offset`] for pointer arithmetic, //! are enough to correctly implement many useful things in unsafe code. Stronger guarantees //! will be provided eventually, as the [aliasing] rules are being determined. For more //! information, see the [book] as well as the section in the reference devoted //! to [undefined behavior][ub]. //! //! ## Alignment //! //! Valid raw pointers as defined above are not necessarily properly aligned (where //! "proper" alignment is defined by the pointee type, i.e., `*const T` must be //! aligned to `mem::align_of::<T>()`). However, most functions require their //! arguments to be properly aligned, and will explicitly state //! this requirement in their documentation. Notable exceptions to this are //! 
[`read_unaligned`] and [`write_unaligned`]. //! //! When a function requires proper alignment, it does so even if the access //! has size 0, i.e., even if memory is not actually touched. Consider using //! [`NonNull::dangling`] in such cases. //! //! [aliasing]: ../../nomicon/aliasing.html //! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer //! [ub]: ../../reference/behavior-considered-undefined.html //! [null]: ./fn.null.html //! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts //! [atomic operations]: ../../std/sync/atomic/index.html //! [`copy`]: ../../std/ptr/fn.copy.html //! [`offset`]: ../../std/primitive.pointer.html#method.offset //! [`read_unaligned`]: ./fn.read_unaligned.html //! [`write_unaligned`]: ./fn.write_unaligned.html //! [`read_volatile`]: ./fn.read_volatile.html //! [`write_volatile`]: ./fn.write_volatile.html //! [`NonNull::dangling`]: ./struct.NonNull.html#method.dangling #![stable(feature = "rust1", since = "1.0.0")] use convert::From; use intrinsics; use ops::{CoerceUnsized, DispatchFromDyn}; use fmt; use hash; use marker::{PhantomData, Unsize}; use mem::{self, MaybeUninit}; use cmp::Ordering::{self, Less, Equal, Greater}; #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::copy_nonoverlapping; #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::copy; #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::write_bytes; /// Executes the destructor (if any) of the pointed-to value. /// /// This is semantically equivalent to calling [`ptr::read`] and discarding /// the result, but has the following advantages: /// /// * It is *required* to use `drop_in_place` to drop unsized types like /// trait objects, because they can't be read out onto the stack and /// dropped normally. 
/// /// * It is friendlier to the optimizer to do this over [`ptr::read`] when /// dropping manually allocated memory (e.g., when writing Box/Rc/Vec), /// as the compiler doesn't need to prove that it's sound to elide the /// copy. /// /// [`ptr::read`]: ../ptr/fn.read.html /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * `to_drop` must be [valid] for reads. /// /// * `to_drop` must be properly aligned. See the example below for how to drop /// an unaligned pointer. /// /// Additionally, if `T` is not [`Copy`], using the pointed-to value after /// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop = /// foo` counts as a use because it will cause the value to be dropped /// again. [`write`] can be used to overwrite data without causing it to be /// dropped. /// /// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned. /// /// [valid]: ../ptr/index.html#safety /// [`Copy`]: ../marker/trait.Copy.html /// [`write`]: ../ptr/fn.write.html /// /// # Examples /// /// Manually remove the last item from a vector: /// /// ``` /// use std::ptr; /// use std::rc::Rc; /// /// let last = Rc::new(1); /// let weak = Rc::downgrade(&last); /// /// let mut v = vec![Rc::new(0), last]; /// /// unsafe { /// // Get a raw pointer to the last element in `v`. /// let ptr = &mut v[1] as *mut _; /// // Shorten `v` to prevent the last item from being dropped. We do that first, /// // to prevent issues if the `drop_in_place` below panics. /// v.set_len(1); /// // Without a call `drop_in_place`, the last item would never be dropped, /// // and the memory it manages would be leaked. /// ptr::drop_in_place(ptr); /// } /// /// assert_eq!(v, &[0.into()]); /// /// // Ensure that the last item was dropped. 
/// assert!(weak.upgrade().is_none()); /// ``` /// /// Unaligned values cannot be dropped in place, they must be copied to an aligned /// location first: /// ``` /// use std::ptr; /// use std::mem; /// /// unsafe fn drop_after_copy<T>(to_drop: *mut T) { /// let mut copy: T = mem::uninitialized(); /// ptr::copy(to_drop, &mut copy, 1); /// drop(copy); /// } /// /// #[repr(packed, C)] /// struct Packed { /// _padding: u8, /// unaligned: Vec<i32>, /// } /// /// let mut p = Packed { _padding: 0, unaligned: vec![42] }; /// unsafe { /// drop_after_copy(&mut p.unaligned as *mut _); /// mem::forget(p); /// } /// ``` /// /// Notice that the compiler performs this copy automatically when dropping packed structs, /// i.e., you do not usually have to worry about such issues unless you call `drop_in_place` /// manually. #[stable(feature = "drop_in_place", since = "1.8.0")] #[inline(always)] pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { real_drop_in_place(&mut *to_drop) } // The real `drop_in_place` -- the one that gets called implicitly when variables go // out of scope -- should have a safe reference and not a raw pointer as argument // type. When we drop a local variable, we access it with a pointer that behaves // like a safe reference; transmuting that to a raw pointer does not mean we can // actually access it with raw pointers. #[lang = "drop_in_place"] #[allow(unconditional_recursion)] unsafe fn real_drop_in_place<T: ?Sized>(to_drop: &mut T) { // Code here does not matter - this is replaced by the // real drop glue by the compiler. real_drop_in_place(to_drop) } /// Creates a null raw pointer. /// /// # Examples /// /// ``` /// use std::ptr; /// /// let p: *const i32 = ptr::null(); /// assert!(p.is_null()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_promotable] pub const fn null<T>() -> *const T { 0 as *const T } /// Creates a null mutable raw pointer. 
/// /// # Examples /// /// ``` /// use std::ptr; /// /// let p: *mut i32 = ptr::null_mut(); /// assert!(p.is_null()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_promotable] pub const fn null_mut<T>() -> *mut T { 0 as *mut T } /// Swaps the values at two mutable locations of the same type, without /// deinitializing either. /// /// But for the following two exceptions, this function is semantically /// equivalent to [`mem::swap`]: /// /// * It operates on raw pointers instead of references. When references are /// available, [`mem::swap`] should be preferred. /// /// * The two pointed-to values may overlap. If the values do overlap, then the /// overlapping region of memory from `x` will be used. This is demonstrated /// in the second example below. /// /// [`mem::swap`]: ../mem/fn.swap.html /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * Both `x` and `y` must be [valid] for reads and writes. /// /// * Both `x` and `y` must be properly aligned. /// /// Note that even if `T` has size `0`, the pointers must be non-NULL and properly aligned. /// /// [valid]: ../ptr/index.html#safety /// /// # Examples /// /// Swapping two non-overlapping regions: /// /// ``` /// use std::ptr; /// /// let mut array = [0, 1, 2, 3]; /// /// let x = array[0..].as_mut_ptr() as *mut [u32; 2]; // this is `array[0..2]` /// let y = array[2..].as_mut_ptr() as *mut [u32; 2]; // this is `array[2..4]` /// /// unsafe { /// ptr::swap(x, y); /// assert_eq!([2, 3, 0, 1], array); /// } /// ``` /// /// Swapping two overlapping regions: /// /// ``` /// use std::ptr; /// /// let mut array = [0, 1, 2, 3]; /// /// let x = array[0..].as_mut_ptr() as *mut [u32; 3]; // this is `array[0..3]` /// let y = array[1..].as_mut_ptr() as *mut [u32; 3]; // this is `array[1..4]` /// /// unsafe { /// ptr::swap(x, y); /// // The indices `1..3` of the slice overlap between `x` and `y`. 
/// // Reasonable results would be for to them be `[2, 3]`, so that indices `0..3` are /// // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]` /// // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`). /// // This implementation is defined to make the latter choice. /// assert_eq!([1, 0, 1, 2], array); /// } /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn swap<T>(x: *mut T, y: *mut T) { // Give ourselves some scratch space to work with. // We do not have to worry about drops: `MaybeUninit` does nothing when dropped. let mut tmp = MaybeUninit::<T>::uninitialized(); // Perform the swap copy_nonoverlapping(x, tmp.as_mut_ptr(), 1); copy(y, x, 1); // `x` and `y` may overlap copy_nonoverlapping(tmp.as_ptr(), y, 1); } /// Swaps `count * size_of::<T>()` bytes between the two regions of memory /// beginning at `x` and `y`. The two regions must *not* overlap. /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * Both `x` and `y` must be [valid] for reads and writes of `count * /// size_of::<T>()` bytes. /// /// * Both `x` and `y` must be properly aligned. /// /// * The region of memory beginning at `x` with a size of `count * /// size_of::<T>()` bytes must *not* overlap with the region of memory /// beginning at `y` with the same size. /// /// Note that even if the effectively copied size (`count * size_of::<T>()`) is `0`, /// the pointers must be non-NULL and properly aligned. 
///
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::ptr;
///
/// let mut x = [1, 2, 3, 4];
/// let mut y = [7, 8, 9];
///
/// unsafe {
///     ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
/// }
///
/// assert_eq!(x, [7, 8, 3, 4]);
/// assert_eq!(y, [1, 2, 9]);
/// ```
#[inline]
#[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
    // Reduce the typed problem to a byte-wise one; `len` is the total number
    // of bytes to exchange.
    let x = x as *mut u8;
    let y = y as *mut u8;
    let len = mem::size_of::<T>() * count;
    swap_nonoverlapping_bytes(x, y, len)
}

#[inline]
pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
    // For types smaller than the block optimization below,
    // just swap directly to avoid pessimizing codegen.
    if mem::size_of::<T>() < 32 {
        let z = read(x);
        copy_nonoverlapping(y, x, 1);
        write(y, z);
    } else {
        swap_nonoverlapping(x, y, 1);
    }
}

#[inline]
unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
    // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
    // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
    // Haswell E processors. LLVM is more able to optimize if we give a struct a
    // #[repr(simd)], even if we don't actually use this struct directly.
    //
    // FIXME repr(simd) broken on emscripten and redox
    // It's also broken on big-endian powerpc64 and s390x. #42778
    #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
                       target_endian = "big")),
               repr(simd))]
    struct Block(u64, u64, u64, u64);
    struct UnalignedBlock(u64, u64, u64, u64);

    let block_size = mem::size_of::<Block>();

    // Loop through x & y, copying them `Block` at a time
    // The optimizer should unroll the loop fully for most types
    // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
    let mut i = 0;
    while i + block_size <= len {
        // Create some uninitialized memory as scratch space
        // Declaring `t` here avoids aligning the stack when this loop is unused
        let mut t = mem::MaybeUninit::<Block>::uninitialized();
        let t = t.as_mut_ptr() as *mut u8;
        let x = x.add(i);
        let y = y.add(i);

        // Swap a block of bytes of x & y, using t as a temporary buffer
        // This should be optimized into efficient SIMD operations where available
        copy_nonoverlapping(x, t, block_size);
        copy_nonoverlapping(y, x, block_size);
        copy_nonoverlapping(t, y, block_size);
        i += block_size;
    }

    // `len` need not be a multiple of `block_size`; exchange the tail
    // (strictly fewer than `block_size` bytes) with an unaligned scratch block.
    if i < len {
        // Swap any remaining bytes
        let mut t = mem::MaybeUninit::<UnalignedBlock>::uninitialized();
        let rem = len - i;

        let t = t.as_mut_ptr() as *mut u8;
        let x = x.add(i);
        let y = y.add(i);

        copy_nonoverlapping(x, t, rem);
        copy_nonoverlapping(y, x, rem);
        copy_nonoverlapping(t, y, rem);
    }
}

/// Moves `src` into the pointed `dst`, returning the previous `dst` value.
///
/// Neither value is dropped.
///
/// This function is semantically equivalent to [`mem::replace`] except that it
/// operates on raw pointers instead of references. When references are
/// available, [`mem::replace`] should be preferred.
///
/// [`mem::replace`]: ../mem/fn.replace.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let mut rust = vec!['b', 'u', 's', 't'];
///
/// // `mem::replace` would have the same effect without requiring the unsafe
/// // block.
/// let b = unsafe {
///     ptr::replace(&mut rust[0], 'r')
/// };
///
/// assert_eq!(b, 'b');
/// assert_eq!(rust, &['r', 'u', 's', 't']);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
    // cannot overlap: `src` is a fresh by-value local, so `&mut src`
    // never aliases `*dst`.
    mem::swap(&mut *dst, &mut src);
    // `src` now holds the previous `*dst` value.
    src
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the
///   case.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         // Create a bitwise copy of the value at `a` in `tmp`.
///         let tmp = ptr::read(a);
///
///         // Exiting at this point (either by explicitly returning or by
///         // calling a function which panics) would cause the value in `tmp` to
///         // be dropped while the same value is still referenced by `a`. This
///         // could trigger undefined behavior if `T` is not `Copy`.
///
///         // Create a bitwise copy of the value at `b` in `a`.
///         // This is safe because mutable references cannot alias.
///         ptr::copy_nonoverlapping(b, a, 1);
///
///         // As above, exiting here could trigger undefined behavior because
///         // the same value is referenced by `a` and `b`.
///
///         // Move `tmp` into `b`.
///         ptr::write(b, tmp);
///
///         // `tmp` has been moved (`write` takes ownership of its second argument),
///         // so nothing is dropped implicitly here.
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
///
/// ## Ownership of the Returned Value
///
/// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`].
/// If `T` is not [`Copy`], using both the returned value and the value at
/// `*src` can violate memory safety. Note that assigning to `*src` counts as a
/// use because it will attempt to drop the value at `*src`.
///
/// [`write`] can be used to overwrite data without causing it to be dropped.
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("foo");
/// unsafe {
///     // `s2` now points to the same underlying memory as `s`.
///     let mut s2: String = ptr::read(&s);
///
///     assert_eq!(s2, "foo");
///
///     // Assigning to `s2` causes its original value to be dropped. Beyond
///     // this point, `s` must no longer be used, as the underlying memory has
///     // been freed.
///     s2 = String::default();
///     assert_eq!(s2, "");
///
///     // Assigning to `s` would cause the old value to be dropped again,
///     // resulting in undefined behavior.
///     // s = String::from("bar"); // ERROR
///
///     // `ptr::write` can be used to overwrite a value without dropping it.
///     ptr::write(&mut s, String::from("bar"));
/// }
///
/// assert_eq!(s, "bar");
/// ```
///
/// [`mem::swap`]: ../mem/fn.swap.html
/// [valid]: ../ptr/index.html#safety
/// [`Copy`]: ../marker/trait.Copy.html
/// [`read_unaligned`]: ./fn.read_unaligned.html
/// [`write`]: ./fn.write.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn read<T>(src: *const T) -> T {
    // Bitwise-copy `*src` into uninitialized scratch space, then take
    // ownership of the copy; `*src` itself is left untouched.
    let mut tmp = MaybeUninit::<T>::uninitialized();
    copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
    tmp.into_initialized()
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// Unlike [`read`], `read_unaligned` works with unaligned pointers.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL.
///
/// [`Copy`]: ../marker/trait.Copy.html
/// [`read`]: ./fn.read.html
/// [`write_unaligned`]: ./fn.write_unaligned.html
/// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// Access members of a packed struct by reference:
///
/// ```
/// use std::ptr;
///
/// #[repr(packed, C)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let x = Packed {
///     _padding: 0x00,
///     unaligned: 0x01020304,
/// };
///
/// let v = unsafe {
///     // Take the address of a 32-bit integer which is not aligned.
///     // This must be done as a raw pointer; unaligned references are invalid.
///     let unaligned = &x.unaligned as *const u32;
///
///     // Dereferencing normally will emit an aligned load instruction,
///     // causing undefined behavior.
///     // let v = *unaligned; // ERROR
///
///     // Instead, use `read_unaligned` to read improperly aligned values.
///     let v = ptr::read_unaligned(unaligned);
///
///     v
/// };
///
/// // Accessing unaligned values directly is safe.
/// assert!(x.unaligned == v);
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn read_unaligned<T>(src: *const T) -> T {
    // Copying byte-by-byte (as `u8`) imposes no alignment requirement on
    // `src`, unlike the typed copy in `read`.
    let mut tmp = MaybeUninit::<T>::uninitialized();
    copy_nonoverlapping(src as *const u8,
                        tmp.as_mut_ptr() as *mut u8,
                        mem::size_of::<T>());
    tmp.into_initialized()
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// `write` does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care should be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been [`read`] from.
///
/// [`read`]: ./fn.read.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the
///   case.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: ../ptr/index.html#safety
/// [`write_unaligned`]: ./fn.write_unaligned.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write(y, z);
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         // Create a bitwise copy of the value at `a` in `tmp`.
///         let tmp = ptr::read(a);
///
///         // Exiting at this point (either by explicitly returning or by
///         // calling a function which panics) would cause the value in `tmp` to
///         // be dropped while the same value is still referenced by `a`. This
///         // could trigger undefined behavior if `T` is not `Copy`.
///
///         // Create a bitwise copy of the value at `b` in `a`.
///         // This is safe because mutable references cannot alias.
///         ptr::copy_nonoverlapping(b, a, 1);
///
///         // As above, exiting here could trigger undefined behavior because
///         // the same value is referenced by `a` and `b`.
///
///         // Move `tmp` into `b`.
///         ptr::write(b, tmp);
///
///         // `tmp` has been moved (`write` takes ownership of its second argument),
///         // so nothing is dropped implicitly here.
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
///
/// [`mem::swap`]: ../mem/fn.swap.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
    // Moves `src` into `*dst` without reading or dropping the old value
    // (see the contract documented above).
    intrinsics::move_val_init(&mut *dst, src)
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// Unlike [`write`], the pointer may be unaligned.
///
/// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been read with [`read_unaligned`].
///
/// [`write`]: ./fn.write.html
/// [`read_unaligned`]: ./fn.read_unaligned.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL.
///
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// Access fields in a packed struct:
///
/// ```
/// use std::{mem, ptr};
///
/// #[repr(packed, C)]
/// #[derive(Default)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let v = 0x01020304;
/// let mut x: Packed = unsafe { mem::zeroed() };
///
/// unsafe {
///     // Take a reference to a 32-bit integer which is not aligned.
///     let unaligned = &mut x.unaligned as *mut u32;
///
///     // Dereferencing normally will emit an aligned store instruction,
///     // causing undefined behavior because the pointer is not aligned.
///     // *unaligned = v; // ERROR
///
///     // Instead, use `write_unaligned` to write improperly aligned values.
///     ptr::write_unaligned(unaligned, v);
/// }
///
/// // Accessing unaligned values directly is safe.
/// assert!(x.unaligned == v);
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
    // Byte-wise copy imposes no alignment requirement on `dst`.
    copy_nonoverlapping(&src as *const T as *const u8,
                        dst as *mut u8,
                        mem::size_of::<T>());
    // Ownership of `src` has moved into `*dst`; suppress `src`'s destructor
    // so the value is not dropped twice.
    mem::forget(src);
}

/// Performs a volatile read of the value from `src` without moving it. This
/// leaves the memory in `src` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// Memory accessed with `read_volatile` or [`write_volatile`] should not be
/// accessed with non-volatile operations.
///
/// [`write_volatile`]: ./fn.write_volatile.html
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `read_volatile`) are noops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned.
///
/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
/// However, storing non-[`Copy`] types in volatile memory is almost certainly
/// incorrect.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: ../ptr/index.html#safety
/// [`Copy`]: ../marker/trait.Copy.html
/// [`read`]: ./fn.read.html
/// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `read_volatile` and any write operation to the same location
/// is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
    // Delegate directly to the intrinsic; the volatility guarantees documented
    // above are provided by the compiler backend.
    intrinsics::volatile_load(src)
}

/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// Memory accessed with [`read_volatile`] or `write_volatile` should not be
/// accessed with non-volatile operations.
///
/// `write_volatile` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// [`read_volatile`]: ./fn.read_volatile.html
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `write_volatile`) are noops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
///
/// [valid]: ../ptr/index.html#safety
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `write_volatile` and any other operation (reading or writing)
/// on the same location is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_volatile(y, z);
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
    // Delegate directly to the intrinsic; the volatility guarantees documented
    // above are provided by the compiler backend.
    intrinsics::volatile_store(dst, src);
}

// Inherent methods on `*const T`; the lang item attaches this impl to the
// primitive pointer type.
#[lang = "const_ptr"]
impl<T: ?Sized> *const T {
    /// Returns `true` if the pointer is null.
    ///
    /// Note that unsized types have many possible null pointers, as only the
    /// raw data pointer is considered, not their length, vtable, etc.
    /// Therefore, two pointers that are null may still not compare equal to
    /// each other.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "Follow the rabbit";
    /// let ptr: *const u8 = s.as_ptr();
    /// assert!(!ptr.is_null());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn is_null(self) -> bool {
        // Compare via a cast to a thin pointer, so fat pointers are only
        // considering their "data" part for null-ness.
        (self as *const u8) == null()
    }

    /// Returns `None` if the pointer is null, or else returns a reference to
    /// the value wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// While this method and its mutable counterpart are useful for
    /// null-safety, it is important to note that this is still an unsafe
    /// operation because the returned value could be pointing to invalid
    /// memory.
    ///
    /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
    /// not necessarily reflect the actual lifetime of the data.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     if let Some(val_back) = ptr.as_ref() {
    ///         println!("We got back the value: {}!", val_back);
    ///     }
    /// }
    /// ```
    ///
    /// # Null-unchecked version
    ///
    /// If you are sure the pointer can never be null and are looking for some kind of
    /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
    /// dereference the pointer directly.
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     let val_back = &*ptr;
    ///     println!("We got back the value: {}!", val_back);
    /// }
    /// ```
    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
    #[inline]
    pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
        // Null check, then reborrow as a reference whose lifetime `'a` is
        // chosen by the caller (see the safety note above).
        if self.is_null() {
            None
        } else {
            Some(&*self)
        }
    }

    /// Calculates the offset from a pointer.
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum, **in bytes** must fit in a usize.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.offset(1) as char);
    ///     println!("{}", *ptr.offset(2) as char);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
        intrinsics::offset(self, count)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    /// In particular, the resulting pointer may *not* be used to access a
    /// different allocated object than the one `self` points to. In other
    /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is
    /// *not* the same as `y`, and dereferencing it is undefined behavior
    /// unless `x` and `y` point into the same allocated object.
    ///
    /// Always use `.offset(count)` instead when possible, because `offset`
    /// allows the compiler to optimize better. If you need to cross object
    /// boundaries, cast the pointer to an integer and do the arithmetic there.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_offset(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_offset(step);
    /// }
    /// ```
    #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
    #[inline]
    pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
        unsafe {
            // `arith_offset` is the wrapping counterpart of `offset`: the
            // computation itself never has immediate UB, per the docs above.
            intrinsics::arith_offset(self, count)
        }
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// This function is the inverse of [`offset`].
    ///
    /// [`offset`]: #method.offset
    /// [`wrapping_offset_from`]: #method.wrapping_offset_from
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and other pointer must be either in bounds or one
    ///   byte past the end of the same allocated object.
    ///
    /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The distance between the pointers, in bytes, must be an exact multiple
    ///   of the size of `T`.
    ///
    /// * The distance being in bounds cannot rely on "wrapping around" the address space.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_offset_from`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a Zero-Sized Type ("ZST").
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(ptr_offset_from)]
    ///
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// unsafe {
    ///     assert_eq!(ptr2.offset_from(ptr1), 2);
    ///     assert_eq!(ptr1.offset_from(ptr2), -2);
    ///     assert_eq!(ptr1.offset(2), ptr2);
    ///     assert_eq!(ptr2.offset(-2), ptr1);
    /// }
    /// ```
    #[unstable(feature = "ptr_offset_from", issue = "41079")]
    #[inline]
    pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
        let pointee_size = mem::size_of::<T>();
        assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);

        // This is the same sequence that Clang emits for pointer subtraction.
        // It can be neither `nsw` nor `nuw` because the input is treated as
        // unsigned but then the output is treated as signed, so neither works.
        let d = isize::wrapping_sub(self as _, origin as _);
        intrinsics::exact_div(d, pointee_size as _)
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// If the address difference between the two pointers is not a multiple of
    /// `mem::size_of::<T>()` then the result of the division is rounded towards
    /// zero.
    ///
    /// Though this method is safe for any two pointers, note that its result
    /// will be mostly useless if the two pointers aren't into the same allocated
    /// object, for example if they point to two different local variables.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a zero-sized type.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(ptr_wrapping_offset_from)]
    ///
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
    /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
    /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
    /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
    ///
    /// let ptr1: *const i32 = 3 as _;
    /// let ptr2: *const i32 = 13 as _;
    /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
    /// ```
    #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
    #[inline]
    pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
        let pointee_size = mem::size_of::<T>();
        assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);

        // Unlike `offset_from`, this uses wrapping subtraction and a division
        // that rounds towards zero, so it is safe for arbitrary pointers.
        let d = isize::wrapping_sub(self as _, origin as _);
        d.wrapping_div(pointee_size as _)
    }

    /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.add(1) as char);
    ///     println!("{}", *ptr.add(2) as char);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn add(self, count: usize) -> Self
        where T: Sized,
    {
        // Thin convenience wrapper: forwards (same safety contract) to `offset`.
        self.offset(count as isize)
    }

    /// Calculates the offset from a pointer (convenience for
    /// `.offset((count as isize).wrapping_neg())`).
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of the same allocated object.
    ///
    /// * The computed offset cannot exceed `isize::MAX` **bytes**.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum must fit in a usize.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "123";
    ///
    /// unsafe {
    ///     let end: *const u8 = s.as_ptr().add(3);
    ///     println!("{}", *end.sub(1) as char);
    ///     println!("{}", *end.sub(2) as char);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn sub(self, count: usize) -> Self
        where T: Sized,
    {
        // Thin convenience wrapper: a negated-count `offset` (same safety contract).
        self.offset((count as isize).wrapping_neg())
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset(count as isize)`)
    ///
    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.add(count)` instead when possible, because `add`
    /// allows the compiler to optimize better.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // Iterate using a raw pointer in increments of two elements
/// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_add(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_add(step);
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub fn wrapping_add(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset(count as isize)
}

/// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
///
/// `count` is in units of T; e.g., a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// The resulting pointer does not need to be in bounds, but it is
/// potentially hazardous to dereference (which requires `unsafe`).
///
/// Always use `.sub(count)` instead when possible, because `sub`
/// allows the compiler to optimize better.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // Iterate using a raw pointer in increments of two elements (backwards)
/// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let start_rounded_down = ptr.wrapping_sub(2);
/// ptr = ptr.wrapping_add(4);
/// let step = 2;
/// // This loop prints "5, 3, 1, "
/// while ptr != start_rounded_down {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_sub(step);
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub fn wrapping_sub(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset((count as isize).wrapping_neg())
}

/// Reads the value from `self` without moving it. This leaves the
/// memory in `self` unchanged.
///
/// See [`ptr::read`] for safety concerns and examples.
/// /// [`ptr::read`]: ./ptr/fn.read.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read(self) -> T where T: Sized, { read(self) } /// Performs a volatile read of the value from `self` without moving it. This /// leaves the memory in `self` unchanged. /// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// See [`ptr::read_volatile`] for safety concerns and examples. /// /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_volatile(self) -> T where T: Sized, { read_volatile(self) } /// Reads the value from `self` without moving it. This leaves the /// memory in `self` unchanged. /// /// Unlike `read`, the pointer may be unaligned. /// /// See [`ptr::read_unaligned`] for safety concerns and examples. /// /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_unaligned(self) -> T where T: Sized, { read_unaligned(self) } /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source /// and destination may overlap. /// /// NOTE: this has the *same* argument order as [`ptr::copy`]. /// /// See [`ptr::copy`] for safety concerns and examples. /// /// [`ptr::copy`]: ./ptr/fn.copy.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_to(self, dest: *mut T, count: usize) where T: Sized, { copy(self, dest, count) } /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source /// and destination may *not* overlap. /// /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`]. /// /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples. 
///
/// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
    where T: Sized,
{
    copy_nonoverlapping(self, dest, count)
}

/// Computes the offset that needs to be applied to the pointer in order to make it aligned to
/// `align`.
///
/// If it is not possible to align the pointer, the implementation returns
/// `usize::max_value()`.
///
/// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
/// used with the `offset` or `offset_to` methods.
///
/// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
/// beyond the allocation that the pointer points into. It is up to the caller to ensure that
/// the returned offset is correct in all terms other than alignment.
///
/// # Panics
///
/// The function panics if `align` is not a power-of-two.
///
/// # Examples
///
/// Accessing adjacent `u8` as `u16`
///
/// ```
/// # #![feature(align_offset)]
/// # fn foo(n: usize) {
/// # use std::mem::align_of;
/// # unsafe {
/// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
/// let ptr = &x[n] as *const u8;
/// let offset = ptr.align_offset(align_of::<u16>());
/// if offset < x.len() - n - 1 {
///     let u16_ptr = ptr.add(offset) as *const u16;
///     assert_ne!(*u16_ptr, 500);
/// } else {
///     // while the pointer can be aligned via `offset`, it would point
///     // outside the allocation
/// }
/// # } }
/// ```
#[unstable(feature = "align_offset", issue = "44488")]
pub fn align_offset(self, align: usize) -> usize where T: Sized {
    if !align.is_power_of_two() {
        panic!("align_offset: align is not a power-of-two");
    }
    unsafe {
        align_offset(self, align)
    }
}
}

#[lang = "mut_ptr"]
impl<T: ?Sized> *mut T {
/// Returns `true` if the pointer is null.
/// /// Note that unsized types have many possible null pointers, as only the /// raw data pointer is considered, not their length, vtable, etc. /// Therefore, two pointers that are null may still not compare equal to /// each other. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// assert!(!ptr.is_null()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_null(self) -> bool { // Compare via a cast to a thin pointer, so fat pointers are only // considering their "data" part for null-ness. (self as *mut u8) == null_mut() } /// Returns `None` if the pointer is null, or else returns a reference to /// the value wrapped in `Some`. /// /// # Safety /// /// While this method and its mutable counterpart are useful for /// null-safety, it is important to note that this is still an unsafe /// operation because the returned value could be pointing to invalid /// memory. /// /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does /// not necessarily reflect the actual lifetime of the data. /// /// # Examples /// /// Basic usage: /// /// ``` /// let ptr: *mut u8 = &mut 10u8 as *mut u8; /// /// unsafe { /// if let Some(val_back) = ptr.as_ref() { /// println!("We got back the value: {}!", val_back); /// } /// } /// ``` /// /// # Null-unchecked version /// /// If you are sure the pointer can never be null and are looking for some kind of /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can /// dereference the pointer directly. /// /// ``` /// let ptr: *mut u8 = &mut 10u8 as *mut u8; /// /// unsafe { /// let val_back = &*ptr; /// println!("We got back the value: {}!", val_back); /// } /// ``` #[stable(feature = "ptr_as_ref", since = "1.9.0")] #[inline] pub unsafe fn as_ref<'a>(self) -> Option<&'a T> { if self.is_null() { None } else { Some(&*self) } } /// Calculates the offset from a pointer. 
///
/// `count` is in units of T; e.g., a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
///
/// * Both the starting and resulting pointer must be either in bounds or one
///   byte past the end of the same allocated object.
///
/// * The computed offset, **in bytes**, cannot overflow an `isize`.
///
/// * The offset being in bounds cannot rely on "wrapping around" the address
///   space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
///
/// The compiler and standard library generally tries to ensure allocations
/// never reach a size where an offset is a concern. For instance, `Vec`
/// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
/// `vec.as_ptr().add(vec.len())` is always safe.
///
/// Most platforms fundamentally can't even construct such an allocation.
/// For instance, no known 64-bit platform can ever serve a request
/// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
/// However, some 32-bit and 16-bit platforms may successfully serve a request for
/// more than `isize::MAX` bytes with things like Physical Address
/// Extension. As such, memory acquired directly from allocators or memory
/// mapped files *may* be too large to handle with this function.
///
/// Consider using `wrapping_offset` instead if these constraints are
/// difficult to satisfy. The only advantage of this method is that it
/// enables more aggressive compiler optimizations.
/// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// /// unsafe { /// println!("{}", *ptr.offset(1)); /// println!("{}", *ptr.offset(2)); /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized { intrinsics::offset(self, count) as *mut T } /// Calculates the offset from a pointer using wrapping arithmetic. /// `count` is in units of T; e.g., a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). /// In particular, the resulting pointer may *not* be used to access a /// different allocated object than the one `self` points to. In other /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is /// *not* the same as `y`, and dereferencing it is undefined behavior /// unless `x` and `y` point into the same allocated object. /// /// Always use `.offset(count)` instead when possible, because `offset` /// allows the compiler to optimize better. If you need to cross object /// boundaries, cast the pointer to an integer and do the arithmetic there. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let mut data = [1u8, 2, 3, 4, 5]; /// let mut ptr: *mut u8 = data.as_mut_ptr(); /// let step = 2; /// let end_rounded_up = ptr.wrapping_offset(6); /// /// while ptr != end_rounded_up { /// unsafe { /// *ptr = 0; /// } /// ptr = ptr.wrapping_offset(step); /// } /// assert_eq!(&data, &[0, 2, 0, 4, 0]); /// ``` #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")] #[inline] pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized { unsafe { intrinsics::arith_offset(self, count) as *mut T } } /// Returns `None` if the pointer is null, or else returns a mutable /// reference to the value wrapped in `Some`. /// /// # Safety /// /// As with `as_ref`, this is unsafe because it cannot verify the validity /// of the returned pointer, nor can it ensure that the lifetime `'a` /// returned is indeed a valid lifetime for the contained data. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// let first_value = unsafe { ptr.as_mut().unwrap() }; /// *first_value = 4; /// println!("{:?}", s); // It'll print: "[4, 2, 3]". /// ``` #[stable(feature = "ptr_as_ref", since = "1.9.0")] #[inline] pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> { if self.is_null() { None } else { Some(&mut *self) } } /// Calculates the distance between two pointers. The returned value is in /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`. /// /// This function is the inverse of [`offset`]. /// /// [`offset`]: #method.offset-1 /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1 /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and other pointer must be either in bounds or one /// byte past the end of the same allocated object. 
/// /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`. /// /// * The distance between the pointers, in bytes, must be an exact multiple /// of the size of `T`. /// /// * The distance being in bounds cannot rely on "wrapping around" the address space. /// /// The compiler and standard library generally try to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using [`wrapping_offset_from`] instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Panics /// /// This function panics if `T` is a Zero-Sized Type ("ZST"). /// /// # Examples /// /// Basic usage: /// /// ``` /// #![feature(ptr_offset_from)] /// /// let mut a = [0; 5]; /// let ptr1: *mut i32 = &mut a[1]; /// let ptr2: *mut i32 = &mut a[3]; /// unsafe { /// assert_eq!(ptr2.offset_from(ptr1), 2); /// assert_eq!(ptr1.offset_from(ptr2), -2); /// assert_eq!(ptr1.offset(2), ptr2); /// assert_eq!(ptr2.offset(-2), ptr1); /// } /// ``` #[unstable(feature = "ptr_offset_from", issue = "41079")] #[inline] pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized { (self as *const T).offset_from(origin) } /// Calculates the distance between two pointers. 
/// The returned value is in
/// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
///
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
///
/// Though this method is safe for any two pointers, note that its result
/// will be mostly useless if the two pointers aren't into the same allocated
/// object, for example if they point to two different local variables.
///
/// # Panics
///
/// This function panics if `T` is a zero-sized type.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(ptr_wrapping_offset_from)]
///
/// let mut a = [0; 5];
/// let ptr1: *mut i32 = &mut a[1];
/// let ptr2: *mut i32 = &mut a[3];
/// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
/// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
/// assert_eq!(ptr1.wrapping_offset(2), ptr2);
/// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
///
/// let ptr1: *mut i32 = 3 as _;
/// let ptr2: *mut i32 = 13 as _;
/// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
/// ```
#[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
#[inline]
pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
    (self as *const T).wrapping_offset_from(origin)
}

/// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
///
/// `count` is in units of T; e.g., a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
///
/// * Both the starting and resulting pointer must be either in bounds or one
///   byte past the end of the same allocated object.
///
/// * The computed offset, **in bytes**, cannot overflow an `isize`.
///
/// * The offset being in bounds cannot rely on "wrapping around" the address
///   space. That is, the infinite-precision sum must fit in a `usize`.
/// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); /// /// unsafe { /// println!("{}", *ptr.add(1) as char); /// println!("{}", *ptr.add(2) as char); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn add(self, count: usize) -> Self where T: Sized, { self.offset(count as isize) } /// Calculates the offset from a pointer (convenience for /// `.offset((count as isize).wrapping_neg())`). /// /// `count` is in units of T; e.g., a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and resulting pointer must be either in bounds or one /// byte past the end of the same allocated object. /// /// * The computed offset cannot exceed `isize::MAX` **bytes**. 
/// /// * The offset being in bounds cannot rely on "wrapping around" the address /// space. That is, the infinite-precision sum must fit in a usize. /// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "123"; /// /// unsafe { /// let end: *const u8 = s.as_ptr().add(3); /// println!("{}", *end.sub(1) as char); /// println!("{}", *end.sub(2) as char); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn sub(self, count: usize) -> Self where T: Sized, { self.offset((count as isize).wrapping_neg()) } /// Calculates the offset from a pointer using wrapping arithmetic. /// (convenience for `.wrapping_offset(count as isize)`) /// /// `count` is in units of T; e.g., a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). 
///
/// Always use `.add(count)` instead when possible, because `add`
/// allows the compiler to optimize better.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // Iterate using a raw pointer in increments of two elements
/// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_add(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_add(step);
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub fn wrapping_add(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset(count as isize)
}

/// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
///
/// `count` is in units of T; e.g., a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// The resulting pointer does not need to be in bounds, but it is
/// potentially hazardous to dereference (which requires `unsafe`).
///
/// Always use `.sub(count)` instead when possible, because `sub`
/// allows the compiler to optimize better.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // Iterate using a raw pointer in increments of two elements (backwards)
/// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let start_rounded_down = ptr.wrapping_sub(2);
/// ptr = ptr.wrapping_add(4);
/// let step = 2;
/// // This loop prints "5, 3, 1, "
/// while ptr != start_rounded_down {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_sub(step);
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub fn wrapping_sub(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset((count as isize).wrapping_neg())
}

/// Reads the value from `self` without moving it.
This leaves the /// memory in `self` unchanged. /// /// See [`ptr::read`] for safety concerns and examples. /// /// [`ptr::read`]: ./ptr/fn.read.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read(self) -> T where T: Sized, { read(self) } /// Performs a volatile read of the value from `self` without moving it. This /// leaves the memory in `self` unchanged. /// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// See [`ptr::read_volatile`] for safety concerns and examples. /// /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_volatile(self) -> T where T: Sized, { read_volatile(self) } /// Reads the value from `self` without moving it. This leaves the /// memory in `self` unchanged. /// /// Unlike `read`, the pointer may be unaligned. /// /// See [`ptr::read_unaligned`] for safety concerns and examples. /// /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_unaligned(self) -> T where T: Sized, { read_unaligned(self) } /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source /// and destination may overlap. /// /// NOTE: this has the *same* argument order as [`ptr::copy`]. /// /// See [`ptr::copy`] for safety concerns and examples. /// /// [`ptr::copy`]: ./ptr/fn.copy.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_to(self, dest: *mut T, count: usize) where T: Sized, { copy(self, dest, count) } /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source /// and destination may *not* overlap. /// /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`]. /// /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples. 
/// /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize) where T: Sized, { copy_nonoverlapping(self, dest, count) } /// Copies `count * size_of<T>` bytes from `src` to `self`. The source /// and destination may overlap. /// /// NOTE: this has the *opposite* argument order of [`ptr::copy`]. /// /// See [`ptr::copy`] for safety concerns and examples. /// /// [`ptr::copy`]: ./ptr/fn.copy.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_from(self, src: *const T, count: usize) where T: Sized, { copy(src, self, count) } /// Copies `count * size_of<T>` bytes from `src` to `self`. The source /// and destination may *not* overlap. /// /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`]. /// /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples. /// /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize) where T: Sized, { copy_nonoverlapping(src, self, count) } /// Executes the destructor (if any) of the pointed-to value. /// /// See [`ptr::drop_in_place`] for safety concerns and examples. /// /// [`ptr::drop_in_place`]: ./ptr/fn.drop_in_place.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn drop_in_place(self) { drop_in_place(self) } /// Overwrites a memory location with the given value without reading or /// dropping the old value. /// /// See [`ptr::write`] for safety concerns and examples. 
/// /// [`ptr::write`]: ./ptr/fn.write.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write(self, val: T) where T: Sized, { write(self, val) } /// Invokes memset on the specified pointer, setting `count * size_of::<T>()` /// bytes of memory starting at `self` to `val`. /// /// See [`ptr::write_bytes`] for safety concerns and examples. /// /// [`ptr::write_bytes`]: ./ptr/fn.write_bytes.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_bytes(self, val: u8, count: usize) where T: Sized, { write_bytes(self, val, count) } /// Performs a volatile write of a memory location with the given value without /// reading or dropping the old value. /// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// See [`ptr::write_volatile`] for safety concerns and examples. /// /// [`ptr::write_volatile`]: ./ptr/fn.write_volatile.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_volatile(self, val: T) where T: Sized, { write_volatile(self, val) } /// Overwrites a memory location with the given value without reading or /// dropping the old value. /// /// Unlike `write`, the pointer may be unaligned. /// /// See [`ptr::write_unaligned`] for safety concerns and examples. /// /// [`ptr::write_unaligned`]: ./ptr/fn.write_unaligned.html #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_unaligned(self, val: T) where T: Sized, { write_unaligned(self, val) } /// Replaces the value at `self` with `src`, returning the old /// value, without dropping either. /// /// See [`ptr::replace`] for safety concerns and examples. 
///
/// [`ptr::replace`]: ./ptr/fn.replace.html
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn replace(self, src: T) -> T
    where T: Sized,
{
    replace(self, src)
}

/// Swaps the values at two mutable locations of the same type, without
/// deinitializing either. They may overlap, unlike `mem::swap` which is
/// otherwise equivalent.
///
/// See [`ptr::swap`] for safety concerns and examples.
///
/// [`ptr::swap`]: ./ptr/fn.swap.html
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn swap(self, with: *mut T)
    where T: Sized,
{
    swap(self, with)
}

/// Computes the offset that needs to be applied to the pointer in order to make it aligned to
/// `align`.
///
/// If it is not possible to align the pointer, the implementation returns
/// `usize::max_value()`.
///
/// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
/// used with the `offset` or `offset_to` methods.
///
/// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
/// beyond the allocation that the pointer points into. It is up to the caller to ensure that
/// the returned offset is correct in all terms other than alignment.
///
/// # Panics
///
/// The function panics if `align` is not a power-of-two.
///
/// # Examples
///
/// Accessing adjacent `u8` as `u16`
///
/// ```
/// # #![feature(align_offset)]
/// # fn foo(n: usize) {
/// # use std::mem::align_of;
/// # unsafe {
/// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
/// let ptr = &x[n] as *const u8;
/// let offset = ptr.align_offset(align_of::<u16>());
/// if offset < x.len() - n - 1 {
///     let u16_ptr = ptr.add(offset) as *const u16;
///     assert_ne!(*u16_ptr, 500);
/// } else {
///     // while the pointer can be aligned via `offset`, it would point
///     // outside the allocation
/// }
/// # } }
/// ```
#[unstable(feature = "align_offset", issue = "44488")]
pub fn align_offset(self, align: usize) -> usize where T: Sized {
    // Public entry point: validates the alignment eagerly (a non-power-of-two
    // alignment is a caller bug), then defers to the panic-free intrinsic
    // implementation below.
    if !align.is_power_of_two() {
        panic!("align_offset: align is not a power-of-two");
    }
    unsafe {
        align_offset(self, align)
    }
}
} // closes the inherent impl block opened before this chunk

/// Align pointer `p`.
///
/// Calculate offset (in terms of elements of `stride` stride) that has to be applied
/// to pointer `p` so that pointer `p` would get aligned to `a`.
///
/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
/// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated
/// constants.
///
/// If we ever decide to make it possible to call the intrinsic with `a` that is not a
/// power-of-two, it will probably be more prudent to just change to a naive implementation rather
/// than trying to adapt this to accommodate that change.
///
/// Any questions go to @nagisa.
#[lang="align_offset"]
pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
    /// Calculate multiplicative modular inverse of `x` modulo `m`.
    ///
    /// This implementation is tailored for align_offset and has following preconditions:
    ///
    /// * `m` is a power-of-two;
    /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
    ///
    /// Implementation of this function shall not panic. Ever.
    #[inline]
    fn mod_inv(x: usize, m: usize) -> usize {
        /// Multiplicative modular inverse table modulo 2⁴ = 16.
        ///
        /// Note, that this table does not contain values where inverse does not exist (i.e., for
        /// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
        const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
        /// Modulo for which the `INV_TABLE_MOD_16` is intended.
        const INV_TABLE_MOD: usize = 16;
        /// INV_TABLE_MOD²
        const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;

        // `x` is odd here (callers pass `k = smoda >> gcdpow`, which has its
        // trailing zeros stripped), so `(x mod 16) >> 1` indexes the 8-entry
        // table of odd residues without going out of bounds.
        let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
        if m <= INV_TABLE_MOD {
            table_inverse & (m - 1)
        } else {
            // We iterate "up" using the following formula:
            //
            // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
            //
            // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
            //
            // NOTE(review): `going_mod` squares each iteration (16, 256, 65536, …) and can
            // wrap to 0 via `wrapping_mul` when it exceeds usize::MAX — presumably
            // unreachable because `m = a >> gcdpow` stays well below 2^(bits/2) for any
            // realistic alignment, but confirm; later libcore versions restructured this loop.
            let mut inverse = table_inverse;
            let mut going_mod = INV_TABLE_MOD_SQUARED;
            loop {
                // y = y * (2 - xy) mod n
                //
                // Note, that we use wrapping operations here intentionally – the original formula
                // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
                // usize::max_value()` instead, because we take the result `mod n` at the end
                // anyway.
                inverse = inverse.wrapping_mul(
                    2usize.wrapping_sub(x.wrapping_mul(inverse))
                ) & (going_mod - 1);
                if going_mod > m {
                    return inverse & (m - 1);
                }
                going_mod = going_mod.wrapping_mul(going_mod);
            }
        }
    }

    let stride = ::mem::size_of::<T>();
    // `a` is a power of two (checked by the public wrapper), so `a - 1` is a mask.
    let a_minus_one = a.wrapping_sub(1);
    let pmoda = p as usize & a_minus_one;

    if pmoda == 0 {
        // Already aligned. Yay!
        return 0;
    }

    if stride <= 1 {
        return if stride == 0 {
            // If the pointer is not aligned, and the element is zero-sized, then no amount of
            // elements will ever align the pointer.
            !0
        } else {
            // stride == 1: each element advances the address by one byte, so the
            // number of elements needed equals the number of bytes to the boundary.
            a.wrapping_sub(pmoda)
        };
    }

    let smoda = stride & a_minus_one;
    // a is power-of-two so cannot be 0. stride = 0 is handled above.
    let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a));
    let gcd = 1usize << gcdpow;

    if p as usize & (gcd - 1) == 0 {
        // This branch solves for the following linear congruence equation:
        //
        // $$ p + so ≡ 0 mod a $$
        //
        // $p$ here is the pointer value, $s$ – stride of `T`, $o$ offset in `T`s, and $a$ – the
        // requested alignment.
        //
        // g = gcd(a, s)
        // o = (a - (p mod a))/g * ((s/g)⁻¹ mod a)
        //
        // The first term is “the relative alignment of p to a”, the second term is “how does
        // incrementing p by s bytes change the relative alignment of p”. Division by `g` is
        // necessary to make this equation well formed if $a$ and $s$ are not co-prime.
        //
        // Furthermore, the result produced by this solution is not “minimal”, so it is necessary
        // to take the result $o mod lcm(s, a)$. We can replace $lcm(s, a)$ with just a $a / g$.
        let j = a.wrapping_sub(pmoda) >> gcdpow;
        let k = smoda >> gcdpow;
        // SAFETY of `unchecked_rem`: `a >> gcdpow` is non-zero because `a` is a
        // non-zero power of two and `gcdpow <= cttz(a)`.
        return intrinsics::unchecked_rem(j.wrapping_mul(mod_inv(k, a)), a >> gcdpow);
    }

    // Cannot be aligned at all.
    usize::max_value()
}

// Equality for pointers
//
// Raw-pointer equality compares addresses only (plus metadata for fat
// pointers); it never dereferences.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *const T {
    #[inline]
    fn eq(&self, other: &*const T) -> bool { *self == *other }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *const T {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *mut T {
    #[inline]
    fn eq(&self, other: &*mut T) -> bool { *self == *other }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *mut T {}

/// Compares raw pointers for equality.
///
/// This is the same as using the `==` operator, but less generic:
/// the arguments have to be `*const T` raw pointers,
/// not anything that implements `PartialEq`.
///
/// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
/// by their address rather than comparing the values they point to
/// (which is what the `PartialEq for &T` implementation does).
///
/// Smart pointer types, such as `Box`, `Rc`, and `Arc` do not compare
/// using this function, instead they compare the values rather than
/// their addresses.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let five = 5;
/// let other_five = 5;
/// let five_ref = &five;
/// let same_five_ref = &five;
/// let other_five_ref = &other_five;
///
/// assert!(five_ref == same_five_ref);
/// assert!(five_ref == other_five_ref);
///
/// assert!(ptr::eq(five_ref, same_five_ref));
/// assert!(!ptr::eq(five_ref, other_five_ref));
/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
#[inline]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
    // Address (and, for fat pointers, metadata) comparison; never dereferences.
    a == b
}

/// Hash a raw pointer.
///
/// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly)
/// by its address rather than the value it points to
/// (which is what the `Hash for &T` implementation does).
///
/// # Examples
///
/// ```
/// #![feature(ptr_hash)]
/// use std::collections::hash_map::DefaultHasher;
/// use std::hash::{Hash, Hasher};
/// use std::ptr;
///
/// let five = 5;
/// let five_ref = &five;
///
/// let mut hasher = DefaultHasher::new();
/// ptr::hash(five_ref, &mut hasher);
/// let actual = hasher.finish();
///
/// let mut hasher = DefaultHasher::new();
/// (five_ref as *const i32).hash(&mut hasher);
/// let expected = hasher.finish();
///
/// assert_eq!(actual, expected);
/// ```
#[unstable(feature = "ptr_hash", reason = "newly added", issue = "56286")]
pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
    use hash::Hash;
    // Hashes the pointer value itself via `Hash for *const T`.
    hashee.hash(into);
}

// Impls for function pointers
//
// Function-pointer types have no generic `fn(...)` syntax to abstract over
// arity, ABI, and unsafety, so these macros stamp out the trait impls for
// every combination up to 12 arguments. All impls compare/order/hash the
// pointer by its address (`*self as usize`).
macro_rules! fnptr_impls_safety_abi {
    ($FnTy: ty, $($Arg: ident),*) => {
        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialEq for $FnTy {
            #[inline]
            fn eq(&self, other: &Self) -> bool {
                *self as usize == *other as usize
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Eq for $FnTy {}

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> PartialOrd for $FnTy {
            #[inline]
            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
                (*self as usize).partial_cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> Ord for $FnTy {
            #[inline]
            fn cmp(&self, other: &Self) -> Ordering {
                (*self as usize).cmp(&(*other as usize))
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> hash::Hash for $FnTy {
            fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
                state.write_usize(*self as usize)
            }
        }

        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                fmt::Pointer::fmt(&(*self as *const ()), f)
            }
        }

        // Debug intentionally prints the address (same as Pointer); the
        // pointed-to code has no meaningful Debug representation.
        #[stable(feature = "fnptr_impls", since = "1.4.0")]
        impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                fmt::Pointer::fmt(&(*self as *const ()), f)
            }
        }
    }
}

macro_rules! fnptr_impls_args {
    ($($Arg: ident),+) => {
        fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* }
        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* }
    };
    () => {
        // No variadic functions with 0 parameters
        fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
        fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
    };
}

fnptr_impls_args! { }
fnptr_impls_args! { A }
fnptr_impls_args! { A, B }
fnptr_impls_args! { A, B, C }
fnptr_impls_args! { A, B, C, D }
fnptr_impls_args! { A, B, C, D, E }
fnptr_impls_args! { A, B, C, D, E, F }
fnptr_impls_args! { A, B, C, D, E, F, G }
fnptr_impls_args! { A, B, C, D, E, F, G, H }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }

// Comparison for pointers
//
// Ordering compares addresses only; the result is meaningful for pointers
// derived from the same allocation, and the impls never dereference.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *const T {
    #[inline]
    fn cmp(&self, other: &*const T) -> Ordering {
        if self < other {
            Less
        } else if self == other {
            Equal
        } else {
            Greater
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialOrd for *const T {
    #[inline]
    fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
        Some(self.cmp(other))
    }

    #[inline]
    fn lt(&self, other: &*const T) -> bool { *self < *other }

    #[inline]
    fn le(&self, other: &*const T) -> bool { *self <= *other }

    #[inline]
    fn gt(&self, other: &*const T) -> bool { *self > *other }

    #[inline]
    fn ge(&self, other: &*const T) -> bool { *self >= *other }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Ord for *mut T {
    #[inline]
    fn cmp(&self, other: &*mut T) -> Ordering {
        if self < other {
            Less
        } else if self == other {
            Equal
        } else {
            Greater
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialOrd for *mut T {
    #[inline]
    fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
        Some(self.cmp(other))
    }

    #[inline]
    fn lt(&self, other: &*mut T) -> bool { *self < *other }

    #[inline]
    fn le(&self, other: &*mut T) -> bool { *self <= *other }

    #[inline]
    fn gt(&self, other: &*mut T) -> bool { *self > *other }

    #[inline]
    fn ge(&self, other: &*mut T) ->
bool { *self >= *other }
}

/// A wrapper around a raw non-null `*mut T` that indicates that the possessor
/// of this wrapper owns the referent. Useful for building abstractions like
/// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
///
/// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
/// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
/// the kind of strong aliasing guarantees an instance of `T` can expect:
/// the referent of the pointer should not be modified without a unique path to
/// its owning Unique.
///
/// If you're uncertain of whether it's correct to use `Unique` for your purposes,
/// consider using `NonNull`, which has weaker semantics.
///
/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
/// is never dereferenced. This is so that enums may use this forbidden value
/// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
/// However the pointer may still dangle if it isn't dereferenced.
///
/// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
/// for any type which upholds Unique's aliasing requirements.
#[unstable(feature = "ptr_internals", issue = "0",
           reason = "use NonNull instead and consider PhantomData<T> \
                     (if you also use #[may_dangle]), Send, and/or Sync")]
#[doc(hidden)]
#[repr(transparent)]
#[rustc_layout_scalar_valid_range_start(1)]
pub struct Unique<T: ?Sized> {
    // Stored as `*const T` for covariance; `as_ptr` casts back to `*mut T`.
    // The scalar-valid-range attribute above is what gives `Option<Unique<T>>`
    // its null-pointer niche.
    pointer: *const T,
    // NOTE: this marker has no consequences for variance, but is necessary
    // for dropck to understand that we logically own a `T`.
    //
    // For details, see:
    // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
    _marker: PhantomData<T>,
}

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> fmt::Debug for Unique<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Debug shows the address, same as Pointer.
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}

/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "ptr_internals", issue = "0")]
unsafe impl<T: Send + ?Sized> Send for Unique<T> { }

/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "ptr_internals", issue = "0")]
unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: Sized> Unique<T> {
    /// Creates a new `Unique` that is dangling, but well-aligned.
    ///
    /// This is useful for initializing types which lazily allocate, like
    /// `Vec::new` does.
    ///
    /// Note that the pointer value may potentially represent a valid pointer to
    /// a `T`, which means this must not be used as a "not yet initialized"
    /// sentinel value. Types that lazily allocate must track initialization by
    /// some other means.
    // FIXME: rename to dangling() to match NonNull?
    pub const fn empty() -> Self {
        unsafe {
            // align_of::<T>() is always >= 1, so the pointer is non-null.
            Unique::new_unchecked(mem::align_of::<T>() as *mut T)
        }
    }
}

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> Unique<T> {
    /// Creates a new `Unique`.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        Unique { pointer: ptr as _, _marker: PhantomData }
    }

    /// Creates a new `Unique` if `ptr` is non-null.
    pub fn new(ptr: *mut T) -> Option<Self> {
        if !ptr.is_null() {
            // SAFETY: just checked that `ptr` is non-null.
            Some(unsafe { Unique { pointer: ptr as _, _marker: PhantomData } })
        } else {
            None
        }
    }

    /// Acquires the underlying `*mut` pointer.
    pub const fn as_ptr(self) -> *mut T {
        self.pointer as *mut T
    }

    /// Dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
    pub unsafe fn as_ref(&self) -> &T {
        &*self.as_ptr()
    }

    /// Mutably dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
    pub unsafe fn as_mut(&mut self) -> &mut T {
        &mut *self.as_ptr()
    }
}

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> Clone for Unique<T> {
    fn clone(&self) -> Self {
        *self
    }
}

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> Copy for Unique<T> { }

// Allows e.g. Unique<[T; N]> -> Unique<[T]> unsizing coercions.
#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { }

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> { }

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> fmt::Pointer for Unique<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> From<&mut T> for Unique<T> {
    fn from(reference: &mut T) -> Self {
        // SAFETY: references are always non-null.
        unsafe { Unique { pointer: reference as *mut T, _marker: PhantomData } }
    }
}

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> From<&T> for Unique<T> {
    fn from(reference: &T) -> Self {
        // SAFETY: references are always non-null.
        unsafe { Unique { pointer: reference as *const T, _marker: PhantomData } }
    }
}

#[unstable(feature =
"ptr_internals", issue = "0")]
impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> {
    fn from(p: NonNull<T>) -> Self {
        // SAFETY: NonNull guarantees the pointer is non-null.
        unsafe { Unique { pointer: p.pointer, _marker: PhantomData } }
    }
}

/// `*mut T` but non-zero and covariant.
///
/// This is often the correct thing to use when building data structures using
/// raw pointers, but is ultimately more dangerous to use because of its additional
/// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
///
/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
/// is never dereferenced. This is so that enums may use this forbidden value
/// as a discriminant -- `Option<NonNull<T>>` has the same size as `*mut T`.
/// However the pointer may still dangle if it isn't dereferenced.
///
/// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect
/// for your use case, you should include some PhantomData in your type to
/// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
/// Usually this won't be necessary; covariance is correct for most safe abstractions,
/// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they
/// provide a public API that follows the normal shared XOR mutable rules of Rust.
///
/// Notice that `NonNull<T>` has a `From` instance for `&T`. However, this does
/// not change the fact that mutating through a (pointer derived from a) shared
/// reference is undefined behavior unless the mutation happens inside an
/// [`UnsafeCell<T>`]. The same goes for creating a mutable reference from a shared
/// reference. When using this `From` instance without an `UnsafeCell<T>`,
/// it is your responsibility to ensure that `as_mut` is never called, and `as_ptr`
/// is never used for mutation.
///
/// [`UnsafeCell<T>`]: ../cell/struct.UnsafeCell.html
#[stable(feature = "nonnull", since = "1.25.0")]
#[repr(transparent)]
#[rustc_layout_scalar_valid_range_start(1)]
pub struct NonNull<T: ?Sized> {
    // Stored as `*const T` for covariance; the scalar-valid-range attribute
    // gives `Option<NonNull<T>>` its null-pointer niche.
    pointer: *const T,
}

/// `NonNull` pointers are not `Send` because the data they reference may be aliased.
// N.B., this impl is unnecessary, but should provide better error messages.
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> !Send for NonNull<T> { }

/// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
// N.B., this impl is unnecessary, but should provide better error messages.
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> !Sync for NonNull<T> { }

impl<T: Sized> NonNull<T> {
    /// Creates a new `NonNull` that is dangling, but well-aligned.
    ///
    /// This is useful for initializing types which lazily allocate, like
    /// `Vec::new` does.
    ///
    /// Note that the pointer value may potentially represent a valid pointer to
    /// a `T`, which means this must not be used as a "not yet initialized"
    /// sentinel value. Types that lazily allocate must track initialization by
    /// some other means.
    #[stable(feature = "nonnull", since = "1.25.0")]
    #[inline]
    #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_ptr_nonnull"))]
    pub const fn dangling() -> Self {
        unsafe {
            // align_of::<T>() is always >= 1, so the pointer is non-null.
            let ptr = mem::align_of::<T>() as *mut T;
            NonNull::new_unchecked(ptr)
        }
    }
}

impl<T: ?Sized> NonNull<T> {
    /// Creates a new `NonNull`.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    #[stable(feature = "nonnull", since = "1.25.0")]
    #[inline]
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        NonNull { pointer: ptr as _ }
    }

    /// Creates a new `NonNull` if `ptr` is non-null.
    #[stable(feature = "nonnull", since = "1.25.0")]
    #[inline]
    pub fn new(ptr: *mut T) -> Option<Self> {
        if !ptr.is_null() {
            // SAFETY: just checked that `ptr` is non-null.
            Some(unsafe { Self::new_unchecked(ptr) })
        } else {
            None
        }
    }

    /// Acquires the underlying `*mut` pointer.
    #[stable(feature = "nonnull", since = "1.25.0")]
    #[inline]
    pub const fn as_ptr(self) -> *mut T {
        self.pointer as *mut T
    }

    /// Dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
    #[stable(feature = "nonnull", since = "1.25.0")]
    #[inline]
    pub unsafe fn as_ref(&self) -> &T {
        &*self.as_ptr()
    }

    /// Mutably dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
    #[stable(feature = "nonnull", since = "1.25.0")]
    #[inline]
    pub unsafe fn as_mut(&mut self) -> &mut T {
        &mut *self.as_ptr()
    }

    /// Cast to a pointer of another type
    #[stable(feature = "nonnull_cast", since = "1.27.0")]
    #[inline]
    #[cfg_attr(not(stage0), rustc_const_unstable(feature = "const_ptr_nonnull"))]
    pub const fn cast<U>(self) -> NonNull<U> {
        unsafe {
            // SAFETY: non-nullness is preserved by the cast.
            NonNull::new_unchecked(self.as_ptr() as *mut U)
        }
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Clone for NonNull<T> {
    fn clone(&self) -> Self {
        *self
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Copy for NonNull<T> { }

// Allows e.g. NonNull<[T; N]> -> NonNull<[T]> unsizing coercions.
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { }

#[unstable(feature = "dispatch_from_dyn", issue = "0")]
impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> { }

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> fmt::Debug for NonNull<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Debug shows the address, same as Pointer.
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> fmt::Pointer for NonNull<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}

// Comparison and hashing delegate to the raw-pointer impls: address-based,
// never dereferencing.
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Eq for NonNull<T> {}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> PartialEq for NonNull<T> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.as_ptr() == other.as_ptr()
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Ord for NonNull<T> {
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        self.as_ptr().cmp(&other.as_ptr())
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> PartialOrd for NonNull<T> {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.as_ptr().partial_cmp(&other.as_ptr())
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> hash::Hash for NonNull<T> {
    #[inline]
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.as_ptr().hash(state)
    }
}

#[unstable(feature = "ptr_internals", issue = "0")]
impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
    #[inline]
    fn from(unique: Unique<T>) -> Self {
        // SAFETY: Unique guarantees the pointer is non-null.
        unsafe { NonNull { pointer: unique.pointer } }
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> From<&mut T> for NonNull<T> {
    #[inline]
    fn from(reference: &mut T) -> Self {
        // SAFETY: references are always non-null.
        unsafe { NonNull { pointer: reference as *mut T } }
    }
}

#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> From<&T> for NonNull<T> {
    #[inline]
    fn from(reference: &T) -> Self {
        // SAFETY: references are always non-null.
        unsafe { NonNull { pointer: reference as *const T } }
    }
}
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory

//! Manually manage memory through raw pointers.
//!
//! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
//!
//! # Safety
//!
//! Most functions in this module [dereference raw pointers].
//!
//! In order for a pointer dereference to be safe, the pointer must be "valid".
//! A valid pointer is one that satisfies **all** of the following conditions:
//!
//! * The pointer is not null.
//! * The pointer is not dangling (it does not point to memory which has been
//!   freed).
//! * The pointer satisfies [LLVM's pointer aliasing rules].
//!
//! [dereference raw pointers]: https://doc.rust-lang.org/book/second-edition/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer
//! [LLVM's pointer aliasing rules]: https://llvm.org/docs/LangRef.html#pointer-aliasing-rules

#![stable(feature = "rust1", since = "1.0.0")]

use convert::From;
use intrinsics;
use ops::CoerceUnsized;
use fmt;
use hash;
use marker::{PhantomData, Unsize};
use mem;
use nonzero::NonZero;

use cmp::Ordering::{self, Less, Equal, Greater};

// The raw-memory copy primitives are compiler intrinsics; re-exported here as
// the stable public API.
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy_nonoverlapping;

#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy;

#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::write_bytes;

/// Executes the destructor (if any) of the pointed-to value.
///
/// This is semantically equivalent to calling [`ptr::read`] and discarding
/// the result, but has the following advantages:
///
/// * It is *required* to use `drop_in_place` to drop unsized types like
///   trait objects, because they can't be read out onto the stack and
///   dropped normally.
///
/// * It is friendlier to the optimizer to do this over [`ptr::read`] when
///   dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
///   as the compiler doesn't need to prove that it's sound to elide the
///   copy.
///
/// [`ptr::read`]: ../ptr/fn.read.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `to_drop` must be [valid].
///
/// * `to_drop` must be properly aligned.
///
/// Additionally, if `T` is not [`Copy`], using the pointed-to value after
/// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
/// foo` counts as a use because it will cause the value to be dropped
/// again. [`write`] can be used to overwrite data without causing it to be
/// dropped.
///
/// [valid]: ../ptr/index.html#safety
/// [`Copy`]: ../marker/trait.Copy.html
/// [`write`]: ../ptr/fn.write.html
///
/// # Examples
///
/// Manually remove the last item from a vector:
///
/// ```
/// use std::ptr;
/// use std::rc::Rc;
///
/// let last = Rc::new(1);
/// let weak = Rc::downgrade(&last);
///
/// let mut v = vec![Rc::new(0), last];
///
/// unsafe {
///     // Without a call to `drop_in_place`, the last item would never be dropped,
///     // and the memory it manages would be leaked.
///     ptr::drop_in_place(&mut v[1]);
///     v.set_len(1);
/// }
///
/// assert_eq!(v, &[0.into()]);
///
/// // Ensure that the last item was dropped.
/// assert!(weak.upgrade().is_none());
/// ```
#[stable(feature = "drop_in_place", since = "1.8.0")]
#[lang = "drop_in_place"]
#[allow(unconditional_recursion)]
pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
    // Code here does not matter - this is replaced by the
    // real drop glue by the compiler.
    drop_in_place(to_drop);
}

/// Creates a null raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *const i32 = ptr::null();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub const fn null<T>() -> *const T { 0 as *const T }

/// Creates a null mutable raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *mut i32 = ptr::null_mut();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub const fn null_mut<T>() -> *mut T { 0 as *mut T }

/// Swaps the values at two mutable locations of the same type, without
/// deinitializing either.
///
/// But for the following two exceptions, this function is semantically
/// equivalent to [`mem::swap`]:
///
/// * It operates on raw pointers instead of references. When references are
///   available, [`mem::swap`] should be preferred.
///
/// * The two pointed-to values may overlap. If the values do overlap, then the
///   overlapping region of memory from `x` will be used. This is demonstrated
///   in the examples below.
///
/// [`mem::swap`]: ../mem/fn.swap.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * Both `x` and `y` must be [valid].
///
/// * Both `x` and `y` must be properly aligned.
///
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// Swapping two non-overlapping regions:
///
/// ```
/// use std::ptr;
///
/// let mut array = [0, 1, 2, 3];
///
/// let x = array[0..].as_mut_ptr() as *mut [u32; 2];
/// let y = array[2..].as_mut_ptr() as *mut [u32; 2];
///
/// unsafe {
///     ptr::swap(x, y);
///     assert_eq!([2, 3, 0, 1], array);
/// }
/// ```
///
/// Swapping two overlapping regions:
///
/// ```
/// use std::ptr;
///
/// let mut array = [0, 1, 2, 3];
///
/// let x = array[0..].as_mut_ptr() as *mut [u32; 3];
/// let y = array[1..].as_mut_ptr() as *mut [u32; 3];
///
/// unsafe {
///     ptr::swap(x, y);
///     assert_eq!([1, 0, 1, 2], array);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
    // Give ourselves some scratch space to work with
    // NOTE(review): `mem::uninitialized` is the pre-`MaybeUninit` API; later
    // libcore versions replaced it — confirm against the MaybeUninit migration.
    let mut tmp: T = mem::uninitialized();

    // Perform the swap
    copy_nonoverlapping(x, &mut tmp, 1);
    copy(y, x, 1); // `x` and `y` may overlap
    copy_nonoverlapping(&tmp, y, 1);

    // y and t now point to the same thing, but we need to completely forget `tmp`
    // because it's no longer relevant.
    mem::forget(tmp);
}

/// Swaps a sequence of values at two mutable locations of the same type.
///
/// # Safety
///
/// The two arguments must each point to the beginning of `count` locations
/// of valid memory, and the two memory ranges must not overlap.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::ptr;
///
/// let mut x = [1, 2, 3, 4];
/// let mut y = [7, 8, 9];
///
/// unsafe {
///     ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
/// }
///
/// assert_eq!(x, [7, 8, 3, 4]);
/// assert_eq!(y, [1, 2, 9]);
/// ```
#[inline]
#[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
    // Reduce to a typeless byte swap; `T`'s layout only matters for total size.
    let x = x as *mut u8;
    let y = y as *mut u8;
    let len = mem::size_of::<T>() * count;
    swap_nonoverlapping_bytes(x, y, len)
}

#[inline]
pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
    // For types smaller than the block optimization below,
    // just swap directly to avoid pessimizing codegen.
    if mem::size_of::<T>() < 32 {
        let z = read(x);
        copy_nonoverlapping(y, x, 1);
        write(y, z);
    } else {
        swap_nonoverlapping(x, y, 1);
    }
}

#[inline]
unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
    // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
    // that swapping either 32 bytes or 64 bytes at a time is most efficient for intel
    // Haswell E processors. LLVM is more able to optimize if we give a struct a
    // #[repr(simd)], even if we don't actually use this struct directly.
    //
    // FIXME repr(simd) broken on emscripten and redox
    // It's also broken on big-endian powerpc64 and s390x.  #42778
    #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox",
                       target_endian = "big")),
               repr(simd))]
    struct Block(u64, u64, u64, u64);
    struct UnalignedBlock(u64, u64, u64, u64);

    let block_size = mem::size_of::<Block>();

    // Loop through x & y, copying them `Block` at a time
    // The optimizer should unroll the loop fully for most types
    // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
    let mut i = 0;
    while i + block_size <= len {
        // Create some uninitialized memory as scratch space
        // Declaring `t` here avoids aligning the stack when this loop is unused
        let mut t: Block = mem::uninitialized();
        let t = &mut t as *mut _ as *mut u8;
        let x = x.add(i);
        let y = y.add(i);

        // Swap a block of bytes of x & y, using t as a temporary buffer
        // This should be optimized into efficient SIMD operations where available
        copy_nonoverlapping(x, t, block_size);
        copy_nonoverlapping(y, x, block_size);
        copy_nonoverlapping(t, y, block_size);
        i += block_size;
    }

    if i < len {
        // Swap any remaining bytes
        let mut t: UnalignedBlock = mem::uninitialized();
        let rem = len - i;

        let t = &mut t as *mut _ as *mut u8;
        let x = x.add(i);
        let y = y.add(i);

        copy_nonoverlapping(x, t, rem);
        copy_nonoverlapping(y, x, rem);
        copy_nonoverlapping(t, y, rem);
    }
}

/// Moves `src` into the pointed `dest`, returning the previous `dest` value.
///
/// Neither value is dropped.
///
/// This function is semantically equivalent to [`mem::replace`] except that it
/// operates on raw pointers instead of references. When references are
/// available, [`mem::replace`] should be preferred.
///
/// [`mem::replace`]: ../mem/fn.replace.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dest` must be [valid].
///
/// * `dest` must be properly aligned.
///
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let mut rust = vec!['b', 'u', 's', 't'];
///
/// // `mem::replace` would have the same effect without requiring the unsafe
/// // block.
/// let b = unsafe {
///     ptr::replace(&mut rust[0], 'r')
/// };
///
/// assert_eq!(b, 'b');
/// assert_eq!(rust, &['r', 'u', 's', 't']);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
    mem::swap(&mut *dest, &mut src); // cannot overlap
    src
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid].
///
/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the
/// case.
///
/// ## Ownership of the Returned Value
///
/// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`].
/// If `T` is not [`Copy`], using both the returned value and the value at
/// `*src` can violate memory safety. Note that assigning to `src` counts as a
/// use because it will attempt to drop the value at `*src`.
///
/// [`write`] can be used to overwrite data without causing it to be dropped.
///
/// [valid]: ../ptr/index.html#safety
/// [`Copy`]: ../marker/trait.Copy.html
/// [`read_unaligned`]: ./fn.read_unaligned.html
/// [`write`]: ./fn.write.html
///
/// ```
/// use std::ptr;
///
/// let mut s = String::from("foo");
/// unsafe {
///     // `s2` now points to the same underlying memory as `s`.
///     let mut s2 = ptr::read(&s);
///
///     assert_eq!(s2, "foo");
///
///     // Assigning to `s2` causes its original value to be dropped. Beyond
///     // this point, `s` must no longer be used, as the underlying memory has
///     // been freed.
///     s2 = String::default();
///
///     // Assigning to `s` would cause the old value to be dropped again,
///     // resulting in undefined behavior.
///     // s = String::from("bar"); // ERROR
///
///     // `ptr::write` can be used to overwrite a value without dropping it.
///     ptr::write(&mut s, String::from("bar"));
/// }
///
/// assert_eq!(s, "bar");
/// ```
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         // Create a bitwise copy of the value at `a` in `tmp`.
///         let tmp = ptr::read(a);
///
///         // Exiting at this point (either by explicitly returning or by
///         // calling a function which panics) would cause the value in `tmp` to
///         // be dropped while the same value is still referenced by `a`. This
///         // could trigger undefined behavior if `T` is not `Copy`.
///
///         // Create a bitwise copy of the value at `b` in `a`.
///         // This is safe because mutable references cannot alias.
///         ptr::copy_nonoverlapping(b, a, 1);
///
///         // As above, exiting here could trigger undefined behavior because
///         // the same value is referenced by `a` and `b`.
///
///         // Move `tmp` into `b`.
///         ptr::write(b, tmp);
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
///
/// [`mem::swap`]: ../mem/fn.swap.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn read<T>(src: *const T) -> T {
    // The uninitialized `T` is completely overwritten by `copy_nonoverlapping`
    // before being returned, so its uninitialized bytes are never observed.
    // NOTE(review): `mem::uninitialized` is the pre-`MaybeUninit` idiom and is
    // UB for types with invalid bit patterns; migrate once `MaybeUninit` lands.
    let mut tmp: T = mem::uninitialized();
    copy_nonoverlapping(src, &mut tmp, 1);
    tmp
}

/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// Unlike [`read`], `read_unaligned` works with unaligned pointers.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid].
///
/// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
///
/// [`Copy`]: ../marker/trait.Copy.html
/// [`read`]: ./fn.read.html
/// [`write_unaligned`]: ./fn.write_unaligned.html
/// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// Access members of a packed struct by reference:
///
/// ```
/// use std::ptr;
///
/// #[repr(packed, C)]
/// #[derive(Default)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let x = Packed {
///     _padding: 0x00,
///     unaligned: 0x01020304,
/// };
///
/// let v = unsafe {
///     // Take a reference to a 32-bit integer which is not aligned.
///     let unaligned = &x.unaligned;
///
///     // Dereferencing normally will emit an unaligned load instruction,
///     // causing undefined behavior.
///     // let v = *unaligned; // ERROR
///
///     // Instead, use `read_unaligned` to read improperly aligned values.
///     let v = ptr::read_unaligned(unaligned);
///
///     v
/// };
///
/// // Accessing unaligned values directly is safe.
/// assert!(x.unaligned == v);
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn read_unaligned<T>(src: *const T) -> T {
    // Copying byte-by-byte through `u8` pointers places no alignment
    // requirement on `src`.
    let mut tmp: T = mem::uninitialized();
    copy_nonoverlapping(src as *const u8,
                        &mut tmp as *mut T as *mut u8,
                        mem::size_of::<T>());
    tmp
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// `write` does not drop the contents of `dst`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been [`read`] from.
///
/// [`read`]: ./fn.read.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid].
///
/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the
/// case.
///
/// [valid]: ../ptr/index.html#safety
/// [`write_unaligned`]: ./fn.write_unaligned.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write(y, z);
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```
///
/// Manually implement [`mem::swap`]:
///
/// ```
/// use std::ptr;
///
/// fn swap<T>(a: &mut T, b: &mut T) {
///     unsafe {
///         let tmp = ptr::read(a);
///         ptr::copy_nonoverlapping(b, a, 1);
///         ptr::write(b, tmp);
///     }
/// }
///
/// let mut foo = "foo".to_owned();
/// let mut bar = "bar".to_owned();
///
/// swap(&mut foo, &mut bar);
///
/// assert_eq!(foo, "bar");
/// assert_eq!(bar, "foo");
/// ```
///
/// [`mem::swap`]: ../mem/fn.swap.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
    // The `move_val_init` intrinsic moves `src` into `*dst` without reading or
    // dropping the old `*dst` value; `src` is consumed and not dropped here.
    intrinsics::move_val_init(&mut *dst, src)
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// Unlike [`write`], the pointer may be unaligned.
///
/// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care must be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been read with [`read_unaligned`].
///
/// [`write`]: ./fn.write.html
/// [`read_unaligned`]: ./fn.read_unaligned.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid].
///
/// [valid]: ../ptr/index.html#safety
///
/// # Examples
///
/// Access fields in a packed struct:
///
/// ```
/// use std::{mem, ptr};
///
/// #[repr(packed, C)]
/// #[derive(Default)]
/// struct Packed {
///     _padding: u8,
///     unaligned: u32,
/// }
///
/// let v = 0x01020304;
/// let mut x: Packed = unsafe { mem::zeroed() };
///
/// unsafe {
///     // Take a reference to a 32-bit integer which is not aligned.
///     let unaligned = &mut x.unaligned;
///
///     // Dereferencing normally will emit an unaligned store instruction,
///     // causing undefined behavior.
///     // *unaligned = v; // ERROR
///
///     // Instead, use `write_unaligned` to write improperly aligned values.
///     ptr::write_unaligned(unaligned, v);
/// }
///
/// // Accessing unaligned values directly is safe.
/// assert!(x.unaligned == v);
/// ```
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
    // Copying byte-by-byte through `u8` pointers places no alignment
    // requirement on `dst`; `forget` keeps `src` from being dropped after its
    // bytes have been moved out.
    copy_nonoverlapping(&src as *const T as *const u8,
                        dst as *mut u8,
                        mem::size_of::<T>());
    mem::forget(src);
}

/// Performs a volatile read of the value from `src` without moving it. This
/// leaves the memory in `src` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// Memory read with `read_volatile` should almost always be written to using
/// [`write_volatile`].
///
/// [`write_volatile`]: ./fn.write_volatile.html
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid].
///
/// * `src` must be properly aligned.
///
/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
/// However, storing non-[`Copy`] types in volatile memory is almost certainly
/// incorrect.
///
/// [valid]: ../ptr/index.html#safety
/// [`Copy`]: ../marker/trait.Copy.html
/// [`read`]: ./fn.read.html
/// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `read_volatile` and any write operation to the same location
/// is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
    // Lowers directly to a volatile load of `*src`.
    intrinsics::volatile_load(src)
}

/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// Memory written with `write_volatile` should almost always be read from using
/// [`read_volatile`].
///
/// `write_volatile` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care must be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// [`read_volatile`]: ./fn.read_volatile.html
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid].
///
/// * `dst` must be properly aligned.
///
/// [valid]: ../ptr/index.html#safety
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `write_volatile` and any other operation (reading or writing)
/// on the same location is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     std::ptr::write_volatile(y, z);
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
    // Lowers directly to a volatile store; `src` is moved into `*dst` and the
    // old `*dst` value is neither read nor dropped.
    intrinsics::volatile_store(dst, src);
}

#[lang = "const_ptr"]
impl<T: ?Sized> *const T {
    /// Returns `true` if the pointer is null.
    ///
    /// Note that unsized types have many possible null pointers, as only the
    /// raw data pointer is considered, not their length, vtable, etc.
    /// Therefore, two pointers that are null may still not compare equal to
    /// each other.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "Follow the rabbit";
    /// let ptr: *const u8 = s.as_ptr();
    /// assert!(!ptr.is_null());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn is_null(self) -> bool {
        // Compare via a cast to a thin pointer, so fat pointers are only
        // considering their "data" part for null-ness.
        (self as *const u8) == null()
    }

    /// Returns `None` if the pointer is null, or else returns a reference to
    /// the value wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// While this method and its mutable counterpart are useful for
    /// null-safety, it is important to note that this is still an unsafe
    /// operation because the returned value could be pointing to invalid
    /// memory.
    ///
    /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does
    /// not necessarily reflect the actual lifetime of the data.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     if let Some(val_back) = ptr.as_ref() {
    ///         println!("We got back the value: {}!", val_back);
    ///     }
    /// }
    /// ```
    ///
    /// # Null-unchecked version
    ///
    /// If you are sure the pointer can never be null and are looking for some kind of
    /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
    /// dereference the pointer directly.
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     let val_back = &*ptr;
    ///     println!("We got back the value: {}!", val_back);
    /// }
    /// ```
    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
    #[inline]
    pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
        if self.is_null() {
            None
        } else {
            Some(&*self)
        }
    }

    /// Calculates the offset from a pointer.
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    /// byte past the end of *the same* allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    /// space. That is, the infinite-precision sum, **in bytes** must fit in a usize.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.offset(1) as char);
    ///     println!("{}", *ptr.offset(2) as char);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
        intrinsics::offset(self, count)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    /// In particular, the resulting pointer may *not* be used to access a
    /// different allocated object than the one `self` points to. In other
    /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is
    /// *not* the same as `y`, and dereferencing it is undefined behavior
    /// unless `x` and `y` point into the same allocated object.
    ///
    /// Always use `.offset(count)` instead when possible, because `offset`
    /// allows the compiler to optimize better. If you need to cross object
    /// boundaries, cast the pointer to an integer and do the arithmetic there.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_offset(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_offset(step);
    /// }
    /// ```
    #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
    #[inline]
    pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized {
        unsafe {
            intrinsics::arith_offset(self, count)
        }
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// This function is the inverse of [`offset`].
    ///
    /// [`offset`]: #method.offset
    /// [`wrapping_offset_from`]: #method.wrapping_offset_from
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and other pointer must be either in bounds or one
    /// byte past the end of the same allocated object.
    ///
    /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The distance between the pointers, in bytes, must be an exact multiple
    /// of the size of `T`.
    ///
    /// * The distance being in bounds cannot rely on "wrapping around" the address space.
    ///
    /// The compiler and standard library generally try to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_offset_from`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a Zero-Sized Type ("ZST").
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(ptr_offset_from)]
    ///
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// unsafe {
    ///     assert_eq!(ptr2.offset_from(ptr1), 2);
    ///     assert_eq!(ptr1.offset_from(ptr2), -2);
    ///     assert_eq!(ptr1.offset(2), ptr2);
    ///     assert_eq!(ptr2.offset(-2), ptr1);
    /// }
    /// ```
    #[unstable(feature = "ptr_offset_from", issue = "41079")]
    #[inline]
    pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
        let pointee_size = mem::size_of::<T>();
        assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);

        // This is the same sequence that Clang emits for pointer subtraction.
        // It can be neither `nsw` nor `nuw` because the input is treated as
        // unsigned but then the output is treated as signed, so neither works.
        let d = isize::wrapping_sub(self as _, origin as _);
        intrinsics::exact_div(d, pointee_size as _)
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// If the address difference between the two pointers is not a multiple of
    /// `mem::size_of::<T>()` then the result of the division is rounded towards
    /// zero.
    ///
    /// Though this method is safe for any two pointers, note that its result
    /// will be mostly useless if the two pointers aren't into the same allocated
    /// object, for example if they point to two different local variables.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a zero-sized type.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(ptr_wrapping_offset_from)]
    ///
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
    /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
    /// assert_eq!(ptr1.wrapping_offset(2), ptr2);
    /// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
    ///
    /// let ptr1: *const i32 = 3 as _;
    /// let ptr2: *const i32 = 13 as _;
    /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
    /// ```
    #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
    #[inline]
    pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized {
        let pointee_size = mem::size_of::<T>();
        assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);

        // Same distance computation as `offset_from`, but `wrapping_div`
        // tolerates (and truncates) a byte distance that is not an exact
        // multiple of the pointee size.
        let d = isize::wrapping_sub(self as _, origin as _);
        d.wrapping_div(pointee_size as _)
    }

    /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    /// byte past the end of an allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    /// space. That is, the infinite-precision sum must fit in a `usize`.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.add(1) as char);
    ///     println!("{}", *ptr.add(2) as char);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn add(self, count: usize) -> Self
        where T: Sized,
    {
        self.offset(count as isize)
    }

    /// Calculates the offset from a pointer (convenience for
    /// `.offset((count as isize).wrapping_neg())`).
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    /// byte past the end of an allocated object.
    ///
    /// * The computed offset cannot exceed `isize::MAX` **bytes**.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    /// space. That is, the infinite-precision sum must fit in a usize.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "123";
    ///
    /// unsafe {
    ///     let end: *const u8 = s.as_ptr().add(3);
    ///     println!("{}", *end.sub(1) as char);
    ///     println!("{}", *end.sub(2) as char);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn sub(self, count: usize) -> Self
        where T: Sized,
    {
        self.offset((count as isize).wrapping_neg())
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset(count as isize)`)
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.add(count)` instead when possible, because `add`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_add(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_add(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub fn wrapping_add(self, count: usize) -> Self
        where T: Sized,
    {
        self.wrapping_offset(count as isize)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.sub(count)` instead when possible, because `sub`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements (backwards)
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let start_rounded_down = ptr.wrapping_sub(2);
    /// ptr = ptr.wrapping_add(4);
    /// let step = 2;
    /// // This loop prints "5, 3, 1, "
    /// while ptr != start_rounded_down {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_sub(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub fn wrapping_sub(self, count: usize) -> Self
        where T: Sized,
    {
        self.wrapping_offset((count as isize).wrapping_neg())
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
    ///
    /// # Safety
    ///
    /// Beyond accepting a raw pointer, this is unsafe because it semantically
    /// moves the value out of `self` without preventing further usage of `self`.
    /// If `T` is not `Copy`, then care must be taken to ensure that the value at
    /// `self` is not used before the data is overwritten again (e.g. with `write`,
    /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use
    /// because it will attempt to drop the value previously at `*self`.
    ///
    /// The pointer must be aligned; use `read_unaligned` if that is not the case.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let x = 12;
    /// let y = &x as *const i32;
    ///
    /// unsafe {
    ///     assert_eq!(y.read(), 12);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub unsafe fn read(self) -> T
        where T: Sized,
    {
        // Delegates to the free function `ptr::read`.
        read(self)
    }

    /// Performs a volatile read of the value from `self` without moving it. This
    /// leaves the memory in `self` unchanged.
    ///
    /// Volatile operations are intended to act on I/O memory, and are guaranteed
    /// to not be elided or reordered by the compiler across other volatile
    /// operations.
    ///
    /// # Notes
    ///
    /// Rust does not currently have a rigorously and formally defined memory model,
    /// so the precise semantics of what "volatile" means here is subject to change
    /// over time. That being said, the semantics will almost always end up pretty
    /// similar to [C11's definition of volatile][c11].
    ///
    /// The compiler shouldn't change the relative order or number of volatile
    /// memory operations. However, volatile memory operations on zero-sized types
    /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
    /// and may be ignored.
    ///
    /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
    ///
    /// # Safety
    ///
    /// Beyond accepting a raw pointer, this is unsafe because it semantically
    /// moves the value out of `self` without preventing further usage of `self`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// /// Just like in C, whether an operation is volatile has no bearing whatsoever /// on questions involving concurrent access from multiple threads. Volatile /// accesses behave exactly like non-atomic accesses in that regard. In particular, /// a race between a `read_volatile` and any write operation to the same location /// is undefined behavior. /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(y.read_volatile(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_volatile(self) -> T where T: Sized, { read_volatile(self) } /// Reads the value from `self` without moving it. This leaves the /// memory in `self` unchanged. /// /// Unlike `read`, the pointer may be unaligned. /// /// # Safety /// /// Beyond accepting a raw pointer, this is unsafe because it semantically /// moves the value out of `self` without preventing further usage of `self`. /// If `T` is not `Copy`, then care must be taken to ensure that the value at /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(y.read_unaligned(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_unaligned(self) -> T where T: Sized, { read_unaligned(self) } /// Copies `count * size_of<T>` bytes from `self` to `dest`. 
/// The source
/// and destination may overlap.
///
/// NOTE: this has the *same* argument order as `ptr::copy`.
///
/// This is semantically equivalent to C's `memmove`.
///
/// # Safety
///
/// Care must be taken with the ownership of `self` and `dest`.
/// This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
///
/// # Examples
///
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to(dst.as_mut_ptr(), elts);
///     dst
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn copy_to(self, dest: *mut T, count: usize)
    where T: Sized,
{
    copy(self, dest, count)
}

/// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
/// and destination may *not* overlap.
///
/// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
///
/// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
///
/// # Safety
///
/// Beyond requiring that the program must be allowed to access both regions
/// of memory, it is Undefined Behavior for source and destination to
/// overlap. Care must also be taken with the ownership of `self` and
/// `dest`. This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
///
/// # Examples
///
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
///     dst
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
    where T: Sized,
{
    copy_nonoverlapping(self, dest, count)
}

/// Computes the offset that needs to be applied to the pointer in order to make it aligned to
/// `align`.
///
/// If it is not possible to align the pointer, the implementation returns
/// `usize::max_value()`.
///
/// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
/// used with the `offset` or `offset_to` methods.
///
/// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
/// beyond the allocation that the pointer points into. It is up to the caller to ensure that
/// the returned offset is correct in all terms other than alignment.
///
/// # Panics
///
/// The function panics if `align` is not a power-of-two.
/// /// # Examples /// /// Accessing adjacent `u8` as `u16` /// /// ``` /// # #![feature(align_offset)] /// # fn foo(n: usize) { /// # use std::mem::align_of; /// # unsafe { /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; /// let ptr = &x[n] as *const u8; /// let offset = ptr.align_offset(align_of::<u16>()); /// if offset < x.len() - n - 1 { /// let u16_ptr = ptr.add(offset) as *const u16; /// assert_ne!(*u16_ptr, 500); /// } else { /// // while the pointer can be aligned via `offset`, it would point /// // outside the allocation /// } /// # } } /// ``` #[unstable(feature = "align_offset", issue = "44488")] pub fn align_offset(self, align: usize) -> usize where T: Sized { if !align.is_power_of_two() { panic!("align_offset: align is not a power-of-two"); } unsafe { align_offset(self, align) } } } #[lang = "mut_ptr"] impl<T: ?Sized> *mut T { /// Returns `true` if the pointer is null. /// /// Note that unsized types have many possible null pointers, as only the /// raw data pointer is considered, not their length, vtable, etc. /// Therefore, two pointers that are null may still not compare equal to /// each other. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// assert!(!ptr.is_null()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_null(self) -> bool { // Compare via a cast to a thin pointer, so fat pointers are only // considering their "data" part for null-ness. (self as *mut u8) == null_mut() } /// Returns `None` if the pointer is null, or else returns a reference to /// the value wrapped in `Some`. /// /// # Safety /// /// While this method and its mutable counterpart are useful for /// null-safety, it is important to note that this is still an unsafe /// operation because the returned value could be pointing to invalid /// memory. /// /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does /// not necessarily reflect the actual lifetime of the data. 
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let ptr: *mut u8 = &mut 10u8 as *mut u8;
///
/// unsafe {
///     if let Some(val_back) = ptr.as_ref() {
///         println!("We got back the value: {}!", val_back);
///     }
/// }
/// ```
///
/// # Null-unchecked version
///
/// If you are sure the pointer can never be null and are looking for some kind of
/// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
/// dereference the pointer directly.
///
/// ```
/// let ptr: *mut u8 = &mut 10u8 as *mut u8;
///
/// unsafe {
///     let val_back = &*ptr;
///     println!("We got back the value: {}!", val_back);
/// }
/// ```
#[stable(feature = "ptr_as_ref", since = "1.9.0")]
#[inline]
pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
    if self.is_null() {
        None
    } else {
        Some(&*self)
    }
}

/// Calculates the offset from a pointer.
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
///
/// * Both the starting and resulting pointer must be either in bounds or one
///   byte past the end of *the same* allocated object.
///
/// * The computed offset, **in bytes**, cannot overflow an `isize`.
///
/// * The offset being in bounds cannot rely on "wrapping around" the address
///   space. That is, the infinite-precision sum, **in bytes** must fit in a usize.
///
/// The compiler and standard library generally tries to ensure allocations
/// never reach a size where an offset is a concern. For instance, `Vec`
/// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
/// `vec.as_ptr().add(vec.len())` is always safe.
///
/// Most platforms fundamentally can't even construct such an allocation.
/// For instance, no known 64-bit platform can ever serve a request
/// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
/// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// /// unsafe { /// println!("{}", *ptr.offset(1)); /// println!("{}", *ptr.offset(2)); /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized { intrinsics::offset(self, count) as *mut T } /// Calculates the offset from a pointer using wrapping arithmetic. /// `count` is in units of T; e.g. a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). /// In particular, the resulting pointer may *not* be used to access a /// different allocated object than the one `self` points to. In other /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is /// *not* the same as `y`, and dereferencing it is undefined behavior /// unless `x` and `y` point into the same allocated object. /// /// Always use `.offset(count)` instead when possible, because `offset` /// allows the compiler to optimize better. If you need to cross object /// boundaries, cast the pointer to an integer and do the arithmetic there. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let mut data = [1u8, 2, 3, 4, 5]; /// let mut ptr: *mut u8 = data.as_mut_ptr(); /// let step = 2; /// let end_rounded_up = ptr.wrapping_offset(6); /// /// while ptr != end_rounded_up { /// unsafe { /// *ptr = 0; /// } /// ptr = ptr.wrapping_offset(step); /// } /// assert_eq!(&data, &[0, 2, 0, 4, 0]); /// ``` #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")] #[inline] pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized { unsafe { intrinsics::arith_offset(self, count) as *mut T } } /// Returns `None` if the pointer is null, or else returns a mutable /// reference to the value wrapped in `Some`. /// /// # Safety /// /// As with `as_ref`, this is unsafe because it cannot verify the validity /// of the returned pointer, nor can it ensure that the lifetime `'a` /// returned is indeed a valid lifetime for the contained data. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// let first_value = unsafe { ptr.as_mut().unwrap() }; /// *first_value = 4; /// println!("{:?}", s); // It'll print: "[4, 2, 3]". /// ``` #[stable(feature = "ptr_as_ref", since = "1.9.0")] #[inline] pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> { if self.is_null() { None } else { Some(&mut *self) } } /// Calculates the distance between two pointers. The returned value is in /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`. /// /// This function is the inverse of [`offset`]. /// /// [`offset`]: #method.offset-1 /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1 /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and other pointer must be either in bounds or one /// byte past the end of the same allocated object. 
/// /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`. /// /// * The distance between the pointers, in bytes, must be an exact multiple /// of the size of `T`. /// /// * The distance being in bounds cannot rely on "wrapping around" the address space. /// /// The compiler and standard library generally try to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using [`wrapping_offset_from`] instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Panics /// /// This function panics if `T` is a Zero-Sized Type ("ZST"). /// /// # Examples /// /// Basic usage: /// /// ``` /// #![feature(ptr_offset_from)] /// /// let mut a = [0; 5]; /// let ptr1: *mut i32 = &mut a[1]; /// let ptr2: *mut i32 = &mut a[3]; /// unsafe { /// assert_eq!(ptr2.offset_from(ptr1), 2); /// assert_eq!(ptr1.offset_from(ptr2), -2); /// assert_eq!(ptr1.offset(2), ptr2); /// assert_eq!(ptr2.offset(-2), ptr1); /// } /// ``` #[unstable(feature = "ptr_offset_from", issue = "41079")] #[inline] pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized { (self as *const T).offset_from(origin) } /// Calculates the distance between two pointers. 
/// The returned value is in
/// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
///
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
///
/// Though this method is safe for any two pointers, note that its result
/// will be mostly useless if the two pointers aren't into the same allocated
/// object, for example if they point to two different local variables.
///
/// # Panics
///
/// This function panics if `T` is a zero-sized type.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(ptr_wrapping_offset_from)]
///
/// let mut a = [0; 5];
/// let ptr1: *mut i32 = &mut a[1];
/// let ptr2: *mut i32 = &mut a[3];
/// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
/// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
/// assert_eq!(ptr1.wrapping_offset(2), ptr2);
/// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
///
/// let ptr1: *mut i32 = 3 as _;
/// let ptr2: *mut i32 = 13 as _;
/// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
/// ```
#[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
#[inline]
pub fn wrapping_offset_from(self, origin: *const T) -> isize
    where T: Sized
{
    (self as *const T).wrapping_offset_from(origin)
}

/// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
///
/// * Both the starting and resulting pointer must be either in bounds or one
///   byte past the end of an allocated object.
///
/// * The computed offset, **in bytes**, cannot overflow an `isize`.
///
/// * The offset being in bounds cannot rely on "wrapping around" the address
///   space. That is, the infinite-precision sum must fit in a `usize`.
/// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); /// /// unsafe { /// println!("{}", *ptr.add(1) as char); /// println!("{}", *ptr.add(2) as char); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn add(self, count: usize) -> Self where T: Sized, { self.offset(count as isize) } /// Calculates the offset from a pointer (convenience for /// `.offset((count as isize).wrapping_neg())`). /// /// `count` is in units of T; e.g. a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and resulting pointer must be either in bounds or one /// byte past the end of an allocated object. /// /// * The computed offset cannot exceed `isize::MAX` **bytes**. 
/// /// * The offset being in bounds cannot rely on "wrapping around" the address /// space. That is, the infinite-precision sum must fit in a usize. /// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "123"; /// /// unsafe { /// let end: *const u8 = s.as_ptr().add(3); /// println!("{}", *end.sub(1) as char); /// println!("{}", *end.sub(2) as char); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn sub(self, count: usize) -> Self where T: Sized, { self.offset((count as isize).wrapping_neg()) } /// Calculates the offset from a pointer using wrapping arithmetic. /// (convenience for `.wrapping_offset(count as isize)`) /// /// `count` is in units of T; e.g. a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). 
///
/// Always use `.add(count)` instead when possible, because `add`
/// allows the compiler to optimize better.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // Iterate using a raw pointer in increments of two elements
/// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_add(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_add(step);
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub fn wrapping_add(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset(count as isize)
}

/// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// The resulting pointer does not need to be in bounds, but it is
/// potentially hazardous to dereference (which requires `unsafe`).
///
/// Always use `.sub(count)` instead when possible, because `sub`
/// allows the compiler to optimize better.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // Iterate using a raw pointer in increments of two elements (backwards)
/// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let start_rounded_down = ptr.wrapping_sub(2);
/// ptr = ptr.wrapping_add(4);
/// let step = 2;
/// // This loop prints "5, 3, 1, "
/// while ptr != start_rounded_down {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_sub(step);
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub fn wrapping_sub(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset((count as isize).wrapping_neg())
}

/// Reads the value from `self` without moving it.
This leaves the /// memory in `self` unchanged. /// /// # Safety /// /// Beyond accepting a raw pointer, this is unsafe because it semantically /// moves the value out of `self` without preventing further usage of `self`. /// If `T` is not `Copy`, then care must be taken to ensure that the value at /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// /// The pointer must be aligned; use `read_unaligned` if that is not the case. /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(y.read(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read(self) -> T where T: Sized, { read(self) } /// Performs a volatile read of the value from `self` without moving it. This /// leaves the memory in `self` unchanged. /// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// # Notes /// /// Rust does not currently have a rigorously and formally defined memory model, /// so the precise semantics of what "volatile" means here is subject to change /// over time. That being said, the semantics will almost always end up pretty /// similar to [C11's definition of volatile][c11]. /// /// The compiler shouldn't change the relative order or number of volatile /// memory operations. However, volatile memory operations on zero-sized types /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops /// and may be ignored. /// /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf /// /// # Safety /// /// Beyond accepting a raw pointer, this is unsafe because it semantically /// moves the value out of `self` without preventing further usage of `self`. 
/// If `T` is not `Copy`, then care must be taken to ensure that the value at /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// /// Just like in C, whether an operation is volatile has no bearing whatsoever /// on questions involving concurrent access from multiple threads. Volatile /// accesses behave exactly like non-atomic accesses in that regard. In particular, /// a race between a `read_volatile` and any write operation to the same location /// is undefined behavior. /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(y.read_volatile(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_volatile(self) -> T where T: Sized, { read_volatile(self) } /// Reads the value from `self` without moving it. This leaves the /// memory in `self` unchanged. /// /// Unlike `read`, the pointer may be unaligned. /// /// # Safety /// /// Beyond accepting a raw pointer, this is unsafe because it semantically /// moves the value out of `self` without preventing further usage of `self`. /// If `T` is not `Copy`, then care must be taken to ensure that the value at /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(y.read_unaligned(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_unaligned(self) -> T where T: Sized, { read_unaligned(self) } /// Copies `count * size_of<T>` bytes from `self` to `dest`. 
/// The source
/// and destination may overlap.
///
/// NOTE: this has the *same* argument order as `ptr::copy`.
///
/// This is semantically equivalent to C's `memmove`.
///
/// # Safety
///
/// Care must be taken with the ownership of `self` and `dest`.
/// This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
///
/// # Examples
///
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to(dst.as_mut_ptr(), elts);
///     dst
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn copy_to(self, dest: *mut T, count: usize)
    where T: Sized,
{
    copy(self, dest, count)
}

/// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
/// and destination may *not* overlap.
///
/// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
///
/// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
///
/// # Safety
///
/// Beyond requiring that the program must be allowed to access both regions
/// of memory, it is Undefined Behavior for source and destination to
/// overlap. Care must also be taken with the ownership of `self` and
/// `dest`. This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
/// /// # Examples /// /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` /// # #[allow(dead_code)] /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> { /// let mut dst = Vec::with_capacity(elts); /// dst.set_len(elts); /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts); /// dst /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize) where T: Sized, { copy_nonoverlapping(self, dest, count) } /// Copies `count * size_of<T>` bytes from `src` to `self`. The source /// and destination may overlap. /// /// NOTE: this has the *opposite* argument order of `ptr::copy`. /// /// This is semantically equivalent to C's `memmove`. /// /// # Safety /// /// Care must be taken with the ownership of `src` and `self`. /// This method semantically moves the values of `src` into `self`. /// However it does not drop the contents of `self`, or prevent the contents /// of `src` from being dropped or used. /// /// # Examples /// /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` /// # #[allow(dead_code)] /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> { /// let mut dst: Vec<T> = Vec::with_capacity(elts); /// dst.set_len(elts); /// dst.as_mut_ptr().copy_from(ptr, elts); /// dst /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_from(self, src: *const T, count: usize) where T: Sized, { copy(src, self, count) } /// Copies `count * size_of<T>` bytes from `src` to `self`. The source /// and destination may *not* overlap. /// /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`. /// /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`. /// /// # Safety /// /// Beyond requiring that the program must be allowed to access both regions /// of memory, it is Undefined Behavior for source and destination to /// overlap. 
Care must also be taken with the ownership of `src` and /// `self`. This method semantically moves the values of `src` into `self`. /// However it does not drop the contents of `self`, or prevent the contents /// of `src` from being dropped or used. /// /// # Examples /// /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` /// # #[allow(dead_code)] /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> { /// let mut dst: Vec<T> = Vec::with_capacity(elts); /// dst.set_len(elts); /// dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts); /// dst /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize) where T: Sized, { copy_nonoverlapping(src, self, count) } /// Executes the destructor (if any) of the pointed-to value. /// /// This has two use cases: /// /// * It is *required* to use `drop_in_place` to drop unsized types like /// trait objects, because they can't be read out onto the stack and /// dropped normally. /// /// * It is friendlier to the optimizer to do this over `ptr::read` when /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec), /// as the compiler doesn't need to prove that it's sound to elide the /// copy. /// /// # Safety /// /// This has all the same safety problems as `ptr::read` with respect to /// invalid pointers, types, and double drops. #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn drop_in_place(self) { drop_in_place(self) } /// Overwrites a memory location with the given value without reading or /// dropping the old value. /// /// # Safety /// /// This operation is marked unsafe because it writes through a raw pointer. /// /// It does not drop the contents of `self`. This is safe, but it could leak /// allocations or resources, so care must be taken not to overwrite an object /// that should be dropped. /// /// Additionally, it does not drop `val`. 
Semantically, `val` is moved into the /// location pointed to by `self`. /// /// This is appropriate for initializing uninitialized memory, or overwriting /// memory that has previously been `read` from. /// /// The pointer must be aligned; use `write_unaligned` if that is not the case. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut x = 0; /// let y = &mut x as *mut i32; /// let z = 12; /// /// unsafe { /// y.write(z); /// assert_eq!(y.read(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write(self, val: T) where T: Sized, { write(self, val) } /// Invokes memset on the specified pointer, setting `count * size_of::<T>()` /// bytes of memory starting at `self` to `val`. /// /// # Examples /// /// ``` /// let mut vec = vec![0; 4]; /// unsafe { /// let vec_ptr = vec.as_mut_ptr(); /// vec_ptr.write_bytes(b'a', 2); /// } /// assert_eq!(vec, [b'a', b'a', 0, 0]); /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_bytes(self, val: u8, count: usize) where T: Sized, { write_bytes(self, val, count) } /// Performs a volatile write of a memory location with the given value without /// reading or dropping the old value. /// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// # Notes /// /// Rust does not currently have a rigorously and formally defined memory model, /// so the precise semantics of what "volatile" means here is subject to change /// over time. That being said, the semantics will almost always end up pretty /// similar to [C11's definition of volatile][c11]. /// /// The compiler shouldn't change the relative order or number of volatile /// memory operations. However, volatile memory operations on zero-sized types /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops /// and may be ignored. 
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// This operation is marked unsafe because it accepts a raw pointer.
///
/// It does not drop the contents of `self`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `write_volatile` and any other operation (reading or writing)
/// on the same location is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     y.write_volatile(z);
///     assert_eq!(y.read_volatile(), 12);
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn write_volatile(self, val: T)
    where T: Sized,
{
    write_volatile(self, val)
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// Unlike `write`, the pointer may be unaligned.
///
/// # Safety
///
/// This operation is marked unsafe because it writes through a raw pointer.
///
/// It does not drop the contents of `self`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `val`. Semantically, `val` is moved into the
/// location pointed to by `self`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     y.write_unaligned(z);
///     assert_eq!(y.read_unaligned(), 12);
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn write_unaligned(self, val: T)
    where T: Sized,
{
    write_unaligned(self, val)
}

/// Replaces the value at `self` with `src`, returning the old
/// value, without dropping either.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
/// Otherwise, this operation is identical to `mem::replace`.
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn replace(self, src: T) -> T
    where T: Sized,
{
    replace(self, src)
}

/// Swaps the values at two mutable locations of the same type, without
/// deinitializing either. They may overlap, unlike `mem::swap` which is
/// otherwise equivalent.
///
/// # Safety
///
/// This function copies the memory through the raw pointers passed to it
/// as arguments.
///
/// Ensure that these pointers are valid before calling `swap`.
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn swap(self, with: *mut T)
    where T: Sized,
{
    swap(self, with)
}

/// Computes the offset that needs to be applied to the pointer in order to make it aligned to
/// `align`.
///
/// If it is not possible to align the pointer, the implementation returns
/// `usize::max_value()`.
///
/// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
/// used with the `offset` or `offset_to` methods.
///
/// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
/// beyond the allocation that the pointer points into. It is up to the caller to ensure that
/// the returned offset is correct in all terms other than alignment.
///
/// # Panics
///
/// The function panics if `align` is not a power-of-two.
/// /// # Examples /// /// Accessing adjacent `u8` as `u16` /// /// ``` /// # #![feature(align_offset)] /// # fn foo(n: usize) { /// # use std::mem::align_of; /// # unsafe { /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; /// let ptr = &x[n] as *const u8; /// let offset = ptr.align_offset(align_of::<u16>()); /// if offset < x.len() - n - 1 { /// let u16_ptr = ptr.add(offset) as *const u16; /// assert_ne!(*u16_ptr, 500); /// } else { /// // while the pointer can be aligned via `offset`, it would point /// // outside the allocation /// } /// # } } /// ``` #[unstable(feature = "align_offset", issue = "44488")] pub fn align_offset(self, align: usize) -> usize where T: Sized { if !align.is_power_of_two() { panic!("align_offset: align is not a power-of-two"); } unsafe { align_offset(self, align) } } } /// Align pointer `p`. /// /// Calculate offset (in terms of elements of `stride` stride) that has to be applied /// to pointer `p` so that pointer `p` would get aligned to `a`. /// /// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic. /// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated /// constants. /// /// If we ever decide to make it possible to call the intrinsic with `a` that is not a /// power-of-two, it will probably be more prudent to just change to a naive implementation rather /// than trying to adapt this to accommodate that change. /// /// Any questions go to @nagisa. #[lang="align_offset"] pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize { /// Calculate multiplicative modular inverse of `x` modulo `m`. /// /// This implementation is tailored for align_offset and has following preconditions: /// /// * `m` is a power-of-two; /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead) /// /// Implementation of this function shall not panic. Ever. #[inline] fn mod_inv(x: usize, m: usize) -> usize { /// Multiplicative modular inverse table modulo 2⁴ = 16. 
/// /// Note, that this table does not contain values where inverse does not exist (i.e. for /// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.) const INV_TABLE_MOD_16: [usize; 8] = [1, 11, 13, 7, 9, 3, 5, 15]; /// Modulo for which the `INV_TABLE_MOD_16` is intended. const INV_TABLE_MOD: usize = 16; /// INV_TABLE_MOD² const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD; let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1]; if m <= INV_TABLE_MOD { table_inverse & (m - 1) } else { // We iterate "up" using the following formula: // // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$ // // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`. let mut inverse = table_inverse; let mut going_mod = INV_TABLE_MOD_SQUARED; loop { // y = y * (2 - xy) mod n // // Note, that we use wrapping operations here intentionally – the original formula // uses e.g. subtraction `mod n`. It is entirely fine to do them `mod // usize::max_value()` instead, because we take the result `mod n` at the end // anyway. inverse = inverse.wrapping_mul( 2usize.wrapping_sub(x.wrapping_mul(inverse)) ) & (going_mod - 1); if going_mod > m { return inverse & (m - 1); } going_mod = going_mod.wrapping_mul(going_mod); } } } let stride = ::mem::size_of::<T>(); let a_minus_one = a.wrapping_sub(1); let pmoda = p as usize & a_minus_one; if pmoda == 0 { // Already aligned. Yay! return 0; } if stride <= 1 { return if stride == 0 { // If the pointer is not aligned, and the element is zero-sized, then no amount of // elements will ever align the pointer. !0 } else { a.wrapping_sub(pmoda) }; } let smoda = stride & a_minus_one; // a is power-of-two so cannot be 0. stride = 0 is handled above. 
let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a)); let gcd = 1usize << gcdpow; if gcd == 1 { // This branch solves for the variable $o$ in following linear congruence equation: // // ⎰ p + o ≡ 0 (mod a) # $p + o$ must be aligned to specified alignment $a$ // ⎱ o ≡ 0 (mod s) # offset $o$ must be a multiple of stride $s$ // // where // // * a, s are co-prime // // This gives us the formula below: // // o = (a - (p mod a)) * (s⁻¹ mod a) * s // // The first term is “the relative alignment of p to a”, the second term is “how does // incrementing p by one s change the relative alignment of p”, the third term is // translating change in units of s to a byte count. // // Furthermore, the result produced by this solution is not “minimal”, so it is necessary // to take the result $o mod lcm(s, a)$. Since $s$ and $a$ are co-prime (i.e. $gcd(s, a) = // 1$) and $lcm(s, a) = s * a / gcd(s, a)$, we can replace $lcm(s, a)$ with just a $s * a$. // // (Author note: we decided later on to express the offset in "elements" rather than bytes, // which drops the multiplication by `s` on both sides of the modulo.) return intrinsics::unchecked_rem(a.wrapping_sub(pmoda).wrapping_mul(mod_inv(smoda, a)), a); } if p as usize & (gcd - 1) == 0 { // This can be aligned, but `a` and `stride` are not co-prime, so a somewhat adapted // formula is used. let j = a.wrapping_sub(pmoda) >> gcdpow; let k = smoda >> gcdpow; return intrinsics::unchecked_rem(j.wrapping_mul(mod_inv(k, a)), a >> gcdpow); } // Cannot be aligned at all. 
usize::max_value()
}

// Equality for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *const T {
    // `*self == *other` compares the raw pointers themselves, not the
    // pointed-to values; no dereference of the pointee happens here.
    #[inline]
    fn eq(&self, other: &*const T) -> bool { *self == *other }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *const T {}

// The same pair of impls, repeated for mutable raw pointers.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> PartialEq for *mut T {
    #[inline]
    fn eq(&self, other: &*mut T) -> bool { *self == *other }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Eq for *mut T {}

/// Compare raw pointers for equality.
///
/// This is the same as using the `==` operator, but less generic:
/// the arguments have to be `*const T` raw pointers,
/// not anything that implements `PartialEq`.
///
/// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
/// by their address rather than comparing the values they point to
/// (which is what the `PartialEq for &T` implementation does).
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let five = 5;
/// let other_five = 5;
/// let five_ref = &five;
/// let same_five_ref = &five;
/// let other_five_ref = &other_five;
///
/// assert!(five_ref == same_five_ref);
/// assert!(five_ref == other_five_ref);
///
/// assert!(ptr::eq(five_ref, same_five_ref));
/// assert!(!ptr::eq(five_ref, other_five_ref));
/// ```
#[stable(feature = "ptr_eq", since = "1.17.0")]
#[inline]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool { a == b }

// Impls for function pointers
macro_rules!
fnptr_impls_safety_abi { ($FnTy: ty, $($Arg: ident),*) => { #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> PartialEq for $FnTy { #[inline] fn eq(&self, other: &Self) -> bool { *self as usize == *other as usize } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> Eq for $FnTy {} #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> PartialOrd for $FnTy { #[inline] fn partial_cmp(&self, other: &Self) -> Option<Ordering> { (*self as usize).partial_cmp(&(*other as usize)) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> Ord for $FnTy { #[inline] fn cmp(&self, other: &Self) -> Ordering { (*self as usize).cmp(&(*other as usize)) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> hash::Hash for $FnTy { fn hash<HH: hash::Hasher>(&self, state: &mut HH) { state.write_usize(*self as usize) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> fmt::Pointer for $FnTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&(*self as *const ()), f) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> fmt::Debug for $FnTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&(*self as *const ()), f) } } } } macro_rules! fnptr_impls_args { ($($Arg: ident),+) => { fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* } }; () => { // No variadic functions with 0 parameters fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, } fnptr_impls_safety_abi! 
{ extern "C" fn() -> Ret, } fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, } fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, } }; } fnptr_impls_args! { } fnptr_impls_args! { A } fnptr_impls_args! { A, B } fnptr_impls_args! { A, B, C } fnptr_impls_args! { A, B, C, D } fnptr_impls_args! { A, B, C, D, E } fnptr_impls_args! { A, B, C, D, E, F } fnptr_impls_args! { A, B, C, D, E, F, G } fnptr_impls_args! { A, B, C, D, E, F, G, H } fnptr_impls_args! { A, B, C, D, E, F, G, H, I } fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J } fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K } fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L } // Comparison for pointers #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Ord for *const T { #[inline] fn cmp(&self, other: &*const T) -> Ordering { if self < other { Less } else if self == other { Equal } else { Greater } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> PartialOrd for *const T { #[inline] fn partial_cmp(&self, other: &*const T) -> Option<Ordering> { Some(self.cmp(other)) } #[inline] fn lt(&self, other: &*const T) -> bool { *self < *other } #[inline] fn le(&self, other: &*const T) -> bool { *self <= *other } #[inline] fn gt(&self, other: &*const T) -> bool { *self > *other } #[inline] fn ge(&self, other: &*const T) -> bool { *self >= *other } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Ord for *mut T { #[inline] fn cmp(&self, other: &*mut T) -> Ordering { if self < other { Less } else if self == other { Equal } else { Greater } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> PartialOrd for *mut T { #[inline] fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> { Some(self.cmp(other)) } #[inline] fn lt(&self, other: &*mut T) -> bool { *self < *other } #[inline] fn le(&self, other: &*mut T) -> bool { *self <= *other } #[inline] fn gt(&self, other: &*mut T) -> bool { *self > *other } #[inline] fn ge(&self, other: &*mut T) -> 
bool { *self >= *other } } /// A wrapper around a raw non-null `*mut T` that indicates that the possessor /// of this wrapper owns the referent. Useful for building abstractions like /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`. /// /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`. /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies /// the kind of strong aliasing guarantees an instance of `T` can expect: /// the referent of the pointer should not be modified without a unique path to /// its owning Unique. /// /// If you're uncertain of whether it's correct to use `Unique` for your purposes, /// consider using `NonNull`, which has weaker semantics. /// /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer /// is never dereferenced. This is so that enums may use this forbidden value /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`. /// However the pointer may still dangle if it isn't dereferenced. /// /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct /// for any type which upholds Unique's aliasing requirements. #[unstable(feature = "ptr_internals", issue = "0", reason = "use NonNull instead and consider PhantomData<T> \ (if you also use #[may_dangle]), Send, and/or Sync")] #[doc(hidden)] #[repr(transparent)] pub struct Unique<T: ?Sized> { pointer: NonZero<*const T>, // NOTE: this marker has no consequences for variance, but is necessary // for dropck to understand that we logically own a `T`. // // For details, see: // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data _marker: PhantomData<T>, } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> fmt::Debug for Unique<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } /// `Unique` pointers are `Send` if `T` is `Send` because the data they /// reference is unaliased. 
Note that this aliasing invariant is /// unenforced by the type system; the abstraction using the /// `Unique` must enforce it. #[unstable(feature = "ptr_internals", issue = "0")] unsafe impl<T: Send + ?Sized> Send for Unique<T> { } /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they /// reference is unaliased. Note that this aliasing invariant is /// unenforced by the type system; the abstraction using the /// `Unique` must enforce it. #[unstable(feature = "ptr_internals", issue = "0")] unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: Sized> Unique<T> { /// Creates a new `Unique` that is dangling, but well-aligned. /// /// This is useful for initializing types which lazily allocate, like /// `Vec::new` does. /// /// Note that the pointer value may potentially represent a valid pointer to /// a `T`, which means this must not be used as a "not yet initialized" /// sentinel value. Types that lazily allocate must track initialization by /// some other means. // FIXME: rename to dangling() to match NonNull? pub const fn empty() -> Self { unsafe { Unique::new_unchecked(mem::align_of::<T>() as *mut T) } } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> Unique<T> { /// Creates a new `Unique`. /// /// # Safety /// /// `ptr` must be non-null. pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { Unique { pointer: NonZero(ptr as _), _marker: PhantomData } } /// Creates a new `Unique` if `ptr` is non-null. pub fn new(ptr: *mut T) -> Option<Self> { if !ptr.is_null() { Some(Unique { pointer: NonZero(ptr as _), _marker: PhantomData }) } else { None } } /// Acquires the underlying `*mut` pointer. pub fn as_ptr(self) -> *mut T { self.pointer.0 as *mut T } /// Dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. 
If a longer /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. pub unsafe fn as_ref(&self) -> &T { &*self.as_ptr() } /// Mutably dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. pub unsafe fn as_mut(&mut self) -> &mut T { &mut *self.as_ptr() } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> Clone for Unique<T> { fn clone(&self) -> Self { *self } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> Copy for Unique<T> { } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> fmt::Pointer for Unique<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } #[unstable(feature = "ptr_internals", issue = "0")] impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> { fn from(reference: &'a mut T) -> Self { Unique { pointer: NonZero(reference as _), _marker: PhantomData } } } #[unstable(feature = "ptr_internals", issue = "0")] impl<'a, T: ?Sized> From<&'a T> for Unique<T> { fn from(reference: &'a T) -> Self { Unique { pointer: NonZero(reference as _), _marker: PhantomData } } } #[unstable(feature = "ptr_internals", issue = "0")] impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> { fn from(p: NonNull<T>) -> Self { Unique { pointer: p.pointer, _marker: PhantomData } } } /// `*mut T` but non-zero and covariant. /// /// This is often the correct thing to use when building data structures using /// raw pointers, but is ultimately more dangerous to use because of its additional /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`! 
/// /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer /// is never dereferenced. This is so that enums may use this forbidden value /// as a discriminant -- `Option<NonNull<T>>` has the same size as `*mut T`. /// However the pointer may still dangle if it isn't dereferenced. /// /// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect /// for your use case, you should include some PhantomData in your type to /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`. /// Usually this won't be necessary; covariance is correct for most safe abstractions, /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they /// provide a public API that follows the normal shared XOR mutable rules of Rust. #[stable(feature = "nonnull", since = "1.25.0")] #[repr(transparent)] pub struct NonNull<T: ?Sized> { pointer: NonZero<*const T>, } /// `NonNull` pointers are not `Send` because the data they reference may be aliased. // NB: This impl is unnecessary, but should provide better error messages. #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> !Send for NonNull<T> { } /// `NonNull` pointers are not `Sync` because the data they reference may be aliased. // NB: This impl is unnecessary, but should provide better error messages. #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> !Sync for NonNull<T> { } impl<T: Sized> NonNull<T> { /// Creates a new `NonNull` that is dangling, but well-aligned. /// /// This is useful for initializing types which lazily allocate, like /// `Vec::new` does. /// /// Note that the pointer value may potentially represent a valid pointer to /// a `T`, which means this must not be used as a "not yet initialized" /// sentinel value. Types that lazily allocate must track initialization by /// some other means. 
#[stable(feature = "nonnull", since = "1.25.0")] pub fn dangling() -> Self { unsafe { let ptr = mem::align_of::<T>() as *mut T; NonNull::new_unchecked(ptr) } } } impl<T: ?Sized> NonNull<T> { /// Creates a new `NonNull`. /// /// # Safety /// /// `ptr` must be non-null. #[stable(feature = "nonnull", since = "1.25.0")] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { NonNull { pointer: NonZero(ptr as _) } } /// Creates a new `NonNull` if `ptr` is non-null. #[stable(feature = "nonnull", since = "1.25.0")] pub fn new(ptr: *mut T) -> Option<Self> { if !ptr.is_null() { Some(NonNull { pointer: NonZero(ptr as _) }) } else { None } } /// Acquires the underlying `*mut` pointer. #[stable(feature = "nonnull", since = "1.25.0")] pub fn as_ptr(self) -> *mut T { self.pointer.0 as *mut T } /// Dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. #[stable(feature = "nonnull", since = "1.25.0")] pub unsafe fn as_ref(&self) -> &T { &*self.as_ptr() } /// Mutably dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. 
#[stable(feature = "nonnull", since = "1.25.0")] pub unsafe fn as_mut(&mut self) -> &mut T { &mut *self.as_ptr() } /// Cast to a pointer of another type #[stable(feature = "nonnull_cast", since = "1.27.0")] pub fn cast<U>(self) -> NonNull<U> { unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) } } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Clone for NonNull<T> { fn clone(&self) -> Self { *self } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Copy for NonNull<T> { } #[unstable(feature = "coerce_unsized", issue = "27732")] impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> fmt::Debug for NonNull<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> fmt::Pointer for NonNull<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Eq for NonNull<T> {} #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> PartialEq for NonNull<T> { fn eq(&self, other: &Self) -> bool { self.as_ptr() == other.as_ptr() } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Ord for NonNull<T> { fn cmp(&self, other: &Self) -> Ordering { self.as_ptr().cmp(&other.as_ptr()) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> PartialOrd for NonNull<T> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.as_ptr().partial_cmp(&other.as_ptr()) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> hash::Hash for NonNull<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.as_ptr().hash(state) } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> From<Unique<T>> for NonNull<T> { fn from(unique: Unique<T>) -> Self { NonNull { pointer: 
unique.pointer } } } #[stable(feature = "nonnull", since = "1.25.0")] impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> { fn from(reference: &'a mut T) -> Self { NonNull { pointer: NonZero(reference as _) } } } #[stable(feature = "nonnull", since = "1.25.0")] impl<'a, T: ?Sized> From<&'a T> for NonNull<T> { fn from(reference: &'a T) -> Self { NonNull { pointer: NonZero(reference as _) } } } Mention alignment in top-level docs This also removes the overlong link that failed tidy xD. // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory //! Manually manage memory through raw pointers. //! //! *[See also the pointer primitive types](../../std/primitive.pointer.html).* //! //! # Safety //! //! Many functions in this module take raw pointers as arguments and dereference //! them. For this to be safe, these pointers must be valid. A valid pointer //! is one that satisfies **all** of the following conditions: //! //! * The pointer is not null. //! * The pointer is not dangling (it does not point to memory which has been //! freed). //! * The pointer satisfies [LLVM's pointer aliasing rules]. //! //! Valid pointers are not necessarily properly aligned. However, except for //! [`read_unaligned`] and [`write_unaligned`], most functions require their //! arguments to be aligned. Any alignment requirements will be explicitly //! stated in the function's documentation. //! //! [LLVM's pointer aliasing rules]: https://llvm.org/docs/LangRef.html#pointer-aliasing-rules //! 
[`read_unaligned`]: ./fn.read_unaligned.html
//! [`write_unaligned`]: ./fn.write_unaligned.html

#![stable(feature = "rust1", since = "1.0.0")]

use convert::From;
use intrinsics;
use ops::CoerceUnsized;
use fmt;
use hash;
use marker::{PhantomData, Unsize};
use mem;
use nonzero::NonZero;

use cmp::Ordering::{self, Less, Equal, Greater};

#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy_nonoverlapping;

#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy;

#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::write_bytes;

/// Executes the destructor (if any) of the pointed-to value.
///
/// This is semantically equivalent to calling [`ptr::read`] and discarding
/// the result, but has the following advantages:
///
/// * It is *required* to use `drop_in_place` to drop unsized types like
///   trait objects, because they can't be read out onto the stack and
///   dropped normally.
///
/// * It is friendlier to the optimizer to do this over [`ptr::read`] when
///   dropping manually allocated memory (e.g. when writing Box/Rc/Vec),
///   as the compiler doesn't need to prove that it's sound to elide the
///   copy.
///
/// [`ptr::read`]: ../ptr/fn.read.html
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `to_drop` must be [valid].
///
/// * `to_drop` must be properly aligned.
///
/// Additionally, if `T` is not [`Copy`], using the pointed-to value after
/// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
/// foo` counts as a use because it will cause the value to be dropped
/// again. [`write`] can be used to overwrite data without causing it to be
/// dropped.
/// /// [valid]: ../ptr/index.html#safety /// [`Copy`]: ../marker/trait.Copy.html /// [`write`]: ../ptr/fn.write.html /// /// # Examples /// /// Manually remove the last item from a vector: /// /// ``` /// use std::ptr; /// use std::rc::Rc; /// /// let last = Rc::new(1); /// let weak = Rc::downgrade(&last); /// /// let mut v = vec![Rc::new(0), last]; /// /// unsafe { /// // Without a call `drop_in_place`, the last item would never be dropped, /// // and the memory it manages would be leaked. /// ptr::drop_in_place(&mut v[1]); /// v.set_len(1); /// } /// /// assert_eq!(v, &[0.into()]); /// /// // Ensure that the last item was dropped. /// assert!(weak.upgrade().is_none()); /// ``` #[stable(feature = "drop_in_place", since = "1.8.0")] #[lang = "drop_in_place"] #[allow(unconditional_recursion)] pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { // Code here does not matter - this is replaced by the // real drop glue by the compiler. drop_in_place(to_drop); } /// Creates a null raw pointer. /// /// # Examples /// /// ``` /// use std::ptr; /// /// let p: *const i32 = ptr::null(); /// assert!(p.is_null()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub const fn null<T>() -> *const T { 0 as *const T } /// Creates a null mutable raw pointer. /// /// # Examples /// /// ``` /// use std::ptr; /// /// let p: *mut i32 = ptr::null_mut(); /// assert!(p.is_null()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub const fn null_mut<T>() -> *mut T { 0 as *mut T } /// Swaps the values at two mutable locations of the same type, without /// deinitializing either. /// /// But for the following two exceptions, this function is semantically /// equivalent to [`mem::swap`]: /// /// * It operates on raw pointers instead of references. When references are /// available, [`mem::swap`] should be preferred. /// /// * The two pointed-to values may overlap. If the values do overlap, then the /// overlapping region of memory from `x` will be used. 
This is demonstrated /// in the examples below. /// /// [`mem::swap`]: ../mem/fn.swap.html /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * Both `x` and `y` must be [valid]. /// /// * Both `x` and `y` must be properly aligned. /// /// [valid]: ../ptr/index.html#safety /// /// # Examples /// /// Swapping two non-overlapping regions: /// /// ``` /// use std::ptr; /// /// let mut array = [0, 1, 2, 3]; /// /// let x = array[0..].as_mut_ptr() as *mut [u32; 2]; /// let y = array[2..].as_mut_ptr() as *mut [u32; 2]; /// /// unsafe { /// ptr::swap(x, y); /// assert_eq!([2, 3, 0, 1], array); /// } /// ``` /// /// Swapping two overlapping regions: /// /// ``` /// use std::ptr; /// /// let mut array = [0, 1, 2, 3]; /// /// let x = array[0..].as_mut_ptr() as *mut [u32; 3]; /// let y = array[1..].as_mut_ptr() as *mut [u32; 3]; /// /// unsafe { /// ptr::swap(x, y); /// assert_eq!([1, 0, 1, 2], array); /// } /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn swap<T>(x: *mut T, y: *mut T) { // Give ourselves some scratch space to work with let mut tmp: T = mem::uninitialized(); // Perform the swap copy_nonoverlapping(x, &mut tmp, 1); copy(y, x, 1); // `x` and `y` may overlap copy_nonoverlapping(&tmp, y, 1); // y and t now point to the same thing, but we need to completely forget `tmp` // because it's no longer relevant. mem::forget(tmp); } /// Swaps a sequence of values at two mutable locations of the same type. /// /// # Safety /// /// The two arguments must each point to the beginning of `count` locations /// of valid memory, and the two memory ranges must not overlap. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// use std::ptr; /// /// let mut x = [1, 2, 3, 4]; /// let mut y = [7, 8, 9]; /// /// unsafe { /// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2); /// } /// /// assert_eq!(x, [7, 8, 3, 4]); /// assert_eq!(y, [1, 2, 9]); /// ``` #[inline] #[stable(feature = "swap_nonoverlapping", since = "1.27.0")] pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) { let x = x as *mut u8; let y = y as *mut u8; let len = mem::size_of::<T>() * count; swap_nonoverlapping_bytes(x, y, len) } #[inline] pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) { // For types smaller than the block optimization below, // just swap directly to avoid pessimizing codegen. if mem::size_of::<T>() < 32 { let z = read(x); copy_nonoverlapping(y, x, 1); write(y, z); } else { swap_nonoverlapping(x, y, 1); } } #[inline] unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { // The approach here is to utilize simd to swap x & y efficiently. Testing reveals // that swapping either 32 bytes or 64 bytes at a time is most efficient for intel // Haswell E processors. LLVM is more able to optimize if we give a struct a // #[repr(simd)], even if we don't actually use this struct directly. // // FIXME repr(simd) broken on emscripten and redox // It's also broken on big-endian powerpc64 and s390x. #42778 #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox", target_endian = "big")), repr(simd))] struct Block(u64, u64, u64, u64); struct UnalignedBlock(u64, u64, u64, u64); let block_size = mem::size_of::<Block>(); // Loop through x & y, copying them `Block` at a time // The optimizer should unroll the loop fully for most types // N.B. 
We can't use a for loop as the `range` impl calls `mem::swap` recursively let mut i = 0; while i + block_size <= len { // Create some uninitialized memory as scratch space // Declaring `t` here avoids aligning the stack when this loop is unused let mut t: Block = mem::uninitialized(); let t = &mut t as *mut _ as *mut u8; let x = x.add(i); let y = y.add(i); // Swap a block of bytes of x & y, using t as a temporary buffer // This should be optimized into efficient SIMD operations where available copy_nonoverlapping(x, t, block_size); copy_nonoverlapping(y, x, block_size); copy_nonoverlapping(t, y, block_size); i += block_size; } if i < len { // Swap any remaining bytes let mut t: UnalignedBlock = mem::uninitialized(); let rem = len - i; let t = &mut t as *mut _ as *mut u8; let x = x.add(i); let y = y.add(i); copy_nonoverlapping(x, t, rem); copy_nonoverlapping(y, x, rem); copy_nonoverlapping(t, y, rem); } } /// Moves `src` into the pointed `dest`, returning the previous `dest` value. /// /// Neither value is dropped. /// /// This function is semantically equivalent to [`mem::replace`] except that it /// operates on raw pointers instead of references. When references are /// available, [`mem::replace`] should be preferred. /// /// [`mem::replace`]: ../mem/fn.replace.html /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * `dest` must be [valid]. /// /// * `dest` must be properly aligned. /// /// [valid]: ../ptr/index.html#safety /// /// # Examples /// /// ``` /// use std::ptr; /// /// let mut rust = vec!['b', 'u', 's', 't']; /// /// // `mem::replace` would have the same effect without requiring the unsafe /// // block. 
/// let b = unsafe { /// ptr::replace(&mut rust[0], 'r') /// }; /// /// assert_eq!(b, 'b'); /// assert_eq!(rust, &['r', 'u', 's', 't']); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T { mem::swap(&mut *dest, &mut src); // cannot overlap src } /// Reads the value from `src` without moving it. This leaves the /// memory in `src` unchanged. /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * `src` must be [valid]. /// /// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the /// case. /// /// ## Ownership of the Returned Value /// /// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`]. /// If `T` is not [`Copy`], using both the returned value and the value at /// `*src` can violate memory safety. Note that assigning to `src` counts as a /// use because it will attempt to drop the value at `*src`. /// /// [`write`] can be used to overwrite data without causing it to be dropped. /// /// [valid]: ../ptr/index.html#safety /// [`Copy`]: ../marker/trait.Copy.html /// [`read_unaligned`]: ./fn.read_unaligned.html /// [`write`]: ./fn.write.html /// /// ``` /// use std::ptr; /// /// let mut s = String::new("foo"); /// unsafe { /// // `s2` now points to the same underlying memory as `s1`. /// let mut s2 = ptr::read(&s); /// /// assert_eq!(s2, "foo"); /// /// // Assigning to `s2` causes its original value to be dropped. Beyond /// // this point, `s` must no longer be used, as the underlying memory has /// // been freed. /// s2 = String::default(); /// /// // Assigning to `s` would cause the old value to be dropped again, /// // resulting in undefined behavior. /// // s = String::new("bar"); // ERROR /// /// // `ptr::write` can be used to overwrite a value without dropping it. 
/// ptr::write(&s, String::new("bar")); /// } /// /// assert_eq!(s, "bar"); /// ``` /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(std::ptr::read(y), 12); /// } /// ``` /// /// Manually implement [`mem::swap`]: /// /// ``` /// use std::ptr; /// /// fn swap<T>(a: &mut T, b: &mut T) { /// unsafe { /// // Create a bitwise copy of the value at `a` in `tmp`. /// let tmp = ptr::read(a); /// /// // Exiting at this point (either by explicitly returning or by /// // calling a function which panics) would cause the value in `tmp` to /// // be dropped while the same value is still referenced by `a`. This /// // could trigger undefined behavior if `T` is not `Copy`. /// /// // Create a bitwise copy of the value at `b` in `a`. /// // This is safe because mutable references cannot alias. /// ptr::copy_nonoverlapping(b, a, 1); /// /// // As above, exiting here could trigger undefined behavior because /// // the same value is referenced by `a` and `b`. /// /// // Move `tmp` into `b`. /// ptr::write(b, tmp); /// } /// } /// /// let mut foo = "foo".to_owned(); /// let mut bar = "bar".to_owned(); /// /// swap(&mut foo, &mut bar); /// /// assert_eq!(foo, "bar"); /// assert_eq!(bar, "foo"); /// ``` /// /// [`mem::swap`]: ../mem/fn.swap.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn read<T>(src: *const T) -> T { let mut tmp: T = mem::uninitialized(); copy_nonoverlapping(src, &mut tmp, 1); tmp } /// Reads the value from `src` without moving it. This leaves the /// memory in `src` unchanged. /// /// Unlike [`read`], `read_unaligned` works with unaligned pointers. /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * `src` must be [valid]. /// /// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of /// whether `T` is [`Copy`]. 
If `T` is not [`Copy`], using both the returned /// value and the value at `*src` can [violate memory safety][read-ownership]. /// /// [`Copy`]: ../marker/trait.Copy.html /// [`read`]: ./fn.read.html /// [`write_unaligned`]: ./fn.write_unaligned.html /// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value /// [valid]: ../ptr/index.html#safety /// /// # Examples /// /// Access members of a packed struct by reference: /// /// ``` /// use std::ptr; /// /// #[repr(packed, C)] /// #[derive(Default)] /// struct Packed { /// _padding: u8, /// unaligned: u32, /// } /// /// let x = Packed { /// _padding: 0x00, /// unaligned: 0x01020304, /// }; /// /// let v = unsafe { /// // Take a reference to a 32-bit integer which is not aligned. /// let unaligned = &x.unaligned; /// /// // Dereferencing normally will emit an unaligned load instruction, /// // causing undefined behavior. /// // let v = *unaligned; // ERROR /// /// // Instead, use `read_unaligned` to read improperly aligned values. /// let v = ptr::read_unaligned(unaligned); /// /// v /// }; /// /// // Accessing unaligned values directly is safe. /// assert!(x.unaligned == v); /// ``` #[inline] #[stable(feature = "ptr_unaligned", since = "1.17.0")] pub unsafe fn read_unaligned<T>(src: *const T) -> T { let mut tmp: T = mem::uninitialized(); copy_nonoverlapping(src as *const u8, &mut tmp as *mut T as *mut u8, mem::size_of::<T>()); tmp } /// Overwrites a memory location with the given value without reading or /// dropping the old value. /// /// `write` does not drop the contents of `dst`. This is safe, but it could leak /// allocations or resources, so care must be taken not to overwrite an object /// that should be dropped. /// /// Additionally, it does not drop `src`. Semantically, `src` is moved into the /// location pointed to by `dst`. /// /// This is appropriate for initializing uninitialized memory, or overwriting /// memory that has previously been [`read`] from. 
/// /// [`read`]: ./fn.read.html /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * `dst` must be [valid]. /// /// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the /// case. /// /// [valid]: ../ptr/index.html#safety /// [`write_unaligned`]: ./fn.write_unaligned.html /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut x = 0; /// let y = &mut x as *mut i32; /// let z = 12; /// /// unsafe { /// std::ptr::write(y, z); /// assert_eq!(std::ptr::read(y), 12); /// } /// ``` /// /// Manually implement [`mem::swap`]: /// /// ``` /// use std::ptr; /// /// fn swap<T>(a: &mut T, b: &mut T) { /// unsafe { /// let tmp = ptr::read(a); /// ptr::copy_nonoverlapping(b, a, 1); /// ptr::write(b, tmp); /// } /// } /// /// let mut foo = "foo".to_owned(); /// let mut bar = "bar".to_owned(); /// /// swap(&mut foo, &mut bar); /// /// assert_eq!(foo, "bar"); /// assert_eq!(bar, "foo"); /// ``` /// /// [`mem::swap`]: ../mem/fn.swap.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn write<T>(dst: *mut T, src: T) { intrinsics::move_val_init(&mut *dst, src) } /// Overwrites a memory location with the given value without reading or /// dropping the old value. /// /// Unlike [`write`], the pointer may be unaligned. /// /// `write_unaligned` does not drop the contents of `dst`. This is safe, but it /// could leak allocations or resources, so care must be taken not to overwrite /// an object that should be dropped. /// /// Additionally, it does not drop `src`. Semantically, `src` is moved into the /// location pointed to by `dst`. /// /// This is appropriate for initializing uninitialized memory, or overwriting /// memory that has previously been read with [`read_unaligned`]. 
/// /// [`write`]: ./fn.write.html /// [`read_unaligned`]: ./fn.read_unaligned.html /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * `dst` must be [valid]. /// /// [valid]: ../ptr/index.html#safety /// /// # Examples /// /// Access fields in a packed struct: /// /// ``` /// use std::{mem, ptr}; /// /// #[repr(packed, C)] /// #[derive(Default)] /// struct Packed { /// _padding: u8, /// unaligned: u32, /// } /// /// let v = 0x01020304; /// let mut x: Packed = unsafe { mem::zeroed() }; /// /// unsafe { /// // Take a reference to a 32-bit integer which is not aligned. /// let unaligned = &mut x.unaligned; /// /// // Dereferencing normally will emit an unaligned store instruction, /// // causing undefined behavior. /// // *unaligned = v; // ERROR /// /// // Instead, use `write_unaligned` to write improperly aligned values. /// ptr::write_unaligned(unaligned, v); /// } /// /// // Accessing unaligned values directly is safe. /// assert!(x.unaligned == v); /// ``` #[inline] #[stable(feature = "ptr_unaligned", since = "1.17.0")] pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) { copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>()); mem::forget(src); } /// Performs a volatile read of the value from `src` without moving it. This /// leaves the memory in `src` unchanged. /// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// Memory read with `read_volatile` should almost always be written to using /// [`write_volatile`]. /// /// [`write_volatile`]: ./fn.write_volatile.html /// /// # Notes /// /// Rust does not currently have a rigorously and formally defined memory model, /// so the precise semantics of what "volatile" means here is subject to change /// over time. 
That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid].
///
/// * `src` must be properly aligned.
///
/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
/// However, storing non-[`Copy`] types in volatile memory is almost certainly
/// incorrect.
///
/// [valid]: ../ptr/index.html#safety
/// [`Copy`]: ../marker/trait.Copy.html
/// [`read`]: ./fn.read.html
/// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `read_volatile` and any write operation to the same location
/// is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
    // Lowers to a single volatile load, which the compiler will neither elide
    // nor reorder across other volatile operations.
    intrinsics::volatile_load(src)
}

/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
/// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// Memory written with `write_volatile` should almost always be read from using /// [`read_volatile`]. /// /// `write_volatile` does not drop the contents of `dst`. This is safe, but it /// could leak allocations or resources, so care must be taken not to overwrite /// an object that should be dropped. /// /// Additionally, it does not drop `src`. Semantically, `src` is moved into the /// location pointed to by `dst`. /// /// [`read_volatile`]: ./fn.read_volatile.html /// /// # Notes /// /// Rust does not currently have a rigorously and formally defined memory model, /// so the precise semantics of what "volatile" means here is subject to change /// over time. That being said, the semantics will almost always end up pretty /// similar to [C11's definition of volatile][c11]. /// /// The compiler shouldn't change the relative order or number of volatile /// memory operations. However, volatile memory operations on zero-sized types /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops /// and may be ignored. /// /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf /// /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// * `dst` must be [valid]. /// /// * `dst` must be properly aligned. /// /// [valid]: ../ptr/index.html#safety /// /// Just like in C, whether an operation is volatile has no bearing whatsoever /// on questions involving concurrent access from multiple threads. Volatile /// accesses behave exactly like non-atomic accesses in that regard. In particular, /// a race between a `write_volatile` and any other operation (reading or writing) /// on the same location is undefined behavior. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// let mut x = 0; /// let y = &mut x as *mut i32; /// let z = 12; /// /// unsafe { /// std::ptr::write_volatile(y, z); /// assert_eq!(std::ptr::read_volatile(y), 12); /// } /// ``` #[inline] #[stable(feature = "volatile", since = "1.9.0")] pub unsafe fn write_volatile<T>(dst: *mut T, src: T) { intrinsics::volatile_store(dst, src); } #[lang = "const_ptr"] impl<T: ?Sized> *const T { /// Returns `true` if the pointer is null. /// /// Note that unsized types have many possible null pointers, as only the /// raw data pointer is considered, not their length, vtable, etc. /// Therefore, two pointers that are null may still not compare equal to /// each other. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "Follow the rabbit"; /// let ptr: *const u8 = s.as_ptr(); /// assert!(!ptr.is_null()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_null(self) -> bool { // Compare via a cast to a thin pointer, so fat pointers are only // considering their "data" part for null-ness. (self as *const u8) == null() } /// Returns `None` if the pointer is null, or else returns a reference to /// the value wrapped in `Some`. /// /// # Safety /// /// While this method and its mutable counterpart are useful for /// null-safety, it is important to note that this is still an unsafe /// operation because the returned value could be pointing to invalid /// memory. /// /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does /// not necessarily reflect the actual lifetime of the data. 
///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     if let Some(val_back) = ptr.as_ref() {
    ///         println!("We got back the value: {}!", val_back);
    ///     }
    /// }
    /// ```
    ///
    /// # Null-unchecked version
    ///
    /// If you are sure the pointer can never be null and are looking for some kind of
    /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
    /// dereference the pointer directly.
    ///
    /// ```
    /// let ptr: *const u8 = &10u8 as *const u8;
    ///
    /// unsafe {
    ///     let val_back = &*ptr;
    ///     println!("We got back the value: {}!", val_back);
    /// }
    /// ```
    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
    #[inline]
    pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
        // The null check yields the `None` case; any non-null pointer is
        // trusted to be valid for the caller-chosen lifetime `'a`.
        if self.is_null() {
            None
        } else {
            Some(&*self)
        }
    }

    /// Calculates the offset from a pointer.
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// If any of the following conditions are violated, the result is Undefined
    /// Behavior:
    ///
    /// * Both the starting and resulting pointer must be either in bounds or one
    ///   byte past the end of *the same* allocated object.
    ///
    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
    ///
    /// * The offset being in bounds cannot rely on "wrapping around" the address
    ///   space. That is, the infinite-precision sum, **in bytes** must fit in a usize.
    ///
    /// The compiler and standard library generally tries to ensure allocations
    /// never reach a size where an offset is a concern. For instance, `Vec`
    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
    /// `vec.as_ptr().add(vec.len())` is always safe.
    ///
    /// Most platforms fundamentally can't even construct such an allocation.
    /// For instance, no known 64-bit platform can ever serve a request
    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using `wrapping_offset` instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     println!("{}", *ptr.offset(1) as char);
    ///     println!("{}", *ptr.offset(2) as char);
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
        // The `offset` intrinsic carries the in-bounds guarantee to LLVM
        // (`getelementptr inbounds`), enabling aggressive optimization.
        intrinsics::offset(self, count)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    /// In particular, the resulting pointer may *not* be used to access a
    /// different allocated object than the one `self` points to. In other
    /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is
    /// *not* the same as `y`, and dereferencing it is undefined behavior
    /// unless `x` and `y` point into the same allocated object.
    ///
    /// Always use `.offset(count)` instead when possible, because `offset`
    /// allows the compiler to optimize better. If you need to cross object
    /// boundaries, cast the pointer to an integer and do the arithmetic there.
/// /// # Examples /// /// Basic usage: /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; /// let mut ptr: *const u8 = data.as_ptr(); /// let step = 2; /// let end_rounded_up = ptr.wrapping_offset(6); /// /// // This loop prints "1, 3, 5, " /// while ptr != end_rounded_up { /// unsafe { /// print!("{}, ", *ptr); /// } /// ptr = ptr.wrapping_offset(step); /// } /// ``` #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")] #[inline] pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized { unsafe { intrinsics::arith_offset(self, count) } } /// Calculates the distance between two pointers. The returned value is in /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`. /// /// This function is the inverse of [`offset`]. /// /// [`offset`]: #method.offset /// [`wrapping_offset_from`]: #method.wrapping_offset_from /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and other pointer must be either in bounds or one /// byte past the end of the same allocated object. /// /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`. /// /// * The distance between the pointers, in bytes, must be an exact multiple /// of the size of `T`. /// /// * The distance being in bounds cannot rely on "wrapping around" the address space. /// /// The compiler and standard library generally try to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. 
However, some 32-bit and 16-bit platforms may successfully serve a request for
    /// more than `isize::MAX` bytes with things like Physical Address
    /// Extension. As such, memory acquired directly from allocators or memory
    /// mapped files *may* be too large to handle with this function.
    ///
    /// Consider using [`wrapping_offset_from`] instead if these constraints are
    /// difficult to satisfy. The only advantage of this method is that it
    /// enables more aggressive compiler optimizations.
    ///
    /// # Panics
    ///
    /// This function panics if `T` is a Zero-Sized Type ("ZST").
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(ptr_offset_from)]
    ///
    /// let a = [0; 5];
    /// let ptr1: *const i32 = &a[1];
    /// let ptr2: *const i32 = &a[3];
    /// unsafe {
    ///     assert_eq!(ptr2.offset_from(ptr1), 2);
    ///     assert_eq!(ptr1.offset_from(ptr2), -2);
    ///     assert_eq!(ptr1.offset(2), ptr2);
    ///     assert_eq!(ptr2.offset(-2), ptr1);
    /// }
    /// ```
    #[unstable(feature = "ptr_offset_from", issue = "41079")]
    #[inline]
    pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized {
        let pointee_size = mem::size_of::<T>();
        // A ZST has no meaningful byte distance, and this also guards the
        // exact division below against a zero divisor.
        assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize);

        // This is the same sequence that Clang emits for pointer subtraction.
        // It can be neither `nsw` nor `nuw` because the input is treated as
        // unsigned but then the output is treated as signed, so neither works.
        let d = isize::wrapping_sub(self as _, origin as _);
        intrinsics::exact_div(d, pointee_size as _)
    }

    /// Calculates the distance between two pointers. The returned value is in
    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
    ///
    /// If the address difference between the two pointers is not a multiple of
    /// `mem::size_of::<T>()` then the result of the division is rounded towards
    /// zero.
/// /// Though this method is safe for any two pointers, note that its result /// will be mostly useless if the two pointers aren't into the same allocated /// object, for example if they point to two different local variables. /// /// # Panics /// /// This function panics if `T` is a zero-sized type. /// /// # Examples /// /// Basic usage: /// /// ``` /// #![feature(ptr_wrapping_offset_from)] /// /// let a = [0; 5]; /// let ptr1: *const i32 = &a[1]; /// let ptr2: *const i32 = &a[3]; /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2); /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2); /// assert_eq!(ptr1.wrapping_offset(2), ptr2); /// assert_eq!(ptr2.wrapping_offset(-2), ptr1); /// /// let ptr1: *const i32 = 3 as _; /// let ptr2: *const i32 = 13 as _; /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2); /// ``` #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")] #[inline] pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized { let pointee_size = mem::size_of::<T>(); assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize); let d = isize::wrapping_sub(self as _, origin as _); d.wrapping_div(pointee_size as _) } /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`). /// /// `count` is in units of T; e.g. a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and resulting pointer must be either in bounds or one /// byte past the end of an allocated object. /// /// * The computed offset, **in bytes**, cannot overflow an `isize`. /// /// * The offset being in bounds cannot rely on "wrapping around" the address /// space. That is, the infinite-precision sum must fit in a `usize`. /// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. 
For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); /// /// unsafe { /// println!("{}", *ptr.add(1) as char); /// println!("{}", *ptr.add(2) as char); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn add(self, count: usize) -> Self where T: Sized, { self.offset(count as isize) } /// Calculates the offset from a pointer (convenience for /// `.offset((count as isize).wrapping_neg())`). /// /// `count` is in units of T; e.g. a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and resulting pointer must be either in bounds or one /// byte past the end of an allocated object. /// /// * The computed offset cannot exceed `isize::MAX` **bytes**. /// /// * The offset being in bounds cannot rely on "wrapping around" the address /// space. That is, the infinite-precision sum must fit in a usize. 
/// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "123"; /// /// unsafe { /// let end: *const u8 = s.as_ptr().add(3); /// println!("{}", *end.sub(1) as char); /// println!("{}", *end.sub(2) as char); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn sub(self, count: usize) -> Self where T: Sized, { self.offset((count as isize).wrapping_neg()) } /// Calculates the offset from a pointer using wrapping arithmetic. /// (convenience for `.wrapping_offset(count as isize)`) /// /// `count` is in units of T; e.g. a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). /// /// Always use `.add(count)` instead when possible, because `add` /// allows the compiler to optimize better. 
///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let step = 2;
    /// let end_rounded_up = ptr.wrapping_add(6);
    ///
    /// // This loop prints "1, 3, 5, "
    /// while ptr != end_rounded_up {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_add(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub fn wrapping_add(self, count: usize) -> Self
        where T: Sized,
    {
        self.wrapping_offset(count as isize)
    }

    /// Calculates the offset from a pointer using wrapping arithmetic.
    /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
    ///
    /// `count` is in units of T; e.g. a `count` of 3 represents a pointer
    /// offset of `3 * size_of::<T>()` bytes.
    ///
    /// # Safety
    ///
    /// The resulting pointer does not need to be in bounds, but it is
    /// potentially hazardous to dereference (which requires `unsafe`).
    ///
    /// Always use `.sub(count)` instead when possible, because `sub`
    /// allows the compiler to optimize better.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // Iterate using a raw pointer in increments of two elements (backwards)
    /// let data = [1u8, 2, 3, 4, 5];
    /// let mut ptr: *const u8 = data.as_ptr();
    /// let start_rounded_down = ptr.wrapping_sub(2);
    /// ptr = ptr.wrapping_add(4);
    /// let step = 2;
    /// // This loop prints "5, 3, 1, "
    /// while ptr != start_rounded_down {
    ///     unsafe {
    ///         print!("{}, ", *ptr);
    ///     }
    ///     ptr = ptr.wrapping_sub(step);
    /// }
    /// ```
    #[stable(feature = "pointer_methods", since = "1.26.0")]
    #[inline]
    pub fn wrapping_sub(self, count: usize) -> Self
        where T: Sized,
    {
        // `wrapping_neg` on `isize::MIN` is a no-op, matching the two's
        // complement wrap-around semantics of `wrapping_offset` itself.
        self.wrapping_offset((count as isize).wrapping_neg())
    }

    /// Reads the value from `self` without moving it. This leaves the
    /// memory in `self` unchanged.
/// /// # Safety /// /// Beyond accepting a raw pointer, this is unsafe because it semantically /// moves the value out of `self` without preventing further usage of `self`. /// If `T` is not `Copy`, then care must be taken to ensure that the value at /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// /// The pointer must be aligned; use `read_unaligned` if that is not the case. /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(y.read(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read(self) -> T where T: Sized, { read(self) } /// Performs a volatile read of the value from `self` without moving it. This /// leaves the memory in `self` unchanged. /// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// # Notes /// /// Rust does not currently have a rigorously and formally defined memory model, /// so the precise semantics of what "volatile" means here is subject to change /// over time. That being said, the semantics will almost always end up pretty /// similar to [C11's definition of volatile][c11]. /// /// The compiler shouldn't change the relative order or number of volatile /// memory operations. However, volatile memory operations on zero-sized types /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops /// and may be ignored. /// /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf /// /// # Safety /// /// Beyond accepting a raw pointer, this is unsafe because it semantically /// moves the value out of `self` without preventing further usage of `self`. 
/// If `T` is not `Copy`, then care must be taken to ensure that the value at /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// /// Just like in C, whether an operation is volatile has no bearing whatsoever /// on questions involving concurrent access from multiple threads. Volatile /// accesses behave exactly like non-atomic accesses in that regard. In particular, /// a race between a `read_volatile` and any write operation to the same location /// is undefined behavior. /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(y.read_volatile(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_volatile(self) -> T where T: Sized, { read_volatile(self) } /// Reads the value from `self` without moving it. This leaves the /// memory in `self` unchanged. /// /// Unlike `read`, the pointer may be unaligned. /// /// # Safety /// /// Beyond accepting a raw pointer, this is unsafe because it semantically /// moves the value out of `self` without preventing further usage of `self`. /// If `T` is not `Copy`, then care must be taken to ensure that the value at /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(y.read_unaligned(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_unaligned(self) -> T where T: Sized, { read_unaligned(self) } /// Copies `count * size_of<T>` bytes from `self` to `dest`. 
The source
/// and destination may overlap.
///
/// NOTE: this has the *same* argument order as `ptr::copy`.
///
/// This is semantically equivalent to C's `memmove`.
///
/// # Safety
///
/// Care must be taken with the ownership of `self` and `dest`.
/// This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
///
/// # Examples
///
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to(dst.as_mut_ptr(), elts);
///     dst
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn copy_to(self, dest: *mut T, count: usize)
    where T: Sized,
{
    copy(self, dest, count)
}

/// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
/// and destination may *not* overlap.
///
/// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
///
/// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
///
/// # Safety
///
/// Beyond requiring that the program must be allowed to access both regions
/// of memory, it is Undefined Behavior for source and destination to
/// overlap. Care must also be taken with the ownership of `self` and
/// `dest`. This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
///
/// # Examples
///
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts);
///     dst
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
    where T: Sized,
{
    copy_nonoverlapping(self, dest, count)
}

/// Computes the offset that needs to be applied to the pointer in order to make it aligned to
/// `align`.
///
/// If it is not possible to align the pointer, the implementation returns
/// `usize::max_value()`.
///
/// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
/// used with the `offset` or `offset_to` methods.
///
/// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
/// beyond the allocation that the pointer points into. It is up to the caller to ensure that
/// the returned offset is correct in all terms other than alignment.
///
/// # Panics
///
/// The function panics if `align` is not a power-of-two.
/// /// # Examples /// /// Accessing adjacent `u8` as `u16` /// /// ``` /// # #![feature(align_offset)] /// # fn foo(n: usize) { /// # use std::mem::align_of; /// # unsafe { /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; /// let ptr = &x[n] as *const u8; /// let offset = ptr.align_offset(align_of::<u16>()); /// if offset < x.len() - n - 1 { /// let u16_ptr = ptr.add(offset) as *const u16; /// assert_ne!(*u16_ptr, 500); /// } else { /// // while the pointer can be aligned via `offset`, it would point /// // outside the allocation /// } /// # } } /// ``` #[unstable(feature = "align_offset", issue = "44488")] pub fn align_offset(self, align: usize) -> usize where T: Sized { if !align.is_power_of_two() { panic!("align_offset: align is not a power-of-two"); } unsafe { align_offset(self, align) } } } #[lang = "mut_ptr"] impl<T: ?Sized> *mut T { /// Returns `true` if the pointer is null. /// /// Note that unsized types have many possible null pointers, as only the /// raw data pointer is considered, not their length, vtable, etc. /// Therefore, two pointers that are null may still not compare equal to /// each other. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// assert!(!ptr.is_null()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_null(self) -> bool { // Compare via a cast to a thin pointer, so fat pointers are only // considering their "data" part for null-ness. (self as *mut u8) == null_mut() } /// Returns `None` if the pointer is null, or else returns a reference to /// the value wrapped in `Some`. /// /// # Safety /// /// While this method and its mutable counterpart are useful for /// null-safety, it is important to note that this is still an unsafe /// operation because the returned value could be pointing to invalid /// memory. /// /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does /// not necessarily reflect the actual lifetime of the data. 
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let ptr: *mut u8 = &mut 10u8 as *mut u8;
///
/// unsafe {
///     if let Some(val_back) = ptr.as_ref() {
///         println!("We got back the value: {}!", val_back);
///     }
/// }
/// ```
///
/// # Null-unchecked version
///
/// If you are sure the pointer can never be null and are looking for some kind of
/// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
/// dereference the pointer directly.
///
/// ```
/// let ptr: *mut u8 = &mut 10u8 as *mut u8;
///
/// unsafe {
///     let val_back = &*ptr;
///     println!("We got back the value: {}!", val_back);
/// }
/// ```
#[stable(feature = "ptr_as_ref", since = "1.9.0")]
#[inline]
pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
    if self.is_null() {
        None
    } else {
        Some(&*self)
    }
}

/// Calculates the offset from a pointer.
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
///
/// * Both the starting and resulting pointer must be either in bounds or one
///   byte past the end of *the same* allocated object.
///
/// * The computed offset, **in bytes**, cannot overflow an `isize`.
///
/// * The offset being in bounds cannot rely on "wrapping around" the address
///   space. That is, the infinite-precision sum, **in bytes** must fit in a usize.
///
/// The compiler and standard library generally tries to ensure allocations
/// never reach a size where an offset is a concern. For instance, `Vec`
/// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
/// `vec.as_ptr().add(vec.len())` is always safe.
///
/// Most platforms fundamentally can't even construct such an allocation.
/// For instance, no known 64-bit platform can ever serve a request
/// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
/// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// /// unsafe { /// println!("{}", *ptr.offset(1)); /// println!("{}", *ptr.offset(2)); /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized { intrinsics::offset(self, count) as *mut T } /// Calculates the offset from a pointer using wrapping arithmetic. /// `count` is in units of T; e.g. a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). /// In particular, the resulting pointer may *not* be used to access a /// different allocated object than the one `self` points to. In other /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is /// *not* the same as `y`, and dereferencing it is undefined behavior /// unless `x` and `y` point into the same allocated object. /// /// Always use `.offset(count)` instead when possible, because `offset` /// allows the compiler to optimize better. If you need to cross object /// boundaries, cast the pointer to an integer and do the arithmetic there. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let mut data = [1u8, 2, 3, 4, 5]; /// let mut ptr: *mut u8 = data.as_mut_ptr(); /// let step = 2; /// let end_rounded_up = ptr.wrapping_offset(6); /// /// while ptr != end_rounded_up { /// unsafe { /// *ptr = 0; /// } /// ptr = ptr.wrapping_offset(step); /// } /// assert_eq!(&data, &[0, 2, 0, 4, 0]); /// ``` #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")] #[inline] pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized { unsafe { intrinsics::arith_offset(self, count) as *mut T } } /// Returns `None` if the pointer is null, or else returns a mutable /// reference to the value wrapped in `Some`. /// /// # Safety /// /// As with `as_ref`, this is unsafe because it cannot verify the validity /// of the returned pointer, nor can it ensure that the lifetime `'a` /// returned is indeed a valid lifetime for the contained data. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); /// let first_value = unsafe { ptr.as_mut().unwrap() }; /// *first_value = 4; /// println!("{:?}", s); // It'll print: "[4, 2, 3]". /// ``` #[stable(feature = "ptr_as_ref", since = "1.9.0")] #[inline] pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> { if self.is_null() { None } else { Some(&mut *self) } } /// Calculates the distance between two pointers. The returned value is in /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`. /// /// This function is the inverse of [`offset`]. /// /// [`offset`]: #method.offset-1 /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1 /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and other pointer must be either in bounds or one /// byte past the end of the same allocated object. 
/// /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`. /// /// * The distance between the pointers, in bytes, must be an exact multiple /// of the size of `T`. /// /// * The distance being in bounds cannot rely on "wrapping around" the address space. /// /// The compiler and standard library generally try to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using [`wrapping_offset_from`] instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Panics /// /// This function panics if `T` is a Zero-Sized Type ("ZST"). /// /// # Examples /// /// Basic usage: /// /// ``` /// #![feature(ptr_offset_from)] /// /// let mut a = [0; 5]; /// let ptr1: *mut i32 = &mut a[1]; /// let ptr2: *mut i32 = &mut a[3]; /// unsafe { /// assert_eq!(ptr2.offset_from(ptr1), 2); /// assert_eq!(ptr1.offset_from(ptr2), -2); /// assert_eq!(ptr1.offset(2), ptr2); /// assert_eq!(ptr2.offset(-2), ptr1); /// } /// ``` #[unstable(feature = "ptr_offset_from", issue = "41079")] #[inline] pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized { (self as *const T).offset_from(origin) } /// Calculates the distance between two pointers. 
The returned value is in
/// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
///
/// If the address difference between the two pointers is not a multiple of
/// `mem::size_of::<T>()` then the result of the division is rounded towards
/// zero.
///
/// Though this method is safe for any two pointers, note that its result
/// will be mostly useless if the two pointers aren't into the same allocated
/// object, for example if they point to two different local variables.
///
/// # Panics
///
/// This function panics if `T` is a zero-sized type.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// #![feature(ptr_wrapping_offset_from)]
///
/// let mut a = [0; 5];
/// let ptr1: *mut i32 = &mut a[1];
/// let ptr2: *mut i32 = &mut a[3];
/// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
/// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2);
/// assert_eq!(ptr1.wrapping_offset(2), ptr2);
/// assert_eq!(ptr2.wrapping_offset(-2), ptr1);
///
/// let ptr1: *mut i32 = 3 as _;
/// let ptr2: *mut i32 = 13 as _;
/// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2);
/// ```
#[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")]
#[inline]
pub fn wrapping_offset_from(self, origin: *const T) -> isize
    where T: Sized
{
    (self as *const T).wrapping_offset_from(origin)
}

/// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// If any of the following conditions are violated, the result is Undefined
/// Behavior:
///
/// * Both the starting and resulting pointer must be either in bounds or one
///   byte past the end of an allocated object.
///
/// * The computed offset, **in bytes**, cannot overflow an `isize`.
///
/// * The offset being in bounds cannot rely on "wrapping around" the address
///   space. That is, the infinite-precision sum must fit in a `usize`.
/// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); /// /// unsafe { /// println!("{}", *ptr.add(1) as char); /// println!("{}", *ptr.add(2) as char); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn add(self, count: usize) -> Self where T: Sized, { self.offset(count as isize) } /// Calculates the offset from a pointer (convenience for /// `.offset((count as isize).wrapping_neg())`). /// /// `count` is in units of T; e.g. a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// If any of the following conditions are violated, the result is Undefined /// Behavior: /// /// * Both the starting and resulting pointer must be either in bounds or one /// byte past the end of an allocated object. /// /// * The computed offset cannot exceed `isize::MAX` **bytes**. 
/// /// * The offset being in bounds cannot rely on "wrapping around" the address /// space. That is, the infinite-precision sum must fit in a usize. /// /// The compiler and standard library generally tries to ensure allocations /// never reach a size where an offset is a concern. For instance, `Vec` /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe. /// /// Most platforms fundamentally can't even construct such an allocation. /// For instance, no known 64-bit platform can ever serve a request /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space. /// However, some 32-bit and 16-bit platforms may successfully serve a request for /// more than `isize::MAX` bytes with things like Physical Address /// Extension. As such, memory acquired directly from allocators or memory /// mapped files *may* be too large to handle with this function. /// /// Consider using `wrapping_offset` instead if these constraints are /// difficult to satisfy. The only advantage of this method is that it /// enables more aggressive compiler optimizations. /// /// # Examples /// /// Basic usage: /// /// ``` /// let s: &str = "123"; /// /// unsafe { /// let end: *const u8 = s.as_ptr().add(3); /// println!("{}", *end.sub(1) as char); /// println!("{}", *end.sub(2) as char); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn sub(self, count: usize) -> Self where T: Sized, { self.offset((count as isize).wrapping_neg()) } /// Calculates the offset from a pointer using wrapping arithmetic. /// (convenience for `.wrapping_offset(count as isize)`) /// /// `count` is in units of T; e.g. a `count` of 3 represents a pointer /// offset of `3 * size_of::<T>()` bytes. /// /// # Safety /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). 
///
/// Always use `.add(count)` instead when possible, because `add`
/// allows the compiler to optimize better.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // Iterate using a raw pointer in increments of two elements
/// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let step = 2;
/// let end_rounded_up = ptr.wrapping_add(6);
///
/// // This loop prints "1, 3, 5, "
/// while ptr != end_rounded_up {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_add(step);
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub fn wrapping_add(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset(count as isize)
}

/// Calculates the offset from a pointer using wrapping arithmetic.
/// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
///
/// `count` is in units of T; e.g. a `count` of 3 represents a pointer
/// offset of `3 * size_of::<T>()` bytes.
///
/// # Safety
///
/// The resulting pointer does not need to be in bounds, but it is
/// potentially hazardous to dereference (which requires `unsafe`).
///
/// Always use `.sub(count)` instead when possible, because `sub`
/// allows the compiler to optimize better.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // Iterate using a raw pointer in increments of two elements (backwards)
/// let data = [1u8, 2, 3, 4, 5];
/// let mut ptr: *const u8 = data.as_ptr();
/// let start_rounded_down = ptr.wrapping_sub(2);
/// ptr = ptr.wrapping_add(4);
/// let step = 2;
/// // This loop prints "5, 3, 1, "
/// while ptr != start_rounded_down {
///     unsafe {
///         print!("{}, ", *ptr);
///     }
///     ptr = ptr.wrapping_sub(step);
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub fn wrapping_sub(self, count: usize) -> Self
    where T: Sized,
{
    self.wrapping_offset((count as isize).wrapping_neg())
}

/// Reads the value from `self` without moving it.
This leaves the /// memory in `self` unchanged. /// /// # Safety /// /// Beyond accepting a raw pointer, this is unsafe because it semantically /// moves the value out of `self` without preventing further usage of `self`. /// If `T` is not `Copy`, then care must be taken to ensure that the value at /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// /// The pointer must be aligned; use `read_unaligned` if that is not the case. /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(y.read(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read(self) -> T where T: Sized, { read(self) } /// Performs a volatile read of the value from `self` without moving it. This /// leaves the memory in `self` unchanged. /// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// # Notes /// /// Rust does not currently have a rigorously and formally defined memory model, /// so the precise semantics of what "volatile" means here is subject to change /// over time. That being said, the semantics will almost always end up pretty /// similar to [C11's definition of volatile][c11]. /// /// The compiler shouldn't change the relative order or number of volatile /// memory operations. However, volatile memory operations on zero-sized types /// (e.g. if a zero-sized type is passed to `read_volatile`) are no-ops /// and may be ignored. /// /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf /// /// # Safety /// /// Beyond accepting a raw pointer, this is unsafe because it semantically /// moves the value out of `self` without preventing further usage of `self`. 
/// If `T` is not `Copy`, then care must be taken to ensure that the value at /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// /// Just like in C, whether an operation is volatile has no bearing whatsoever /// on questions involving concurrent access from multiple threads. Volatile /// accesses behave exactly like non-atomic accesses in that regard. In particular, /// a race between a `read_volatile` and any write operation to the same location /// is undefined behavior. /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(y.read_volatile(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_volatile(self) -> T where T: Sized, { read_volatile(self) } /// Reads the value from `self` without moving it. This leaves the /// memory in `self` unchanged. /// /// Unlike `read`, the pointer may be unaligned. /// /// # Safety /// /// Beyond accepting a raw pointer, this is unsafe because it semantically /// moves the value out of `self` without preventing further usage of `self`. /// If `T` is not `Copy`, then care must be taken to ensure that the value at /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let x = 12; /// let y = &x as *const i32; /// /// unsafe { /// assert_eq!(y.read_unaligned(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_unaligned(self) -> T where T: Sized, { read_unaligned(self) } /// Copies `count * size_of<T>` bytes from `self` to `dest`. 
The source
/// and destination may overlap.
///
/// NOTE: this has the *same* argument order as `ptr::copy`.
///
/// This is semantically equivalent to C's `memmove`.
///
/// # Safety
///
/// Care must be taken with the ownership of `self` and `dest`.
/// This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
///
/// # Examples
///
/// Efficiently create a Rust vector from an unsafe buffer:
///
/// ```
/// # #[allow(dead_code)]
/// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> {
///     let mut dst = Vec::with_capacity(elts);
///     dst.set_len(elts);
///     ptr.copy_to(dst.as_mut_ptr(), elts);
///     dst
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn copy_to(self, dest: *mut T, count: usize)
    where T: Sized,
{
    copy(self, dest, count)
}

/// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
/// and destination may *not* overlap.
///
/// NOTE: this has the *same* argument order as `ptr::copy_nonoverlapping`.
///
/// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`.
///
/// # Safety
///
/// Beyond requiring that the program must be allowed to access both regions
/// of memory, it is Undefined Behavior for source and destination to
/// overlap. Care must also be taken with the ownership of `self` and
/// `dest`. This method semantically moves the values of `self` into `dest`.
/// However it does not drop the contents of `dest`, or prevent the contents
/// of `self` from being dropped or used.
/// /// # Examples /// /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` /// # #[allow(dead_code)] /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> { /// let mut dst = Vec::with_capacity(elts); /// dst.set_len(elts); /// ptr.copy_to_nonoverlapping(dst.as_mut_ptr(), elts); /// dst /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize) where T: Sized, { copy_nonoverlapping(self, dest, count) } /// Copies `count * size_of<T>` bytes from `src` to `self`. The source /// and destination may overlap. /// /// NOTE: this has the *opposite* argument order of `ptr::copy`. /// /// This is semantically equivalent to C's `memmove`. /// /// # Safety /// /// Care must be taken with the ownership of `src` and `self`. /// This method semantically moves the values of `src` into `self`. /// However it does not drop the contents of `self`, or prevent the contents /// of `src` from being dropped or used. /// /// # Examples /// /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` /// # #[allow(dead_code)] /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> { /// let mut dst: Vec<T> = Vec::with_capacity(elts); /// dst.set_len(elts); /// dst.as_mut_ptr().copy_from(ptr, elts); /// dst /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_from(self, src: *const T, count: usize) where T: Sized, { copy(src, self, count) } /// Copies `count * size_of<T>` bytes from `src` to `self`. The source /// and destination may *not* overlap. /// /// NOTE: this has the *opposite* argument order of `ptr::copy_nonoverlapping`. /// /// `copy_nonoverlapping` is semantically equivalent to C's `memcpy`. /// /// # Safety /// /// Beyond requiring that the program must be allowed to access both regions /// of memory, it is Undefined Behavior for source and destination to /// overlap. 
Care must also be taken with the ownership of `src` and /// `self`. This method semantically moves the values of `src` into `self`. /// However it does not drop the contents of `self`, or prevent the contents /// of `src` from being dropped or used. /// /// # Examples /// /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` /// # #[allow(dead_code)] /// unsafe fn from_buf_raw<T: Copy>(ptr: *const T, elts: usize) -> Vec<T> { /// let mut dst: Vec<T> = Vec::with_capacity(elts); /// dst.set_len(elts); /// dst.as_mut_ptr().copy_from_nonoverlapping(ptr, elts); /// dst /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize) where T: Sized, { copy_nonoverlapping(src, self, count) } /// Executes the destructor (if any) of the pointed-to value. /// /// This has two use cases: /// /// * It is *required* to use `drop_in_place` to drop unsized types like /// trait objects, because they can't be read out onto the stack and /// dropped normally. /// /// * It is friendlier to the optimizer to do this over `ptr::read` when /// dropping manually allocated memory (e.g. when writing Box/Rc/Vec), /// as the compiler doesn't need to prove that it's sound to elide the /// copy. /// /// # Safety /// /// This has all the same safety problems as `ptr::read` with respect to /// invalid pointers, types, and double drops. #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn drop_in_place(self) { drop_in_place(self) } /// Overwrites a memory location with the given value without reading or /// dropping the old value. /// /// # Safety /// /// This operation is marked unsafe because it writes through a raw pointer. /// /// It does not drop the contents of `self`. This is safe, but it could leak /// allocations or resources, so care must be taken not to overwrite an object /// that should be dropped. /// /// Additionally, it does not drop `val`. 
Semantically, `val` is moved into the /// location pointed to by `self`. /// /// This is appropriate for initializing uninitialized memory, or overwriting /// memory that has previously been `read` from. /// /// The pointer must be aligned; use `write_unaligned` if that is not the case. /// /// # Examples /// /// Basic usage: /// /// ``` /// let mut x = 0; /// let y = &mut x as *mut i32; /// let z = 12; /// /// unsafe { /// y.write(z); /// assert_eq!(y.read(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write(self, val: T) where T: Sized, { write(self, val) } /// Invokes memset on the specified pointer, setting `count * size_of::<T>()` /// bytes of memory starting at `self` to `val`. /// /// # Examples /// /// ``` /// let mut vec = vec![0; 4]; /// unsafe { /// let vec_ptr = vec.as_mut_ptr(); /// vec_ptr.write_bytes(b'a', 2); /// } /// assert_eq!(vec, [b'a', b'a', 0, 0]); /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_bytes(self, val: u8, count: usize) where T: Sized, { write_bytes(self, val, count) } /// Performs a volatile write of a memory location with the given value without /// reading or dropping the old value. /// /// Volatile operations are intended to act on I/O memory, and are guaranteed /// to not be elided or reordered by the compiler across other volatile /// operations. /// /// # Notes /// /// Rust does not currently have a rigorously and formally defined memory model, /// so the precise semantics of what "volatile" means here is subject to change /// over time. That being said, the semantics will almost always end up pretty /// similar to [C11's definition of volatile][c11]. /// /// The compiler shouldn't change the relative order or number of volatile /// memory operations. However, volatile memory operations on zero-sized types /// (e.g. if a zero-sized type is passed to `write_volatile`) are no-ops /// and may be ignored. 
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// This operation is marked unsafe because it accepts a raw pointer.
///
/// It does not drop the contents of `self`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `write_volatile` and any other operation (reading or writing)
/// on the same location is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
///     y.write_volatile(z);
///     assert_eq!(y.read_volatile(), 12);
/// }
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[inline]
pub unsafe fn write_volatile(self, val: T)
    where T: Sized,
{
    write_volatile(self, val)
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// Unlike `write`, the pointer may be unaligned.
///
/// # Safety
///
/// This operation is marked unsafe because it writes through a raw pointer.
///
/// It does not drop the contents of `self`. This is safe, but it could leak
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
/// Additionally, it does not drop `val`. Semantically, `val` is moved into the
/// location pointed to by `self`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
/// /// # Examples /// /// Basic usage: /// /// ``` /// let mut x = 0; /// let y = &mut x as *mut i32; /// let z = 12; /// /// unsafe { /// y.write_unaligned(z); /// assert_eq!(y.read_unaligned(), 12); /// } /// ``` #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_unaligned(self, val: T) where T: Sized, { write_unaligned(self, val) } /// Replaces the value at `self` with `src`, returning the old /// value, without dropping either. /// /// # Safety /// /// This is only unsafe because it accepts a raw pointer. /// Otherwise, this operation is identical to `mem::replace`. #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn replace(self, src: T) -> T where T: Sized, { replace(self, src) } /// Swaps the values at two mutable locations of the same type, without /// deinitializing either. They may overlap, unlike `mem::swap` which is /// otherwise equivalent. /// /// # Safety /// /// This function copies the memory through the raw pointers passed to it /// as arguments. /// /// Ensure that these pointers are valid before calling `swap`. #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn swap(self, with: *mut T) where T: Sized, { swap(self, with) } /// Computes the offset that needs to be applied to the pointer in order to make it aligned to /// `align`. /// /// If it is not possible to align the pointer, the implementation returns /// `usize::max_value()`. /// /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be /// used with the `offset` or `offset_to` methods. /// /// There are no guarantees whatsover that offsetting the pointer will not overflow or go /// beyond the allocation that the pointer points into. It is up to the caller to ensure that /// the returned offset is correct in all terms other than alignment. /// /// # Panics /// /// The function panics if `align` is not a power-of-two. 
/// /// # Examples /// /// Accessing adjacent `u8` as `u16` /// /// ``` /// # #![feature(align_offset)] /// # fn foo(n: usize) { /// # use std::mem::align_of; /// # unsafe { /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; /// let ptr = &x[n] as *const u8; /// let offset = ptr.align_offset(align_of::<u16>()); /// if offset < x.len() - n - 1 { /// let u16_ptr = ptr.add(offset) as *const u16; /// assert_ne!(*u16_ptr, 500); /// } else { /// // while the pointer can be aligned via `offset`, it would point /// // outside the allocation /// } /// # } } /// ``` #[unstable(feature = "align_offset", issue = "44488")] pub fn align_offset(self, align: usize) -> usize where T: Sized { if !align.is_power_of_two() { panic!("align_offset: align is not a power-of-two"); } unsafe { align_offset(self, align) } } } /// Align pointer `p`. /// /// Calculate offset (in terms of elements of `stride` stride) that has to be applied /// to pointer `p` so that pointer `p` would get aligned to `a`. /// /// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic. /// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated /// constants. /// /// If we ever decide to make it possible to call the intrinsic with `a` that is not a /// power-of-two, it will probably be more prudent to just change to a naive implementation rather /// than trying to adapt this to accommodate that change. /// /// Any questions go to @nagisa. #[lang="align_offset"] pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize { /// Calculate multiplicative modular inverse of `x` modulo `m`. /// /// This implementation is tailored for align_offset and has following preconditions: /// /// * `m` is a power-of-two; /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead) /// /// Implementation of this function shall not panic. Ever. #[inline] fn mod_inv(x: usize, m: usize) -> usize { /// Multiplicative modular inverse table modulo 2⁴ = 16. 
/// /// Note, that this table does not contain values where inverse does not exist (i.e. for /// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.) const INV_TABLE_MOD_16: [usize; 8] = [1, 11, 13, 7, 9, 3, 5, 15]; /// Modulo for which the `INV_TABLE_MOD_16` is intended. const INV_TABLE_MOD: usize = 16; /// INV_TABLE_MOD² const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD; let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1]; if m <= INV_TABLE_MOD { table_inverse & (m - 1) } else { // We iterate "up" using the following formula: // // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$ // // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`. let mut inverse = table_inverse; let mut going_mod = INV_TABLE_MOD_SQUARED; loop { // y = y * (2 - xy) mod n // // Note, that we use wrapping operations here intentionally – the original formula // uses e.g. subtraction `mod n`. It is entirely fine to do them `mod // usize::max_value()` instead, because we take the result `mod n` at the end // anyway. inverse = inverse.wrapping_mul( 2usize.wrapping_sub(x.wrapping_mul(inverse)) ) & (going_mod - 1); if going_mod > m { return inverse & (m - 1); } going_mod = going_mod.wrapping_mul(going_mod); } } } let stride = ::mem::size_of::<T>(); let a_minus_one = a.wrapping_sub(1); let pmoda = p as usize & a_minus_one; if pmoda == 0 { // Already aligned. Yay! return 0; } if stride <= 1 { return if stride == 0 { // If the pointer is not aligned, and the element is zero-sized, then no amount of // elements will ever align the pointer. !0 } else { a.wrapping_sub(pmoda) }; } let smoda = stride & a_minus_one; // a is power-of-two so cannot be 0. stride = 0 is handled above. 
let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a)); let gcd = 1usize << gcdpow; if gcd == 1 { // This branch solves for the variable $o$ in following linear congruence equation: // // ⎰ p + o ≡ 0 (mod a) # $p + o$ must be aligned to specified alignment $a$ // ⎱ o ≡ 0 (mod s) # offset $o$ must be a multiple of stride $s$ // // where // // * a, s are co-prime // // This gives us the formula below: // // o = (a - (p mod a)) * (s⁻¹ mod a) * s // // The first term is “the relative alignment of p to a”, the second term is “how does // incrementing p by one s change the relative alignment of p”, the third term is // translating change in units of s to a byte count. // // Furthermore, the result produced by this solution is not “minimal”, so it is necessary // to take the result $o mod lcm(s, a)$. Since $s$ and $a$ are co-prime (i.e. $gcd(s, a) = // 1$) and $lcm(s, a) = s * a / gcd(s, a)$, we can replace $lcm(s, a)$ with just a $s * a$. // // (Author note: we decided later on to express the offset in "elements" rather than bytes, // which drops the multiplication by `s` on both sides of the modulo.) return intrinsics::unchecked_rem(a.wrapping_sub(pmoda).wrapping_mul(mod_inv(smoda, a)), a); } if p as usize & (gcd - 1) == 0 { // This can be aligned, but `a` and `stride` are not co-prime, so a somewhat adapted // formula is used. let j = a.wrapping_sub(pmoda) >> gcdpow; let k = smoda >> gcdpow; return intrinsics::unchecked_rem(j.wrapping_mul(mod_inv(k, a)), a >> gcdpow); } // Cannot be aligned at all. 
usize::max_value() } // Equality for pointers #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> PartialEq for *const T { #[inline] fn eq(&self, other: &*const T) -> bool { *self == *other } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Eq for *const T {} #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> PartialEq for *mut T { #[inline] fn eq(&self, other: &*mut T) -> bool { *self == *other } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Eq for *mut T {} /// Compare raw pointers for equality. /// /// This is the same as using the `==` operator, but less generic: /// the arguments have to be `*const T` raw pointers, /// not anything that implements `PartialEq`. /// /// This can be used to compare `&T` references (which coerce to `*const T` implicitly) /// by their address rather than comparing the values they point to /// (which is what the `PartialEq for &T` implementation does). /// /// # Examples /// /// ``` /// use std::ptr; /// /// let five = 5; /// let other_five = 5; /// let five_ref = &five; /// let same_five_ref = &five; /// let other_five_ref = &other_five; /// /// assert!(five_ref == same_five_ref); /// assert!(five_ref == other_five_ref); /// /// assert!(ptr::eq(five_ref, same_five_ref)); /// assert!(!ptr::eq(five_ref, other_five_ref)); /// ``` #[stable(feature = "ptr_eq", since = "1.17.0")] #[inline] pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool { a == b } // Impls for function pointers macro_rules! 
fnptr_impls_safety_abi { ($FnTy: ty, $($Arg: ident),*) => { #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> PartialEq for $FnTy { #[inline] fn eq(&self, other: &Self) -> bool { *self as usize == *other as usize } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> Eq for $FnTy {} #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> PartialOrd for $FnTy { #[inline] fn partial_cmp(&self, other: &Self) -> Option<Ordering> { (*self as usize).partial_cmp(&(*other as usize)) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> Ord for $FnTy { #[inline] fn cmp(&self, other: &Self) -> Ordering { (*self as usize).cmp(&(*other as usize)) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> hash::Hash for $FnTy { fn hash<HH: hash::Hasher>(&self, state: &mut HH) { state.write_usize(*self as usize) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> fmt::Pointer for $FnTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&(*self as *const ()), f) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl<Ret, $($Arg),*> fmt::Debug for $FnTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&(*self as *const ()), f) } } } } macro_rules! fnptr_impls_args { ($($Arg: ident),+) => { fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* } }; () => { // No variadic functions with 0 parameters fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, } fnptr_impls_safety_abi! 
{ extern "C" fn() -> Ret, } fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, } fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, } }; } fnptr_impls_args! { } fnptr_impls_args! { A } fnptr_impls_args! { A, B } fnptr_impls_args! { A, B, C } fnptr_impls_args! { A, B, C, D } fnptr_impls_args! { A, B, C, D, E } fnptr_impls_args! { A, B, C, D, E, F } fnptr_impls_args! { A, B, C, D, E, F, G } fnptr_impls_args! { A, B, C, D, E, F, G, H } fnptr_impls_args! { A, B, C, D, E, F, G, H, I } fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J } fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K } fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L } // Comparison for pointers #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Ord for *const T { #[inline] fn cmp(&self, other: &*const T) -> Ordering { if self < other { Less } else if self == other { Equal } else { Greater } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> PartialOrd for *const T { #[inline] fn partial_cmp(&self, other: &*const T) -> Option<Ordering> { Some(self.cmp(other)) } #[inline] fn lt(&self, other: &*const T) -> bool { *self < *other } #[inline] fn le(&self, other: &*const T) -> bool { *self <= *other } #[inline] fn gt(&self, other: &*const T) -> bool { *self > *other } #[inline] fn ge(&self, other: &*const T) -> bool { *self >= *other } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Ord for *mut T { #[inline] fn cmp(&self, other: &*mut T) -> Ordering { if self < other { Less } else if self == other { Equal } else { Greater } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> PartialOrd for *mut T { #[inline] fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> { Some(self.cmp(other)) } #[inline] fn lt(&self, other: &*mut T) -> bool { *self < *other } #[inline] fn le(&self, other: &*mut T) -> bool { *self <= *other } #[inline] fn gt(&self, other: &*mut T) -> bool { *self > *other } #[inline] fn ge(&self, other: &*mut T) -> 
bool { *self >= *other } } /// A wrapper around a raw non-null `*mut T` that indicates that the possessor /// of this wrapper owns the referent. Useful for building abstractions like /// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`. /// /// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`. /// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies /// the kind of strong aliasing guarantees an instance of `T` can expect: /// the referent of the pointer should not be modified without a unique path to /// its owning Unique. /// /// If you're uncertain of whether it's correct to use `Unique` for your purposes, /// consider using `NonNull`, which has weaker semantics. /// /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer /// is never dereferenced. This is so that enums may use this forbidden value /// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`. /// However the pointer may still dangle if it isn't dereferenced. /// /// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct /// for any type which upholds Unique's aliasing requirements. #[unstable(feature = "ptr_internals", issue = "0", reason = "use NonNull instead and consider PhantomData<T> \ (if you also use #[may_dangle]), Send, and/or Sync")] #[doc(hidden)] #[repr(transparent)] pub struct Unique<T: ?Sized> { pointer: NonZero<*const T>, // NOTE: this marker has no consequences for variance, but is necessary // for dropck to understand that we logically own a `T`. // // For details, see: // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data _marker: PhantomData<T>, } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> fmt::Debug for Unique<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } /// `Unique` pointers are `Send` if `T` is `Send` because the data they /// reference is unaliased. 
Note that this aliasing invariant is /// unenforced by the type system; the abstraction using the /// `Unique` must enforce it. #[unstable(feature = "ptr_internals", issue = "0")] unsafe impl<T: Send + ?Sized> Send for Unique<T> { } /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they /// reference is unaliased. Note that this aliasing invariant is /// unenforced by the type system; the abstraction using the /// `Unique` must enforce it. #[unstable(feature = "ptr_internals", issue = "0")] unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: Sized> Unique<T> { /// Creates a new `Unique` that is dangling, but well-aligned. /// /// This is useful for initializing types which lazily allocate, like /// `Vec::new` does. /// /// Note that the pointer value may potentially represent a valid pointer to /// a `T`, which means this must not be used as a "not yet initialized" /// sentinel value. Types that lazily allocate must track initialization by /// some other means. // FIXME: rename to dangling() to match NonNull? pub const fn empty() -> Self { unsafe { Unique::new_unchecked(mem::align_of::<T>() as *mut T) } } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> Unique<T> { /// Creates a new `Unique`. /// /// # Safety /// /// `ptr` must be non-null. pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { Unique { pointer: NonZero(ptr as _), _marker: PhantomData } } /// Creates a new `Unique` if `ptr` is non-null. pub fn new(ptr: *mut T) -> Option<Self> { if !ptr.is_null() { Some(Unique { pointer: NonZero(ptr as _), _marker: PhantomData }) } else { None } } /// Acquires the underlying `*mut` pointer. pub fn as_ptr(self) -> *mut T { self.pointer.0 as *mut T } /// Dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. 
If a longer /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. pub unsafe fn as_ref(&self) -> &T { &*self.as_ptr() } /// Mutably dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. pub unsafe fn as_mut(&mut self) -> &mut T { &mut *self.as_ptr() } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> Clone for Unique<T> { fn clone(&self) -> Self { *self } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> Copy for Unique<T> { } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> { } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> fmt::Pointer for Unique<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } #[unstable(feature = "ptr_internals", issue = "0")] impl<'a, T: ?Sized> From<&'a mut T> for Unique<T> { fn from(reference: &'a mut T) -> Self { Unique { pointer: NonZero(reference as _), _marker: PhantomData } } } #[unstable(feature = "ptr_internals", issue = "0")] impl<'a, T: ?Sized> From<&'a T> for Unique<T> { fn from(reference: &'a T) -> Self { Unique { pointer: NonZero(reference as _), _marker: PhantomData } } } #[unstable(feature = "ptr_internals", issue = "0")] impl<'a, T: ?Sized> From<NonNull<T>> for Unique<T> { fn from(p: NonNull<T>) -> Self { Unique { pointer: p.pointer, _marker: PhantomData } } } /// `*mut T` but non-zero and covariant. /// /// This is often the correct thing to use when building data structures using /// raw pointers, but is ultimately more dangerous to use because of its additional /// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`! 
/// /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer /// is never dereferenced. This is so that enums may use this forbidden value /// as a discriminant -- `Option<NonNull<T>>` has the same size as `*mut T`. /// However the pointer may still dangle if it isn't dereferenced. /// /// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect /// for your use case, you should include some PhantomData in your type to /// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`. /// Usually this won't be necessary; covariance is correct for most safe abstractions, /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they /// provide a public API that follows the normal shared XOR mutable rules of Rust. #[stable(feature = "nonnull", since = "1.25.0")] #[repr(transparent)] pub struct NonNull<T: ?Sized> { pointer: NonZero<*const T>, } /// `NonNull` pointers are not `Send` because the data they reference may be aliased. // NB: This impl is unnecessary, but should provide better error messages. #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> !Send for NonNull<T> { } /// `NonNull` pointers are not `Sync` because the data they reference may be aliased. // NB: This impl is unnecessary, but should provide better error messages. #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> !Sync for NonNull<T> { } impl<T: Sized> NonNull<T> { /// Creates a new `NonNull` that is dangling, but well-aligned. /// /// This is useful for initializing types which lazily allocate, like /// `Vec::new` does. /// /// Note that the pointer value may potentially represent a valid pointer to /// a `T`, which means this must not be used as a "not yet initialized" /// sentinel value. Types that lazily allocate must track initialization by /// some other means. 
#[stable(feature = "nonnull", since = "1.25.0")] pub fn dangling() -> Self { unsafe { let ptr = mem::align_of::<T>() as *mut T; NonNull::new_unchecked(ptr) } } } impl<T: ?Sized> NonNull<T> { /// Creates a new `NonNull`. /// /// # Safety /// /// `ptr` must be non-null. #[stable(feature = "nonnull", since = "1.25.0")] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { NonNull { pointer: NonZero(ptr as _) } } /// Creates a new `NonNull` if `ptr` is non-null. #[stable(feature = "nonnull", since = "1.25.0")] pub fn new(ptr: *mut T) -> Option<Self> { if !ptr.is_null() { Some(NonNull { pointer: NonZero(ptr as _) }) } else { None } } /// Acquires the underlying `*mut` pointer. #[stable(feature = "nonnull", since = "1.25.0")] pub fn as_ptr(self) -> *mut T { self.pointer.0 as *mut T } /// Dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. #[stable(feature = "nonnull", since = "1.25.0")] pub unsafe fn as_ref(&self) -> &T { &*self.as_ptr() } /// Mutably dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. 
#[stable(feature = "nonnull", since = "1.25.0")] pub unsafe fn as_mut(&mut self) -> &mut T { &mut *self.as_ptr() } /// Cast to a pointer of another type #[stable(feature = "nonnull_cast", since = "1.27.0")] pub fn cast<U>(self) -> NonNull<U> { unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) } } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Clone for NonNull<T> { fn clone(&self) -> Self { *self } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Copy for NonNull<T> { } #[unstable(feature = "coerce_unsized", issue = "27732")] impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> { } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> fmt::Debug for NonNull<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> fmt::Pointer for NonNull<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Eq for NonNull<T> {} #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> PartialEq for NonNull<T> { fn eq(&self, other: &Self) -> bool { self.as_ptr() == other.as_ptr() } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> Ord for NonNull<T> { fn cmp(&self, other: &Self) -> Ordering { self.as_ptr().cmp(&other.as_ptr()) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> PartialOrd for NonNull<T> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.as_ptr().partial_cmp(&other.as_ptr()) } } #[stable(feature = "nonnull", since = "1.25.0")] impl<T: ?Sized> hash::Hash for NonNull<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.as_ptr().hash(state) } } #[unstable(feature = "ptr_internals", issue = "0")] impl<T: ?Sized> From<Unique<T>> for NonNull<T> { fn from(unique: Unique<T>) -> Self { NonNull { pointer: 
unique.pointer } } } #[stable(feature = "nonnull", since = "1.25.0")] impl<'a, T: ?Sized> From<&'a mut T> for NonNull<T> { fn from(reference: &'a mut T) -> Self { NonNull { pointer: NonZero(reference as _) } } } #[stable(feature = "nonnull", since = "1.25.0")] impl<'a, T: ?Sized> From<&'a T> for NonNull<T> { fn from(reference: &'a T) -> Self { NonNull { pointer: NonZero(reference as _) } } }
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::{ conversion::ConvertError, types::{make_ident, TypeName}, }; use indoc::indoc; use once_cell::sync::OnceCell; use std::collections::HashMap; use syn::{parse_quote, GenericArgument, PathArguments, Type, TypePath, TypePtr}; //// The behavior of the type. #[derive(Debug)] enum Behavior { CxxContainerByValueSafe, CxxContainerNotByValueSafe, CxxString, RustStr, RustString, RustByValue, CByValue, CVariableLengthByValue, CVoid, } /// Details about known special types, mostly primitives. #[derive(Debug)] struct TypeDetails { /// The name used by cxx (in Rust code) for this type. rs_name: String, /// C++ equivalent name for a Rust type. cpp_name: String, //// The behavior of the type. behavior: Behavior, /// Any extra non-canonical names extra_non_canonical_name: Option<String>, } impl TypeDetails { fn new( rs_name: impl Into<String>, cpp_name: impl Into<String>, behavior: Behavior, extra_non_canonical_name: Option<String>, ) -> Self { TypeDetails { rs_name: rs_name.into(), cpp_name: cpp_name.into(), behavior, extra_non_canonical_name, } } /// Whether and how to include this in the prelude given to bindgen. 
fn get_prelude_entry(&self) -> Option<String> { match self.behavior { Behavior::RustString | Behavior::RustStr | Behavior::CxxString | Behavior::CxxContainerByValueSafe | Behavior::CxxContainerNotByValueSafe => { let tn = TypeName::new_from_user_input(&self.rs_name); let cxx_name = tn.get_final_ident(); let (templating, payload) = match self.behavior { Behavior::CxxContainerByValueSafe | Behavior::CxxContainerNotByValueSafe => { ("template<typename T> ", "T* ptr") } _ => ("", "char* ptr"), }; Some(format!( indoc! {" /** * <div rustbindgen=\"true\" replaces=\"{}\"> */ {}class {} {{ {}; }}; "}, self.cpp_name, templating, cxx_name, payload )) } _ => None, } } fn to_type_path(&self) -> TypePath { let segs = self.rs_name.split("::").map(make_ident); parse_quote! { #(#segs)::* } } fn to_typename(&self) -> TypeName { TypeName::new_from_user_input(&self.rs_name) } } /// Database of known types. pub(crate) struct TypeDatabase { by_rs_name: HashMap<TypeName, TypeDetails>, canonical_names: HashMap<TypeName, TypeName>, } /// Returns a database of known types. pub(crate) fn known_types() -> &'static TypeDatabase { static KNOWN_TYPES: OnceCell<TypeDatabase> = OnceCell::new(); KNOWN_TYPES.get_or_init(create_type_database) } impl TypeDatabase { fn get(&self, ty: &TypeName) -> Option<&TypeDetails> { // The following line is important. It says that // when we encounter something like 'std::unique_ptr' // in the bindgen-generated bindings, we'll immediately // start to refer to that as 'UniquePtr' henceforth. let canonical_name = self.canonical_names.get(ty).unwrap_or(ty); self.by_rs_name.get(canonical_name) } /// Prelude of C++ for squirting into bindgen. This configures /// bindgen to output simpler types to replace some STL types /// that bindgen just can't cope with. Although we then replace /// those types with cxx types (e.g. UniquePtr), this intermediate /// step is still necessary because bindgen can't otherwise /// give us the templated types (e.g. 
when faced with the STL /// unique_ptr, bindgen would normally give us std_unique_ptr /// as opposed to std_unique_ptr<T>.) pub(crate) fn get_prelude(&self) -> String { itertools::join( self.by_rs_name .values() .filter_map(|t| t.get_prelude_entry()), "\n", ) } /// Types which are known to be safe (or unsafe) to hold and pass by /// value in Rust. pub(crate) fn get_pod_safe_types(&self) -> impl Iterator<Item = (&TypeName, bool)> { self.by_rs_name.iter().map(|(tn, td)| { ( tn, match td.behavior { Behavior::CxxContainerByValueSafe | Behavior::RustStr | Behavior::RustString | Behavior::RustByValue | Behavior::CByValue | Behavior::CVariableLengthByValue => true, Behavior::CxxString | Behavior::CxxContainerNotByValueSafe | Behavior::CVoid => false, }, ) }) } /// Whether this TypePath should be treated as a value in C++ /// but a reference in Rust. This only applies to rust::Str /// (C++ name) which is &str in Rust. pub(crate) fn should_dereference_in_cpp(&self, typ: &TypePath) -> bool { let tn = TypeName::from_type_path(typ); self.get(&tn) .map(|td| matches!(td.behavior, Behavior::RustStr)) .unwrap_or(false) } /// Here we substitute any names which we know are Special from /// our type database, e.g. std::unique_ptr -> UniquePtr. /// We strip off and ignore /// any PathArguments within this TypePath - callers should /// put them back again if needs be. pub(crate) fn known_type_substitute_path(&self, typ: &TypePath) -> Option<TypePath> { let tn = TypeName::from_type_path(typ); self.get(&tn).map(|td| td.to_type_path()) } pub(crate) fn special_cpp_name(&self, rs: &TypeName) -> Option<String> { self.get(rs).map(|x| x.cpp_name.to_string()) } pub(crate) fn is_known_type(&self, ty: &TypeName) -> bool { self.get(ty).is_some() } pub(crate) fn known_type_type_path(&self, ty: &TypeName) -> Option<TypePath> { self.get(ty).map(|td| td.to_type_path()) } /// Whether this is one of the ctypes (mostly variable length integers) /// which we need to wrap. 
pub(crate) fn is_ctype(&self, ty: &TypeName) -> bool { self.get(ty) .map(|td| { matches!( td.behavior, Behavior::CVariableLengthByValue | Behavior::CVoid ) }) .unwrap_or(false) } /// Whether this is a generic type acceptable to cxx. Otherwise, /// if we encounter a generic, we'll replace it with a synthesized concrete /// type. pub(crate) fn is_cxx_acceptable_generic(&self, ty: &TypeName) -> bool { self.get(ty) .map(|x| { matches!( x.behavior, Behavior::CxxContainerByValueSafe | Behavior::CxxContainerNotByValueSafe ) }) .unwrap_or(false) } pub(crate) fn convertible_from_strs(&self, ty: &TypeName) -> bool { self.get(ty) .map(|x| matches!(x.behavior, Behavior::CxxString)) .unwrap_or(false) } } fn create_type_database() -> TypeDatabase { let mut by_rs_name = HashMap::new(); let mut do_insert = |td: TypeDetails| by_rs_name.insert(td.to_typename(), td); do_insert(TypeDetails::new( "cxx::UniquePtr", "std::unique_ptr", Behavior::CxxContainerByValueSafe, None, )); do_insert(TypeDetails::new( "cxx::CxxVector", "std::vector", Behavior::CxxContainerNotByValueSafe, None, )); do_insert(TypeDetails::new( "cxx::SharedPtr", "std::shared_ptr", Behavior::CxxContainerByValueSafe, None, )); do_insert(TypeDetails::new( "cxx::CxxString", "std::string", Behavior::CxxString, None, )); do_insert(TypeDetails::new( "str", "rust::Str", Behavior::RustStr, None, )); do_insert(TypeDetails::new( "String", "rust::String", Behavior::RustString, None, )); do_insert(TypeDetails::new( "i8", "int8_t", Behavior::CByValue, Some("std::os::raw::c_schar".into()), )); do_insert(TypeDetails::new( "u8", "uint8_t", Behavior::CByValue, Some("std::os::raw::c_uchar".into()), )); for (cpp_type, rust_type) in (4..7) .map(|x| 2i32.pow(x)) .map(|x| { vec![ (format!("uint{}_t", x), format!("u{}", x)), (format!("int{}_t", x), format!("i{}", x)), ] }) .flatten() { do_insert(TypeDetails::new( rust_type, cpp_type, Behavior::CByValue, None, )); } do_insert(TypeDetails::new("bool", "bool", Behavior::CByValue, None)); 
do_insert(TypeDetails::new(
        "std::pin::Pin",
        "Pin",
        Behavior::RustByValue, // because this is actually Pin<&something>
        None,
    ));
    // Registers both the plain and the "unsigned"-prefixed variant of a
    // variable-length C integer type under the autocxx::c_* wrapper names.
    let mut insert_ctype = |cname: &str| {
        let concatenated_name = cname.replace(" ", "");
        let td = TypeDetails::new(
            format!("autocxx::c_{}", concatenated_name),
            cname,
            Behavior::CVariableLengthByValue,
            Some(format!("std::os::raw::c_{}", concatenated_name)),
        );
        by_rs_name.insert(td.to_typename(), td);
        let td = TypeDetails::new(
            format!("autocxx::c_u{}", concatenated_name),
            format!("unsigned {}", cname),
            Behavior::CVariableLengthByValue,
            Some(format!("std::os::raw::c_u{}", concatenated_name)),
        );
        by_rs_name.insert(td.to_typename(), td);
    };
    insert_ctype("long");
    insert_ctype("int");
    insert_ctype("short");
    insert_ctype("long long");
    let td = TypeDetails::new("f32", "float", Behavior::CByValue, None);
    by_rs_name.insert(td.to_typename(), td);
    let td = TypeDetails::new("f64", "double", Behavior::CByValue, None);
    by_rs_name.insert(td.to_typename(), td);
    let td = TypeDetails::new("std::os::raw::c_char", "char", Behavior::CByValue, None);
    by_rs_name.insert(td.to_typename(), td);
    let td = TypeDetails::new(
        "autocxx::c_void",
        "void",
        Behavior::CVoid,
        Some("std::os::raw::c_void".into()),
    );
    by_rs_name.insert(td.to_typename(), td);
    // Build the reverse map: C++ name (and any extra non-canonical name)
    // -> the canonical Rust-side TypeName.
    let mut by_cppname = HashMap::new();
    for td in by_rs_name.values() {
        let rs_name = td.to_typename();
        if let Some(extra_non_canonical_name) = &td.extra_non_canonical_name {
            by_cppname.insert(
                TypeName::new_from_user_input(extra_non_canonical_name),
                rs_name.clone(),
            );
        }
        by_cppname.insert(TypeName::new_from_user_input(&td.cpp_name), rs_name);
    }
    TypeDatabase {
        by_rs_name,
        canonical_names: by_cppname,
    }
}

/// This is worked out basically using trial and error.
/// Excluding std* and rust* is obvious, but the other items...
/// in theory bindgen ought to be smart enough to work out that
/// they're not used and therefore not generate code for them.
/// But it does, unless we blocklist them. This is obviously
/// a bit sensitive to the particular STL in use so one day
/// it would be good to dig into bindgen's behavior here - TODO.
const BINDGEN_BLOCKLIST: &[&str] = &["std.*", "__gnu.*", ".*mbstate_t.*", "rust.*"];

/// Get the list of types to give to bindgen to ask it _not_ to
/// generate code for.
pub(crate) fn get_initial_blocklist() -> Vec<String> {
    BINDGEN_BLOCKLIST.iter().map(|s| s.to_string()).collect()
}

/// If a given type lacks a copy constructor, we should always use
/// std::move in wrapper functions.
pub(crate) fn type_lacks_copy_constructor(ty: &Type) -> bool {
    // In future we may wish to look this up in KNOWN_TYPES.
    match ty {
        Type::Path(typ) => {
            let tn = TypeName::from_type_path(typ);
            tn.to_cpp_name().starts_with("std::unique_ptr")
        }
        _ => false,
    }
}

/// Checks (recursively) that all generic arguments in `path_args` are plain
/// type paths; parenthesized or non-path arguments are rejected.
pub(crate) fn confirm_inner_type_is_acceptable_generic_payload(
    path_args: &PathArguments,
    desc: &TypeName,
) -> Result<(), ConvertError> {
    // For now, all supported generics accept the same payloads. This
    // may change in future in which case we'll need to accept more arguments here.
    match path_args {
        PathArguments::None => Ok(()),
        PathArguments::Parenthesized(_) => Err(ConvertError::TemplatedTypeContainingNonPathArg(
            desc.clone(),
        )),
        PathArguments::AngleBracketed(ab) => {
            for inner in &ab.args {
                match inner {
                    GenericArgument::Type(Type::Path(typ)) => {
                        if let Some(more_generics) = typ.path.segments.last() {
                            confirm_inner_type_is_acceptable_generic_payload(
                                &more_generics.arguments,
                                desc,
                            )?;
                        }
                    }
                    _ => {
                        return Err(ConvertError::TemplatedTypeContainingNonPathArg(
                            desc.clone(),
                        ))
                    }
                }
            }
            Ok(())
        }
    }
}

/// Confirms that a raw pointer points at a plain type path; any other
/// pointee is rejected.
pub(crate) fn ensure_pointee_is_valid(ptr: &TypePtr) -> Result<(), ConvertError> {
    match *ptr.elem {
        Type::Path(..) => Ok(()),
        _ => Err(ConvertError::InvalidPointee),
    }
}

Simplify known_types.

// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::{ conversion::ConvertError, types::{make_ident, TypeName}, }; use indoc::indoc; use once_cell::sync::OnceCell; use std::collections::HashMap; use syn::{parse_quote, GenericArgument, PathArguments, Type, TypePath, TypePtr}; //// The behavior of the type. #[derive(Debug)] enum Behavior { CxxContainerByValueSafe, CxxContainerNotByValueSafe, CxxString, RustStr, RustString, RustByValue, CByValue, CVariableLengthByValue, CVoid, } /// Details about known special types, mostly primitives. #[derive(Debug)] struct TypeDetails { /// The name used by cxx (in Rust code) for this type. rs_name: String, /// C++ equivalent name for a Rust type. cpp_name: String, //// The behavior of the type. behavior: Behavior, /// Any extra non-canonical names extra_non_canonical_name: Option<String>, } impl TypeDetails { fn new( rs_name: impl Into<String>, cpp_name: impl Into<String>, behavior: Behavior, extra_non_canonical_name: Option<String>, ) -> Self { TypeDetails { rs_name: rs_name.into(), cpp_name: cpp_name.into(), behavior, extra_non_canonical_name, } } /// Whether and how to include this in the prelude given to bindgen. 
fn get_prelude_entry(&self) -> Option<String> { match self.behavior { Behavior::RustString | Behavior::RustStr | Behavior::CxxString | Behavior::CxxContainerByValueSafe | Behavior::CxxContainerNotByValueSafe => { let tn = TypeName::new_from_user_input(&self.rs_name); let cxx_name = tn.get_final_ident(); let (templating, payload) = match self.behavior { Behavior::CxxContainerByValueSafe | Behavior::CxxContainerNotByValueSafe => { ("template<typename T> ", "T* ptr") } _ => ("", "char* ptr"), }; Some(format!( indoc! {" /** * <div rustbindgen=\"true\" replaces=\"{}\"> */ {}class {} {{ {}; }}; "}, self.cpp_name, templating, cxx_name, payload )) } _ => None, } } fn to_type_path(&self) -> TypePath { let segs = self.rs_name.split("::").map(make_ident); parse_quote! { #(#segs)::* } } fn to_typename(&self) -> TypeName { TypeName::new_from_user_input(&self.rs_name) } } /// Database of known types. #[derive(Default)] pub(crate) struct TypeDatabase { by_rs_name: HashMap<TypeName, TypeDetails>, canonical_names: HashMap<TypeName, TypeName>, } /// Returns a database of known types. pub(crate) fn known_types() -> &'static TypeDatabase { static KNOWN_TYPES: OnceCell<TypeDatabase> = OnceCell::new(); KNOWN_TYPES.get_or_init(create_type_database) } impl TypeDatabase { fn get(&self, ty: &TypeName) -> Option<&TypeDetails> { // The following line is important. It says that // when we encounter something like 'std::unique_ptr' // in the bindgen-generated bindings, we'll immediately // start to refer to that as 'UniquePtr' henceforth. let canonical_name = self.canonical_names.get(ty).unwrap_or(ty); self.by_rs_name.get(canonical_name) } /// Prelude of C++ for squirting into bindgen. This configures /// bindgen to output simpler types to replace some STL types /// that bindgen just can't cope with. Although we then replace /// those types with cxx types (e.g. UniquePtr), this intermediate /// step is still necessary because bindgen can't otherwise /// give us the templated types (e.g. 
when faced with the STL /// unique_ptr, bindgen would normally give us std_unique_ptr /// as opposed to std_unique_ptr<T>.) pub(crate) fn get_prelude(&self) -> String { itertools::join( self.by_rs_name .values() .filter_map(|t| t.get_prelude_entry()), "\n", ) } /// Types which are known to be safe (or unsafe) to hold and pass by /// value in Rust. pub(crate) fn get_pod_safe_types(&self) -> impl Iterator<Item = (&TypeName, bool)> { self.by_rs_name.iter().map(|(tn, td)| { ( tn, match td.behavior { Behavior::CxxContainerByValueSafe | Behavior::RustStr | Behavior::RustString | Behavior::RustByValue | Behavior::CByValue | Behavior::CVariableLengthByValue => true, Behavior::CxxString | Behavior::CxxContainerNotByValueSafe | Behavior::CVoid => false, }, ) }) } /// Whether this TypePath should be treated as a value in C++ /// but a reference in Rust. This only applies to rust::Str /// (C++ name) which is &str in Rust. pub(crate) fn should_dereference_in_cpp(&self, typ: &TypePath) -> bool { let tn = TypeName::from_type_path(typ); self.get(&tn) .map(|td| matches!(td.behavior, Behavior::RustStr)) .unwrap_or(false) } /// Here we substitute any names which we know are Special from /// our type database, e.g. std::unique_ptr -> UniquePtr. /// We strip off and ignore /// any PathArguments within this TypePath - callers should /// put them back again if needs be. pub(crate) fn known_type_substitute_path(&self, typ: &TypePath) -> Option<TypePath> { let tn = TypeName::from_type_path(typ); self.get(&tn).map(|td| td.to_type_path()) } pub(crate) fn special_cpp_name(&self, rs: &TypeName) -> Option<String> { self.get(rs).map(|x| x.cpp_name.to_string()) } pub(crate) fn is_known_type(&self, ty: &TypeName) -> bool { self.get(ty).is_some() } pub(crate) fn known_type_type_path(&self, ty: &TypeName) -> Option<TypePath> { self.get(ty).map(|td| td.to_type_path()) } /// Whether this is one of the ctypes (mostly variable length integers) /// which we need to wrap. 
pub(crate) fn is_ctype(&self, ty: &TypeName) -> bool { self.get(ty) .map(|td| { matches!( td.behavior, Behavior::CVariableLengthByValue | Behavior::CVoid ) }) .unwrap_or(false) } /// Whether this is a generic type acceptable to cxx. Otherwise, /// if we encounter a generic, we'll replace it with a synthesized concrete /// type. pub(crate) fn is_cxx_acceptable_generic(&self, ty: &TypeName) -> bool { self.get(ty) .map(|x| { matches!( x.behavior, Behavior::CxxContainerByValueSafe | Behavior::CxxContainerNotByValueSafe ) }) .unwrap_or(false) } pub(crate) fn convertible_from_strs(&self, ty: &TypeName) -> bool { self.get(ty) .map(|x| matches!(x.behavior, Behavior::CxxString)) .unwrap_or(false) } fn insert(&mut self, td: TypeDetails) { let rs_name = td.to_typename(); if let Some(extra_non_canonical_name) = &td.extra_non_canonical_name { self.canonical_names.insert( TypeName::new_from_user_input(extra_non_canonical_name), rs_name.clone(), ); } self.canonical_names .insert(TypeName::new_from_user_input(&td.cpp_name), rs_name.clone()); self.by_rs_name.insert(rs_name, td); } } fn create_type_database() -> TypeDatabase { let mut db = TypeDatabase::default(); db.insert(TypeDetails::new( "cxx::UniquePtr", "std::unique_ptr", Behavior::CxxContainerByValueSafe, None, )); db.insert(TypeDetails::new( "cxx::CxxVector", "std::vector", Behavior::CxxContainerNotByValueSafe, None, )); db.insert(TypeDetails::new( "cxx::SharedPtr", "std::shared_ptr", Behavior::CxxContainerByValueSafe, None, )); db.insert(TypeDetails::new( "cxx::CxxString", "std::string", Behavior::CxxString, None, )); db.insert(TypeDetails::new( "str", "rust::Str", Behavior::RustStr, None, )); db.insert(TypeDetails::new( "String", "rust::String", Behavior::RustString, None, )); db.insert(TypeDetails::new( "i8", "int8_t", Behavior::CByValue, Some("std::os::raw::c_schar".into()), )); db.insert(TypeDetails::new( "u8", "uint8_t", Behavior::CByValue, Some("std::os::raw::c_uchar".into()), )); for (cpp_type, rust_type) in (4..7) 
.map(|x| 2i32.pow(x)) .map(|x| { vec![ (format!("uint{}_t", x), format!("u{}", x)), (format!("int{}_t", x), format!("i{}", x)), ] }) .flatten() { db.insert(TypeDetails::new( rust_type, cpp_type, Behavior::CByValue, None, )); } db.insert(TypeDetails::new("bool", "bool", Behavior::CByValue, None)); db.insert(TypeDetails::new( "std::pin::Pin", "Pin", Behavior::RustByValue, // because this is actually Pin<&something> None, )); let mut insert_ctype = |cname: &str| { let concatenated_name = cname.replace(" ", ""); db.insert(TypeDetails::new( format!("autocxx::c_{}", concatenated_name), cname, Behavior::CVariableLengthByValue, Some(format!("std::os::raw::c_{}", concatenated_name)), )); db.insert(TypeDetails::new( format!("autocxx::c_u{}", concatenated_name), format!("unsigned {}", cname), Behavior::CVariableLengthByValue, Some(format!("std::os::raw::c_u{}", concatenated_name)), )); }; insert_ctype("long"); insert_ctype("int"); insert_ctype("short"); insert_ctype("long long"); db.insert(TypeDetails::new("f32", "float", Behavior::CByValue, None)); db.insert(TypeDetails::new("f64", "double", Behavior::CByValue, None)); db.insert(TypeDetails::new( "std::os::raw::c_char", "char", Behavior::CByValue, None, )); db.insert(TypeDetails::new( "autocxx::c_void", "void", Behavior::CVoid, Some("std::os::raw::c_void".into()), )); db } /// This is worked out basically using trial and error. /// Excluding std* and rust* is obvious, but the other items... /// in theory bindgen ought to be smart enough to work out that /// they're not used and therefore not generate code for them. /// But it doesm unless we blocklist them. This is obviously /// a bit sensitive to the particular STL in use so one day /// it would be good to dig into bindgen's behavior here - TODO. const BINDGEN_BLOCKLIST: &[&str] = &["std.*", "__gnu.*", ".*mbstate_t.*", "rust.*"]; /// Get the list of types to give to bindgen to ask it _not_ to /// generate code for. 
pub(crate) fn get_initial_blocklist() -> Vec<String> { BINDGEN_BLOCKLIST.iter().map(|s| s.to_string()).collect() } /// If a given type lacks a copy constructor, we should always use /// std::move in wrapper functions. pub(crate) fn type_lacks_copy_constructor(ty: &Type) -> bool { // In future we may wish to look this up in KNOWN_TYPES. match ty { Type::Path(typ) => { let tn = TypeName::from_type_path(typ); tn.to_cpp_name().starts_with("std::unique_ptr") } _ => false, } } pub(crate) fn confirm_inner_type_is_acceptable_generic_payload( path_args: &PathArguments, desc: &TypeName, ) -> Result<(), ConvertError> { // For now, all supported generics accept the same payloads. This // may change in future in which case we'll need to accept more arguments here. match path_args { PathArguments::None => Ok(()), PathArguments::Parenthesized(_) => Err(ConvertError::TemplatedTypeContainingNonPathArg( desc.clone(), )), PathArguments::AngleBracketed(ab) => { for inner in &ab.args { match inner { GenericArgument::Type(Type::Path(typ)) => { if let Some(more_generics) = typ.path.segments.last() { confirm_inner_type_is_acceptable_generic_payload( &more_generics.arguments, desc, )?; } } _ => { return Err(ConvertError::TemplatedTypeContainingNonPathArg( desc.clone(), )) } } } Ok(()) } } } pub(crate) fn ensure_pointee_is_valid(ptr: &TypePtr) -> Result<(), ConvertError> { match *ptr.elem { Type::Path(..) => Ok(()), _ => Err(ConvertError::InvalidPointee), } }
// Copyright (C) 2019 O.S. Systems Sofware LTDA
//
// SPDX-License-Identifier: Apache-2.0

pub mod info;

/// Messages for triggering and reporting an update probe.
pub mod probe {
    use serde::{Deserialize, Serialize};

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Request {
        pub custom_server: String,
    }

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Response {
        pub update_available: bool,
        pub try_again_in: i32,
    }
}

/// Message reporting the agent's current state.
pub mod state {
    use serde::{Deserialize, Serialize};

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Response {
        pub busy: bool,
        pub current_state: String,
    }
}

/// Messages for aborting an in-progress download.
pub mod abort_download {
    use serde::{Deserialize, Serialize};

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Response {
        pub message: String,
    }

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Refused {
        pub error: String,
    }
}

/// Log entries as reported by the agent.
pub mod log {
    use serde::{Deserialize, Serialize};
    use std::collections::HashMap;

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Entry {
        pub level: Level,
        pub message: String,
        pub time: String,
        pub data: HashMap<String, String>,
    }

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub enum Level {
        Error,
        Info,
        Warning,
        Debug,
        Trace,
    }
}

sdk: Fix log level serialization and fields

Signed-off-by: Jonathas-Conceicao <33cd5f63a6435f6b1365c76100ff4a5652c45928@ossystems.com.br>

// Copyright (C) 2019 O.S. Systems Sofware LTDA
//
// SPDX-License-Identifier: Apache-2.0

pub mod info;

/// Messages for triggering and reporting an update probe.
pub mod probe {
    use serde::{Deserialize, Serialize};

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Request {
        pub custom_server: String,
    }

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Response {
        pub update_available: bool,
        pub try_again_in: i32,
    }
}

/// Message reporting the agent's current state.
pub mod state {
    use serde::{Deserialize, Serialize};

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Response {
        pub busy: bool,
        pub current_state: String,
    }
}

/// Messages for aborting an in-progress download.
pub mod abort_download {
    use serde::{Deserialize, Serialize};

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Response {
        pub message: String,
    }

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Refused {
        pub error: String,
    }
}

/// Log entries as reported by the agent.
pub mod log {
    use serde::{Deserialize, Serialize};
    use std::collections::HashMap;

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Entry {
        pub level: Level,
        pub message: String,
        pub time: String,
        pub data: HashMap<String, String>,
    }

    // Serialized as lowercase strings (e.g. "critical", "error").
    #[derive(Clone, Debug, Deserialize, Serialize)]
    #[serde(rename_all = "lowercase")]
    pub enum Level {
        Critical,
        Error,
        Warning,
        Info,
        Debug,
        Trace,
    }
}
#![allow(dead_code, unused_must_use, unused_variables, unused_imports)] use std::thread::{self,Thread,Builder}; use std::sync::mpsc::{self,channel,Receiver}; use mio::tcp::*; use mio::*; use bytes::{ByteBuf,MutByteBuf}; use std::collections::HashMap; use std::io::{self,Read,ErrorKind}; use nom::HexDisplay; use std::error::Error; use mio::util::Slab; use std::net::SocketAddr; use std::str::FromStr; use time::{Duration,precise_time_s}; use rand::random; use uuid::Uuid; use network::{Backend,ClientResult,ServerMessage,ServerMessageType,ConnectionError,ProxyOrder,RequiredEvents}; use network::proxy::{Server,ProxyClient,ProxyConfiguration,Readiness}; use messages::{TcpFront,Command,Instance}; const SERVER: Token = Token(0); #[derive(Debug,Clone,PartialEq,Eq)] pub enum ConnectionStatus { Initial, ClientConnected, Connected, ClientClosed, ServerClosed, Closed } pub struct Client { sock: TcpStream, backend: Option<TcpStream>, front_buf: Option<MutByteBuf>, back_buf: Option<MutByteBuf>, token: Option<Token>, backend_token: Option<Token>, accept_token: Token, back_interest: EventSet, front_interest: EventSet, front_timeout: Option<Timeout>, back_timeout: Option<Timeout>, status: ConnectionStatus, rx_count: usize, tx_count: usize, app_id: Option<String>, request_id: String, readiness: Readiness, } impl Client { fn new(sock: TcpStream, accept_token: Token) -> Option<Client> { Some(Client { sock: sock, backend: None, front_buf: Some(ByteBuf::mut_with_capacity(2048)), back_buf: Some(ByteBuf::mut_with_capacity(2048)), token: None, backend_token: None, accept_token: accept_token, back_interest: EventSet::all(), front_interest: EventSet::all(), front_timeout: None, back_timeout: None, status: ConnectionStatus::Connected, rx_count: 0, tx_count: 0, app_id: None, request_id: Uuid::new_v4().hyphenated().to_string(), readiness: Readiness::new(), }) } } impl ProxyClient for Client { fn front_socket(&self) -> &TcpStream { &self.sock } fn back_socket(&self) -> Option<&TcpStream> { 
self.backend.as_ref() } fn front_token(&self) -> Option<Token> { self.token } fn back_token(&self) -> Option<Token> { self.backend_token } fn log_context(&self) -> String { if let Some(ref app_id) = self.app_id { format!("TCP\t{}\t{}\t", self.request_id, app_id) } else { format!("TCP\t{}\tunknown\t", self.request_id) } } fn set_back_socket(&mut self, socket: TcpStream) { self.backend = Some(socket); } fn set_front_token(&mut self, token: Token) { self.token = Some(token); } fn set_back_token(&mut self, token: Token) { self.backend_token = Some(token); } fn set_tokens(&mut self, token: Token, backend: Token) { self.token = Some(token); self.backend_token = Some(backend); } fn front_timeout(&mut self) -> Option<Timeout> { self.front_timeout.take() } fn back_timeout(&mut self) -> Option<Timeout> { self.back_timeout.take() } fn set_front_timeout(&mut self, timeout: Timeout) { self.front_timeout = Some(timeout) } fn set_back_timeout(&mut self, timeout: Timeout) { self.back_timeout = Some(timeout) } //FIXME: too much cloning in there, should optimize fn remove_backend(&mut self) -> (Option<String>, Option<SocketAddr>) { debug!("{}\tTCP\tPROXY [{} -> {}] CLOSED BACKEND", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize()); let addr = self.backend.as_ref().and_then(|sock| sock.peer_addr().ok()); self.backend = None; self.backend_token = None; (self.app_id.clone(), addr) } fn front_hup(&mut self) -> ClientResult { if self.status == ConnectionStatus::ServerClosed || self.status == ConnectionStatus::ClientConnected { // the server never answered, the client closed self.status = ConnectionStatus::Closed; ClientResult::CloseClient } else { self.status = ConnectionStatus::ClientClosed; ClientResult::Continue } } fn back_hup(&mut self) -> ClientResult { if self.status == ConnectionStatus::ClientClosed { self.status = ConnectionStatus::Closed; ClientResult::CloseClient } else { self.status = ConnectionStatus::ServerClosed; 
ClientResult::Continue } } fn writable(&mut self) -> (RequiredEvents, ClientResult) { trace!("{}\tTCP\tin writable()", self.request_id); if let Some(buf) = self.back_buf.take() { //trace!("in writable 2: back_buf contains {} bytes", buf.remaining()); let mut b = buf.flip(); match self.sock.try_write_buf(&mut b) { Ok(None) => { error!("{}\tTCP\tclient flushing buf; WOULDBLOCK", self.request_id); self.front_interest.insert(EventSet::writable()); } Ok(Some(r)) => { //FIXME what happens if not everything was written? debug!("{}\tTCP\tFRONT [{}<-{}]: wrote {} bytes", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize(), r); self.tx_count = self.tx_count + r; //self.front_interest.insert(EventSet::readable()); if r > 0 { self.readiness.front_readiness.remove(EventSet::writable()); } self.front_interest.remove(EventSet::writable()); self.back_interest.insert(EventSet::readable()); } Err(e) => error!("{}\tTCP\tnot implemented; client err={:?}", self.request_id, e), } self.back_buf = Some(b.flip()); } (RequiredEvents::FrontReadWriteBackReadWrite, ClientResult::Continue) } fn readable(&mut self) -> (RequiredEvents, ClientResult) { let mut buf = self.front_buf.take().unwrap(); //trace!("in readable(): front_mut_buf contains {} bytes", buf.remaining()); match self.sock.try_read_buf(&mut buf) { Ok(None) => { error!("{}\tTCP\tWe just got readable, but were unable to read from the socket?", self.request_id); } Ok(Some(r)) => { println!("{}\tTCP\tFRONT [{}->{}]: read {} bytes", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize(), r); if r > 0 { self.readiness.front_readiness.remove(EventSet::readable()); } self.front_interest.remove(EventSet::readable()); self.back_interest.insert(EventSet::writable()); self.rx_count = self.rx_count + r; // prepare to provide this to writable } Err(e) => { error!("{}\tTCP\tnot implemented; client err={:?}", self.request_id, e); 
//self.front_interest.remove(EventSet::readable()); } }; self.front_buf = Some(buf); (RequiredEvents::FrontReadWriteBackReadWrite, ClientResult::Continue) } fn back_writable(&mut self) -> (RequiredEvents, ClientResult) { if let Some(buf) = self.front_buf.take() { //trace!("in back_writable 2: front_buf contains {} bytes", buf.remaining()); let mut b = buf.flip(); if let Some(ref mut sock) = self.backend { match sock.try_write_buf(&mut b) { Ok(None) => { error!("{}\tTCP\tclient flushing buf; WOULDBLOCK", self.request_id); self.back_interest.insert(EventSet::writable()); } Ok(Some(r)) => { //FIXME what happens if not everything was written? debug!("{}\tTCP\tBACK [{}->{}]: wrote {} bytes", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize(), r); if r > 0 { self.readiness.back_readiness.remove(EventSet::writable()); } self.front_interest.insert(EventSet::readable()); self.back_interest.remove(EventSet::writable()); self.back_interest.insert(EventSet::readable()); } Err(e) => error!("{}\tTCP\tnot implemented; client err={:?}", self.request_id, e), } } self.front_buf = Some(b.flip()); } (RequiredEvents::FrontReadWriteBackReadWrite, ClientResult::Continue) } fn back_readable(&mut self) -> (RequiredEvents, ClientResult) { let mut buf = self.back_buf.take().unwrap(); //trace!("{}\tTCP\tin back_readable(): back_mut_buf contains {} bytes", self.request_id, buf.remaining()); if let Some(ref mut sock) = self.backend { match sock.try_read_buf(&mut buf) { Ok(None) => { error!("{}\tTCP\tWe just got readable, but were unable to read from the socket?", self.request_id); } Ok(Some(r)) => { println!("{}\tTCP\tBACK [{}<-{}]: read {} bytes", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize(), r); if r > 0 { self.readiness.back_readiness.remove(EventSet::readable()); } self.back_interest.remove(EventSet::readable()); self.front_interest.insert(EventSet::writable()); // prepare to provide this to writable } Err(e) => 
{ error!("{}\tTCP\tnot implemented; client err={:?}", self.request_id, e); //self.interest.remove(EventSet::readable()); } }; } self.back_buf = Some(buf); (RequiredEvents::FrontReadWriteBackReadWrite, ClientResult::Continue) } fn readiness(&mut self) -> &mut Readiness { &mut self.readiness } } pub struct ApplicationListener { app_id: String, sock: TcpListener, token: Option<Token>, front_address: SocketAddr, back_addresses: Vec<SocketAddr> } type ClientToken = Token; pub struct ServerConfiguration { fronts: HashMap<String, Token>, instances: HashMap<String, Vec<Backend>>, listeners: Slab<ApplicationListener>, tx: mpsc::Sender<ServerMessage>, front_timeout: u64, back_timeout: u64, } impl ServerConfiguration { pub fn new(max_listeners: usize, tx: mpsc::Sender<ServerMessage>) -> ServerConfiguration { ServerConfiguration { instances: HashMap::new(), listeners: Slab::new_starting_at(Token(0), max_listeners), fronts: HashMap::new(), tx: tx, front_timeout: 50000, back_timeout: 50000, } } fn add_tcp_front(&mut self, app_id: &str, front: &SocketAddr, event_loop: &mut EventLoop<TcpServer>) -> Option<Token> { if let Ok(listener) = TcpListener::bind(front) { let addresses: Vec<SocketAddr> = if let Some(ads) = self.instances.get(app_id) { let v: Vec<SocketAddr> = ads.iter().map(|backend| backend.address).collect(); v } else { Vec::new() }; let al = ApplicationListener { app_id: String::from(app_id), sock: listener, token: None, front_address: *front, back_addresses: addresses }; if let Ok(tok) = self.listeners.insert(al) { self.listeners[tok].token = Some(tok); self.fronts.insert(String::from(app_id), tok); event_loop.register(&self.listeners[tok].sock, tok, EventSet::readable(), PollOpt::level()); info!("TCP\tregistered listener for app {} on port {}", app_id, front.port()); Some(tok) } else { error!("TCP\tcould not register listener for app {} on port {}", app_id, front.port()); None } } else { error!("TCP\tcould not declare listener for app {} on port {}", app_id, 
front.port()); None } } pub fn remove_tcp_front(&mut self, app_id: String, event_loop: &mut EventLoop<TcpServer>) -> Option<Token>{ info!("TCP\tremoving tcp_front {:?}", app_id); // ToDo // Removes all listeners for the given app_id // an app can't have two listeners. Is this a problem? if let Some(&tok) = self.fronts.get(&app_id) { if self.listeners.contains(tok) { event_loop.deregister(&self.listeners[tok].sock); self.listeners.remove(tok); warn!("TCP\tremoved server {:?}", tok); //self.listeners[tok].sock.shutdown(Shutdown::Both); Some(tok) } else { None } } else { None } } pub fn add_instance(&mut self, app_id: &str, instance_address: &SocketAddr, event_loop: &mut EventLoop<TcpServer>) -> Option<Token> { if let Some(addrs) = self.instances.get_mut(app_id) { let backend = Backend::new(*instance_address); addrs.push(backend); } if self.instances.get(app_id).is_none() { let backend = Backend::new(*instance_address); self.instances.insert(String::from(app_id), vec![backend]); } if let Some(&tok) = self.fronts.get(app_id) { let application_listener = &mut self.listeners[tok]; application_listener.back_addresses.push(*instance_address); Some(tok) } else { error!("TCP\tNo front for this instance"); None } } pub fn remove_instance(&mut self, app_id: &str, instance_address: &SocketAddr, event_loop: &mut EventLoop<TcpServer>) -> Option<Token>{ // ToDo None } } impl ProxyConfiguration<TcpServer, Client> for ServerConfiguration { fn connect_to_backend(&mut self, client:&mut Client) ->Result<(),ConnectionError> { let rnd = random::<usize>(); let idx = rnd % self.listeners[client.accept_token].back_addresses.len(); client.app_id = Some(self.listeners[client.accept_token].app_id.clone()); let backend_addr = try!(self.listeners[client.accept_token].back_addresses.get(idx).ok_or(ConnectionError::ToBeDefined)); let stream = try!(TcpStream::connect(backend_addr).map_err(|_| ConnectionError::ToBeDefined)); stream.set_nodelay(true); client.set_back_socket(stream); Ok(()) } fn 
notify(&mut self, event_loop: &mut EventLoop<TcpServer>, message: ProxyOrder) { match message { ProxyOrder::Command(id, Command::AddTcpFront(tcp_front)) => { trace!("TCP\t{:?}", tcp_front); let addr_string = tcp_front.ip_address + &tcp_front.port.to_string(); if let Ok(front) = addr_string.parse() { if let Some(token) = self.add_tcp_front(&tcp_front.app_id, &front, event_loop) { self.tx.send(ServerMessage{ id: id, message: ServerMessageType::AddedFront}); } else { error!("TCP\tCouldn't add tcp front"); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Error(String::from("cannot add tcp front"))}); } } else { error!("TCP\tCouldn't parse tcp front address"); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Error(String::from("cannot parse the address"))}); } }, ProxyOrder::Command(id, Command::RemoveTcpFront(front)) => { trace!("TCP\t{:?}", front); let _ = self.remove_tcp_front(front.app_id, event_loop); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::RemovedFront}); }, ProxyOrder::Command(id, Command::AddInstance(instance)) => { trace!("TCP\t{:?}", instance); let addr_string = instance.ip_address + ":" + &instance.port.to_string(); let addr = &addr_string.parse().unwrap(); if let Some(token) = self.add_instance(&instance.app_id, addr, event_loop) { self.tx.send(ServerMessage{ id: id, message: ServerMessageType::AddedInstance}); } else { error!("TCP\tCouldn't add tcp instance"); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Error(String::from("cannot add tcp instance"))}); } }, ProxyOrder::Command(id, Command::RemoveInstance(instance)) => { trace!("TCP\t{:?}", instance); let addr_string = instance.ip_address + ":" + &instance.port.to_string(); let addr = &addr_string.parse().unwrap(); if let Some(token) = self.remove_instance(&instance.app_id, addr, event_loop) { self.tx.send(ServerMessage{ id: id, message: ServerMessageType::RemovedInstance}); } else { error!("TCP\tCouldn't remove tcp instance"); 
self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Error(String::from("cannot remove tcp instance"))}); } }, ProxyOrder::Stop(id) => { event_loop.shutdown(); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Stopped}); }, ProxyOrder::Command(id, _) => { error!("TCP\tunsupported message, ignoring"); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Error(String::from("unsupported message"))}); } } } fn accept(&mut self, token: Token) -> Option<(Client, bool)> { if self.listeners.contains(token) { let accepted = self.listeners[token].sock.accept(); if let Ok(Some((frontend_sock, _))) = accepted { frontend_sock.set_nodelay(true); if let Some(c) = Client::new(frontend_sock, token) { return Some((c, true)); } } } None } fn close_backend(&mut self, app_id: String, addr: &SocketAddr) { if let Some(app_instances) = self.instances.get_mut(&app_id) { if let Some(ref mut backend) = app_instances.iter_mut().find(|backend| &backend.address == addr) { backend.dec_connections(); } } } fn front_timeout(&self) -> u64 { self.front_timeout } fn back_timeout(&self) -> u64 { self.back_timeout } } pub type TcpServer = Server<ServerConfiguration,Client>; pub fn start() { let mut event_loop = EventLoop::new().unwrap(); info!("TCP\tlisten for connections"); //event_loop.register(&listener, SERVER, EventSet::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); let (tx,rx) = channel::<ServerMessage>(); let configuration = ServerConfiguration::new(10, tx); let mut s = TcpServer::new(10, 500, configuration); { let front: SocketAddr = FromStr::from_str("127.0.0.1:1234").unwrap(); let back: SocketAddr = FromStr::from_str("127.0.0.1:5678").unwrap(); s.configuration().add_tcp_front("yolo", &front, &mut event_loop); s.configuration().add_instance("yolo", &back, &mut event_loop); } { let front: SocketAddr = FromStr::from_str("127.0.0.1:1235").unwrap(); let back: SocketAddr = FromStr::from_str("127.0.0.1:5678").unwrap(); 
s.configuration().add_tcp_front("yolo", &front, &mut event_loop); s.configuration().add_instance("yolo", &back, &mut event_loop); } thread::spawn(move|| { info!("TCP\tstarting event loop"); event_loop.run(&mut s).unwrap(); info!("TCP\tending event loop"); }); } pub fn start_listener(max_listeners: usize, max_connections: usize, tx: mpsc::Sender<ServerMessage>) -> (Sender<ProxyOrder>,thread::JoinHandle<()>) { let mut event_loop = EventLoop::new().unwrap(); let channel = event_loop.channel(); let notify_tx = tx.clone(); let configuration = ServerConfiguration::new(max_listeners, tx); let mut server = TcpServer::new(max_listeners, max_connections, configuration); let front: SocketAddr = FromStr::from_str("127.0.0.1:8443").unwrap(); server.configuration().add_tcp_front("yolo", &front, &mut event_loop); let join_guard = thread::spawn(move|| { info!("TCP\tstarting event loop"); event_loop.run(&mut server).unwrap(); info!("TCP\tending event loop"); //notify_tx.send(ServerMessage::Stopped); }); (channel, join_guard) } #[cfg(test)] mod tests { use super::*; use std::net::{TcpListener, TcpStream, Shutdown}; use std::io::{Read,Write}; use std::time::Duration; use std::{thread,str}; #[allow(unused_mut, unused_must_use, unused_variables)] #[test] fn mi() { thread::spawn(|| { start_server(); }); start(); thread::sleep(Duration::from_millis(300)); let mut s1 = TcpStream::connect("127.0.0.1:1234").unwrap(); let mut s3 = TcpStream::connect("127.0.0.1:1234").unwrap(); thread::sleep(Duration::from_millis(300)); let mut s2 = TcpStream::connect("127.0.0.1:1234").unwrap(); s1.write(&b"hello"[..]); println!("s1 sent"); s2.write(&b"pouet pouet"[..]); println!("s2 sent"); thread::sleep(Duration::from_millis(500)); let mut res = [0; 128]; s1.write(&b"coucou"[..]); let mut sz1 = s1.read(&mut res[..]).unwrap(); println!("s1 received {:?}", str::from_utf8(&res[..sz1])); assert_eq!(&res[..sz1], &b"hello END"[..]); s3.shutdown(Shutdown::Both); let sz2 = s2.read(&mut res[..]).unwrap(); 
println!("s2 received {:?}", str::from_utf8(&res[..sz2])); assert_eq!(&res[..sz2], &b"pouet pouet END"[..]); thread::sleep(Duration::from_millis(400)); sz1 = s1.read(&mut res[..]).unwrap(); println!("s1 received again({}): {:?}", sz1, str::from_utf8(&res[..sz1])); assert_eq!(&res[..sz1], &b"coucou END"[..]); //assert!(false); } /* #[allow(unused_mut, unused_must_use, unused_variables)] #[test] fn concurrent() { use std::sync::mpsc; use time; let thread_nb = 127; thread::spawn(|| { start_server(); }); start(); thread::sleep_ms(300); let (tx, rx) = mpsc::channel(); let begin = time::precise_time_s(); for i in 0..thread_nb { let id = i; let tx = tx.clone(); thread::Builder::new().name(id.to_string()).spawn(move || { let s = format!("[{}] Hello world!\n", id); let v: Vec<u8> = s.bytes().collect(); if let Ok(mut conn) = TcpStream::connect("127.0.0.1:1234") { let mut res = [0; 128]; for j in 0..10000 { conn.write(&v[..]); if j % 5 == 0 { if let Ok(sz) = conn.read(&mut res[..]) { //println!("[{}] received({}): {:?}", id, sz, str::from_utf8(&res[..sz])); } else { println!("failed reading"); tx.send(()); return; } } } tx.send(()); return; } else { println!("failed connecting"); tx.send(()); return; } }); } //thread::sleep_ms(5000); for i in 0..thread_nb { rx.recv(); } let end = time::precise_time_s(); println!("executed in {} seconds", end - begin); assert!(false); } */ #[allow(unused_mut, unused_must_use, unused_variables)] fn start_server() { let listener = TcpListener::bind("127.0.0.1:5678").unwrap(); fn handle_client(stream: &mut TcpStream, id: u8) { let mut buf = [0; 128]; let response = b" END"; while let Ok(sz) = stream.read(&mut buf[..]) { if sz > 0 { //println!("[{}] {:?}", id, str::from_utf8(&buf[..sz])); stream.write(&buf[..sz]); thread::sleep(Duration::from_millis(20)); stream.write(&response[..]); } } } let mut count = 0; thread::spawn(move|| { for conn in listener.incoming() { match conn { Ok(mut stream) => { thread::spawn(move|| { println!("got a new client: 
{}", count); handle_client(&mut stream, count) }); } Err(e) => { println!("connection failed"); } } count += 1; } }); } } rewrite the TCP proxy to use the buffer queue not working yet, the proxy keeps looping between front write interest and back write interest without any data #![allow(dead_code, unused_must_use, unused_variables, unused_imports)] use std::thread::{self,Thread,Builder}; use std::sync::mpsc::{self,channel,Receiver}; use mio::tcp::*; use mio::*; use bytes::{ByteBuf,MutByteBuf}; use std::collections::HashMap; use std::io::{self,Read,ErrorKind}; use nom::HexDisplay; use std::error::Error; use mio::util::Slab; use std::net::SocketAddr; use std::str::FromStr; use time::{Duration,precise_time_s}; use rand::random; use uuid::Uuid; use network::{Backend,ClientResult,ServerMessage,ServerMessageType,ConnectionError,ProxyOrder,RequiredEvents}; use network::proxy::{Server,ProxyClient,ProxyConfiguration,Readiness}; use network::buffer::Buffer; use network::buffer_queue::BufferQueue; use network::socket::{SocketHandler,SocketResult}; use pool::{Pool,Checkout,Reset}; use messages::{TcpFront,Command,Instance}; const SERVER: Token = Token(0); #[derive(Debug,Clone,PartialEq,Eq)] pub enum ConnectionStatus { Initial, ClientConnected, Connected, ClientClosed, ServerClosed, Closed } pub struct Client { sock: TcpStream, backend: Option<TcpStream>, front_buf: Checkout<BufferQueue>, back_buf: Checkout<BufferQueue>, token: Option<Token>, backend_token: Option<Token>, accept_token: Token, back_interest: EventSet, front_interest: EventSet, front_timeout: Option<Timeout>, back_timeout: Option<Timeout>, status: ConnectionStatus, rx_count: usize, tx_count: usize, app_id: Option<String>, request_id: String, readiness: Readiness, } impl Client { fn new(sock: TcpStream, accept_token: Token, front_buf: Checkout<BufferQueue>, back_buf: Checkout<BufferQueue>) -> Option<Client> { Some(Client { sock: sock, backend: None, front_buf: front_buf, back_buf: back_buf, token: None, 
backend_token: None, accept_token: accept_token, back_interest: EventSet::all(), front_interest: EventSet::all(), front_timeout: None, back_timeout: None, status: ConnectionStatus::Connected, rx_count: 0, tx_count: 0, app_id: None, request_id: Uuid::new_v4().hyphenated().to_string(), readiness: Readiness::new(), }) } } impl ProxyClient for Client { fn front_socket(&self) -> &TcpStream { &self.sock } fn back_socket(&self) -> Option<&TcpStream> { self.backend.as_ref() } fn front_token(&self) -> Option<Token> { self.token } fn back_token(&self) -> Option<Token> { self.backend_token } fn log_context(&self) -> String { if let Some(ref app_id) = self.app_id { format!("TCP\t{}\t{}\t", self.request_id, app_id) } else { format!("TCP\t{}\tunknown\t", self.request_id) } } fn set_back_socket(&mut self, socket: TcpStream) { self.backend = Some(socket); } fn set_front_token(&mut self, token: Token) { self.token = Some(token); } fn set_back_token(&mut self, token: Token) { self.backend_token = Some(token); } fn set_tokens(&mut self, token: Token, backend: Token) { self.token = Some(token); self.backend_token = Some(backend); } fn front_timeout(&mut self) -> Option<Timeout> { self.front_timeout.take() } fn back_timeout(&mut self) -> Option<Timeout> { self.back_timeout.take() } fn set_front_timeout(&mut self, timeout: Timeout) { self.front_timeout = Some(timeout) } fn set_back_timeout(&mut self, timeout: Timeout) { self.back_timeout = Some(timeout) } //FIXME: too much cloning in there, should optimize fn remove_backend(&mut self) -> (Option<String>, Option<SocketAddr>) { debug!("{}\tTCP\tPROXY [{} -> {}] CLOSED BACKEND", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize()); let addr = self.backend.as_ref().and_then(|sock| sock.peer_addr().ok()); self.backend = None; self.backend_token = None; (self.app_id.clone(), addr) } fn front_hup(&mut self) -> ClientResult { if self.status == ConnectionStatus::ServerClosed || self.status == 
ConnectionStatus::ClientConnected { // the server never answered, the client closed self.status = ConnectionStatus::Closed; ClientResult::CloseClient } else { self.status = ConnectionStatus::ClientClosed; ClientResult::Continue } } fn back_hup(&mut self) -> ClientResult { if self.status == ConnectionStatus::ClientClosed { self.status = ConnectionStatus::Closed; ClientResult::CloseClient } else { self.status = ConnectionStatus::ServerClosed; ClientResult::Continue } } fn readable(&mut self) -> (RequiredEvents, ClientResult) { println!("{}\tTCP\tFRONT [{}->{}] readable", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize()); if self.front_buf.buffer.available_space() == 0 { return (RequiredEvents::FrontNoneBackReadWrite, ClientResult::Continue); } let (sz, res) = self.sock.socket_read(self.front_buf.buffer.space()); self.front_buf.buffer.fill(sz); self.front_buf.sliced_input(sz); self.front_buf.consume_parsed_data(sz); self.front_buf.slice_output(sz); println!("{}\tTCP\tFRONT [{}->{}]: read {} bytes", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize(), sz); match res { SocketResult::Error => { self.readiness.front_readiness.remove(EventSet::readable()); return (RequiredEvents::FrontNoneBackNone, ClientResult::CloseClient); }, _ => { if res == SocketResult::WouldBlock { self.readiness.front_readiness.remove(EventSet::readable()); } return (RequiredEvents::FrontReadWriteBackReadWrite, ClientResult::Continue); } } } fn writable(&mut self) -> (RequiredEvents, ClientResult) { println!("{}\tTCP\tFRONT [{}<-{}] writable", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize()); if self.back_buf.buffer.available_data() == 0 { return (RequiredEvents::FrontReadBackReadWrite, ClientResult::Continue); } let mut sz = 0usize; let mut socket_res = SocketResult::Continue; while socket_res == SocketResult::Continue && self.back_buf.output_data_size() > 0 { let (current_sz, 
current_res) = self.sock.socket_write(self.back_buf.next_output_data()); socket_res = current_res; self.back_buf.consume_output_data(current_sz); sz += current_sz; } println!("{}\tTCP\tFRONT [{}<-{}]: wrote {} bytes", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize(), sz); match socket_res { SocketResult::Error => { self.readiness.front_readiness.remove(EventSet::writable()); (RequiredEvents::FrontNoneBackNone, ClientResult::CloseBothFailure) }, SocketResult::WouldBlock => { self.readiness.front_readiness.remove(EventSet::writable()); (RequiredEvents::FrontReadWriteBackReadWrite, ClientResult::Continue) }, SocketResult::Continue => { (RequiredEvents::FrontReadWriteBackReadWrite, ClientResult::Continue) } } } fn back_readable(&mut self) -> (RequiredEvents, ClientResult) { println!("{}\tTCP\tBACK [{}<-{}] back_readable", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize()); if self.back_buf.buffer.available_space() == 0 { return (RequiredEvents::FrontWriteBackRead, ClientResult::Continue); } if let Some(ref mut sock) = self.backend { let (sz, res) = sock.socket_read(self.back_buf.buffer.space()); self.back_buf.buffer.fill(sz); self.back_buf.sliced_input(sz); self.back_buf.consume_parsed_data(sz); self.back_buf.slice_output(sz); println!("{}\tTCP\tBACK [{}<-{}]: read {} bytes", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize(), sz); match res { SocketResult::Error => { self.readiness.back_readiness.remove(EventSet::readable()); return (RequiredEvents::FrontNoneBackNone, ClientResult::CloseClient); }, _ => { if res == SocketResult::WouldBlock { self.readiness.back_readiness.remove(EventSet::readable()); } return (RequiredEvents::FrontReadWriteBackReadWrite, ClientResult::Continue); } } } else { (RequiredEvents::FrontNoneBackNone, ClientResult::CloseBothFailure) } } fn back_writable(&mut self) -> (RequiredEvents, ClientResult) { println!("{}\tTCP\tBACK 
[{}->{}] back_writable", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize()); if self.front_buf.buffer.available_data() == 0 { return (RequiredEvents::FrontReadWriteBackRead, ClientResult::Continue); } let mut sz = 0usize; let mut socket_res = SocketResult::Continue; if let Some(ref mut sock) = self.backend { while socket_res == SocketResult::Continue && self.front_buf.output_data_size() > 0 { let (current_sz, current_res) = sock.socket_write(self.front_buf.next_output_data()); socket_res = current_res; self.front_buf.consume_output_data(current_sz); sz += current_sz; } } println!("{}\tTCP\tBACK [{}->{}]: wrote {} bytes", self.request_id, self.token.unwrap().as_usize(), self.backend_token.unwrap().as_usize(), sz); match socket_res { SocketResult::Error => { self.readiness.back_readiness.remove(EventSet::writable()); (RequiredEvents::FrontNoneBackNone, ClientResult::CloseBothFailure) }, SocketResult::WouldBlock => { self.readiness.back_readiness.remove(EventSet::writable()); (RequiredEvents::FrontReadWriteBackReadWrite, ClientResult::Continue) }, SocketResult::Continue => { (RequiredEvents::FrontReadWriteBackReadWrite, ClientResult::Continue) } } } fn readiness(&mut self) -> &mut Readiness { &mut self.readiness } } pub struct ApplicationListener { app_id: String, sock: TcpListener, token: Option<Token>, front_address: SocketAddr, back_addresses: Vec<SocketAddr> } type ClientToken = Token; pub struct ServerConfiguration { fronts: HashMap<String, Token>, instances: HashMap<String, Vec<Backend>>, listeners: Slab<ApplicationListener>, tx: mpsc::Sender<ServerMessage>, pool: Pool<BufferQueue>, front_timeout: u64, back_timeout: u64, } impl ServerConfiguration { pub fn new(max_listeners: usize, tx: mpsc::Sender<ServerMessage>) -> ServerConfiguration { ServerConfiguration { instances: HashMap::new(), listeners: Slab::new_starting_at(Token(0), max_listeners), fronts: HashMap::new(), tx: tx, pool: Pool::with_capacity(2*max_listeners, 0, || 
BufferQueue::with_capacity(2048)), front_timeout: 5000, back_timeout: 5000, } } fn add_tcp_front(&mut self, app_id: &str, front: &SocketAddr, event_loop: &mut EventLoop<TcpServer>) -> Option<Token> { if let Ok(listener) = TcpListener::bind(front) { let addresses: Vec<SocketAddr> = if let Some(ads) = self.instances.get(app_id) { let v: Vec<SocketAddr> = ads.iter().map(|backend| backend.address).collect(); v } else { Vec::new() }; let al = ApplicationListener { app_id: String::from(app_id), sock: listener, token: None, front_address: *front, back_addresses: addresses }; if let Ok(tok) = self.listeners.insert(al) { self.listeners[tok].token = Some(tok); self.fronts.insert(String::from(app_id), tok); event_loop.register(&self.listeners[tok].sock, tok, EventSet::readable(), PollOpt::level()); info!("TCP\tregistered listener for app {} on port {}", app_id, front.port()); Some(tok) } else { error!("TCP\tcould not register listener for app {} on port {}", app_id, front.port()); None } } else { error!("TCP\tcould not declare listener for app {} on port {}", app_id, front.port()); None } } pub fn remove_tcp_front(&mut self, app_id: String, event_loop: &mut EventLoop<TcpServer>) -> Option<Token>{ info!("TCP\tremoving tcp_front {:?}", app_id); // ToDo // Removes all listeners for the given app_id // an app can't have two listeners. Is this a problem? 
if let Some(&tok) = self.fronts.get(&app_id) { if self.listeners.contains(tok) { event_loop.deregister(&self.listeners[tok].sock); self.listeners.remove(tok); warn!("TCP\tremoved server {:?}", tok); //self.listeners[tok].sock.shutdown(Shutdown::Both); Some(tok) } else { None } } else { None } } pub fn add_instance(&mut self, app_id: &str, instance_address: &SocketAddr, event_loop: &mut EventLoop<TcpServer>) -> Option<Token> { if let Some(addrs) = self.instances.get_mut(app_id) { let backend = Backend::new(*instance_address); addrs.push(backend); } if self.instances.get(app_id).is_none() { let backend = Backend::new(*instance_address); self.instances.insert(String::from(app_id), vec![backend]); } if let Some(&tok) = self.fronts.get(app_id) { let application_listener = &mut self.listeners[tok]; application_listener.back_addresses.push(*instance_address); Some(tok) } else { error!("TCP\tNo front for this instance"); None } } pub fn remove_instance(&mut self, app_id: &str, instance_address: &SocketAddr, event_loop: &mut EventLoop<TcpServer>) -> Option<Token>{ // ToDo None } } impl ProxyConfiguration<TcpServer, Client> for ServerConfiguration { fn connect_to_backend(&mut self, client:&mut Client) ->Result<(),ConnectionError> { let rnd = random::<usize>(); let idx = rnd % self.listeners[client.accept_token].back_addresses.len(); client.app_id = Some(self.listeners[client.accept_token].app_id.clone()); let backend_addr = try!(self.listeners[client.accept_token].back_addresses.get(idx).ok_or(ConnectionError::ToBeDefined)); let stream = try!(TcpStream::connect(backend_addr).map_err(|_| ConnectionError::ToBeDefined)); stream.set_nodelay(true); client.set_back_socket(stream); Ok(()) } fn notify(&mut self, event_loop: &mut EventLoop<TcpServer>, message: ProxyOrder) { match message { ProxyOrder::Command(id, Command::AddTcpFront(tcp_front)) => { trace!("TCP\t{:?}", tcp_front); let addr_string = tcp_front.ip_address + &tcp_front.port.to_string(); if let Ok(front) = 
addr_string.parse() { if let Some(token) = self.add_tcp_front(&tcp_front.app_id, &front, event_loop) { self.tx.send(ServerMessage{ id: id, message: ServerMessageType::AddedFront}); } else { error!("TCP\tCouldn't add tcp front"); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Error(String::from("cannot add tcp front"))}); } } else { error!("TCP\tCouldn't parse tcp front address"); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Error(String::from("cannot parse the address"))}); } }, ProxyOrder::Command(id, Command::RemoveTcpFront(front)) => { trace!("TCP\t{:?}", front); let _ = self.remove_tcp_front(front.app_id, event_loop); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::RemovedFront}); }, ProxyOrder::Command(id, Command::AddInstance(instance)) => { trace!("TCP\t{:?}", instance); let addr_string = instance.ip_address + ":" + &instance.port.to_string(); let addr = &addr_string.parse().unwrap(); if let Some(token) = self.add_instance(&instance.app_id, addr, event_loop) { self.tx.send(ServerMessage{ id: id, message: ServerMessageType::AddedInstance}); } else { error!("TCP\tCouldn't add tcp instance"); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Error(String::from("cannot add tcp instance"))}); } }, ProxyOrder::Command(id, Command::RemoveInstance(instance)) => { trace!("TCP\t{:?}", instance); let addr_string = instance.ip_address + ":" + &instance.port.to_string(); let addr = &addr_string.parse().unwrap(); if let Some(token) = self.remove_instance(&instance.app_id, addr, event_loop) { self.tx.send(ServerMessage{ id: id, message: ServerMessageType::RemovedInstance}); } else { error!("TCP\tCouldn't remove tcp instance"); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Error(String::from("cannot remove tcp instance"))}); } }, ProxyOrder::Stop(id) => { event_loop.shutdown(); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Stopped}); }, ProxyOrder::Command(id, _) => { 
error!("TCP\tunsupported message, ignoring"); self.tx.send(ServerMessage{ id: id, message: ServerMessageType::Error(String::from("unsupported message"))}); } } } fn accept(&mut self, token: Token) -> Option<(Client, bool)> { if let (Some(front_buf), Some(back_buf)) = (self.pool.checkout(), self.pool.checkout()) { if self.listeners.contains(token) { let accepted = self.listeners[token].sock.accept(); if let Ok(Some((frontend_sock, _))) = accepted { frontend_sock.set_nodelay(true); if let Some(c) = Client::new(frontend_sock, token, front_buf, back_buf) { return Some((c, true)); } } } } else { error!("TCP\tcould not get buffers from pool"); } None } fn close_backend(&mut self, app_id: String, addr: &SocketAddr) { if let Some(app_instances) = self.instances.get_mut(&app_id) { if let Some(ref mut backend) = app_instances.iter_mut().find(|backend| &backend.address == addr) { backend.dec_connections(); } } } fn front_timeout(&self) -> u64 { self.front_timeout } fn back_timeout(&self) -> u64 { self.back_timeout } } pub type TcpServer = Server<ServerConfiguration,Client>; pub fn start() { let mut event_loop = EventLoop::new().unwrap(); info!("TCP\tlisten for connections"); //event_loop.register(&listener, SERVER, EventSet::readable(), PollOpt::edge() | PollOpt::oneshot()).unwrap(); let (tx,rx) = channel::<ServerMessage>(); let configuration = ServerConfiguration::new(10, tx); let mut s = TcpServer::new(10, 500, configuration); { let front: SocketAddr = FromStr::from_str("127.0.0.1:1234").unwrap(); let back: SocketAddr = FromStr::from_str("127.0.0.1:5678").unwrap(); s.configuration().add_tcp_front("yolo", &front, &mut event_loop); s.configuration().add_instance("yolo", &back, &mut event_loop); } { let front: SocketAddr = FromStr::from_str("127.0.0.1:1235").unwrap(); let back: SocketAddr = FromStr::from_str("127.0.0.1:5678").unwrap(); s.configuration().add_tcp_front("yolo", &front, &mut event_loop); s.configuration().add_instance("yolo", &back, &mut event_loop); } 
thread::spawn(move|| { info!("TCP\tstarting event loop"); event_loop.run(&mut s).unwrap(); info!("TCP\tending event loop"); }); } pub fn start_listener(max_listeners: usize, max_connections: usize, tx: mpsc::Sender<ServerMessage>) -> (Sender<ProxyOrder>,thread::JoinHandle<()>) { let mut event_loop = EventLoop::new().unwrap(); let channel = event_loop.channel(); let notify_tx = tx.clone(); let configuration = ServerConfiguration::new(max_listeners, tx); let mut server = TcpServer::new(max_listeners, max_connections, configuration); let front: SocketAddr = FromStr::from_str("127.0.0.1:8443").unwrap(); server.configuration().add_tcp_front("yolo", &front, &mut event_loop); let join_guard = thread::spawn(move|| { info!("TCP\tstarting event loop"); event_loop.run(&mut server).unwrap(); info!("TCP\tending event loop"); //notify_tx.send(ServerMessage::Stopped); }); (channel, join_guard) } #[cfg(test)] mod tests { use super::*; use std::net::{TcpListener, TcpStream, Shutdown}; use std::io::{Read,Write}; use std::time::Duration; use std::{thread,str}; #[allow(unused_mut, unused_must_use, unused_variables)] #[test] fn mi() { thread::spawn(|| { start_server(); }); start(); thread::sleep(Duration::from_millis(300)); let mut s1 = TcpStream::connect("127.0.0.1:1234").unwrap(); let mut s3 = TcpStream::connect("127.0.0.1:1234").unwrap(); thread::sleep(Duration::from_millis(300)); let mut s2 = TcpStream::connect("127.0.0.1:1234").unwrap(); s1.write(&b"hello"[..]); println!("s1 sent"); s2.write(&b"pouet pouet"[..]); println!("s2 sent"); thread::sleep(Duration::from_millis(500)); let mut res = [0; 128]; s1.write(&b"coucou"[..]); let mut sz1 = s1.read(&mut res[..]).unwrap(); println!("s1 received {:?}", str::from_utf8(&res[..sz1])); assert_eq!(&res[..sz1], &b"hello END"[..]); s3.shutdown(Shutdown::Both); let sz2 = s2.read(&mut res[..]).unwrap(); println!("s2 received {:?}", str::from_utf8(&res[..sz2])); assert_eq!(&res[..sz2], &b"pouet pouet END"[..]); 
thread::sleep(Duration::from_millis(400)); sz1 = s1.read(&mut res[..]).unwrap(); println!("s1 received again({}): {:?}", sz1, str::from_utf8(&res[..sz1])); assert_eq!(&res[..sz1], &b"coucou END"[..]); //assert!(false); } /* #[allow(unused_mut, unused_must_use, unused_variables)] #[test] fn concurrent() { use std::sync::mpsc; use time; let thread_nb = 127; thread::spawn(|| { start_server(); }); start(); thread::sleep_ms(300); let (tx, rx) = mpsc::channel(); let begin = time::precise_time_s(); for i in 0..thread_nb { let id = i; let tx = tx.clone(); thread::Builder::new().name(id.to_string()).spawn(move || { let s = format!("[{}] Hello world!\n", id); let v: Vec<u8> = s.bytes().collect(); if let Ok(mut conn) = TcpStream::connect("127.0.0.1:1234") { let mut res = [0; 128]; for j in 0..10000 { conn.write(&v[..]); if j % 5 == 0 { if let Ok(sz) = conn.read(&mut res[..]) { //println!("[{}] received({}): {:?}", id, sz, str::from_utf8(&res[..sz])); } else { println!("failed reading"); tx.send(()); return; } } } tx.send(()); return; } else { println!("failed connecting"); tx.send(()); return; } }); } //thread::sleep_ms(5000); for i in 0..thread_nb { rx.recv(); } let end = time::precise_time_s(); println!("executed in {} seconds", end - begin); assert!(false); } */ #[allow(unused_mut, unused_must_use, unused_variables)] fn start_server() { let listener = TcpListener::bind("127.0.0.1:5678").unwrap(); fn handle_client(stream: &mut TcpStream, id: u8) { let mut buf = [0; 128]; let response = b" END"; while let Ok(sz) = stream.read(&mut buf[..]) { if sz > 0 { //println!("[{}] {:?}", id, str::from_utf8(&buf[..sz])); stream.write(&buf[..sz]); thread::sleep(Duration::from_millis(20)); stream.write(&response[..]); } } } let mut count = 0; thread::spawn(move|| { for conn in listener.incoming() { match conn { Ok(mut stream) => { thread::spawn(move|| { println!("got a new client: {}", count); handle_client(&mut stream, count) }); } Err(e) => { println!("connection failed"); } } count += 
1; } }); } }
// Std use std::borrow::Cow; use std::cmp; use std::fmt::Write as _; use std::usize; // Internal use crate::builder::PossibleValue; use crate::builder::Str; use crate::builder::StyledStr; use crate::builder::{render_arg_val, Arg, Command}; use crate::output::display_width; use crate::output::wrap; use crate::output::Usage; use crate::util::FlatSet; use crate::ArgAction; /// `clap` Help Writer. /// /// Wraps a writer stream providing different methods to generate help for `clap` objects. pub(crate) struct Help<'cmd, 'writer> { writer: &'writer mut StyledStr, cmd: &'cmd Command, usage: &'cmd Usage<'cmd>, next_line_help: bool, term_w: usize, use_long: bool, } // Public Functions impl<'cmd, 'writer> Help<'cmd, 'writer> { const DEFAULT_TEMPLATE: &'static str = "\ {before-help}{name} {version}\n\ {author-with-newline}{about-with-newline}\n\ {usage-heading}\n {usage}\n\ \n\ {all-args}{after-help}\ "; const DEFAULT_NO_ARGS_TEMPLATE: &'static str = "\ {before-help}{name} {version}\n\ {author-with-newline}{about-with-newline}\n\ {usage-heading}\n {usage}{after-help}\ "; /// Create a new `Help` instance. pub(crate) fn new( writer: &'writer mut StyledStr, cmd: &'cmd Command, usage: &'cmd Usage<'cmd>, use_long: bool, ) -> Self { debug!("Help::new cmd={}, use_long={}", cmd.get_name(), use_long); let term_w = match cmd.get_term_width() { Some(0) => usize::MAX, Some(w) => w, None => cmp::min( dimensions().map_or(100, |(w, _)| w), match cmd.get_max_term_width() { None | Some(0) => usize::MAX, Some(mw) => mw, }, ), }; let next_line_help = cmd.is_next_line_help_set(); Help { writer, cmd, usage, next_line_help, term_w, use_long, } } /// Writes the parser help to the wrapped stream. 
pub(crate) fn write_help(&mut self) { debug!("Help::write_help"); if let Some(h) = self.cmd.get_override_help() { self.extend(h); } else if let Some(tmpl) = self.cmd.get_help_template() { for (style, content) in tmpl.iter() { if style == None { self.write_templated_help(content); } else { self.writer.stylize(style, content); } } } else { let pos = self .cmd .get_positionals() .any(|arg| should_show_arg(self.use_long, arg)); let non_pos = self .cmd .get_non_positionals() .any(|arg| should_show_arg(self.use_long, arg)); let subcmds = self.cmd.has_visible_subcommands(); let template = if non_pos || pos || subcmds { Self::DEFAULT_TEMPLATE } else { Self::DEFAULT_NO_ARGS_TEMPLATE }; self.write_templated_help(template); } self.none("\n"); } } // Methods to write Arg help. impl<'cmd, 'writer> Help<'cmd, 'writer> { fn extend(&mut self, msg: &StyledStr) { self.writer.extend(msg.iter()); } fn good<T: Into<String>>(&mut self, msg: T) { self.writer.good(msg); } fn warning<T: Into<String>>(&mut self, msg: T) { self.writer.warning(msg); } fn none<T: Into<String>>(&mut self, msg: T) { self.writer.none(msg); } fn get_spaces(&self, n: usize) -> String { " ".repeat(n) } fn spaces(&mut self, n: usize) { self.none(self.get_spaces(n)); } /// Writes help for each argument in the order they were declared to the wrapped stream. fn write_args_unsorted(&mut self, args: &[&Arg]) { debug!("Help::write_args_unsorted"); // The shortest an arg can legally be is 2 (i.e. 
'-x') let mut longest = 2; let mut arg_v = Vec::with_capacity(10); for &arg in args .iter() .filter(|arg| should_show_arg(self.use_long, *arg)) { if arg.longest_filter() { longest = longest.max(display_width(&arg.to_string())); debug!( "Help::write_args_unsorted: arg={:?} longest={}", arg.get_id(), longest ); } arg_v.push(arg) } let next_line_help = self.will_args_wrap(args, longest); let argc = arg_v.len(); for (i, arg) in arg_v.iter().enumerate() { self.write_arg(arg, i + 1 == argc, next_line_help, longest); } } /// Sorts arguments by length and display order and write their help to the wrapped stream. fn write_args(&mut self, args: &[&Arg], _category: &str) { debug!("Help::write_args {}", _category); // The shortest an arg can legally be is 2 (i.e. '-x') let mut longest = 2; let mut ord_v = Vec::new(); // Determine the longest for &arg in args.iter().filter(|arg| { // If it's NextLineHelp we don't care to compute how long it is because it may be // NextLineHelp on purpose simply *because* it's so long and would throw off all other // args alignment should_show_arg(self.use_long, *arg) }) { if arg.longest_filter() { longest = longest.max(display_width(&arg.to_string())); debug!( "Help::write_args: arg={:?} longest={}", arg.get_id(), longest ); } // Formatting key like this to ensure that: // 1. Argument has long flags are printed just after short flags. // 2. For two args both have short flags like `-c` and `-C`, the // `-C` arg is printed just after the `-c` arg // 3. For args without short or long flag, print them at last(sorted // by arg name). 
// Example order: -a, -b, -B, -s, --select-file, --select-folder, -x let key = if let Some(x) = arg.get_short() { let mut s = x.to_ascii_lowercase().to_string(); s.push(if x.is_ascii_lowercase() { '0' } else { '1' }); s } else if let Some(x) = arg.get_long() { x.to_string() } else { let mut s = '{'.to_string(); s.push_str(arg.id.as_str()); s }; ord_v.push((arg.get_display_order(), key, arg)); } ord_v.sort_by(|a, b| (a.0, &a.1).cmp(&(b.0, &b.1))); let next_line_help = self.will_args_wrap(args, longest); for (i, (_, _, arg)) in ord_v.iter().enumerate() { let last_arg = i + 1 == ord_v.len(); self.write_arg(arg, last_arg, next_line_help, longest); } } /// Writes help for an argument to the wrapped stream. fn write_arg(&mut self, arg: &Arg, last_arg: bool, next_line_help: bool, longest: usize) { let spec_vals = &self.spec_vals(arg); self.short(arg); self.long(arg); self.val(arg); self.align_to_about(arg, next_line_help, longest); let about = if self.use_long { arg.get_long_help() .or_else(|| arg.get_help()) .unwrap_or_default() } else { arg.get_help() .or_else(|| arg.get_long_help()) .unwrap_or_default() }; self.help(Some(arg), about, spec_vals, next_line_help, longest); if !last_arg { self.none("\n"); if next_line_help { self.none("\n"); } } } /// Writes argument's short command to the wrapped stream. fn short(&mut self, arg: &Arg) { debug!("Help::short"); self.none(TAB); if let Some(s) = arg.get_short() { self.good(format!("-{}", s)); } else if !arg.is_positional() { self.none(TAB) } } /// Writes argument's long command to the wrapped stream. fn long(&mut self, arg: &Arg) { debug!("Help::long"); if let Some(long) = arg.get_long() { if arg.short.is_some() { self.none(", "); } self.good(format!("--{}", long)); } } /// Writes argument's possible values to the wrapped stream. 
    fn val(&mut self, arg: &Arg) {
        debug!("Help::val: arg={}", arg.get_id());
        // Tracks whether we opened an optional-value bracket ("[=" or " [")
        // that must be closed after the value name is printed.
        let mut need_closing_bracket = false;
        if arg.is_takes_value_set() && !arg.is_positional() {
            let is_optional_val = arg.get_min_vals() == 0;
            // Pick the separator between the flag and its value:
            //   --opt=VAL / --opt[=VAL] when `require_equals` is set,
            //   --opt VAL / --opt [VAL] otherwise.
            let sep = if arg.is_require_equals_set() {
                if is_optional_val {
                    need_closing_bracket = true;
                    "[="
                } else {
                    "="
                }
            } else if is_optional_val {
                need_closing_bracket = true;
                " ["
            } else {
                " "
            };
            self.none(sep);
        }

        if arg.is_takes_value_set() || arg.is_positional() {
            let arg_val = render_arg_val(arg);
            self.good(arg_val);
        } else if matches!(*arg.get_action(), ArgAction::Count) {
            // Count-action flags render an ellipsis to show they repeat.
            self.good("...");
        }

        if need_closing_bracket {
            self.none("]");
        }
    }

    /// Write alignment padding between arg's switches/values and its about message.
    fn align_to_about(&mut self, arg: &Arg, next_line_help: bool, longest: usize) {
        debug!(
            "Help::align_to_about: arg={}, next_line_help={}, longest={}",
            arg.get_id(),
            next_line_help,
            longest
        );
        if self.use_long || next_line_help {
            // long help prints messages on the next line so it doesn't need to align text
            debug!("Help::align_to_about: printing long help so skip alignment");
        } else if !arg.is_positional() {
            let self_len = display_width(&arg.to_string());
            // Since we're writing spaces from the tab point we first need to know if we
            // had a long and short, or just short
            let padding = if arg.long.is_some() {
                // Only account 4 after the val
                4
            } else {
                // Only account for ', --' + 4 after the val
                8
            };
            let spcs = longest + padding - self_len;
            debug!(
                "Help::align_to_about: positional=false arg_len={}, spaces={}",
                self_len, spcs
            );

            self.spaces(spcs);
        } else {
            // Positionals only get the fixed 4-column gap after the value name.
            let self_len = display_width(&arg.to_string());
            let padding = 4;
            let spcs = longest + padding - self_len;
            debug!(
                "Help::align_to_about: positional=true arg_len={}, spaces={}",
                self_len, spcs
            );
            self.spaces(spcs);
        }
    }

    /// Writes the before-help text (long variant preferred in long mode),
    /// newline-normalized and wrapped to the terminal width.
    fn write_before_help(&mut self) {
        debug!("Help::write_before_help");
        let before_help = if self.use_long {
            self.cmd
                .get_before_long_help()
                .or_else(|| self.cmd.get_before_help())
        } else {
            self.cmd.get_before_help()
        };
        if let Some(output) = before_help {
            let mut output = output.clone();
            output.replace_newline();
            output.wrap(self.term_w);
            self.writer.extend(output.into_iter());
            self.none("\n\n");
        }
    }

    /// Writes the after-help text (long variant preferred in long mode),
    /// newline-normalized and wrapped to the terminal width.
    fn write_after_help(&mut self) {
        debug!("Help::write_after_help");
        let after_help = if self.use_long {
            self.cmd
                .get_after_long_help()
                .or_else(|| self.cmd.get_after_help())
        } else {
            self.cmd.get_after_help()
        };
        if let Some(output) = after_help {
            self.none("\n\n");
            let mut output = output.clone();
            output.replace_newline();
            output.wrap(self.term_w);
            self.writer.extend(output.into_iter());
        }
    }

    /// Writes argument's help to the wrapped stream.
    ///
    /// `arg` is `None` when called for a subcommand; `spec_vals` is the
    /// pre-rendered "[default: …] [aliases: …]" suffix appended to `about`.
    fn help(
        &mut self,
        arg: Option<&Arg>,
        about: &StyledStr,
        spec_vals: &str,
        next_line_help: bool,
        longest: usize,
    ) {
        debug!("Help::help");
        // Is help on next line, if so then indent
        if next_line_help {
            debug!("Help::help: Next Line...{:?}", next_line_help);
            self.none(format!("\n{}{}{}", TAB, TAB, TAB));
        }
        // Continuation lines indent differently for positionals (2 tabs past
        // `longest`) vs flags (3 tabs past `longest`).
        let trailing_indent = if next_line_help {
            TAB_WIDTH * 3
        } else if let Some(true) = arg.map(|a| a.is_positional()) {
            longest + TAB_WIDTH * 2
        } else {
            longest + TAB_WIDTH * 3
        };
        let trailing_indent = self.get_spaces(trailing_indent);
        let spaces = if next_line_help {
            12 // "tab" * 3
        } else {
            longest + 12
        };
        let mut help = about.clone();
        help.replace_newline();
        help.none(spec_vals);
        let avail_chars = self.term_w.saturating_sub(spaces);
        debug!(
            "Help::help: help_width={}, spaces={}, avail={}",
            spaces,
            help.display_width(),
            avail_chars
        );
        help.wrap(avail_chars);
        help.indent("", &trailing_indent);
        let help_is_empty = help.is_empty();
        self.writer.extend(help.into_iter());
        if let Some(arg) = arg {
            const DASH_SPACE: usize = "- ".len();
            const COLON_SPACE: usize = ": ".len();
            let possible_vals = arg.get_possible_values();
            // Only long help renders the expanded "Possible values:" section;
            // short help lists them inline via spec_vals instead.
            if self.use_long
                && !arg.is_hide_possible_values_set()
                && possible_vals.iter().any(PossibleValue::should_show_help)
            {
                debug!("Help::help: Found possible vals...{:?}", possible_vals);
                if !help_is_empty {
                    self.none("\n\n");
                    self.spaces(spaces);
                }
                self.none("Possible values:");
                let longest = possible_vals
                    .iter()
                    .filter_map(|f| f.get_visible_quoted_name().map(|name| display_width(&name)))
                    .max()
                    .expect("Only called with possible value");
                let help_longest = possible_vals
                    .iter()
                    .filter_map(|f| f.get_visible_help().map(|h| h.display_width()))
                    .max()
                    .expect("Only called with possible value with help");
                // should new line
                let taken = longest + spaces + DASH_SPACE;
                let possible_value_new_line =
                    self.term_w >= taken && self.term_w < taken + COLON_SPACE + help_longest;
                let spaces = spaces + TAB_WIDTH - DASH_SPACE;
                let trailing_indent = if possible_value_new_line {
                    spaces + DASH_SPACE
                } else {
                    spaces + longest + DASH_SPACE + COLON_SPACE
                };
                let trailing_indent = self.get_spaces(trailing_indent);
                for pv in possible_vals.iter().filter(|pv| !pv.is_hide_set()) {
                    self.none("\n");
                    self.spaces(spaces);
                    self.none("- ");
                    self.good(pv.get_name());
                    if let Some(help) = pv.get_help() {
                        debug!("Help::help: Possible Value help");

                        if possible_value_new_line {
                            self.none(":\n");
                            self.spaces(trailing_indent.len());
                        } else {
                            self.none(": ");
                            // To align help messages
                            self.spaces(longest - display_width(pv.get_name()));
                        }

                        let avail_chars = if self.term_w > trailing_indent.len() {
                            self.term_w - trailing_indent.len()
                        } else {
                            usize::MAX
                        };

                        let mut help = help.clone();
                        help.replace_newline();
                        help.wrap(avail_chars);
                        help.indent("", &trailing_indent);
                        self.writer.extend(help.into_iter());
                    }
                }
            }
        }
    }

    /// Will use next line help on writing args.
fn will_args_wrap(&self, args: &[&Arg], longest: usize) -> bool { args.iter() .filter(|arg| should_show_arg(self.use_long, *arg)) .any(|arg| { let spec_vals = &self.spec_vals(arg); self.arg_next_line_help(arg, spec_vals, longest) }) } fn arg_next_line_help(&self, arg: &Arg, spec_vals: &str, longest: usize) -> bool { if self.next_line_help || arg.is_next_line_help_set() || self.use_long { // setting_next_line true } else { // force_next_line let h = arg.get_help().unwrap_or_default(); let h_w = h.display_width() + display_width(spec_vals); let taken = longest + 12; self.term_w >= taken && (taken as f32 / self.term_w as f32) > 0.40 && h_w > (self.term_w - taken) } } fn spec_vals(&self, a: &Arg) -> String { debug!("Help::spec_vals: a={}", a); let mut spec_vals = vec![]; #[cfg(feature = "env")] if let Some(ref env) = a.env { if !a.is_hide_env_set() { debug!( "Help::spec_vals: Found environment variable...[{:?}:{:?}]", env.0, env.1 ); let env_val = if !a.is_hide_env_values_set() { format!( "={}", env.1 .as_ref() .map_or(Cow::Borrowed(""), |val| val.to_string_lossy()) ) } else { String::new() }; let env_info = format!("[env: {}{}]", env.0.to_string_lossy(), env_val); spec_vals.push(env_info); } } if a.is_takes_value_set() && !a.is_hide_default_value_set() && !a.default_vals.is_empty() { debug!( "Help::spec_vals: Found default value...[{:?}]", a.default_vals ); let pvs = a .default_vals .iter() .map(|pvs| pvs.to_string_lossy()) .map(|pvs| { if pvs.contains(char::is_whitespace) { Cow::from(format!("{:?}", pvs)) } else { pvs } }) .collect::<Vec<_>>() .join(" "); spec_vals.push(format!("[default: {}]", pvs)); } if !a.aliases.is_empty() { debug!("Help::spec_vals: Found aliases...{:?}", a.aliases); let als = a .aliases .iter() .filter(|&als| als.1) // visible .map(|als| als.0.as_str()) // name .collect::<Vec<_>>() .join(", "); if !als.is_empty() { spec_vals.push(format!("[aliases: {}]", als)); } } if !a.short_aliases.is_empty() { debug!( "Help::spec_vals: Found short 
aliases...{:?}", a.short_aliases ); let als = a .short_aliases .iter() .filter(|&als| als.1) // visible .map(|&als| als.0.to_string()) // name .collect::<Vec<_>>() .join(", "); if !als.is_empty() { spec_vals.push(format!("[short aliases: {}]", als)); } } let possible_vals = a.get_possible_values(); if !(a.is_hide_possible_values_set() || possible_vals.is_empty() || self.use_long && possible_vals.iter().any(PossibleValue::should_show_help)) { debug!("Help::spec_vals: Found possible vals...{:?}", possible_vals); let pvs = possible_vals .iter() .filter_map(PossibleValue::get_visible_quoted_name) .collect::<Vec<_>>() .join(", "); spec_vals.push(format!("[possible values: {}]", pvs)); } let connector = if self.use_long { "\n" } else { " " }; let prefix = if !spec_vals.is_empty() && !a.get_help().unwrap_or_default().is_empty() { if self.use_long { "\n\n" } else { " " } } else { "" }; prefix.to_string() + &spec_vals.join(connector) } fn write_about(&mut self, before_new_line: bool, after_new_line: bool) { let about = if self.use_long { self.cmd.get_long_about().or_else(|| self.cmd.get_about()) } else { self.cmd.get_about() }; if let Some(output) = about { if before_new_line { self.none("\n"); } let mut output = output.clone(); output.replace_newline(); output.wrap(self.term_w); self.writer.extend(output.into_iter()); if after_new_line { self.none("\n"); } } } fn write_author(&mut self, before_new_line: bool, after_new_line: bool) { if let Some(author) = self.cmd.get_author() { if before_new_line { self.none("\n"); } self.none(wrap(author, self.term_w)); if after_new_line { self.none("\n"); } } } fn write_version(&mut self) { let version = self .cmd .get_version() .or_else(|| self.cmd.get_long_version()); if let Some(output) = version { self.none(wrap(output, self.term_w)); } } } /// Methods to write a single subcommand impl<'cmd, 'writer> Help<'cmd, 'writer> { fn write_subcommand( &mut self, sc_str: &str, cmd: &Command, next_line_help: bool, longest: usize, ) { 
debug!("Help::write_subcommand"); let spec_vals = &self.sc_spec_vals(cmd); let about = cmd .get_about() .or_else(|| cmd.get_long_about()) .unwrap_or_default(); self.subcmd(sc_str, next_line_help, longest); self.help(None, about, spec_vals, next_line_help, longest) } fn sc_spec_vals(&self, a: &Command) -> String { debug!("Help::sc_spec_vals: a={}", a.get_name()); let mut spec_vals = vec![]; if 0 < a.get_all_aliases().count() || 0 < a.get_all_short_flag_aliases().count() { debug!( "Help::spec_vals: Found aliases...{:?}", a.get_all_aliases().collect::<Vec<_>>() ); debug!( "Help::spec_vals: Found short flag aliases...{:?}", a.get_all_short_flag_aliases().collect::<Vec<_>>() ); let mut short_als = a .get_visible_short_flag_aliases() .map(|a| format!("-{}", a)) .collect::<Vec<_>>(); let als = a.get_visible_aliases().map(|s| s.to_string()); short_als.extend(als); let all_als = short_als.join(", "); if !all_als.is_empty() { spec_vals.push(format!(" [aliases: {}]", all_als)); } } spec_vals.join(" ") } fn subcommand_next_line_help(&self, cmd: &Command, spec_vals: &str, longest: usize) -> bool { if self.next_line_help | self.use_long { // setting_next_line true } else { // force_next_line let h = cmd.get_about().unwrap_or_default(); let h_w = h.display_width() + display_width(spec_vals); let taken = longest + 12; self.term_w >= taken && (taken as f32 / self.term_w as f32) > 0.40 && h_w > (self.term_w - taken) } } /// Writes subcommand to the wrapped stream. fn subcmd(&mut self, sc_str: &str, next_line_help: bool, longest: usize) { self.none(TAB); self.good(sc_str); if !next_line_help { let width = display_width(sc_str); self.spaces(width.max(longest + 4) - width); } } } // Methods to write Parser help. impl<'cmd, 'writer> Help<'cmd, 'writer> { /// Writes help for all arguments (options, flags, args, subcommands) /// including titles of a Parser Object to the wrapped stream. 
    pub(crate) fn write_all_args(&mut self) {
        debug!("Help::write_all_args");
        // Visible positionals and non-positionals that have no custom heading;
        // custom-heading args are grouped under their own sections below.
        let pos = self
            .cmd
            .get_positionals_with_no_heading()
            .filter(|arg| should_show_arg(self.use_long, arg))
            .collect::<Vec<_>>();
        let non_pos = self
            .cmd
            .get_non_positionals_with_no_heading()
            .filter(|arg| should_show_arg(self.use_long, arg))
            .collect::<Vec<_>>();
        let subcmds = self.cmd.has_visible_subcommands();

        let custom_headings = self
            .cmd
            .get_arguments()
            .filter_map(|arg| arg.get_help_heading())
            .collect::<FlatSet<_>>();

        // `first` tracks whether a section has been emitted yet, so each
        // subsequent section is preceded by a blank line.
        let mut first = if !pos.is_empty() {
            // Write positional args if any
            self.warning("ARGS:\n");
            self.write_args_unsorted(&pos);
            false
        } else {
            true
        };

        if !non_pos.is_empty() {
            if !first {
                self.none("\n\n");
            }
            self.warning("OPTIONS:\n");
            self.write_args(&non_pos, "OPTIONS");
            first = false;
        }
        if !custom_headings.is_empty() {
            for heading in custom_headings {
                // Re-scan all arguments for those assigned to this heading.
                let args = self
                    .cmd
                    .get_arguments()
                    .filter(|a| {
                        if let Some(help_heading) = a.get_help_heading() {
                            return help_heading == heading;
                        }
                        false
                    })
                    .filter(|arg| should_show_arg(self.use_long, arg))
                    .collect::<Vec<_>>();

                if !args.is_empty() {
                    if !first {
                        self.none("\n\n");
                    }
                    self.warning(format!("{}:\n", heading));
                    self.write_args(&args, heading);
                    first = false
                }
            }
        }
        if subcmds {
            if !first {
                self.none("\n\n");
            }

            // The subcommand section heading is overridable on the Command.
            let default_help_heading = Str::from("SUBCOMMANDS");
            self.warning(
                self.cmd
                    .get_subcommand_help_heading()
                    .unwrap_or(&default_help_heading),
            );
            self.warning(":\n");

            self.write_subcommands(self.cmd);
        }
    }

    /// Will use next line help on writing subcommands.
    ///
    /// True if any visible subcommand would need its about on the next line.
    fn will_subcommands_wrap<'a>(
        &self,
        subcommands: impl IntoIterator<Item = &'a Command>,
        longest: usize,
    ) -> bool {
        subcommands
            .into_iter()
            .filter(|&subcommand| should_show_subcommand(subcommand))
            .any(|subcommand| {
                let spec_vals = &self.sc_spec_vals(subcommand);
                self.subcommand_next_line_help(subcommand, spec_vals, longest)
            })
    }

    /// Writes help for subcommands of a Parser Object to the wrapped stream.
    fn write_subcommands(&mut self, cmd: &Command) {
        debug!("Help::write_subcommands");
        // The shortest an arg can legally be is 2 (i.e. '-x')
        let mut longest = 2;
        let mut ord_v = Vec::new();
        for subcommand in cmd
            .get_subcommands()
            .filter(|subcommand| should_show_subcommand(subcommand))
        {
            // Build the display string: "name -s --long" (flags appended
            // only when the subcommand has them).
            let mut sc_str = String::new();
            sc_str.push_str(subcommand.get_name());
            if let Some(short) = subcommand.get_short_flag() {
                write!(sc_str, " -{}", short).unwrap();
            }
            if let Some(long) = subcommand.get_long_flag() {
                write!(sc_str, " --{}", long).unwrap();
            }
            longest = longest.max(display_width(&sc_str));
            ord_v.push((subcommand.get_display_order(), sc_str, subcommand));
        }
        // Sort by display order first, then name, for stable output.
        ord_v.sort_by(|a, b| (a.0, &a.1).cmp(&(b.0, &b.1)));

        debug!("Help::write_subcommands longest = {}", longest);

        let next_line_help = self.will_subcommands_wrap(cmd.get_subcommands(), longest);

        let mut first = true;
        for (_, sc_str, sc) in &ord_v {
            if first {
                first = false;
            } else {
                self.none("\n");
            }
            self.write_subcommand(sc_str, sc, next_line_help, longest);
        }
    }

    /// Writes binary name of a Parser Object to the wrapped stream.
    fn write_display_name(&mut self) {
        debug!("Help::write_display_name");
        let display_name = wrap(
            &self
                .cmd
                .get_display_name()
                .unwrap_or_else(|| self.cmd.get_name())
                .replace("{n}", "\n"),
            self.term_w,
        );
        self.good(&display_name);
    }

    /// Writes binary name of a Parser Object to the wrapped stream.
    fn write_bin_name(&mut self) {
        debug!("Help::write_bin_name");
        let bin_name = if let Some(bn) = self.cmd.get_bin_name() {
            if bn.contains(' ') {
                // In case we're dealing with subcommands i.e. git mv is translated to git-mv
                bn.replace(' ', "-")
            } else {
                // NOTE(review): a space-free bin_name falls through to
                // get_name() rather than using `bn` — looks deliberate but
                // surprising; confirm against upstream intent before changing.
                wrap(&self.cmd.get_name().replace("{n}", "\n"), self.term_w)
            }
        } else {
            wrap(&self.cmd.get_name().replace("{n}", "\n"), self.term_w)
        };
        self.good(&bin_name);
    }
}

// Methods to write Parser help using templates.
impl<'cmd, 'writer> Help<'cmd, 'writer> {
    /// Write help to stream for the parser in the format defined by the template.
/// /// For details about the template language see [`Command::help_template`]. /// /// [`Command::help_template`]: Command::help_template() fn write_templated_help(&mut self, template: &str) { debug!("Help::write_templated_help"); // The strategy is to copy the template from the reader to wrapped stream // until a tag is found. Depending on its value, the appropriate content is copied // to the wrapped stream. // The copy from template is then resumed, repeating this sequence until reading // the complete template. macro_rules! tags { ( match $part:ident { $( $tag:expr => $action:stmt )* } ) => { match $part { $( part if part.starts_with(concat!($tag, "}")) => { $action let rest = &part[$tag.len()+1..]; self.none(rest); } )* // Unknown tag, write it back. part => { self.none("{"); self.none(part); } } }; } let mut parts = template.split('{'); if let Some(first) = parts.next() { self.none(first); } for part in parts { tags! { match part { "name" => { self.write_display_name(); } "bin" => { self.write_bin_name(); } "version" => { self.write_version(); } "author" => { self.write_author(false, false); } "author-with-newline" => { self.write_author(false, true); } "author-section" => { self.write_author(true, true); } "about" => { self.write_about(false, false); } "about-with-newline" => { self.write_about(false, true); } "about-section" => { self.write_about(true, true); } "usage-heading" => { self.warning("USAGE:"); } "usage" => { self.none(self.usage.create_usage_no_title(&[])); } "all-args" => { self.write_all_args(); } "options" => { // Include even those with a heading as we don't have a good way of // handling help_heading in the template. 
self.write_args(&self.cmd.get_non_positionals().collect::<Vec<_>>(), "options"); } "positionals" => { self.write_args(&self.cmd.get_positionals().collect::<Vec<_>>(), "positionals"); } "subcommands" => { self.write_subcommands(self.cmd); } "after-help" => { self.write_after_help(); } "before-help" => { self.write_before_help(); } } } } } } pub(crate) fn dimensions() -> Option<(usize, usize)> { #[cfg(not(feature = "wrap_help"))] return None; #[cfg(feature = "wrap_help")] terminal_size::terminal_size().map(|(w, h)| (w.0.into(), h.0.into())) } const TAB: &str = " "; const TAB_WIDTH: usize = 4; fn should_show_arg(use_long: bool, arg: &Arg) -> bool { debug!( "should_show_arg: use_long={:?}, arg={}", use_long, arg.get_id() ); if arg.is_hide_set() { return false; } (!arg.is_hide_long_help_set() && use_long) || (!arg.is_hide_short_help_set() && !use_long) || arg.is_next_line_help_set() } fn should_show_subcommand(subcommand: &Command) -> bool { !subcommand.is_hide_set() } #[cfg(test)] mod test { use super::*; #[test] fn wrap_help_last_word() { let help = String::from("foo bar baz"); assert_eq!(wrap(&help, 5), "foo\nbar\nbaz"); } #[test] #[cfg(feature = "unicode")] fn display_width_handles_non_ascii() { // Popular Danish tongue-twister, the name of a fruit dessert. let text = "rødgrød med fløde"; assert_eq!(display_width(text), 17); // Note that the string width is smaller than the string // length. This is due to the precomposed non-ASCII letters: assert_eq!(text.len(), 20); } #[test] #[cfg(feature = "unicode")] fn display_width_handles_emojis() { let text = "😂"; // There is a single `char`... 
assert_eq!(text.chars().count(), 1); // but it is double-width: assert_eq!(display_width(text), 2); // This is much less than the byte length: assert_eq!(text.len(), 4); } } perf(help): Reduce code size for arg generation // Std use std::borrow::Cow; use std::cmp; use std::fmt::Write as _; use std::usize; // Internal use crate::builder::PossibleValue; use crate::builder::Str; use crate::builder::StyledStr; use crate::builder::{render_arg_val, Arg, Command}; use crate::output::display_width; use crate::output::wrap; use crate::output::Usage; use crate::util::FlatSet; use crate::ArgAction; /// `clap` Help Writer. /// /// Wraps a writer stream providing different methods to generate help for `clap` objects. pub(crate) struct Help<'cmd, 'writer> { writer: &'writer mut StyledStr, cmd: &'cmd Command, usage: &'cmd Usage<'cmd>, next_line_help: bool, term_w: usize, use_long: bool, } // Public Functions impl<'cmd, 'writer> Help<'cmd, 'writer> { const DEFAULT_TEMPLATE: &'static str = "\ {before-help}{name} {version}\n\ {author-with-newline}{about-with-newline}\n\ {usage-heading}\n {usage}\n\ \n\ {all-args}{after-help}\ "; const DEFAULT_NO_ARGS_TEMPLATE: &'static str = "\ {before-help}{name} {version}\n\ {author-with-newline}{about-with-newline}\n\ {usage-heading}\n {usage}{after-help}\ "; /// Create a new `Help` instance. pub(crate) fn new( writer: &'writer mut StyledStr, cmd: &'cmd Command, usage: &'cmd Usage<'cmd>, use_long: bool, ) -> Self { debug!("Help::new cmd={}, use_long={}", cmd.get_name(), use_long); let term_w = match cmd.get_term_width() { Some(0) => usize::MAX, Some(w) => w, None => cmp::min( dimensions().map_or(100, |(w, _)| w), match cmd.get_max_term_width() { None | Some(0) => usize::MAX, Some(mw) => mw, }, ), }; let next_line_help = cmd.is_next_line_help_set(); Help { writer, cmd, usage, next_line_help, term_w, use_long, } } /// Writes the parser help to the wrapped stream. 
    pub(crate) fn write_help(&mut self) {
        debug!("Help::write_help");
        // Priority: explicit override help > user template > default template
        // (with a no-args variant when nothing would be listed).
        if let Some(h) = self.cmd.get_override_help() {
            self.extend(h);
        } else if let Some(tmpl) = self.cmd.get_help_template() {
            for (style, content) in tmpl.iter() {
                if style == None {
                    // Unstyled segments may contain {tag}s to expand.
                    self.write_templated_help(content);
                } else {
                    // Styled segments pass through verbatim.
                    self.writer.stylize(style, content);
                }
            }
        } else {
            let pos = self
                .cmd
                .get_positionals()
                .any(|arg| should_show_arg(self.use_long, arg));
            let non_pos = self
                .cmd
                .get_non_positionals()
                .any(|arg| should_show_arg(self.use_long, arg));
            let subcmds = self.cmd.has_visible_subcommands();

            let template = if non_pos || pos || subcmds {
                Self::DEFAULT_TEMPLATE
            } else {
                Self::DEFAULT_NO_ARGS_TEMPLATE
            };
            self.write_templated_help(template);
        }

        self.none("\n");
    }
}

// Methods to write Arg help.
impl<'cmd, 'writer> Help<'cmd, 'writer> {
    /// Copies a pre-styled message into the output verbatim.
    fn extend(&mut self, msg: &StyledStr) {
        self.writer.extend(msg.iter());
    }

    /// Writes `msg` in the "good" (literal/emphasis) style.
    fn good<T: Into<String>>(&mut self, msg: T) {
        self.writer.good(msg);
    }

    /// Writes `msg` in the "warning" (heading) style.
    fn warning<T: Into<String>>(&mut self, msg: T) {
        self.writer.warning(msg);
    }

    /// Writes `msg` unstyled.
    fn none<T: Into<String>>(&mut self, msg: T) {
        self.writer.none(msg);
    }

    /// Returns a string of `n` spaces (used for column alignment).
    fn get_spaces(&self, n: usize) -> String {
        " ".repeat(n)
    }

    /// Writes `n` spaces to the output.
    fn spaces(&mut self, n: usize) {
        self.none(self.get_spaces(n));
    }

    /// Sorts arguments by length and display order and write their help to the wrapped stream.
    fn write_args(&mut self, args: &[&Arg], _category: &str, sort_key: ArgSortKey) {
        debug!("Help::write_args {}", _category);
        // The shortest an arg can legally be is 2 (i.e. '-x')
        let mut longest = 2;
        let mut ord_v = Vec::new();

        // Determine the longest
        for &arg in args.iter().filter(|arg| {
            // If it's NextLineHelp we don't care to compute how long it is because it may be
            // NextLineHelp on purpose simply *because* it's so long and would throw off all other
            // args alignment
            should_show_arg(self.use_long, *arg)
        }) {
            if arg.longest_filter() {
                longest = longest.max(display_width(&arg.to_string()));
                debug!(
                    "Help::write_args: arg={:?} longest={}",
                    arg.get_id(),
                    longest
                );
            }
            // Sort key is supplied by the caller so sorted and unsorted
            // sections can share this one rendering loop.
            let key = (sort_key)(arg);
            ord_v.push((key, arg));
        }
        ord_v.sort_by(|a, b| a.0.cmp(&b.0));

        let next_line_help = self.will_args_wrap(args, longest);

        for (i, (_, arg)) in ord_v.iter().enumerate() {
            let last_arg = i + 1 == ord_v.len();
            self.write_arg(arg, last_arg, next_line_help, longest);
        }
    }

    /// Writes help for an argument to the wrapped stream.
    fn write_arg(&mut self, arg: &Arg, last_arg: bool, next_line_help: bool, longest: usize) {
        let spec_vals = &self.spec_vals(arg);

        // Render the switch columns, then the aligned about text.
        self.short(arg);
        self.long(arg);
        self.val(arg);
        self.align_to_about(arg, next_line_help, longest);

        let about = if self.use_long {
            arg.get_long_help()
                .or_else(|| arg.get_help())
                .unwrap_or_default()
        } else {
            arg.get_help()
                .or_else(|| arg.get_long_help())
                .unwrap_or_default()
        };

        self.help(Some(arg), about, spec_vals, next_line_help, longest);
        if !last_arg {
            self.none("\n");
            if next_line_help {
                self.none("\n");
            }
        }
    }

    /// Writes argument's short command to the wrapped stream.
    fn short(&mut self, arg: &Arg) {
        debug!("Help::short");

        self.none(TAB);

        if let Some(s) = arg.get_short() {
            self.good(format!("-{}", s));
        } else if !arg.is_positional() {
            // Keep columns aligned for long-only flags.
            self.none(TAB)
        }
    }

    /// Writes argument's long command to the wrapped stream.
    fn long(&mut self, arg: &Arg) {
        debug!("Help::long");
        if let Some(long) = arg.get_long() {
            if arg.short.is_some() {
                self.none(", ");
            }
            self.good(format!("--{}", long));
        }
    }

    /// Writes argument's possible values to the wrapped stream.
    fn val(&mut self, arg: &Arg) {
        debug!("Help::val: arg={}", arg.get_id());
        // Tracks whether we opened an optional-value bracket ("[=" or " [")
        // that must be closed after the value name is printed.
        let mut need_closing_bracket = false;
        if arg.is_takes_value_set() && !arg.is_positional() {
            let is_optional_val = arg.get_min_vals() == 0;
            // Pick the separator between the flag and its value:
            //   --opt=VAL / --opt[=VAL] when `require_equals` is set,
            //   --opt VAL / --opt [VAL] otherwise.
            let sep = if arg.is_require_equals_set() {
                if is_optional_val {
                    need_closing_bracket = true;
                    "[="
                } else {
                    "="
                }
            } else if is_optional_val {
                need_closing_bracket = true;
                " ["
            } else {
                " "
            };
            self.none(sep);
        }

        if arg.is_takes_value_set() || arg.is_positional() {
            let arg_val = render_arg_val(arg);
            self.good(arg_val);
        } else if matches!(*arg.get_action(), ArgAction::Count) {
            // Count-action flags render an ellipsis to show they repeat.
            self.good("...");
        }

        if need_closing_bracket {
            self.none("]");
        }
    }

    /// Write alignment padding between arg's switches/values and its about message.
    fn align_to_about(&mut self, arg: &Arg, next_line_help: bool, longest: usize) {
        debug!(
            "Help::align_to_about: arg={}, next_line_help={}, longest={}",
            arg.get_id(),
            next_line_help,
            longest
        );
        if self.use_long || next_line_help {
            // long help prints messages on the next line so it doesn't need to align text
            debug!("Help::align_to_about: printing long help so skip alignment");
        } else if !arg.is_positional() {
            let self_len = display_width(&arg.to_string());
            // Since we're writing spaces from the tab point we first need to know if we
            // had a long and short, or just short
            let padding = if arg.long.is_some() {
                // Only account 4 after the val
                4
            } else {
                // Only account for ', --' + 4 after the val
                8
            };
            let spcs = longest + padding - self_len;
            debug!(
                "Help::align_to_about: positional=false arg_len={}, spaces={}",
                self_len, spcs
            );

            self.spaces(spcs);
        } else {
            // Positionals only get the fixed 4-column gap after the value name.
            let self_len = display_width(&arg.to_string());
            let padding = 4;
            let spcs = longest + padding - self_len;
            debug!(
                "Help::align_to_about: positional=true arg_len={}, spaces={}",
                self_len, spcs
            );
            self.spaces(spcs);
        }
    }

    /// Writes the before-help text (long variant preferred in long mode),
    /// newline-normalized and wrapped to the terminal width.
    fn write_before_help(&mut self) {
        debug!("Help::write_before_help");
        let before_help = if self.use_long {
            self.cmd
                .get_before_long_help()
                .or_else(|| self.cmd.get_before_help())
        } else {
            self.cmd.get_before_help()
        };
        if let Some(output) = before_help {
            let mut output = output.clone();
            output.replace_newline();
            output.wrap(self.term_w);
            self.writer.extend(output.into_iter());
            self.none("\n\n");
        }
    }

    /// Writes the after-help text (long variant preferred in long mode),
    /// newline-normalized and wrapped to the terminal width.
    fn write_after_help(&mut self) {
        debug!("Help::write_after_help");
        let after_help = if self.use_long {
            self.cmd
                .get_after_long_help()
                .or_else(|| self.cmd.get_after_help())
        } else {
            self.cmd.get_after_help()
        };
        if let Some(output) = after_help {
            self.none("\n\n");
            let mut output = output.clone();
            output.replace_newline();
            output.wrap(self.term_w);
            self.writer.extend(output.into_iter());
        }
    }

    /// Writes argument's help to the wrapped stream.
    ///
    /// `arg` is `None` when called for a subcommand; `spec_vals` is the
    /// pre-rendered "[default: …] [aliases: …]" suffix appended to `about`.
    fn help(
        &mut self,
        arg: Option<&Arg>,
        about: &StyledStr,
        spec_vals: &str,
        next_line_help: bool,
        longest: usize,
    ) {
        debug!("Help::help");
        // Is help on next line, if so then indent
        if next_line_help {
            debug!("Help::help: Next Line...{:?}", next_line_help);
            self.none(format!("\n{}{}{}", TAB, TAB, TAB));
        }
        // Continuation lines indent differently for positionals (2 tabs past
        // `longest`) vs flags (3 tabs past `longest`).
        let trailing_indent = if next_line_help {
            TAB_WIDTH * 3
        } else if let Some(true) = arg.map(|a| a.is_positional()) {
            longest + TAB_WIDTH * 2
        } else {
            longest + TAB_WIDTH * 3
        };
        let trailing_indent = self.get_spaces(trailing_indent);
        let spaces = if next_line_help {
            12 // "tab" * 3
        } else {
            longest + 12
        };
        let mut help = about.clone();
        help.replace_newline();
        help.none(spec_vals);
        let avail_chars = self.term_w.saturating_sub(spaces);
        debug!(
            "Help::help: help_width={}, spaces={}, avail={}",
            spaces,
            help.display_width(),
            avail_chars
        );
        help.wrap(avail_chars);
        help.indent("", &trailing_indent);
        let help_is_empty = help.is_empty();
        self.writer.extend(help.into_iter());
        if let Some(arg) = arg {
            const DASH_SPACE: usize = "- ".len();
            const COLON_SPACE: usize = ": ".len();
            let possible_vals = arg.get_possible_values();
            // Only long help renders the expanded "Possible values:" section;
            // short help lists them inline via spec_vals instead.
            if self.use_long
                && !arg.is_hide_possible_values_set()
                && possible_vals.iter().any(PossibleValue::should_show_help)
            {
                debug!("Help::help: Found possible vals...{:?}", possible_vals);
                if !help_is_empty {
                    self.none("\n\n");
                    self.spaces(spaces);
                }
                self.none("Possible values:");
                let longest = possible_vals
                    .iter()
                    .filter_map(|f| f.get_visible_quoted_name().map(|name| display_width(&name)))
                    .max()
                    .expect("Only called with possible value");
                let help_longest = possible_vals
                    .iter()
                    .filter_map(|f| f.get_visible_help().map(|h| h.display_width()))
                    .max()
                    .expect("Only called with possible value with help");
                // should new line
                let taken = longest + spaces + DASH_SPACE;
                let possible_value_new_line =
                    self.term_w >= taken && self.term_w < taken + COLON_SPACE + help_longest;
                let spaces = spaces + TAB_WIDTH - DASH_SPACE;
                let trailing_indent = if possible_value_new_line {
                    spaces + DASH_SPACE
                } else {
                    spaces + longest + DASH_SPACE + COLON_SPACE
                };
                let trailing_indent = self.get_spaces(trailing_indent);
                for pv in possible_vals.iter().filter(|pv| !pv.is_hide_set()) {
                    self.none("\n");
                    self.spaces(spaces);
                    self.none("- ");
                    self.good(pv.get_name());
                    if let Some(help) = pv.get_help() {
                        debug!("Help::help: Possible Value help");

                        if possible_value_new_line {
                            self.none(":\n");
                            self.spaces(trailing_indent.len());
                        } else {
                            self.none(": ");
                            // To align help messages
                            self.spaces(longest - display_width(pv.get_name()));
                        }

                        let avail_chars = if self.term_w > trailing_indent.len() {
                            self.term_w - trailing_indent.len()
                        } else {
                            usize::MAX
                        };

                        let mut help = help.clone();
                        help.replace_newline();
                        help.wrap(avail_chars);
                        help.indent("", &trailing_indent);
                        self.writer.extend(help.into_iter());
                    }
                }
            }
        }
    }

    /// Will use next line help on writing args.
    /// Returns `true` if any visible arg would need its help rendered on the
    /// next line (i.e. the whole arg table should switch to next-line mode).
    fn will_args_wrap(&self, args: &[&Arg], longest: usize) -> bool {
        args.iter()
            .filter(|arg| should_show_arg(self.use_long, *arg))
            .any(|arg| {
                let spec_vals = &self.spec_vals(arg);
                self.arg_next_line_help(arg, spec_vals, longest)
            })
    }

    /// Decides whether a single arg's help must go on the line after the flag
    /// column: always in next-line/long mode, otherwise only when the flag
    /// column eats >40% of the terminal and the help no longer fits beside it.
    fn arg_next_line_help(&self, arg: &Arg, spec_vals: &str, longest: usize) -> bool {
        if self.next_line_help || arg.is_next_line_help_set() || self.use_long {
            // setting_next_line
            true
        } else {
            // force_next_line
            let h = arg.get_help().unwrap_or_default();
            let h_w = h.display_width() + display_width(spec_vals);
            let taken = longest + 12;
            self.term_w >= taken
                && (taken as f32 / self.term_w as f32) > 0.40
                && h_w > (self.term_w - taken)
        }
    }

    /// Renders the bracketed "spec value" suffix for an argument:
    /// `[env: ..]`, `[default: ..]`, `[aliases: ..]`, `[short aliases: ..]`,
    /// `[possible values: ..]` — joined with newlines in long help, spaces
    /// otherwise, and prefixed with a separator when the arg also has help.
    fn spec_vals(&self, a: &Arg) -> String {
        debug!("Help::spec_vals: a={}", a);
        let mut spec_vals = vec![];
        #[cfg(feature = "env")]
        if let Some(ref env) = a.env {
            if !a.is_hide_env_set() {
                debug!(
                    "Help::spec_vals: Found environment variable...[{:?}:{:?}]",
                    env.0, env.1
                );
                let env_val = if !a.is_hide_env_values_set() {
                    format!(
                        "={}",
                        env.1
                            .as_ref()
                            .map_or(Cow::Borrowed(""), |val| val.to_string_lossy())
                    )
                } else {
                    String::new()
                };
                let env_info = format!("[env: {}{}]", env.0.to_string_lossy(), env_val);
                spec_vals.push(env_info);
            }
        }
        if a.is_takes_value_set() && !a.is_hide_default_value_set() && !a.default_vals.is_empty() {
            debug!(
                "Help::spec_vals: Found default value...[{:?}]",
                a.default_vals
            );
            let pvs = a
                .default_vals
                .iter()
                .map(|pvs| pvs.to_string_lossy())
                .map(|pvs| {
                    // Quote defaults containing whitespace so they read as one
                    // value.
                    if pvs.contains(char::is_whitespace) {
                        Cow::from(format!("{:?}", pvs))
                    } else {
                        pvs
                    }
                })
                .collect::<Vec<_>>()
                .join(" ");
            spec_vals.push(format!("[default: {}]", pvs));
        }
        if !a.aliases.is_empty() {
            debug!("Help::spec_vals: Found aliases...{:?}", a.aliases);
            let als = a
                .aliases
                .iter()
                .filter(|&als| als.1) // visible
                .map(|als| als.0.as_str()) // name
                .collect::<Vec<_>>()
                .join(", ");
            if !als.is_empty() {
                spec_vals.push(format!("[aliases: {}]", als));
            }
        }
        if !a.short_aliases.is_empty() {
            debug!(
                "Help::spec_vals: Found short aliases...{:?}",
                a.short_aliases
            );
            let als = a
                .short_aliases
                .iter()
                .filter(|&als| als.1) // visible
                .map(|&als| als.0.to_string()) // name
                .collect::<Vec<_>>()
                .join(", ");
            if !als.is_empty() {
                spec_vals.push(format!("[short aliases: {}]", als));
            }
        }
        let possible_vals = a.get_possible_values();
        // Skip the inline list when long help already prints one value per
        // line (see `Help::help`).
        if !(a.is_hide_possible_values_set()
            || possible_vals.is_empty()
            || self.use_long && possible_vals.iter().any(PossibleValue::should_show_help))
        {
            debug!("Help::spec_vals: Found possible vals...{:?}", possible_vals);
            let pvs = possible_vals
                .iter()
                .filter_map(PossibleValue::get_visible_quoted_name)
                .collect::<Vec<_>>()
                .join(", ");
            spec_vals.push(format!("[possible values: {}]", pvs));
        }
        let connector = if self.use_long { "\n" } else { " " };
        let prefix = if !spec_vals.is_empty() && !a.get_help().unwrap_or_default().is_empty() {
            if self.use_long {
                "\n\n"
            } else {
                " "
            }
        } else {
            ""
        };
        prefix.to_string() + &spec_vals.join(connector)
    }

    /// Writes the (long) about text, optionally surrounded by newlines.
    fn write_about(&mut self, before_new_line: bool, after_new_line: bool) {
        let about = if self.use_long {
            self.cmd.get_long_about().or_else(|| self.cmd.get_about())
        } else {
            self.cmd.get_about()
        };
        if let Some(output) = about {
            if before_new_line {
                self.none("\n");
            }
            let mut output = output.clone();
            output.replace_newline();
            output.wrap(self.term_w);
            self.writer.extend(output.into_iter());
            if after_new_line {
                self.none("\n");
            }
        }
    }

    /// Writes the author string, optionally surrounded by newlines.
    fn write_author(&mut self, before_new_line: bool, after_new_line: bool) {
        if let Some(author) = self.cmd.get_author() {
            if before_new_line {
                self.none("\n");
            }
            self.none(wrap(author, self.term_w));
            if after_new_line {
                self.none("\n");
            }
        }
    }

    /// Writes the version string (short variant preferred).
    fn write_version(&mut self) {
        let version = self
            .cmd
            .get_version()
            .or_else(|| self.cmd.get_long_version());
        if let Some(output) = version {
            self.none(wrap(output, self.term_w));
        }
    }
}

/// Methods to write a single subcommand
impl<'cmd, 'writer> Help<'cmd, 'writer> {
    fn write_subcommand(
        &mut self,
        sc_str: &str,
        cmd: &Command,
        next_line_help: bool,
        longest: usize,
    ) {
debug!("Help::write_subcommand"); let spec_vals = &self.sc_spec_vals(cmd); let about = cmd .get_about() .or_else(|| cmd.get_long_about()) .unwrap_or_default(); self.subcmd(sc_str, next_line_help, longest); self.help(None, about, spec_vals, next_line_help, longest) } fn sc_spec_vals(&self, a: &Command) -> String { debug!("Help::sc_spec_vals: a={}", a.get_name()); let mut spec_vals = vec![]; if 0 < a.get_all_aliases().count() || 0 < a.get_all_short_flag_aliases().count() { debug!( "Help::spec_vals: Found aliases...{:?}", a.get_all_aliases().collect::<Vec<_>>() ); debug!( "Help::spec_vals: Found short flag aliases...{:?}", a.get_all_short_flag_aliases().collect::<Vec<_>>() ); let mut short_als = a .get_visible_short_flag_aliases() .map(|a| format!("-{}", a)) .collect::<Vec<_>>(); let als = a.get_visible_aliases().map(|s| s.to_string()); short_als.extend(als); let all_als = short_als.join(", "); if !all_als.is_empty() { spec_vals.push(format!(" [aliases: {}]", all_als)); } } spec_vals.join(" ") } fn subcommand_next_line_help(&self, cmd: &Command, spec_vals: &str, longest: usize) -> bool { if self.next_line_help | self.use_long { // setting_next_line true } else { // force_next_line let h = cmd.get_about().unwrap_or_default(); let h_w = h.display_width() + display_width(spec_vals); let taken = longest + 12; self.term_w >= taken && (taken as f32 / self.term_w as f32) > 0.40 && h_w > (self.term_w - taken) } } /// Writes subcommand to the wrapped stream. fn subcmd(&mut self, sc_str: &str, next_line_help: bool, longest: usize) { self.none(TAB); self.good(sc_str); if !next_line_help { let width = display_width(sc_str); self.spaces(width.max(longest + 4) - width); } } } // Methods to write Parser help. impl<'cmd, 'writer> Help<'cmd, 'writer> { /// Writes help for all arguments (options, flags, args, subcommands) /// including titles of a Parser Object to the wrapped stream. 
pub(crate) fn write_all_args(&mut self) { debug!("Help::write_all_args"); let pos = self .cmd .get_positionals_with_no_heading() .filter(|arg| should_show_arg(self.use_long, arg)) .collect::<Vec<_>>(); let non_pos = self .cmd .get_non_positionals_with_no_heading() .filter(|arg| should_show_arg(self.use_long, arg)) .collect::<Vec<_>>(); let subcmds = self.cmd.has_visible_subcommands(); let custom_headings = self .cmd .get_arguments() .filter_map(|arg| arg.get_help_heading()) .collect::<FlatSet<_>>(); let mut first = if !pos.is_empty() { // Write positional args if any self.warning("ARGS:\n"); self.write_args(&pos, "ARGS", positional_sort_key); false } else { true }; if !non_pos.is_empty() { if !first { self.none("\n\n"); } self.warning("OPTIONS:\n"); self.write_args(&non_pos, "OPTIONS", option_sort_key); first = false; } if !custom_headings.is_empty() { for heading in custom_headings { let args = self .cmd .get_arguments() .filter(|a| { if let Some(help_heading) = a.get_help_heading() { return help_heading == heading; } false }) .filter(|arg| should_show_arg(self.use_long, arg)) .collect::<Vec<_>>(); if !args.is_empty() { if !first { self.none("\n\n"); } self.warning(format!("{}:\n", heading)); self.write_args(&args, heading, option_sort_key); first = false } } } if subcmds { if !first { self.none("\n\n"); } let default_help_heading = Str::from("SUBCOMMANDS"); self.warning( self.cmd .get_subcommand_help_heading() .unwrap_or(&default_help_heading), ); self.warning(":\n"); self.write_subcommands(self.cmd); } } /// Will use next line help on writing subcommands. fn will_subcommands_wrap<'a>( &self, subcommands: impl IntoIterator<Item = &'a Command>, longest: usize, ) -> bool { subcommands .into_iter() .filter(|&subcommand| should_show_subcommand(subcommand)) .any(|subcommand| { let spec_vals = &self.sc_spec_vals(subcommand); self.subcommand_next_line_help(subcommand, spec_vals, longest) }) } /// Writes help for subcommands of a Parser Object to the wrapped stream. 
fn write_subcommands(&mut self, cmd: &Command) { debug!("Help::write_subcommands"); // The shortest an arg can legally be is 2 (i.e. '-x') let mut longest = 2; let mut ord_v = Vec::new(); for subcommand in cmd .get_subcommands() .filter(|subcommand| should_show_subcommand(subcommand)) { let mut sc_str = String::new(); sc_str.push_str(subcommand.get_name()); if let Some(short) = subcommand.get_short_flag() { write!(sc_str, " -{}", short).unwrap(); } if let Some(long) = subcommand.get_long_flag() { write!(sc_str, " --{}", long).unwrap(); } longest = longest.max(display_width(&sc_str)); ord_v.push((subcommand.get_display_order(), sc_str, subcommand)); } ord_v.sort_by(|a, b| (a.0, &a.1).cmp(&(b.0, &b.1))); debug!("Help::write_subcommands longest = {}", longest); let next_line_help = self.will_subcommands_wrap(cmd.get_subcommands(), longest); let mut first = true; for (_, sc_str, sc) in &ord_v { if first { first = false; } else { self.none("\n"); } self.write_subcommand(sc_str, sc, next_line_help, longest); } } /// Writes binary name of a Parser Object to the wrapped stream. fn write_display_name(&mut self) { debug!("Help::write_display_name"); let display_name = wrap( &self .cmd .get_display_name() .unwrap_or_else(|| self.cmd.get_name()) .replace("{n}", "\n"), self.term_w, ); self.good(&display_name); } /// Writes binary name of a Parser Object to the wrapped stream. fn write_bin_name(&mut self) { debug!("Help::write_bin_name"); let bin_name = if let Some(bn) = self.cmd.get_bin_name() { if bn.contains(' ') { // In case we're dealing with subcommands i.e. git mv is translated to git-mv bn.replace(' ', "-") } else { wrap(&self.cmd.get_name().replace("{n}", "\n"), self.term_w) } } else { wrap(&self.cmd.get_name().replace("{n}", "\n"), self.term_w) }; self.good(&bin_name); } } // Methods to write Parser help using templates. impl<'cmd, 'writer> Help<'cmd, 'writer> { /// Write help to stream for the parser in the format defined by the template. 
    ///
    /// For details about the template language see [`Command::help_template`].
    ///
    /// [`Command::help_template`]: Command::help_template()
    fn write_templated_help(&mut self, template: &str) {
        debug!("Help::write_templated_help");
        // The strategy is to copy the template from the reader to wrapped stream
        // until a tag is found. Depending on its value, the appropriate content is copied
        // to the wrapped stream.
        // The copy from template is then resumed, repeating this sequence until reading
        // the complete template.
        macro_rules! tags {
            (
                match $part:ident {
                    $( $tag:expr => $action:stmt )*
                }
            ) => {
                match $part {
                    $(
                        part if part.starts_with(concat!($tag, "}")) => {
                            $action
                            // Echo any text following the closing `}`.
                            let rest = &part[$tag.len()+1..];
                            self.none(rest);
                        }
                    )*
                    // Unknown tag, write it back.
                    part => {
                        self.none("{");
                        self.none(part);
                    }
                }
            };
        }
        // Split on '{'; the first fragment precedes any tag and is copied
        // verbatim, each later fragment starts with a candidate tag name.
        let mut parts = template.split('{');
        if let Some(first) = parts.next() {
            self.none(first);
        }
        for part in parts {
            tags! {
                match part {
                    "name" => {
                        self.write_display_name();
                    }
                    "bin" => {
                        self.write_bin_name();
                    }
                    "version" => {
                        self.write_version();
                    }
                    "author" => {
                        self.write_author(false, false);
                    }
                    "author-with-newline" => {
                        self.write_author(false, true);
                    }
                    "author-section" => {
                        self.write_author(true, true);
                    }
                    "about" => {
                        self.write_about(false, false);
                    }
                    "about-with-newline" => {
                        self.write_about(false, true);
                    }
                    "about-section" => {
                        self.write_about(true, true);
                    }
                    "usage-heading" => {
                        self.warning("USAGE:");
                    }
                    "usage" => {
                        self.none(self.usage.create_usage_no_title(&[]));
                    }
                    "all-args" => {
                        self.write_all_args();
                    }
                    "options" => {
                        // Include even those with a heading as we don't have a good way of
                        // handling help_heading in the template.
                        self.write_args(&self.cmd.get_non_positionals().collect::<Vec<_>>(), "options", option_sort_key);
                    }
                    "positionals" => {
                        self.write_args(&self.cmd.get_positionals().collect::<Vec<_>>(), "positionals", positional_sort_key);
                    }
                    "subcommands" => {
                        self.write_subcommands(self.cmd);
                    }
                    "after-help" => {
                        self.write_after_help();
                    }
                    "before-help" => {
                        self.write_before_help();
                    }
                }
            }
        }
    }
}

// Sort key: (display order, tie-breaking string) for one arg.
type ArgSortKey = fn(arg: &Arg) -> (usize, String);

fn positional_sort_key(arg: &Arg) -> (usize, String) {
    (arg.get_index().unwrap_or(0), String::new())
}

fn option_sort_key(arg: &Arg) -> (usize, String) {
    // Formatting key like this to ensure that:
    // 1. Argument has long flags are printed just after short flags.
    // 2. For two args both have short flags like `-c` and `-C`, the
    //    `-C` arg is printed just after the `-c` arg
    // 3. For args without short or long flag, print them at last(sorted
    //    by arg name).
    // Example order: -a, -b, -B, -s, --select-file, --select-folder, -x
    let key = if let Some(x) = arg.get_short() {
        let mut s = x.to_ascii_lowercase().to_string();
        s.push(if x.is_ascii_lowercase() { '0' } else { '1' });
        s
    } else if let Some(x) = arg.get_long() {
        x.to_string()
    } else {
        // '{' sorts after all ASCII letters, pushing flagless args last.
        let mut s = '{'.to_string();
        s.push_str(arg.id.as_str());
        s
    };
    (arg.get_display_order(), key)
}

// Returns the terminal (width, height), or `None` without the `wrap_help`
// feature.
pub(crate) fn dimensions() -> Option<(usize, usize)> {
    #[cfg(not(feature = "wrap_help"))]
    return None;

    #[cfg(feature = "wrap_help")]
    terminal_size::terminal_size().map(|(w, h)| (w.0.into(), h.0.into()))
}

const TAB: &str = "    ";
const TAB_WIDTH: usize = 4;

// An arg is shown unless hidden outright or hidden for the current
// (short/long) help mode; `next_line_help` forces it visible.
fn should_show_arg(use_long: bool, arg: &Arg) -> bool {
    debug!(
        "should_show_arg: use_long={:?}, arg={}",
        use_long,
        arg.get_id()
    );
    if arg.is_hide_set() {
        return false;
    }
    (!arg.is_hide_long_help_set() && use_long)
        || (!arg.is_hide_short_help_set() && !use_long)
        || arg.is_next_line_help_set()
}

fn should_show_subcommand(subcommand: &Command) -> bool {
    !subcommand.is_hide_set()
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn wrap_help_last_word() {
        let help = String::from("foo bar baz");
        assert_eq!(wrap(&help, 5), "foo\nbar\nbaz");
    }

    #[test]
    #[cfg(feature = "unicode")]
    fn display_width_handles_non_ascii() {
        // Popular Danish tongue-twister, the name of a fruit dessert.
        let text = "rødgrød med fløde";
        assert_eq!(display_width(text), 17);
        // Note that the string width is smaller than the string
        // length. This is due to the precomposed non-ASCII letters:
        assert_eq!(text.len(), 20);
    }

    #[test]
    #[cfg(feature = "unicode")]
    fn display_width_handles_emojis() {
        let text = "😂";
        // There is a single `char`...
        assert_eq!(text.chars().count(), 1);
        // but it is double-width:
        assert_eq!(display_width(text), 2);
        // This is much less than the byte length:
        assert_eq!(text.len(), 4);
    }
}
use std::any::Any; use std::cell::{Cell, RefCell}; use std::io::{self, ErrorKind}; use std::marker; use std::mem; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; use std::sync::mpsc; use std::time::{Instant, Duration}; use futures::{Future, Task, TaskHandle, Poll}; use futures::executor::{ExecuteCallback, Executor}; use futures_io::Ready; use mio; use slab::Slab; use channel::{Sender, Receiver, channel}; use event_loop::dropbox::DropBox; use slot::{self, Slot}; use timer_wheel::{TimerWheel, Timeout}; static NEXT_LOOP_ID: AtomicUsize = ATOMIC_USIZE_INIT; scoped_thread_local!(static CURRENT_LOOP: Loop); const SLAB_CAPACITY: usize = 1024 * 64; /// An event loop. /// /// The event loop is the main source of blocking in an application which drives /// all other I/O events and notifications happening. Each event loop can have /// multiple handles pointing to it, each of which can then be used to create /// various I/O objects to interact with the event loop in interesting ways. // TODO: expand this pub struct Loop { id: usize, active: Cell<bool>, io: mio::Poll, tx: Arc<MioSender>, rx: Receiver<Message>, dispatch: RefCell<Slab<Scheduled, usize>>, // Timer wheel keeping track of all timeouts. The `usize` stored in the // timer wheel is an index into the slab below. // // The slab below keeps track of the timeouts themselves as well as the // state of the timeout itself. The `TimeoutToken` type is an index into the // `timeouts` slab. timer_wheel: RefCell<TimerWheel<usize>>, timeouts: RefCell<Slab<(Timeout, TimeoutState), usize>>, } struct MioSender { inner: Sender<Message>, } /// Handle to an event loop, used to construct I/O objects, send messages, and /// otherwise interact indirectly with the event loop itself. /// /// Handles can be cloned, and when cloned they will still refer to the /// same underlying event loop. 
#[derive(Clone)] pub struct LoopHandle { id: usize, tx: Arc<MioSender>, } struct Scheduled { source: IoSource, waiter: Option<TaskHandle>, } enum TimeoutState { NotFired, Fired, Waiting(TaskHandle), } enum Message { AddSource(IoSource, Arc<Slot<io::Result<usize>>>), DropSource(usize), Schedule(usize, TaskHandle), Deschedule(usize), AddTimeout(Instant, Arc<Slot<io::Result<TimeoutToken>>>), UpdateTimeout(TimeoutToken, TaskHandle), CancelTimeout(TimeoutToken), Run(Box<ExecuteCallback>), Drop(DropBox<Any>), Shutdown, } pub struct Source<E: ?Sized> { readiness: AtomicUsize, io: E, } pub type IoSource = Arc<Source<mio::Evented + Sync + Send>>; fn register(poll: &mio::Poll, token: usize, sched: &Scheduled) -> io::Result<()> { poll.register(&sched.source.io, mio::Token(token), mio::EventSet::readable() | mio::EventSet::writable(), mio::PollOpt::edge()) } fn deregister(poll: &mio::Poll, sched: &Scheduled) { // TODO: handle error poll.deregister(&sched.source.io).unwrap(); } impl Loop { /// Creates a new event loop, returning any error that happened during the /// creation. pub fn new() -> io::Result<Loop> { let (tx, rx) = channel(); let io = try!(mio::Poll::new()); try!(io.register(&rx, mio::Token(0), mio::EventSet::readable(), mio::PollOpt::edge())); Ok(Loop { id: NEXT_LOOP_ID.fetch_add(1, Ordering::Relaxed), active: Cell::new(true), io: io, tx: Arc::new(MioSender { inner: tx }), rx: rx, dispatch: RefCell::new(Slab::new_starting_at(1, SLAB_CAPACITY)), timeouts: RefCell::new(Slab::new_starting_at(0, SLAB_CAPACITY)), timer_wheel: RefCell::new(TimerWheel::new()), }) } /// Generates a handle to this event loop used to construct I/O objects and /// send messages. /// /// Handles to an event loop are cloneable as well and clones will always /// refer to the same event loop. pub fn handle(&self) -> LoopHandle { LoopHandle { id: self.id, tx: self.tx.clone(), } } /// Runs a future until completion, driving the event loop while we're /// otherwise waiting for the future to complete. 
    ///
    /// Returns the value that the future resolves to.
    pub fn run<F: Future>(&mut self, f: F) -> Result<F::Item, F::Error> {
        let (tx_res, rx_res) = mpsc::channel();
        let handle = self.handle();
        // When the future completes, shut the loop down and ship the result
        // out through the mpsc channel below.
        f.then(move |res| {
            handle.shutdown();
            tx_res.send(res)
        }).forget();
        self._run();
        rx_res.recv().unwrap()
    }

    // Core poll/dispatch loop: blocks on mio until readiness events or the
    // next timeout, then services timeouts, the message queue (token 0), and
    // registered I/O sources.
    fn _run(&mut self) {
        let mut events = mio::Events::new();
        self.active.set(true);
        while self.active.get() {
            let amt;
            // On Linux, Poll::poll is epoll_wait, which may return EINTR if a
            // ptracer attaches. This retry loop prevents crashing when
            // attaching strace, or similar.
            let start = Instant::now();
            loop {
                // Cap the poll timeout at the next timer-wheel deadline
                // (zero if it's already due).
                let timeout = self.timer_wheel.borrow().next_timeout().map(|t| {
                    if t < start {
                        Duration::new(0, 0)
                    } else {
                        t - start
                    }
                });
                match self.io.poll(&mut events, timeout) {
                    Ok(a) => { amt = a; break; }
                    Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
                    err @ Err(_) => { err.unwrap(); }
                }
            }
            debug!("loop poll - {:?}", start.elapsed());

            // First up, process all timeouts that may have just occurred.
            let start = Instant::now();
            self.consume_timeouts(start);

            // Next, process all the events that came in.
            for i in 0..events.len() {
                let event = events.get(i).unwrap();
                let token = usize::from(event.token());

                // Token 0 == our incoming message queue, so this means we
                // process the whole queue of messages.
                if token == 0 {
                    debug!("consuming notification queue");
                    CURRENT_LOOP.set(&self, || {
                        self.consume_queue();
                    });
                    continue
                }

                // For any other token we look at `dispatch` to see what we're
                // supposed to do. If there's a waiter we get ready to notify
                // it, and we also or-in atomically any events that have
                // happened (currently read/write events).
                let mut waiter = None;
                if let Some(sched) = self.dispatch.borrow_mut().get_mut(token) {
                    waiter = sched.waiter.take();
                    if event.kind().is_readable() {
                        sched.source.readiness.fetch_or(1, Ordering::Relaxed);
                    }
                    if event.kind().is_writable() {
                        sched.source.readiness.fetch_or(2, Ordering::Relaxed);
                    }
                } else {
                    debug!("notified on {} which no longer exists", token);
                }

                // If we actually got a waiter, then notify!
                if let Some(waiter) = waiter {
                    self.notify_handle(waiter);
                }
            }

            debug!("loop process - {} events, {:?}", amt, start.elapsed());
        }

        debug!("loop is done!");
    }

    // Fires every timeout that is due as of `now`, notifying any task
    // blocked on it.
    fn consume_timeouts(&mut self, now: Instant) {
        loop {
            let idx = match self.timer_wheel.borrow_mut().poll(now) {
                Some(idx) => idx,
                None => break,
            };
            trace!("firing timeout: {}", idx);
            let handle = self.timeouts.borrow_mut()[idx].1.fire();
            if let Some(handle) = handle {
                self.notify_handle(handle);
            }
        }
    }

    /// Method used to notify a task handle.
    ///
    /// Note that this should be used instead of `handle.notify()` to ensure
    /// that the `CURRENT_LOOP` variable is set appropriately.
    fn notify_handle(&self, handle: TaskHandle) {
        CURRENT_LOOP.set(&self, || handle.notify());
    }

    // Registers a new I/O source with the poller, growing the slab if full,
    // and returns the token assigned to it.
    fn add_source(&self, source: IoSource) -> io::Result<usize> {
        let sched = Scheduled {
            source: source,
            waiter: None,
        };
        let mut dispatch = self.dispatch.borrow_mut();
        if dispatch.vacant_entry().is_none() {
            let amt = dispatch.count();
            dispatch.grow(amt);
        }
        let entry = dispatch.vacant_entry().unwrap();
        try!(register(&self.io, entry.index(), &sched));
        Ok(entry.insert(sched).index())
    }

    fn drop_source(&self, token: usize) {
        let sched = self.dispatch.borrow_mut().remove(token).unwrap();
        deregister(&self.io, &sched);
    }

    // Records `wake` as the waiter for `token`; if readiness is already
    // pending, notifies immediately (outside the borrow) instead.
    fn schedule(&self, token: usize, wake: TaskHandle) {
        let to_call = {
            let mut dispatch = self.dispatch.borrow_mut();
            let sched = dispatch.get_mut(token).unwrap();
            if sched.source.readiness.load(Ordering::Relaxed) != 0 {
                sched.waiter = None;
                Some(wake)
            } else {
                sched.waiter = Some(wake);
                None
            }
        };
        if let Some(to_call) = to_call {
            self.notify_handle(to_call);
        }
    }

    fn deschedule(&self, token: usize) {
        let mut dispatch = self.dispatch.borrow_mut();
        let sched = dispatch.get_mut(token).unwrap();
        sched.waiter = None;
    }

    // Inserts a new timeout into the timer wheel and the state slab, growing
    // the slab if full; returns a token naming it.
    fn add_timeout(&self, at: Instant) -> io::Result<TimeoutToken> {
        let mut timeouts = self.timeouts.borrow_mut();
        if timeouts.vacant_entry().is_none() {
            let len = timeouts.count();
            timeouts.grow(len);
        }
        let entry = timeouts.vacant_entry().unwrap();
        let timeout = self.timer_wheel.borrow_mut().insert(at, entry.index());
        let entry = entry.insert((timeout, TimeoutState::NotFired));
        Ok(TimeoutToken { token: entry.index() })
    }

    // Blocks `handle` on the timeout; if it already fired, notifies right
    // away.
    fn update_timeout(&self, token: &TimeoutToken, handle: TaskHandle) {
        let to_wake = self.timeouts.borrow_mut()[token.token].1.block(handle);
        if let Some(to_wake) = to_wake {
            self.notify_handle(to_wake);
        }
    }

    fn cancel_timeout(&self, token: &TimeoutToken) {
        let pair = self.timeouts.borrow_mut().remove(token.token);
        if let Some((timeout, _state)) = pair {
            self.timer_wheel.borrow_mut().cancel(&timeout);
        }
    }

    // Drains and dispatches every message currently queued.
    fn consume_queue(&self) {
        // TODO: can we do better than `.unwrap()` here?
        while let Some(msg) = self.rx.recv().unwrap() {
            self.notify(msg);
        }
    }

    // Dispatches one message to the corresponding `Loop` method; `AddSource`
    // and `AddTimeout` reply through their `Slot`.
    fn notify(&self, msg: Message) {
        match msg {
            Message::AddSource(source, slot) => {
                // This unwrap() should always be ok as we're the only producer
                slot.try_produce(self.add_source(source))
                    .ok().expect("interference with try_produce");
            }
            Message::DropSource(tok) => self.drop_source(tok),
            Message::Schedule(tok, wake) => self.schedule(tok, wake),
            Message::Deschedule(tok) => self.deschedule(tok),
            Message::Shutdown => self.active.set(false),
            Message::AddTimeout(at, slot) => {
                slot.try_produce(self.add_timeout(at))
                    .ok().expect("interference with try_produce on timeout");
            }
            Message::UpdateTimeout(t, handle) => self.update_timeout(&t, handle),
            Message::CancelTimeout(t) => self.cancel_timeout(&t),
            Message::Run(f) => f.call(),
            Message::Drop(data) => drop(data),
        }
    }
}

impl LoopHandle {
    // Delivers `msg` to the loop: directly (in order, after draining the
    // queue) when called on the loop's own thread, otherwise via the channel.
    fn send(&self, msg: Message) {
        self.with_loop(|lp| {
            match lp {
                Some(lp) => {
                    // Need to execute all existing requests first, to ensure
                    // that our message is processed "in order"
                    lp.consume_queue();
                    lp.notify(msg);
                }
                None => {
                    match self.tx.inner.send(msg) {
                        Ok(()) => {}

                        // This should only happen when there was an error
                        // writing to the pipe to wake up the event loop,
                        // hopefully that never happens
                        Err(e) => {
                            panic!("error sending message to event loop: {}", e)
                        }
                    }
                }
            }
        })
    }

    // Runs `f` with `Some(&Loop)` only when executing on the thread of the
    // loop this handle belongs to, `None` otherwise.
    fn with_loop<F, R>(&self, f: F) -> R
        where F: FnOnce(Option<&Loop>) -> R
    {
        if CURRENT_LOOP.is_set() {
            CURRENT_LOOP.with(|lp| {
                if lp.id == self.id {
                    f(Some(lp))
                } else {
                    f(None)
                }
            })
        } else {
            f(None)
        }
    }

    /// Add a new source to an event loop, returning a future which will resolve
    /// to the token that can be used to identify this source.
    ///
    /// When a new I/O object is created it needs to be communicated to the
    /// event loop to ensure that it's registered and ready to receive
    /// notifications.
    /// The event loop will then respond with a unique token that
    /// this handle can be identified with (the resolved value of the returned
    /// future).
    ///
    /// This token is then passed in turn to each of the methods below to
    /// interact with notifications on the I/O object itself.
    ///
    /// # Panics
    ///
    /// The returned future will panic if the event loop this handle is
    /// associated with has gone away, or if there is an error communicating
    /// with the event loop.
    pub fn add_source(&self, source: IoSource) -> AddSource {
        AddSource {
            inner: LoopFuture {
                loop_handle: self.clone(),
                data: Some(source),
                result: None,
            }
        }
    }

    /// Begin listening for events on an event loop.
    ///
    /// Once an I/O object has been registered with the event loop through the
    /// `add_source` method, this method can be used with the assigned token to
    /// begin awaiting notifications.
    ///
    /// The `dir` argument indicates how the I/O object is expected to be
    /// awaited on (either readable or writable) and the `wake` callback will be
    /// invoked. Note that once the `wake` callback is invoked it will not
    /// be invoked again, it must be re-`schedule`d to continue receiving
    /// notifications.
    ///
    /// # Panics
    ///
    /// This function will panic if the event loop this handle is associated
    /// with has gone away, or if there is an error communicating with the event
    /// loop.
    pub fn schedule(&self, tok: usize, task: &mut Task) {
        // TODO: plumb through `&mut Task` if we're on the event loop
        self.send(Message::Schedule(tok, task.handle().clone()));
    }

    /// Stop listening for events on an event loop.
    ///
    /// Once a callback has been scheduled with the `schedule` method, it can be
    /// unregistered from the event loop with this method. This method does not
    /// guarantee that the callback will not be invoked if it hasn't already,
    /// but a best effort will be made to ensure it is not called.
    ///
    /// # Panics
    ///
    /// This function will panic if the event loop this handle is associated
    /// with has gone away, or if there is an error communicating with the event
    /// loop.
    pub fn deschedule(&self, tok: usize) {
        self.send(Message::Deschedule(tok));
    }

    /// Unregister all information associated with a token on an event loop,
    /// deallocating all internal resources assigned to the given token.
    ///
    /// This method should be called whenever a source of events is being
    /// destroyed. This will ensure that the event loop can reuse `tok` for
    /// another I/O object if necessary and also remove it from any poll
    /// notifications and callbacks.
    ///
    /// Note that wake callbacks may still be invoked after this method is
    /// called as it may take some time for the message to drop a source to
    /// reach the event loop. Despite this fact, this method will attempt to
    /// ensure that the callbacks are **not** invoked, so pending scheduled
    /// callbacks cannot be relied upon to get called.
    ///
    /// # Panics
    ///
    /// This function will panic if the event loop this handle is associated
    /// with has gone away, or if there is an error communicating with the event
    /// loop.
    pub fn drop_source(&self, tok: usize) {
        self.send(Message::DropSource(tok));
    }

    /// Adds a new timeout to get fired at the specified instant, notifying the
    /// specified task.
    pub fn add_timeout(&self, at: Instant) -> AddTimeout {
        AddTimeout {
            inner: LoopFuture {
                loop_handle: self.clone(),
                data: Some(at),
                result: None,
            },
        }
    }

    /// Updates a previously added timeout to notify a new task instead.
    ///
    /// # Panics
    ///
    /// This method will panic if the timeout specified was not created by this
    /// loop handle's `add_timeout` method.
    pub fn update_timeout(&self, timeout: &TimeoutToken, task: &mut Task) {
        let timeout = TimeoutToken { token: timeout.token };
        self.send(Message::UpdateTimeout(timeout, task.handle().clone()))
    }

    /// Cancel a previously added timeout.
    ///
    /// # Panics
    ///
    /// This method will panic if the timeout specified was not created by this
    /// loop handle's `add_timeout` method.
    pub fn cancel_timeout(&self, timeout: &TimeoutToken) {
        let timeout = TimeoutToken { token: timeout.token };
        self.send(Message::CancelTimeout(timeout))
    }

    /// Schedules a closure to add some data to event loop thread itself.
    ///
    /// This function is useful for when storing non-`Send` data inside of a
    /// future. This returns a future which will resolve to a `LoopData<A>`
    /// handle, which is itself `Send + 'static` regardless of the underlying
    /// `A`. That is, for example, you can create a handle to some data that
    /// contains an `Rc`, for example.
    ///
    /// This function takes a closure which may be sent to the event loop to
    /// generate an instance of type `A`. The closure itself is required to be
    /// `Send + 'static`, but the data it produces is only required to adhere to
    /// `Any`.
    ///
    /// If the returned future is polled on the event loop thread itself it will
    /// very cheaply resolve to a handle to the data, but if it's not polled on
    /// the event loop then it will send a message to the event loop to run the
    /// closure `f`, generate a handle, and then the future will yield it back.
    // TODO: more with examples
    pub fn add_loop_data<F, A>(&self, f: F) -> AddLoopData<F, A>
        where F: FnOnce() -> A + Send + 'static,
              A: Any,
    {
        AddLoopData {
            _marker: marker::PhantomData,
            inner: LoopFuture {
                loop_handle: self.clone(),
                data: Some(f),
                result: None,
            },
        }
    }

    /// Send a message to the associated event loop that it should shut down, or
    /// otherwise break out of its current loop of iteration.
    ///
    /// This method does not forcibly cause the event loop to shut down or
    /// perform an interrupt on whatever task is currently running, instead a
    /// message is simply enqueued to at a later date process the request to
    /// stop looping ASAP.
    ///
    /// # Panics
    ///
    /// This function will panic if the event loop this handle is associated
    /// with has gone away, or if there is an error communicating with the event
    /// loop.
    pub fn shutdown(&self) {
        self.send(Message::Shutdown);
    }
}

/// A future which will resolve a unique `tok` token for an I/O object.
///
/// Created through the `LoopHandle::add_source` method, this future can also
/// resolve to an error if there's an issue communicating with the event loop.
pub struct AddSource {
    inner: LoopFuture<usize, IoSource>,
}

impl Future for AddSource {
    type Item = usize;
    type Error = io::Error;

    fn poll(&mut self, _task: &mut Task) -> Poll<usize, io::Error> {
        // Resolved directly via `Loop::add_source` when on the loop's thread.
        self.inner.poll(Loop::add_source)
    }

    fn schedule(&mut self, task: &mut Task) {
        self.inner.schedule(task, Message::AddSource)
    }
}

/// Return value from the `LoopHandle::add_timeout` method, a future that will
/// resolve to a `TimeoutToken` to configure the behavior of that timeout.
pub struct AddTimeout {
    inner: LoopFuture<TimeoutToken, Instant>,
}

/// A token that identifies an active timeout.
pub struct TimeoutToken {
    token: usize,
}

impl Future for AddTimeout {
    type Item = TimeoutToken;
    type Error = io::Error;

    fn poll(&mut self, _task: &mut Task) -> Poll<TimeoutToken, io::Error> {
        self.inner.poll(Loop::add_timeout)
    }

    fn schedule(&mut self, task: &mut Task) {
        self.inner.schedule(task, Message::AddTimeout)
    }
}

/// A handle to data that is owned by an event loop thread, and is only
/// accessible on that thread itself.
///
/// This structure is created by the `LoopHandle::add_loop_data` method which
/// will return a future resolving to one of these references. A `LoopData<A>`
/// handle is `Send` regardless of what `A` is, but the internal data can only
/// be accessed on the event loop thread itself.
///
/// Internally this reference also stores a handle to the event loop that the
/// data originated on, so it knows how to go back to the event loop to access
/// the data itself.
// TODO: write more once it's implemented pub struct LoopData<A: Any> { data: DropBox<A>, handle: LoopHandle, } /// Future returned from the `LoopHandle::add_loop_data` method. /// /// This future will resolve to a `LoopData<A>` reference when completed, which /// represents a handle to data that is "owned" by the event loop thread but can /// migrate among threads temporarily so travel with a future itself. pub struct AddLoopData<F, A> { inner: LoopFuture<DropBox<Any>, F>, _marker: marker::PhantomData<fn() -> A>, } fn _assert() { fn _assert_send<T: Send>() {} _assert_send::<LoopData<()>>(); } impl<F, A> Future for AddLoopData<F, A> where F: FnOnce() -> A + Send + 'static, A: Any, { type Item = LoopData<A>; type Error = io::Error; fn poll(&mut self, _task: &mut Task) -> Poll<LoopData<A>, io::Error> { let ret = self.inner.poll(|_lp, f| { Ok(DropBox::new(f())) }); ret.map(|mut data| { match data.downcast::<A>() { Some(data) => { LoopData { data: data, handle: self.inner.loop_handle.clone(), } } None => panic!("data mixed up?"), } }) } fn schedule(&mut self, task: &mut Task) { self.inner.schedule(task, |f, slot| { Message::Run(Box::new(move || { slot.try_produce(Ok(DropBox::new(f()))).ok() .expect("add loop data try_produce intereference"); })) }) } } impl<A: Any> LoopData<A> { /// Gets a shared reference to the underlying data in this handle. /// /// Returns `None` if it is not called from the event loop thread that this /// `LoopData<A>` is associated with, or `Some` with a reference to the data /// if we are indeed on the event loop thread. pub fn get(&self) -> Option<&A> { self.data.get() } /// Gets a mutable reference to the underlying data in this handle. /// /// Returns `None` if it is not called from the event loop thread that this /// `LoopData<A>` is associated with, or `Some` with a reference to the data /// if we are indeed on the event loop thread. 
pub fn get_mut(&mut self) -> Option<&mut A> { self.data.get_mut() } /// Acquire the executor associated with the thread that owns this /// `LoopData<A>`'s data. /// /// If the `get` and `get_mut` functions above return `None`, then this data /// is being polled on the wrong thread to access the data, and to make /// progress a future may need to migrate to the actual thread which owns /// the relevant data. /// /// This executor can in turn be passed to `Task::poll_on`, which will then /// move the entire future to be polled on the right thread. pub fn executor(&self) -> Arc<Executor> { self.handle.tx.clone() } } impl<A: Any> Drop for LoopData<A> { fn drop(&mut self) { // The `DropBox` we store internally will cause a memory leak if it's // dropped on the wrong thread. While necessary for safety, we don't // actually want a memory leak, so for all normal circumstances we take // out the `DropBox<A>` as a `DropBox<Any>` and then we send it off to // the event loop. // // TODO: possible optimization is to do none of this if we're on the // event loop thread itself if let Some(data) = self.data.take_any() { self.handle.send(Message::Drop(data)); } } } /// A curious inner module with one `unsafe` keyword, yet quite an important /// one! /// /// The purpose of this module is to define a type, `DropBox<A>`, which is able /// to be sent across thread event when the underlying data `A` is itself not /// sendable across threads. This is then in turn used to build up the /// `LoopData` abstraction above. /// /// A `DropBox` currently contains two major components, an identification of /// the thread that it originated from as well as the data itself. Right now the /// data is stored in a `Box` as we'll transition between it and `Box<Any>`, but /// this is perhaps optimizable. /// /// The `DropBox<A>` itself only provides a few safe methods, all of which are /// safe to call from any thread. 
Access to the underlying data is only granted /// if we're on the right thread, and otherwise the methods don't access the /// data itself. /// /// Finally, one crucial piece, if the data is dropped it may run code that /// assumes it's on the original thread. For this reason we have to be sure that /// the data is only dropped on the originating thread itself. It's currently /// the job of the outer `LoopData` to ensure that a `DropBox` is dropped on the /// right thread, so we don't attempt to perform any communication in this /// `Drop` implementation. Instead, if a `DropBox` is dropped on the wrong /// thread, it simply leaks its contents. /// /// All that's really just a lot of words in an attempt to justify the `unsafe` /// impl of `Send` below. The idea is that the data is only ever accessed on the /// originating thread, even during `Drop`. /// /// Note that this is a private module to have a visibility boundary around the /// unsafe internals. Although there's not any unsafe blocks here, the code /// itself is quite unsafe as it has to make sure that the data is dropped in /// the right place, if ever. mod dropbox { use std::any::Any; use std::mem; use super::CURRENT_LOOP; pub struct DropBox<A: ?Sized> { id: usize, inner: Option<Box<A>>, } unsafe impl<A: ?Sized> Send for DropBox<A> {} impl DropBox<Any> { /// Creates a new `DropBox` pinned to the current threads. /// /// Will panic if `CURRENT_LOOP` isn't set. pub fn new<A: Any>(a: A) -> DropBox<Any> { DropBox { id: CURRENT_LOOP.with(|lp| lp.id), inner: Some(Box::new(a) as Box<Any>), } } /// Downcasts this `DropBox` to the type specified. /// /// Normally this always succeeds as it's a static assertion that we /// already have all the types matched up, but an `Option` is returned /// here regardless. 
pub fn downcast<A: Any>(&mut self) -> Option<DropBox<A>> { self.inner.take().and_then(|data| { match data.downcast::<A>() { Ok(a) => Some(DropBox { id: self.id, inner: Some(a) }), // Note that we're careful that when a downcast fails we put // the data back into ourselves, because we may be // downcasting on any thread. This will ensure that if we // drop accidentally we'll forget the data correctly. Err(obj) => { self.inner = Some(obj); None } } }) } } impl<A: Any> DropBox<A> { /// Consumes the contents of this `DropBox<A>`, returning a new /// `DropBox<Any>`. /// /// This is just intended to be a simple and cheap conversion, should /// almost always return `Some`. pub fn take_any(&mut self) -> Option<DropBox<Any>> { self.inner.take().map(|d| { DropBox { id: self.id, inner: Some(d as Box<Any>) } }) } } impl<A: ?Sized> DropBox<A> { /// Returns a shared reference to the data if we're on the right /// thread. pub fn get(&self) -> Option<&A> { if CURRENT_LOOP.is_set() { CURRENT_LOOP.with(|lp| { if lp.id == self.id { self.inner.as_ref().map(|b| &**b) } else { None } }) } else { None } } /// Returns a mutable reference to the data if we're on the right /// thread. pub fn get_mut(&mut self) -> Option<&mut A> { if CURRENT_LOOP.is_set() { CURRENT_LOOP.with(move |lp| { if lp.id == self.id { self.inner.as_mut().map(|b| &mut **b) } else { None } }) } else { None } } } impl<A: ?Sized> Drop for DropBox<A> { fn drop(&mut self) { // Try our safe accessor first, and if it works then we know that // we're on the right thread. In that case we can simply drop as // usual. if let Some(a) = self.get_mut().take() { return drop(a) } // If we're on the wrong thread but we actually have some data, then // something in theory horrible has gone awry. Prevent memory safety // issues by forgetting the data and then also warn about this odd // event. 
if let Some(data) = self.inner.take() { mem::forget(data); warn!("forgetting some data on an event loop"); } } } } struct LoopFuture<T, U> { loop_handle: LoopHandle, data: Option<U>, result: Option<(Arc<Slot<io::Result<T>>>, slot::Token)>, } impl<T, U> LoopFuture<T, U> where T: Send + 'static, { fn poll<F>(&mut self, f: F) -> Poll<T, io::Error> where F: FnOnce(&Loop, U) -> io::Result<T>, { match self.result { Some((ref result, ref token)) => { result.cancel(*token); match result.try_consume() { Ok(t) => t.into(), Err(_) => Poll::NotReady, } } None => { let data = &mut self.data; self.loop_handle.with_loop(|lp| { match lp { Some(lp) => f(lp, data.take().unwrap()).into(), None => Poll::NotReady, } }) } } } fn schedule<F>(&mut self, task: &mut Task, f: F) where F: FnOnce(U, Arc<Slot<io::Result<T>>>) -> Message, { if let Some((ref result, ref mut token)) = self.result { result.cancel(*token); let handle = task.handle().clone(); *token = result.on_full(move |_| { handle.notify(); }); return } let handle = task.handle().clone(); let result = Arc::new(Slot::new(None)); let token = result.on_full(move |_| { handle.notify(); }); self.result = Some((result.clone(), token)); self.loop_handle.send(f(self.data.take().unwrap(), result)) } } impl TimeoutState { fn block(&mut self, handle: TaskHandle) -> Option<TaskHandle> { match *self { TimeoutState::Fired => return Some(handle), _ => {} } *self = TimeoutState::Waiting(handle); None } fn fire(&mut self) -> Option<TaskHandle> { match mem::replace(self, TimeoutState::Fired) { TimeoutState::NotFired => None, TimeoutState::Fired => panic!("fired twice?"), TimeoutState::Waiting(handle) => Some(handle), } } } impl<E> Source<E> { pub fn new(e: E) -> Source<E> { Source { readiness: AtomicUsize::new(0), io: e, } } } impl<E: ?Sized> Source<E> { pub fn take_readiness(&self) -> Option<Ready> { match self.readiness.swap(0, Ordering::SeqCst) { 0 => None, 1 => Some(Ready::Read), 2 => Some(Ready::Write), 3 => Some(Ready::ReadWrite), _ => 
panic!(), } } pub fn io(&self) -> &E { &self.io } } impl Executor for MioSender { fn execute_boxed(&self, callback: Box<ExecuteCallback>) { self.inner.send(Message::Run(callback)) .expect("error sending a message to the event loop") } } Add some assorted debug messages use std::any::Any; use std::cell::{Cell, RefCell}; use std::io::{self, ErrorKind}; use std::marker; use std::mem; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; use std::sync::mpsc; use std::time::{Instant, Duration}; use futures::{Future, Task, TaskHandle, Poll}; use futures::executor::{ExecuteCallback, Executor}; use futures_io::Ready; use mio; use slab::Slab; use channel::{Sender, Receiver, channel}; use event_loop::dropbox::DropBox; use slot::{self, Slot}; use timer_wheel::{TimerWheel, Timeout}; static NEXT_LOOP_ID: AtomicUsize = ATOMIC_USIZE_INIT; scoped_thread_local!(static CURRENT_LOOP: Loop); const SLAB_CAPACITY: usize = 1024 * 64; /// An event loop. /// /// The event loop is the main source of blocking in an application which drives /// all other I/O events and notifications happening. Each event loop can have /// multiple handles pointing to it, each of which can then be used to create /// various I/O objects to interact with the event loop in interesting ways. // TODO: expand this pub struct Loop { id: usize, active: Cell<bool>, io: mio::Poll, tx: Arc<MioSender>, rx: Receiver<Message>, dispatch: RefCell<Slab<Scheduled, usize>>, // Timer wheel keeping track of all timeouts. The `usize` stored in the // timer wheel is an index into the slab below. // // The slab below keeps track of the timeouts themselves as well as the // state of the timeout itself. The `TimeoutToken` type is an index into the // `timeouts` slab. 
timer_wheel: RefCell<TimerWheel<usize>>, timeouts: RefCell<Slab<(Timeout, TimeoutState), usize>>, } struct MioSender { inner: Sender<Message>, } /// Handle to an event loop, used to construct I/O objects, send messages, and /// otherwise interact indirectly with the event loop itself. /// /// Handles can be cloned, and when cloned they will still refer to the /// same underlying event loop. #[derive(Clone)] pub struct LoopHandle { id: usize, tx: Arc<MioSender>, } struct Scheduled { source: IoSource, waiter: Option<TaskHandle>, } enum TimeoutState { NotFired, Fired, Waiting(TaskHandle), } enum Message { AddSource(IoSource, Arc<Slot<io::Result<usize>>>), DropSource(usize), Schedule(usize, TaskHandle), Deschedule(usize), AddTimeout(Instant, Arc<Slot<io::Result<TimeoutToken>>>), UpdateTimeout(TimeoutToken, TaskHandle), CancelTimeout(TimeoutToken), Run(Box<ExecuteCallback>), Drop(DropBox<Any>), Shutdown, } pub struct Source<E: ?Sized> { readiness: AtomicUsize, io: E, } pub type IoSource = Arc<Source<mio::Evented + Sync + Send>>; fn register(poll: &mio::Poll, token: usize, sched: &Scheduled) -> io::Result<()> { poll.register(&sched.source.io, mio::Token(token), mio::EventSet::readable() | mio::EventSet::writable(), mio::PollOpt::edge()) } fn deregister(poll: &mio::Poll, sched: &Scheduled) { // TODO: handle error poll.deregister(&sched.source.io).unwrap(); } impl Loop { /// Creates a new event loop, returning any error that happened during the /// creation. 
pub fn new() -> io::Result<Loop> { let (tx, rx) = channel(); let io = try!(mio::Poll::new()); try!(io.register(&rx, mio::Token(0), mio::EventSet::readable(), mio::PollOpt::edge())); Ok(Loop { id: NEXT_LOOP_ID.fetch_add(1, Ordering::Relaxed), active: Cell::new(true), io: io, tx: Arc::new(MioSender { inner: tx }), rx: rx, dispatch: RefCell::new(Slab::new_starting_at(1, SLAB_CAPACITY)), timeouts: RefCell::new(Slab::new_starting_at(0, SLAB_CAPACITY)), timer_wheel: RefCell::new(TimerWheel::new()), }) } /// Generates a handle to this event loop used to construct I/O objects and /// send messages. /// /// Handles to an event loop are cloneable as well and clones will always /// refer to the same event loop. pub fn handle(&self) -> LoopHandle { LoopHandle { id: self.id, tx: self.tx.clone(), } } /// Runs a future until completion, driving the event loop while we're /// otherwise waiting for the future to complete. /// /// Returns the value that the future resolves to. pub fn run<F: Future>(&mut self, f: F) -> Result<F::Item, F::Error> { let (tx_res, rx_res) = mpsc::channel(); let handle = self.handle(); f.then(move |res| { handle.shutdown(); tx_res.send(res) }).forget(); self._run(); rx_res.recv().unwrap() } fn _run(&mut self) { let mut events = mio::Events::new(); self.active.set(true); while self.active.get() { let amt; // On Linux, Poll::poll is epoll_wait, which may return EINTR if a // ptracer attaches. This retry loop prevents crashing when // attaching strace, or similar. let start = Instant::now(); loop { let timeout = self.timer_wheel.borrow().next_timeout().map(|t| { if t < start { Duration::new(0, 0) } else { t - start } }); match self.io.poll(&mut events, timeout) { Ok(a) => { amt = a; break; } Err(ref e) if e.kind() == ErrorKind::Interrupted => {} err @ Err(_) => { err.unwrap(); } } } debug!("loop poll - {:?}", start.elapsed()); // First up, process all timeouts that may have just occurred. 
            let start = Instant::now();
            self.consume_timeouts(start);

            // Next, process all the events that came in.
            for i in 0..events.len() {
                let event = events.get(i).unwrap();
                let token = usize::from(event.token());

                // Token 0 == our incoming message queue, so this means we
                // process the whole queue of messages.
                if token == 0 {
                    debug!("consuming notification queue");
                    // Set `CURRENT_LOOP` while draining the queue so that any
                    // task notifications triggered by the messages can reach
                    // this loop directly (see `notify_handle` below).
                    CURRENT_LOOP.set(&self, || {
                        self.consume_queue();
                    });
                    continue
                }

                trace!("event {:?} {:?}", event.kind(), event.token());

                // For any other token we look at `dispatch` to see what we're
                // supposed to do. If there's a waiter we get ready to notify
                // it, and we also or-in atomically any events that have
                // happened (currently read/write events).
                let mut waiter = None;
                if let Some(sched) = self.dispatch.borrow_mut().get_mut(token) {
                    waiter = sched.waiter.take();
                    // Bit 1 == readable, bit 2 == writable; these are decoded
                    // later by `Source::take_readiness`.
                    if event.kind().is_readable() {
                        sched.source.readiness.fetch_or(1, Ordering::Relaxed);
                    }
                    if event.kind().is_writable() {
                        sched.source.readiness.fetch_or(2, Ordering::Relaxed);
                    }
                } else {
                    debug!("notified on {} which no longer exists", token);
                }

                // If we actually got a waiter, then notify!
                if let Some(waiter) = waiter {
                    self.notify_handle(waiter);
                }
            }

            debug!("loop process - {} events, {:?}", amt, start.elapsed());
        }

        debug!("loop is done!");
    }

    /// Fires every timeout in the timer wheel that has expired as of `now`,
    /// notifying whichever task (if any) was blocked on each one.
    fn consume_timeouts(&mut self, now: Instant) {
        loop {
            let idx = match self.timer_wheel.borrow_mut().poll(now) {
                Some(idx) => idx,
                None => break,
            };
            trace!("firing timeout: {}", idx);
            let handle = self.timeouts.borrow_mut()[idx].1.fire();
            if let Some(handle) = handle {
                self.notify_handle(handle);
            }
        }
    }

    /// Method used to notify a task handle.
    ///
    /// Note that this should be used instead of `handle.notify()` to ensure
    /// that the `CURRENT_LOOP` variable is set appropriately.
    fn notify_handle(&self, handle: TaskHandle) {
        CURRENT_LOOP.set(&self, || handle.notify());
    }

    /// Registers `source` with the `mio::Poll` instance and stores it in the
    /// dispatch slab, returning the slab index used as the source's token.
    fn add_source(&self, source: IoSource) -> io::Result<usize> {
        let sched = Scheduled {
            source: source,
            waiter: None,
        };
        let mut dispatch = self.dispatch.borrow_mut();
        // The slab has a fixed capacity; double it whenever it fills up.
        if dispatch.vacant_entry().is_none() {
            let amt = dispatch.count();
            dispatch.grow(amt);
        }
        let entry = dispatch.vacant_entry().unwrap();
        try!(register(&self.io, entry.index(), &sched));
        Ok(entry.insert(sched).index())
    }

    /// Removes the source identified by `token` from the dispatch slab and
    /// deregisters it from `mio`. Panics if `token` is not registered.
    fn drop_source(&self, token: usize) {
        let sched = self.dispatch.borrow_mut().remove(token).unwrap();
        deregister(&self.io, &sched);
    }

    /// Arranges for `wake` to be notified when the source `token` becomes
    /// ready. If readiness was already recorded, notifies immediately instead
    /// of parking the handle.
    fn schedule(&self, token: usize, wake: TaskHandle) {
        // The block scope ends the RefCell borrow of `dispatch` before we
        // notify, since the notified task may re-enter loop code that needs
        // to borrow it again.
        let to_call = {
            let mut dispatch = self.dispatch.borrow_mut();
            let sched = dispatch.get_mut(token).unwrap();
            if sched.source.readiness.load(Ordering::Relaxed) != 0 {
                sched.waiter = None;
                Some(wake)
            } else {
                sched.waiter = Some(wake);
                None
            }
        };
        if let Some(to_call) = to_call {
            debug!("schedule immediately done");
            self.notify_handle(to_call);
        }
    }

    /// Clears any task handle waiting on the source `token`, making a best
    /// effort to prevent a pending notification from firing.
    fn deschedule(&self, token: usize) {
        let mut dispatch = self.dispatch.borrow_mut();
        let sched = dispatch.get_mut(token).unwrap();
        sched.waiter = None;
    }

    /// Inserts a new timeout firing at `at` into the timer wheel, returning
    /// the token callers use to update or cancel it later.
    fn add_timeout(&self, at: Instant) -> io::Result<TimeoutToken> {
        let mut timeouts = self.timeouts.borrow_mut();
        // Same doubling strategy as the dispatch slab above.
        if timeouts.vacant_entry().is_none() {
            let len = timeouts.count();
            timeouts.grow(len);
        }
        let entry = timeouts.vacant_entry().unwrap();
        let timeout = self.timer_wheel.borrow_mut().insert(at, entry.index());
        let entry = entry.insert((timeout, TimeoutState::NotFired));
        Ok(TimeoutToken { token: entry.index() })
    }

    /// Parks `handle` on the given timeout; if the timeout already fired,
    /// `block` hands the handle back and we notify it right away.
    fn update_timeout(&self, token: &TimeoutToken, handle: TaskHandle) {
        let to_wake = self.timeouts.borrow_mut()[token.token].1.block(handle);
        if let Some(to_wake) = to_wake {
            self.notify_handle(to_wake);
        }
    }

    /// Removes the timeout from the slab and cancels it in the timer wheel,
    /// if it is still present.
    fn cancel_timeout(&self, token: &TimeoutToken) {
        let pair = self.timeouts.borrow_mut().remove(token.token);
        if let Some((timeout, _state)) = pair {
            self.timer_wheel.borrow_mut().cancel(&timeout);
        }
    }

    fn
consume_queue(&self) { // TODO: can we do better than `.unwrap()` here? while let Some(msg) = self.rx.recv().unwrap() { self.notify(msg); } } fn notify(&self, msg: Message) { match msg { Message::AddSource(source, slot) => { // This unwrap() should always be ok as we're the only producer slot.try_produce(self.add_source(source)) .ok().expect("interference with try_produce"); } Message::DropSource(tok) => self.drop_source(tok), Message::Schedule(tok, wake) => self.schedule(tok, wake), Message::Deschedule(tok) => self.deschedule(tok), Message::Shutdown => self.active.set(false), Message::AddTimeout(at, slot) => { slot.try_produce(self.add_timeout(at)) .ok().expect("interference with try_produce on timeout"); } Message::UpdateTimeout(t, handle) => self.update_timeout(&t, handle), Message::CancelTimeout(t) => self.cancel_timeout(&t), Message::Run(f) => f.call(), Message::Drop(data) => drop(data), } } } impl LoopHandle { fn send(&self, msg: Message) { self.with_loop(|lp| { match lp { Some(lp) => { // Need to execute all existing requests first, to ensure // that our message is processed "in order" lp.consume_queue(); lp.notify(msg); } None => { match self.tx.inner.send(msg) { Ok(()) => {} // This should only happen when there was an error // writing to the pipe to wake up the event loop, // hopefully that never happens Err(e) => { panic!("error sending message to event loop: {}", e) } } } } }) } fn with_loop<F, R>(&self, f: F) -> R where F: FnOnce(Option<&Loop>) -> R { if CURRENT_LOOP.is_set() { CURRENT_LOOP.with(|lp| { if lp.id == self.id { f(Some(lp)) } else { f(None) } }) } else { f(None) } } /// Add a new source to an event loop, returning a future which will resolve /// to the token that can be used to identify this source. /// /// When a new I/O object is created it needs to be communicated to the /// event loop to ensure that it's registered and ready to receive /// notifications. 
The event loop will then respond with a unique token that
    /// this handle can be identified with (the resolved value of the returned
    /// future).
    ///
    /// This token is then passed in turn to each of the methods below to
    /// interact with notifications on the I/O object itself.
    ///
    /// # Panics
    ///
    /// The returned future will panic if the event loop this handle is
    /// associated with has gone away, or if there is an error communicating
    /// with the event loop.
    pub fn add_source(&self, source: IoSource) -> AddSource {
        AddSource {
            inner: LoopFuture {
                loop_handle: self.clone(),
                data: Some(source),
                result: None,
            }
        }
    }

    /// Begin listening for events on an event loop.
    ///
    /// Once an I/O object has been registered with the event loop through the
    /// `add_source` method, this method can be used with the assigned token to
    /// begin awaiting notifications.
    ///
    /// NOTE(review): the paragraph below predates the current signature —
    /// there is no `dir`/`wake` argument any more; the blocked `task` is
    /// notified via its handle instead. Confirm and rewrite.
    ///
    /// The `dir` argument indicates how the I/O object is expected to be
    /// awaited on (either readable or writable) and the `wake` callback will be
    /// invoked. Note that once the `wake` callback is invoked it will not
    /// be invoked again, it must be re-`schedule`d to continue receiving
    /// notifications.
    ///
    /// # Panics
    ///
    /// This function will panic if the event loop this handle is associated
    /// with has gone away, or if there is an error communicating with the event
    /// loop.
    pub fn schedule(&self, tok: usize, task: &mut Task) {
        // TODO: plumb through `&mut Task` if we're on the event loop
        self.send(Message::Schedule(tok, task.handle().clone()));
    }

    /// Stop listening for events on an event loop.
    ///
    /// Once a callback has been scheduled with the `schedule` method, it can be
    /// unregistered from the event loop with this method. This method does not
    /// guarantee that the callback will not be invoked if it hasn't already,
    /// but a best effort will be made to ensure it is not called.
/// /// # Panics /// /// This function will panic if the event loop this handle is associated /// with has gone away, or if there is an error communicating with the event /// loop. pub fn deschedule(&self, tok: usize) { self.send(Message::Deschedule(tok)); } /// Unregister all information associated with a token on an event loop, /// deallocating all internal resources assigned to the given token. /// /// This method should be called whenever a source of events is being /// destroyed. This will ensure that the event loop can reuse `tok` for /// another I/O object if necessary and also remove it from any poll /// notifications and callbacks. /// /// Note that wake callbacks may still be invoked after this method is /// called as it may take some time for the message to drop a source to /// reach the event loop. Despite this fact, this method will attempt to /// ensure that the callbacks are **not** invoked, so pending scheduled /// callbacks cannot be relied upon to get called. /// /// # Panics /// /// This function will panic if the event loop this handle is associated /// with has gone away, or if there is an error communicating with the event /// loop. pub fn drop_source(&self, tok: usize) { self.send(Message::DropSource(tok)); } /// Adds a new timeout to get fired at the specified instant, notifying the /// specified task. pub fn add_timeout(&self, at: Instant) -> AddTimeout { AddTimeout { inner: LoopFuture { loop_handle: self.clone(), data: Some(at), result: None, }, } } /// Updates a previously added timeout to notify a new task instead. /// /// # Panics /// /// This method will panic if the timeout specified was not created by this /// loop handle's `add_timeout` method. pub fn update_timeout(&self, timeout: &TimeoutToken, task: &mut Task) { let timeout = TimeoutToken { token: timeout.token }; self.send(Message::UpdateTimeout(timeout, task.handle().clone())) } /// Cancel a previously added timeout. 
/// /// # Panics /// /// This method will panic if the timeout specified was not created by this /// loop handle's `add_timeout` method. pub fn cancel_timeout(&self, timeout: &TimeoutToken) { let timeout = TimeoutToken { token: timeout.token }; self.send(Message::CancelTimeout(timeout)) } /// Schedules a closure to add some data to event loop thread itself. /// /// This function is useful for when storing non-`Send` data inside of a /// future. This returns a future which will resolve to a `LoopData<A>` /// handle, which is itself `Send + 'static` regardless of the underlying /// `A`. That is, for example, you can create a handle to some data that /// contains an `Rc`, for example. /// /// This function takes a closure which may be sent to the event loop to /// generate an instance of type `A`. The closure itself is required to be /// `Send + 'static`, but the data it produces is only required to adhere to /// `Any`. /// /// If the returned future is polled on the event loop thread itself it will /// very cheaply resolve to a handle to the data, but if it's not polled on /// the event loop then it will send a message to the event loop to run the /// closure `f`, generate a handle, and then the future will yield it back. // TODO: more with examples pub fn add_loop_data<F, A>(&self, f: F) -> AddLoopData<F, A> where F: FnOnce() -> A + Send + 'static, A: Any, { AddLoopData { _marker: marker::PhantomData, inner: LoopFuture { loop_handle: self.clone(), data: Some(f), result: None, }, } } /// Send a message to the associated event loop that it should shut down, or /// otherwise break out of its current loop of iteration. /// /// This method does not forcibly cause the event loop to shut down or /// perform an interrupt on whatever task is currently running, instead a /// message is simply enqueued to at a later date process the request to /// stop looping ASAP. 
/// /// # Panics /// /// This function will panic if the event loop this handle is associated /// with has gone away, or if there is an error communicating with the event /// loop. pub fn shutdown(&self) { self.send(Message::Shutdown); } } /// A future which will resolve a unique `tok` token for an I/O object. /// /// Created through the `LoopHandle::add_source` method, this future can also /// resolve to an error if there's an issue communicating with the event loop. pub struct AddSource { inner: LoopFuture<usize, IoSource>, } impl Future for AddSource { type Item = usize; type Error = io::Error; fn poll(&mut self, _task: &mut Task) -> Poll<usize, io::Error> { self.inner.poll(Loop::add_source) } fn schedule(&mut self, task: &mut Task) { self.inner.schedule(task, Message::AddSource) } } /// Return value from the `LoopHandle::add_timeout` method, a future that will /// resolve to a `TimeoutToken` to configure the behavior of that timeout. pub struct AddTimeout { inner: LoopFuture<TimeoutToken, Instant>, } /// A token that identifies an active timeout. pub struct TimeoutToken { token: usize, } impl Future for AddTimeout { type Item = TimeoutToken; type Error = io::Error; fn poll(&mut self, _task: &mut Task) -> Poll<TimeoutToken, io::Error> { self.inner.poll(Loop::add_timeout) } fn schedule(&mut self, task: &mut Task) { self.inner.schedule(task, Message::AddTimeout) } } /// A handle to data that is owned by an event loop thread, and is only /// accessible on that thread itself. /// /// This structure is created by the `LoopHandle::add_loop_data` method which /// will return a future resolving to one of these references. A `LoopData<A>` /// handle is `Send` regardless of what `A` is, but the internal data can only /// be accessed on the event loop thread itself. /// /// Internally this reference also stores a handle to the event loop that the /// data originated on, so it knows how to go back to the event loop to access /// the data itself. 
// TODO: write more once it's implemented
pub struct LoopData<A: Any> {
    data: DropBox<A>,   // thread-pinned payload; see `mod dropbox` below
    handle: LoopHandle, // way back to the owning loop (used by `executor`/`Drop`)
}

/// Future returned from the `LoopHandle::add_loop_data` method.
///
/// This future will resolve to a `LoopData<A>` reference when completed, which
/// represents a handle to data that is "owned" by the event loop thread but can
/// migrate among threads temporarily so travel with a future itself.
pub struct AddLoopData<F, A> {
    inner: LoopFuture<DropBox<Any>, F>,
    // `fn() -> A` mentions `A` without storing one, so the future can stay
    // `Send` even when `A` itself is not.
    _marker: marker::PhantomData<fn() -> A>,
}

// Compile-time assertion that `LoopData` is `Send` regardless of its payload.
fn _assert() {
    fn _assert_send<T: Send>() {}
    _assert_send::<LoopData<()>>();
}

impl<F, A> Future for AddLoopData<F, A>
    where F: FnOnce() -> A + Send + 'static,
          A: Any,
{
    type Item = LoopData<A>;
    type Error = io::Error;

    fn poll(&mut self, _task: &mut Task) -> Poll<LoopData<A>, io::Error> {
        // Fast path: when polled on the loop thread the closure runs inline
        // and the result is boxed up immediately.
        let ret = self.inner.poll(|_lp, f| {
            Ok(DropBox::new(f()))
        });
        ret.map(|mut data| {
            match data.downcast::<A>() {
                Some(data) => {
                    LoopData {
                        data: data,
                        handle: self.inner.loop_handle.clone(),
                    }
                }
                // We built the box from an `A` ourselves above, so a failed
                // downcast indicates an internal bug, not a user error.
                None => panic!("data mixed up?"),
            }
        })
    }

    fn schedule(&mut self, task: &mut Task) {
        // Slow path: ship the closure to the event loop; the loop runs it,
        // fulfills the slot, and that wakes the registered task.
        self.inner.schedule(task, |f, slot| {
            Message::Run(Box::new(move || {
                slot.try_produce(Ok(DropBox::new(f()))).ok()
                    .expect("add loop data try_produce intereference");
            }))
        })
    }
}

impl<A: Any> LoopData<A> {
    /// Gets a shared reference to the underlying data in this handle.
    ///
    /// Returns `None` if it is not called from the event loop thread that this
    /// `LoopData<A>` is associated with, or `Some` with a reference to the data
    /// if we are indeed on the event loop thread.
    pub fn get(&self) -> Option<&A> {
        self.data.get()
    }

    /// Gets a mutable reference to the underlying data in this handle.
    ///
    /// Returns `None` if it is not called from the event loop thread that this
    /// `LoopData<A>` is associated with, or `Some` with a reference to the data
    /// if we are indeed on the event loop thread.
    pub fn get_mut(&mut self) -> Option<&mut A> {
        self.data.get_mut()
    }

    /// Acquire the executor associated with the thread that owns this
    /// `LoopData<A>`'s data.
    ///
    /// If the `get` and `get_mut` functions above return `None`, then this data
    /// is being polled on the wrong thread to access the data, and to make
    /// progress a future may need to migrate to the actual thread which owns
    /// the relevant data.
    ///
    /// This executor can in turn be passed to `Task::poll_on`, which will then
    /// move the entire future to be polled on the right thread.
    pub fn executor(&self) -> Arc<Executor> {
        self.handle.tx.clone()
    }
}

impl<A: Any> Drop for LoopData<A> {
    fn drop(&mut self) {
        // The `DropBox` we store internally will cause a memory leak if it's
        // dropped on the wrong thread. While necessary for safety, we don't
        // actually want a memory leak, so for all normal circumstances we take
        // out the `DropBox<A>` as a `DropBox<Any>` and then we send it off to
        // the event loop.
        //
        // TODO: possible optimization is to do none of this if we're on the
        //       event loop thread itself
        if let Some(data) = self.data.take_any() {
            self.handle.send(Message::Drop(data));
        }
    }
}

/// A curious inner module with one `unsafe` keyword, yet quite an important
/// one!
///
/// The purpose of this module is to define a type, `DropBox<A>`, which is able
/// to be sent across thread event when the underlying data `A` is itself not
/// sendable across threads. This is then in turn used to build up the
/// `LoopData` abstraction above.
///
/// A `DropBox` currently contains two major components, an identification of
/// the thread that it originated from as well as the data itself. Right now the
/// data is stored in a `Box` as we'll transition between it and `Box<Any>`, but
/// this is perhaps optimizable.
///
/// The `DropBox<A>` itself only provides a few safe methods, all of which are
/// Access to the underlying data is only granted
/// if we're on the right thread, and otherwise the methods don't access the
/// data itself.
///
/// Finally, one crucial piece, if the data is dropped it may run code that
/// assumes it's on the original thread. For this reason we have to be sure that
/// the data is only dropped on the originating thread itself. It's currently
/// the job of the outer `LoopData` to ensure that a `DropBox` is dropped on the
/// right thread, so we don't attempt to perform any communication in this
/// `Drop` implementation. Instead, if a `DropBox` is dropped on the wrong
/// thread, it simply leaks its contents.
///
/// All that's really just a lot of words in an attempt to justify the `unsafe`
/// impl of `Send` below. The idea is that the data is only ever accessed on the
/// originating thread, even during `Drop`.
///
/// Note that this is a private module to have a visibility boundary around the
/// unsafe internals. Although there's not any unsafe blocks here, the code
/// itself is quite unsafe as it has to make sure that the data is dropped in
/// the right place, if ever.
mod dropbox {
    use std::any::Any;
    use std::mem;
    use super::CURRENT_LOOP;

    pub struct DropBox<A: ?Sized> {
        id: usize,             // id of the loop thread this box originated on
        inner: Option<Box<A>>, // `None` once the payload has been taken out
    }

    // SAFETY (as argued in the module docs above): the payload is only ever
    // touched -- including on drop -- after an id check proves we're on the
    // thread it originated from, so moving the box itself between threads is
    // sound.
    unsafe impl<A: ?Sized> Send for DropBox<A> {}

    impl DropBox<Any> {
        /// Creates a new `DropBox` pinned to the current threads.
        ///
        /// Will panic if `CURRENT_LOOP` isn't set.
        pub fn new<A: Any>(a: A) -> DropBox<Any> {
            DropBox {
                id: CURRENT_LOOP.with(|lp| lp.id),
                inner: Some(Box::new(a) as Box<Any>),
            }
        }

        /// Downcasts this `DropBox` to the type specified.
        ///
        /// Normally this always succeeds as it's a static assertion that we
        /// already have all the types matched up, but an `Option` is returned
        /// here regardless.
        pub fn downcast<A: Any>(&mut self) -> Option<DropBox<A>> {
            self.inner.take().and_then(|data| {
                match data.downcast::<A>() {
                    Ok(a) => Some(DropBox { id: self.id, inner: Some(a) }),

                    // Note that we're careful that when a downcast fails we put
                    // the data back into ourselves, because we may be
                    // downcasting on any thread. This will ensure that if we
                    // drop accidentally we'll forget the data correctly.
                    Err(obj) => {
                        self.inner = Some(obj);
                        None
                    }
                }
            })
        }
    }

    impl<A: Any> DropBox<A> {
        /// Consumes the contents of this `DropBox<A>`, returning a new
        /// `DropBox<Any>`.
        ///
        /// This is just intended to be a simple and cheap conversion, should
        /// almost always return `Some`.
        pub fn take_any(&mut self) -> Option<DropBox<Any>> {
            self.inner.take().map(|d| {
                DropBox { id: self.id, inner: Some(d as Box<Any>) }
            })
        }
    }

    impl<A: ?Sized> DropBox<A> {
        /// Returns a shared reference to the data if we're on the right
        /// thread.
        pub fn get(&self) -> Option<&A> {
            if CURRENT_LOOP.is_set() {
                CURRENT_LOOP.with(|lp| {
                    if lp.id == self.id {
                        self.inner.as_ref().map(|b| &**b)
                    } else {
                        None
                    }
                })
            } else {
                None
            }
        }

        /// Returns a mutable reference to the data if we're on the right
        /// thread.
        pub fn get_mut(&mut self) -> Option<&mut A> {
            if CURRENT_LOOP.is_set() {
                CURRENT_LOOP.with(move |lp| {
                    if lp.id == self.id {
                        self.inner.as_mut().map(|b| &mut **b)
                    } else {
                        None
                    }
                })
            } else {
                None
            }
        }
    }

    impl<A: ?Sized> Drop for DropBox<A> {
        fn drop(&mut self) {
            // Try our safe accessor first, and if it works then we know that
            // we're on the right thread. In that case we can simply drop as
            // usual.
            if let Some(a) = self.get_mut().take() {
                return drop(a)
            }

            // If we're on the wrong thread but we actually have some data, then
            // something in theory horrible has gone awry. Prevent memory safety
            // issues by forgetting the data and then also warn about this odd
            // event.
if let Some(data) = self.inner.take() {
    // Deliberately leak rather than run `A`'s destructor on a foreign
    // thread -- see the `dropbox` module docs above.
    mem::forget(data);
    warn!("forgetting some data on an event loop");
}
} // fn drop
} // impl Drop for DropBox
// NOTE(review): this closing brace for `mod dropbox` was missing from the
// excerpt as received (the braces did not balance). It is restored here so
// that `LoopFuture` and the items below are top-level items again, matching
// their unqualified uses above (e.g. `LoopFuture<usize, IoSource>`).
} // mod dropbox

/// One-shot rendezvous between a handle thread and the event loop.
///
/// `data` holds the not-yet-sent request payload; once the request has been
/// shipped to the loop, `result` holds the slot the loop will fulfill plus the
/// wakeup-callback token currently registered on it.
struct LoopFuture<T, U> {
    loop_handle: LoopHandle,
    data: Option<U>,
    result: Option<(Arc<Slot<io::Result<T>>>, slot::Token)>,
}

impl<T, U> LoopFuture<T, U>
    where T: Send + 'static,
{
    /// Poll for the result: if the request was already sent, try to consume
    /// the slot; otherwise attempt to run `f` directly against the loop
    /// (the cheap path when we're already on the loop thread).
    fn poll<F>(&mut self, f: F) -> Poll<T, io::Error>
        where F: FnOnce(&Loop, U) -> io::Result<T>,
    {
        match self.result {
            Some((ref result, ref token)) => {
                // Cancel the pending wakeup callback before consuming; a
                // fresh one is registered by `schedule` if still not ready.
                result.cancel(*token);
                match result.try_consume() {
                    Ok(t) => t.into(),
                    Err(_) => Poll::NotReady,
                }
            }
            None => {
                let data = &mut self.data;
                self.loop_handle.with_loop(|lp| {
                    match lp {
                        Some(lp) => f(lp, data.take().unwrap()).into(),
                        None => Poll::NotReady,
                    }
                })
            }
        }
    }

    /// Arrange for `task` to be notified when the result arrives, sending the
    /// request message to the loop on the first call.
    fn schedule<F>(&mut self, task: &mut Task, f: F)
        where F: FnOnce(U, Arc<Slot<io::Result<T>>>) -> Message,
    {
        if let Some((ref result, ref mut token)) = self.result {
            // Request already in flight: re-register the wakeup callback so
            // it notifies the current task.
            result.cancel(*token);
            let handle = task.handle().clone();
            *token = result.on_full(move |_| {
                handle.notify();
            });
            return
        }

        // First call: create the result slot, hook up the wakeup, then send
        // the request (built by `f` from the stashed payload) to the loop.
        let handle = task.handle().clone();
        let result = Arc::new(Slot::new(None));
        let token = result.on_full(move |_| {
            handle.notify();
        });
        self.result = Some((result.clone(), token));
        self.loop_handle.send(f(self.data.take().unwrap(), result))
    }
}

impl TimeoutState {
    /// Park `handle` until the timeout fires; if it has already fired, hand
    /// the handle straight back so the caller can notify it immediately.
    fn block(&mut self, handle: TaskHandle) -> Option<TaskHandle> {
        match *self {
            TimeoutState::Fired => return Some(handle),
            _ => {}
        }
        *self = TimeoutState::Waiting(handle);
        None
    }

    /// Mark the timeout as fired, returning a parked task handle (if any)
    /// that now needs waking.
    fn fire(&mut self) -> Option<TaskHandle> {
        match mem::replace(self, TimeoutState::Fired) {
            TimeoutState::NotFired => None,
            TimeoutState::Fired => panic!("fired twice?"),
            TimeoutState::Waiting(handle) => Some(handle),
        }
    }
}

impl<E> Source<E> {
    pub fn new(e: E) -> Source<E> {
        Source {
            readiness: AtomicUsize::new(0),
            io: e,
        }
    }
}

impl<E: ?Sized> Source<E> {
    /// Atomically drain the readiness bits; per the mapping below, 1 = read,
    /// 2 = write, 3 = both.
    pub fn take_readiness(&self) -> Option<Ready> {
        match self.readiness.swap(0, Ordering::SeqCst) {
            0 => None,
            1 => Some(Ready::Read),
            2 => Some(Ready::Write),
            3 => Some(Ready::ReadWrite),
            // Any other value would mean a writer outside this encoding.
            _ => panic!(),
        }
    }

    pub fn io(&self) -> &E {
        &self.io
    }
}

impl Executor for MioSender {
    fn execute_boxed(&self, callback: Box<ExecuteCallback>) {
        self.inner.send(Message::Run(callback))
            .expect("error sending a message to the event loop")
    }
}
// NOTE(review): this excerpt contains two concatenated revisions of the same
// example, separated by a commit-message line, and therefore defines `main`
// (and everything else) twice. Both revisions are preserved verbatim below;
// only comments were added.
extern crate env_logger;
extern crate lapin_futures as lapin;
#[macro_use]
extern crate log;
extern crate futures;
extern crate tokio;

use std::io;

use futures::future::Future;
use futures::{IntoFuture, Stream};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::TcpStream;
use tokio::runtime::Runtime;
use lapin::types::FieldTable;
use lapin::client::{Client, ConnectionOptions};
use lapin::channel::{BasicConsumeOptions, BasicProperties, BasicPublishOptions, ConfirmSelectOptions, QueueDeclareOptions};

const N_CONSUMERS : u8 = 8;
const N_MESSAGES : u8 = 5;

/// Builds the future for one consumer: declare `test-queue-{n}`, start a
/// basic-consume stream on it, and ack + print every delivery.
fn create_consumer<T: AsyncRead + AsyncWrite + Sync + Send + 'static>(client: &Client<T>, n: u8) -> impl Future<Item = (), Error = ()> + Send + 'static {
    info!("will create consumer {}", n);

    let queue = format!("test-queue-{}", n);

    client.create_confirm_channel(ConfirmSelectOptions::default()).and_then(move |channel| {
        info!("creating queue {}", queue);
        channel.queue_declare(&queue, QueueDeclareOptions::default(), FieldTable::new()).map(move |queue| (channel, queue))
    }).and_then(move |(channel, queue)| {
        info!("creating consumer {}", n);
        channel.basic_consume(&queue, "", BasicConsumeOptions::default(), FieldTable::new()).map(move |stream| (channel, stream))
    }).and_then(move |(channel, stream)| {
        info!("got stream for consumer {}", n);
        stream.for_each(move |message| {
            println!("consumer '{}' got '{}'", n, std::str::from_utf8(&message.data).unwrap());
            channel.basic_ack(message.delivery_tag)
        })
    }).map(|_| ()).map_err(move |err| eprintln!("got error in consumer '{}': {:?}", n, err))
}

fn main() {
    env_logger::init();

    let addr = std::env::var("AMQP_ADDR").unwrap_or_else(|_| "127.0.0.1:5672".to_string()).parse().unwrap();

    // First revision: block on a temporary runtime directly.
    // tokio::runtime::current_thread::Runtime::new().unwrap().block_on(
    Runtime::new().unwrap().block_on(
        // connect, then spawn heartbeat, then spawn consumers, then publish
        TcpStream::connect(&addr).and_then(|stream| {
            Client::connect(stream, ConnectionOptions {
                frame_max: 65535,
                ..Default::default()
            })
        }).and_then(|(client, heartbeat)| {
            tokio::spawn(heartbeat.map_err(|e| eprintln!("heartbeat error: {:?}", e)))
                .into_future().map(|_| client).map_err(|_| io::Error::new(io::ErrorKind::Other, "spawn error"))
        }).and_then(|client| {
            let _client = client.clone();
            futures::stream::iter_ok(0..N_CONSUMERS).for_each(move |n| tokio::spawn(create_consumer(&_client, n)))
                .into_future().map(move |_| client).map_err(|_| io::Error::new(io::ErrorKind::Other, "spawn error"))
        }).and_then(|client| {
            client.create_confirm_channel(ConfirmSelectOptions::default()).and_then(move |channel| {
                // publish N_MESSAGES to each of the N_CONSUMERS queues
                futures::stream::iter_ok((0..N_CONSUMERS).flat_map(|c| {
                    (0..N_MESSAGES).map(move |m| (c, m))
                })).for_each(move |(c, m)| {
                    let queue = format!("test-queue-{}", c);
                    let message = format!("message {} for consumer {}", m, c);
                    let channel = channel.clone();
                    info!("will publish {}", message);
                    channel.queue_declare(&queue, QueueDeclareOptions::default(), FieldTable::new()).and_then(move |_| {
                        channel.basic_publish("", &queue, message.as_str().as_bytes(), BasicPublishOptions::default(), BasicProperties::default()).map(move |confirmation| {
                            println!("got confirmation (consumer {}, message {}): {:?}", c, m, confirmation);
                        })
                    })
                })
            })
        }).map_err(|err| eprintln!("error: {:?}", err))
    ).expect("runtime exited with failure");
}

futures: let the consumers run in consumers example

Signed-off-by: Marc-Antoine Perennou <07f76cf0511c79b361712839686f3cee8c75791c@Perennou.com>

// NOTE(review): the two lines above are a stray commit message, not Rust
// code; the second revision of the example follows.
extern crate env_logger;
extern crate lapin_futures as lapin;
#[macro_use]
extern crate log;
extern crate futures;
extern crate tokio;

use std::io;

use futures::future::Future;
use futures::{IntoFuture, Stream};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::TcpStream;
use tokio::runtime::Runtime;
use lapin::types::FieldTable;
use lapin::client::{Client, ConnectionOptions};
use lapin::channel::{BasicConsumeOptions, BasicProperties, BasicPublishOptions, ConfirmSelectOptions, QueueDeclareOptions};

const N_CONSUMERS : u8 = 8;
const N_MESSAGES : u8 = 5;

/// Builds the future for one consumer: declare `test-queue-{n}`, start a
/// basic-consume stream on it, and ack + print every delivery.
fn create_consumer<T: AsyncRead + AsyncWrite + Sync + Send + 'static>(client: &Client<T>, n: u8) -> impl Future<Item = (), Error = ()> + Send + 'static {
    info!("will create consumer {}", n);

    let queue = format!("test-queue-{}", n);

    client.create_confirm_channel(ConfirmSelectOptions::default()).and_then(move |channel| {
        info!("creating queue {}", queue);
        channel.queue_declare(&queue, QueueDeclareOptions::default(), FieldTable::new()).map(move |queue| (channel, queue))
    }).and_then(move |(channel, queue)| {
        info!("creating consumer {}", n);
        channel.basic_consume(&queue, "", BasicConsumeOptions::default(), FieldTable::new()).map(move |stream| (channel, stream))
    }).and_then(move |(channel, stream)| {
        info!("got stream for consumer {}", n);
        stream.for_each(move |message| {
            println!("consumer '{}' got '{}'", n, std::str::from_utf8(&message.data).unwrap());
            channel.basic_ack(message.delivery_tag)
        })
    }).map(|_| ()).map_err(move |err| eprintln!("got error in consumer '{}': {:?}", n, err))
}

fn main() {
    env_logger::init();

    let addr = std::env::var("AMQP_ADDR").unwrap_or_else(|_| "127.0.0.1:5672".to_string()).parse().unwrap();

    // Second revision: keep the runtime around so we can shut it down only
    // once it is idle, letting the spawned consumers keep running.
    let mut runtime = Runtime::new().unwrap();
    // let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();

    runtime.block_on(
        TcpStream::connect(&addr).and_then(|stream| {
            Client::connect(stream, ConnectionOptions {
                frame_max: 65535,
                ..Default::default()
            })
        }).and_then(|(client, heartbeat)| {
            tokio::spawn(heartbeat.map_err(|e| eprintln!("heartbeat error: {:?}", e)))
                .into_future().map(|_| client).map_err(|_| io::Error::new(io::ErrorKind::Other, "spawn error"))
        }).and_then(|client| {
            let _client = client.clone();
            futures::stream::iter_ok(0..N_CONSUMERS).for_each(move |n| tokio::spawn(create_consumer(&_client, n)))
                .into_future().map(move |_| client).map_err(|_| io::Error::new(io::ErrorKind::Other, "spawn error"))
        }).and_then(|client| {
            client.create_confirm_channel(ConfirmSelectOptions::default()).and_then(move |channel| {
                futures::stream::iter_ok((0..N_CONSUMERS).flat_map(|c| {
                    (0..N_MESSAGES).map(move |m| (c, m))
                })).for_each(move |(c, m)| {
                    let queue = format!("test-queue-{}", c);
                    let message = format!("message {} for consumer {}", m, c);
                    let channel = channel.clone();
                    info!("will publish {}", message);
                    channel.queue_declare(&queue, QueueDeclareOptions::default(), FieldTable::new()).and_then(move |_| {
                        channel.basic_publish("", &queue, message.as_str().as_bytes(), BasicPublishOptions::default(), BasicProperties::default()).map(move |confirmation| {
                            println!("got confirmation (consumer {}, message {}): {:?}", c, m, confirmation);
                        })
                    })
                })
            })
        }).map_err(|err| eprintln!("error: {:?}", err))
    ).expect("runtime exited with failure");

    runtime.shutdown_on_idle().wait().expect("runtime shutdown exited with error");
}
#![cfg(feature = "s3")]

extern crate futures;
extern crate rusoto_core;
extern crate rusoto_s3;
extern crate time;
extern crate env_logger;
extern crate log;
extern crate reqwest;

use std::collections::HashMap;
use std::env;
use std::fs::File;
use std::io::Read;
use time::get_time;
use futures::{Future, Stream};

use rusoto_core::Region;
use rusoto_core::ProvideAwsCredentials;
use rusoto_core::credential::AwsCredentials;
use rusoto_core::reactor::CredentialsProvider;
use rusoto_s3::{S3, S3Client, HeadObjectRequest, CopyObjectRequest, GetObjectError, GetObjectRequest,
                PutObjectRequest, DeleteObjectRequest, PutBucketCorsRequest, CORSConfiguration, CORSRule,
                CreateBucketRequest, DeleteBucketRequest, CreateMultipartUploadRequest, UploadPartRequest,
                CompleteMultipartUploadRequest, CompletedMultipartUpload, CompletedPart, ListObjectsRequest,
                ListObjectsV2Request};
use rusoto_s3::util::PreSignedRequest;

type TestClient = S3Client;

// Rust is in bad need of an integration test harness
// This creates the S3 resources needed for a suite of tests,
// executes those tests, and then destroys the resources
#[test]
fn test_all_the_things() {
    let _ = env_logger::try_init();

    // Allow pointing the suite at a non-AWS endpoint (e.g. Minio or Ceph)
    // via the S3_ENDPOINT environment variable.
    let region = if let Ok(endpoint) = env::var("S3_ENDPOINT") {
        let region = Region::Custom {
            name: "us-east-1".to_owned(),
            endpoint: endpoint.to_owned()
        };
        println!("picked up non-standard endpoint {:?} from S3_ENDPOINT env. variable", region);
        region
    } else {
        Region::UsEast1
    };

    let client = S3Client::simple(region.clone());
    let credentials = CredentialsProvider::default().credentials().wait().unwrap();

    // Timestamp-suffixed names keep reruns from colliding with leftovers.
    let test_bucket = format!("rusoto-test-bucket-{}", get_time().sec);
    let filename = format!("test_file_{}", get_time().sec);
    let utf8_filename = format!("test[über]file@{}", get_time().sec);
    let binary_filename = format!("test_file_b{}", get_time().sec);
    let multipart_filename = format!("test_multipart_file_{}", get_time().sec);
    let metadata_filename = format!("test_metadata_file_{}", get_time().sec);

    // get a list of list_buckets
    test_list_buckets(&client);

    // create a bucket for these tests
    test_create_bucket(&client, &test_bucket);

    // list items v2
    list_items_in_bucket(&client, &test_bucket);

    // do a multipart upload
    test_multipart_upload(&client, &test_bucket, &multipart_filename);

    // modify the bucket's CORS properties
    if cfg!(not(feature = "disable_minio_unsupported")) {
        // Minio support: CORS is not implemented by Minio
        test_put_bucket_cors(&client, &test_bucket);
    }

    // PUT an object (no_credentials is an arbitrary choice)
    test_put_object_with_filename(&client,
                                  &test_bucket,
                                  &filename,
                                  &"tests/sample-data/no_credentials");

    // HEAD the object that was PUT
    test_head_object(&client, &test_bucket, &filename);

    // GET the object
    test_get_object(&client, &test_bucket, &filename);
    test_get_object_range(&client, &test_bucket, &filename);

    // copy the object to change its settings
    test_copy_object(&client, &test_bucket, &filename);

    // UTF8 filenames
    test_put_object_with_filename(&client,
                                  &test_bucket,
                                  &utf8_filename,
                                  &"tests/sample-data/no_credentials");

    test_copy_object_utf8(&client, &test_bucket, &utf8_filename);
    test_delete_object(&client, &test_bucket, &utf8_filename);

    // test failure responses
    test_get_object_no_such_object(&client, &test_bucket, &binary_filename);

    // Binary objects:
    test_put_object_with_filename(&client,
                                  &test_bucket,
                                  &binary_filename,
                                  &"tests/sample-data/binary-file");
    test_get_object(&client, &test_bucket, &binary_filename);

    // paging test requires three items in the bucket, put another item there:
    // PUT an object (no_credentials is an arbitrary choice)
    let another_filename = format!("foo{}", filename);
    test_put_object_with_filename(&client,
                                  &test_bucket,
                                  &another_filename,
                                  &"tests/sample-data/no_credentials");

    // metadata tests
    let mut metadata = HashMap::<String, String>::new();
    metadata.insert("rusoto-metadata-some".to_string(), "some-test-value".to_string());
    metadata.insert("rusoto-metadata-none".to_string(), "".to_string());

    test_put_object_with_metadata(&client,
                                  &test_bucket,
                                  &metadata_filename,
                                  &"tests/sample-data/no_credentials",
                                  &metadata);

    test_head_object_with_metadata(&client, &test_bucket, &metadata_filename, &metadata);
    test_get_object_with_metadata(&client, &test_bucket, &metadata_filename, &metadata);

    // list items with paging using list object API v1
    list_items_in_bucket_paged_v1(&client, &test_bucket);

    // list items with paging using list object API v2
    if cfg!(not(feature = "disable_ceph_unsupported")) {
        // Ceph support: this test depends on the list object v2 API which is not implemented by Ceph
        list_items_in_bucket_paged_v2(&client, &test_bucket);
    }

    test_delete_object(&client, &test_bucket, &metadata_filename);
    test_delete_object(&client, &test_bucket, &binary_filename);
    test_delete_object(&client, &test_bucket, &another_filename);

    // DELETE the object
    test_delete_object(&client, &test_bucket, &filename);

    // PUT an object for presigned url
    test_put_object_with_filename(&client,
                                  &test_bucket,
                                  &filename,
                                  &"tests/sample-data/no_credentials");

    // generate a presigned url
    test_get_object_with_presigned_url(&region, &credentials, &test_bucket, &utf8_filename);
    test_put_object_with_presigned_url(&region, &credentials, &test_bucket, &utf8_filename);
    test_delete_object_with_presigned_url(&region, &credentials, &test_bucket, &utf8_filename);

    // UTF8 filenames for presigned url
    test_put_object_with_filename(&client,
                                  &test_bucket,
                                  &utf8_filename,
                                  &"tests/sample-data/no_credentials");

    // generate a presigned url
    test_get_object_with_presigned_url(&region, &credentials, &test_bucket, &utf8_filename);
    test_put_object_with_presigned_url(&region, &credentials, &test_bucket, &utf8_filename);
    test_delete_object_with_presigned_url(&region, &credentials, &test_bucket, &utf8_filename);

    // delete the test bucket
    test_delete_bucket(&client, &test_bucket);
}

/// Exercises the multipart-upload API: create, upload two parts, complete
/// with the parts' etags, then delete the assembled object.
fn test_multipart_upload(client: &TestClient, bucket: &str, filename: &str) {
    let create_multipart_req = CreateMultipartUploadRequest {
        bucket: bucket.to_owned(),
        key: filename.to_owned(),
        ..Default::default()
    };

    // start the multipart upload and note the upload_id generated
    let response = client.create_multipart_upload(create_multipart_req)
        .sync()
        .expect("Couldn't create multipart upload");
    println!("{:#?}", response);
    let upload_id = response.upload_id.unwrap();

    // create 2 upload parts
    let create_upload_part = |body: Vec<u8>, part_number: i64| -> UploadPartRequest {
        UploadPartRequest {
            body: Some(body.into()),
            bucket: bucket.to_owned(),
            key: filename.to_owned(),
            upload_id: upload_id.to_owned(),
            part_number: part_number,
            ..Default::default()
        }
    };

    // minimum size for a non-final multipart upload part is 5MB
    let part_req1 = create_upload_part(vec!['a' as u8; 1024 * 1024 * 5], 1);
    let part_req2 = create_upload_part("foo".as_bytes().to_vec(), 2);

    // upload 2 parts and note the etags generated for them
    let mut completed_parts = Vec::new();
    for req in vec![part_req1, part_req2].into_iter() {
        let part_number = req.part_number;
        let response = client.upload_part(req)
            .sync()
            .expect("Couldn't upload a file part");
        println!("{:#?}", response);
        completed_parts.push(CompletedPart {
            e_tag: response.e_tag.clone(),
            part_number: Some(part_number),
        });
    }

    // complete the multipart upload with the etags of the parts
    let completed_upload = CompletedMultipartUpload { parts: Some(completed_parts) };

    let complete_req = CompleteMultipartUploadRequest {
        bucket:
bucket.to_owned(),
        key: filename.to_owned(),
        upload_id: upload_id.to_owned(),
        multipart_upload: Some(completed_upload),
        ..Default::default()
    };

    let response = client.complete_multipart_upload(complete_req)
        .sync()
        .expect("Couldn't complete multipart upload");
    println!("{:#?}", response);

    // delete the completed file
    test_delete_object(client, bucket, filename);
}

/// Creates the test bucket (panics on failure).
fn test_create_bucket(client: &TestClient, bucket: &str) {
    let create_bucket_req = CreateBucketRequest { bucket: bucket.to_owned(), ..Default::default() };
    let result = client.create_bucket(create_bucket_req)
        .sync()
        .expect("Couldn't create bucket");
    println!("{:#?}", result);
}

/// Deletes the test bucket (panics on failure).
fn test_delete_bucket(client: &TestClient, bucket: &str) {
    let delete_bucket_req = DeleteBucketRequest { bucket: bucket.to_owned(), ..Default::default() };
    let result = client.delete_bucket(delete_bucket_req)
        .sync()
        .expect("Couldn't delete bucket");
    println!("{:#?}", result);
}

/// Reads `local_filename` fully into memory and PUTs it as `dest_filename`.
fn test_put_object_with_filename(client: &TestClient,
                                 bucket: &str,
                                 dest_filename: &str,
                                 local_filename: &str) {
    let mut f = File::open(local_filename).unwrap();
    let mut contents: Vec<u8> = Vec::new();
    match f.read_to_end(&mut contents) {
        Err(why) => panic!("Error opening file to send to S3: {}", why),
        Ok(_) => {
            let req = PutObjectRequest {
                bucket: bucket.to_owned(),
                key: dest_filename.to_owned(),
                body: Some(contents.into()),
                ..Default::default()
            };
            let result = client.put_object(req).sync().expect("Couldn't PUT object");
            println!("{:#?}", result);
        }
    }
}

/// HEADs the object (panics if the request fails).
fn test_head_object(client: &TestClient, bucket: &str, filename: &str) {
    let head_req = HeadObjectRequest {
        bucket: bucket.to_owned(),
        key: filename.to_owned(),
        ..Default::default()
    };

    let result = client.head_object(head_req)
        .sync()
        .expect("Couldn't HEAD object");
    println!("{:#?}", result);
}

/// GETs the object and asserts the streamed body is non-empty.
fn test_get_object(client: &TestClient, bucket: &str, filename: &str) {
    let get_req = GetObjectRequest {
        bucket: bucket.to_owned(),
        key: filename.to_owned(),
        ..Default::default()
    };

    let result = client.get_object(get_req)
        .sync()
        .expect("Couldn't GET object");
    println!("get object result: {:#?}", result);

    let stream = result.body.unwrap();
    let body = stream.concat2().wait().unwrap();

    assert!(body.len() > 0);
}

/// GET on a missing key must yield `GetObjectError::NoSuchKey`.
fn test_get_object_no_such_object(client: &TestClient, bucket: &str, filename: &str) {
    let get_req = GetObjectRequest {
        bucket: bucket.to_owned(),
        key: filename.to_owned(),
        ..Default::default()
    };

    match client.get_object(get_req).sync() {
        Err(GetObjectError::NoSuchKey(_)) => (),
        r => panic!("unexpected response {:?}", r)
    };
}

/// Range GET of the first two bytes; content_length must be 2.
fn test_get_object_range(client: &TestClient, bucket: &str, filename: &str) {
    let get_req = GetObjectRequest {
        bucket: bucket.to_owned(),
        key: filename.to_owned(),
        range: Some("bytes=0-1".to_owned()),
        ..Default::default()
    };

    let result = client.get_object(get_req)
        .sync()
        .expect("Couldn't GET object (range)");
    println!("\nget object range result: {:#?}", result);
    assert_eq!(result.content_length.unwrap(), 2);
}

/// Copies the object onto itself with REPLACE to change its settings.
fn test_copy_object(client: &TestClient, bucket: &str, filename: &str) {
    let req = CopyObjectRequest {
        bucket: bucket.to_owned(),
        key: filename.to_owned(),
        copy_source: format!("{}/{}", bucket, filename),
        cache_control: Some("max-age=123".to_owned()),
        content_type: Some("application/json".to_owned()),
        metadata_directive: Some("REPLACE".to_owned()),
        ..Default::default()
    };

    let result = client.copy_object(req)
        .sync()
        .expect("Couldn't copy object");
    println!("{:#?}", result);
}

/// Same self-copy, but with a percent-encoded copy_source for UTF-8 keys.
fn test_copy_object_utf8(client: &TestClient, bucket: &str, filename: &str) {
    let req = CopyObjectRequest {
        bucket: bucket.to_owned(),
        key: format!("{}", filename.to_owned()),
        copy_source: rusoto_s3::util::encode_key(format!("{}/{}", bucket, filename)),
        cache_control: Some("max-age=123".to_owned()),
        content_type: Some("application/json".to_owned()),
        metadata_directive: Some("REPLACE".to_owned()),
        ..Default::default()
    };

    let result = client.copy_object(req)
        .sync()
        .expect("Couldn't copy object (utf8)");
    println!("{:#?}", result);
}

fn
test_delete_object(client: &TestClient, bucket: &str, filename: &str) { let del_req = DeleteObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let result = client.delete_object(del_req).sync().expect("Couldn't delete object"); println!("{:#?}", result); } fn test_list_buckets(client: &TestClient) { let result = client.list_buckets().sync().expect("Couldn't list buckets"); println!("\nbuckets available: {:#?}", result); } fn list_items_in_bucket(client: &TestClient, bucket: &str) { let list_obj_req = ListObjectsV2Request { bucket: bucket.to_owned(), start_after: Some("foo".to_owned()), ..Default::default() }; let result = client.list_objects_v2(list_obj_req).sync().expect("Couldn't list items in bucket (v2)"); println!("Items in bucket: {:#?}", result); } fn list_items_in_bucket_paged_v1(client: &TestClient, bucket: &str) { let mut list_request = ListObjectsRequest { delimiter: Some("/".to_owned()), bucket: bucket.to_owned(), max_keys: Some(2), ..Default::default() }; let response1 = client.list_objects(list_request.clone()).sync().expect("list objects failed"); println!("Items in bucket, page 1: {:#?}", response1); let contents1 = response1.contents.unwrap(); assert!(response1.is_truncated.unwrap()); assert_eq!(contents1.len(), 2); list_request.marker = Some(response1.next_marker.unwrap()); list_request.max_keys = Some(1000); let response2 = client.list_objects(list_request).sync().expect("list objects failed"); println!("Items in buckut, page 2: {:#?}", response2); let contents2 = response2.contents.unwrap(); assert!(!response2.is_truncated.unwrap()); assert!(contents1[1].key.as_ref().unwrap() < contents2[0].key.as_ref().unwrap()); } // Assuming there's already more than three item in our test bucket: fn list_items_in_bucket_paged_v2(client: &TestClient, bucket: &str) { let mut list_obj_req = ListObjectsV2Request { bucket: bucket.to_owned(), max_keys: Some(1), ..Default::default() }; let result1 = 
client.list_objects_v2(list_obj_req.clone()).sync().expect("list objects v2 failed"); println!("Items in bucket, page 1: {:#?}", result1); assert!(result1.next_continuation_token.is_some()); list_obj_req.continuation_token = result1.next_continuation_token; let result2 = client.list_objects_v2(list_obj_req).sync().expect("list objects v2 paging failed"); println!("Items in bucket, page 2: {:#?}", result2); // For the second call it the token is in `continuation_token` not `next_continuation_token` assert!(result2.continuation_token.is_some()); assert!( result1.contents.unwrap()[0].key.as_ref().unwrap() < result2.contents.unwrap()[0].key.as_ref().unwrap() ); } fn test_put_bucket_cors(client: &TestClient, bucket: &str) { let cors_rules = vec![CORSRule { allowed_methods: vec!["PUT".to_owned(), "POST".to_owned(), "DELETE".to_owned()], allowed_origins: vec!["http://www.example.com".to_owned()], allowed_headers: Some(vec!["*".to_owned()]), max_age_seconds: Some(3000), expose_headers: Some(vec!["x-amz-server-side-encryption".to_owned()]), ..Default::default() }]; let cors_configuration = CORSConfiguration { cors_rules: cors_rules }; let req = PutBucketCorsRequest { bucket: bucket.to_owned(), cors_configuration: cors_configuration, ..Default::default() }; let result = client.put_bucket_cors(req).sync().expect("Couldn't apply bucket CORS"); println!("{:#?}", result); } fn test_put_object_with_metadata(client: &TestClient, bucket: &str, dest_filename: &str, local_filename: &str, metadata: &HashMap<String,String>) { let mut f = File::open(local_filename).unwrap(); let mut contents: Vec<u8> = Vec::new(); match f.read_to_end(&mut contents) { Err(why) => panic!("Error opening file to send to S3: {}", why), Ok(_) => { let req = PutObjectRequest { bucket: bucket.to_owned(), key: dest_filename.to_owned(), body: Some(contents.into()), metadata: Some(metadata.clone()), ..Default::default() }; let result = client.put_object(req).sync().expect("Couldn't PUT object"); println!("{:#?}", 
result); } } } fn test_head_object_with_metadata(client: &TestClient, bucket: &str, filename: &str, metadata: &HashMap<String,String>) { let head_req = HeadObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let result = client.head_object(head_req).sync().expect("Couldn't HEAD object"); println!("{:#?}", result); let head_metadata = result.metadata.as_ref().expect("No metadata available"); assert_eq!(metadata, head_metadata); } fn test_get_object_with_metadata(client: &TestClient, bucket: &str, filename: &str, metadata: &HashMap<String,String>) { let get_req = GetObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let result = client.get_object(get_req).sync().expect("Couldn't GET object"); println!("get object result: {:#?}", result); let head_metadata = result.metadata.as_ref().expect("No metadata available"); assert_eq!(metadata, head_metadata); } fn test_get_object_with_presigned_url(region: &Region, credentials: &AwsCredentials, bucket: &str, filename: &str) { let req = GetObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let presigned_url = req.get_presigned_url(region, credentials); println!("get object presigned url: {:#?}", presigned_url); let mut res = reqwest::get(&presigned_url).unwrap(); assert_eq!(res.status(), reqwest::StatusCode::Ok); let size = res.headers().get::<reqwest::header::ContentLength>().map(|ct_len| **ct_len).unwrap_or(0); assert!(size > 0); let mut buf: Vec<u8> = vec![]; res.copy_to(&mut buf).unwrap(); assert!(buf.len() > 0); } fn test_put_object_with_presigned_url(region: &Region, credentials: &AwsCredentials, bucket: &str, filename: &str) { let req = PutObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let presigned_url = req.get_presigned_url(region, credentials); println!("put object presigned url: {:#?}", presigned_url); let mut map = HashMap::new(); 
map.insert("test", "data"); let client = reqwest::Client::new(); let res = client.put(&presigned_url).json(&map).send().unwrap(); assert_eq!(res.status(), reqwest::StatusCode::Ok); } fn test_delete_object_with_presigned_url(region: &Region, credentials: &AwsCredentials, bucket: &str, filename: &str) { let req = DeleteObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let presigned_url = req.get_presigned_url(region, credentials); println!("delete object presigned url: {:#?}", presigned_url); let client = reqwest::Client::new(); let res = client.delete(&presigned_url).send().unwrap(); assert_eq!(res.status(), reqwest::StatusCode::NoContent); } fix presigned url test probrems #![cfg(feature = "s3")] extern crate futures; extern crate rusoto_core; extern crate rusoto_s3; extern crate time; extern crate env_logger; extern crate log; extern crate reqwest; use std::collections::HashMap; use std::env; use std::fs::File; use std::io::Read; use time::get_time; use futures::{Future, Stream}; use rusoto_core::Region; use rusoto_core::ProvideAwsCredentials; use rusoto_core::credential::AwsCredentials; use rusoto_core::reactor::CredentialsProvider; use rusoto_s3::{S3, S3Client, HeadObjectRequest, CopyObjectRequest, GetObjectError, GetObjectRequest, PutObjectRequest, DeleteObjectRequest, PutBucketCorsRequest, CORSConfiguration, CORSRule, CreateBucketRequest, DeleteBucketRequest, CreateMultipartUploadRequest, UploadPartRequest, CompleteMultipartUploadRequest, CompletedMultipartUpload, CompletedPart, ListObjectsRequest, ListObjectsV2Request}; use rusoto_s3::util::PreSignedRequest; type TestClient = S3Client; // Rust is in bad need of an integration test harness // This creates the S3 resources needed for a suite of tests, // executes those tests, and then destroys the resources #[test] fn test_all_the_things() { let _ = env_logger::try_init(); let region = if let Ok(endpoint) = env::var("S3_ENDPOINT") { let region = Region::Custom { name: 
"us-east-1".to_owned(), endpoint: endpoint.to_owned() }; println!("picked up non-standard endpoint {:?} from S3_ENDPOINT env. variable", region); region } else { Region::UsEast1 }; let client = S3Client::simple(region.clone()); let credentials = CredentialsProvider::default().credentials().wait().unwrap(); let test_bucket = format!("rusoto-test-bucket-{}", get_time().sec); let filename = format!("test_file_{}", get_time().sec); let utf8_filename = format!("test[über]file@{}", get_time().sec); let binary_filename = format!("test_file_b{}", get_time().sec); let multipart_filename = format!("test_multipart_file_{}", get_time().sec); let metadata_filename = format!("test_metadata_file_{}", get_time().sec); // get a list of list_buckets test_list_buckets(&client); // create a bucket for these tests test_create_bucket(&client, &test_bucket); // list items v2 list_items_in_bucket(&client, &test_bucket); // do a multipart upload test_multipart_upload(&client, &test_bucket, &multipart_filename); // modify the bucket's CORS properties if cfg!(not(feature = "disable_minio_unsupported")) { // Minio support: CORS is not implemented by Minio test_put_bucket_cors(&client, &test_bucket); } // PUT an object (no_credentials is an arbitrary choice) test_put_object_with_filename(&client, &test_bucket, &filename, &"tests/sample-data/no_credentials"); // HEAD the object that was PUT test_head_object(&client, &test_bucket, &filename); // GET the object test_get_object(&client, &test_bucket, &filename); test_get_object_range(&client, &test_bucket, &filename); // copy the object to change its settings test_copy_object(&client, &test_bucket, &filename); // UTF8 filenames test_put_object_with_filename(&client, &test_bucket, &utf8_filename, &"tests/sample-data/no_credentials"); test_copy_object_utf8(&client, &test_bucket, &utf8_filename); test_delete_object(&client, &test_bucket, &utf8_filename); // test failure responses test_get_object_no_such_object(&client, &test_bucket, 
&binary_filename); // Binary objects: test_put_object_with_filename(&client, &test_bucket, &binary_filename, &"tests/sample-data/binary-file"); test_get_object(&client, &test_bucket, &binary_filename); // paging test requires three items in the bucket, put another item there: // PUT an object (no_credentials is an arbitrary choice) let another_filename = format!("foo{}", filename); test_put_object_with_filename(&client, &test_bucket, &another_filename, &"tests/sample-data/no_credentials"); // metadata tests let mut metadata = HashMap::<String, String>::new(); metadata.insert("rusoto-metadata-some".to_string(), "some-test-value".to_string()); metadata.insert("rusoto-metadata-none".to_string(), "".to_string()); test_put_object_with_metadata(&client, &test_bucket, &metadata_filename, &"tests/sample-data/no_credentials", &metadata); test_head_object_with_metadata(&client, &test_bucket, &metadata_filename, &metadata); test_get_object_with_metadata(&client, &test_bucket, &metadata_filename, &metadata); // list items with paging using list object API v1 list_items_in_bucket_paged_v1(&client, &test_bucket); // list items with paging using list object API v2 if cfg!(not(feature = "disable_ceph_unsupported")) { // Ceph support: this test depends on the list object v2 API which is not implemented by Ceph list_items_in_bucket_paged_v2(&client, &test_bucket); } test_delete_object(&client, &test_bucket, &metadata_filename); test_delete_object(&client, &test_bucket, &binary_filename); test_delete_object(&client, &test_bucket, &another_filename); // DELETE the object test_delete_object(&client, &test_bucket, &filename); // PUT an object for presigned url test_put_object_with_filename(&client, &test_bucket, &filename, &"tests/sample-data/no_credentials"); // generate a presigned url test_get_object_with_presigned_url(&region, &credentials, &test_bucket, &filename); test_put_object_with_presigned_url(&region, &credentials, &test_bucket, &filename); 
test_delete_object_with_presigned_url(&region, &credentials, &test_bucket, &filename); // UTF8 filenames for presigned url test_put_object_with_filename(&client, &test_bucket, &utf8_filename, &"tests/sample-data/no_credentials"); // generate a presigned url test_get_object_with_presigned_url(&region, &credentials, &test_bucket, &utf8_filename); test_put_object_with_presigned_url(&region, &credentials, &test_bucket, &utf8_filename); test_delete_object_with_presigned_url(&region, &credentials, &test_bucket, &utf8_filename); // delete the test bucket test_delete_bucket(&client, &test_bucket); } fn test_multipart_upload(client: &TestClient, bucket: &str, filename: &str) { let create_multipart_req = CreateMultipartUploadRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; // start the multipart upload and note the upload_id generated let response = client.create_multipart_upload(create_multipart_req).sync().expect("Couldn't create multipart upload"); println!("{:#?}", response); let upload_id = response.upload_id.unwrap(); // create 2 upload parts let create_upload_part = |body: Vec<u8>, part_number: i64| -> UploadPartRequest { UploadPartRequest { body: Some(body.into()), bucket: bucket.to_owned(), key: filename.to_owned(), upload_id: upload_id.to_owned(), part_number: part_number, ..Default::default() } }; // minimum size for a non-final multipart upload part is 5MB let part_req1 = create_upload_part(vec!['a' as u8; 1024 * 1024 * 5], 1); let part_req2 = create_upload_part("foo".as_bytes().to_vec(), 2); // upload 2 parts and note the etags generated for them let mut completed_parts = Vec::new(); for req in vec![part_req1, part_req2].into_iter() { let part_number = req.part_number; let response = client.upload_part(req).sync().expect("Couldn't upload a file part"); println!("{:#?}", response); completed_parts.push(CompletedPart { e_tag: response.e_tag.clone(), part_number: Some(part_number), }); } // complete the multipart upload with 
the etags of the parts let completed_upload = CompletedMultipartUpload { parts: Some(completed_parts) }; let complete_req = CompleteMultipartUploadRequest { bucket: bucket.to_owned(), key: filename.to_owned(), upload_id: upload_id.to_owned(), multipart_upload: Some(completed_upload), ..Default::default() }; let response = client.complete_multipart_upload(complete_req).sync().expect("Couldn't complete multipart upload"); println!("{:#?}", response); // delete the completed file test_delete_object(client, bucket, filename); } fn test_create_bucket(client: &TestClient, bucket: &str) { let create_bucket_req = CreateBucketRequest { bucket: bucket.to_owned(), ..Default::default() }; let result = client.create_bucket(create_bucket_req).sync().expect("Couldn't create bucket"); println!("{:#?}", result); } fn test_delete_bucket(client: &TestClient, bucket: &str) { let delete_bucket_req = DeleteBucketRequest { bucket: bucket.to_owned(), ..Default::default() }; let result = client.delete_bucket(delete_bucket_req).sync().expect("Couldn't delete bucket"); println!("{:#?}", result); } fn test_put_object_with_filename(client: &TestClient, bucket: &str, dest_filename: &str, local_filename: &str) { let mut f = File::open(local_filename).unwrap(); let mut contents: Vec<u8> = Vec::new(); match f.read_to_end(&mut contents) { Err(why) => panic!("Error opening file to send to S3: {}", why), Ok(_) => { let req = PutObjectRequest { bucket: bucket.to_owned(), key: dest_filename.to_owned(), body: Some(contents.into()), ..Default::default() }; let result = client.put_object(req).sync().expect("Couldn't PUT object"); println!("{:#?}", result); } } } fn test_head_object(client: &TestClient, bucket: &str, filename: &str) { let head_req = HeadObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let result = client.head_object(head_req).sync().expect("Couldn't HEAD object"); println!("{:#?}", result); } fn test_get_object(client: &TestClient, bucket: &str, 
filename: &str) { let get_req = GetObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let result = client.get_object(get_req).sync().expect("Couldn't GET object"); println!("get object result: {:#?}", result); let stream = result.body.unwrap(); let body = stream.concat2().wait().unwrap(); assert!(body.len() > 0); } fn test_get_object_no_such_object(client: &TestClient, bucket: &str, filename: &str) { let get_req = GetObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; match client.get_object(get_req).sync() { Err(GetObjectError::NoSuchKey(_)) => (), r => panic!("unexpected response {:?}", r) }; } fn test_get_object_range(client: &TestClient, bucket: &str, filename: &str) { let get_req = GetObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), range: Some("bytes=0-1".to_owned()), ..Default::default() }; let result = client.get_object(get_req).sync().expect("Couldn't GET object (range)"); println!("\nget object range result: {:#?}", result); assert_eq!(result.content_length.unwrap(), 2); } fn test_copy_object(client: &TestClient, bucket: &str, filename: &str) { let req = CopyObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), copy_source: format!("{}/{}", bucket, filename), cache_control: Some("max-age=123".to_owned()), content_type: Some("application/json".to_owned()), metadata_directive: Some("REPLACE".to_owned()), ..Default::default() }; let result = client.copy_object(req).sync().expect("Couldn't copy object"); println!("{:#?}", result); } fn test_copy_object_utf8(client: &TestClient, bucket: &str, filename: &str) { let req = CopyObjectRequest { bucket: bucket.to_owned(), key: format!("{}", filename.to_owned()), copy_source: rusoto_s3::util::encode_key(format!("{}/{}", bucket, filename)), cache_control: Some("max-age=123".to_owned()), content_type: Some("application/json".to_owned()), metadata_directive: Some("REPLACE".to_owned()), 
..Default::default() }; let result = client.copy_object(req).sync().expect("Couldn't copy object (utf8)"); println!("{:#?}", result); } fn test_delete_object(client: &TestClient, bucket: &str, filename: &str) { let del_req = DeleteObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let result = client.delete_object(del_req).sync().expect("Couldn't delete object"); println!("{:#?}", result); } fn test_list_buckets(client: &TestClient) { let result = client.list_buckets().sync().expect("Couldn't list buckets"); println!("\nbuckets available: {:#?}", result); } fn list_items_in_bucket(client: &TestClient, bucket: &str) { let list_obj_req = ListObjectsV2Request { bucket: bucket.to_owned(), start_after: Some("foo".to_owned()), ..Default::default() }; let result = client.list_objects_v2(list_obj_req).sync().expect("Couldn't list items in bucket (v2)"); println!("Items in bucket: {:#?}", result); } fn list_items_in_bucket_paged_v1(client: &TestClient, bucket: &str) { let mut list_request = ListObjectsRequest { delimiter: Some("/".to_owned()), bucket: bucket.to_owned(), max_keys: Some(2), ..Default::default() }; let response1 = client.list_objects(list_request.clone()).sync().expect("list objects failed"); println!("Items in bucket, page 1: {:#?}", response1); let contents1 = response1.contents.unwrap(); assert!(response1.is_truncated.unwrap()); assert_eq!(contents1.len(), 2); list_request.marker = Some(response1.next_marker.unwrap()); list_request.max_keys = Some(1000); let response2 = client.list_objects(list_request).sync().expect("list objects failed"); println!("Items in buckut, page 2: {:#?}", response2); let contents2 = response2.contents.unwrap(); assert!(!response2.is_truncated.unwrap()); assert!(contents1[1].key.as_ref().unwrap() < contents2[0].key.as_ref().unwrap()); } // Assuming there's already more than three item in our test bucket: fn list_items_in_bucket_paged_v2(client: &TestClient, bucket: &str) { let mut 
list_obj_req = ListObjectsV2Request { bucket: bucket.to_owned(), max_keys: Some(1), ..Default::default() }; let result1 = client.list_objects_v2(list_obj_req.clone()).sync().expect("list objects v2 failed"); println!("Items in bucket, page 1: {:#?}", result1); assert!(result1.next_continuation_token.is_some()); list_obj_req.continuation_token = result1.next_continuation_token; let result2 = client.list_objects_v2(list_obj_req).sync().expect("list objects v2 paging failed"); println!("Items in bucket, page 2: {:#?}", result2); // For the second call it the token is in `continuation_token` not `next_continuation_token` assert!(result2.continuation_token.is_some()); assert!( result1.contents.unwrap()[0].key.as_ref().unwrap() < result2.contents.unwrap()[0].key.as_ref().unwrap() ); } fn test_put_bucket_cors(client: &TestClient, bucket: &str) { let cors_rules = vec![CORSRule { allowed_methods: vec!["PUT".to_owned(), "POST".to_owned(), "DELETE".to_owned()], allowed_origins: vec!["http://www.example.com".to_owned()], allowed_headers: Some(vec!["*".to_owned()]), max_age_seconds: Some(3000), expose_headers: Some(vec!["x-amz-server-side-encryption".to_owned()]), ..Default::default() }]; let cors_configuration = CORSConfiguration { cors_rules: cors_rules }; let req = PutBucketCorsRequest { bucket: bucket.to_owned(), cors_configuration: cors_configuration, ..Default::default() }; let result = client.put_bucket_cors(req).sync().expect("Couldn't apply bucket CORS"); println!("{:#?}", result); } fn test_put_object_with_metadata(client: &TestClient, bucket: &str, dest_filename: &str, local_filename: &str, metadata: &HashMap<String,String>) { let mut f = File::open(local_filename).unwrap(); let mut contents: Vec<u8> = Vec::new(); match f.read_to_end(&mut contents) { Err(why) => panic!("Error opening file to send to S3: {}", why), Ok(_) => { let req = PutObjectRequest { bucket: bucket.to_owned(), key: dest_filename.to_owned(), body: Some(contents.into()), metadata: 
Some(metadata.clone()), ..Default::default() }; let result = client.put_object(req).sync().expect("Couldn't PUT object"); println!("{:#?}", result); } } } fn test_head_object_with_metadata(client: &TestClient, bucket: &str, filename: &str, metadata: &HashMap<String,String>) { let head_req = HeadObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let result = client.head_object(head_req).sync().expect("Couldn't HEAD object"); println!("{:#?}", result); let head_metadata = result.metadata.as_ref().expect("No metadata available"); assert_eq!(metadata, head_metadata); } fn test_get_object_with_metadata(client: &TestClient, bucket: &str, filename: &str, metadata: &HashMap<String,String>) { let get_req = GetObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let result = client.get_object(get_req).sync().expect("Couldn't GET object"); println!("get object result: {:#?}", result); let head_metadata = result.metadata.as_ref().expect("No metadata available"); assert_eq!(metadata, head_metadata); } fn test_get_object_with_presigned_url(region: &Region, credentials: &AwsCredentials, bucket: &str, filename: &str) { let req = GetObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let presigned_url = req.get_presigned_url(region, credentials); println!("get object presigned url: {:#?}", presigned_url); let mut res = reqwest::get(&presigned_url).unwrap(); assert_eq!(res.status(), reqwest::StatusCode::Ok); let size = res.headers().get::<reqwest::header::ContentLength>().map(|ct_len| **ct_len).unwrap_or(0); assert!(size > 0); let mut buf: Vec<u8> = vec![]; res.copy_to(&mut buf).unwrap(); assert!(buf.len() > 0); } fn test_put_object_with_presigned_url(region: &Region, credentials: &AwsCredentials, bucket: &str, filename: &str) { let req = PutObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let presigned_url = 
req.get_presigned_url(region, credentials); println!("put object presigned url: {:#?}", presigned_url); let mut map = HashMap::new(); map.insert("test", "data"); let client = reqwest::Client::new(); let res = client.put(&presigned_url).json(&map).send().unwrap(); assert_eq!(res.status(), reqwest::StatusCode::Ok); } fn test_delete_object_with_presigned_url(region: &Region, credentials: &AwsCredentials, bucket: &str, filename: &str) { let req = DeleteObjectRequest { bucket: bucket.to_owned(), key: filename.to_owned(), ..Default::default() }; let presigned_url = req.get_presigned_url(region, credentials); println!("delete object presigned url: {:#?}", presigned_url); let client = reqwest::Client::new(); let res = client.delete(&presigned_url).send().unwrap(); assert_eq!(res.status(), reqwest::StatusCode::NoContent); }
// Lexer (and, further below, parser) for the WASM s-expression text format,
// built on the `parsell` streaming parser-combinator library.

use self::Token::{Begin, End, Identifier, Number, Text, Whitespace};
use self::LexError::{UnexpectedEscape, UnexpectedChar, UnexpectedEOF, UnclosedString, UnparseableInt};
use self::ParseError::{LexErr, ExpectedModuleErr, ExpectedEndErr, ExpectedNumberErr, ExpectedTextErr};
use ast::{Memory, Module, Segment};
use parsell::{Upcast, Downcast, ToStatic, StaticMarker};
use parsell::{Parser, Uncommitted, Boxable, ParseResult, HasOutput, InState, Stateful};
use parsell::{emit, character, character_ref, CHARACTER};
use std::num::ParseIntError;
use std::borrow::Cow;
use std::borrow::Cow::Borrowed;
use std::str::Chars;
use std::iter::Peekable;
use std::vec::Drain;

// Lexer

/// One lexical token of the WASM text format.
///
/// Tokens borrow from the input where possible (`Cow<'a, str>`); the
/// `Downcast` impl below converts a borrowed token into an owned
/// `Token<'static>` so it can outlive the input buffer.
#[derive(Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Debug)]
pub enum Token<'a> {
    Begin(Cow<'a,str>),      // an opening paren together with its keyword, e.g. "(module"
    End,                     // a closing paren ")"
    Identifier(Cow<'a,str>), // a "$"-prefixed name, e.g. "$abc"
    Number(usize),           // a decimal integer literal
    Text(String),            // a double-quoted string with escapes already resolved
    Whitespace(Cow<'a,str>), // a run of whitespace characters
}

// An owned token can always be viewed at a shorter lifetime.
impl<'a> Upcast<Token<'a>> for Token<'static> {
    fn upcast(self) -> Token<'a> {
        self
    }
}

// Convert a borrowed token into an owned one by downcasting each
// `Cow` payload; `Number`/`Text` already own their data.
impl<'a> Downcast<Token<'static>> for Token<'a> {
    fn downcast(self) -> Token<'static> {
        match self {
            Begin(kw) => Begin(kw.downcast()),
            End => End,
            Identifier(name) => Identifier(name.downcast()),
            Number(num) => Number(num),
            Text(string) => Text(string),
            Whitespace(string) => Whitespace(string.downcast()),
        }
    }
}

impl<'a> ToStatic for Token<'a> {
    type Static = Token<'static>;
}

/// Errors the lexer can produce.
#[derive(Clone, PartialEq, Debug)]
pub enum LexError {
    UnexpectedEscape(String),    // a backslash escape other than \\ \" \n \r \t
    UnexpectedChar(char),        // a character no token rule matches
    UnclosedString(char),        // string ended by this char instead of a closing quote
    UnparseableInt(ParseIntError), // digit run did not fit in a usize
    UnexpectedEOF,               // input ended mid-token
}

// Lets `try!` in `mk_number` convert integer-parse failures into a `LexError`.
impl From<ParseIntError> for LexError {
    fn from(err: ParseIntError) -> LexError {
        UnparseableInt(err)
    }
}

impl StaticMarker for LexError {}

// No-op callbacks handed to combinators that require one.
fn ignore() {}
fn discard_char1(_: char) {}
fn discard_char2(_: char, _: Option<char>) {}

// Single-character predicates used with `character(...)`.
fn is_lparen(ch: char) -> bool { ch == '(' }
fn is_rparen(ch: char) -> bool { ch == ')' }
fn is_dbl_quote(ch: char) -> bool { ch == '"' }
fn is_backslash(ch: char) -> bool { ch == '\\' }
fn is_dollar(ch: char) -> bool { ch == '$' }
// Keywords: alphanumerics plus '.'; identifiers additionally allow '$'.
fn is_keyword_char(ch: char) -> bool { ch.is_alphanumeric() || (ch == '.') }
fn is_identifier_char(ch: char) -> bool { ch.is_alphanumeric() || (ch == '.') || (ch == '$') }
// String-literal characters that need no escaping (quotes, backslashes
// and raw CR/LF must be escaped — see `mk_escape`/`must_be_dbl_quote`).
fn is_unescaped_char(ch: char) -> bool { ch != '"' && ch != '\\' && ch != '\r' && ch != '\n' }

// Constructors mapping buffered input spans to tokens.
fn mk_begin<'a>(s: Cow<'a,str>) -> Result<Token<'a>, LexError> { Ok(Begin(s)) }
fn mk_end<'a>(_: char) -> Result<Token<'a>, LexError> { Ok(End) }
fn mk_identifier<'a>(s: Cow<'a,str>) -> Result<Token<'a>, LexError> { Ok(Identifier(s)) }
fn mk_whitespace<'a>(s: Cow<'a,str>) -> Result<Token<'a>, LexError> { Ok(Whitespace(s)) }

/// Resolve a two-character backslash escape ("\\", "\"", "\n", "\r", "\t")
/// to the character it denotes; anything else is an `UnexpectedEscape` error.
fn mk_escape<'a>(escaped: Cow<'a, str>) -> Result<Cow<'a, str>, LexError> {
    match &*escaped {
        "\\\\" => Ok(Borrowed("\\")),
        "\\\"" => Ok(Borrowed("\"")),
        "\\n" => Ok(Borrowed("\n")),
        "\\r" => Ok(Borrowed("\r")),
        "\\t" => Ok(Borrowed("\t")),
        _ => Err(UnexpectedEscape(escaped.into_owned())),
    }
}

// Unescaped spans pass through unchanged.
fn mk_unescape<'a>(unescaped: Cow<'a, str>) -> Result<Cow<'a, str>, LexError> {
    Ok(unescaped)
}

/// Parse a run of digits as a base-10 `usize`; overflow surfaces as
/// `UnparseableInt` via the `From<ParseIntError>` impl above.
fn mk_number<'a>(s: Cow<'a,str>) -> Result<Token<'a>, LexError> {
    Ok(Number(try!(usize::from_str_radix(&*s, 10))))
}

fn mk_text<'a>(s: String) -> Token<'a> { Text(s) }

// Fallback for input no rule matched: EOF or the offending character.
fn mk_unexpected_char_err<'a>(ch: Option<char>) -> Result<Token<'a>,LexError> {
    Err(ch.map_or(UnexpectedEOF, UnexpectedChar))
}

// Boxes an in-progress lexer so its state has a nameable type
// (`WasmLexerState`); see the "work-around" note below.
fn mk_lexer_state<Lexer>(lexer: Lexer) -> WasmLexerState
    where Lexer: 'static + for<'a> Boxable<char, Chars<'a>, Result<Token<'a>, LexError>>
{
    WasmLexer.in_state(Box::new(lexer))
}

// Empty accumulator for `star(...)` when collecting string contents.
fn mk_ok_string() -> Result<String, LexError> {
    Ok(String::new())
}

/// Require the next character to be the closing '"' of a string literal:
/// EOF is `UnexpectedEOF`, any other character is `UnclosedString`.
fn must_be_dbl_quote(ch: Option<char>) -> Result<(), LexError> {
    if ch == Some('"') {
        Ok(())
    } else {
        Err(ch.map_or(UnexpectedEOF, UnclosedString))
    }
}

// Work-around for not having impl results yet.
/// The WASM lexer: a zero-sized `parsell` parser over a `Chars` stream
/// producing one `Result<Token, LexError>` per call to `init`.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Debug)]
pub struct WasmLexer;

// Boxed state type for a suspended lex (see the "work-around" comment above).
pub type WasmLexerState = InState<WasmLexer, Box<for<'a> Boxable<char, Chars<'a>, Result<Token<'a>, LexError>>>>;

impl Parser for WasmLexer {}

impl<'a> HasOutput<char, Chars<'a>> for WasmLexer {
    type Output = Result<Token<'a>, LexError>;
}

impl<'a> Uncommitted<char, Chars<'a>, Result<Token<'a>, LexError>> for WasmLexer {
    type State = WasmLexerState;

    // Builds the token grammar from combinators on every call, then runs it.
    // The uppercase locals are grammar nonterminals, hence `non_snake_case`.
    #[allow(non_snake_case)]
    fn init(&self, data: &mut Chars<'a>) -> Option<ParseResult<WasmLexerState, Result<Token<'a>, LexError>>> {
        // "(keyword": an lparen followed by zero-or-more keyword chars, buffered.
        let BEGIN = character(is_lparen)
            .discard_and_then(character(is_keyword_char).star(ignore).buffer())
            .map(mk_begin);

        let END = character(is_rparen)
            .map(mk_end);

        // "$" followed by identifier chars; the buffer keeps the leading '$'.
        let IDENTIFIER = character(is_dollar)
            .and_then(character(is_identifier_char).star(ignore))
            .buffer()
            .map(mk_identifier);

        let WHITESPACE = character(char::is_whitespace).plus(ignore).buffer()
            .map(mk_whitespace);

        let OPEN_QUOTE = character(is_dbl_quote);

        // A backslash plus the following character, resolved by `mk_escape`.
        let ESCAPED = character(is_backslash)
            .and_then(CHARACTER)
            .buffer()
            .map(mk_escape);

        let UNESCAPED = character(is_unescaped_char).plus(ignore)
            .buffer()
            .map(mk_unescape);

        // '"' + (escaped|unescaped)* collected into a String + closing '"'.
        let TEXT = OPEN_QUOTE
            .discard_and_then(ESCAPED.or_else(UNESCAPED).star(mk_ok_string)) // TODO: buffer into a Cow<'a,str>
            .try_and_then_try_discard(CHARACTER.map(must_be_dbl_quote))
            .try_map(mk_text);

        let NUMBER = character(char::is_numeric).plus(ignore).buffer()
            .map(mk_number);

        // Catch-all: consumes one character and reports it as unexpected.
        let UNRECOGNIZED = CHARACTER
            .map(mk_unexpected_char_err);

        // Alternatives are tried in this order; UNRECOGNIZED must come last.
        let WASM_TOKEN = IDENTIFIER
            .or_else(BEGIN)
            .or_else(END)
            .or_else(WHITESPACE)
            .or_else(TEXT)
            .or_else(NUMBER)
            .or_else(UNRECOGNIZED);

        WASM_TOKEN.boxed(mk_lexer_state).init(data)
    }
}

pub const LEXER: WasmLexer = WasmLexer;

// Each probe ends with '!' (a character no rule matches) so the lexer
// commits to a single complete token.
#[test]
#[allow(non_snake_case)]
fn test_lexer() {
    use parsell::UncommittedStr;
    use parsell::ParseResult::{Done};
    use std::borrow::Cow::{Borrowed};
    // A digit string too large for usize, to obtain the expected ParseIntError.
    let overflow = usize::from_str_radix("983748948934789348763894786345786", 10).unwrap_err();
    assert_eq!(LEXER.init_str("(foo!"),Some(Done(Ok(Begin(Borrowed("foo"))))));
    assert_eq!(LEXER.init_str(")!"),Some(Done(Ok(End))));
    assert_eq!(LEXER.init_str("$abc!"),Some(Done(Ok(Identifier(Borrowed("$abc"))))));
    assert_eq!(LEXER.init_str(" \t\r\n !"),Some(Done(Ok(Whitespace(Borrowed(" \t\r\n "))))));
    assert_eq!(LEXER.init_str("\"xyz\\t\\\"abc\"!"),Some(Done(Ok(Text(String::from("xyz\t\"abc"))))));
    assert_eq!(LEXER.init_str(" \t\r\n !"),Some(Done(Ok(Whitespace(Borrowed(" \t\r\n "))))));
    assert_eq!(LEXER.init_str("!!"),Some(Done(Err(UnexpectedChar('!')))));
    assert_eq!(LEXER.init_str("\"abc\r\"!"),Some(Done(Err(UnclosedString('\r')))));
    assert_eq!(LEXER.init_str("1234567890123456789012345678901234567890!"),Some(Done(Err(UnparseableInt(overflow)))));
}

// Parser

/// Errors the parser can produce, including wrapped lexer errors.
#[derive(Clone, PartialEq, Debug)]
pub enum ParseError {
    LexErr(LexError),
    ExpectedModuleErr,
    ExpectedNumberErr,
    ExpectedTextErr,
    ExpectedEndErr,
}

impl From<LexError> for ParseError {
    fn from(err: LexError) -> ParseError {
        LexErr(err)
    }
}

impl StaticMarker for ParseError {}

// Token predicates for `character_ref`: match "(module" / "(memory" begins.
fn is_begin_module<'a>(tok: &Token<'a>) -> bool {
    match *tok {
        Begin(ref kw) => (kw == "module"),
        _ => false,
    }
}

fn is_begin_memory<'a>(tok: &Token<'a>) -> bool {
    match *tok {
        Begin(ref kw) => (kw == "memory"),
        _ => false,
    }
}

// Builds an empty Memory for a matched "(memory" token; fields are not
// yet populated from the body here.
fn mk_memory<'a>(_: Token<'a>) -> Memory {
    Memory { init: 0, max: None, segments: Vec::new() }
}

fn mk_module<'a>() -> Result<Module, ParseError> { Ok(Module::new()) }
fn mk_ok_token<'a>(tok: Token<'a>) -> Result<Token<'a>, ParseError> { Ok(tok) }
fn mk_expected_module_err<'a>(_: Option<Token<'a>>) -> Result<Module, ParseError> { Err(ExpectedModuleErr) }
fn mk_expected_end_err<'a>(_: Option<Token<'a>>) -> Result<Token<'a>, ParseError> { Err(ExpectedEndErr) }

// Require the next token to be the closing paren of an s-expression.
fn must_be_end<'a>(tok: Option<Token<'a>>) -> Result<(), ParseError> {
    match tok {
        Some(End) => Ok(()),
        _ => Err(ExpectedEndErr),
    }
}

// Boxes an in-progress parse so it has the nameable `WasmParserState` type.
fn mk_parser_state<P>(parser: P) -> WasmParserState
    where P: 'static + for<'a> Boxable<Token<'a>, Tokens<'a>, Result<Module, ParseError>>
{
    WasmParser.in_state(Box::new(parser))
}

/// The WASM parser: consumes a token stream and produces a `Module`.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Debug)]
pub struct WasmParser;
pub type WasmParserState = InState<WasmParser, Box<for<'a> Boxable<Token<'a>, Tokens<'a>, Result<Module, ParseError>>>>;
pub type Tokens<'a> = Peekable<Drain<'a, Token<'a>>>; // A placeholder

impl Parser for WasmParser {}

impl<'a> HasOutput<Token<'a>, Tokens<'a>> for WasmParser {
    type Output = Result<Module, ParseError>;
}

impl<'a> Uncommitted<Token<'a>, Tokens<'a>, Result<Module, ParseError>> for WasmParser {
    type State = WasmParserState;

    // Grammar: module := "(module" memory* ")"; memory := "(memory" ")".
    #[allow(non_snake_case)]
    fn init(&self, data: &mut Tokens<'a>) -> Option<ParseResult<WasmParserState, Result<Module, ParseError>>> {
        let END = CHARACTER.map(must_be_end);

        let MEMORY = character_ref(is_begin_memory)
            .map(mk_memory)
            .and_then_try_discard(END);

        let MODULE = character_ref(is_begin_module)
            .discard_and_then(MEMORY.star(mk_module))
            .try_and_then_try_discard(END);

        // Anything other than "(module" at top level is an error.
        let EXPECTED_MODULE = CHARACTER
            .map(mk_expected_module_err);

        let TOP_LEVEL = MODULE
            .or_else(EXPECTED_MODULE);

        TOP_LEVEL.boxed(mk_parser_state).init(data)
    }
}
Rewrote parser to reduce compile times.
use self::Token::{Begin, End, Identifier, Number, Text, Whitespace}; use self::LexError::{UnexpectedEscape, UnexpectedChar, UnexpectedEOF, UnclosedString, UnparseableInt}; use self::ParseError::{LexErr, ExpectedEndErr, ExpectedNumberErr}; use ast::{Memory, Module}; use parsell::{Upcast, Downcast, ToStatic, StaticMarker}; use parsell::{Parser, Uncommitted, Boxable, ParseResult, HasOutput, InState, Stateful}; use parsell::{character, character_ref, CHARACTER}; use parsell::ParseResult::{Done, Continue}; use std::num::ParseIntError; use std::borrow::Cow; use std::borrow::Cow::Borrowed; use std::str::Chars; use std::iter::Peekable; use std::vec::Drain; // Lexer #[derive(Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Debug)] pub enum Token<'a> { Begin(Cow<'a,str>), End, Identifier(Cow<'a,str>), Number(usize), Text(String), Whitespace(Cow<'a,str>), } impl<'a> Upcast<Token<'a>> for Token<'static> { fn upcast(self) -> Token<'a> { self } } impl<'a> Downcast<Token<'static>> for Token<'a> { fn downcast(self) -> Token<'static> { match self { Begin(kw) => Begin(kw.downcast()), End => End, Identifier(name) => Identifier(name.downcast()), Number(num) => Number(num), Text(string) => Text(string), Whitespace(string) => Whitespace(string.downcast()), } } } impl<'a> ToStatic for Token<'a> { type Static = Token<'static>; } #[derive(Clone, PartialEq, Debug)] pub enum LexError { UnexpectedEscape(String), UnexpectedChar(char), UnclosedString(char), UnparseableInt(ParseIntError), UnexpectedEOF, } impl From<ParseIntError> for LexError { fn from(err: ParseIntError) -> LexError { UnparseableInt(err) } } impl StaticMarker for LexError {} fn ignore() {} fn is_lparen(ch: char) -> bool { ch == '(' } fn is_rparen(ch: char) -> bool { ch == ')' } fn is_dbl_quote(ch: char) -> bool { ch == '"' } fn is_backslash(ch: char) -> bool { ch == '\\' } fn is_dollar(ch: char) -> bool { ch == '$' } fn is_keyword_char(ch: char) -> bool { ch.is_alphanumeric() || (ch == '.') } fn is_identifier_char(ch: char) -> bool 
{ ch.is_alphanumeric() || (ch == '.') || (ch == '$') } fn is_unescaped_char(ch: char) -> bool { ch != '"' && ch != '\\' && ch != '\r' && ch != '\n' } fn mk_begin<'a>(s: Cow<'a,str>) -> Result<Token<'a>, LexError> { Ok(Begin(s)) } fn mk_end<'a>(_: char) -> Result<Token<'a>, LexError> { Ok(End) } fn mk_identifier<'a>(s: Cow<'a,str>) -> Result<Token<'a>, LexError> { Ok(Identifier(s)) } fn mk_whitespace<'a>(s: Cow<'a,str>) -> Result<Token<'a>, LexError> { Ok(Whitespace(s)) } fn mk_escape<'a>(escaped: Cow<'a, str>) -> Result<Cow<'a, str>, LexError> { match &*escaped { "\\\\" => Ok(Borrowed("\\")), "\\\"" => Ok(Borrowed("\"")), "\\n" => Ok(Borrowed("\n")), "\\r" => Ok(Borrowed("\r")), "\\t" => Ok(Borrowed("\t")), _ => Err(UnexpectedEscape(escaped.into_owned())), } } fn mk_unescape<'a>(unescaped: Cow<'a, str>) -> Result<Cow<'a, str>, LexError> { Ok(unescaped) } fn mk_number<'a>(s: Cow<'a,str>) -> Result<Token<'a>, LexError> { Ok(Number(try!(usize::from_str_radix(&*s, 10)))) } fn mk_text<'a>(s: String) -> Token<'a> { Text(s) } fn mk_unexpected_char_err<'a>(ch: Option<char>) -> Result<Token<'a>,LexError> { Err(ch.map_or(UnexpectedEOF, UnexpectedChar)) } fn mk_ok_string() -> Result<String, LexError> { Ok(String::new()) } fn must_be_dbl_quote(ch: Option<char>) -> Result<(), LexError> { if ch == Some('"') { Ok(()) } else { Err(ch.map_or(UnexpectedEOF, UnclosedString)) } } fn mk_lexer_state<Lexer>(lexer: Lexer) -> WasmLexerState where Lexer: 'static + for<'a> Boxable<char, Chars<'a>, Result<Token<'a>, LexError>> { WasmLexer.in_state(Box::new(lexer)) } // Work-around for not having impl results yet. 
#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Debug)] pub struct WasmLexer; pub type WasmLexerState = InState<WasmLexer, Box<for<'a> Boxable<char, Chars<'a>, Result<Token<'a>, LexError>>>>; impl Parser for WasmLexer {} impl<'a> HasOutput<char, Chars<'a>> for WasmLexer { type Output = Result<Token<'a>, LexError>; } impl<'a> Uncommitted<char, Chars<'a>, Result<Token<'a>, LexError>> for WasmLexer { type State = WasmLexerState; #[allow(non_snake_case)] fn init(&self, data: &mut Chars<'a>) -> Option<ParseResult<WasmLexerState, Result<Token<'a>, LexError>>> { let BEGIN = character(is_lparen) .discard_and_then(character(is_keyword_char).star(ignore).buffer()) .map(mk_begin); let END = character(is_rparen) .map(mk_end); let IDENTIFIER = character(is_dollar) .and_then(character(is_identifier_char).star(ignore)) .buffer() .map(mk_identifier); let WHITESPACE = character(char::is_whitespace).plus(ignore).buffer() .map(mk_whitespace); let OPEN_QUOTE = character(is_dbl_quote); let ESCAPED = character(is_backslash) .and_then(CHARACTER) .buffer() .map(mk_escape); let UNESCAPED = character(is_unescaped_char).plus(ignore) .buffer() .map(mk_unescape); let TEXT = OPEN_QUOTE .discard_and_then(ESCAPED.or_else(UNESCAPED).star(mk_ok_string)) // TODO: buffer into a Cow<'a,str> .try_and_then_try_discard(CHARACTER.map(must_be_dbl_quote)) .try_map(mk_text); let NUMBER = character(char::is_numeric).plus(ignore).buffer() .map(mk_number); let UNRECOGNIZED = CHARACTER .map(mk_unexpected_char_err); let WASM_TOKEN = IDENTIFIER .or_else(BEGIN) .or_else(END) .or_else(WHITESPACE) .or_else(TEXT) .or_else(NUMBER) .or_else(UNRECOGNIZED); WASM_TOKEN.boxed(mk_lexer_state).init(data) } } pub const LEXER: WasmLexer = WasmLexer; #[test] #[allow(non_snake_case)] fn test_lexer() { use parsell::UncommittedStr; use parsell::ParseResult::{Done}; use std::borrow::Cow::{Borrowed}; let overflow = usize::from_str_radix("983748948934789348763894786345786", 10).unwrap_err(); 
assert_eq!(LEXER.init_str("(foo!"),Some(Done(Ok(Begin(Borrowed("foo")))))); assert_eq!(LEXER.init_str(")!"),Some(Done(Ok(End)))); assert_eq!(LEXER.init_str("$abc!"),Some(Done(Ok(Identifier(Borrowed("$abc")))))); assert_eq!(LEXER.init_str(" \t\r\n !"),Some(Done(Ok(Whitespace(Borrowed(" \t\r\n ")))))); assert_eq!(LEXER.init_str("\"xyz\\t\\\"abc\"!"),Some(Done(Ok(Text(String::from("xyz\t\"abc")))))); assert_eq!(LEXER.init_str(" \t\r\n !"),Some(Done(Ok(Whitespace(Borrowed(" \t\r\n ")))))); assert_eq!(LEXER.init_str("!!"),Some(Done(Err(UnexpectedChar('!'))))); assert_eq!(LEXER.init_str("\"abc\r\"!"),Some(Done(Err(UnclosedString('\r'))))); assert_eq!(LEXER.init_str("1234567890123456789012345678901234567890!"),Some(Done(Err(UnparseableInt(overflow))))) ; } // Parser #[derive(Clone, PartialEq, Debug)] pub enum ParseError { LexErr(LexError), ExpectedNumberErr, ExpectedEndErr, } impl From<LexError> for ParseError { fn from(err: LexError) -> ParseError { LexErr(err) } } impl StaticMarker for ParseError {} fn is_begin_module<'a>(tok: &Token<'a>) -> bool { match *tok { Begin(ref kw) => (kw == "module"), _ => false, } } fn is_begin_memory<'a>(tok: &Token<'a>) -> bool { match *tok { Begin(ref kw) => (kw == "memory"), _ => false, } } fn mk_memory<'a>(init: usize, _: Token<'a>) -> Memory { Memory { init: init, max: None, segments: Vec::new() } } fn mk_module<'a>(_: Option<Memory>, _: Token<'a>) -> Module { Module::new() } fn must_be_end<'a>(tok: Option<Token<'a>>) -> Result<Token<'a>, ParseError> { match tok { Some(End) => Ok(End), _ => Err(ExpectedEndErr), } } fn must_be_number<'a>(tok: Option<Token<'a>>) -> Result<usize, ParseError> { match tok { Some(Number(num)) => Ok(num), _ => Err(ExpectedNumberErr), } } fn mk_parser_state<P, T>(parser: P) -> WasmParserState<T> where P: 'static + for<'a> Boxable<Token<'a>, Tokens<'a>, WasmParserOutput<T>> { WasmParserState(Box::new(parser)) } pub struct WasmParserState<T> (Box<for<'a> Boxable<Token<'a>, Tokens<'a>, WasmParserOutput<T>>>); 
impl<'a,T> HasOutput<Token<'a>, Tokens<'a>> for WasmParserState<T> { type Output = WasmParserOutput<T>; } impl<'a,T> Stateful<Token<'a>, Tokens<'a>, WasmParserOutput<T>> for WasmParserState<T> { fn more(self, string: &mut Tokens<'a>) -> ParseResult<WasmParserState<T>, WasmParserOutput<T>> { match self.0.more(string) { Done(result) => Done(result), Continue(parsing) => Continue(WasmParserState(parsing)), } } fn done(self) -> WasmParserOutput<T> { self.0.done() } } pub type WasmParserOutput<T> = Result<T, ParseError>; pub type Tokens<'a> = Peekable<Drain<'a, Token<'a>>>; // A placeholder #[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Debug)] pub struct MEMORY; impl Parser for MEMORY {} impl<'a> HasOutput<Token<'a>, Tokens<'a>> for MEMORY { type Output = WasmParserOutput<Memory>; } impl<'a> Uncommitted<Token<'a>, Tokens<'a>, WasmParserOutput<Memory>> for MEMORY { type State = WasmParserState<Memory>; #[allow(non_snake_case)] fn init(&self, data: &mut Tokens<'a>) -> Option<ParseResult<WasmParserState<Memory>, WasmParserOutput<Memory>>> { character_ref(is_begin_memory) .discard_and_then(CHARACTER.map(must_be_number)) .try_and_then_try(CHARACTER.map(must_be_end)) .try_map2(mk_memory) .boxed(mk_parser_state) .init(data) } } #[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Debug)] pub struct MODULE; impl Parser for MODULE {} impl<'a> HasOutput<Token<'a>, Tokens<'a>> for MODULE { type Output = WasmParserOutput<Module>; } impl<'a> Uncommitted<Token<'a>, Tokens<'a>, WasmParserOutput<Module>> for MODULE { type State = WasmParserState<Module>; #[allow(non_snake_case)] fn init(&self, data: &mut Tokens<'a>) -> Option<ParseResult<WasmParserState<Module>, WasmParserOutput<Module>>> { character_ref(is_begin_module) .discard_and_then(MEMORY.try_opt()) .try_and_then_try(CHARACTER.map(must_be_end)) .try_map2(mk_module) .boxed(mk_parser_state) .init(data) } } #[test] fn test_parser() { use parsell::ParseResult::Done; let mut input = vec![ 
Begin(Borrowed("module")), End, ]; let output = Module::new(); let mut iter = input.drain(..).peekable(); assert_eq!( MODULE.init(&mut iter), Some(Done(Ok(output))) ) }
#![no_std] #[macro_use] extern crate array_macro; /// Enum map constructor. /// /// This macro allows to create a new enum map in a type safe way. It takes /// a list of `,` separated pairs separated by `=>`. Left side is `|` /// separated list of enum keys, or `_` to match all unmatched enum keys, /// while right side is a value. /// /// # Examples /// /// ``` /// # extern crate enum_map; /// use enum_map::{enum_map, Enum}; /// /// #[derive(Enum)] /// enum Example { /// A, /// B, /// C, /// D, /// } /// /// fn main() { /// let enum_map = enum_map! { /// Example::A | Example::B => 1, /// Example::C => 2, /// _ => 3, /// }; /// assert_eq!(enum_map[Example::A], 1); /// assert_eq!(enum_map[Example::B], 1); /// assert_eq!(enum_map[Example::C], 2); /// assert_eq!(enum_map[Example::D], 3); /// } /// ``` #[macro_export] macro_rules! enum_map { {$($t:tt)*} => { $crate::from_fn(|k| match k { $($t)* }) }; } mod enum_map_impls; mod internal; mod iter; mod serde; pub use internal::Enum; pub use iter::{IntoIter, Iter, IterMut, Values, ValuesMut}; /// An enum mapping. /// /// This internally uses an array which stores a value for each possible /// enum value. To work, it requires implementation of internal (private, /// although public due to macro limitations) trait which allows extracting /// information about an enum, which can be automatically generated using /// `#[derive(EnumMap)]` macro. /// /// Additionally, `bool` and `u8` automatically derives from `EnumMap`. While /// `u8` is not technically an enum, it's convenient to consider it like one. /// In particular, [reverse-complement in benchmark game] could be using `u8` /// as an enum. 
/// /// # Examples /// /// ``` /// # extern crate enum_map; /// use enum_map::{enum_map, Enum, EnumMap}; /// /// #[derive(Enum)] /// enum Example { /// A, /// B, /// C, /// } /// /// fn main() { /// let mut map = EnumMap::new(); /// // new initializes map with default values /// assert_eq!(map[Example::A], 0); /// map[Example::A] = 3; /// assert_eq!(map[Example::A], 3); /// } /// ``` /// /// [reverse-complement in benchmark game]: /// http://benchmarksgame.alioth.debian.org/u64q/program.php?test=revcomp&lang=rust&id=2 pub struct EnumMap<K: Enum<V>, V> { array: K::Array, } impl<K: Enum<V>, V: Default> EnumMap<K, V> { /// Creates an enum map with default values. /// /// # Examples /// /// ``` /// # extern crate enum_map; /// use enum_map::{Enum, EnumMap}; /// /// #[derive(Enum)] /// enum Example { /// A, /// } /// /// fn main() { /// let enum_map = EnumMap::<_, i32>::new(); /// assert_eq!(enum_map[Example::A], 0); /// } /// ``` #[inline] pub fn new() -> Self { EnumMap::default() } } impl<K: Enum<V>, V> EnumMap<K, V> { /// Returns an iterator over enum map. #[inline] pub fn iter(&self) -> Iter<K, V> { self.into_iter() } /// Returns a mutable iterator over enum map. #[inline] pub fn iter_mut(&mut self) -> IterMut<K, V> { self.into_iter() } /// Returns number of elements in enum map. #[inline] pub fn len(&self) -> usize { self.as_slice().len() } /// Returns whether the enum variant set is empty. /// /// This isn't particularly useful, as there is no real reason to use /// enum map for enums without variants. However, it is provided for /// consistency with data structures providing len method (and I will /// admit, to avoid clippy warnings). 
/// /// # Examples /// /// ``` /// extern crate enum_map; /// /// use enum_map::{Enum, EnumMap}; /// /// #[derive(Enum)] /// enum Void {} /// /// #[derive(Enum)] /// enum SingleVariant { /// Variant, /// } /// /// fn main() { /// assert!(EnumMap::<Void, ()>::new().is_empty()); /// assert!(!EnumMap::<SingleVariant, ()>::new().is_empty()); /// } #[inline] pub fn is_empty(&self) -> bool { self.as_slice().is_empty() } /// Swaps two indexes. /// /// # Examples /// /// ``` /// # extern crate enum_map; /// use enum_map::enum_map; /// /// fn main() { /// let mut map = enum_map! { false => 0, true => 1 }; /// map.swap(false, true); /// assert_eq!(map[false], 1); /// assert_eq!(map[true], 0); /// } /// ``` #[inline] pub fn swap(&mut self, a: K, b: K) { self.as_mut_slice().swap(a.to_usize(), b.to_usize()) } /// Converts an enum map to a slice representing values. #[inline] pub fn as_slice(&self) -> &[V] { K::slice(&self.array) } /// Converts a mutable enum map to a mutable slice representing values. #[inline] pub fn as_mut_slice(&mut self) -> &mut [V] { K::slice_mut(&mut self.array) } /// Returns a raw pointer to the enum map's buffer. /// /// The caller must ensure that the slice outlives the pointer this /// function returns, or else it will end up pointing to garbage. /// /// Modifying the container referenced by this slice may cause its buffer /// to be reallocated, which would also make any pointers to it invalid. /// /// # Examples /// /// ``` /// #[macro_use] /// extern crate enum_map; /// /// use enum_map::EnumMap; /// /// fn main() { /// let map = enum_map! { 5 => 42, _ => 0 }; /// assert_eq!(unsafe { *map.as_ptr().offset(5) }, 42); /// } /// ``` #[inline] pub fn as_ptr(&self) -> *const V { self.as_slice().as_ptr() } /// Returns an unsafe mutable pointer to the enum map's buffer. /// /// The caller must ensure that the slice outlives the pointer this /// function returns, or else it will end up pointing to garbage. 
/// /// Modifying the container referenced by this slice may cause its buffer /// to be reallocated, which would also make any pointers to it invalid. /// /// # Examples /// /// ``` /// #[macro_use] /// extern crate enum_map; /// /// use enum_map::EnumMap; /// /// fn main() { /// let mut map = enum_map! { _ => 0 }; /// unsafe { /// *map.as_mut_ptr().offset(11) = 23 /// }; /// assert_eq!(map[11], 23); /// } /// ``` #[inline] pub fn as_mut_ptr(&mut self) -> *mut V { self.as_mut_slice().as_mut_ptr() } } impl<F: FnMut(K) -> V, K: Enum<V>, V> From<F> for EnumMap<K, V> { #[inline] fn from(f: F) -> Self { EnumMap { array: K::from_function(f), } } } pub fn from_fn<K, V>(f: impl FnMut(K) -> V) -> EnumMap<K, V> where K: Enum<V>, { f.into() } Remove references to buffer in documentation Those were accidentally copied from Vec documentation. #![no_std] #[macro_use] extern crate array_macro; /// Enum map constructor. /// /// This macro allows to create a new enum map in a type safe way. It takes /// a list of `,` separated pairs separated by `=>`. Left side is `|` /// separated list of enum keys, or `_` to match all unmatched enum keys, /// while right side is a value. /// /// # Examples /// /// ``` /// # extern crate enum_map; /// use enum_map::{enum_map, Enum}; /// /// #[derive(Enum)] /// enum Example { /// A, /// B, /// C, /// D, /// } /// /// fn main() { /// let enum_map = enum_map! { /// Example::A | Example::B => 1, /// Example::C => 2, /// _ => 3, /// }; /// assert_eq!(enum_map[Example::A], 1); /// assert_eq!(enum_map[Example::B], 1); /// assert_eq!(enum_map[Example::C], 2); /// assert_eq!(enum_map[Example::D], 3); /// } /// ``` #[macro_export] macro_rules! enum_map { {$($t:tt)*} => { $crate::from_fn(|k| match k { $($t)* }) }; } mod enum_map_impls; mod internal; mod iter; mod serde; pub use internal::Enum; pub use iter::{IntoIter, Iter, IterMut, Values, ValuesMut}; /// An enum mapping. 
/// /// This internally uses an array which stores a value for each possible /// enum value. To work, it requires implementation of internal (private, /// although public due to macro limitations) trait which allows extracting /// information about an enum, which can be automatically generated using /// `#[derive(EnumMap)]` macro. /// /// Additionally, `bool` and `u8` automatically derives from `EnumMap`. While /// `u8` is not technically an enum, it's convenient to consider it like one. /// In particular, [reverse-complement in benchmark game] could be using `u8` /// as an enum. /// /// # Examples /// /// ``` /// # extern crate enum_map; /// use enum_map::{enum_map, Enum, EnumMap}; /// /// #[derive(Enum)] /// enum Example { /// A, /// B, /// C, /// } /// /// fn main() { /// let mut map = EnumMap::new(); /// // new initializes map with default values /// assert_eq!(map[Example::A], 0); /// map[Example::A] = 3; /// assert_eq!(map[Example::A], 3); /// } /// ``` /// /// [reverse-complement in benchmark game]: /// http://benchmarksgame.alioth.debian.org/u64q/program.php?test=revcomp&lang=rust&id=2 pub struct EnumMap<K: Enum<V>, V> { array: K::Array, } impl<K: Enum<V>, V: Default> EnumMap<K, V> { /// Creates an enum map with default values. /// /// # Examples /// /// ``` /// # extern crate enum_map; /// use enum_map::{Enum, EnumMap}; /// /// #[derive(Enum)] /// enum Example { /// A, /// } /// /// fn main() { /// let enum_map = EnumMap::<_, i32>::new(); /// assert_eq!(enum_map[Example::A], 0); /// } /// ``` #[inline] pub fn new() -> Self { EnumMap::default() } } impl<K: Enum<V>, V> EnumMap<K, V> { /// Returns an iterator over enum map. #[inline] pub fn iter(&self) -> Iter<K, V> { self.into_iter() } /// Returns a mutable iterator over enum map. #[inline] pub fn iter_mut(&mut self) -> IterMut<K, V> { self.into_iter() } /// Returns number of elements in enum map. 
#[inline] pub fn len(&self) -> usize { self.as_slice().len() } /// Returns whether the enum variant set is empty. /// /// This isn't particularly useful, as there is no real reason to use /// enum map for enums without variants. However, it is provided for /// consistency with data structures providing len method (and I will /// admit, to avoid clippy warnings). /// /// # Examples /// /// ``` /// extern crate enum_map; /// /// use enum_map::{Enum, EnumMap}; /// /// #[derive(Enum)] /// enum Void {} /// /// #[derive(Enum)] /// enum SingleVariant { /// Variant, /// } /// /// fn main() { /// assert!(EnumMap::<Void, ()>::new().is_empty()); /// assert!(!EnumMap::<SingleVariant, ()>::new().is_empty()); /// } #[inline] pub fn is_empty(&self) -> bool { self.as_slice().is_empty() } /// Swaps two indexes. /// /// # Examples /// /// ``` /// # extern crate enum_map; /// use enum_map::enum_map; /// /// fn main() { /// let mut map = enum_map! { false => 0, true => 1 }; /// map.swap(false, true); /// assert_eq!(map[false], 1); /// assert_eq!(map[true], 0); /// } /// ``` #[inline] pub fn swap(&mut self, a: K, b: K) { self.as_mut_slice().swap(a.to_usize(), b.to_usize()) } /// Converts an enum map to a slice representing values. #[inline] pub fn as_slice(&self) -> &[V] { K::slice(&self.array) } /// Converts a mutable enum map to a mutable slice representing values. #[inline] pub fn as_mut_slice(&mut self) -> &mut [V] { K::slice_mut(&mut self.array) } /// Returns a raw pointer to the enum map's slice. /// /// The caller must ensure that the slice outlives the pointer this /// function returns, or else it will end up pointing to garbage. /// /// # Examples /// /// ``` /// #[macro_use] /// extern crate enum_map; /// /// use enum_map::EnumMap; /// /// fn main() { /// let map = enum_map! 
{ 5 => 42, _ => 0 }; /// assert_eq!(unsafe { *map.as_ptr().offset(5) }, 42); /// } /// ``` #[inline] pub fn as_ptr(&self) -> *const V { self.as_slice().as_ptr() } /// Returns an unsafe mutable pointer to the enum map's slice. /// /// The caller must ensure that the slice outlives the pointer this /// function returns, or else it will end up pointing to garbage. /// /// # Examples /// /// ``` /// #[macro_use] /// extern crate enum_map; /// /// use enum_map::EnumMap; /// /// fn main() { /// let mut map = enum_map! { _ => 0 }; /// unsafe { /// *map.as_mut_ptr().offset(11) = 23 /// }; /// assert_eq!(map[11], 23); /// } /// ``` #[inline] pub fn as_mut_ptr(&mut self) -> *mut V { self.as_mut_slice().as_mut_ptr() } } impl<F: FnMut(K) -> V, K: Enum<V>, V> From<F> for EnumMap<K, V> { #[inline] fn from(f: F) -> Self { EnumMap { array: K::from_function(f), } } } pub fn from_fn<K, V>(f: impl FnMut(K) -> V) -> EnumMap<K, V> where K: Enum<V>, { f.into() }
use spin::Mutex; use io::{Io, Pio, ReadOnly, WriteOnly}; pub static PS2: Mutex<Ps2> = Mutex::new(Ps2::new()); pub unsafe fn init() { PS2.lock().init(); } bitflags! { flags StatusFlags: u8 { const OUTPUT_FULL = 1, const INPUT_FULL = 1 << 1, const SYSTEM = 1 << 2, const COMMAND = 1 << 3, const TIME_OUT = 1 << 6, const PARITY = 1 << 7 } } bitflags! { flags ConfigFlags: u8 { const FIRST_INTERRUPT = 1, const SECOND_INTERRUPT = 1 << 1, const POST_PASSED = 1 << 2, // 1 << 3 should be zero const FIRST_DISABLED = 1 << 4, const SECOND_DISABLED = 1 << 5, const FIRST_TRANSLATE = 1 << 6, // 1 << 7 should be zero } } #[repr(u8)] enum Command { ReadConfig = 0x20, WriteConfig = 0x60, DisableSecond = 0xA7, EnableSecond = 0xA8, TestSecond = 0xA9, TestController = 0xAA, TestFirst = 0xAB, Diagnostic = 0xAC, DisableFirst = 0xAD, EnableFirst = 0xAE, WriteSecond = 0xD4 } #[repr(u8)] enum KeyboardCommand { EnableReporting = 0xF4, SetDefaults = 0xF6, Reset = 0xFF } #[repr(u8)] enum MouseCommand { EnableReporting = 0xF4, SetDefaults = 0xF6, Reset = 0xFF } bitflags! { flags MousePacketFlags: u8 { const LEFT_BUTTON = 1, const RIGHT_BUTTON = 1 << 1, const MIDDLE_BUTTON = 1 << 2, const ALWAYS_ON = 1 << 3, const X_SIGN = 1 << 4, const Y_SIGN = 1 << 5, const X_OVERFLOW = 1 << 6, const Y_OVERFLOW = 1 << 7 } } pub struct Ps2 { data: Pio<u8>, status: ReadOnly<Pio<u8>>, command: WriteOnly<Pio<u8>>, mouse: [u8; 3], mouse_i: usize } impl Ps2 { const fn new() -> Ps2 { Ps2 { data: Pio::new(0x60), status: ReadOnly::new(Pio::new(0x64)), command: WriteOnly::new(Pio::new(0x64)), mouse: [0; 3], mouse_i: 0 } } fn status(&mut self) -> StatusFlags { StatusFlags::from_bits_truncate(self.status.read()) } fn wait_write(&mut self) { while self.status().contains(INPUT_FULL) {} } fn wait_read(&mut self) { while ! 
self.status().contains(OUTPUT_FULL) {} } fn flush_read(&mut self) { while self.status().contains(OUTPUT_FULL) { self.data.read(); } } fn command(&mut self, command: Command) { self.wait_write(); self.command.write(command as u8); } fn read(&mut self) -> u8 { self.wait_read(); self.data.read() } fn write(&mut self, data: u8) { self.wait_write(); self.data.write(data); } fn config(&mut self) -> ConfigFlags { self.command(Command::ReadConfig); ConfigFlags::from_bits_truncate(self.read()) } fn set_config(&mut self, config: ConfigFlags) { self.command(Command::WriteConfig); self.write(config.bits()); } fn keyboard_command(&mut self, command: KeyboardCommand) -> u8 { self.write(command as u8); self.read() } fn mouse_command(&mut self, command: MouseCommand) -> u8 { self.command(Command::WriteSecond); self.write(command as u8); self.read() } fn init(&mut self) { // Disable devices self.command(Command::DisableFirst); self.command(Command::DisableSecond); // Clear remaining data self.flush_read(); // Disable clocks, disable interrupts, and disable translate { let mut config = self.config(); config.insert(FIRST_DISABLED); config.insert(SECOND_DISABLED); config.remove(FIRST_TRANSLATE); config.remove(FIRST_INTERRUPT); config.remove(SECOND_INTERRUPT); self.set_config(config); } // Perform the self test self.command(Command::TestController); let self_test = self.read(); if self_test != 0x55 { // TODO: Do reset on failure print!("PS/2 Self Test Failure: {:X}\n", self_test); return; } // Enable clocks and interrupts { let mut config = self.config(); config.remove(FIRST_DISABLED); config.remove(SECOND_DISABLED); config.insert(FIRST_INTERRUPT); config.insert(SECOND_INTERRUPT); self.set_config(config); } // Enable devices self.command(Command::EnableFirst); self.command(Command::EnableSecond); // Reset and enable scanning on keyboard // TODO: Check for ack self.keyboard_command(KeyboardCommand::Reset); self.keyboard_command(KeyboardCommand::EnableReporting); // Reset and enable 
scanning on mouse // TODO: Check for ack self.mouse_command(MouseCommand::Reset); self.mouse_command(MouseCommand::EnableReporting); } pub fn on_keyboard(&mut self) { let data = self.data.read(); print!("KEY {:X}\n", data); } pub fn on_mouse(&mut self) { self.mouse[self.mouse_i] = self.data.read(); self.mouse_i += 1; if self.mouse_i >= self.mouse.len() { self.mouse_i = 0; let flags = MousePacketFlags::from_bits_truncate(self.mouse[0]); let mut x = self.mouse[1] as isize; if flags.contains(X_SIGN) { x -= 0x100; } let mut y = self.mouse[2] as isize; if flags.contains(Y_SIGN) { y -= 0x100; } print!("MOUSE {}, {}, {:?}\n", x, y, flags); } } } Set up fourth mouse packet use spin::Mutex; use io::{Io, Pio, ReadOnly, WriteOnly}; pub static PS2: Mutex<Ps2> = Mutex::new(Ps2::new()); pub unsafe fn init() { PS2.lock().init(); } bitflags! { flags StatusFlags: u8 { const OUTPUT_FULL = 1, const INPUT_FULL = 1 << 1, const SYSTEM = 1 << 2, const COMMAND = 1 << 3, const TIME_OUT = 1 << 6, const PARITY = 1 << 7 } } bitflags! { flags ConfigFlags: u8 { const FIRST_INTERRUPT = 1, const SECOND_INTERRUPT = 1 << 1, const POST_PASSED = 1 << 2, // 1 << 3 should be zero const FIRST_DISABLED = 1 << 4, const SECOND_DISABLED = 1 << 5, const FIRST_TRANSLATE = 1 << 6, // 1 << 7 should be zero } } #[repr(u8)] enum Command { ReadConfig = 0x20, WriteConfig = 0x60, DisableSecond = 0xA7, EnableSecond = 0xA8, TestSecond = 0xA9, TestController = 0xAA, TestFirst = 0xAB, Diagnostic = 0xAC, DisableFirst = 0xAD, EnableFirst = 0xAE, WriteSecond = 0xD4 } #[repr(u8)] enum KeyboardCommand { EnableReporting = 0xF4, SetDefaults = 0xF6, Reset = 0xFF } #[repr(u8)] enum MouseCommand { GetDeviceId = 0xF2, EnableReporting = 0xF4, SetDefaults = 0xF6, Reset = 0xFF } #[repr(u8)] enum MouseCommandData { SetSampleRate = 0xF3, } bitflags! 
{ flags MousePacketFlags: u8 { const LEFT_BUTTON = 1, const RIGHT_BUTTON = 1 << 1, const MIDDLE_BUTTON = 1 << 2, const ALWAYS_ON = 1 << 3, const X_SIGN = 1 << 4, const Y_SIGN = 1 << 5, const X_OVERFLOW = 1 << 6, const Y_OVERFLOW = 1 << 7 } } pub struct Ps2 { data: Pio<u8>, status: ReadOnly<Pio<u8>>, command: WriteOnly<Pio<u8>>, mouse: [u8; 4], mouse_i: usize, mouse_extra: bool } impl Ps2 { const fn new() -> Ps2 { Ps2 { data: Pio::new(0x60), status: ReadOnly::new(Pio::new(0x64)), command: WriteOnly::new(Pio::new(0x64)), mouse: [0; 4], mouse_i: 0, mouse_extra: false } } fn status(&mut self) -> StatusFlags { StatusFlags::from_bits_truncate(self.status.read()) } fn wait_write(&mut self) { while self.status().contains(INPUT_FULL) {} } fn wait_read(&mut self) { while ! self.status().contains(OUTPUT_FULL) {} } fn flush_read(&mut self) { while self.status().contains(OUTPUT_FULL) { print!("FLUSH: {:X}\n", self.data.read()); } } fn command(&mut self, command: Command) { self.wait_write(); self.command.write(command as u8); } fn read(&mut self) -> u8 { self.wait_read(); self.data.read() } fn write(&mut self, data: u8) { self.wait_write(); self.data.write(data); } fn config(&mut self) -> ConfigFlags { self.command(Command::ReadConfig); ConfigFlags::from_bits_truncate(self.read()) } fn set_config(&mut self, config: ConfigFlags) { self.command(Command::WriteConfig); self.write(config.bits()); } fn keyboard_command(&mut self, command: KeyboardCommand) -> u8 { self.write(command as u8); self.read() } fn mouse_command(&mut self, command: MouseCommand) -> u8 { self.command(Command::WriteSecond); self.write(command as u8); self.read() } fn mouse_command_data(&mut self, command: MouseCommandData, data: u8) -> u8 { self.command(Command::WriteSecond); self.write(command as u8); self.read(); self.command(Command::WriteSecond); self.write(data as u8); self.read() } fn init(&mut self) { // Disable devices self.command(Command::DisableFirst); self.command(Command::DisableSecond); // Clear 
remaining data self.flush_read(); // Disable clocks, disable interrupts, and disable translate { let mut config = self.config(); config.insert(FIRST_DISABLED); config.insert(SECOND_DISABLED); config.remove(FIRST_TRANSLATE); config.remove(FIRST_INTERRUPT); config.remove(SECOND_INTERRUPT); self.set_config(config); } // Perform the self test self.command(Command::TestController); let self_test = self.read(); if self_test != 0x55 { // TODO: Do reset on failure print!("PS/2 Self Test Failure: {:X}\n", self_test); return; } // Enable devices self.command(Command::EnableFirst); self.command(Command::EnableSecond); // Reset and enable scanning on keyboard // TODO: Check for ack print!("KEYBOARD RESET {:X}\n", self.keyboard_command(KeyboardCommand::Reset)); print!("KEYBOARD RESET RESULT {:X} == 0xAA\n", self.read()); self.flush_read(); // Reset and enable scanning on mouse // TODO: Check for ack print!("MOUSE RESET {:X}\n", self.mouse_command(MouseCommand::Reset)); print!("MOUSE RESET RESULT {:X} == 0xAA\n", self.read()); print!("MOUSE RESET ID {:X} == 0x00\n", self.read()); self.flush_read(); // Enable extra packet on mouse print!("SAMPLE 200 {:X}\n", self.mouse_command_data(MouseCommandData::SetSampleRate, 200)); print!("SAMPLE 100 {:X}\n", self.mouse_command_data(MouseCommandData::SetSampleRate, 100)); print!("SAMPLE 80 {:X}\n", self.mouse_command_data(MouseCommandData::SetSampleRate, 80)); print!("GET ID {:X}\n", self.mouse_command(MouseCommand::GetDeviceId)); let mouse_id = self.read(); print!("MOUSE ID: {:X} == 0x03\n", mouse_id); self.mouse_extra = mouse_id == 3; // Enable extra buttons, TODO /* if self.mouse_extra { print!("SAMPLE 200 {:X}\n", self.mouse_command_data(MouseCommandData::SetSampleRate, 200)); print!("SAMPLE 200 {:X}\n", self.mouse_command_data(MouseCommandData::SetSampleRate, 200)); print!("SAMPLE 80 {:X}\n", self.mouse_command_data(MouseCommandData::SetSampleRate, 80)); print!("GET ID {:X}\n", self.mouse_command(MouseCommand::GetDeviceId)); let 
mouse_id = self.read(); print!("MOUSE ID: {:X} == 0x04\n", mouse_id); } */ // Set sample rate to maximum print!("SAMPLE 200 {:X}\n", self.mouse_command_data(MouseCommandData::SetSampleRate, 200)); // Enable data reporting print!("KEYBOARD ENABLE {:X}\n", self.keyboard_command(KeyboardCommand::EnableReporting)); print!("MOUSE ENABLE {:X}\n", self.mouse_command(MouseCommand::EnableReporting)); // Enable clocks and interrupts { let mut config = self.config(); config.remove(FIRST_DISABLED); config.remove(SECOND_DISABLED); config.insert(FIRST_INTERRUPT); config.insert(SECOND_INTERRUPT); self.set_config(config); } } pub fn on_keyboard(&mut self) { let data = self.data.read(); print!("KEY {:X}\n", data); } pub fn on_mouse(&mut self) { self.mouse[self.mouse_i] = self.data.read(); self.mouse_i += 1; if self.mouse_i >= self.mouse.len() || (!self.mouse_extra && self.mouse_i >= 3) { self.mouse_i = 0; let flags = MousePacketFlags::from_bits_truncate(self.mouse[0]); let mut x = self.mouse[1] as isize; if flags.contains(X_SIGN) { x -= 0x100; } let mut y = self.mouse[2] as isize; if flags.contains(Y_SIGN) { y -= 0x100; } let extra = if self.mouse_extra { self.mouse[3] } else { 0 }; print!("MOUSE {:?}, {}, {}, {}\n", flags, x, y, extra); } } }
// Copyright 2014-2017 The Rooster Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use serde::de::{Deserialize, Deserializer, Error, Visitor}; use serde::ser::{Serialize, Serializer}; use std::convert::Into; use std::fmt; use std::ops::Deref; use std::ops::Drop; use std::{ptr, sync::atomic}; #[derive(Clone, Debug, PartialEq, Eq)] pub struct SafeString { pub inner: String, } struct StringVisitor; impl<'de> Visitor<'de> for StringVisitor { fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("a string") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: Error, { Ok(String::from(v)) } type Value = String; } impl SafeString { pub fn new(inner: String) -> SafeString { SafeString { inner: inner } } } impl Drop for SafeString { fn drop(&mut self) { let default = u8::default(); for c in unsafe { self.inner.as_bytes_mut() } { unsafe { ptr::write_volatile(c, default) }; } atomic::fence(atomic::Ordering::SeqCst); atomic::compiler_fence(atomic::Ordering::SeqCst); } } impl Deref for SafeString { type Target = str; fn deref(&self) -> &str { self.inner.deref() } } impl Into<SafeString> for String { fn into(self) -> SafeString { SafeString::new(self) } } impl<'a> Into<SafeString> for &'a str { fn into(self) -> SafeString { self.to_string().into() } } impl Serialize for SafeString { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { serializer.serialize_str(self.deref()) } } impl<'de> 
Deserialize<'de> for SafeString { fn deserialize<D>(deserializer: D) -> Result<SafeString, D::Error> where D: Deserializer<'de>, { deserializer .deserialize_string(StringVisitor) .map(|parsed_value| SafeString { inner: parsed_value, }) } } #[cfg(test)] mod test { use safe_string::SafeString; use serde_json; use serde_json::Error; #[test] fn safe_string_serialization() { let s = SafeString { inner: String::from("blabla"), }; match serde_json::to_string(&s) { Ok(json) => assert_eq!("\"blabla\"", json), Err(_) => panic!("Serialization failed, somehow"), } } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] pub struct TestStruct { password: SafeString, } #[test] fn safe_string_within_struct_serialization() { let ts = TestStruct { password: SafeString { inner: String::from("blabla"), }, }; match serde_json::to_string(&ts) { Ok(json) => assert_eq!("{\"password\":\"blabla\"}", json), Err(_) => panic!("Serialization failed, somehow"), } } #[test] fn safe_string_deserialization() { let s = "\"blabla\""; let res: Result<SafeString, Error> = serde_json::from_str(s); match res { Ok(ss) => assert_eq!( ss, SafeString { inner: String::from("blabla") } ), Err(_) => panic!("Deserialization failed"), } } #[test] fn safe_string_within_struct_deserialization() { let json = "{\"password\":\"blabla\"}"; let res: Result<TestStruct, Error> = serde_json::from_str(json); match res { Ok(ts) => assert_eq!( ts, TestStruct { password: SafeString { inner: String::from("blabla") } } ), Err(_) => panic!("Deserialization failed"), } } } Fixes tests // Copyright 2014-2017 The Rooster Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use serde::de::{Deserialize, Deserializer, Error, Visitor}; use serde::ser::{Serialize, Serializer}; use std::convert::Into; use std::fmt; use std::ops::Deref; use std::ops::Drop; use std::{ptr, sync::atomic}; #[derive(Clone, Debug, PartialEq, Eq)] pub struct SafeString { pub inner: String, } struct StringVisitor; impl<'de> Visitor<'de> for StringVisitor { fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("a string") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: Error, { Ok(String::from(v)) } type Value = String; } impl SafeString { pub fn new(inner: String) -> SafeString { SafeString { inner: inner } } } impl Drop for SafeString { fn drop(&mut self) { let default = u8::default(); for c in unsafe { self.inner.as_bytes_mut() } { unsafe { ptr::write_volatile(c, default) }; } atomic::fence(atomic::Ordering::SeqCst); atomic::compiler_fence(atomic::Ordering::SeqCst); } } impl Deref for SafeString { type Target = str; fn deref(&self) -> &str { self.inner.deref() } } impl Into<SafeString> for String { fn into(self) -> SafeString { SafeString::new(self) } } impl<'a> Into<SafeString> for &'a str { fn into(self) -> SafeString { self.to_string().into() } } impl Serialize for SafeString { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { serializer.serialize_str(self.deref()) } } impl<'de> Deserialize<'de> for SafeString { fn deserialize<D>(deserializer: D) -> Result<SafeString, D::Error> where D: Deserializer<'de>, { deserializer .deserialize_string(StringVisitor) 
.map(|parsed_value| SafeString { inner: parsed_value, }) } } #[cfg(test)] mod test { use safe_string::SafeString; use serde::{Deserialize, Serialize}; use serde_json; use serde_json::Error; #[test] fn safe_string_serialization() { let s = SafeString { inner: String::from("blabla"), }; match serde_json::to_string(&s) { Ok(json) => assert_eq!("\"blabla\"", json), Err(_) => panic!("Serialization failed, somehow"), } } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] pub struct TestStruct { password: SafeString, } #[test] fn safe_string_within_struct_serialization() { let ts = TestStruct { password: SafeString { inner: String::from("blabla"), }, }; match serde_json::to_string(&ts) { Ok(json) => assert_eq!("{\"password\":\"blabla\"}", json), Err(_) => panic!("Serialization failed, somehow"), } } #[test] fn safe_string_deserialization() { let s = "\"blabla\""; let res: Result<SafeString, Error> = serde_json::from_str(s); match res { Ok(ss) => assert_eq!( ss, SafeString { inner: String::from("blabla") } ), Err(_) => panic!("Deserialization failed"), } } #[test] fn safe_string_within_struct_deserialization() { let json = "{\"password\":\"blabla\"}"; let res: Result<TestStruct, Error> = serde_json::from_str(json); match res { Ok(ts) => assert_eq!( ts, TestStruct { password: SafeString { inner: String::from("blabla") } } ), Err(_) => panic!("Deserialization failed"), } } }
use video; use video::Window; use surface; use surface::Surface; use pixels; use get_error; use SdlResult; use std::ptr; use libc; use libc::{c_int, uint32_t, c_float, c_double, c_void, size_t}; use std::string; use rect::Point; use rect::Rect; use std::num::FromPrimitive; use std::vec::Vec; use std::c_vec::CVec; #[allow(non_camel_case_types)] pub mod ll { use libc::{c_int, c_char, c_void, c_float, c_double}; use libc::{uint8_t, uint32_t}; use rect::Rect; use rect::Point; use surface::ll::SDL_Surface; use video::ll::SDL_Window; pub type SDL_Rect = Rect; pub type SDL_Point = Point; pub type SDL_bool = c_int; //SDL_render.h pub enum SDL_RendererFlags { SDL_RENDERER_SOFTWARE = 0x00000001, SDL_RENDERER_ACCELERATED = 0x00000002, SDL_RENDERER_PRESENTVSYNC = 0x00000004, SDL_RENDERER_TARGETTEXTURE = 0x00000008 } #[repr(C)] pub struct SDL_RendererInfo { pub name: *const c_char, pub flags: uint32_t, pub num_texture_formats: uint32_t, pub texture_formats: [uint32_t, ..16], pub max_texture_width: c_int, pub max_texture_height: c_int, } pub enum SDL_TextureAccess { SDL_TEXTUREACCESS_STATIC = 0, SDL_TEXTUREACCESS_STREAMING = 1, SDL_TEXTUREACCESS_TARGET = 2 } pub enum SDL_TextureModulate { SDL_TEXTUREMODULATE_NONE = 0x00000000, SDL_TEXTUREMODULATE_COLOR = 0x00000001, SDL_TEXTUREMODULATE_ALPHA = 0x00000002 } #[deriving(FromPrimitive)] #[repr(C)] pub enum SDL_RendererFlip { SDL_FLIP_NONE = 0x00000000, SDL_FLIP_HORIZONTAL = 0x00000001, SDL_FLIP_VERTICAL = 0x00000002 } #[repr(C)] pub struct SDL_Renderer; #[repr(C)] pub struct SDL_Texture; //SDL_blendmode.h #[deriving(FromPrimitive)] #[repr(C)] pub enum SDL_BlendMode { SDL_BLENDMODE_NONE = 0x00000000, SDL_BLENDMODE_BLEND = 0x00000001, SDL_BLENDMODE_ADD = 0x00000002, SDL_BLENDMODE_MOD = 0x00000004 } extern "C" { pub fn SDL_GetNumRenderDrivers() -> c_int; pub fn SDL_GetRenderDriverInfo(index: c_int, info: *const SDL_RendererInfo) -> c_int; pub fn SDL_CreateWindowAndRenderer(width: c_int, height: c_int, window_flags: uint32_t, window: 
*const *const SDL_Window, renderer: *const *const SDL_Renderer) -> c_int; pub fn SDL_CreateRenderer(window: *const SDL_Window, index: c_int, flags: uint32_t) -> *const SDL_Renderer; pub fn SDL_CreateSoftwareRenderer(surface: *const SDL_Surface) -> *const SDL_Renderer; pub fn SDL_GetRenderer(window: *const SDL_Window) -> *const SDL_Renderer; pub fn SDL_GetRendererInfo(renderer: *const SDL_Renderer, info: *const SDL_RendererInfo) -> c_int; pub fn SDL_GetRendererOutputSize(renderer: *const SDL_Renderer, w: *const c_int, h: *const c_int) -> c_int; pub fn SDL_CreateTexture(renderer: *const SDL_Renderer, format: uint32_t, access: c_int, w: c_int, h: c_int) -> *const SDL_Texture; pub fn SDL_CreateTextureFromSurface(renderer: *const SDL_Renderer, surface: *const SDL_Surface) -> *const SDL_Texture; pub fn SDL_QueryTexture(texture: *const SDL_Texture, format: *const uint32_t, access: *const c_int, w: *const c_int, h: *const c_int) -> c_int; pub fn SDL_SetTextureColorMod(texture: *const SDL_Texture, r: uint8_t, g: uint8_t, b: uint8_t) -> c_int; pub fn SDL_GetTextureColorMod(texture: *const SDL_Texture, r: *const uint8_t, g: *const uint8_t, b: *const uint8_t) -> c_int; pub fn SDL_SetTextureAlphaMod(texture: *const SDL_Texture, alpha: uint8_t) -> c_int; pub fn SDL_GetTextureAlphaMod(texture: *const SDL_Texture, alpha: *const uint8_t) -> c_int; pub fn SDL_SetTextureBlendMode(texture: *const SDL_Texture, blendMode: SDL_BlendMode) -> c_int; pub fn SDL_GetTextureBlendMode(texture: *const SDL_Texture, blendMode: *const SDL_BlendMode) -> c_int; pub fn SDL_UpdateTexture(texture: *const SDL_Texture, rect: *const SDL_Rect, pixels: *const c_void, pitch: c_int) -> c_int; pub fn SDL_LockTexture(texture: *const SDL_Texture, rect: *const SDL_Rect, pixels: *const *const c_void, pitch: *const c_int) -> c_int; pub fn SDL_UnlockTexture(texture: *const SDL_Texture); pub fn SDL_RenderTargetSupported(renderer: *const SDL_Renderer) -> SDL_bool; pub fn SDL_SetRenderTarget(renderer: *const 
SDL_Renderer, texture: *const SDL_Texture) -> c_int; pub fn SDL_GetRenderTarget(renderer: *const SDL_Renderer) -> *const SDL_Texture; pub fn SDL_RenderSetLogicalSize(renderer: *const SDL_Renderer, w: c_int, h: c_int) -> c_int; pub fn SDL_RenderGetLogicalSize(renderer: *const SDL_Renderer, w: *const c_int, h: *const c_int); pub fn SDL_RenderSetViewport(renderer: *const SDL_Renderer, rect: *const SDL_Rect) -> c_int; pub fn SDL_RenderGetViewport(renderer: *const SDL_Renderer, rect: *const SDL_Rect); pub fn SDL_RenderSetClipRect(renderer: *const SDL_Renderer, rect: *const SDL_Rect) -> c_int; pub fn SDL_RenderGetClipRect(renderer: *const SDL_Renderer, rect: *const SDL_Rect); pub fn SDL_RenderSetScale(renderer: *const SDL_Renderer, scaleX: c_float, scaleY: c_float) -> c_int; pub fn SDL_RenderGetScale(renderer: *const SDL_Renderer, scaleX: *const c_float, scaleY: *const c_float); pub fn SDL_SetRenderDrawColor(renderer: *const SDL_Renderer, r: uint8_t, g: uint8_t, b: uint8_t, a: uint8_t) -> c_int; pub fn SDL_GetRenderDrawColor(renderer: *const SDL_Renderer, r: *const uint8_t, g: *const uint8_t, b: *const uint8_t, a: *const uint8_t) -> c_int; pub fn SDL_SetRenderDrawBlendMode(renderer: *const SDL_Renderer, blendMode: SDL_BlendMode) -> c_int; pub fn SDL_GetRenderDrawBlendMode(renderer: *const SDL_Renderer, blendMode: *const SDL_BlendMode) -> c_int; pub fn SDL_RenderClear(renderer: *const SDL_Renderer) -> c_int; pub fn SDL_RenderDrawPoint(renderer: *const SDL_Renderer, x: c_int, y: c_int) -> c_int; pub fn SDL_RenderDrawPoints(renderer: *const SDL_Renderer, Points: *const SDL_Point, count: c_int) -> c_int; pub fn SDL_RenderDrawLine(renderer: *const SDL_Renderer, x1: c_int, y1: c_int, x2: c_int, y2: c_int) -> c_int; pub fn SDL_RenderDrawLines(renderer: *const SDL_Renderer, Points: *const SDL_Point, count: c_int) -> c_int; pub fn SDL_RenderDrawRect(renderer: *const SDL_Renderer, rect: *const SDL_Rect) -> c_int; pub fn SDL_RenderDrawRects(renderer: *const SDL_Renderer, rects: 
*const SDL_Rect, count: c_int) -> c_int; pub fn SDL_RenderFillRect(renderer: *const SDL_Renderer, rect: *const SDL_Rect) -> c_int; pub fn SDL_RenderFillRects(renderer: *const SDL_Renderer, rects: *const SDL_Rect, count: c_int) -> c_int; pub fn SDL_RenderCopy(renderer: *const SDL_Renderer, texture: *const SDL_Texture, srcrect: *const SDL_Rect, dstrect: *const SDL_Rect) -> c_int; pub fn SDL_RenderCopyEx(renderer: *const SDL_Renderer, texture: *const SDL_Texture, srcrect: *const SDL_Rect, dstrect: *const SDL_Rect, angle: c_double, center: *const SDL_Point, flip: SDL_RendererFlip) -> c_int; pub fn SDL_RenderReadPixels(renderer: *const SDL_Renderer, rect: *const SDL_Rect, format: uint32_t, pixels: *const c_void, pitch: c_int) -> c_int; pub fn SDL_RenderPresent(renderer: *const SDL_Renderer); pub fn SDL_DestroyTexture(texture: *const SDL_Texture); pub fn SDL_DestroyRenderer(renderer: *const SDL_Renderer); pub fn SDL_GL_BindTexture(texture: *const SDL_Texture, texw: *const c_float, texh: *const c_float) -> c_int; pub fn SDL_GL_UnbindTexture(texture: *const SDL_Texture) -> c_int; } } pub enum RenderDriverIndex { Auto, Index(int) } #[deriving(PartialEq, FromPrimitive)] pub enum TextureAccess { Static = ll::SDL_TextureAccess::SDL_TEXTUREACCESS_STATIC as int, Streaming = ll::SDL_TextureAccess::SDL_TEXTUREACCESS_STREAMING as int, Target = ll::SDL_TextureAccess::SDL_TEXTUREACCESS_TARGET as int } bitflags! 
{ flags RendererFlags: u32 { const SOFTWARE = ll::SDL_RendererFlags::SDL_RENDERER_SOFTWARE as u32, const ACCELERATED = ll::SDL_RendererFlags::SDL_RENDERER_ACCELERATED as u32, const PRESENTVSYNC = ll::SDL_RendererFlags::SDL_RENDERER_PRESENTVSYNC as u32, const TARGETTEXTURE = ll::SDL_RendererFlags::SDL_RENDERER_TARGETTEXTURE as u32 } } #[deriving(PartialEq)] pub struct RendererInfo { pub name: String, pub flags: RendererFlags, pub texture_formats: Vec<pixels::PixelFormatFlag>, pub max_texture_width: int, pub max_texture_height: int } #[deriving(PartialEq, FromPrimitive)] pub enum BlendMode { None = ll::SDL_BlendMode::SDL_BLENDMODE_NONE as int, Blend = ll::SDL_BlendMode::SDL_BLENDMODE_BLEND as int, Add = ll::SDL_BlendMode::SDL_BLENDMODE_ADD as int, Mod = ll::SDL_BlendMode::SDL_BLENDMODE_MOD as int } #[deriving(PartialEq)] pub enum RendererFlip { FlipNone = ll::SDL_RendererFlip::SDL_FLIP_NONE as int, FlipHorizontal = ll::SDL_RendererFlip::SDL_FLIP_HORIZONTAL as int, FlipVertical = ll::SDL_RendererFlip::SDL_FLIP_VERTICAL as int, } impl RendererInfo { pub fn from_ll(info: &ll::SDL_RendererInfo) -> RendererInfo { let actual_flags = RendererFlags::from_bits(info.flags).unwrap(); unsafe { let texture_formats: Vec<pixels::PixelFormatFlag> = info.texture_formats[0..(info.num_texture_formats as uint)].iter().map(|&format| { FromPrimitive::from_i64(format as i64).unwrap() }).collect(); RendererInfo { name: string::raw::from_buf(info.name as *const _), flags: actual_flags, texture_formats: texture_formats, max_texture_width: info.max_texture_width as int, max_texture_height: info.max_texture_height as int } } } } pub enum RendererParent { SurfaceParent(Surface), WindowParent(Window) } #[allow(raw_pointer_deriving)] pub struct Renderer { raw: *const ll::SDL_Renderer, parent: Option<RendererParent>, owned: bool } #[unsafe_destructor] impl Drop for Renderer { fn drop(&mut self) { if self.owned { unsafe { ll::SDL_DestroyRenderer(self.raw); } } } } impl Renderer { pub fn 
from_window(window: Window, index: RenderDriverIndex, renderer_flags: RendererFlags) -> SdlResult<Renderer> { let index = match index { RenderDriverIndex::Auto => -1, RenderDriverIndex::Index(x) => x }; let raw = unsafe { ll::SDL_CreateRenderer(window.raw(), index as c_int, renderer_flags.bits()) }; if raw == ptr::null() { Err(get_error()) } else { Ok(Renderer{ raw: raw, parent: Some(RendererParent::WindowParent(window)), owned: true,}) } } pub fn new_with_window(width: int, height: int, window_flags: video::WindowFlags) -> SdlResult<Renderer> { let raw_window: *const video::ll::SDL_Window = ptr::null(); let raw_renderer: *const ll::SDL_Renderer = ptr::null(); let result = unsafe { ll::SDL_CreateWindowAndRenderer(width as c_int, height as c_int, window_flags.bits(), &raw_window, &raw_renderer) == 0}; if result { let window = unsafe { Window::from_ll(raw_window, true) }; Ok(Renderer { raw: raw_renderer, parent: Some(RendererParent::WindowParent(window)), owned: true }) } else { Err(get_error()) } } pub fn from_surface(surface: surface::Surface) -> SdlResult<Renderer> { let result = unsafe { ll::SDL_CreateSoftwareRenderer(surface.raw()) }; if result == ptr::null() { Ok(Renderer { raw: result, parent: Some(RendererParent::SurfaceParent(surface)), owned: true }) } else { Err(get_error()) } } #[inline] pub fn get_parent<'a>(&'a self) -> &'a RendererParent { self.parent.as_ref().unwrap() } #[inline] pub fn unwrap_parent(mut self) -> RendererParent { use std::mem; mem::replace(&mut self.parent, None).unwrap() } #[inline] pub fn raw(&self) -> *const ll::SDL_Renderer { self.raw } #[inline] pub fn owned(&self) -> bool { self.owned } pub fn set_draw_color(&self, color: pixels::Color) -> SdlResult<()> { let ret = match color { pixels::Color::RGB(r, g, b) => { unsafe { ll::SDL_SetRenderDrawColor(self.raw, r, g, b, 255) } }, pixels::Color::RGBA(r, g, b, a) => { unsafe { ll::SDL_SetRenderDrawColor(self.raw, r, g, b, a) } } }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub 
fn get_draw_color(&self) -> SdlResult<pixels::Color> { let r: u8 = 0; let g: u8 = 0; let b: u8 = 0; let a: u8 = 0; let result = unsafe { ll::SDL_GetRenderDrawColor(self.raw, &r, &g, &b, &a) == 0 }; if result { Ok(pixels::Color::RGBA(r, g, b, a)) } else { Err(get_error()) } } pub fn clear(&self) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderClear(self.raw) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn present(&self) { unsafe { ll::SDL_RenderPresent(self.raw) } } pub fn get_output_size(&self) -> SdlResult<(int, int)> { let width: c_int = 0; let height: c_int = 0; let result = unsafe { ll::SDL_GetRendererOutputSize(self.raw, &width, &height) == 0 }; if result { Ok((width as int, height as int)) } else { Err(get_error()) } } pub fn create_texture(&self, format: pixels::PixelFormatFlag, access: TextureAccess, width: int, height: int) -> SdlResult<Texture> { let result = unsafe { ll::SDL_CreateTexture(self.raw, format as uint32_t, access as c_int, width as c_int, height as c_int) }; if result == ptr::null() { Err(get_error()) } else { Ok(Texture { raw: result, owned: true } ) } } pub fn create_texture_from_surface(&self, surface: &surface::Surface) -> SdlResult<Texture> { let result = unsafe { ll::SDL_CreateTextureFromSurface(self.raw, surface.raw()) }; if result == ptr::null() { Err(get_error()) } else { Ok(Texture { raw: result, owned: true } ) } } pub fn render_target_supported(&self) -> bool { unsafe { ll::SDL_RenderTargetSupported(self.raw) == 1 } } pub fn set_render_target(&self, texture: Option<&Texture>) -> SdlResult<()> { unsafe { let actual_texture = match texture { Some(texture) => texture.raw, None => ptr::null() }; if ll::SDL_SetRenderTarget(self.raw, actual_texture) == 0 { Ok(()) } else { Err(get_error()) } } } pub fn get_render_target(&self) -> Option<Texture> { let raw = unsafe { ll::SDL_GetRenderTarget(self.raw) }; if raw == ptr::null() { None } else { Some(Texture{ raw: raw, owned: false }) } } pub fn set_logical_size(&self, width: 
int, height: int) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderSetLogicalSize(self.raw, width as c_int, height as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_logical_size(&self) -> (int, int) { let width: c_int = 0; let height: c_int = 0; unsafe { ll::SDL_RenderGetLogicalSize(self.raw, &width, &height) }; (width as int, height as int) } pub fn set_viewport(&self, rect: Option<Rect>) -> SdlResult<()> { let ptr = match rect { Some(ref rect) => rect as *const _, None => ptr::null() }; let ret = unsafe { ll::SDL_RenderSetViewport(self.raw, ptr) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_viewport(&self) -> Rect { let rect = Rect{ x: 0, y: 0, w: 0, h: 0 }; unsafe { ll::SDL_RenderGetViewport(self.raw, &rect) }; rect } pub fn set_clip_rect(&self, rect: &Rect) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderSetClipRect(self.raw, rect) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_clip_rect(&self) -> Rect { let rect = Rect{ x: 0, y: 0, w: 0, h: 0 }; unsafe { ll::SDL_RenderGetClipRect(self.raw, &rect) }; rect } pub fn set_scale(&self, scale_x: f64, scale_y: f64) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderSetScale(self.raw, scale_x as c_float, scale_y as c_float) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_scale(&self) -> (f64, f64) { let scale_x: c_float = 0.0; let scale_y: c_float = 0.0; unsafe { ll::SDL_RenderGetScale(self.raw, &scale_x, &scale_y) }; (scale_x as f64, scale_y as f64) } pub fn draw_point(&self, point: Point) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderDrawPoint(self.raw, point.x, point.y) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn draw_points(&self, points: &[Point]) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderDrawPoints(self.raw, points.as_ptr(), points.len() as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn draw_line(&self, start: Point, end: Point) -> SdlResult<()> { let ret = unsafe { 
ll::SDL_RenderDrawLine(self.raw, start.x, start.y, end.x, end.y) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn draw_lines(&self, points: &[Point]) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderDrawLines(self.raw, points.as_ptr(), points.len() as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn draw_rect(&self, rect: &Rect) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderDrawRect(self.raw, rect) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn draw_rects(&self, rects: &[Rect]) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderDrawRects(self.raw, rects.as_ptr(), rects.len() as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn fill_rect(&self, rect: &Rect) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderFillRect(self.raw, rect) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn fill_rects(&self, rects: &[Rect]) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderFillRects(self.raw, rects.as_ptr(), rects.len() as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn copy(&self, texture: &Texture, src: Option<Rect>, dst: Option<Rect>) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderCopy( self.raw, texture.raw, match src { Some(ref rect) => rect as *const _, None => ptr::null() }, match dst { Some(ref rect) => rect as *const _, None => ptr::null() } ) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } //TODO: Check whether RendererFlip is supposed to be combinable pub fn copy_ex(&self, texture: &Texture, src: Option<Rect>, dst: Option<Rect>, angle: f64, center: Option<Point>, flip: RendererFlip) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderCopyEx( self.raw, texture.raw, match src { Some(ref rect) => rect as *const _, None => ptr::null() }, match dst { Some(ref rect) => rect as *const _, None => ptr::null() }, angle as c_double, match center { Some(ref point) => point as *const _, None => ptr::null() }, FromPrimitive::from_i64(flip as i64).unwrap() ) }; if 
ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn read_pixels(&self, rect: Option<Rect>, format: pixels::PixelFormatFlag) -> SdlResult<CVec<u8>> { unsafe { let (actual_rect, w, h) = match rect { Some(ref rect) => (rect as *const _, rect.w as uint, rect.h as uint), None => { let (w, h) = try!(self.get_output_size()); (ptr::null(), w as uint, h as uint) } }; let size = format.byte_size_of_pixels(w * h); let pixels = libc::malloc(size as size_t) as *const u8; let pitch = w * format.byte_size_per_pixel(); // calculated pitch let ret = ll::SDL_RenderReadPixels(self.raw, actual_rect, format as uint32_t, pixels as *const c_void, pitch as c_int); if ret == 0 { Ok(CVec::new_with_dtor(pixels as *mut u8, size, proc() { libc::free(pixels as *mut c_void) })) } else { Err(get_error()) } } } } pub struct TextureQuery { pub format: pixels::PixelFormatFlag, pub access: TextureAccess, pub width: int, pub height: int } #[deriving(PartialEq)] #[allow(raw_pointer_deriving)] pub struct Texture { pub raw: *const ll::SDL_Texture, pub owned: bool } impl Drop for Texture { fn drop(&mut self) { if self.owned { unsafe { ll::SDL_DestroyTexture(self.raw); } } } } impl Texture { pub fn query(&self) -> SdlResult<TextureQuery> { let format: uint32_t = 0; let access: c_int = 0; let width: c_int = 0; let height: c_int = 0; let result = unsafe { ll::SDL_QueryTexture(self.raw, &format, &access, &width, &height) == 0 }; if result { Ok(TextureQuery { format: FromPrimitive::from_i64(format as i64).unwrap(), access: FromPrimitive::from_i64(access as i64).unwrap(), width: width as int, height: height as int }) } else { Err(get_error()) } } pub fn set_color_mod(&self, red: u8, green: u8, blue: u8) -> SdlResult<()> { let ret = unsafe { ll::SDL_SetTextureColorMod(self.raw, red, green, blue) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_color_mod(&self) -> SdlResult<(u8, u8, u8)> { let r = 0; let g = 0; let b = 0; let result = unsafe { ll::SDL_GetTextureColorMod(self.raw, &r, &g, &b) == 
0 }; if result { Ok((r, g, b)) } else { Err(get_error()) } } pub fn set_alpha_mod(&self, alpha: u8) -> SdlResult<()> { let ret = unsafe { ll::SDL_SetTextureAlphaMod(self.raw, alpha) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_alpha_mod(&self) -> SdlResult<u8> { let alpha = 0; let result = unsafe { ll::SDL_GetTextureAlphaMod(self.raw, &alpha) == 0 }; if result { Ok(alpha) } else { Err(get_error()) } } pub fn set_blend_mode(&self, blend: BlendMode) -> SdlResult<()> { let ret = unsafe { ll::SDL_SetTextureBlendMode(self.raw, FromPrimitive::from_i64(blend as i64).unwrap()) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_blend_mode(&self) -> SdlResult<BlendMode> { let blend: i64 = 0; let result = unsafe { ll::SDL_GetTextureBlendMode(self.raw, &FromPrimitive::from_i64(blend as i64).unwrap()) == 0 }; if result { Ok(FromPrimitive::from_i64(blend as i64).unwrap()) } else { Err(get_error()) } } pub fn update(&self, rect: Option<Rect>, pixel_data: &[u8], pitch: int) -> SdlResult<()> { let ret = unsafe { let actual_rect = match rect { Some(ref rect) => rect as *const _, None => ptr::null() }; ll::SDL_UpdateTexture(self.raw, actual_rect, pixel_data.as_ptr() as *const _, pitch as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } fn unsafe_lock(&self, rect: Option<Rect>) -> SdlResult<(CVec<u8>, i32)> { let q = try!(self.query()); unsafe { let actual_rect = match rect { Some(ref rect) => rect as *const _, None => ptr::null() }; let pixels : *const c_void = ptr::null(); let pitch = 0i32; let ret = ll::SDL_LockTexture(self.raw, actual_rect, &pixels, &pitch); let size = q.format.byte_size_of_pixels((q.width * q.height) as uint); if ret == 0 { Ok((CVec::new(pixels as *mut u8, size), pitch)) } else { Err(get_error()) } } } pub fn with_lock(&self, rect: Option<Rect>, func: |CVec<u8>, i32| -> ()) -> SdlResult<()> { match self.unsafe_lock(rect) { Ok((cvec, pitch)) => { func(cvec, pitch); self.unlock(); Ok(()) } Err(e) => Err(e), } } fn 
unlock(&self) { unsafe { ll::SDL_UnlockTexture(self.raw) } } pub fn gl_bind_texture(&self) -> SdlResult<(f64, f64)> { let texw: c_float = 0.0; let texh: c_float = 0.0; let result = unsafe { ll::SDL_GL_BindTexture(self.raw, &texw, &texh) == 0 }; if result { Ok((texw as f64, texh as f64)) } else { Err("Operation not supported".into_string()) } } pub fn gl_unbind_texture(&self) -> bool { unsafe { ll::SDL_GL_UnbindTexture(self.raw) == 0 } } pub fn gl_with_bind<R>(&self, f: |tex_w: f64, tex_h: f64| -> R) -> R { unsafe { let texw: c_float = 0.0; let texh: c_float = 0.0; if ll::SDL_GL_BindTexture(self.raw, &texw, &texh) != 0 { panic!("could not bind texture"); } let rv = f(texw as f64, texh as f64); ll::SDL_GL_UnbindTexture(self.raw); rv } } } pub fn get_num_render_drivers() -> SdlResult<int> { let result = unsafe { ll::SDL_GetNumRenderDrivers() }; if result > 0 { Ok(result as int) } else { Err(get_error()) } } pub fn get_render_driver_info(index: int) -> SdlResult<RendererInfo> { let out = ll::SDL_RendererInfo { name: ptr::null(), flags: 0, num_texture_formats: 0, texture_formats: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], max_texture_width: 0, max_texture_height: 0, }; let result = unsafe { ll::SDL_GetRenderDriverInfo(index as c_int, &out) == 0 }; if result { Ok(RendererInfo::from_ll(&out)) } else { Err(get_error()) } } /* //TODO: Figure out how to support this with our current struct format pub fn SDL_GetRenderer(window: *SDL_Window) -> *SDL_Renderer; */ Remove "Flip" prefix from RendererFlip enum variants. 
// SDL2 rendering API: raw FFI declarations (`mod ll`) plus the safe wrappers
// built on top of them further down in this file.
use video;
use video::Window;
use surface;
use surface::Surface;
use pixels;
use get_error;
use SdlResult;
use std::ptr;
use libc;
use libc::{c_int, uint32_t, c_float, c_double, c_void, size_t};
use std::string;
use rect::Point;
use rect::Rect;
use std::num::FromPrimitive;
use std::vec::Vec;
use std::c_vec::CVec;

// Low-level mirror of SDL_render.h / SDL_blendmode.h.
#[allow(non_camel_case_types)]
pub mod ll {
    use libc::{c_int, c_char, c_void, c_float, c_double};
    use libc::{uint8_t, uint32_t};
    use rect::Rect;
    use rect::Point;
    use surface::ll::SDL_Surface;
    use video::ll::SDL_Window;

    // The safe Rect/Point wrappers share SDL's C layout, so alias them directly.
    pub type SDL_Rect = Rect;
    pub type SDL_Point = Point;
    pub type SDL_bool = c_int;

    //SDL_render.h
    pub enum SDL_RendererFlags {
        SDL_RENDERER_SOFTWARE = 0x00000001,
        SDL_RENDERER_ACCELERATED = 0x00000002,
        SDL_RENDERER_PRESENTVSYNC = 0x00000004,
        SDL_RENDERER_TARGETTEXTURE = 0x00000008
    }

    #[repr(C)]
    pub struct SDL_RendererInfo {
        pub name: *const c_char,
        pub flags: uint32_t,
        pub num_texture_formats: uint32_t,
        pub texture_formats: [uint32_t, ..16],
        pub max_texture_width: c_int,
        pub max_texture_height: c_int,
    }

    pub enum SDL_TextureAccess {
        SDL_TEXTUREACCESS_STATIC = 0,
        SDL_TEXTUREACCESS_STREAMING = 1,
        SDL_TEXTUREACCESS_TARGET = 2
    }

    pub enum SDL_TextureModulate {
        SDL_TEXTUREMODULATE_NONE = 0x00000000,
        SDL_TEXTUREMODULATE_COLOR = 0x00000001,
        SDL_TEXTUREMODULATE_ALPHA = 0x00000002
    }

    #[deriving(FromPrimitive)]
    #[repr(C)]
    pub enum SDL_RendererFlip {
        SDL_FLIP_NONE = 0x00000000,
        SDL_FLIP_HORIZONTAL = 0x00000001,
        SDL_FLIP_VERTICAL = 0x00000002
    }

    // Opaque SDL handles; only ever used behind raw pointers.
    #[repr(C)]
    pub struct SDL_Renderer;
    #[repr(C)]
    pub struct SDL_Texture;

    //SDL_blendmode.h
    #[deriving(FromPrimitive)]
    #[repr(C)]
    pub enum SDL_BlendMode {
        SDL_BLENDMODE_NONE = 0x00000000,
        SDL_BLENDMODE_BLEND = 0x00000001,
        SDL_BLENDMODE_ADD = 0x00000002,
        SDL_BLENDMODE_MOD = 0x00000004
    }

    extern "C" {
        pub fn SDL_GetNumRenderDrivers() -> c_int;
        pub fn SDL_GetRenderDriverInfo(index: c_int, info: *const SDL_RendererInfo) -> c_int;
        pub fn SDL_CreateWindowAndRenderer(width: c_int, height: c_int, window_flags: uint32_t, window:
*const *const SDL_Window, renderer: *const *const SDL_Renderer) -> c_int; pub fn SDL_CreateRenderer(window: *const SDL_Window, index: c_int, flags: uint32_t) -> *const SDL_Renderer; pub fn SDL_CreateSoftwareRenderer(surface: *const SDL_Surface) -> *const SDL_Renderer; pub fn SDL_GetRenderer(window: *const SDL_Window) -> *const SDL_Renderer; pub fn SDL_GetRendererInfo(renderer: *const SDL_Renderer, info: *const SDL_RendererInfo) -> c_int; pub fn SDL_GetRendererOutputSize(renderer: *const SDL_Renderer, w: *const c_int, h: *const c_int) -> c_int; pub fn SDL_CreateTexture(renderer: *const SDL_Renderer, format: uint32_t, access: c_int, w: c_int, h: c_int) -> *const SDL_Texture; pub fn SDL_CreateTextureFromSurface(renderer: *const SDL_Renderer, surface: *const SDL_Surface) -> *const SDL_Texture; pub fn SDL_QueryTexture(texture: *const SDL_Texture, format: *const uint32_t, access: *const c_int, w: *const c_int, h: *const c_int) -> c_int; pub fn SDL_SetTextureColorMod(texture: *const SDL_Texture, r: uint8_t, g: uint8_t, b: uint8_t) -> c_int; pub fn SDL_GetTextureColorMod(texture: *const SDL_Texture, r: *const uint8_t, g: *const uint8_t, b: *const uint8_t) -> c_int; pub fn SDL_SetTextureAlphaMod(texture: *const SDL_Texture, alpha: uint8_t) -> c_int; pub fn SDL_GetTextureAlphaMod(texture: *const SDL_Texture, alpha: *const uint8_t) -> c_int; pub fn SDL_SetTextureBlendMode(texture: *const SDL_Texture, blendMode: SDL_BlendMode) -> c_int; pub fn SDL_GetTextureBlendMode(texture: *const SDL_Texture, blendMode: *const SDL_BlendMode) -> c_int; pub fn SDL_UpdateTexture(texture: *const SDL_Texture, rect: *const SDL_Rect, pixels: *const c_void, pitch: c_int) -> c_int; pub fn SDL_LockTexture(texture: *const SDL_Texture, rect: *const SDL_Rect, pixels: *const *const c_void, pitch: *const c_int) -> c_int; pub fn SDL_UnlockTexture(texture: *const SDL_Texture); pub fn SDL_RenderTargetSupported(renderer: *const SDL_Renderer) -> SDL_bool; pub fn SDL_SetRenderTarget(renderer: *const 
SDL_Renderer, texture: *const SDL_Texture) -> c_int; pub fn SDL_GetRenderTarget(renderer: *const SDL_Renderer) -> *const SDL_Texture; pub fn SDL_RenderSetLogicalSize(renderer: *const SDL_Renderer, w: c_int, h: c_int) -> c_int; pub fn SDL_RenderGetLogicalSize(renderer: *const SDL_Renderer, w: *const c_int, h: *const c_int); pub fn SDL_RenderSetViewport(renderer: *const SDL_Renderer, rect: *const SDL_Rect) -> c_int; pub fn SDL_RenderGetViewport(renderer: *const SDL_Renderer, rect: *const SDL_Rect); pub fn SDL_RenderSetClipRect(renderer: *const SDL_Renderer, rect: *const SDL_Rect) -> c_int; pub fn SDL_RenderGetClipRect(renderer: *const SDL_Renderer, rect: *const SDL_Rect); pub fn SDL_RenderSetScale(renderer: *const SDL_Renderer, scaleX: c_float, scaleY: c_float) -> c_int; pub fn SDL_RenderGetScale(renderer: *const SDL_Renderer, scaleX: *const c_float, scaleY: *const c_float); pub fn SDL_SetRenderDrawColor(renderer: *const SDL_Renderer, r: uint8_t, g: uint8_t, b: uint8_t, a: uint8_t) -> c_int; pub fn SDL_GetRenderDrawColor(renderer: *const SDL_Renderer, r: *const uint8_t, g: *const uint8_t, b: *const uint8_t, a: *const uint8_t) -> c_int; pub fn SDL_SetRenderDrawBlendMode(renderer: *const SDL_Renderer, blendMode: SDL_BlendMode) -> c_int; pub fn SDL_GetRenderDrawBlendMode(renderer: *const SDL_Renderer, blendMode: *const SDL_BlendMode) -> c_int; pub fn SDL_RenderClear(renderer: *const SDL_Renderer) -> c_int; pub fn SDL_RenderDrawPoint(renderer: *const SDL_Renderer, x: c_int, y: c_int) -> c_int; pub fn SDL_RenderDrawPoints(renderer: *const SDL_Renderer, Points: *const SDL_Point, count: c_int) -> c_int; pub fn SDL_RenderDrawLine(renderer: *const SDL_Renderer, x1: c_int, y1: c_int, x2: c_int, y2: c_int) -> c_int; pub fn SDL_RenderDrawLines(renderer: *const SDL_Renderer, Points: *const SDL_Point, count: c_int) -> c_int; pub fn SDL_RenderDrawRect(renderer: *const SDL_Renderer, rect: *const SDL_Rect) -> c_int; pub fn SDL_RenderDrawRects(renderer: *const SDL_Renderer, rects: 
*const SDL_Rect, count: c_int) -> c_int; pub fn SDL_RenderFillRect(renderer: *const SDL_Renderer, rect: *const SDL_Rect) -> c_int; pub fn SDL_RenderFillRects(renderer: *const SDL_Renderer, rects: *const SDL_Rect, count: c_int) -> c_int; pub fn SDL_RenderCopy(renderer: *const SDL_Renderer, texture: *const SDL_Texture, srcrect: *const SDL_Rect, dstrect: *const SDL_Rect) -> c_int; pub fn SDL_RenderCopyEx(renderer: *const SDL_Renderer, texture: *const SDL_Texture, srcrect: *const SDL_Rect, dstrect: *const SDL_Rect, angle: c_double, center: *const SDL_Point, flip: SDL_RendererFlip) -> c_int; pub fn SDL_RenderReadPixels(renderer: *const SDL_Renderer, rect: *const SDL_Rect, format: uint32_t, pixels: *const c_void, pitch: c_int) -> c_int; pub fn SDL_RenderPresent(renderer: *const SDL_Renderer); pub fn SDL_DestroyTexture(texture: *const SDL_Texture); pub fn SDL_DestroyRenderer(renderer: *const SDL_Renderer); pub fn SDL_GL_BindTexture(texture: *const SDL_Texture, texw: *const c_float, texh: *const c_float) -> c_int; pub fn SDL_GL_UnbindTexture(texture: *const SDL_Texture) -> c_int; } } pub enum RenderDriverIndex { Auto, Index(int) } #[deriving(PartialEq, FromPrimitive)] pub enum TextureAccess { Static = ll::SDL_TextureAccess::SDL_TEXTUREACCESS_STATIC as int, Streaming = ll::SDL_TextureAccess::SDL_TEXTUREACCESS_STREAMING as int, Target = ll::SDL_TextureAccess::SDL_TEXTUREACCESS_TARGET as int } bitflags! 
{ flags RendererFlags: u32 { const SOFTWARE = ll::SDL_RendererFlags::SDL_RENDERER_SOFTWARE as u32, const ACCELERATED = ll::SDL_RendererFlags::SDL_RENDERER_ACCELERATED as u32, const PRESENTVSYNC = ll::SDL_RendererFlags::SDL_RENDERER_PRESENTVSYNC as u32, const TARGETTEXTURE = ll::SDL_RendererFlags::SDL_RENDERER_TARGETTEXTURE as u32 } } #[deriving(PartialEq)] pub struct RendererInfo { pub name: String, pub flags: RendererFlags, pub texture_formats: Vec<pixels::PixelFormatFlag>, pub max_texture_width: int, pub max_texture_height: int } #[deriving(PartialEq, FromPrimitive)] pub enum BlendMode { None = ll::SDL_BlendMode::SDL_BLENDMODE_NONE as int, Blend = ll::SDL_BlendMode::SDL_BLENDMODE_BLEND as int, Add = ll::SDL_BlendMode::SDL_BLENDMODE_ADD as int, Mod = ll::SDL_BlendMode::SDL_BLENDMODE_MOD as int } #[deriving(PartialEq)] pub enum RendererFlip { None = ll::SDL_RendererFlip::SDL_FLIP_NONE as int, Horizontal = ll::SDL_RendererFlip::SDL_FLIP_HORIZONTAL as int, Vertical = ll::SDL_RendererFlip::SDL_FLIP_VERTICAL as int, } impl RendererInfo { pub fn from_ll(info: &ll::SDL_RendererInfo) -> RendererInfo { let actual_flags = RendererFlags::from_bits(info.flags).unwrap(); unsafe { let texture_formats: Vec<pixels::PixelFormatFlag> = info.texture_formats[0..(info.num_texture_formats as uint)].iter().map(|&format| { FromPrimitive::from_i64(format as i64).unwrap() }).collect(); RendererInfo { name: string::raw::from_buf(info.name as *const _), flags: actual_flags, texture_formats: texture_formats, max_texture_width: info.max_texture_width as int, max_texture_height: info.max_texture_height as int } } } } pub enum RendererParent { SurfaceParent(Surface), WindowParent(Window) } #[allow(raw_pointer_deriving)] pub struct Renderer { raw: *const ll::SDL_Renderer, parent: Option<RendererParent>, owned: bool } #[unsafe_destructor] impl Drop for Renderer { fn drop(&mut self) { if self.owned { unsafe { ll::SDL_DestroyRenderer(self.raw); } } } } impl Renderer { pub fn from_window(window: 
Window, index: RenderDriverIndex, renderer_flags: RendererFlags) -> SdlResult<Renderer> {
        // -1 asks SDL to pick the first driver that supports the requested flags.
        let index = match index {
            RenderDriverIndex::Auto => -1,
            RenderDriverIndex::Index(x) => x
        };
        let raw = unsafe { ll::SDL_CreateRenderer(window.raw(), index as c_int, renderer_flags.bits()) };
        if raw == ptr::null() {
            Err(get_error())
        } else {
            // The renderer takes ownership of the window so it stays alive.
            Ok(Renderer{ raw: raw, parent: Some(RendererParent::WindowParent(window)), owned: true,})
        }
    }

    /// Creates a window and an accompanying renderer in one call.
    pub fn new_with_window(width: int, height: int, window_flags: video::WindowFlags) -> SdlResult<Renderer> {
        // Out-pointers filled in by SDL_CreateWindowAndRenderer.
        let raw_window: *const video::ll::SDL_Window = ptr::null();
        let raw_renderer: *const ll::SDL_Renderer = ptr::null();
        let result = unsafe { ll::SDL_CreateWindowAndRenderer(width as c_int, height as c_int, window_flags.bits(), &raw_window, &raw_renderer) == 0};
        if result {
            let window = unsafe { Window::from_ll(raw_window, true) };
            Ok(Renderer { raw: raw_renderer, parent: Some(RendererParent::WindowParent(window)), owned: true })
        } else {
            Err(get_error())
        }
    }

    /// Creates a 2D software renderer that draws into `surface`.
    pub fn from_surface(surface: surface::Surface) -> SdlResult<Renderer> {
        let result = unsafe { ll::SDL_CreateSoftwareRenderer(surface.raw()) };
        // BUG FIX: SDL_CreateSoftwareRenderer returns a null pointer on
        // failure. The Ok/Err branches were previously inverted, returning a
        // Renderer wrapping NULL on failure and an error on success.
        if result == ptr::null() {
            Err(get_error())
        } else {
            Ok(Renderer { raw: result, parent: Some(RendererParent::SurfaceParent(surface)), owned: true })
        }
    }

    /// Borrows the window or surface this renderer draws to.
    #[inline]
    pub fn get_parent<'a>(&'a self) -> &'a RendererParent { self.parent.as_ref().unwrap() }

    /// Takes the window/surface parent out of the renderer.
    #[inline]
    pub fn unwrap_parent(mut self) -> RendererParent {
        use std::mem;
        mem::replace(&mut self.parent, None).unwrap()
    }

    /// Raw SDL_Renderer pointer, for FFI interop.
    #[inline]
    pub fn raw(&self) -> *const ll::SDL_Renderer { self.raw }

    /// Whether Drop will destroy the underlying SDL renderer.
    #[inline]
    pub fn owned(&self) -> bool { self.owned }

    /// Sets the color used for subsequent draw and clear operations.
    /// A plain RGB color is treated as fully opaque (alpha = 255).
    pub fn set_draw_color(&self, color: pixels::Color) -> SdlResult<()> {
        let ret = match color {
            pixels::Color::RGB(r, g, b) => {
                unsafe { ll::SDL_SetRenderDrawColor(self.raw, r, g, b, 255) }
            },
            pixels::Color::RGBA(r, g, b, a) => {
                unsafe { ll::SDL_SetRenderDrawColor(self.raw, r, g, b, a) }
            }
        };
        if ret == 0 { Ok(()) } else { Err(get_error()) }
    }

    pub fn
get_draw_color(&self) -> SdlResult<pixels::Color> {
        // Out-params filled by SDL; the color is always reported back as RGBA.
        let r: u8 = 0;
        let g: u8 = 0;
        let b: u8 = 0;
        let a: u8 = 0;
        let result = unsafe { ll::SDL_GetRenderDrawColor(self.raw, &r, &g, &b, &a) == 0 };
        if result { Ok(pixels::Color::RGBA(r, g, b, a)) } else { Err(get_error()) }
    }

    /// Clears the current render target with the draw color.
    pub fn clear(&self) -> SdlResult<()> {
        let ret = unsafe { ll::SDL_RenderClear(self.raw) };
        if ret == 0 { Ok(()) } else { Err(get_error()) }
    }

    /// Presents the backbuffer to the screen.
    pub fn present(&self) {
        unsafe { ll::SDL_RenderPresent(self.raw) }
    }

    /// Queries the output size of the renderer, in pixels.
    pub fn get_output_size(&self) -> SdlResult<(int, int)> {
        let width: c_int = 0;
        let height: c_int = 0;
        let result = unsafe { ll::SDL_GetRendererOutputSize(self.raw, &width, &height) == 0 };
        if result { Ok((width as int, height as int)) } else { Err(get_error()) }
    }

    /// Creates a new texture for this renderer; the Texture owns the handle.
    pub fn create_texture(&self, format: pixels::PixelFormatFlag, access: TextureAccess, width: int, height: int) -> SdlResult<Texture> {
        let result = unsafe { ll::SDL_CreateTexture(self.raw, format as uint32_t, access as c_int, width as c_int, height as c_int) };
        if result == ptr::null() { Err(get_error()) } else { Ok(Texture { raw: result, owned: true } ) }
    }

    /// Creates a texture initialised from the pixels of `surface`.
    pub fn create_texture_from_surface(&self, surface: &surface::Surface) -> SdlResult<Texture> {
        let result = unsafe { ll::SDL_CreateTextureFromSurface(self.raw, surface.raw()) };
        if result == ptr::null() { Err(get_error()) } else { Ok(Texture { raw: result, owned: true } ) }
    }

    /// Whether this renderer supports render-to-texture.
    pub fn render_target_supported(&self) -> bool {
        unsafe { ll::SDL_RenderTargetSupported(self.raw) == 1 }
    }

    /// Redirects rendering into `texture`; `None` restores the default target.
    pub fn set_render_target(&self, texture: Option<&Texture>) -> SdlResult<()> {
        unsafe {
            let actual_texture = match texture {
                Some(texture) => texture.raw,
                None => ptr::null()
            };
            if ll::SDL_SetRenderTarget(self.raw, actual_texture) == 0 { Ok(()) } else { Err(get_error()) }
        }
    }

    /// Returns the current render target, or `None` for the default target.
    /// The handle is non-owning (`owned: false`), so dropping it does not
    /// destroy the underlying SDL texture.
    pub fn get_render_target(&self) -> Option<Texture> {
        let raw = unsafe { ll::SDL_GetRenderTarget(self.raw) };
        if raw == ptr::null() { None } else { Some(Texture{ raw: raw, owned: false }) }
    }

    pub fn set_logical_size(&self, width:
int, height: int) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderSetLogicalSize(self.raw, width as c_int, height as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_logical_size(&self) -> (int, int) { let width: c_int = 0; let height: c_int = 0; unsafe { ll::SDL_RenderGetLogicalSize(self.raw, &width, &height) }; (width as int, height as int) } pub fn set_viewport(&self, rect: Option<Rect>) -> SdlResult<()> { let ptr = match rect { Some(ref rect) => rect as *const _, None => ptr::null() }; let ret = unsafe { ll::SDL_RenderSetViewport(self.raw, ptr) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_viewport(&self) -> Rect { let rect = Rect{ x: 0, y: 0, w: 0, h: 0 }; unsafe { ll::SDL_RenderGetViewport(self.raw, &rect) }; rect } pub fn set_clip_rect(&self, rect: &Rect) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderSetClipRect(self.raw, rect) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_clip_rect(&self) -> Rect { let rect = Rect{ x: 0, y: 0, w: 0, h: 0 }; unsafe { ll::SDL_RenderGetClipRect(self.raw, &rect) }; rect } pub fn set_scale(&self, scale_x: f64, scale_y: f64) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderSetScale(self.raw, scale_x as c_float, scale_y as c_float) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_scale(&self) -> (f64, f64) { let scale_x: c_float = 0.0; let scale_y: c_float = 0.0; unsafe { ll::SDL_RenderGetScale(self.raw, &scale_x, &scale_y) }; (scale_x as f64, scale_y as f64) } pub fn draw_point(&self, point: Point) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderDrawPoint(self.raw, point.x, point.y) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn draw_points(&self, points: &[Point]) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderDrawPoints(self.raw, points.as_ptr(), points.len() as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn draw_line(&self, start: Point, end: Point) -> SdlResult<()> { let ret = unsafe { 
ll::SDL_RenderDrawLine(self.raw, start.x, start.y, end.x, end.y) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn draw_lines(&self, points: &[Point]) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderDrawLines(self.raw, points.as_ptr(), points.len() as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn draw_rect(&self, rect: &Rect) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderDrawRect(self.raw, rect) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn draw_rects(&self, rects: &[Rect]) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderDrawRects(self.raw, rects.as_ptr(), rects.len() as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn fill_rect(&self, rect: &Rect) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderFillRect(self.raw, rect) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn fill_rects(&self, rects: &[Rect]) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderFillRects(self.raw, rects.as_ptr(), rects.len() as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn copy(&self, texture: &Texture, src: Option<Rect>, dst: Option<Rect>) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderCopy( self.raw, texture.raw, match src { Some(ref rect) => rect as *const _, None => ptr::null() }, match dst { Some(ref rect) => rect as *const _, None => ptr::null() } ) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } //TODO: Check whether RendererFlip is supposed to be combinable pub fn copy_ex(&self, texture: &Texture, src: Option<Rect>, dst: Option<Rect>, angle: f64, center: Option<Point>, flip: RendererFlip) -> SdlResult<()> { let ret = unsafe { ll::SDL_RenderCopyEx( self.raw, texture.raw, match src { Some(ref rect) => rect as *const _, None => ptr::null() }, match dst { Some(ref rect) => rect as *const _, None => ptr::null() }, angle as c_double, match center { Some(ref point) => point as *const _, None => ptr::null() }, FromPrimitive::from_i64(flip as i64).unwrap() ) }; if 
ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn read_pixels(&self, rect: Option<Rect>, format: pixels::PixelFormatFlag) -> SdlResult<CVec<u8>> { unsafe { let (actual_rect, w, h) = match rect { Some(ref rect) => (rect as *const _, rect.w as uint, rect.h as uint), None => { let (w, h) = try!(self.get_output_size()); (ptr::null(), w as uint, h as uint) } }; let size = format.byte_size_of_pixels(w * h); let pixels = libc::malloc(size as size_t) as *const u8; let pitch = w * format.byte_size_per_pixel(); // calculated pitch let ret = ll::SDL_RenderReadPixels(self.raw, actual_rect, format as uint32_t, pixels as *const c_void, pitch as c_int); if ret == 0 { Ok(CVec::new_with_dtor(pixels as *mut u8, size, proc() { libc::free(pixels as *mut c_void) })) } else { Err(get_error()) } } } } pub struct TextureQuery { pub format: pixels::PixelFormatFlag, pub access: TextureAccess, pub width: int, pub height: int } #[deriving(PartialEq)] #[allow(raw_pointer_deriving)] pub struct Texture { pub raw: *const ll::SDL_Texture, pub owned: bool } impl Drop for Texture { fn drop(&mut self) { if self.owned { unsafe { ll::SDL_DestroyTexture(self.raw); } } } } impl Texture { pub fn query(&self) -> SdlResult<TextureQuery> { let format: uint32_t = 0; let access: c_int = 0; let width: c_int = 0; let height: c_int = 0; let result = unsafe { ll::SDL_QueryTexture(self.raw, &format, &access, &width, &height) == 0 }; if result { Ok(TextureQuery { format: FromPrimitive::from_i64(format as i64).unwrap(), access: FromPrimitive::from_i64(access as i64).unwrap(), width: width as int, height: height as int }) } else { Err(get_error()) } } pub fn set_color_mod(&self, red: u8, green: u8, blue: u8) -> SdlResult<()> { let ret = unsafe { ll::SDL_SetTextureColorMod(self.raw, red, green, blue) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_color_mod(&self) -> SdlResult<(u8, u8, u8)> { let r = 0; let g = 0; let b = 0; let result = unsafe { ll::SDL_GetTextureColorMod(self.raw, &r, &g, &b) == 
0 }; if result { Ok((r, g, b)) } else { Err(get_error()) } } pub fn set_alpha_mod(&self, alpha: u8) -> SdlResult<()> { let ret = unsafe { ll::SDL_SetTextureAlphaMod(self.raw, alpha) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_alpha_mod(&self) -> SdlResult<u8> { let alpha = 0; let result = unsafe { ll::SDL_GetTextureAlphaMod(self.raw, &alpha) == 0 }; if result { Ok(alpha) } else { Err(get_error()) } } pub fn set_blend_mode(&self, blend: BlendMode) -> SdlResult<()> { let ret = unsafe { ll::SDL_SetTextureBlendMode(self.raw, FromPrimitive::from_i64(blend as i64).unwrap()) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } pub fn get_blend_mode(&self) -> SdlResult<BlendMode> { let blend: i64 = 0; let result = unsafe { ll::SDL_GetTextureBlendMode(self.raw, &FromPrimitive::from_i64(blend as i64).unwrap()) == 0 }; if result { Ok(FromPrimitive::from_i64(blend as i64).unwrap()) } else { Err(get_error()) } } pub fn update(&self, rect: Option<Rect>, pixel_data: &[u8], pitch: int) -> SdlResult<()> { let ret = unsafe { let actual_rect = match rect { Some(ref rect) => rect as *const _, None => ptr::null() }; ll::SDL_UpdateTexture(self.raw, actual_rect, pixel_data.as_ptr() as *const _, pitch as c_int) }; if ret == 0 { Ok(()) } else { Err(get_error()) } } fn unsafe_lock(&self, rect: Option<Rect>) -> SdlResult<(CVec<u8>, i32)> { let q = try!(self.query()); unsafe { let actual_rect = match rect { Some(ref rect) => rect as *const _, None => ptr::null() }; let pixels : *const c_void = ptr::null(); let pitch = 0i32; let ret = ll::SDL_LockTexture(self.raw, actual_rect, &pixels, &pitch); let size = q.format.byte_size_of_pixels((q.width * q.height) as uint); if ret == 0 { Ok((CVec::new(pixels as *mut u8, size), pitch)) } else { Err(get_error()) } } } pub fn with_lock(&self, rect: Option<Rect>, func: |CVec<u8>, i32| -> ()) -> SdlResult<()> { match self.unsafe_lock(rect) { Ok((cvec, pitch)) => { func(cvec, pitch); self.unlock(); Ok(()) } Err(e) => Err(e), } } fn 
unlock(&self) { unsafe { ll::SDL_UnlockTexture(self.raw) } } pub fn gl_bind_texture(&self) -> SdlResult<(f64, f64)> { let texw: c_float = 0.0; let texh: c_float = 0.0; let result = unsafe { ll::SDL_GL_BindTexture(self.raw, &texw, &texh) == 0 }; if result { Ok((texw as f64, texh as f64)) } else { Err("Operation not supported".into_string()) } } pub fn gl_unbind_texture(&self) -> bool { unsafe { ll::SDL_GL_UnbindTexture(self.raw) == 0 } } pub fn gl_with_bind<R>(&self, f: |tex_w: f64, tex_h: f64| -> R) -> R { unsafe { let texw: c_float = 0.0; let texh: c_float = 0.0; if ll::SDL_GL_BindTexture(self.raw, &texw, &texh) != 0 { panic!("could not bind texture"); } let rv = f(texw as f64, texh as f64); ll::SDL_GL_UnbindTexture(self.raw); rv } } } pub fn get_num_render_drivers() -> SdlResult<int> { let result = unsafe { ll::SDL_GetNumRenderDrivers() }; if result > 0 { Ok(result as int) } else { Err(get_error()) } } pub fn get_render_driver_info(index: int) -> SdlResult<RendererInfo> { let out = ll::SDL_RendererInfo { name: ptr::null(), flags: 0, num_texture_formats: 0, texture_formats: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], max_texture_width: 0, max_texture_height: 0, }; let result = unsafe { ll::SDL_GetRenderDriverInfo(index as c_int, &out) == 0 }; if result { Ok(RendererInfo::from_ll(&out)) } else { Err(get_error()) } } /* //TODO: Figure out how to support this with our current struct format pub fn SDL_GetRenderer(window: *SDL_Window) -> *SDL_Renderer; */
#![crate_name = "split"]

/*
 * This file is part of the uutils coreutils package.
 *
 * (c) Akira Hayakawa <ruby.wktk@gmail.com>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

#![feature(macro_rules)]

extern crate getopts;
extern crate libc;

use std::io;
use std::num::Int;
use std::char;

// Provides the crash! / crash_if_err! macros used for fatal errors below.
#[path = "../common/util.rs"]
mod util;

static NAME: &'static str = "split";
static VERSION: &'static str = "1.0.0";

// Entry point: parses command-line options into a Settings value and
// delegates the actual work to split(). Returns the process exit code.
pub fn uumain(args: Vec<String>) -> int {
    let opts = [
        getopts::optopt("a", "suffix-length", "use suffixes of length N (default 2)", "N"),
        getopts::optopt("b", "bytes", "put SIZE bytes per output file", "SIZE"),
        getopts::optopt("C", "line-bytes", "put at most SIZE bytes of lines per output file", "SIZE"),
        getopts::optflag("d", "numeric-suffixes", "use numeric suffixes instead of alphabetic"),
        getopts::optopt("l", "lines", "put NUMBER lines per output file", "NUMBER"),
        getopts::optflag("", "verbose", "print a diagnostic just before each output file is opened"),
        getopts::optflag("h", "help", "display help and exit"),
        getopts::optflag("V", "version", "output version information and exit"),
    ];
    let matches = match getopts::getopts(args.tail(), &opts) {
        Ok(m) => m,
        Err(f) => crash!(1, "{}", f)
    };
    if matches.opt_present("h") {
        println!("{} v{}", NAME, VERSION);
        println!("");
        println!("Usage:");
        println!(" {0} [OPTION]... [INPUT [PREFIX]]", NAME);
        println!("");
        io::print(getopts::usage("Output fixed-size pieces of INPUT to PREFIXaa, PREFIX ab, ...; default size is 1000, and default PREFIX is 'x'. With no INPUT, or when INPUT is -, read standard input."
, &opts).as_slice()); println!(""); println!("SIZE may have a multiplier suffix: b for 512, k for 1K, m for 1 Meg."); return 0; } if matches.opt_present("V") { println!("{} v{}", NAME, VERSION); return 0; } let mut settings = Settings { prefix: "".to_string(), numeric_suffix: false, suffix_length: 0, input: "".to_string(), strategy: "".to_string(), strategy_param: "".to_string(), verbose: false, }; settings.numeric_suffix = if matches.opt_present("d") { true } else { false }; settings.suffix_length = match matches.opt_str("a") { Some(n) => match from_str(n.as_slice()) { Some(m) => m, None => crash!(1, "cannot parse num") }, None => 2 }; settings.verbose = if matches.opt_present("verbose") { true } else { false }; settings.strategy = "l".to_string(); settings.strategy_param = "1000".to_string(); let strategies = vec!["b", "C", "l"]; for e in strategies.iter() { match matches.opt_str(*e) { Some(a) => { if settings.strategy.as_slice() == "l" { settings.strategy = e.to_string(); settings.strategy_param = a; } else { crash!(1, "{}: cannot split in more than one way", NAME) } }, None => {} } } let mut v = matches.free.iter(); let (input, prefix) = match (v.next(), v.next()) { (Some(a), None) => (a.to_string(), "x".to_string()), (Some(a), Some(b)) => (a.to_string(), b.to_string()), (None, _) => ("-".to_string(), "x".to_string()), }; settings.input = input; settings.prefix = prefix; split(&settings) } struct Settings { prefix: String, numeric_suffix: bool, suffix_length: uint, input: String, strategy: String, strategy_param: String, verbose: bool, } struct SplitControl { current_line: String, // Don't touch request_new_file: bool, // Splitter implementation requests new file } trait Splitter { // Factory pattern fn new(_hint: Option<Self>, &Settings) -> Box<Splitter>; // Consume the current_line and return the consumed string fn consume(&mut self, &mut SplitControl) -> String; } struct LineSplitter { saved_lines_to_write: uint, lines_to_write: uint, } impl Splitter for 
LineSplitter {
    fn new(_: Option<LineSplitter>, settings: &Settings) -> Box<Splitter> {
        // strategy_param is the "-l NUMBER" argument: lines per output file.
        let n = match from_str(settings.strategy_param.as_slice()) {
            Some(a) => a,
            _ => crash!(1, "invalid number of lines")
        };
        box LineSplitter {
            saved_lines_to_write: n,
            lines_to_write: n,
        } as Box<Splitter>
    }

    // Passes the current line through unchanged; after `n` lines it asks the
    // driver loop to rotate to the next output file.
    fn consume(&mut self, control: &mut SplitControl) -> String {
        self.lines_to_write -= 1;
        if self.lines_to_write == 0 {
            self.lines_to_write = self.saved_lines_to_write;
            control.request_new_file = true;
        }
        control.current_line.clone()
    }
}

struct ByteSplitter {
    saved_bytes_to_write: uint, // configured chunk size ("-b SIZE")
    bytes_to_write: uint,       // remaining budget for the current output file
}

impl Splitter for ByteSplitter {
    fn new(_: Option<ByteSplitter>, settings: &Settings) -> Box<Splitter> {
        // SIZE may carry a one-letter multiplier suffix: b=512, k=1024, m=1024*1024.
        let mut strategy_param : Vec<char> = settings.strategy_param.chars().collect();
        let suffix = strategy_param.pop().unwrap();
        let multiplier = match suffix {
            '0'...'9' => 1u,
            'b' => 512u,
            'k' => 1024u,
            'm' => 1024u * 1024u,
            _ => crash!(1, "invalid number of bytes")
        };
        let n = if suffix.is_alphabetic() {
            // A suffix was present: parse the digits that remain after popping it.
            match String::from_chars(strategy_param.as_slice()).as_slice().parse::<uint>() {
                Some(a) => a,
                _ => crash!(1, "invalid number of bytes")
            }
        } else {
            match settings.strategy_param.as_slice().parse::<uint>() {
                Some(a) => a,
                _ => crash!(1, "invalid number of bytes")
            }
        };
        box ByteSplitter {
            saved_bytes_to_write: n * multiplier,
            bytes_to_write: n * multiplier,
        } as Box<Splitter>
    }

    // Consumes up to the remaining byte budget from the current line.
    // NOTE(review): the budget is measured with char_len() but the cut is a
    // byte-indexed slice(); the two disagree on multi-byte (non-ASCII) input —
    // TODO confirm whether non-ASCII input is expected here.
    fn consume(&mut self, control: &mut SplitControl) -> String {
        let line = control.current_line.clone();
        let n = std::cmp::min(line.as_slice().char_len(), self.bytes_to_write);
        self.bytes_to_write -= n;
        if n == 0 {
            // Budget exhausted: reset it and request a fresh output file.
            self.bytes_to_write = self.saved_bytes_to_write;
            control.request_new_file = true;
        }
        line.as_slice().slice(0, n).to_string()
    }
}

// (1, 3) -> "aab"
// Alphabetic suffix for output file number `i`: base-26 over 'a'..'z',
// left-padded to `width` characters.
fn str_prefix(i: uint, width: uint) -> String {
    let mut c = "".to_string();
    let mut n = i;
    let mut w = width;
    while w > 0 {
        w -= 1;
        let div = Int::pow(26 as uint, w);
        let r = n / div;
        n -= r * div;
        c.push(char::from_u32((r as u32) + 97).unwrap()); // 97 == 'a'
    }
    c
}
// (1, 3) -> "001" fn num_prefix(i: uint, width: uint) -> String { let mut c = "".to_string(); let mut n = i; let mut w = width; while w > 0 { w -= 1; let div = Int::pow(10 as uint, w); let r = n / div; n -= r * div; c.push(char::from_digit(r, 10).unwrap()); } c } fn split(settings: &Settings) -> int { let mut reader = io::BufferedReader::new( if settings.input.as_slice() == "-" { box io::stdio::stdin_raw() as Box<Reader> } else { let r = match io::File::open(&Path::new(settings.input.clone())) { Ok(a) => a, Err(_) => crash!(1, "cannot open '{}' for reading: No such file or directory", settings.input) }; box r as Box<Reader> } ); let mut splitter: Box<Splitter> = match settings.strategy.as_slice() { "l" => Splitter::new(None::<LineSplitter>, settings), "b" => Splitter::new(None::<ByteSplitter>, settings), a @ _ => crash!(1, "strategy {} not supported", a) }; let mut control = SplitControl { current_line: "".to_string(), // Request new line request_new_file: true, // Request new file }; let mut writer = io::BufferedWriter::new(box io::stdio::stdout_raw() as Box<Writer>); let mut fileno = 0; loop { if control.current_line.as_slice().char_len() == 0 { match reader.read_line() { Ok(a) => { control.current_line = a; } Err(_) => { break; } } } if control.request_new_file { let mut filename = settings.prefix.to_string(); filename.push_str(if settings.numeric_suffix { num_prefix(fileno, settings.suffix_length) } else { str_prefix(fileno, settings.suffix_length) }.as_slice()); if fileno != 0 { crash_if_err!(1, writer.flush()); } fileno += 1; writer = io::BufferedWriter::new(box io::File::open_mode(&Path::new(filename.as_slice()), io::Open, io::Write) as Box<Writer>); control.request_new_file = false; } let consumed = splitter.consume(&mut control); crash_if_err!(1, writer.write_str(consumed.as_slice())); let advance = consumed.as_slice().char_len(); let clone = control.current_line.clone(); let sl = clone.as_slice(); control.current_line = sl.slice(advance, 
sl.char_len()).to_string(); } 0 } Removed deprecated calls. #![crate_name = "split"] /* * This file is part of the uutils coreutils package. * * (c) Akira Hayakawa <ruby.wktk@gmail.com> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ #![feature(macro_rules)] extern crate getopts; extern crate libc; use std::io; use std::num::Int; use std::char; #[path = "../common/util.rs"] mod util; static NAME: &'static str = "split"; static VERSION: &'static str = "1.0.0"; pub fn uumain(args: Vec<String>) -> int { let opts = [ getopts::optopt("a", "suffix-length", "use suffixes of length N (default 2)", "N"), getopts::optopt("b", "bytes", "put SIZE bytes per output file", "SIZE"), getopts::optopt("C", "line-bytes", "put at most SIZE bytes of lines per output file", "SIZE"), getopts::optflag("d", "numeric-suffixes", "use numeric suffixes instead of alphabetic"), getopts::optopt("l", "lines", "put NUMBER lines per output file", "NUMBER"), getopts::optflag("", "verbose", "print a diagnostic just before each output file is opened"), getopts::optflag("h", "help", "display help and exit"), getopts::optflag("V", "version", "output version information and exit"), ]; let matches = match getopts::getopts(args.tail(), &opts) { Ok(m) => m, Err(f) => crash!(1, "{}", f) }; if matches.opt_present("h") { println!("{} v{}", NAME, VERSION); println!(""); println!("Usage:"); println!(" {0} [OPTION]... [INPUT [PREFIX]]", NAME); println!(""); io::print(getopts::usage("Output fixed-size pieces of INPUT to PREFIXaa, PREFIX ab, ...; default size is 1000, and default PREFIX is 'x'. With no INPUT, or when INPUT is -, read standard input." 
, &opts).as_slice()); println!(""); println!("SIZE may have a multiplier suffix: b for 512, k for 1K, m for 1 Meg."); return 0; } if matches.opt_present("V") { println!("{} v{}", NAME, VERSION); return 0; } let mut settings = Settings { prefix: "".to_string(), numeric_suffix: false, suffix_length: 0, input: "".to_string(), strategy: "".to_string(), strategy_param: "".to_string(), verbose: false, }; settings.numeric_suffix = if matches.opt_present("d") { true } else { false }; settings.suffix_length = match matches.opt_str("a") { Some(n) => match n.as_slice().parse() { Some(m) => m, None => crash!(1, "cannot parse num") }, None => 2 }; settings.verbose = if matches.opt_present("verbose") { true } else { false }; settings.strategy = "l".to_string(); settings.strategy_param = "1000".to_string(); let strategies = vec!["b", "C", "l"]; for e in strategies.iter() { match matches.opt_str(*e) { Some(a) => { if settings.strategy.as_slice() == "l" { settings.strategy = e.to_string(); settings.strategy_param = a; } else { crash!(1, "{}: cannot split in more than one way", NAME) } }, None => {} } } let mut v = matches.free.iter(); let (input, prefix) = match (v.next(), v.next()) { (Some(a), None) => (a.to_string(), "x".to_string()), (Some(a), Some(b)) => (a.to_string(), b.to_string()), (None, _) => ("-".to_string(), "x".to_string()), }; settings.input = input; settings.prefix = prefix; split(&settings) } struct Settings { prefix: String, numeric_suffix: bool, suffix_length: uint, input: String, strategy: String, strategy_param: String, verbose: bool, } struct SplitControl { current_line: String, // Don't touch request_new_file: bool, // Splitter implementation requests new file } trait Splitter { // Factory pattern fn new(_hint: Option<Self>, &Settings) -> Box<Splitter>; // Consume the current_line and return the consumed string fn consume(&mut self, &mut SplitControl) -> String; } struct LineSplitter { saved_lines_to_write: uint, lines_to_write: uint, } impl Splitter for 
LineSplitter { fn new(_: Option<LineSplitter>, settings: &Settings) -> Box<Splitter> { let n = match settings.strategy_param.as_slice().parse() { Some(a) => a, _ => crash!(1, "invalid number of lines") }; box LineSplitter { saved_lines_to_write: n, lines_to_write: n, } as Box<Splitter> } fn consume(&mut self, control: &mut SplitControl) -> String { self.lines_to_write -= 1; if self.lines_to_write == 0 { self.lines_to_write = self.saved_lines_to_write; control.request_new_file = true; } control.current_line.clone() } } struct ByteSplitter { saved_bytes_to_write: uint, bytes_to_write: uint, } impl Splitter for ByteSplitter { fn new(_: Option<ByteSplitter>, settings: &Settings) -> Box<Splitter> { let mut strategy_param : Vec<char> = settings.strategy_param.chars().collect(); let suffix = strategy_param.pop().unwrap(); let multiplier = match suffix { '0'...'9' => 1u, 'b' => 512u, 'k' => 1024u, 'm' => 1024u * 1024u, _ => crash!(1, "invalid number of bytes") }; let n = if suffix.is_alphabetic() { match String::from_chars(strategy_param.as_slice()).as_slice().parse::<uint>() { Some(a) => a, _ => crash!(1, "invalid number of bytes") } } else { match settings.strategy_param.as_slice().parse::<uint>() { Some(a) => a, _ => crash!(1, "invalid number of bytes") } }; box ByteSplitter { saved_bytes_to_write: n * multiplier, bytes_to_write: n * multiplier, } as Box<Splitter> } fn consume(&mut self, control: &mut SplitControl) -> String { let line = control.current_line.clone(); let n = std::cmp::min(line.as_slice().chars().count(), self.bytes_to_write); self.bytes_to_write -= n; if n == 0 { self.bytes_to_write = self.saved_bytes_to_write; control.request_new_file = true; } line.as_slice().slice(0, n).to_string() } } // (1, 3) -> "aab" fn str_prefix(i: uint, width: uint) -> String { let mut c = "".to_string(); let mut n = i; let mut w = width; while w > 0 { w -= 1; let div = Int::pow(26 as uint, w); let r = n / div; n -= r * div; c.push(char::from_u32((r as u32) + 97).unwrap()); } 
c } // (1, 3) -> "001" fn num_prefix(i: uint, width: uint) -> String { let mut c = "".to_string(); let mut n = i; let mut w = width; while w > 0 { w -= 1; let div = Int::pow(10 as uint, w); let r = n / div; n -= r * div; c.push(char::from_digit(r, 10).unwrap()); } c } fn split(settings: &Settings) -> int { let mut reader = io::BufferedReader::new( if settings.input.as_slice() == "-" { box io::stdio::stdin_raw() as Box<Reader> } else { let r = match io::File::open(&Path::new(settings.input.clone())) { Ok(a) => a, Err(_) => crash!(1, "cannot open '{}' for reading: No such file or directory", settings.input) }; box r as Box<Reader> } ); let mut splitter: Box<Splitter> = match settings.strategy.as_slice() { "l" => Splitter::new(None::<LineSplitter>, settings), "b" => Splitter::new(None::<ByteSplitter>, settings), a @ _ => crash!(1, "strategy {} not supported", a) }; let mut control = SplitControl { current_line: "".to_string(), // Request new line request_new_file: true, // Request new file }; let mut writer = io::BufferedWriter::new(box io::stdio::stdout_raw() as Box<Writer>); let mut fileno = 0; loop { if control.current_line.as_slice().chars().count() == 0 { match reader.read_line() { Ok(a) => { control.current_line = a; } Err(_) => { break; } } } if control.request_new_file { let mut filename = settings.prefix.to_string(); filename.push_str(if settings.numeric_suffix { num_prefix(fileno, settings.suffix_length) } else { str_prefix(fileno, settings.suffix_length) }.as_slice()); if fileno != 0 { crash_if_err!(1, writer.flush()); } fileno += 1; writer = io::BufferedWriter::new(box io::File::open_mode(&Path::new(filename.as_slice()), io::Open, io::Write) as Box<Writer>); control.request_new_file = false; } let consumed = splitter.consume(&mut control); crash_if_err!(1, writer.write_str(consumed.as_slice())); let advance = consumed.as_slice().chars().count(); let clone = control.current_line.clone(); let sl = clone.as_slice(); control.current_line = sl.slice(advance, 
sl.chars().count()).to_string(); } 0 }
pub use std::path::Path; pub use std::fs::File; pub use std::error::Error; pub use runtime::Runtime; pub trait StorageBackend { fn name(&self) -> String; fn create(&self, file : File) -> Option<Error>; fn read(&self, path: Path) -> Result<File, Error>; fn update(&self, file : File) -> Option<Error>; fn destroy(&self, path: Path) -> Option<Error>; } Add module file pub use std::path::Path; pub use std::fs::File; pub use std::error::Error; pub use runtime::Runtime; mod file; pub trait StorageBackend { fn name(&self) -> String; fn create(&self, file : File) -> Option<Error>; fn read(&self, path: Path) -> Result<File, Error>; fn update(&self, file : File) -> Option<Error>; fn destroy(&self, path: Path) -> Option<Error>; }
use std::rc::Rc; use std::cell::RefCell; use std::collections::HashMap; use std::fs::File as FSFile; use std::ops::Deref; use std::io::Write; use std::io::Read; pub mod path; pub mod file; pub mod parser; pub mod json; use module::Module; use runtime::Runtime; use storage::file::File; use storage::file::id::FileID; use storage::file::id_type::FileIDType; use storage::file::hash::FileHash; use storage::parser::{FileHeaderParser, Parser, ParserError}; use storage::file::header::data::FileHeaderData; type Cache = HashMap<FileID, Rc<RefCell<File>>>; pub struct Store { storepath: String, cache : RefCell<Cache>, } impl Store { pub fn new(storepath: String) -> Store { Store { storepath: storepath, cache: RefCell::new(HashMap::new()), } } fn put_in_cache(&self, f: File) -> FileID { let res = f.id().clone(); self.cache.borrow_mut().insert(f.id().clone(), Rc::new(RefCell::new(f))); res } pub fn load_in_cache<HP>(&self, m: &Module, parser: &Parser<HP>, id: FileID) -> Option<Rc<RefCell<File>>> where HP: FileHeaderParser { let idstr : String = id.clone().into(); let path = format!("{}/{}-{}.imag", self.storepath, m.name(), idstr); debug!("Loading path = '{}'", path); let mut string = String::new(); FSFile::open(&path).map(|mut file| { file.read_to_string(&mut string) .map_err(|e| error!("Failed reading file: '{}'", path)); }); parser.read(string).map(|(header, data)| { self.new_file_from_parser_result(m, id.clone(), header, data); }); self.load(&id) } pub fn new_file(&self, module: &Module) -> FileID { let f = File { owning_module_name: module.name(), header: FileHeaderData::Null, data: String::from(""), id: self.get_new_file_id(), }; debug!("Create new File object: {:?}", &f); self.put_in_cache(f) } pub fn new_file_from_parser_result(&self, module: &Module, id: FileID, header: FileHeaderData, data: String) -> FileID { let f = File { owning_module_name: module.name(), header: header, data: data, id: id, }; debug!("Create new File object from parser result: {:?}", f); 
self.put_in_cache(f) } pub fn new_file_with_header(&self, module: &Module, h: FileHeaderData) -> FileID { let f = File { owning_module_name: module.name(), header: h, data: String::from(""), id: self.get_new_file_id(), }; debug!("Create new File object with header: {:?}", f); self.put_in_cache(f) } pub fn new_file_with_data(&self, module: &Module, d: String) -> FileID { let f = File { owning_module_name: module.name(), header: FileHeaderData::Null, data: d, id: self.get_new_file_id(), }; debug!("Create new File object with data: {:?}", f); self.put_in_cache(f) } pub fn new_file_with_content(&self, module: &Module, h: FileHeaderData, d: String) -> FileID { let f = File { owning_module_name: module.name(), header: h, data: d, id: self.get_new_file_id(), }; debug!("Create new File object with content: {:?}", f); self.put_in_cache(f) } pub fn persist<HP>(&self, p: &Parser<HP>, f: Rc<RefCell<File>>) -> bool where HP: FileHeaderParser { let file = f.deref().borrow(); let text = p.write(file.contents()); if text.is_err() { error!("Error: {}", text.err().unwrap()); return false; } let path = { let ids : String = file.id().clone().into(); format!("{}/{}-{}.imag", self.storepath, file.owning_module_name, ids) }; self.ensure_store_path_exists(); FSFile::create(&path).map(|mut fsfile| { fsfile.write_all(&text.unwrap().clone().into_bytes()[..]) }).map_err(|writeerr| { debug!("Could not create file at '{}'", path); }).and(Ok(true)).unwrap() } fn ensure_store_path_exists(&self) { use std::fs::create_dir_all; use std::process::exit; create_dir_all(&self.storepath).unwrap_or_else(|e| { error!("Could not create store: '{}'", self.storepath); error!("Error : '{}'", e); error!("Killing myself now"); exit(1); }) } pub fn load(&self, id: &FileID) -> Option<Rc<RefCell<File>>> { debug!("Loading '{:?}'", id); self.cache.borrow().get(id).cloned() } pub fn load_by_hash<HP>(&self, m: &Module, parser: &Parser<HP>, hash: FileHash) -> Option<Rc<RefCell<File>>> where HP: FileHeaderParser { 
macro_rules! try_some { ($expr:expr) => (match $expr { ::std::option::Option::Some(val) => val, ::std::option::Option::None => return ::std::option::Option::None, }); ($expr:expr => return) => (match $expr { ::std::option::Option::Some(val) => val, ::std::option::Option::None => return, }) } use glob::{glob, Paths, PatternError}; let hashstr : String = hash.into(); let globstr = format!("{}/*-{}.imag", self.storepath, hashstr); debug!("glob({})", globstr); let globs = glob(&globstr[..]); if globs.is_err() { return None; } let path = globs.unwrap().last(); debug!("path = {:?}", path); let pathbuf = try_some!(path); if pathbuf.is_err() { return None; } let pathbuf_un = pathbuf.unwrap(); let filename = pathbuf_un.file_name(); let s = try_some!(filename).to_str(); let string = String::from(try_some!(s)); let id = try_some!(FileID::parse(&string)); debug!("Loaded ID = '{:?}'", id); self.load_in_cache(m, parser, id) .map(|file| { debug!("Loaded File = '{:?}'", file); Some(file) }).unwrap_or(None) } pub fn remove(&self, id: FileID) -> bool { use std::fs::remove_file; self.cache .borrow_mut() .remove(&id) .map(|file| { let idstr : String = id.into(); let path = format!("{}/{}-{}.imag", self.storepath, file.deref().borrow().owner_name(), idstr); debug!("Removing file NOW: '{}'", path); remove_file(path).is_ok() }) .unwrap_or(false) } pub fn load_for_module<HP>(&self, m: &Module, parser: &Parser<HP>) -> Vec<Rc<RefCell<File>>> where HP: FileHeaderParser { use glob::{glob, Paths, PatternError}; let globstr = format!("{}/{}-*.imag", self.storepath, m.name()); let mut res = vec![]; glob(&globstr[..]).map(|paths| { for path in paths { if let Ok(pathbuf) = path { let fname = pathbuf.file_name().and_then(|s| s.to_str()); fname.map(|s| { FileID::parse(&String::from(s)).map(|id| { self.load_in_cache(m, parser, id).map(|file| { res.push(file); }) }); }); } } }); res } fn get_new_file_id(&self) -> FileID { use uuid::Uuid; let hash = 
FileHash::from(Uuid::new_v4().to_hyphenated_string()); FileID::new(FileIDType::UUID, hash) } } Add doc to Store object implementation use std::rc::Rc; use std::cell::RefCell; use std::collections::HashMap; use std::fs::File as FSFile; use std::ops::Deref; use std::io::Write; use std::io::Read; pub mod path; pub mod file; pub mod parser; pub mod json; use module::Module; use runtime::Runtime; use storage::file::File; use storage::file::id::FileID; use storage::file::id_type::FileIDType; use storage::file::hash::FileHash; use storage::parser::{FileHeaderParser, Parser, ParserError}; use storage::file::header::data::FileHeaderData; type Cache = HashMap<FileID, Rc<RefCell<File>>>; pub struct Store { storepath: String, cache : RefCell<Cache>, } /** * Store object * * This object is an abstraction layer over FS and an interface to the object store of this * software. */ impl Store { pub fn new(storepath: String) -> Store { Store { storepath: storepath, cache: RefCell::new(HashMap::new()), } } /** * Put a file into the cache */ fn put_in_cache(&self, f: File) -> FileID { let res = f.id().clone(); self.cache.borrow_mut().insert(f.id().clone(), Rc::new(RefCell::new(f))); res } /** * Load a file by ID into the cache and return it afterwards * * Returns None if the file could be loaded from the Filesystem */ pub fn load_in_cache<HP>(&self, m: &Module, parser: &Parser<HP>, id: FileID) -> Option<Rc<RefCell<File>>> where HP: FileHeaderParser { let idstr : String = id.clone().into(); let path = format!("{}/{}-{}.imag", self.storepath, m.name(), idstr); debug!("Loading path = '{}'", path); let mut string = String::new(); FSFile::open(&path).map(|mut file| { file.read_to_string(&mut string) .map_err(|e| error!("Failed reading file: '{}'", path)); }); parser.read(string).map(|(header, data)| { self.new_file_from_parser_result(m, id.clone(), header, data); }); self.load(&id) } /** * Generate a new file for a module. 
* * Returns the new FileID object then */ pub fn new_file(&self, module: &Module) -> FileID { let f = File { owning_module_name: module.name(), header: FileHeaderData::Null, data: String::from(""), id: self.get_new_file_id(), }; debug!("Create new File object: {:?}", &f); self.put_in_cache(f) } /** * Generate a new file from a parser result. * * @deprecated This function shouldn't be needed anymore */ pub fn new_file_from_parser_result(&self, module: &Module, id: FileID, header: FileHeaderData, data: String) -> FileID { let f = File { owning_module_name: module.name(), header: header, data: data, id: id, }; debug!("Create new File object from parser result: {:?}", f); self.put_in_cache(f) } /** * Generate a new file for a module, providing some header data * * Returns the new FileID object then */ pub fn new_file_with_header(&self, module: &Module, h: FileHeaderData) -> FileID { let f = File { owning_module_name: module.name(), header: h, data: String::from(""), id: self.get_new_file_id(), }; debug!("Create new File object with header: {:?}", f); self.put_in_cache(f) } /** * Generate a new file for a module, providing some initial data * * Returns the new FileID object then */ pub fn new_file_with_data(&self, module: &Module, d: String) -> FileID { let f = File { owning_module_name: module.name(), header: FileHeaderData::Null, data: d, id: self.get_new_file_id(), }; debug!("Create new File object with data: {:?}", f); self.put_in_cache(f) } /** * Generate a new file for a module, providing some initial data and some header * * Returns the new FileID object then */ pub fn new_file_with_content(&self, module: &Module, h: FileHeaderData, d: String) -> FileID { let f = File { owning_module_name: module.name(), header: h, data: d, id: self.get_new_file_id(), }; debug!("Create new File object with content: {:?}", f); self.put_in_cache(f) } /** * Persist a File on the filesystem * * Returns true if this worked */ pub fn persist<HP>(&self, p: &Parser<HP>, f: 
Rc<RefCell<File>>) -> bool where HP: FileHeaderParser { let file = f.deref().borrow(); let text = p.write(file.contents()); if text.is_err() { error!("Error: {}", text.err().unwrap()); return false; } let path = { let ids : String = file.id().clone().into(); format!("{}/{}-{}.imag", self.storepath, file.owning_module_name, ids) }; self.ensure_store_path_exists(); FSFile::create(&path).map(|mut fsfile| { fsfile.write_all(&text.unwrap().clone().into_bytes()[..]) }).map_err(|writeerr| { debug!("Could not create file at '{}'", path); }).and(Ok(true)).unwrap() // TODO: Is this unwrap() save? } /** * Helper to generate the store path * * Kills the program if it fails */ fn ensure_store_path_exists(&self) { use std::fs::create_dir_all; use std::process::exit; create_dir_all(&self.storepath).unwrap_or_else(|e| { error!("Could not create store: '{}'", self.storepath); error!("Error : '{}'", e); error!("Killing myself now"); exit(1); }) } /** * Load a file from the cache by FileID * * TODO: Semantics: This function should load from FS if the file is not in the cache yet or * fail if the file is not available. */ pub fn load(&self, id: &FileID) -> Option<Rc<RefCell<File>>> { debug!("Loading '{:?}'", id); self.cache.borrow().get(id).cloned() } /** * Load a file from the filesystem/cache by a FileHash */ pub fn load_by_hash<HP>(&self, m: &Module, parser: &Parser<HP>, hash: FileHash) -> Option<Rc<RefCell<File>>> where HP: FileHeaderParser { macro_rules! 
try_some { ($expr:expr) => (match $expr { ::std::option::Option::Some(val) => val, ::std::option::Option::None => return ::std::option::Option::None, }); ($expr:expr => return) => (match $expr { ::std::option::Option::Some(val) => val, ::std::option::Option::None => return, }) } use glob::{glob, Paths, PatternError}; let hashstr : String = hash.into(); let globstr = format!("{}/*-{}.imag", self.storepath, hashstr); debug!("glob({})", globstr); let globs = glob(&globstr[..]); if globs.is_err() { return None; } let path = globs.unwrap().last(); debug!("path = {:?}", path); let pathbuf = try_some!(path); if pathbuf.is_err() { return None; } let pathbuf_un = pathbuf.unwrap(); let filename = pathbuf_un.file_name(); let s = try_some!(filename).to_str(); let string = String::from(try_some!(s)); let id = try_some!(FileID::parse(&string)); debug!("Loaded ID = '{:?}'", id); self.load_in_cache(m, parser, id) .map(|file| { debug!("Loaded File = '{:?}'", file); Some(file) }).unwrap_or(None) } /** * Remove a file from the filesystem by FileID * * Returns true if this works. 
*/ pub fn remove(&self, id: FileID) -> bool { use std::fs::remove_file; self.cache .borrow_mut() .remove(&id) .map(|file| { let idstr : String = id.into(); let path = format!("{}/{}-{}.imag", self.storepath, file.deref().borrow().owner_name(), idstr); debug!("Removing file NOW: '{}'", path); remove_file(path).is_ok() }) .unwrap_or(false) } /** * Load all files for a module */ pub fn load_for_module<HP>(&self, m: &Module, parser: &Parser<HP>) -> Vec<Rc<RefCell<File>>> where HP: FileHeaderParser { use glob::{glob, Paths, PatternError}; let globstr = format!("{}/{}-*.imag", self.storepath, m.name()); let mut res = vec![]; glob(&globstr[..]).map(|paths| { for path in paths { if let Ok(pathbuf) = path { let fname = pathbuf.file_name().and_then(|s| s.to_str()); fname.map(|s| { FileID::parse(&String::from(s)).map(|id| { self.load_in_cache(m, parser, id).map(|file| { res.push(file); }) }); }); } } }); res } /** * Helper to generate a new FileID object */ fn get_new_file_id(&self) -> FileID { use uuid::Uuid; let hash = FileHash::from(Uuid::new_v4().to_hyphenated_string()); FileID::new(FileIDType::UUID, hash) } }
use Error; use Result; use byteorder::{BigEndian, ReadBytesExt}; #[derive(Debug)] pub struct CMAP { encoding_subtable: EncodingSubtable, cmap_offset: usize, } impl CMAP { pub fn from_data(data: &[u8], offset: usize) -> Result<Self> { use byteorder::ByteOrder; if offset >= data.len() || offset + 4 > data.len() { return Err(Error::Malformed); } // +2 skip version field. let number_subtables = BigEndian::read_u16(&data[offset + 2..]) as usize; let data = &data[offset + 4..]; if number_subtables * (2 + 2 + 4) > data.len() { return Err(Error::Malformed); } let mut encoding_subtables: Vec<_> = (0..number_subtables).filter_map(|n| { let z = n as usize * 8; let platform_id = BigEndian::read_u16(&data[z + 0..]); let platform_specific_id = BigEndian::read_u16(&data[z + 2..]); let offset = BigEndian::read_u32(&data[z + 4..]); Platform::new(platform_id, platform_specific_id).map(|platform| { EncodingSubtable { platform: platform, offset: offset} }) }).collect(); encoding_subtables.sort_by(|a, b| a.platform.order().cmp(&b.platform.order())); if encoding_subtables.is_empty() { return Err(Error::CMAPEncodingSubtableIsNotSupported); } Ok(CMAP { encoding_subtable: encoding_subtables.first().unwrap().clone(), cmap_offset: offset, }) } pub fn index_map(&self) -> usize { self.encoding_subtable.offset as usize + self.cmap_offset } } #[derive(Debug, PartialEq, Clone, Copy)] struct EncodingSubtable { platform: Platform, offset: u32, } #[derive(Debug, PartialEq, Eq, Clone, Copy)] enum Platform { Unicode(UnicodeEncodingId), Microsoft(MicrosoftEncodingId), } impl Platform { fn new(platform_id: u16, platform_specific_id: u16) -> Option<Self> { use self::Platform::*; use self::UnicodeEncodingId::*; use self::MicrosoftEncodingId::*; match platform_id { 0 => match platform_specific_id { 0 => Some(Unicode(DefaultSemantics)), 1 => Some(Unicode(Version11Semantics)), 3 => Some(Unicode(Unicode20BMPOnly)), 4 => Some(Unicode(Unicode20)), 5 => Some(Unicode(UnicodeVariationSequences)), 6 => 
Some(Unicode(FullUnicodeCoverage)), _ => None, }, 3 => match platform_specific_id { 0 => Some(Microsoft(Symbol)), 1 => Some(Microsoft(UnicodeUCS2)), 2 => Some(Microsoft(ShiftJIS)), 3 => Some(Microsoft(PRC)), 4 => Some(Microsoft(BigFive)), 5 => Some(Microsoft(Johab)), 10 => Some(Microsoft(UnicodeUCS4)), _ => None, }, _ => None, } } fn order(&self) -> u32 { use self::Platform::*; use self::UnicodeEncodingId::*; use self::MicrosoftEncodingId::*; match *self { Unicode(Unicode20) => 0, Unicode(Unicode20BMPOnly) => 1, Unicode(Version11Semantics) => 1, Unicode(DefaultSemantics) => 1, Microsoft(UnicodeUCS4) => 2, Microsoft(UnicodeUCS2) => 3, Microsoft(Symbol) => 4, _ => 10, } } } #[repr(u16)] #[derive(Debug, PartialEq, Eq, Clone, Copy)] enum UnicodeEncodingId { DefaultSemantics = 0, Version11Semantics = 1, Unicode20BMPOnly = 3, Unicode20 = 4, UnicodeVariationSequences = 5, FullUnicodeCoverage = 6, } #[repr(u16)] #[derive(Debug, PartialEq, Eq, Clone, Copy)] enum MicrosoftEncodingId { Symbol = 0, UnicodeUCS2 = 1, ShiftJIS = 2, PRC = 3, BigFive = 4, Johab = 5, UnicodeUCS4 = 10 } #[cfg(test)] mod tests { use super::*; use Error::*; use expectest::prelude::*; #[test] fn smoke() { let data = ::utils::read_file("tests/Tuffy_Bold.ttf"); let offset = ::utils::find_table_offset(&data, 0, b"cmap").unwrap().unwrap(); let _ = CMAP::from_data(&data, offset).unwrap(); } } Implement cmap's format 0. use Error; use Result; use byteorder::{BigEndian, ByteOrder}; #[derive(Debug)] pub struct CMAP { encoding_subtable: EncodingSubtable, cmap_offset: usize, } impl CMAP { pub fn from_data(data: &[u8], offset: usize) -> Result<Self> { if offset >= data.len() || offset + 4 > data.len() { return Err(Error::Malformed); } // +2 skip version field. 
let number_subtables = BigEndian::read_u16(&data[offset + 2..]) as usize; let data = &data[offset + 4..]; if number_subtables * (2 + 2 + 4) > data.len() { return Err(Error::Malformed); } let mut encoding_subtables: Vec<_> = (0..number_subtables).filter_map(|n| { let z = n as usize * 8; let platform_id = BigEndian::read_u16(&data[z + 0..]); let platform_specific_id = BigEndian::read_u16(&data[z + 2..]); let offset = BigEndian::read_u32(&data[z + 4..]); Platform::new(platform_id, platform_specific_id).map(|platform| { EncodingSubtable { platform: platform, offset: offset} }) }).collect(); encoding_subtables.sort_by(|a, b| a.platform.order().cmp(&b.platform.order())); if encoding_subtables.is_empty() { return Err(Error::CMAPEncodingSubtableIsNotSupported); } Ok(CMAP { encoding_subtable: encoding_subtables.first().unwrap().clone(), cmap_offset: offset, }) } pub fn index_map(&self) -> usize { self.encoding_subtable.offset as usize + self.cmap_offset } } #[derive(Debug, PartialEq, Clone, Copy)] struct EncodingSubtable { platform: Platform, offset: u32, } #[derive(Debug, PartialEq, Eq, Clone, Copy)] enum Platform { Unicode(UnicodeEncodingId), Microsoft(MicrosoftEncodingId), } impl Platform { fn new(platform_id: u16, platform_specific_id: u16) -> Option<Self> { use self::Platform::*; use self::UnicodeEncodingId::*; use self::MicrosoftEncodingId::*; match platform_id { 0 => match platform_specific_id { 0 => Some(Unicode(DefaultSemantics)), 1 => Some(Unicode(Version11Semantics)), 3 => Some(Unicode(Unicode20BMPOnly)), 4 => Some(Unicode(Unicode20)), 5 => Some(Unicode(UnicodeVariationSequences)), 6 => Some(Unicode(FullUnicodeCoverage)), _ => None, }, 3 => match platform_specific_id { 0 => Some(Microsoft(Symbol)), 1 => Some(Microsoft(UnicodeUCS2)), 2 => Some(Microsoft(ShiftJIS)), 3 => Some(Microsoft(PRC)), 4 => Some(Microsoft(BigFive)), 5 => Some(Microsoft(Johab)), 10 => Some(Microsoft(UnicodeUCS4)), _ => None, }, _ => None, } } fn order(&self) -> u32 { use self::Platform::*; 
use self::UnicodeEncodingId::*; use self::MicrosoftEncodingId::*; match *self { Unicode(Unicode20) => 0, Unicode(Unicode20BMPOnly) => 1, Unicode(Version11Semantics) => 1, Unicode(DefaultSemantics) => 1, Microsoft(UnicodeUCS4) => 2, Microsoft(UnicodeUCS2) => 3, Microsoft(Symbol) => 4, _ => 10, } } } #[repr(u16)] #[derive(Debug, PartialEq, Eq, Clone, Copy)] enum UnicodeEncodingId { DefaultSemantics = 0, Version11Semantics = 1, Unicode20BMPOnly = 3, Unicode20 = 4, UnicodeVariationSequences = 5, FullUnicodeCoverage = 6, } #[repr(u16)] #[derive(Debug, PartialEq, Eq, Clone, Copy)] enum MicrosoftEncodingId { Symbol = 0, UnicodeUCS2 = 1, ShiftJIS = 2, PRC = 3, BigFive = 4, Johab = 5, UnicodeUCS4 = 10 } #[derive(Debug)] struct Format0 { format: u16, length: u16, language: u16, glyph_index_array: Vec<u8>, } impl Format0 { fn from_data(data: &[u8], offset: usize) -> Result<Self> { const SIZE: usize = 262; if offset + SIZE > data.len() { return Err(Error::Malformed); } let format = BigEndian::read_u16(&data[offset..]); let length = BigEndian::read_u16(&data[offset + 2..]); if length as usize != SIZE { return Err(Error::Malformed); } let language = BigEndian::read_u16(&data[offset + 4..]); Ok(Format0 { format: format, length: length, language: language, glyph_index_array: data[offset + 6..SIZE].to_owned(), }) } fn index_for_code(&self, code: usize) -> Option<usize> { self.glyph_index_array.get(code).map(|&i| i as usize) } } #[cfg(test)] mod tests { use super::*; use Error::*; use expectest::prelude::*; #[test] fn smoke() { let data = ::utils::read_file("tests/Tuffy_Bold.ttf"); let offset = ::utils::find_table_offset(&data, 0, b"cmap").unwrap().unwrap(); let _ = CMAP::from_data(&data, offset).unwrap(); } }
use std::any::{Any, TypeId}; use std::error::Error; use std::fmt; use chrono::NaiveDateTime; use conduit::Response; use diesel::result::Error as DieselError; use crate::util::json_response; pub(super) mod concrete; mod http; /// Returns an error with status 200 and the provided description as JSON /// /// This is for backwards compatibility with cargo endpoints. For all other /// endpoints, use helpers like `bad_request` or `server_error` which set a /// correct status code. pub fn cargo_err<S: ToString + ?Sized>(error: &S) -> Box<dyn AppError> { Box::new(http::Ok(error.to_string())) } // The following are intended to be used for errors being sent back to the Ember // frontend, not to cargo as cargo does not handle non-200 response codes well // (see <https://github.com/rust-lang/cargo/issues/3995>), but Ember requires // non-200 response codes for its stores to work properly. /// Return an error with status 400 and the provided description as JSON pub fn bad_request<S: ToString + ?Sized>(error: &S) -> Box<dyn AppError> { Box::new(http::BadRequest(error.to_string())) } /// Returns an error with status 500 and the provided description as JSON pub fn server_error<S: ToString + ?Sized>(error: &S) -> Box<dyn AppError> { Box::new(http::ServerError(error.to_string())) } #[derive(Serialize)] struct StringError<'a> { detail: &'a str, } #[derive(Serialize)] struct Bad<'a> { errors: Vec<StringError<'a>>, } /// Generates a response with the provided status and description as JSON fn json_error(detail: &str, status: (u32, &'static str)) -> Response { let mut response = json_response(&Bad { errors: vec![StringError { detail }], }); response.status = status; response } // ============================================================================= // AppError trait pub trait AppError: Send + fmt::Display + fmt::Debug + 'static { /// Generate an HTTP response for the error /// /// If none is returned, the error will bubble up the middleware stack /// where it is eventually 
logged and turned into a status 500 response. fn response(&self) -> Option<Response>; fn get_type_id(&self) -> TypeId { TypeId::of::<Self>() } } impl dyn AppError { pub fn is<T: Any>(&self) -> bool { self.get_type_id() == TypeId::of::<T>() } pub fn from_std_error(err: Box<dyn Error + Send>) -> Box<dyn AppError> { Self::try_convert(&*err).unwrap_or_else(|| internal(&err)) } fn try_convert(err: &(dyn Error + Send + 'static)) -> Option<Box<Self>> { match err.downcast_ref() { Some(DieselError::NotFound) => Some(Box::new(NotFound)), Some(DieselError::DatabaseError(_, info)) if info.message().ends_with("read-only transaction") => { Some(Box::new(ReadOnlyMode)) } _ => None, } } } impl AppError for Box<dyn AppError> { fn response(&self) -> Option<Response> { (**self).response() } } pub type AppResult<T> = Result<T, Box<dyn AppError>>; // ============================================================================= // Chaining errors pub trait ChainError<T> { fn chain_error<E, F>(self, callback: F) -> AppResult<T> where E: AppError, F: FnOnce() -> E; } #[derive(Debug)] struct ChainedError<E> { error: E, cause: Box<dyn AppError>, } impl<T, F> ChainError<T> for F where F: FnOnce() -> AppResult<T>, { fn chain_error<E, C>(self, callback: C) -> AppResult<T> where E: AppError, C: FnOnce() -> E, { self().chain_error(callback) } } impl<T, E: AppError> ChainError<T> for Result<T, E> { fn chain_error<E2, C>(self, callback: C) -> AppResult<T> where E2: AppError, C: FnOnce() -> E2, { self.map_err(move |err| { Box::new(ChainedError { error: callback(), cause: Box::new(err), }) as Box<dyn AppError> }) } } impl<T> ChainError<T> for Option<T> { fn chain_error<E, C>(self, callback: C) -> AppResult<T> where E: AppError, C: FnOnce() -> E, { match self { Some(t) => Ok(t), None => Err(Box::new(callback())), } } } impl<E: AppError> AppError for ChainedError<E> { fn response(&self) -> Option<Response> { self.error.response() } } impl<E: AppError> fmt::Display for ChainedError<E> { fn fmt(&self, 
f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{} caused by {}", self.error, self.cause) } } // ============================================================================= // Error impls impl<E: Error + Send + 'static> AppError for E { fn response(&self) -> Option<Response> { None } } impl<E: Error + Send + 'static> From<E> for Box<dyn AppError> { fn from(err: E) -> Box<dyn AppError> { AppError::try_convert(&err).unwrap_or_else(|| Box::new(err)) } } // ============================================================================= // Internal error for use with `chain_error` #[derive(Debug)] struct InternalAppError { description: String, } impl fmt::Display for InternalAppError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.description)?; Ok(()) } } impl AppError for InternalAppError { fn response(&self) -> Option<Response> { None } } // TODO: The remaining can probably move under `http` #[derive(Debug, Clone, Copy)] pub struct NotFound; impl AppError for NotFound { fn response(&self) -> Option<Response> { Some(json_error("Not Found", (404, "Not Found"))) } } impl fmt::Display for NotFound { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "Not Found".fmt(f) } } #[derive(Debug, Clone, Copy)] pub struct Unauthorized; impl AppError for Unauthorized { fn response(&self) -> Option<Response> { let detail = "must be logged in to perform that action"; Some(json_error(detail, (403, "Forbidden"))) } } impl fmt::Display for Unauthorized { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "must be logged in to perform that action".fmt(f) } } pub fn internal<S: ToString + ?Sized>(error: &S) -> Box<dyn AppError> { Box::new(InternalAppError { description: error.to_string(), }) } #[derive(Debug)] struct AppErrToStdErr(pub Box<dyn AppError>); impl Error for AppErrToStdErr {} impl fmt::Display for AppErrToStdErr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } pub(crate) fn std_error(e: 
Box<dyn AppError>) -> Box<dyn Error + Send> { Box::new(AppErrToStdErr(e)) } #[derive(Debug, Clone, Copy)] pub struct ReadOnlyMode; impl AppError for ReadOnlyMode { fn response(&self) -> Option<Response> { let detail = "Crates.io is currently in read-only mode for maintenance. \ Please try again later."; Some(json_error(detail, (503, "Service Unavailable"))) } } impl fmt::Display for ReadOnlyMode { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "Tried to write in read only mode".fmt(f) } } #[derive(Debug, Clone, Copy)] pub struct TooManyRequests { pub retry_after: NaiveDateTime, } impl AppError for TooManyRequests { fn response(&self) -> Option<Response> { const HTTP_DATE_FORMAT: &str = "%a, %d %b %Y %H:%M:%S GMT"; let retry_after = self.retry_after.format(HTTP_DATE_FORMAT); let detail = format!( "You have published too many crates in a \ short period of time. Please try again after {} or email \ help@crates.io to have your limit increased.", retry_after ); let mut response = json_error(&detail, (429, "TOO MANY REQUESTS")); response .headers .insert("Retry-After".into(), vec![retry_after.to_string()]); Some(response) } } impl fmt::Display for TooManyRequests { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "Too many requests".fmt(f) } } #[test] fn chain_error_internal() { assert_eq!( None::<()> .chain_error(|| internal("inner")) .chain_error(|| internal("middle")) .chain_error(|| internal("outer")) .unwrap_err() .to_string(), "outer caused by middle caused by inner" ); assert_eq!( Err::<(), _>(internal("inner")) .chain_error(|| internal("outer")) .unwrap_err() .to_string(), "outer caused by inner" ); // Don't do this, the user will see a generic 500 error instead of the intended message assert_eq!( Err::<(), _>(cargo_err("inner")) .chain_error(|| internal("outer")) .unwrap_err() .to_string(), "outer caused by inner" ); assert_eq!( Err::<(), _>(Unauthorized) .chain_error(|| internal("outer")) .unwrap_err() .to_string(), "outer caused by must 
be logged in to perform that action" ); } #[test] fn chain_error_user_facing() { // Do this rarely, the user will only see the outer error assert_eq!( Err::<(), _>(cargo_err("inner")) .chain_error(|| cargo_err("outer")) .unwrap_err() .to_string(), "outer caused by inner" // never logged ); // The outer error is sent as a response to the client. // The inner error never bubbles up to the logging middleware assert_eq!( Err::<(), _>(std::io::Error::from(std::io::ErrorKind::PermissionDenied)) .chain_error(|| cargo_err("outer")) .unwrap_err() .to_string(), "outer caused by permission denied" // never logged ); } Add module level documentation describing typical usage of error types //! This module implements several error types and traits. The suggested usage in returned results //! is as follows: //! //! * The concrete `util::concrete::Error` type (re-exported as `util::Error`) is great for code //! that is not part of the request/response lifecycle. It avoids pulling in the unnecessary //! infrastructure to convert errors into a user facing JSON responses (relative to `AppError`). //! * `diesel::QueryResult` - There is a lot of code that only deals with query errors. If only //! one type of error is possible in a function, using that specific error is preferable to the //! more general `util::Error`. This is especially common in model code. //! * `util::errors::AppResult` - Some failures should be converted into user facing JSON //! responses. This error type is more dynamic and is box allocated. Low-level errors are //! typically not converted to user facing errors and most usage is within the models, //! controllers, and middleware layers. 
use std::any::{Any, TypeId}; use std::error::Error; use std::fmt; use chrono::NaiveDateTime; use conduit::Response; use diesel::result::Error as DieselError; use crate::util::json_response; pub(super) mod concrete; mod http; /// Returns an error with status 200 and the provided description as JSON /// /// This is for backwards compatibility with cargo endpoints. For all other /// endpoints, use helpers like `bad_request` or `server_error` which set a /// correct status code. pub fn cargo_err<S: ToString + ?Sized>(error: &S) -> Box<dyn AppError> { Box::new(http::Ok(error.to_string())) } // The following are intended to be used for errors being sent back to the Ember // frontend, not to cargo as cargo does not handle non-200 response codes well // (see <https://github.com/rust-lang/cargo/issues/3995>), but Ember requires // non-200 response codes for its stores to work properly. /// Return an error with status 400 and the provided description as JSON pub fn bad_request<S: ToString + ?Sized>(error: &S) -> Box<dyn AppError> { Box::new(http::BadRequest(error.to_string())) } /// Returns an error with status 500 and the provided description as JSON pub fn server_error<S: ToString + ?Sized>(error: &S) -> Box<dyn AppError> { Box::new(http::ServerError(error.to_string())) } #[derive(Serialize)] struct StringError<'a> { detail: &'a str, } #[derive(Serialize)] struct Bad<'a> { errors: Vec<StringError<'a>>, } /// Generates a response with the provided status and description as JSON fn json_error(detail: &str, status: (u32, &'static str)) -> Response { let mut response = json_response(&Bad { errors: vec![StringError { detail }], }); response.status = status; response } // ============================================================================= // AppError trait pub trait AppError: Send + fmt::Display + fmt::Debug + 'static { /// Generate an HTTP response for the error /// /// If none is returned, the error will bubble up the middleware stack /// where it is eventually 
logged and turned into a status 500 response. fn response(&self) -> Option<Response>; fn get_type_id(&self) -> TypeId { TypeId::of::<Self>() } } impl dyn AppError { pub fn is<T: Any>(&self) -> bool { self.get_type_id() == TypeId::of::<T>() } pub fn from_std_error(err: Box<dyn Error + Send>) -> Box<dyn AppError> { Self::try_convert(&*err).unwrap_or_else(|| internal(&err)) } fn try_convert(err: &(dyn Error + Send + 'static)) -> Option<Box<Self>> { match err.downcast_ref() { Some(DieselError::NotFound) => Some(Box::new(NotFound)), Some(DieselError::DatabaseError(_, info)) if info.message().ends_with("read-only transaction") => { Some(Box::new(ReadOnlyMode)) } _ => None, } } } impl AppError for Box<dyn AppError> { fn response(&self) -> Option<Response> { (**self).response() } } pub type AppResult<T> = Result<T, Box<dyn AppError>>; // ============================================================================= // Chaining errors pub trait ChainError<T> { fn chain_error<E, F>(self, callback: F) -> AppResult<T> where E: AppError, F: FnOnce() -> E; } #[derive(Debug)] struct ChainedError<E> { error: E, cause: Box<dyn AppError>, } impl<T, F> ChainError<T> for F where F: FnOnce() -> AppResult<T>, { fn chain_error<E, C>(self, callback: C) -> AppResult<T> where E: AppError, C: FnOnce() -> E, { self().chain_error(callback) } } impl<T, E: AppError> ChainError<T> for Result<T, E> { fn chain_error<E2, C>(self, callback: C) -> AppResult<T> where E2: AppError, C: FnOnce() -> E2, { self.map_err(move |err| { Box::new(ChainedError { error: callback(), cause: Box::new(err), }) as Box<dyn AppError> }) } } impl<T> ChainError<T> for Option<T> { fn chain_error<E, C>(self, callback: C) -> AppResult<T> where E: AppError, C: FnOnce() -> E, { match self { Some(t) => Ok(t), None => Err(Box::new(callback())), } } } impl<E: AppError> AppError for ChainedError<E> { fn response(&self) -> Option<Response> { self.error.response() } } impl<E: AppError> fmt::Display for ChainedError<E> { fn fmt(&self, 
f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{} caused by {}", self.error, self.cause) } } // ============================================================================= // Error impls impl<E: Error + Send + 'static> AppError for E { fn response(&self) -> Option<Response> { None } } impl<E: Error + Send + 'static> From<E> for Box<dyn AppError> { fn from(err: E) -> Box<dyn AppError> { AppError::try_convert(&err).unwrap_or_else(|| Box::new(err)) } } // ============================================================================= // Internal error for use with `chain_error` #[derive(Debug)] struct InternalAppError { description: String, } impl fmt::Display for InternalAppError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.description)?; Ok(()) } } impl AppError for InternalAppError { fn response(&self) -> Option<Response> { None } } // TODO: The remaining can probably move under `http` #[derive(Debug, Clone, Copy)] pub struct NotFound; impl AppError for NotFound { fn response(&self) -> Option<Response> { Some(json_error("Not Found", (404, "Not Found"))) } } impl fmt::Display for NotFound { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "Not Found".fmt(f) } } #[derive(Debug, Clone, Copy)] pub struct Unauthorized; impl AppError for Unauthorized { fn response(&self) -> Option<Response> { let detail = "must be logged in to perform that action"; Some(json_error(detail, (403, "Forbidden"))) } } impl fmt::Display for Unauthorized { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "must be logged in to perform that action".fmt(f) } } pub fn internal<S: ToString + ?Sized>(error: &S) -> Box<dyn AppError> { Box::new(InternalAppError { description: error.to_string(), }) } #[derive(Debug)] struct AppErrToStdErr(pub Box<dyn AppError>); impl Error for AppErrToStdErr {} impl fmt::Display for AppErrToStdErr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } pub(crate) fn std_error(e: 
Box<dyn AppError>) -> Box<dyn Error + Send> { Box::new(AppErrToStdErr(e)) } #[derive(Debug, Clone, Copy)] pub struct ReadOnlyMode; impl AppError for ReadOnlyMode { fn response(&self) -> Option<Response> { let detail = "Crates.io is currently in read-only mode for maintenance. \ Please try again later."; Some(json_error(detail, (503, "Service Unavailable"))) } } impl fmt::Display for ReadOnlyMode { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "Tried to write in read only mode".fmt(f) } } #[derive(Debug, Clone, Copy)] pub struct TooManyRequests { pub retry_after: NaiveDateTime, } impl AppError for TooManyRequests { fn response(&self) -> Option<Response> { const HTTP_DATE_FORMAT: &str = "%a, %d %b %Y %H:%M:%S GMT"; let retry_after = self.retry_after.format(HTTP_DATE_FORMAT); let detail = format!( "You have published too many crates in a \ short period of time. Please try again after {} or email \ help@crates.io to have your limit increased.", retry_after ); let mut response = json_error(&detail, (429, "TOO MANY REQUESTS")); response .headers .insert("Retry-After".into(), vec![retry_after.to_string()]); Some(response) } } impl fmt::Display for TooManyRequests { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "Too many requests".fmt(f) } } #[test] fn chain_error_internal() { assert_eq!( None::<()> .chain_error(|| internal("inner")) .chain_error(|| internal("middle")) .chain_error(|| internal("outer")) .unwrap_err() .to_string(), "outer caused by middle caused by inner" ); assert_eq!( Err::<(), _>(internal("inner")) .chain_error(|| internal("outer")) .unwrap_err() .to_string(), "outer caused by inner" ); // Don't do this, the user will see a generic 500 error instead of the intended message assert_eq!( Err::<(), _>(cargo_err("inner")) .chain_error(|| internal("outer")) .unwrap_err() .to_string(), "outer caused by inner" ); assert_eq!( Err::<(), _>(Unauthorized) .chain_error(|| internal("outer")) .unwrap_err() .to_string(), "outer caused by must 
be logged in to perform that action" ); } #[test] fn chain_error_user_facing() { // Do this rarely, the user will only see the outer error assert_eq!( Err::<(), _>(cargo_err("inner")) .chain_error(|| cargo_err("outer")) .unwrap_err() .to_string(), "outer caused by inner" // never logged ); // The outer error is sent as a response to the client. // The inner error never bubbles up to the logging middleware assert_eq!( Err::<(), _>(std::io::Error::from(std::io::ErrorKind::PermissionDenied)) .chain_error(|| cargo_err("outer")) .unwrap_err() .to_string(), "outer caused by permission denied" // never logged ); }
// DOCS #![crate_name = "vobject"] #![crate_type = "lib"] #![license = "MIT"] #![comment = "Parser for VObject and iCalendar."] #![feature(phase)] #[phase(plugin)] extern crate peg_syntax_ext; use std::collections::HashMap; use std::collections::hash_map::{Occupied, Vacant}; pub struct Property { params: HashMap<String, String>, raw_value: String, } impl Property { fn new(params: HashMap<String, String>, raw_value: String) -> Property { Property { params: params, raw_value: raw_value } } #[doc="Get parameters."] pub fn get_params(&self) -> &HashMap<String, String> { &self.params } #[doc="Get value as unparsed string."] pub fn get_raw_value(&self) -> &String { &self.raw_value } #[doc="Get value as unescaped string."] pub fn value_as_string(&self) -> String { unescape_chars(self.get_raw_value()) } } pub struct Component { #[doc="The name of the component, such as `VCARD` or `VEVENT`."] pub name: String, #[doc="The component's properties."] pub props: HashMap<String, Vec<Property>>, #[doc="The component's child- or sub-components."] pub subcomponents: Vec<Component> } impl Component { fn new(name: String) -> Component { Component { name: name, props: HashMap::new(), subcomponents: vec![] } } #[doc="Retrieve one property (from many) by key. Returns `None` if nothing is found."] pub fn single_prop(&self, key: &String) -> Option<&Property> { match self.props.get(key) { Some(x) => { match x.len() { 1 => Some(&x[0]), _ => None } }, None => None } } #[doc="Retrieve a mutable vector of properties for this key. Creates one (and inserts it into the component) if none exists."] pub fn all_props_mut(&mut self, key: String) -> &mut Vec<Property> { match self.props.entry(key) { Occupied(values) => values.into_mut(), Vacant(values) => values.set(vec![]) } } #[doc="Retrieve properties by key. 
Returns an empty slice if key doesn't exist."] pub fn all_props(&self, key: &String) -> &[Property] { static EMPTY: &'static [Property] = []; match self.props.get(key) { Some(values) => values.as_slice(), None => EMPTY } } } peg! parser(r#" use super::{Component,Property}; use std::collections::HashMap; #[pub] component -> Component = name:component_begin ps:props cs:components? component_end { let mut rv = Component::new(name); match cs { Some(components) => { rv.subcomponents = components; }, None => () }; for (k, v) in ps.into_iter() { rv.all_props_mut(k).push(v); }; rv } component_begin -> String = "BEGIN:" v:value __ { v } component_end -> String = "END:" v:value __ { v } components -> Vec<Component> = cs:component ++ eols __ { cs } props -> Vec<(String, Property)> = ps:contentline ++ eols __ { ps } contentline -> (String, Property) = k:name p:params ":" v:value { (k, Property::new(p, v)) } name -> String = !"BEGIN" !"END" iana_token+ { match_str.into_string() } params -> HashMap<String, String> = ps:(";" p:param {p})* { let mut rv: HashMap<String, String> = HashMap::new(); for (k, v) in ps.into_iter() { rv.insert(k, v); }; rv } param -> (String, String) // FIXME: Doesn't handle comma-separated values = k:param_name v:("=" v:param_value { v })? { (k, match v { Some(x) => x, None => "".into_string() }) } param_name -> String = iana_token+ { match_str.into_string() } param_value -> String = x:(quoted_string / param_text) { x } param_text -> String = safe_char* { match_str.into_string() } value -> String = value_char+ { match_str.into_string() } quoted_string -> String = dquote x:quoted_content dquote { x } quoted_content -> String = qsafe_char* { match_str.into_string() } iana_token = ([a-zA-Z0-9] / "-")+ safe_char = !";" !":" !"," value_char qsafe_char = !dquote value_char // FIXME value_char = !eol . eol = "\n" / "\r\n" / "\r" dquote = "\"" eols = eol+ whitespace = " " / "\t" __ = (eol / whitespace)* "#) #[doc="Parse a component. 
The error value is a human-readable message."] pub fn parse_component(s: &String) -> Result<Component, String> { // XXX: The unfolding should be worked into the PEG // See feature request: https://github.com/kevinmehall/rust-peg/issues/26 let unfolded = s .replace("\n ", "").replace("\n\t", "") .replace("\r\n ", "").replace("\r\n\t", "") .replace("\r ", "").replace("\r\t", ""); parser::component(unfolded.as_slice()) } #[doc="Escape text for a VObject property value."] pub fn escape_chars(s: &String) -> String { // Order matters! Lifted from icalendar.parser // https://github.com/collective/icalendar/ s .replace("\\N", "\n") .replace("\\", "\\\\") .replace(";", "\\;") .replace(",", "\\,") .replace("\r\n", "\\n") .replace("\n", "\\n") } #[doc="Unescape text from a VObject property value."] pub fn unescape_chars(s: &String) -> String { // Order matters! Lifted from icalendar.parser // https://github.com/collective/icalendar/ s .replace("\\N", "\\n") .replace("\r\n", "\n") .replace("\\n", "\n") .replace("\\,", ",") .replace("\\;", ";") .replace("\\\\", "\\") } Rust nightly updates // DOCS #![crate_name = "vobject"] #![crate_type = "lib"] #![license = "MIT"] #![comment = "Parser for VObject and iCalendar."] #![feature(phase)] #[phase(plugin)] extern crate peg_syntax_ext; use std::collections::HashMap; use std::collections::hash_map::{Occupied, Vacant}; pub struct Property { params: HashMap<String, String>, raw_value: String, } impl Property { fn new(params: HashMap<String, String>, raw_value: String) -> Property { Property { params: params, raw_value: raw_value } } #[doc="Get parameters."] pub fn get_params(&self) -> &HashMap<String, String> { &self.params } #[doc="Get value as unparsed string."] pub fn get_raw_value(&self) -> &String { &self.raw_value } #[doc="Get value as unescaped string."] pub fn value_as_string(&self) -> String { unescape_chars(self.get_raw_value()) } } pub struct Component { #[doc="The name of the component, such as `VCARD` or `VEVENT`."] pub 
name: String, #[doc="The component's properties."] pub props: HashMap<String, Vec<Property>>, #[doc="The component's child- or sub-components."] pub subcomponents: Vec<Component> } impl Component { fn new(name: String) -> Component { Component { name: name, props: HashMap::new(), subcomponents: vec![] } } #[doc="Retrieve one property (from many) by key. Returns `None` if nothing is found."] pub fn single_prop(&self, key: &String) -> Option<&Property> { match self.props.get(key) { Some(x) => { match x.len() { 1 => Some(&x[0]), _ => None } }, None => None } } #[doc="Retrieve a mutable vector of properties for this key. Creates one (and inserts it into the component) if none exists."] pub fn all_props_mut(&mut self, key: String) -> &mut Vec<Property> { match self.props.entry(key) { Occupied(values) => values.into_mut(), Vacant(values) => values.set(vec![]) } } #[doc="Retrieve properties by key. Returns an empty slice if key doesn't exist."] pub fn all_props(&self, key: &String) -> &[Property] { static EMPTY: &'static [Property] = &[]; match self.props.get(key) { Some(values) => values.as_slice(), None => EMPTY } } } peg! parser(r#" use super::{Component,Property}; use std::collections::HashMap; #[pub] component -> Component = name:component_begin ps:props cs:components? 
component_end { let mut rv = Component::new(name); match cs { Some(components) => { rv.subcomponents = components; }, None => () }; for (k, v) in ps.into_iter() { rv.all_props_mut(k).push(v); }; rv } component_begin -> String = "BEGIN:" v:value __ { v } component_end -> String = "END:" v:value __ { v } components -> Vec<Component> = cs:component ++ eols __ { cs } props -> Vec<(String, Property)> = ps:contentline ++ eols __ { ps } contentline -> (String, Property) = k:name p:params ":" v:value { (k, Property::new(p, v)) } name -> String = !"BEGIN" !"END" iana_token+ { match_str.into_string() } params -> HashMap<String, String> = ps:(";" p:param {p})* { let mut rv: HashMap<String, String> = HashMap::new(); for (k, v) in ps.into_iter() { rv.insert(k, v); }; rv } param -> (String, String) // FIXME: Doesn't handle comma-separated values = k:param_name v:("=" v:param_value { v })? { (k, match v { Some(x) => x, None => "".into_string() }) } param_name -> String = iana_token+ { match_str.into_string() } param_value -> String = x:(quoted_string / param_text) { x } param_text -> String = safe_char* { match_str.into_string() } value -> String = value_char+ { match_str.into_string() } quoted_string -> String = dquote x:quoted_content dquote { x } quoted_content -> String = qsafe_char* { match_str.into_string() } iana_token = ([a-zA-Z0-9] / "-")+ safe_char = !";" !":" !"," value_char qsafe_char = !dquote value_char // FIXME value_char = !eol . eol = "\n" / "\r\n" / "\r" dquote = "\"" eols = eol+ whitespace = " " / "\t" __ = (eol / whitespace)* "#) #[doc="Parse a component. 
The error value is a human-readable message."] pub fn parse_component(s: &String) -> Result<Component, String> { // XXX: The unfolding should be worked into the PEG // See feature request: https://github.com/kevinmehall/rust-peg/issues/26 let unfolded = s .replace("\n ", "").replace("\n\t", "") .replace("\r\n ", "").replace("\r\n\t", "") .replace("\r ", "").replace("\r\t", ""); parser::component(unfolded.as_slice()) } #[doc="Escape text for a VObject property value."] pub fn escape_chars(s: &String) -> String { // Order matters! Lifted from icalendar.parser // https://github.com/collective/icalendar/ s .replace("\\N", "\n") .replace("\\", "\\\\") .replace(";", "\\;") .replace(",", "\\,") .replace("\r\n", "\\n") .replace("\n", "\\n") } #[doc="Unescape text from a VObject property value."] pub fn unescape_chars(s: &String) -> String { // Order matters! Lifted from icalendar.parser // https://github.com/collective/icalendar/ s .replace("\\N", "\\n") .replace("\r\n", "\n") .replace("\\n", "\n") .replace("\\,", ",") .replace("\\;", ";") .replace("\\\\", "\\") }
// Copyright (C) 2016 ParadoxSpiral
//
// This file is part of mpv-rs.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

#![allow(unknown_lints)]

use libc;
use encoding;
use parking_lot::{Condvar, Mutex};
use enum_primitive::FromPrimitive;

use super::raw::*;
use super::raw::prototype::*;

use std::boxed::Box;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::mem;
use std::path::Path;
use std::ptr;
use std::ffi::{CStr, CString};
use std::ops::Drop;
use std::time::Duration;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

// Get the inner data of `Data`, and transmute it to a value that the API understands.
// Used by `set_option`/`set_property` for the non-string formats; string data takes a
// different path and `String`/`OsdString` are therefore `unreachable!()` here.
// NOTE(review): the `Flag` arm hands libmpv a `*mut bool` (1 byte); client.h documents
// MPV_FORMAT_FLAG data as a C `int` (4 bytes). Confirm against the raw bindings --
// if mpv writes an int through this pointer it clobbers adjacent memory.
macro_rules! data_ptr {
    ($data:ident) => (
        unsafe {
            #[allow(match_ref_pats)]
            match $data {
                &mut Data::Flag(ref mut v) => mem::transmute::<*mut bool,
                                                               *mut libc::c_void>(v),
                &mut Data::Int64(ref mut v) => mem::transmute::<*mut libc::int64_t,
                                                                *mut libc::c_void>(v),
                &mut Data::Double(ref mut v) => mem::transmute::<*mut libc::c_double,
                                                                 *mut libc::c_void>(v),
                &mut Data::Node(ref mut v) => mem::transmute::<*mut MpvNode,
                                                               *mut libc::c_void>(v),
                _ => unreachable!(),
            }
        }
    )
}

// Map a libmpv return code to a `Result`: 0 is success and yields `ret`; any
// other value is decoded through `MpvError::from_i32` (mpv error codes are
// negative; `unwrap` assumes the code is a known `MpvError` discriminant).
fn mpv_err<T>(ret: T, v: libc::c_int) -> Result<T, Error> {
    if v == 0 {
        Ok(ret)
    } else {
        Err(Error::Mpv(MpvError::from_i32(v).unwrap()))
    }
}

// Wakeup callback registered via `mpv_set_wakeup_callback`. `d` is the raw
// pointer to the boxed `(Mutex<bool>, Condvar)` pair owned by the instance;
// waking the condvar unblocks `EventIter::next`.
extern "C" fn event_callback(d: *mut libc::c_void) {
    unsafe {
        let data = mem::transmute::<*mut libc::c_void, *mut (Mutex<bool>, Condvar)>(d);
        (*data).1.notify_one();
    }
}

#[doc(hidden)]
#[derive(Clone, Debug, PartialEq)]
/// Designed for internal use.
// An `Event` paired with the error state it was delivered with, as stored in
// the shared "observed but not yet consumed" queue.
pub struct InnerEvent {
    event: Event,
    err: Option<Error>,
}

impl InnerEvent {
    // Convert to the public result form: the error wins if one was recorded.
    #[inline]
    fn as_result(&self) -> Result<Event, Error> {
        if self.err.is_some() {
            Err(self.err.clone().unwrap())
        } else {
            Ok(self.event.clone())
        }
    }
    // Borrow the wrapped event, ignoring any error state.
    #[inline]
    fn as_event(&self) -> &Event {
        &self.event
    }
}

/// Represents an event returned by `EventIter`.
#[derive(Clone, Debug, PartialEq)]
#[allow(missing_docs)]
pub enum Event {
    LogMessage(Option<LogMessage>),
    StartFile,
    EndFile(Option<EndFile>),
    FileLoaded,
    Idle,
    Tick,
    VideoReconfig,
    AudioReconfig,
    Seek,
    PlaybackRestart,
    PropertyChange(Property),
}

impl Event {
    // Map this high-level event to the raw libmpv event id (payload is ignored).
    #[inline]
    fn as_id(&self) -> MpvEventId {
        match *self {
            Event::LogMessage(_) => MpvEventId::LogMessage,
            Event::StartFile => MpvEventId::StartFile,
            Event::EndFile(_) => MpvEventId::EndFile,
            Event::FileLoaded => MpvEventId::FileLoaded,
            Event::Idle => MpvEventId::Idle,
            Event::Tick => MpvEventId::Tick,
            Event::VideoReconfig => MpvEventId::VideoReconfig,
            Event::AudioReconfig => MpvEventId::AudioReconfig,
            Event::Seek => MpvEventId::Seek,
            Event::PlaybackRestart => MpvEventId::PlaybackRestart,
            Event::PropertyChange(_) => MpvEventId::PropertyChange,
        }
    }
}

impl MpvEvent {
    // Decode a raw mpv event into the public `Event`, short-circuiting with the
    // event's error code if it is nonzero. Only the event ids this wrapper
    // supports are handled; anything else is considered a bug (`unreachable!`).
    #[inline]
    fn as_event(&self) -> Result<Event, Error> {
        try!(mpv_err((), self.error));
        Ok(match self.event_id {
            MpvEventId::LogMessage => Event::LogMessage(Some(LogMessage::from_raw(self.data))),
            MpvEventId::StartFile => Event::StartFile,
            MpvEventId::EndFile => {
                Event::EndFile(Some(EndFile::from_raw(MpvEventEndFile::from_raw(self.data))))
            }
            MpvEventId::FileLoaded => Event::FileLoaded,
            MpvEventId::Idle => Event::Idle,
            MpvEventId::Tick => Event::Tick,
            MpvEventId::VideoReconfig => Event::VideoReconfig,
            MpvEventId::AudioReconfig => Event::AudioReconfig,
            MpvEventId::Seek => Event::Seek,
            MpvEventId::PlaybackRestart => Event::PlaybackRestart,
            MpvEventId::PropertyChange => Event::PropertyChange(Property::from_raw(self.data)),
            _ => unreachable!(),
        })
    }

    // Like `as_event`, but keeps the error alongside the event instead of
    // short-circuiting, for storage in the shared observed queue.
    #[inline]
    fn as_inner_event(&self) -> InnerEvent {
        InnerEvent {
            event: match self.event_id {
                MpvEventId::LogMessage => Event::LogMessage(Some(LogMessage::from_raw(self.data))),
                MpvEventId::StartFile => Event::StartFile,
                MpvEventId::EndFile => {
                    Event::EndFile(Some(EndFile::from_raw(MpvEventEndFile::from_raw(self.data))))
                }
                MpvEventId::FileLoaded => Event::FileLoaded,
                MpvEventId::Idle => Event::Idle,
                MpvEventId::Tick => Event::Tick,
                MpvEventId::VideoReconfig => Event::VideoReconfig,
                MpvEventId::AudioReconfig => Event::AudioReconfig,
                MpvEventId::Seek => Event::Seek,
                MpvEventId::PlaybackRestart => Event::PlaybackRestart,
                MpvEventId::PropertyChange => Event::PropertyChange(Property::from_raw(self.data)),
                _ => unreachable!(),
            },
            err: {
                let err = mpv_err((), self.error);
                if err.is_err() {
                    Some(err.unwrap_err())
                } else {
                    None
                }
            },
        }
    }
}

/// Represents a blocking iter over some observed events of an mpv instance.
/// `next` will never return `None`, instead it will return `Error::NoAssociatedEvent`. This is done
/// so that the iterator is endless. Once the `EventIter` is dropped, it's `Event`s are removed from
/// the "to be observed" queue, therefore new `Event` invocations won't be observed.
pub struct EventIter<'parent, P>
    where P: MpvMarker + 'parent
{
    // Raw handle of the instance this iterator polls events from.
    ctx: *mut MpvHandle,
    // Shared wakeup pair; the condvar is notified by `event_callback`.
    notification: *mut (Mutex<bool>, Condvar),
    // Events observed by ALL iterators of this instance.
    all_to_observe: &'parent Mutex<Vec<Event>>,
    // Property name -> reply id passed to `mpv_observe_property`.
    all_to_observe_properties: &'parent Mutex<HashMap<String, usize>>,
    // Events THIS iterator observes.
    local_to_observe: Vec<Event>,
    // Events already drained from mpv but destined for other iterators.
    all_observed: &'parent Mutex<Vec<InnerEvent>>,
    // True when the previous `next` returned `NoAssociatedEvent`; forces a wait.
    last_no_associated_ev: bool,
    _marker: PhantomData<&'parent P>,
}

impl<'parent, P> Drop for EventIter<'parent, P>
    where P: MpvMarker + 'parent
{
    // On drop, remove this iterator's events from the shared queues and
    // unobserve its properties with mpv.
    fn drop(&mut self) {
        let mut all_to_observe = self.all_to_observe.lock();
        let mut all_observed = self.all_observed.lock();
        let mut all_to_observe_properties = self.all_to_observe_properties.lock();

        // Returns true when `inner_ev` matches `outer_ev` (same property name
        // for `PropertyChange`, same event id otherwise); as a side effect it
        // unobserves a matched property with mpv and drops its reply id.
        let mut compare_ev_unobsorve = |outer_ev: &Event, inner_ev: &Event| -> bool {
            if let Event::PropertyChange(ref outer_prop) = *outer_ev {
                if let Event::PropertyChange(ref inner_prop) = *inner_ev {
                    if outer_prop.name == inner_prop.name {
                        unsafe {
                            mpv_unobserve_property(self.ctx,
                                                   *all_to_observe_properties.get(
                                                       &outer_prop.name).unwrap() as
                                                   libc::uint64_t);
                        }
                        all_to_observe_properties.remove(&outer_prop.name);
                        return true;
                    }
                }
            } else if outer_ev.as_id() == inner_ev.as_id() {
                return true;
            }
            false
        };

        // Rebuild the shared queues without this iterator's events.
        // NOTE(review): `with_capacity(all_to_observe.len() - self.local_to_observe.len())`
        // underflows (panics) if the shared queue is ever shorter than the local one --
        // confirm that invariant holds.
        // NOTE(review): `skip_while` only drops a matching PREFIX, and the surrounding
        // `for outer_ev` loop re-appends the survivors once per local event; this looks
        // like it was meant to be a `filter(|e| !compare(..))` applied once. Verify --
        // as written, non-leading matches are kept and entries are duplicated.
        let mut new_to = Vec::with_capacity(all_to_observe.len() - self.local_to_observe.len());
        let mut new_obd = Vec::with_capacity(all_observed.len());
        for outer_ev in &self.local_to_observe {
            for elem in all_to_observe.iter()
                                      .skip_while(|inner_ev| {
                                          compare_ev_unobsorve(outer_ev, *inner_ev)
                                      }) {
                new_to.push(elem.clone());
            }
            for elem in all_observed.iter()
                                    .skip_while(|inner_ev| {
                                        compare_ev_unobsorve(outer_ev, (**inner_ev).as_event())
                                    }) {
                new_obd.push(elem.clone());
            }
        }
        *all_to_observe = new_to;
        *all_observed = new_obd;
    }
}

impl<'parent, P> Iterator for EventIter<'parent, P>
    where P: MpvMarker + 'parent
{
    type Item = Result<Vec<Result<Event, Error>>, Error>;

    // Block until at least one event relevant to ANY iterator arrives, then
    // return the subset relevant to this iterator (or `NoAssociatedEvent` on a
    // spurious wakeup / events destined for other iterators only).
    fn next(&mut self) -> Option<Self::Item> {
        // Wait for a wakeup if nothing is queued, or if the last call found
        // nothing for us (avoids busy-looping on `NoAssociatedEvent`).
        let observed = self.all_observed.lock();
        if observed.is_empty() || self.last_no_associated_ev {
            mem::drop(observed);
            unsafe { (*self.notification).1.wait(&mut (*self.notification).0.lock()) };
        }

        let mut observed = self.all_observed.lock();
        let mut ret_events = vec![];
        if observed.is_empty() {
            // Drain mpv's internal queue (timeout 0 => non-blocking poll).
            // Keep events we observe; stash events other iterators observe.
            let all_to_observe = self.all_to_observe.lock();
            let o_iter = (*all_to_observe).clone();
            let mut last = false;
            'events: loop {
                let event = unsafe { &*mpv_wait_event(self.ctx, 0f64 as libc::c_double) };
                let ev_id = event.event_id;

                // `None` marks an empty queue; a second consecutive one ends the drain.
                if ev_id == MpvEventId::None || ev_id == MpvEventId::QueueOverflow {
                    if last {
                        break;
                    } else {
                        last = true;
                        continue;
                    }
                }

                for local_ob_ev_id in &self.local_to_observe {
                    if ev_id == local_ob_ev_id.as_id() {
                        ret_events.push(event.as_event());
                        continue 'events;
                    }
                }
                for ob_ev_id in &o_iter {
                    if ev_id == ob_ev_id.as_id() {
                        observed.push(event.as_inner_event());
                        continue 'events;
                    }
                }
            }
            if !observed.is_empty() {
                // Other iterators have pending events now: wake them.
                unsafe { (*self.notification).1.notify_all() };
                mem::drop(observed);
            }
        } else {
            // Consume previously stashed events addressed to this iterator.
            let mut index = vec![];
            for (i, event) in observed.iter().enumerate() {
                for o_e_id in &self.local_to_observe {
                    if event.event.as_id() == o_e_id.as_id() {
                        if o_e_id.as_id() == MpvEventId::PropertyChange {
                            // Property events only match on the same property name.
                            if let Event::PropertyChange(ref v_ev) = event.event {
                                if let Event::PropertyChange(ref v_ob) = *o_e_id {
                                    if v_ev.name == v_ob.name {
                                        index.push(i);
                                        ret_events.push(event.as_result());
                                    }
                                }
                            }
                        } else {
                            index.push(i);
                            ret_events.push(event.as_result());
                        }
                    }
                }
            }
            // `index` is ascending (from `enumerate`), so subtract the number of
            // elements already removed to keep positions valid.
            for (n, i) in index.iter().enumerate() {
                observed.remove(i - n);
            }
            if !observed.is_empty() {
                unsafe { (*self.notification).1.notify_all() };
                mem::drop(observed);
            }
        }

        if !ret_events.is_empty() {
            self.last_no_associated_ev = false;
            Some(Ok(ret_events))
        } else {
            self.last_no_associated_ev = true;
            Some(Err(Error::NoAssociatedEvent))
        }
    }
}

#[derive(Clone, Debug, PartialEq, Eq)]
#[allow(missing_docs)]
/// Represents the data of an `Event::LogMessage`.
pub struct LogMessage {
    pub prefix: String,
    pub level: String,
    pub text: String,
    pub log_level: MpvLogLevel,
}

impl LogMessage {
    // Copy a raw `mpv_event_log_message` into owned strings.
    // NOTE(review): `to_str().unwrap()` panics if mpv emits non-UTF-8 text.
    #[inline]
    fn from_raw(raw: *mut libc::c_void) -> LogMessage {
        let raw = unsafe { &mut *(raw as *mut MpvEventLogMessage) };
        LogMessage {
            prefix: unsafe { CStr::from_ptr(raw.prefix).to_str().unwrap().into() },
            level: unsafe { CStr::from_ptr(raw.level).to_str().unwrap().into() },
            text: unsafe { CStr::from_ptr(raw.text).to_str().unwrap().into() },
            log_level: raw.log_level,
        }
    }
}

impl MpvEventEndFile {
    // Copy the end-file payload out of the event's data pointer.
    #[inline]
    fn from_raw(raw: *mut libc::c_void) -> MpvEventEndFile {
        let raw = unsafe { &mut *(raw as *mut MpvEventEndFile) };
        MpvEventEndFile {
            reason: raw.reason,
            error: raw.error,
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(missing_docs)]
/// Represents the reason an `Event::EndFile` was fired.
// Discriminants mirror libmpv's `mpv_end_file_reason` (1 is unused upstream).
pub enum EndFileReason {
    Eof = 0,
    Stop = 2,
    Quit = 3,
    Error = 4,
    Redirect = 5,
}

#[derive(Clone, Debug, PartialEq)]
#[allow(missing_docs)]
/// Represents the data of an `Event::EndFile`. `error` is `Some` if `EndFileReason` is `Error`.
pub struct EndFile {
    pub reason: EndFileReason,
    pub error: Option<Error>,
}

impl EndFile {
    // Decode the raw reason code and optional error into the public form.
    #[inline]
    fn from_raw(raw: MpvEventEndFile) -> EndFile {
        EndFile {
            reason: match raw.reason {
                0 => EndFileReason::Eof,
                2 => EndFileReason::Stop,
                3 => EndFileReason::Quit,
                4 => EndFileReason::Error,
                5 => EndFileReason::Redirect,
                _ => unreachable!(),
            },
            error: {
                let err = mpv_err((), raw.error);
                if err.is_ok() {
                    None
                } else {
                    Some(err.unwrap_err())
                }
            },
        }
    }
}

#[derive(Clone, Debug, PartialEq)]
#[allow(missing_docs)]
/// Represents the data of an `Event::PropertyChange`. The `data` field is equal to the value of
/// the property.
pub struct Property {
    pub name: String,
    pub data: Data,
}

impl Property {
    // Build a `Property` from a raw `mpv_event_property` payload.
    // NOTE(review): panics on non-UTF-8 property names (`to_str().unwrap()`).
    #[inline]
    fn from_raw(raw: *mut libc::c_void) -> Property {
        let raw = unsafe { &mut *(raw as *mut MpvEventProperty) };
        Property {
            name: unsafe { CStr::from_ptr(raw.name).to_str().unwrap().into() },
            data: Data::from_raw(raw.format, raw.data),
        }
    }

    #[inline]
    /// Create a `Property` that is suitable for observing.
    /// Data is used to infer the format of the property, and the value is never used if supplied to
    /// a function of this crate.
    pub fn new(name: &str, data: Data) -> Property {
        Property {
            name: name.into(),
            data: data,
        }
    }
}

#[derive(Clone, Debug, PartialEq)]
/// Represents all possible error values returned by this crate.
pub enum Error {
    /// An internal mpv error.
    Mpv(MpvError),
    /// The core has already been initialized.
    /// This error is also handled by mpv, but results in a failed assertion.
    AlreadyInitialized,
    /// Calling `suspend` on an uninitialized core will deadlock.
    Uninitialized,
    /// All `suspend` calls have already been undone.
    AlreadyResumed,
    /// Some functions only accept absolute paths.
    ExpectedAbsolute,
    /// If a file was expected, but a directory was given.
    ExpectedFile,
    /// The parent was dropped before the clients
    ParentDropped,
    /// If an argument (like a percentage > 100) was out of bounds.
    OutOfBounds,
    /// If a command failed during a `loadfiles` call, contains index of failed command and `Error`.
    Loadfiles((usize, Box<Error>)),
    /// Events are not enabled for this `Mpv` instance.
    EventsDisabled,
    /// This event is already being observed by another `EventIter`.
    AlreadyObserved(Box<Event>),
    /// No `Event` associated with this `EventIter` was found, this means a spurious wakeup.
    NoAssociatedEvent,
    /// Used a `Data::OsdString` while writing.
    OsdStringWrite,
    /// Mpv returned a string that uses an unsupported codec. Inside are the raw bytes cast to u8.
    UnsupportedEncoding(Vec<u8>),
    /// Mpv returned null while creating the core.
    Null,
}

#[derive(Clone, Debug, PartialEq)]
#[allow(missing_docs)]
/// Represents data that can be sent to or retrieved from `Mpv`.
pub enum Data {
    String(String),
    OsdString(String),
    Flag(bool),
    Int64(i64),
    Double(f64),
    Node(MpvNode),
}

impl Data {
    #[inline]
    /// Create a `Data` from a supported value. Be careful about mistakenly using an isize when you
    /// want a float.
    pub fn new<T>(val: T) -> Data
        where T: Into<Data>
    {
        val.into()
    }

    // The raw mpv format tag corresponding to this variant.
    #[inline]
    fn format(&self) -> MpvFormat {
        match *self {
            Data::String(_) => MpvFormat::String,
            Data::OsdString(_) => MpvFormat::OsdString,
            Data::Flag(_) => MpvFormat::Flag,
            Data::Int64(_) => MpvFormat::Int64,
            Data::Double(_) => MpvFormat::Double,
            Data::Node(_) => MpvFormat::Node,
        }
    }

    // Read a value of the given format out of a raw data pointer. String
    // formats are handled elsewhere, hence `unreachable!()` for them here.
    // NOTE(review): the `Flag` arm dereferences the pointer as `i64`, but
    // client.h documents MPV_FORMAT_FLAG data as a C `int` -- on a 4-byte
    // write by mpv this reads 4 garbage bytes. Confirm against the bindings.
    #[inline]
    fn from_raw(fmt: MpvFormat, ptr: *mut libc::c_void) -> Data {
        match fmt {
            MpvFormat::Flag => Data::Flag(unsafe { *(ptr as *mut i64) } != 0),
            MpvFormat::Int64 => Data::Int64(unsafe { *(ptr as *mut i64) }),
            MpvFormat::Double => Data::Double(unsafe { *(ptr as *mut f64) }),
            // TODO: MpvFormat::Node => Data::Node(unsafe{ *(ptr as *mut MpvNode) }),
            _ => unreachable!(),
        }
    }
}

// NOTE(review): idiomatically these would be `impl From<T> for Data` (which
// yields `Into` via the blanket impl); kept as-is since callers rely on them.
impl Into<Data> for String {
    #[inline]
    fn into(self) -> Data {
        Data::String(self)
    }
}

impl Into<Data> for bool {
    #[inline]
    fn into(self) -> Data {
        Data::Flag(self)
    }
}

impl Into<Data> for isize {
    #[inline]
    fn into(self) -> Data {
        Data::Int64(self as i64)
    }
}

impl Into<Data> for f64 {
    #[inline]
    fn into(self) -> Data {
        Data::Double(self)
    }
}

impl Into<Data> for MpvNode {
    #[inline]
    fn into(self) -> Data {
        Data::Node(self)
    }
}

#[derive(Clone, Debug)]
/// Represents a command that can be executed by `Mpv`.
pub struct Command<'a> {
    // Command name as understood by mpv's input interface.
    name: &'a str,
    // Optional arguments, joined space-separated by `command`.
    args: Option<Vec<String>>,
}

impl<'a> Command<'a> {
    #[inline]
    /// Create a new `MpvCommand`.
    pub fn new(name: &'a str, args: Option<Vec<String>>) -> Command<'a> {
        Command {
            name: name,
            args: args,
        }
    }
}

#[derive(Clone, Debug)]
/// Represents data needed for `PlaylistOp::Loadfiles`.
pub struct File<'a> {
    path: &'a Path,
    state: FileState,
    options: Option<&'a str>,
}

impl<'a> File<'a> {
    #[inline]
    /// Create a new `File`.
    pub fn new(path: &'a Path, state: FileState, opts: Option<&'a str>) -> File<'a> {
        File {
            path: path,
            state: state,
            options: opts,
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Represents how a `File` is inserted into the playlist.
pub enum FileState {
    /// Replace the current track.
    Replace,
    /// Append to the current playlist.
    Append,
    /// If current playlist is empty: play, otherwise append to playlist.
    AppendPlay,
}

impl FileState {
    // The keyword mpv's `loadfile` command expects for this insertion mode.
    #[inline]
    fn val(&self) -> &str {
        match *self {
            FileState::Replace => "replace",
            FileState::Append => "append",
            FileState::AppendPlay => "append-play",
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Represents possible seek operations by `seek`.
pub enum Seek {
    /// Seek forward relatively from current position at runtime.
    /// This is less exact than `seek_abs`, see [mpv manual]
    /// (https://mpv.io/manual/master/#command-interface-
    /// [relative|absolute|absolute-percent|relative-percent|exact|keyframes]).
    RelativeForward(Duration),
    /// See `RelativeForward`.
    RelativeBackward(Duration),
    /// Seek to a given absolute time at runtime.
    Absolute(Duration),
    /// Seek to a given relative percent position at runtime.
    /// If `usize` is bigger than the remaining playtime, the next file is played.
    RelativePercent(usize),
    /// Seek to a given absolute percent position at runtime.
    AbsolutePercent(usize),
    /// Revert one previous `seek` invocation. If this is called twice, this
    /// reverts the previous revert seek.
    Revert,
    /// Mark the current position. The next `seek_revert` call will revert
    /// to the marked position.
    RevertMark,
    /// Play exactly one frame, and then pause. This does nothing with
    /// audio-only playback.
    Frame,
    /// Play exactly the last frame, and then pause. This does nothing with
    /// audio-only playback. See [this]
    /// (https://mpv.io/manual/master/#command-interface-frame-back-step)
    /// for performance issues.
    FrameBack,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Represents possible screenshot operations by `screenshot`.
pub enum Screenshot<'a> {
    /// "Save the video image, in its original resolution, and with subtitles.
    /// Some video outputs may still include the OSD in the output under certain circumstances.".
    Subtitles,
    /// "Take a screenshot and save it to a given file. The format of the file will be guessed by
    /// the extension (and --screenshot-format is ignored - the behaviour when the extension is
    /// missing or unknown is arbitrary). If the file already exists, it's overwritten. Like all
    /// input command parameters, the filename is subject to property expansion as described in
    /// Property Expansion.".
    SubtitlesFile(&'a Path),
    /// "Like subtitles, but typically without OSD or subtitles.
    /// The exact behaviour depends on the selected video output.".
    Video,
    /// See `screenshot_subtitles_to_file`.
    VideoFile(&'a Path),
    /// "Save the contents of the mpv window. Typically scaled, with OSD
    /// and subtitles. The exact behaviour depends on the selected video output, and if no support
    /// is available, this will act like video.".
    Window,
    /// See `screenshot_subtitles_to_file`.
    WindowFile(&'a Path),
}

#[derive(Clone, Debug)]
/// Represents operations on the playlist supported by `playlist`.
pub enum PlaylistOp<'a> {
    /// Play the next item of the current playlist.
    /// This does nothing if the current item is the last item.
    NextWeak,
    /// Play the next item of the current playlist.
    /// This terminates playback if the current item is the last item.
    NextForce,
    /// Play the previous item of the current playlist.
    /// This does nothing if the current item is the first item.
    PreviousWeak,
    /// Play the next item of the current playlist.
    /// This terminates playback if the current item is the first item.
    PreviousForce,
    /// Load any number of files with any playlist insertion behaviour,
    /// and any optional options that are set during playback of the specific item.
    Loadfiles(&'a [File<'a>]),
    /// Load the given playlist file. Replace current playlist.
    LoadlistReplace(&'a Path),
    /// Load the given playlist file. Append to current playlist.
    LoadlistAppend(&'a Path),
    /// Clear the current playlist, except the currently played item.
    Clear,
    /// Remove the currently selected playlist item.
    RemoveCurrent,
    /// Remove the item at position `usize`.
RemoveIndex(usize),
    /// Move item `usize` to the position of item `usize`.
    Move((usize, usize)),
    /// Shuffle the playlist.
    Shuffle,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Represents operations supported by `subtitle`.
pub enum SubOp<'a> {
    /// Add and select the subtitle immediately.
    /// The second argument is the title, third is the language.
    AddSelect(&'a Path, Option<&'a str>, Option<&'a str>),
    /// See `AddSelect`. "Don't select the subtitle.
    /// (Or in some special situations, let the default stream selection mechanism decide.)".
    AddAuto(&'a Path, Option<&'a str>, Option<&'a str>),
    /// See `AddSelect`. "Select the subtitle. If a subtitle with the same file name was
    /// already added, that one is selected, instead of loading a duplicate entry.
    /// (In this case, title/language are ignored, and if the was changed since it was loaded,
    /// these changes won't be reflected.)".
    AddCached(&'a Path, Option<&'a str>, Option<&'a str>),
    /// Remove the given subtitle track. If the id argument is missing, remove the current
    /// track. (Works on external subtitle files only.)
    Remove(Option<usize>),
    /// Reload the given subtitle tracks. If the id argument is missing, reload the current
    /// track. (Works on external subtitle files only.)
    Reload(Option<usize>),
    /// Change subtitle timing such, that the subtitle event after the next `isize` subtitle
    /// events is displayed. `isize` can be negative to step backwards.
    Step(isize),
    /// Seek to the next subtitle. This is similar to sub-step, except that it seeks video and
    /// audio instead of adjusting the subtitle delay.
    /// For embedded subtitles (like with matroska), this works only with subtitle events that
    /// have already been displayed, or are within a short prefetch range.
    SeekForward,
    /// See `SeekForward`.
    SeekBackward,
}

impl MpvError {
    // The raw C error code for this variant.
    #[inline]
    fn as_val(&self) -> libc::c_int {
        *self as libc::c_int
    }

    #[inline]
    /// Returns a string slice associated with the `MpvError`.
    // `mpv_error_string` returns a pointer to a static string, so borrowing
    // `&str` from it is sound for any lifetime.
    pub fn error_string(&self) -> &str {
        let raw = unsafe { mpv_error_string(self.as_val()) };
        unsafe { CStr::from_ptr(raw) }.to_str().unwrap()
    }
}

impl MpvFormat {
    // The raw C format tag for this variant.
    #[inline]
    fn as_val(self) -> libc::c_int {
        self as libc::c_int
    }
}

// TODO: more
/// Represents an mpv instance from which `Client`s can be spawned.
///
/// The mpv manual is very helpful with regards to confusion about syntax for commands,
/// however there is an effort to catch common mistakes that may result in unexpected behaviour.
/// See `command`.
///
/// # Panics
/// Any method on this struct may panic if any argument contains invalid utf-8.
pub struct Parent {
    // Owning handle; destroyed with `mpv_terminate_destroy` on drop.
    ctx: *mut MpvHandle,
    // Whether `init` has run; gates most methods.
    initialized: AtomicBool,
    // Balance of `suspend` vs `resume` calls.
    suspension_count: AtomicUsize,
    // Whether the event machinery below was set up at construction.
    check_events: bool,
    // Boxed wakeup pair shared with `event_callback`; freed in `drop_ev_iter_step`.
    ev_iter_notification: Option<*mut (Mutex<bool>, Condvar)>,
    ev_to_observe: Option<Mutex<Vec<Event>>>,
    ev_to_observe_properties: Option<Mutex<HashMap<String, usize>>>,
    ev_observed: Option<Mutex<Vec<InnerEvent>>>,
}

// TODO: more
/// Represents a client of a `Parent`.
///
/// # Panics
/// Any method on this struct may panic if any argument contains invalid utf-8.
pub struct Client<'parent> {
    // Non-owning view of the core; destroyed with `mpv_detach_destroy` on drop.
    ctx: *mut MpvHandle,
    check_events: bool,
    ev_iter_notification: Option<*mut (Mutex<bool>, Condvar)>,
    ev_to_observe: Option<Mutex<Vec<Event>>>,
    ev_observed: Option<Mutex<Vec<InnerEvent>>>,
    ev_to_observe_properties: Option<Mutex<HashMap<String, usize>>>,
    // Ties this client's lifetime to its spawning `Parent`.
    _marker: PhantomData<&'parent Parent>,
}

// NOTE(review): these assert that the raw handles are safe to share across
// threads; libmpv documents its handles as thread-safe, but verify that every
// field (not just `ctx`) upholds this.
unsafe impl Send for Parent {}
unsafe impl Sync for Parent {}
unsafe impl<'parent> Send for Client<'parent> {}
unsafe impl<'parent> Sync for Client<'parent> {}

#[doc(hidden)]
#[allow(missing_docs)]
/// Designed for internal use.
pub trait MpvMarker {
    // FIXME: Most of these can go once `Associated Items` lands
    fn initialized(&self) -> bool;
    fn ctx(&self) -> *mut MpvHandle;
    fn check_events(&self) -> bool;
    fn ev_iter_notification(&self) -> &Option<*mut (Mutex<bool>, Condvar)>;
    fn ev_to_observe(&self) -> &Option<Mutex<Vec<Event>>>;
    fn ev_to_observe_properties(&self) -> &Option<Mutex<HashMap<String, usize>>>;
    fn ev_observed(&self) -> &Option<Mutex<Vec<InnerEvent>>>;
    // Reclaim (and thereby free) the leaked notification box, if events were enabled.
    fn drop_ev_iter_step(&mut self) {
        if self.check_events() {
            unsafe {
                Box::from_raw(self.ev_iter_notification().unwrap());
            }
        }
    }
}

impl MpvMarker for Parent {
    #[inline]
    fn initialized(&self) -> bool {
        self.initialized.load(Ordering::Acquire)
    }
    #[inline]
    fn ctx(&self) -> *mut MpvHandle {
        self.ctx
    }
    #[inline]
    fn check_events(&self) -> bool {
        self.check_events
    }
    #[inline]
    fn ev_iter_notification(&self) -> &Option<*mut (Mutex<bool>, Condvar)> {
        &self.ev_iter_notification
    }
    #[inline]
    fn ev_to_observe(&self) -> &Option<Mutex<Vec<Event>>> {
        &self.ev_to_observe
    }
    #[inline]
    fn ev_to_observe_properties(&self) -> &Option<Mutex<HashMap<String, usize>>> {
        &self.ev_to_observe_properties
    }
    #[inline]
    fn ev_observed(&self) -> &Option<Mutex<Vec<InnerEvent>>> {
        &self.ev_observed
    }
}

impl<'parent> MpvMarker for Client<'parent> {
    // Clients can only be created from an initialized core, so this is constant.
    #[inline]
    fn initialized(&self) -> bool {
        true
    }
    #[inline]
    fn ctx(&self) -> *mut MpvHandle {
        self.ctx
    }
    #[inline]
    fn check_events(&self) -> bool {
        self.check_events
    }
    #[inline]
    fn ev_iter_notification(&self) -> &Option<*mut (Mutex<bool>, Condvar)> {
        &self.ev_iter_notification
    }
    #[inline]
    fn ev_to_observe(&self) -> &Option<Mutex<Vec<Event>>> {
        &self.ev_to_observe
    }
    #[inline]
    fn ev_to_observe_properties(&self) -> &Option<Mutex<HashMap<String, usize>>> {
        &self.ev_to_observe_properties
    }
    #[inline]
    fn ev_observed(&self) -> &Option<Mutex<Vec<InnerEvent>>> {
        &self.ev_observed
    }
}

impl Drop for Parent {
    // Tears down the whole core (and with it all clients).
    fn drop(&mut self) {
        self.drop_ev_iter_step();
        unsafe {
            mpv_terminate_destroy(self.ctx());
        }
    }
}

impl<'parent> Drop for Client<'parent>
{
    // Detaches only this client; the core keeps running.
    fn drop(&mut self) {
        self.drop_ev_iter_step();
        unsafe {
            mpv_detach_destroy(self.ctx());
        }
    }
}

impl<'parent> Parent {
    #[allow(mutex_atomic)]
    /// Create a new `Mpv` instance.
    /// To call any method except for `set_option` on this, it has to be initialized first.
    /// The default settings can be probed by running:
    ///
    ///```$ mpv --show-profile=libmpv```
    pub fn new(check_events: bool) -> Result<Parent, Error> {
        let ctx = unsafe { mpv_create() };
        if ctx == ptr::null_mut() {
            Err(Error::Null)
        } else {
            unsafe {
                // Disable deprecated events.
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::TracksChanged, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::TrackSwitched, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Pause, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Unpause, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ScriptInputDispatch, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::MetadataUpdate, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ChapterChange, 0)));
            }
            // With events enabled: leak the wakeup pair to get a stable raw
            // pointer for the C callback (reclaimed in `drop_ev_iter_step`).
            // Without: turn every remaining event off.
            let (ev_iter_notification, ev_to_observe, ev_to_observe_properties, ev_observed) =
                if check_events {
                    let ev_iter_notification = Box::into_raw(box (Mutex::new(false),
                                                                  Condvar::new()));
                    unsafe {
                        mpv_set_wakeup_callback(ctx,
                                                event_callback,
                                                mem::transmute::<*mut (Mutex<bool>, Condvar),
                                                                 *mut libc::c_void>
                                                    (ev_iter_notification));
                    }
                    (Some(ev_iter_notification),
                     Some(Mutex::new(Vec::with_capacity(10))),
                     Some(Mutex::new(HashMap::new())),
                     Some(Mutex::new(Vec::with_capacity(10))))
                } else {
                    unsafe {
                        // Disable remaining events
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::LogMessage, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::GetPropertyReply, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::SetPropertyReply, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::CommandReply, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::StartFile, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::EndFile, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::FileLoaded, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Idle, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ClientMessage, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::VideoReconfig, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::AudioReconfig, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Seek, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::PlaybackRestart, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::PropertyChange, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::QueueOverflow, 0)));
                    }
                    (None, None, None, None)
                };
            Ok(Parent {
                ctx: ctx,
                initialized: AtomicBool::new(false),
                suspension_count: AtomicUsize::new(0),
                check_events: check_events,
                ev_iter_notification: ev_iter_notification,
                ev_to_observe: ev_to_observe,
                ev_to_observe_properties: ev_to_observe_properties,
                ev_observed: ev_observed,
            })
        }
    }

    /// Create a client with `name`, that is connected to the core of `self`, but has an own queue
    /// for API events and such.
    // NOTE(review): unlike `new`, the handle returned by `mpv_create_client` is
    // not checked for null before use -- confirm whether that can happen here.
    pub fn new_client(&self, name: &str, check_events: bool) -> Result<Client, Error> {
        if self.initialized() {
            let ctx = unsafe {
                let name = CString::new(name).unwrap();
                mpv_create_client(self.ctx(), name.as_ptr())
            };
            unsafe {
                // Disable deprecated events.
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::TracksChanged, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::TrackSwitched, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Pause, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Unpause, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ScriptInputDispatch, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::MetadataUpdate, 0)));
                try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ChapterChange, 0)));
            }
            // Same event setup as `Parent::new`, but for this client's queue.
            let (ev_iter_notification, ev_to_observe, ev_to_observe_properties, ev_observed) =
                if check_events {
                    let ev_iter_notification = Box::into_raw(box (Mutex::new(false),
                                                                  Condvar::new()));
                    unsafe {
                        mpv_set_wakeup_callback(ctx,
                                                event_callback,
                                                mem::transmute::<*mut (Mutex<bool>, Condvar),
                                                                 *mut libc::c_void>
                                                    (ev_iter_notification));
                    }
                    (Some(ev_iter_notification),
                     Some(Mutex::new(Vec::with_capacity(10))),
                     Some(Mutex::new(HashMap::new())),
                     Some(Mutex::new(Vec::with_capacity(10))))
                } else {
                    unsafe {
                        // Disable remaining events
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::LogMessage, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::GetPropertyReply, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::SetPropertyReply, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::CommandReply, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::StartFile, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::EndFile, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::FileLoaded, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Idle, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ClientMessage, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::VideoReconfig, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::AudioReconfig, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Seek, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::PlaybackRestart, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::PropertyChange, 0)));
                        try!(mpv_err((), mpv_request_event(ctx, MpvEventId::QueueOverflow, 0)));
                    }
                    (None, None, None, None)
                };
            let instance = Client {
                ctx: ctx,
                check_events: check_events,
                ev_iter_notification: ev_iter_notification,
                ev_to_observe: ev_to_observe,
                ev_to_observe_properties: ev_to_observe_properties,
                ev_observed: ev_observed,
                _marker: PhantomData,
            };
            Ok(instance)
        } else {
            Err(Error::Uninitialized)
        }
    }

    /// Initialize the mpv core.
    // NOTE(review): the flag is stored (Release) before `mpv_initialize` runs,
    // so a concurrent caller can observe "initialized" while init is still in
    // flight or has failed -- verify this window is acceptable.
    pub fn init(&self) -> Result<(), Error> {
        if self.initialized() {
            Err(Error::AlreadyInitialized)
        } else {
            self.initialized.store(true, Ordering::Release);
            let err = unsafe { mpv_initialize(self.ctx()) };
            mpv_err((), err)
        }
    }

    #[allow(match_ref_pats)]
    /// Set an option. This only works before core initialization.
    // NOTE(review): the `String` arm calls `mpv_set_property` while the fallback
    // arm calls `mpv_set_option` -- confirm the property call is intended here.
    // Also, client.h documents MPV_FORMAT_STRING data as `char**`, but this
    // passes the `char*` itself; verify against the raw bindings.
    pub fn set_option(&self, opt: Property) -> Result<(), Error> {
        if self.initialized() {
            return Err(Error::AlreadyInitialized);
        }
        let data = &mut opt.data.clone();
        let name = CString::new(opt.name).unwrap().into_raw();
        let format = data.format().as_val();
        let ret = match data {
            &mut Data::OsdString(_) => Err(Error::OsdStringWrite),
            &mut Data::String(ref v) => {
                // `into_raw`/`from_raw` round-trips keep the CStrings alive
                // across the FFI call and then reclaim them for dropping.
                let data = CString::new(v.as_bytes()).unwrap().into_raw();
                let ret = mpv_err((), unsafe {
                    mpv_set_property(self.ctx(),
                                     name,
                                     format,
                                     mem::transmute::<*mut libc::c_char,
                                                      *mut libc::c_void>(data))
                });
                unsafe {
                    CString::from_raw(data);
                };
                ret
            }
            _ => {
                let data = data_ptr!(data);
                mpv_err((), unsafe { mpv_set_option(self.ctx(), name, format, data) })
            }
        };
        unsafe { CString::from_raw(name) };
        ret
    }

    /// Load a configuration file into the `Mpv` instance.
    /// The path has to be absolute.
    /// This should not be done during runtime.
    /// This overrides previously set options and properties.
pub fn load_config(&self, path: &Path) -> Result<(), Error> { if path.is_relative() { Err(Error::ExpectedAbsolute) } else if path.is_dir() { Err(Error::ExpectedFile) } else { let file = CString::new(path.to_str().unwrap()).unwrap().into_raw(); let ret = mpv_err((), unsafe { mpv_load_config_file(self.ctx(), file) }); unsafe { CString::from_raw(file) }; ret } } /// Suspend the playback thread, or freeze the core. If the core is suspended, only /// client API calls will be accepted, ie. input, redrawing etc. will be suspended. /// For the thread to resume there has to be one `resume` call for each `suspend` call. pub fn suspend(&self) -> Result<(), Error> { if self.initialized() { self.suspension_count.fetch_add(1, Ordering::AcqRel); Ok(unsafe { mpv_suspend(self.ctx()) }) } else { Err(Error::Uninitialized) } } /// See `suspend`. pub fn resume(&self) -> Result<(), Error> { if self.initialized() { if self.suspension_count.load(Ordering::Acquire) == 0 { Err(Error::AlreadyResumed) } else { self.suspension_count.fetch_sub(1, Ordering::AcqRel); Ok(unsafe { mpv_resume(self.ctx()) }) } } else { Err(Error::Uninitialized) } } } impl<'parent> Client<'parent> { /// Returns the name associated with the instance, useful for debugging. pub fn name(&self) -> &str { unsafe { CStr::from_ptr(mpv_client_name(self.ctx())).to_str().unwrap() } } } #[allow(missing_docs)] /// Functions that an abstraction of libmpv should cover. 
pub trait MpvInstance<'parent, P> where P: MpvMarker + 'parent { fn enable_event(&self, e: Event) -> Result<(), Error>; fn disable_event(&self, e: Event) -> Result<(), Error>; fn observe_all(&self, events: Vec<Event>) -> Result<EventIter<P>, Error>; unsafe fn command(&self, cmd: Command) -> Result<(), Error>; fn set_property(&self, opt: Property) -> Result<(), Error>; fn get_property(&self, prop: Property) -> Result<Property, Error>; fn seek(&self, seek: Seek) -> Result<(), Error>; fn screenshot(&self, st: Screenshot) -> Result<(), Error>; fn playlist(&self, op: PlaylistOp) -> Result<(), Error>; fn cycle(&self, property: &str, up: bool) -> Result<(), Error>; fn multiply(&self, property: &str, factor: usize) -> Result<(), Error>; fn subtitle(&self, op: SubOp) -> Result<(), Error>; fn pause(&self) -> Result<(), Error>; fn unpause(&self) -> Result<(), Error>; } impl<'parent, T> MpvInstance<'parent, T> for T where T: MpvMarker + 'parent { /// Enable a given `Event`. Note that any event of `Event` is enabled by default, /// except for `Event::Tick`. fn enable_event(&self, e: Event) -> Result<(), Error> { if self.check_events() { mpv_err((), unsafe { mpv_request_event(self.ctx(), e.as_id(), 1) }) } else { Err(Error::EventsDisabled) } } /// Disable a given `Event`. fn disable_event(&self, e: Event) -> Result<(), Error> { if self.check_events() { mpv_err((), unsafe { mpv_request_event(self.ctx(), e.as_id(), 0) }) } else { Err(Error::EventsDisabled) } } /// Observe given `Event`s. /// Returns an `EventIter`, on which `next` can be called that blocks while waiting for new /// `Event`s. 
    fn observe_all(&self, events: Vec<Event>) -> Result<EventIter<T>, Error> {
        if self.check_events() {
            // `ev_to_observe` is locked before `ev_to_observe_properties`;
            // keep this order consistent with other lock sites to avoid deadlock.
            let mut observe = self.ev_to_observe().as_ref().unwrap().lock();
            let mut properties = self.ev_to_observe_properties().as_ref().unwrap().lock();
            let mut ids = Vec::with_capacity(events.len());
            let mut evs = Vec::with_capacity(events.len());
            let mut props = Vec::with_capacity(events.len());
            // NOTE(review): `ids` is filled but never read afterwards — looks
            // like dead state.
            for elem in &events {
                if let Event::PropertyChange(ref v) = *elem {
                    // Property observations are keyed by property name, not by
                    // event id, since many properties share one event id.
                    if properties.contains_key(&v.name) {
                        return Err(Error::AlreadyObserved(box elem.clone()));
                    } else {
                        props.push(v);
                        ids.push(elem.as_id());
                        evs.push(elem.clone());
                        continue;
                    }
                }
                // Non-property events: reject duplicates by event id.
                for id in &(*observe) {
                    if elem.as_id() == id.as_id() {
                        return Err(Error::AlreadyObserved(box elem.clone()));
                    }
                }
                ids.push(elem.as_id());
                evs.push(elem.clone());
            }
            observe.extend(evs.clone());
            for elem in props {
                // The current map size doubles as the next free observation id.
                let id = properties.len();
                unsafe {
                    let name = CString::new(elem.name.clone()).unwrap();
                    try!(mpv_err((),
                                 mpv_observe_property(self.ctx(),
                                                      id as libc::uint64_t,
                                                      name.as_ptr(),
                                                      elem.data.format() as libc::c_int)))
                }
                properties.insert(elem.name.clone(), id);
            }
            Ok(EventIter {
                ctx: self.ctx(),
                notification: self.ev_iter_notification().unwrap(),
                all_to_observe: self.ev_to_observe().as_ref().unwrap(),
                all_to_observe_properties: self.ev_to_observe_properties().as_ref().unwrap(),
                local_to_observe: evs,
                all_observed: self.ev_observed().as_ref().unwrap(),
                last_no_associated_ev: false,
                _marker: PhantomData,
            })
        } else {
            Err(Error::EventsDisabled)
        }
    }

    /// Send a command to the `Mpv` instance. This uses `mpv_command_string` internally,
    /// so that the syntax is the same as described in the [manual for the input.conf]
    /// (https://mpv.io/manual/master/#list-of-input-commands). It is advised to use the specific
    /// method for each command, because the specific functions may check for
    /// common errors and are generally type checked (enums to specify operations).
    ///
    /// # Safety
    /// This method is unsafe because the player may quit via the quit command.
    unsafe fn command(&self, cmd: Command) -> Result<(), Error> {
        if cmd.args.is_none() {
            let args = CString::new(cmd.name).unwrap();
            mpv_err((), mpv_command_string(self.ctx(), args.as_ptr()))
        } else {
            // Join all arguments, space separated, onto the command name.
            let mut str = String::new();
            for elem in cmd.args.unwrap() {
                str.push_str(&format!(" {}", elem));
            }
            let args = CString::new(format!("{}{}", cmd.name, str)).unwrap();
            mpv_err((), mpv_command_string(self.ctx(), args.as_ptr()))
        }
    }

    #[allow(match_ref_pats)]
    /// Set the value of a property.
    fn set_property(&self, opt: Property) -> Result<(), Error> {
        // Clone so a mutable pointer into the payload can be handed to mpv.
        let data = &mut opt.data.clone();
        let format = data.format().as_val();
        let name = CString::new(opt.name).unwrap().into_raw();
        let ret = match data {
            // OSD strings cannot be written, only read.
            &mut Data::OsdString(_) => Err(Error::OsdStringWrite),
            &mut Data::String(ref v) => {
                // NOTE(review): mpv's docs describe MPV_FORMAT_STRING data as
                // `char **`; this passes the `char *` directly — confirm
                // against client.h.
                let data = CString::new(v.as_bytes()).unwrap().into_raw();
                let ret = mpv_err((), unsafe {
                    mpv_set_property(self.ctx(),
                                     name,
                                     format,
                                     mem::transmute::<*mut libc::c_char,
                                                      *mut libc::c_void>(data))
                });
                // Reclaim the C string so it is freed.
                unsafe {
                    CString::from_raw(data);
                };
                ret
            }
            _ => {
                // Scalar payloads: point mpv directly at the cloned value.
                let data = data_ptr!(data);
                mpv_err((), unsafe { mpv_set_property(self.ctx(), name, format, data) })
            }
        };
        unsafe { CString::from_raw(name) };
        ret
    }

    #[allow(match_ref_pats)]
    /// Get the value of a property.
fn get_property(&self, prop: Property) -> Result<Property, Error> { Ok(Property::new(&prop.name, { let data = &mut prop.data.clone(); let format = data.format(); match data { &mut Data::String(_) | &mut Data::OsdString(_) => { println!("___ENTERING DANGER ZONE___"); let ptr = CString::new("").unwrap().into_raw(); let err = mpv_err((), unsafe { let name = CString::new(prop.name.clone()).unwrap(); mpv_get_property(self.ctx(), name.as_ptr(), format.as_val(), mem::transmute::<*mut libc::c_char, *mut libc::c_void>(ptr)) }); if err.is_err() { println!("___LEAVING DANGER ZONE___"); return Err(err.unwrap_err()); } else { let ptr = unsafe { CString::from_raw(ptr) }; let bytes = ptr.as_bytes(); let data = { encoding::decode(bytes, encoding::DecoderTrap::Strict, encoding::all::ASCII) .0 .or_else(|_| Err(Error::UnsupportedEncoding(Vec::from(bytes)))) }; // It should be this println!("ref: {:?}", "トゥッティ!".as_bytes()); // But we got this println!("got: {:?}", bytes); // Which is this in utf-8 println!("ldc: {}", String::from_utf8_lossy(bytes).into_owned()); // This is what the OsString is capable of (protip: nothing) use std::ffi::OsStr; use std::os::unix::ffi::OsStrExt; println!("OsS: {:?}", OsStr::from_bytes(bytes)); // And this in the guessed encoding println!("gue: {:?}", data); println!("___LEAVING DANGER ZONE___"); match prop.data { Data::String(_) => Data::String(data.unwrap()), Data::OsdString(_) => Data::OsdString(data.unwrap()), _ => unreachable!(), } } } _ => { let ptr = unsafe { libc::malloc(mem::size_of::<Data>() as libc::size_t) as *mut libc::c_void }; let err = mpv_err((), unsafe { let name = CString::new(prop.name.clone()).unwrap(); mpv_get_property(self.ctx(), name.as_ptr(), format.as_val(), ptr) }); if err.is_err() { return Err(err.unwrap_err()); } else { Data::from_raw(format, ptr) } } } })) } // --- Convenience command functions --- // /// Seek to a position as defined by `Seek`. 
    fn seek(&self, seek: Seek) -> Result<(), Error> {
        // Every arm forwards to the generic `command`; the string arguments
        // mirror the mpv input-command syntax.
        match seek {
            Seek::RelativeForward(d) => unsafe {
                self.command(Command::new("seek",
                                          Some(vec![format!("{}", d.as_secs()),
                                                    "relative".into()])))
            },
            Seek::RelativeBackward(d) => unsafe {
                // A negative offset seeks backwards.
                self.command(Command::new("seek",
                                          Some(vec![format!("-{}", d.as_secs()),
                                                    "relative".into()])))
            },
            Seek::Absolute(d) => unsafe {
                self.command(Command::new("seek",
                                          Some(vec![format!("{}", d.as_secs()),
                                                    "absolute".into()])))
            },
            Seek::RelativePercent(p) => {
                if p > 100 {
                    // This is actually allowed in libmpv (seek to end),
                    // but it's confusing and may be an indicator of bugs.
                    Err(Error::OutOfBounds)
                } else {
                    unsafe {
                        self.command(Command::new("seek",
                                                  Some(vec![format!("{}", p),
                                                            "relative-percent".into()])))
                    }
                }
            }
            Seek::AbsolutePercent(p) => {
                if p > 100 {
                    // See `Seek::RelativePercent` above.
                    Err(Error::OutOfBounds)
                } else {
                    unsafe {
                        self.command(Command::new("seek",
                                                  Some(vec![format!("{}", p),
                                                            "absolute-percent".into()])))
                    }
                }
            }
            Seek::Revert => unsafe { self.command(Command::new("revert-seek", None)) },
            Seek::RevertMark => unsafe {
                self.command(Command::new("revert-seek", Some(vec!["mark".into()])))
            },
            Seek::Frame => unsafe { self.command(Command::new("frame-step", None)) },
            Seek::FrameBack => unsafe {
                self.command(Command::new("frame-back-step", None))
            },
        }
    }

    /// Take a screenshot as defined by `Screenshot`.
    fn screenshot(&self, st: Screenshot) -> Result<(), Error> {
        match st {
            Screenshot::Subtitles => unsafe {
                self.command(Command::new("screenshot", Some(vec!["subtitles".into()])))
            },
            Screenshot::SubtitlesFile(ref p) => unsafe {
                self.command(Command::new("screenshot",
                                          Some(vec![p.to_str().unwrap().into(),
                                                    "subtitles".into()])))
            },
            Screenshot::Video => unsafe {
                self.command(Command::new("screenshot", Some(vec!["video".into()])))
            },
            Screenshot::VideoFile(ref p) => unsafe {
                self.command(Command::new("screenshot",
                                          Some(vec![p.to_str().unwrap().into(),
                                                    "video".into()])))
            },
            Screenshot::Window => unsafe {
                self.command(Command::new("screenshot", Some(vec!["window".into()])))
            },
            Screenshot::WindowFile(ref p) => unsafe {
                self.command(Command::new("screenshot",
                                          Some(vec![p.to_str().unwrap().into(),
                                                    "window".into()])))
            },
        }
    }

    /// Execute an operation on the playlist as defined by `PlaylistOp`
    fn playlist(&self, op: PlaylistOp) -> Result<(), Error> {
        match op {
            PlaylistOp::NextWeak => unsafe {
                self.command(Command::new("playlist-next", Some(vec!["weak".into()])))
            },
            PlaylistOp::NextForce => unsafe {
                self.command(Command::new("playlist-next", Some(vec!["force".into()])))
            },
            PlaylistOp::PreviousWeak => unsafe {
                self.command(Command::new("playlist-previous", Some(vec!["weak".into()])))
            },
            PlaylistOp::PreviousForce => unsafe {
                self.command(Command::new("playlist-previous", Some(vec!["force".into()])))
            },
            // Paths are quoted so that whitespace survives the space-joined
            // command string built by `command`.
            PlaylistOp::LoadlistReplace(p) => unsafe {
                self.command(Command::new("loadlist",
                                          Some(vec![format!("\"{}\"", p.to_str().unwrap()),
                                                    "replace".into()])))
            },
            PlaylistOp::LoadlistAppend(p) => unsafe {
                self.command(Command::new("loadlist",
                                          Some(vec![format!("\"{}\"", p.to_str().unwrap()),
                                                    "append".into()])))
            },
            PlaylistOp::Clear => unsafe {
                self.command(Command::new("playlist-clear", None))
            },
            PlaylistOp::RemoveCurrent => unsafe {
                self.command(Command::new("playlist-remove", Some(vec!["current".into()])))
            },
            PlaylistOp::RemoveIndex(i) => unsafe {
                self.command(Command::new("playlist-remove", Some(vec![format!("{}", i)])))
            },
            // NOTE(review): arguments are passed `new` then `old` — confirm
            // this matches `playlist-move <index1> <index2>` semantics.
            PlaylistOp::Move((old, new)) => unsafe {
                self.command(Command::new("playlist-move",
                                          Some(vec![format!("{}", new),
                                                    format!("{}", old)])))
            },
            PlaylistOp::Shuffle => unsafe {
                self.command(Command::new("playlist-shuffle", None))
            },
            PlaylistOp::Loadfiles(lfiles) => {
                for (i, elem) in lfiles.iter().enumerate() {
                    let ret = unsafe {
                        self.command(Command {
                            name: "loadfile",
                            args: Some(match elem.options {
                                Some(v) => {
                                    vec![format!("\"{}\"",
                                                 elem.path
                                                     .to_str()
                                                     .unwrap()),
                                         elem.state.val().into(),
                                         v.into()]
                                }
                                None => {
                                    vec![format!("\"{}\"",
                                                 elem.path
                                                     .to_str()
                                                     .unwrap()),
                                         elem.state.val().into(),
                                         "".into()]
                                }
                            }),
                        })
                    };
                    if ret.is_err() {
                        // Report which file failed along with the underlying error.
                        return Err(Error::Loadfiles((i, box ret.unwrap_err())));
                    }
                }
                Ok(())
            }
        }
    }

    /// Cycle through a given property. `up` specifies direction. On
    /// overflow, set the property back to the minimum, on underflow set it to the maximum.
    fn cycle(&self, property: &str, up: bool) -> Result<(), Error> {
        unsafe {
            self.command(Command::new("cycle",
                                      Some(vec![property.into(),
                                                if up {
                                                    "up"
                                                } else {
                                                    "down"
                                                }
                                                .into()])))
        }
    }

    /// Multiply any property with any positive factor.
    fn multiply(&self, property: &str, factor: usize) -> Result<(), Error> {
        unsafe {
            self.command(Command::new("multiply",
                                      Some(vec![property.into(), format!("{}", factor)])))
        }
    }

    /// Execute an operation as defined by `SubOp`.
    fn subtitle(&self, op: SubOp) -> Result<(), Error> {
        match op {
            // The second command argument is "<mode>[ title][ lang]": title and
            // language are appended only when present.
            SubOp::AddSelect(p, t, l) => unsafe {
                self.command(Command::new("sub-add",
                                          Some(vec![format!("\"{}\"", p.to_str().unwrap()),
                                                    format!("select{}{}",
                                                            if t.is_some() {
                                                                format!(" {}", t.unwrap())
                                                            } else {
                                                                "".into()
                                                            },
                                                            if l.is_some() {
                                                                format!(" {}", l.unwrap())
                                                            } else {
                                                                "".into()
                                                            })])))
            },
            SubOp::AddAuto(p, t, l) => unsafe {
                self.command(Command::new("sub-add",
                                          Some(vec![format!("\"{}\"", p.to_str().unwrap()),
                                                    format!("auto{}{}",
                                                            if t.is_some() {
                                                                format!(" {}", t.unwrap())
                                                            } else {
                                                                "".into()
                                                            },
                                                            if l.is_some() {
                                                                format!(" {}", l.unwrap())
                                                            } else {
                                                                "".into()
                                                            })])))
            },
            SubOp::AddCached(p, t, l) => unsafe {
                self.command(Command::new("sub-add",
                                          Some(vec![format!("\"{}\"", p.to_str().unwrap()),
                                                    format!("cached{}{}",
                                                            if t.is_some() {
                                                                format!(" {}", t.unwrap())
                                                            } else {
                                                                "".into()
                                                            },
                                                            if l.is_some() {
                                                                format!(" {}", l.unwrap())
                                                            } else {
                                                                "".into()
                                                            })])))
            },
            // Without an id mpv operates on the current track.
            SubOp::Remove(i) => unsafe {
                self.command(Command::new("sub-remove",
                                          if i.is_some() {
                                              Some(vec![format!("{}", i.unwrap())])
                                          } else {
                                              None
                                          }))
            },
            SubOp::Reload(i) => unsafe {
                self.command(Command::new("sub-reload",
                                          if i.is_some() {
                                              Some(vec![format!("{}", i.unwrap())])
                                          } else {
                                              None
                                          }))
            },
            SubOp::Step(i) => unsafe {
                self.command(Command::new("sub-step", Some(vec![format!("{}", i)])))
            },
            SubOp::SeekForward => unsafe {
                self.command(Command::new("sub-seek", Some(vec!["1".into()])))
            },
            SubOp::SeekBackward => unsafe {
                self.command(Command::new("sub-seek", Some(vec!["-1".into()])))
            },
        }
    }

    // --- Convenience property functions --- //


    /// Pause playback at runtime.
    fn pause(&self) -> Result<(), Error> {
        self.set_property(Property::new("pause", Data::Flag(true)))
    }

    /// Unpause playback at runtime.
    fn unpause(&self) -> Result<(), Error> {
        self.set_property(Property::new("pause", Data::Flag(false)))
    }
}
Avoid potential deadlock
// Copyright (C) 2016 ParadoxSpiral
//
// This file is part of mpv-rs.
// // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA #![allow(unknown_lints)] use libc; use encoding; use parking_lot::{Condvar, Mutex}; use enum_primitive::FromPrimitive; use super::raw::*; use super::raw::prototype::*; use std::boxed::Box; use std::collections::HashMap; use std::marker::PhantomData; use std::mem; use std::path::Path; use std::ptr; use std::ffi::{CStr, CString}; use std::ops::Drop; use std::time::Duration; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; // Get the inner data of `Data`, and transmute it to a value that the API understands. macro_rules! 
data_ptr {
    ($data:ident) => (
        unsafe {
            #[allow(match_ref_pats)]
            match $data {
                &mut Data::Flag(ref mut v) =>
                    mem::transmute::<*mut bool, *mut libc::c_void>(v),
                &mut Data::Int64(ref mut v) =>
                    mem::transmute::<*mut libc::int64_t, *mut libc::c_void>(v),
                &mut Data::Double(ref mut v) =>
                    mem::transmute::<*mut libc::c_double, *mut libc::c_void>(v),
                &mut Data::Node(ref mut v) =>
                    mem::transmute::<*mut MpvNode, *mut libc::c_void>(v),
                // String payloads are handled separately by the callers.
                _ => unreachable!(),
            }
        }
    )
}

// Translate an mpv return code into a `Result`: `0` means success, anything
// else is mapped onto the corresponding `MpvError`.
fn mpv_err<T>(ret: T, v: libc::c_int) -> Result<T, Error> {
    if v == 0 {
        Ok(ret)
    } else {
        Err(Error::Mpv(MpvError::from_i32(v).unwrap()))
    }
}

// Presumably registered as mpv's wakeup callback: `d` points at the
// `(Mutex, Condvar)` pair a blocked `EventIter` waits on.
extern "C" fn event_callback(d: *mut libc::c_void) {
    unsafe {
        let data = mem::transmute::<*mut libc::c_void, *mut (Mutex<bool>, Condvar)>(d);
        (*data).1.notify_one();
    }
}

#[doc(hidden)]
#[derive(Clone, Debug, PartialEq)]
/// Designed for internal use.
pub struct InnerEvent {
    // The decoded event.
    event: Event,
    // The error the raw event carried, if any.
    err: Option<Error>,
}

impl InnerEvent {
    #[inline]
    // Convert into a `Result`, preferring the stored error over the event.
    fn as_result(&self) -> Result<Event, Error> {
        if self.err.is_some() {
            Err(self.err.clone().unwrap())
        } else {
            Ok(self.event.clone())
        }
    }
    #[inline]
    fn as_event(&self) -> &Event {
        &self.event
    }
}

/// Represents an event returned by `EventIter`.
#[derive(Clone, Debug, PartialEq)]
#[allow(missing_docs)]
pub enum Event {
    LogMessage(Option<LogMessage>),
    StartFile,
    EndFile(Option<EndFile>),
    FileLoaded,
    Idle,
    Tick,
    VideoReconfig,
    AudioReconfig,
    Seek,
    PlaybackRestart,
    PropertyChange(Property),
}

impl Event {
    #[inline]
    // Map this high-level event onto the corresponding raw mpv event id.
    fn as_id(&self) -> MpvEventId {
        match *self {
            Event::LogMessage(_) => MpvEventId::LogMessage,
            Event::StartFile => MpvEventId::StartFile,
            Event::EndFile(_) => MpvEventId::EndFile,
            Event::FileLoaded => MpvEventId::FileLoaded,
            Event::Idle => MpvEventId::Idle,
            Event::Tick => MpvEventId::Tick,
            Event::VideoReconfig => MpvEventId::VideoReconfig,
            Event::AudioReconfig => MpvEventId::AudioReconfig,
            Event::Seek => MpvEventId::Seek,
            Event::PlaybackRestart => MpvEventId::PlaybackRestart,
            Event::PropertyChange(_) => MpvEventId::PropertyChange,
        }
    }
}

impl MpvEvent {
    #[inline]
    // Decode a raw event, failing early if it carries an error code.
    fn as_event(&self) -> Result<Event, Error> {
        try!(mpv_err((), self.error));
        Ok(match self.event_id {
            MpvEventId::LogMessage => Event::LogMessage(Some(LogMessage::from_raw(self.data))),
            MpvEventId::StartFile => Event::StartFile,
            MpvEventId::EndFile => {
                Event::EndFile(Some(EndFile::from_raw(MpvEventEndFile::from_raw(self.data))))
            }
            MpvEventId::FileLoaded => Event::FileLoaded,
            MpvEventId::Idle => Event::Idle,
            MpvEventId::Tick => Event::Tick,
            MpvEventId::VideoReconfig => Event::VideoReconfig,
            MpvEventId::AudioReconfig => Event::AudioReconfig,
            MpvEventId::Seek => Event::Seek,
            MpvEventId::PlaybackRestart => Event::PlaybackRestart,
            MpvEventId::PropertyChange => Event::PropertyChange(Property::from_raw(self.data)),
            _ => unreachable!(),
        })
    }
    #[inline]
    // Like `as_event`, but keeps a carried error alongside the decoded event
    // instead of failing.
    fn as_inner_event(&self) -> InnerEvent {
        InnerEvent {
            event: match self.event_id {
                MpvEventId::LogMessage => Event::LogMessage(Some(LogMessage::from_raw(self.data))),
                MpvEventId::StartFile => Event::StartFile,
                MpvEventId::EndFile => {
                    Event::EndFile(Some(EndFile::from_raw(MpvEventEndFile::from_raw(self.data))))
                }
                MpvEventId::FileLoaded => Event::FileLoaded,
                MpvEventId::Idle => Event::Idle,
                MpvEventId::Tick => Event::Tick,
                MpvEventId::VideoReconfig => Event::VideoReconfig,
                MpvEventId::AudioReconfig => Event::AudioReconfig,
                MpvEventId::Seek => Event::Seek,
                MpvEventId::PlaybackRestart => Event::PlaybackRestart,
                MpvEventId::PropertyChange => Event::PropertyChange(Property::from_raw(self.data)),
                _ => unreachable!(),
            },
            err: {
                let err = mpv_err((), self.error);
                if err.is_err() {
                    Some(err.unwrap_err())
                } else {
                    None
                }
            },
        }
    }
}

/// Represents a blocking iter over some observed events of an mpv instance.
/// `next` will never return `None`, instead it will return `Error::NoAssociatedEvent`. This is done
/// so that the iterator is endless. Once the `EventIter` is dropped, it's `Event`s are removed from
/// the "to be observed" queue, therefore new `Event` invocations won't be observed.
pub struct EventIter<'parent, P>
    where P: MpvMarker + 'parent
{
    ctx: *mut MpvHandle,
    // Shared `(Mutex, Condvar)` pair that `event_callback` signals.
    notification: *mut (Mutex<bool>, Condvar),
    // Events any iterator of this instance observes.
    all_to_observe: &'parent Mutex<Vec<Event>>,
    // Property name -> mpv observation id.
    all_to_observe_properties: &'parent Mutex<HashMap<String, usize>>,
    // Events this particular iterator observes.
    local_to_observe: Vec<Event>,
    // Events drained from mpv but not yet claimed by any iterator.
    all_observed: &'parent Mutex<Vec<InnerEvent>>,
    last_no_associated_ev: bool,
    _marker: PhantomData<&'parent P>,
}

impl<'parent, P> Drop for EventIter<'parent, P>
    where P: MpvMarker + 'parent
{
    fn drop(&mut self) {
        let mut all_to_observe = self.all_to_observe.lock();
        let mut all_observed = self.all_observed.lock();
        let mut all_to_observe_properties = self.all_to_observe_properties.lock();
        // Returns true when the events match; property events compare (and
        // unobserve on the mpv side) by name, all other events by id.
        let mut compare_ev_unobsorve = |outer_ev: &Event, inner_ev: &Event| -> bool {
            if let Event::PropertyChange(ref outer_prop) = *outer_ev {
                if let Event::PropertyChange(ref inner_prop) = *inner_ev {
                    if outer_prop.name == inner_prop.name {
                        unsafe {
                            mpv_unobserve_property(self.ctx,
                                                   *all_to_observe_properties.get(
                                                       &outer_prop.name).unwrap()
                                                   as libc::uint64_t);
                        }
                        all_to_observe_properties.remove(&outer_prop.name);
                        return true;
                    }
                }
            } else if outer_ev.as_id() == inner_ev.as_id() {
                return true;
            }
            false
        };
        // Rebuild the shared queues without this iterator's events.
        // NOTE(review): `skip_while` only drops a *leading* run of matching
        // elements, and the outer loop appends once per local event, so
        // removal looks incomplete (and duplicating) when several events are
        // observed — confirm whether `filter` semantics were intended.
        let mut new_to =
            Vec::with_capacity(all_to_observe.len() - self.local_to_observe.len());
        let mut new_obd = Vec::with_capacity(all_observed.len());
        for outer_ev in &self.local_to_observe {
            for elem in all_to_observe.iter()
                .skip_while(|inner_ev| {
                    compare_ev_unobsorve(outer_ev, *inner_ev)
                }) {
                new_to.push(elem.clone());
            }
            for elem in all_observed.iter()
                .skip_while(|inner_ev| {
                    compare_ev_unobsorve(outer_ev, (**inner_ev).as_event())
                }) {
                new_obd.push(elem.clone());
            }
        }
        *all_to_observe = new_to;
        *all_observed = new_obd;
    }
}

impl<'parent, P> Iterator for EventIter<'parent, P>
    where P: MpvMarker + 'parent
{
    type Item = Result<Vec<Result<Event, Error>>, Error>;
    fn next(&mut self) -> Option<Self::Item> {
        let mut observed = self.all_observed.lock();
        if observed.is_empty() || self.last_no_associated_ev {
            // Nothing pending: block until the wakeup callback signals that
            // new events may be available.
            mem::drop(observed);
            unsafe { (*self.notification).1.wait(&mut (*self.notification).0.lock()) };
            observed = self.all_observed.lock();
        }
        let mut ret_events = vec![];
        if observed.is_empty() {
            // Drain mpv's internal queue ourselves, keeping events we observe
            // and parking events destined for other iterators in `observed`.
            let all_to_observe = self.all_to_observe.lock();
            let o_iter = (*all_to_observe).clone();
            let mut last = false;
            'events: loop {
                let event = unsafe { &*mpv_wait_event(self.ctx, 0f64 as libc::c_double) };
                let ev_id = event.event_id;
                if ev_id == MpvEventId::None || ev_id == MpvEventId::QueueOverflow {
                    // Stop only after seeing an empty queue twice in a row.
                    if last {
                        break;
                    } else {
                        last = true;
                        continue;
                    }
                }
                for local_ob_ev_id in &self.local_to_observe {
                    if ev_id == local_ob_ev_id.as_id() {
                        ret_events.push(event.as_event());
                        continue 'events;
                    }
                }
                for ob_ev_id in &o_iter {
                    if ev_id == ob_ev_id.as_id() {
                        observed.push(event.as_inner_event());
                        continue 'events;
                    }
                }
            }
            if !observed.is_empty() {
                // Let the other iterators know events are waiting for them.
                unsafe { (*self.notification).1.notify_all() };
                mem::drop(observed);
            }
        } else {
            // Claim events parked by another iterator that belong to us.
            let mut index = vec![];
            for (i, event) in observed.iter().enumerate() {
                for o_e_id in &self.local_to_observe {
                    if event.event.as_id() == o_e_id.as_id() {
                        if o_e_id.as_id() == MpvEventId::PropertyChange {
                            // Property events additionally have to match by name.
                            if let Event::PropertyChange(ref v_ev) = event.event {
                                if let Event::PropertyChange(ref v_ob) = *o_e_id {
                                    if v_ev.name == v_ob.name {
                                        index.push(i);
                                        ret_events.push(event.as_result());
                                    }
                                }
                            }
                        } else {
                            index.push(i);
                            ret_events.push(event.as_result());
                        }
                    }
                }
            }
            // `index` is ascending; each earlier removal shifts later indices
            // down by one, hence `i - n`.
            for (n, i) in index.iter().enumerate() {
                observed.remove(i - n);
            }
            if !observed.is_empty() {
                unsafe { (*self.notification).1.notify_all() };
                mem::drop(observed);
            }
        }
        if !ret_events.is_empty() {
            self.last_no_associated_ev = false;
            Some(Ok(ret_events))
        } else {
            // Spurious wakeup: report it, never end the iterator.
            self.last_no_associated_ev = true;
            Some(Err(Error::NoAssociatedEvent))
        }
    }
}

#[derive(Clone, Debug, PartialEq, Eq)]
#[allow(missing_docs)]
/// Represents the data of an `Event::LogMessage`.
pub struct LogMessage {
    pub prefix: String,
    pub level: String,
    pub text: String,
    pub log_level: MpvLogLevel,
}

impl LogMessage {
    #[inline]
    // Copy the C strings of the raw payload into owned Rust strings.
    fn from_raw(raw: *mut libc::c_void) -> LogMessage {
        let raw = unsafe { &mut *(raw as *mut MpvEventLogMessage) };
        LogMessage {
            prefix: unsafe { CStr::from_ptr(raw.prefix).to_str().unwrap().into() },
            level: unsafe { CStr::from_ptr(raw.level).to_str().unwrap().into() },
            text: unsafe { CStr::from_ptr(raw.text).to_str().unwrap().into() },
            log_level: raw.log_level,
        }
    }
}

impl MpvEventEndFile {
    #[inline]
    fn from_raw(raw: *mut libc::c_void) -> MpvEventEndFile {
        let raw = unsafe { &mut *(raw as *mut MpvEventEndFile) };
        MpvEventEndFile {
            reason: raw.reason,
            error: raw.error,
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(missing_docs)]
/// Represents the reason an `Event::EndFile` was fired.
pub enum EndFileReason {
    Eof = 0,
    Stop = 2,
    Quit = 3,
    Error = 4,
    Redirect = 5,
}

#[derive(Clone, Debug, PartialEq)]
#[allow(missing_docs)]
/// Represents the data of an `Event::EndFile`. `error` is `Some` if `EndFileReason` is `Error`.
pub struct EndFile { pub reason: EndFileReason, pub error: Option<Error>, } impl EndFile { #[inline] fn from_raw(raw: MpvEventEndFile) -> EndFile { EndFile { reason: match raw.reason { 0 => EndFileReason::Eof, 2 => EndFileReason::Stop, 3 => EndFileReason::Quit, 4 => EndFileReason::Error, 5 => EndFileReason::Redirect, _ => unreachable!(), }, error: { let err = mpv_err((), raw.error); if err.is_ok() { None } else { Some(err.unwrap_err()) } }, } } } #[derive(Clone, Debug, PartialEq)] #[allow(missing_docs)] /// Represents the data of an `Event::PropertyChange`. The `data` field is equal to the value of /// the property. pub struct Property { pub name: String, pub data: Data, } impl Property { #[inline] fn from_raw(raw: *mut libc::c_void) -> Property { let raw = unsafe { &mut *(raw as *mut MpvEventProperty) }; Property { name: unsafe { CStr::from_ptr(raw.name).to_str().unwrap().into() }, data: Data::from_raw(raw.format, raw.data), } } #[inline] /// Create a `Property` that is suitable for observing. /// Data is used to infer the format of the property, and the value is never used if supplied to /// a function of this crate. pub fn new(name: &str, data: Data) -> Property { Property { name: name.into(), data: data, } } } #[derive(Clone, Debug, PartialEq)] /// Represents all possible error values returned by this crate. pub enum Error { /// An internal mpv error. Mpv(MpvError), /// The core has already been initialized. /// This error is also handled by mpv, but results in a failed assertion. AlreadyInitialized, /// Calling `suspend` on an uninitialized core will deadlock. Uninitialized, /// All `suspend` calls have already been undone. AlreadyResumed, /// Some functions only accept absolute paths. ExpectedAbsolute, /// If a file was expected, but a directory was given. ExpectedFile, /// The parent was dropped before the clients ParentDropped, /// If an argument (like a percentage > 100) was out of bounds. 
OutOfBounds, /// If a command failed during a `loadfiles` call, contains index of failed command and `Error`. Loadfiles((usize, Box<Error>)), /// Events are not enabled for this `Mpv` instance. EventsDisabled, /// This event is already being observed by another `EventIter`. AlreadyObserved(Box<Event>), /// No `Event` associated with this `EventIter` was found, this means a spurious wakeup. NoAssociatedEvent, /// Used a `Data::OsdString` while writing. OsdStringWrite, /// Mpv returned a string that uses an unsupported codec. Inside are the raw bytes cast to u8. UnsupportedEncoding(Vec<u8>), /// Mpv returned null while creating the core. Null, } #[derive(Clone, Debug, PartialEq)] #[allow(missing_docs)] /// Represents data that can be sent to or retrieved from `Mpv`. pub enum Data { String(String), OsdString(String), Flag(bool), Int64(i64), Double(f64), Node(MpvNode), } impl Data { #[inline] /// Create a `Data` from a supported value. Be careful about mistakenly using an isize when you /// want a float. 
pub fn new<T>(val: T) -> Data
        where T: Into<Data>
    {
        val.into()
    }

    #[inline]
    // The raw mpv format tag corresponding to this variant.
    fn format(&self) -> MpvFormat {
        match *self {
            Data::String(_) => MpvFormat::String,
            Data::OsdString(_) => MpvFormat::OsdString,
            Data::Flag(_) => MpvFormat::Flag,
            Data::Int64(_) => MpvFormat::Int64,
            Data::Double(_) => MpvFormat::Double,
            Data::Node(_) => MpvFormat::Node,
        }
    }

    #[inline]
    // Read a value of format `fmt` out of the buffer behind `ptr`.
    fn from_raw(fmt: MpvFormat, ptr: *mut libc::c_void) -> Data {
        match fmt {
            // NOTE(review): this reads 8 bytes for a flag; mpv flags are C
            // ints — confirm the source buffer is always at least i64-sized.
            MpvFormat::Flag => Data::Flag(unsafe { *(ptr as *mut i64) } != 0),
            MpvFormat::Int64 => Data::Int64(unsafe { *(ptr as *mut i64) }),
            MpvFormat::Double => Data::Double(unsafe { *(ptr as *mut f64) }),
            // TODO: MpvFormat::Node => Data::Node(unsafe{ *(ptr as *mut MpvNode) }),
            _ => unreachable!(),
        }
    }
}

impl Into<Data> for String {
    #[inline]
    fn into(self) -> Data {
        Data::String(self)
    }
}

impl Into<Data> for bool {
    #[inline]
    fn into(self) -> Data {
        Data::Flag(self)
    }
}

impl Into<Data> for isize {
    #[inline]
    fn into(self) -> Data {
        Data::Int64(self as i64)
    }
}

impl Into<Data> for f64 {
    #[inline]
    fn into(self) -> Data {
        Data::Double(self)
    }
}

impl Into<Data> for MpvNode {
    #[inline]
    fn into(self) -> Data {
        Data::Node(self)
    }
}

#[derive(Clone, Debug)]
/// Represents a command that can be executed by `Mpv`.
pub struct Command<'a> {
    name: &'a str,
    args: Option<Vec<String>>,
}

impl<'a> Command<'a> {
    #[inline]
    /// Create a new `MpvCommand`.
    pub fn new(name: &'a str, args: Option<Vec<String>>) -> Command<'a> {
        Command {
            name: name,
            args: args,
        }
    }
}

#[derive(Clone, Debug)]
/// Represents data needed for `PlaylistOp::Loadfiles`.
pub struct File<'a> {
    path: &'a Path,
    state: FileState,
    options: Option<&'a str>,
}

impl<'a> File<'a> {
    #[inline]
    /// Create a new `File`.
    pub fn new(path: &'a Path, state: FileState, opts: Option<&'a str>) -> File<'a> {
        File {
            path: path,
            state: state,
            options: opts,
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Represents how a `File` is inserted into the playlist.
pub enum FileState {
    /// Replace the current track.
Replace,
    /// Append to the current playlist.
    Append,
    /// If current playlist is empty: play, otherwise append to playlist.
    AppendPlay,
}

impl FileState {
    #[inline]
    // The literal string mpv's `loadfile` command expects for this mode.
    fn val(&self) -> &str {
        match *self {
            FileState::Replace => "replace",
            FileState::Append => "append",
            FileState::AppendPlay => "append-play",
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Represents possible seek operations by `seek`.
pub enum Seek {
    /// Seek forward relatively from current position at runtime.
    /// This is less exact than `seek_abs`, see [mpv manual]
    /// (https://mpv.io/manual/master/#command-interface-
    /// [relative|absolute|absolute-percent|relative-percent|exact|keyframes]).
    RelativeForward(Duration),
    /// See `RelativeForward`.
    RelativeBackward(Duration),
    /// Seek to a given absolute time at runtime.
    Absolute(Duration),
    /// Seek to a given relative percent position at runtime.
    /// If `usize` is bigger than the remaining playtime, the next file is played.
    RelativePercent(usize),
    /// Seek to a given absolute percent position at runtime.
    AbsolutePercent(usize),
    /// Revert one previous `seek` invocation. If this is called twice, this
    /// reverts the previous revert seek.
    Revert,
    /// Mark the current position. The next `seek_revert` call will revert
    /// to the marked position.
    RevertMark,
    /// Play exactly one frame, and then pause. This does nothing with
    /// audio-only playback.
    Frame,
    /// Play exactly the last frame, and then pause. This does nothing with
    /// audio-only playback. See [this]
    /// (https://mpv.io/manual/master/#command-interface-frame-back-step)
    /// for performance issues.
    FrameBack,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Represents possible screenshot operations by `screenshot`.
pub enum Screenshot<'a> {
    /// "Save the video image, in its original resolution, and with subtitles.
    /// Some video outputs may still include the OSD in the output under certain circumstances.".
    Subtitles,
    /// "Take a screenshot and save it to a given file.
/// The format of the file will be guessed by
    /// the extension (and --screenshot-format is ignored - the behaviour when the extension is
    /// missing or unknown is arbitrary). If the file already exists, it's overwritten. Like all
    /// input command parameters, the filename is subject to property expansion as described in
    /// Property Expansion.".
    SubtitlesFile(&'a Path),
    /// "Like subtitles, but typically without OSD or subtitles.
    /// The exact behaviour depends on the selected video output.".
    Video,
    /// See `screenshot_subtitles_to_file`.
    VideoFile(&'a Path),
    /// "Save the contents of the mpv window. Typically scaled, with OSD
    /// and subtitles. The exact behaviour depends on the selected video output, and if no support
    /// is available, this will act like video.".
    Window,
    /// See `screenshot_subtitles_to_file`.
    WindowFile(&'a Path),
}

#[derive(Clone, Debug)]
/// Represents operations on the playlist supported by `playlist`.
pub enum PlaylistOp<'a> {
    /// Play the next item of the current playlist.
    /// This does nothing if the current item is the last item.
    NextWeak,
    /// Play the next item of the current playlist.
    /// This terminates playback if the current item is the last item.
    NextForce,
    /// Play the previous item of the current playlist.
    /// This does nothing if the current item is the first item.
    PreviousWeak,
    /// Play the next item of the current playlist.
    /// This terminates playback if the current item is the first item.
    PreviousForce,
    /// Load any number of files with any playlist insertion behaviour,
    /// and any optional options that are set during playback of the specific item.
    Loadfiles(&'a [File<'a>]),
    /// Load the given playlist file. Replace current playlist.
    LoadlistReplace(&'a Path),
    /// Load the given playlist file. Append to current playlist.
    LoadlistAppend(&'a Path),
    /// Clear the current playlist, except the currently played item.
    Clear,
    /// Remove the currently selected playlist item.
    RemoveCurrent,
    /// Remove the item at position `usize`.
RemoveIndex(usize),
    /// Move item `usize` to the position of item `usize`.
    Move((usize, usize)),
    /// Shuffle the playlist.
    Shuffle,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Represents operations supported by `subtitle`.
pub enum SubOp<'a> {
    /// Add and select the subtitle immediately.
    /// The second argument is the title, third is the language.
    AddSelect(&'a Path, Option<&'a str>, Option<&'a str>),
    /// See `AddSelect`. "Don't select the subtitle.
    /// (Or in some special situations, let the default stream selection mechanism decide.)".
    AddAuto(&'a Path, Option<&'a str>, Option<&'a str>),
    /// See `AddSelect`. "Select the subtitle. If a subtitle with the same file name was
    /// already added, that one is selected, instead of loading a duplicate entry.
    /// (In this case, title/language are ignored, and if the was changed since it was loaded,
    /// these changes won't be reflected.)".
    AddCached(&'a Path, Option<&'a str>, Option<&'a str>),
    /// Remove the given subtitle track. If the id argument is missing, remove the current
    /// track. (Works on external subtitle files only.)
    Remove(Option<usize>),
    /// Reload the given subtitle tracks. If the id argument is missing, reload the current
    /// track. (Works on external subtitle files only.)
    Reload(Option<usize>),
    /// Change subtitle timing such, that the subtitle event after the next `isize` subtitle
    /// events is displayed. `isize` can be negative to step backwards.
    Step(isize),
    /// Seek to the next subtitle. This is similar to sub-step, except that it seeks video and
    /// audio instead of adjusting the subtitle delay.
    /// For embedded subtitles (like with matroska), this works only with subtitle events that
    /// have already been displayed, or are within a short prefetch range.
    SeekForward,
    /// See `SeekForward`.
    SeekBackward,
}

impl MpvError {
    #[inline]
    // The raw integer value mpv's C API uses for this error.
    fn as_val(&self) -> libc::c_int {
        *self as libc::c_int
    }
    #[inline]
    /// Returns a string slice associated with the `MpvError`.
pub fn error_string(&self) -> &str { let raw = unsafe { mpv_error_string(self.as_val()) }; unsafe { CStr::from_ptr(raw) }.to_str().unwrap() } } impl MpvFormat { #[inline] fn as_val(self) -> libc::c_int { self as libc::c_int } } // TODO: more /// Represents an mpv instance from which `Client`s can be spawned. /// /// The mpv manual is very helpful with regards to confusion about syntax for commands, /// however there is an effort to catch common mistakes that may result in unexpected behaviour. /// See `command`. /// /// # Panics /// Any method on this struct may panic if any argument contains invalid utf-8. pub struct Parent { ctx: *mut MpvHandle, initialized: AtomicBool, suspension_count: AtomicUsize, check_events: bool, ev_iter_notification: Option<*mut (Mutex<bool>, Condvar)>, ev_to_observe: Option<Mutex<Vec<Event>>>, ev_to_observe_properties: Option<Mutex<HashMap<String, usize>>>, ev_observed: Option<Mutex<Vec<InnerEvent>>>, } // TODO: more /// Represents a client of a `Parent`. /// /// # Panics /// Any method on this struct may panic if any argument contains invalid utf-8. pub struct Client<'parent> { ctx: *mut MpvHandle, check_events: bool, ev_iter_notification: Option<*mut (Mutex<bool>, Condvar)>, ev_to_observe: Option<Mutex<Vec<Event>>>, ev_observed: Option<Mutex<Vec<InnerEvent>>>, ev_to_observe_properties: Option<Mutex<HashMap<String, usize>>>, _marker: PhantomData<&'parent Parent>, } unsafe impl Send for Parent {} unsafe impl Sync for Parent {} unsafe impl<'parent> Send for Client<'parent> {} unsafe impl<'parent> Sync for Client<'parent> {} #[doc(hidden)] #[allow(missing_docs)] /// Designed for internal use. 
pub trait MpvMarker { // FIXME: Most of these can go once `Associated Items` lands fn initialized(&self) -> bool; fn ctx(&self) -> *mut MpvHandle; fn check_events(&self) -> bool; fn ev_iter_notification(&self) -> &Option<*mut (Mutex<bool>, Condvar)>; fn ev_to_observe(&self) -> &Option<Mutex<Vec<Event>>>; fn ev_to_observe_properties(&self) -> &Option<Mutex<HashMap<String, usize>>>; fn ev_observed(&self) -> &Option<Mutex<Vec<InnerEvent>>>; fn drop_ev_iter_step(&mut self) { if self.check_events() { unsafe { Box::from_raw(self.ev_iter_notification().unwrap()); } } } } impl MpvMarker for Parent { #[inline] fn initialized(&self) -> bool { self.initialized.load(Ordering::Acquire) } #[inline] fn ctx(&self) -> *mut MpvHandle { self.ctx } #[inline] fn check_events(&self) -> bool { self.check_events } #[inline] fn ev_iter_notification(&self) -> &Option<*mut (Mutex<bool>, Condvar)> { &self.ev_iter_notification } #[inline] fn ev_to_observe(&self) -> &Option<Mutex<Vec<Event>>> { &self.ev_to_observe } #[inline] fn ev_to_observe_properties(&self) -> &Option<Mutex<HashMap<String, usize>>> { &self.ev_to_observe_properties } #[inline] fn ev_observed(&self) -> &Option<Mutex<Vec<InnerEvent>>> { &self.ev_observed } } impl<'parent> MpvMarker for Client<'parent> { #[inline] fn initialized(&self) -> bool { true } #[inline] fn ctx(&self) -> *mut MpvHandle { self.ctx } #[inline] fn check_events(&self) -> bool { self.check_events } #[inline] fn ev_iter_notification(&self) -> &Option<*mut (Mutex<bool>, Condvar)> { &self.ev_iter_notification } #[inline] fn ev_to_observe(&self) -> &Option<Mutex<Vec<Event>>> { &self.ev_to_observe } #[inline] fn ev_to_observe_properties(&self) -> &Option<Mutex<HashMap<String, usize>>> { &self.ev_to_observe_properties } #[inline] fn ev_observed(&self) -> &Option<Mutex<Vec<InnerEvent>>> { &self.ev_observed } } impl Drop for Parent { fn drop(&mut self) { self.drop_ev_iter_step(); unsafe { mpv_terminate_destroy(self.ctx()); } } } impl<'parent> Drop for Client<'parent> 
{ fn drop(&mut self) { self.drop_ev_iter_step(); unsafe { mpv_detach_destroy(self.ctx()); } } } impl<'parent> Parent { #[allow(mutex_atomic)] /// Create a new `Mpv` instance. /// To call any method except for `set_option` on this, it has to be initialized first. /// The default settings can be probed by running: /// ///```$ mpv --show-profile=libmpv``` pub fn new(check_events: bool) -> Result<Parent, Error> { let ctx = unsafe { mpv_create() }; if ctx == ptr::null_mut() { Err(Error::Null) } else { unsafe { // Disable deprecated events. try!(mpv_err((), mpv_request_event(ctx, MpvEventId::TracksChanged, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::TrackSwitched, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Pause, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Unpause, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ScriptInputDispatch, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::MetadataUpdate, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ChapterChange, 0))); } let (ev_iter_notification, ev_to_observe, ev_to_observe_properties, ev_observed) = if check_events { let ev_iter_notification = Box::into_raw(box (Mutex::new(false), Condvar::new())); unsafe { mpv_set_wakeup_callback(ctx, event_callback, mem::transmute::<*mut (Mutex<bool>, Condvar), *mut libc::c_void> (ev_iter_notification)); } (Some(ev_iter_notification), Some(Mutex::new(Vec::with_capacity(10))), Some(Mutex::new(HashMap::new())), Some(Mutex::new(Vec::with_capacity(10)))) } else { unsafe { // Disable remaining events try!(mpv_err((), mpv_request_event(ctx, MpvEventId::LogMessage, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::GetPropertyReply, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::SetPropertyReply, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::CommandReply, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::StartFile, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::EndFile, 0))); 
try!(mpv_err((), mpv_request_event(ctx, MpvEventId::FileLoaded, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Idle, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ClientMessage, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::VideoReconfig, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::AudioReconfig, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Seek, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::PlaybackRestart, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::PropertyChange, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::QueueOverflow, 0))); } (None, None, None, None) }; Ok(Parent { ctx: ctx, initialized: AtomicBool::new(false), suspension_count: AtomicUsize::new(0), check_events: check_events, ev_iter_notification: ev_iter_notification, ev_to_observe: ev_to_observe, ev_to_observe_properties: ev_to_observe_properties, ev_observed: ev_observed, }) } } /// Create a client with `name`, that is connected to the core of `self`, but has an own queue /// for API events and such. pub fn new_client(&self, name: &str, check_events: bool) -> Result<Client, Error> { if self.initialized() { let ctx = unsafe { let name = CString::new(name).unwrap(); mpv_create_client(self.ctx(), name.as_ptr()) }; unsafe { // Disable deprecated events. 
try!(mpv_err((), mpv_request_event(ctx, MpvEventId::TracksChanged, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::TrackSwitched, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Pause, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Unpause, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ScriptInputDispatch, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::MetadataUpdate, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ChapterChange, 0))); } let (ev_iter_notification, ev_to_observe, ev_to_observe_properties, ev_observed) = if check_events { let ev_iter_notification = Box::into_raw(box (Mutex::new(false), Condvar::new())); unsafe { mpv_set_wakeup_callback(ctx, event_callback, mem::transmute::<*mut (Mutex<bool>, Condvar), *mut libc::c_void> (ev_iter_notification)); } (Some(ev_iter_notification), Some(Mutex::new(Vec::with_capacity(10))), Some(Mutex::new(HashMap::new())), Some(Mutex::new(Vec::with_capacity(10)))) } else { unsafe { // Disable remaining events try!(mpv_err((), mpv_request_event(ctx, MpvEventId::LogMessage, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::GetPropertyReply, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::SetPropertyReply, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::CommandReply, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::StartFile, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::EndFile, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::FileLoaded, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Idle, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::ClientMessage, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::VideoReconfig, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::AudioReconfig, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::Seek, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::PlaybackRestart, 0))); try!(mpv_err((), mpv_request_event(ctx, 
MpvEventId::PropertyChange, 0))); try!(mpv_err((), mpv_request_event(ctx, MpvEventId::QueueOverflow, 0))); } (None, None, None, None) }; let instance = Client { ctx: ctx, check_events: check_events, ev_iter_notification: ev_iter_notification, ev_to_observe: ev_to_observe, ev_to_observe_properties: ev_to_observe_properties, ev_observed: ev_observed, _marker: PhantomData, }; Ok(instance) } else { Err(Error::Uninitialized) } } /// Initialize the mpv core. pub fn init(&self) -> Result<(), Error> { if self.initialized() { Err(Error::AlreadyInitialized) } else { self.initialized.store(true, Ordering::Release); let err = unsafe { mpv_initialize(self.ctx()) }; mpv_err((), err) } } #[allow(match_ref_pats)] /// Set an option. This only works before core initialization. pub fn set_option(&self, opt: Property) -> Result<(), Error> { if self.initialized() { return Err(Error::AlreadyInitialized); } let data = &mut opt.data.clone(); let name = CString::new(opt.name).unwrap().into_raw(); let format = data.format().as_val(); let ret = match data { &mut Data::OsdString(_) => Err(Error::OsdStringWrite), &mut Data::String(ref v) => { let data = CString::new(v.as_bytes()).unwrap().into_raw(); let ret = mpv_err((), unsafe { mpv_set_property(self.ctx(), name, format, mem::transmute::<*mut libc::c_char, *mut libc::c_void>(data)) }); unsafe { CString::from_raw(data); }; ret } _ => { let data = data_ptr!(data); mpv_err((), unsafe { mpv_set_option(self.ctx(), name, format, data) }) } }; unsafe { CString::from_raw(name) }; ret } /// Load a configuration file into the `Mpv` instance. /// The path has to be absolute. /// This should not be done during runtime. /// This overrides previously set options and properties. 
    pub fn load_config(&self, path: &Path) -> Result<(), Error> {
        if path.is_relative() {
            Err(Error::ExpectedAbsolute)
        } else if path.is_dir() {
            Err(Error::ExpectedFile)
        } else {
            // into_raw/from_raw round-trip keeps the CString alive across the
            // FFI call and then frees it afterwards.
            let file = CString::new(path.to_str().unwrap()).unwrap().into_raw();
            let ret = mpv_err((), unsafe { mpv_load_config_file(self.ctx(), file) });
            unsafe { CString::from_raw(file) };
            ret
        }
    }

    /// Suspend the playback thread, or freeze the core. If the core is suspended, only
    /// client API calls will be accepted, ie. input, redrawing etc. will be suspended.
    /// For the thread to resume there has to be one `resume` call for each `suspend` call.
    pub fn suspend(&self) -> Result<(), Error> {
        if self.initialized() {
            // Count nested suspensions so `resume` can refuse to underflow.
            self.suspension_count.fetch_add(1, Ordering::AcqRel);
            Ok(unsafe { mpv_suspend(self.ctx()) })
        } else {
            Err(Error::Uninitialized)
        }
    }

    /// See `suspend`.
    pub fn resume(&self) -> Result<(), Error> {
        if self.initialized() {
            // NOTE(review): the load-then-fetch_sub pair is not atomic as a
            // unit; two racing `resume` calls could both pass the zero check.
            // Confirm whether callers serialize suspend/resume externally.
            if self.suspension_count.load(Ordering::Acquire) == 0 {
                Err(Error::AlreadyResumed)
            } else {
                self.suspension_count.fetch_sub(1, Ordering::AcqRel);
                Ok(unsafe { mpv_resume(self.ctx()) })
            }
        } else {
            Err(Error::Uninitialized)
        }
    }
}

impl<'parent> Client<'parent> {
    /// Returns the name associated with the instance, useful for debugging.
    pub fn name(&self) -> &str {
        // NOTE(review): panics if libmpv ever hands back non-UTF-8 — the
        // struct-level docs declare this panic contract.
        unsafe { CStr::from_ptr(mpv_client_name(self.ctx())).to_str().unwrap() }
    }
}

#[allow(missing_docs)]
/// Functions that an abstraction of libmpv should cover.
pub trait MpvInstance<'parent, P> where P: MpvMarker + 'parent { fn enable_event(&self, e: Event) -> Result<(), Error>; fn disable_event(&self, e: Event) -> Result<(), Error>; fn observe_all(&self, events: Vec<Event>) -> Result<EventIter<P>, Error>; unsafe fn command(&self, cmd: Command) -> Result<(), Error>; fn set_property(&self, opt: Property) -> Result<(), Error>; fn get_property(&self, prop: Property) -> Result<Property, Error>; fn seek(&self, seek: Seek) -> Result<(), Error>; fn screenshot(&self, st: Screenshot) -> Result<(), Error>; fn playlist(&self, op: PlaylistOp) -> Result<(), Error>; fn cycle(&self, property: &str, up: bool) -> Result<(), Error>; fn multiply(&self, property: &str, factor: usize) -> Result<(), Error>; fn subtitle(&self, op: SubOp) -> Result<(), Error>; fn pause(&self) -> Result<(), Error>; fn unpause(&self) -> Result<(), Error>; } impl<'parent, T> MpvInstance<'parent, T> for T where T: MpvMarker + 'parent { /// Enable a given `Event`. Note that any event of `Event` is enabled by default, /// except for `Event::Tick`. fn enable_event(&self, e: Event) -> Result<(), Error> { if self.check_events() { mpv_err((), unsafe { mpv_request_event(self.ctx(), e.as_id(), 1) }) } else { Err(Error::EventsDisabled) } } /// Disable a given `Event`. fn disable_event(&self, e: Event) -> Result<(), Error> { if self.check_events() { mpv_err((), unsafe { mpv_request_event(self.ctx(), e.as_id(), 0) }) } else { Err(Error::EventsDisabled) } } /// Observe given `Event`s. /// Returns an `EventIter`, on which `next` can be called that blocks while waiting for new /// `Event`s. 
fn observe_all(&self, events: Vec<Event>) -> Result<EventIter<T>, Error> { if self.check_events() { let mut observe = self.ev_to_observe().as_ref().unwrap().lock(); let mut properties = self.ev_to_observe_properties().as_ref().unwrap().lock(); let mut ids = Vec::with_capacity(events.len()); let mut evs = Vec::with_capacity(events.len()); let mut props = Vec::with_capacity(events.len()); for elem in &events { if let Event::PropertyChange(ref v) = *elem { if properties.contains_key(&v.name) { return Err(Error::AlreadyObserved(box elem.clone())); } else { props.push(v); ids.push(elem.as_id()); evs.push(elem.clone()); continue; } } for id in &(*observe) { if elem.as_id() == id.as_id() { return Err(Error::AlreadyObserved(box elem.clone())); } } ids.push(elem.as_id()); evs.push(elem.clone()); } observe.extend(evs.clone()); for elem in props { let id = properties.len(); unsafe { let name = CString::new(elem.name.clone()).unwrap(); try!(mpv_err((), mpv_observe_property(self.ctx(), id as libc::uint64_t, name.as_ptr(), elem.data.format() as libc::c_int))) } properties.insert(elem.name.clone(), id); } Ok(EventIter { ctx: self.ctx(), notification: self.ev_iter_notification().unwrap(), all_to_observe: self.ev_to_observe().as_ref().unwrap(), all_to_observe_properties: self.ev_to_observe_properties().as_ref().unwrap(), local_to_observe: evs, all_observed: self.ev_observed().as_ref().unwrap(), last_no_associated_ev: false, _marker: PhantomData, }) } else { Err(Error::EventsDisabled) } } /// Send a command to the `Mpv` instance. This uses `mpv_command_string` internally, /// so that the syntax is the same as described in the [manual for the input.conf] /// (https://mpv.io/manual/master/#list-of-input-commands). It is advised to use the specific /// method for each command, because the specific functions may check for /// common errors and are generally type checked (enums to specify operations). 
/// /// # Safety /// This method is unsafe because the player may quit via the quit command. unsafe fn command(&self, cmd: Command) -> Result<(), Error> { if cmd.args.is_none() { let args = CString::new(cmd.name).unwrap(); mpv_err((), mpv_command_string(self.ctx(), args.as_ptr())) } else { let mut str = String::new(); for elem in cmd.args.unwrap() { str.push_str(&format!(" {}", elem)); } let args = CString::new(format!("{}{}", cmd.name, str)).unwrap(); mpv_err((), mpv_command_string(self.ctx(), args.as_ptr())) } } #[allow(match_ref_pats)] /// Set the value of a property. fn set_property(&self, opt: Property) -> Result<(), Error> { let data = &mut opt.data.clone(); let format = data.format().as_val(); let name = CString::new(opt.name).unwrap().into_raw(); let ret = match data { &mut Data::OsdString(_) => Err(Error::OsdStringWrite), &mut Data::String(ref v) => { let data = CString::new(v.as_bytes()).unwrap().into_raw(); let ret = mpv_err((), unsafe { mpv_set_property(self.ctx(), name, format, mem::transmute::<*mut libc::c_char, *mut libc::c_void>(data)) }); unsafe { CString::from_raw(data); }; ret } _ => { let data = data_ptr!(data); mpv_err((), unsafe { mpv_set_property(self.ctx(), name, format, data) }) } }; unsafe { CString::from_raw(name) }; ret } #[allow(match_ref_pats)] /// Get the value of a property. 
fn get_property(&self, prop: Property) -> Result<Property, Error> { Ok(Property::new(&prop.name, { let data = &mut prop.data.clone(); let format = data.format(); match data { &mut Data::String(_) | &mut Data::OsdString(_) => { println!("___ENTERING DANGER ZONE___"); let ptr = CString::new("").unwrap().into_raw(); let err = mpv_err((), unsafe { let name = CString::new(prop.name.clone()).unwrap(); mpv_get_property(self.ctx(), name.as_ptr(), format.as_val(), mem::transmute::<*mut libc::c_char, *mut libc::c_void>(ptr)) }); if err.is_err() { println!("___LEAVING DANGER ZONE___"); return Err(err.unwrap_err()); } else { let ptr = unsafe { CString::from_raw(ptr) }; let bytes = ptr.as_bytes(); let data = { encoding::decode(bytes, encoding::DecoderTrap::Strict, encoding::all::ASCII) .0 .or_else(|_| Err(Error::UnsupportedEncoding(Vec::from(bytes)))) }; // It should be this println!("ref: {:?}", "トゥッティ!".as_bytes()); // But we got this println!("got: {:?}", bytes); // Which is this in utf-8 println!("ldc: {}", String::from_utf8_lossy(bytes).into_owned()); // This is what the OsString is capable of (protip: nothing) use std::ffi::OsStr; use std::os::unix::ffi::OsStrExt; println!("OsS: {:?}", OsStr::from_bytes(bytes)); // And this in the guessed encoding println!("gue: {:?}", data); println!("___LEAVING DANGER ZONE___"); match prop.data { Data::String(_) => Data::String(data.unwrap()), Data::OsdString(_) => Data::OsdString(data.unwrap()), _ => unreachable!(), } } } _ => { let ptr = unsafe { libc::malloc(mem::size_of::<Data>() as libc::size_t) as *mut libc::c_void }; let err = mpv_err((), unsafe { let name = CString::new(prop.name.clone()).unwrap(); mpv_get_property(self.ctx(), name.as_ptr(), format.as_val(), ptr) }); if err.is_err() { return Err(err.unwrap_err()); } else { Data::from_raw(format, ptr) } } } })) } // --- Convenience command functions --- // /// Seek to a position as defined by `Seek`. 
fn seek(&self, seek: Seek) -> Result<(), Error> { match seek { Seek::RelativeForward(d) => unsafe { self.command(Command::new("seek", Some(vec![format!("{}", d.as_secs()), "relative".into()]))) }, Seek::RelativeBackward(d) => unsafe { self.command(Command::new("seek", Some(vec![format!("-{}", d.as_secs()), "relative".into()]))) }, Seek::Absolute(d) => unsafe { self.command(Command::new("seek", Some(vec![format!("{}", d.as_secs()), "absolute".into()]))) }, Seek::RelativePercent(p) => { if p > 100 { // This is actually allowed in libmpv (seek to end), // but it's confusing and may be an indicator of bugs. Err(Error::OutOfBounds) } else { unsafe { self.command(Command::new("seek", Some(vec![format!("{}", p), "relative-percent".into()]))) } } } Seek::AbsolutePercent(p) => { if p > 100 { // See `Seek::RelativePercent` above. Err(Error::OutOfBounds) } else { unsafe { self.command(Command::new("seek", Some(vec![format!("{}", p), "absolute-percent".into()]))) } } } Seek::Revert => unsafe { self.command(Command::new("revert-seek", None)) }, Seek::RevertMark => unsafe { self.command(Command::new("revert-seek", Some(vec!["mark".into()]))) }, Seek::Frame => unsafe { self.command(Command::new("frame-step", None)) }, Seek::FrameBack => unsafe { self.command(Command::new("frame-back-step", None)) }, } } /// Take a screenshot as defined by `Screenshot`. 
fn screenshot(&self, st: Screenshot) -> Result<(), Error> { match st { Screenshot::Subtitles => unsafe { self.command(Command::new("screenshot", Some(vec!["subtitles".into()]))) }, Screenshot::SubtitlesFile(ref p) => unsafe { self.command(Command::new("screenshot", Some(vec![p.to_str().unwrap().into(), "subtitles".into()]))) }, Screenshot::Video => unsafe { self.command(Command::new("screenshot", Some(vec!["video".into()]))) }, Screenshot::VideoFile(ref p) => unsafe { self.command(Command::new("screenshot", Some(vec![p.to_str().unwrap().into(), "video".into()]))) }, Screenshot::Window => unsafe { self.command(Command::new("screenshot", Some(vec!["window".into()]))) }, Screenshot::WindowFile(ref p) => unsafe { self.command(Command::new("screenshot", Some(vec![p.to_str().unwrap().into(), "window".into()]))) }, } } /// Execute an operation on the playlist as defined by `PlaylistOp` fn playlist(&self, op: PlaylistOp) -> Result<(), Error> { match op { PlaylistOp::NextWeak => unsafe { self.command(Command::new("playlist-next", Some(vec!["weak".into()]))) }, PlaylistOp::NextForce => unsafe { self.command(Command::new("playlist-next", Some(vec!["force".into()]))) }, PlaylistOp::PreviousWeak => unsafe { self.command(Command::new("playlist-previous", Some(vec!["weak".into()]))) }, PlaylistOp::PreviousForce => unsafe { self.command(Command::new("playlist-previous", Some(vec!["force".into()]))) }, PlaylistOp::LoadlistReplace(p) => unsafe { self.command(Command::new("loadlist", Some(vec![format!("\"{}\"", p.to_str().unwrap()), "replace".into()]))) }, PlaylistOp::LoadlistAppend(p) => unsafe { self.command(Command::new("loadlist", Some(vec![format!("\"{}\"", p.to_str().unwrap()), "append".into()]))) }, PlaylistOp::Clear => unsafe { self.command(Command::new("playlist-clear", None)) }, PlaylistOp::RemoveCurrent => unsafe { self.command(Command::new("playlist-remove", Some(vec!["current".into()]))) }, PlaylistOp::RemoveIndex(i) => unsafe { 
self.command(Command::new("playlist-remove", Some(vec![format!("{}", i)]))) }, PlaylistOp::Move((old, new)) => unsafe { self.command(Command::new("playlist-move", Some(vec![format!("{}", new), format!("{}", old)]))) }, PlaylistOp::Shuffle => unsafe { self.command(Command::new("playlist-shuffle", None)) }, PlaylistOp::Loadfiles(lfiles) => { for (i, elem) in lfiles.iter().enumerate() { let ret = unsafe { self.command(Command { name: "loadfile", args: Some(match elem.options { Some(v) => { vec![format!("\"{}\"", elem.path .to_str() .unwrap()), elem.state.val().into(), v.into()] } None => { vec![format!("\"{}\"", elem.path .to_str() .unwrap()), elem.state.val().into(), "".into()] } }), }) }; if ret.is_err() { return Err(Error::Loadfiles((i, box ret.unwrap_err()))); } } Ok(()) } } } /// Cycle through a given property. `up` specifies direction. On /// overflow, set the property back to the minimum, on underflow set it to the maximum. fn cycle(&self, property: &str, up: bool) -> Result<(), Error> { unsafe { self.command(Command::new("cycle", Some(vec![property.into(), if up { "up" } else { "down" } .into()]))) } } /// Multiply any property with any positive factor. fn multiply(&self, property: &str, factor: usize) -> Result<(), Error> { unsafe { self.command(Command::new("multiply", Some(vec![property.into(), format!("{}", factor)]))) } } /// Execute an operation as defined by `SubOp`. 
fn subtitle(&self, op: SubOp) -> Result<(), Error> { match op { SubOp::AddSelect(p, t, l) => unsafe { self.command(Command::new("sub-add", Some(vec![format!("\"{}\"", p.to_str().unwrap()), format!("select{}{}", if t.is_some() { format!(" {}", t.unwrap()) } else { "".into() }, if l.is_some() { format!(" {}", l.unwrap()) } else { "".into() })]))) }, SubOp::AddAuto(p, t, l) => unsafe { self.command(Command::new("sub-add", Some(vec![format!("\"{}\"", p.to_str().unwrap()), format!("auto{}{}", if t.is_some() { format!(" {}", t.unwrap()) } else { "".into() }, if l.is_some() { format!(" {}", l.unwrap()) } else { "".into() })]))) }, SubOp::AddCached(p, t, l) => unsafe { self.command(Command::new("sub-add", Some(vec![format!("\"{}\"", p.to_str().unwrap()), format!("cached{}{}", if t.is_some() { format!(" {}", t.unwrap()) } else { "".into() }, if l.is_some() { format!(" {}", l.unwrap()) } else { "".into() })]))) }, SubOp::Remove(i) => unsafe { self.command(Command::new("sub-remove", if i.is_some() { Some(vec![format!("{}", i.unwrap())]) } else { None })) }, SubOp::Reload(i) => unsafe { self.command(Command::new("sub-reload", if i.is_some() { Some(vec![format!("{}", i.unwrap())]) } else { None })) }, SubOp::Step(i) => unsafe { self.command(Command::new("sub-step", Some(vec![format!("{}", i)]))) }, SubOp::SeekForward => unsafe { self.command(Command::new("sub-seek", Some(vec!["1".into()]))) }, SubOp::SeekBackward => unsafe { self.command(Command::new("sub-seek", Some(vec!["-1".into()]))) }, } } // --- Convenience property functions --- // /// Pause playback at runtime. fn pause(&self) -> Result<(), Error> { self.set_property(Property::new("pause", Data::Flag(true))) } /// Unpause playback at runtime. fn unpause(&self) -> Result<(), Error> { self.set_property(Property::new("pause", Data::Flag(false))) } }
use libc::{c_int, c_ushort, c_uchar, size_t};
use symbols::{ZOPFLI_WINDOW_MASK, ZOPFLI_MIN_MATCH};

// Rolling-hash parameters: each new byte shifts the hash left by HASH_SHIFT
// and the result is masked to 15 bits (HASH_MASK = 2^15 - 1).
const HASH_SHIFT: c_int = 5;
const HASH_MASK: c_int = 32767;

/// Sliding-window hash chains used by the LZ77 match finder.
/// `#[repr(C)]` because C code reaches the fields via the exported
/// accessor functions further down in this file.
#[repr(C)]
pub struct ZopfliHash {
    head: Vec<c_int>, /* Hash value to index of its most recent occurrence. */
    prev: Vec<c_ushort>, /* Index to index of prev. occurrence of same hash. */
    hashval: Vec<c_int>, /* Index to hash value at this index. */
    val: c_int, /* Current hash value. */

    /* Fields with similar purpose as the above hash, but for the second hash
       with a value that is calculated differently. */
    head2: Vec<c_int>, /* Hash value to index of its most recent occurrence. */
    prev2: Vec<c_ushort>, /* Index to index of prev. occurrence of same hash. */
    hashval2: Vec<c_int>, /* Index to hash value at this index. */
    val2: c_int, /* Current hash value. */

    same: Vec<c_ushort>, /* Amount of repetitions of same byte after this .*/
}

impl ZopfliHash {
    /// Allocates a hash with all chains empty (`-1` sentinels) for a window of
    /// `window_size` bytes; `prev`/`prev2` start as the identity mapping, i.e.
    /// every slot points at itself (no previous occurrence).
    /// NOTE(review): `head`/`head2` are sized 65536 although HASH_MASK caps
    /// indices at 32767 — mirrors the C original; confirm before shrinking.
    pub fn new(window_size: size_t) -> ZopfliHash {
        ZopfliHash {
            head: vec![-1; 65536],
            prev: (0..window_size as c_ushort).collect::<Vec<_>>(),
            hashval: vec![-1; window_size],
            val: 0,

            /* Fields with similar purpose as the above hash, but for the second hash
               with a value that is calculated differently. */
            head2: vec![-1; 65536],
            prev2: (0..window_size as c_ushort).collect::<Vec<_>>(),
            hashval2: vec![-1; window_size],
            val2: 0,

            same: vec![0; window_size],
        }
    }

    /// Update the sliding hash value with the given byte. All calls to this function
    /// must be made on consecutive input characters. Since the hash value exists out
    /// of multiple input bytes, a few warmups with this function are needed initially.
pub fn update_val(&mut self, c: c_uchar) { self.val = ((self.val << HASH_SHIFT) ^ c as c_int) & HASH_MASK; } pub fn update(&mut self, array: *const c_uchar, pos: size_t, end: size_t) { let hpos = (pos & ZOPFLI_WINDOW_MASK) as usize; let mut amount: c_int = 0; let hash_value = if pos + ZOPFLI_MIN_MATCH as size_t <= end { unsafe { *array.offset((pos + ZOPFLI_MIN_MATCH as size_t - 1) as isize) } } else { 0 }; self.update_val(hash_value); self.hashval[hpos] = self.val; let index = self.val as usize; if self.head[index] != -1 && self.hashval[self.head[index] as usize] == self.val { self.prev[hpos] = self.head[index] as c_ushort; } else { self.prev[hpos] = hpos as c_ushort; } self.head[index] = hpos as c_int; // Update "same". if self.same[((pos - 1) & ZOPFLI_WINDOW_MASK) as usize] > 1 { amount = self.same[((pos - 1) & ZOPFLI_WINDOW_MASK) as usize] as c_int - 1; } unsafe { while pos + amount as size_t + 1 < end && *array.offset(pos as isize) == *array.offset((pos + amount as size_t + 1) as isize) && amount < -1 { amount += 1; } } self.same[hpos] = amount as c_ushort; self.val2 = (((self.same[hpos] - ZOPFLI_MIN_MATCH) & 255) ^ self.val as c_ushort) as c_int; self.hashval2[hpos] = self.val2; let index2 = self.val2 as usize; if self.head2[index2] != -1 as i32 && self.hashval2[self.head2[index2] as usize] == self.val2 { self.prev2[hpos] = self.head2[index2] as c_ushort; } else { self.prev2[hpos] = hpos as c_ushort; } self.head2[index2] = hpos as c_int; } } #[no_mangle] #[allow(non_snake_case)] pub extern fn UpdateHashValue(h_ptr: *mut ZopfliHash, c: c_uchar) { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.update_val(c); } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliWarmupHash(array: *const c_uchar, pos: size_t, end: size_t, h_ptr: *mut ZopfliHash) { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; let c = unsafe { *array.offset((pos + 0) as isize) }; h.update_val(c); if pos + 1 < end { let c = unsafe { *array.offset((pos + 1) as isize) 
        };
        h.update_val(c);
    }
}

#[no_mangle]
#[allow(non_snake_case)]
// C entry point for `ZopfliHash::update`.
pub extern fn ZopfliUpdateHash(array: *const c_uchar, pos: size_t, end: size_t, h_ptr: *mut ZopfliHash) {
    let h = unsafe {
        assert!(!h_ptr.is_null());
        &mut *h_ptr
    };
    h.update(array, pos, end);
}

// The accessors below expose the Vec-backed fields to C as raw pointers.
// Each asserts against a null handle and then reborrows it mutably.

#[no_mangle]
#[allow(non_snake_case)]
pub extern fn ZopfliHashHead(h_ptr: *mut ZopfliHash) -> *mut c_int {
    let h = unsafe {
        assert!(!h_ptr.is_null());
        &mut *h_ptr
    };
    h.head.as_mut_ptr()
}

#[no_mangle]
#[allow(non_snake_case)]
pub extern fn ZopfliHashPrev(h_ptr: *mut ZopfliHash) -> *mut c_ushort {
    let h = unsafe {
        assert!(!h_ptr.is_null());
        &mut *h_ptr
    };
    h.prev.as_mut_ptr()
}

#[no_mangle]
#[allow(non_snake_case)]
pub extern fn ZopfliHashHashval(h_ptr: *mut ZopfliHash) -> *mut c_int {
    let h = unsafe {
        assert!(!h_ptr.is_null());
        &mut *h_ptr
    };
    h.hashval.as_mut_ptr()
}

#[no_mangle]
#[allow(non_snake_case)]
pub extern fn ZopfliHashVal(h_ptr: *mut ZopfliHash) -> c_int {
    let h = unsafe {
        assert!(!h_ptr.is_null());
        &mut *h_ptr
    };
    h.val
}

#[no_mangle]
#[allow(non_snake_case)]
pub extern fn ZopfliHashHead2(h_ptr: *mut ZopfliHash) -> *mut c_int {
    let h = unsafe {
        assert!(!h_ptr.is_null());
        &mut *h_ptr
    };
    h.head2.as_mut_ptr()
}

#[no_mangle]
#[allow(non_snake_case)]
pub extern fn ZopfliHashPrev2(h_ptr: *mut ZopfliHash) -> *mut c_ushort {
    let h = unsafe {
        assert!(!h_ptr.is_null());
        &mut *h_ptr
    };
    h.prev2.as_mut_ptr()
}

#[no_mangle]
#[allow(non_snake_case)]
pub extern fn ZopfliHashHashval2(h_ptr: *mut ZopfliHash) -> *mut c_int {
    let h = unsafe {
        assert!(!h_ptr.is_null());
        &mut *h_ptr
    };
    h.hashval2.as_mut_ptr()
}

#[no_mangle]
#[allow(non_snake_case)]
pub extern fn ZopfliHashVal2(h_ptr: *mut ZopfliHash) -> c_int {
    let h = unsafe {
        assert!(!h_ptr.is_null());
        &mut *h_ptr
    };
    h.val2
}

#[no_mangle]
#[allow(non_snake_case)]
pub extern fn ZopfliHashSame(h_ptr: *mut ZopfliHash) -> *mut c_ushort {
    let h = unsafe {
        assert!(!h_ptr.is_null());
        &mut *h_ptr
    };
    h.same.as_mut_ptr()
}

#[no_mangle]
#[allow(non_snake_case)]
pub extern fn
ZopfliCleanHash(ptr: *mut ZopfliHash) { if ptr.is_null() { return } unsafe { Box::from_raw(ptr); } } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliInitHash(window_size: size_t) -> *mut ZopfliHash { Box::into_raw(Box::new(ZopfliHash::new(window_size))) } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliResetHash(window_size: size_t, h_ptr: *mut ZopfliHash) { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.val = 0; h.head = vec![-1; 65536]; h.prev = (0..window_size as c_ushort).collect::<Vec<_>>(); h.hashval = vec![-1; window_size]; h.same = vec![0; window_size]; h.val2 = 0; h.head2 = vec![-1; 65536]; h.prev2 = (0..window_size as c_ushort).collect::<Vec<_>>(); h.hashval2 = vec![-1; window_size]; } Turn array into a slice instead of pointer math use std::slice; use libc::{c_int, c_ushort, c_uchar, size_t}; use symbols::{ZOPFLI_WINDOW_MASK, ZOPFLI_MIN_MATCH}; const HASH_SHIFT: c_int = 5; const HASH_MASK: c_int = 32767; #[repr(C)] pub struct ZopfliHash { head: Vec<c_int>, /* Hash value to index of its most recent occurrence. */ prev: Vec<c_ushort>, /* Index to index of prev. occurrence of same hash. */ hashval: Vec<c_int>, /* Index to hash value at this index. */ val: c_int, /* Current hash value. */ /* Fields with similar purpose as the above hash, but for the second hash with a value that is calculated differently. */ head2: Vec<c_int>, /* Hash value to index of its most recent occurrence. */ prev2: Vec<c_ushort>, /* Index to index of prev. occurrence of same hash. */ hashval2: Vec<c_int>, /* Index to hash value at this index. */ val2: c_int, /* Current hash value. 
*/ same: Vec<c_ushort>, /* Amount of repetitions of same byte after this .*/ } impl ZopfliHash { pub fn new(window_size: size_t) -> ZopfliHash { ZopfliHash { head: vec![-1; 65536], prev: (0..window_size as c_ushort).collect::<Vec<_>>(), hashval: vec![-1; window_size], val: 0, /* Fields with similar purpose as the above hash, but for the second hash with a value that is calculated differently. */ head2: vec![-1; 65536], prev2: (0..window_size as c_ushort).collect::<Vec<_>>(), hashval2: vec![-1; window_size], val2: 0, same: vec![0; window_size], } } /// Update the sliding hash value with the given byte. All calls to this function /// must be made on consecutive input characters. Since the hash value exists out /// of multiple input bytes, a few warmups with this function are needed initially. pub fn update_val(&mut self, c: c_uchar) { self.val = ((self.val << HASH_SHIFT) ^ c as c_int) & HASH_MASK; } pub fn update(&mut self, array: *const c_uchar, pos: size_t, end: size_t) { let hpos = (pos & ZOPFLI_WINDOW_MASK) as usize; let mut amount: c_int = 0; let hash_value = if pos + ZOPFLI_MIN_MATCH as size_t <= end { unsafe { *array.offset((pos + ZOPFLI_MIN_MATCH as size_t - 1) as isize) } } else { 0 }; self.update_val(hash_value); self.hashval[hpos] = self.val; let index = self.val as usize; if self.head[index] != -1 && self.hashval[self.head[index] as usize] == self.val { self.prev[hpos] = self.head[index] as c_ushort; } else { self.prev[hpos] = hpos as c_ushort; } self.head[index] = hpos as c_int; // Update "same". 
if self.same[((pos - 1) & ZOPFLI_WINDOW_MASK) as usize] > 1 { amount = self.same[((pos - 1) & ZOPFLI_WINDOW_MASK) as usize] as c_int - 1; } unsafe { while pos + amount as size_t + 1 < end && *array.offset(pos as isize) == *array.offset((pos + amount as size_t + 1) as isize) && amount < -1 { amount += 1; } } self.same[hpos] = amount as c_ushort; self.val2 = (((self.same[hpos] - ZOPFLI_MIN_MATCH) & 255) ^ self.val as c_ushort) as c_int; self.hashval2[hpos] = self.val2; let index2 = self.val2 as usize; if self.head2[index2] != -1 as i32 && self.hashval2[self.head2[index2] as usize] == self.val2 { self.prev2[hpos] = self.head2[index2] as c_ushort; } else { self.prev2[hpos] = hpos as c_ushort; } self.head2[index2] = hpos as c_int; } } #[no_mangle] #[allow(non_snake_case)] pub extern fn UpdateHashValue(h_ptr: *mut ZopfliHash, c: c_uchar) { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.update_val(c); } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliWarmupHash(array: *const c_uchar, pos: size_t, end: size_t, h_ptr: *mut ZopfliHash) { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; let arr = unsafe { slice::from_raw_parts(array, end) }; let c = arr[pos]; h.update_val(c); if pos + 1 < end { let c = arr[pos + 1]; h.update_val(c); } } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliUpdateHash(array: *const c_uchar, pos: size_t, end: size_t, h_ptr: *mut ZopfliHash) { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.update(array, pos, end); } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliHashHead(h_ptr: *mut ZopfliHash) -> *mut c_int { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.head.as_mut_ptr() } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliHashPrev(h_ptr: *mut ZopfliHash) -> *mut c_ushort { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.prev.as_mut_ptr() } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliHashHashval(h_ptr: *mut ZopfliHash) -> *mut c_int 
{ let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.hashval.as_mut_ptr() } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliHashVal(h_ptr: *mut ZopfliHash) -> c_int { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.val } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliHashHead2(h_ptr: *mut ZopfliHash) -> *mut c_int { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.head2.as_mut_ptr() } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliHashPrev2(h_ptr: *mut ZopfliHash) -> *mut c_ushort { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.prev2.as_mut_ptr() } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliHashHashval2(h_ptr: *mut ZopfliHash) -> *mut c_int { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.hashval2.as_mut_ptr() } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliHashVal2(h_ptr: *mut ZopfliHash) -> c_int { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.val2 } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliHashSame(h_ptr: *mut ZopfliHash) -> *mut c_ushort { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.same.as_mut_ptr() } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliCleanHash(ptr: *mut ZopfliHash) { if ptr.is_null() { return } unsafe { Box::from_raw(ptr); } } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliInitHash(window_size: size_t) -> *mut ZopfliHash { Box::into_raw(Box::new(ZopfliHash::new(window_size))) } #[no_mangle] #[allow(non_snake_case)] pub extern fn ZopfliResetHash(window_size: size_t, h_ptr: *mut ZopfliHash) { let h = unsafe { assert!(!h_ptr.is_null()); &mut *h_ptr }; h.val = 0; h.head = vec![-1; 65536]; h.prev = (0..window_size as c_ushort).collect::<Vec<_>>(); h.hashval = vec![-1; window_size]; h.same = vec![0; window_size]; h.val2 = 0; h.head2 = vec![-1; 65536]; h.prev2 = (0..window_size as c_ushort).collect::<Vec<_>>(); h.hashval2 = vec![-1; window_size]; }
// CITA // Copyright 2016-2019 Cryptape Technologies LLC. // This program is free software: you can redistribute it // and/or modify it under the terms of the GNU General Public // License as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any // later version. // This program is distributed in the hope that it will be // useful, but WITHOUT ANY WARRANTY; without even the implied // warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use crate::cita_protocol::{pubsub_message_to_network_message, CITA_FRAME_HEADER_LEN}; use crate::config::NetConfig; use crate::p2p_protocol::transfer::TRANSFER_PROTOCOL_ID; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use cita_types::Address; use fnv::FnvHashMap as HashMap; use libproto::{Message as ProtoMessage, TryInto}; use logger::{debug, error, info, trace, warn}; use notify::DebouncedEvent; use pubsub::channel::{select, tick, unbounded, Receiver, Sender}; use rand; use std::sync::mpsc::Receiver as StdReceiver; use std::{ collections::{BTreeMap, BTreeSet}, convert::Into, io::Cursor, net::{SocketAddr, ToSocketAddrs}, time::{Duration, Instant}, }; use tentacle::{ service::{DialProtocol, ServiceControl, SessionType, TargetSession}, utils::socketaddr_to_multiaddr, SessionId, }; pub const DEFAULT_MAX_CONNECTS: usize = 666; pub const DEFAULT_MAX_KNOWN_ADDRS: usize = 1000; pub const DEFAULT_PORT: usize = 4000; pub const CHECK_CONNECTED_NODES: Duration = Duration::from_secs(3); // Score uses to manage known_nodes list. If a node has too low score, do not dial it again. // Maybe some complex algorithm can be designed later. But for now, just keeps as simple as below: // 1. Deducts 10 score for each Dial; // 2. Deducts 25 score for each Dial Error; // 3. 
// 3. Deducts 20 score for each "disconnected by server";
// 4. Adds 5 score for every dialing round in which the node stays on line; so a node
//    that keeps on line climbs back to FULL_SCORE very fast.
// 5. Gives a time-sugar score (2 for nodes configured in the config file, 1 for nodes
//    discovered by the P2P framework) whenever a node's score is less than MIN_DIALING_SCORE;

// A node newly added to the known_nodes list starts with FULL_SCORE.
pub const FULL_SCORE: i32 = 100;
// Score lower than MIN_DIALING_SCORE: stop dialing this node.
pub const MIN_DIALING_SCORE: i32 = 60;
// Each dial attempt costs DIALING_SCORE.
pub const DIALING_SCORE: i32 = 10;
// A node that connected successfully earns SUCCESS_DIALING_SCORE.
pub const SUCCESS_DIALING_SCORE: i32 = 10;
// A node refused by the server is charged REFUSED_SCORE each time.
pub const REFUSED_SCORE: i32 = 20;
// A dial error on the client side is charged DIALED_ERROR_SCORE each time.
pub const DIALED_ERROR_SCORE: i32 = 25;
// A node that stays on line is rewarded KEEP_ON_LINE_SCORE on every dialing round.
pub const KEEP_ON_LINE_SCORE: i32 = 5;

// Where a known node was learned from: the local config file, or P2P discovery.
#[derive(Debug, PartialEq)]
pub enum NodeSource {
    FromConfig,
    FromDiscovery,
}

// Per-address book-keeping kept in `known_addrs`.
#[derive(Debug)]
pub struct NodeStatus {
    // score: Score for a node; it decides whether the node will be chosen to dial again
    // (see `dial_nodes`: nodes below MIN_DIALING_SCORE are skipped).
    pub score: i32,
    // session_id: Indicates that this node has been connected to a session. 'None' for has not
    // connected yet.
pub session_id: Option<SessionId>, pub node_src: NodeSource, } impl NodeStatus { pub fn new(score: i32, session_id: Option<SessionId>, node_src: NodeSource) -> Self { NodeStatus { score, session_id, node_src, } } } #[derive(Debug)] pub struct SessionInfo { pub ty: SessionType, pub addr: SocketAddr, } impl SessionInfo { pub fn new(ty: SessionType, addr: SocketAddr) -> Self { SessionInfo { ty, addr } } } #[derive(Debug)] pub struct TransformAddr { // Real linked addr pub conn_addr: SocketAddr, // Outbound addr transformed from Inbound addr pub trans_addr: Option<SocketAddr>, } impl TransformAddr { pub fn new(conn_addr: SocketAddr, trans_addr: Option<SocketAddr>) -> Self { TransformAddr { conn_addr, trans_addr, } } } pub struct NodesManager { known_addrs: HashMap<SocketAddr, NodeStatus>, config_addrs: BTreeMap<String, Option<SocketAddr>>, connected_addrs: HashMap<SessionId, TransformAddr>, pending_connected_addrs: HashMap<SessionId, SessionInfo>, connected_peer_keys: HashMap<Address, SessionId>, check_connected_nodes: Receiver<Instant>, max_connects: usize, nodes_manager_client: NodesManagerClient, nodes_manager_service_receiver: Receiver<NodesManagerMessage>, service_ctrl: Option<ServiceControl>, peer_key: Address, enable_tls: bool, dialing_node: Option<SocketAddr>, self_addr: Option<SocketAddr>, } impl NodesManager { pub fn new(known_addrs: HashMap<SocketAddr, NodeStatus>) -> Self { let mut node_mgr = NodesManager::default(); node_mgr.known_addrs = known_addrs; node_mgr } pub fn from_config(cfg: NetConfig, key: Address) -> Self { let mut node_mgr = NodesManager::default(); let max_connects = cfg.max_connects.unwrap_or(DEFAULT_MAX_CONNECTS); node_mgr.max_connects = max_connects; node_mgr.peer_key = key; if let Some(enable_tls) = cfg.enable_tls { node_mgr.enable_tls = enable_tls; } if let Some(cfg_addrs) = cfg.peers { for addr in cfg_addrs { if let (Some(ip), Some(port)) = (addr.ip, addr.port) { let addr_str = format!("{}:{}", ip, port); 
node_mgr.config_addrs.insert(addr_str, None); } else { warn!("[NodeManager] ip(host) & port 'MUST' be set in peers."); } } } else { warn!("[NodeManager] Does not set any peers in config file!"); } node_mgr } pub fn notify_config_change( rx: StdReceiver<DebouncedEvent>, node_client: NodesManagerClient, fname: String, ) { loop { match rx.recv() { Ok(event) => match event { DebouncedEvent::Create(path_buf) | DebouncedEvent::Write(path_buf) => { if path_buf.is_file() { let file_name = path_buf.file_name().unwrap().to_str().unwrap(); if file_name == fname { info!("file {} changed, will auto reload!", file_name); let config = NetConfig::new(file_name); if let Some(peers) = config.peers { let mut addr_strs = Vec::new(); for addr in peers { if let (Some(ip), Some(port)) = (addr.ip, addr.port) { addr_strs.push(format!("{}:{}", ip, port)); } } node_client.fix_modified_config(ModifiedConfigPeersReq::new( addr_strs, )); } } } } _ => trace!("file notify event: {:?}", event), }, Err(e) => warn!("watch error: {:?}", e), } } } pub fn run(&mut self) { loop { select! { recv(self.nodes_manager_service_receiver) -> msg => { match msg { Ok(data) => { data.handle(self); }, Err(err) => error!("[NodeManager] Receive data error {:?}", err), } } recv(self.check_connected_nodes) -> _ => { self.dial_nodes(); } } } } pub fn client(&self) -> NodesManagerClient { self.nodes_manager_client.clone() } pub fn dial_nodes(&mut self) { if let Some(dialing_node) = self.dialing_node { info!( "[NodeManager] Dialing node: {:?}, waiting for next round.", dialing_node ); return; } self.translate_address(); // If connected node has not reach MAX, select a node from known_addrs to dial. if self.connected_addrs.len() < self.max_connects { for (key, value) in self.known_addrs.iter_mut() { // Node has been connected if let Some(session_id) = value.session_id { debug!( "[NodeManager] Address {:?} has been connected on : {:?}.", *key, session_id ); // Node keep on line, reward KEEP_ON_LINE_SCORE. 
value.score = if (value.score + KEEP_ON_LINE_SCORE) > FULL_SCORE { FULL_SCORE as i32 } else { value.score + KEEP_ON_LINE_SCORE }; continue; } // Give 50% probability to select this node, this design can avoid two nodes // simultaneously dialing each other. let selected_miss: bool = (rand::random::<u32>() % 2) != 0; if selected_miss { debug!( "[NodeManager] Address {:?} selects miss in this round.", *key ); continue; } if let Some(self_addr) = self.self_addr { if *key == self_addr { debug!( "[NodeManager] Trying to connected self: {:?}, skip it", self_addr ); continue; } } // Score design prevents the client from dialing to a node all the time. if value.score < MIN_DIALING_SCORE { debug!( "[NodeManager] Address {:?} has to low score ({:?}) to dial.", *key, value.score ); // The node will get time sugar, the nodes which in config file can get 2, and the // other nodes which discovered by P2P can get 1. value.score += if value.node_src == NodeSource::FromConfig { 2 } else { 1 }; continue; } // Dial this address if let Some(ref mut ctrl) = self.service_ctrl { self.dialing_node = Some(*key); info!("Trying to dial: {:?}", self.dialing_node); match ctrl.dial(socketaddr_to_multiaddr(*key), DialProtocol::All) { Ok(_) => { // Need DIALING_SCORE for every dial. value.score -= DIALING_SCORE; debug!("[NodeManager] Dail success"); } Err(err) => { warn!("[NodeManager] Dail failed : {:?}", err); } } } break; } } debug!( "[NodeManager] connected_addrs info: {:?}", self.connected_addrs ); debug!("[NodeManager] known_addrs info: {:?}", self.known_addrs); debug!( "[NodeManager] Address in connected : {:?}", self.connected_peer_keys ); } pub fn set_service_task_sender(&mut self, ctrl: ServiceControl) { self.service_ctrl = Some(ctrl); } pub fn is_enable_tls(&self) -> bool { self.enable_tls } pub fn translate_address(&mut self) { for (key, value) in self.config_addrs.iter_mut() { // The address has translated. 
if value.is_some() { debug!("[NodeManager] The Address {:?} has been translated.", key); continue; } match key.to_socket_addrs() { Ok(mut result) => { if let Some(socket_addr) = result.next() { // An init node from config file, give it FULL_SCORE. let node_status = NodeStatus::new(FULL_SCORE, None, NodeSource::FromConfig); self.known_addrs.insert(socket_addr, node_status); *value = Some(socket_addr); } else { error!("[NodeManager] Can not convert to socket address!"); } } Err(e) => { error!( "[NodeManager] Can not convert to socket address! error: {}", e ); } } } } } impl Default for NodesManager { fn default() -> NodesManager { let (tx, rx) = unbounded(); let ticker = tick(CHECK_CONNECTED_NODES); let client = NodesManagerClient { sender: tx }; // Set enable_tls = false as default. NodesManager { check_connected_nodes: ticker, known_addrs: HashMap::default(), config_addrs: BTreeMap::default(), connected_addrs: HashMap::default(), connected_peer_keys: HashMap::default(), pending_connected_addrs: HashMap::default(), max_connects: DEFAULT_MAX_CONNECTS, nodes_manager_client: client, nodes_manager_service_receiver: rx, service_ctrl: None, peer_key: Address::zero(), enable_tls: false, dialing_node: None, self_addr: None, } } } #[derive(Clone, Debug)] pub struct NodesManagerClient { sender: Sender<NodesManagerMessage>, } impl NodesManagerClient { pub fn new(sender: Sender<NodesManagerMessage>) -> Self { NodesManagerClient { sender } } pub fn add_node(&self, req: AddNodeReq) { self.send_req(NodesManagerMessage::AddNodeReq(req)); } pub fn dialed_error(&self, req: DialedErrorReq) { self.send_req(NodesManagerMessage::DialedErrorReq(req)); } pub fn connected_self(&self, req: ConnectedSelfReq) { self.send_req(NodesManagerMessage::ConnectedSelf(req)); } pub fn get_random_nodes(&self, req: GetRandomNodesReq) { self.send_req(NodesManagerMessage::GetRandomNodesReq(req)); } pub fn pending_connected_node(&self, req: PendingConnectedNodeReq) { 
self.send_req(NodesManagerMessage::PendingConnectedNodeReq(req)); } pub fn del_connected_node(&self, req: DelConnectedNodeReq) { self.send_req(NodesManagerMessage::DelConnectedNodeReq(req)); } pub fn add_repeated_node(&self, req: AddRepeatedNodeReq) { self.send_req(NodesManagerMessage::AddRepeatedNode(req)); } pub fn broadcast(&self, req: BroadcastReq) { self.send_req(NodesManagerMessage::Broadcast(req)); } pub fn send_message(&self, req: SingleTxReq) { self.send_req(NodesManagerMessage::SingleTxReq(req)); } pub fn get_peer_count(&self, req: GetPeerCountReq) { self.send_req(NodesManagerMessage::GetPeerCount(req)); } pub fn get_peers_info(&self, req: GetPeersInfoReq) { self.send_req(NodesManagerMessage::GetPeersInfo(req)); } pub fn network_init(&self, req: NetworkInitReq) { self.send_req(NodesManagerMessage::NetworkInit(req)); } pub fn add_connected_node(&self, req: AddConnectedNodeReq) { self.send_req(NodesManagerMessage::AddConnectedNode(req)); } pub fn fix_modified_config(&self, req: ModifiedConfigPeersReq) { self.send_req(NodesManagerMessage::ModifiedConfigPeers(req)); } fn send_req(&self, req: NodesManagerMessage) { if let Err(e) = self.sender.try_send(req) { warn!( "[NodesManager] Send message to node manager failed : {:?}", e ); } } } // Define messages for NodesManager pub enum NodesManagerMessage { AddNodeReq(AddNodeReq), DialedErrorReq(DialedErrorReq), GetRandomNodesReq(GetRandomNodesReq), PendingConnectedNodeReq(PendingConnectedNodeReq), DelConnectedNodeReq(DelConnectedNodeReq), Broadcast(BroadcastReq), SingleTxReq(SingleTxReq), GetPeerCount(GetPeerCountReq), NetworkInit(NetworkInitReq), AddConnectedNode(AddConnectedNodeReq), AddRepeatedNode(AddRepeatedNodeReq), ConnectedSelf(ConnectedSelfReq), GetPeersInfo(GetPeersInfoReq), ModifiedConfigPeers(ModifiedConfigPeersReq), } impl NodesManagerMessage { pub fn handle(self, service: &mut NodesManager) { match self { NodesManagerMessage::AddNodeReq(req) => req.handle(service), 
NodesManagerMessage::DialedErrorReq(req) => req.handle(service), NodesManagerMessage::GetRandomNodesReq(req) => req.handle(service), NodesManagerMessage::PendingConnectedNodeReq(req) => req.handle(service), NodesManagerMessage::DelConnectedNodeReq(req) => req.handle(service), NodesManagerMessage::Broadcast(req) => req.handle(service), NodesManagerMessage::SingleTxReq(req) => req.handle(service), NodesManagerMessage::GetPeerCount(req) => req.handle(service), NodesManagerMessage::NetworkInit(req) => req.handle(service), NodesManagerMessage::AddConnectedNode(req) => req.handle(service), NodesManagerMessage::AddRepeatedNode(req) => req.handle(service), NodesManagerMessage::ConnectedSelf(req) => req.handle(service), NodesManagerMessage::GetPeersInfo(req) => req.handle(service), NodesManagerMessage::ModifiedConfigPeers(req) => req.handle(service), } } } #[derive(Default, Clone)] pub struct InitMsg { pub chain_id: u64, pub peer_key: Address, } impl Into<Vec<u8>> for InitMsg { fn into(self) -> Vec<u8> { let mut out = Vec::new(); let mut key_data: [u8; 20] = Default::default(); let mut chain_id_data = vec![]; chain_id_data.write_u64::<BigEndian>(self.chain_id).unwrap(); self.peer_key.copy_to(&mut key_data[..]); out.extend_from_slice(&chain_id_data); out.extend_from_slice(&key_data); out } } impl From<Vec<u8>> for InitMsg { fn from(data: Vec<u8>) -> InitMsg { let mut chain_id_data: [u8; 8] = Default::default(); chain_id_data.copy_from_slice(&data[..8]); let mut chain_id_data = Cursor::new(chain_id_data); let chain_id = chain_id_data.read_u64::<BigEndian>().unwrap(); let peer_key = Address::from_slice(&data[8..]); InitMsg { chain_id, peer_key } } } pub struct AddConnectedNodeReq { session_id: SessionId, ty: SessionType, init_msg: InitMsg, } impl AddConnectedNodeReq { pub fn new(session_id: SessionId, ty: SessionType, init_msg: InitMsg) -> Self { AddConnectedNodeReq { session_id, ty, init_msg, } } pub fn handle(self, service: &mut NodesManager) { if let Some(repeated_id) = 
service.connected_peer_keys.get(&self.init_msg.peer_key) { // Repeated connected, it can a duplicated connected to the same node, or a duplicated // node connected to this server. But in either case, disconnect this session. // In P2P encrypted communication mode, the repeated connection will be detected by // P2P framework, handling this situation by sending a `AddRepeatedNodeReq` message to // NodesManager. See the `handle` in `AddRepeatedNodeReq` for more detail. info!( "[NodeManager] New session [{:?}] repeated with [{:?}], disconnect this session.", self.session_id, *repeated_id ); // It is a repeated_session, but not a repeated node. if let Some(dialing_addr) = service.dialing_node { if self.ty == SessionType::Outbound { if let Some(ref mut node_status) = service.known_addrs.get_mut(&dialing_addr) { node_status.session_id = Some(*repeated_id); node_status.score += SUCCESS_DIALING_SCORE; let _ = service.connected_addrs.entry(*repeated_id).and_modify(|v| { v.trans_addr = Some(dialing_addr); }); } } } if let Some(ref mut ctrl) = service.service_ctrl { let _ = ctrl.disconnect(self.session_id); } } else if service.peer_key == self.init_msg.peer_key { // Connected self, disconnected the session. // In P2P encrypted communication mode, the `connected self` will be detected by // P2P framework, handling this situation by sending a `ConnectedSelfReq` message to // NodesManager. See the `handle` in `ConnectedSelfReq` for more detail. // This logic would be entry twice: // one as server, and the other one as client. if let Some(dialing_node) = service.dialing_node { debug!( "[NodeManager] Connected Self, Delete {:?} from know_addrs", dialing_node ); service.self_addr = Some(dialing_node); if let Some(ref mut ctrl) = service.service_ctrl { let _ = ctrl.disconnect(self.session_id); } } } else { // Found a successful connection after exchanging `init message`. // FIXME: If have reached to max_connects, disconnected this node. // Add connected address. 
if let Some(session_info) = service.pending_connected_addrs.remove(&self.session_id) { info!( "[NodeManager] Add session [{:?}], address: {:?} to Connected_addrs.", self.session_id, session_info.addr ); let _ = service .connected_addrs .insert(self.session_id, TransformAddr::new(session_info.addr, None)); // If it is an active connection, need to set this node in known_addrs has been connected. if self.ty == SessionType::Outbound { if let Some(ref mut node_status) = service.known_addrs.get_mut(&session_info.addr) { node_status.session_id = Some(self.session_id); node_status.score += SUCCESS_DIALING_SCORE; } } } // Add connected peer keys // Because AddRepeatedNodeReq maybe already did above action let _ = service .connected_peer_keys .insert(self.init_msg.peer_key, self.session_id); info!( "[NodeManager] connected_addrs info: {:?}", service.connected_addrs ); info!("[NodeManager] known_addrs info: {:?}", service.known_addrs); info!( "[NodeManager] Address in connected : {:?}", service.connected_peer_keys ); } // End of dealing node for this round. if self.ty == SessionType::Outbound { service.dialing_node = None; } } } #[derive(Default)] pub struct NetworkInitReq { session_id: SessionId, } impl NetworkInitReq { pub fn new(session_id: SessionId) -> Self { NetworkInitReq { session_id } } pub fn handle(self, service: &mut NodesManager) { let peer_key = service.peer_key; let send_key = "network.init".to_string(); let init_msg = InitMsg { chain_id: 0, peer_key, }; let msg_bytes: Vec<u8> = init_msg.into(); let mut buf = Vec::with_capacity(CITA_FRAME_HEADER_LEN + send_key.len() + msg_bytes.len()); pubsub_message_to_network_message(&mut buf, Some((send_key, msg_bytes))); if let Some(ref mut ctrl) = service.service_ctrl { // FIXME: handle the error! 
let ret = ctrl.send_message_to(self.session_id, TRANSFER_PROTOCOL_ID, buf.into()); info!( "[NodeManager] Send network init message!, id: {:?}, peer_addr: {:?}, ret: {:?}", self.session_id, peer_key, ret, ); } } } pub struct AddNodeReq { addr: SocketAddr, source: NodeSource, } impl AddNodeReq { pub fn new(addr: SocketAddr, source: NodeSource) -> Self { AddNodeReq { addr, source } } pub fn handle(self, service: &mut NodesManager) { if service.known_addrs.len() > DEFAULT_MAX_KNOWN_ADDRS { warn!( "[NodeManager] Known address has reach Max: {:?}", DEFAULT_MAX_KNOWN_ADDRS, ); return; } // Add a new node, using a default node status. let default_node_status = NodeStatus::new(FULL_SCORE, None, self.source); service .known_addrs .entry(self.addr) .or_insert(default_node_status); } } pub struct DialedErrorReq { addr: SocketAddr, } impl DialedErrorReq { pub fn new(addr: SocketAddr) -> Self { DialedErrorReq { addr } } pub fn handle(self, service: &mut NodesManager) { if let Some(ref mut node_status) = service.known_addrs.get_mut(&self.addr) { node_status.score -= DIALED_ERROR_SCORE; } // Catch a dial error, this dialing finished service.dialing_node = None; } } pub struct AddRepeatedNodeReq { addr: SocketAddr, session_id: SessionId, } impl AddRepeatedNodeReq { pub fn new(addr: SocketAddr, session_id: SessionId) -> Self { AddRepeatedNodeReq { addr, session_id } } pub fn handle(self, service: &mut NodesManager) { info!( "[NodeManager] Dialing a repeated node [{:?}], on session: {:?}.", self.addr, self.session_id ); if let Some(ref mut node_status) = service.known_addrs.get_mut(&self.addr) { node_status.session_id = Some(self.session_id); node_status.score += SUCCESS_DIALING_SCORE; if let Some(session_info) = service.pending_connected_addrs.remove(&self.session_id) { let _ = service.connected_addrs.insert( self.session_id, TransformAddr::new(session_info.addr, Some(self.addr)), ); } else { let _ = service .connected_addrs .entry(self.session_id) .and_modify(|v| { v.trans_addr = 
Some(self.addr); }); } } else { warn!("[NodeManager] Cant find repeated sessionid in known addrs"); } // This dialing is finished. service.dialing_node = None; } } pub struct GetRandomNodesReq { num: usize, return_channel: Sender<Vec<SocketAddr>>, } impl GetRandomNodesReq { pub fn new(num: usize, return_channel: Sender<Vec<SocketAddr>>) -> Self { GetRandomNodesReq { num, return_channel, } } pub fn handle(self, service: &mut NodesManager) { let addrs = service.known_addrs.keys().take(self.num).cloned().collect(); if let Err(e) = self.return_channel.try_send(addrs) { warn!( "[NodeManager] Get random n nodes, send them failed : {:?}", e ); } } } pub struct PendingConnectedNodeReq { session_id: SessionId, addr: SocketAddr, ty: SessionType, } impl PendingConnectedNodeReq { pub fn new(session_id: SessionId, addr: SocketAddr, ty: SessionType) -> Self { PendingConnectedNodeReq { session_id, addr, ty, } } pub fn handle(self, service: &mut NodesManager) { if service.connected_addrs.len() >= service.max_connects { // Has reached to max connects, refuse this connection info!( "[NodeManager] Has reached to max connects [{:?}], refuse Session [{:?}], address: {:?}", service.max_connects, self.session_id, self.addr ); if let Some(ref mut ctrl) = service.service_ctrl { let _ = ctrl.disconnect(self.session_id); } return; } info!( "[NodeManager] Session [{:?}], address: {:?} pending to add to Connected_addrs.", self.session_id, self.addr ); service .pending_connected_addrs .insert(self.session_id, SessionInfo::new(self.ty, self.addr)); } } pub struct DelConnectedNodeReq { session_id: SessionId, } impl DelConnectedNodeReq { pub fn new(session_id: SessionId) -> Self { DelConnectedNodeReq { session_id } } pub fn handle(self, service: &mut NodesManager) { info!("[NodeManager] Disconnected session [{:?}]", self.session_id); if let Some(addr) = service.connected_addrs.remove(&self.session_id) { let trans_addr = addr.trans_addr.unwrap_or(addr.conn_addr); self.fix_node_status(trans_addr, 
service); // Remove connected peer keys for (key, value) in service.connected_peer_keys.iter() { if self.session_id == *value { info!( "[NodeManager] Remove session [{:?}] from connected_peer_keys.", *value ); service.connected_peer_keys.remove(&key.clone()); break; } } } // Remove pending connected if let Some(session_info) = service.pending_connected_addrs.remove(&self.session_id) { if session_info.ty == SessionType::Outbound { self.fix_node_status(session_info.addr, service); // Close a session which open as client, end of this dialing. service.dialing_node = None; } } } fn fix_node_status(&self, addr: SocketAddr, service: &mut NodesManager) { // Set the node as disconnected in known_addrs if let Some(ref mut node_status) = service.known_addrs.get_mut(&addr) { if let Some(session_id) = node_status.session_id { if session_id == self.session_id { info!("Reset node status of address {:?} to None", addr); node_status.score -= REFUSED_SCORE; node_status.session_id = None; } else { warn!( "[NodeManager] Expected session id: {:?}, but found: {:?}", self.session_id, session_id ); } } else { error!("[NodeManager] Can not get node status from known_addr, this should not happen!"); } } } } #[derive(Debug)] pub struct BroadcastReq { key: String, msg: ProtoMessage, } impl BroadcastReq { pub fn new(key: String, msg: ProtoMessage) -> Self { BroadcastReq { key, msg } } pub fn handle(self, service: &mut NodesManager) { trace!( "[NodeManager] Broadcast msg {:?}, from key {}", self.msg, self.key ); let msg_bytes: Vec<u8> = self.msg.try_into().unwrap(); let mut buf = Vec::with_capacity(CITA_FRAME_HEADER_LEN + self.key.len() + msg_bytes.len()); pubsub_message_to_network_message(&mut buf, Some((self.key, msg_bytes))); if let Some(ref mut ctrl) = service.service_ctrl { let _ = ctrl.filter_broadcast(TargetSession::All, TRANSFER_PROTOCOL_ID, buf.into()); } } } pub struct SingleTxReq { dst: SessionId, key: String, msg: ProtoMessage, } impl SingleTxReq { pub fn new(dst: SessionId, key: 
String, msg: ProtoMessage) -> Self { SingleTxReq { dst, key, msg } } pub fn handle(self, service: &mut NodesManager) { trace!( "[NodeManager] Send msg {:?} to {}, from key {}", self.msg, self.dst, self.key ); let msg_bytes: Vec<u8> = self.msg.try_into().unwrap(); let mut buf = Vec::with_capacity(CITA_FRAME_HEADER_LEN + self.key.len() + msg_bytes.len()); pubsub_message_to_network_message(&mut buf, Some((self.key, msg_bytes))); if let Some(ref mut ctrl) = service.service_ctrl { // FIXME: handle the error! let _ = ctrl.send_message_to(self.dst, TRANSFER_PROTOCOL_ID, buf.into()); } } } pub struct GetPeerCountReq { return_channel: Sender<usize>, } impl GetPeerCountReq { pub fn new(return_channel: Sender<usize>) -> Self { GetPeerCountReq { return_channel } } pub fn handle(self, service: &mut NodesManager) { let peer_count = service.connected_addrs.len(); if let Err(e) = self.return_channel.try_send(peer_count) { warn!( "[NodeManager] Get peer count {}, but send it failed : {:?}", peer_count, e ); } } } pub struct GetPeersInfoReq { return_channel: Sender<HashMap<Address, String>>, } impl GetPeersInfoReq { pub fn new(return_channel: Sender<HashMap<Address, String>>) -> Self { GetPeersInfoReq { return_channel } } pub fn handle(self, service: &mut NodesManager) { let mut peers = HashMap::default(); for (key, value) in service.connected_peer_keys.iter() { if let Some(addr) = service.connected_addrs.get(&value) { peers.insert(key.clone(), addr.conn_addr.ip().to_string()); } else { warn!( "[NodeManager] Can not get socket address for session {} from connected_addr. 
It must be something wrong!", value ); } } debug!("[NodeManager] get peers info : {:?}", peers); if let Err(e) = self.return_channel.try_send(peers) { warn!("[NodeManager] Send peers info failed : {:?}", e); } } } pub struct ConnectedSelfReq { addr: SocketAddr, } impl ConnectedSelfReq { pub fn new(addr: SocketAddr) -> Self { ConnectedSelfReq { addr } } pub fn handle(self, service: &mut NodesManager) { service.self_addr = Some(self.addr); service.dialing_node = None; } } pub struct ModifiedConfigPeersReq { peers: Vec<String>, } impl ModifiedConfigPeersReq { pub fn new(peers: Vec<String>) -> Self { ModifiedConfigPeersReq { peers } } pub fn handle(self, service: &mut NodesManager) { // If new config deleted some peer,disconnect and remove it from known addrs let mut keys: BTreeSet<_> = service.config_addrs.keys().cloned().collect(); for peer in &self.peers { keys.remove(peer); } info!("left peers {:?}", self.peers); // The remainder in keys will be disconnected for key in keys { service.config_addrs.remove(&key).and_then(|addr| { addr.and_then(|addr| { service.known_addrs.remove(&addr).and_then(|node_status| { node_status.session_id.and_then(|sid| { service .service_ctrl .as_mut() .and_then(|ctrl| ctrl.disconnect(sid).ok()) }) }) }) }); } for peer in self.peers { service.config_addrs.entry(peer).or_insert(None); } } } Update node_manager.rs // CITA // Copyright 2016-2019 Cryptape Technologies LLC. // This program is free software: you can redistribute it // and/or modify it under the terms of the GNU General Public // License as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any // later version. // This program is distributed in the hope that it will be // useful, but WITHOUT ANY WARRANTY; without even the implied // warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the GNU General Public License for more details. 
// You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. use crate::cita_protocol::{pubsub_message_to_network_message, CITA_FRAME_HEADER_LEN}; use crate::config::NetConfig; use crate::p2p_protocol::transfer::TRANSFER_PROTOCOL_ID; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use cita_types::Address; use fnv::FnvHashMap as HashMap; use libproto::{Message as ProtoMessage, TryInto}; use logger::{debug, error, info, trace, warn}; use notify::DebouncedEvent; use pubsub::channel::{select, tick, unbounded, Receiver, Sender}; use rand; use std::sync::mpsc::Receiver as StdReceiver; use std::{ collections::{BTreeMap, BTreeSet}, convert::Into, io::Cursor, net::{SocketAddr, ToSocketAddrs}, time::{Duration, Instant}, }; use tentacle::{ service::{DialProtocol, ServiceControl, SessionType, TargetSession}, utils::socketaddr_to_multiaddr, SessionId, }; pub const DEFAULT_MAX_CONNECTS: usize = 666; pub const DEFAULT_MAX_KNOWN_ADDRS: usize = 1000; pub const DEFAULT_PORT: usize = 4000; pub const CHECK_CONNECTED_NODES: Duration = Duration::from_secs(3); // Score uses to manage known_nodes list. If a node has too low score, do not dial it again. // Maybe some complex algorithm can be designed later. But for now, just keeps as simple as below: // 1. Deducts 10 score for each Dial; // 2. Deducts 25 score for each Dial Error; // 3. Deducts 20 score for each Disconnected by server; // 4. Add 5 score for every dialing round if the node keep on line; so If a node keep on line, // it will get FULL_SCORE very fast. // 5. Gives a Time sugar score (2 : nodes was configured in config file, and 1 : nodes was // discovered by P2P framework ) when a node's score less than MIN_DIALING_SCORE; // A new node come into known_nodes list has a FULL_SCORE. pub const FULL_SCORE: i32 = 100; // Score lower than MIN_DIALING_SCORE, stop dialing. 
pub const MIN_DIALING_SCORE: i32 = 60;
// A node needs DIALING_SCORE for every dial.
pub const DIALING_SCORE: i32 = 10;
// A node connected successfully, can get SUCCESS_DIALING_SCORE.
pub const SUCCESS_DIALING_SCORE: i32 = 10;
// A node is refused by server, should need REFUSED_SCORE each time.
pub const REFUSED_SCORE: i32 = 20;
// A node is dialed error by client, should need DIALED_ERROR_SCORE each time.
pub const DIALED_ERROR_SCORE: i32 = 25;
// A node that stays on line earns KEEP_ON_LINE_SCORE every dialing round.
pub const KEEP_ON_LINE_SCORE: i32 = 5;

/// Where a known address came from: the static config file, or P2P discovery.
/// This affects the "time sugar" reward in `dial_nodes` (config nodes recover score faster).
#[derive(Debug, PartialEq)]
pub enum NodeSource {
    FromConfig,
    FromDiscovery,
}

/// Bookkeeping for one known address: its dialing score and, if connected, its session.
#[derive(Debug)]
pub struct NodeStatus {
    // score: Score for a node, it will affect whether the node will be chosen to dail again,
    // or be deleted from the known_addresses list. But for now, it useless.
    pub score: i32,
    // session_id: Indicates that this node has been connected to a session. 'None' for has not
    // connected yet.
    pub session_id: Option<SessionId>,
    // Origin of this address (config vs discovery).
    pub node_src: NodeSource,
}

impl NodeStatus {
    /// Creates a status record with the given score, optional session and origin.
    pub fn new(score: i32, session_id: Option<SessionId>, node_src: NodeSource) -> Self {
        NodeStatus {
            score,
            session_id,
            node_src,
        }
    }
}

/// Session direction plus the remote socket address, held while a session is pending.
#[derive(Debug)]
pub struct SessionInfo {
    pub ty: SessionType,
    pub addr: SocketAddr,
}

impl SessionInfo {
    pub fn new(ty: SessionType, addr: SocketAddr) -> Self {
        SessionInfo { ty, addr }
    }
}

/// Addresses recorded for a connected session.
#[derive(Debug)]
pub struct TransformAddr {
    // Real linked addr
    pub conn_addr: SocketAddr,
    // Outbound addr transformed from Inbound addr
    pub trans_addr: Option<SocketAddr>,
}

impl TransformAddr {
    pub fn new(conn_addr: SocketAddr, trans_addr: Option<SocketAddr>) -> Self {
        TransformAddr {
            conn_addr,
            trans_addr,
        }
    }
}

/// Central actor owning all peer state. It consumes `NodesManagerMessage`s from a channel
/// (see `run`) and periodically dials known addresses (see `dial_nodes`).
pub struct NodesManager {
    // All addresses this node has ever learned about, with their dialing status.
    known_addrs: HashMap<SocketAddr, NodeStatus>,
    // "host:port" strings from the config file, mapped to their resolved address once known.
    config_addrs: BTreeMap<String, Option<SocketAddr>>,
    // Fully established sessions (init message exchanged).
    connected_addrs: HashMap<SessionId, TransformAddr>,
    // Sessions opened at transport level but not yet confirmed by an init message.
    pending_connected_addrs: HashMap<SessionId, SessionInfo>,
    // Peer identity (address key from the init message) -> session carrying it.
    connected_peer_keys: HashMap<Address, SessionId>,
    // Ticker channel firing every CHECK_CONNECTED_NODES.
    check_connected_nodes: Receiver<Instant>,
    max_connects: usize,
    nodes_manager_client: NodesManagerClient,
    nodes_manager_service_receiver: Receiver<NodesManagerMessage>,
    // Handle into the tentacle service; None until `set_service_task_sender` is called.
    service_ctrl: Option<ServiceControl>,
    // This node's own identity key.
    peer_key: Address,
    enable_tls: bool,
    // At most one outstanding dial at a time; Some(addr) while a dial is in flight.
    dialing_node: Option<SocketAddr>,
    // Our own address, learned when we accidentally dial ourselves.
    self_addr: Option<SocketAddr>,
}

impl NodesManager {
    /// Creates a manager pre-seeded with a known-address book; everything else is default.
    pub fn new(known_addrs: HashMap<SocketAddr, NodeStatus>) -> Self {
        let mut node_mgr = NodesManager::default();
        node_mgr.known_addrs = known_addrs;
        node_mgr
    }

    /// Builds a manager from the network config: connection limit, TLS flag, own key,
    /// and the configured peer list (as unresolved "ip:port" strings).
    pub fn from_config(cfg: NetConfig, key: Address) -> Self {
        let mut node_mgr = NodesManager::default();
        let max_connects = cfg.max_connects.unwrap_or(DEFAULT_MAX_CONNECTS);
        node_mgr.max_connects = max_connects;
        node_mgr.peer_key = key;
        if let Some(enable_tls) = cfg.enable_tls {
            node_mgr.enable_tls = enable_tls;
        }
        if let Some(cfg_addrs) = cfg.peers {
            for addr in cfg_addrs {
                if let (Some(ip), Some(port)) = (addr.ip, addr.port) {
                    let addr_str = format!("{}:{}", ip, port);
                    // Resolution to SocketAddr is deferred to `translate_address`.
                    node_mgr.config_addrs.insert(addr_str, None);
                } else {
                    warn!("[NodeManager] ip(host) & port 'MUST' be set in peers.");
                }
            }
        } else {
            warn!("[NodeManager] Does not set any peers in config file!");
        }
        node_mgr
    }

    /// Blocking watcher loop: on create/write of the config file named `fname`, re-reads
    /// the peer list and forwards it to the manager as a `ModifiedConfigPeersReq`.
    pub fn notify_config_change(
        rx: StdReceiver<DebouncedEvent>,
        node_client: NodesManagerClient,
        fname: String,
    ) {
        loop {
            match rx.recv() {
                Ok(event) => match event {
                    DebouncedEvent::Create(path_buf) | DebouncedEvent::Write(path_buf) => {
                        if path_buf.is_file() {
                            let file_name = path_buf.file_name().unwrap().to_str().unwrap();
                            if file_name == fname {
                                info!("file {} changed, will auto reload!", file_name);
                                let config = NetConfig::new(file_name);
                                if let Some(peers) = config.peers {
                                    let mut addr_strs = Vec::new();
                                    for addr in peers {
                                        if let (Some(ip), Some(port)) = (addr.ip, addr.port) {
                                            addr_strs.push(format!("{}:{}", ip, port));
                                        }
                                    }
                                    node_client.fix_modified_config(ModifiedConfigPeersReq::new(
                                        addr_strs,
                                    ));
                                }
                            }
                        }
                    }
                    _ => trace!("file notify event: {:?}", event),
                },
                Err(e) => warn!("watch error: {:?}", e),
            }
        }
    }

    /// Main event loop: handles incoming manager messages, and dials on every tick.
    /// Never returns.
    pub fn run(&mut self) {
        loop {
            select! {
                recv(self.nodes_manager_service_receiver) -> msg => {
                    match msg {
                        Ok(data) => {
                            data.handle(self);
                        },
                        Err(err) => error!("[NodeManager] Receive data error {:?}", err),
                    }
                }
                recv(self.check_connected_nodes) -> _ => {
                    self.dial_nodes();
                }
            }
        }
    }

    /// Returns a cloneable client handle for sending messages to this manager.
    pub fn client(&self) -> NodesManagerClient {
        self.nodes_manager_client.clone()
    }

    /// One dialing round: if no dial is in flight and we are under `max_connects`,
    /// pick at most one eligible known address and dial it. Also applies per-round
    /// score rewards (on-line reward, "time sugar" for low-score nodes).
    pub fn dial_nodes(&mut self) {
        // Only one outstanding dial at a time.
        if let Some(dialing_node) = self.dialing_node {
            info!(
                "[NodeManager] Dialing node: {:?}, waiting for next round.",
                dialing_node
            );
            return;
        }
        self.translate_address();

        // If connected node has not reach MAX, select a node from known_addrs to dial.
        if self.connected_addrs.len() < self.max_connects {
            for (key, value) in self.known_addrs.iter_mut() {
                // Node has been connected
                if let Some(session_id) = value.session_id {
                    debug!(
                        "[NodeManager] Address {:?} has been connected on : {:?}.",
                        *key, session_id
                    );
                    // Node keep on line, reward KEEP_ON_LINE_SCORE (capped at FULL_SCORE).
                    value.score = if (value.score + KEEP_ON_LINE_SCORE) > FULL_SCORE {
                        FULL_SCORE as i32
                    } else {
                        value.score + KEEP_ON_LINE_SCORE
                    };
                    continue;
                }

                // Give 50% probability to select this node, this design can avoid two nodes
                // simultaneously dialing each other.
                let selected_miss: bool = (rand::random::<u32>() % 2) != 0;
                if selected_miss {
                    debug!(
                        "[NodeManager] Address {:?} selects miss in this round.",
                        *key
                    );
                    continue;
                }

                // Never dial our own address.
                if let Some(self_addr) = self.self_addr {
                    if *key == self_addr {
                        debug!(
                            "[NodeManager] Trying to connected self: {:?}, skip it",
                            self_addr
                        );
                        continue;
                    }
                }

                // Score design prevents the client from dialing to a node all the time.
                if value.score < MIN_DIALING_SCORE {
                    debug!(
                        "[NodeManager] Address {:?} has to low score ({:?}) to dial.",
                        *key, value.score
                    );
                    // The node will get time sugar, the nodes which in config file can get 2, and the
                    // other nodes which discovered by P2P can get 1.
                    value.score += if value.node_src == NodeSource::FromConfig {
                        2
                    } else {
                        1
                    };
                    continue;
                }

                // Dial this address
                if let Some(ref mut ctrl) = self.service_ctrl {
                    self.dialing_node = Some(*key);
                    info!("Trying to dial: {:?}", self.dialing_node);
                    match ctrl.dial(socketaddr_to_multiaddr(*key), DialProtocol::All) {
                        Ok(_) => {
                            // Need DIALING_SCORE for every dial.
                            value.score -= DIALING_SCORE;
                            debug!("[NodeManager] Dail success");
                        }
                        Err(err) => {
                            warn!("[NodeManager] Dail failed : {:?}", err);
                        }
                    }
                }
                // At most one dial per round.
                break;
            }
        }
        debug!(
            "[NodeManager] connected_addrs info: {:?}",
            self.connected_addrs
        );
        debug!("[NodeManager] known_addrs info: {:?}", self.known_addrs);
        debug!(
            "[NodeManager] Address in connected : {:?}",
            self.connected_peer_keys
        );
    }

    /// Installs the tentacle `ServiceControl` handle used for dialing/sending/disconnecting.
    pub fn set_service_task_sender(&mut self, ctrl: ServiceControl) {
        self.service_ctrl = Some(ctrl);
    }

    pub fn is_enable_tls(&self) -> bool {
        self.enable_tls
    }

    /// Resolves any not-yet-resolved "host:port" entries from the config into socket
    /// addresses and registers them in `known_addrs` with FULL_SCORE.
    pub fn translate_address(&mut self) {
        for (key, value) in self.config_addrs.iter_mut() {
            // The address has translated.
            if value.is_some() {
                debug!("[NodeManager] The Address {:?} has been translated.", key);
                continue;
            }
            match key.to_socket_addrs() {
                Ok(mut result) => {
                    // Only the first resolved address is used.
                    if let Some(socket_addr) = result.next() {
                        // An init node from config file, give it FULL_SCORE.
                        let node_status = NodeStatus::new(FULL_SCORE, None, NodeSource::FromConfig);
                        self.known_addrs.insert(socket_addr, node_status);
                        *value = Some(socket_addr);
                    } else {
                        error!("[NodeManager] Can not convert to socket address!");
                    }
                }
                Err(e) => {
                    error!(
                        "[NodeManager] Can not convert to socket address! error: {}",
                        e
                    );
                }
            }
        }
    }
}

impl Default for NodesManager {
    fn default() -> NodesManager {
        let (tx, rx) = unbounded();
        let ticker = tick(CHECK_CONNECTED_NODES);
        let client = NodesManagerClient { sender: tx };

        // Set enable_tls = false as default.
        NodesManager {
            check_connected_nodes: ticker,
            known_addrs: HashMap::default(),
            config_addrs: BTreeMap::default(),
            connected_addrs: HashMap::default(),
            connected_peer_keys: HashMap::default(),
            pending_connected_addrs: HashMap::default(),
            max_connects: DEFAULT_MAX_CONNECTS,
            nodes_manager_client: client,
            nodes_manager_service_receiver: rx,
            service_ctrl: None,
            peer_key: Address::zero(),
            enable_tls: false,
            dialing_node: None,
            self_addr: None,
        }
    }
}

/// Cheap, cloneable sender half; each method wraps its request in the matching
/// `NodesManagerMessage` variant and pushes it to the manager's channel.
#[derive(Clone, Debug)]
pub struct NodesManagerClient {
    sender: Sender<NodesManagerMessage>,
}

impl NodesManagerClient {
    pub fn new(sender: Sender<NodesManagerMessage>) -> Self {
        NodesManagerClient { sender }
    }

    pub fn add_node(&self, req: AddNodeReq) {
        self.send_req(NodesManagerMessage::AddNodeReq(req));
    }

    pub fn dialed_error(&self, req: DialedErrorReq) {
        self.send_req(NodesManagerMessage::DialedErrorReq(req));
    }

    pub fn connected_self(&self, req: ConnectedSelfReq) {
        self.send_req(NodesManagerMessage::ConnectedSelf(req));
    }

    pub fn get_random_nodes(&self, req: GetRandomNodesReq) {
        self.send_req(NodesManagerMessage::GetRandomNodesReq(req));
    }

    pub fn pending_connected_node(&self, req: PendingConnectedNodeReq) {
        self.send_req(NodesManagerMessage::PendingConnectedNodeReq(req));
    }

    pub fn del_connected_node(&self, req: DelConnectedNodeReq) {
        self.send_req(NodesManagerMessage::DelConnectedNodeReq(req));
    }

    pub fn add_repeated_node(&self, req: AddRepeatedNodeReq) {
        self.send_req(NodesManagerMessage::AddRepeatedNode(req));
    }

    pub fn broadcast(&self, req: BroadcastReq) {
        self.send_req(NodesManagerMessage::Broadcast(req));
    }

    pub fn send_message(&self, req: SingleTxReq) {
        self.send_req(NodesManagerMessage::SingleTxReq(req));
    }

    pub fn get_peer_count(&self, req: GetPeerCountReq) {
        self.send_req(NodesManagerMessage::GetPeerCount(req));
    }

    pub fn get_peers_info(&self, req: GetPeersInfoReq) {
        self.send_req(NodesManagerMessage::GetPeersInfo(req));
    }

    pub fn network_init(&self, req: NetworkInitReq) {
        self.send_req(NodesManagerMessage::NetworkInit(req));
    }

    pub fn add_connected_node(&self, req: AddConnectedNodeReq) {
        self.send_req(NodesManagerMessage::AddConnectedNode(req));
    }

    pub fn fix_modified_config(&self, req: ModifiedConfigPeersReq) {
        self.send_req(NodesManagerMessage::ModifiedConfigPeers(req));
    }

    // Non-blocking send; a full/disconnected channel is only logged, the request is dropped.
    fn send_req(&self, req: NodesManagerMessage) {
        if let Err(e) = self.sender.try_send(req) {
            warn!(
                "[NodesManager] Send message to node manager failed : {:?}",
                e
            );
        }
    }
}

// Define messages for NodesManager
pub enum NodesManagerMessage {
    AddNodeReq(AddNodeReq),
    DialedErrorReq(DialedErrorReq),
    GetRandomNodesReq(GetRandomNodesReq),
    PendingConnectedNodeReq(PendingConnectedNodeReq),
    DelConnectedNodeReq(DelConnectedNodeReq),
    Broadcast(BroadcastReq),
    SingleTxReq(SingleTxReq),
    GetPeerCount(GetPeerCountReq),
    NetworkInit(NetworkInitReq),
    AddConnectedNode(AddConnectedNodeReq),
    AddRepeatedNode(AddRepeatedNodeReq),
    ConnectedSelf(ConnectedSelfReq),
    GetPeersInfo(GetPeersInfoReq),
    ModifiedConfigPeers(ModifiedConfigPeersReq),
}

impl NodesManagerMessage {
    /// Dispatches the message to its request-specific handler, consuming it.
    pub fn handle(self, service: &mut NodesManager) {
        match self {
            NodesManagerMessage::AddNodeReq(req) => req.handle(service),
            NodesManagerMessage::DialedErrorReq(req) => req.handle(service),
            NodesManagerMessage::GetRandomNodesReq(req) => req.handle(service),
            NodesManagerMessage::PendingConnectedNodeReq(req) => req.handle(service),
            NodesManagerMessage::DelConnectedNodeReq(req) => req.handle(service),
            NodesManagerMessage::Broadcast(req) => req.handle(service),
            NodesManagerMessage::SingleTxReq(req) => req.handle(service),
            NodesManagerMessage::GetPeerCount(req) => req.handle(service),
            NodesManagerMessage::NetworkInit(req) => req.handle(service),
            NodesManagerMessage::AddConnectedNode(req) => req.handle(service),
            NodesManagerMessage::AddRepeatedNode(req) => req.handle(service),
            NodesManagerMessage::ConnectedSelf(req) => req.handle(service),
            NodesManagerMessage::GetPeersInfo(req) => req.handle(service),
            NodesManagerMessage::ModifiedConfigPeers(req) => req.handle(service),
        }
    }
}

/// Handshake payload exchanged on a new session: chain id + peer identity key.
/// Wire format (see the Into/From impls): 8 bytes big-endian chain_id, then 20 key bytes.
#[derive(Default, Clone)]
pub struct InitMsg {
    pub chain_id: u64,
    pub peer_key: Address,
}

impl Into<Vec<u8>> for InitMsg {
    fn into(self) -> Vec<u8> {
        let mut out = Vec::new();
        let mut key_data: [u8; 20] = Default::default();
        let mut chain_id_data = vec![];
        chain_id_data.write_u64::<BigEndian>(self.chain_id).unwrap();
        self.peer_key.copy_to(&mut key_data[..]);
        out.extend_from_slice(&chain_id_data);
        out.extend_from_slice(&key_data);
        out
    }
}

impl From<Vec<u8>> for InitMsg {
    // NOTE(review): panics if `data` is shorter than 8 bytes (slice indexing) — the input
    // is assumed to be a well-formed init frame produced by the Into impl above.
    fn from(data: Vec<u8>) -> InitMsg {
        let mut chain_id_data: [u8; 8] = Default::default();
        chain_id_data.copy_from_slice(&data[..8]);
        let mut chain_id_data = Cursor::new(chain_id_data);
        let chain_id = chain_id_data.read_u64::<BigEndian>().unwrap();
        let peer_key = Address::from_slice(&data[8..]);
        InitMsg { chain_id, peer_key }
    }
}

/// A session has completed the init-message exchange; promote, deduplicate, or drop it.
pub struct AddConnectedNodeReq {
    session_id: SessionId,
    ty: SessionType,
    init_msg: InitMsg,
}

impl AddConnectedNodeReq {
    pub fn new(session_id: SessionId, ty: SessionType, init_msg: InitMsg) -> Self {
        AddConnectedNodeReq {
            session_id,
            ty,
            init_msg,
        }
    }

    /// Three cases: the peer key is already connected (drop the duplicate session),
    /// the peer key is our own (we dialed ourselves; record self_addr and drop),
    /// or it is genuinely new (promote from pending to connected).
    pub fn handle(self, service: &mut NodesManager) {
        if let Some(repeated_id) = service.connected_peer_keys.get(&self.init_msg.peer_key) {
            // Repeated connected, it can a duplicated connected to the same node, or a duplicated
            // node connected to this server. But in either case, disconnect this session.
            // In P2P encrypted communication mode, the repeated connection will be detected by
            // P2P framework, handling this situation by sending a `AddRepeatedNodeReq` message to
            // NodesManager. See the `handle` in `AddRepeatedNodeReq` for more detail.
            info!(
                "[NodeManager] New session [{:?}] repeated with [{:?}], disconnect this session.",
                self.session_id, *repeated_id
            );

            // It is a repeated_session, but not a repeated node.
            if let Some(dialing_addr) = service.dialing_node {
                if self.ty == SessionType::Outbound {
                    // Credit the existing session with the address we just dialed.
                    if let Some(ref mut node_status) = service.known_addrs.get_mut(&dialing_addr) {
                        node_status.session_id = Some(*repeated_id);
                        node_status.score += SUCCESS_DIALING_SCORE;

                        let _ = service.connected_addrs.entry(*repeated_id).and_modify(|v| {
                            v.trans_addr = Some(dialing_addr);
                        });
                    }
                }
            }

            if let Some(ref mut ctrl) = service.service_ctrl {
                let _ = ctrl.disconnect(self.session_id);
            }
        } else if service.peer_key == self.init_msg.peer_key {
            // Connected self, disconnected the session.
            // In P2P encrypted communication mode, the `connected self` will be detected by
            // P2P framework, handling this situation by sending a `ConnectedSelfReq` message to
            // NodesManager. See the `handle` in `ConnectedSelfReq` for more detail.
            // This logic would be entry twice:
            // one as server, and the other one as client.
            if let Some(dialing_node) = service.dialing_node {
                debug!(
                    "[NodeManager] Connected Self, Delete {:?} from know_addrs",
                    dialing_node
                );
                service.self_addr = Some(dialing_node);
                if let Some(ref mut ctrl) = service.service_ctrl {
                    let _ = ctrl.disconnect(self.session_id);
                }
            }
        } else {
            // Found a successful connection after exchanging `init message`.
            // FIXME: If have reached to max_connects, disconnected this node.

            // Add connected address.
            if let Some(session_info) = service.pending_connected_addrs.remove(&self.session_id) {
                info!(
                    "[NodeManager] Add session [{:?}], address: {:?} to Connected_addrs.",
                    self.session_id, session_info.addr
                );
                let _ = service
                    .connected_addrs
                    .insert(self.session_id, TransformAddr::new(session_info.addr, None));

                // If it is an active connection, need to set this node in known_addrs has been connected.
                if self.ty == SessionType::Outbound {
                    if let Some(ref mut node_status) =
                        service.known_addrs.get_mut(&session_info.addr)
                    {
                        node_status.session_id = Some(self.session_id);
                        node_status.score += SUCCESS_DIALING_SCORE;
                    }
                }
            }

            // Add connected peer keys
            // Because AddRepeatedNodeReq maybe already did above action
            let _ = service
                .connected_peer_keys
                .insert(self.init_msg.peer_key, self.session_id);

            info!(
                "[NodeManager] connected_addrs info: {:?}",
                service.connected_addrs
            );
            info!("[NodeManager] known_addrs info: {:?}", service.known_addrs);
            info!(
                "[NodeManager] Address in connected : {:?}",
                service.connected_peer_keys
            );
        }

        // End of dealing node for this round.
        if self.ty == SessionType::Outbound {
            service.dialing_node = None;
        }
    }
}

/// Sends our `InitMsg` handshake over a freshly opened session.
#[derive(Default)]
pub struct NetworkInitReq {
    session_id: SessionId,
}

impl NetworkInitReq {
    pub fn new(session_id: SessionId) -> Self {
        NetworkInitReq { session_id }
    }

    pub fn handle(self, service: &mut NodesManager) {
        let peer_key = service.peer_key;
        let send_key = "network.init".to_string();
        let init_msg = InitMsg {
            chain_id: 0,
            peer_key,
        };
        let msg_bytes: Vec<u8> = init_msg.into();

        let mut buf = Vec::with_capacity(CITA_FRAME_HEADER_LEN + send_key.len() + msg_bytes.len());
        pubsub_message_to_network_message(&mut buf, Some((send_key, msg_bytes)));
        if let Some(ref mut ctrl) = service.service_ctrl {
            // FIXME: handle the error!
            let ret = ctrl.send_message_to(self.session_id, TRANSFER_PROTOCOL_ID, buf.into());
            info!(
                "[NodeManager] Send network init message!, id: {:?}, peer_addr: {:?}, ret: {:?}",
                self.session_id, peer_key, ret,
            );
        }
    }
}

/// Registers a newly learned address in the known-address book (bounded by
/// DEFAULT_MAX_KNOWN_ADDRS; existing entries are left untouched).
pub struct AddNodeReq {
    addr: SocketAddr,
    source: NodeSource,
}

impl AddNodeReq {
    pub fn new(addr: SocketAddr, source: NodeSource) -> Self {
        AddNodeReq { addr, source }
    }

    pub fn handle(self, service: &mut NodesManager) {
        if service.known_addrs.len() > DEFAULT_MAX_KNOWN_ADDRS {
            warn!(
                "[NodeManager] Known address has reach Max: {:?}",
                DEFAULT_MAX_KNOWN_ADDRS,
            );
            return;
        }
        // Add a new node, using a default node status.
        let default_node_status = NodeStatus::new(FULL_SCORE, None, self.source);
        service
            .known_addrs
            .entry(self.addr)
            .or_insert(default_node_status);
    }
}

/// A dial attempt failed: penalize the address and clear the in-flight dial marker.
pub struct DialedErrorReq {
    addr: SocketAddr,
}

impl DialedErrorReq {
    pub fn new(addr: SocketAddr) -> Self {
        DialedErrorReq { addr }
    }

    pub fn handle(self, service: &mut NodesManager) {
        if let Some(ref mut node_status) = service.known_addrs.get_mut(&self.addr) {
            node_status.score -= DIALED_ERROR_SCORE;
        }
        // Catch a dial error, this dialing finished
        service.dialing_node = None;
    }
}

/// The P2P framework detected we dialed an already-connected node: bind the dialed
/// address to the existing session instead of keeping a duplicate.
pub struct AddRepeatedNodeReq {
    addr: SocketAddr,
    session_id: SessionId,
}

impl AddRepeatedNodeReq {
    pub fn new(addr: SocketAddr, session_id: SessionId) -> Self {
        AddRepeatedNodeReq { addr, session_id }
    }

    pub fn handle(self, service: &mut NodesManager) {
        info!(
            "[NodeManager] Dialing a repeated node [{:?}], on session: {:?}.",
            self.addr, self.session_id
        );
        if let Some(ref mut node_status) = service.known_addrs.get_mut(&self.addr) {
            node_status.session_id = Some(self.session_id);
            node_status.score += SUCCESS_DIALING_SCORE;
            if let Some(session_info) = service.pending_connected_addrs.remove(&self.session_id) {
                let _ = service.connected_addrs.insert(
                    self.session_id,
                    TransformAddr::new(session_info.addr, Some(self.addr)),
                );
            } else {
                let _ = service
                    .connected_addrs
                    .entry(self.session_id)
                    .and_modify(|v| {
                        v.trans_addr = Some(self.addr);
                    });
            }
        } else {
            warn!("[NodeManager] Cant find repeated sock addr in known addrs");
        }

        // This dialing is finished.
        service.dialing_node = None;
    }
}

/// Returns up to `num` known addresses over the reply channel (used by discovery).
pub struct GetRandomNodesReq {
    num: usize,
    return_channel: Sender<Vec<SocketAddr>>,
}

impl GetRandomNodesReq {
    pub fn new(num: usize, return_channel: Sender<Vec<SocketAddr>>) -> Self {
        GetRandomNodesReq {
            num,
            return_channel,
        }
    }

    pub fn handle(self, service: &mut NodesManager) {
        // NOTE(review): despite the name, this takes the first `num` keys in map order,
        // not a random sample.
        let addrs = service.known_addrs.keys().take(self.num).cloned().collect();

        if let Err(e) = self.return_channel.try_send(addrs) {
            warn!(
                "[NodeManager] Get random n nodes, send them failed : {:?}",
                e
            );
        }
    }
}

/// A transport-level session opened; park it in `pending_connected_addrs` until the
/// init message arrives, or refuse it if we are at `max_connects`.
pub struct PendingConnectedNodeReq {
    session_id: SessionId,
    addr: SocketAddr,
    ty: SessionType,
}

impl PendingConnectedNodeReq {
    pub fn new(session_id: SessionId, addr: SocketAddr, ty: SessionType) -> Self {
        PendingConnectedNodeReq {
            session_id,
            addr,
            ty,
        }
    }

    pub fn handle(self, service: &mut NodesManager) {
        if service.connected_addrs.len() >= service.max_connects {
            // Has reached to max connects, refuse this connection
            info!(
                "[NodeManager] Has reached to max connects [{:?}], refuse Session [{:?}], address: {:?}",
                service.max_connects, self.session_id, self.addr
            );
            if let Some(ref mut ctrl) = service.service_ctrl {
                let _ = ctrl.disconnect(self.session_id);
            }
            return;
        }

        info!(
            "[NodeManager] Session [{:?}], address: {:?} pending to add to Connected_addrs.",
            self.session_id, self.addr
        );
        service
            .pending_connected_addrs
            .insert(self.session_id, SessionInfo::new(self.ty, self.addr));
    }
}

/// A session closed: purge it from connected/pending maps and reset the
/// corresponding known-address status.
pub struct DelConnectedNodeReq {
    session_id: SessionId,
}

impl DelConnectedNodeReq {
    pub fn new(session_id: SessionId) -> Self {
        DelConnectedNodeReq { session_id }
    }

    pub fn handle(self, service: &mut NodesManager) {
        info!("[NodeManager] Disconnected session [{:?}]", self.session_id);

        if let Some(addr) = service.connected_addrs.remove(&self.session_id) {
            // Prefer the outbound (dialed) address when one was recorded.
            let trans_addr = addr.trans_addr.unwrap_or(addr.conn_addr);
            self.fix_node_status(trans_addr, service);

            // Remove connected peer keys
            for (key, value) in service.connected_peer_keys.iter() {
                if self.session_id == *value {
                    info!(
                        "[NodeManager] Remove session [{:?}] from connected_peer_keys.",
                        *value
                    );
                    service.connected_peer_keys.remove(&key.clone());
                    break;
                }
            }
        }

        // Remove pending connected
        if let Some(session_info) = service.pending_connected_addrs.remove(&self.session_id) {
            if session_info.ty == SessionType::Outbound {
                self.fix_node_status(session_info.addr, service);
                // Close a session which open as client, end of this dialing.
                service.dialing_node = None;
            }
        }
    }

    // Marks `addr` as disconnected in known_addrs and applies the REFUSED_SCORE penalty,
    // but only if the recorded session matches the one being torn down.
    fn fix_node_status(&self, addr: SocketAddr, service: &mut NodesManager) {
        // Set the node as disconnected in known_addrs
        if let Some(ref mut node_status) = service.known_addrs.get_mut(&addr) {
            if let Some(session_id) = node_status.session_id {
                if session_id == self.session_id {
                    info!("Reset node status of address {:?} to None", addr);
                    node_status.score -= REFUSED_SCORE;
                    node_status.session_id = None;
                } else {
                    warn!(
                        "[NodeManager] Expected session id: {:?}, but found: {:?}",
                        self.session_id, session_id
                    );
                }
            } else {
                error!("[NodeManager] Can not get node status from known_addr, this should not happen!");
            }
        }
    }
}

/// Broadcasts a pubsub message to every connected session.
#[derive(Debug)]
pub struct BroadcastReq {
    key: String,
    msg: ProtoMessage,
}

impl BroadcastReq {
    pub fn new(key: String, msg: ProtoMessage) -> Self {
        BroadcastReq { key, msg }
    }

    pub fn handle(self, service: &mut NodesManager) {
        trace!(
            "[NodeManager] Broadcast msg {:?}, from key {}",
            self.msg,
            self.key
        );
        let msg_bytes: Vec<u8> = self.msg.try_into().unwrap();

        let mut buf = Vec::with_capacity(CITA_FRAME_HEADER_LEN + self.key.len() + msg_bytes.len());
        pubsub_message_to_network_message(&mut buf, Some((self.key, msg_bytes)));
        if let Some(ref mut ctrl) = service.service_ctrl {
            let _ = ctrl.filter_broadcast(TargetSession::All, TRANSFER_PROTOCOL_ID, buf.into());
        }
    }
}

/// Sends a pubsub message to one specific session.
pub struct SingleTxReq {
    dst: SessionId,
    key: String,
    msg: ProtoMessage,
}

impl SingleTxReq {
    pub fn new(dst: SessionId, key: String, msg: ProtoMessage) -> Self {
        SingleTxReq { dst, key, msg }
    }

    pub fn handle(self, service: &mut NodesManager) {
        trace!(
            "[NodeManager] Send msg {:?} to {}, from key {}",
            self.msg,
            self.dst,
            self.key
        );
        let msg_bytes: Vec<u8> = self.msg.try_into().unwrap();

        let mut buf = Vec::with_capacity(CITA_FRAME_HEADER_LEN + self.key.len() + msg_bytes.len());
        pubsub_message_to_network_message(&mut buf, Some((self.key, msg_bytes)));
        if let Some(ref mut ctrl) = service.service_ctrl {
            // FIXME: handle the error!
            let _ = ctrl.send_message_to(self.dst, TRANSFER_PROTOCOL_ID, buf.into());
        }
    }
}

/// Replies with the number of fully connected sessions.
pub struct GetPeerCountReq {
    return_channel: Sender<usize>,
}

impl GetPeerCountReq {
    pub fn new(return_channel: Sender<usize>) -> Self {
        GetPeerCountReq { return_channel }
    }

    pub fn handle(self, service: &mut NodesManager) {
        let peer_count = service.connected_addrs.len();
        if let Err(e) = self.return_channel.try_send(peer_count) {
            warn!(
                "[NodeManager] Get peer count {}, but send it failed : {:?}",
                peer_count, e
            );
        }
    }
}

/// Replies with a map of peer identity -> connected IP string.
pub struct GetPeersInfoReq {
    return_channel: Sender<HashMap<Address, String>>,
}

impl GetPeersInfoReq {
    pub fn new(return_channel: Sender<HashMap<Address, String>>) -> Self {
        GetPeersInfoReq { return_channel }
    }

    pub fn handle(self, service: &mut NodesManager) {
        let mut peers = HashMap::default();

        for (key, value) in service.connected_peer_keys.iter() {
            if let Some(addr) = service.connected_addrs.get(&value) {
                peers.insert(key.clone(), addr.conn_addr.ip().to_string());
            } else {
                warn!(
                    "[NodeManager] Can not get socket address for session {} from connected_addr. It must be something wrong!",
                    value
                );
            }
        }

        debug!("[NodeManager] get peers info : {:?}", peers);
        if let Err(e) = self.return_channel.try_send(peers) {
            warn!("[NodeManager] Send peers info failed : {:?}", e);
        }
    }
}

/// The framework detected a self-connection: remember our own address so
/// `dial_nodes` skips it, and finish the in-flight dial.
pub struct ConnectedSelfReq {
    addr: SocketAddr,
}

impl ConnectedSelfReq {
    pub fn new(addr: SocketAddr) -> Self {
        ConnectedSelfReq { addr }
    }

    pub fn handle(self, service: &mut NodesManager) {
        service.self_addr = Some(self.addr);
        service.dialing_node = None;
    }
}

/// The config file changed: disconnect/forget peers that were removed and
/// register any newly added peer strings (resolved later by `translate_address`).
pub struct ModifiedConfigPeersReq {
    peers: Vec<String>,
}

impl ModifiedConfigPeersReq {
    pub fn new(peers: Vec<String>) -> Self {
        ModifiedConfigPeersReq { peers }
    }

    pub fn handle(self, service: &mut NodesManager) {
        // If new config deleted some peer,disconnect and remove it from known addrs
        let mut keys: BTreeSet<_> = service.config_addrs.keys().cloned().collect();
        for peer in &self.peers {
            keys.remove(peer);
        }
        info!("left peers {:?}", self.peers);

        // The remainder in keys will be disconnected
        for key in keys {
            service.config_addrs.remove(&key).and_then(|addr| {
                addr.and_then(|addr| {
                    service.known_addrs.remove(&addr).and_then(|node_status| {
                        node_status.session_id.and_then(|sid| {
                            service
                                .service_ctrl
                                .as_mut()
                                .and_then(|ctrl| ctrl.disconnect(sid).ok())
                        })
                    })
                })
            });
        }

        // Newly added peers start unresolved; existing entries are kept as-is.
        for peer in self.peers {
            service.config_addrs.entry(peer).or_insert(None);
        }
    }
}
// Echo server example built on the `rux` epoll framework.
// NOTE(review): this span contains two revisions of the example separated by the
// commit message "fixed example"; the second revision updates the `rux` module
// paths (`poll` -> `epoll`, adds `error`) and pins the server to one I/O thread.
#[macro_use]
extern crate log;
#[macro_use]
extern crate rux;
extern crate num_cpus;
extern crate env_logger;

use rux::{RawFd, Reset};
use rux::buf::ByteBuffer;
use rux::handler::*;
use rux::mux::*;
use rux::poll::*;
use rux::sys::socket::*;
use rux::prop::server::*;
use rux::system::System;

const BUF_SIZE: usize = 2048;
const EPOLL_BUF_CAP: usize = 2048;
// -1: block indefinitely in epoll_wait until events arrive.
const EPOLL_LOOP_MS: isize = -1;
const MAX_CONN: usize = 2048;

/// Handler that echoes incoming bytes
///
/// For benchmarking I/O throuput and latency
pub struct EchoHandler;

impl<'a> Handler<MuxEvent<'a, ByteBuffer>, MuxCmd> for EchoHandler {
    // Reads into the per-connection buffer on EPOLLIN, writes it back on EPOLLOUT;
    // closes on hangup/error. `eintr!` retries syscalls interrupted by signals.
    fn on_next(&mut self, event: MuxEvent<'a, ByteBuffer>) -> MuxCmd {
        let fd = event.fd;
        let kind = event.kind;
        let buffer = event.resource;

        if kind.contains(EPOLLHUP) {
            trace!("socket's fd {}: EPOLLHUP", fd);
            return MuxCmd::Close;
        }

        if kind.contains(EPOLLERR) {
            error!("socket's fd {}: EPOLERR", fd);
            return MuxCmd::Close;
        }

        if kind.contains(EPOLLIN) {
            if let Some(n) = eintr!(recv(fd, From::from(&mut *buffer), MSG_DONTWAIT)).unwrap() {
                buffer.extend(n);
            }
        }

        if kind.contains(EPOLLOUT) {
            if buffer.is_readable() {
                if let Some(cnt) = eintr!(send(fd, From::from(&*buffer), MSG_DONTWAIT)).unwrap() {
                    buffer.consume(cnt);
                }
            }
        }

        MuxCmd::Keep
    }
}

impl EpollHandler for EchoHandler {
    // Edge-triggered read+write interest.
    fn interests() -> EpollEventKind {
        EPOLLIN | EPOLLOUT | EPOLLET
    }

    fn with_epfd(&mut self, _: EpollFd) {
    }
}

impl Reset for EchoHandler {
    fn reset(&mut self) {}
}

#[derive(Clone, Debug)]
struct EchoFactory;

impl<'a> HandlerFactory<'a, EchoHandler, ByteBuffer> for EchoFactory {
    // One fixed-size buffer per connection.
    fn new_resource(&self) -> ByteBuffer {
        ByteBuffer::with_capacity(BUF_SIZE)
    }

    fn new_handler(&mut self, _: EpollFd, _: RawFd) -> EchoHandler {
        EchoHandler
    }
}

fn main() {
    ::env_logger::init().unwrap();

    info!("BUF_SIZE: {}; EPOLL_BUF_CAP: {}; EPOLL_LOOP_MS: {}; MAX_CONN: {}",
          BUF_SIZE, EPOLL_BUF_CAP, EPOLL_LOOP_MS, MAX_CONN);

    let config = ServerConfig::tcp(("127.0.0.1", 9999))
        .unwrap()
        .max_conn(MAX_CONN)
        .io_threads(::std::cmp::max(1, ::num_cpus::get() / 2))
        // .io_threads(1)
        .epoll_config(EpollConfig {
            loop_ms: EPOLL_LOOP_MS,
            buffer_capacity: EPOLL_BUF_CAP,
        });

    let server = Server::new_with(config, |epfd| {
        SyncMux::new(MAX_CONN, epfd, EchoFactory)
    }).unwrap();

    System::build(server).start().unwrap();
}

fixed example

// Second revision ("fixed example"): imports `rux::error`, uses the renamed
// `rux::epoll` module, and runs a single I/O thread.
#[macro_use]
extern crate log;
#[macro_use]
extern crate rux;
extern crate num_cpus;
extern crate env_logger;

use rux::{RawFd, Reset};
use rux::error::*;
use rux::buf::ByteBuffer;
use rux::handler::*;
use rux::mux::*;
use rux::epoll::*;
use rux::sys::socket::*;
use rux::prop::server::*;
use rux::system::System;

const BUF_SIZE: usize = 2048;
const EPOLL_BUF_CAP: usize = 2048;
// -1: block indefinitely in epoll_wait until events arrive.
const EPOLL_LOOP_MS: isize = -1;
const MAX_CONN: usize = 2048;

/// Handler that echoes incoming bytes
///
/// For benchmarking I/O throuput and latency
pub struct EchoHandler;

impl<'a> Handler<MuxEvent<'a, ByteBuffer>, MuxCmd> for EchoHandler {
    // Same echo logic as the first revision: buffer on EPOLLIN, flush on EPOLLOUT.
    fn on_next(&mut self, event: MuxEvent<'a, ByteBuffer>) -> MuxCmd {
        let fd = event.fd;
        let kind = event.kind;
        let buffer = event.resource;

        if kind.contains(EPOLLHUP) {
            trace!("socket's fd {}: EPOLLHUP", fd);
            return MuxCmd::Close;
        }

        if kind.contains(EPOLLERR) {
            error!("socket's fd {}: EPOLERR", fd);
            return MuxCmd::Close;
        }

        if kind.contains(EPOLLIN) {
            if let Some(n) = eintr!(recv(fd, From::from(&mut *buffer), MSG_DONTWAIT)).unwrap() {
                buffer.extend(n);
            }
        }

        if kind.contains(EPOLLOUT) {
            if buffer.is_readable() {
                if let Some(cnt) = eintr!(send(fd, From::from(&*buffer), MSG_DONTWAIT)).unwrap() {
                    buffer.consume(cnt);
                }
            }
        }

        MuxCmd::Keep
    }
}

impl EpollHandler for EchoHandler {
    fn interests() -> EpollEventKind {
        EPOLLIN | EPOLLOUT | EPOLLET
    }

    fn with_epfd(&mut self, _: EpollFd) {
    }
}

impl Reset for EchoHandler {
    fn reset(&mut self) {}
}

#[derive(Clone, Debug)]
struct EchoFactory;

impl<'a> HandlerFactory<'a, EchoHandler, ByteBuffer> for EchoFactory {
    fn new_resource(&self) -> ByteBuffer {
        ByteBuffer::with_capacity(BUF_SIZE)
    }

    fn new_handler(&mut self, _: EpollFd, _: RawFd) -> EchoHandler {
        EchoHandler
    }
}

fn main() {
    ::env_logger::init().unwrap();

    info!("BUF_SIZE: {}; EPOLL_BUF_CAP: {}; EPOLL_LOOP_MS: {}; MAX_CONN: {}",
          BUF_SIZE, EPOLL_BUF_CAP, EPOLL_LOOP_MS, MAX_CONN);

    let config = ServerConfig::tcp(("127.0.0.1", 9999))
        .unwrap()
        .max_conn(MAX_CONN)
        //.io_threads(::std::cmp::max(1, ::num_cpus::get() / 2))
        .io_threads(1)
        .epoll_config(EpollConfig {
            loop_ms: EPOLL_LOOP_MS,
            buffer_capacity: EPOLL_BUF_CAP,
        });

    let server = Server::new_with(config, |epfd| {
        SyncMux::new(MAX_CONN, epfd, EchoFactory)
    }).unwrap();

    System::build(server).start().unwrap();
}
extern crate time;

#[cfg(test)]
extern crate quickcheck;

use std::cmp;
use std::fmt;

// Implementation of Hybrid Logical Clocks, based on the paper "Logical Physical Clocks
// and Consistent Snapshots in Globally Distributed Databases".
//

/// Supplies the physical-time component of the clock. `Time` only needs a total
/// order and cheap copying; the source may go backwards (the HLC tolerates it).
trait ClockSource {
    type Time : Ord + Copy;
    fn now(&mut self) -> Self::Time;
}

/// An HLC timestamp: (physical component, logical counter). Ordering is
/// lexicographic via the derived `Ord`, which matches the HLC happened-before order.
#[derive(Debug,Clone,Copy,PartialEq,Eq,PartialOrd,Ord)]
struct Timestamp<T>(T, u32);

/// A hybrid logical clock over a pluggable physical source. `latest` is the
/// largest timestamp issued or observed so far, so output is monotonic even if
/// the physical source is not.
struct Clock<S: ClockSource> {
    src: S,
    latest: Timestamp<S::Time>,
}

impl<S: ClockSource> Clock<S> {
    /// Initializes the clock at the source's current time with logical counter 0.
    pub fn new(mut src: S) -> Self {
        let init = src.now();
        Clock {
            src: src,
            latest: Timestamp(init, 0),
        }
    }

    /// Returns the timestamp for a local or send event (algorithm fig. 5 of the
    /// paper): advance the physical part to max(latest, now); bump the logical
    /// counter if the physical part did not advance, else reset it to 0.
    pub fn on_send(&mut self) -> Timestamp<S::Time> {
        let pt = self.src.now();
        let lp = self.latest.clone();
        self.latest.0 = cmp::max(lp.0, pt);
        self.latest.1 = if lp.0 == self.latest.0 { lp.1 + 1 } else { 0 };
        self.latest
    }

    /// Returns the timestamp for a receive event: the physical part becomes
    /// max(local latest, message, now), and the logical counter is derived from
    /// whichever of the three supplied that maximum (see the four-way match).
    pub fn on_recv(&mut self, msg: &Timestamp<S::Time>) -> Timestamp<S::Time> {
        let pt = self.src.now();
        let lp = self.latest.clone();

        self.latest.0 = cmp::max(cmp::max(lp.0, msg.0), pt);
        self.latest.1 = match (self.latest.0 == lp.0, self.latest.0 == msg.0) {
            // Physical parts tie on both sides: take the larger counter, plus one.
            (true, true) => cmp::max(self.latest.1, msg.1) + 1,
            // Our own latest wins: continue our counter.
            (true, false) => self.latest.1 + 1,
            // The message wins: continue the sender's counter.
            (false, true) => msg.1 + 1,
            // Fresh physical time beat both: counter restarts.
            (false, false) => 0,
        };
        self.latest.clone()
    }
}

#[cfg(test)]
mod tests {
    use super::{Clock, ClockSource, Timestamp};
    use std::cell::Cell;
    use std::cmp::{Ord, Ordering};
    use quickcheck;

    // A settable physical-time source so tests can drive the clock by hand.
    struct ManualClock(Cell<u64>);

    impl<'a> ClockSource for &'a ManualClock {
        type Time = u64;
        fn now(&mut self) -> Self::Time {
            self.0.get()
        }
    }

    // The fig_6_* tests replay the message exchange of figure 6 in the paper.
    #[test]
    fn fig_6_proc_0_a() {
        let src = ManualClock(Cell::new(0));
        let mut clock = Clock::new(&src);
        src.0.set(10);
        assert_eq!(clock.on_send(), Timestamp(10, 0))
    }

    #[test]
    fn fig_6_proc_1_a() {
        let src = ManualClock(Cell::new(1));
        let mut clock = Clock::new(&src);
        assert_eq!(clock.on_recv(&Timestamp(10, 0)), Timestamp(10, 1))
    }

    #[test]
    fn fig_6_proc_1_b() {
        let src = ManualClock(Cell::new(1));
        let mut clock = Clock::new(&src);
        let _ = clock.on_recv(&Timestamp(10, 0));
        src.0.set(2);
        assert_eq!(clock.on_send(), Timestamp(10, 2))
    }

    #[test]
    fn fig_6_proc_2_b() {
        let src = ManualClock(Cell::new(0));
        let mut clock = Clock::new(&src);
        clock.latest = Timestamp(1, 0);
        src.0.set(2);
        assert_eq!(clock.on_recv(&Timestamp(10, 2)), Timestamp(10, 3))
    }

    #[test]
    fn fig_6_proc_2_c() {
        let src = ManualClock(Cell::new(0));
        let mut clock = Clock::new(&src);
        src.0.set(2);
        let _ = clock.on_recv(&Timestamp(10, 2));
        src.0.set(3);
        assert_eq!(clock.on_send(), Timestamp(10, 4))
    }

    #[test]
    fn all_sources_same() {
        let src = ManualClock(Cell::new(0));
        let mut clock = Clock::new(&src);
        assert_eq!(clock.on_recv(&Timestamp(0, 5)), Timestamp(0, 6))
    }

    // Monotonicity must hold even when the physical source moves backwards.
    #[test]
    fn handles_time_going_backwards_on_send() {
        let src = ManualClock(Cell::new(10));
        let mut clock = Clock::new(&src);
        let _ = clock.on_send();
        src.0.set(9);
        assert_eq!(clock.on_send(), Timestamp(10, 2))
    }

    #[test]
    fn handles_time_going_backwards_on_recv() {
        let src = ManualClock(Cell::new(10));
        let mut clock = Clock::new(&src);
        let _ = clock.on_send();
        src.0.set(9);
        assert_eq!(clock.on_recv(&Timestamp(0, 0)), Timestamp(10, 2))
    }

    #[test]
    fn handles_time_going_forwards_on_send() {
        let src = ManualClock(Cell::new(10));
        let mut clock = Clock::new(&src);
        let _ = clock.on_send();
        src.0.set(12);
        assert_eq!(clock.on_send(), Timestamp(12, 0))
    }

    #[test]
    fn handles_time_going_forwards_on_recv() {
        let src = ManualClock(Cell::new(10));
        let mut clock = Clock::new(&src);
        let _ = clock.on_send();
        src.0.set(12);
        assert_eq!(clock.on_recv(&Timestamp(0, 0)), Timestamp(12, 0))
    }
}

Add wall clock.

// Second revision ("Add wall clock."): items become `pub` and a `Wall` source
// backed by `time::get_time()` is added.
extern crate time;

#[cfg(test)]
extern crate quickcheck;

use std::cmp;
use std::fmt;

// Implementation of Hybrid Logical Clocks, based on the paper "Logical Physical Clocks
// and Consistent Snapshots in Globally Distributed Databases".
// pub trait ClockSource { type Time : Ord + Copy; fn now(&mut self) -> Self::Time; } #[derive(Debug,Clone,Copy,PartialEq,Eq,PartialOrd,Ord)] pub struct Timestamp<T>(T, u32); #[derive(Debug,Clone,Copy,PartialEq,Eq,PartialOrd,Ord)] pub struct Wall; pub struct Clock<S: ClockSource> { src: S, latest: Timestamp<S::Time>, } impl Clock<Wall> { pub fn wall() -> Clock<Wall> { Clock::new(Wall) } } impl<S: ClockSource> Clock<S> { pub fn new(mut src: S) -> Self { let init = src.now(); Clock { src: src, latest: Timestamp(init, 0), } } pub fn on_send(&mut self) -> Timestamp<S::Time> { let pt = self.src.now(); let lp = self.latest.clone(); self.latest.0 = cmp::max(lp.0, pt); self.latest.1 = if lp.0 == self.latest.0 { lp.1 + 1 } else { 0 }; self.latest } pub fn on_recv(&mut self, msg: &Timestamp<S::Time>) -> Timestamp<S::Time> { let pt = self.src.now(); let lp = self.latest.clone(); self.latest.0 = cmp::max(cmp::max(lp.0, msg.0), pt); self.latest.1 = match (self.latest.0 == lp.0, self.latest.0 == msg.0) { (true, true) => cmp::max(self.latest.1, msg.1) + 1, (true, false) => self.latest.1 + 1, (false, true) => msg.1 + 1, (false, false) => 0, }; self.latest.clone() } } impl ClockSource for Wall { type Time = time::Timespec; fn now(&mut self) -> Self::Time { time::get_time() } } #[cfg(test)] mod tests { use super::{Clock, ClockSource, Timestamp}; use std::cell::Cell; use std::cmp::{Ord, Ordering}; use quickcheck; struct ManualClock(Cell<u64>); impl<'a> ClockSource for &'a ManualClock { type Time = u64; fn now(&mut self) -> Self::Time { self.0.get() } } #[test] fn fig_6_proc_0_a() { let src = ManualClock(Cell::new(0)); let mut clock = Clock::new(&src); src.0.set(10); assert_eq!(clock.on_send(), Timestamp(10, 0)) } #[test] fn fig_6_proc_1_a() { let src = ManualClock(Cell::new(1)); let mut clock = Clock::new(&src); assert_eq!(clock.on_recv(&Timestamp(10, 0)), Timestamp(10, 1)) } #[test] fn fig_6_proc_1_b() { let src = ManualClock(Cell::new(1)); let mut clock = Clock::new(&src); let _ = 
clock.on_recv(&Timestamp(10, 0)); src.0.set(2); assert_eq!(clock.on_send(), Timestamp(10, 2)) } #[test] fn fig_6_proc_2_b() { let src = ManualClock(Cell::new(0)); let mut clock = Clock::new(&src); clock.latest = Timestamp(1, 0); src.0.set(2); assert_eq!(clock.on_recv(&Timestamp(10, 2)), Timestamp(10, 3)) } #[test] fn fig_6_proc_2_c() { let src = ManualClock(Cell::new(0)); let mut clock = Clock::new(&src); src.0.set(2); let _ = clock.on_recv(&Timestamp(10, 2)); src.0.set(3); assert_eq!(clock.on_send(), Timestamp(10, 4)) } #[test] fn all_sources_same() { let src = ManualClock(Cell::new(0)); let mut clock = Clock::new(&src); assert_eq!(clock.on_recv(&Timestamp(0, 5)), Timestamp(0, 6)) } #[test] fn handles_time_going_backwards_on_send() { let src = ManualClock(Cell::new(10)); let mut clock = Clock::new(&src); let _ = clock.on_send(); src.0.set(9); assert_eq!(clock.on_send(), Timestamp(10, 2)) } #[test] fn handles_time_going_backwards_on_recv() { let src = ManualClock(Cell::new(10)); let mut clock = Clock::new(&src); let _ = clock.on_send(); src.0.set(9); assert_eq!(clock.on_recv(&Timestamp(0, 0)), Timestamp(10, 2)) } #[test] fn handles_time_going_forwards_on_send() { let src = ManualClock(Cell::new(10)); let mut clock = Clock::new(&src); let _ = clock.on_send(); src.0.set(12); assert_eq!(clock.on_send(), Timestamp(12, 0)) } #[test] fn handles_time_going_forwards_on_recv() { let src = ManualClock(Cell::new(10)); let mut clock = Clock::new(&src); let _ = clock.on_send(); src.0.set(12); assert_eq!(clock.on_recv(&Timestamp(0, 0)), Timestamp(12, 0)) } }
import ptr::addr_of; import comm::{port, chan, methods}; import result::{result, ok, err}; import std::net::ip::{ get_addr, format_addr, ipv4, ipv6, ip_addr, ip_get_addr_err }; import std::net::tcp::{connect, tcp_socket}; import std::uv_global_loop; import comm::{methods}; import connection::{ Connection, ConnectionFactory, UvConnectionFactory, MockConnection, MockConnectionFactory }; import parser::{Parser, ParserCallbacks}; const timeout: uint = 2000; /** A quick hack URI type */ type Uri = { host: str, path: str }; /// HTTP status codes enum StatusCode { StatusOk = 200, StatusUnknown } /// HTTP request error conditions enum RequestError { ErrorDnsResolution, ErrorConnect, ErrorMisc } /// Request enum RequestEvent { Status(StatusCode), Payload(~mut option<~[u8]>), Error(RequestError) } type DnsResolver = fn@(host: str) -> result<~[ip_addr], ip_get_addr_err>; fn uv_dns_resolver() -> DnsResolver { |host| { let iotask = uv_global_loop::get(); get_addr(host, iotask) } } fn uv_http_request(+uri: Uri) -> HttpRequest<tcp_socket, UvConnectionFactory> { HttpRequest(uv_dns_resolver(), UvConnectionFactory, uri) } class HttpRequest<C: Connection, CF: ConnectionFactory<C>> { let resolve_ip_addr: DnsResolver; let connection_factory: CF; let uri: Uri; let parser: Parser; new(resolver: DnsResolver, +connection_factory: CF, +uri: Uri) { self.resolve_ip_addr = resolver; self.connection_factory = connection_factory; self.uri = uri; self.parser = Parser(); } fn begin(cb: fn(+RequestEvent)) { #debug("http_client: looking up uri %?", self.uri); let ip_addr = { let ip_addrs = self.resolve_ip_addr(self.uri.host); if ip_addrs.is_ok() { let ip_addrs = result::unwrap(ip_addrs); // FIXME: This log crashes //#debug("http_client: got IP addresses for %?: %?", self.uri, ip_addrs); if ip_addrs.is_not_empty() { // FIXME: Which address should we really pick? 
let best_ip = do ip_addrs.find |ip| { alt ip { ipv4(*) { true } ipv6(*) { false } } }; if best_ip.is_some() { option::unwrap(best_ip) } else { // FIXME: Need test cb(Error(ErrorMisc)); ret; } } else { #debug("http_client: got no IP addresses for %?", self.uri); // FIXME: Need test cb(Error(ErrorMisc)); ret; } } else { #debug("http_client: DNS lookup failure: %?", ip_addrs.get_err()); cb(Error(ErrorDnsResolution)); ret; } }; #debug("http_client: using IP %? for %?", format_addr(ip_addr), self.uri); let socket = { #debug("http_client: connecting to %?", ip_addr); let socket = self.connection_factory.connect(copy ip_addr, 80); if socket.is_ok() { result::unwrap(socket) } else { #debug("http_client: unable to connect to %?: %?", ip_addr, socket); cb(Error(ErrorConnect)); ret; } }; #debug("http_client: got socket for %?", ip_addr); let request_header = #fmt("GET %s HTTP/1.0\u000D\u000AHost: %s\u000D\u000A\u000D\u000A", self.uri.path, self.uri.host); #debug("http_client: writing request header: %?", request_header); let request_header_bytes = str::bytes(request_header); alt socket.write(request_header_bytes) { result::ok(*) { } result::err(e) { // FIXME: Need test cb(Error(ErrorMisc)); ret; } } let read_port = { let read_port = socket.read_start(); if read_port.is_ok() { result::unwrap(read_port) } else { cb(Error(ErrorMisc)); ret; } }; // This unsafety is unfortunate but we can't capture self // into shared closures let unsafe_self = addr_of(self); let callbacks: ParserCallbacks = unsafe {{ on_message_begin: || (*unsafe_self).on_message_begin(), on_url: |data| (*unsafe_self).on_url(data), on_header_field: |data| (*unsafe_self).on_header_field(data), on_header_value: |data| (*unsafe_self).on_header_value(data), on_headers_complete: || (*unsafe_self).on_headers_complete(), on_body: |data| (*unsafe_self).on_body(data), on_message_complete: || (*unsafe_self).on_message_complete() }}; loop { let next_data = read_port.recv(); if next_data.is_ok() { let next_data = 
result::unwrap(next_data); #debug("next_data: %?", next_data); self.parser.execute(next_data, &callbacks); let the_payload = Payload(~mut some(next_data)); cb(the_payload); } else { #debug("http_client: read error: %?", next_data); // This method of detecting EOF is lame alt next_data { result::err({err_name: "EOF", _}) { break; } _ { // FIXME: Need tests and error handling socket.read_stop(read_port); cb(Error(ErrorMisc)); ret; } } } } socket.read_stop(read_port); } fn on_message_begin() -> bool { #debug("on_message_begin"); true } fn on_url(+data: ~[u8]) -> bool { #debug("on_url"); true } fn on_header_field(+data: ~[u8]) -> bool { #debug("on_header_field"); true } fn on_header_value(+data: ~[u8]) -> bool { #debug("on_header_value"); true } fn on_headers_complete() -> bool { #debug("on_headers_complete"); true } fn on_body(+data: ~[u8]) -> bool { #debug("on_body"); true } fn on_message_complete() -> bool { #debug("on_message_complete"); true } } fn sequence<C: Connection, CF: ConnectionFactory<C>>(request: HttpRequest<C, CF>) -> ~[RequestEvent] { let mut events = ~[]; do request.begin |event| { vec::push(events, event) } ret events; } #[test] fn test_resolve_error() { let uri = { host: "example.com_not_real", path: "/" }; let request = uv_http_request(uri); let events = sequence(request); assert events == ~[ Error(ErrorDnsResolution), ]; } #[test] fn test_connect_error() { let uri = { // This address is invalid because the first octet // of a class A address cannot be 0 host: "0.42.42.42", path: "/" }; let request = uv_http_request(uri); let events = sequence(request); assert events == ~[ Error(ErrorConnect), ]; } #[test] fn test_connect_success() { let uri = { host: "example.com", path: "/" }; let request = uv_http_request(uri); let events = sequence(request); for events.each |ev| { alt ev { Error(*) { fail } _ { } } } } #[test] #[ignore(reason = "ICE")] fn test_simple_response() { let uri = { host: "whatever", path: "/" }; let mock_connection: MockConnection = { 
write_fn: |data| { ok(()) }, read_start_fn: || { let port = port(); let chan = port.chan(); let response = "HTTP/1.0 200 OK\ \ Test"; chan.send(ok(str::bytes(response))); ok(port) }, read_stop_fn: |_port| { ok(()) } }; let mock_connection_factory: MockConnectionFactory = { connect_fn: |ip, port| { // FIXME this doesn't work fail;//ok(mock_connection) } }; } Remove some logging import ptr::addr_of; import comm::{port, chan, methods}; import result::{result, ok, err}; import std::net::ip::{ get_addr, format_addr, ipv4, ipv6, ip_addr, ip_get_addr_err }; import std::net::tcp::{connect, tcp_socket}; import std::uv_global_loop; import comm::{methods}; import connection::{ Connection, ConnectionFactory, UvConnectionFactory, MockConnection, MockConnectionFactory }; import parser::{Parser, ParserCallbacks}; const timeout: uint = 2000; /** A quick hack URI type */ type Uri = { host: str, path: str }; /// HTTP status codes enum StatusCode { StatusOk = 200, StatusUnknown } /// HTTP request error conditions enum RequestError { ErrorDnsResolution, ErrorConnect, ErrorMisc } /// Request enum RequestEvent { Status(StatusCode), Payload(~mut option<~[u8]>), Error(RequestError) } type DnsResolver = fn@(host: str) -> result<~[ip_addr], ip_get_addr_err>; fn uv_dns_resolver() -> DnsResolver { |host| { let iotask = uv_global_loop::get(); get_addr(host, iotask) } } fn uv_http_request(+uri: Uri) -> HttpRequest<tcp_socket, UvConnectionFactory> { HttpRequest(uv_dns_resolver(), UvConnectionFactory, uri) } class HttpRequest<C: Connection, CF: ConnectionFactory<C>> { let resolve_ip_addr: DnsResolver; let connection_factory: CF; let uri: Uri; let parser: Parser; new(resolver: DnsResolver, +connection_factory: CF, +uri: Uri) { self.resolve_ip_addr = resolver; self.connection_factory = connection_factory; self.uri = uri; self.parser = Parser(); } fn begin(cb: fn(+RequestEvent)) { #debug("http_client: looking up uri %?", self.uri); let ip_addr = { let ip_addrs = self.resolve_ip_addr(self.uri.host); 
if ip_addrs.is_ok() { let ip_addrs = result::unwrap(ip_addrs); // FIXME: This log crashes //#debug("http_client: got IP addresses for %?: %?", self.uri, ip_addrs); if ip_addrs.is_not_empty() { // FIXME: Which address should we really pick? let best_ip = do ip_addrs.find |ip| { alt ip { ipv4(*) { true } ipv6(*) { false } } }; if best_ip.is_some() { option::unwrap(best_ip) } else { // FIXME: Need test cb(Error(ErrorMisc)); ret; } } else { #debug("http_client: got no IP addresses for %?", self.uri); // FIXME: Need test cb(Error(ErrorMisc)); ret; } } else { #debug("http_client: DNS lookup failure: %?", ip_addrs.get_err()); cb(Error(ErrorDnsResolution)); ret; } }; #debug("http_client: using IP %? for %?", format_addr(ip_addr), self.uri); let socket = { #debug("http_client: connecting to %?", ip_addr); let socket = self.connection_factory.connect(copy ip_addr, 80); if socket.is_ok() { result::unwrap(socket) } else { #debug("http_client: unable to connect to %?: %?", ip_addr, socket); cb(Error(ErrorConnect)); ret; } }; #debug("http_client: got socket for %?", ip_addr); let request_header = #fmt("GET %s HTTP/1.0\u000D\u000AHost: %s\u000D\u000A\u000D\u000A", self.uri.path, self.uri.host); #debug("http_client: writing request header: %?", request_header); let request_header_bytes = str::bytes(request_header); alt socket.write(request_header_bytes) { result::ok(*) { } result::err(e) { // FIXME: Need test cb(Error(ErrorMisc)); ret; } } let read_port = { let read_port = socket.read_start(); if read_port.is_ok() { result::unwrap(read_port) } else { cb(Error(ErrorMisc)); ret; } }; // This unsafety is unfortunate but we can't capture self // into shared closures let unsafe_self = addr_of(self); let callbacks: ParserCallbacks = unsafe {{ on_message_begin: || (*unsafe_self).on_message_begin(), on_url: |data| (*unsafe_self).on_url(data), on_header_field: |data| (*unsafe_self).on_header_field(data), on_header_value: |data| (*unsafe_self).on_header_value(data), on_headers_complete: || 
(*unsafe_self).on_headers_complete(), on_body: |data| (*unsafe_self).on_body(data), on_message_complete: || (*unsafe_self).on_message_complete() }}; loop { let next_data = read_port.recv(); if next_data.is_ok() { let next_data = result::unwrap(next_data); self.parser.execute(next_data, &callbacks); let the_payload = Payload(~mut some(next_data)); cb(the_payload); } else { #debug("http_client: read error: %?", next_data); // This method of detecting EOF is lame alt next_data { result::err({err_name: "EOF", _}) { break; } _ { // FIXME: Need tests and error handling socket.read_stop(read_port); cb(Error(ErrorMisc)); ret; } } } } socket.read_stop(read_port); } fn on_message_begin() -> bool { #debug("on_message_begin"); true } fn on_url(+data: ~[u8]) -> bool { #debug("on_url"); true } fn on_header_field(+data: ~[u8]) -> bool { #debug("on_header_field"); true } fn on_header_value(+data: ~[u8]) -> bool { #debug("on_header_value"); true } fn on_headers_complete() -> bool { #debug("on_headers_complete"); true } fn on_body(+data: ~[u8]) -> bool { #debug("on_body"); true } fn on_message_complete() -> bool { #debug("on_message_complete"); true } } fn sequence<C: Connection, CF: ConnectionFactory<C>>(request: HttpRequest<C, CF>) -> ~[RequestEvent] { let mut events = ~[]; do request.begin |event| { vec::push(events, event) } ret events; } #[test] fn test_resolve_error() { let uri = { host: "example.com_not_real", path: "/" }; let request = uv_http_request(uri); let events = sequence(request); assert events == ~[ Error(ErrorDnsResolution), ]; } #[test] fn test_connect_error() { let uri = { // This address is invalid because the first octet // of a class A address cannot be 0 host: "0.42.42.42", path: "/" }; let request = uv_http_request(uri); let events = sequence(request); assert events == ~[ Error(ErrorConnect), ]; } #[test] fn test_connect_success() { let uri = { host: "example.com", path: "/" }; let request = uv_http_request(uri); let events = sequence(request); for 
events.each |ev| { alt ev { Error(*) { fail } _ { } } } } #[test] #[ignore(reason = "ICE")] fn test_simple_response() { let uri = { host: "whatever", path: "/" }; let mock_connection: MockConnection = { write_fn: |data| { ok(()) }, read_start_fn: || { let port = port(); let chan = port.chan(); let response = "HTTP/1.0 200 OK\ \ Test"; chan.send(ok(str::bytes(response))); ok(port) }, read_stop_fn: |_port| { ok(()) } }; let mock_connection_factory: MockConnectionFactory = { connect_fn: |ip, port| { // FIXME this doesn't work fail;//ok(mock_connection) } }; }
//! [POST /_matrix/identity/v2/lookup](https://matrix.org/docs/spec/identity_service/0.3.0#post-matrix-identity-v2-lookup) use std::collections::BTreeMap; use ruma_api::ruma_api; use ruma_identifiers::UserId; use crate::lookup::IdentifierHashingAlgorithm; ruma_api! { metadata: { description: "Looks up the set of Matrix User IDs which have bound the 3PIDs given, if bindings are available.", method: POST, name: "lookup_3pid", path: "/_matrix/identity/v2/lookup", authentication: AccessToken, rate_limited: false, } request: { /// The algorithm the client is using to encode the `addresses`. This should be one of the /// available options from `/hash_details`. pub algorithm: &'a IdentifierHashingAlgorithm, /// The pepper from `/hash_details`. This is required even when the `algorithm` does not /// make use of it. pub pepper: &'a str, /// The addresses to look up. The format of the entries here depend on the `algorithm` /// used. Note that queries which have been incorrectly hashed or formatted will lead to no /// matches. pub addresses: &'a [String], } response: { /// Any applicable mappings of `addresses` to Matrix User IDs. Addresses which do not have /// associations will not be included, which can make this property be an empty object. pub mappings: BTreeMap<String, UserId>, } } impl<'a> Request<'a> { /// Create a `Request` with algorithm, pepper and addresses to loop up. pub fn new( algorithm: &'a IdentifierHashingAlgorithm, pepper: &'a str, addresses: &'a [String], ) -> Self { Self { algorithm, pepper, addresses } } } impl Response { /// Create a `Response` with the BTreeMap which map addresses from the request which were /// found to their corresponding User IDs. pub fn new(mappings: BTreeMap<String, UserId>) -> Self { Self { mappings } } } Fix identity-service spec link //! 
[POST /_matrix/identity/v2/lookup](https://matrix.org/docs/spec/identity_service/r0.3.0#post-matrix-identity-v2-lookup) use std::collections::BTreeMap; use ruma_api::ruma_api; use ruma_identifiers::UserId; use crate::lookup::IdentifierHashingAlgorithm; ruma_api! { metadata: { description: "Looks up the set of Matrix User IDs which have bound the 3PIDs given, if bindings are available.", method: POST, name: "lookup_3pid", path: "/_matrix/identity/v2/lookup", authentication: AccessToken, rate_limited: false, } request: { /// The algorithm the client is using to encode the `addresses`. This should be one of the /// available options from `/hash_details`. pub algorithm: &'a IdentifierHashingAlgorithm, /// The pepper from `/hash_details`. This is required even when the `algorithm` does not /// make use of it. pub pepper: &'a str, /// The addresses to look up. The format of the entries here depend on the `algorithm` /// used. Note that queries which have been incorrectly hashed or formatted will lead to no /// matches. pub addresses: &'a [String], } response: { /// Any applicable mappings of `addresses` to Matrix User IDs. Addresses which do not have /// associations will not be included, which can make this property be an empty object. pub mappings: BTreeMap<String, UserId>, } } impl<'a> Request<'a> { /// Create a `Request` with algorithm, pepper and addresses to loop up. pub fn new( algorithm: &'a IdentifierHashingAlgorithm, pepper: &'a str, addresses: &'a [String], ) -> Self { Self { algorithm, pepper, addresses } } } impl Response { /// Create a `Response` with the BTreeMap which map addresses from the request which were /// found to their corresponding User IDs. pub fn new(mappings: BTreeMap<String, UserId>) -> Self { Self { mappings } } }
use std::cell::{Cell, RefCell}; use cairo::{self, ImageSurface, MatrixTrait, PatternTrait, Rectangle}; use markup5ever::local_name; use crate::allowed_url::{Fragment, Href}; use crate::aspect_ratio::AspectRatio; use crate::drawing_ctx::DrawingCtx; use crate::error::{NodeError, RenderingError}; use crate::float_eq_cairo::ApproxEqCairo; use crate::node::{CascadedValues, NodeResult, NodeTrait, RsvgNode}; use crate::parsers::{ParseError, ParseValue}; use crate::property_bag::PropertyBag; use crate::rect::{IRect, RectangleExt}; use crate::surface_utils::shared_surface::{SharedImageSurface, SurfaceType}; use crate::viewbox::ViewBox; use super::context::{FilterContext, FilterOutput, FilterResult}; use super::{Filter, FilterError, Primitive}; /// The `feImage` filter primitive. pub struct Image { base: Primitive, aspect: Cell<AspectRatio>, href: RefCell<Option<Href>>, } impl Default for Image { /// Constructs a new `Image` with empty properties. #[inline] fn default() -> Image { Image { base: Primitive::new::<Self>(), aspect: Cell::new(AspectRatio::default()), href: RefCell::new(None), } } } impl Image { /// Renders the filter if the source is an existing node. 
fn render_node( &self, ctx: &FilterContext, draw_ctx: &mut DrawingCtx, bounds: IRect, fragment: &Fragment, ) -> Result<ImageSurface, FilterError> { let acquired_drawable = draw_ctx .acquired_nodes() .get_node(fragment) .ok_or(FilterError::InvalidInput)?; let drawable = acquired_drawable.get(); let surface = ImageSurface::create( cairo::Format::ARgb32, ctx.source_graphic().width(), ctx.source_graphic().height(), )?; draw_ctx.get_cairo_context().set_matrix(ctx.paffine()); let node_being_filtered_values = ctx.get_computed_values_from_node_being_filtered(); let cascaded = CascadedValues::new_from_values(&drawable, node_being_filtered_values); draw_ctx .draw_node_on_surface( &drawable, &cascaded, &surface, f64::from(ctx.source_graphic().width()), f64::from(ctx.source_graphic().height()), ) .map_err(|e| { if let RenderingError::Cairo(status) = e { FilterError::CairoError(status) } else { // FIXME: this is just a dummy value; we should probably have a way to indicate // an error in the underlying drawing process. FilterError::CairoError(cairo::Status::InvalidStatus) } })?; // Clip the output to bounds. let output_surface = ImageSurface::create( cairo::Format::ARgb32, ctx.source_graphic().width(), ctx.source_graphic().height(), )?; let cr = cairo::Context::new(&output_surface); cr.rectangle( f64::from(bounds.x0), f64::from(bounds.y0), f64::from(bounds.x1 - bounds.x0), f64::from(bounds.y1 - bounds.y0), ); cr.clip(); cr.set_source_surface(&surface, 0f64, 0f64); cr.paint(); Ok(output_surface) } /// Renders the filter if the source is an external image. fn render_external_image( &self, ctx: &FilterContext, draw_ctx: &DrawingCtx, bounds: &IRect, unclipped_bounds: &IRect, href: &Href, ) -> Result<ImageSurface, FilterError> { let surface = if let Href::PlainUrl(ref url) = *href { // FIXME: translate the error better here draw_ctx .lookup_image(&url) .map_err(|_| FilterError::InvalidInput)? 
} else { unreachable!(); }; let output_surface = ImageSurface::create( cairo::Format::ARgb32, ctx.source_graphic().width(), ctx.source_graphic().height(), )?; // TODO: this goes through a f64->i32->f64 conversion. let aspect = self.aspect.get(); let (x, y, w, h) = aspect.compute( &ViewBox::new( 0.0, 0.0, f64::from(surface.width()), f64::from(surface.height()), ), &Rectangle::new( f64::from(unclipped_bounds.x0), f64::from(unclipped_bounds.y0), f64::from(unclipped_bounds.x1 - unclipped_bounds.x0), f64::from(unclipped_bounds.y1 - unclipped_bounds.y0), ), ); if w.approx_eq_cairo(&0.0) || h.approx_eq_cairo(&0.0) { return Ok(output_surface); } let ptn = surface.to_cairo_pattern(); let mut matrix = cairo::Matrix::new( w / f64::from(surface.width()), 0f64, 0f64, h / f64::from(surface.height()), x, y, ); matrix.invert(); ptn.set_matrix(matrix); let cr = cairo::Context::new(&output_surface); cr.rectangle( f64::from(bounds.x0), f64::from(bounds.y0), f64::from(bounds.x1 - bounds.x0), f64::from(bounds.y1 - bounds.y0), ); cr.clip(); cr.set_source(&ptn); cr.paint(); Ok(output_surface) } } impl NodeTrait for Image { impl_node_as_filter!(); fn set_atts(&mut self, parent: Option<&RsvgNode>, pbag: &PropertyBag<'_>) -> NodeResult { self.base.set_atts(parent, pbag)?; for (attr, value) in pbag.iter() { match attr { local_name!("preserveAspectRatio") => self.aspect.set(attr.parse(value)?), // "path" is used by some older Adobe Illustrator versions local_name!("xlink:href") | local_name!("path") => { let href = Href::parse(value).map_err(|_| { NodeError::parse_error(attr, ParseError::new("could not parse href")) })?; *self.href.borrow_mut() = Some(href); } _ => (), } } Ok(()) } } impl Filter for Image { fn render( &self, _node: &RsvgNode, ctx: &FilterContext, draw_ctx: &mut DrawingCtx, ) -> Result<FilterResult, FilterError> { let bounds_builder = self.base.get_bounds(ctx); let bounds = bounds_builder.into_irect(draw_ctx); let href_borrow = self.href.borrow(); let href_opt = 
href_borrow.as_ref(); if let Some(href) = href_opt { let output_surface = match *href { Href::PlainUrl(_) => { let unclipped_bounds = bounds_builder.into_irect_without_clipping(draw_ctx); self.render_external_image(ctx, draw_ctx, &bounds, &unclipped_bounds, href)? } Href::WithFragment(ref frag) => self.render_node(ctx, draw_ctx, bounds, frag)?, }; Ok(FilterResult { name: self.base.result.borrow().clone(), output: FilterOutput { surface: SharedImageSurface::new(output_surface, SurfaceType::SRgb)?, bounds, }, }) } else { Err(FilterError::InvalidInput) } } #[inline] fn is_affected_by_color_interpolation_filters(&self) -> bool { false } } filters/image: remove interior mutability use cairo::{self, ImageSurface, MatrixTrait, PatternTrait, Rectangle}; use markup5ever::local_name; use crate::allowed_url::{Fragment, Href}; use crate::aspect_ratio::AspectRatio; use crate::drawing_ctx::DrawingCtx; use crate::error::{NodeError, RenderingError}; use crate::float_eq_cairo::ApproxEqCairo; use crate::node::{CascadedValues, NodeResult, NodeTrait, RsvgNode}; use crate::parsers::{ParseError, ParseValue}; use crate::property_bag::PropertyBag; use crate::rect::{IRect, RectangleExt}; use crate::surface_utils::shared_surface::{SharedImageSurface, SurfaceType}; use crate::viewbox::ViewBox; use super::context::{FilterContext, FilterOutput, FilterResult}; use super::{Filter, FilterError, Primitive}; /// The `feImage` filter primitive. pub struct Image { base: Primitive, aspect: AspectRatio, href: Option<Href>, } impl Default for Image { /// Constructs a new `Image` with empty properties. #[inline] fn default() -> Image { Image { base: Primitive::new::<Self>(), aspect: AspectRatio::default(), href: None, } } } impl Image { /// Renders the filter if the source is an existing node. 
fn render_node( &self, ctx: &FilterContext, draw_ctx: &mut DrawingCtx, bounds: IRect, fragment: &Fragment, ) -> Result<ImageSurface, FilterError> { let acquired_drawable = draw_ctx .acquired_nodes() .get_node(fragment) .ok_or(FilterError::InvalidInput)?; let drawable = acquired_drawable.get(); let surface = ImageSurface::create( cairo::Format::ARgb32, ctx.source_graphic().width(), ctx.source_graphic().height(), )?; draw_ctx.get_cairo_context().set_matrix(ctx.paffine()); let node_being_filtered_values = ctx.get_computed_values_from_node_being_filtered(); let cascaded = CascadedValues::new_from_values(&drawable, node_being_filtered_values); draw_ctx .draw_node_on_surface( &drawable, &cascaded, &surface, f64::from(ctx.source_graphic().width()), f64::from(ctx.source_graphic().height()), ) .map_err(|e| { if let RenderingError::Cairo(status) = e { FilterError::CairoError(status) } else { // FIXME: this is just a dummy value; we should probably have a way to indicate // an error in the underlying drawing process. FilterError::CairoError(cairo::Status::InvalidStatus) } })?; // Clip the output to bounds. let output_surface = ImageSurface::create( cairo::Format::ARgb32, ctx.source_graphic().width(), ctx.source_graphic().height(), )?; let cr = cairo::Context::new(&output_surface); cr.rectangle( f64::from(bounds.x0), f64::from(bounds.y0), f64::from(bounds.x1 - bounds.x0), f64::from(bounds.y1 - bounds.y0), ); cr.clip(); cr.set_source_surface(&surface, 0f64, 0f64); cr.paint(); Ok(output_surface) } /// Renders the filter if the source is an external image. fn render_external_image( &self, ctx: &FilterContext, draw_ctx: &DrawingCtx, bounds: &IRect, unclipped_bounds: &IRect, href: &Href, ) -> Result<ImageSurface, FilterError> { let surface = if let Href::PlainUrl(ref url) = *href { // FIXME: translate the error better here draw_ctx .lookup_image(&url) .map_err(|_| FilterError::InvalidInput)? 
} else { unreachable!(); }; let output_surface = ImageSurface::create( cairo::Format::ARgb32, ctx.source_graphic().width(), ctx.source_graphic().height(), )?; // TODO: this goes through a f64->i32->f64 conversion. let (x, y, w, h) = self.aspect.compute( &ViewBox::new( 0.0, 0.0, f64::from(surface.width()), f64::from(surface.height()), ), &Rectangle::new( f64::from(unclipped_bounds.x0), f64::from(unclipped_bounds.y0), f64::from(unclipped_bounds.x1 - unclipped_bounds.x0), f64::from(unclipped_bounds.y1 - unclipped_bounds.y0), ), ); if w.approx_eq_cairo(&0.0) || h.approx_eq_cairo(&0.0) { return Ok(output_surface); } let ptn = surface.to_cairo_pattern(); let mut matrix = cairo::Matrix::new( w / f64::from(surface.width()), 0f64, 0f64, h / f64::from(surface.height()), x, y, ); matrix.invert(); ptn.set_matrix(matrix); let cr = cairo::Context::new(&output_surface); cr.rectangle( f64::from(bounds.x0), f64::from(bounds.y0), f64::from(bounds.x1 - bounds.x0), f64::from(bounds.y1 - bounds.y0), ); cr.clip(); cr.set_source(&ptn); cr.paint(); Ok(output_surface) } } impl NodeTrait for Image { impl_node_as_filter!(); fn set_atts(&mut self, parent: Option<&RsvgNode>, pbag: &PropertyBag<'_>) -> NodeResult { self.base.set_atts(parent, pbag)?; for (attr, value) in pbag.iter() { match attr { local_name!("preserveAspectRatio") => self.aspect = attr.parse(value)?, // "path" is used by some older Adobe Illustrator versions local_name!("xlink:href") | local_name!("path") => { let href = Href::parse(value).map_err(|_| { NodeError::parse_error(attr, ParseError::new("could not parse href")) })?; self.href = Some(href); } _ => (), } } Ok(()) } } impl Filter for Image { fn render( &self, _node: &RsvgNode, ctx: &FilterContext, draw_ctx: &mut DrawingCtx, ) -> Result<FilterResult, FilterError> { let bounds_builder = self.base.get_bounds(ctx); let bounds = bounds_builder.into_irect(draw_ctx); if let Some(href) = self.href.as_ref() { let output_surface = match href { Href::PlainUrl(_) => { let 
unclipped_bounds = bounds_builder.into_irect_without_clipping(draw_ctx); self.render_external_image(ctx, draw_ctx, &bounds, &unclipped_bounds, href)? } Href::WithFragment(ref frag) => self.render_node(ctx, draw_ctx, bounds, frag)?, }; Ok(FilterResult { name: self.base.result.borrow().clone(), output: FilterOutput { surface: SharedImageSurface::new(output_surface, SurfaceType::SRgb)?, bounds, }, }) } else { Err(FilterError::InvalidInput) } } #[inline] fn is_affected_by_color_interpolation_filters(&self) -> bool { false } }
//! Details of the `metadata` section of the procedural macro. use syn::{ braced, parse::{Parse, ParseStream}, Expr, ExprLit, ExprPath, FieldValue, Ident, Lit, LitBool, LitStr, Member, Token, }; use crate::util; mod kw { syn::custom_keyword!(metadata); } /// The result of processing the `metadata` section of the macro. pub struct Metadata { /// The description field. pub description: LitStr, /// The method field. pub method: Ident, /// The name field. pub name: LitStr, /// The path field. pub path: LitStr, /// The rate_limited field. pub rate_limited: LitBool, /// The authentication field. pub authentication: Ident, } impl Parse for Metadata { fn parse(input: ParseStream<'_>) -> syn::Result<Self> { let metadata_kw = input.parse::<kw::metadata>()?; input.parse::<Token![:]>()?; let field_values; braced!(field_values in input); let field_values = field_values.parse_terminated::<FieldValue, Token![,]>(FieldValue::parse)?; let mut description = None; let mut method = None; let mut name = None; let mut path = None; let mut rate_limited = None; let mut authentication = None; for field_value in field_values { let identifier = match field_value.member.clone() { Member::Named(identifier) => identifier, _ => panic!("expected Member::Named"), }; let expr = field_value.expr.clone(); match &identifier.to_string()[..] { "description" => match expr { Expr::Lit(ExprLit { lit: Lit::Str(literal), .. }) => { description = Some(literal); } _ => return Err(syn::Error::new_spanned(expr, "expected a string literal")), }, "method" => match expr { Expr::Path(ExprPath { ref path, .. }) if path.segments.len() == 1 => { method = Some(path.segments[0].ident.clone()); } _ => return Err(syn::Error::new_spanned(expr, "expected an identifier")), }, "name" => match expr { Expr::Lit(ExprLit { lit: Lit::Str(literal), .. }) => { name = Some(literal); } _ => return Err(syn::Error::new_spanned(expr, "expected a string literal")), }, "path" => match expr { Expr::Lit(ExprLit { lit: Lit::Str(literal), .. 
}) => { let path_str = literal.value(); if !util::is_valid_endpoint_path(&path_str) { return Err(syn::Error::new_spanned( literal, "path may only contain printable ASCII characters with no spaces", )); } path = Some(literal); } _ => return Err(syn::Error::new_spanned(expr, "expected a string literal")), }, "rate_limited" => match expr { Expr::Lit(ExprLit { lit: Lit::Bool(literal), .. }) => { rate_limited = Some(literal); } _ => return Err(syn::Error::new_spanned(expr, "expected a bool literal")), }, "authentication" => match expr { Expr::Path(ExprPath { ref path, .. }) if path.segments.len() == 1 => { authentication = Some(path.segments[0].ident.clone()); } _ => return Err(syn::Error::new_spanned(expr, "expected an identifier")), }, _ => return Err(syn::Error::new_spanned(field_value, "unexpected field")), } } let missing_field = |name| syn::Error::new_spanned(metadata_kw, format!("missing field `{}`", name)); Ok(Self { description: description.ok_or_else(|| missing_field("description"))?, method: method.ok_or_else(|| missing_field("method"))?, name: name.ok_or_else(|| missing_field("name"))?, path: path.ok_or_else(|| missing_field("path"))?, rate_limited: rate_limited.ok_or_else(|| missing_field("rate_limited"))?, authentication: authentication.ok_or_else(|| missing_field("authentication"))?, }) } } api-macros: Refactor metadata parsing * duplicate field assignment will now raise an error * parsing should now be faster (though it probably doesn't matter) * the code is now split into more independent parts //! Details of the `metadata` section of the procedural macro. 
use quote::ToTokens;
use syn::{
    braced,
    parse::{Parse, ParseStream},
    Ident, LitBool, LitStr, Token,
};

use crate::util;

mod kw {
    // Custom keywords so field names (and `metadata` itself) can be peeked
    // and parsed as tokens, giving precise spans and lookahead errors.
    syn::custom_keyword!(metadata);
    syn::custom_keyword!(description);
    syn::custom_keyword!(method);
    syn::custom_keyword!(name);
    syn::custom_keyword!(path);
    syn::custom_keyword!(rate_limited);
    syn::custom_keyword!(authentication);
}

/// The result of processing the `metadata` section of the macro.
pub struct Metadata {
    /// The description field.
    pub description: LitStr,
    /// The method field.
    pub method: Ident,
    /// The name field.
    pub name: LitStr,
    /// The path field.
    pub path: LitStr,
    /// The rate_limited field.
    pub rate_limited: LitBool,
    /// The authentication field.
    pub authentication: Ident,
}

/// Stores `value` into `field`, or errors if the field was already set —
/// the error carries both the duplicate's span and the original's span.
fn set_field<T: ToTokens>(field: &mut Option<T>, value: T) -> syn::Result<()> {
    match field {
        Some(existing_value) => {
            let mut error = syn::Error::new_spanned(value, "duplicate field assignment");
            error.combine(syn::Error::new_spanned(existing_value, "first one here"));
            Err(error)
        }
        None => {
            *field = Some(value);
            Ok(())
        }
    }
}

impl Parse for Metadata {
    /// Parses `metadata: { field: value, ... }`; duplicate, unknown, and
    /// missing fields all produce compile errors.
    fn parse(input: ParseStream<'_>) -> syn::Result<Self> {
        let metadata_kw: kw::metadata = input.parse()?;
        let _: Token![:] = input.parse()?;
        let field_values;
        braced!(field_values in input);
        // Comma-separated `FieldValue`s inside the braces; each value is
        // already validated by `FieldValue::parse`.
        let field_values =
            field_values.parse_terminated::<FieldValue, Token![,]>(FieldValue::parse)?;

        let mut description = None;
        let mut method = None;
        let mut name = None;
        let mut path = None;
        let mut rate_limited = None;
        let mut authentication = None;

        for field_value in field_values {
            match field_value {
                FieldValue::Description(d) => set_field(&mut description, d)?,
                FieldValue::Method(m) => set_field(&mut method, m)?,
                FieldValue::Name(n) => set_field(&mut name, n)?,
                FieldValue::Path(p) => set_field(&mut path, p)?,
                FieldValue::RateLimited(rl) => set_field(&mut rate_limited, rl)?,
                FieldValue::Authentication(a) => set_field(&mut authentication, a)?,
            }
        }

        // Missing-field errors are attached to the `metadata` keyword, since
        // an absent field has no span of its own.
        let missing_field =
            |name| syn::Error::new_spanned(metadata_kw, format!("missing field `{}`", name));

        Ok(Self {
            description: description.ok_or_else(|| missing_field("description"))?,
            method: method.ok_or_else(|| missing_field("method"))?,
            name: name.ok_or_else(|| missing_field("name"))?,
            path: path.ok_or_else(|| missing_field("path"))?,
            rate_limited: rate_limited.ok_or_else(|| missing_field("rate_limited"))?,
            authentication: authentication.ok_or_else(|| missing_field("authentication"))?,
        })
    }
}

/// The keyword (name) part of one `name: value` pair inside the braces.
enum Field {
    Description,
    Method,
    Name,
    Path,
    RateLimited,
    Authentication,
}

impl Parse for Field {
    /// Parses a field name. Using `lookahead1` means an unknown name yields
    /// an "expected one of ..." error listing every peeked keyword.
    fn parse(input: ParseStream) -> syn::Result<Self> {
        let lookahead = input.lookahead1();
        if lookahead.peek(kw::description) {
            let _: kw::description = input.parse()?;
            Ok(Self::Description)
        } else if lookahead.peek(kw::method) {
            let _: kw::method = input.parse()?;
            Ok(Self::Method)
        } else if lookahead.peek(kw::name) {
            let _: kw::name = input.parse()?;
            Ok(Self::Name)
        } else if lookahead.peek(kw::path) {
            let _: kw::path = input.parse()?;
            Ok(Self::Path)
        } else if lookahead.peek(kw::rate_limited) {
            let _: kw::rate_limited = input.parse()?;
            Ok(Self::RateLimited)
        } else if lookahead.peek(kw::authentication) {
            let _: kw::authentication = input.parse()?;
            Ok(Self::Authentication)
        } else {
            Err(lookahead.error())
        }
    }
}

/// One parsed `name: value` pair; the value's type depends on the field.
enum FieldValue {
    Description(LitStr),
    Method(Ident),
    Name(LitStr),
    Path(LitStr),
    RateLimited(LitBool),
    Authentication(Ident),
}

impl Parse for FieldValue {
    fn parse(input: ParseStream) -> syn::Result<Self> {
        let field: Field = input.parse()?;
        let _: Token![:] = input.parse()?;

        Ok(match field {
            Field::Description => Self::Description(input.parse()?),
            Field::Method => Self::Method(input.parse()?),
            Field::Name => Self::Name(input.parse()?),
            Field::Path => {
                let path: LitStr = input.parse()?;
                // Validate eagerly so the error is attached to the literal.
                if !util::is_valid_endpoint_path(&path.value()) {
                    return Err(syn::Error::new_spanned(
                        &path,
                        "path may only contain printable ASCII characters with no spaces",
                    ));
                }
                Self::Path(path)
            }
            Field::RateLimited => Self::RateLimited(input.parse()?),
            Field::Authentication => Self::Authentication(input.parse()?),
        })
    }
}
// ================================================================= // // * WARNING * // // This file is generated! // // Changes made to this file will be overwritten. If changes are // required to the generated code, the service_crategen project // must be updated to generate the changes. // // ================================================================= use std::error::Error; use std::fmt; use std::io; #[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::reactor::{CredentialsProvider, RequestDispatcher}; use rusoto_core::region; use rusoto_core::request::DispatchSignedRequest; use rusoto_core::{ClientInner, RusotoFuture}; use rusoto_core::credential::{CredentialsError, ProvideAwsCredentials}; use rusoto_core::request::HttpDispatchError; use hyper::StatusCode; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::signature::SignedRequest; use rusoto_core::xmlerror::*; use rusoto_core::xmlutil::{characters, end_element, find_start_element, peek_at_name, skip_tree, start_element}; use rusoto_core::xmlutil::{Next, Peek, XmlParseError, XmlResponse}; use std::io::Write; use std::str::FromStr; use xml; use xml::reader::ParserConfig; use xml::reader::XmlEvent; use xml::EventReader; use xml::EventWriter; enum DeserializerNext { Close, Skip, Element(String), } /// <p>Specifies the days since the initiation of an Incomplete Multipart Upload that Lifecycle will wait before permanently removing all parts of the upload.</p> #[derive(Default, Debug, Clone)] pub struct AbortIncompleteMultipartUpload { /// <p>Indicates the number of days that must pass since initiation for Lifecycle to abort an Incomplete Multipart Upload.</p> pub days_after_initiation: Option<i64>, } struct AbortIncompleteMultipartUploadDeserializer; impl AbortIncompleteMultipartUploadDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AbortIncompleteMultipartUpload, XmlParseError> { 
try!(start_element(tag_name, stack)); let mut obj = AbortIncompleteMultipartUpload::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DaysAfterInitiation" => { obj.days_after_initiation = Some(try!(DaysAfterInitiationDeserializer::deserialize( "DaysAfterInitiation", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AbortIncompleteMultipartUploadSerializer; impl AbortIncompleteMultipartUploadSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AbortIncompleteMultipartUpload, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.days_after_initiation { writer.write(xml::writer::XmlEvent::start_element("DaysAfterInitiation"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct AbortMultipartUploadOutput { pub request_charged: Option<String>, } struct AbortMultipartUploadOutputDeserializer; impl AbortMultipartUploadOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AbortMultipartUploadOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = AbortMultipartUploadOutput::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct AbortMultipartUploadRequest { pub bucket: 
String, pub key: String, pub request_payer: Option<String>, pub upload_id: String, } #[derive(Default, Debug, Clone)] pub struct AccelerateConfiguration { /// <p>The accelerate configuration of the bucket.</p> pub status: Option<String>, } pub struct AccelerateConfigurationSerializer; impl AccelerateConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AccelerateConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.status { writer.write(xml::writer::XmlEvent::start_element("Status"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct AccessControlPolicy { /// <p>A list of grants.</p> pub grants: Option<Vec<Grant>>, pub owner: Option<Owner>, } pub struct AccessControlPolicySerializer; impl AccessControlPolicySerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AccessControlPolicy, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.grants { &GrantsSerializer::serialize(&mut writer, "AccessControlList", value)?; } if let Some(ref value) = obj.owner { &OwnerSerializer::serialize(&mut writer, "Owner", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for information regarding the access control for replicas.</p> #[derive(Default, Debug, Clone)] pub struct AccessControlTranslation { /// <p>The override value for the owner of the replica object.</p> pub owner: String, } struct AccessControlTranslationDeserializer; impl AccessControlTranslationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + 
Next>( tag_name: &str, stack: &mut T, ) -> Result<AccessControlTranslation, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AccessControlTranslation::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Owner" => { obj.owner = try!(OwnerOverrideDeserializer::deserialize("Owner", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AccessControlTranslationSerializer; impl AccessControlTranslationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AccessControlTranslation, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Owner"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.owner )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct AccountIdDeserializer; impl AccountIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AccountIdSerializer; impl AccountIdSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; 
writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct AllowedHeaderDeserializer; impl AllowedHeaderDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AllowedHeaderSerializer; impl AllowedHeaderSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct AllowedHeadersDeserializer; impl AllowedHeadersDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(AllowedHeaderDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } pub struct AllowedHeadersSerializer; impl AllowedHeadersSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<String>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { AllowedHeaderSerializer::serialize(writer, name, element)?; } Ok(()) } } struct AllowedMethodDeserializer; impl AllowedMethodDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AllowedMethodSerializer; impl AllowedMethodSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct AllowedMethodsDeserializer; impl AllowedMethodsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(AllowedMethodDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } pub struct AllowedMethodsSerializer; impl AllowedMethodsSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<String>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { AllowedMethodSerializer::serialize(writer, name, element)?; } Ok(()) } } struct AllowedOriginDeserializer; impl AllowedOriginDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AllowedOriginSerializer; impl AllowedOriginSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct AllowedOriginsDeserializer; impl AllowedOriginsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(AllowedOriginDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } pub struct AllowedOriginsSerializer; impl AllowedOriginsSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<String>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { AllowedOriginSerializer::serialize(writer, name, element)?; } Ok(()) } } #[derive(Default, Debug, Clone)] pub struct AnalyticsAndOperator { /// <p>The prefix to use when evaluating an AND predicate.</p> pub prefix: Option<String>, /// <p>The list of tags to use when evaluating an AND predicate.</p> pub tags: Option<Vec<Tag>>, } struct AnalyticsAndOperatorDeserializer; impl AnalyticsAndOperatorDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AnalyticsAndOperator, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AnalyticsAndOperator::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Prefix" => { obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } "Tag" => { obj.tags = Some(try!(TagSetDeserializer::deserialize("Tag", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsAndOperatorSerializer; impl AnalyticsAndOperatorSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AnalyticsAndOperator, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.prefix { writer.write(xml::writer::XmlEvent::start_element("Prefix"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.tags { &TagSetSerializer::serialize(&mut writer, "Tag", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct AnalyticsConfiguration { /// <p>The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). 
If no filter is provided, all objects will be considered in any analysis.</p> pub filter: Option<AnalyticsFilter>, /// <p>The identifier used to represent an analytics configuration.</p> pub id: String, /// <p>If present, it indicates that data related to access patterns will be collected and made available to analyze the tradeoffs between different storage classes.</p> pub storage_class_analysis: StorageClassAnalysis, } struct AnalyticsConfigurationDeserializer; impl AnalyticsConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AnalyticsConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AnalyticsConfiguration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Filter" => { obj.filter = Some(try!(AnalyticsFilterDeserializer::deserialize( "Filter", stack ))); } "Id" => { obj.id = try!(AnalyticsIdDeserializer::deserialize("Id", stack)); } "StorageClassAnalysis" => { obj.storage_class_analysis = try!(StorageClassAnalysisDeserializer::deserialize( "StorageClassAnalysis", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsConfigurationSerializer; impl AnalyticsConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AnalyticsConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.filter { &AnalyticsFilterSerializer::serialize(&mut writer, "Filter", value)?; } writer.write(xml::writer::XmlEvent::start_element("Id"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.id )))?; writer.write(xml::writer::XmlEvent::end_element())?; StorageClassAnalysisSerializer::serialize( &mut writer, "StorageClassAnalysis", &obj.storage_class_analysis, )?; writer.write(xml::writer::XmlEvent::end_element()) } } struct AnalyticsConfigurationListDeserializer; impl AnalyticsConfigurationListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<AnalyticsConfiguration>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(AnalyticsConfigurationDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct AnalyticsExportDestination { /// <p>A destination signifying output to an S3 bucket.</p> pub s3_bucket_destination: AnalyticsS3BucketDestination, } struct AnalyticsExportDestinationDeserializer; impl AnalyticsExportDestinationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AnalyticsExportDestination, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AnalyticsExportDestination::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "S3BucketDestination" => { obj.s3_bucket_destination = try!(AnalyticsS3BucketDestinationDeserializer::deserialize( "S3BucketDestination", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsExportDestinationSerializer; impl AnalyticsExportDestinationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AnalyticsExportDestination, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; AnalyticsS3BucketDestinationSerializer::serialize( &mut writer, "S3BucketDestination", &obj.s3_bucket_destination, )?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct AnalyticsFilter { /// <p>A conjunction (logical AND) of predicates, which is used in evaluating an analytics filter. The operator must have at least two predicates.</p> pub and: Option<AnalyticsAndOperator>, /// <p>The prefix to use when evaluating an analytics filter.</p> pub prefix: Option<String>, /// <p>The tag to use when evaluating an analytics filter.</p> pub tag: Option<Tag>, } struct AnalyticsFilterDeserializer; impl AnalyticsFilterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AnalyticsFilter, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AnalyticsFilter::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "And" => { obj.and = Some(try!(AnalyticsAndOperatorDeserializer::deserialize( "And", stack ))); } "Prefix" => { obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } "Tag" => { obj.tag = Some(try!(TagDeserializer::deserialize("Tag", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsFilterSerializer; impl AnalyticsFilterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AnalyticsFilter, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.and { &AnalyticsAndOperatorSerializer::serialize(&mut writer, "And", value)?; } if let Some(ref value) = obj.prefix { writer.write(xml::writer::XmlEvent::start_element("Prefix"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.tag { &TagSerializer::serialize(&mut writer, "Tag", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct AnalyticsIdDeserializer; impl AnalyticsIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsIdSerializer; impl AnalyticsIdSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } 
#[derive(Default, Debug, Clone)] pub struct AnalyticsS3BucketDestination { /// <p>The Amazon resource name (ARN) of the bucket to which data is exported.</p> pub bucket: String, /// <p>The account ID that owns the destination bucket. If no account ID is provided, the owner will not be validated prior to exporting data.</p> pub bucket_account_id: Option<String>, /// <p>The file format used when exporting data to Amazon S3.</p> pub format: String, /// <p>The prefix to use when exporting data. The exported data begins with this prefix.</p> pub prefix: Option<String>, } struct AnalyticsS3BucketDestinationDeserializer; impl AnalyticsS3BucketDestinationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AnalyticsS3BucketDestination, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AnalyticsS3BucketDestination::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Bucket" => { obj.bucket = try!(BucketNameDeserializer::deserialize("Bucket", stack)); } "BucketAccountId" => { obj.bucket_account_id = Some(try!(AccountIdDeserializer::deserialize( "BucketAccountId", stack ))); } "Format" => { obj.format = try!(AnalyticsS3ExportFileFormatDeserializer::deserialize( "Format", stack )); } "Prefix" => { obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsS3BucketDestinationSerializer; impl AnalyticsS3BucketDestinationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AnalyticsS3BucketDestination, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Bucket"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.bucket )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.bucket_account_id { writer.write(xml::writer::XmlEvent::start_element("BucketAccountId"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::start_element("Format"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.format )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.prefix { writer.write(xml::writer::XmlEvent::start_element("Prefix"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct AnalyticsS3ExportFileFormatDeserializer; impl AnalyticsS3ExportFileFormatDeserializer { 
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        // Scalar element: <Tag>text</Tag> -> String.
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes an `AnalyticsS3ExportFileFormat` string as an XML element.
pub struct AnalyticsS3ExportFileFormatSerializer;
impl AnalyticsS3ExportFileFormatSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Streaming request/response body: a stream of byte chunks plus an optional
/// known total length (only used for the `Debug` output here).
pub struct StreamingBody {
    len: Option<usize>,
    inner: Box<::futures::Stream<Item = Vec<u8>, Error = ::std::io::Error> + Send>,
}

impl StreamingBody {
    /// Wraps `stream` with a known total length of `len` bytes.
    pub fn new<S>(len: usize, stream: S) -> StreamingBody
    where
        S: ::futures::Stream<Item = Vec<u8>, Error = ::std::io::Error> + Send + 'static,
    {
        StreamingBody {
            len: Some(len),
            inner: Box::new(stream),
        }
    }
}

impl From<Vec<u8>> for StreamingBody {
    // A buffer becomes a single-chunk stream with its exact length.
    fn from(buf: Vec<u8>) -> StreamingBody {
        StreamingBody::new(buf.len(), ::futures::stream::once(Ok(buf)))
    }
}

impl fmt::Debug for StreamingBody {
    // The inner chunk stream is not `Debug`; only report the length.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "<Body: streaming content, len = {:?}>", self.len)
    }
}

impl ::futures::Stream for StreamingBody {
    type Item = Vec<u8>;
    type Error = ::std::io::Error;

    // Delegates polling to the boxed inner stream.
    fn poll(&mut self) -> ::futures::Poll<Option<Self::Item>, Self::Error> {
        self.inner.poll()
    }
}

/// Deserializes an XML text element into raw bytes.
struct BodyDeserializer;
impl BodyDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<u8>, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack)).into_bytes();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes raw bytes as an XML text element.
pub struct BodySerializer;
impl BodySerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<u8>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        // NOTE(review): panics if the body is not valid UTF-8 — generated code
        // assumes XML-serialized bodies are text; confirm with the generator.
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = String::from_utf8(obj.to_vec()).expect("Not a UTF-8 string")
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct Bucket {
    /// <p>Date the bucket was created.</p>
    pub creation_date: Option<String>,
    /// <p>The name of the bucket.</p>
    pub name: Option<String>,
}

/// Deserializes a `Bucket` element.
struct BucketDeserializer;
impl BucketDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Bucket, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Bucket::default();
        loop {
            // Peek: child element, close of this element, or skippable event.
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
                {
                    "CreationDate" => {
                        obj.creation_date = Some(try!(CreationDateDeserializer::deserialize(
                            "CreationDate",
                            stack
                        )));
                    }
                    "Name" => {
                        obj.name = Some(try!(BucketNameDeserializer::deserialize("Name", stack)));
                    }
                    // Unknown children are skipped for forward compatibility.
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Deserializes a `BucketAccelerateStatus` string value.
struct BucketAccelerateStatusDeserializer;
impl BucketAccelerateStatusDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `BucketAccelerateStatus` string as an XML element.
pub struct BucketAccelerateStatusSerializer;
impl BucketAccelerateStatusSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct BucketLifecycleConfiguration {
    pub rules: Vec<LifecycleRule>,
}

/// Serializes a `BucketLifecycleConfiguration` (its `Rule` children).
pub struct BucketLifecycleConfigurationSerializer;
impl BucketLifecycleConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &BucketLifecycleConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        LifecycleRulesSerializer::serialize(&mut writer, "Rule", &obj.rules)?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a `BucketLocationConstraint` string value.
struct BucketLocationConstraintDeserializer;
impl BucketLocationConstraintDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `BucketLocationConstraint` string as an XML element.
pub struct BucketLocationConstraintSerializer;
impl BucketLocationConstraintSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct BucketLoggingStatus {
    pub logging_enabled: Option<LoggingEnabled>,
}

/// Serializes a `BucketLoggingStatus` element.
pub struct BucketLoggingStatusSerializer;
impl BucketLoggingStatusSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &BucketLoggingStatus,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.logging_enabled {
            // NOTE(review): the leading `&` takes a reference to `()` and is a
            // no-op (generated code); kept byte-identical here.
            &LoggingEnabledSerializer::serialize(&mut writer, "LoggingEnabled", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a `BucketLogsPermission` string value.
struct BucketLogsPermissionDeserializer;
impl BucketLogsPermissionDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `BucketLogsPermission` string as an XML element.
pub struct BucketLogsPermissionSerializer;
impl BucketLogsPermissionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
writer.write(xml::writer::XmlEvent::end_element()) } } struct BucketNameDeserializer; impl BucketNameDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct BucketNameSerializer; impl BucketNameSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct BucketVersioningStatusDeserializer; impl BucketVersioningStatusDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct BucketVersioningStatusSerializer; impl BucketVersioningStatusSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct BucketsDeserializer; impl BucketsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<Bucket>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. 
                })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => {
                    if name == "Bucket" {
                        obj.push(try!(BucketDeserializer::deserialize("Bucket", stack)));
                    } else {
                        skip_tree(stack);
                    }
                }
                DeserializerNext::Close => {
                    // Wrapper element closed: consume it and stop collecting.
                    try!(end_element(tag_name, stack));
                    break;
                }
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        Ok(obj)
    }
}

/// Deserializes a `BytesProcessed` integer value.
struct BytesProcessedDeserializer;
impl BytesProcessedDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): panics on non-numeric text instead of returning an
        // XmlParseError — generated code trusts the service response.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Deserializes a `BytesScanned` integer value.
struct BytesScannedDeserializer;
impl BytesScannedDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): same unwrap-on-parse as BytesProcessed above.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct CORSConfiguration {
    pub cors_rules: Vec<CORSRule>,
}

/// Serializes a `CORSConfiguration` (its `CORSRule` children).
pub struct CORSConfigurationSerializer;
impl CORSConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CORSConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        CORSRulesSerializer::serialize(&mut writer, "CORSRule", &obj.cors_rules)?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct CORSRule {
    /// <p>Specifies which headers are allowed in a pre-flight OPTIONS request.</p>
    pub allowed_headers: Option<Vec<String>>,
    /// <p>Identifies HTTP methods that the domain/origin specified in the rule is allowed to execute.</p>
    pub allowed_methods: Vec<String>,
    /// <p>One or more origins you want customers to be able to access the bucket from.</p>
    pub allowed_origins: Vec<String>,
    /// <p>One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object).</p>
    pub expose_headers: Option<Vec<String>>,
    /// <p>The time in seconds that your browser is to cache the preflight response for the specified resource.</p>
    pub max_age_seconds: Option<i64>,
}

/// Deserializes a `CORSRule` element.
struct CORSRuleDeserializer;
impl CORSRuleDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<CORSRule, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = CORSRule::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => {
                    match &name[..]
{ "AllowedHeader" => { obj.allowed_headers = Some(try!( AllowedHeadersDeserializer::deserialize("AllowedHeader", stack) )); } "AllowedMethod" => { obj.allowed_methods = try!(AllowedMethodsDeserializer::deserialize( "AllowedMethod", stack )); } "AllowedOrigin" => { obj.allowed_origins = try!(AllowedOriginsDeserializer::deserialize( "AllowedOrigin", stack )); } "ExposeHeader" => { obj.expose_headers = Some(try!( ExposeHeadersDeserializer::deserialize("ExposeHeader", stack) )); } "MaxAgeSeconds" => { obj.max_age_seconds = Some(try!( MaxAgeSecondsDeserializer::deserialize("MaxAgeSeconds", stack) )); } _ => skip_tree(stack), } } DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct CORSRuleSerializer; impl CORSRuleSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &CORSRule, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.allowed_headers { &AllowedHeadersSerializer::serialize(&mut writer, "AllowedHeader", value)?; } AllowedMethodsSerializer::serialize(&mut writer, "AllowedMethod", &obj.allowed_methods)?; AllowedOriginsSerializer::serialize(&mut writer, "AllowedOrigin", &obj.allowed_origins)?; if let Some(ref value) = obj.expose_headers { &ExposeHeadersSerializer::serialize(&mut writer, "ExposeHeader", value)?; } if let Some(ref value) = obj.max_age_seconds { writer.write(xml::writer::XmlEvent::start_element("MaxAgeSeconds"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct CORSRulesDeserializer; impl CORSRulesDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<CORSRule>, XmlParseError> { let mut 
obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(CORSRuleDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } pub struct CORSRulesSerializer; impl CORSRulesSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<CORSRule>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { CORSRuleSerializer::serialize(writer, name, element)?; } Ok(()) } } /// <p>Describes how a CSV-formatted input object is formatted.</p> #[derive(Default, Debug, Clone)] pub struct CSVInput { /// <p>Single character used to indicate a row should be ignored when present at the start of a row.</p> pub comments: Option<String>, /// <p>Value used to separate individual fields in a record.</p> pub field_delimiter: Option<String>, /// <p>Describes the first line of input. 
Valid values: None, Ignore, Use.</p> pub file_header_info: Option<String>, /// <p>Value used for escaping where the field delimiter is part of the value.</p> pub quote_character: Option<String>, /// <p>Single character used for escaping the quote character inside an already escaped value.</p> pub quote_escape_character: Option<String>, /// <p>Value used to separate individual records.</p> pub record_delimiter: Option<String>, } pub struct CSVInputSerializer; impl CSVInputSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &CSVInput, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.comments { writer.write(xml::writer::XmlEvent::start_element("Comments"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.field_delimiter { writer.write(xml::writer::XmlEvent::start_element("FieldDelimiter"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.file_header_info { writer.write(xml::writer::XmlEvent::start_element("FileHeaderInfo"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.quote_character { writer.write(xml::writer::XmlEvent::start_element("QuoteCharacter"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.quote_escape_character { writer.write(xml::writer::XmlEvent::start_element("QuoteEscapeCharacter"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); 
writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.record_delimiter { writer.write(xml::writer::XmlEvent::start_element("RecordDelimiter"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Describes how CSV-formatted results are formatted.</p> #[derive(Default, Debug, Clone)] pub struct CSVOutput { /// <p>Value used to separate individual fields in a record.</p> pub field_delimiter: Option<String>, /// <p>Value used for escaping where the field delimiter is part of the value.</p> pub quote_character: Option<String>, /// <p>Single character used for escaping the quote character inside an already escaped value.</p> pub quote_escape_character: Option<String>, /// <p>Indicates whether or not all output fields should be quoted.</p> pub quote_fields: Option<String>, /// <p>Value used to separate individual records.</p> pub record_delimiter: Option<String>, } pub struct CSVOutputSerializer; impl CSVOutputSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &CSVOutput, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.field_delimiter { writer.write(xml::writer::XmlEvent::start_element("FieldDelimiter"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.quote_character { writer.write(xml::writer::XmlEvent::start_element("QuoteCharacter"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.quote_escape_character { writer.write(xml::writer::XmlEvent::start_element("QuoteEscapeCharacter"))?; 
writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.quote_fields { writer.write(xml::writer::XmlEvent::start_element("QuoteFields"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.record_delimiter { writer.write(xml::writer::XmlEvent::start_element("RecordDelimiter"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct CloudFunctionDeserializer; impl CloudFunctionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct CloudFunctionSerializer; impl CloudFunctionSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct CloudFunctionConfiguration { pub cloud_function: Option<String>, pub events: Option<Vec<String>>, pub id: Option<String>, pub invocation_role: Option<String>, } struct CloudFunctionConfigurationDeserializer; impl CloudFunctionConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CloudFunctionConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = 
CloudFunctionConfiguration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "CloudFunction" => { obj.cloud_function = Some(try!(CloudFunctionDeserializer::deserialize( "CloudFunction", stack ))); } "Event" => { obj.events = Some(try!(EventListDeserializer::deserialize("Event", stack))); } "Id" => { obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack))); } "InvocationRole" => { obj.invocation_role = Some(try!(CloudFunctionInvocationRoleDeserializer::deserialize( "InvocationRole", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct CloudFunctionConfigurationSerializer; impl CloudFunctionConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &CloudFunctionConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.cloud_function { writer.write(xml::writer::XmlEvent::start_element("CloudFunction"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.events { &EventListSerializer::serialize(&mut writer, "Event", value)?; } if let Some(ref value) = obj.id { writer.write(xml::writer::XmlEvent::start_element("Id"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.invocation_role { 
writer.write(xml::writer::XmlEvent::start_element("InvocationRole"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct CloudFunctionInvocationRoleDeserializer; impl CloudFunctionInvocationRoleDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct CloudFunctionInvocationRoleSerializer; impl CloudFunctionInvocationRoleSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct CodeDeserializer; impl CodeDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct CommentsSerializer; impl CommentsSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct CommonPrefix { pub prefix: Option<String>, } struct CommonPrefixDeserializer; impl CommonPrefixDeserializer { 
#[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CommonPrefix, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CommonPrefix::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Prefix" => { obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct CommonPrefixListDeserializer; impl CommonPrefixListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<CommonPrefix>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(CommonPrefixDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct CompleteMultipartUploadOutput { pub bucket: Option<String>, /// <p>Entity tag of the object.</p> pub e_tag: Option<String>, /// <p>If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). 
The value of rule-id is URL encoded.</p> pub expiration: Option<String>, pub key: Option<String>, pub location: Option<String>, pub request_charged: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>Version of the object.</p> pub version_id: Option<String>, } struct CompleteMultipartUploadOutputDeserializer; impl CompleteMultipartUploadOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CompleteMultipartUploadOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CompleteMultipartUploadOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Bucket" => { obj.bucket = Some(try!(BucketNameDeserializer::deserialize("Bucket", stack))); } "ETag" => { obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "Location" => { obj.location = Some(try!(LocationDeserializer::deserialize("Location", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct CompleteMultipartUploadRequest { pub bucket: String, pub key: String, pub multipart_upload: Option<CompletedMultipartUpload>, pub request_payer: Option<String>, pub upload_id: String, } #[derive(Default, Debug, Clone)] pub struct CompletedMultipartUpload { pub parts: Option<Vec<CompletedPart>>, } pub struct CompletedMultipartUploadSerializer; impl CompletedMultipartUploadSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &CompletedMultipartUpload, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.parts { &CompletedPartListSerializer::serialize(&mut writer, "Part", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct CompletedPart { /// <p>Entity tag returned when the part was uploaded.</p> pub e_tag: Option<String>, /// <p>Part number that identifies the part. 
This is a positive integer between 1 and 10,000.</p> pub part_number: Option<i64>, } pub struct CompletedPartSerializer; impl CompletedPartSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &CompletedPart, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.e_tag { writer.write(xml::writer::XmlEvent::start_element("ETag"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.part_number { writer.write(xml::writer::XmlEvent::start_element("PartNumber"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } pub struct CompletedPartListSerializer; impl CompletedPartListSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<CompletedPart>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { CompletedPartSerializer::serialize(writer, name, element)?; } Ok(()) } } pub struct CompressionTypeSerializer; impl CompressionTypeSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct Condition { /// <p>The HTTP error code when the redirect is applied. In the event of an error, if the error code equals this value, then the specified redirect is applied. 
Required when parent element Condition is specified and sibling KeyPrefixEquals is not specified. If both are specified, then both must be true for the redirect to be applied.</p> pub http_error_code_returned_equals: Option<String>, /// <p>The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect request for all pages with the prefix docs/, the key prefix will be /docs, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied.</p> pub key_prefix_equals: Option<String>, } struct ConditionDeserializer; impl ConditionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Condition, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Condition::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
                    "HttpErrorCodeReturnedEquals" => {
                        obj.http_error_code_returned_equals =
                            Some(try!(HttpErrorCodeReturnedEqualsDeserializer::deserialize(
                                "HttpErrorCodeReturnedEquals",
                                stack
                            )));
                    }
                    "KeyPrefixEquals" => {
                        obj.key_prefix_equals = Some(try!(
                            KeyPrefixEqualsDeserializer::deserialize("KeyPrefixEquals", stack)
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a website-routing `Condition` with its two optional children.
pub struct ConditionSerializer;
impl ConditionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Condition,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.http_error_code_returned_equals {
            writer.write(xml::writer::XmlEvent::start_element(
                "HttpErrorCodeReturnedEquals",
            ))?;
            // BUGFIX: this write's Result was silently discarded; propagate it
            // like every sibling write so a failed character write aborts serialization.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.key_prefix_equals {
            writer.write(xml::writer::XmlEvent::start_element("KeyPrefixEquals"))?;
            // BUGFIX: propagate the write error instead of dropping the Result.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct ContinuationEvent {}

struct ContinuationEventDeserializer;
impl ContinuationEventDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ContinuationEvent, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = ContinuationEvent::default();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct CopyObjectOutput {
    pub copy_object_result: Option<CopyObjectResult>,
    pub copy_source_version_id: Option<String>,
    /// <p>If the object expiration is configured, the response
includes this header.</p> pub expiration: Option<String>, pub request_charged: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p> pub sse_customer_algorithm: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p> pub sse_customer_key_md5: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>Version ID of the newly created copy.</p> pub version_id: Option<String>, } struct CopyObjectOutputDeserializer; impl CopyObjectOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CopyObjectOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CopyObjectOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "CopyObjectResult" => { obj.copy_object_result = Some(try!( CopyObjectResultDeserializer::deserialize("CopyObjectResult", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct CopyObjectRequest { /// <p>The canned ACL to apply to the object.</p> pub acl: Option<String>, pub bucket: String, /// <p>Specifies caching behavior along the request/reply chain.</p> pub cache_control: Option<String>, /// <p>Specifies presentational information for the object.</p> pub content_disposition: Option<String>, /// <p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p> pub content_encoding: Option<String>, /// <p>The language the content is in.</p> pub content_language: Option<String>, /// <p>A standard MIME type describing the format of the object data.</p> pub content_type: Option<String>, /// <p>The name of the source bucket and key name of the source object, separated by a slash (/). 
Must be URL-encoded.</p> pub copy_source: String, /// <p>Copies the object if its entity tag (ETag) matches the specified tag.</p> pub copy_source_if_match: Option<String>, /// <p>Copies the object if it has been modified since the specified time.</p> pub copy_source_if_modified_since: Option<String>, /// <p>Copies the object if its entity tag (ETag) is different than the specified ETag.</p> pub copy_source_if_none_match: Option<String>, /// <p>Copies the object if it hasn&#39;t been modified since the specified time.</p> pub copy_source_if_unmodified_since: Option<String>, /// <p>Specifies the algorithm to use when decrypting the source object (e.g., AES256).</p> pub copy_source_sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.</p> pub copy_source_sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub copy_source_sse_customer_key_md5: Option<String>, /// <p>The date and time at which the object is no longer cacheable.</p> pub expires: Option<String>, /// <p>Gives the grantee READ, READ<em>ACP, and WRITE</em>ACP permissions on the object.</p> pub grant_full_control: Option<String>, /// <p>Allows grantee to read the object data and its metadata.</p> pub grant_read: Option<String>, /// <p>Allows grantee to read the object ACL.</p> pub grant_read_acp: Option<String>, /// <p>Allows grantee to write the ACL for the applicable object.</p> pub grant_write_acp: Option<String>, pub key: String, /// <p>A map of metadata to store with the object in S3.</p> pub metadata: Option<::std::collections::HashMap<String, String>>, /// <p>Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request.</p> pub metadata_directive: Option<String>, pub request_payer: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. 
Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>The type of storage to use for the object. Defaults to &#39;STANDARD&#39;.</p> pub storage_class: Option<String>, /// <p>The tag-set for the object destination object this value must be used in conjunction with the TaggingDirective. The tag-set must be encoded as URL Query parameters</p> pub tagging: Option<String>, /// <p>Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the request.</p> pub tagging_directive: Option<String>, /// <p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.</p> pub website_redirect_location: Option<String>, } #[derive(Default, Debug, Clone)] pub struct CopyObjectResult { pub e_tag: Option<String>, pub last_modified: Option<String>, } struct CopyObjectResultDeserializer; impl CopyObjectResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CopyObjectResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CopyObjectResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "ETag" => { obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack))); } "LastModified" => { obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize( "LastModified", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct CopyPartResult { /// <p>Entity tag of the object.</p> pub e_tag: Option<String>, /// <p>Date and time at which the object was uploaded.</p> pub last_modified: Option<String>, } struct CopyPartResultDeserializer; impl CopyPartResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CopyPartResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CopyPartResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "ETag" => { obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack))); } "LastModified" => { obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize( "LastModified", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct CreateBucketConfiguration { /// <p>Specifies the region where the bucket will be created. 
If you don&#39;t specify a region, the bucket will be created in US Standard.</p>
    pub location_constraint: Option<String>,
}

/// Serializes a `CreateBucketConfiguration` (optional `<LocationConstraint>` child).
pub struct CreateBucketConfigurationSerializer;
impl CreateBucketConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CreateBucketConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.location_constraint {
            writer.write(xml::writer::XmlEvent::start_element("LocationConstraint"))?;
            // BUGFIX: this write's Result was silently discarded; propagate it
            // like every sibling write so a failed character write aborts serialization.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct CreateBucketOutput {
    pub location: Option<String>,
}

struct CreateBucketOutputDeserializer;
impl CreateBucketOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<CreateBucketOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = CreateBucketOutput::default();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct CreateBucketRequest {
    /// <p>The canned ACL to apply to the bucket.</p>
    pub acl: Option<String>,
    pub bucket: String,
    pub create_bucket_configuration: Option<CreateBucketConfiguration>,
    /// <p>Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.</p>
    pub grant_full_control: Option<String>,
    /// <p>Allows grantee to list the objects in the bucket.</p>
    pub grant_read: Option<String>,
    /// <p>Allows grantee to read the bucket ACL.</p>
    pub grant_read_acp: Option<String>,
    /// <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
    pub grant_write: Option<String>,
    /// <p>Allows grantee to write the ACL for the applicable bucket.</p>
    pub grant_write_acp: Option<String>,
} #[derive(Default, Debug, Clone)] pub struct CreateMultipartUploadOutput { /// <p>Date when multipart upload will become eligible for abort operation by lifecycle.</p> pub abort_date: Option<String>, /// <p>Id of the lifecycle rule that makes a multipart upload eligible for abort operation.</p> pub abort_rule_id: Option<String>, /// <p>Name of the bucket to which the multipart upload was initiated.</p> pub bucket: Option<String>, /// <p>Object key for which the multipart upload was initiated.</p> pub key: Option<String>, pub request_charged: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p> pub sse_customer_algorithm: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p> pub sse_customer_key_md5: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>ID for the initiated multipart upload.</p> pub upload_id: Option<String>, } struct CreateMultipartUploadOutputDeserializer; impl CreateMultipartUploadOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CreateMultipartUploadOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CreateMultipartUploadOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Bucket" => { obj.bucket = Some(try!(BucketNameDeserializer::deserialize("Bucket", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "UploadId" => { obj.upload_id = Some(try!(MultipartUploadIdDeserializer::deserialize( "UploadId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct CreateMultipartUploadRequest { /// <p>The canned ACL to apply to the object.</p> pub acl: Option<String>, pub bucket: String, /// <p>Specifies caching behavior along the request/reply chain.</p> pub cache_control: Option<String>, /// <p>Specifies presentational information for the object.</p> pub content_disposition: Option<String>, /// <p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p> pub content_encoding: Option<String>, /// <p>The language the content is in.</p> pub content_language: Option<String>, /// <p>A standard MIME type describing the format of the object data.</p> pub content_type: Option<String>, /// <p>The date and time at which the object is no longer cacheable.</p> pub expires: Option<String>, /// <p>Gives the grantee READ, READ<em>ACP, and WRITE</em>ACP permissions on the object.</p> pub grant_full_control: Option<String>, /// <p>Allows grantee to read the object data and its metadata.</p> pub grant_read: Option<String>, /// <p>Allows grantee to read the object ACL.</p> pub grant_read_acp: Option<String>, /// <p>Allows grantee to write the ACL for the applicable object.</p> pub grant_write_acp: Option<String>, pub key: String, /// <p>A map of metadata to store 
with the object in S3.</p> pub metadata: Option<::std::collections::HashMap<String, String>>, pub request_payer: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>The type of storage to use for the object. Defaults to &#39;STANDARD&#39;.</p> pub storage_class: Option<String>, /// <p>The tag-set for the object. The tag-set must be encoded as URL Query parameters</p> pub tagging: Option<String>, /// <p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. 
Amazon S3 stores the value of this header in the object metadata.</p> pub website_redirect_location: Option<String>, } struct CreationDateDeserializer; impl CreationDateDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } struct DateDeserializer; impl DateDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct DateSerializer; impl DateSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct DaysDeserializer; impl DaysDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<i64, XmlParseError> { try!(start_element(tag_name, stack)); let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct DaysSerializer; impl DaysSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &i64, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct DaysAfterInitiationDeserializer; impl 
DaysAfterInitiationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<i64, XmlParseError> { try!(start_element(tag_name, stack)); let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct DaysAfterInitiationSerializer; impl DaysAfterInitiationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &i64, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct Delete { pub objects: Vec<ObjectIdentifier>, /// <p>Element to enable quiet mode for the request. When you add this element, you must set its value to true.</p> pub quiet: Option<bool>, } pub struct DeleteSerializer; impl DeleteSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Delete, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; ObjectIdentifierListSerializer::serialize(&mut writer, "Object", &obj.objects)?; if let Some(ref value) = obj.quiet { writer.write(xml::writer::XmlEvent::start_element("Quiet"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct DeleteBucketAnalyticsConfigurationRequest { /// <p>The name of the bucket from which an analytics configuration is deleted.</p> pub bucket: String, /// <p>The identifier used to represent an analytics configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct 
DeleteBucketCorsRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketEncryptionRequest { /// <p>The name of the bucket containing the server-side encryption configuration to delete.</p> pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketInventoryConfigurationRequest { /// <p>The name of the bucket containing the inventory configuration to delete.</p> pub bucket: String, /// <p>The ID used to identify the inventory configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketLifecycleRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketMetricsConfigurationRequest { /// <p>The name of the bucket containing the metrics configuration to delete.</p> pub bucket: String, /// <p>The ID used to identify the metrics configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketPolicyRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketReplicationRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketTaggingRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketWebsiteRequest { pub bucket: String, } struct DeleteMarkerDeserializer; impl DeleteMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<bool, XmlParseError> { try!(start_element(tag_name, stack)); let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct DeleteMarkerEntry { /// <p>Specifies whether the object is (true) or is not (false) the latest version of an object.</p> pub is_latest: Option<bool>, /// <p>The object key.</p> pub key: Option<String>, /// <p>Date and time the object was last 
modified.</p> pub last_modified: Option<String>, pub owner: Option<Owner>, /// <p>Version ID of an object.</p> pub version_id: Option<String>, } struct DeleteMarkerEntryDeserializer; impl DeleteMarkerEntryDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteMarkerEntry, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DeleteMarkerEntry::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "IsLatest" => { obj.is_latest = Some(try!(IsLatestDeserializer::deserialize("IsLatest", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "LastModified" => { obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize( "LastModified", stack ))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } "VersionId" => { obj.version_id = Some(try!(ObjectVersionIdDeserializer::deserialize( "VersionId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DeleteMarkerVersionIdDeserializer; impl DeleteMarkerVersionIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } struct DeleteMarkersDeserializer; impl DeleteMarkersDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DeleteMarkerEntry>, XmlParseError> { let mut 
obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(DeleteMarkerEntryDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct DeleteObjectOutput { /// <p>Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker.</p> pub delete_marker: Option<bool>, pub request_charged: Option<String>, /// <p>Returns the version ID of the delete marker created as a result of the DELETE operation.</p> pub version_id: Option<String>, } struct DeleteObjectOutputDeserializer; impl DeleteObjectOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteObjectOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = DeleteObjectOutput::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct DeleteObjectRequest { pub bucket: String, pub key: String, /// <p>The concatenation of the authentication device&#39;s serial number, a space, and the value that is displayed on your authentication device.</p> pub mfa: Option<String>, pub request_payer: Option<String>, /// <p>VersionId used to reference a specific version of the object.</p> pub version_id: Option<String>, } #[derive(Default, Debug, Clone)] pub struct DeleteObjectTaggingOutput { /// <p>The versionId of the object the tag-set was removed from.</p> pub version_id: Option<String>, } struct DeleteObjectTaggingOutputDeserializer; impl DeleteObjectTaggingOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteObjectTaggingOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = DeleteObjectTaggingOutput::default(); try!(end_element(tag_name, stack)); 
Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct DeleteObjectTaggingRequest { pub bucket: String, pub key: String, /// <p>The versionId of the object that the tag-set will be removed from.</p> pub version_id: Option<String>, } #[derive(Default, Debug, Clone)] pub struct DeleteObjectsOutput { pub deleted: Option<Vec<DeletedObject>>, pub errors: Option<Vec<S3Error>>, pub request_charged: Option<String>, } struct DeleteObjectsOutputDeserializer; impl DeleteObjectsOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteObjectsOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DeleteObjectsOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Deleted" => { obj.deleted = Some(try!(DeletedObjectsDeserializer::deserialize( "Deleted", stack ))); } "Error" => { obj.errors = Some(try!(ErrorsDeserializer::deserialize("Error", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct DeleteObjectsRequest { pub bucket: String, pub delete: Delete, /// <p>The concatenation of the authentication device&#39;s serial number, a space, and the value that is displayed on your authentication device.</p> pub mfa: Option<String>, pub request_payer: Option<String>, } #[derive(Default, Debug, Clone)] pub struct DeletedObject { pub delete_marker: Option<bool>, pub delete_marker_version_id: Option<String>, pub key: Option<String>, pub version_id: Option<String>, } struct DeletedObjectDeserializer; impl DeletedObjectDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeletedObject, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DeletedObject::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "DeleteMarker" => { obj.delete_marker = Some(try!(DeleteMarkerDeserializer::deserialize( "DeleteMarker", stack ))); } "DeleteMarkerVersionId" => { obj.delete_marker_version_id = Some(try!(DeleteMarkerVersionIdDeserializer::deserialize( "DeleteMarkerVersionId", stack ))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "VersionId" => { obj.version_id = Some(try!(ObjectVersionIdDeserializer::deserialize( "VersionId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DeletedObjectsDeserializer; impl DeletedObjectsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DeletedObject>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(DeletedObjectDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } struct DelimiterDeserializer; impl DelimiterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct DelimiterSerializer; impl DelimiterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct DescriptionSerializer; impl DescriptionSerializer { #[allow(unused_variables, warnings)] pub 
fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for replication destination information.</p> #[derive(Default, Debug, Clone)] pub struct Destination { /// <p>Container for information regarding the access control for replicas.</p> pub access_control_translation: Option<AccessControlTranslation>, /// <p>Account ID of the destination bucket. Currently this is only being verified if Access Control Translation is enabled</p> pub account: Option<String>, /// <p>Amazon resource name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule.</p> pub bucket: String, /// <p>Container for information regarding encryption based configuration for replicas.</p> pub encryption_configuration: Option<EncryptionConfiguration>, /// <p>The class of storage used to store the object.</p> pub storage_class: Option<String>, } struct DestinationDeserializer; impl DestinationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Destination, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Destination::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
// Field dispatch for Destination: <AccessControlTranslation>, <Account>, <Bucket> (required,
// assigned directly rather than wrapped in Some), <EncryptionConfiguration>, <StorageClass>.
// Then DestinationSerializer, which writes <{name}> with nested child elements.
// NOTE(review): two defects are visible in the serializer below, suppressed by
// #[allow(unused_variables, warnings)]:
//   1. The `characters(...)` writes for the optional Account and StorageClass fields end in
//      `)));` with no `?` — the io Result from EventWriter::write is silently discarded, so a
//      write error there is ignored (the Bucket write correctly uses `)))?;`).
//   2. `&AccessControlTranslationSerializer::serialize(...)?;` — `?` binds tighter than `&`,
//      so errors do propagate, but the leading `&` just borrows the resulting `()` and is noise.
{ "AccessControlTranslation" => { obj.access_control_translation = Some(try!(AccessControlTranslationDeserializer::deserialize( "AccessControlTranslation", stack ))); } "Account" => { obj.account = Some(try!(AccountIdDeserializer::deserialize("Account", stack))); } "Bucket" => { obj.bucket = try!(BucketNameDeserializer::deserialize("Bucket", stack)); } "EncryptionConfiguration" => { obj.encryption_configuration = Some(try!(EncryptionConfigurationDeserializer::deserialize( "EncryptionConfiguration", stack ))); } "StorageClass" => { obj.storage_class = Some(try!(StorageClassDeserializer::deserialize( "StorageClass", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct DestinationSerializer; impl DestinationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Destination, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.access_control_translation { &AccessControlTranslationSerializer::serialize( &mut writer, "AccessControlTranslation", value, )?; } if let Some(ref value) = obj.account { writer.write(xml::writer::XmlEvent::start_element("Account"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::start_element("Bucket"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.bucket )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.encryption_configuration { &EncryptionConfigurationSerializer::serialize( &mut writer, "EncryptionConfiguration", value, )?; } if let Some(ref value) = obj.storage_class { writer.write(xml::writer::XmlEvent::start_element("StorageClass"))?;
// Continuation: the StorageClass characters write also drops its Result (`)));`, no `?` —
// see NOTE(review) above). Then the boilerplate string-shape pairs for DisplayName, ETag and
// EmailAddress: each deserializer reads start element / characters / end element, each
// serializer writes <{name}>{value}</{name}> (these correctly use `?` throughout).
writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct DisplayNameDeserializer; impl DisplayNameDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct DisplayNameSerializer; impl DisplayNameSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ETagDeserializer; impl ETagDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ETagSerializer; impl ETagSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct EmailAddressDeserializer; impl EmailAddressDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack));
Ok(obj) } } pub struct EmailAddressSerializer; impl EmailAddressSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct EnableRequestProgressSerializer; impl EnableRequestProgressSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &bool, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct EncodingTypeDeserializer; impl EncodingTypeDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct EncodingTypeSerializer; impl EncodingTypeSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Describes the server-side encryption that will be applied to the restore results.</p> #[derive(Default, Debug, Clone)] pub struct Encryption { /// <p>The server-side encryption algorithm used when storing job results in Amazon S3 (e.g., AES256, aws:kms).</p> pub encryption_type: String, /// <p>If the 
encryption type is aws:kms, this optional value can be used to specify the encryption context for the restore results.</p> pub kms_context: Option<String>, /// <p>If the encryption type is aws:kms, this optional value specifies the AWS KMS key ID to use for encryption of job results.</p> pub kms_key_id: Option<String>, } pub struct EncryptionSerializer; impl EncryptionSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Encryption, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("EncryptionType"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.encryption_type )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.kms_context { writer.write(xml::writer::XmlEvent::start_element("KMSContext"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.kms_key_id { writer.write(xml::writer::XmlEvent::start_element("KMSKeyId"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for information regarding encryption based configuration for replicas.</p> #[derive(Default, Debug, Clone)] pub struct EncryptionConfiguration { /// <p>The id of the KMS key used to encrypt the replica object.</p> pub replica_kms_key_id: Option<String>, } struct EncryptionConfigurationDeserializer; impl EncryptionConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<EncryptionConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = EncryptionConfiguration::default(); 
// Event loop of EncryptionConfigurationDeserializer (header is on the previous line):
// only child handled is <ReplicaKmsKeyID>; everything else is skipped. Then
// EncryptionConfigurationSerializer (NOTE(review): its ReplicaKmsKeyID characters write ends
// in `)));` without `?`, discarding the write Result — same dropped-error pattern as the other
// generated serializers in this file), the empty EndEvent marker shape, and the head of the
// S3Error shape/deserializer.
loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "ReplicaKmsKeyID" => { obj.replica_kms_key_id = Some(try!( ReplicaKmsKeyIDDeserializer::deserialize("ReplicaKmsKeyID", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct EncryptionConfigurationSerializer; impl EncryptionConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &EncryptionConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.replica_kms_key_id { writer.write(xml::writer::XmlEvent::start_element("ReplicaKmsKeyID"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct EndEvent {} struct EndEventDeserializer; impl EndEventDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<EndEvent, XmlParseError> { try!(start_element(tag_name, stack)); let obj = EndEvent::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct S3Error { pub code: Option<String>, pub key: Option<String>, pub message: Option<String>, pub version_id: Option<String>, } struct S3ErrorDeserializer; impl S3ErrorDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<S3Error, XmlParseError> {
try!(start_element(tag_name, stack)); let mut obj = S3Error::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Code" => { obj.code = Some(try!(CodeDeserializer::deserialize("Code", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "Message" => { obj.message = Some(try!(MessageDeserializer::deserialize("Message", stack))); } "VersionId" => { obj.version_id = Some(try!(ObjectVersionIdDeserializer::deserialize( "VersionId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct ErrorDocument { /// <p>The object key name to use when a 4XX class error occurs.</p> pub key: String, } struct ErrorDocumentDeserializer; impl ErrorDocumentDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ErrorDocument, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ErrorDocument::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Key" => { obj.key = try!(ObjectKeyDeserializer::deserialize("Key", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ErrorDocumentSerializer; impl ErrorDocumentSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &ErrorDocument, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Key"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.key )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ErrorsDeserializer; impl ErrorsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<S3Error>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(S3ErrorDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } struct EventDeserializer; impl EventDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct EventSerializer; impl EventSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct EventListDeserializer; impl EventListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(EventDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } pub struct EventListSerializer; impl EventListSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<String>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { EventSerializer::serialize(writer, name, element)?; } Ok(()) } } struct ExpirationStatusDeserializer; impl ExpirationStatusDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ExpirationStatusSerializer; impl ExpirationStatusSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ExpiredObjectDeleteMarkerDeserializer; impl ExpiredObjectDeleteMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<bool, XmlParseError> { try!(start_element(tag_name, stack)); let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ExpiredObjectDeleteMarkerSerializer; impl ExpiredObjectDeleteMarkerSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &bool, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; 
writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ExposeHeaderDeserializer; impl ExposeHeaderDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ExposeHeaderSerializer; impl ExposeHeaderSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ExposeHeadersDeserializer; impl ExposeHeadersDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(ExposeHeaderDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } pub struct ExposeHeadersSerializer; impl ExposeHeadersSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<String>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { ExposeHeaderSerializer::serialize(writer, name, element)?; } Ok(()) } } pub struct ExpressionSerializer; impl ExpressionSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct ExpressionTypeSerializer; impl ExpressionTypeSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct FetchOwnerSerializer; impl FetchOwnerSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &bool, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct FieldDelimiterSerializer; impl FieldDelimiterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut 
EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct FileHeaderInfoSerializer; impl FileHeaderInfoSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for key value pair that defines the criteria for the filter rule.</p> #[derive(Default, Debug, Clone)] pub struct FilterRule { /// <p>Object key name prefix or suffix identifying one or more objects to which the filtering rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes and suffixes are not supported. For more information, go to <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Configuring Event Notifications</a> in the Amazon Simple Storage Service Developer Guide.</p> pub name: Option<String>, pub value: Option<String>, } struct FilterRuleDeserializer; impl FilterRuleDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<FilterRule, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = FilterRule::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
// Field dispatch for FilterRule (<Name>, <Value>), then FilterRuleSerializer.
// NOTE(review): both optional-field characters writes in the serializer end in `)));` with no
// `?`, silently dropping the EventWriter::write Result (the element start/end writes do use
// `?`) — consistent with the dropped-error pattern elsewhere in this generated file. Finally
// the head of FilterRuleListDeserializer (flattened-list collection loop).
{ "Name" => { obj.name = Some(try!(FilterRuleNameDeserializer::deserialize("Name", stack))); } "Value" => { obj.value = Some(try!(FilterRuleValueDeserializer::deserialize( "Value", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct FilterRuleSerializer; impl FilterRuleSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &FilterRule, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.name { writer.write(xml::writer::XmlEvent::start_element("Name"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.value { writer.write(xml::writer::XmlEvent::start_element("Value"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct FilterRuleListDeserializer; impl FilterRuleListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<FilterRule>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, ..
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(FilterRuleDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } pub struct FilterRuleListSerializer; impl FilterRuleListSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<FilterRule>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { FilterRuleSerializer::serialize(writer, name, element)?; } Ok(()) } } struct FilterRuleNameDeserializer; impl FilterRuleNameDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct FilterRuleNameSerializer; impl FilterRuleNameSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct FilterRuleValueDeserializer; impl FilterRuleValueDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct FilterRuleValueSerializer; impl FilterRuleValueSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = 
obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct GetBucketAccelerateConfigurationOutput { /// <p>The accelerate configuration of the bucket.</p> pub status: Option<String>, } struct GetBucketAccelerateConfigurationOutputDeserializer; impl GetBucketAccelerateConfigurationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketAccelerateConfigurationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketAccelerateConfigurationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Status" => { obj.status = Some(try!(BucketAccelerateStatusDeserializer::deserialize( "Status", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketAccelerateConfigurationRequest { /// <p>Name of the bucket for which the accelerate configuration is retrieved.</p> pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketAclOutput { /// <p>A list of grants.</p> pub grants: Option<Vec<Grant>>, pub owner: Option<Owner>, } struct GetBucketAclOutputDeserializer; impl GetBucketAclOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketAclOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketAclOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. 
})) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "AccessControlList" => { obj.grants = Some(try!(GrantsDeserializer::deserialize( "AccessControlList", stack ))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketAclRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketAnalyticsConfigurationOutput { /// <p>The configuration and any analyses for the analytics filter.</p> pub analytics_configuration: Option<AnalyticsConfiguration>, } struct GetBucketAnalyticsConfigurationOutputDeserializer; impl GetBucketAnalyticsConfigurationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketAnalyticsConfigurationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketAnalyticsConfigurationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "AnalyticsConfiguration" => { obj.analytics_configuration = Some(try!(AnalyticsConfigurationDeserializer::deserialize( "AnalyticsConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketAnalyticsConfigurationRequest { /// <p>The name of the bucket from which an analytics configuration is retrieved.</p> pub bucket: String, /// <p>The identifier used to represent an analytics configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketCorsOutput { pub cors_rules: Option<Vec<CORSRule>>, } struct GetBucketCorsOutputDeserializer; impl GetBucketCorsOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketCorsOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketCorsOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "CORSRule" => { obj.cors_rules = Some(try!(CORSRulesDeserializer::deserialize("CORSRule", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketCorsRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketEncryptionOutput { pub server_side_encryption_configuration: Option<ServerSideEncryptionConfiguration>, } struct GetBucketEncryptionOutputDeserializer; impl GetBucketEncryptionOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketEncryptionOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketEncryptionOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "ServerSideEncryptionConfiguration" => { obj.server_side_encryption_configuration = Some(try!( ServerSideEncryptionConfigurationDeserializer::deserialize( "ServerSideEncryptionConfiguration", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketEncryptionRequest { /// <p>The name of the bucket from which the server-side encryption configuration is retrieved.</p> pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketInventoryConfigurationOutput { /// <p>Specifies the inventory configuration.</p> pub inventory_configuration: Option<InventoryConfiguration>, } struct GetBucketInventoryConfigurationOutputDeserializer; impl GetBucketInventoryConfigurationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketInventoryConfigurationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketInventoryConfigurationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "InventoryConfiguration" => { obj.inventory_configuration = Some(try!(InventoryConfigurationDeserializer::deserialize( "InventoryConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketInventoryConfigurationRequest { /// <p>The name of the bucket containing the inventory configuration to retrieve.</p> pub bucket: String, /// <p>The ID used to identify the inventory configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketLifecycleConfigurationOutput { pub rules: Option<Vec<LifecycleRule>>, } struct GetBucketLifecycleConfigurationOutputDeserializer; impl GetBucketLifecycleConfigurationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketLifecycleConfigurationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketLifecycleConfigurationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Rule" => { obj.rules = Some(try!(LifecycleRulesDeserializer::deserialize("Rule", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketLifecycleConfigurationRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketLifecycleOutput { pub rules: Option<Vec<Rule>>, } struct GetBucketLifecycleOutputDeserializer; impl GetBucketLifecycleOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketLifecycleOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketLifecycleOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Rule" => { obj.rules = Some(try!(RulesDeserializer::deserialize("Rule", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketLifecycleRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketLocationOutput { pub location_constraint: Option<String>, } struct GetBucketLocationOutputDeserializer; impl GetBucketLocationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketLocationOutput, XmlParseError> { let mut obj = GetBucketLocationOutput::default(); obj.location_constraint = Some(try!(BucketLocationConstraintDeserializer::deserialize( "LocationConstraint", stack ))); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketLocationRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketLoggingOutput { pub logging_enabled: Option<LoggingEnabled>, } struct GetBucketLoggingOutputDeserializer; impl GetBucketLoggingOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketLoggingOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketLoggingOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "LoggingEnabled" => { obj.logging_enabled = Some(try!(LoggingEnabledDeserializer::deserialize( "LoggingEnabled", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketLoggingRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketMetricsConfigurationOutput { /// <p>Specifies the metrics configuration.</p> pub metrics_configuration: Option<MetricsConfiguration>, } struct GetBucketMetricsConfigurationOutputDeserializer; impl GetBucketMetricsConfigurationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketMetricsConfigurationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketMetricsConfigurationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "MetricsConfiguration" => { obj.metrics_configuration = Some(try!(MetricsConfigurationDeserializer::deserialize( "MetricsConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketMetricsConfigurationRequest { /// <p>The name of the bucket containing the metrics configuration to retrieve.</p> pub bucket: String, /// <p>The ID used to identify the metrics configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketNotificationConfigurationRequest { /// <p>Name of the bucket to get the notification configuration for.</p> pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketPolicyOutput { /// <p>The bucket policy as a JSON document.</p> pub policy: Option<String>, } #[derive(Default, Debug, Clone)] pub struct GetBucketPolicyRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketReplicationOutput { pub replication_configuration: Option<ReplicationConfiguration>, } struct GetBucketReplicationOutputDeserializer; impl GetBucketReplicationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketReplicationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketReplicationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "ReplicationConfiguration" => { obj.replication_configuration = Some(try!(ReplicationConfigurationDeserializer::deserialize( "ReplicationConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketReplicationRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketRequestPaymentOutput { /// <p>Specifies who pays for the download and request fees.</p> pub payer: Option<String>, } struct GetBucketRequestPaymentOutputDeserializer; impl GetBucketRequestPaymentOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketRequestPaymentOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketRequestPaymentOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Payer" => { obj.payer = Some(try!(PayerDeserializer::deserialize("Payer", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketRequestPaymentRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketTaggingOutput { pub tag_set: Vec<Tag>, } struct GetBucketTaggingOutputDeserializer; impl GetBucketTaggingOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketTaggingOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketTaggingOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "TagSet" => { obj.tag_set = try!(TagSetDeserializer::deserialize("TagSet", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketTaggingRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketVersioningOutput { /// <p>Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. 
If the bucket has never been so configured, this element is not returned.</p> pub mfa_delete: Option<String>, /// <p>The versioning state of the bucket.</p> pub status: Option<String>, } struct GetBucketVersioningOutputDeserializer; impl GetBucketVersioningOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketVersioningOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketVersioningOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "MfaDelete" => { obj.mfa_delete = Some(try!(MFADeleteStatusDeserializer::deserialize( "MfaDelete", stack ))); } "Status" => { obj.status = Some(try!(BucketVersioningStatusDeserializer::deserialize( "Status", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketVersioningRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketWebsiteOutput { pub error_document: Option<ErrorDocument>, pub index_document: Option<IndexDocument>, pub redirect_all_requests_to: Option<RedirectAllRequestsTo>, pub routing_rules: Option<Vec<RoutingRule>>, } struct GetBucketWebsiteOutputDeserializer; impl GetBucketWebsiteOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketWebsiteOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketWebsiteOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. 
})) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { match &name[..] { "ErrorDocument" => { obj.error_document = Some(try!( ErrorDocumentDeserializer::deserialize("ErrorDocument", stack) )); } "IndexDocument" => { obj.index_document = Some(try!( IndexDocumentDeserializer::deserialize("IndexDocument", stack) )); } "RedirectAllRequestsTo" => { obj.redirect_all_requests_to = Some(try!(RedirectAllRequestsToDeserializer::deserialize( "RedirectAllRequestsTo", stack ))); } "RoutingRules" => { obj.routing_rules = Some(try!(RoutingRulesDeserializer::deserialize( "RoutingRules", stack ))); } _ => skip_tree(stack), } } DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketWebsiteRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetObjectAclOutput { /// <p>A list of grants.</p> pub grants: Option<Vec<Grant>>, pub owner: Option<Owner>, pub request_charged: Option<String>, } struct GetObjectAclOutputDeserializer; impl GetObjectAclOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetObjectAclOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetObjectAclOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "AccessControlList" => { obj.grants = Some(try!(GrantsDeserializer::deserialize( "AccessControlList", stack ))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetObjectAclRequest { pub bucket: String, pub key: String, pub request_payer: Option<String>, /// <p>VersionId used to reference a specific version of the object.</p> pub version_id: Option<String>, } #[derive(Default, Debug)] pub struct GetObjectOutput { pub accept_ranges: Option<String>, /// <p>Object data.</p> pub body: Option<StreamingBody>, /// <p>Specifies caching behavior along the request/reply chain.</p> pub cache_control: Option<String>, /// <p>Specifies presentational information for the object.</p> pub content_disposition: Option<String>, /// <p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p> pub content_encoding: Option<String>, /// <p>The language the content is in.</p> pub content_language: Option<String>, /// <p>Size of the body in bytes.</p> pub content_length: Option<i64>, /// <p>The portion of the object returned in the response.</p> pub content_range: Option<String>, /// <p>A standard MIME type describing the format of the object data.</p> pub content_type: Option<String>, /// <p>Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. 
If false, this response header does not appear in the response.</p> pub delete_marker: Option<bool>, /// <p>An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL</p> pub e_tag: Option<String>, /// <p>If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.</p> pub expiration: Option<String>, /// <p>The date and time at which the object is no longer cacheable.</p> pub expires: Option<String>, /// <p>Last modified date of the object</p> pub last_modified: Option<String>, /// <p>A map of metadata to store with the object in S3.</p> pub metadata: Option<::std::collections::HashMap<String, String>>, /// <p>This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. 
For example, using SOAP, you can create metadata whose values are not legal HTTP headers.</p> pub missing_meta: Option<i64>, /// <p>The count of parts this object has.</p> pub parts_count: Option<i64>, pub replication_status: Option<String>, pub request_charged: Option<String>, /// <p>Provides information about object restoration operation and expiration time of the restored object copy.</p> pub restore: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p> pub sse_customer_algorithm: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p> pub sse_customer_key_md5: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, pub storage_class: Option<String>, /// <p>The number of tags, if any, on the object.</p> pub tag_count: Option<i64>, /// <p>Version of the object.</p> pub version_id: Option<String>, /// <p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. 
Amazon S3 stores the value of this header in the object metadata.</p> pub website_redirect_location: Option<String>, } #[derive(Default, Debug, Clone)] pub struct GetObjectRequest { pub bucket: String, /// <p>Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).</p> pub if_match: Option<String>, /// <p>Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).</p> pub if_modified_since: Option<String>, /// <p>Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).</p> pub if_none_match: Option<String>, /// <p>Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).</p> pub if_unmodified_since: Option<String>, pub key: String, /// <p>Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a &#39;ranged&#39; GET request for the part specified. Useful for downloading just a part of an object.</p> pub part_number: Option<i64>, /// <p>Downloads the specified range bytes of an object. 
For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.</p> pub range: Option<String>, pub request_payer: Option<String>, /// <p>Sets the Cache-Control header of the response.</p> pub response_cache_control: Option<String>, /// <p>Sets the Content-Disposition header of the response</p> pub response_content_disposition: Option<String>, /// <p>Sets the Content-Encoding header of the response.</p> pub response_content_encoding: Option<String>, /// <p>Sets the Content-Language header of the response.</p> pub response_content_language: Option<String>, /// <p>Sets the Content-Type header of the response.</p> pub response_content_type: Option<String>, /// <p>Sets the Expires header of the response.</p> pub response_expires: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>VersionId used to reference a specific version of the object.</p> pub version_id: Option<String>, } #[derive(Default, Debug, Clone)] pub struct GetObjectTaggingOutput { pub tag_set: Vec<Tag>, pub version_id: Option<String>, } struct GetObjectTaggingOutputDeserializer; impl GetObjectTaggingOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetObjectTaggingOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetObjectTaggingOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
                    // <TagSet> is the only child we care about; it is required
                    // (tag_set is a plain Vec, not an Option).
                    "TagSet" => {
                        obj.tag_set = try!(TagSetDeserializer::deserialize("TagSet", stack));
                    }
                    // Skip unknown children wholesale so new S3 fields don't break parsing.
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct GetObjectTaggingRequest {
    pub bucket: String,
    pub key: String,
    pub version_id: Option<String>,
}

// Not Clone: `body` is a streaming payload that can only be consumed once.
#[derive(Default, Debug)]
pub struct GetObjectTorrentOutput {
    pub body: Option<StreamingBody>,
    pub request_charged: Option<String>,
}

#[derive(Default, Debug, Clone)]
pub struct GetObjectTorrentRequest {
    pub bucket: String,
    pub key: String,
    pub request_payer: Option<String>,
}

#[derive(Default, Debug, Clone)]
pub struct GlacierJobParameters {
    /// <p>Glacier retrieval tier at which the restore will be processed.</p>
    pub tier: String,
}

/// Serializes [`GlacierJobParameters`] as `<{name}><Tier>…</Tier></{name}>`.
pub struct GlacierJobParametersSerializer;
impl GlacierJobParametersSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &GlacierJobParameters,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::start_element("Tier"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.tier
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        // Final end_element's Result is returned to the caller.
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct Grant {
    pub grantee: Option<Grantee>,
    /// <p>Specifies the permission given to the grantee.</p>
    pub permission: Option<String>,
}

/// Deserializes a `<Grant>` element (children: `<Grantee>`, `<Permission>`).
struct GrantDeserializer;
impl GrantDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Grant, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Grant::default();
        loop {
            // Peek to classify the next event; match arm continues on the next line.
            let next_event = match stack.peek() {
})) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Grantee" => {
                        obj.grantee =
                            Some(try!(GranteeDeserializer::deserialize("Grantee", stack)));
                    }
                    "Permission" => {
                        obj.permission = Some(try!(PermissionDeserializer::deserialize(
                            "Permission",
                            stack
                        )));
                    }
                    // Skip unknown children so new S3 fields don't break parsing.
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a [`Grant`] as `<{name}>` with optional `<Grantee>` and
/// `<Permission>` children.
pub struct GrantSerializer;
impl GrantSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Grant,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.grantee {
            // Fixed: was `&GranteeSerializer::serialize(...)?;` — the stray `&`
            // produced a useless reference to `()` that was immediately dropped.
            GranteeSerializer::serialize(&mut writer, "Grantee", value)?;
        }
        if let Some(ref value) = obj.permission {
            writer.write(xml::writer::XmlEvent::start_element("Permission"))?;
            // Fixed: `?` added — the generated code dropped this `Result`,
            // silently swallowing writer failures (hidden by #[allow(warnings)]).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct Grantee {
    /// <p>Screen name of the grantee.</p>
    pub display_name: Option<String>,
    /// <p>Email address of the grantee.</p>
    pub email_address: Option<String>,
    /// <p>The canonical user ID of the grantee.</p>
    pub id: Option<String>,
    /// <p>Type of grantee</p>
    pub type_: String,
    /// <p>URI of the grantee group.</p>
    pub uri: Option<String>,
}

/// Deserializes a `<Grantee>` element into a [`Grantee`].
struct GranteeDeserializer;
impl GranteeDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Grantee, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Grantee::default();
        loop {
            let next_event = match stack.peek() {
Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "DisplayName" => {
                        obj.display_name = Some(try!(DisplayNameDeserializer::deserialize(
                            "DisplayName",
                            stack
                        )));
                    }
                    "EmailAddress" => {
                        obj.email_address = Some(try!(EmailAddressDeserializer::deserialize(
                            "EmailAddress",
                            stack
                        )));
                    }
                    "ID" => {
                        obj.id = Some(try!(IDDeserializer::deserialize("ID", stack)));
                    }
                    // NOTE(review): in S3 responses `xsi:type` is an *attribute* on
                    // <Grantee>, and `local_name` of a child element never includes a
                    // prefix, so this arm looks unreachable — confirm against live
                    // responses before relying on `type_` being populated here.
                    "xsi:type" => {
                        obj.type_ = try!(TypeDeserializer::deserialize("xsi:type", stack));
                    }
                    "URI" => {
                        obj.uri = Some(try!(URIDeserializer::deserialize("URI", stack)));
                    }
                    // Skip unknown children so new S3 fields don't break parsing.
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a [`Grantee`]: optional `<DisplayName>`, `<EmailAddress>`,
/// `<ID>`, `<URI>` children plus the mandatory `xsi:type` element
/// (written by the continuation of this function).
pub struct GranteeSerializer;
impl GranteeSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Grantee,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.display_name {
            writer.write(xml::writer::XmlEvent::start_element("DisplayName"))?;
            // Fixed: `?` added — the generated code dropped this `Result`,
            // silently swallowing writer failures (hidden by #[allow(warnings)]).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.email_address {
            writer.write(xml::writer::XmlEvent::start_element("EmailAddress"))?;
            // Fixed: `?` added (was a dropped `Result`).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("ID"))?;
            // Fixed: `?` added (was a dropped `Result`).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
writer.write(xml::writer::XmlEvent::start_element("xsi:type"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.type_
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.uri {
            writer.write(xml::writer::XmlEvent::start_element("URI"))?;
            // Fixed: `?` added — the generated code dropped this `Result`,
            // silently swallowing writer failures (hidden by #[allow(warnings)]).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a list element containing repeated `<Grant>` children
/// (e.g. `<AccessControlList>`) into a `Vec<Grant>`.
struct GrantsDeserializer;
impl GrantsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<Grant>, XmlParseError> {
        let mut obj = vec![];
        try!(start_element(tag_name, stack));
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => {
                    if name == "Grant" {
                        obj.push(try!(GrantDeserializer::deserialize("Grant", stack)));
                    } else {
                        // Skip unknown children so new S3 fields don't break parsing.
                        skip_tree(stack);
                    }
                }
                DeserializerNext::Close => {
                    try!(end_element(tag_name, stack));
                    break;
                }
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        Ok(obj)
    }
}

/// Serializes a `Vec<Grant>` as `<{name}>` wrapping repeated `<Grant>` children.
pub struct GrantsSerializer;
impl GrantsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<Grant>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        for element in obj {
            GrantSerializer::serialize(writer, "Grant", element)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())?;
        Ok(())
    }
}

#[derive(Default, Debug, Clone)]
pub struct HeadBucketRequest {
    pub bucket: String,
}

#[derive(Default, Debug, Clone)]
pub struct HeadObjectOutput {
    pub accept_ranges: Option<String>,
    /// <p>Specifies caching behavior along the request/reply chain.</p>
    pub
cache_control: Option<String>,
    /// <p>Specifies presentational information for the object.</p>
    pub content_disposition: Option<String>,
    /// <p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p>
    pub content_encoding: Option<String>,
    /// <p>The language the content is in.</p>
    pub content_language: Option<String>,
    /// <p>Size of the body in bytes.</p>
    pub content_length: Option<i64>,
    /// <p>A standard MIME type describing the format of the object data.</p>
    pub content_type: Option<String>,
    /// <p>Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.</p>
    pub delete_marker: Option<bool>,
    /// <p>An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL</p>
    pub e_tag: Option<String>,
    /// <p>If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.</p>
    pub expiration: Option<String>,
    /// <p>The date and time at which the object is no longer cacheable.</p>
    pub expires: Option<String>,
    /// <p>Last modified date of the object</p>
    pub last_modified: Option<String>,
    /// <p>A map of metadata to store with the object in S3.</p>
    pub metadata: Option<::std::collections::HashMap<String, String>>,
    /// <p>This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. For example, using SOAP, you can create metadata whose values are not legal HTTP headers.</p>
    pub missing_meta: Option<i64>,
    /// <p>The count of parts this object has.</p>
    pub parts_count: Option<i64>,
    // NOTE(review): undocumented in the service model; presumably mirrors the
    // x-amz-replication-status / x-amz-request-charged response headers — confirm.
    pub replication_status: Option<String>,
    pub request_charged: Option<String>,
    /// <p>Provides information about object restoration operation and expiration time of the restored object copy.</p>
    pub restore: Option<String>,
    /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p>
    pub sse_customer_algorithm: Option<String>,
    /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p>
    pub sse_customer_key_md5: Option<String>,
    /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p>
    pub ssekms_key_id: Option<String>,
    /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p>
    pub server_side_encryption: Option<String>,
    pub storage_class: Option<String>,
    /// <p>Version of the object.</p>
    pub version_id: Option<String>,
    /// <p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.</p>
    pub website_redirect_location: Option<String>,
}

/// Deserializer for `HeadObjectOutput`. The XML body of a HEAD response is
/// empty (the payload lives in HTTP headers, filled in elsewhere), so this
/// only consumes the element pair and returns a default-initialized struct.
struct HeadObjectOutputDeserializer;
impl HeadObjectOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<HeadObjectOutput, XmlParseError> {
        try!(start_element(tag_name, stack));

        let obj = HeadObjectOutput::default();

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Input for a HEAD Object request.
#[derive(Default, Debug, Clone)]
pub struct HeadObjectRequest {
    pub bucket: String,
    /// <p>Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).</p>
    pub if_match: Option<String>,
    /// <p>Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).</p>
    pub if_modified_since: Option<String>,
    /// <p>Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).</p>
    pub if_none_match: Option<String>,
    /// <p>Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).</p>
    pub if_unmodified_since: Option<String>,
    pub key: String,
    /// <p>Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a &#39;ranged&#39; HEAD request for the part specified. Useful querying about the size of the part and the number of parts in this object.</p>
    pub part_number: Option<i64>,
    /// <p>Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.</p>
    pub range: Option<String>,
    pub request_payer: Option<String>,
    /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p>
    pub sse_customer_algorithm: Option<String>,
    /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.</p>
    pub sse_customer_key: Option<String>,
    /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p>
    pub sse_customer_key_md5: Option<String>,
    /// <p>VersionId used to reference a specific version of the object.</p>
    pub version_id: Option<String>,
}

/// Deserializes a `HostName` string element (text content only).
struct HostNameDeserializer;
impl HostNameDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes a `HostName` string as `<name>value</name>`.
pub struct HostNameSerializer;
impl HostNameSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `HttpErrorCodeReturnedEquals` string element.
struct HttpErrorCodeReturnedEqualsDeserializer;
impl HttpErrorCodeReturnedEqualsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `HttpErrorCodeReturnedEquals` string as `<name>value</name>`.
pub struct HttpErrorCodeReturnedEqualsSerializer;
impl HttpErrorCodeReturnedEqualsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `HttpRedirectCode` string element.
struct HttpRedirectCodeDeserializer;
impl HttpRedirectCodeDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `HttpRedirectCode` string as `<name>value</name>`.
pub struct HttpRedirectCodeSerializer;
impl HttpRedirectCodeSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `ID` (canonical user ID) string element.
struct IDDeserializer;
impl IDDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `ID` string as `<name>value</name>`.
pub struct IDSerializer;
impl IDSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Website index-document configuration for a bucket.
#[derive(Default, Debug, Clone)]
pub struct IndexDocument {
    /// <p>A suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character.</p>
    pub suffix: String,
}

/// Deserializes an `IndexDocument`, reading the `Suffix` child element and
/// skipping any unrecognized children.
struct IndexDocumentDeserializer;
impl IndexDocumentDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<IndexDocument, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = IndexDocument::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Suffix" => {
                        obj.suffix = try!(SuffixDeserializer::deserialize("Suffix", stack));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `IndexDocument` as `<name><Suffix>…</Suffix></name>`.
pub struct IndexDocumentSerializer;
impl IndexDocumentSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &IndexDocument,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::start_element("Suffix"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.suffix
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `Initiated` timestamp string element.
struct InitiatedDeserializer;
impl InitiatedDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct Initiator {
    /// <p>Name of the Principal.</p>
    pub display_name: Option<String>,
    /// <p>If the principal is an AWS account, it provides the Canonical User ID. If the principal is an IAM User, it provides a user ARN value.</p>
    pub id: Option<String>,
}

/// Deserializes an `Initiator`, reading optional `DisplayName` / `ID` children.
struct InitiatorDeserializer;
impl InitiatorDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Initiator, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = Initiator::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "DisplayName" => {
                        obj.display_name = Some(try!(DisplayNameDeserializer::deserialize(
                            "DisplayName",
                            stack
                        )));
                    }
                    "ID" => {
                        obj.id = Some(try!(IDDeserializer::deserialize("ID", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// <p>Describes the serialization format of the object.</p>
#[derive(Default, Debug, Clone)]
pub struct InputSerialization {
    /// <p>Describes the serialization of a CSV-encoded object.</p>
    pub csv: Option<CSVInput>,
    /// <p>Specifies object&#39;s compression format. Valid values: NONE, GZIP. Default Value: NONE.</p>
    pub compression_type: Option<String>,
    /// <p>Specifies JSON as object&#39;s input serialization format.</p>
    pub json: Option<JSONInput>,
}

/// Serializes an `InputSerialization` with optional `CSV`, `CompressionType`
/// and `JSON` children.
pub struct InputSerializationSerializer;
impl InputSerializationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &InputSerialization,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.csv {
            // Dropped the generated stray `&` in front of the call: borrowing
            // the `?`-unwrapped `()` was a no-op.
            CSVInputSerializer::serialize(&mut writer, "CSV", value)?;
        }
        if let Some(ref value) = obj.compression_type {
            writer.write(xml::writer::XmlEvent::start_element("CompressionType"))?;
            // BUG FIX: this write's Result was silently discarded (missing `?`);
            // propagate write errors like the surrounding writes do.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.json {
            JSONInputSerializer::serialize(&mut writer, "JSON", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Configuration of an S3 bucket inventory (destination, filter, schedule…).
#[derive(Default, Debug, Clone)]
pub struct InventoryConfiguration {
    /// <p>Contains information about where to publish the inventory results.</p>
    pub destination: InventoryDestination,
    /// <p>Specifies an inventory filter. The inventory only includes objects that meet the filter&#39;s criteria.</p>
    pub filter: Option<InventoryFilter>,
    /// <p>The ID used to identify the inventory configuration.</p>
    pub id: String,
    /// <p>Specifies which object version(s) to included in the inventory results.</p>
    pub included_object_versions: String,
    /// <p>Specifies whether the inventory is enabled or disabled.</p>
    pub is_enabled: bool,
    /// <p>Contains the optional fields that are included in the inventory results.</p>
    pub optional_fields: Option<Vec<String>>,
    /// <p>Specifies the schedule for generating inventory results.</p>
    pub schedule: InventorySchedule,
}

/// Deserializes an `InventoryConfiguration` from its XML element.
struct InventoryConfigurationDeserializer;
impl InventoryConfigurationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<InventoryConfiguration, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = InventoryConfiguration::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
                    // Each recognized child element populates the matching
                    // field; unknown elements are skipped wholesale.
                    "Destination" => {
                        obj.destination = try!(InventoryDestinationDeserializer::deserialize(
                            "Destination",
                            stack
                        ));
                    }
                    "Filter" => {
                        obj.filter = Some(try!(InventoryFilterDeserializer::deserialize(
                            "Filter", stack
                        )));
                    }
                    "Id" => {
                        obj.id = try!(InventoryIdDeserializer::deserialize("Id", stack));
                    }
                    "IncludedObjectVersions" => {
                        obj.included_object_versions =
                            try!(InventoryIncludedObjectVersionsDeserializer::deserialize(
                                "IncludedObjectVersions",
                                stack
                            ));
                    }
                    "IsEnabled" => {
                        obj.is_enabled =
                            try!(IsEnabledDeserializer::deserialize("IsEnabled", stack));
                    }
                    "OptionalFields" => {
                        obj.optional_fields =
                            Some(try!(InventoryOptionalFieldsDeserializer::deserialize(
                                "OptionalFields",
                                stack
                            )));
                    }
                    "Schedule" => {
                        obj.schedule = try!(InventoryScheduleDeserializer::deserialize(
                            "Schedule", stack
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `InventoryConfiguration` into its XML representation.
pub struct InventoryConfigurationSerializer;
impl InventoryConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &InventoryConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        InventoryDestinationSerializer::serialize(&mut writer, "Destination", &obj.destination)?;
        if let Some(ref value) = obj.filter {
            // NOTE(review): the leading `&` borrows the `?`-unwrapped `()` and
            // is a no-op; kept as generated.
            &InventoryFilterSerializer::serialize(&mut writer, "Filter", value)?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Id"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.id
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::start_element(
            "IncludedObjectVersions",
        ))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.included_object_versions
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::start_element("IsEnabled"))?;
writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.is_enabled
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.optional_fields {
            // NOTE(review): the leading `&` is a no-op borrow of `()`; kept as generated.
            &InventoryOptionalFieldsSerializer::serialize(&mut writer, "OptionalFields", value)?;
        }
        InventoryScheduleSerializer::serialize(&mut writer, "Schedule", &obj.schedule)?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a flattened list of `InventoryConfiguration` elements: keeps
/// consuming sibling elements named `tag_name` until a different tag appears.
struct InventoryConfigurationListDeserializer;
impl InventoryConfigurationListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<InventoryConfiguration>, XmlParseError> {
        let mut obj = vec![];

        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name,
                _ => false,
            };

            if consume_next_tag {
                obj.push(try!(InventoryConfigurationDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                break;
            }
        }

        Ok(obj)
    }
}

/// Where inventory results are published.
#[derive(Default, Debug, Clone)]
pub struct InventoryDestination {
    /// <p>Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published.</p>
    pub s3_bucket_destination: InventoryS3BucketDestination,
}

/// Deserializes an `InventoryDestination`, reading its single
/// `S3BucketDestination` child.
struct InventoryDestinationDeserializer;
impl InventoryDestinationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<InventoryDestination, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = InventoryDestination::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "S3BucketDestination" => {
                        obj.s3_bucket_destination =
                            try!(InventoryS3BucketDestinationDeserializer::deserialize(
                                "S3BucketDestination",
                                stack
                            ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `InventoryDestination` wrapper element.
pub struct InventoryDestinationSerializer;
impl InventoryDestinationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &InventoryDestination,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        InventoryS3BucketDestinationSerializer::serialize(
            &mut writer,
            "S3BucketDestination",
            &obj.s3_bucket_destination,
        )?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// <p>Contains the type of server-side encryption used to encrypt the inventory results.</p>
#[derive(Default, Debug, Clone)]
pub struct InventoryEncryption {
    /// <p>Specifies the use of SSE-KMS to encrypt delivered Inventory reports.</p>
    pub ssekms: Option<SSEKMS>,
    /// <p>Specifies the use of SSE-S3 to encrypt delivered Inventory reports.</p>
    pub sses3: Option<SSES3>,
}

/// Deserializes an `InventoryEncryption`, reading optional `SSE-KMS` /
/// `SSE-S3` children.
struct InventoryEncryptionDeserializer;
impl InventoryEncryptionDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<InventoryEncryption, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = InventoryEncryption::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
                    "SSE-KMS" => {
                        obj.ssekms = Some(try!(SSEKMSDeserializer::deserialize("SSE-KMS", stack)));
                    }
                    "SSE-S3" => {
                        obj.sses3 = Some(try!(SSES3Deserializer::deserialize("SSE-S3", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `InventoryEncryption` with optional `SSE-KMS` / `SSE-S3` children.
pub struct InventoryEncryptionSerializer;
impl InventoryEncryptionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &InventoryEncryption,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.ssekms {
            // NOTE(review): the leading `&` is a no-op borrow of `()`; kept as generated.
            &SSEKMSSerializer::serialize(&mut writer, "SSE-KMS", value)?;
        }
        if let Some(ref value) = obj.sses3 {
            &SSES3Serializer::serialize(&mut writer, "SSE-S3", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Prefix filter restricting which objects appear in inventory results.
#[derive(Default, Debug, Clone)]
pub struct InventoryFilter {
    /// <p>The prefix that an object must have to be included in the inventory results.</p>
    pub prefix: String,
}

/// Deserializes an `InventoryFilter`, reading its `Prefix` child.
struct InventoryFilterDeserializer;
impl InventoryFilterDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<InventoryFilter, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = InventoryFilter::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Prefix" => {
                        obj.prefix = try!(PrefixDeserializer::deserialize("Prefix", stack));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `InventoryFilter` as `<name><Prefix>…</Prefix></name>`.
pub struct InventoryFilterSerializer;
impl InventoryFilterSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &InventoryFilter,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.prefix
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `InventoryFormat` string element.
struct InventoryFormatDeserializer;
impl InventoryFormatDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `InventoryFormat` string as `<name>value</name>`.
pub struct InventoryFormatSerializer;
impl InventoryFormatSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `InventoryFrequency` string element.
struct InventoryFrequencyDeserializer;
impl InventoryFrequencyDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `InventoryFrequency` string as `<name>value</name>`.
pub struct InventoryFrequencySerializer;
impl InventoryFrequencySerializer {
#[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `InventoryId` string element.
struct InventoryIdDeserializer;
impl InventoryIdDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `InventoryId` string as `<name>value</name>`.
pub struct InventoryIdSerializer;
impl InventoryIdSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `InventoryIncludedObjectVersions` string element.
struct InventoryIncludedObjectVersionsDeserializer;
impl InventoryIncludedObjectVersionsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `InventoryIncludedObjectVersions` string as `<name>value</name>`.
pub struct InventoryIncludedObjectVersionsSerializer;
impl InventoryIncludedObjectVersionsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a single inventory optional-field string element.
struct
InventoryOptionalFieldDeserializer;
impl InventoryOptionalFieldDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes a single inventory optional-field string as `<name>value</name>`.
pub struct InventoryOptionalFieldSerializer;
impl InventoryOptionalFieldSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `OptionalFields` list: collects every `Field` child and
/// skips everything else until the wrapper element closes.
struct InventoryOptionalFieldsDeserializer;
impl InventoryOptionalFieldsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<String>, XmlParseError> {
        let mut obj = vec![];
        try!(start_element(tag_name, stack));

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => {
                    if name == "Field" {
                        obj.push(try!(InventoryOptionalFieldDeserializer::deserialize(
                            "Field", stack
                        )));
                    } else {
                        skip_tree(stack);
                    }
                }
                DeserializerNext::Close => {
                    try!(end_element(tag_name, stack));
                    break;
                }
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        Ok(obj)
    }
}

/// Serializes an optional-fields list as `<name><Field>…</Field>…</name>`.
pub struct InventoryOptionalFieldsSerializer;
impl InventoryOptionalFieldsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<String>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        for element in obj {
            InventoryOptionalFieldSerializer::serialize(writer, "Field", element)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())?;
        Ok(())
    }
}

/// The S3 bucket that receives published inventory results.
#[derive(Default, Debug, Clone)]
pub struct InventoryS3BucketDestination {
    /// <p>The ID of the account that owns the destination bucket.</p>
    pub account_id: Option<String>,
    /// <p>The Amazon resource name (ARN) of the bucket where inventory results will be published.</p>
    pub bucket: String,
    /// <p>Contains the type of server-side encryption used to encrypt the inventory results.</p>
    pub encryption: Option<InventoryEncryption>,
    /// <p>Specifies the output format of the inventory results.</p>
    pub format: String,
    /// <p>The prefix that is prepended to all inventory results.</p>
    pub prefix: Option<String>,
}

/// Deserializes an `InventoryS3BucketDestination` from its XML element.
struct InventoryS3BucketDestinationDeserializer;
impl InventoryS3BucketDestinationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<InventoryS3BucketDestination, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = InventoryS3BucketDestination::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, ..
})) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "AccountId" => {
                        obj.account_id =
                            Some(try!(AccountIdDeserializer::deserialize("AccountId", stack)));
                    }
                    "Bucket" => {
                        obj.bucket = try!(BucketNameDeserializer::deserialize("Bucket", stack));
                    }
                    "Encryption" => {
                        obj.encryption = Some(try!(InventoryEncryptionDeserializer::deserialize(
                            "Encryption",
                            stack
                        )));
                    }
                    "Format" => {
                        obj.format =
                            try!(InventoryFormatDeserializer::deserialize("Format", stack));
                    }
                    "Prefix" => {
                        obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `InventoryS3BucketDestination` into its XML representation.
pub struct InventoryS3BucketDestinationSerializer;
impl InventoryS3BucketDestinationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &InventoryS3BucketDestination,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.account_id {
            writer.write(xml::writer::XmlEvent::start_element("AccountId"))?;
            // BUG FIX: this write's Result was silently discarded (missing `?`);
            // propagate write errors like the surrounding writes do.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Bucket"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.bucket
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.encryption {
            // Dropped the generated stray no-op `&` in front of the call.
            InventoryEncryptionSerializer::serialize(&mut writer, "Encryption", value)?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Format"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.format
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.prefix {
            writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
            // BUG FIX: this write's Result was silently discarded (missing `?`);
            // propagate write errors like the surrounding writes do.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// How frequently inventory results are produced.
#[derive(Default, Debug, Clone)]
pub struct InventorySchedule {
    /// <p>Specifies how frequently inventory results are produced.</p>
    pub frequency: String,
}

/// Deserializes an `InventorySchedule`, reading its `Frequency` child.
struct InventoryScheduleDeserializer;
impl InventoryScheduleDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<InventorySchedule, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = InventorySchedule::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
                    "Frequency" => {
                        obj.frequency = try!(InventoryFrequencyDeserializer::deserialize(
                            "Frequency",
                            stack
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `InventorySchedule` as `<name><Frequency>…</Frequency></name>`.
pub struct InventoryScheduleSerializer;
impl InventoryScheduleSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &InventorySchedule,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::start_element("Frequency"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.frequency
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `IsEnabled` boolean element.
struct IsEnabledDeserializer;
impl IsEnabledDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<bool, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): `unwrap()` panics on malformed boolean text instead of
        // returning an XmlParseError; kept as generated.
        let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes an `IsEnabled` boolean as `<name>true|false</name>`.
pub struct IsEnabledSerializer;
impl IsEnabledSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &bool,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `IsLatest` boolean element.
struct IsLatestDeserializer;
impl IsLatestDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<bool, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): `unwrap()` panics on malformed boolean text; kept as generated.
        let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Deserializes an `IsTruncated` boolean element.
struct
IsTruncatedDeserializer;
impl IsTruncatedDeserializer {
    /// Parses a boolean-valued XML element (IsTruncated).
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<bool, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): `unwrap` panics on non-boolean text — presumably
        // trusted S3 responses; confirm before hardening.
        let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct JSONInput {
    /// <p>The type of JSON. Valid values: Document, Lines.</p>
    pub type_: Option<String>,
}
/// Serializes a `JSONInput` as an XML element named `name`.
pub struct JSONInputSerializer;
impl JSONInputSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &JSONInput,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.type_ {
            writer.write(xml::writer::XmlEvent::start_element("Type"))?;
            // FIX: this write's Result was silently discarded (missing `?`,
            // masked by #[allow(warnings)]); propagate write errors.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct JSONOutput {
    /// <p>The value used to separate individual records in the output.</p>
    pub record_delimiter: Option<String>,
}
/// Serializes a `JSONOutput` as an XML element named `name`.
pub struct JSONOutputSerializer;
impl JSONOutputSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &JSONOutput,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.record_delimiter {
            writer.write(xml::writer::XmlEvent::start_element("RecordDelimiter"))?;
            // FIX: propagate the previously-ignored write Result.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Writes a JSON type string as the text content of an element named `name`.
pub struct JSONTypeSerializer;
impl JSONTypeSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut
EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Writes a KMS encryption context string as an XML element named `name`.
pub struct KMSContextSerializer;
impl KMSContextSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Parses an integer-valued XML element (KeyCount).
struct KeyCountDeserializer;
impl KeyCountDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): `unwrap` panics on non-numeric text — presumably
        // trusted S3 responses; confirm before hardening.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Parses a string-valued XML element (KeyMarker).
struct KeyMarkerDeserializer;
impl KeyMarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a key marker string as an XML element named `name`.
pub struct KeyMarkerSerializer;
impl KeyMarkerSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Parses a string-valued XML element (KeyPrefixEquals).
struct KeyPrefixEqualsDeserializer;
impl KeyPrefixEqualsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a KeyPrefixEquals string as an XML element named `name`.
pub struct KeyPrefixEqualsSerializer;
impl KeyPrefixEqualsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Parses a Lambda function ARN from a string-valued XML element.
struct LambdaFunctionArnDeserializer;
impl LambdaFunctionArnDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a Lambda function ARN as an XML element named `name`.
pub struct LambdaFunctionArnSerializer;
impl LambdaFunctionArnSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// <p>Container for specifying the AWS Lambda notification configuration.</p>
#[derive(Default, Debug, Clone)]
pub struct LambdaFunctionConfiguration {
    pub events: Vec<String>,
    pub filter: Option<NotificationConfigurationFilter>,
    pub id: Option<String>,
    /// <p>Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type.</p>
    pub lambda_function_arn: String,
}
/// Parses a `LambdaFunctionConfiguration` XML subtree.
struct LambdaFunctionConfigurationDeserializer;
impl LambdaFunctionConfigurationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
) -> Result<LambdaFunctionConfiguration, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = LambdaFunctionConfiguration::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Event" => {
                        obj.events = try!(EventListDeserializer::deserialize("Event", stack));
                    }
                    "Filter" => {
                        obj.filter = Some(try!(
                            NotificationConfigurationFilterDeserializer::deserialize(
                                "Filter", stack
                            )
                        ));
                    }
                    "Id" => {
                        obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack)));
                    }
                    // The wire name for the ARN is the legacy "CloudFunction".
                    "CloudFunction" => {
                        obj.lambda_function_arn = try!(LambdaFunctionArnDeserializer::deserialize(
                            "CloudFunction",
                            stack
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a `LambdaFunctionConfiguration` as an XML element named `name`.
pub struct LambdaFunctionConfigurationSerializer;
impl LambdaFunctionConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LambdaFunctionConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        EventListSerializer::serialize(&mut writer, "Event", &obj.events)?;
        if let Some(ref value) = obj.filter {
            // FIX: dropped the useless `&` in front of the unit-returning
            // serialize call.
            NotificationConfigurationFilterSerializer::serialize(&mut writer, "Filter", value)?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("Id"))?;
            // FIX: this write's Result was silently discarded; propagate it.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::start_element("CloudFunction"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.lambda_function_arn
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Parses consecutive sibling `LambdaFunctionConfiguration` elements.
struct LambdaFunctionConfigurationListDeserializer;
impl LambdaFunctionConfigurationListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<LambdaFunctionConfiguration>, XmlParseError> {
        let mut obj = vec![];
        loop {
            // Keep consuming as long as the next start element matches.
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name,
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(LambdaFunctionConfigurationDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}
/// Serializes each list element as a sibling XML element named `name`.
pub struct LambdaFunctionConfigurationListSerializer;
impl LambdaFunctionConfigurationListSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<LambdaFunctionConfiguration>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        for element in obj {
            LambdaFunctionConfigurationSerializer::serialize(writer, name, element)?;
        }
        Ok(())
    }
}
/// Parses a string-valued XML element (LastModified).
struct LastModifiedDeserializer;
impl LastModifiedDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct LifecycleConfiguration {
    pub rules: Vec<Rule>,
}
/// Serializes a `LifecycleConfiguration` as an XML element named `name`.
pub struct LifecycleConfigurationSerializer;
impl LifecycleConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LifecycleConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        RulesSerializer::serialize(&mut writer, "Rule", &obj.rules)?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default,
Debug, Clone)]
pub struct LifecycleExpiration {
    /// <p>Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.</p>
    pub date: Option<String>,
    /// <p>Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.</p>
    pub days: Option<i64>,
    /// <p>Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.</p>
    pub expired_object_delete_marker: Option<bool>,
}
/// Parses a `LifecycleExpiration` XML subtree.
struct LifecycleExpirationDeserializer;
impl LifecycleExpirationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<LifecycleExpiration, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = LifecycleExpiration::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Date" => {
                        obj.date = Some(try!(DateDeserializer::deserialize("Date", stack)));
                    }
                    "Days" => {
                        obj.days = Some(try!(DaysDeserializer::deserialize("Days", stack)));
                    }
                    "ExpiredObjectDeleteMarker" => {
                        obj.expired_object_delete_marker =
                            Some(try!(ExpiredObjectDeleteMarkerDeserializer::deserialize(
                                "ExpiredObjectDeleteMarker",
                                stack
                            )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a `LifecycleExpiration` as an XML element named `name`.
pub struct LifecycleExpirationSerializer;
impl LifecycleExpirationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LifecycleExpiration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.date {
            writer.write(xml::writer::XmlEvent::start_element("Date"))?;
            // FIX: the Results of these three `characters` writes were
            // silently discarded (missing `?`, masked by #[allow(warnings)]);
            // propagate write errors.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.days {
            writer.write(xml::writer::XmlEvent::start_element("Days"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.expired_object_delete_marker {
            writer.write(xml::writer::XmlEvent::start_element(
                "ExpiredObjectDeleteMarker",
            ))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct LifecycleRule {
    pub abort_incomplete_multipart_upload: Option<AbortIncompleteMultipartUpload>,
    pub expiration: Option<LifecycleExpiration>,
    pub filter: Option<LifecycleRuleFilter>,
    /// <p>Unique identifier for the rule. The value cannot be longer than 255 characters.</p>
    pub id: Option<String>,
    pub noncurrent_version_expiration: Option<NoncurrentVersionExpiration>,
    pub noncurrent_version_transitions: Option<Vec<NoncurrentVersionTransition>>,
    /// <p>If &#39;Enabled&#39;, the rule is currently being applied. If &#39;Disabled&#39;, the rule is not currently being applied.</p>
    pub status: String,
    pub transitions: Option<Vec<Transition>>,
}
/// Parses a `LifecycleRule` XML subtree.
struct LifecycleRuleDeserializer;
impl LifecycleRuleDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<LifecycleRule, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = LifecycleRule::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
                    "AbortIncompleteMultipartUpload" => {
                        obj.abort_incomplete_multipart_upload = Some(try!(
                            AbortIncompleteMultipartUploadDeserializer::deserialize(
                                "AbortIncompleteMultipartUpload",
                                stack
                            )
                        ));
                    }
                    "Expiration" => {
                        obj.expiration = Some(try!(LifecycleExpirationDeserializer::deserialize(
                            "Expiration",
                            stack
                        )));
                    }
                    "Filter" => {
                        obj.filter = Some(try!(LifecycleRuleFilterDeserializer::deserialize(
                            "Filter", stack
                        )));
                    }
                    "ID" => {
                        obj.id = Some(try!(IDDeserializer::deserialize("ID", stack)));
                    }
                    "NoncurrentVersionExpiration" => {
                        obj.noncurrent_version_expiration =
                            Some(try!(NoncurrentVersionExpirationDeserializer::deserialize(
                                "NoncurrentVersionExpiration",
                                stack
                            )));
                    }
                    "NoncurrentVersionTransition" => {
                        obj.noncurrent_version_transitions = Some(try!(
                            NoncurrentVersionTransitionListDeserializer::deserialize(
                                "NoncurrentVersionTransition",
                                stack
                            )
                        ));
                    }
                    "Status" => {
                        obj.status =
                            try!(ExpirationStatusDeserializer::deserialize("Status", stack));
                    }
                    "Transition" => {
                        obj.transitions = Some(try!(TransitionListDeserializer::deserialize(
                            "Transition",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a `LifecycleRule` as an XML element named `name`.
pub struct LifecycleRuleSerializer;
impl LifecycleRuleSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LifecycleRule,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        // FIX: dropped the useless leading `&` on these unit-returning
        // serialize calls (they produced &() and a warning).
        if let Some(ref value) = obj.abort_incomplete_multipart_upload {
            AbortIncompleteMultipartUploadSerializer::serialize(
                &mut writer,
                "AbortIncompleteMultipartUpload",
                value,
            )?;
        }
        if let Some(ref value) = obj.expiration {
            LifecycleExpirationSerializer::serialize(&mut writer, "Expiration", value)?;
        }
        if let Some(ref value) = obj.filter {
            LifecycleRuleFilterSerializer::serialize(&mut writer, "Filter", value)?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("ID"))?;
            // FIX: this write's Result was silently discarded; propagate it.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.noncurrent_version_expiration {
            NoncurrentVersionExpirationSerializer::serialize(
                &mut writer,
                "NoncurrentVersionExpiration",
                value,
            )?;
        }
        if let Some(ref value) = obj.noncurrent_version_transitions {
            NoncurrentVersionTransitionListSerializer::serialize(
                &mut writer,
                "NoncurrentVersionTransition",
                value,
            )?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Status"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.status
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.transitions {
            TransitionListSerializer::serialize(&mut writer, "Transition", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// <p>This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the And operator.</p>
#[derive(Default, Debug, Clone)]
pub struct LifecycleRuleAndOperator {
    pub prefix: Option<String>,
    /// <p>All of these tags must exist in the object&#39;s tag set in order for the rule to apply.</p>
    pub tags: Option<Vec<Tag>>,
}
/// Parses a `LifecycleRuleAndOperator` XML subtree.
struct LifecycleRuleAndOperatorDeserializer;
impl LifecycleRuleAndOperatorDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<LifecycleRuleAndOperator, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = LifecycleRuleAndOperator::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, ..
})) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Prefix" => {
                        obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
                    }
                    "Tag" => {
                        obj.tags = Some(try!(TagSetDeserializer::deserialize("Tag", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a `LifecycleRuleAndOperator` as an XML element named `name`.
pub struct LifecycleRuleAndOperatorSerializer;
impl LifecycleRuleAndOperatorSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LifecycleRuleAndOperator,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.prefix {
            writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
            // FIX: this write's Result was silently discarded (missing `?`,
            // masked by #[allow(warnings)]); propagate write errors.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.tags {
            // FIX: dropped the useless leading `&` on the unit-returning call.
            TagSetSerializer::serialize(&mut writer, "Tag", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// <p>The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified.</p>
#[derive(Default, Debug, Clone)]
pub struct LifecycleRuleFilter {
    pub and: Option<LifecycleRuleAndOperator>,
    /// <p>Prefix identifying one or more objects to which the rule applies.</p>
    pub prefix: Option<String>,
    /// <p>This tag must exist in the object&#39;s tag set in order for the rule to apply.</p>
    pub tag: Option<Tag>,
}
/// Parses a `LifecycleRuleFilter` XML subtree.
struct LifecycleRuleFilterDeserializer;
impl LifecycleRuleFilterDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<LifecycleRuleFilter, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = LifecycleRuleFilter::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "And" => {
                        obj.and = Some(try!(LifecycleRuleAndOperatorDeserializer::deserialize(
                            "And", stack
                        )));
                    }
                    "Prefix" => {
                        obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
                    }
                    "Tag" => {
                        obj.tag = Some(try!(TagDeserializer::deserialize("Tag", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a `LifecycleRuleFilter` as an XML element named `name`.
pub struct LifecycleRuleFilterSerializer;
impl LifecycleRuleFilterSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LifecycleRuleFilter,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.and {
            // FIX: dropped the useless leading `&` on the unit-returning call.
            LifecycleRuleAndOperatorSerializer::serialize(&mut writer, "And", value)?;
        }
        if let Some(ref value) = obj.prefix {
            writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
            // FIX: propagate the previously-ignored write Result.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.tag {
            TagSerializer::serialize(&mut writer, "Tag", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Parses consecutive sibling `LifecycleRule` elements.
struct LifecycleRulesDeserializer;
impl LifecycleRulesDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<LifecycleRule>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, ..
// Continuation of LifecycleRulesDeserializer::deserialize: consume matching
// sibling elements until a non-matching event is seen.
})) => name.local_name == tag_name,
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(LifecycleRuleDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}
/// Serializes each `LifecycleRule` as a sibling XML element named `name`.
pub struct LifecycleRulesSerializer;
impl LifecycleRulesSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<LifecycleRule>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        for element in obj {
            LifecycleRuleSerializer::serialize(writer, name, element)?;
        }
        Ok(())
    }
}
#[derive(Default, Debug, Clone)]
pub struct ListBucketAnalyticsConfigurationsOutput {
    /// <p>The list of analytics configurations for a bucket.</p>
    pub analytics_configuration_list: Option<Vec<AnalyticsConfiguration>>,
    /// <p>The ContinuationToken that represents where this request began.</p>
    pub continuation_token: Option<String>,
    /// <p>Indicates whether the returned list of analytics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.</p>
    pub is_truncated: Option<bool>,
    /// <p>NextContinuationToken is sent when isTruncated is true, which indicates that there are more analytics configurations to list. The next request must include this NextContinuationToken. The token is obfuscated and is not a usable value.</p>
    pub next_continuation_token: Option<String>,
}
/// Parses a `ListBucketAnalyticsConfigurationsOutput` XML subtree.
struct ListBucketAnalyticsConfigurationsOutputDeserializer;
impl ListBucketAnalyticsConfigurationsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListBucketAnalyticsConfigurationsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListBucketAnalyticsConfigurationsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, ..
})) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "AnalyticsConfiguration" => {
                        obj.analytics_configuration_list =
                            Some(try!(AnalyticsConfigurationListDeserializer::deserialize(
                                "AnalyticsConfiguration",
                                stack
                            )));
                    }
                    "ContinuationToken" => {
                        obj.continuation_token = Some(try!(TokenDeserializer::deserialize(
                            "ContinuationToken",
                            stack
                        )));
                    }
                    "IsTruncated" => {
                        obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
                            "IsTruncated",
                            stack
                        )));
                    }
                    "NextContinuationToken" => {
                        obj.next_continuation_token = Some(try!(
                            NextTokenDeserializer::deserialize("NextContinuationToken", stack)
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct ListBucketAnalyticsConfigurationsRequest {
    /// <p>The name of the bucket from which analytics configurations are retrieved.</p>
    pub bucket: String,
    /// <p>The ContinuationToken that represents a placeholder from where this request should begin.</p>
    pub continuation_token: Option<String>,
}
#[derive(Default, Debug, Clone)]
pub struct ListBucketInventoryConfigurationsOutput {
    /// <p>If sent in the request, the marker that is used as a starting point for this inventory configuration list response.</p>
    pub continuation_token: Option<String>,
    /// <p>The list of inventory configurations for a bucket.</p>
    pub inventory_configuration_list: Option<Vec<InventoryConfiguration>>,
    /// <p>Indicates whether the returned list of inventory configurations is truncated in this response. A value of true indicates that the list is truncated.</p>
    pub is_truncated: Option<bool>,
    /// <p>The marker used to continue this inventory configuration listing. Use the NextContinuationToken from this response to continue the listing in a subsequent request. The continuation token is an opaque value that Amazon S3 understands.</p>
    pub next_continuation_token: Option<String>,
}
/// Parses a `ListBucketInventoryConfigurationsOutput` XML subtree.
struct ListBucketInventoryConfigurationsOutputDeserializer;
impl ListBucketInventoryConfigurationsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListBucketInventoryConfigurationsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListBucketInventoryConfigurationsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "ContinuationToken" => {
                        obj.continuation_token = Some(try!(TokenDeserializer::deserialize(
                            "ContinuationToken",
                            stack
                        )));
                    }
                    "InventoryConfiguration" => {
                        obj.inventory_configuration_list =
                            Some(try!(InventoryConfigurationListDeserializer::deserialize(
                                "InventoryConfiguration",
                                stack
                            )));
                    }
                    "IsTruncated" => {
                        obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
                            "IsTruncated",
                            stack
                        )));
                    }
                    "NextContinuationToken" => {
                        obj.next_continuation_token = Some(try!(
                            NextTokenDeserializer::deserialize("NextContinuationToken", stack)
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct ListBucketInventoryConfigurationsRequest {
    /// <p>The name of the bucket containing the inventory configurations to retrieve.</p>
    pub bucket: String,
    /// <p>The marker used to continue an inventory configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.</p>
    pub continuation_token: Option<String>,
}
#[derive(Default, Debug, Clone)]
pub struct ListBucketMetricsConfigurationsOutput {
    /// <p>The marker that is used as a starting point for this metrics configuration list response. This value is present if it was sent in the request.</p>
    pub continuation_token: Option<String>,
    /// <p>Indicates whether the returned list of metrics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.</p>
    pub is_truncated: Option<bool>,
    /// <p>The list of metrics configurations for a bucket.</p>
    pub metrics_configuration_list: Option<Vec<MetricsConfiguration>>,
    /// <p>The marker used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.</p>
    pub next_continuation_token: Option<String>,
}
/// Parses a `ListBucketMetricsConfigurationsOutput` XML subtree.
struct ListBucketMetricsConfigurationsOutputDeserializer;
impl ListBucketMetricsConfigurationsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListBucketMetricsConfigurationsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListBucketMetricsConfigurationsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "ContinuationToken" => {
                        obj.continuation_token = Some(try!(TokenDeserializer::deserialize(
                            "ContinuationToken",
                            stack
                        )));
                    }
                    "IsTruncated" => {
                        obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
                            "IsTruncated",
                            stack
                        )));
                    }
                    "MetricsConfiguration" => {
                        obj.metrics_configuration_list =
                            Some(try!(MetricsConfigurationListDeserializer::deserialize(
                                "MetricsConfiguration",
                                stack
                            )));
                    }
                    "NextContinuationToken" => {
                        obj.next_continuation_token = Some(try!(
                            NextTokenDeserializer::deserialize("NextContinuationToken", stack)
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct ListBucketMetricsConfigurationsRequest {
    /// <p>The name of the bucket containing the metrics configurations to retrieve.</p>
    pub bucket: String,
    /// <p>The marker that is used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.</p>
    pub continuation_token: Option<String>,
}
#[derive(Default, Debug, Clone)]
pub struct ListBucketsOutput {
    pub buckets: Option<Vec<Bucket>>,
    pub owner: Option<Owner>,
}
/// Parses a `ListBucketsOutput` XML subtree.
struct ListBucketsOutputDeserializer;
impl ListBucketsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListBucketsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListBucketsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
// Continuation of ListBucketsOutputDeserializer::deserialize: dispatch on the
// child element, then consume the closing tag.
{
    "Buckets" => {
        obj.buckets = Some(try!(BucketsDeserializer::deserialize("Buckets", stack)));
    }
    "Owner" => {
        obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack)));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
#[derive(Default, Debug, Clone)]
pub struct ListMultipartUploadsOutput {
    /// <p>Name of the bucket to which the multipart upload was initiated.</p>
    pub bucket: Option<String>,
    pub common_prefixes: Option<Vec<CommonPrefix>>,
    pub delimiter: Option<String>,
    /// <p>Encoding type used by Amazon S3 to encode object keys in the response.</p>
    pub encoding_type: Option<String>,
    /// <p>Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by max uploads.</p>
    pub is_truncated: Option<bool>,
    /// <p>The key at or after which the listing began.</p>
    pub key_marker: Option<String>,
    /// <p>Maximum number of multipart uploads that could have been included in the response.</p>
    pub max_uploads: Option<i64>,
    /// <p>When a list is truncated, this element specifies the value that should be used for the key-marker request parameter in a subsequent request.</p>
    pub next_key_marker: Option<String>,
    /// <p>When a list is truncated, this element specifies the value that should be used for the upload-id-marker request parameter in a subsequent request.</p>
    pub next_upload_id_marker: Option<String>,
    /// <p>When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix.</p>
    pub prefix: Option<String>,
    /// <p>Upload ID after which listing began.</p>
    pub upload_id_marker: Option<String>,
    pub uploads: Option<Vec<MultipartUpload>>,
}
/// Parses a `ListMultipartUploadsOutput` XML subtree.
struct ListMultipartUploadsOutputDeserializer;
impl ListMultipartUploadsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListMultipartUploadsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListMultipartUploadsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Bucket" => {
                        obj.bucket =
                            Some(try!(BucketNameDeserializer::deserialize("Bucket", stack)));
                    }
                    "CommonPrefixes" => {
                        obj.common_prefixes = Some(try!(
                            CommonPrefixListDeserializer::deserialize("CommonPrefixes", stack)
                        ));
                    }
                    "Delimiter" => {
                        obj.delimiter =
                            Some(try!(DelimiterDeserializer::deserialize("Delimiter", stack)));
                    }
                    "EncodingType" => {
                        obj.encoding_type = Some(try!(EncodingTypeDeserializer::deserialize(
                            "EncodingType",
                            stack
                        )));
                    }
                    "IsTruncated" => {
                        obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
                            "IsTruncated",
                            stack
                        )));
                    }
                    "KeyMarker" => {
                        obj.key_marker =
                            Some(try!(KeyMarkerDeserializer::deserialize("KeyMarker", stack)));
                    }
                    "MaxUploads" => {
                        obj.max_uploads = Some(try!(MaxUploadsDeserializer::deserialize(
                            "MaxUploads",
                            stack
                        )));
                    }
                    "NextKeyMarker" => {
                        obj.next_key_marker = Some(try!(NextKeyMarkerDeserializer::deserialize(
                            "NextKeyMarker",
                            stack
                        )));
                    }
                    "NextUploadIdMarker" => {
                        obj.next_upload_id_marker =
                            Some(try!(NextUploadIdMarkerDeserializer::deserialize(
                                "NextUploadIdMarker",
                                stack
                            )));
                    }
                    "Prefix" => {
                        obj.prefix =
// (continuation) remaining arms of ListMultipartUploadsOutputDeserializer.
Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
}
"UploadIdMarker" => {
    obj.upload_id_marker = Some(try!(UploadIdMarkerDeserializer::deserialize(
        "UploadIdMarker",
        stack
    )));
}
"Upload" => {
    // Note: repeated <Upload> elements collect into the `uploads` list.
    obj.uploads = Some(try!(MultipartUploadListDeserializer::deserialize(
        "Upload",
        stack
    )));
}
_ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Input shape for the ListMultipartUploads operation.
#[derive(Default, Debug, Clone)]
pub struct ListMultipartUploadsRequest {
    pub bucket: String,
    /// <p>Character you use to group keys.</p>
    pub delimiter: Option<String>,
    pub encoding_type: Option<String>,
    /// <p>Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.</p>
    pub key_marker: Option<String>,
    /// <p>Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the response body. 1,000 is the maximum number of uploads that can be returned in a response.</p>
    pub max_uploads: Option<i64>,
    /// <p>Lists in-progress uploads only for those keys that begin with the specified prefix.</p>
    pub prefix: Option<String>,
    /// <p>Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored.</p>
    pub upload_id_marker: Option<String>,
}
/// Output shape for the ListObjectVersions operation.
#[derive(Default, Debug, Clone)]
pub struct ListObjectVersionsOutput {
    pub common_prefixes: Option<Vec<CommonPrefix>>,
    pub delete_markers: Option<Vec<DeleteMarkerEntry>>,
    pub delimiter: Option<String>,
    /// <p>Encoding type used by Amazon S3 to encode object keys in the response.</p>
    pub encoding_type: Option<String>,
    /// <p>A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria. If your results were truncated, you can make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker response parameters as a starting place in another request to return the rest of the results.</p>
    pub is_truncated: Option<bool>,
    /// <p>Marks the last Key returned in a truncated response.</p>
    pub key_marker: Option<String>,
    pub max_keys: Option<i64>,
    pub name: Option<String>,
    /// <p>Use this value for the key marker request parameter in a subsequent request.</p>
    pub next_key_marker: Option<String>,
    /// <p>Use this value for the next version id marker parameter in a subsequent request.</p>
    pub next_version_id_marker: Option<String>,
    pub prefix: Option<String>,
    pub version_id_marker: Option<String>,
    pub versions: Option<Vec<ObjectVersion>>,
}
// Streaming XML deserializer for ListObjectVersionsOutput.
struct ListObjectVersionsOutputDeserializer;
impl ListObjectVersionsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListObjectVersionsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListObjectVersionsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
// (continuation) element-dispatch arms of ListObjectVersionsOutputDeserializer.
{
    "CommonPrefixes" => {
        obj.common_prefixes = Some(try!(
            CommonPrefixListDeserializer::deserialize("CommonPrefixes", stack)
        ));
    }
    "DeleteMarker" => {
        obj.delete_markers = Some(try!(DeleteMarkersDeserializer::deserialize(
            "DeleteMarker",
            stack
        )));
    }
    "Delimiter" => {
        obj.delimiter =
            Some(try!(DelimiterDeserializer::deserialize("Delimiter", stack)));
    }
    "EncodingType" => {
        obj.encoding_type = Some(try!(EncodingTypeDeserializer::deserialize(
            "EncodingType",
            stack
        )));
    }
    "IsTruncated" => {
        obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
            "IsTruncated",
            stack
        )));
    }
    "KeyMarker" => {
        obj.key_marker =
            Some(try!(KeyMarkerDeserializer::deserialize("KeyMarker", stack)));
    }
    "MaxKeys" => {
        obj.max_keys = Some(try!(MaxKeysDeserializer::deserialize("MaxKeys", stack)));
    }
    "Name" => {
        obj.name = Some(try!(BucketNameDeserializer::deserialize("Name", stack)));
    }
    "NextKeyMarker" => {
        obj.next_key_marker = Some(try!(NextKeyMarkerDeserializer::deserialize(
            "NextKeyMarker",
            stack
        )));
    }
    "NextVersionIdMarker" => {
        obj.next_version_id_marker =
            Some(try!(NextVersionIdMarkerDeserializer::deserialize(
                "NextVersionIdMarker",
                stack
            )));
    }
    "Prefix" => {
        obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
    }
    "VersionIdMarker" => {
        obj.version_id_marker = Some(try!(
            VersionIdMarkerDeserializer::deserialize("VersionIdMarker", stack)
        ));
    }
    "Version" => {
        obj.versions = Some(try!(ObjectVersionListDeserializer::deserialize(
            "Version",
            stack
        )));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Input shape for the ListObjectVersions operation.
#[derive(Default, Debug, Clone)]
pub struct ListObjectVersionsRequest {
    pub bucket: String,
    /// <p>A delimiter is a character you use to group keys.</p>
    pub delimiter: Option<String>,
    pub encoding_type: Option<String>,
    /// <p>Specifies the key to start with when listing objects in a bucket.</p>
    pub key_marker: Option<String>,
    /// <p>Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.</p>
    pub max_keys: Option<i64>,
    /// <p>Limits the response to keys that begin with the specified prefix.</p>
    pub prefix: Option<String>,
    /// <p>Specifies the object version you want to start listing from.</p>
    pub version_id_marker: Option<String>,
}
/// Output shape for the (v1) ListObjects operation.
#[derive(Default, Debug, Clone)]
pub struct ListObjectsOutput {
    pub common_prefixes: Option<Vec<CommonPrefix>>,
    pub contents: Option<Vec<Object>>,
    pub delimiter: Option<String>,
    /// <p>Encoding type used by Amazon S3 to encode object keys in the response.</p>
    pub encoding_type: Option<String>,
    /// <p>A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.</p>
    pub is_truncated: Option<bool>,
    pub marker: Option<String>,
    pub max_keys: Option<i64>,
    pub name: Option<String>,
    /// <p>When response is truncated (the IsTruncated element value in the response is true), you can use the key name in this field as marker in the subsequent request to get next set of objects. Amazon S3 lists objects in alphabetical order Note: This element is returned only if you have delimiter request parameter specified. If response does not include the NextMaker and it is truncated, you can use the value of the last Key in the response as the marker in the subsequent request to get the next set of object keys.</p>
    pub next_marker: Option<String>,
    pub prefix: Option<String>,
}
// Streaming XML deserializer for ListObjectsOutput.
struct ListObjectsOutputDeserializer;
impl ListObjectsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListObjectsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListObjectsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, ..
// (continuation) peek-classification and dispatch arms of ListObjectsOutputDeserializer.
})) => {
    DeserializerNext::Element(name.local_name.to_owned())
}
_ => DeserializerNext::Skip,
};
match next_event {
    DeserializerNext::Element(name) => match &name[..] {
        "CommonPrefixes" => {
            obj.common_prefixes = Some(try!(
                CommonPrefixListDeserializer::deserialize("CommonPrefixes", stack)
            ));
        }
        "Contents" => {
            obj.contents =
                Some(try!(ObjectListDeserializer::deserialize("Contents", stack)));
        }
        "Delimiter" => {
            obj.delimiter =
                Some(try!(DelimiterDeserializer::deserialize("Delimiter", stack)));
        }
        "EncodingType" => {
            obj.encoding_type = Some(try!(EncodingTypeDeserializer::deserialize(
                "EncodingType",
                stack
            )));
        }
        "IsTruncated" => {
            obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
                "IsTruncated",
                stack
            )));
        }
        "Marker" => {
            obj.marker = Some(try!(MarkerDeserializer::deserialize("Marker", stack)));
        }
        "MaxKeys" => {
            obj.max_keys = Some(try!(MaxKeysDeserializer::deserialize("MaxKeys", stack)));
        }
        "Name" => {
            obj.name = Some(try!(BucketNameDeserializer::deserialize("Name", stack)));
        }
        "NextMarker" => {
            obj.next_marker = Some(try!(NextMarkerDeserializer::deserialize(
                "NextMarker",
                stack
            )));
        }
        "Prefix" => {
            obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
        }
        _ => skip_tree(stack),
    },
    DeserializerNext::Close => break,
    DeserializerNext::Skip => {
        stack.next();
    }
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Input shape for the (v1) ListObjects operation.
#[derive(Default, Debug, Clone)]
pub struct ListObjectsRequest {
    pub bucket: String,
    /// <p>A delimiter is a character you use to group keys.</p>
    pub delimiter: Option<String>,
    pub encoding_type: Option<String>,
    /// <p>Specifies the key to start with when listing objects in a bucket.</p>
    pub marker: Option<String>,
    /// <p>Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.</p>
    pub max_keys: Option<i64>,
    /// <p>Limits the response to keys that begin with the specified prefix.</p>
    pub prefix: Option<String>,
    /// <p>Confirms that the requester knows that she or he will be charged for the list objects request. Bucket owners need not specify this parameter in their requests.</p>
    pub request_payer: Option<String>,
}
/// Output shape for the ListObjectsV2 operation.
#[derive(Default, Debug, Clone)]
pub struct ListObjectsV2Output {
    /// <p>CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by delimiter</p>
    pub common_prefixes: Option<Vec<CommonPrefix>>,
    /// <p>Metadata about each object returned.</p>
    pub contents: Option<Vec<Object>>,
    /// <p>ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key</p>
    pub continuation_token: Option<String>,
    /// <p>A delimiter is a character you use to group keys.</p>
    pub delimiter: Option<String>,
    /// <p>Encoding type used by Amazon S3 to encode object keys in the response.</p>
    pub encoding_type: Option<String>,
    /// <p>A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.</p>
    pub is_truncated: Option<bool>,
    /// <p>KeyCount is the number of keys returned with this request. KeyCount will always be less than equals to MaxKeys field. Say you ask for 50 keys, your result will include less than equals 50 keys</p>
    pub key_count: Option<i64>,
    /// <p>Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.</p>
    pub max_keys: Option<i64>,
    /// <p>Name of the bucket to list.</p>
    pub name: Option<String>,
    /// <p>NextContinuationToken is sent when isTruncated is true which means there are more keys in the bucket that can be listed. The next list requests to Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken is obfuscated and is not a real key</p>
    pub next_continuation_token: Option<String>,
    /// <p>Limits the response to keys that begin with the specified prefix.</p>
    pub prefix: Option<String>,
    /// <p>StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket</p>
    pub start_after: Option<String>,
}
// Streaming XML deserializer for ListObjectsV2Output.
struct ListObjectsV2OutputDeserializer;
impl ListObjectsV2OutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListObjectsV2Output, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListObjectsV2Output::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
// (continuation) element-dispatch arms of ListObjectsV2OutputDeserializer.
{
    "CommonPrefixes" => {
        obj.common_prefixes = Some(try!(
            CommonPrefixListDeserializer::deserialize("CommonPrefixes", stack)
        ));
    }
    "Contents" => {
        obj.contents =
            Some(try!(ObjectListDeserializer::deserialize("Contents", stack)));
    }
    "ContinuationToken" => {
        obj.continuation_token = Some(try!(TokenDeserializer::deserialize(
            "ContinuationToken",
            stack
        )));
    }
    "Delimiter" => {
        obj.delimiter =
            Some(try!(DelimiterDeserializer::deserialize("Delimiter", stack)));
    }
    "EncodingType" => {
        obj.encoding_type = Some(try!(EncodingTypeDeserializer::deserialize(
            "EncodingType",
            stack
        )));
    }
    "IsTruncated" => {
        obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
            "IsTruncated",
            stack
        )));
    }
    "KeyCount" => {
        obj.key_count =
            Some(try!(KeyCountDeserializer::deserialize("KeyCount", stack)));
    }
    "MaxKeys" => {
        obj.max_keys = Some(try!(MaxKeysDeserializer::deserialize("MaxKeys", stack)));
    }
    "Name" => {
        obj.name = Some(try!(BucketNameDeserializer::deserialize("Name", stack)));
    }
    "NextContinuationToken" => {
        obj.next_continuation_token = Some(try!(
            NextTokenDeserializer::deserialize("NextContinuationToken", stack)
        ));
    }
    "Prefix" => {
        obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
    }
    "StartAfter" => {
        obj.start_after = Some(try!(StartAfterDeserializer::deserialize(
            "StartAfter",
            stack
        )));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Input shape for the ListObjectsV2 operation.
#[derive(Default, Debug, Clone)]
pub struct ListObjectsV2Request {
    /// <p>Name of the bucket to list.</p>
    pub bucket: String,
    /// <p>ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key</p>
    pub continuation_token: Option<String>,
    /// <p>A delimiter is a character you use to group keys.</p>
    pub delimiter: Option<String>,
    /// <p>Encoding type used by Amazon S3 to encode object keys in the response.</p>
    pub encoding_type: Option<String>,
    /// <p>The owner field is not present in listV2 by default, if you want to return owner field with each key in the result then set the fetch owner field to true</p>
    pub fetch_owner: Option<bool>,
    /// <p>Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.</p>
    pub max_keys: Option<i64>,
    /// <p>Limits the response to keys that begin with the specified prefix.</p>
    pub prefix: Option<String>,
    /// <p>Confirms that the requester knows that she or he will be charged for the list objects request in V2 style. Bucket owners need not specify this parameter in their requests.</p>
    pub request_payer: Option<String>,
    /// <p>StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket</p>
    pub start_after: Option<String>,
}
/// Output shape for the ListParts operation.
#[derive(Default, Debug, Clone)]
pub struct ListPartsOutput {
    /// <p>Date when multipart upload will become eligible for abort operation by lifecycle.</p>
    pub abort_date: Option<String>,
    /// <p>Id of the lifecycle rule that makes a multipart upload eligible for abort operation.</p>
    pub abort_rule_id: Option<String>,
    /// <p>Name of the bucket to which the multipart upload was initiated.</p>
    pub bucket: Option<String>,
    /// <p>Identifies who initiated the multipart upload.</p>
    pub initiator: Option<Initiator>,
    /// <p>Indicates whether the returned list of parts is truncated.</p>
    pub is_truncated: Option<bool>,
    /// <p>Object key for which the multipart upload was initiated.</p>
    pub key: Option<String>,
    /// <p>Maximum number of parts that were allowed in the response.</p>
    pub max_parts: Option<i64>,
    /// <p>When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request.</p>
    pub next_part_number_marker: Option<i64>,
    pub owner: Option<Owner>,
    /// <p>Part number after which listing begins.</p>
    pub part_number_marker: Option<i64>,
    pub parts: Option<Vec<Part>>,
    pub request_charged: Option<String>,
    /// <p>The class of storage used to store the object.</p>
    pub storage_class: Option<String>,
    /// <p>Upload ID identifying the multipart upload whose parts are being listed.</p>
    pub upload_id: Option<String>,
}
// Streaming XML deserializer for ListPartsOutput.
// NOTE(review): abort_date/abort_rule_id/request_charged have no dispatch arms below;
// presumably they come from HTTP headers rather than the XML body — confirm against caller.
struct ListPartsOutputDeserializer;
impl ListPartsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListPartsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListPartsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, ..
// (continuation) peek-classification and dispatch arms of ListPartsOutputDeserializer.
})) => {
    DeserializerNext::Element(name.local_name.to_owned())
}
_ => DeserializerNext::Skip,
};
match next_event {
    DeserializerNext::Element(name) => match &name[..] {
        "Bucket" => {
            obj.bucket =
                Some(try!(BucketNameDeserializer::deserialize("Bucket", stack)));
        }
        "Initiator" => {
            obj.initiator =
                Some(try!(InitiatorDeserializer::deserialize("Initiator", stack)));
        }
        "IsTruncated" => {
            obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
                "IsTruncated",
                stack
            )));
        }
        "Key" => {
            obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack)));
        }
        "MaxParts" => {
            obj.max_parts =
                Some(try!(MaxPartsDeserializer::deserialize("MaxParts", stack)));
        }
        "NextPartNumberMarker" => {
            obj.next_part_number_marker =
                Some(try!(NextPartNumberMarkerDeserializer::deserialize(
                    "NextPartNumberMarker",
                    stack
                )));
        }
        "Owner" => {
            obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack)));
        }
        "PartNumberMarker" => {
            obj.part_number_marker = Some(try!(
                PartNumberMarkerDeserializer::deserialize("PartNumberMarker", stack)
            ));
        }
        "Part" => {
            obj.parts = Some(try!(PartsDeserializer::deserialize("Part", stack)));
        }
        "StorageClass" => {
            obj.storage_class = Some(try!(StorageClassDeserializer::deserialize(
                "StorageClass",
                stack
            )));
        }
        "UploadId" => {
            obj.upload_id = Some(try!(MultipartUploadIdDeserializer::deserialize(
                "UploadId",
                stack
            )));
        }
        _ => skip_tree(stack),
    },
    DeserializerNext::Close => break,
    DeserializerNext::Skip => {
        stack.next();
    }
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Input shape for the ListParts operation.
#[derive(Default, Debug, Clone)]
pub struct ListPartsRequest {
    pub bucket: String,
    pub key: String,
    /// <p>Sets the maximum number of parts to return.</p>
    pub max_parts: Option<i64>,
    /// <p>Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.</p>
    pub part_number_marker: Option<i64>,
    pub request_payer: Option<String>,
    /// <p>Upload ID identifying the multipart upload whose parts are being listed.</p>
    pub upload_id: String,
}
// Scalar deserializer: reads one text-content element into a String.
struct LocationDeserializer;
impl LocationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Scalar serializer: writes `obj` as the text content of element `name`.
pub struct LocationPrefixSerializer;
impl LocationPrefixSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// <p>Container for logging information. Presence of this element indicates that logging is enabled. Parameters TargetBucket and TargetPrefix are required in this case.</p>
#[derive(Default, Debug, Clone)]
pub struct LoggingEnabled {
    /// <p>Specifies the bucket where you want Amazon S3 to store server access logs. You can have your logs delivered to any bucket that you own, including the same bucket that is being logged. You can also configure multiple buckets to deliver their logs to the same target bucket. In this case you should choose a different TargetPrefix for each source bucket so that the delivered log files can be distinguished by key.</p>
    pub target_bucket: String,
    pub target_grants: Option<Vec<TargetGrant>>,
    /// <p>This element lets you specify a prefix for the keys that the log files will be stored under.</p>
    pub target_prefix: String,
}
// Streaming XML deserializer for LoggingEnabled. TargetBucket/TargetPrefix are
// required fields, so they assign directly rather than wrapping in Some(..).
struct LoggingEnabledDeserializer;
impl LoggingEnabledDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<LoggingEnabled, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = LoggingEnabled::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "TargetBucket" => {
                        obj.target_bucket =
                            try!(TargetBucketDeserializer::deserialize("TargetBucket", stack));
                    }
                    "TargetGrants" => {
                        obj.target_grants = Some(try!(TargetGrantsDeserializer::deserialize(
                            "TargetGrants",
                            stack
                        )));
                    }
                    "TargetPrefix" => {
                        obj.target_prefix =
                            try!(TargetPrefixDeserializer::deserialize("TargetPrefix", stack));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// XML serializer for LoggingEnabled: required fields written unconditionally,
// optional TargetGrants only when present.
pub struct LoggingEnabledSerializer;
impl LoggingEnabledSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LoggingEnabled,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::start_element("TargetBucket"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.target_bucket
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if
// (continuation) tail of LoggingEnabledSerializer::serialize.
let Some(ref value) = obj.target_grants {
    &TargetGrantsSerializer::serialize(&mut writer, "TargetGrants", value)?;
}
writer.write(xml::writer::XmlEvent::start_element("TargetPrefix"))?;
writer.write(xml::writer::XmlEvent::characters(&format!(
    "{value}",
    value = obj.target_prefix
)))?;
writer.write(xml::writer::XmlEvent::end_element())?;
writer.write(xml::writer::XmlEvent::end_element())
}
}
// Scalar serializer: writes an MFA-delete status string as element text.
pub struct MFADeleteSerializer;
impl MFADeleteSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Scalar deserializer: reads one text-content element into a String.
struct MFADeleteStatusDeserializer;
impl MFADeleteStatusDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Scalar deserializer: reads one text-content element into a String.
struct MarkerDeserializer;
impl MarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Scalar serializer: writes `obj` as the text content of element `name`.
pub struct MarkerSerializer;
impl MarkerSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Scalar deserializer: reads one text-content element and parses it as i64.
struct MaxAgeSecondsDeserializer;
impl MaxAgeSecondsDeserializer {
    #[allow(unused_variables)]
    fn
// (continuation) body of MaxAgeSecondsDeserializer::deserialize.
// NOTE(review): `.unwrap()` on the numeric parse panics on malformed XML from the
// service instead of returning XmlParseError — same pattern in every i64 deserializer
// below; fixing it needs an XmlParseError constructor not visible in this chunk.
deserialize<'a, T: Peek + Next>(
    tag_name: &str,
    stack: &mut T,
) -> Result<i64, XmlParseError> {
    try!(start_element(tag_name, stack));
    let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
    try!(end_element(tag_name, stack));
    Ok(obj)
}
}
// Scalar serializer: writes an i64 as element text.
pub struct MaxAgeSecondsSerializer;
impl MaxAgeSecondsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Scalar deserializer: reads one text-content element and parses it as i64.
struct MaxKeysDeserializer;
impl MaxKeysDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Scalar serializer: writes an i64 as element text.
pub struct MaxKeysSerializer;
impl MaxKeysSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Scalar deserializer: reads one text-content element and parses it as i64.
struct MaxPartsDeserializer;
impl MaxPartsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Scalar serializer: writes an i64 as element text.
pub struct MaxPartsSerializer;
impl MaxPartsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(),
// (continuation) tail of MaxPartsSerializer::serialize.
xml::writer::Error>
where
    W: Write,
{
    writer.write(xml::writer::XmlEvent::start_element(name))?;
    writer.write(xml::writer::XmlEvent::characters(&format!(
        "{value}",
        value = obj.to_string()
    )))?;
    writer.write(xml::writer::XmlEvent::end_element())
}
}
// Scalar deserializer: reads one text-content element and parses it as i64.
// NOTE(review): `.unwrap()` panics on malformed numeric content — see note on
// MaxAgeSecondsDeserializer.
struct MaxUploadsDeserializer;
impl MaxUploadsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Scalar serializer: writes an i64 as element text.
pub struct MaxUploadsSerializer;
impl MaxUploadsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Scalar deserializer: reads one text-content element into a String.
struct MessageDeserializer;
impl MessageDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// <p>A metadata key-value pair to store with an object.</p>
#[derive(Default, Debug, Clone)]
pub struct MetadataEntry {
    pub name: Option<String>,
    pub value: Option<String>,
}
// XML serializer for MetadataEntry: emits optional <Name> and <Value> children.
pub struct MetadataEntrySerializer;
impl MetadataEntrySerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &MetadataEntry,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.name {
            writer.write(xml::writer::XmlEvent::start_element("Name"))?;
            // Fix: propagate the write error with `?` — previously the Result of
            // this characters() write was silently dropped (masked by
            // #[allow(..., warnings)]), so a failed write went unnoticed.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.value {
            writer.write(xml::writer::XmlEvent::start_element("Value"))?;
            // Fix: propagate the write error with `?` (same dropped-Result defect).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Scalar serializer: writes a metadata key as element text.
pub struct MetadataKeySerializer;
impl MetadataKeySerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Scalar serializer: writes a metadata value as element text.
pub struct MetadataValueSerializer;
impl MetadataValueSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Conjunction (AND) predicate used inside a metrics configuration filter.
#[derive(Default, Debug, Clone)]
pub struct MetricsAndOperator {
    /// <p>The prefix used when evaluating an AND predicate.</p>
    pub prefix: Option<String>,
    /// <p>The list of tags used when evaluating an AND predicate.</p>
    pub tags: Option<Vec<Tag>>,
}
// Streaming XML deserializer for MetricsAndOperator.
struct MetricsAndOperatorDeserializer;
impl MetricsAndOperatorDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<MetricsAndOperator, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = MetricsAndOperator::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, ..
// (continuation) peek-classification and dispatch arms of MetricsAndOperatorDeserializer.
})) => {
    DeserializerNext::Element(name.local_name.to_owned())
}
_ => DeserializerNext::Skip,
};
match next_event {
    DeserializerNext::Element(name) => match &name[..] {
        "Prefix" => {
            obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
        }
        "Tag" => {
            obj.tags = Some(try!(TagSetDeserializer::deserialize("Tag", stack)));
        }
        _ => skip_tree(stack),
    },
    DeserializerNext::Close => break,
    DeserializerNext::Skip => {
        stack.next();
    }
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
// XML serializer for MetricsAndOperator: optional <Prefix> then optional <Tag> list.
pub struct MetricsAndOperatorSerializer;
impl MetricsAndOperatorSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &MetricsAndOperator,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.prefix {
            writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
            // Fix: propagate the write error with `?` — previously this
            // characters() write's Result was silently dropped (masked by
            // #[allow(..., warnings)]), so a failed write went unnoticed.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.tags {
            &TagSetSerializer::serialize(&mut writer, "Tag", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// A single metrics configuration entry for a bucket.
#[derive(Default, Debug, Clone)]
pub struct MetricsConfiguration {
    /// <p>Specifies a metrics configuration filter. The metrics configuration will only include objects that meet the filter&#39;s criteria. A filter must be a prefix, a tag, or a conjunction (MetricsAndOperator).</p>
    pub filter: Option<MetricsFilter>,
    /// <p>The ID used to identify the metrics configuration.</p>
    pub id: String,
}
// Streaming XML deserializer for MetricsConfiguration. `Id` is required, so it
// assigns directly rather than wrapping in Some(..).
struct MetricsConfigurationDeserializer;
impl MetricsConfigurationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<MetricsConfiguration, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = MetricsConfiguration::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Filter" => {
                        obj.filter = Some(try!(MetricsFilterDeserializer::deserialize(
                            "Filter",
                            stack
                        )));
                    }
                    "Id" => {
                        obj.id = try!(MetricsIdDeserializer::deserialize("Id", stack));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// XML serializer for MetricsConfiguration: optional <Filter>, required <Id>.
pub struct MetricsConfigurationSerializer;
impl MetricsConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &MetricsConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.filter {
            &MetricsFilterSerializer::serialize(&mut writer, "Filter", value)?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Id"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.id
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// (head) MetricsConfigurationListDeserializer — body continues past this chunk.
struct MetricsConfigurationListDeserializer;
impl MetricsConfigurationListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T:
Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<MetricsConfiguration>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(MetricsConfigurationDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct MetricsFilter { /// <p>A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.</p> pub and: Option<MetricsAndOperator>, /// <p>The prefix used when evaluating a metrics filter.</p> pub prefix: Option<String>, /// <p>The tag used when evaluating a metrics filter.</p> pub tag: Option<Tag>, } struct MetricsFilterDeserializer; impl MetricsFilterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<MetricsFilter, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = MetricsFilter::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
                    // <And> holds a conjunction of predicates (prefix + tags).
                    "And" => {
                        obj.and = Some(try!(MetricsAndOperatorDeserializer::deserialize(
                            "And", stack
                        )));
                    }
                    "Prefix" => {
                        obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
                    }
                    "Tag" => {
                        obj.tag = Some(try!(TagDeserializer::deserialize("Tag", stack)));
                    }
                    // Unknown child elements are skipped wholesale so newer
                    // service responses don't break deserialization.
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `MetricsFilter` as the XML element named `name`, emitting
/// an optional <And>, <Prefix> and <Tag> child in that order.
pub struct MetricsFilterSerializer;
impl MetricsFilterSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &MetricsFilter,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.and {
            MetricsAndOperatorSerializer::serialize(&mut writer, "And", value)?;
        }
        if let Some(ref value) = obj.prefix {
            writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
            // FIX: the characters() write previously dropped its Result
            // (missing `?`), silently ignoring writer errors.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.tag {
            TagSerializer::serialize(&mut writer, "Tag", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes the <Id> element of a metrics configuration: plain text
/// content returned as a String.
struct MetricsIdDeserializer;
impl MetricsIdDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a metrics-configuration id string as `<name>…</name>`.
pub struct MetricsIdSerializer;
impl MetricsIdSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default,
Debug, Clone)] pub struct MultipartUpload { /// <p>Date and time at which the multipart upload was initiated.</p> pub initiated: Option<String>, /// <p>Identifies who initiated the multipart upload.</p> pub initiator: Option<Initiator>, /// <p>Key of the object for which the multipart upload was initiated.</p> pub key: Option<String>, pub owner: Option<Owner>, /// <p>The class of storage used to store the object.</p> pub storage_class: Option<String>, /// <p>Upload ID that identifies the multipart upload.</p> pub upload_id: Option<String>, } struct MultipartUploadDeserializer; impl MultipartUploadDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<MultipartUpload, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = MultipartUpload::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Initiated" => { obj.initiated = Some(try!(InitiatedDeserializer::deserialize("Initiated", stack))); } "Initiator" => { obj.initiator = Some(try!(InitiatorDeserializer::deserialize("Initiator", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } "StorageClass" => { obj.storage_class = Some(try!(StorageClassDeserializer::deserialize( "StorageClass", stack ))); } "UploadId" => { obj.upload_id = Some(try!(MultipartUploadIdDeserializer::deserialize( "UploadId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct MultipartUploadIdDeserializer; impl MultipartUploadIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct MultipartUploadIdSerializer; impl MultipartUploadIdSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct MultipartUploadListDeserializer; impl MultipartUploadListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<MultipartUpload>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
// (continuation of MultipartUploadListDeserializer::deserialize)
// A peeked StartElement whose local name equals `tag_name` means one more
// list entry follows; anything else terminates the unwrapped list.
})) => name.local_name == tag_name,
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(MultipartUploadDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}

// Deserializer for the <NextKeyMarker> response element: returns the
// element's character content as a String.
struct NextKeyMarkerDeserializer;
impl NextKeyMarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

// Deserializer for the <NextMarker> response element (String content).
struct NextMarkerDeserializer;
impl NextMarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

// Deserializer for <NextPartNumberMarker>: parses the element text as i64.
struct NextPartNumberMarkerDeserializer;
impl NextPartNumberMarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): `unwrap()` panics on non-numeric content instead of
        // returning an XmlParseError; this relies on the service always
        // emitting a valid integer here — consider mapping the parse error.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

// Deserializer for the <NextToken> pagination element (String content).
struct NextTokenDeserializer;
impl NextTokenDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

// Deserializer for the <NextUploadIdMarker> response element (String).
struct NextUploadIdMarkerDeserializer;
impl NextUploadIdMarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

// Deserializer for <NextVersionIdMarker> (String content); the function
// signature continues on the next source line.
struct NextVersionIdMarkerDeserializer;
impl NextVersionIdMarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) ->
Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object&#39;s lifetime.</p> #[derive(Default, Debug, Clone)] pub struct NoncurrentVersionExpiration { /// <p>Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html">How Amazon S3 Calculates When an Object Became Noncurrent</a> in the Amazon Simple Storage Service Developer Guide.</p> pub noncurrent_days: Option<i64>, } struct NoncurrentVersionExpirationDeserializer; impl NoncurrentVersionExpirationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<NoncurrentVersionExpiration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = NoncurrentVersionExpiration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "NoncurrentDays" => { obj.noncurrent_days = Some(try!(DaysDeserializer::deserialize("NoncurrentDays", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct NoncurrentVersionExpirationSerializer; impl NoncurrentVersionExpirationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &NoncurrentVersionExpiration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.noncurrent_days { writer.write(xml::writer::XmlEvent::start_element("NoncurrentDays"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for the transition rule that describes when noncurrent objects transition to the STANDARD<em>IA, ONEZONE</em>IA or GLACIER storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD<em>IA, ONEZONE</em>IA or GLACIER storage class at a specific period in the object&#39;s lifetime.</p> #[derive(Default, Debug, Clone)] pub struct NoncurrentVersionTransition { /// <p>Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. 
For information about the noncurrent days calculations, see <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html">How Amazon S3 Calculates When an Object Became Noncurrent</a> in the Amazon Simple Storage Service Developer Guide.</p> pub noncurrent_days: Option<i64>, /// <p>The class of storage used to store the object.</p> pub storage_class: Option<String>, } struct NoncurrentVersionTransitionDeserializer; impl NoncurrentVersionTransitionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<NoncurrentVersionTransition, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = NoncurrentVersionTransition::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
                    "NoncurrentDays" => {
                        obj.noncurrent_days =
                            Some(try!(DaysDeserializer::deserialize("NoncurrentDays", stack)));
                    }
                    "StorageClass" => {
                        obj.storage_class = Some(try!(
                            TransitionStorageClassDeserializer::deserialize("StorageClass", stack)
                        ));
                    }
                    // Skip unrecognized children so forward-compatible
                    // responses still parse.
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `NoncurrentVersionTransition` as the XML element named
/// `name`, with optional <NoncurrentDays> and <StorageClass> children.
pub struct NoncurrentVersionTransitionSerializer;
impl NoncurrentVersionTransitionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &NoncurrentVersionTransition,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.noncurrent_days {
            writer.write(xml::writer::XmlEvent::start_element("NoncurrentDays"))?;
            // FIX: this characters() write previously dropped its Result
            // (missing `?`), silently swallowing writer errors.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.storage_class {
            writer.write(xml::writer::XmlEvent::start_element("StorageClass"))?;
            // FIX: same dropped-Result defect as above; `?` added.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

// Deserializes a flattened list of <NoncurrentVersionTransition> elements;
// the loop body continues on the next source line.
struct NoncurrentVersionTransitionListDeserializer;
impl NoncurrentVersionTransitionListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<NoncurrentVersionTransition>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement {
                    ref name, ..
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(NoncurrentVersionTransitionDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } pub struct NoncurrentVersionTransitionListSerializer; impl NoncurrentVersionTransitionListSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<NoncurrentVersionTransition>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { NoncurrentVersionTransitionSerializer::serialize(writer, name, element)?; } Ok(()) } } /// <p>Container for specifying the notification configuration of the bucket. If this element is empty, notifications are turned off on the bucket.</p> #[derive(Default, Debug, Clone)] pub struct NotificationConfiguration { pub lambda_function_configurations: Option<Vec<LambdaFunctionConfiguration>>, pub queue_configurations: Option<Vec<QueueConfiguration>>, pub topic_configurations: Option<Vec<TopicConfiguration>>, } struct NotificationConfigurationDeserializer; impl NotificationConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<NotificationConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = NotificationConfiguration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "CloudFunctionConfiguration" => { obj.lambda_function_configurations = Some(try!( LambdaFunctionConfigurationListDeserializer::deserialize( "CloudFunctionConfiguration", stack ) )); } "QueueConfiguration" => { obj.queue_configurations = Some(try!(QueueConfigurationListDeserializer::deserialize( "QueueConfiguration", stack ))); } "TopicConfiguration" => { obj.topic_configurations = Some(try!(TopicConfigurationListDeserializer::deserialize( "TopicConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct NotificationConfigurationSerializer; impl NotificationConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &NotificationConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.lambda_function_configurations { &LambdaFunctionConfigurationListSerializer::serialize( &mut writer, "CloudFunctionConfiguration", value, )?; } if let Some(ref value) = obj.queue_configurations { &QueueConfigurationListSerializer::serialize(&mut writer, "QueueConfiguration", value)?; } if let Some(ref value) = obj.topic_configurations { &TopicConfigurationListSerializer::serialize(&mut writer, "TopicConfiguration", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct NotificationConfigurationDeprecated { pub cloud_function_configuration: Option<CloudFunctionConfiguration>, pub queue_configuration: Option<QueueConfigurationDeprecated>, pub topic_configuration: Option<TopicConfigurationDeprecated>, } struct NotificationConfigurationDeprecatedDeserializer; impl NotificationConfigurationDeprecatedDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> 
Result<NotificationConfigurationDeprecated, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = NotificationConfigurationDeprecated::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "CloudFunctionConfiguration" => { obj.cloud_function_configuration = Some(try!(CloudFunctionConfigurationDeserializer::deserialize( "CloudFunctionConfiguration", stack ))); } "QueueConfiguration" => { obj.queue_configuration = Some(try!(QueueConfigurationDeprecatedDeserializer::deserialize( "QueueConfiguration", stack ))); } "TopicConfiguration" => { obj.topic_configuration = Some(try!(TopicConfigurationDeprecatedDeserializer::deserialize( "TopicConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct NotificationConfigurationDeprecatedSerializer; impl NotificationConfigurationDeprecatedSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &NotificationConfigurationDeprecated, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.cloud_function_configuration { &CloudFunctionConfigurationSerializer::serialize( &mut writer, "CloudFunctionConfiguration", value, )?; } if let Some(ref value) = obj.queue_configuration { &QueueConfigurationDeprecatedSerializer::serialize( &mut writer, "QueueConfiguration", value, )?; } if let Some(ref value) = obj.topic_configuration { &TopicConfigurationDeprecatedSerializer::serialize( &mut writer, "TopicConfiguration", value, )?; } 
writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for object key name filtering rules. For information about key name filtering, go to <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Configuring Event Notifications</a> in the Amazon Simple Storage Service Developer Guide.</p> #[derive(Default, Debug, Clone)] pub struct NotificationConfigurationFilter { pub key: Option<S3KeyFilter>, } struct NotificationConfigurationFilterDeserializer; impl NotificationConfigurationFilterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<NotificationConfigurationFilter, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = NotificationConfigurationFilter::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "S3Key" => { obj.key = Some(try!(S3KeyFilterDeserializer::deserialize("S3Key", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct NotificationConfigurationFilterSerializer; impl NotificationConfigurationFilterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &NotificationConfigurationFilter, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.key { &S3KeyFilterSerializer::serialize(&mut writer, "S3Key", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct NotificationIdDeserializer; impl NotificationIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct NotificationIdSerializer; impl NotificationIdSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct Object { pub e_tag: Option<String>, pub key: Option<String>, pub last_modified: Option<String>, pub owner: Option<Owner>, pub size: Option<i64>, /// <p>The class of storage used to store the object.</p> pub storage_class: Option<String>, } struct ObjectDeserializer; impl ObjectDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Object, 
XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Object::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "ETag" => { obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "LastModified" => { obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize( "LastModified", stack ))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } "Size" => { obj.size = Some(try!(SizeDeserializer::deserialize("Size", stack))); } "StorageClass" => { obj.storage_class = Some(try!( ObjectStorageClassDeserializer::deserialize("StorageClass", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ObjectCannedACLSerializer; impl ObjectCannedACLSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct ObjectIdentifier { /// <p>Key name of the object to delete.</p> pub key: String, /// <p>VersionId for the specific version of the object to delete.</p> pub version_id: Option<String>, } pub struct ObjectIdentifierSerializer; impl ObjectIdentifierSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut 
EventWriter<W>, name: &str, obj: &ObjectIdentifier, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Key"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.key )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.version_id { writer.write(xml::writer::XmlEvent::start_element("VersionId"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } pub struct ObjectIdentifierListSerializer; impl ObjectIdentifierListSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<ObjectIdentifier>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { ObjectIdentifierSerializer::serialize(writer, name, element)?; } Ok(()) } } struct ObjectKeyDeserializer; impl ObjectKeyDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ObjectKeySerializer; impl ObjectKeySerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ObjectListDeserializer; impl ObjectListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<Object>, XmlParseError> { let mut obj 
= vec![];
        // (continuation of ObjectListDeserializer::deserialize)
        // Keep consuming sibling elements named `tag_name`; the first
        // non-matching event ends the unwrapped list.
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    name.local_name == tag_name
                }
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(ObjectDeserializer::deserialize(tag_name, stack)));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}

// Deserializer for an object's <StorageClass> element (String content).
struct ObjectStorageClassDeserializer;
impl ObjectStorageClassDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

// One version of an S3 object as returned by ListObjectVersions; every
// field is optional because the service may omit any of them.
#[derive(Default, Debug, Clone)]
pub struct ObjectVersion {
    pub e_tag: Option<String>,
    /// <p>Specifies whether the object is (true) or is not (false) the latest version of an object.</p>
    pub is_latest: Option<bool>,
    /// <p>The object key.</p>
    pub key: Option<String>,
    /// <p>Date and time the object was last modified.</p>
    pub last_modified: Option<String>,
    pub owner: Option<Owner>,
    /// <p>Size in bytes of the object.</p>
    pub size: Option<i64>,
    /// <p>The class of storage used to store the object.</p>
    pub storage_class: Option<String>,
    /// <p>Version ID of an object.</p>
    pub version_id: Option<String>,
}

// Deserializer for an <ObjectVersion>-shaped element; the per-field match
// arms continue on the next source line.
struct ObjectVersionDeserializer;
impl ObjectVersionDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ObjectVersion, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ObjectVersion::default();
        loop {
            // Peek to decide whether the next event closes this element,
            // opens a child, or should simply be skipped.
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{ "ETag" => { obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack))); } "IsLatest" => { obj.is_latest = Some(try!(IsLatestDeserializer::deserialize("IsLatest", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "LastModified" => { obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize( "LastModified", stack ))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } "Size" => { obj.size = Some(try!(SizeDeserializer::deserialize("Size", stack))); } "StorageClass" => { obj.storage_class = Some(try!(ObjectVersionStorageClassDeserializer::deserialize( "StorageClass", stack ))); } "VersionId" => { obj.version_id = Some(try!(ObjectVersionIdDeserializer::deserialize( "VersionId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct ObjectVersionIdDeserializer; impl ObjectVersionIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ObjectVersionIdSerializer; impl ObjectVersionIdSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ObjectVersionListDeserializer; impl ObjectVersionListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<ObjectVersion>, XmlParseError> { let mut obj = vec![]; loop { let 
consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(ObjectVersionDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } struct ObjectVersionStorageClassDeserializer; impl ObjectVersionStorageClassDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Describes the location where the restore job&#39;s output is stored.</p> #[derive(Default, Debug, Clone)] pub struct OutputLocation { /// <p>Describes an S3 location that will receive the results of the restore request.</p> pub s3: Option<S3Location>, } pub struct OutputLocationSerializer; impl OutputLocationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &OutputLocation, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.s3 { &S3LocationSerializer::serialize(&mut writer, "S3", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Describes how results of the Select job are serialized.</p> #[derive(Default, Debug, Clone)] pub struct OutputSerialization { /// <p>Describes the serialization of CSV-encoded Select results.</p> pub csv: Option<CSVOutput>, /// <p>Specifies JSON as request&#39;s output serialization format.</p> pub json: Option<JSONOutput>, } pub struct OutputSerializationSerializer; impl OutputSerializationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &OutputSerialization, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let 
Some(ref value) = obj.csv {
            // Cleanup: dropped the stray leading `&` that referenced (and then
            // discarded) the unit value produced by `?`.
            CSVOutputSerializer::serialize(&mut writer, "CSV", value)?;
        }
        if let Some(ref value) = obj.json {
            JSONOutputSerializer::serialize(&mut writer, "JSON", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct Owner {
    pub display_name: Option<String>,
    pub id: Option<String>,
}
// Deserializes an `Owner` element (`DisplayName` and `ID` children),
// skipping unrecognized subtrees.
struct OwnerDeserializer;
impl OwnerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Owner, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Owner::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "DisplayName" => {
                        obj.display_name = Some(try!(DisplayNameDeserializer::deserialize(
                            "DisplayName",
                            stack
                        )));
                    }
                    "ID" => {
                        obj.id = Some(try!(IDDeserializer::deserialize("ID", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Serializes an `Owner` as `<name><DisplayName>…</DisplayName><ID>…</ID></name>`.
pub struct OwnerSerializer;
impl OwnerSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Owner,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.display_name {
            writer.write(xml::writer::XmlEvent::start_element("DisplayName"))?;
            // BUGFIX: this write's Result was previously discarded (the
            // blanket #[allow(warnings)] masked the unused-Result lint), so a
            // writer error here was silently ignored. Propagate it with `?`
            // like every surrounding write.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("ID"))?;
            // BUGFIX: propagate the previously-discarded write error here too.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Deserializes an `OwnerOverride` element into its text content.
struct OwnerOverrideDeserializer;
impl OwnerOverrideDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Serializes an owner-override value as `<name>value</name>`.
pub struct OwnerOverrideSerializer;
impl OwnerOverrideSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct Part {
    /// <p>Entity tag returned when the part was uploaded.</p>
    pub e_tag: Option<String>,
    /// <p>Date and time at which the part was uploaded.</p>
    pub last_modified: Option<String>,
    /// <p>Part number identifying the part. This is a positive integer between 1 and 10,000.</p>
    pub part_number: Option<i64>,
    /// <p>Size of the uploaded part data.</p>
    pub size: Option<i64>,
}
// Deserializes a `Part` element, dispatching on each child tag.
struct PartDeserializer;
impl PartDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Part, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Part::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
                    "ETag" => {
                        obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack)));
                    }
                    "LastModified" => {
                        obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize(
                            "LastModified",
                            stack
                        )));
                    }
                    "PartNumber" => {
                        obj.part_number = Some(try!(PartNumberDeserializer::deserialize(
                            "PartNumber",
                            stack
                        )));
                    }
                    "Size" => {
                        obj.size = Some(try!(SizeDeserializer::deserialize("Size", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Deserializes a `PartNumber` element into an i64.
struct PartNumberDeserializer;
impl PartNumberDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): non-numeric text from the service panics here via
        // unwrap() instead of surfacing an XmlParseError.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Serializes a part number as `<name>value</name>`.
pub struct PartNumberSerializer;
impl PartNumberSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Deserializes a `PartNumberMarker` element into an i64.
struct PartNumberMarkerDeserializer;
impl PartNumberMarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): same panicking unwrap() on parse as PartNumberDeserializer.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Serializes a part-number marker as `<name>value</name>`.
pub struct PartNumberMarkerSerializer;
impl PartNumberMarkerSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Deserializes a flattened run of sibling `Part` elements.
struct PartsDeserializer;
impl PartsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<Part>, XmlParseError> {
        let mut obj = vec![];
        loop {
            // Only consume while the next start tag repeats `tag_name`.
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name,
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(PartDeserializer::deserialize(tag_name, stack)));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}
// Deserializes a `Payer` element into its text content.
struct PayerDeserializer;
impl PayerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Serializes a payer value as `<name>value</name>`.
pub struct PayerSerializer;
impl PayerSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Deserializes a `Permission` element into its text content.
struct PermissionDeserializer;
impl PermissionDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Serializes a permission value as `<name>value</name>`.
pub struct PermissionSerializer;
impl PermissionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Serializes a bucket policy string as `<name>value</name>`.
pub struct PolicySerializer;
impl PolicySerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Deserializes a `Prefix` element into its text content.
struct PrefixDeserializer;
impl PrefixDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Serializes a prefix value as `<name>value</name>`.
pub struct PrefixSerializer;
impl PrefixSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct Progress {
    /// <p>Current number of uncompressed object bytes processed.</p>
    pub bytes_processed: Option<i64>,
    /// <p>Current number of object bytes scanned.</p>
    pub bytes_scanned: Option<i64>,
}
// Deserializes a `Progress` element (`BytesProcessed`/`BytesScanned` children).
struct ProgressDeserializer;
impl ProgressDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Progress, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Progress::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "BytesProcessed" => {
                        obj.bytes_processed = Some(try!(BytesProcessedDeserializer::deserialize(
                            "BytesProcessed",
                            stack
                        )));
                    }
                    "BytesScanned" => {
                        obj.bytes_scanned = Some(try!(BytesScannedDeserializer::deserialize(
                            "BytesScanned",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct ProgressEvent {
    /// <p>The Progress event details.</p>
    pub details: Option<Progress>,
}
// Deserializes a `ProgressEvent` element (optional `Details` child).
struct ProgressEventDeserializer;
impl ProgressEventDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ProgressEvent, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ProgressEvent::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Details" => {
                        obj.details =
                            Some(try!(ProgressDeserializer::deserialize("Details", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Deserializes a `Protocol` element into its text content.
struct ProtocolDeserializer;
impl ProtocolDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Serializes a protocol value as `<name>value</name>`.
pub struct ProtocolSerializer;
impl ProtocolSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketAccelerateConfigurationRequest {
    /// <p>Specifies the Accelerate Configuration you want to set for the bucket.</p>
    pub accelerate_configuration: AccelerateConfiguration,
    /// <p>Name of the bucket for which the accelerate configuration is set.</p>
    pub bucket: String,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketAclRequest {
    /// <p>The canned ACL to apply to the bucket.</p>
    pub acl: Option<String>,
    pub access_control_policy: Option<AccessControlPolicy>,
    pub bucket: String,
    pub content_md5: Option<String>,
    /// <p>Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.</p>
    pub grant_full_control: Option<String>,
    /// <p>Allows grantee to list the objects in the bucket.</p>
    pub grant_read: Option<String>,
    /// <p>Allows grantee to read the bucket ACL.</p>
    pub grant_read_acp: Option<String>,
    /// <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
    pub grant_write:
Option<String>,
    /// <p>Allows grantee to write the ACL for the applicable bucket.</p>
    pub grant_write_acp: Option<String>,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketAnalyticsConfigurationRequest {
    /// <p>The configuration and any analyses for the analytics filter.</p>
    pub analytics_configuration: AnalyticsConfiguration,
    /// <p>The name of the bucket to which an analytics configuration is stored.</p>
    pub bucket: String,
    /// <p>The identifier used to represent an analytics configuration.</p>
    pub id: String,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketCorsRequest {
    pub bucket: String,
    pub cors_configuration: CORSConfiguration,
    pub content_md5: Option<String>,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketEncryptionRequest {
    /// <p>The name of the bucket for which the server-side encryption configuration is set.</p>
    pub bucket: String,
    /// <p>The base64-encoded 128-bit MD5 digest of the server-side encryption configuration.</p>
    pub content_md5: Option<String>,
    pub server_side_encryption_configuration: ServerSideEncryptionConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketInventoryConfigurationRequest {
    /// <p>The name of the bucket where the inventory configuration will be stored.</p>
    pub bucket: String,
    /// <p>The ID used to identify the inventory configuration.</p>
    pub id: String,
    /// <p>Specifies the inventory configuration.</p>
    pub inventory_configuration: InventoryConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketLifecycleConfigurationRequest {
    pub bucket: String,
    pub lifecycle_configuration: Option<BucketLifecycleConfiguration>,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketLifecycleRequest {
    pub bucket: String,
    pub content_md5: Option<String>,
    pub lifecycle_configuration: Option<LifecycleConfiguration>,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketLoggingRequest {
    pub bucket: String,
    pub bucket_logging_status: BucketLoggingStatus,
    pub content_md5: Option<String>,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketMetricsConfigurationRequest {
    /// <p>The name of the bucket for which the metrics configuration is set.</p>
    pub bucket: String,
    /// <p>The ID used to identify the metrics configuration.</p>
    pub id: String,
    /// <p>Specifies the metrics configuration.</p>
    pub metrics_configuration: MetricsConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketNotificationConfigurationRequest {
    pub bucket: String,
    pub notification_configuration: NotificationConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketNotificationRequest {
    pub bucket: String,
    pub content_md5: Option<String>,
    pub notification_configuration: NotificationConfigurationDeprecated,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketPolicyRequest {
    pub bucket: String,
    /// <p>Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future.</p>
    pub confirm_remove_self_bucket_access: Option<bool>,
    pub content_md5: Option<String>,
    /// <p>The bucket policy as a JSON document.</p>
    pub policy: String,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketReplicationRequest {
    pub bucket: String,
    pub content_md5: Option<String>,
    pub replication_configuration: ReplicationConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketRequestPaymentRequest {
    pub bucket: String,
    pub content_md5: Option<String>,
    pub request_payment_configuration: RequestPaymentConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketTaggingRequest {
    pub bucket: String,
    pub content_md5: Option<String>,
    pub tagging: Tagging,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketVersioningRequest {
    pub bucket: String,
    pub content_md5: Option<String>,
    /// <p>The concatenation of the authentication device&#39;s serial number, a space, and the value that is displayed on your authentication device.</p>
    pub mfa: Option<String>,
    pub versioning_configuration: VersioningConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketWebsiteRequest {
    pub bucket: String,
    pub content_md5: Option<String>,
    pub website_configuration: WebsiteConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutObjectAclOutput {
    pub request_charged: Option<String>,
}
// Deserializes a `PutObjectAclOutput`: the body carries no fields, so only
// the start/end tags are consumed and the default value is returned.
struct PutObjectAclOutputDeserializer;
impl PutObjectAclOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<PutObjectAclOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = PutObjectAclOutput::default();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct PutObjectAclRequest {
    /// <p>The canned ACL to apply to the object.</p>
    pub acl: Option<String>,
    pub access_control_policy: Option<AccessControlPolicy>,
    pub bucket: String,
    pub content_md5: Option<String>,
    /// <p>Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.</p>
    pub grant_full_control: Option<String>,
    /// <p>Allows grantee to list the objects in the bucket.</p>
    pub grant_read: Option<String>,
    /// <p>Allows grantee to read the bucket ACL.</p>
    pub grant_read_acp: Option<String>,
    /// <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
    pub grant_write: Option<String>,
    /// <p>Allows grantee to write the ACL for the applicable bucket.</p>
    pub grant_write_acp: Option<String>,
    pub key: String,
    pub request_payer: Option<String>,
    /// <p>VersionId used to reference a specific version of the object.</p>
    pub version_id: Option<String>,
}
#[derive(Default, Debug, Clone)]
pub struct PutObjectOutput {
    /// <p>Entity tag for the uploaded object.</p>
    pub e_tag: Option<String>,
    /// <p>If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.</p>
    pub expiration: Option<String>,
    pub request_charged: Option<String>,
    /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p>
    pub sse_customer_algorithm: Option<String>,
    /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p>
    pub sse_customer_key_md5: Option<String>,
    /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p>
    pub ssekms_key_id: Option<String>,
    /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p>
    pub server_side_encryption: Option<String>,
    /// <p>Version of the object.</p>
    pub version_id: Option<String>,
}
// Deserializes a `PutObjectOutput`: all fields come from response headers, so
// only the start/end tags are consumed and the default value is returned.
struct PutObjectOutputDeserializer;
impl PutObjectOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<PutObjectOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = PutObjectOutput::default();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug)]
pub struct PutObjectRequest {
    /// <p>The canned ACL to apply to the object.</p>
    pub acl: Option<String>,
    /// <p>Object data.</p>
    pub body: Option<StreamingBody>,
    /// <p>Name of the bucket to which the PUT operation was initiated.</p>
    pub bucket: String,
    /// <p>Specifies caching behavior along the request/reply chain.</p>
    pub cache_control: Option<String>,
    /// <p>Specifies presentational information for the object.</p>
    pub content_disposition: Option<String>,
    /// <p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p>
    pub content_encoding: Option<String>,
    /// <p>The language the content is in.</p>
    pub content_language: Option<String>,
    /// <p>Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.</p>
    pub content_length: Option<i64>,
    /// <p>The base64-encoded 128-bit MD5 digest of the part data.</p>
    pub content_md5: Option<String>,
    /// <p>A standard MIME type describing the format of the object data.</p>
    pub content_type: Option<String>,
    /// <p>The date and time at which the object is no longer cacheable.</p>
    pub expires: Option<String>,
    /// <p>Gives the grantee READ, READ<em>ACP, and WRITE</em>ACP permissions on the object.</p>
    pub grant_full_control: Option<String>,
    /// <p>Allows grantee to read the object data and its metadata.</p>
    pub grant_read: Option<String>,
    /// <p>Allows grantee to read the object ACL.</p>
    pub grant_read_acp: Option<String>,
    /// <p>Allows grantee to write the ACL for the applicable object.</p>
    pub grant_write_acp: Option<String>,
    /// <p>Object key for which the PUT operation was initiated.</p>
    pub key: String,
    /// <p>A map of metadata to store with the object in S3.</p>
    pub metadata: Option<::std::collections::HashMap<String, String>>,
    pub request_payer: Option<String>,
    /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p>
    pub sse_customer_algorithm: Option<String>,
    /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.</p>
    pub sse_customer_key: Option<String>,
    /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p>
    pub sse_customer_key_md5: Option<String>,
    /// <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version</p>
    pub ssekms_key_id: Option<String>,
    /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p>
    pub server_side_encryption: Option<String>,
    /// <p>The type of storage to use for the object. Defaults to &#39;STANDARD&#39;.</p>
    pub storage_class: Option<String>,
    /// <p>The tag-set for the object. The tag-set must be encoded as URL Query parameters</p>
    pub tagging: Option<String>,
    /// <p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.</p>
    pub website_redirect_location: Option<String>,
}
#[derive(Default, Debug, Clone)]
pub struct PutObjectTaggingOutput {
    pub version_id: Option<String>,
}
// Deserializes a `PutObjectTaggingOutput`: the body carries no fields, so only
// the start/end tags are consumed and the default value is returned.
struct PutObjectTaggingOutputDeserializer;
impl PutObjectTaggingOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<PutObjectTaggingOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = PutObjectTaggingOutput::default();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct PutObjectTaggingRequest {
    pub bucket: String,
    pub content_md5: Option<String>,
    pub key: String,
    pub tagging: Tagging,
    pub version_id: Option<String>,
}
// Deserializes a `Queue` ARN element into its text content.
struct QueueArnDeserializer;
impl QueueArnDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Serializes a queue ARN as `<name>value</name>`.
pub struct QueueArnSerializer;
impl QueueArnSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// <p>Container for specifying an configuration when you want Amazon S3 to publish events to an Amazon Simple Queue Service (Amazon SQS) queue.</p>
#[derive(Default, Debug, Clone)]
pub struct QueueConfiguration {
    pub events: Vec<String>,
    pub filter: Option<NotificationConfigurationFilter>,
    pub id: Option<String>,
    /// <p>Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects events of specified type.</p>
    pub queue_arn: String,
}
struct
QueueConfigurationDeserializer;
// Deserializes a `QueueConfiguration` element (`Event`*, `Filter`, `Id`,
// `Queue` children), skipping unrecognized subtrees.
impl QueueConfigurationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<QueueConfiguration, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = QueueConfiguration::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Event" => {
                        obj.events = try!(EventListDeserializer::deserialize("Event", stack));
                    }
                    "Filter" => {
                        obj.filter = Some(try!(
                            NotificationConfigurationFilterDeserializer::deserialize(
                                "Filter", stack
                            )
                        ));
                    }
                    "Id" => {
                        obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack)));
                    }
                    "Queue" => {
                        obj.queue_arn = try!(QueueArnDeserializer::deserialize("Queue", stack));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Serializes a `QueueConfiguration` with its `Event`, optional `Filter`/`Id`,
// and mandatory `Queue` children.
pub struct QueueConfigurationSerializer;
impl QueueConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &QueueConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        EventListSerializer::serialize(&mut writer, "Event", &obj.events)?;
        if let Some(ref value) = obj.filter {
            // Cleanup: dropped the stray leading `&` that referenced (and then
            // discarded) the unit value produced by `?`.
            NotificationConfigurationFilterSerializer::serialize(&mut writer, "Filter", value)?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("Id"))?;
            // BUGFIX: this write's Result was previously discarded (the
            // blanket #[allow(warnings)] masked the unused-Result lint), so a
            // writer error here was silently ignored. Propagate it with `?`.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Queue"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.queue_arn
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct QueueConfigurationDeprecated {
    pub events: Option<Vec<String>>,
    pub id: Option<String>,
    pub queue: Option<String>,
}
// Deserializes the legacy `QueueConfiguration` shape (all fields optional).
struct QueueConfigurationDeprecatedDeserializer;
impl QueueConfigurationDeprecatedDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<QueueConfigurationDeprecated, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = QueueConfigurationDeprecated::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{ "Event" => { obj.events = Some(try!(EventListDeserializer::deserialize("Event", stack))); } "Id" => { obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack))); } "Queue" => { obj.queue = Some(try!(QueueArnDeserializer::deserialize("Queue", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct QueueConfigurationDeprecatedSerializer; impl QueueConfigurationDeprecatedSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &QueueConfigurationDeprecated, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.events { &EventListSerializer::serialize(&mut writer, "Event", value)?; } if let Some(ref value) = obj.id { writer.write(xml::writer::XmlEvent::start_element("Id"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.queue { writer.write(xml::writer::XmlEvent::start_element("Queue"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct QueueConfigurationListDeserializer; impl QueueConfigurationListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<QueueConfiguration>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
                })) => name.local_name == tag_name,
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(QueueConfigurationDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                // The next start tag belongs to a sibling element: stop here.
                break;
            }
        }
        Ok(obj)
    }
}

pub struct QueueConfigurationListSerializer;
impl QueueConfigurationListSerializer {
    /// Serializes a flattened list: each element is written under the same
    /// `name` tag with no wrapper element, as the S3 schema requires.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<QueueConfiguration>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        for element in obj {
            QueueConfigurationSerializer::serialize(writer, name, element)?;
        }
        Ok(())
    }
}

pub struct QuietSerializer;
impl QuietSerializer {
    /// Writes a boolean as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &bool,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct QuoteCharacterSerializer;
impl QuoteCharacterSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct QuoteEscapeCharacterSerializer;
impl QuoteEscapeCharacterSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct QuoteFieldsSerializer;
impl QuoteFieldsSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct RecordDelimiterSerializer;
impl RecordDelimiterSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct RecordsEvent {
    /// <p>The byte array of partial, one or more result records.</p>
    pub payload: Option<Vec<u8>>,
}

struct RecordsEventDeserializer;
impl RecordsEventDeserializer {
    /// Reads one `<RecordsEvent>` element from the XML event stream.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<RecordsEvent, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = RecordsEvent::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{ "Payload" => { obj.payload = Some(try!(BodyDeserializer::deserialize("Payload", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct Redirect { /// <p>The host name to use in the redirect request.</p> pub host_name: Option<String>, /// <p>The HTTP redirect code to use on the response. Not required if one of the siblings is present.</p> pub http_redirect_code: Option<String>, /// <p>Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request.</p> pub protocol: Option<String>, /// <p>The object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with KeyPrefixEquals set to docs/ and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of the siblings is present. Can be present only if ReplaceKeyWith is not provided.</p> pub replace_key_prefix_with: Option<String>, /// <p>The specific object key to use in the redirect request. For example, redirect request to error.html. Not required if one of the sibling is present. Can be present only if ReplaceKeyPrefixWith is not provided.</p> pub replace_key_with: Option<String>, } struct RedirectDeserializer; impl RedirectDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Redirect, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Redirect::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "HostName" => { obj.host_name = Some(try!(HostNameDeserializer::deserialize("HostName", stack))); } "HttpRedirectCode" => { obj.http_redirect_code = Some(try!( HttpRedirectCodeDeserializer::deserialize("HttpRedirectCode", stack) )); } "Protocol" => { obj.protocol = Some(try!(ProtocolDeserializer::deserialize("Protocol", stack))); } "ReplaceKeyPrefixWith" => { obj.replace_key_prefix_with = Some(try!(ReplaceKeyPrefixWithDeserializer::deserialize( "ReplaceKeyPrefixWith", stack ))); } "ReplaceKeyWith" => { obj.replace_key_with = Some(try!(ReplaceKeyWithDeserializer::deserialize( "ReplaceKeyWith", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct RedirectSerializer; impl RedirectSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Redirect, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.host_name { writer.write(xml::writer::XmlEvent::start_element("HostName"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.http_redirect_code { writer.write(xml::writer::XmlEvent::start_element("HttpRedirectCode"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.protocol { writer.write(xml::writer::XmlEvent::start_element("Protocol"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let 
Some(ref value) = obj.replace_key_prefix_with { writer.write(xml::writer::XmlEvent::start_element("ReplaceKeyPrefixWith"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.replace_key_with { writer.write(xml::writer::XmlEvent::start_element("ReplaceKeyWith"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct RedirectAllRequestsTo { /// <p>Name of the host where requests will be redirected.</p> pub host_name: String, /// <p>Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request.</p> pub protocol: Option<String>, } struct RedirectAllRequestsToDeserializer; impl RedirectAllRequestsToDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<RedirectAllRequestsTo, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = RedirectAllRequestsTo::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "HostName" => { obj.host_name = try!(HostNameDeserializer::deserialize("HostName", stack)); } "Protocol" => { obj.protocol = Some(try!(ProtocolDeserializer::deserialize("Protocol", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct RedirectAllRequestsToSerializer; impl RedirectAllRequestsToSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &RedirectAllRequestsTo, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("HostName"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.host_name )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.protocol { writer.write(xml::writer::XmlEvent::start_element("Protocol"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct ReplaceKeyPrefixWithDeserializer; impl ReplaceKeyPrefixWithDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ReplaceKeyPrefixWithSerializer; impl ReplaceKeyPrefixWithSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct 
ReplaceKeyWithDeserializer;
impl ReplaceKeyWithDeserializer {
    /// Reads a simple string-valued element named `tag_name`.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct ReplaceKeyWithSerializer;
impl ReplaceKeyWithSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct ReplicaKmsKeyIDDeserializer;
impl ReplicaKmsKeyIDDeserializer {
    /// Reads a simple string-valued element named `tag_name`.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct ReplicaKmsKeyIDSerializer;
impl ReplicaKmsKeyIDSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// <p>Container for replication rules. You can add as many as 1,000 rules. Total replication configuration size can be up to 2 MB.</p>
#[derive(Default, Debug, Clone)]
pub struct ReplicationConfiguration {
    /// <p>Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating the objects.</p>
    pub role: String,
    /// <p>Container for information about a particular replication rule. Replication configuration must have at least one rule and can contain up to 1,000 rules.</p>
    pub rules: Vec<ReplicationRule>,
}

struct ReplicationConfigurationDeserializer;
impl ReplicationConfigurationDeserializer {
    /// Reads one `<ReplicationConfiguration>` element from the XML event stream.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ReplicationConfiguration, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ReplicationConfiguration::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Role" => {
                        obj.role = try!(RoleDeserializer::deserialize("Role", stack));
                    }
                    "Rule" => {
                        // Flattened list: all sibling <Rule> elements are collected.
                        obj.rules = try!(ReplicationRulesDeserializer::deserialize("Rule", stack));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct ReplicationConfigurationSerializer;
impl ReplicationConfigurationSerializer {
    /// Writes `obj` as an XML element named `name`; both fields are required.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &ReplicationConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::start_element("Role"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.role
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        ReplicationRulesSerializer::serialize(&mut writer, "Rule", &obj.rules)?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// <p>Container for information about a particular replication rule.</p>
#[derive(Default, Debug, Clone)]
pub struct ReplicationRule {
    /// <p>Container for replication destination information.</p>
    pub
destination: Destination, /// <p>Unique identifier for the rule. The value cannot be longer than 255 characters.</p> pub id: Option<String>, /// <p>Object keyname prefix identifying one or more objects to which the rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes are not supported.</p> pub prefix: String, /// <p>Container for filters that define which source objects should be replicated.</p> pub source_selection_criteria: Option<SourceSelectionCriteria>, /// <p>The rule is ignored if status is not Enabled.</p> pub status: String, } struct ReplicationRuleDeserializer; impl ReplicationRuleDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ReplicationRule, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ReplicationRule::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Destination" => { obj.destination = try!(DestinationDeserializer::deserialize("Destination", stack)); } "ID" => { obj.id = Some(try!(IDDeserializer::deserialize("ID", stack))); } "Prefix" => { obj.prefix = try!(PrefixDeserializer::deserialize("Prefix", stack)); } "SourceSelectionCriteria" => { obj.source_selection_criteria = Some(try!(SourceSelectionCriteriaDeserializer::deserialize( "SourceSelectionCriteria", stack ))); } "Status" => { obj.status = try!(ReplicationRuleStatusDeserializer::deserialize( "Status", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ReplicationRuleSerializer; impl ReplicationRuleSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &ReplicationRule, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; DestinationSerializer::serialize(&mut writer, "Destination", &obj.destination)?; if let Some(ref value) = obj.id { writer.write(xml::writer::XmlEvent::start_element("ID"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::start_element("Prefix"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.prefix )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.source_selection_criteria { &SourceSelectionCriteriaSerializer::serialize( &mut writer, "SourceSelectionCriteria", value, )?; } writer.write(xml::writer::XmlEvent::start_element("Status"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.status )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ReplicationRuleStatusDeserializer; impl 
ReplicationRuleStatusDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ReplicationRuleStatusSerializer; impl ReplicationRuleStatusSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ReplicationRulesDeserializer; impl ReplicationRulesDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<ReplicationRule>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(ReplicationRuleDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } pub struct ReplicationRulesSerializer; impl ReplicationRulesSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<ReplicationRule>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { ReplicationRuleSerializer::serialize(writer, name, element)?; } Ok(()) } } #[derive(Default, Debug, Clone)] pub struct RequestPaymentConfiguration { /// <p>Specifies who pays for the download and request fees.</p> pub payer: String, } pub struct RequestPaymentConfigurationSerializer; impl RequestPaymentConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &RequestPaymentConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Payer"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.payer )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct RequestProgress { /// <p>Specifies whether periodic QueryProgress frames should be sent. Valid values: TRUE, FALSE. 
Default value: FALSE.</p> pub enabled: Option<bool>, } pub struct RequestProgressSerializer; impl RequestProgressSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &RequestProgress, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.enabled { writer.write(xml::writer::XmlEvent::start_element("Enabled"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } pub struct ResponseCacheControlSerializer; impl ResponseCacheControlSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct ResponseContentDispositionSerializer; impl ResponseContentDispositionSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct ResponseContentEncodingSerializer; impl ResponseContentEncodingSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = 
obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct ResponseContentLanguageSerializer; impl ResponseContentLanguageSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct ResponseContentTypeSerializer; impl ResponseContentTypeSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct ResponseExpiresSerializer; impl ResponseExpiresSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct RestoreObjectOutput { pub request_charged: Option<String>, /// <p>Indicates the path in the provided S3 output location where Select results will be restored to.</p> pub restore_output_path: Option<String>, } struct RestoreObjectOutputDeserializer; impl RestoreObjectOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<RestoreObjectOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = 
RestoreObjectOutput::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct RestoreObjectRequest { pub bucket: String, pub key: String, pub request_payer: Option<String>, pub restore_request: Option<RestoreRequest>, pub version_id: Option<String>, } /// <p>Container for restore job parameters.</p> #[derive(Default, Debug, Clone)] pub struct RestoreRequest { /// <p>Lifetime of the active copy in days. Do not use with restores that specify OutputLocation.</p> pub days: Option<i64>, /// <p>The optional description for the job.</p> pub description: Option<String>, /// <p>Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation.</p> pub glacier_job_parameters: Option<GlacierJobParameters>, /// <p>Describes the location where the restore job&#39;s output is stored.</p> pub output_location: Option<OutputLocation>, /// <p>Describes the parameters for Select job types.</p> pub select_parameters: Option<SelectParameters>, /// <p>Glacier retrieval tier at which the restore will be processed.</p> pub tier: Option<String>, /// <p>Type of restore request.</p> pub type_: Option<String>, } pub struct RestoreRequestSerializer; impl RestoreRequestSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &RestoreRequest, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.days { writer.write(xml::writer::XmlEvent::start_element("Days"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.description { writer.write(xml::writer::XmlEvent::start_element("Description"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let 
Some(ref value) = obj.glacier_job_parameters { &GlacierJobParametersSerializer::serialize(&mut writer, "GlacierJobParameters", value)?; } if let Some(ref value) = obj.output_location { &OutputLocationSerializer::serialize(&mut writer, "OutputLocation", value)?; } if let Some(ref value) = obj.select_parameters { &SelectParametersSerializer::serialize(&mut writer, "SelectParameters", value)?; } if let Some(ref value) = obj.tier { writer.write(xml::writer::XmlEvent::start_element("Tier"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.type_ { writer.write(xml::writer::XmlEvent::start_element("Type"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } pub struct RestoreRequestTypeSerializer; impl RestoreRequestTypeSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct RoleDeserializer; impl RoleDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct RoleSerializer; impl RoleSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; 
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct RoutingRule {
    /// <p>A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error.</p>
    pub condition: Option<Condition>,
    /// <p>Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can specify a different error code to return.</p>
    pub redirect: Redirect,
}

struct RoutingRuleDeserializer;
impl RoutingRuleDeserializer {
    /// Reads one `<RoutingRule>` element from the XML event stream.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<RoutingRule, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = RoutingRule::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Condition" => {
                        obj.condition =
                            Some(try!(ConditionDeserializer::deserialize("Condition", stack)));
                    }
                    "Redirect" => {
                        obj.redirect = try!(RedirectDeserializer::deserialize("Redirect", stack));
                    }
                    // Unknown children are skipped so schema additions don't break parsing.
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct RoutingRuleSerializer;
impl RoutingRuleSerializer {
    /// Writes `obj` as an XML element named `name`. `Redirect` is required;
    /// `Condition` is emitted only when present.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &RoutingRule,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.condition {
            &ConditionSerializer::serialize(&mut writer, "Condition", value)?;
        }
        RedirectSerializer::serialize(&mut writer, "Redirect", &obj.redirect)?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct RoutingRulesDeserializer;
impl RoutingRulesDeserializer {
    /// Reads a wrapped list: consumes the `tag_name` wrapper element and
    /// collects every `<RoutingRule>` child inside it.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<RoutingRule>, XmlParseError> {
        let mut obj = vec![];
        try!(start_element(tag_name, stack));
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => {
                    if name == "RoutingRule" {
                        obj.push(try!(RoutingRuleDeserializer::deserialize(
                            "RoutingRule",
                            stack
                        )));
                    } else {
                        skip_tree(stack);
                    }
                }
                DeserializerNext::Close => {
                    try!(end_element(tag_name, stack));
                    break;
                }
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        Ok(obj)
    }
}

pub struct RoutingRulesSerializer;
impl RoutingRulesSerializer {
    /// Serializes a wrapped list: a `name` wrapper element containing one
    /// `<RoutingRule>` child per entry.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<RoutingRule>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        for element in obj {
            RoutingRuleSerializer::serialize(writer, "RoutingRule", element)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())?;
        Ok(())
    }
}

#[derive(Default, Debug, Clone)]
pub struct Rule {
    pub abort_incomplete_multipart_upload: Option<AbortIncompleteMultipartUpload>,
    pub expiration: Option<LifecycleExpiration>,
    /// <p>Unique identifier for the rule. The value cannot be longer than 255 characters.</p>
    pub id: Option<String>,
    pub noncurrent_version_expiration: Option<NoncurrentVersionExpiration>,
    pub noncurrent_version_transition: Option<NoncurrentVersionTransition>,
    /// <p>Prefix identifying one or more objects to which the rule applies.</p>
    pub prefix: String,
    /// <p>If &#39;Enabled&#39;, the rule is currently being applied. If &#39;Disabled&#39;, the rule is not currently being applied.</p>
    pub status: String,
    pub transition: Option<Transition>,
}

struct RuleDeserializer;
impl RuleDeserializer {
    /// Reads one lifecycle `<Rule>` element from the XML event stream.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Rule, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Rule::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement {
                    ref name, ..
})) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "AbortIncompleteMultipartUpload" => { obj.abort_incomplete_multipart_upload = Some(try!( AbortIncompleteMultipartUploadDeserializer::deserialize( "AbortIncompleteMultipartUpload", stack ) )); } "Expiration" => { obj.expiration = Some(try!(LifecycleExpirationDeserializer::deserialize( "Expiration", stack ))); } "ID" => { obj.id = Some(try!(IDDeserializer::deserialize("ID", stack))); } "NoncurrentVersionExpiration" => { obj.noncurrent_version_expiration = Some(try!(NoncurrentVersionExpirationDeserializer::deserialize( "NoncurrentVersionExpiration", stack ))); } "NoncurrentVersionTransition" => { obj.noncurrent_version_transition = Some(try!(NoncurrentVersionTransitionDeserializer::deserialize( "NoncurrentVersionTransition", stack ))); } "Prefix" => { obj.prefix = try!(PrefixDeserializer::deserialize("Prefix", stack)); } "Status" => { obj.status = try!(ExpirationStatusDeserializer::deserialize("Status", stack)); } "Transition" => { obj.transition = Some(try!(TransitionDeserializer::deserialize( "Transition", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct RuleSerializer; impl RuleSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Rule, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.abort_incomplete_multipart_upload { &AbortIncompleteMultipartUploadSerializer::serialize( &mut writer, "AbortIncompleteMultipartUpload", value, )?; } if let Some(ref value) = obj.expiration { &LifecycleExpirationSerializer::serialize(&mut writer, 
"Expiration", value)?; } if let Some(ref value) = obj.id { writer.write(xml::writer::XmlEvent::start_element("ID"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.noncurrent_version_expiration { &NoncurrentVersionExpirationSerializer::serialize( &mut writer, "NoncurrentVersionExpiration", value, )?; } if let Some(ref value) = obj.noncurrent_version_transition { &NoncurrentVersionTransitionSerializer::serialize( &mut writer, "NoncurrentVersionTransition", value, )?; } writer.write(xml::writer::XmlEvent::start_element("Prefix"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.prefix )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::start_element("Status"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.status )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.transition { &TransitionSerializer::serialize(&mut writer, "Transition", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct RulesDeserializer; impl RulesDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<Rule>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(RuleDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } pub struct RulesSerializer; impl RulesSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<Rule>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { RuleSerializer::serialize(writer, name, element)?; } Ok(()) } } /// <p>Container for object key name prefix and suffix filtering rules.</p> #[derive(Default, Debug, Clone)] pub struct S3KeyFilter { pub filter_rules: Option<Vec<FilterRule>>, } struct S3KeyFilterDeserializer; impl S3KeyFilterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<S3KeyFilter, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = S3KeyFilter::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "FilterRule" => { obj.filter_rules = Some(try!(FilterRuleListDeserializer::deserialize( "FilterRule", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct S3KeyFilterSerializer; impl S3KeyFilterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &S3KeyFilter, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.filter_rules { &FilterRuleListSerializer::serialize(&mut writer, "FilterRule", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Describes an S3 location that will receive the results of the restore request.</p> #[derive(Default, Debug, Clone)] pub struct S3Location { /// <p>A list of grants that control access to the staged results.</p> pub access_control_list: Option<Vec<Grant>>, /// <p>The name of the bucket where the restore results will be placed.</p> pub bucket_name: String, /// <p>The canned ACL to apply to the restore results.</p> pub canned_acl: Option<String>, pub encryption: Option<Encryption>, /// <p>The prefix that is prepended to the restore results for this request.</p> pub prefix: String, /// <p>The class of storage used to store the restore results.</p> pub storage_class: Option<String>, /// <p>The tag-set that is applied to the restore results.</p> pub tagging: Option<Tagging>, /// <p>A list of metadata to store with the restore results in S3.</p> pub user_metadata: Option<Vec<MetadataEntry>>, } pub struct S3LocationSerializer; impl S3LocationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &S3Location, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.access_control_list { 
&GrantsSerializer::serialize(&mut writer, "AccessControlList", value)?; } writer.write(xml::writer::XmlEvent::start_element("BucketName"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.bucket_name )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.canned_acl { writer.write(xml::writer::XmlEvent::start_element("CannedACL"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.encryption { &EncryptionSerializer::serialize(&mut writer, "Encryption", value)?; } writer.write(xml::writer::XmlEvent::start_element("Prefix"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.prefix )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.storage_class { writer.write(xml::writer::XmlEvent::start_element("StorageClass"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.tagging { &TaggingSerializer::serialize(&mut writer, "Tagging", value)?; } if let Some(ref value) = obj.user_metadata { &UserMetadataSerializer::serialize(&mut writer, "UserMetadata", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Specifies the use of SSE-KMS to encrypt delievered Inventory reports.</p> #[derive(Default, Debug, Clone)] pub struct SSEKMS { /// <p>Specifies the ID of the AWS Key Management Service (KMS) master encryption key to use for encrypting Inventory reports.</p> pub key_id: String, } struct SSEKMSDeserializer; impl SSEKMSDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<SSEKMS, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = SSEKMS::default(); loop { let next_event = match stack.peek() { 
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "KeyId" => {
                        obj.key_id = try!(SSEKMSKeyIdDeserializer::deserialize("KeyId", stack));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Writes an SSEKMS configuration: a single mandatory <KeyId> child.
pub struct SSEKMSSerializer;
impl SSEKMSSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &SSEKMS,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::start_element("KeyId"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.key_id
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// Reads the text content of a KeyId leaf element.
struct SSEKMSKeyIdDeserializer;
impl SSEKMSKeyIdDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Writes a KMS key id string as a leaf element named `name`.
pub struct SSEKMSKeyIdSerializer;
impl SSEKMSKeyIdSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// <p>Specifies the use of SSE-S3 to encrypt delivered Inventory reports.</p>
#[derive(Default, Debug, Clone)]
pub struct SSES3 {}
// SSE-S3 carries no fields; deserializing just consumes the element pair.
struct SSES3Deserializer;
impl SSES3Deserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<SSES3, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = SSES3::default();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
// Writes an empty SSE-S3 element.
pub struct SSES3Serializer;
impl SSES3Serializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &SSES3,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
// One parsed chunk of a SelectObjectContent response stream; a field is set
// when the corresponding event element appeared in the payload.
#[derive(Default, Debug, Clone)]
pub struct SelectObjectContentEventStream {
    /// <p>The Continuation Event.</p>
    pub cont: Option<ContinuationEvent>,
    /// <p>The End Event.</p>
    pub end: Option<EndEvent>,
    /// <p>The Progress Event.</p>
    pub progress: Option<ProgressEvent>,
    /// <p>The Records Event.</p>
    pub records: Option<RecordsEvent>,
    /// <p>The Stats Event.</p>
    pub stats: Option<StatsEvent>,
}
// Event-driven deserializer for the SelectObjectContent event stream payload.
struct SelectObjectContentEventStreamDeserializer;
impl SelectObjectContentEventStreamDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<SelectObjectContentEventStream, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = SelectObjectContentEventStream::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{ "Cont" => { obj.cont = Some(try!(ContinuationEventDeserializer::deserialize( "Cont", stack ))); } "End" => { obj.end = Some(try!(EndEventDeserializer::deserialize("End", stack))); } "Progress" => { obj.progress = Some(try!(ProgressEventDeserializer::deserialize( "Progress", stack ))); } "Records" => { obj.records = Some(try!(RecordsEventDeserializer::deserialize( "Records", stack ))); } "Stats" => { obj.stats = Some(try!(StatsEventDeserializer::deserialize("Stats", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct SelectObjectContentOutput { pub payload: Option<SelectObjectContentEventStream>, } struct SelectObjectContentOutputDeserializer; impl SelectObjectContentOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<SelectObjectContentOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = SelectObjectContentOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Payload" => { obj.payload = Some(try!( SelectObjectContentEventStreamDeserializer::deserialize( "Payload", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Request to filter the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. 
Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. For more information, go to <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html">S3Select API Documentation</a>.</p> #[derive(Default, Debug, Clone)] pub struct SelectObjectContentRequest { /// <p>The S3 Bucket.</p> pub bucket: String, /// <p>The expression that is used to query the object.</p> pub expression: String, /// <p>The type of the provided expression (e.g., SQL).</p> pub expression_type: String, /// <p>Describes the format of the data in the object that is being queried.</p> pub input_serialization: InputSerialization, /// <p>The Object Key.</p> pub key: String, /// <p>Describes the format of the data that you want Amazon S3 to return in response.</p> pub output_serialization: OutputSerialization, /// <p>Specifies if periodic request progress information should be enabled.</p> pub request_progress: Option<RequestProgress>, /// <p>The SSE Algorithm used to encrypt the object. For more information, go to <a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html"> Server-Side Encryption (Using Customer-Provided Encryption Keys</a>.</p> pub sse_customer_algorithm: Option<String>, /// <p>The SSE Customer Key. For more information, go to <a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html"> Server-Side Encryption (Using Customer-Provided Encryption Keys</a>.</p> pub sse_customer_key: Option<String>, /// <p>The SSE Customer Key MD5. 
For more information, go to <a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html"> Server-Side Encryption (Using Customer-Provided Encryption Keys</a>.</p> pub sse_customer_key_md5: Option<String>, } pub struct SelectObjectContentRequestSerializer; impl SelectObjectContentRequestSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &SelectObjectContentRequest, xmlns: &str, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name).default_ns(xmlns))?; ExpressionSerializer::serialize(&mut writer, "Expression", &obj.expression)?; ExpressionTypeSerializer::serialize(&mut writer, "ExpressionType", &obj.expression_type)?; InputSerializationSerializer::serialize( &mut writer, "InputSerialization", &obj.input_serialization, )?; OutputSerializationSerializer::serialize( &mut writer, "OutputSerialization", &obj.output_serialization, )?; if let Some(ref value) = obj.request_progress { &RequestProgressSerializer::serialize(&mut writer, "RequestProgress", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Describes the parameters for Select job types.</p> #[derive(Default, Debug, Clone)] pub struct SelectParameters { /// <p>The expression that is used to query the object.</p> pub expression: String, /// <p>The type of the provided expression (e.g., SQL).</p> pub expression_type: String, /// <p>Describes the serialization format of the object.</p> pub input_serialization: InputSerialization, /// <p>Describes how the results of the Select job are serialized.</p> pub output_serialization: OutputSerialization, } pub struct SelectParametersSerializer; impl SelectParametersSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &SelectParameters, ) -> Result<(), xml::writer::Error> where W: Write, { 
writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Expression"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.expression )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::start_element("ExpressionType"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.expression_type )))?; writer.write(xml::writer::XmlEvent::end_element())?; InputSerializationSerializer::serialize( &mut writer, "InputSerialization", &obj.input_serialization, )?; OutputSerializationSerializer::serialize( &mut writer, "OutputSerialization", &obj.output_serialization, )?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ServerSideEncryptionDeserializer; impl ServerSideEncryptionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ServerSideEncryptionSerializer; impl ServerSideEncryptionSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Describes the default server-side encryption to apply to new objects in the bucket. If Put Object request does not specify any server-side encryption, this default encryption will be applied.</p> #[derive(Default, Debug, Clone)] pub struct ServerSideEncryptionByDefault { /// <p>KMS master key ID to use for the default encryption. 
This parameter is allowed if SSEAlgorithm is aws:kms.</p> pub kms_master_key_id: Option<String>, /// <p>Server-side encryption algorithm to use for the default encryption.</p> pub sse_algorithm: String, } struct ServerSideEncryptionByDefaultDeserializer; impl ServerSideEncryptionByDefaultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ServerSideEncryptionByDefault, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ServerSideEncryptionByDefault::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "KMSMasterKeyID" => { obj.kms_master_key_id = Some(try!(SSEKMSKeyIdDeserializer::deserialize( "KMSMasterKeyID", stack ))); } "SSEAlgorithm" => { obj.sse_algorithm = try!(ServerSideEncryptionDeserializer::deserialize( "SSEAlgorithm", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ServerSideEncryptionByDefaultSerializer; impl ServerSideEncryptionByDefaultSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &ServerSideEncryptionByDefault, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.kms_master_key_id { writer.write(xml::writer::XmlEvent::start_element("KMSMasterKeyID"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::start_element("SSEAlgorithm"))?; 
writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.sse_algorithm )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for server-side encryption configuration rules. Currently S3 supports one rule only.</p> #[derive(Default, Debug, Clone)] pub struct ServerSideEncryptionConfiguration { /// <p>Container for information about a particular server-side encryption configuration rule.</p> pub rules: Vec<ServerSideEncryptionRule>, } struct ServerSideEncryptionConfigurationDeserializer; impl ServerSideEncryptionConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ServerSideEncryptionConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ServerSideEncryptionConfiguration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Rule" => { obj.rules = try!(ServerSideEncryptionRulesDeserializer::deserialize( "Rule", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ServerSideEncryptionConfigurationSerializer; impl ServerSideEncryptionConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &ServerSideEncryptionConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; ServerSideEncryptionRulesSerializer::serialize(&mut writer, "Rule", &obj.rules)?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for information about a particular server-side encryption configuration rule.</p> #[derive(Default, Debug, Clone)] pub struct ServerSideEncryptionRule { /// <p>Describes the default server-side encryption to apply to new objects in the bucket. If Put Object request does not specify any server-side encryption, this default encryption will be applied.</p> pub apply_server_side_encryption_by_default: Option<ServerSideEncryptionByDefault>, } struct ServerSideEncryptionRuleDeserializer; impl ServerSideEncryptionRuleDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ServerSideEncryptionRule, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ServerSideEncryptionRule::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "ApplyServerSideEncryptionByDefault" => { obj.apply_server_side_encryption_by_default = Some(try!( ServerSideEncryptionByDefaultDeserializer::deserialize( "ApplyServerSideEncryptionByDefault", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ServerSideEncryptionRuleSerializer; impl ServerSideEncryptionRuleSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &ServerSideEncryptionRule, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.apply_server_side_encryption_by_default { &ServerSideEncryptionByDefaultSerializer::serialize( &mut writer, "ApplyServerSideEncryptionByDefault", value, )?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct ServerSideEncryptionRulesDeserializer; impl ServerSideEncryptionRulesDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<ServerSideEncryptionRule>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(ServerSideEncryptionRuleDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } pub struct ServerSideEncryptionRulesSerializer; impl ServerSideEncryptionRulesSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<ServerSideEncryptionRule>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { ServerSideEncryptionRuleSerializer::serialize(writer, name, element)?; } Ok(()) } } struct SizeDeserializer; impl SizeDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<i64, XmlParseError> { try!(start_element(tag_name, stack)); let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Container for filters that define which source objects should be replicated.</p> #[derive(Default, Debug, Clone)] pub struct SourceSelectionCriteria { /// <p>Container for filter information of selection of KMS Encrypted S3 objects.</p> pub sse_kms_encrypted_objects: Option<SseKmsEncryptedObjects>, } struct SourceSelectionCriteriaDeserializer; impl SourceSelectionCriteriaDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<SourceSelectionCriteria, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = SourceSelectionCriteria::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "SseKmsEncryptedObjects" => { obj.sse_kms_encrypted_objects = Some(try!(SseKmsEncryptedObjectsDeserializer::deserialize( "SseKmsEncryptedObjects", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct SourceSelectionCriteriaSerializer; impl SourceSelectionCriteriaSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &SourceSelectionCriteria, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.sse_kms_encrypted_objects { &SseKmsEncryptedObjectsSerializer::serialize( &mut writer, "SseKmsEncryptedObjects", value, )?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for filter information of selection of KMS Encrypted S3 objects.</p> #[derive(Default, Debug, Clone)] pub struct SseKmsEncryptedObjects { /// <p>The replication for KMS encrypted S3 objects is disabled if status is not Enabled.</p> pub status: String, } struct SseKmsEncryptedObjectsDeserializer; impl SseKmsEncryptedObjectsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<SseKmsEncryptedObjects, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = SseKmsEncryptedObjects::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Status" => { obj.status = try!(SseKmsEncryptedObjectsStatusDeserializer::deserialize( "Status", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct SseKmsEncryptedObjectsSerializer; impl SseKmsEncryptedObjectsSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &SseKmsEncryptedObjects, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Status"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.status )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct SseKmsEncryptedObjectsStatusDeserializer; impl SseKmsEncryptedObjectsStatusDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct SseKmsEncryptedObjectsStatusSerializer; impl SseKmsEncryptedObjectsStatusSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct StartAfterDeserializer; impl StartAfterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) 
} } pub struct StartAfterSerializer; impl StartAfterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct Stats { /// <p>Total number of uncompressed object bytes processed.</p> pub bytes_processed: Option<i64>, /// <p>Total number of object bytes scanned.</p> pub bytes_scanned: Option<i64>, } struct StatsDeserializer; impl StatsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Stats, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Stats::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "BytesProcessed" => { obj.bytes_processed = Some(try!(BytesProcessedDeserializer::deserialize( "BytesProcessed", stack ))); } "BytesScanned" => { obj.bytes_scanned = Some(try!(BytesScannedDeserializer::deserialize( "BytesScanned", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct StatsEvent { /// <p>The Stats event details.</p> pub details: Option<Stats>, } struct StatsEventDeserializer; impl StatsEventDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<StatsEvent, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = StatsEvent::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Details" => { obj.details = Some(try!(StatsDeserializer::deserialize("Details", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct StorageClassDeserializer; impl StorageClassDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct StorageClassSerializer; impl StorageClassSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct StorageClassAnalysis { /// <p>A container used to describe how data related to the storage class analysis should be exported.</p> pub data_export: Option<StorageClassAnalysisDataExport>, } struct StorageClassAnalysisDeserializer; impl StorageClassAnalysisDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<StorageClassAnalysis, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = StorageClassAnalysis::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
    "DataExport" => {
        obj.data_export = Some(try!(
            StorageClassAnalysisDataExportDeserializer::deserialize(
                "DataExport",
                stack
            )
        ));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}

pub struct StorageClassAnalysisSerializer;
impl StorageClassAnalysisSerializer {
    /// Serializes a `StorageClassAnalysis` as a `<name>` element; the optional
    /// `DataExport` child is omitted when `None`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &StorageClassAnalysis,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.data_export {
            // Fixed: dropped the spurious leading `&` that borrowed the unit
            // result of `serialize` and immediately discarded the reference.
            StorageClassAnalysisDataExportSerializer::serialize(&mut writer, "DataExport", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct StorageClassAnalysisDataExport {
    /// <p>The place to store the data for an analysis.</p>
    pub destination: AnalyticsExportDestination,
    /// <p>The version of the output schema to use when exporting data. Must be V_1.</p>
    pub output_schema_version: String,
}

struct StorageClassAnalysisDataExportDeserializer;
impl StorageClassAnalysisDataExportDeserializer {
    /// Parses a `StorageClassAnalysisDataExport` element from the XML event stack.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<StorageClassAnalysisDataExport, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = StorageClassAnalysisDataExport::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{ "Destination" => { obj.destination = try!(AnalyticsExportDestinationDeserializer::deserialize( "Destination", stack )); } "OutputSchemaVersion" => { obj.output_schema_version = try!(StorageClassAnalysisSchemaVersionDeserializer::deserialize( "OutputSchemaVersion", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct StorageClassAnalysisDataExportSerializer; impl StorageClassAnalysisDataExportSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &StorageClassAnalysisDataExport, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; AnalyticsExportDestinationSerializer::serialize( &mut writer, "Destination", &obj.destination, )?; writer.write(xml::writer::XmlEvent::start_element("OutputSchemaVersion"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.output_schema_version )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct StorageClassAnalysisSchemaVersionDeserializer; impl StorageClassAnalysisSchemaVersionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct StorageClassAnalysisSchemaVersionSerializer; impl StorageClassAnalysisSchemaVersionSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; 
writer.write(xml::writer::XmlEvent::end_element()) } } struct SuffixDeserializer; impl SuffixDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct SuffixSerializer; impl SuffixSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct Tag { /// <p>Name of the tag.</p> pub key: String, /// <p>Value of the tag.</p> pub value: String, } struct TagDeserializer; impl TagDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Tag, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Tag::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Key" => { obj.key = try!(ObjectKeyDeserializer::deserialize("Key", stack)); } "Value" => { obj.value = try!(ValueDeserializer::deserialize("Value", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct TagSerializer; impl TagSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Tag, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Key"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.key )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::start_element("Value"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.value )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct TagSetDeserializer; impl TagSetDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<Tag>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "Tag" { obj.push(try!(TagDeserializer::deserialize("Tag", stack))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } pub struct TagSetSerializer; impl TagSetSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<Tag>, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; for element in obj { TagSerializer::serialize(writer, "Tag", element)?; } writer.write(xml::writer::XmlEvent::end_element())?; Ok(()) } } #[derive(Default, Debug, Clone)] pub struct Tagging { pub tag_set: Vec<Tag>, } pub struct TaggingSerializer; impl TaggingSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Tagging, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; TagSetSerializer::serialize(&mut writer, "TagSet", &obj.tag_set)?; writer.write(xml::writer::XmlEvent::end_element()) } } struct TargetBucketDeserializer; impl TargetBucketDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct TargetBucketSerializer; impl TargetBucketSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = 
obj.to_string()
)))?;
writer.write(xml::writer::XmlEvent::end_element())
}
}

#[derive(Default, Debug, Clone)]
pub struct TargetGrant {
    pub grantee: Option<Grantee>,
    /// <p>Logging permissions assigned to the Grantee for the bucket.</p>
    pub permission: Option<String>,
}

struct TargetGrantDeserializer;
impl TargetGrantDeserializer {
    /// Parses a `TargetGrant` element (optional `Grantee` and `Permission` children).
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<TargetGrant, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = TargetGrant::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Grantee" => {
                        obj.grantee =
                            Some(try!(GranteeDeserializer::deserialize("Grantee", stack)));
                    }
                    "Permission" => {
                        obj.permission = Some(try!(BucketLogsPermissionDeserializer::deserialize(
                            "Permission",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct TargetGrantSerializer;
impl TargetGrantSerializer {
    /// Serializes a `TargetGrant`. Bug fix: the `Permission` text-node write
    /// previously dropped its `Result`, silently swallowing writer errors.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &TargetGrant,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.grantee {
            // Fixed: removed the stray `&` that borrowed and discarded the result.
            GranteeSerializer::serialize(&mut writer, "Grantee", value)?;
        }
        if let Some(ref value) = obj.permission {
            writer.write(xml::writer::XmlEvent::start_element("Permission"))?;
            // Fixed: propagate the write error with `?` instead of dropping it.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct TargetGrantsDeserializer;
impl
TargetGrantsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<TargetGrant>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "Grant" { obj.push(try!(TargetGrantDeserializer::deserialize("Grant", stack))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } pub struct TargetGrantsSerializer; impl TargetGrantsSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<TargetGrant>, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; for element in obj { TargetGrantSerializer::serialize(writer, "Grant", element)?; } writer.write(xml::writer::XmlEvent::end_element())?; Ok(()) } } struct TargetPrefixDeserializer; impl TargetPrefixDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct TargetPrefixSerializer; impl TargetPrefixSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; 
writer.write(xml::writer::XmlEvent::end_element()) } } pub struct TierSerializer; impl TierSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct TokenDeserializer; impl TokenDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct TokenSerializer; impl TokenSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct TopicArnDeserializer; impl TopicArnDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct TopicArnSerializer; impl TopicArnSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for 
specifying the configuration when you want Amazon S3 to publish events to an Amazon Simple Notification Service (Amazon SNS) topic.</p> #[derive(Default, Debug, Clone)] pub struct TopicConfiguration { pub events: Vec<String>, pub filter: Option<NotificationConfigurationFilter>, pub id: Option<String>, /// <p>Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects events of specified type.</p> pub topic_arn: String, } struct TopicConfigurationDeserializer; impl TopicConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<TopicConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = TopicConfiguration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
    "Event" => {
        obj.events = try!(EventListDeserializer::deserialize("Event", stack));
    }
    "Filter" => {
        obj.filter = Some(try!(
            NotificationConfigurationFilterDeserializer::deserialize("Filter", stack)
        ));
    }
    "Id" => {
        obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack)));
    }
    "Topic" => {
        obj.topic_arn = try!(TopicArnDeserializer::deserialize("Topic", stack));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}

pub struct TopicConfigurationSerializer;
impl TopicConfigurationSerializer {
    /// Serializes a `TopicConfiguration`. Bug fix: the `Id` text-node write
    /// previously dropped its `Result`, silently swallowing writer errors.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &TopicConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        EventListSerializer::serialize(&mut writer, "Event", &obj.events)?;
        if let Some(ref value) = obj.filter {
            // Fixed: removed the stray `&` that borrowed and discarded the result.
            NotificationConfigurationFilterSerializer::serialize(&mut writer, "Filter", value)?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("Id"))?;
            // Fixed: propagate the write error with `?` instead of dropping it.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Topic"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.topic_arn
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct TopicConfigurationDeprecated {
    pub events: Option<Vec<String>>,
    pub id: Option<String>,
    /// <p>Amazon SNS topic to which Amazon S3 will publish a message to report the specified events for the bucket.</p>
    pub topic: Option<String>,
}

struct TopicConfigurationDeprecatedDeserializer;
impl TopicConfigurationDeprecatedDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
tag_name: &str,
stack: &mut T,
) -> Result<TopicConfigurationDeprecated, XmlParseError> {
    try!(start_element(tag_name, stack));
    let mut obj = TopicConfigurationDeprecated::default();
    loop {
        let next_event = match stack.peek() {
            Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
            Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                DeserializerNext::Element(name.local_name.to_owned())
            }
            _ => DeserializerNext::Skip,
        };
        match next_event {
            DeserializerNext::Element(name) => match &name[..] {
                "Event" => {
                    obj.events =
                        Some(try!(EventListDeserializer::deserialize("Event", stack)));
                }
                "Id" => {
                    obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack)));
                }
                "Topic" => {
                    obj.topic = Some(try!(TopicArnDeserializer::deserialize("Topic", stack)));
                }
                _ => skip_tree(stack),
            },
            DeserializerNext::Close => break,
            DeserializerNext::Skip => {
                stack.next();
            }
        }
    }
    try!(end_element(tag_name, stack));
    Ok(obj)
}
}

pub struct TopicConfigurationDeprecatedSerializer;
impl TopicConfigurationDeprecatedSerializer {
    /// Serializes a `TopicConfigurationDeprecated`. Bug fix: the `Id` and
    /// `Topic` text-node writes previously dropped their `Result`s, silently
    /// swallowing writer errors.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &TopicConfigurationDeprecated,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.events {
            // Fixed: removed the stray `&` that borrowed and discarded the result.
            EventListSerializer::serialize(&mut writer, "Event", value)?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("Id"))?;
            // Fixed: propagate the write error with `?` instead of dropping it.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.topic {
            writer.write(xml::writer::XmlEvent::start_element("Topic"))?;
            // Fixed: propagate the write error with `?` instead of dropping it.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct TopicConfigurationListDeserializer;
impl
TopicConfigurationListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<TopicConfiguration>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(TopicConfigurationDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } pub struct TopicConfigurationListSerializer; impl TopicConfigurationListSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<TopicConfiguration>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { TopicConfigurationSerializer::serialize(writer, name, element)?; } Ok(()) } } #[derive(Default, Debug, Clone)] pub struct Transition { /// <p>Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.</p> pub date: Option<String>, /// <p>Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.</p> pub days: Option<i64>, /// <p>The class of storage used to store the object.</p> pub storage_class: Option<String>, } struct TransitionDeserializer; impl TransitionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Transition, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Transition::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
    "Date" => {
        obj.date = Some(try!(DateDeserializer::deserialize("Date", stack)));
    }
    "Days" => {
        obj.days = Some(try!(DaysDeserializer::deserialize("Days", stack)));
    }
    "StorageClass" => {
        obj.storage_class = Some(try!(
            TransitionStorageClassDeserializer::deserialize("StorageClass", stack)
        ));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}

pub struct TransitionSerializer;
impl TransitionSerializer {
    /// Serializes a `Transition`. Bug fix: the `Date`, `Days`, and
    /// `StorageClass` text-node writes all previously dropped their `Result`s,
    /// silently swallowing writer errors.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Transition,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.date {
            writer.write(xml::writer::XmlEvent::start_element("Date"))?;
            // Fixed: propagate the write error with `?` instead of dropping it.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.days {
            writer.write(xml::writer::XmlEvent::start_element("Days"))?;
            // Fixed: propagate the write error with `?` instead of dropping it.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.storage_class {
            writer.write(xml::writer::XmlEvent::start_element("StorageClass"))?;
            // Fixed: propagate the write error with `?` instead of dropping it.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct TransitionListDeserializer;
impl TransitionListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<Transition>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement {
                    ref name, ..
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(TransitionDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } pub struct TransitionListSerializer; impl TransitionListSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<Transition>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { TransitionSerializer::serialize(writer, name, element)?; } Ok(()) } } struct TransitionStorageClassDeserializer; impl TransitionStorageClassDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct TransitionStorageClassSerializer; impl TransitionStorageClassSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct TypeDeserializer; impl TypeDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct TypeSerializer; impl TypeSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; 
writer.write(xml::writer::XmlEvent::end_element()) } } struct URIDeserializer; impl URIDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct URISerializer; impl URISerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct UploadIdMarkerDeserializer; impl UploadIdMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct UploadIdMarkerSerializer; impl UploadIdMarkerSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct UploadPartCopyOutput { pub copy_part_result: Option<CopyPartResult>, /// <p>The version of the source object that was copied, if you have enabled versioning on the source bucket.</p> pub copy_source_version_id: Option<String>, pub request_charged: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header 
confirming the encryption algorithm used.</p> pub sse_customer_algorithm: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p> pub sse_customer_key_md5: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, } struct UploadPartCopyOutputDeserializer; impl UploadPartCopyOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<UploadPartCopyOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = UploadPartCopyOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "CopyPartResult" => { obj.copy_part_result = Some(try!(CopyPartResultDeserializer::deserialize( "CopyPartResult", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct UploadPartCopyRequest { pub bucket: String, /// <p>The name of the source bucket and key name of the source object, separated by a slash (/). 
Must be URL-encoded.</p> pub copy_source: String, /// <p>Copies the object if its entity tag (ETag) matches the specified tag.</p> pub copy_source_if_match: Option<String>, /// <p>Copies the object if it has been modified since the specified time.</p> pub copy_source_if_modified_since: Option<String>, /// <p>Copies the object if its entity tag (ETag) is different than the specified ETag.</p> pub copy_source_if_none_match: Option<String>, /// <p>Copies the object if it hasn&#39;t been modified since the specified time.</p> pub copy_source_if_unmodified_since: Option<String>, /// <p>The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source. You can copy a range only if the source object is greater than 5 GB.</p> pub copy_source_range: Option<String>, /// <p>Specifies the algorithm to use when decrypting the source object (e.g., AES256).</p> pub copy_source_sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.</p> pub copy_source_sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub copy_source_sse_customer_key_md5: Option<String>, pub key: String, /// <p>Part number of part being copied. 
This is a positive integer between 1 and 10,000.</p> pub part_number: i64, pub request_payer: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>Upload ID identifying the multipart upload whose part is being copied.</p> pub upload_id: String, } #[derive(Default, Debug, Clone)] pub struct UploadPartOutput { /// <p>Entity tag for the uploaded object.</p> pub e_tag: Option<String>, pub request_charged: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p> pub sse_customer_algorithm: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p> pub sse_customer_key_md5: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub 
server_side_encryption: Option<String>, } struct UploadPartOutputDeserializer; impl UploadPartOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<UploadPartOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = UploadPartOutput::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug)] pub struct UploadPartRequest { /// <p>Object data.</p> pub body: Option<StreamingBody>, /// <p>Name of the bucket to which the multipart upload was initiated.</p> pub bucket: String, /// <p>Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.</p> pub content_length: Option<i64>, /// <p>The base64-encoded 128-bit MD5 digest of the part data.</p> pub content_md5: Option<String>, /// <p>Object key for which the multipart upload was initiated.</p> pub key: String, /// <p>Part number of part being uploaded. This is a positive integer between 1 and 10,000.</p> pub part_number: i64, pub request_payer: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>Upload ID identifying the multipart upload whose part is being uploaded.</p> pub upload_id: String, } pub struct UserMetadataSerializer; impl UserMetadataSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<MetadataEntry>, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; for element in obj { MetadataEntrySerializer::serialize(writer, "MetadataEntry", element)?; } writer.write(xml::writer::XmlEvent::end_element())?; Ok(()) } } struct ValueDeserializer; impl ValueDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ValueSerializer; impl ValueSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct VersionIdMarkerDeserializer; impl VersionIdMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct VersionIdMarkerSerializer; impl VersionIdMarkerSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> 
Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct VersioningConfiguration {
    /// <p>Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned.</p>
    pub mfa_delete: Option<String>,
    /// <p>The versioning state of the bucket.</p>
    pub status: Option<String>,
}

/// Serializes a `VersioningConfiguration` with optional `MfaDelete` and
/// `Status` child elements.
pub struct VersioningConfigurationSerializer;
impl VersioningConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &VersioningConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.mfa_delete {
            writer.write(xml::writer::XmlEvent::start_element("MfaDelete"))?;
            // BUG FIX: the `characters` write previously dropped its Result
            // (missing `?`, hidden by #[allow(warnings)]), silently ignoring
            // XML write errors. Propagate it like every sibling serializer.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.status {
            writer.write(xml::writer::XmlEvent::start_element("Status"))?;
            // BUG FIX: same missing-`?` defect as above.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct WebsiteConfiguration {
    pub error_document: Option<ErrorDocument>,
    pub index_document: Option<IndexDocument>,
    pub redirect_all_requests_to: Option<RedirectAllRequestsTo>,
    pub routing_rules: Option<Vec<RoutingRule>>,
}

/// Serializes a `WebsiteConfiguration` by delegating each optional child to
/// its dedicated serializer.
pub struct WebsiteConfigurationSerializer;
impl WebsiteConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut 
EventWriter<W>, name: &str, obj: &WebsiteConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.error_document { &ErrorDocumentSerializer::serialize(&mut writer, "ErrorDocument", value)?; } if let Some(ref value) = obj.index_document { &IndexDocumentSerializer::serialize(&mut writer, "IndexDocument", value)?; } if let Some(ref value) = obj.redirect_all_requests_to { &RedirectAllRequestsToSerializer::serialize( &mut writer, "RedirectAllRequestsTo", value, )?; } if let Some(ref value) = obj.routing_rules { &RoutingRulesSerializer::serialize(&mut writer, "RoutingRules", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// Errors returned by AbortMultipartUpload #[derive(Debug, PartialEq)] pub enum AbortMultipartUploadError { /// <p>The specified multipart upload does not exist.</p> NoSuchUpload(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl AbortMultipartUploadError { pub fn from_body(body: &str) -> AbortMultipartUploadError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "NoSuchUpload" => { AbortMultipartUploadError::NoSuchUpload(String::from(parsed_error.message)) } _ => AbortMultipartUploadError::Unknown(String::from(body)), }, Err(_) => AbortMultipartUploadError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for AbortMultipartUploadError { fn from(err: XmlParseError) -> AbortMultipartUploadError { let XmlParseError(message) = err; AbortMultipartUploadError::Unknown(message.to_string()) } } impl From<CredentialsError> for AbortMultipartUploadError { fn from(err: CredentialsError) -> AbortMultipartUploadError { AbortMultipartUploadError::Credentials(err) } } impl From<HttpDispatchError> for AbortMultipartUploadError { fn from(err: HttpDispatchError) -> AbortMultipartUploadError { AbortMultipartUploadError::HttpDispatch(err) } } impl From<io::Error> for AbortMultipartUploadError { fn from(err: io::Error) -> AbortMultipartUploadError { AbortMultipartUploadError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for AbortMultipartUploadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for AbortMultipartUploadError { fn description(&self) -> &str { match *self { AbortMultipartUploadError::NoSuchUpload(ref cause) => cause, AbortMultipartUploadError::Validation(ref cause) => cause, AbortMultipartUploadError::Credentials(ref err) => err.description(), AbortMultipartUploadError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } AbortMultipartUploadError::Unknown(ref cause) => cause, } } } /// Errors returned by CompleteMultipartUpload #[derive(Debug, PartialEq)] pub enum CompleteMultipartUploadError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. 
Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CompleteMultipartUploadError { pub fn from_body(body: &str) -> CompleteMultipartUploadError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => CompleteMultipartUploadError::Unknown(String::from(body)), }, Err(_) => CompleteMultipartUploadError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CompleteMultipartUploadError { fn from(err: XmlParseError) -> CompleteMultipartUploadError { let XmlParseError(message) = err; CompleteMultipartUploadError::Unknown(message.to_string()) } } impl From<CredentialsError> for CompleteMultipartUploadError { fn from(err: CredentialsError) -> CompleteMultipartUploadError { CompleteMultipartUploadError::Credentials(err) } } impl From<HttpDispatchError> for CompleteMultipartUploadError { fn from(err: HttpDispatchError) -> CompleteMultipartUploadError { CompleteMultipartUploadError::HttpDispatch(err) } } impl From<io::Error> for CompleteMultipartUploadError { fn from(err: io::Error) -> CompleteMultipartUploadError { CompleteMultipartUploadError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CompleteMultipartUploadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CompleteMultipartUploadError { fn description(&self) -> &str { match *self { CompleteMultipartUploadError::Validation(ref cause) => cause, CompleteMultipartUploadError::Credentials(ref err) => err.description(), 
CompleteMultipartUploadError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CompleteMultipartUploadError::Unknown(ref cause) => cause, } } } /// Errors returned by CopyObject #[derive(Debug, PartialEq)] pub enum CopyObjectError { /// <p>The source object of the COPY operation is not in the active tier and is only stored in Amazon Glacier.</p> ObjectNotInActiveTierError(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CopyObjectError { pub fn from_body(body: &str) -> CopyObjectError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "ObjectNotInActiveTierError" => { CopyObjectError::ObjectNotInActiveTierError(String::from(parsed_error.message)) } _ => CopyObjectError::Unknown(String::from(body)), }, Err(_) => CopyObjectError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CopyObjectError { fn from(err: XmlParseError) -> CopyObjectError { let XmlParseError(message) = err; CopyObjectError::Unknown(message.to_string()) } } impl From<CredentialsError> for CopyObjectError { fn from(err: CredentialsError) -> CopyObjectError { CopyObjectError::Credentials(err) } } impl From<HttpDispatchError> for CopyObjectError { fn from(err: HttpDispatchError) -> CopyObjectError { CopyObjectError::HttpDispatch(err) } } impl From<io::Error> for CopyObjectError { fn from(err: io::Error) -> CopyObjectError { CopyObjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CopyObjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CopyObjectError { fn description(&self) -> &str { match *self { CopyObjectError::ObjectNotInActiveTierError(ref cause) => cause, CopyObjectError::Validation(ref cause) => cause, CopyObjectError::Credentials(ref err) => err.description(), CopyObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), CopyObjectError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateBucket #[derive(Debug, PartialEq)] pub enum CreateBucketError { /// <p>The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.</p> BucketAlreadyExists(String), BucketAlreadyOwnedByYou(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. 
Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CreateBucketError { pub fn from_body(body: &str) -> CreateBucketError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "BucketAlreadyExists" => { CreateBucketError::BucketAlreadyExists(String::from(parsed_error.message)) } "BucketAlreadyOwnedByYou" => { CreateBucketError::BucketAlreadyOwnedByYou(String::from(parsed_error.message)) } _ => CreateBucketError::Unknown(String::from(body)), }, Err(_) => CreateBucketError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CreateBucketError { fn from(err: XmlParseError) -> CreateBucketError { let XmlParseError(message) = err; CreateBucketError::Unknown(message.to_string()) } } impl From<CredentialsError> for CreateBucketError { fn from(err: CredentialsError) -> CreateBucketError { CreateBucketError::Credentials(err) } } impl From<HttpDispatchError> for CreateBucketError { fn from(err: HttpDispatchError) -> CreateBucketError { CreateBucketError::HttpDispatch(err) } } impl From<io::Error> for CreateBucketError { fn from(err: io::Error) -> CreateBucketError { CreateBucketError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateBucketError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateBucketError { fn description(&self) -> &str { match *self { CreateBucketError::BucketAlreadyExists(ref cause) => cause, CreateBucketError::BucketAlreadyOwnedByYou(ref cause) => cause, 
CreateBucketError::Validation(ref cause) => cause, CreateBucketError::Credentials(ref err) => err.description(), CreateBucketError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), CreateBucketError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateMultipartUpload #[derive(Debug, PartialEq)] pub enum CreateMultipartUploadError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CreateMultipartUploadError { pub fn from_body(body: &str) -> CreateMultipartUploadError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => CreateMultipartUploadError::Unknown(String::from(body)), }, Err(_) => CreateMultipartUploadError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CreateMultipartUploadError { fn from(err: XmlParseError) -> CreateMultipartUploadError { let XmlParseError(message) = err; CreateMultipartUploadError::Unknown(message.to_string()) } } impl From<CredentialsError> for CreateMultipartUploadError { fn from(err: CredentialsError) -> CreateMultipartUploadError { CreateMultipartUploadError::Credentials(err) } } impl From<HttpDispatchError> for CreateMultipartUploadError { fn from(err: HttpDispatchError) -> CreateMultipartUploadError { CreateMultipartUploadError::HttpDispatch(err) } } impl From<io::Error> for CreateMultipartUploadError { fn from(err: io::Error) -> CreateMultipartUploadError { CreateMultipartUploadError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateMultipartUploadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateMultipartUploadError { fn description(&self) -> &str { match *self { CreateMultipartUploadError::Validation(ref cause) => cause, CreateMultipartUploadError::Credentials(ref err) => err.description(), CreateMultipartUploadError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CreateMultipartUploadError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucket #[derive(Debug, PartialEq)] pub enum DeleteBucketError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl DeleteBucketError { pub fn from_body(body: &str) -> DeleteBucketError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketError::Unknown(String::from(body)), }, Err(_) => DeleteBucketError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketError { fn from(err: XmlParseError) -> DeleteBucketError { let XmlParseError(message) = err; DeleteBucketError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketError { fn from(err: CredentialsError) -> DeleteBucketError { DeleteBucketError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketError { fn from(err: HttpDispatchError) -> DeleteBucketError { DeleteBucketError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketError { fn from(err: io::Error) -> DeleteBucketError { DeleteBucketError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketError { fn description(&self) -> &str { match *self { DeleteBucketError::Validation(ref cause) => cause, DeleteBucketError::Credentials(ref err) => err.description(), DeleteBucketError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DeleteBucketError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketAnalyticsConfiguration #[derive(Debug, PartialEq)] pub enum DeleteBucketAnalyticsConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. 
Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketAnalyticsConfigurationError { pub fn from_body(body: &str) -> DeleteBucketAnalyticsConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketAnalyticsConfigurationError::Unknown(String::from(body)), }, Err(_) => DeleteBucketAnalyticsConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketAnalyticsConfigurationError { fn from(err: XmlParseError) -> DeleteBucketAnalyticsConfigurationError { let XmlParseError(message) = err; DeleteBucketAnalyticsConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketAnalyticsConfigurationError { fn from(err: CredentialsError) -> DeleteBucketAnalyticsConfigurationError { DeleteBucketAnalyticsConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketAnalyticsConfigurationError { fn from(err: HttpDispatchError) -> DeleteBucketAnalyticsConfigurationError { DeleteBucketAnalyticsConfigurationError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketAnalyticsConfigurationError { fn from(err: io::Error) -> DeleteBucketAnalyticsConfigurationError { DeleteBucketAnalyticsConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketAnalyticsConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketAnalyticsConfigurationError { fn 
description(&self) -> &str { match *self { DeleteBucketAnalyticsConfigurationError::Validation(ref cause) => cause, DeleteBucketAnalyticsConfigurationError::Credentials(ref err) => err.description(), DeleteBucketAnalyticsConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketAnalyticsConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketCors #[derive(Debug, PartialEq)] pub enum DeleteBucketCorsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketCorsError { pub fn from_body(body: &str) -> DeleteBucketCorsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => DeleteBucketCorsError::Unknown(String::from(body)), }, Err(_) => DeleteBucketCorsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketCorsError { fn from(err: XmlParseError) -> DeleteBucketCorsError { let XmlParseError(message) = err; DeleteBucketCorsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketCorsError { fn from(err: CredentialsError) -> DeleteBucketCorsError { DeleteBucketCorsError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketCorsError { fn from(err: HttpDispatchError) -> DeleteBucketCorsError { DeleteBucketCorsError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketCorsError { fn from(err: io::Error) -> DeleteBucketCorsError { DeleteBucketCorsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketCorsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketCorsError { fn description(&self) -> &str { match *self { DeleteBucketCorsError::Validation(ref cause) => cause, DeleteBucketCorsError::Credentials(ref err) => err.description(), DeleteBucketCorsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DeleteBucketCorsError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketEncryption #[derive(Debug, PartialEq)] pub enum DeleteBucketEncryptionError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl DeleteBucketEncryptionError { pub fn from_body(body: &str) -> DeleteBucketEncryptionError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketEncryptionError::Unknown(String::from(body)), }, Err(_) => DeleteBucketEncryptionError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketEncryptionError { fn from(err: XmlParseError) -> DeleteBucketEncryptionError { let XmlParseError(message) = err; DeleteBucketEncryptionError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketEncryptionError { fn from(err: CredentialsError) -> DeleteBucketEncryptionError { DeleteBucketEncryptionError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketEncryptionError { fn from(err: HttpDispatchError) -> DeleteBucketEncryptionError { DeleteBucketEncryptionError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketEncryptionError { fn from(err: io::Error) -> DeleteBucketEncryptionError { DeleteBucketEncryptionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketEncryptionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketEncryptionError { fn description(&self) -> &str { match *self { DeleteBucketEncryptionError::Validation(ref cause) => cause, DeleteBucketEncryptionError::Credentials(ref err) => err.description(), DeleteBucketEncryptionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketEncryptionError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketInventoryConfiguration #[derive(Debug, 
/* Generated error type for the DeleteBucketInventoryConfiguration S3 call:
   the enum (its `#[derive(Debug,` prefix sits at the end of the previous
   line), `from_body` (XML `<Error>` parse; wildcard code match means every
   outcome becomes `Unknown` with the raw body), the four `From` conversions,
   and `Display`/`Error` impls delegating to `description()`. The span ends
   inside the `DeleteBucketLifecycleError` block, whose `from_body` match
   continues on the next line. NOTE(review): machine-generated code — do not
   hand-edit; regenerate instead. */
PartialEq)] pub enum DeleteBucketInventoryConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketInventoryConfigurationError { pub fn from_body(body: &str) -> DeleteBucketInventoryConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketInventoryConfigurationError::Unknown(String::from(body)), }, Err(_) => DeleteBucketInventoryConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketInventoryConfigurationError { fn from(err: XmlParseError) -> DeleteBucketInventoryConfigurationError { let XmlParseError(message) = err; DeleteBucketInventoryConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketInventoryConfigurationError { fn from(err: CredentialsError) -> DeleteBucketInventoryConfigurationError { DeleteBucketInventoryConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketInventoryConfigurationError { fn from(err: HttpDispatchError) -> DeleteBucketInventoryConfigurationError { DeleteBucketInventoryConfigurationError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketInventoryConfigurationError { fn from(err: io::Error) -> DeleteBucketInventoryConfigurationError { DeleteBucketInventoryConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for
DeleteBucketInventoryConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketInventoryConfigurationError { fn description(&self) -> &str { match *self { DeleteBucketInventoryConfigurationError::Validation(ref cause) => cause, DeleteBucketInventoryConfigurationError::Credentials(ref err) => err.description(), DeleteBucketInventoryConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketInventoryConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketLifecycle #[derive(Debug, PartialEq)] pub enum DeleteBucketLifecycleError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketLifecycleError { pub fn from_body(body: &str) -> DeleteBucketLifecycleError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..]
/* Continuation of `DeleteBucketLifecycleError::from_body` (the match opened
   on the previous line): wildcard code arm maps every parsed error to
   `Unknown(body)`, parse failures likewise. Then the standard generated
   `From`/`Display`/`Error` impls for DeleteBucketLifecycleError, and the
   start of the identical `DeleteBucketMetricsConfigurationError` block.
   NOTE(review): machine-generated AWS S3 client code. */
{ _ => DeleteBucketLifecycleError::Unknown(String::from(body)), }, Err(_) => DeleteBucketLifecycleError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketLifecycleError { fn from(err: XmlParseError) -> DeleteBucketLifecycleError { let XmlParseError(message) = err; DeleteBucketLifecycleError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketLifecycleError { fn from(err: CredentialsError) -> DeleteBucketLifecycleError { DeleteBucketLifecycleError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketLifecycleError { fn from(err: HttpDispatchError) -> DeleteBucketLifecycleError { DeleteBucketLifecycleError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketLifecycleError { fn from(err: io::Error) -> DeleteBucketLifecycleError { DeleteBucketLifecycleError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketLifecycleError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketLifecycleError { fn description(&self) -> &str { match *self { DeleteBucketLifecycleError::Validation(ref cause) => cause, DeleteBucketLifecycleError::Credentials(ref err) => err.description(), DeleteBucketLifecycleError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketLifecycleError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketMetricsConfiguration #[derive(Debug, PartialEq)] pub enum DeleteBucketMetricsConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred.
The raw HTTP response is provided. Unknown(String), } impl DeleteBucketMetricsConfigurationError { pub fn from_body(body: &str) -> DeleteBucketMetricsConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketMetricsConfigurationError::Unknown(String::from(body)), }, Err(_) => DeleteBucketMetricsConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketMetricsConfigurationError { fn from(err: XmlParseError) -> DeleteBucketMetricsConfigurationError { let XmlParseError(message) = err; DeleteBucketMetricsConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketMetricsConfigurationError { fn from(err: CredentialsError) -> DeleteBucketMetricsConfigurationError { DeleteBucketMetricsConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketMetricsConfigurationError { fn from(err: HttpDispatchError) -> DeleteBucketMetricsConfigurationError { DeleteBucketMetricsConfigurationError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketMetricsConfigurationError { fn from(err: io::Error) -> DeleteBucketMetricsConfigurationError { DeleteBucketMetricsConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketMetricsConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketMetricsConfigurationError { fn description(&self) -> &str { match *self { DeleteBucketMetricsConfigurationError::Validation(ref cause) => cause, DeleteBucketMetricsConfigurationError::Credentials(ref err) => err.description(),
/* Tail of `Error::description` for DeleteBucketMetricsConfigurationError
   (the match opened on the previous line), then the full generated
   `DeleteBucketPolicyError` type — enum, `from_body` (XML parse, wildcard
   code match -> `Unknown(body)`), `From` conversions, `Display`/`Error` —
   and the start of `DeleteBucketReplicationError`, whose `from_body` match
   continues on the next line. NOTE(review): machine-generated code. */
DeleteBucketMetricsConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketMetricsConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketPolicy #[derive(Debug, PartialEq)] pub enum DeleteBucketPolicyError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketPolicyError { pub fn from_body(body: &str) -> DeleteBucketPolicyError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketPolicyError::Unknown(String::from(body)), }, Err(_) => DeleteBucketPolicyError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketPolicyError { fn from(err: XmlParseError) -> DeleteBucketPolicyError { let XmlParseError(message) = err; DeleteBucketPolicyError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketPolicyError { fn from(err: CredentialsError) -> DeleteBucketPolicyError { DeleteBucketPolicyError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketPolicyError { fn from(err: HttpDispatchError) -> DeleteBucketPolicyError { DeleteBucketPolicyError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketPolicyError { fn from(err: io::Error) -> DeleteBucketPolicyError { DeleteBucketPolicyError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketPolicyError { fn fmt(&self, f: &mut
fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketPolicyError { fn description(&self) -> &str { match *self { DeleteBucketPolicyError::Validation(ref cause) => cause, DeleteBucketPolicyError::Credentials(ref err) => err.description(), DeleteBucketPolicyError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketPolicyError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketReplication #[derive(Debug, PartialEq)] pub enum DeleteBucketReplicationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketReplicationError { pub fn from_body(body: &str) -> DeleteBucketReplicationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..]
/* Continuation of `DeleteBucketReplicationError::from_body` (wildcard code
   arm -> `Unknown(body)`), then its generated `From`/`Display`/`Error`
   impls, followed by the full `DeleteBucketTaggingError` block in the same
   pattern, ending at the opening brace of the `DeleteBucketWebsiteError`
   enum whose variants continue on the next line. NOTE(review):
   machine-generated AWS S3 client code. */
{ _ => DeleteBucketReplicationError::Unknown(String::from(body)), }, Err(_) => DeleteBucketReplicationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketReplicationError { fn from(err: XmlParseError) -> DeleteBucketReplicationError { let XmlParseError(message) = err; DeleteBucketReplicationError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketReplicationError { fn from(err: CredentialsError) -> DeleteBucketReplicationError { DeleteBucketReplicationError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketReplicationError { fn from(err: HttpDispatchError) -> DeleteBucketReplicationError { DeleteBucketReplicationError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketReplicationError { fn from(err: io::Error) -> DeleteBucketReplicationError { DeleteBucketReplicationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketReplicationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketReplicationError { fn description(&self) -> &str { match *self { DeleteBucketReplicationError::Validation(ref cause) => cause, DeleteBucketReplicationError::Credentials(ref err) => err.description(), DeleteBucketReplicationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketReplicationError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketTagging #[derive(Debug, PartialEq)] pub enum DeleteBucketTaggingError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred.
The raw HTTP response is provided. Unknown(String), } impl DeleteBucketTaggingError { pub fn from_body(body: &str) -> DeleteBucketTaggingError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketTaggingError::Unknown(String::from(body)), }, Err(_) => DeleteBucketTaggingError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketTaggingError { fn from(err: XmlParseError) -> DeleteBucketTaggingError { let XmlParseError(message) = err; DeleteBucketTaggingError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketTaggingError { fn from(err: CredentialsError) -> DeleteBucketTaggingError { DeleteBucketTaggingError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketTaggingError { fn from(err: HttpDispatchError) -> DeleteBucketTaggingError { DeleteBucketTaggingError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketTaggingError { fn from(err: io::Error) -> DeleteBucketTaggingError { DeleteBucketTaggingError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketTaggingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketTaggingError { fn description(&self) -> &str { match *self { DeleteBucketTaggingError::Validation(ref cause) => cause, DeleteBucketTaggingError::Credentials(ref err) => err.description(), DeleteBucketTaggingError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketTaggingError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketWebsite #[derive(Debug, PartialEq)] pub enum DeleteBucketWebsiteError {
/* Variants and impls of `DeleteBucketWebsiteError` (the `pub enum` header
   sits at the end of the previous line): same generated pattern — XML
   `<Error>` parse with wildcard code match -> `Unknown(body)`, four `From`
   conversions, `Display` delegating to `description()`. The span then opens
   the `DeleteObjectError` block, whose `Error::description` match continues
   on the next line. NOTE(review): machine-generated code. */
/// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketWebsiteError { pub fn from_body(body: &str) -> DeleteBucketWebsiteError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketWebsiteError::Unknown(String::from(body)), }, Err(_) => DeleteBucketWebsiteError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketWebsiteError { fn from(err: XmlParseError) -> DeleteBucketWebsiteError { let XmlParseError(message) = err; DeleteBucketWebsiteError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketWebsiteError { fn from(err: CredentialsError) -> DeleteBucketWebsiteError { DeleteBucketWebsiteError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketWebsiteError { fn from(err: HttpDispatchError) -> DeleteBucketWebsiteError { DeleteBucketWebsiteError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketWebsiteError { fn from(err: io::Error) -> DeleteBucketWebsiteError { DeleteBucketWebsiteError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketWebsiteError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketWebsiteError { fn description(&self) -> &str { match *self { DeleteBucketWebsiteError::Validation(ref cause) => cause, DeleteBucketWebsiteError::Credentials(ref err) =>
err.description(), DeleteBucketWebsiteError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketWebsiteError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteObject #[derive(Debug, PartialEq)] pub enum DeleteObjectError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteObjectError { pub fn from_body(body: &str) -> DeleteObjectError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteObjectError::Unknown(String::from(body)), }, Err(_) => DeleteObjectError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteObjectError { fn from(err: XmlParseError) -> DeleteObjectError { let XmlParseError(message) = err; DeleteObjectError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteObjectError { fn from(err: CredentialsError) -> DeleteObjectError { DeleteObjectError::Credentials(err) } } impl From<HttpDispatchError> for DeleteObjectError { fn from(err: HttpDispatchError) -> DeleteObjectError { DeleteObjectError::HttpDispatch(err) } } impl From<io::Error> for DeleteObjectError { fn from(err: io::Error) -> DeleteObjectError { DeleteObjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteObjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteObjectError { fn
/* Tail of `Error for DeleteObjectError` (the `fn` keyword sits at the end
   of the previous line), then the full generated `DeleteObjectTaggingError`
   block — enum, `from_body` (wildcard code match -> `Unknown(body)`),
   `From` conversions, `Display`/`Error` — and the variant list of the
   `DeleteObjectsError` enum, which closes on the next line. NOTE(review):
   machine-generated AWS S3 client code. */
description(&self) -> &str { match *self { DeleteObjectError::Validation(ref cause) => cause, DeleteObjectError::Credentials(ref err) => err.description(), DeleteObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DeleteObjectError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteObjectTagging #[derive(Debug, PartialEq)] pub enum DeleteObjectTaggingError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteObjectTaggingError { pub fn from_body(body: &str) -> DeleteObjectTaggingError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..]
{ _ => DeleteObjectTaggingError::Unknown(String::from(body)), }, Err(_) => DeleteObjectTaggingError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteObjectTaggingError { fn from(err: XmlParseError) -> DeleteObjectTaggingError { let XmlParseError(message) = err; DeleteObjectTaggingError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteObjectTaggingError { fn from(err: CredentialsError) -> DeleteObjectTaggingError { DeleteObjectTaggingError::Credentials(err) } } impl From<HttpDispatchError> for DeleteObjectTaggingError { fn from(err: HttpDispatchError) -> DeleteObjectTaggingError { DeleteObjectTaggingError::HttpDispatch(err) } } impl From<io::Error> for DeleteObjectTaggingError { fn from(err: io::Error) -> DeleteObjectTaggingError { DeleteObjectTaggingError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteObjectTaggingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteObjectTaggingError { fn description(&self) -> &str { match *self { DeleteObjectTaggingError::Validation(ref cause) => cause, DeleteObjectTaggingError::Credentials(ref err) => err.description(), DeleteObjectTaggingError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteObjectTaggingError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteObjects #[derive(Debug, PartialEq)] pub enum DeleteObjectsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided.
/* Closing variant and impls of `DeleteObjectsError` (enum header on the
   previous line): `from_body` XML parse with wildcard code match ->
   `Unknown(body)`, `From` conversions, `Display`/`Error` impls. Then the
   start of `GetBucketAccelerateConfigurationError` — its remaining variants
   and impls continue on the next line. NOTE(review): machine-generated
   AWS S3 client code. */
Unknown(String), } impl DeleteObjectsError { pub fn from_body(body: &str) -> DeleteObjectsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteObjectsError::Unknown(String::from(body)), }, Err(_) => DeleteObjectsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteObjectsError { fn from(err: XmlParseError) -> DeleteObjectsError { let XmlParseError(message) = err; DeleteObjectsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteObjectsError { fn from(err: CredentialsError) -> DeleteObjectsError { DeleteObjectsError::Credentials(err) } } impl From<HttpDispatchError> for DeleteObjectsError { fn from(err: HttpDispatchError) -> DeleteObjectsError { DeleteObjectsError::HttpDispatch(err) } } impl From<io::Error> for DeleteObjectsError { fn from(err: io::Error) -> DeleteObjectsError { DeleteObjectsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteObjectsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteObjectsError { fn description(&self) -> &str { match *self { DeleteObjectsError::Validation(ref cause) => cause, DeleteObjectsError::Credentials(ref err) => err.description(), DeleteObjectsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DeleteObjectsError::Unknown(ref cause) => cause, } } } /// Errors returned by GetBucketAccelerateConfiguration #[derive(Debug, PartialEq)] pub enum GetBucketAccelerateConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials.
Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetBucketAccelerateConfigurationError { pub fn from_body(body: &str) -> GetBucketAccelerateConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => GetBucketAccelerateConfigurationError::Unknown(String::from(body)), }, Err(_) => GetBucketAccelerateConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetBucketAccelerateConfigurationError { fn from(err: XmlParseError) -> GetBucketAccelerateConfigurationError { let XmlParseError(message) = err; GetBucketAccelerateConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetBucketAccelerateConfigurationError { fn from(err: CredentialsError) -> GetBucketAccelerateConfigurationError { GetBucketAccelerateConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for GetBucketAccelerateConfigurationError { fn from(err: HttpDispatchError) -> GetBucketAccelerateConfigurationError { GetBucketAccelerateConfigurationError::HttpDispatch(err) } } impl From<io::Error> for GetBucketAccelerateConfigurationError { fn from(err: io::Error) -> GetBucketAccelerateConfigurationError { GetBucketAccelerateConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetBucketAccelerateConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetBucketAccelerateConfigurationError { fn description(&self) -> &str { match *self {
/* Tail of `Error::description` for GetBucketAccelerateConfigurationError
   (match opened on the previous line), then the full generated
   `GetBucketAclError` type — enum, `from_body` (wildcard code match ->
   `Unknown(body)`), `From` conversions, `Display`/`Error` — and the opening
   of `GetBucketAnalyticsConfigurationError`, whose impls continue on the
   next line. NOTE(review): machine-generated AWS S3 client code. */
GetBucketAccelerateConfigurationError::Validation(ref cause) => cause, GetBucketAccelerateConfigurationError::Credentials(ref err) => err.description(), GetBucketAccelerateConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetBucketAccelerateConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by GetBucketAcl #[derive(Debug, PartialEq)] pub enum GetBucketAclError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetBucketAclError { pub fn from_body(body: &str) -> GetBucketAclError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..]
{ _ => GetBucketAclError::Unknown(String::from(body)), }, Err(_) => GetBucketAclError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetBucketAclError { fn from(err: XmlParseError) -> GetBucketAclError { let XmlParseError(message) = err; GetBucketAclError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetBucketAclError { fn from(err: CredentialsError) -> GetBucketAclError { GetBucketAclError::Credentials(err) } } impl From<HttpDispatchError> for GetBucketAclError { fn from(err: HttpDispatchError) -> GetBucketAclError { GetBucketAclError::HttpDispatch(err) } } impl From<io::Error> for GetBucketAclError { fn from(err: io::Error) -> GetBucketAclError { GetBucketAclError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetBucketAclError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetBucketAclError { fn description(&self) -> &str { match *self { GetBucketAclError::Validation(ref cause) => cause, GetBucketAclError::Credentials(ref err) => err.description(), GetBucketAclError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetBucketAclError::Unknown(ref cause) => cause, } } } /// Errors returned by GetBucketAnalyticsConfiguration #[derive(Debug, PartialEq)] pub enum GetBucketAnalyticsConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided.
/* Closing variant and impls of `GetBucketAnalyticsConfigurationError`
   (enum header on the previous line): `from_body` XML parse with wildcard
   code match -> `Unknown(body)`, `From` conversions, `Display`/`Error`.
   Then the full generated `GetBucketCorsError` block, ending mid-way
   through its `Error::description` impl which continues on the next line.
   NOTE(review): machine-generated AWS S3 client code. */
Unknown(String), } impl GetBucketAnalyticsConfigurationError { pub fn from_body(body: &str) -> GetBucketAnalyticsConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => GetBucketAnalyticsConfigurationError::Unknown(String::from(body)), }, Err(_) => GetBucketAnalyticsConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetBucketAnalyticsConfigurationError { fn from(err: XmlParseError) -> GetBucketAnalyticsConfigurationError { let XmlParseError(message) = err; GetBucketAnalyticsConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetBucketAnalyticsConfigurationError { fn from(err: CredentialsError) -> GetBucketAnalyticsConfigurationError { GetBucketAnalyticsConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for GetBucketAnalyticsConfigurationError { fn from(err: HttpDispatchError) -> GetBucketAnalyticsConfigurationError { GetBucketAnalyticsConfigurationError::HttpDispatch(err) } } impl From<io::Error> for GetBucketAnalyticsConfigurationError { fn from(err: io::Error) -> GetBucketAnalyticsConfigurationError { GetBucketAnalyticsConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetBucketAnalyticsConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetBucketAnalyticsConfigurationError { fn description(&self) -> &str { match *self { GetBucketAnalyticsConfigurationError::Validation(ref cause) => cause, GetBucketAnalyticsConfigurationError::Credentials(ref err) => err.description(), GetBucketAnalyticsConfigurationError::HttpDispatch(ref
dispatch_error) => { dispatch_error.description() } GetBucketAnalyticsConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by GetBucketCors #[derive(Debug, PartialEq)] pub enum GetBucketCorsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetBucketCorsError { pub fn from_body(body: &str) -> GetBucketCorsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => GetBucketCorsError::Unknown(String::from(body)), }, Err(_) => GetBucketCorsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetBucketCorsError { fn from(err: XmlParseError) -> GetBucketCorsError { let XmlParseError(message) = err; GetBucketCorsError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetBucketCorsError { fn from(err: CredentialsError) -> GetBucketCorsError { GetBucketCorsError::Credentials(err) } } impl From<HttpDispatchError> for GetBucketCorsError { fn from(err: HttpDispatchError) -> GetBucketCorsError { GetBucketCorsError::HttpDispatch(err) } } impl From<io::Error> for GetBucketCorsError { fn from(err: io::Error) -> GetBucketCorsError { GetBucketCorsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetBucketCorsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetBucketCorsError { fn description(&self) -> &str { match
/* Tail of `Error::description` for GetBucketCorsError (the `match` keyword
   sits at the end of the previous line), then the full generated
   `GetBucketEncryptionError` type — enum, `from_body` (wildcard code match
   -> `Unknown(body)`), `From` conversions, `Display`/`Error` — and the
   opening of `GetBucketInventoryConfigurationError`, whose impls continue
   on the next line. NOTE(review): machine-generated AWS S3 client code. */
*self { GetBucketCorsError::Validation(ref cause) => cause, GetBucketCorsError::Credentials(ref err) => err.description(), GetBucketCorsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetBucketCorsError::Unknown(ref cause) => cause, } } } /// Errors returned by GetBucketEncryption #[derive(Debug, PartialEq)] pub enum GetBucketEncryptionError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetBucketEncryptionError { pub fn from_body(body: &str) -> GetBucketEncryptionError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..]
{ _ => GetBucketEncryptionError::Unknown(String::from(body)), }, Err(_) => GetBucketEncryptionError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetBucketEncryptionError { fn from(err: XmlParseError) -> GetBucketEncryptionError { let XmlParseError(message) = err; GetBucketEncryptionError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetBucketEncryptionError { fn from(err: CredentialsError) -> GetBucketEncryptionError { GetBucketEncryptionError::Credentials(err) } } impl From<HttpDispatchError> for GetBucketEncryptionError { fn from(err: HttpDispatchError) -> GetBucketEncryptionError { GetBucketEncryptionError::HttpDispatch(err) } } impl From<io::Error> for GetBucketEncryptionError { fn from(err: io::Error) -> GetBucketEncryptionError { GetBucketEncryptionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetBucketEncryptionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetBucketEncryptionError { fn description(&self) -> &str { match *self { GetBucketEncryptionError::Validation(ref cause) => cause, GetBucketEncryptionError::Credentials(ref err) => err.description(), GetBucketEncryptionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetBucketEncryptionError::Unknown(ref cause) => cause, } } } /// Errors returned by GetBucketInventoryConfiguration #[derive(Debug, PartialEq)] pub enum GetBucketInventoryConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided.
Unknown(String),
}

impl GetBucketInventoryConfigurationError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketInventoryConfigurationError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketInventoryConfigurationError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketInventoryConfigurationError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketInventoryConfigurationError {
    fn from(err: XmlParseError) -> GetBucketInventoryConfigurationError {
        let XmlParseError(message) = err;
        GetBucketInventoryConfigurationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketInventoryConfigurationError {
    fn from(err: CredentialsError) -> GetBucketInventoryConfigurationError {
        GetBucketInventoryConfigurationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketInventoryConfigurationError {
    fn from(err: HttpDispatchError) -> GetBucketInventoryConfigurationError {
        GetBucketInventoryConfigurationError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketInventoryConfigurationError {
    fn from(err: io::Error) -> GetBucketInventoryConfigurationError {
        GetBucketInventoryConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketInventoryConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketInventoryConfigurationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketInventoryConfigurationError::Validation(ref cause) => cause,
            GetBucketInventoryConfigurationError::Credentials(ref err) => err.description(),
            GetBucketInventoryConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketInventoryConfigurationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketLifecycle
#[derive(Debug, PartialEq)]
pub enum GetBucketLifecycleError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl GetBucketLifecycleError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketLifecycleError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketLifecycleError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketLifecycleError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketLifecycleError {
    fn from(err: XmlParseError) -> GetBucketLifecycleError {
        let XmlParseError(message) = err;
        GetBucketLifecycleError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketLifecycleError {
    fn from(err: CredentialsError) -> GetBucketLifecycleError {
        GetBucketLifecycleError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketLifecycleError {
    fn from(err: HttpDispatchError) -> GetBucketLifecycleError {
        GetBucketLifecycleError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketLifecycleError {
    fn from(err: io::Error) -> GetBucketLifecycleError {
        GetBucketLifecycleError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketLifecycleError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}",
self.description())
}
}

impl Error for GetBucketLifecycleError {
    fn description(&self) -> &str {
        match *self {
            GetBucketLifecycleError::Validation(ref cause) => cause,
            GetBucketLifecycleError::Credentials(ref err) => err.description(),
            GetBucketLifecycleError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketLifecycleError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketLifecycleConfiguration
#[derive(Debug, PartialEq)]
pub enum GetBucketLifecycleConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl GetBucketLifecycleConfigurationError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketLifecycleConfigurationError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketLifecycleConfigurationError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketLifecycleConfigurationError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketLifecycleConfigurationError {
    fn from(err: XmlParseError) -> GetBucketLifecycleConfigurationError {
        let XmlParseError(message) = err;
        GetBucketLifecycleConfigurationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketLifecycleConfigurationError {
    fn from(err: CredentialsError) -> GetBucketLifecycleConfigurationError {
        GetBucketLifecycleConfigurationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketLifecycleConfigurationError {
    fn from(err: HttpDispatchError) -> GetBucketLifecycleConfigurationError {
        GetBucketLifecycleConfigurationError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketLifecycleConfigurationError {
    fn from(err: io::Error) -> GetBucketLifecycleConfigurationError {
        GetBucketLifecycleConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketLifecycleConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketLifecycleConfigurationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketLifecycleConfigurationError::Validation(ref cause) => cause,
            GetBucketLifecycleConfigurationError::Credentials(ref err) => err.description(),
            GetBucketLifecycleConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketLifecycleConfigurationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketLocation
#[derive(Debug, PartialEq)]
pub enum GetBucketLocationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
Credentials(CredentialsError),
/// A validation error occurred. Details from AWS are provided.
Validation(String),
/// An unknown error occurred. The raw HTTP response is provided.
Unknown(String),
}

impl GetBucketLocationError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketLocationError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketLocationError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketLocationError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketLocationError {
    fn from(err: XmlParseError) -> GetBucketLocationError {
        let XmlParseError(message) = err;
        GetBucketLocationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketLocationError {
    fn from(err: CredentialsError) -> GetBucketLocationError {
        GetBucketLocationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketLocationError {
    fn from(err: HttpDispatchError) -> GetBucketLocationError {
        GetBucketLocationError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketLocationError {
    fn from(err: io::Error) -> GetBucketLocationError {
        GetBucketLocationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketLocationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketLocationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketLocationError::Validation(ref cause) => cause,
            GetBucketLocationError::Credentials(ref err) => err.description(),
            GetBucketLocationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketLocationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketLogging
#[derive(Debug, PartialEq)]
pub enum GetBucketLoggingError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl GetBucketLoggingError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketLoggingError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketLoggingError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketLoggingError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketLoggingError {
    fn from(err: XmlParseError) -> GetBucketLoggingError {
        let XmlParseError(message) = err;
        GetBucketLoggingError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketLoggingError {
    fn from(err: CredentialsError) -> GetBucketLoggingError {
        GetBucketLoggingError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketLoggingError {
    fn from(err: HttpDispatchError) -> GetBucketLoggingError {
        GetBucketLoggingError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketLoggingError {
    fn from(err: io::Error) -> GetBucketLoggingError {
        GetBucketLoggingError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketLoggingError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketLoggingError {
    fn description(&self) -> &str {
        match *self {
            GetBucketLoggingError::Validation(ref cause) => cause,
            GetBucketLoggingError::Credentials(ref err) => err.description(),
            GetBucketLoggingError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketLoggingError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketMetricsConfiguration
#[derive(Debug, PartialEq)]
pub enum GetBucketMetricsConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl GetBucketMetricsConfigurationError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketMetricsConfigurationError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..]
{
    // No operation-specific codes: preserve the raw body.
    _ => GetBucketMetricsConfigurationError::Unknown(body.to_owned()),
},
Err(_) => GetBucketMetricsConfigurationError::Unknown(body.to_owned()),
}
}

/// Deserializes the standard S3 `<Error>` element from the XML stack.
fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
where
    T: Peek + Next,
{
    XmlErrorDeserializer::deserialize("Error", xml_stack)
}
}

impl From<XmlParseError> for GetBucketMetricsConfigurationError {
    fn from(err: XmlParseError) -> GetBucketMetricsConfigurationError {
        let XmlParseError(message) = err;
        GetBucketMetricsConfigurationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketMetricsConfigurationError {
    fn from(err: CredentialsError) -> GetBucketMetricsConfigurationError {
        GetBucketMetricsConfigurationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketMetricsConfigurationError {
    fn from(err: HttpDispatchError) -> GetBucketMetricsConfigurationError {
        GetBucketMetricsConfigurationError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketMetricsConfigurationError {
    fn from(err: io::Error) -> GetBucketMetricsConfigurationError {
        GetBucketMetricsConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketMetricsConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketMetricsConfigurationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketMetricsConfigurationError::Validation(ref cause) => cause,
            GetBucketMetricsConfigurationError::Credentials(ref err) => err.description(),
            GetBucketMetricsConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketMetricsConfigurationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketNotification
#[derive(Debug, PartialEq)]
pub enum GetBucketNotificationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl GetBucketNotificationError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketNotificationError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketNotificationError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketNotificationError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketNotificationError {
    fn from(err: XmlParseError) -> GetBucketNotificationError {
        let XmlParseError(message) = err;
        GetBucketNotificationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketNotificationError {
    fn from(err: CredentialsError) -> GetBucketNotificationError {
        GetBucketNotificationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketNotificationError {
    fn from(err: HttpDispatchError) -> GetBucketNotificationError {
        GetBucketNotificationError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketNotificationError {
    fn from(err: io::Error) -> GetBucketNotificationError {
        GetBucketNotificationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketNotificationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketNotificationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketNotificationError::Validation(ref cause) => cause,
            GetBucketNotificationError::Credentials(ref err) => err.description(),
            GetBucketNotificationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketNotificationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketNotificationConfiguration
#[derive(Debug, PartialEq)]
pub enum GetBucketNotificationConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl GetBucketNotificationConfigurationError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketNotificationConfigurationError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketNotificationConfigurationError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketNotificationConfigurationError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketNotificationConfigurationError {
    fn from(err: XmlParseError) -> GetBucketNotificationConfigurationError {
        let XmlParseError(message) = err;
        GetBucketNotificationConfigurationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketNotificationConfigurationError {
    fn from(err: CredentialsError) -> GetBucketNotificationConfigurationError {
        GetBucketNotificationConfigurationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketNotificationConfigurationError {
    fn from(err: HttpDispatchError) -> GetBucketNotificationConfigurationError {
        GetBucketNotificationConfigurationError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketNotificationConfigurationError {
    fn from(err: io::Error) ->
GetBucketNotificationConfigurationError {
    GetBucketNotificationConfigurationError::HttpDispatch(HttpDispatchError::from(err))
}
}

impl fmt::Display for GetBucketNotificationConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketNotificationConfigurationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketNotificationConfigurationError::Validation(ref cause) => cause,
            GetBucketNotificationConfigurationError::Credentials(ref err) => err.description(),
            GetBucketNotificationConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketNotificationConfigurationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketPolicy
#[derive(Debug, PartialEq)]
pub enum GetBucketPolicyError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl GetBucketPolicyError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketPolicyError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketPolicyError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketPolicyError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketPolicyError {
    fn from(err: XmlParseError) -> GetBucketPolicyError {
        let XmlParseError(message) = err;
        GetBucketPolicyError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketPolicyError {
    fn from(err: CredentialsError) -> GetBucketPolicyError {
        GetBucketPolicyError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketPolicyError {
    fn from(err: HttpDispatchError) -> GetBucketPolicyError {
        GetBucketPolicyError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketPolicyError {
    fn from(err: io::Error) -> GetBucketPolicyError {
        GetBucketPolicyError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketPolicyError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketPolicyError {
    fn description(&self) -> &str {
        match *self {
            GetBucketPolicyError::Validation(ref cause) => cause,
            GetBucketPolicyError::Credentials(ref err) => err.description(),
            GetBucketPolicyError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketPolicyError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketReplication
#[derive(Debug, PartialEq)]
pub enum GetBucketReplicationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
Unknown(String),
}

impl GetBucketReplicationError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketReplicationError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketReplicationError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketReplicationError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketReplicationError {
    fn from(err: XmlParseError) -> GetBucketReplicationError {
        let XmlParseError(message) = err;
        GetBucketReplicationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketReplicationError {
    fn from(err: CredentialsError) -> GetBucketReplicationError {
        GetBucketReplicationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketReplicationError {
    fn from(err: HttpDispatchError) -> GetBucketReplicationError {
        GetBucketReplicationError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketReplicationError {
    fn from(err: io::Error) -> GetBucketReplicationError {
        GetBucketReplicationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketReplicationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketReplicationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketReplicationError::Validation(ref cause) => cause,
            GetBucketReplicationError::Credentials(ref err) => err.description(),
            GetBucketReplicationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketReplicationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketRequestPayment
#[derive(Debug, PartialEq)]
pub enum GetBucketRequestPaymentError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl GetBucketRequestPaymentError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketRequestPaymentError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketRequestPaymentError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketRequestPaymentError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketRequestPaymentError {
    fn from(err: XmlParseError) -> GetBucketRequestPaymentError {
        let XmlParseError(message) = err;
        GetBucketRequestPaymentError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketRequestPaymentError {
    fn from(err: CredentialsError) -> GetBucketRequestPaymentError {
        GetBucketRequestPaymentError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketRequestPaymentError {
    fn from(err: HttpDispatchError) -> GetBucketRequestPaymentError {
        GetBucketRequestPaymentError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketRequestPaymentError {
    fn from(err: io::Error) -> GetBucketRequestPaymentError {
        GetBucketRequestPaymentError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketRequestPaymentError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketRequestPaymentError {
    fn description(&self) -> &str {
        match *self {
            GetBucketRequestPaymentError::Validation(ref cause) => cause,
            GetBucketRequestPaymentError::Credentials(ref err) => err.description(),
            GetBucketRequestPaymentError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketRequestPaymentError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketTagging
#[derive(Debug, PartialEq)]
pub enum GetBucketTaggingError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl GetBucketTaggingError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketTaggingError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..]
{
    // No operation-specific codes: preserve the raw body.
    _ => GetBucketTaggingError::Unknown(body.to_owned()),
},
Err(_) => GetBucketTaggingError::Unknown(body.to_owned()),
}
}

/// Deserializes the standard S3 `<Error>` element from the XML stack.
fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
where
    T: Peek + Next,
{
    XmlErrorDeserializer::deserialize("Error", xml_stack)
}
}

impl From<XmlParseError> for GetBucketTaggingError {
    fn from(err: XmlParseError) -> GetBucketTaggingError {
        let XmlParseError(message) = err;
        GetBucketTaggingError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketTaggingError {
    fn from(err: CredentialsError) -> GetBucketTaggingError {
        GetBucketTaggingError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketTaggingError {
    fn from(err: HttpDispatchError) -> GetBucketTaggingError {
        GetBucketTaggingError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketTaggingError {
    fn from(err: io::Error) -> GetBucketTaggingError {
        GetBucketTaggingError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketTaggingError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketTaggingError {
    fn description(&self) -> &str {
        match *self {
            GetBucketTaggingError::Validation(ref cause) => cause,
            GetBucketTaggingError::Credentials(ref err) => err.description(),
            GetBucketTaggingError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketTaggingError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketVersioning
#[derive(Debug, PartialEq)]
pub enum GetBucketVersioningError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl GetBucketVersioningError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketVersioningError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketVersioningError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketVersioningError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketVersioningError {
    fn from(err: XmlParseError) -> GetBucketVersioningError {
        let XmlParseError(message) = err;
        GetBucketVersioningError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketVersioningError {
    fn from(err: CredentialsError) -> GetBucketVersioningError {
        GetBucketVersioningError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketVersioningError {
    fn from(err: HttpDispatchError) -> GetBucketVersioningError {
        GetBucketVersioningError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketVersioningError {
    fn from(err: io::Error) -> GetBucketVersioningError {
        GetBucketVersioningError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketVersioningError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketVersioningError {
    fn description(&self) -> &str {
        match *self {
            GetBucketVersioningError::Validation(ref cause) => cause,
            GetBucketVersioningError::Credentials(ref err) => err.description(),
            GetBucketVersioningError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketVersioningError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketWebsite
#[derive(Debug, PartialEq)]
pub enum GetBucketWebsiteError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl GetBucketWebsiteError {
    /// Parses a raw XML error body returned by the service.
    ///
    /// This operation models no specific error codes, so every parse
    /// outcome maps to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> GetBucketWebsiteError {
        let xml_reader = EventReader::new(body.as_bytes());
        let mut xml_stack = XmlResponse::new(xml_reader.into_iter().peekable());
        find_start_element(&mut xml_stack);
        match Self::deserialize(&mut xml_stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                // No operation-specific codes: preserve the raw body.
                _ => GetBucketWebsiteError::Unknown(body.to_owned()),
            },
            Err(_) => GetBucketWebsiteError::Unknown(body.to_owned()),
        }
    }

    /// Deserializes the standard S3 `<Error>` element from the XML stack.
    fn deserialize<T>(xml_stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", xml_stack)
    }
}

impl From<XmlParseError> for GetBucketWebsiteError {
    fn from(err: XmlParseError) -> GetBucketWebsiteError {
        let XmlParseError(message) = err;
        GetBucketWebsiteError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketWebsiteError {
    fn from(err: CredentialsError) -> GetBucketWebsiteError {
        GetBucketWebsiteError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketWebsiteError {
    fn from(err: HttpDispatchError) -> GetBucketWebsiteError {
        GetBucketWebsiteError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketWebsiteError {
    fn from(err: io::Error) -> GetBucketWebsiteError {
        GetBucketWebsiteError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketWebsiteError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketWebsiteError {
    fn description(&self) -> &str {
        match *self {
            GetBucketWebsiteError::Validation(ref cause) => cause,
            GetBucketWebsiteError::Credentials(ref err) => err.description(),
            GetBucketWebsiteError::HttpDispatch(ref dispatch_error) =>
dispatch_error.description(), GetBucketWebsiteError::Unknown(ref cause) => cause, } } } /// Errors returned by GetObject #[derive(Debug, PartialEq)] pub enum GetObjectError { /// <p>The specified key does not exist.</p> NoSuchKey(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetObjectError { pub fn from_body(body: &str) -> GetObjectError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "NoSuchKey" => GetObjectError::NoSuchKey(String::from(parsed_error.message)), _ => GetObjectError::Unknown(String::from(body)), }, Err(_) => GetObjectError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetObjectError { fn from(err: XmlParseError) -> GetObjectError { let XmlParseError(message) = err; GetObjectError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetObjectError { fn from(err: CredentialsError) -> GetObjectError { GetObjectError::Credentials(err) } } impl From<HttpDispatchError> for GetObjectError { fn from(err: HttpDispatchError) -> GetObjectError { GetObjectError::HttpDispatch(err) } } impl From<io::Error> for GetObjectError { fn from(err: io::Error) -> GetObjectError { GetObjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetObjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetObjectError { fn 
description(&self) -> &str { match *self { GetObjectError::NoSuchKey(ref cause) => cause, GetObjectError::Validation(ref cause) => cause, GetObjectError::Credentials(ref err) => err.description(), GetObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetObjectError::Unknown(ref cause) => cause, } } } /// Errors returned by GetObjectAcl #[derive(Debug, PartialEq)] pub enum GetObjectAclError { /// <p>The specified key does not exist.</p> NoSuchKey(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetObjectAclError { pub fn from_body(body: &str) -> GetObjectAclError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "NoSuchKey" => GetObjectAclError::NoSuchKey(String::from(parsed_error.message)), _ => GetObjectAclError::Unknown(String::from(body)), }, Err(_) => GetObjectAclError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetObjectAclError { fn from(err: XmlParseError) -> GetObjectAclError { let XmlParseError(message) = err; GetObjectAclError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetObjectAclError { fn from(err: CredentialsError) -> GetObjectAclError { GetObjectAclError::Credentials(err) } } impl From<HttpDispatchError> for GetObjectAclError { fn from(err: HttpDispatchError) -> GetObjectAclError { GetObjectAclError::HttpDispatch(err) } } impl From<io::Error> for GetObjectAclError { fn from(err: io::Error) -> GetObjectAclError { GetObjectAclError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetObjectAclError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetObjectAclError { fn description(&self) -> &str { match *self { GetObjectAclError::NoSuchKey(ref cause) => cause, GetObjectAclError::Validation(ref cause) => cause, GetObjectAclError::Credentials(ref err) => err.description(), GetObjectAclError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetObjectAclError::Unknown(ref cause) => cause, } } } /// Errors returned by GetObjectTagging #[derive(Debug, PartialEq)] pub enum GetObjectTaggingError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl GetObjectTaggingError { pub fn from_body(body: &str) -> GetObjectTaggingError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => GetObjectTaggingError::Unknown(String::from(body)), }, Err(_) => GetObjectTaggingError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetObjectTaggingError { fn from(err: XmlParseError) -> GetObjectTaggingError { let XmlParseError(message) = err; GetObjectTaggingError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetObjectTaggingError { fn from(err: CredentialsError) -> GetObjectTaggingError { GetObjectTaggingError::Credentials(err) } } impl From<HttpDispatchError> for GetObjectTaggingError { fn from(err: HttpDispatchError) -> GetObjectTaggingError { GetObjectTaggingError::HttpDispatch(err) } } impl From<io::Error> for GetObjectTaggingError { fn from(err: io::Error) -> GetObjectTaggingError { GetObjectTaggingError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetObjectTaggingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetObjectTaggingError { fn description(&self) -> &str { match *self { GetObjectTaggingError::Validation(ref cause) => cause, GetObjectTaggingError::Credentials(ref err) => err.description(), GetObjectTaggingError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetObjectTaggingError::Unknown(ref cause) => cause, } } } /// Errors returned by GetObjectTorrent #[derive(Debug, PartialEq)] pub enum GetObjectTorrentError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was 
encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetObjectTorrentError { pub fn from_body(body: &str) -> GetObjectTorrentError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => GetObjectTorrentError::Unknown(String::from(body)), }, Err(_) => GetObjectTorrentError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetObjectTorrentError { fn from(err: XmlParseError) -> GetObjectTorrentError { let XmlParseError(message) = err; GetObjectTorrentError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetObjectTorrentError { fn from(err: CredentialsError) -> GetObjectTorrentError { GetObjectTorrentError::Credentials(err) } } impl From<HttpDispatchError> for GetObjectTorrentError { fn from(err: HttpDispatchError) -> GetObjectTorrentError { GetObjectTorrentError::HttpDispatch(err) } } impl From<io::Error> for GetObjectTorrentError { fn from(err: io::Error) -> GetObjectTorrentError { GetObjectTorrentError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetObjectTorrentError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetObjectTorrentError { fn description(&self) -> &str { match *self { GetObjectTorrentError::Validation(ref cause) => cause, GetObjectTorrentError::Credentials(ref err) => err.description(), GetObjectTorrentError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetObjectTorrentError::Unknown(ref cause) => cause, } 
} } /// Errors returned by HeadBucket #[derive(Debug, PartialEq)] pub enum HeadBucketError { /// <p>The specified bucket does not exist.</p> NoSuchBucket(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl HeadBucketError { pub fn from_body(body: &str) -> HeadBucketError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "NoSuchBucket" => HeadBucketError::NoSuchBucket(String::from(parsed_error.message)), _ => HeadBucketError::Unknown(String::from(body)), }, Err(_) => HeadBucketError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for HeadBucketError { fn from(err: XmlParseError) -> HeadBucketError { let XmlParseError(message) = err; HeadBucketError::Unknown(message.to_string()) } } impl From<CredentialsError> for HeadBucketError { fn from(err: CredentialsError) -> HeadBucketError { HeadBucketError::Credentials(err) } } impl From<HttpDispatchError> for HeadBucketError { fn from(err: HttpDispatchError) -> HeadBucketError { HeadBucketError::HttpDispatch(err) } } impl From<io::Error> for HeadBucketError { fn from(err: io::Error) -> HeadBucketError { HeadBucketError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for HeadBucketError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for HeadBucketError { fn description(&self) -> &str { match *self { 
HeadBucketError::NoSuchBucket(ref cause) => cause, HeadBucketError::Validation(ref cause) => cause, HeadBucketError::Credentials(ref err) => err.description(), HeadBucketError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), HeadBucketError::Unknown(ref cause) => cause, } } } /// Errors returned by HeadObject #[derive(Debug, PartialEq)] pub enum HeadObjectError { /// <p>The specified key does not exist.</p> NoSuchKey(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl HeadObjectError { pub fn from_body(body: &str) -> HeadObjectError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "NoSuchKey" => HeadObjectError::NoSuchKey(String::from(parsed_error.message)), _ => HeadObjectError::Unknown(String::from(body)), }, Err(_) => HeadObjectError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for HeadObjectError { fn from(err: XmlParseError) -> HeadObjectError { let XmlParseError(message) = err; HeadObjectError::Unknown(message.to_string()) } } impl From<CredentialsError> for HeadObjectError { fn from(err: CredentialsError) -> HeadObjectError { HeadObjectError::Credentials(err) } } impl From<HttpDispatchError> for HeadObjectError { fn from(err: HttpDispatchError) -> HeadObjectError { HeadObjectError::HttpDispatch(err) } } impl From<io::Error> for HeadObjectError { fn from(err: io::Error) -> HeadObjectError { HeadObjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for HeadObjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for HeadObjectError { fn description(&self) -> &str { match *self { HeadObjectError::NoSuchKey(ref cause) => cause, HeadObjectError::Validation(ref cause) => cause, HeadObjectError::Credentials(ref err) => err.description(), HeadObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), HeadObjectError::Unknown(ref cause) => cause, } } } /// Errors returned by ListBucketAnalyticsConfigurations #[derive(Debug, PartialEq)] pub enum ListBucketAnalyticsConfigurationsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl ListBucketAnalyticsConfigurationsError { pub fn from_body(body: &str) -> ListBucketAnalyticsConfigurationsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => ListBucketAnalyticsConfigurationsError::Unknown(String::from(body)), }, Err(_) => ListBucketAnalyticsConfigurationsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ListBucketAnalyticsConfigurationsError { fn from(err: XmlParseError) -> ListBucketAnalyticsConfigurationsError { let XmlParseError(message) = err; ListBucketAnalyticsConfigurationsError::Unknown(message.to_string()) } } impl From<CredentialsError> for ListBucketAnalyticsConfigurationsError { fn from(err: CredentialsError) -> ListBucketAnalyticsConfigurationsError { ListBucketAnalyticsConfigurationsError::Credentials(err) } } impl From<HttpDispatchError> for ListBucketAnalyticsConfigurationsError { fn from(err: HttpDispatchError) -> ListBucketAnalyticsConfigurationsError { ListBucketAnalyticsConfigurationsError::HttpDispatch(err) } } impl From<io::Error> for ListBucketAnalyticsConfigurationsError { fn from(err: io::Error) -> ListBucketAnalyticsConfigurationsError { ListBucketAnalyticsConfigurationsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListBucketAnalyticsConfigurationsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListBucketAnalyticsConfigurationsError { fn description(&self) -> &str { match *self { ListBucketAnalyticsConfigurationsError::Validation(ref cause) => cause, ListBucketAnalyticsConfigurationsError::Credentials(ref err) => err.description(), 
ListBucketAnalyticsConfigurationsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ListBucketAnalyticsConfigurationsError::Unknown(ref cause) => cause, } } } /// Errors returned by ListBucketInventoryConfigurations #[derive(Debug, PartialEq)] pub enum ListBucketInventoryConfigurationsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ListBucketInventoryConfigurationsError { pub fn from_body(body: &str) -> ListBucketInventoryConfigurationsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => ListBucketInventoryConfigurationsError::Unknown(String::from(body)), }, Err(_) => ListBucketInventoryConfigurationsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ListBucketInventoryConfigurationsError { fn from(err: XmlParseError) -> ListBucketInventoryConfigurationsError { let XmlParseError(message) = err; ListBucketInventoryConfigurationsError::Unknown(message.to_string()) } } impl From<CredentialsError> for ListBucketInventoryConfigurationsError { fn from(err: CredentialsError) -> ListBucketInventoryConfigurationsError { ListBucketInventoryConfigurationsError::Credentials(err) } } impl From<HttpDispatchError> for ListBucketInventoryConfigurationsError { fn from(err: HttpDispatchError) -> ListBucketInventoryConfigurationsError { ListBucketInventoryConfigurationsError::HttpDispatch(err) } } impl From<io::Error> for ListBucketInventoryConfigurationsError { fn from(err: io::Error) -> ListBucketInventoryConfigurationsError { ListBucketInventoryConfigurationsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListBucketInventoryConfigurationsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListBucketInventoryConfigurationsError { fn description(&self) -> &str { match *self { ListBucketInventoryConfigurationsError::Validation(ref cause) => cause, ListBucketInventoryConfigurationsError::Credentials(ref err) => err.description(), ListBucketInventoryConfigurationsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ListBucketInventoryConfigurationsError::Unknown(ref cause) => cause, } } } /// Errors returned by ListBucketMetricsConfigurations #[derive(Debug, PartialEq)] pub enum ListBucketMetricsConfigurationsError { /// An error occurred dispatching the HTTP request 
HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ListBucketMetricsConfigurationsError { pub fn from_body(body: &str) -> ListBucketMetricsConfigurationsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => ListBucketMetricsConfigurationsError::Unknown(String::from(body)), }, Err(_) => ListBucketMetricsConfigurationsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ListBucketMetricsConfigurationsError { fn from(err: XmlParseError) -> ListBucketMetricsConfigurationsError { let XmlParseError(message) = err; ListBucketMetricsConfigurationsError::Unknown(message.to_string()) } } impl From<CredentialsError> for ListBucketMetricsConfigurationsError { fn from(err: CredentialsError) -> ListBucketMetricsConfigurationsError { ListBucketMetricsConfigurationsError::Credentials(err) } } impl From<HttpDispatchError> for ListBucketMetricsConfigurationsError { fn from(err: HttpDispatchError) -> ListBucketMetricsConfigurationsError { ListBucketMetricsConfigurationsError::HttpDispatch(err) } } impl From<io::Error> for ListBucketMetricsConfigurationsError { fn from(err: io::Error) -> ListBucketMetricsConfigurationsError { ListBucketMetricsConfigurationsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListBucketMetricsConfigurationsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for 
ListBucketMetricsConfigurationsError { fn description(&self) -> &str { match *self { ListBucketMetricsConfigurationsError::Validation(ref cause) => cause, ListBucketMetricsConfigurationsError::Credentials(ref err) => err.description(), ListBucketMetricsConfigurationsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ListBucketMetricsConfigurationsError::Unknown(ref cause) => cause, } } } /// Errors returned by ListBuckets #[derive(Debug, PartialEq)] pub enum ListBucketsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ListBucketsError { pub fn from_body(body: &str) -> ListBucketsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => ListBucketsError::Unknown(String::from(body)), }, Err(_) => ListBucketsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ListBucketsError { fn from(err: XmlParseError) -> ListBucketsError { let XmlParseError(message) = err; ListBucketsError::Unknown(message.to_string()) } } impl From<CredentialsError> for ListBucketsError { fn from(err: CredentialsError) -> ListBucketsError { ListBucketsError::Credentials(err) } } impl From<HttpDispatchError> for ListBucketsError { fn from(err: HttpDispatchError) -> ListBucketsError { ListBucketsError::HttpDispatch(err) } } impl From<io::Error> for ListBucketsError { fn from(err: io::Error) -> ListBucketsError { ListBucketsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListBucketsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListBucketsError { fn description(&self) -> &str { match *self { ListBucketsError::Validation(ref cause) => cause, ListBucketsError::Credentials(ref err) => err.description(), ListBucketsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), ListBucketsError::Unknown(ref cause) => cause, } } } /// Errors returned by ListMultipartUploads #[derive(Debug, PartialEq)] pub enum ListMultipartUploadsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl ListMultipartUploadsError { pub fn from_body(body: &str) -> ListMultipartUploadsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => ListMultipartUploadsError::Unknown(String::from(body)), }, Err(_) => ListMultipartUploadsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ListMultipartUploadsError { fn from(err: XmlParseError) -> ListMultipartUploadsError { let XmlParseError(message) = err; ListMultipartUploadsError::Unknown(message.to_string()) } } impl From<CredentialsError> for ListMultipartUploadsError { fn from(err: CredentialsError) -> ListMultipartUploadsError { ListMultipartUploadsError::Credentials(err) } } impl From<HttpDispatchError> for ListMultipartUploadsError { fn from(err: HttpDispatchError) -> ListMultipartUploadsError { ListMultipartUploadsError::HttpDispatch(err) } } impl From<io::Error> for ListMultipartUploadsError { fn from(err: io::Error) -> ListMultipartUploadsError { ListMultipartUploadsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListMultipartUploadsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListMultipartUploadsError { fn description(&self) -> &str { match *self { ListMultipartUploadsError::Validation(ref cause) => cause, ListMultipartUploadsError::Credentials(ref err) => err.description(), ListMultipartUploadsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ListMultipartUploadsError::Unknown(ref cause) => cause, } } } /// Errors returned by ListObjectVersions #[derive(Debug, PartialEq)] pub enum ListObjectVersionsError { /// An error 
occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ListObjectVersionsError { pub fn from_body(body: &str) -> ListObjectVersionsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => ListObjectVersionsError::Unknown(String::from(body)), }, Err(_) => ListObjectVersionsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ListObjectVersionsError { fn from(err: XmlParseError) -> ListObjectVersionsError { let XmlParseError(message) = err; ListObjectVersionsError::Unknown(message.to_string()) } } impl From<CredentialsError> for ListObjectVersionsError { fn from(err: CredentialsError) -> ListObjectVersionsError { ListObjectVersionsError::Credentials(err) } } impl From<HttpDispatchError> for ListObjectVersionsError { fn from(err: HttpDispatchError) -> ListObjectVersionsError { ListObjectVersionsError::HttpDispatch(err) } } impl From<io::Error> for ListObjectVersionsError { fn from(err: io::Error) -> ListObjectVersionsError { ListObjectVersionsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListObjectVersionsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListObjectVersionsError { fn description(&self) -> &str { match *self { ListObjectVersionsError::Validation(ref cause) => cause, ListObjectVersionsError::Credentials(ref err) => err.description(), 
ListObjectVersionsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ListObjectVersionsError::Unknown(ref cause) => cause, } } } /// Errors returned by ListObjects #[derive(Debug, PartialEq)] pub enum ListObjectsError { /// <p>The specified bucket does not exist.</p> NoSuchBucket(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ListObjectsError { pub fn from_body(body: &str) -> ListObjectsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "NoSuchBucket" => { ListObjectsError::NoSuchBucket(String::from(parsed_error.message)) } _ => ListObjectsError::Unknown(String::from(body)), }, Err(_) => ListObjectsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ListObjectsError { fn from(err: XmlParseError) -> ListObjectsError { let XmlParseError(message) = err; ListObjectsError::Unknown(message.to_string()) } } impl From<CredentialsError> for ListObjectsError { fn from(err: CredentialsError) -> ListObjectsError { ListObjectsError::Credentials(err) } } impl From<HttpDispatchError> for ListObjectsError { fn from(err: HttpDispatchError) -> ListObjectsError { ListObjectsError::HttpDispatch(err) } } impl From<io::Error> for ListObjectsError { fn from(err: io::Error) -> ListObjectsError { ListObjectsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListObjectsError { fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListObjectsError { fn description(&self) -> &str { match *self { ListObjectsError::NoSuchBucket(ref cause) => cause, ListObjectsError::Validation(ref cause) => cause, ListObjectsError::Credentials(ref err) => err.description(), ListObjectsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), ListObjectsError::Unknown(ref cause) => cause, } } } /// Errors returned by ListObjectsV2 #[derive(Debug, PartialEq)] pub enum ListObjectsV2Error { /// <p>The specified bucket does not exist.</p> NoSuchBucket(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ListObjectsV2Error { pub fn from_body(body: &str) -> ListObjectsV2Error { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "NoSuchBucket" => { ListObjectsV2Error::NoSuchBucket(String::from(parsed_error.message)) } _ => ListObjectsV2Error::Unknown(String::from(body)), }, Err(_) => ListObjectsV2Error::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ListObjectsV2Error { fn from(err: XmlParseError) -> ListObjectsV2Error { let XmlParseError(message) = err; ListObjectsV2Error::Unknown(message.to_string()) } } impl From<CredentialsError> for ListObjectsV2Error { fn from(err: CredentialsError) -> ListObjectsV2Error { ListObjectsV2Error::Credentials(err) } } impl From<HttpDispatchError> for ListObjectsV2Error { fn from(err: HttpDispatchError) -> ListObjectsV2Error { ListObjectsV2Error::HttpDispatch(err) } } impl From<io::Error> for ListObjectsV2Error { fn from(err: io::Error) -> ListObjectsV2Error { ListObjectsV2Error::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListObjectsV2Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListObjectsV2Error { fn description(&self) -> &str { match *self { ListObjectsV2Error::NoSuchBucket(ref cause) => cause, ListObjectsV2Error::Validation(ref cause) => cause, ListObjectsV2Error::Credentials(ref err) => err.description(), ListObjectsV2Error::HttpDispatch(ref dispatch_error) => dispatch_error.description(), ListObjectsV2Error::Unknown(ref cause) => cause, } } } /// Errors returned by ListParts #[derive(Debug, PartialEq)] pub enum ListPartsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl ListPartsError { pub fn from_body(body: &str) -> ListPartsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => ListPartsError::Unknown(String::from(body)), }, Err(_) => ListPartsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ListPartsError { fn from(err: XmlParseError) -> ListPartsError { let XmlParseError(message) = err; ListPartsError::Unknown(message.to_string()) } } impl From<CredentialsError> for ListPartsError { fn from(err: CredentialsError) -> ListPartsError { ListPartsError::Credentials(err) } } impl From<HttpDispatchError> for ListPartsError { fn from(err: HttpDispatchError) -> ListPartsError { ListPartsError::HttpDispatch(err) } } impl From<io::Error> for ListPartsError { fn from(err: io::Error) -> ListPartsError { ListPartsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListPartsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListPartsError { fn description(&self) -> &str { match *self { ListPartsError::Validation(ref cause) => cause, ListPartsError::Credentials(ref err) => err.description(), ListPartsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), ListPartsError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketAccelerateConfiguration #[derive(Debug, PartialEq)] pub enum PutBucketAccelerateConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. 
Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketAccelerateConfigurationError { pub fn from_body(body: &str) -> PutBucketAccelerateConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketAccelerateConfigurationError::Unknown(String::from(body)), }, Err(_) => PutBucketAccelerateConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketAccelerateConfigurationError { fn from(err: XmlParseError) -> PutBucketAccelerateConfigurationError { let XmlParseError(message) = err; PutBucketAccelerateConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketAccelerateConfigurationError { fn from(err: CredentialsError) -> PutBucketAccelerateConfigurationError { PutBucketAccelerateConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketAccelerateConfigurationError { fn from(err: HttpDispatchError) -> PutBucketAccelerateConfigurationError { PutBucketAccelerateConfigurationError::HttpDispatch(err) } } impl From<io::Error> for PutBucketAccelerateConfigurationError { fn from(err: io::Error) -> PutBucketAccelerateConfigurationError { PutBucketAccelerateConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketAccelerateConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketAccelerateConfigurationError { fn description(&self) -> &str { match *self { PutBucketAccelerateConfigurationError::Validation(ref cause) => cause, 
PutBucketAccelerateConfigurationError::Credentials(ref err) => err.description(), PutBucketAccelerateConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketAccelerateConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketAcl #[derive(Debug, PartialEq)] pub enum PutBucketAclError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketAclError { pub fn from_body(body: &str) -> PutBucketAclError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketAclError::Unknown(String::from(body)), }, Err(_) => PutBucketAclError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketAclError { fn from(err: XmlParseError) -> PutBucketAclError { let XmlParseError(message) = err; PutBucketAclError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketAclError { fn from(err: CredentialsError) -> PutBucketAclError { PutBucketAclError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketAclError { fn from(err: HttpDispatchError) -> PutBucketAclError { PutBucketAclError::HttpDispatch(err) } } impl From<io::Error> for PutBucketAclError { fn from(err: io::Error) -> PutBucketAclError { PutBucketAclError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketAclError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result 
{ write!(f, "{}", self.description()) } } impl Error for PutBucketAclError { fn description(&self) -> &str { match *self { PutBucketAclError::Validation(ref cause) => cause, PutBucketAclError::Credentials(ref err) => err.description(), PutBucketAclError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutBucketAclError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketAnalyticsConfiguration #[derive(Debug, PartialEq)] pub enum PutBucketAnalyticsConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketAnalyticsConfigurationError { pub fn from_body(body: &str) -> PutBucketAnalyticsConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => PutBucketAnalyticsConfigurationError::Unknown(String::from(body)), }, Err(_) => PutBucketAnalyticsConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketAnalyticsConfigurationError { fn from(err: XmlParseError) -> PutBucketAnalyticsConfigurationError { let XmlParseError(message) = err; PutBucketAnalyticsConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketAnalyticsConfigurationError { fn from(err: CredentialsError) -> PutBucketAnalyticsConfigurationError { PutBucketAnalyticsConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketAnalyticsConfigurationError { fn from(err: HttpDispatchError) -> PutBucketAnalyticsConfigurationError { PutBucketAnalyticsConfigurationError::HttpDispatch(err) } } impl From<io::Error> for PutBucketAnalyticsConfigurationError { fn from(err: io::Error) -> PutBucketAnalyticsConfigurationError { PutBucketAnalyticsConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketAnalyticsConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketAnalyticsConfigurationError { fn description(&self) -> &str { match *self { PutBucketAnalyticsConfigurationError::Validation(ref cause) => cause, PutBucketAnalyticsConfigurationError::Credentials(ref err) => err.description(), PutBucketAnalyticsConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketAnalyticsConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketCors #[derive(Debug, PartialEq)] pub enum PutBucketCorsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. 
Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketCorsError { pub fn from_body(body: &str) -> PutBucketCorsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketCorsError::Unknown(String::from(body)), }, Err(_) => PutBucketCorsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketCorsError { fn from(err: XmlParseError) -> PutBucketCorsError { let XmlParseError(message) = err; PutBucketCorsError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketCorsError { fn from(err: CredentialsError) -> PutBucketCorsError { PutBucketCorsError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketCorsError { fn from(err: HttpDispatchError) -> PutBucketCorsError { PutBucketCorsError::HttpDispatch(err) } } impl From<io::Error> for PutBucketCorsError { fn from(err: io::Error) -> PutBucketCorsError { PutBucketCorsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketCorsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketCorsError { fn description(&self) -> &str { match *self { PutBucketCorsError::Validation(ref cause) => cause, PutBucketCorsError::Credentials(ref err) => err.description(), PutBucketCorsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutBucketCorsError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketEncryption #[derive(Debug, PartialEq)] pub enum 
PutBucketEncryptionError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketEncryptionError { pub fn from_body(body: &str) -> PutBucketEncryptionError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketEncryptionError::Unknown(String::from(body)), }, Err(_) => PutBucketEncryptionError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketEncryptionError { fn from(err: XmlParseError) -> PutBucketEncryptionError { let XmlParseError(message) = err; PutBucketEncryptionError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketEncryptionError { fn from(err: CredentialsError) -> PutBucketEncryptionError { PutBucketEncryptionError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketEncryptionError { fn from(err: HttpDispatchError) -> PutBucketEncryptionError { PutBucketEncryptionError::HttpDispatch(err) } } impl From<io::Error> for PutBucketEncryptionError { fn from(err: io::Error) -> PutBucketEncryptionError { PutBucketEncryptionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketEncryptionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketEncryptionError { fn description(&self) -> &str { match *self { PutBucketEncryptionError::Validation(ref cause) => cause, 
PutBucketEncryptionError::Credentials(ref err) => err.description(), PutBucketEncryptionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketEncryptionError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketInventoryConfiguration #[derive(Debug, PartialEq)] pub enum PutBucketInventoryConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketInventoryConfigurationError { pub fn from_body(body: &str) -> PutBucketInventoryConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => PutBucketInventoryConfigurationError::Unknown(String::from(body)), }, Err(_) => PutBucketInventoryConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketInventoryConfigurationError { fn from(err: XmlParseError) -> PutBucketInventoryConfigurationError { let XmlParseError(message) = err; PutBucketInventoryConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketInventoryConfigurationError { fn from(err: CredentialsError) -> PutBucketInventoryConfigurationError { PutBucketInventoryConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketInventoryConfigurationError { fn from(err: HttpDispatchError) -> PutBucketInventoryConfigurationError { PutBucketInventoryConfigurationError::HttpDispatch(err) } } impl From<io::Error> for PutBucketInventoryConfigurationError { fn from(err: io::Error) -> PutBucketInventoryConfigurationError { PutBucketInventoryConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketInventoryConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketInventoryConfigurationError { fn description(&self) -> &str { match *self { PutBucketInventoryConfigurationError::Validation(ref cause) => cause, PutBucketInventoryConfigurationError::Credentials(ref err) => err.description(), PutBucketInventoryConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketInventoryConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketLifecycle #[derive(Debug, PartialEq)] pub enum PutBucketLifecycleError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. 
Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketLifecycleError { pub fn from_body(body: &str) -> PutBucketLifecycleError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketLifecycleError::Unknown(String::from(body)), }, Err(_) => PutBucketLifecycleError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketLifecycleError { fn from(err: XmlParseError) -> PutBucketLifecycleError { let XmlParseError(message) = err; PutBucketLifecycleError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketLifecycleError { fn from(err: CredentialsError) -> PutBucketLifecycleError { PutBucketLifecycleError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketLifecycleError { fn from(err: HttpDispatchError) -> PutBucketLifecycleError { PutBucketLifecycleError::HttpDispatch(err) } } impl From<io::Error> for PutBucketLifecycleError { fn from(err: io::Error) -> PutBucketLifecycleError { PutBucketLifecycleError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketLifecycleError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketLifecycleError { fn description(&self) -> &str { match *self { PutBucketLifecycleError::Validation(ref cause) => cause, PutBucketLifecycleError::Credentials(ref err) => err.description(), PutBucketLifecycleError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketLifecycleError::Unknown(ref 
cause) => cause, } } } /// Errors returned by PutBucketLifecycleConfiguration #[derive(Debug, PartialEq)] pub enum PutBucketLifecycleConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketLifecycleConfigurationError { pub fn from_body(body: &str) -> PutBucketLifecycleConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketLifecycleConfigurationError::Unknown(String::from(body)), }, Err(_) => PutBucketLifecycleConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketLifecycleConfigurationError { fn from(err: XmlParseError) -> PutBucketLifecycleConfigurationError { let XmlParseError(message) = err; PutBucketLifecycleConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketLifecycleConfigurationError { fn from(err: CredentialsError) -> PutBucketLifecycleConfigurationError { PutBucketLifecycleConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketLifecycleConfigurationError { fn from(err: HttpDispatchError) -> PutBucketLifecycleConfigurationError { PutBucketLifecycleConfigurationError::HttpDispatch(err) } } impl From<io::Error> for PutBucketLifecycleConfigurationError { fn from(err: io::Error) -> PutBucketLifecycleConfigurationError { 
PutBucketLifecycleConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketLifecycleConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketLifecycleConfigurationError { fn description(&self) -> &str { match *self { PutBucketLifecycleConfigurationError::Validation(ref cause) => cause, PutBucketLifecycleConfigurationError::Credentials(ref err) => err.description(), PutBucketLifecycleConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketLifecycleConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketLogging #[derive(Debug, PartialEq)] pub enum PutBucketLoggingError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketLoggingError { pub fn from_body(body: &str) -> PutBucketLoggingError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => PutBucketLoggingError::Unknown(String::from(body)), }, Err(_) => PutBucketLoggingError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketLoggingError { fn from(err: XmlParseError) -> PutBucketLoggingError { let XmlParseError(message) = err; PutBucketLoggingError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketLoggingError { fn from(err: CredentialsError) -> PutBucketLoggingError { PutBucketLoggingError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketLoggingError { fn from(err: HttpDispatchError) -> PutBucketLoggingError { PutBucketLoggingError::HttpDispatch(err) } } impl From<io::Error> for PutBucketLoggingError { fn from(err: io::Error) -> PutBucketLoggingError { PutBucketLoggingError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketLoggingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketLoggingError { fn description(&self) -> &str { match *self { PutBucketLoggingError::Validation(ref cause) => cause, PutBucketLoggingError::Credentials(ref err) => err.description(), PutBucketLoggingError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutBucketLoggingError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketMetricsConfiguration #[derive(Debug, PartialEq)] pub enum PutBucketMetricsConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl PutBucketMetricsConfigurationError { pub fn from_body(body: &str) -> PutBucketMetricsConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketMetricsConfigurationError::Unknown(String::from(body)), }, Err(_) => PutBucketMetricsConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketMetricsConfigurationError { fn from(err: XmlParseError) -> PutBucketMetricsConfigurationError { let XmlParseError(message) = err; PutBucketMetricsConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketMetricsConfigurationError { fn from(err: CredentialsError) -> PutBucketMetricsConfigurationError { PutBucketMetricsConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketMetricsConfigurationError { fn from(err: HttpDispatchError) -> PutBucketMetricsConfigurationError { PutBucketMetricsConfigurationError::HttpDispatch(err) } } impl From<io::Error> for PutBucketMetricsConfigurationError { fn from(err: io::Error) -> PutBucketMetricsConfigurationError { PutBucketMetricsConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketMetricsConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketMetricsConfigurationError { fn description(&self) -> &str { match *self { PutBucketMetricsConfigurationError::Validation(ref cause) => cause, PutBucketMetricsConfigurationError::Credentials(ref err) => err.description(), PutBucketMetricsConfigurationError::HttpDispatch(ref dispatch_error) => { 
dispatch_error.description() } PutBucketMetricsConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketNotification #[derive(Debug, PartialEq)] pub enum PutBucketNotificationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketNotificationError { pub fn from_body(body: &str) -> PutBucketNotificationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketNotificationError::Unknown(String::from(body)), }, Err(_) => PutBucketNotificationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketNotificationError { fn from(err: XmlParseError) -> PutBucketNotificationError { let XmlParseError(message) = err; PutBucketNotificationError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketNotificationError { fn from(err: CredentialsError) -> PutBucketNotificationError { PutBucketNotificationError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketNotificationError { fn from(err: HttpDispatchError) -> PutBucketNotificationError { PutBucketNotificationError::HttpDispatch(err) } } impl From<io::Error> for PutBucketNotificationError { fn from(err: io::Error) -> PutBucketNotificationError { PutBucketNotificationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketNotificationError { fn fmt(&self, f: &mut fmt::Formatter) -> 
fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketNotificationError { fn description(&self) -> &str { match *self { PutBucketNotificationError::Validation(ref cause) => cause, PutBucketNotificationError::Credentials(ref err) => err.description(), PutBucketNotificationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketNotificationError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketNotificationConfiguration #[derive(Debug, PartialEq)] pub enum PutBucketNotificationConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketNotificationConfigurationError { pub fn from_body(body: &str) -> PutBucketNotificationConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => PutBucketNotificationConfigurationError::Unknown(String::from(body)), }, Err(_) => PutBucketNotificationConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketNotificationConfigurationError { fn from(err: XmlParseError) -> PutBucketNotificationConfigurationError { let XmlParseError(message) = err; PutBucketNotificationConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketNotificationConfigurationError { fn from(err: CredentialsError) -> PutBucketNotificationConfigurationError { PutBucketNotificationConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketNotificationConfigurationError { fn from(err: HttpDispatchError) -> PutBucketNotificationConfigurationError { PutBucketNotificationConfigurationError::HttpDispatch(err) } } impl From<io::Error> for PutBucketNotificationConfigurationError { fn from(err: io::Error) -> PutBucketNotificationConfigurationError { PutBucketNotificationConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketNotificationConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketNotificationConfigurationError { fn description(&self) -> &str { match *self { PutBucketNotificationConfigurationError::Validation(ref cause) => cause, PutBucketNotificationConfigurationError::Credentials(ref err) => err.description(), PutBucketNotificationConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketNotificationConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketPolicy #[derive(Debug, PartialEq)] pub enum PutBucketPolicyError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), 
/// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketPolicyError { pub fn from_body(body: &str) -> PutBucketPolicyError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketPolicyError::Unknown(String::from(body)), }, Err(_) => PutBucketPolicyError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketPolicyError { fn from(err: XmlParseError) -> PutBucketPolicyError { let XmlParseError(message) = err; PutBucketPolicyError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketPolicyError { fn from(err: CredentialsError) -> PutBucketPolicyError { PutBucketPolicyError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketPolicyError { fn from(err: HttpDispatchError) -> PutBucketPolicyError { PutBucketPolicyError::HttpDispatch(err) } } impl From<io::Error> for PutBucketPolicyError { fn from(err: io::Error) -> PutBucketPolicyError { PutBucketPolicyError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketPolicyError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketPolicyError { fn description(&self) -> &str { match *self { PutBucketPolicyError::Validation(ref cause) => cause, PutBucketPolicyError::Credentials(ref err) => err.description(), PutBucketPolicyError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutBucketPolicyError::Unknown(ref cause) => cause, } } } 
/// Errors returned by PutBucketReplication #[derive(Debug, PartialEq)] pub enum PutBucketReplicationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketReplicationError { pub fn from_body(body: &str) -> PutBucketReplicationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketReplicationError::Unknown(String::from(body)), }, Err(_) => PutBucketReplicationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketReplicationError { fn from(err: XmlParseError) -> PutBucketReplicationError { let XmlParseError(message) = err; PutBucketReplicationError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketReplicationError { fn from(err: CredentialsError) -> PutBucketReplicationError { PutBucketReplicationError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketReplicationError { fn from(err: HttpDispatchError) -> PutBucketReplicationError { PutBucketReplicationError::HttpDispatch(err) } } impl From<io::Error> for PutBucketReplicationError { fn from(err: io::Error) -> PutBucketReplicationError { PutBucketReplicationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketReplicationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketReplicationError { fn description(&self) -> 
&str { match *self { PutBucketReplicationError::Validation(ref cause) => cause, PutBucketReplicationError::Credentials(ref err) => err.description(), PutBucketReplicationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketReplicationError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketRequestPayment #[derive(Debug, PartialEq)] pub enum PutBucketRequestPaymentError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketRequestPaymentError { pub fn from_body(body: &str) -> PutBucketRequestPaymentError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => PutBucketRequestPaymentError::Unknown(String::from(body)), }, Err(_) => PutBucketRequestPaymentError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketRequestPaymentError { fn from(err: XmlParseError) -> PutBucketRequestPaymentError { let XmlParseError(message) = err; PutBucketRequestPaymentError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketRequestPaymentError { fn from(err: CredentialsError) -> PutBucketRequestPaymentError { PutBucketRequestPaymentError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketRequestPaymentError { fn from(err: HttpDispatchError) -> PutBucketRequestPaymentError { PutBucketRequestPaymentError::HttpDispatch(err) } } impl From<io::Error> for PutBucketRequestPaymentError { fn from(err: io::Error) -> PutBucketRequestPaymentError { PutBucketRequestPaymentError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketRequestPaymentError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketRequestPaymentError { fn description(&self) -> &str { match *self { PutBucketRequestPaymentError::Validation(ref cause) => cause, PutBucketRequestPaymentError::Credentials(ref err) => err.description(), PutBucketRequestPaymentError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketRequestPaymentError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketTagging #[derive(Debug, PartialEq)] pub enum PutBucketTaggingError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. 
The raw HTTP response is provided. Unknown(String), } impl PutBucketTaggingError { pub fn from_body(body: &str) -> PutBucketTaggingError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketTaggingError::Unknown(String::from(body)), }, Err(_) => PutBucketTaggingError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketTaggingError { fn from(err: XmlParseError) -> PutBucketTaggingError { let XmlParseError(message) = err; PutBucketTaggingError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketTaggingError { fn from(err: CredentialsError) -> PutBucketTaggingError { PutBucketTaggingError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketTaggingError { fn from(err: HttpDispatchError) -> PutBucketTaggingError { PutBucketTaggingError::HttpDispatch(err) } } impl From<io::Error> for PutBucketTaggingError { fn from(err: io::Error) -> PutBucketTaggingError { PutBucketTaggingError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketTaggingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketTaggingError { fn description(&self) -> &str { match *self { PutBucketTaggingError::Validation(ref cause) => cause, PutBucketTaggingError::Credentials(ref err) => err.description(), PutBucketTaggingError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutBucketTaggingError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketVersioning #[derive(Debug, PartialEq)] pub enum PutBucketVersioningError { /// An error occurred dispatching the HTTP request 
HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketVersioningError { pub fn from_body(body: &str) -> PutBucketVersioningError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketVersioningError::Unknown(String::from(body)), }, Err(_) => PutBucketVersioningError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketVersioningError { fn from(err: XmlParseError) -> PutBucketVersioningError { let XmlParseError(message) = err; PutBucketVersioningError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketVersioningError { fn from(err: CredentialsError) -> PutBucketVersioningError { PutBucketVersioningError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketVersioningError { fn from(err: HttpDispatchError) -> PutBucketVersioningError { PutBucketVersioningError::HttpDispatch(err) } } impl From<io::Error> for PutBucketVersioningError { fn from(err: io::Error) -> PutBucketVersioningError { PutBucketVersioningError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketVersioningError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketVersioningError { fn description(&self) -> &str { match *self { PutBucketVersioningError::Validation(ref cause) => cause, PutBucketVersioningError::Credentials(ref err) => err.description(), 
PutBucketVersioningError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketVersioningError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketWebsite #[derive(Debug, PartialEq)] pub enum PutBucketWebsiteError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketWebsiteError { pub fn from_body(body: &str) -> PutBucketWebsiteError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketWebsiteError::Unknown(String::from(body)), }, Err(_) => PutBucketWebsiteError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketWebsiteError { fn from(err: XmlParseError) -> PutBucketWebsiteError { let XmlParseError(message) = err; PutBucketWebsiteError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketWebsiteError { fn from(err: CredentialsError) -> PutBucketWebsiteError { PutBucketWebsiteError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketWebsiteError { fn from(err: HttpDispatchError) -> PutBucketWebsiteError { PutBucketWebsiteError::HttpDispatch(err) } } impl From<io::Error> for PutBucketWebsiteError { fn from(err: io::Error) -> PutBucketWebsiteError { PutBucketWebsiteError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketWebsiteError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", 
self.description()) } } impl Error for PutBucketWebsiteError { fn description(&self) -> &str { match *self { PutBucketWebsiteError::Validation(ref cause) => cause, PutBucketWebsiteError::Credentials(ref err) => err.description(), PutBucketWebsiteError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutBucketWebsiteError::Unknown(ref cause) => cause, } } } /// Errors returned by PutObject #[derive(Debug, PartialEq)] pub enum PutObjectError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutObjectError { pub fn from_body(body: &str) -> PutObjectError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => PutObjectError::Unknown(String::from(body)), }, Err(_) => PutObjectError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutObjectError { fn from(err: XmlParseError) -> PutObjectError { let XmlParseError(message) = err; PutObjectError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutObjectError { fn from(err: CredentialsError) -> PutObjectError { PutObjectError::Credentials(err) } } impl From<HttpDispatchError> for PutObjectError { fn from(err: HttpDispatchError) -> PutObjectError { PutObjectError::HttpDispatch(err) } } impl From<io::Error> for PutObjectError { fn from(err: io::Error) -> PutObjectError { PutObjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutObjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutObjectError { fn description(&self) -> &str { match *self { PutObjectError::Validation(ref cause) => cause, PutObjectError::Credentials(ref err) => err.description(), PutObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutObjectError::Unknown(ref cause) => cause, } } } /// Errors returned by PutObjectAcl #[derive(Debug, PartialEq)] pub enum PutObjectAclError { /// <p>The specified key does not exist.</p> NoSuchKey(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl PutObjectAclError { pub fn from_body(body: &str) -> PutObjectAclError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "NoSuchKey" => PutObjectAclError::NoSuchKey(String::from(parsed_error.message)), _ => PutObjectAclError::Unknown(String::from(body)), }, Err(_) => PutObjectAclError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutObjectAclError { fn from(err: XmlParseError) -> PutObjectAclError { let XmlParseError(message) = err; PutObjectAclError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutObjectAclError { fn from(err: CredentialsError) -> PutObjectAclError { PutObjectAclError::Credentials(err) } } impl From<HttpDispatchError> for PutObjectAclError { fn from(err: HttpDispatchError) -> PutObjectAclError { PutObjectAclError::HttpDispatch(err) } } impl From<io::Error> for PutObjectAclError { fn from(err: io::Error) -> PutObjectAclError { PutObjectAclError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutObjectAclError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutObjectAclError { fn description(&self) -> &str { match *self { PutObjectAclError::NoSuchKey(ref cause) => cause, PutObjectAclError::Validation(ref cause) => cause, PutObjectAclError::Credentials(ref err) => err.description(), PutObjectAclError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutObjectAclError::Unknown(ref cause) => cause, } } } /// Errors returned by PutObjectTagging #[derive(Debug, PartialEq)] pub enum PutObjectTaggingError { /// An error occurred dispatching the HTTP request 
HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutObjectTaggingError { pub fn from_body(body: &str) -> PutObjectTaggingError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutObjectTaggingError::Unknown(String::from(body)), }, Err(_) => PutObjectTaggingError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutObjectTaggingError { fn from(err: XmlParseError) -> PutObjectTaggingError { let XmlParseError(message) = err; PutObjectTaggingError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutObjectTaggingError { fn from(err: CredentialsError) -> PutObjectTaggingError { PutObjectTaggingError::Credentials(err) } } impl From<HttpDispatchError> for PutObjectTaggingError { fn from(err: HttpDispatchError) -> PutObjectTaggingError { PutObjectTaggingError::HttpDispatch(err) } } impl From<io::Error> for PutObjectTaggingError { fn from(err: io::Error) -> PutObjectTaggingError { PutObjectTaggingError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutObjectTaggingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutObjectTaggingError { fn description(&self) -> &str { match *self { PutObjectTaggingError::Validation(ref cause) => cause, PutObjectTaggingError::Credentials(ref err) => err.description(), PutObjectTaggingError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), 
PutObjectTaggingError::Unknown(ref cause) => cause, } } } /// Errors returned by RestoreObject #[derive(Debug, PartialEq)] pub enum RestoreObjectError { /// <p>This operation is not allowed against this storage tier</p> ObjectAlreadyInActiveTierError(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl RestoreObjectError { pub fn from_body(body: &str) -> RestoreObjectError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "ObjectAlreadyInActiveTierError" => { RestoreObjectError::ObjectAlreadyInActiveTierError(String::from( parsed_error.message, )) } _ => RestoreObjectError::Unknown(String::from(body)), }, Err(_) => RestoreObjectError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for RestoreObjectError { fn from(err: XmlParseError) -> RestoreObjectError { let XmlParseError(message) = err; RestoreObjectError::Unknown(message.to_string()) } } impl From<CredentialsError> for RestoreObjectError { fn from(err: CredentialsError) -> RestoreObjectError { RestoreObjectError::Credentials(err) } } impl From<HttpDispatchError> for RestoreObjectError { fn from(err: HttpDispatchError) -> RestoreObjectError { RestoreObjectError::HttpDispatch(err) } } impl From<io::Error> for RestoreObjectError { fn from(err: io::Error) -> RestoreObjectError { RestoreObjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for RestoreObjectError { fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for RestoreObjectError { fn description(&self) -> &str { match *self { RestoreObjectError::ObjectAlreadyInActiveTierError(ref cause) => cause, RestoreObjectError::Validation(ref cause) => cause, RestoreObjectError::Credentials(ref err) => err.description(), RestoreObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), RestoreObjectError::Unknown(ref cause) => cause, } } } /// Errors returned by SelectObjectContent #[derive(Debug, PartialEq)] pub enum SelectObjectContentError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl SelectObjectContentError { pub fn from_body(body: &str) -> SelectObjectContentError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => SelectObjectContentError::Unknown(String::from(body)), }, Err(_) => SelectObjectContentError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for SelectObjectContentError { fn from(err: XmlParseError) -> SelectObjectContentError { let XmlParseError(message) = err; SelectObjectContentError::Unknown(message.to_string()) } } impl From<CredentialsError> for SelectObjectContentError { fn from(err: CredentialsError) -> SelectObjectContentError { SelectObjectContentError::Credentials(err) } } impl From<HttpDispatchError> for SelectObjectContentError { fn from(err: HttpDispatchError) -> SelectObjectContentError { SelectObjectContentError::HttpDispatch(err) } } impl From<io::Error> for SelectObjectContentError { fn from(err: io::Error) -> SelectObjectContentError { SelectObjectContentError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for SelectObjectContentError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for SelectObjectContentError { fn description(&self) -> &str { match *self { SelectObjectContentError::Validation(ref cause) => cause, SelectObjectContentError::Credentials(ref err) => err.description(), SelectObjectContentError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } SelectObjectContentError::Unknown(ref cause) => cause, } } } /// Errors returned by UploadPart #[derive(Debug, PartialEq)] pub enum UploadPartError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl UploadPartError { pub fn from_body(body: &str) -> UploadPartError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => UploadPartError::Unknown(String::from(body)), }, Err(_) => UploadPartError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for UploadPartError { fn from(err: XmlParseError) -> UploadPartError { let XmlParseError(message) = err; UploadPartError::Unknown(message.to_string()) } } impl From<CredentialsError> for UploadPartError { fn from(err: CredentialsError) -> UploadPartError { UploadPartError::Credentials(err) } } impl From<HttpDispatchError> for UploadPartError { fn from(err: HttpDispatchError) -> UploadPartError { UploadPartError::HttpDispatch(err) } } impl From<io::Error> for UploadPartError { fn from(err: io::Error) -> UploadPartError { UploadPartError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for UploadPartError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for UploadPartError { fn description(&self) -> &str { match *self { UploadPartError::Validation(ref cause) => cause, UploadPartError::Credentials(ref err) => err.description(), UploadPartError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), UploadPartError::Unknown(ref cause) => cause, } } } /// Errors returned by UploadPartCopy #[derive(Debug, PartialEq)] pub enum UploadPartCopyError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. 
Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl UploadPartCopyError { pub fn from_body(body: &str) -> UploadPartCopyError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => UploadPartCopyError::Unknown(String::from(body)), }, Err(_) => UploadPartCopyError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for UploadPartCopyError { fn from(err: XmlParseError) -> UploadPartCopyError { let XmlParseError(message) = err; UploadPartCopyError::Unknown(message.to_string()) } } impl From<CredentialsError> for UploadPartCopyError { fn from(err: CredentialsError) -> UploadPartCopyError { UploadPartCopyError::Credentials(err) } } impl From<HttpDispatchError> for UploadPartCopyError { fn from(err: HttpDispatchError) -> UploadPartCopyError { UploadPartCopyError::HttpDispatch(err) } } impl From<io::Error> for UploadPartCopyError { fn from(err: io::Error) -> UploadPartCopyError { UploadPartCopyError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for UploadPartCopyError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for UploadPartCopyError { fn description(&self) -> &str { match *self { UploadPartCopyError::Validation(ref cause) => cause, UploadPartCopyError::Credentials(ref err) => err.description(), UploadPartCopyError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), UploadPartCopyError::Unknown(ref cause) => cause, } } } /// Trait representing the capabilities of the Amazon S3 API. Amazon S3 clients implement this trait. 
pub trait S3 { /// <p>Aborts a multipart upload.</p><p>To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.</p> fn abort_multipart_upload( &self, input: AbortMultipartUploadRequest, ) -> RusotoFuture<AbortMultipartUploadOutput, AbortMultipartUploadError>; /// <p>Completes a multipart upload by assembling previously uploaded parts.</p> fn complete_multipart_upload( &self, input: CompleteMultipartUploadRequest, ) -> RusotoFuture<CompleteMultipartUploadOutput, CompleteMultipartUploadError>; /// <p>Creates a copy of an object that is already stored in Amazon S3.</p> fn copy_object( &self, input: CopyObjectRequest, ) -> RusotoFuture<CopyObjectOutput, CopyObjectError>; /// <p>Creates a new bucket.</p> fn create_bucket( &self, input: CreateBucketRequest, ) -> RusotoFuture<CreateBucketOutput, CreateBucketError>; /// <p>Initiates a multipart upload and returns an upload ID.</p><p><b>Note:</b> After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.</p> fn create_multipart_upload( &self, input: CreateMultipartUploadRequest, ) -> RusotoFuture<CreateMultipartUploadOutput, CreateMultipartUploadError>; /// <p>Deletes the bucket. 
All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.</p> fn delete_bucket(&self, input: DeleteBucketRequest) -> RusotoFuture<(), DeleteBucketError>; /// <p>Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).</p> fn delete_bucket_analytics_configuration( &self, input: DeleteBucketAnalyticsConfigurationRequest, ) -> RusotoFuture<(), DeleteBucketAnalyticsConfigurationError>; /// <p>Deletes the cors configuration information set for the bucket.</p> fn delete_bucket_cors( &self, input: DeleteBucketCorsRequest, ) -> RusotoFuture<(), DeleteBucketCorsError>; /// <p>Deletes the server-side encryption configuration from the bucket.</p> fn delete_bucket_encryption( &self, input: DeleteBucketEncryptionRequest, ) -> RusotoFuture<(), DeleteBucketEncryptionError>; /// <p>Deletes an inventory configuration (identified by the inventory ID) from the bucket.</p> fn delete_bucket_inventory_configuration( &self, input: DeleteBucketInventoryConfigurationRequest, ) -> RusotoFuture<(), DeleteBucketInventoryConfigurationError>; /// <p>Deletes the lifecycle configuration from the bucket.</p> fn delete_bucket_lifecycle( &self, input: DeleteBucketLifecycleRequest, ) -> RusotoFuture<(), DeleteBucketLifecycleError>; /// <p>Deletes a metrics configuration (specified by the metrics configuration ID) from the bucket.</p> fn delete_bucket_metrics_configuration( &self, input: DeleteBucketMetricsConfigurationRequest, ) -> RusotoFuture<(), DeleteBucketMetricsConfigurationError>; /// <p>Deletes the policy from the bucket.</p> fn delete_bucket_policy( &self, input: DeleteBucketPolicyRequest, ) -> RusotoFuture<(), DeleteBucketPolicyError>; /// <p>Deletes the replication configuration from the bucket.</p> fn delete_bucket_replication( &self, input: DeleteBucketReplicationRequest, ) -> RusotoFuture<(), DeleteBucketReplicationError>; /// <p>Deletes the tags from the bucket.</p> fn 
delete_bucket_tagging( &self, input: DeleteBucketTaggingRequest, ) -> RusotoFuture<(), DeleteBucketTaggingError>; /// <p>This operation removes the website configuration from the bucket.</p> fn delete_bucket_website( &self, input: DeleteBucketWebsiteRequest, ) -> RusotoFuture<(), DeleteBucketWebsiteError>; /// <p>Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn&#39;t a null version, Amazon S3 does not remove any objects.</p> fn delete_object( &self, input: DeleteObjectRequest, ) -> RusotoFuture<DeleteObjectOutput, DeleteObjectError>; /// <p>Removes the tag-set from an existing object.</p> fn delete_object_tagging( &self, input: DeleteObjectTaggingRequest, ) -> RusotoFuture<DeleteObjectTaggingOutput, DeleteObjectTaggingError>; /// <p>This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.</p> fn delete_objects( &self, input: DeleteObjectsRequest, ) -> RusotoFuture<DeleteObjectsOutput, DeleteObjectsError>; /// <p>Returns the accelerate configuration of a bucket.</p> fn get_bucket_accelerate_configuration( &self, input: GetBucketAccelerateConfigurationRequest, ) -> RusotoFuture<GetBucketAccelerateConfigurationOutput, GetBucketAccelerateConfigurationError>; /// <p>Gets the access control policy for the bucket.</p> fn get_bucket_acl( &self, input: GetBucketAclRequest, ) -> RusotoFuture<GetBucketAclOutput, GetBucketAclError>; /// <p>Gets an analytics configuration for the bucket (specified by the analytics configuration ID).</p> fn get_bucket_analytics_configuration( &self, input: GetBucketAnalyticsConfigurationRequest, ) -> RusotoFuture<GetBucketAnalyticsConfigurationOutput, GetBucketAnalyticsConfigurationError>; /// <p>Returns the cors configuration for the bucket.</p> fn get_bucket_cors( &self, input: GetBucketCorsRequest, ) -> RusotoFuture<GetBucketCorsOutput, GetBucketCorsError>; /// <p>Returns the 
server-side encryption configuration of a bucket.</p> fn get_bucket_encryption( &self, input: GetBucketEncryptionRequest, ) -> RusotoFuture<GetBucketEncryptionOutput, GetBucketEncryptionError>; /// <p>Returns an inventory configuration (identified by the inventory ID) from the bucket.</p> fn get_bucket_inventory_configuration( &self, input: GetBucketInventoryConfigurationRequest, ) -> RusotoFuture<GetBucketInventoryConfigurationOutput, GetBucketInventoryConfigurationError>; /// <p>Deprecated, see the GetBucketLifecycleConfiguration operation.</p> fn get_bucket_lifecycle( &self, input: GetBucketLifecycleRequest, ) -> RusotoFuture<GetBucketLifecycleOutput, GetBucketLifecycleError>; /// <p>Returns the lifecycle configuration information set on the bucket.</p> fn get_bucket_lifecycle_configuration( &self, input: GetBucketLifecycleConfigurationRequest, ) -> RusotoFuture<GetBucketLifecycleConfigurationOutput, GetBucketLifecycleConfigurationError>; /// <p>Returns the region the bucket resides in.</p> fn get_bucket_location( &self, input: GetBucketLocationRequest, ) -> RusotoFuture<GetBucketLocationOutput, GetBucketLocationError>; /// <p>Returns the logging status of a bucket and the permissions users have to view and modify that status. 
To use GET, you must be the bucket owner.</p> fn get_bucket_logging( &self, input: GetBucketLoggingRequest, ) -> RusotoFuture<GetBucketLoggingOutput, GetBucketLoggingError>; /// <p>Gets a metrics configuration (specified by the metrics configuration ID) from the bucket.</p> fn get_bucket_metrics_configuration( &self, input: GetBucketMetricsConfigurationRequest, ) -> RusotoFuture<GetBucketMetricsConfigurationOutput, GetBucketMetricsConfigurationError>; /// <p>Deprecated, see the GetBucketNotificationConfiguration operation.</p> fn get_bucket_notification( &self, input: GetBucketNotificationConfigurationRequest, ) -> RusotoFuture<NotificationConfigurationDeprecated, GetBucketNotificationError>; /// <p>Returns the notification configuration of a bucket.</p> fn get_bucket_notification_configuration( &self, input: GetBucketNotificationConfigurationRequest, ) -> RusotoFuture<NotificationConfiguration, GetBucketNotificationConfigurationError>; /// <p>Returns the policy of a specified bucket.</p> fn get_bucket_policy( &self, input: GetBucketPolicyRequest, ) -> RusotoFuture<GetBucketPolicyOutput, GetBucketPolicyError>; /// <p>Returns the replication configuration of a bucket.</p> fn get_bucket_replication( &self, input: GetBucketReplicationRequest, ) -> RusotoFuture<GetBucketReplicationOutput, GetBucketReplicationError>; /// <p>Returns the request payment configuration of a bucket.</p> fn get_bucket_request_payment( &self, input: GetBucketRequestPaymentRequest, ) -> RusotoFuture<GetBucketRequestPaymentOutput, GetBucketRequestPaymentError>; /// <p>Returns the tag set associated with the bucket.</p> fn get_bucket_tagging( &self, input: GetBucketTaggingRequest, ) -> RusotoFuture<GetBucketTaggingOutput, GetBucketTaggingError>; /// <p>Returns the versioning state of a bucket.</p> fn get_bucket_versioning( &self, input: GetBucketVersioningRequest, ) -> RusotoFuture<GetBucketVersioningOutput, GetBucketVersioningError>; /// <p>Returns the website configuration for a bucket.</p> fn 
get_bucket_website( &self, input: GetBucketWebsiteRequest, ) -> RusotoFuture<GetBucketWebsiteOutput, GetBucketWebsiteError>; /// <p>Retrieves objects from Amazon S3.</p> fn get_object(&self, input: GetObjectRequest) -> RusotoFuture<GetObjectOutput, GetObjectError>; /// <p>Returns the access control list (ACL) of an object.</p> fn get_object_acl( &self, input: GetObjectAclRequest, ) -> RusotoFuture<GetObjectAclOutput, GetObjectAclError>; /// <p>Returns the tag-set of an object.</p> fn get_object_tagging( &self, input: GetObjectTaggingRequest, ) -> RusotoFuture<GetObjectTaggingOutput, GetObjectTaggingError>; /// <p>Return torrent files from a bucket.</p> fn get_object_torrent( &self, input: GetObjectTorrentRequest, ) -> RusotoFuture<GetObjectTorrentOutput, GetObjectTorrentError>; /// <p>This operation is useful to determine if a bucket exists and you have permission to access it.</p> fn head_bucket(&self, input: HeadBucketRequest) -> RusotoFuture<(), HeadBucketError>; /// <p>The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you&#39;re only interested in an object&#39;s metadata. 
To use HEAD, you must have READ access to the object.</p> fn head_object( &self, input: HeadObjectRequest, ) -> RusotoFuture<HeadObjectOutput, HeadObjectError>; /// <p>Lists the analytics configurations for the bucket.</p> fn list_bucket_analytics_configurations( &self, input: ListBucketAnalyticsConfigurationsRequest, ) -> RusotoFuture<ListBucketAnalyticsConfigurationsOutput, ListBucketAnalyticsConfigurationsError>; /// <p>Returns a list of inventory configurations for the bucket.</p> fn list_bucket_inventory_configurations( &self, input: ListBucketInventoryConfigurationsRequest, ) -> RusotoFuture<ListBucketInventoryConfigurationsOutput, ListBucketInventoryConfigurationsError>; /// <p>Lists the metrics configurations for the bucket.</p> fn list_bucket_metrics_configurations( &self, input: ListBucketMetricsConfigurationsRequest, ) -> RusotoFuture<ListBucketMetricsConfigurationsOutput, ListBucketMetricsConfigurationsError>; /// <p>Returns a list of all buckets owned by the authenticated sender of the request.</p> fn list_buckets(&self) -> RusotoFuture<ListBucketsOutput, ListBucketsError>; /// <p>This operation lists in-progress multipart uploads.</p> fn list_multipart_uploads( &self, input: ListMultipartUploadsRequest, ) -> RusotoFuture<ListMultipartUploadsOutput, ListMultipartUploadsError>; /// <p>Returns metadata about all of the versions of objects in a bucket.</p> fn list_object_versions( &self, input: ListObjectVersionsRequest, ) -> RusotoFuture<ListObjectVersionsOutput, ListObjectVersionsError>; /// <p>Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.</p> fn list_objects( &self, input: ListObjectsRequest, ) -> RusotoFuture<ListObjectsOutput, ListObjectsError>; /// <p>Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. 
Note: ListObjectsV2 is the revised List Objects API and we recommend you use this revised API for new application development.</p> fn list_objects_v2( &self, input: ListObjectsV2Request, ) -> RusotoFuture<ListObjectsV2Output, ListObjectsV2Error>; /// <p>Lists the parts that have been uploaded for a specific multipart upload.</p> fn list_parts(&self, input: ListPartsRequest) -> RusotoFuture<ListPartsOutput, ListPartsError>; /// <p>Sets the accelerate configuration of an existing bucket.</p> fn put_bucket_accelerate_configuration( &self, input: PutBucketAccelerateConfigurationRequest, ) -> RusotoFuture<(), PutBucketAccelerateConfigurationError>; /// <p>Sets the permissions on a bucket using access control lists (ACL).</p> fn put_bucket_acl(&self, input: PutBucketAclRequest) -> RusotoFuture<(), PutBucketAclError>; /// <p>Sets an analytics configuration for the bucket (specified by the analytics configuration ID).</p> fn put_bucket_analytics_configuration( &self, input: PutBucketAnalyticsConfigurationRequest, ) -> RusotoFuture<(), PutBucketAnalyticsConfigurationError>; /// <p>Sets the cors configuration for a bucket.</p> fn put_bucket_cors(&self, input: PutBucketCorsRequest) -> RusotoFuture<(), PutBucketCorsError>; /// <p>Creates a new server-side encryption configuration (or replaces an existing one, if present).</p> fn put_bucket_encryption( &self, input: PutBucketEncryptionRequest, ) -> RusotoFuture<(), PutBucketEncryptionError>; /// <p>Adds an inventory configuration (identified by the inventory ID) from the bucket.</p> fn put_bucket_inventory_configuration( &self, input: PutBucketInventoryConfigurationRequest, ) -> RusotoFuture<(), PutBucketInventoryConfigurationError>; /// <p>Deprecated, see the PutBucketLifecycleConfiguration operation.</p> fn put_bucket_lifecycle( &self, input: PutBucketLifecycleRequest, ) -> RusotoFuture<(), PutBucketLifecycleError>; /// <p>Sets lifecycle configuration for your bucket. 
If a lifecycle configuration exists, it replaces it.</p> fn put_bucket_lifecycle_configuration( &self, input: PutBucketLifecycleConfigurationRequest, ) -> RusotoFuture<(), PutBucketLifecycleConfigurationError>; /// <p>Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner.</p> fn put_bucket_logging( &self, input: PutBucketLoggingRequest, ) -> RusotoFuture<(), PutBucketLoggingError>; /// <p>Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.</p> fn put_bucket_metrics_configuration( &self, input: PutBucketMetricsConfigurationRequest, ) -> RusotoFuture<(), PutBucketMetricsConfigurationError>; /// <p>Deprecated, see the PutBucketNotificationConfiguration operation.</p> fn put_bucket_notification( &self, input: PutBucketNotificationRequest, ) -> RusotoFuture<(), PutBucketNotificationError>; /// <p>Enables notifications of specified events for a bucket.</p> fn put_bucket_notification_configuration( &self, input: PutBucketNotificationConfigurationRequest, ) -> RusotoFuture<(), PutBucketNotificationConfigurationError>; /// <p>Replaces a policy on a bucket. If the bucket already has a policy, the one in this request completely replaces it.</p> fn put_bucket_policy( &self, input: PutBucketPolicyRequest, ) -> RusotoFuture<(), PutBucketPolicyError>; /// <p>Creates a new replication configuration (or replaces an existing one, if present).</p> fn put_bucket_replication( &self, input: PutBucketReplicationRequest, ) -> RusotoFuture<(), PutBucketReplicationError>; /// <p>Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. 
Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html</p> fn put_bucket_request_payment( &self, input: PutBucketRequestPaymentRequest, ) -> RusotoFuture<(), PutBucketRequestPaymentError>; /// <p>Sets the tags for a bucket.</p> fn put_bucket_tagging( &self, input: PutBucketTaggingRequest, ) -> RusotoFuture<(), PutBucketTaggingError>; /// <p>Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.</p> fn put_bucket_versioning( &self, input: PutBucketVersioningRequest, ) -> RusotoFuture<(), PutBucketVersioningError>; /// <p>Set the website configuration for a bucket.</p> fn put_bucket_website( &self, input: PutBucketWebsiteRequest, ) -> RusotoFuture<(), PutBucketWebsiteError>; /// <p>Adds an object to a bucket.</p> fn put_object(&self, input: PutObjectRequest) -> RusotoFuture<PutObjectOutput, PutObjectError>; /// <p>uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket</p> fn put_object_acl( &self, input: PutObjectAclRequest, ) -> RusotoFuture<PutObjectAclOutput, PutObjectAclError>; /// <p>Sets the supplied tag-set to an object that already exists in a bucket</p> fn put_object_tagging( &self, input: PutObjectTaggingRequest, ) -> RusotoFuture<PutObjectTaggingOutput, PutObjectTaggingError>; /// <p>Restores an archived copy of an object back into Amazon S3</p> fn restore_object( &self, input: RestoreObjectRequest, ) -> RusotoFuture<RestoreObjectOutput, RestoreObjectError>; /// <p>This operation filters the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. 
You must also specify the data serialization format for the response.</p> fn select_object_content( &self, input: SelectObjectContentRequest, ) -> RusotoFuture<SelectObjectContentOutput, SelectObjectContentError>; /// <p>Uploads a part in a multipart upload.</p><p><b>Note:</b> After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.</p> fn upload_part( &self, input: UploadPartRequest, ) -> RusotoFuture<UploadPartOutput, UploadPartError>; /// <p>Uploads a part by copying data from an existing object as data source.</p> fn upload_part_copy( &self, input: UploadPartCopyRequest, ) -> RusotoFuture<UploadPartCopyOutput, UploadPartCopyError>; } /// A client for the Amazon S3 API. pub struct S3Client<P = CredentialsProvider, D = RequestDispatcher> where P: ProvideAwsCredentials, D: DispatchSignedRequest, { inner: ClientInner<P, D>, region: region::Region, } impl S3Client { /// Creates a simple client backed by an implicit event loop. /// /// The client will use the default credentials provider and tls client. /// /// See the `rusoto_core::reactor` module for more details. 
pub fn simple(region: region::Region) -> S3Client { S3Client::new( RequestDispatcher::default(), CredentialsProvider::default(), region, ) } } impl<P, D> S3Client<P, D> where P: ProvideAwsCredentials, D: DispatchSignedRequest, { pub fn new(request_dispatcher: D, credentials_provider: P, region: region::Region) -> Self { S3Client { inner: ClientInner::new(credentials_provider, request_dispatcher), region: region, } } } impl<P, D> S3 for S3Client<P, D> where P: ProvideAwsCredentials + 'static, D: DispatchSignedRequest + 'static, { /// <p>Aborts a multipart upload.</p><p>To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.</p> #[allow(unused_variables, warnings)] fn abort_multipart_upload( &self, input: AbortMultipartUploadRequest, ) -> RusotoFuture<AbortMultipartUploadOutput, AbortMultipartUploadError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let mut params = Params::new(); params.put("uploadId", &input.upload_id); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(AbortMultipartUploadError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = AbortMultipartUploadOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), 
); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(AbortMultipartUploadOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Completes a multipart upload by assembling previously uploaded parts.</p> #[allow(unused_variables, warnings)] fn complete_multipart_upload( &self, input: CompleteMultipartUploadRequest, ) -> RusotoFuture<CompleteMultipartUploadOutput, CompleteMultipartUploadError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("POST", "s3", &self.region, &request_uri); if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let mut params = Params::new(); params.put("uploadId", &input.upload_id); request.set_params(params); if input.multipart_upload.is_some() { let mut writer = EventWriter::new(Vec::new()); CompletedMultipartUploadSerializer::serialize( &mut writer, "CompleteMultipartUpload", input.multipart_upload.as_ref().unwrap(), ); request.set_payload(Some(writer.into_inner())); } else { request.set_payload(Some(Vec::new())); } let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(CompleteMultipartUploadError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = 
CompleteMultipartUploadOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(CompleteMultipartUploadOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(expiration) = response.headers.get("x-amz-expiration") { let value = expiration.to_owned(); result.expiration = Some(value) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") { let value = ssekms_key_id.to_owned(); result.ssekms_key_id = Some(value) }; if let Some(server_side_encryption) = response.headers.get("x-amz-server-side-encryption") { let value = server_side_encryption.to_owned(); result.server_side_encryption = Some(value) }; if let Some(version_id) = response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Creates a copy of an object that is already stored in Amazon S3.</p> #[allow(unused_variables, warnings)] fn copy_object( &self, input: CopyObjectRequest, ) -> RusotoFuture<CopyObjectOutput, CopyObjectError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref acl) = input.acl { request.add_header("x-amz-acl", &acl.to_string()); } if let Some(ref cache_control) = input.cache_control { request.add_header("Cache-Control", &cache_control.to_string()); } if let Some(ref content_disposition) = input.content_disposition { request.add_header("Content-Disposition", 
&content_disposition.to_string()); } if let Some(ref content_encoding) = input.content_encoding { request.add_header("Content-Encoding", &content_encoding.to_string()); } if let Some(ref content_language) = input.content_language { request.add_header("Content-Language", &content_language.to_string()); } if let Some(ref content_type) = input.content_type { request.add_header("Content-Type", &content_type.to_string()); } request.add_header("x-amz-copy-source", &input.copy_source); if let Some(ref copy_source_if_match) = input.copy_source_if_match { request.add_header( "x-amz-copy-source-if-match", &copy_source_if_match.to_string(), ); } if let Some(ref copy_source_if_modified_since) = input.copy_source_if_modified_since { request.add_header( "x-amz-copy-source-if-modified-since", &copy_source_if_modified_since.to_string(), ); } if let Some(ref copy_source_if_none_match) = input.copy_source_if_none_match { request.add_header( "x-amz-copy-source-if-none-match", &copy_source_if_none_match.to_string(), ); } if let Some(ref copy_source_if_unmodified_since) = input.copy_source_if_unmodified_since { request.add_header( "x-amz-copy-source-if-unmodified-since", &copy_source_if_unmodified_since.to_string(), ); } if let Some(ref copy_source_sse_customer_algorithm) = input.copy_source_sse_customer_algorithm { request.add_header( "x-amz-copy-source-server-side-encryption-customer-algorithm", &copy_source_sse_customer_algorithm.to_string(), ); } if let Some(ref copy_source_sse_customer_key) = input.copy_source_sse_customer_key { request.add_header( "x-amz-copy-source-server-side-encryption-customer-key", &copy_source_sse_customer_key.to_string(), ); } if let Some(ref copy_source_sse_customer_key_md5) = input.copy_source_sse_customer_key_md5 { request.add_header( "x-amz-copy-source-server-side-encryption-customer-key-MD5", &copy_source_sse_customer_key_md5.to_string(), ); } if let Some(ref expires) = input.expires { request.add_header("Expires", &expires.to_string()); } if let 
Some(ref grant_full_control) = input.grant_full_control { request.add_header("x-amz-grant-full-control", &grant_full_control.to_string()); } if let Some(ref grant_read) = input.grant_read { request.add_header("x-amz-grant-read", &grant_read.to_string()); } if let Some(ref grant_read_acp) = input.grant_read_acp { request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string()); } if let Some(ref grant_write_acp) = input.grant_write_acp { request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string()); } if let Some(ref metadata) = input.metadata { for (header_name, header_value) in metadata.iter() { let header = format!("x-amz-meta-{}", header_name); request.add_header(header, header_value); } } if let Some(ref metadata_directive) = input.metadata_directive { request.add_header("x-amz-metadata-directive", &metadata_directive.to_string()); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm { request.add_header( "x-amz-server-side-encryption-customer-algorithm", &sse_customer_algorithm.to_string(), ); } if let Some(ref sse_customer_key) = input.sse_customer_key { request.add_header( "x-amz-server-side-encryption-customer-key", &sse_customer_key.to_string(), ); } if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 { request.add_header( "x-amz-server-side-encryption-customer-key-MD5", &sse_customer_key_md5.to_string(), ); } if let Some(ref ssekms_key_id) = input.ssekms_key_id { request.add_header( "x-amz-server-side-encryption-aws-kms-key-id", &ssekms_key_id.to_string(), ); } if let Some(ref server_side_encryption) = input.server_side_encryption { request.add_header( "x-amz-server-side-encryption", &server_side_encryption.to_string(), ); } if let Some(ref storage_class) = input.storage_class { request.add_header("x-amz-storage-class", &storage_class.to_string()); } if let Some(ref tagging) = 
input.tagging { request.add_header("x-amz-tagging", &tagging.to_string()); } if let Some(ref tagging_directive) = input.tagging_directive { request.add_header("x-amz-tagging-directive", &tagging_directive.to_string()); } if let Some(ref website_redirect_location) = input.website_redirect_location { request.add_header( "x-amz-website-redirect-location", &website_redirect_location.to_string(), ); } let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(CopyObjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = CopyObjectOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(CopyObjectOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(copy_source_version_id) = response.headers.get("x-amz-copy-source-version-id") { let value = copy_source_version_id.to_owned(); result.copy_source_version_id = Some(value) }; if let Some(expiration) = response.headers.get("x-amz-expiration") { let value = expiration.to_owned(); result.expiration = Some(value) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(sse_customer_algorithm) = response .headers .get("x-amz-server-side-encryption-customer-algorithm") { let value = sse_customer_algorithm.to_owned(); result.sse_customer_algorithm = Some(value) }; if 
let Some(sse_customer_key_md5) = response .headers .get("x-amz-server-side-encryption-customer-key-MD5") { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") { let value = ssekms_key_id.to_owned(); result.ssekms_key_id = Some(value) }; if let Some(server_side_encryption) = response.headers.get("x-amz-server-side-encryption") { let value = server_side_encryption.to_owned(); result.server_side_encryption = Some(value) }; if let Some(version_id) = response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Creates a new bucket.</p> #[allow(unused_variables, warnings)] fn create_bucket( &self, input: CreateBucketRequest, ) -> RusotoFuture<CreateBucketOutput, CreateBucketError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref acl) = input.acl { request.add_header("x-amz-acl", &acl.to_string()); } if let Some(ref grant_full_control) = input.grant_full_control { request.add_header("x-amz-grant-full-control", &grant_full_control.to_string()); } if let Some(ref grant_read) = input.grant_read { request.add_header("x-amz-grant-read", &grant_read.to_string()); } if let Some(ref grant_read_acp) = input.grant_read_acp { request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string()); } if let Some(ref grant_write) = input.grant_write { request.add_header("x-amz-grant-write", &grant_write.to_string()); } if let Some(ref grant_write_acp) = input.grant_write_acp { request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string()); } if input.create_bucket_configuration.is_some() { let mut writer = EventWriter::new(Vec::new()); CreateBucketConfigurationSerializer::serialize( &mut writer, "CreateBucketConfiguration", 
input.create_bucket_configuration.as_ref().unwrap(), ); request.set_payload(Some(writer.into_inner())); } else { request.set_payload(Some(Vec::new())); } let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(CreateBucketError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = CreateBucketOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(CreateBucketOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(location) = response.headers.get("Location") { let value = location.to_owned(); result.location = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Initiates a multipart upload and returns an upload ID.</p><p><b>Note:</b> After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. 
Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.</p> #[allow(unused_variables, warnings)] fn create_multipart_upload( &self, input: CreateMultipartUploadRequest, ) -> RusotoFuture<CreateMultipartUploadOutput, CreateMultipartUploadError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("POST", "s3", &self.region, &request_uri); if let Some(ref acl) = input.acl { request.add_header("x-amz-acl", &acl.to_string()); } if let Some(ref cache_control) = input.cache_control { request.add_header("Cache-Control", &cache_control.to_string()); } if let Some(ref content_disposition) = input.content_disposition { request.add_header("Content-Disposition", &content_disposition.to_string()); } if let Some(ref content_encoding) = input.content_encoding { request.add_header("Content-Encoding", &content_encoding.to_string()); } if let Some(ref content_language) = input.content_language { request.add_header("Content-Language", &content_language.to_string()); } if let Some(ref content_type) = input.content_type { request.add_header("Content-Type", &content_type.to_string()); } if let Some(ref expires) = input.expires { request.add_header("Expires", &expires.to_string()); } if let Some(ref grant_full_control) = input.grant_full_control { request.add_header("x-amz-grant-full-control", &grant_full_control.to_string()); } if let Some(ref grant_read) = input.grant_read { request.add_header("x-amz-grant-read", &grant_read.to_string()); } if let Some(ref grant_read_acp) = input.grant_read_acp { request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string()); } if let Some(ref grant_write_acp) = input.grant_write_acp { request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string()); } if let Some(ref metadata) = input.metadata { for (header_name, header_value) in metadata.iter() { let header = 
format!("x-amz-meta-{}", header_name); request.add_header(header, header_value); } } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm { request.add_header( "x-amz-server-side-encryption-customer-algorithm", &sse_customer_algorithm.to_string(), ); } if let Some(ref sse_customer_key) = input.sse_customer_key { request.add_header( "x-amz-server-side-encryption-customer-key", &sse_customer_key.to_string(), ); } if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 { request.add_header( "x-amz-server-side-encryption-customer-key-MD5", &sse_customer_key_md5.to_string(), ); } if let Some(ref ssekms_key_id) = input.ssekms_key_id { request.add_header( "x-amz-server-side-encryption-aws-kms-key-id", &ssekms_key_id.to_string(), ); } if let Some(ref server_side_encryption) = input.server_side_encryption { request.add_header( "x-amz-server-side-encryption", &server_side_encryption.to_string(), ); } if let Some(ref storage_class) = input.storage_class { request.add_header("x-amz-storage-class", &storage_class.to_string()); } if let Some(ref tagging) = input.tagging { request.add_header("x-amz-tagging", &tagging.to_string()); } if let Some(ref website_redirect_location) = input.website_redirect_location { request.add_header( "x-amz-website-redirect-location", &website_redirect_location.to_string(), ); } let mut params = Params::new(); params.put_key("uploads"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(CreateMultipartUploadError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } 
future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = CreateMultipartUploadOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(CreateMultipartUploadOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(abort_date) = response.headers.get("x-amz-abort-date") { let value = abort_date.to_owned(); result.abort_date = Some(value) }; if let Some(abort_rule_id) = response.headers.get("x-amz-abort-rule-id") { let value = abort_rule_id.to_owned(); result.abort_rule_id = Some(value) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(sse_customer_algorithm) = response .headers .get("x-amz-server-side-encryption-customer-algorithm") { let value = sse_customer_algorithm.to_owned(); result.sse_customer_algorithm = Some(value) }; if let Some(sse_customer_key_md5) = response .headers .get("x-amz-server-side-encryption-customer-key-MD5") { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") { let value = ssekms_key_id.to_owned(); result.ssekms_key_id = Some(value) }; if let Some(server_side_encryption) = response.headers.get("x-amz-server-side-encryption") { let value = server_side_encryption.to_owned(); result.server_side_encryption = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Deletes the bucket. 
All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.</p> #[allow(unused_variables, warnings)] fn delete_bucket(&self, input: DeleteBucketRequest) -> RusotoFuture<(), DeleteBucketError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).</p> #[allow(unused_variables, warnings)] fn delete_bucket_analytics_configuration( &self, input: DeleteBucketAnalyticsConfigurationRequest, ) -> RusotoFuture<(), DeleteBucketAnalyticsConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put("id", &input.id); params.put_key("analytics"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketAnalyticsConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes the cors configuration information set for the 
bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_cors( &self, input: DeleteBucketCorsRequest, ) -> RusotoFuture<(), DeleteBucketCorsError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("cors"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketCorsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes the server-side encryption configuration from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_encryption( &self, input: DeleteBucketEncryptionRequest, ) -> RusotoFuture<(), DeleteBucketEncryptionError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("encryption"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketEncryptionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes an inventory configuration (identified by the inventory ID) from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_inventory_configuration( &self, input: 
DeleteBucketInventoryConfigurationRequest, ) -> RusotoFuture<(), DeleteBucketInventoryConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put("id", &input.id); params.put_key("inventory"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketInventoryConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes the lifecycle configuration from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_lifecycle( &self, input: DeleteBucketLifecycleRequest, ) -> RusotoFuture<(), DeleteBucketLifecycleError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("lifecycle"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketLifecycleError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes a metrics configuration (specified by the metrics configuration ID) from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_metrics_configuration( &self, input: 
DeleteBucketMetricsConfigurationRequest, ) -> RusotoFuture<(), DeleteBucketMetricsConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put("id", &input.id); params.put_key("metrics"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketMetricsConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes the policy from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_policy( &self, input: DeleteBucketPolicyRequest, ) -> RusotoFuture<(), DeleteBucketPolicyError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("policy"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketPolicyError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes the replication configuration from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_replication( &self, input: DeleteBucketReplicationRequest, ) -> RusotoFuture<(), DeleteBucketReplicationError> { let request_uri = 
format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("replication"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketReplicationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes the tags from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_tagging( &self, input: DeleteBucketTaggingRequest, ) -> RusotoFuture<(), DeleteBucketTaggingError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("tagging"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketTaggingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>This operation removes the website configuration from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_website( &self, input: DeleteBucketWebsiteRequest, ) -> RusotoFuture<(), DeleteBucketWebsiteError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = 
Params::new(); params.put_key("website"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketWebsiteError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn&#39;t a null version, Amazon S3 does not remove any objects.</p> #[allow(unused_variables, warnings)] fn delete_object( &self, input: DeleteObjectRequest, ) -> RusotoFuture<DeleteObjectOutput, DeleteObjectError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); if let Some(ref mfa) = input.mfa { request.add_header("x-amz-mfa", &mfa.to_string()); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteObjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = DeleteObjectOutput::default(); } else { let reader = 
EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(DeleteObjectOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(delete_marker) = response.headers.get("x-amz-delete-marker") { let value = delete_marker.to_owned(); result.delete_marker = Some(value.parse::<bool>().unwrap()) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(version_id) = response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Removes the tag-set from an existing object.</p> #[allow(unused_variables, warnings)] fn delete_object_tagging( &self, input: DeleteObjectTaggingRequest, ) -> RusotoFuture<DeleteObjectTaggingOutput, DeleteObjectTaggingError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } params.put_key("tagging"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteObjectTaggingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = DeleteObjectTaggingOutput::default(); } else { let reader 
= EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(DeleteObjectTaggingOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(version_id) = response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.</p> #[allow(unused_variables, warnings)] fn delete_objects( &self, input: DeleteObjectsRequest, ) -> RusotoFuture<DeleteObjectsOutput, DeleteObjectsError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("POST", "s3", &self.region, &request_uri); if let Some(ref mfa) = input.mfa { request.add_header("x-amz-mfa", &mfa.to_string()); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let mut params = Params::new(); params.put_key("delete"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); DeleteSerializer::serialize(&mut writer, "Delete", &input.delete); request.set_payload(Some(writer.into_inner())); request.set_content_md5_header(); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteObjectsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = 
DeleteObjectsOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(DeleteObjectsOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the accelerate configuration of a bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_accelerate_configuration( &self, input: GetBucketAccelerateConfigurationRequest, ) -> RusotoFuture<GetBucketAccelerateConfigurationOutput, GetBucketAccelerateConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("accelerate"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketAccelerateConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketAccelerateConfigurationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = 
try!( GetBucketAccelerateConfigurationOutputDeserializer::deserialize( &actual_tag_name, &mut stack ) ); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Gets the access control policy for the bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_acl( &self, input: GetBucketAclRequest, ) -> RusotoFuture<GetBucketAclOutput, GetBucketAclError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("acl"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketAclError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketAclOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketAclOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Gets an analytics configuration for the bucket (specified by the analytics configuration ID).</p> #[allow(unused_variables, warnings)] fn get_bucket_analytics_configuration( &self, input: GetBucketAnalyticsConfigurationRequest, ) -> RusotoFuture<GetBucketAnalyticsConfigurationOutput, GetBucketAnalyticsConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, 
&request_uri); let mut params = Params::new(); params.put("id", &input.id); params.put_key("analytics"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketAnalyticsConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketAnalyticsConfigurationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!( GetBucketAnalyticsConfigurationOutputDeserializer::deserialize( &actual_tag_name, &mut stack ) ); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the cors configuration for the bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_cors( &self, input: GetBucketCorsRequest, ) -> RusotoFuture<GetBucketCorsOutput, GetBucketCorsError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("cors"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketCorsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } 
future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketCorsOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketCorsOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the server-side encryption configuration of a bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_encryption( &self, input: GetBucketEncryptionRequest, ) -> RusotoFuture<GetBucketEncryptionOutput, GetBucketEncryptionError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("encryption"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketEncryptionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketEncryptionOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketEncryptionOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } 
Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns an inventory configuration (identified by the inventory ID) from the bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_inventory_configuration( &self, input: GetBucketInventoryConfigurationRequest, ) -> RusotoFuture<GetBucketInventoryConfigurationOutput, GetBucketInventoryConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put("id", &input.id); params.put_key("inventory"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketInventoryConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketInventoryConfigurationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!( GetBucketInventoryConfigurationOutputDeserializer::deserialize( &actual_tag_name, &mut stack ) ); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Deprecated, see the GetBucketLifecycleConfiguration operation.</p> #[allow(unused_variables, warnings)] fn get_bucket_lifecycle( &self, input: GetBucketLifecycleRequest, ) -> RusotoFuture<GetBucketLifecycleOutput, GetBucketLifecycleError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", 
"s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("lifecycle"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketLifecycleError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketLifecycleOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketLifecycleOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the lifecycle configuration information set on the bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_lifecycle_configuration( &self, input: GetBucketLifecycleConfigurationRequest, ) -> RusotoFuture<GetBucketLifecycleConfigurationOutput, GetBucketLifecycleConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("lifecycle"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketLifecycleConfigurationError::from_body( 
String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketLifecycleConfigurationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!( GetBucketLifecycleConfigurationOutputDeserializer::deserialize( &actual_tag_name, &mut stack ) ); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the region the bucket resides in.</p> #[allow(unused_variables, warnings)] fn get_bucket_location( &self, input: GetBucketLocationRequest, ) -> RusotoFuture<GetBucketLocationOutput, GetBucketLocationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("location"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketLocationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketLocationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = 
try!(GetBucketLocationOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.</p> #[allow(unused_variables, warnings)] fn get_bucket_logging( &self, input: GetBucketLoggingRequest, ) -> RusotoFuture<GetBucketLoggingOutput, GetBucketLoggingError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("logging"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketLoggingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketLoggingOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketLoggingOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Gets a metrics configuration (specified by the metrics configuration ID) from the bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_metrics_configuration( &self, input: GetBucketMetricsConfigurationRequest, ) -> RusotoFuture<GetBucketMetricsConfigurationOutput, GetBucketMetricsConfigurationError> { let request_uri = 
            format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        // Query string carries both the configuration id and the "metrics" subresource.
        let mut params = Params::new();
        params.put("id", &input.id);
        params.put_key("metrics");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetBucketMetricsConfigurationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Empty body deserializes to the default output.
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = GetBucketMetricsConfigurationOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(
                        GetBucketMetricsConfigurationOutputDeserializer::deserialize(
                            &actual_tag_name,
                            &mut stack
                        )
                    );
                }
                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Deprecated, see the GetBucketNotificationConfiguration operation.</p>
    #[allow(unused_variables, warnings)]
    fn get_bucket_notification(
        &self,
        input: GetBucketNotificationConfigurationRequest,
    ) -> RusotoFuture<NotificationConfigurationDeprecated, GetBucketNotificationError> {
        // Issues GET /{bucket}?notification; returns the deprecated output shape.
        let request_uri = format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        let mut params = Params::new();
        params.put_key("notification");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return
                // Non-2xx (other than 204/206): convert the body into a typed error.
                future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetBucketNotificationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Empty body deserializes to the default (deprecated) configuration.
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = NotificationConfigurationDeprecated::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(
                        NotificationConfigurationDeprecatedDeserializer::deserialize(
                            &actual_tag_name,
                            &mut stack
                        )
                    );
                }
                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Returns the notification configuration of a bucket.</p>
    #[allow(unused_variables, warnings)]
    fn get_bucket_notification_configuration(
        &self,
        input: GetBucketNotificationConfigurationRequest,
    ) -> RusotoFuture<NotificationConfiguration, GetBucketNotificationConfigurationError> {
        // Issues GET /{bucket}?notification (current, non-deprecated shape).
        let request_uri = format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        let mut params = Params::new();
        params.put_key("notification");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetBucketNotificationConfigurationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = NotificationConfiguration::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(NotificationConfigurationDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Returns the policy of a specified bucket.</p>
    #[allow(unused_variables, warnings)]
    fn get_bucket_policy(
        &self,
        input: GetBucketPolicyRequest,
    ) -> RusotoFuture<GetBucketPolicyOutput, GetBucketPolicyError> {
        // Issues GET /{bucket}?policy.
        let request_uri = format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        let mut params = Params::new();
        params.put_key("policy");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetBucketPolicyError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Unlike the XML-backed operations, the policy body is raw JSON text,
            // so it is stored as-is (lossily UTF-8 decoded) rather than parsed.
            future::Either::A(response.buffer().from_err().map(move |response| {
                let mut result = GetBucketPolicyOutput::default();
                result.policy = Some(String::from_utf8_lossy(response.body.as_ref()).into());
                result
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Returns the replication configuration of a bucket.</p>
    #[allow(unused_variables, warnings)]
    fn get_bucket_replication(
        &self,
        input: GetBucketReplicationRequest,
    ) -> RusotoFuture<GetBucketReplicationOutput, GetBucketReplicationError> {
        // Issues GET /{bucket}?replication.
        let request_uri = format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        let mut params = Params::new();
        params.put_key("replication");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetBucketReplicationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Empty body deserializes to the default output.
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = GetBucketReplicationOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(GetBucketReplicationOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Returns the request payment configuration of a bucket.</p>
    #[allow(unused_variables, warnings)]
    fn get_bucket_request_payment(
        &self,
        input: GetBucketRequestPaymentRequest,
    ) -> RusotoFuture<GetBucketRequestPaymentOutput, GetBucketRequestPaymentError> {
        // Issues GET /{bucket}?requestPayment.
        let request_uri = format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        let mut params = Params::new();
        params.put_key("requestPayment");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetBucketRequestPaymentError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Empty body deserializes to the default output.
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = GetBucketRequestPaymentOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(GetBucketRequestPaymentOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Returns the tag set associated with the bucket.</p>
    #[allow(unused_variables, warnings)]
    fn get_bucket_tagging(
        &self,
        input: GetBucketTaggingRequest,
    ) -> RusotoFuture<GetBucketTaggingOutput, GetBucketTaggingError> {
        // Issues GET /{bucket}?tagging.
        let request_uri = format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        let mut params = Params::new();
        params.put_key("tagging");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetBucketTaggingError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Empty body deserializes to the default output.
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = GetBucketTaggingOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(GetBucketTaggingOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Returns the versioning state of a bucket.</p>
    #[allow(unused_variables, warnings)]
    fn get_bucket_versioning(
        &self,
        input: GetBucketVersioningRequest,
    ) ->
        RusotoFuture<GetBucketVersioningOutput, GetBucketVersioningError> {
        // Issues GET /{bucket}?versioning.
        let request_uri = format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        let mut params = Params::new();
        params.put_key("versioning");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetBucketVersioningError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Empty body deserializes to the default output.
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = GetBucketVersioningOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(GetBucketVersioningOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Returns the website configuration for a bucket.</p>
    #[allow(unused_variables, warnings)]
    fn get_bucket_website(
        &self,
        input: GetBucketWebsiteRequest,
    ) -> RusotoFuture<GetBucketWebsiteOutput, GetBucketWebsiteError> {
        // Issues GET /{bucket}?website.
        let request_uri = format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        let mut params = Params::new();
        params.put_key("website");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                // Non-2xx (other than 204/206): convert the body into a typed error.
                return future::Either::B(response.buffer().from_err().and_then(|response|
                {
                    Err(GetBucketWebsiteError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Empty body deserializes to the default output.
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = GetBucketWebsiteOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(GetBucketWebsiteOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Retrieves objects from Amazon S3.</p>
    #[allow(unused_variables, warnings)]
    fn get_object(&self, input: GetObjectRequest) -> RusotoFuture<GetObjectOutput, GetObjectError> {
        // Issues GET /{bucket}/{key}. Optional request fields are mapped onto
        // HTTP headers (conditional gets, Range, requester-pays, SSE-C) and
        // query parameters (partNumber, response-* overrides, versionId).
        let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        if let Some(ref if_match) = input.if_match {
            request.add_header("If-Match", &if_match.to_string());
        }

        if let Some(ref if_modified_since) = input.if_modified_since {
            request.add_header("If-Modified-Since", &if_modified_since.to_string());
        }

        if let Some(ref if_none_match) = input.if_none_match {
            request.add_header("If-None-Match", &if_none_match.to_string());
        }

        if let Some(ref if_unmodified_since) = input.if_unmodified_since {
            request.add_header("If-Unmodified-Since", &if_unmodified_since.to_string());
        }

        if let Some(ref range) = input.range {
            request.add_header("Range", &range.to_string());
        }

        if let Some(ref request_payer) = input.request_payer {
            request.add_header("x-amz-request-payer", &request_payer.to_string());
        }

        // Customer-provided encryption (SSE-C) headers.
        if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm {
            request.add_header(
                "x-amz-server-side-encryption-customer-algorithm",
                &sse_customer_algorithm.to_string(),
            );
        }

        if let Some(ref sse_customer_key) =
            input.sse_customer_key {
            request.add_header(
                "x-amz-server-side-encryption-customer-key",
                &sse_customer_key.to_string(),
            );
        }

        if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 {
            request.add_header(
                "x-amz-server-side-encryption-customer-key-MD5",
                &sse_customer_key_md5.to_string(),
            );
        }

        let mut params = Params::new();

        if let Some(ref x) = input.part_number {
            params.put("partNumber", x);
        }

        // response-* parameters override the corresponding response headers.
        if let Some(ref x) = input.response_cache_control {
            params.put("response-cache-control", x);
        }

        if let Some(ref x) = input.response_content_disposition {
            params.put("response-content-disposition", x);
        }

        if let Some(ref x) = input.response_content_encoding {
            params.put("response-content-encoding", x);
        }

        if let Some(ref x) = input.response_content_language {
            params.put("response-content-language", x);
        }

        if let Some(ref x) = input.response_content_type {
            params.put("response-content-type", x);
        }

        if let Some(ref x) = input.response_expires {
            params.put("response-expires", x);
        }

        if let Some(ref x) = input.version_id {
            params.put("versionId", x);
        }

        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetObjectError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // The object payload is NOT buffered: it is handed back to the
            // caller as a streaming body; everything else comes from headers.
            let mut result = GetObjectOutput::default();

            result.body = Some(StreamingBody {
                len: None,
                inner: response.body,
            });

            if let Some(accept_ranges) = response.headers.get("accept-ranges") {
                let value = accept_ranges.to_owned();
                result.accept_ranges = Some(value)
            };

            if let Some(cache_control) = response.headers.get("Cache-Control") {
                let value = cache_control.to_owned();
                result.cache_control = Some(value)
            };

            if let Some(content_disposition) = response.headers.get("Content-Disposition") {
                let value = content_disposition.to_owned();
                result.content_disposition = Some(value)
            };

            if let Some(content_encoding) = response.headers.get("Content-Encoding") {
                let value = content_encoding.to_owned();
                result.content_encoding = Some(value)
            };

            if let Some(content_language) = response.headers.get("Content-Language") {
                let value = content_language.to_owned();
                result.content_language = Some(value)
            };

            if let Some(content_length) = response.headers.get("Content-Length") {
                let value = content_length.to_owned();
                result.content_length = Some(value.parse::<i64>().unwrap())
            };

            if let Some(content_range) = response.headers.get("Content-Range") {
                let value = content_range.to_owned();
                result.content_range = Some(value)
            };

            if let Some(content_type) = response.headers.get("Content-Type") {
                let value = content_type.to_owned();
                result.content_type = Some(value)
            };

            if let Some(delete_marker) = response.headers.get("x-amz-delete-marker") {
                let value = delete_marker.to_owned();
                result.delete_marker = Some(value.parse::<bool>().unwrap())
            };

            if let Some(e_tag) = response.headers.get("ETag") {
                let value = e_tag.to_owned();
                result.e_tag = Some(value)
            };

            if let Some(expiration) = response.headers.get("x-amz-expiration") {
                let value = expiration.to_owned();
                result.expiration = Some(value)
            };

            if let Some(expires) = response.headers.get("Expires") {
                let value = expires.to_owned();
                result.expires = Some(value)
            };

            if let Some(last_modified) = response.headers.get("Last-Modified") {
                let value = last_modified.to_owned();
                result.last_modified = Some(value)
            };

            // User metadata: collect every x-amz-meta-* header, keyed by the
            // suffix after the prefix.
            let mut values = ::std::collections::HashMap::new();
            for (key, value) in response.headers.iter() {
                if key.starts_with("x-amz-meta-") {
                    values.insert(key["x-amz-meta-".len()..].to_owned(), value.to_owned());
                }
            }
            result.metadata = Some(values);

            if let Some(missing_meta) = response.headers.get("x-amz-missing-meta") {
                let value = missing_meta.to_owned();
                result.missing_meta = Some(value.parse::<i64>().unwrap())
            };

            if let Some(parts_count) = response.headers.get("x-amz-mp-parts-count") {
                let value = parts_count.to_owned();
                result.parts_count = Some(value.parse::<i64>().unwrap())
            };

            if let Some(replication_status) = response.headers.get("x-amz-replication-status") {
                let value = replication_status.to_owned();
                result.replication_status = Some(value)
            };

            if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
                let value = request_charged.to_owned();
                result.request_charged = Some(value)
            };

            if let Some(restore) = response.headers.get("x-amz-restore") {
                let value = restore.to_owned();
                result.restore = Some(value)
            };

            if let Some(sse_customer_algorithm) = response
                .headers
                .get("x-amz-server-side-encryption-customer-algorithm")
            {
                let value = sse_customer_algorithm.to_owned();
                result.sse_customer_algorithm = Some(value)
            };

            if let Some(sse_customer_key_md5) = response
                .headers
                .get("x-amz-server-side-encryption-customer-key-MD5")
            {
                let value = sse_customer_key_md5.to_owned();
                result.sse_customer_key_md5 = Some(value)
            };

            if let Some(ssekms_key_id) = response
                .headers
                .get("x-amz-server-side-encryption-aws-kms-key-id")
            {
                let value = ssekms_key_id.to_owned();
                result.ssekms_key_id = Some(value)
            };

            if let Some(server_side_encryption) =
                response.headers.get("x-amz-server-side-encryption")
            {
                let value = server_side_encryption.to_owned();
                result.server_side_encryption = Some(value)
            };

            if let Some(storage_class) = response.headers.get("x-amz-storage-class") {
                let value = storage_class.to_owned();
                result.storage_class = Some(value)
            };

            if let Some(tag_count) = response.headers.get("x-amz-tagging-count") {
                let value = tag_count.to_owned();
                result.tag_count = Some(value.parse::<i64>().unwrap())
            };

            if let Some(version_id) = response.headers.get("x-amz-version-id") {
                let value = version_id.to_owned();
                result.version_id = Some(value)
            };

            if let Some(website_redirect_location) =
                response.headers.get("x-amz-website-redirect-location")
            {
                let value = website_redirect_location.to_owned();
                result.website_redirect_location = Some(value)
            };
            future::Either::A(future::ok(result))
        });

        RusotoFuture::new(future)
    }

    /// <p>Returns the access control list (ACL) of an object.</p>
    #[allow(unused_variables, warnings)]
    fn get_object_acl(
        &self,
        input: GetObjectAclRequest,
    ) -> RusotoFuture<GetObjectAclOutput, GetObjectAclError> {
        // Issues GET /{bucket}/{key}?acl (optionally for a specific versionId).
        let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        if let Some(ref request_payer) = input.request_payer {
            request.add_header("x-amz-request-payer", &request_payer.to_string());
        }

        let mut params = Params::new();

        if let Some(ref x) = input.version_id {
            params.put("versionId", x);
        }

        params.put_key("acl");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetObjectAclError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Empty body deserializes to the default output; the
            // request-charged flag is then lifted from a response header.
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = GetObjectAclOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(GetObjectAclOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }

                if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
                    let value = request_charged.to_owned();
                    result.request_charged = Some(value)
                };

                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Returns the tag-set of an object.</p>
    #[allow(unused_variables, warnings)]
    fn get_object_tagging(
        &self,
        input: GetObjectTaggingRequest,
    ) ->
        RusotoFuture<GetObjectTaggingOutput, GetObjectTaggingError> {
        // Issues GET /{bucket}/{key}?tagging (optionally for a versionId).
        let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        let mut params = Params::new();

        if let Some(ref x) = input.version_id {
            params.put("versionId", x);
        }

        params.put_key("tagging");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetObjectTaggingError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Empty body deserializes to the default output; the version id is
            // then lifted from a response header.
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = GetObjectTaggingOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(GetObjectTaggingOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }

                if let Some(version_id) = response.headers.get("x-amz-version-id") {
                    let value = version_id.to_owned();
                    result.version_id = Some(value)
                };

                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Return torrent files from a bucket.</p>
    #[allow(unused_variables, warnings)]
    fn get_object_torrent(
        &self,
        input: GetObjectTorrentRequest,
    ) -> RusotoFuture<GetObjectTorrentOutput, GetObjectTorrentError> {
        // Issues GET /{bucket}/{key}?torrent.
        let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);

        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);

        if let Some(ref request_payer) = input.request_payer {
            request.add_header("x-amz-request-payer", &request_payer.to_string());
        }

        let
            mut params = Params::new();
        params.put_key("torrent");
        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(GetObjectTorrentError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // The torrent payload is returned as an unbuffered streaming body.
            let mut result = GetObjectTorrentOutput::default();

            result.body = Some(StreamingBody {
                len: None,
                inner: response.body,
            });

            if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
                let value = request_charged.to_owned();
                result.request_charged = Some(value)
            };

            future::Either::A(future::ok(result))
        });

        RusotoFuture::new(future)
    }

    /// <p>This operation is useful to determine if a bucket exists and you have permission to access it.</p>
    #[allow(unused_variables, warnings)]
    fn head_bucket(&self, input: HeadBucketRequest) -> RusotoFuture<(), HeadBucketError> {
        // Issues HEAD /{bucket}; success carries no payload, so the response
        // is dropped and the future resolves to ().
        let request_uri = format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("HEAD", "s3", &self.region, &request_uri);

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than 204/206): convert the body into a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(HeadBucketError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            future::Either::A(future::ok(::std::mem::drop(response)))
        });

        RusotoFuture::new(future)
    }

    /// <p>The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you&#39;re only interested in an object&#39;s metadata. To use HEAD, you must have READ access to the object.</p>
    #[allow(unused_variables, warnings)]
    fn head_object(
        &self,
        input: HeadObjectRequest,
    ) -> RusotoFuture<HeadObjectOutput, HeadObjectError> {
        // Issues HEAD /{bucket}/{key}; optional request fields map onto the
        // same conditional/Range/requester-pays/SSE-C headers as get_object.
        let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);

        let mut request = SignedRequest::new("HEAD", "s3", &self.region, &request_uri);

        if let Some(ref if_match) = input.if_match {
            request.add_header("If-Match", &if_match.to_string());
        }

        if let Some(ref if_modified_since) = input.if_modified_since {
            request.add_header("If-Modified-Since", &if_modified_since.to_string());
        }

        if let Some(ref if_none_match) = input.if_none_match {
            request.add_header("If-None-Match", &if_none_match.to_string());
        }

        if let Some(ref if_unmodified_since) = input.if_unmodified_since {
            request.add_header("If-Unmodified-Since", &if_unmodified_since.to_string());
        }

        if let Some(ref range) = input.range {
            request.add_header("Range", &range.to_string());
        }

        if let Some(ref request_payer) = input.request_payer {
            request.add_header("x-amz-request-payer", &request_payer.to_string());
        }

        // Customer-provided encryption (SSE-C) headers.
        if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm {
            request.add_header(
                "x-amz-server-side-encryption-customer-algorithm",
                &sse_customer_algorithm.to_string(),
            );
        }

        if let Some(ref sse_customer_key) = input.sse_customer_key {
            request.add_header(
                "x-amz-server-side-encryption-customer-key",
                &sse_customer_key.to_string(),
            );
        }

        if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 {
            request.add_header(
                "x-amz-server-side-encryption-customer-key-MD5",
                &sse_customer_key_md5.to_string(),
            );
        }

        let mut params = Params::new();

        if let Some(ref x) = input.part_number {
            params.put("partNumber", x);
        }

        if let Some(ref x) = input.version_id {
            params.put("versionId", x);
        }

        request.set_params(params);

        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status !=
                StatusCode::PartialContent
            {
                // Non-2xx (other than 204/206): convert the body into a typed error.
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(HeadObjectError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Success: buffer the (normally empty) body, deserialize it if
            // present, then populate the output from response headers.
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = HeadObjectOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(HeadObjectOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }

                if let Some(accept_ranges) = response.headers.get("accept-ranges") {
                    let value = accept_ranges.to_owned();
                    result.accept_ranges = Some(value)
                };

                if let Some(cache_control) = response.headers.get("Cache-Control") {
                    let value = cache_control.to_owned();
                    result.cache_control = Some(value)
                };

                if let Some(content_disposition) = response.headers.get("Content-Disposition") {
                    let value = content_disposition.to_owned();
                    result.content_disposition = Some(value)
                };

                if let Some(content_encoding) = response.headers.get("Content-Encoding") {
                    let value = content_encoding.to_owned();
                    result.content_encoding = Some(value)
                };

                if let Some(content_language) = response.headers.get("Content-Language") {
                    let value = content_language.to_owned();
                    result.content_language = Some(value)
                };

                if let Some(content_length) = response.headers.get("Content-Length") {
                    let value = content_length.to_owned();
                    result.content_length = Some(value.parse::<i64>().unwrap())
                };

                if let Some(content_type) = response.headers.get("Content-Type") {
                    let value = content_type.to_owned();
                    result.content_type = Some(value)
                };

                if let Some(delete_marker) = response.headers.get("x-amz-delete-marker") {
                    let value = delete_marker.to_owned();
                    result.delete_marker = Some(value.parse::<bool>().unwrap())
                };

                if
                    let Some(e_tag) = response.headers.get("ETag") {
                    let value = e_tag.to_owned();
                    result.e_tag = Some(value)
                };

                if let Some(expiration) = response.headers.get("x-amz-expiration") {
                    let value = expiration.to_owned();
                    result.expiration = Some(value)
                };

                if let Some(expires) = response.headers.get("Expires") {
                    let value = expires.to_owned();
                    result.expires = Some(value)
                };

                if let Some(last_modified) = response.headers.get("Last-Modified") {
                    let value = last_modified.to_owned();
                    result.last_modified = Some(value)
                };

                // User metadata: collect every x-amz-meta-* header, keyed by
                // the suffix after the prefix.
                let mut values = ::std::collections::HashMap::new();
                for (key, value) in response.headers.iter() {
                    if key.starts_with("x-amz-meta-") {
                        values.insert(key["x-amz-meta-".len()..].to_owned(), value.to_owned());
                    }
                }
                result.metadata = Some(values);

                if let Some(missing_meta) = response.headers.get("x-amz-missing-meta") {
                    let value = missing_meta.to_owned();
                    result.missing_meta = Some(value.parse::<i64>().unwrap())
                };

                if let Some(parts_count) = response.headers.get("x-amz-mp-parts-count") {
                    let value = parts_count.to_owned();
                    result.parts_count = Some(value.parse::<i64>().unwrap())
                };

                if let Some(replication_status) = response.headers.get("x-amz-replication-status")
                {
                    let value = replication_status.to_owned();
                    result.replication_status = Some(value)
                };

                if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
                    let value = request_charged.to_owned();
                    result.request_charged = Some(value)
                };

                if let Some(restore) = response.headers.get("x-amz-restore") {
                    let value = restore.to_owned();
                    result.restore = Some(value)
                };

                if let Some(sse_customer_algorithm) = response
                    .headers
                    .get("x-amz-server-side-encryption-customer-algorithm")
                {
                    let value = sse_customer_algorithm.to_owned();
                    result.sse_customer_algorithm = Some(value)
                };

                if let Some(sse_customer_key_md5) = response
                    .headers
                    .get("x-amz-server-side-encryption-customer-key-MD5")
                {
                    let value = sse_customer_key_md5.to_owned();
                    result.sse_customer_key_md5 = Some(value)
                };

                if let Some(ssekms_key_id) =
                response
                .headers
                .get("x-amz-server-side-encryption-aws-kms-key-id")
            {
                let value = ssekms_key_id.to_owned();
                result.ssekms_key_id = Some(value)
            };
            if let Some(server_side_encryption) =
                response.headers.get("x-amz-server-side-encryption")
            {
                let value = server_side_encryption.to_owned();
                result.server_side_encryption = Some(value)
            };
            if let Some(storage_class) = response.headers.get("x-amz-storage-class") {
                let value = storage_class.to_owned();
                result.storage_class = Some(value)
            };
            if let Some(version_id) = response.headers.get("x-amz-version-id") {
                let value = version_id.to_owned();
                result.version_id = Some(value)
            };
            if let Some(website_redirect_location) =
                response.headers.get("x-amz-website-redirect-location")
            {
                let value = website_redirect_location.to_owned();
                result.website_redirect_location = Some(value)
            };
            Ok(result)
        }))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated operation — GET /{bucket}?analytics with an
    // optional continuation-token; an empty response body yields the Default
    // output, otherwise the XML payload is deserialized.
    /// <p>Lists the analytics configurations for the bucket.</p>
    #[allow(unused_variables, warnings)]
    fn list_bucket_analytics_configurations(
        &self,
        input: ListBucketAnalyticsConfigurationsRequest,
    ) -> RusotoFuture<ListBucketAnalyticsConfigurationsOutput, ListBucketAnalyticsConfigurationsError>
    {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
        let mut params = Params::new();
        if let Some(ref x) = input.continuation_token {
            params.put("continuation-token", x);
        }
        // `put_key` adds the valueless "analytics" subresource to the query string.
        params.put_key("analytics");
        request.set_params(params);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(ListBucketAnalyticsConfigurationsError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = ListBucketAnalyticsConfigurationsOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(
                        ListBucketAnalyticsConfigurationsOutputDeserializer::deserialize(
                            &actual_tag_name,
                            &mut stack
                        )
                    );
                }
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): same generated shape as the analytics listing, for the
    // "?inventory" subresource.
    /// <p>Returns a list of inventory configurations for the bucket.</p>
    #[allow(unused_variables, warnings)]
    fn list_bucket_inventory_configurations(
        &self,
        input: ListBucketInventoryConfigurationsRequest,
    ) -> RusotoFuture<ListBucketInventoryConfigurationsOutput, ListBucketInventoryConfigurationsError>
    {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
        let mut params = Params::new();
        if let Some(ref x) = input.continuation_token {
            params.put("continuation-token", x);
        }
        params.put_key("inventory");
        request.set_params(params);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(ListBucketInventoryConfigurationsError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = ListBucketInventoryConfigurationsOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(
                        ListBucketInventoryConfigurationsOutputDeserializer::deserialize(
                            &actual_tag_name,
                            &mut stack
                        )
                    );
                }
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — GET /{bucket}?metrics, same listing pattern.
    /// <p>Lists the metrics configurations for the bucket.</p>
    #[allow(unused_variables, warnings)]
    fn list_bucket_metrics_configurations(
        &self,
        input: ListBucketMetricsConfigurationsRequest,
    ) -> RusotoFuture<ListBucketMetricsConfigurationsOutput, ListBucketMetricsConfigurationsError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
        let mut params = Params::new();
        if let Some(ref x) = input.continuation_token {
            params.put("continuation-token", x);
        }
        params.put_key("metrics");
        request.set_params(params);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(ListBucketMetricsConfigurationsError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = ListBucketMetricsConfigurationsOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(
                        ListBucketMetricsConfigurationsOutputDeserializer::deserialize(
                            &actual_tag_name,
                            &mut stack
                        )
                    );
                }
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — GET / (service root), no parameters; the XML
    // body lists all buckets owned by the caller.
    /// <p>Returns a list of all buckets owned by the authenticated sender of the request.</p>
    #[allow(unused_variables, warnings)]
    fn list_buckets(&self) -> RusotoFuture<ListBucketsOutput, ListBucketsError> {
        let request_uri = "/";
        let
        mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(ListBucketsError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = ListBucketsOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(ListBucketsOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — GET /{bucket}?uploads with the usual paging
    // query parameters; only present input fields become query params.
    /// <p>This operation lists in-progress multipart uploads.</p>
    #[allow(unused_variables, warnings)]
    fn list_multipart_uploads(
        &self,
        input: ListMultipartUploadsRequest,
    ) -> RusotoFuture<ListMultipartUploadsOutput, ListMultipartUploadsError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
        let mut params = Params::new();
        if let Some(ref x) = input.delimiter {
            params.put("delimiter", x);
        }
        if let Some(ref x) = input.encoding_type {
            params.put("encoding-type", x);
        }
        if let Some(ref x) = input.key_marker {
            params.put("key-marker", x);
        }
        if let Some(ref x) = input.max_uploads {
            params.put("max-uploads", x);
        }
        if let Some(ref x) = input.prefix {
            params.put("prefix", x);
        }
        if let Some(ref x) = input.upload_id_marker {
            params.put("upload-id-marker", x);
        }
        params.put_key("uploads");
        request.set_params(params);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(ListMultipartUploadsError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = ListMultipartUploadsOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(ListMultipartUploadsOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — GET /{bucket}?versions with paging parameters
    // (key-marker / version-id-marker pair for resumption).
    /// <p>Returns metadata about all of the versions of objects in a bucket.</p>
    #[allow(unused_variables, warnings)]
    fn list_object_versions(
        &self,
        input: ListObjectVersionsRequest,
    ) -> RusotoFuture<ListObjectVersionsOutput, ListObjectVersionsError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
        let mut params = Params::new();
        if let Some(ref x) = input.delimiter {
            params.put("delimiter", x);
        }
        if let Some(ref x) = input.encoding_type {
            params.put("encoding-type", x);
        }
        if let Some(ref x) = input.key_marker {
            params.put("key-marker", x);
        }
        if let Some(ref x) = input.max_keys {
            params.put("max-keys", x);
        }
        if let Some(ref x) = input.prefix {
            params.put("prefix", x);
        }
        if let Some(ref x) = input.version_id_marker {
            params.put("version-id-marker", x);
        }
        params.put_key("versions");
        request.set_params(params);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status !=
            StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(ListObjectVersionsError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = ListObjectVersionsOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(ListObjectVersionsOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — ListObjects (v1): plain GET /{bucket} with
    // `marker`-style paging and an optional x-amz-request-payer header; no
    // subresource key, unlike the v2 variant below.
    /// <p>Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.</p>
    #[allow(unused_variables, warnings)]
    fn list_objects(
        &self,
        input: ListObjectsRequest,
    ) -> RusotoFuture<ListObjectsOutput, ListObjectsError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
        if let Some(ref request_payer) = input.request_payer {
            request.add_header("x-amz-request-payer", &request_payer.to_string());
        }
        let mut params = Params::new();
        if let Some(ref x) = input.delimiter {
            params.put("delimiter", x);
        }
        if let Some(ref x) = input.encoding_type {
            params.put("encoding-type", x);
        }
        if let Some(ref x) = input.marker {
            params.put("marker", x);
        }
        if let Some(ref x) = input.max_keys {
            params.put("max-keys", x);
        }
        if let Some(ref x) = input.prefix {
            params.put("prefix", x);
        }
        request.set_params(params);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return
                future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(ListObjectsError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = ListObjectsOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(ListObjectsOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — ListObjectsV2: continuation-token paging and a
    // fixed `list-type=2` query parameter distinguish it from v1 above.
    /// <p>Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend you use this revised API for new application development.</p>
    #[allow(unused_variables, warnings)]
    fn list_objects_v2(
        &self,
        input: ListObjectsV2Request,
    ) -> RusotoFuture<ListObjectsV2Output, ListObjectsV2Error> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
        if let Some(ref request_payer) = input.request_payer {
            request.add_header("x-amz-request-payer", &request_payer.to_string());
        }
        let mut params = Params::new();
        if let Some(ref x) = input.continuation_token {
            params.put("continuation-token", x);
        }
        if let Some(ref x) = input.delimiter {
            params.put("delimiter", x);
        }
        if let Some(ref x) = input.encoding_type {
            params.put("encoding-type", x);
        }
        if let Some(ref x) = input.fetch_owner {
            params.put("fetch-owner", x);
        }
        if let Some(ref x) = input.max_keys {
            params.put("max-keys", x);
        }
        if let Some(ref x) = input.prefix {
            params.put("prefix", x);
        }
        if let Some(ref x) = input.start_after {
            params.put("start-after", x);
        }
        params.put("list-type", "2");
        request.set_params(params);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(ListObjectsV2Error::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = ListObjectsV2Output::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(ListObjectsV2OutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — GET /{bucket}/{key}?uploadId=...; the required
    // upload id goes in the query string, paging via part-number-marker.
    /// <p>Lists the parts that have been uploaded for a specific multipart upload.</p>
    #[allow(unused_variables, warnings)]
    fn list_parts(&self, input: ListPartsRequest) -> RusotoFuture<ListPartsOutput, ListPartsError> {
        let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);
        let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
        if let Some(ref request_payer) = input.request_payer {
            request.add_header("x-amz-request-payer", &request_payer.to_string());
        }
        let mut params = Params::new();
        if let Some(ref x) = input.max_parts {
            params.put("max-parts", x);
        }
        if let Some(ref x) = input.part_number_marker {
            params.put("part-number-marker", x);
        }
        params.put("uploadId", &input.upload_id);
        request.set_params(params);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status !=
            StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(ListPartsError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = ListPartsOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(ListPartsOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                // Abort/billing metadata is delivered via headers, not the XML body.
                if let Some(abort_date) = response.headers.get("x-amz-abort-date") {
                    let value = abort_date.to_owned();
                    result.abort_date = Some(value)
                };
                if let Some(abort_rule_id) = response.headers.get("x-amz-abort-rule-id") {
                    let value = abort_rule_id.to_owned();
                    result.abort_rule_id = Some(value)
                };
                if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
                    let value = request_charged.to_owned();
                    result.request_charged = Some(value)
                };
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — PUT /{bucket}?accelerate with an XML body built
    // by the AccelerateConfiguration serializer; success is () (response dropped).
    /// <p>Sets the accelerate configuration of an existing bucket.</p>
    #[allow(unused_variables, warnings)]
    fn put_bucket_accelerate_configuration(
        &self,
        input: PutBucketAccelerateConfigurationRequest,
    ) -> RusotoFuture<(), PutBucketAccelerateConfigurationError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        let mut params = Params::new();
        params.put_key("accelerate");
        request.set_params(params);
        let mut writer = EventWriter::new(Vec::new());
        AccelerateConfigurationSerializer::serialize(
            &mut writer,
            "AccelerateConfiguration",
            &input.accelerate_configuration,
        );
        request.set_payload(Some(writer.into_inner()));
        let future = self.inner.sign_and_dispatch(request,
        |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketAccelerateConfigurationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            // Success carries no payload of interest; drop the response.
            future::Either::A(future::ok(::std::mem::drop(response)))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — PUT /{bucket}?acl; canned-ACL and grant headers
    // are optional, and the AccessControlPolicy body is only serialized when
    // present (otherwise an empty payload is sent).
    /// <p>Sets the permissions on a bucket using access control lists (ACL).</p>
    #[allow(unused_variables, warnings)]
    fn put_bucket_acl(&self, input: PutBucketAclRequest) -> RusotoFuture<(), PutBucketAclError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        if let Some(ref acl) = input.acl {
            request.add_header("x-amz-acl", &acl.to_string());
        }
        if let Some(ref content_md5) = input.content_md5 {
            request.add_header("Content-MD5", &content_md5.to_string());
        }
        if let Some(ref grant_full_control) = input.grant_full_control {
            request.add_header("x-amz-grant-full-control", &grant_full_control.to_string());
        }
        if let Some(ref grant_read) = input.grant_read {
            request.add_header("x-amz-grant-read", &grant_read.to_string());
        }
        if let Some(ref grant_read_acp) = input.grant_read_acp {
            request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string());
        }
        if let Some(ref grant_write) = input.grant_write {
            request.add_header("x-amz-grant-write", &grant_write.to_string());
        }
        if let Some(ref grant_write_acp) = input.grant_write_acp {
            request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string());
        }
        let mut params = Params::new();
        params.put_key("acl");
        request.set_params(params);
        if input.access_control_policy.is_some() {
            let mut writer = EventWriter::new(Vec::new());
            AccessControlPolicySerializer::serialize(
                &mut writer,
                "AccessControlPolicy",
                input.access_control_policy.as_ref().unwrap(),
            );
            request.set_payload(Some(writer.into_inner()));
        } else {
            request.set_payload(Some(Vec::new()));
        }
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketAclError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(future::ok(::std::mem::drop(response)))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — PUT /{bucket}?analytics&id=...; the analytics
    // configuration is always serialized into the request body.
    /// <p>Sets an analytics configuration for the bucket (specified by the analytics configuration ID).</p>
    #[allow(unused_variables, warnings)]
    fn put_bucket_analytics_configuration(
        &self,
        input: PutBucketAnalyticsConfigurationRequest,
    ) -> RusotoFuture<(), PutBucketAnalyticsConfigurationError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        let mut params = Params::new();
        params.put("id", &input.id);
        params.put_key("analytics");
        request.set_params(params);
        let mut writer = EventWriter::new(Vec::new());
        AnalyticsConfigurationSerializer::serialize(
            &mut writer,
            "AnalyticsConfiguration",
            &input.analytics_configuration,
        );
        request.set_payload(Some(writer.into_inner()));
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketAnalyticsConfigurationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(future::ok(::std::mem::drop(response)))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — PUT /{bucket}?cors; after the payload is set,
    // set_content_md5_header() computes the Content-MD5 S3 requires for CORS.
    /// <p>Sets the cors configuration for a bucket.</p>
    #[allow(unused_variables, warnings)]
    fn put_bucket_cors(&self, input: PutBucketCorsRequest) -> RusotoFuture<(), PutBucketCorsError> {
        let request_uri = format!("/{bucket}", bucket =
        input.bucket);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        if let Some(ref content_md5) = input.content_md5 {
            request.add_header("Content-MD5", &content_md5.to_string());
        }
        let mut params = Params::new();
        params.put_key("cors");
        request.set_params(params);
        let mut writer = EventWriter::new(Vec::new());
        CORSConfigurationSerializer::serialize(
            &mut writer,
            "CORSConfiguration",
            &input.cors_configuration,
        );
        request.set_payload(Some(writer.into_inner()));
        request.set_content_md5_header();
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketCorsError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(future::ok(::std::mem::drop(response)))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — PUT /{bucket}?encryption with the server-side
    // encryption configuration serialized into the body.
    /// <p>Creates a new server-side encryption configuration (or replaces an existing one, if present).</p>
    #[allow(unused_variables, warnings)]
    fn put_bucket_encryption(
        &self,
        input: PutBucketEncryptionRequest,
    ) -> RusotoFuture<(), PutBucketEncryptionError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        if let Some(ref content_md5) = input.content_md5 {
            request.add_header("Content-MD5", &content_md5.to_string());
        }
        let mut params = Params::new();
        params.put_key("encryption");
        request.set_params(params);
        let mut writer = EventWriter::new(Vec::new());
        ServerSideEncryptionConfigurationSerializer::serialize(
            &mut writer,
            "ServerSideEncryptionConfiguration",
            &input.server_side_encryption_configuration,
        );
        request.set_payload(Some(writer.into_inner()));
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                &&
                response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketEncryptionError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(future::ok(::std::mem::drop(response)))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — PUT /{bucket}?inventory&id=... with the
    // inventory configuration serialized into the body.
    /// <p>Adds an inventory configuration (identified by the inventory ID) from the bucket.</p>
    #[allow(unused_variables, warnings)]
    fn put_bucket_inventory_configuration(
        &self,
        input: PutBucketInventoryConfigurationRequest,
    ) -> RusotoFuture<(), PutBucketInventoryConfigurationError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        let mut params = Params::new();
        params.put("id", &input.id);
        params.put_key("inventory");
        request.set_params(params);
        let mut writer = EventWriter::new(Vec::new());
        InventoryConfigurationSerializer::serialize(
            &mut writer,
            "InventoryConfiguration",
            &input.inventory_configuration,
        );
        request.set_payload(Some(writer.into_inner()));
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketInventoryConfigurationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(future::ok(::std::mem::drop(response)))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — deprecated PUT /{bucket}?lifecycle; the
    // configuration body is optional (empty payload when absent) and the
    // required Content-MD5 is computed via set_content_md5_header().
    /// <p>Deprecated, see the PutBucketLifecycleConfiguration operation.</p>
    #[allow(unused_variables, warnings)]
    fn put_bucket_lifecycle(
        &self,
        input: PutBucketLifecycleRequest,
    ) -> RusotoFuture<(), PutBucketLifecycleError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        if let Some(ref content_md5) = input.content_md5 {
            request.add_header("Content-MD5", &content_md5.to_string());
        }
        let mut params = Params::new();
        params.put_key("lifecycle");
        request.set_params(params);
        if input.lifecycle_configuration.is_some() {
            let mut writer = EventWriter::new(Vec::new());
            LifecycleConfigurationSerializer::serialize(
                &mut writer,
                "LifecycleConfiguration",
                input.lifecycle_configuration.as_ref().unwrap(),
            );
            request.set_payload(Some(writer.into_inner()));
        } else {
            request.set_payload(Some(Vec::new()));
        }
        request.set_content_md5_header();
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketLifecycleError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(future::ok(::std::mem::drop(response)))
        });
        RusotoFuture::new(future)
    }

    /// <p>Sets lifecycle configuration for your bucket.
    /// If a lifecycle configuration exists, it replaces it.</p>
    // NOTE(review): generated — current PUT /{bucket}?lifecycle; same optional
    // body + Content-MD5 pattern as the deprecated variant, but uses the
    // BucketLifecycleConfiguration serializer and takes no content_md5 input.
    #[allow(unused_variables, warnings)]
    fn put_bucket_lifecycle_configuration(
        &self,
        input: PutBucketLifecycleConfigurationRequest,
    ) -> RusotoFuture<(), PutBucketLifecycleConfigurationError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        let mut params = Params::new();
        params.put_key("lifecycle");
        request.set_params(params);
        if input.lifecycle_configuration.is_some() {
            let mut writer = EventWriter::new(Vec::new());
            BucketLifecycleConfigurationSerializer::serialize(
                &mut writer,
                "LifecycleConfiguration",
                input.lifecycle_configuration.as_ref().unwrap(),
            );
            request.set_payload(Some(writer.into_inner()));
        } else {
            request.set_payload(Some(Vec::new()));
        }
        request.set_content_md5_header();
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketLifecycleConfigurationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(future::ok(::std::mem::drop(response)))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — PUT /{bucket}?logging with a mandatory
    // BucketLoggingStatus XML body.
    /// <p>Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner.</p>
    #[allow(unused_variables, warnings)]
    fn put_bucket_logging(
        &self,
        input: PutBucketLoggingRequest,
    ) -> RusotoFuture<(), PutBucketLoggingError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        if let Some(ref content_md5) = input.content_md5 {
            request.add_header("Content-MD5", &content_md5.to_string());
        }
        let mut params = Params::new();
        params.put_key("logging");
        request.set_params(params);
        let mut writer = EventWriter::new(Vec::new());
        BucketLoggingStatusSerializer::serialize(
            &mut writer,
            "BucketLoggingStatus",
            &input.bucket_logging_status,
        );
        request.set_payload(Some(writer.into_inner()));
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketLoggingError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(future::ok(::std::mem::drop(response)))
        });
        RusotoFuture::new(future)
    }

    // NOTE(review): generated — PUT /{bucket}?metrics&id=... with the metrics
    // configuration serialized into the body.
    /// <p>Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.</p>
    #[allow(unused_variables, warnings)]
    fn put_bucket_metrics_configuration(
        &self,
        input: PutBucketMetricsConfigurationRequest,
    ) -> RusotoFuture<(), PutBucketMetricsConfigurationError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        let mut params = Params::new();
        params.put("id", &input.id);
        params.put_key("metrics");
        request.set_params(params);
        let mut writer = EventWriter::new(Vec::new());
        MetricsConfigurationSerializer::serialize(
            &mut writer,
            "MetricsConfiguration",
            &input.metrics_configuration,
        );
        request.set_payload(Some(writer.into_inner()));
        let future
= self.inner.sign_and_dispatch(request, |response| {
    // Anything other than 200/204/206 is an error: buffer the body and
    // parse it into a typed service error.
    if response.status != StatusCode::Ok
        && response.status != StatusCode::NoContent
        && response.status != StatusCode::PartialContent
    {
        return future::Either::B(response.buffer().from_err().and_then(|response| {
            Err(PutBucketMetricsConfigurationError::from_body(
                String::from_utf8_lossy(response.body.as_ref()).as_ref(),
            ))
        }));
    }
    // Success carries no body we care about: drop the response, resolve with ().
    future::Either::A(future::ok(::std::mem::drop(response)))
});
RusotoFuture::new(future)
}

/// <p>Deprecated, see the PutBucketNotificationConfiguration operation.</p>
#[allow(unused_variables, warnings)]
fn put_bucket_notification(
    &self,
    input: PutBucketNotificationRequest,
) -> RusotoFuture<(), PutBucketNotificationError> {
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    // Optional caller-supplied integrity header.
    if let Some(ref content_md5) = input.content_md5 {
        request.add_header("Content-MD5", &content_md5.to_string());
    }
    // The value-less "notification" query key selects the bucket-notification
    // subresource.
    let mut params = Params::new();
    params.put_key("notification");
    request.set_params(params);
    // Legacy API: the payload is serialized with the
    // NotificationConfigurationDeprecated shape, not the current one.
    let mut writer = EventWriter::new(Vec::new());
    NotificationConfigurationDeprecatedSerializer::serialize(
        &mut writer,
        "NotificationConfigurationDeprecated",
        &input.notification_configuration,
    );
    request.set_payload(Some(writer.into_inner()));
    let future = self.inner.sign_and_dispatch(request, |response| {
        // Anything other than 200/204/206 is an error; parse the XML error body.
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(PutBucketNotificationError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        // Success has no meaningful body; resolve with ().
        future::Either::A(future::ok(::std::mem::drop(response)))
    });
    RusotoFuture::new(future)
}

/// <p>Enables notifications of specified events for a bucket.</p>
#[allow(unused_variables, warnings)]
fn put_bucket_notification_configuration(
    &self,
    input: PutBucketNotificationConfigurationRequest,
) -> RusotoFuture<(),
PutBucketNotificationConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("notification"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); NotificationConfigurationSerializer::serialize( &mut writer, "NotificationConfiguration", &input.notification_configuration, ); request.set_payload(Some(writer.into_inner())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutBucketNotificationConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Replaces a policy on a bucket. 
If the bucket already has a policy, the one in this request completely replaces it.</p> #[allow(unused_variables, warnings)] fn put_bucket_policy( &self, input: PutBucketPolicyRequest, ) -> RusotoFuture<(), PutBucketPolicyError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref confirm_remove_self_bucket_access) = input.confirm_remove_self_bucket_access { request.add_header( "x-amz-confirm-remove-self-bucket-access", &confirm_remove_self_bucket_access.to_string(), ); } if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } let mut params = Params::new(); params.put_key("policy"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); PolicySerializer::serialize(&mut writer, "Policy", &input.policy); request.set_payload(Some(writer.into_inner())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutBucketPolicyError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Creates a new replication configuration (or replaces an existing one, if present).</p> #[allow(unused_variables, warnings)] fn put_bucket_replication( &self, input: PutBucketReplicationRequest, ) -> RusotoFuture<(), PutBucketReplicationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } let mut params = Params::new(); params.put_key("replication"); 
request.set_params(params);
let mut writer = EventWriter::new(Vec::new());
ReplicationConfigurationSerializer::serialize(
    &mut writer,
    "ReplicationConfiguration",
    &input.replication_configuration,
);
request.set_payload(Some(writer.into_inner()));
// NOTE(review): unlike most PUT-bucket calls in this file, this one forces a
// Content-MD5 header via set_content_md5_header() — presumably because S3
// rejects PutBucketReplication requests without one. Confirm against
// SignedRequest::set_content_md5_header before changing.
request.set_content_md5_header();
let future = self.inner.sign_and_dispatch(request, |response| {
    // Anything other than 200/204/206 is an error; parse the XML error body.
    if response.status != StatusCode::Ok
        && response.status != StatusCode::NoContent
        && response.status != StatusCode::PartialContent
    {
        return future::Either::B(response.buffer().from_err().and_then(|response| {
            Err(PutBucketReplicationError::from_body(
                String::from_utf8_lossy(response.body.as_ref()).as_ref(),
            ))
        }));
    }
    // Success has no meaningful body; resolve with ().
    future::Either::A(future::ok(::std::mem::drop(response)))
});
RusotoFuture::new(future)
}

/// <p>Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html</p>
#[allow(unused_variables, warnings)]
fn put_bucket_request_payment(
    &self,
    input: PutBucketRequestPaymentRequest,
) -> RusotoFuture<(), PutBucketRequestPaymentError> {
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    // Optional caller-supplied integrity header.
    if let Some(ref content_md5) = input.content_md5 {
        request.add_header("Content-MD5", &content_md5.to_string());
    }
    // Value-less "requestPayment" query key selects the subresource.
    let mut params = Params::new();
    params.put_key("requestPayment");
    request.set_params(params);
    let mut writer = EventWriter::new(Vec::new());
    RequestPaymentConfigurationSerializer::serialize(
        &mut writer,
        "RequestPaymentConfiguration",
        &input.request_payment_configuration,
    );
    request.set_payload(Some(writer.into_inner()));
    let future = self.inner.sign_and_dispatch(request, |response| {
        // Anything other than 200/204/206 is an error; parse the XML error body.
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(PutBucketRequestPaymentError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        // Success has no meaningful body; resolve with ().
        future::Either::A(future::ok(::std::mem::drop(response)))
    });
    RusotoFuture::new(future)
}

/// <p>Sets the tags for a bucket.</p>
#[allow(unused_variables, warnings)]
fn put_bucket_tagging(
    &self,
    input: PutBucketTaggingRequest,
) -> RusotoFuture<(), PutBucketTaggingError> {
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    // Optional caller-supplied integrity header.
    if let Some(ref content_md5) = input.content_md5 {
        request.add_header("Content-MD5", &content_md5.to_string());
    }
    // Value-less "tagging" query key selects the subresource.
    let mut params = Params::new();
    params.put_key("tagging");
    request.set_params(params);
    let mut writer = EventWriter::new(Vec::new());
    TaggingSerializer::serialize(&mut writer, "Tagging",
&input.tagging); request.set_payload(Some(writer.into_inner())); request.set_content_md5_header(); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutBucketTaggingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.</p> #[allow(unused_variables, warnings)] fn put_bucket_versioning( &self, input: PutBucketVersioningRequest, ) -> RusotoFuture<(), PutBucketVersioningError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } if let Some(ref mfa) = input.mfa { request.add_header("x-amz-mfa", &mfa.to_string()); } let mut params = Params::new(); params.put_key("versioning"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); VersioningConfigurationSerializer::serialize( &mut writer, "VersioningConfiguration", &input.versioning_configuration, ); request.set_payload(Some(writer.into_inner())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutBucketVersioningError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Set the website configuration 
for a bucket.</p> #[allow(unused_variables, warnings)] fn put_bucket_website( &self, input: PutBucketWebsiteRequest, ) -> RusotoFuture<(), PutBucketWebsiteError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } let mut params = Params::new(); params.put_key("website"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); WebsiteConfigurationSerializer::serialize( &mut writer, "WebsiteConfiguration", &input.website_configuration, ); request.set_payload(Some(writer.into_inner())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutBucketWebsiteError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Adds an object to a bucket.</p> #[allow(unused_variables, warnings)] fn put_object(&self, input: PutObjectRequest) -> RusotoFuture<PutObjectOutput, PutObjectError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref acl) = input.acl { request.add_header("x-amz-acl", &acl.to_string()); } if let Some(ref cache_control) = input.cache_control { request.add_header("Cache-Control", &cache_control.to_string()); } if let Some(ref content_disposition) = input.content_disposition { request.add_header("Content-Disposition", &content_disposition.to_string()); } if let Some(ref content_encoding) = input.content_encoding { request.add_header("Content-Encoding", 
&content_encoding.to_string()); } if let Some(ref content_language) = input.content_language { request.add_header("Content-Language", &content_language.to_string()); } if let Some(ref content_length) = input.content_length { request.add_header("Content-Length", &content_length.to_string()); } if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } if let Some(ref content_type) = input.content_type { request.add_header("Content-Type", &content_type.to_string()); } if let Some(ref expires) = input.expires { request.add_header("Expires", &expires.to_string()); } if let Some(ref grant_full_control) = input.grant_full_control { request.add_header("x-amz-grant-full-control", &grant_full_control.to_string()); } if let Some(ref grant_read) = input.grant_read { request.add_header("x-amz-grant-read", &grant_read.to_string()); } if let Some(ref grant_read_acp) = input.grant_read_acp { request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string()); } if let Some(ref grant_write_acp) = input.grant_write_acp { request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string()); } if let Some(ref metadata) = input.metadata { for (header_name, header_value) in metadata.iter() { let header = format!("x-amz-meta-{}", header_name); request.add_header(header, header_value); } } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm { request.add_header( "x-amz-server-side-encryption-customer-algorithm", &sse_customer_algorithm.to_string(), ); } if let Some(ref sse_customer_key) = input.sse_customer_key { request.add_header( "x-amz-server-side-encryption-customer-key", &sse_customer_key.to_string(), ); } if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 { request.add_header( "x-amz-server-side-encryption-customer-key-MD5", 
&sse_customer_key_md5.to_string(), ); } if let Some(ref ssekms_key_id) = input.ssekms_key_id { request.add_header( "x-amz-server-side-encryption-aws-kms-key-id", &ssekms_key_id.to_string(), ); } if let Some(ref server_side_encryption) = input.server_side_encryption { request.add_header( "x-amz-server-side-encryption", &server_side_encryption.to_string(), ); } if let Some(ref storage_class) = input.storage_class { request.add_header("x-amz-storage-class", &storage_class.to_string()); } if let Some(ref tagging) = input.tagging { request.add_header("x-amz-tagging", &tagging.to_string()); } if let Some(ref website_redirect_location) = input.website_redirect_location { request.add_header( "x-amz-website-redirect-location", &website_redirect_location.to_string(), ); } if let Some(__body) = input.body { let __body_len = __body.len.expect("no length specified for streaming body"); request.set_payload_stream(__body_len, __body.inner); } let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutObjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = PutObjectOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(PutObjectOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(e_tag) = response.headers.get("ETag") { let value = e_tag.to_owned(); result.e_tag = Some(value) }; if let Some(expiration) = 
response.headers.get("x-amz-expiration") { let value = expiration.to_owned(); result.expiration = Some(value) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(sse_customer_algorithm) = response .headers .get("x-amz-server-side-encryption-customer-algorithm") { let value = sse_customer_algorithm.to_owned(); result.sse_customer_algorithm = Some(value) }; if let Some(sse_customer_key_md5) = response .headers .get("x-amz-server-side-encryption-customer-key-MD5") { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") { let value = ssekms_key_id.to_owned(); result.ssekms_key_id = Some(value) }; if let Some(server_side_encryption) = response.headers.get("x-amz-server-side-encryption") { let value = server_side_encryption.to_owned(); result.server_side_encryption = Some(value) }; if let Some(version_id) = response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket</p> #[allow(unused_variables, warnings)] fn put_object_acl( &self, input: PutObjectAclRequest, ) -> RusotoFuture<PutObjectAclOutput, PutObjectAclError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref acl) = input.acl { request.add_header("x-amz-acl", &acl.to_string()); } if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } if let Some(ref grant_full_control) = input.grant_full_control { request.add_header("x-amz-grant-full-control", 
&grant_full_control.to_string()); } if let Some(ref grant_read) = input.grant_read { request.add_header("x-amz-grant-read", &grant_read.to_string()); } if let Some(ref grant_read_acp) = input.grant_read_acp { request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string()); } if let Some(ref grant_write) = input.grant_write { request.add_header("x-amz-grant-write", &grant_write.to_string()); } if let Some(ref grant_write_acp) = input.grant_write_acp { request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string()); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } params.put_key("acl"); request.set_params(params); if input.access_control_policy.is_some() { let mut writer = EventWriter::new(Vec::new()); AccessControlPolicySerializer::serialize( &mut writer, "AccessControlPolicy", input.access_control_policy.as_ref().unwrap(), ); request.set_payload(Some(writer.into_inner())); } else { request.set_payload(Some(Vec::new())); } let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutObjectAclError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = PutObjectAclOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = 
try!(PutObjectAclOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Sets the supplied tag-set to an object that already exists in a bucket</p> #[allow(unused_variables, warnings)] fn put_object_tagging( &self, input: PutObjectTaggingRequest, ) -> RusotoFuture<PutObjectTaggingOutput, PutObjectTaggingError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } params.put_key("tagging"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); TaggingSerializer::serialize(&mut writer, "Tagging", &input.tagging); request.set_payload(Some(writer.into_inner())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutObjectTaggingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = PutObjectTaggingOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = 
try!(PutObjectTaggingOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(version_id) = response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Restores an archived copy of an object back into Amazon S3</p> #[allow(unused_variables, warnings)] fn restore_object( &self, input: RestoreObjectRequest, ) -> RusotoFuture<RestoreObjectOutput, RestoreObjectError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("POST", "s3", &self.region, &request_uri); if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } params.put_key("restore"); request.set_params(params); if input.restore_request.is_some() { let mut writer = EventWriter::new(Vec::new()); RestoreRequestSerializer::serialize( &mut writer, "RestoreRequest", input.restore_request.as_ref().unwrap(), ); request.set_payload(Some(writer.into_inner())); } else { request.set_payload(Some(Vec::new())); } let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(RestoreObjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = RestoreObjectOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = 
stack.next();
let actual_tag_name = try!(peek_at_name(&mut stack));
result = try!(RestoreObjectOutputDeserializer::deserialize(
    &actual_tag_name,
    &mut stack
));
}
// Surface relevant S3 response headers on the output struct.
if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
    let value = request_charged.to_owned();
    result.request_charged = Some(value)
};
if let Some(restore_output_path) = response.headers.get("x-amz-restore-output-path") {
    let value = restore_output_path.to_owned();
    result.restore_output_path = Some(value)
};
Ok(result)
}))
});
RusotoFuture::new(future)
}

/// <p>This operation filters the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.</p>
#[allow(unused_variables, warnings)]
fn select_object_content(
    &self,
    input: SelectObjectContentRequest,
) -> RusotoFuture<SelectObjectContentOutput, SelectObjectContentError> {
    let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);
    // SelectObjectContent is a POST, unlike the other object-level reads here.
    let mut request = SignedRequest::new("POST", "s3", &self.region, &request_uri);
    // Forward the caller's SSE-C parameters as the
    // x-amz-server-side-encryption-customer-* headers.
    if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm {
        request.add_header(
            "x-amz-server-side-encryption-customer-algorithm",
            &sse_customer_algorithm.to_string(),
        );
    }
    if let Some(ref sse_customer_key) = input.sse_customer_key {
        request.add_header(
            "x-amz-server-side-encryption-customer-key",
            &sse_customer_key.to_string(),
        );
    }
    if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 {
        request.add_header(
            "x-amz-server-side-encryption-customer-key-MD5",
            &sse_customer_key_md5.to_string(),
        );
    }
    let mut params = Params::new();
    // NOTE(review): "select&select-type" is a single Params key, apparently a
    // codegen workaround so the rendered query string becomes the literal
    // "?select&select-type=2" (a value-less "select" subresource plus
    // select-type=2). Confirm how Params serializes/escapes keys before
    // touching this.
    params.put("select&select-type", "2");
    request.set_params(params);
    let mut writer =
EventWriter::new(Vec::new()); SelectObjectContentRequestSerializer::serialize( &mut writer, "SelectObjectContentRequest", &input, "http://s3.amazonaws.com/doc/2006-03-01/", ); request.set_payload(Some(writer.into_inner())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(SelectObjectContentError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = SelectObjectContentOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(SelectObjectContentOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Uploads a part in a multipart upload.</p><p><b>Note:</b> After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. 
Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.</p> #[allow(unused_variables, warnings)] fn upload_part( &self, input: UploadPartRequest, ) -> RusotoFuture<UploadPartOutput, UploadPartError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_length) = input.content_length { request.add_header("Content-Length", &content_length.to_string()); } if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm { request.add_header( "x-amz-server-side-encryption-customer-algorithm", &sse_customer_algorithm.to_string(), ); } if let Some(ref sse_customer_key) = input.sse_customer_key { request.add_header( "x-amz-server-side-encryption-customer-key", &sse_customer_key.to_string(), ); } if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 { request.add_header( "x-amz-server-side-encryption-customer-key-MD5", &sse_customer_key_md5.to_string(), ); } let mut params = Params::new(); params.put("partNumber", &input.part_number); params.put("uploadId", &input.upload_id); request.set_params(params); if let Some(__body) = input.body { let __body_len = __body.len.expect("no length specified for streaming body"); request.set_payload_stream(__body_len, __body.inner); } let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(UploadPartError::from_body( 
String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = UploadPartOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(UploadPartOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(e_tag) = response.headers.get("ETag") { let value = e_tag.to_owned(); result.e_tag = Some(value) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(sse_customer_algorithm) = response .headers .get("x-amz-server-side-encryption-customer-algorithm") { let value = sse_customer_algorithm.to_owned(); result.sse_customer_algorithm = Some(value) }; if let Some(sse_customer_key_md5) = response .headers .get("x-amz-server-side-encryption-customer-key-MD5") { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") { let value = ssekms_key_id.to_owned(); result.ssekms_key_id = Some(value) }; if let Some(server_side_encryption) = response.headers.get("x-amz-server-side-encryption") { let value = server_side_encryption.to_owned(); result.server_side_encryption = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Uploads a part by copying data from an existing object as data source.</p> #[allow(unused_variables, warnings)] fn upload_part_copy( &self, input: UploadPartCopyRequest, ) -> RusotoFuture<UploadPartCopyOutput, UploadPartCopyError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key 
= input.key); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); request.add_header("x-amz-copy-source", &input.copy_source); if let Some(ref copy_source_if_match) = input.copy_source_if_match { request.add_header( "x-amz-copy-source-if-match", &copy_source_if_match.to_string(), ); } if let Some(ref copy_source_if_modified_since) = input.copy_source_if_modified_since { request.add_header( "x-amz-copy-source-if-modified-since", &copy_source_if_modified_since.to_string(), ); } if let Some(ref copy_source_if_none_match) = input.copy_source_if_none_match { request.add_header( "x-amz-copy-source-if-none-match", &copy_source_if_none_match.to_string(), ); } if let Some(ref copy_source_if_unmodified_since) = input.copy_source_if_unmodified_since { request.add_header( "x-amz-copy-source-if-unmodified-since", &copy_source_if_unmodified_since.to_string(), ); } if let Some(ref copy_source_range) = input.copy_source_range { request.add_header("x-amz-copy-source-range", &copy_source_range.to_string()); } if let Some(ref copy_source_sse_customer_algorithm) = input.copy_source_sse_customer_algorithm { request.add_header( "x-amz-copy-source-server-side-encryption-customer-algorithm", &copy_source_sse_customer_algorithm.to_string(), ); } if let Some(ref copy_source_sse_customer_key) = input.copy_source_sse_customer_key { request.add_header( "x-amz-copy-source-server-side-encryption-customer-key", &copy_source_sse_customer_key.to_string(), ); } if let Some(ref copy_source_sse_customer_key_md5) = input.copy_source_sse_customer_key_md5 { request.add_header( "x-amz-copy-source-server-side-encryption-customer-key-MD5", &copy_source_sse_customer_key_md5.to_string(), ); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm { request.add_header( "x-amz-server-side-encryption-customer-algorithm", 
&sse_customer_algorithm.to_string(), ); } if let Some(ref sse_customer_key) = input.sse_customer_key { request.add_header( "x-amz-server-side-encryption-customer-key", &sse_customer_key.to_string(), ); } if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 { request.add_header( "x-amz-server-side-encryption-customer-key-MD5", &sse_customer_key_md5.to_string(), ); } let mut params = Params::new(); params.put("partNumber", &input.part_number); params.put("uploadId", &input.upload_id); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(UploadPartCopyError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = UploadPartCopyOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(UploadPartCopyOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(copy_source_version_id) = response.headers.get("x-amz-copy-source-version-id") { let value = copy_source_version_id.to_owned(); result.copy_source_version_id = Some(value) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(sse_customer_algorithm) = response .headers .get("x-amz-server-side-encryption-customer-algorithm") { let value = sse_customer_algorithm.to_owned(); result.sse_customer_algorithm = Some(value) }; if 
let Some(sse_customer_key_md5) = response .headers .get("x-amz-server-side-encryption-customer-key-MD5") { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") { let value = ssekms_key_id.to_owned(); result.ssekms_key_id = Some(value) }; if let Some(server_side_encryption) = response.headers.get("x-amz-server-side-encryption") { let value = server_side_encryption.to_owned(); result.server_side_encryption = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } } #[cfg(test)] mod protocol_tests { extern crate rusoto_mock; use self::rusoto_mock::*; use super::*; use rusoto_core::Region as rusoto_region; #[test] fn test_parse_error_s3_create_bucket() { let mock_response = MockResponseReader::read_response( "test_resources/generated/error", "s3-create-bucket.xml", ); let mock = MockRequestDispatcher::with_status(400).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = CreateBucketRequest::default(); let result = client.create_bucket(request).sync(); assert!(!result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_error_s3_list_objects() { let mock_response = MockResponseReader::read_response( "test_resources/generated/error", "s3-list-objects.xml", ); let mock = MockRequestDispatcher::with_status(400).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = ListObjectsRequest::default(); let result = client.list_objects(request).sync(); assert!(!result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_get_bucket_acl() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-get-bucket-acl.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, 
MockCredentialsProvider, rusoto_region::UsEast1); let request = GetBucketAclRequest::default(); let result = client.get_bucket_acl(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_get_bucket_location() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-get-bucket-location.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = GetBucketLocationRequest::default(); let result = client.get_bucket_location(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_get_bucket_logging() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-get-bucket-logging.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = GetBucketLoggingRequest::default(); let result = client.get_bucket_logging(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_get_bucket_policy() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-get-bucket-policy.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = GetBucketPolicyRequest::default(); let result = client.get_bucket_policy(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_list_buckets() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-list-buckets.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, 
rusoto_region::UsEast1); let result = client.list_buckets().sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_list_multipart_uploads() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-list-multipart-uploads.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = ListMultipartUploadsRequest::default(); let result = client.list_multipart_uploads(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_list_object_versions() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-list-object-versions.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = ListObjectVersionsRequest::default(); let result = client.list_object_versions(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_list_objects() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-list-objects.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = ListObjectsRequest::default(); let result = client.list_objects(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } } re-gen S3 service // ================================================================= // // * WARNING * // // This file is generated! // // Changes made to this file will be overwritten. If changes are // required to the generated code, the service_crategen project // must be updated to generate the changes. 
// // ================================================================= use std::error::Error; use std::fmt; use std::io; #[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::reactor::{CredentialsProvider, RequestDispatcher}; use rusoto_core::region; use rusoto_core::request::DispatchSignedRequest; use rusoto_core::{ClientInner, RusotoFuture}; use rusoto_core::credential::{CredentialsError, ProvideAwsCredentials}; use rusoto_core::request::HttpDispatchError; use hyper::StatusCode; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::signature::SignedRequest; use rusoto_core::xmlerror::*; use rusoto_core::xmlutil::{characters, end_element, find_start_element, peek_at_name, skip_tree, start_element}; use rusoto_core::xmlutil::{Next, Peek, XmlParseError, XmlResponse}; use std::io::Write; use std::str::FromStr; use xml; use xml::reader::ParserConfig; use xml::reader::XmlEvent; use xml::EventReader; use xml::EventWriter; enum DeserializerNext { Close, Skip, Element(String), } /// <p>Specifies the days since the initiation of an Incomplete Multipart Upload that Lifecycle will wait before permanently removing all parts of the upload.</p> #[derive(Default, Debug, Clone)] pub struct AbortIncompleteMultipartUpload { /// <p>Indicates the number of days that must pass since initiation for Lifecycle to abort an Incomplete Multipart Upload.</p> pub days_after_initiation: Option<i64>, } struct AbortIncompleteMultipartUploadDeserializer; impl AbortIncompleteMultipartUploadDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AbortIncompleteMultipartUpload, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AbortIncompleteMultipartUpload::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DaysAfterInitiation" => { obj.days_after_initiation = Some(try!(DaysAfterInitiationDeserializer::deserialize( "DaysAfterInitiation", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AbortIncompleteMultipartUploadSerializer; impl AbortIncompleteMultipartUploadSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AbortIncompleteMultipartUpload, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.days_after_initiation { writer.write(xml::writer::XmlEvent::start_element("DaysAfterInitiation"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct AbortMultipartUploadOutput { pub request_charged: Option<String>, } struct AbortMultipartUploadOutputDeserializer; impl AbortMultipartUploadOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AbortMultipartUploadOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = AbortMultipartUploadOutput::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct AbortMultipartUploadRequest { pub bucket: String, pub key: String, pub request_payer: Option<String>, pub upload_id: String, } #[derive(Default, Debug, Clone)] pub struct AccelerateConfiguration { /// <p>The accelerate configuration of the bucket.</p> pub status: Option<String>, } pub struct 
AccelerateConfigurationSerializer;
impl AccelerateConfigurationSerializer {
    /// Serializes an `AccelerateConfiguration` as an XML element named `name`,
    /// emitting an optional nested `<Status>` text element.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &AccelerateConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.status {
            writer.write(xml::writer::XmlEvent::start_element("Status"))?;
            // Fix: this write's Result was previously discarded (no `?`),
            // silently swallowing writer errors; propagate it like every
            // other write in this function.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct AccessControlPolicy {
    /// <p>A list of grants.</p>
    pub grants: Option<Vec<Grant>>,
    pub owner: Option<Owner>,
}

pub struct AccessControlPolicySerializer;
impl AccessControlPolicySerializer {
    /// Serializes an `AccessControlPolicy` as an XML element named `name`,
    /// with optional `<AccessControlList>` and `<Owner>` children.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &AccessControlPolicy,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.grants {
            // Fix: dropped the pointless leading `&` that borrowed the `()`
            // produced by `?` (an unused temporary reference).
            GrantsSerializer::serialize(&mut writer, "AccessControlList", value)?;
        }
        if let Some(ref value) = obj.owner {
            OwnerSerializer::serialize(&mut writer, "Owner", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// <p>Container for information regarding the access control for replicas.</p>
#[derive(Default, Debug, Clone)]
pub struct AccessControlTranslation {
    /// <p>The override value for the owner of the replica object.</p>
    pub owner: String,
}

struct AccessControlTranslationDeserializer;
impl AccessControlTranslationDeserializer {
    // Parses an AccessControlTranslation element from the XML event stream.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<AccessControlTranslation, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = AccessControlTranslation::default();
        loop {
            let next_event = match stack.peek() {
Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Owner" => { obj.owner = try!(OwnerOverrideDeserializer::deserialize("Owner", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AccessControlTranslationSerializer; impl AccessControlTranslationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AccessControlTranslation, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Owner"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.owner )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct AccountIdDeserializer; impl AccountIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AccountIdSerializer; impl AccountIdSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct AllowedHeaderDeserializer; impl AllowedHeaderDeserializer { #[allow(unused_variables)] fn 
deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        // Single CORS allowed-header value: `<tag>text</tag>` -> String.
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct AllowedHeaderSerializer;
impl AllowedHeaderSerializer {
    // Writes one allowed-header value as `<name>value</name>`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct AllowedHeadersDeserializer;
impl AllowedHeadersDeserializer {
    // Collects consecutive sibling `<tag_name>` elements into a Vec<String>:
    // peeks at the next event and stops at the first one that is not a
    // matching start element (flattened list, no wrapper element).
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<String>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name,
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(AllowedHeaderDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}

pub struct AllowedHeadersSerializer;
impl AllowedHeadersSerializer {
    // Serializes each element as its own `<name>` element; emits no
    // surrounding wrapper element.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<String>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        for element in obj {
            AllowedHeaderSerializer::serialize(writer, name, element)?;
        }
        Ok(())
    }
}

struct AllowedMethodDeserializer;
impl AllowedMethodDeserializer {
    // Single CORS allowed-method value: `<tag>text</tag>` -> String.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct AllowedMethodSerializer;
impl AllowedMethodSerializer {
    // Writes one allowed-method value as `<name>value</name>`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name:
&str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct AllowedMethodsDeserializer; impl AllowedMethodsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(AllowedMethodDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } pub struct AllowedMethodsSerializer; impl AllowedMethodsSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<String>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { AllowedMethodSerializer::serialize(writer, name, element)?; } Ok(()) } } struct AllowedOriginDeserializer; impl AllowedOriginDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AllowedOriginSerializer; impl AllowedOriginSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct AllowedOriginsDeserializer; impl AllowedOriginsDeserializer { #[allow(unused_variables)] fn 
deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(AllowedOriginDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } pub struct AllowedOriginsSerializer; impl AllowedOriginsSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<String>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { AllowedOriginSerializer::serialize(writer, name, element)?; } Ok(()) } } #[derive(Default, Debug, Clone)] pub struct AnalyticsAndOperator { /// <p>The prefix to use when evaluating an AND predicate.</p> pub prefix: Option<String>, /// <p>The list of tags to use when evaluating an AND predicate.</p> pub tags: Option<Vec<Tag>>, } struct AnalyticsAndOperatorDeserializer; impl AnalyticsAndOperatorDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AnalyticsAndOperator, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AnalyticsAndOperator::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Prefix" => { obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } "Tag" => { obj.tags = Some(try!(TagSetDeserializer::deserialize("Tag", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsAndOperatorSerializer; impl AnalyticsAndOperatorSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AnalyticsAndOperator, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.prefix { writer.write(xml::writer::XmlEvent::start_element("Prefix"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.tags { &TagSetSerializer::serialize(&mut writer, "Tag", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct AnalyticsConfiguration { /// <p>The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). 
If no filter is provided, all objects will be considered in any analysis.</p> pub filter: Option<AnalyticsFilter>, /// <p>The identifier used to represent an analytics configuration.</p> pub id: String, /// <p>If present, it indicates that data related to access patterns will be collected and made available to analyze the tradeoffs between different storage classes.</p> pub storage_class_analysis: StorageClassAnalysis, } struct AnalyticsConfigurationDeserializer; impl AnalyticsConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AnalyticsConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AnalyticsConfiguration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Filter" => { obj.filter = Some(try!(AnalyticsFilterDeserializer::deserialize( "Filter", stack ))); } "Id" => { obj.id = try!(AnalyticsIdDeserializer::deserialize("Id", stack)); } "StorageClassAnalysis" => { obj.storage_class_analysis = try!(StorageClassAnalysisDeserializer::deserialize( "StorageClassAnalysis", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsConfigurationSerializer; impl AnalyticsConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AnalyticsConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.filter { &AnalyticsFilterSerializer::serialize(&mut writer, "Filter", value)?; } writer.write(xml::writer::XmlEvent::start_element("Id"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.id )))?; writer.write(xml::writer::XmlEvent::end_element())?; StorageClassAnalysisSerializer::serialize( &mut writer, "StorageClassAnalysis", &obj.storage_class_analysis, )?; writer.write(xml::writer::XmlEvent::end_element()) } } struct AnalyticsConfigurationListDeserializer; impl AnalyticsConfigurationListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<AnalyticsConfiguration>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name,
                _ => false,
            };
            // Consume matching sibling elements until the first event that is
            // not a start tag with the expected local name.
            if consume_next_tag {
                obj.push(try!(AnalyticsConfigurationDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct AnalyticsExportDestination {
    /// <p>A destination signifying output to an S3 bucket.</p>
    pub s3_bucket_destination: AnalyticsS3BucketDestination,
}

struct AnalyticsExportDestinationDeserializer;
impl AnalyticsExportDestinationDeserializer {
    // Parses an AnalyticsExportDestination element from the XML event stream.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<AnalyticsExportDestination, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = AnalyticsExportDestination::default();
        loop {
            // Peek to classify the next event: a close tag ends the loop, a
            // start tag dispatches on its local name, anything else is skipped.
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{ "S3BucketDestination" => { obj.s3_bucket_destination = try!(AnalyticsS3BucketDestinationDeserializer::deserialize( "S3BucketDestination", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsExportDestinationSerializer; impl AnalyticsExportDestinationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AnalyticsExportDestination, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; AnalyticsS3BucketDestinationSerializer::serialize( &mut writer, "S3BucketDestination", &obj.s3_bucket_destination, )?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct AnalyticsFilter { /// <p>A conjunction (logical AND) of predicates, which is used in evaluating an analytics filter. The operator must have at least two predicates.</p> pub and: Option<AnalyticsAndOperator>, /// <p>The prefix to use when evaluating an analytics filter.</p> pub prefix: Option<String>, /// <p>The tag to use when evaluating an analytics filter.</p> pub tag: Option<Tag>, } struct AnalyticsFilterDeserializer; impl AnalyticsFilterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AnalyticsFilter, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AnalyticsFilter::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "And" => { obj.and = Some(try!(AnalyticsAndOperatorDeserializer::deserialize( "And", stack ))); } "Prefix" => { obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } "Tag" => { obj.tag = Some(try!(TagDeserializer::deserialize("Tag", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsFilterSerializer; impl AnalyticsFilterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AnalyticsFilter, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.and { &AnalyticsAndOperatorSerializer::serialize(&mut writer, "And", value)?; } if let Some(ref value) = obj.prefix { writer.write(xml::writer::XmlEvent::start_element("Prefix"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.tag { &TagSerializer::serialize(&mut writer, "Tag", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct AnalyticsIdDeserializer; impl AnalyticsIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsIdSerializer; impl AnalyticsIdSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } 
#[derive(Default, Debug, Clone)] pub struct AnalyticsS3BucketDestination { /// <p>The Amazon resource name (ARN) of the bucket to which data is exported.</p> pub bucket: String, /// <p>The account ID that owns the destination bucket. If no account ID is provided, the owner will not be validated prior to exporting data.</p> pub bucket_account_id: Option<String>, /// <p>The file format used when exporting data to Amazon S3.</p> pub format: String, /// <p>The prefix to use when exporting data. The exported data begins with this prefix.</p> pub prefix: Option<String>, } struct AnalyticsS3BucketDestinationDeserializer; impl AnalyticsS3BucketDestinationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AnalyticsS3BucketDestination, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AnalyticsS3BucketDestination::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Bucket" => { obj.bucket = try!(BucketNameDeserializer::deserialize("Bucket", stack)); } "BucketAccountId" => { obj.bucket_account_id = Some(try!(AccountIdDeserializer::deserialize( "BucketAccountId", stack ))); } "Format" => { obj.format = try!(AnalyticsS3ExportFileFormatDeserializer::deserialize( "Format", stack )); } "Prefix" => { obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsS3BucketDestinationSerializer; impl AnalyticsS3BucketDestinationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &AnalyticsS3BucketDestination, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Bucket"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.bucket )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.bucket_account_id { writer.write(xml::writer::XmlEvent::start_element("BucketAccountId"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::start_element("Format"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.format )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.prefix { writer.write(xml::writer::XmlEvent::start_element("Prefix"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct AnalyticsS3ExportFileFormatDeserializer; impl AnalyticsS3ExportFileFormatDeserializer { 
#[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct AnalyticsS3ExportFileFormatSerializer; impl AnalyticsS3ExportFileFormatSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct StreamingBody { len: Option<usize>, inner: Box<::futures::Stream<Item = Vec<u8>, Error = ::std::io::Error> + Send>, } impl StreamingBody { pub fn new<S>(stream: S) -> StreamingBody where S: ::futures::Stream<Item = Vec<u8>, Error = ::std::io::Error> + Send + 'static, { StreamingBody { len: None, inner: Box::new(stream), } } } impl From<Vec<u8>> for StreamingBody { fn from(buf: Vec<u8>) -> StreamingBody { StreamingBody { len: Some(buf.len()), inner: Box::new(::futures::stream::once(Ok(buf))), } } } impl fmt::Debug for StreamingBody { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "<Body: streaming content, len = {:?}>", self.len) } } impl ::futures::Stream for StreamingBody { type Item = Vec<u8>; type Error = ::std::io::Error; fn poll(&mut self) -> ::futures::Poll<Option<Self::Item>, Self::Error> { self.inner.poll() } } struct BodyDeserializer; impl BodyDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<u8>, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)).into_bytes(); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct BodySerializer; impl BodySerializer { #[allow(unused_variables, warnings)] pub fn 
serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<u8>, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = String::from_utf8(obj.to_vec()).expect("Not a UTF-8 string") )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct Bucket { /// <p>Date the bucket was created.</p> pub creation_date: Option<String>, /// <p>The name of the bucket.</p> pub name: Option<String>, } struct BucketDeserializer; impl BucketDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Bucket, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Bucket::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "CreationDate" => { obj.creation_date = Some(try!(CreationDateDeserializer::deserialize( "CreationDate", stack ))); } "Name" => { obj.name = Some(try!(BucketNameDeserializer::deserialize("Name", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct BucketAccelerateStatusDeserializer; impl BucketAccelerateStatusDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct BucketAccelerateStatusSerializer; impl BucketAccelerateStatusSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct BucketLifecycleConfiguration { pub rules: Vec<LifecycleRule>, } pub struct BucketLifecycleConfigurationSerializer; impl BucketLifecycleConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &BucketLifecycleConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; LifecycleRulesSerializer::serialize(&mut writer, "Rule", &obj.rules)?; writer.write(xml::writer::XmlEvent::end_element()) } } struct BucketLocationConstraintDeserializer; impl BucketLocationConstraintDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { 
try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct BucketLocationConstraintSerializer; impl BucketLocationConstraintSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct BucketLoggingStatus { pub logging_enabled: Option<LoggingEnabled>, } pub struct BucketLoggingStatusSerializer; impl BucketLoggingStatusSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &BucketLoggingStatus, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.logging_enabled { &LoggingEnabledSerializer::serialize(&mut writer, "LoggingEnabled", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct BucketLogsPermissionDeserializer; impl BucketLogsPermissionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct BucketLogsPermissionSerializer; impl BucketLogsPermissionSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; 
writer.write(xml::writer::XmlEvent::end_element()) } } struct BucketNameDeserializer; impl BucketNameDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct BucketNameSerializer; impl BucketNameSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct BucketVersioningStatusDeserializer; impl BucketVersioningStatusDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct BucketVersioningStatusSerializer; impl BucketVersioningStatusSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct BucketsDeserializer; impl BucketsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<Bucket>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. 
})) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "Bucket" { obj.push(try!(BucketDeserializer::deserialize("Bucket", stack))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } struct BytesProcessedDeserializer; impl BytesProcessedDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<i64, XmlParseError> { try!(start_element(tag_name, stack)); let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } struct BytesScannedDeserializer; impl BytesScannedDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<i64, XmlParseError> { try!(start_element(tag_name, stack)); let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct CORSConfiguration { pub cors_rules: Vec<CORSRule>, } pub struct CORSConfigurationSerializer; impl CORSConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &CORSConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; CORSRulesSerializer::serialize(&mut writer, "CORSRule", &obj.cors_rules)?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct CORSRule { /// <p>Specifies which headers are allowed in a pre-flight OPTIONS request.</p> pub allowed_headers: Option<Vec<String>>, /// <p>Identifies HTTP methods that the domain/origin specified in the rule is allowed to 
execute.</p> pub allowed_methods: Vec<String>, /// <p>One or more origins you want customers to be able to access the bucket from.</p> pub allowed_origins: Vec<String>, /// <p>One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object).</p> pub expose_headers: Option<Vec<String>>, /// <p>The time in seconds that your browser is to cache the preflight response for the specified resource.</p> pub max_age_seconds: Option<i64>, } struct CORSRuleDeserializer; impl CORSRuleDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CORSRule, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CORSRule::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { match &name[..] 
{
    // Continuation of CORSRuleDeserializer::deserialize (header on an
    // earlier line) — kept byte-equivalent.
    "AllowedHeader" => {
        obj.allowed_headers = Some(try!(
            AllowedHeadersDeserializer::deserialize("AllowedHeader", stack)
        ));
    }
    "AllowedMethod" => {
        obj.allowed_methods = try!(AllowedMethodsDeserializer::deserialize(
            "AllowedMethod",
            stack
        ));
    }
    "AllowedOrigin" => {
        obj.allowed_origins = try!(AllowedOriginsDeserializer::deserialize(
            "AllowedOrigin",
            stack
        ));
    }
    "ExposeHeader" => {
        obj.expose_headers = Some(try!(
            ExposeHeadersDeserializer::deserialize("ExposeHeader", stack)
        ));
    }
    "MaxAgeSeconds" => {
        obj.max_age_seconds = Some(try!(
            MaxAgeSecondsDeserializer::deserialize("MaxAgeSeconds", stack)
        ));
    }
    _ => skip_tree(stack),
}
}
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Writes a `CORSRule` as the XML element `name`.
///
/// Required list members (`AllowedMethod`, `AllowedOrigin`) are always
/// emitted; optional members only when `Some`.
pub struct CORSRuleSerializer;
impl CORSRuleSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CORSRule,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.allowed_headers {
            // Cleanup: dropped the no-op `&` that was taken on the unit result.
            AllowedHeadersSerializer::serialize(&mut writer, "AllowedHeader", value)?;
        }
        AllowedMethodsSerializer::serialize(&mut writer, "AllowedMethod", &obj.allowed_methods)?;
        AllowedOriginsSerializer::serialize(&mut writer, "AllowedOrigin", &obj.allowed_origins)?;
        if let Some(ref value) = obj.expose_headers {
            ExposeHeadersSerializer::serialize(&mut writer, "ExposeHeader", value)?;
        }
        if let Some(ref value) = obj.max_age_seconds {
            writer.write(xml::writer::XmlEvent::start_element("MaxAgeSeconds"))?;
            // Fix: propagate writer errors with `?` (was a dropped `Result`).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
struct CORSRulesDeserializer;
impl CORSRulesDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<CORSRule>, XmlParseError> {
        let mut
obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(CORSRuleDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } pub struct CORSRulesSerializer; impl CORSRulesSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<CORSRule>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { CORSRuleSerializer::serialize(writer, name, element)?; } Ok(()) } } /// <p>Describes how a CSV-formatted input object is formatted.</p> #[derive(Default, Debug, Clone)] pub struct CSVInput { /// <p>Single character used to indicate a row should be ignored when present at the start of a row.</p> pub comments: Option<String>, /// <p>Value used to separate individual fields in a record.</p> pub field_delimiter: Option<String>, /// <p>Describes the first line of input. 
/// Valid values: None, Ignore, Use.</p>
pub file_header_info: Option<String>,
/// <p>Value used for escaping where the field delimiter is part of the value.</p>
pub quote_character: Option<String>,
/// <p>Single character used for escaping the quote character inside an already escaped value.</p>
pub quote_escape_character: Option<String>,
/// <p>Value used to separate individual records.</p>
pub record_delimiter: Option<String>,
}
/// Writes a `CSVInput` as the XML element `name`; every member is optional
/// and emitted only when `Some`.
pub struct CSVInputSerializer;
impl CSVInputSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CSVInput,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        // Fix: every characters() write below now propagates errors with `?`;
        // the original dropped those `Result`s, silently ignoring failures.
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.comments {
            writer.write(xml::writer::XmlEvent::start_element("Comments"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.field_delimiter {
            writer.write(xml::writer::XmlEvent::start_element("FieldDelimiter"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.file_header_info {
            writer.write(xml::writer::XmlEvent::start_element("FileHeaderInfo"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.quote_character {
            writer.write(xml::writer::XmlEvent::start_element("QuoteCharacter"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.quote_escape_character {
            writer.write(xml::writer::XmlEvent::start_element("QuoteEscapeCharacter"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.record_delimiter {
            writer.write(xml::writer::XmlEvent::start_element("RecordDelimiter"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// <p>Describes how CSV-formatted results are formatted.</p>
#[derive(Default, Debug, Clone)]
pub struct CSVOutput {
    /// <p>Value used to separate individual fields in a record.</p>
    pub field_delimiter: Option<String>,
    /// <p>Value used for escaping where the field delimiter is part of the value.</p>
    pub quote_character: Option<String>,
    /// <p>Single character used for escaping the quote character inside an already escaped value.</p>
    pub quote_escape_character: Option<String>,
    /// <p>Indicates whether or not all output fields should be quoted.</p>
    pub quote_fields: Option<String>,
    /// <p>Value used to separate individual records.</p>
    pub record_delimiter: Option<String>,
}
/// Writes a `CSVOutput` as the XML element `name`; every member is optional
/// and emitted only when `Some`.
pub struct CSVOutputSerializer;
impl CSVOutputSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CSVOutput,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        // Fix: characters() writes now propagate errors with `?` (were dropped).
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.field_delimiter {
            writer.write(xml::writer::XmlEvent::start_element("FieldDelimiter"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.quote_character {
            writer.write(xml::writer::XmlEvent::start_element("QuoteCharacter"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.quote_escape_character {
            writer.write(xml::writer::XmlEvent::start_element("QuoteEscapeCharacter"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.quote_fields {
            writer.write(xml::writer::XmlEvent::start_element("QuoteFields"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.record_delimiter {
            writer.write(xml::writer::XmlEvent::start_element("RecordDelimiter"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Reads a single CloudFunction ARN string element.
struct CloudFunctionDeserializer;
impl CloudFunctionDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a CloudFunction ARN string as the XML element `name`.
pub struct CloudFunctionSerializer;
impl CloudFunctionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct CloudFunctionConfiguration {
    pub cloud_function: Option<String>,
    pub events: Option<Vec<String>>,
    pub id: Option<String>,
    pub invocation_role: Option<String>,
}
struct CloudFunctionConfigurationDeserializer;
impl CloudFunctionConfigurationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<CloudFunctionConfiguration, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj =
CloudFunctionConfiguration::default();
// Continuation of CloudFunctionConfigurationDeserializer::deserialize
// (header on the previous line) — kept byte-equivalent.
loop {
    let next_event = match stack.peek() {
        Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
        Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
            DeserializerNext::Element(name.local_name.to_owned())
        }
        _ => DeserializerNext::Skip,
    };
    match next_event {
        DeserializerNext::Element(name) => match &name[..] {
            "CloudFunction" => {
                obj.cloud_function = Some(try!(CloudFunctionDeserializer::deserialize(
                    "CloudFunction",
                    stack
                )));
            }
            "Event" => {
                obj.events = Some(try!(EventListDeserializer::deserialize("Event", stack)));
            }
            "Id" => {
                obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack)));
            }
            "InvocationRole" => {
                obj.invocation_role =
                    Some(try!(CloudFunctionInvocationRoleDeserializer::deserialize(
                        "InvocationRole",
                        stack
                    )));
            }
            _ => skip_tree(stack),
        },
        DeserializerNext::Close => break,
        DeserializerNext::Skip => {
            stack.next();
        }
    }
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Writes a `CloudFunctionConfiguration` as the XML element `name`;
/// every member is optional and emitted only when `Some`.
pub struct CloudFunctionConfigurationSerializer;
impl CloudFunctionConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CloudFunctionConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        // Fix: characters() writes now propagate errors with `?`; the
        // original dropped those `Result`s, silently ignoring failures.
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.cloud_function {
            writer.write(xml::writer::XmlEvent::start_element("CloudFunction"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.events {
            // Cleanup: dropped the no-op `&` taken on the unit result.
            EventListSerializer::serialize(&mut writer, "Event", value)?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("Id"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.invocation_role {
            writer.write(xml::writer::XmlEvent::start_element("InvocationRole"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
struct CloudFunctionInvocationRoleDeserializer;
impl CloudFunctionInvocationRoleDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
pub struct CloudFunctionInvocationRoleSerializer;
impl CloudFunctionInvocationRoleSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
struct CodeDeserializer;
impl CodeDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
pub struct CommentsSerializer;
impl CommentsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct CommonPrefix {
    pub prefix: Option<String>,
}
struct CommonPrefixDeserializer;
impl CommonPrefixDeserializer {
#[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CommonPrefix, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CommonPrefix::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Prefix" => { obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct CommonPrefixListDeserializer; impl CommonPrefixListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<CommonPrefix>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(CommonPrefixDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct CompleteMultipartUploadOutput { pub bucket: Option<String>, /// <p>Entity tag of the object.</p> pub e_tag: Option<String>, /// <p>If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). 
The value of rule-id is URL encoded.</p> pub expiration: Option<String>, pub key: Option<String>, pub location: Option<String>, pub request_charged: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>Version of the object.</p> pub version_id: Option<String>, } struct CompleteMultipartUploadOutputDeserializer; impl CompleteMultipartUploadOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CompleteMultipartUploadOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CompleteMultipartUploadOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Bucket" => { obj.bucket = Some(try!(BucketNameDeserializer::deserialize("Bucket", stack))); } "ETag" => { obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "Location" => { obj.location = Some(try!(LocationDeserializer::deserialize("Location", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct CompleteMultipartUploadRequest { pub bucket: String, pub key: String, pub multipart_upload: Option<CompletedMultipartUpload>, pub request_payer: Option<String>, pub upload_id: String, } #[derive(Default, Debug, Clone)] pub struct CompletedMultipartUpload { pub parts: Option<Vec<CompletedPart>>, } pub struct CompletedMultipartUploadSerializer; impl CompletedMultipartUploadSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &CompletedMultipartUpload, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.parts { &CompletedPartListSerializer::serialize(&mut writer, "Part", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct CompletedPart { /// <p>Entity tag returned when the part was uploaded.</p> pub e_tag: Option<String>, /// <p>Part number that identifies the part. 
/// This is a positive integer between 1 and 10,000.</p>
pub part_number: Option<i64>,
}
/// Writes a `CompletedPart` as the XML element `name`; both members are
/// optional and emitted only when `Some`.
pub struct CompletedPartSerializer;
impl CompletedPartSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CompletedPart,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.e_tag {
            writer.write(xml::writer::XmlEvent::start_element("ETag"))?;
            // Fix: propagate writer errors with `?` (was a dropped `Result`).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.part_number {
            writer.write(xml::writer::XmlEvent::start_element("PartNumber"))?;
            // Fix: propagate writer errors with `?` (was a dropped `Result`).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Writes each element of the list as its own `name` element.
pub struct CompletedPartListSerializer;
impl CompletedPartListSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<CompletedPart>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        for element in obj {
            CompletedPartSerializer::serialize(writer, name, element)?;
        }
        Ok(())
    }
}
pub struct CompressionTypeSerializer;
impl CompressionTypeSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct Condition {
    /// <p>The HTTP error code when the redirect is applied. In the event of an error, if the error code equals this value, then the specified redirect is applied.
Required when parent element Condition is specified and sibling KeyPrefixEquals is not specified. If both are specified, then both must be true for the redirect to be applied.</p> pub http_error_code_returned_equals: Option<String>, /// <p>The object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. To redirect request for all pages with the prefix docs/, the key prefix will be /docs, which identifies all objects in the docs/ folder. Required when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is not specified. If both conditions are specified, both must be true for the redirect to be applied.</p> pub key_prefix_equals: Option<String>, } struct ConditionDeserializer; impl ConditionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Condition, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Condition::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
    // Continuation of ConditionDeserializer::deserialize (header on an
    // earlier line) — kept byte-equivalent.
    "HttpErrorCodeReturnedEquals" => {
        obj.http_error_code_returned_equals =
            Some(try!(HttpErrorCodeReturnedEqualsDeserializer::deserialize(
                "HttpErrorCodeReturnedEquals",
                stack
            )));
    }
    "KeyPrefixEquals" => {
        obj.key_prefix_equals = Some(try!(
            KeyPrefixEqualsDeserializer::deserialize("KeyPrefixEquals", stack)
        ));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Writes a `Condition` as the XML element `name`; both members are
/// optional and emitted only when `Some`.
pub struct ConditionSerializer;
impl ConditionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Condition,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.http_error_code_returned_equals {
            writer.write(xml::writer::XmlEvent::start_element(
                "HttpErrorCodeReturnedEquals",
            ))?;
            // Fix: propagate writer errors with `?` (was a dropped `Result`).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.key_prefix_equals {
            writer.write(xml::writer::XmlEvent::start_element("KeyPrefixEquals"))?;
            // Fix: propagate writer errors with `?` (was a dropped `Result`).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct ContinuationEvent {}
struct ContinuationEventDeserializer;
impl ContinuationEventDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ContinuationEvent, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = ContinuationEvent::default();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct CopyObjectOutput {
    pub copy_object_result: Option<CopyObjectResult>,
    pub copy_source_version_id: Option<String>,
    /// <p>If the object expiration is configured, the response
includes this header.</p> pub expiration: Option<String>, pub request_charged: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p> pub sse_customer_algorithm: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p> pub sse_customer_key_md5: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>Version ID of the newly created copy.</p> pub version_id: Option<String>, } struct CopyObjectOutputDeserializer; impl CopyObjectOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CopyObjectOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CopyObjectOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "CopyObjectResult" => { obj.copy_object_result = Some(try!( CopyObjectResultDeserializer::deserialize("CopyObjectResult", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct CopyObjectRequest { /// <p>The canned ACL to apply to the object.</p> pub acl: Option<String>, pub bucket: String, /// <p>Specifies caching behavior along the request/reply chain.</p> pub cache_control: Option<String>, /// <p>Specifies presentational information for the object.</p> pub content_disposition: Option<String>, /// <p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p> pub content_encoding: Option<String>, /// <p>The language the content is in.</p> pub content_language: Option<String>, /// <p>A standard MIME type describing the format of the object data.</p> pub content_type: Option<String>, /// <p>The name of the source bucket and key name of the source object, separated by a slash (/). 
Must be URL-encoded.</p> pub copy_source: String, /// <p>Copies the object if its entity tag (ETag) matches the specified tag.</p> pub copy_source_if_match: Option<String>, /// <p>Copies the object if it has been modified since the specified time.</p> pub copy_source_if_modified_since: Option<String>, /// <p>Copies the object if its entity tag (ETag) is different than the specified ETag.</p> pub copy_source_if_none_match: Option<String>, /// <p>Copies the object if it hasn&#39;t been modified since the specified time.</p> pub copy_source_if_unmodified_since: Option<String>, /// <p>Specifies the algorithm to use when decrypting the source object (e.g., AES256).</p> pub copy_source_sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.</p> pub copy_source_sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub copy_source_sse_customer_key_md5: Option<String>, /// <p>The date and time at which the object is no longer cacheable.</p> pub expires: Option<String>, /// <p>Gives the grantee READ, READ<em>ACP, and WRITE</em>ACP permissions on the object.</p> pub grant_full_control: Option<String>, /// <p>Allows grantee to read the object data and its metadata.</p> pub grant_read: Option<String>, /// <p>Allows grantee to read the object ACL.</p> pub grant_read_acp: Option<String>, /// <p>Allows grantee to write the ACL for the applicable object.</p> pub grant_write_acp: Option<String>, pub key: String, /// <p>A map of metadata to store with the object in S3.</p> pub metadata: Option<::std::collections::HashMap<String, String>>, /// <p>Specifies whether the metadata is copied from the source object or replaced with metadata provided in the request.</p> pub metadata_directive: Option<String>, pub request_payer: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. 
Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>The type of storage to use for the object. Defaults to &#39;STANDARD&#39;.</p> pub storage_class: Option<String>, /// <p>The tag-set for the object destination object this value must be used in conjunction with the TaggingDirective. The tag-set must be encoded as URL Query parameters</p> pub tagging: Option<String>, /// <p>Specifies whether the object tag-set are copied from the source object or replaced with tag-set provided in the request.</p> pub tagging_directive: Option<String>, /// <p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.</p> pub website_redirect_location: Option<String>, } #[derive(Default, Debug, Clone)] pub struct CopyObjectResult { pub e_tag: Option<String>, pub last_modified: Option<String>, } struct CopyObjectResultDeserializer; impl CopyObjectResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CopyObjectResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CopyObjectResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
                    "ETag" => {
                        obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack)));
                    }
                    "LastModified" => {
                        obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize(
                            "LastModified",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct CopyPartResult {
    /// <p>Entity tag of the object.</p>
    pub e_tag: Option<String>,
    /// <p>Date and time at which the object was uploaded.</p>
    pub last_modified: Option<String>,
}

struct CopyPartResultDeserializer;
impl CopyPartResultDeserializer {
    /// Parses a `CopyPartResult` element (`ETag` and `LastModified` children).
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<CopyPartResult, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = CopyPartResult::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "ETag" => {
                        obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack)));
                    }
                    "LastModified" => {
                        obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize(
                            "LastModified",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct CreateBucketConfiguration {
    /// <p>Specifies the region where the bucket will be created. If you don&#39;t specify a region, the bucket will be created in US Standard.</p>
    pub location_constraint: Option<String>,
}

pub struct CreateBucketConfigurationSerializer;
impl CreateBucketConfigurationSerializer {
    /// Serializes a `CreateBucketConfiguration` as an XML element named `name`.
    ///
    /// Fix over the generated original: the `Result` of the `characters(..)`
    /// write was silently dropped (masked by `#[allow(warnings)]`); it is now
    /// propagated with `?` like the surrounding writes.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &CreateBucketConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.location_constraint {
            writer.write(xml::writer::XmlEvent::start_element("LocationConstraint"))?;
            // `value` is a &String; &String coerces to &str for `characters`.
            writer.write(xml::writer::XmlEvent::characters(value))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct CreateBucketOutput {
    pub location: Option<String>,
}

struct CreateBucketOutputDeserializer;
impl CreateBucketOutputDeserializer {
    /// Consumes a `CreateBucketOutput` element; no body fields are read
    /// (the `location` field is populated from the response headers).
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<CreateBucketOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = CreateBucketOutput::default();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct CreateBucketRequest {
    /// <p>The canned ACL to apply to the bucket.</p>
    pub acl: Option<String>,
    pub bucket: String,
    pub create_bucket_configuration: Option<CreateBucketConfiguration>,
    /// <p>Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.</p>
    pub grant_full_control: Option<String>,
    /// <p>Allows grantee to list the objects in the bucket.</p>
    pub grant_read: Option<String>,
    /// <p>Allows grantee to read the bucket ACL.</p>
    pub grant_read_acp: Option<String>,
    /// <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
    pub grant_write: Option<String>,
    /// <p>Allows grantee to write the ACL for the applicable bucket.</p>
    pub grant_write_acp: Option<String>,
} #[derive(Default, Debug, Clone)] pub struct CreateMultipartUploadOutput { /// <p>Date when multipart upload will become eligible for abort operation by lifecycle.</p> pub abort_date: Option<String>, /// <p>Id of the lifecycle rule that makes a multipart upload eligible for abort operation.</p> pub abort_rule_id: Option<String>, /// <p>Name of the bucket to which the multipart upload was initiated.</p> pub bucket: Option<String>, /// <p>Object key for which the multipart upload was initiated.</p> pub key: Option<String>, pub request_charged: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p> pub sse_customer_algorithm: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p> pub sse_customer_key_md5: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>ID for the initiated multipart upload.</p> pub upload_id: Option<String>, } struct CreateMultipartUploadOutputDeserializer; impl CreateMultipartUploadOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CreateMultipartUploadOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CreateMultipartUploadOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Bucket" => { obj.bucket = Some(try!(BucketNameDeserializer::deserialize("Bucket", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "UploadId" => { obj.upload_id = Some(try!(MultipartUploadIdDeserializer::deserialize( "UploadId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct CreateMultipartUploadRequest { /// <p>The canned ACL to apply to the object.</p> pub acl: Option<String>, pub bucket: String, /// <p>Specifies caching behavior along the request/reply chain.</p> pub cache_control: Option<String>, /// <p>Specifies presentational information for the object.</p> pub content_disposition: Option<String>, /// <p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p> pub content_encoding: Option<String>, /// <p>The language the content is in.</p> pub content_language: Option<String>, /// <p>A standard MIME type describing the format of the object data.</p> pub content_type: Option<String>, /// <p>The date and time at which the object is no longer cacheable.</p> pub expires: Option<String>, /// <p>Gives the grantee READ, READ<em>ACP, and WRITE</em>ACP permissions on the object.</p> pub grant_full_control: Option<String>, /// <p>Allows grantee to read the object data and its metadata.</p> pub grant_read: Option<String>, /// <p>Allows grantee to read the object ACL.</p> pub grant_read_acp: Option<String>, /// <p>Allows grantee to write the ACL for the applicable object.</p> pub grant_write_acp: Option<String>, pub key: String, /// <p>A map of metadata to store 
with the object in S3.</p> pub metadata: Option<::std::collections::HashMap<String, String>>, pub request_payer: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>The type of storage to use for the object. Defaults to &#39;STANDARD&#39;.</p> pub storage_class: Option<String>, /// <p>The tag-set for the object. The tag-set must be encoded as URL Query parameters</p> pub tagging: Option<String>, /// <p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. 
Amazon S3 stores the value of this header in the object metadata.</p> pub website_redirect_location: Option<String>, } struct CreationDateDeserializer; impl CreationDateDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } struct DateDeserializer; impl DateDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct DateSerializer; impl DateSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct DaysDeserializer; impl DaysDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<i64, XmlParseError> { try!(start_element(tag_name, stack)); let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct DaysSerializer; impl DaysSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &i64, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct DaysAfterInitiationDeserializer; impl 
DaysAfterInitiationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<i64, XmlParseError> { try!(start_element(tag_name, stack)); let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct DaysAfterInitiationSerializer; impl DaysAfterInitiationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &i64, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct Delete { pub objects: Vec<ObjectIdentifier>, /// <p>Element to enable quiet mode for the request. When you add this element, you must set its value to true.</p> pub quiet: Option<bool>, } pub struct DeleteSerializer; impl DeleteSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Delete, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; ObjectIdentifierListSerializer::serialize(&mut writer, "Object", &obj.objects)?; if let Some(ref value) = obj.quiet { writer.write(xml::writer::XmlEvent::start_element("Quiet"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct DeleteBucketAnalyticsConfigurationRequest { /// <p>The name of the bucket from which an analytics configuration is deleted.</p> pub bucket: String, /// <p>The identifier used to represent an analytics configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct 
DeleteBucketCorsRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketEncryptionRequest { /// <p>The name of the bucket containing the server-side encryption configuration to delete.</p> pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketInventoryConfigurationRequest { /// <p>The name of the bucket containing the inventory configuration to delete.</p> pub bucket: String, /// <p>The ID used to identify the inventory configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketLifecycleRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketMetricsConfigurationRequest { /// <p>The name of the bucket containing the metrics configuration to delete.</p> pub bucket: String, /// <p>The ID used to identify the metrics configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketPolicyRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketReplicationRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketTaggingRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct DeleteBucketWebsiteRequest { pub bucket: String, } struct DeleteMarkerDeserializer; impl DeleteMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<bool, XmlParseError> { try!(start_element(tag_name, stack)); let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct DeleteMarkerEntry { /// <p>Specifies whether the object is (true) or is not (false) the latest version of an object.</p> pub is_latest: Option<bool>, /// <p>The object key.</p> pub key: Option<String>, /// <p>Date and time the object was last 
modified.</p> pub last_modified: Option<String>, pub owner: Option<Owner>, /// <p>Version ID of an object.</p> pub version_id: Option<String>, } struct DeleteMarkerEntryDeserializer; impl DeleteMarkerEntryDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteMarkerEntry, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DeleteMarkerEntry::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "IsLatest" => { obj.is_latest = Some(try!(IsLatestDeserializer::deserialize("IsLatest", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "LastModified" => { obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize( "LastModified", stack ))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } "VersionId" => { obj.version_id = Some(try!(ObjectVersionIdDeserializer::deserialize( "VersionId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DeleteMarkerVersionIdDeserializer; impl DeleteMarkerVersionIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } struct DeleteMarkersDeserializer; impl DeleteMarkersDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DeleteMarkerEntry>, XmlParseError> { let mut 
obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(DeleteMarkerEntryDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct DeleteObjectOutput { /// <p>Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker.</p> pub delete_marker: Option<bool>, pub request_charged: Option<String>, /// <p>Returns the version ID of the delete marker created as a result of the DELETE operation.</p> pub version_id: Option<String>, } struct DeleteObjectOutputDeserializer; impl DeleteObjectOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteObjectOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = DeleteObjectOutput::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct DeleteObjectRequest { pub bucket: String, pub key: String, /// <p>The concatenation of the authentication device&#39;s serial number, a space, and the value that is displayed on your authentication device.</p> pub mfa: Option<String>, pub request_payer: Option<String>, /// <p>VersionId used to reference a specific version of the object.</p> pub version_id: Option<String>, } #[derive(Default, Debug, Clone)] pub struct DeleteObjectTaggingOutput { /// <p>The versionId of the object the tag-set was removed from.</p> pub version_id: Option<String>, } struct DeleteObjectTaggingOutputDeserializer; impl DeleteObjectTaggingOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteObjectTaggingOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = DeleteObjectTaggingOutput::default(); try!(end_element(tag_name, stack)); 
Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct DeleteObjectTaggingRequest { pub bucket: String, pub key: String, /// <p>The versionId of the object that the tag-set will be removed from.</p> pub version_id: Option<String>, } #[derive(Default, Debug, Clone)] pub struct DeleteObjectsOutput { pub deleted: Option<Vec<DeletedObject>>, pub errors: Option<Vec<S3Error>>, pub request_charged: Option<String>, } struct DeleteObjectsOutputDeserializer; impl DeleteObjectsOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteObjectsOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DeleteObjectsOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Deleted" => { obj.deleted = Some(try!(DeletedObjectsDeserializer::deserialize( "Deleted", stack ))); } "Error" => { obj.errors = Some(try!(ErrorsDeserializer::deserialize("Error", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct DeleteObjectsRequest { pub bucket: String, pub delete: Delete, /// <p>The concatenation of the authentication device&#39;s serial number, a space, and the value that is displayed on your authentication device.</p> pub mfa: Option<String>, pub request_payer: Option<String>, } #[derive(Default, Debug, Clone)] pub struct DeletedObject { pub delete_marker: Option<bool>, pub delete_marker_version_id: Option<String>, pub key: Option<String>, pub version_id: Option<String>, } struct DeletedObjectDeserializer; impl DeletedObjectDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeletedObject, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DeletedObject::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "DeleteMarker" => { obj.delete_marker = Some(try!(DeleteMarkerDeserializer::deserialize( "DeleteMarker", stack ))); } "DeleteMarkerVersionId" => { obj.delete_marker_version_id = Some(try!(DeleteMarkerVersionIdDeserializer::deserialize( "DeleteMarkerVersionId", stack ))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "VersionId" => { obj.version_id = Some(try!(ObjectVersionIdDeserializer::deserialize( "VersionId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DeletedObjectsDeserializer; impl DeletedObjectsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DeletedObject>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(DeletedObjectDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } struct DelimiterDeserializer; impl DelimiterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct DelimiterSerializer; impl DelimiterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct DescriptionSerializer; impl DescriptionSerializer { #[allow(unused_variables, warnings)] pub 
fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for replication destination information.</p> #[derive(Default, Debug, Clone)] pub struct Destination { /// <p>Container for information regarding the access control for replicas.</p> pub access_control_translation: Option<AccessControlTranslation>, /// <p>Account ID of the destination bucket. Currently this is only being verified if Access Control Translation is enabled</p> pub account: Option<String>, /// <p>Amazon resource name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule.</p> pub bucket: String, /// <p>Container for information regarding encryption based configuration for replicas.</p> pub encryption_configuration: Option<EncryptionConfiguration>, /// <p>The class of storage used to store the object.</p> pub storage_class: Option<String>, } struct DestinationDeserializer; impl DestinationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Destination, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Destination::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
    "AccessControlTranslation" => {
        obj.access_control_translation =
            Some(try!(AccessControlTranslationDeserializer::deserialize(
                "AccessControlTranslation",
                stack
            )));
    }
    "Account" => {
        obj.account = Some(try!(AccountIdDeserializer::deserialize("Account", stack)));
    }
    "Bucket" => {
        obj.bucket = try!(BucketNameDeserializer::deserialize("Bucket", stack));
    }
    "EncryptionConfiguration" => {
        obj.encryption_configuration =
            Some(try!(EncryptionConfigurationDeserializer::deserialize(
                "EncryptionConfiguration",
                stack
            )));
    }
    "StorageClass" => {
        obj.storage_class = Some(try!(StorageClassDeserializer::deserialize(
            "StorageClass",
            stack
        )));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}

/// Serializes a `Destination` into its XML representation.
pub struct DestinationSerializer;
impl DestinationSerializer {
    /// Writes `obj` as an XML element named `name` to `writer`.
    ///
    /// The required `Bucket` member is always emitted; optional members are
    /// emitted only when `Some`. Any writer error is propagated to the caller.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Destination,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.access_control_translation {
            // Bug fix: the generated code borrowed the unit result
            // (`&...serialize(...)?;`), a useless `&()`. Call plainly and
            // propagate the error with `?`.
            AccessControlTranslationSerializer::serialize(
                &mut writer,
                "AccessControlTranslation",
                value,
            )?;
        }
        if let Some(ref value) = obj.account {
            writer.write(xml::writer::XmlEvent::start_element("Account"))?;
            // Bug fix: this write's Result was silently dropped; propagate it
            // so a failed write is reported instead of emitting truncated XML.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Bucket"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.bucket
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.encryption_configuration {
            // Bug fix: stray `&` removed (same issue as AccessControlTranslation above).
            EncryptionConfigurationSerializer::serialize(
                &mut writer,
                "EncryptionConfiguration",
                value,
            )?;
        }
        if let Some(ref value) = obj.storage_class {
            writer.write(xml::writer::XmlEvent::start_element("StorageClass"))?;
            // Bug fix: propagate the previously ignored write error.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a `DisplayName` leaf element into a `String`.
struct DisplayNameDeserializer;
impl DisplayNameDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a display-name string as a single XML leaf element.
pub struct DisplayNameSerializer;
impl DisplayNameSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `ETag` leaf element into a `String`.
struct ETagDeserializer;
impl ETagDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes an ETag string as a single XML leaf element.
pub struct ETagSerializer;
impl ETagSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an `EmailAddress` leaf element into a `String`.
struct EmailAddressDeserializer;
impl EmailAddressDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
Ok(obj) } } pub struct EmailAddressSerializer; impl EmailAddressSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct EnableRequestProgressSerializer; impl EnableRequestProgressSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &bool, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct EncodingTypeDeserializer; impl EncodingTypeDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct EncodingTypeSerializer; impl EncodingTypeSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Describes the server-side encryption that will be applied to the restore results.</p> #[derive(Default, Debug, Clone)] pub struct Encryption { /// <p>The server-side encryption algorithm used when storing job results in Amazon S3 (e.g., AES256, aws:kms).</p> pub encryption_type: String, /// <p>If the 
encryption type is aws:kms, this optional value can be used to specify the encryption context for the restore results.</p> pub kms_context: Option<String>, /// <p>If the encryption type is aws:kms, this optional value specifies the AWS KMS key ID to use for encryption of job results.</p> pub kms_key_id: Option<String>, } pub struct EncryptionSerializer; impl EncryptionSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Encryption, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("EncryptionType"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.encryption_type )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.kms_context { writer.write(xml::writer::XmlEvent::start_element("KMSContext"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.kms_key_id { writer.write(xml::writer::XmlEvent::start_element("KMSKeyId"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for information regarding encryption based configuration for replicas.</p> #[derive(Default, Debug, Clone)] pub struct EncryptionConfiguration { /// <p>The id of the KMS key used to encrypt the replica object.</p> pub replica_kms_key_id: Option<String>, } struct EncryptionConfigurationDeserializer; impl EncryptionConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<EncryptionConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = EncryptionConfiguration::default(); 
loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "ReplicaKmsKeyID" => { obj.replica_kms_key_id = Some(try!( ReplicaKmsKeyIDDeserializer::deserialize("ReplicaKmsKeyID", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct EncryptionConfigurationSerializer; impl EncryptionConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &EncryptionConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.replica_kms_key_id { writer.write(xml::writer::XmlEvent::start_element("ReplicaKmsKeyID"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct EndEvent {} struct EndEventDeserializer; impl EndEventDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<EndEvent, XmlParseError> { try!(start_element(tag_name, stack)); let obj = EndEvent::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct S3Error { pub code: Option<String>, pub key: Option<String>, pub message: Option<String>, pub version_id: Option<String>, } struct S3ErrorDeserializer; impl S3ErrorDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<S3Error, XmlParseError> { 
try!(start_element(tag_name, stack)); let mut obj = S3Error::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Code" => { obj.code = Some(try!(CodeDeserializer::deserialize("Code", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "Message" => { obj.message = Some(try!(MessageDeserializer::deserialize("Message", stack))); } "VersionId" => { obj.version_id = Some(try!(ObjectVersionIdDeserializer::deserialize( "VersionId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct ErrorDocument { /// <p>The object key name to use when a 4XX class error occurs.</p> pub key: String, } struct ErrorDocumentDeserializer; impl ErrorDocumentDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ErrorDocument, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ErrorDocument::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Key" => { obj.key = try!(ObjectKeyDeserializer::deserialize("Key", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ErrorDocumentSerializer; impl ErrorDocumentSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &ErrorDocument, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Key"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.key )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ErrorsDeserializer; impl ErrorsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<S3Error>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(S3ErrorDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } struct EventDeserializer; impl EventDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct EventSerializer; impl EventSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct EventListDeserializer; impl EventListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(EventDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } pub struct EventListSerializer; impl EventListSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<String>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { EventSerializer::serialize(writer, name, element)?; } Ok(()) } } struct ExpirationStatusDeserializer; impl ExpirationStatusDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ExpirationStatusSerializer; impl ExpirationStatusSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ExpiredObjectDeleteMarkerDeserializer; impl ExpiredObjectDeleteMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<bool, XmlParseError> { try!(start_element(tag_name, stack)); let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ExpiredObjectDeleteMarkerSerializer; impl ExpiredObjectDeleteMarkerSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &bool, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; 
writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ExposeHeaderDeserializer; impl ExposeHeaderDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ExposeHeaderSerializer; impl ExposeHeaderSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ExposeHeadersDeserializer; impl ExposeHeadersDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(ExposeHeaderDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } pub struct ExposeHeadersSerializer; impl ExposeHeadersSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<String>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { ExposeHeaderSerializer::serialize(writer, name, element)?; } Ok(()) } } pub struct ExpressionSerializer; impl ExpressionSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct ExpressionTypeSerializer; impl ExpressionTypeSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct FetchOwnerSerializer; impl FetchOwnerSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &bool, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct FieldDelimiterSerializer; impl FieldDelimiterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut 
EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } pub struct FileHeaderInfoSerializer; impl FileHeaderInfoSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for key value pair that defines the criteria for the filter rule.</p> #[derive(Default, Debug, Clone)] pub struct FilterRule { /// <p>Object key name prefix or suffix identifying one or more objects to which the filtering rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes and suffixes are not supported. For more information, go to <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Configuring Event Notifications</a> in the Amazon Simple Storage Service Developer Guide.</p> pub name: Option<String>, pub value: Option<String>, } struct FilterRuleDeserializer; impl FilterRuleDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<FilterRule, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = FilterRule::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
    "Name" => {
        obj.name = Some(try!(FilterRuleNameDeserializer::deserialize("Name", stack)));
    }
    "Value" => {
        obj.value = Some(try!(FilterRuleValueDeserializer::deserialize(
            "Value",
            stack
        )));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}

/// Serializes a `FilterRule` (Name/Value pair) into its XML representation.
pub struct FilterRuleSerializer;
impl FilterRuleSerializer {
    /// Writes `obj` as an XML element named `name`; both members are
    /// optional and emitted only when `Some`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &FilterRule,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.name {
            writer.write(xml::writer::XmlEvent::start_element("Name"))?;
            // Bug fix: this write's Result was silently dropped; propagate it
            // so a failed write is reported instead of emitting truncated XML.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.value {
            writer.write(xml::writer::XmlEvent::start_element("Value"))?;
            // Bug fix: propagate the previously ignored write error.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a flattened list of sibling `FilterRule` elements that all
/// share `tag_name`; stops at the first non-matching event.
struct FilterRuleListDeserializer;
impl FilterRuleListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<FilterRule>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement {
                    ref name,
                    ..
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(FilterRuleDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } pub struct FilterRuleListSerializer; impl FilterRuleListSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<FilterRule>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { FilterRuleSerializer::serialize(writer, name, element)?; } Ok(()) } } struct FilterRuleNameDeserializer; impl FilterRuleNameDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct FilterRuleNameSerializer; impl FilterRuleNameSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct FilterRuleValueDeserializer; impl FilterRuleValueDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct FilterRuleValueSerializer; impl FilterRuleValueSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = 
obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct GetBucketAccelerateConfigurationOutput { /// <p>The accelerate configuration of the bucket.</p> pub status: Option<String>, } struct GetBucketAccelerateConfigurationOutputDeserializer; impl GetBucketAccelerateConfigurationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketAccelerateConfigurationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketAccelerateConfigurationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Status" => { obj.status = Some(try!(BucketAccelerateStatusDeserializer::deserialize( "Status", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketAccelerateConfigurationRequest { /// <p>Name of the bucket for which the accelerate configuration is retrieved.</p> pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketAclOutput { /// <p>A list of grants.</p> pub grants: Option<Vec<Grant>>, pub owner: Option<Owner>, } struct GetBucketAclOutputDeserializer; impl GetBucketAclOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketAclOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketAclOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. 
})) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "AccessControlList" => { obj.grants = Some(try!(GrantsDeserializer::deserialize( "AccessControlList", stack ))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketAclRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketAnalyticsConfigurationOutput { /// <p>The configuration and any analyses for the analytics filter.</p> pub analytics_configuration: Option<AnalyticsConfiguration>, } struct GetBucketAnalyticsConfigurationOutputDeserializer; impl GetBucketAnalyticsConfigurationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketAnalyticsConfigurationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketAnalyticsConfigurationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "AnalyticsConfiguration" => { obj.analytics_configuration = Some(try!(AnalyticsConfigurationDeserializer::deserialize( "AnalyticsConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketAnalyticsConfigurationRequest { /// <p>The name of the bucket from which an analytics configuration is retrieved.</p> pub bucket: String, /// <p>The identifier used to represent an analytics configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketCorsOutput { pub cors_rules: Option<Vec<CORSRule>>, } struct GetBucketCorsOutputDeserializer; impl GetBucketCorsOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketCorsOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketCorsOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "CORSRule" => { obj.cors_rules = Some(try!(CORSRulesDeserializer::deserialize("CORSRule", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketCorsRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketEncryptionOutput { pub server_side_encryption_configuration: Option<ServerSideEncryptionConfiguration>, } struct GetBucketEncryptionOutputDeserializer; impl GetBucketEncryptionOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketEncryptionOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketEncryptionOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "ServerSideEncryptionConfiguration" => { obj.server_side_encryption_configuration = Some(try!( ServerSideEncryptionConfigurationDeserializer::deserialize( "ServerSideEncryptionConfiguration", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketEncryptionRequest { /// <p>The name of the bucket from which the server-side encryption configuration is retrieved.</p> pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketInventoryConfigurationOutput { /// <p>Specifies the inventory configuration.</p> pub inventory_configuration: Option<InventoryConfiguration>, } struct GetBucketInventoryConfigurationOutputDeserializer; impl GetBucketInventoryConfigurationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketInventoryConfigurationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketInventoryConfigurationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "InventoryConfiguration" => { obj.inventory_configuration = Some(try!(InventoryConfigurationDeserializer::deserialize( "InventoryConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketInventoryConfigurationRequest { /// <p>The name of the bucket containing the inventory configuration to retrieve.</p> pub bucket: String, /// <p>The ID used to identify the inventory configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketLifecycleConfigurationOutput { pub rules: Option<Vec<LifecycleRule>>, } struct GetBucketLifecycleConfigurationOutputDeserializer; impl GetBucketLifecycleConfigurationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketLifecycleConfigurationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketLifecycleConfigurationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Rule" => { obj.rules = Some(try!(LifecycleRulesDeserializer::deserialize("Rule", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketLifecycleConfigurationRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketLifecycleOutput { pub rules: Option<Vec<Rule>>, } struct GetBucketLifecycleOutputDeserializer; impl GetBucketLifecycleOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketLifecycleOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketLifecycleOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Rule" => { obj.rules = Some(try!(RulesDeserializer::deserialize("Rule", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketLifecycleRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketLocationOutput { pub location_constraint: Option<String>, } struct GetBucketLocationOutputDeserializer; impl GetBucketLocationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketLocationOutput, XmlParseError> { let mut obj = GetBucketLocationOutput::default(); obj.location_constraint = Some(try!(BucketLocationConstraintDeserializer::deserialize( "LocationConstraint", stack ))); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketLocationRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketLoggingOutput { pub logging_enabled: Option<LoggingEnabled>, } struct GetBucketLoggingOutputDeserializer; impl GetBucketLoggingOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketLoggingOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketLoggingOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "LoggingEnabled" => { obj.logging_enabled = Some(try!(LoggingEnabledDeserializer::deserialize( "LoggingEnabled", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketLoggingRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketMetricsConfigurationOutput { /// <p>Specifies the metrics configuration.</p> pub metrics_configuration: Option<MetricsConfiguration>, } struct GetBucketMetricsConfigurationOutputDeserializer; impl GetBucketMetricsConfigurationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketMetricsConfigurationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketMetricsConfigurationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "MetricsConfiguration" => { obj.metrics_configuration = Some(try!(MetricsConfigurationDeserializer::deserialize( "MetricsConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketMetricsConfigurationRequest { /// <p>The name of the bucket containing the metrics configuration to retrieve.</p> pub bucket: String, /// <p>The ID used to identify the metrics configuration.</p> pub id: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketNotificationConfigurationRequest { /// <p>Name of the bucket to get the notification configuration for.</p> pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketPolicyOutput { /// <p>The bucket policy as a JSON document.</p> pub policy: Option<String>, } #[derive(Default, Debug, Clone)] pub struct GetBucketPolicyRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketReplicationOutput { pub replication_configuration: Option<ReplicationConfiguration>, } struct GetBucketReplicationOutputDeserializer; impl GetBucketReplicationOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketReplicationOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketReplicationOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "ReplicationConfiguration" => { obj.replication_configuration = Some(try!(ReplicationConfigurationDeserializer::deserialize( "ReplicationConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketReplicationRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketRequestPaymentOutput { /// <p>Specifies who pays for the download and request fees.</p> pub payer: Option<String>, } struct GetBucketRequestPaymentOutputDeserializer; impl GetBucketRequestPaymentOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketRequestPaymentOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketRequestPaymentOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Payer" => { obj.payer = Some(try!(PayerDeserializer::deserialize("Payer", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketRequestPaymentRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketTaggingOutput { pub tag_set: Vec<Tag>, } struct GetBucketTaggingOutputDeserializer; impl GetBucketTaggingOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketTaggingOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketTaggingOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "TagSet" => { obj.tag_set = try!(TagSetDeserializer::deserialize("TagSet", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketTaggingRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketVersioningOutput { /// <p>Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. 
If the bucket has never been so configured, this element is not returned.</p> pub mfa_delete: Option<String>, /// <p>The versioning state of the bucket.</p> pub status: Option<String>, } struct GetBucketVersioningOutputDeserializer; impl GetBucketVersioningOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketVersioningOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketVersioningOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "MfaDelete" => { obj.mfa_delete = Some(try!(MFADeleteStatusDeserializer::deserialize( "MfaDelete", stack ))); } "Status" => { obj.status = Some(try!(BucketVersioningStatusDeserializer::deserialize( "Status", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketVersioningRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetBucketWebsiteOutput { pub error_document: Option<ErrorDocument>, pub index_document: Option<IndexDocument>, pub redirect_all_requests_to: Option<RedirectAllRequestsTo>, pub routing_rules: Option<Vec<RoutingRule>>, } struct GetBucketWebsiteOutputDeserializer; impl GetBucketWebsiteOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetBucketWebsiteOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetBucketWebsiteOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. 
})) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { match &name[..] { "ErrorDocument" => { obj.error_document = Some(try!( ErrorDocumentDeserializer::deserialize("ErrorDocument", stack) )); } "IndexDocument" => { obj.index_document = Some(try!( IndexDocumentDeserializer::deserialize("IndexDocument", stack) )); } "RedirectAllRequestsTo" => { obj.redirect_all_requests_to = Some(try!(RedirectAllRequestsToDeserializer::deserialize( "RedirectAllRequestsTo", stack ))); } "RoutingRules" => { obj.routing_rules = Some(try!(RoutingRulesDeserializer::deserialize( "RoutingRules", stack ))); } _ => skip_tree(stack), } } DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetBucketWebsiteRequest { pub bucket: String, } #[derive(Default, Debug, Clone)] pub struct GetObjectAclOutput { /// <p>A list of grants.</p> pub grants: Option<Vec<Grant>>, pub owner: Option<Owner>, pub request_charged: Option<String>, } struct GetObjectAclOutputDeserializer; impl GetObjectAclOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetObjectAclOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetObjectAclOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "AccessControlList" => { obj.grants = Some(try!(GrantsDeserializer::deserialize( "AccessControlList", stack ))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetObjectAclRequest { pub bucket: String, pub key: String, pub request_payer: Option<String>, /// <p>VersionId used to reference a specific version of the object.</p> pub version_id: Option<String>, } #[derive(Default, Debug)] pub struct GetObjectOutput { pub accept_ranges: Option<String>, /// <p>Object data.</p> pub body: Option<StreamingBody>, /// <p>Specifies caching behavior along the request/reply chain.</p> pub cache_control: Option<String>, /// <p>Specifies presentational information for the object.</p> pub content_disposition: Option<String>, /// <p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p> pub content_encoding: Option<String>, /// <p>The language the content is in.</p> pub content_language: Option<String>, /// <p>Size of the body in bytes.</p> pub content_length: Option<i64>, /// <p>The portion of the object returned in the response.</p> pub content_range: Option<String>, /// <p>A standard MIME type describing the format of the object data.</p> pub content_type: Option<String>, /// <p>Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. 
If false, this response header does not appear in the response.</p> pub delete_marker: Option<bool>, /// <p>An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL</p> pub e_tag: Option<String>, /// <p>If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.</p> pub expiration: Option<String>, /// <p>The date and time at which the object is no longer cacheable.</p> pub expires: Option<String>, /// <p>Last modified date of the object</p> pub last_modified: Option<String>, /// <p>A map of metadata to store with the object in S3.</p> pub metadata: Option<::std::collections::HashMap<String, String>>, /// <p>This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. 
For example, using SOAP, you can create metadata whose values are not legal HTTP headers.</p> pub missing_meta: Option<i64>, /// <p>The count of parts this object has.</p> pub parts_count: Option<i64>, pub replication_status: Option<String>, pub request_charged: Option<String>, /// <p>Provides information about object restoration operation and expiration time of the restored object copy.</p> pub restore: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p> pub sse_customer_algorithm: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p> pub sse_customer_key_md5: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, pub storage_class: Option<String>, /// <p>The number of tags, if any, on the object.</p> pub tag_count: Option<i64>, /// <p>Version of the object.</p> pub version_id: Option<String>, /// <p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. 
Amazon S3 stores the value of this header in the object metadata.</p> pub website_redirect_location: Option<String>, } #[derive(Default, Debug, Clone)] pub struct GetObjectRequest { pub bucket: String, /// <p>Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).</p> pub if_match: Option<String>, /// <p>Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).</p> pub if_modified_since: Option<String>, /// <p>Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).</p> pub if_none_match: Option<String>, /// <p>Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).</p> pub if_unmodified_since: Option<String>, pub key: String, /// <p>Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a &#39;ranged&#39; GET request for the part specified. Useful for downloading just a part of an object.</p> pub part_number: Option<i64>, /// <p>Downloads the specified range bytes of an object. 
For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.</p> pub range: Option<String>, pub request_payer: Option<String>, /// <p>Sets the Cache-Control header of the response.</p> pub response_cache_control: Option<String>, /// <p>Sets the Content-Disposition header of the response</p> pub response_content_disposition: Option<String>, /// <p>Sets the Content-Encoding header of the response.</p> pub response_content_encoding: Option<String>, /// <p>Sets the Content-Language header of the response.</p> pub response_content_language: Option<String>, /// <p>Sets the Content-Type header of the response.</p> pub response_content_type: Option<String>, /// <p>Sets the Expires header of the response.</p> pub response_expires: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>VersionId used to reference a specific version of the object.</p> pub version_id: Option<String>, } #[derive(Default, Debug, Clone)] pub struct GetObjectTaggingOutput { pub tag_set: Vec<Tag>, pub version_id: Option<String>, } struct GetObjectTaggingOutputDeserializer; impl GetObjectTaggingOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<GetObjectTaggingOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = GetObjectTaggingOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "TagSet" => { obj.tag_set = try!(TagSetDeserializer::deserialize("TagSet", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct GetObjectTaggingRequest { pub bucket: String, pub key: String, pub version_id: Option<String>, } #[derive(Default, Debug)] pub struct GetObjectTorrentOutput { pub body: Option<StreamingBody>, pub request_charged: Option<String>, } #[derive(Default, Debug, Clone)] pub struct GetObjectTorrentRequest { pub bucket: String, pub key: String, pub request_payer: Option<String>, } #[derive(Default, Debug, Clone)] pub struct GlacierJobParameters { /// <p>Glacier retrieval tier at which the restore will be processed.</p> pub tier: String, } pub struct GlacierJobParametersSerializer; impl GlacierJobParametersSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &GlacierJobParameters, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Tier"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.tier )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct Grant { pub grantee: Option<Grantee>, /// <p>Specifies the permission given to the grantee.</p> pub permission: Option<String>, } struct GrantDeserializer; impl GrantDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Grant, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Grant::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. 
})) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Grantee" => {
                        obj.grantee =
                            Some(try!(GranteeDeserializer::deserialize("Grantee", stack)));
                    }
                    "Permission" => {
                        obj.permission = Some(try!(PermissionDeserializer::deserialize(
                            "Permission",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct GrantSerializer;
impl GrantSerializer {
    /// Writes `obj` as an XML element named `name` onto `writer`.
    ///
    /// Emits an optional nested `Grantee` element followed by an optional
    /// `Permission` text element. Every write result is now propagated to
    /// the caller instead of being silently discarded.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Grant,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.grantee {
            // Fix: the generated code prefixed this call with a stray `&`,
            // creating and immediately discarding a reference to `()`.
            GranteeSerializer::serialize(&mut writer, "Grantee", value)?;
        }
        if let Some(ref value) = obj.permission {
            writer.write(xml::writer::XmlEvent::start_element("Permission"))?;
            // Bug fix: the `Result` of this character-data write was ignored
            // (masked by `#[allow(warnings)]`), so a writer failure here went
            // unnoticed and later writes continued on a broken stream.
            // Propagate it with `?`, matching the sibling writes.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct Grantee {
    /// <p>Screen name of the grantee.</p>
    pub display_name: Option<String>,
    /// <p>Email address of the grantee.</p>
    pub email_address: Option<String>,
    /// <p>The canonical user ID of the grantee.</p>
    pub id: Option<String>,
    /// <p>Type of grantee</p>
    pub type_: String,
    /// <p>URI of the grantee group.</p>
    pub uri: Option<String>,
}

struct GranteeDeserializer;
impl GranteeDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Grantee, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Grantee::default();
        loop {
            let next_event = match stack.peek() {
Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "DisplayName" => {
                        obj.display_name = Some(try!(DisplayNameDeserializer::deserialize(
                            "DisplayName",
                            stack
                        )));
                    }
                    "EmailAddress" => {
                        obj.email_address = Some(try!(EmailAddressDeserializer::deserialize(
                            "EmailAddress",
                            stack
                        )));
                    }
                    "ID" => {
                        obj.id = Some(try!(IDDeserializer::deserialize("ID", stack)));
                    }
                    "xsi:type" => {
                        obj.type_ = try!(TypeDeserializer::deserialize("xsi:type", stack));
                    }
                    "URI" => {
                        obj.uri = Some(try!(URIDeserializer::deserialize("URI", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct GranteeSerializer;
impl GranteeSerializer {
    /// Writes `obj` as an XML element named `name` onto `writer`.
    ///
    /// Emits optional `DisplayName`, `EmailAddress`, `ID` and `URI` text
    /// elements plus the mandatory `xsi:type` element.
    ///
    /// Bug fix: the generated code discarded the `Result` of the four
    /// character-data writes below (masked by `#[allow(warnings)]`), so
    /// writer failures were silently swallowed. They now propagate with
    /// `?`, matching the `xsi:type` write which already did.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Grantee,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.display_name {
            writer.write(xml::writer::XmlEvent::start_element("DisplayName"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.email_address {
            writer.write(xml::writer::XmlEvent::start_element("EmailAddress"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("ID"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::start_element("xsi:type"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.type_
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.uri {
            writer.write(xml::writer::XmlEvent::start_element("URI"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct GrantsDeserializer;
impl GrantsDeserializer {
    /// Parses a sequence of `Grant` child elements into a `Vec<Grant>`,
    /// skipping any unrecognized children.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<Grant>, XmlParseError> {
        let mut obj = vec![];
        try!(start_element(tag_name, stack));
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => {
                    if name == "Grant" {
                        obj.push(try!(GrantDeserializer::deserialize("Grant", stack)));
                    } else {
                        skip_tree(stack);
                    }
                }
                DeserializerNext::Close => {
                    try!(end_element(tag_name, stack));
                    break;
                }
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        Ok(obj)
    }
}

pub struct GrantsSerializer;
impl GrantsSerializer {
    /// Writes each `Grant` in `obj` as a `Grant` child of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<Grant>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        for element in obj {
            GrantSerializer::serialize(writer, "Grant", element)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())?;
        Ok(())
    }
}

#[derive(Default, Debug, Clone)]
pub struct HeadBucketRequest {
    pub bucket: String,
}

#[derive(Default, Debug, Clone)]
pub struct HeadObjectOutput {
    pub accept_ranges: Option<String>,
    /// <p>Specifies caching behavior along the request/reply chain.</p>
    pub
cache_control: Option<String>, /// <p>Specifies presentational information for the object.</p> pub content_disposition: Option<String>, /// <p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p> pub content_encoding: Option<String>, /// <p>The language the content is in.</p> pub content_language: Option<String>, /// <p>Size of the body in bytes.</p> pub content_length: Option<i64>, /// <p>A standard MIME type describing the format of the object data.</p> pub content_type: Option<String>, /// <p>Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.</p> pub delete_marker: Option<bool>, /// <p>An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL</p> pub e_tag: Option<String>, /// <p>If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.</p> pub expiration: Option<String>, /// <p>The date and time at which the object is no longer cacheable.</p> pub expires: Option<String>, /// <p>Last modified date of the object</p> pub last_modified: Option<String>, /// <p>A map of metadata to store with the object in S3.</p> pub metadata: Option<::std::collections::HashMap<String, String>>, /// <p>This is set to the number of metadata entries not returned in x-amz-meta headers. This can happen if you create metadata using an API like SOAP that supports more flexible metadata than the REST API. 
For example, using SOAP, you can create metadata whose values are not legal HTTP headers.</p> pub missing_meta: Option<i64>, /// <p>The count of parts this object has.</p> pub parts_count: Option<i64>, pub replication_status: Option<String>, pub request_charged: Option<String>, /// <p>Provides information about object restoration operation and expiration time of the restored object copy.</p> pub restore: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p> pub sse_customer_algorithm: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p> pub sse_customer_key_md5: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, pub storage_class: Option<String>, /// <p>Version of the object.</p> pub version_id: Option<String>, /// <p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. 
Amazon S3 stores the value of this header in the object metadata.</p> pub website_redirect_location: Option<String>, } struct HeadObjectOutputDeserializer; impl HeadObjectOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<HeadObjectOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = HeadObjectOutput::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct HeadObjectRequest { pub bucket: String, /// <p>Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).</p> pub if_match: Option<String>, /// <p>Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).</p> pub if_modified_since: Option<String>, /// <p>Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).</p> pub if_none_match: Option<String>, /// <p>Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).</p> pub if_unmodified_since: Option<String>, pub key: String, /// <p>Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a &#39;ranged&#39; HEAD request for the part specified. Useful querying about the size of the part and the number of parts in this object.</p> pub part_number: Option<i64>, /// <p>Downloads the specified range bytes of an object. For more information about the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.</p> pub range: Option<String>, pub request_payer: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. 
This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>VersionId used to reference a specific version of the object.</p> pub version_id: Option<String>, } struct HostNameDeserializer; impl HostNameDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct HostNameSerializer; impl HostNameSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct HttpErrorCodeReturnedEqualsDeserializer; impl HttpErrorCodeReturnedEqualsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct HttpErrorCodeReturnedEqualsSerializer; impl HttpErrorCodeReturnedEqualsSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { 
writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct HttpRedirectCodeDeserializer; impl HttpRedirectCodeDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct HttpRedirectCodeSerializer; impl HttpRedirectCodeSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct IDDeserializer; impl IDDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct IDSerializer; impl IDSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct IndexDocument { /// <p>A suffix that is appended to a request that is for a directory on the website endpoint (e.g. 
if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character.</p> pub suffix: String, } struct IndexDocumentDeserializer; impl IndexDocumentDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<IndexDocument, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = IndexDocument::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Suffix" => { obj.suffix = try!(SuffixDeserializer::deserialize("Suffix", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct IndexDocumentSerializer; impl IndexDocumentSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &IndexDocument, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Suffix"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.suffix )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct InitiatedDeserializer; impl InitiatedDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } 
#[derive(Default, Debug, Clone)] pub struct Initiator { /// <p>Name of the Principal.</p> pub display_name: Option<String>, /// <p>If the principal is an AWS account, it provides the Canonical User ID. If the principal is an IAM User, it provides a user ARN value.</p> pub id: Option<String>, } struct InitiatorDeserializer; impl InitiatorDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Initiator, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Initiator::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DisplayName" => { obj.display_name = Some(try!(DisplayNameDeserializer::deserialize( "DisplayName", stack ))); } "ID" => { obj.id = Some(try!(IDDeserializer::deserialize("ID", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Describes the serialization format of the object.</p> #[derive(Default, Debug, Clone)] pub struct InputSerialization { /// <p>Describes the serialization of a CSV-encoded object.</p> pub csv: Option<CSVInput>, /// <p>Specifies object&#39;s compression format. Valid values: NONE, GZIP. 
Default Value: NONE.</p> pub compression_type: Option<String>, /// <p>Specifies JSON as object&#39;s input serialization format.</p> pub json: Option<JSONInput>, } pub struct InputSerializationSerializer; impl InputSerializationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &InputSerialization, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.csv { &CSVInputSerializer::serialize(&mut writer, "CSV", value)?; } if let Some(ref value) = obj.compression_type { writer.write(xml::writer::XmlEvent::start_element("CompressionType"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.json { &JSONInputSerializer::serialize(&mut writer, "JSON", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct InventoryConfiguration { /// <p>Contains information about where to publish the inventory results.</p> pub destination: InventoryDestination, /// <p>Specifies an inventory filter. 
The inventory only includes objects that meet the filter&#39;s criteria.</p> pub filter: Option<InventoryFilter>, /// <p>The ID used to identify the inventory configuration.</p> pub id: String, /// <p>Specifies which object version(s) to included in the inventory results.</p> pub included_object_versions: String, /// <p>Specifies whether the inventory is enabled or disabled.</p> pub is_enabled: bool, /// <p>Contains the optional fields that are included in the inventory results.</p> pub optional_fields: Option<Vec<String>>, /// <p>Specifies the schedule for generating inventory results.</p> pub schedule: InventorySchedule, } struct InventoryConfigurationDeserializer; impl InventoryConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<InventoryConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = InventoryConfiguration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Destination" => { obj.destination = try!(InventoryDestinationDeserializer::deserialize( "Destination", stack )); } "Filter" => { obj.filter = Some(try!(InventoryFilterDeserializer::deserialize( "Filter", stack ))); } "Id" => { obj.id = try!(InventoryIdDeserializer::deserialize("Id", stack)); } "IncludedObjectVersions" => { obj.included_object_versions = try!(InventoryIncludedObjectVersionsDeserializer::deserialize( "IncludedObjectVersions", stack )); } "IsEnabled" => { obj.is_enabled = try!(IsEnabledDeserializer::deserialize("IsEnabled", stack)); } "OptionalFields" => { obj.optional_fields = Some(try!(InventoryOptionalFieldsDeserializer::deserialize( "OptionalFields", stack ))); } "Schedule" => { obj.schedule = try!(InventoryScheduleDeserializer::deserialize( "Schedule", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct InventoryConfigurationSerializer; impl InventoryConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &InventoryConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; InventoryDestinationSerializer::serialize(&mut writer, "Destination", &obj.destination)?; if let Some(ref value) = obj.filter { &InventoryFilterSerializer::serialize(&mut writer, "Filter", value)?; } writer.write(xml::writer::XmlEvent::start_element("Id"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.id )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::start_element( "IncludedObjectVersions", ))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.included_object_versions )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::start_element("IsEnabled"))?; 
writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.is_enabled )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.optional_fields { &InventoryOptionalFieldsSerializer::serialize(&mut writer, "OptionalFields", value)?; } InventoryScheduleSerializer::serialize(&mut writer, "Schedule", &obj.schedule)?; writer.write(xml::writer::XmlEvent::end_element()) } } struct InventoryConfigurationListDeserializer; impl InventoryConfigurationListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<InventoryConfiguration>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(InventoryConfigurationDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct InventoryDestination { /// <p>Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published.</p> pub s3_bucket_destination: InventoryS3BucketDestination, } struct InventoryDestinationDeserializer; impl InventoryDestinationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<InventoryDestination, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = InventoryDestination::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "S3BucketDestination" => { obj.s3_bucket_destination = try!(InventoryS3BucketDestinationDeserializer::deserialize( "S3BucketDestination", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct InventoryDestinationSerializer; impl InventoryDestinationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &InventoryDestination, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; InventoryS3BucketDestinationSerializer::serialize( &mut writer, "S3BucketDestination", &obj.s3_bucket_destination, )?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Contains the type of server-side encryption used to encrypt the inventory results.</p> #[derive(Default, Debug, Clone)] pub struct InventoryEncryption { /// <p>Specifies the use of SSE-KMS to encrypt delievered Inventory reports.</p> pub ssekms: Option<SSEKMS>, /// <p>Specifies the use of SSE-S3 to encrypt delievered Inventory reports.</p> pub sses3: Option<SSES3>, } struct InventoryEncryptionDeserializer; impl InventoryEncryptionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<InventoryEncryption, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = InventoryEncryption::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "SSE-KMS" => { obj.ssekms = Some(try!(SSEKMSDeserializer::deserialize("SSE-KMS", stack))); } "SSE-S3" => { obj.sses3 = Some(try!(SSES3Deserializer::deserialize("SSE-S3", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct InventoryEncryptionSerializer; impl InventoryEncryptionSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &InventoryEncryption, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.ssekms { &SSEKMSSerializer::serialize(&mut writer, "SSE-KMS", value)?; } if let Some(ref value) = obj.sses3 { &SSES3Serializer::serialize(&mut writer, "SSE-S3", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct InventoryFilter { /// <p>The prefix that an object must have to be included in the inventory results.</p> pub prefix: String, } struct InventoryFilterDeserializer; impl InventoryFilterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<InventoryFilter, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = InventoryFilter::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Prefix" => { obj.prefix = try!(PrefixDeserializer::deserialize("Prefix", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct InventoryFilterSerializer; impl InventoryFilterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &InventoryFilter, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Prefix"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.prefix )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct InventoryFormatDeserializer; impl InventoryFormatDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct InventoryFormatSerializer; impl InventoryFormatSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct InventoryFrequencyDeserializer; impl InventoryFrequencyDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct InventoryFrequencySerializer; impl InventoryFrequencySerializer { 
#[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct InventoryIdDeserializer; impl InventoryIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct InventoryIdSerializer; impl InventoryIdSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct InventoryIncludedObjectVersionsDeserializer; impl InventoryIncludedObjectVersionsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct InventoryIncludedObjectVersionsSerializer; impl InventoryIncludedObjectVersionsSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct 
InventoryOptionalFieldDeserializer; impl InventoryOptionalFieldDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct InventoryOptionalFieldSerializer; impl InventoryOptionalFieldSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct InventoryOptionalFieldsDeserializer; impl InventoryOptionalFieldsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "Field" { obj.push(try!(InventoryOptionalFieldDeserializer::deserialize( "Field", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } pub struct InventoryOptionalFieldsSerializer; impl InventoryOptionalFieldsSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<String>, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; for element in obj { InventoryOptionalFieldSerializer::serialize(writer, "Field", element)?; } writer.write(xml::writer::XmlEvent::end_element())?; Ok(()) } } #[derive(Default, Debug, Clone)] pub struct InventoryS3BucketDestination { /// <p>The ID of the account that owns the destination bucket.</p> pub account_id: Option<String>, /// <p>The Amazon resource name (ARN) of the bucket where inventory results will be published.</p> pub bucket: String, /// <p>Contains the type of server-side encryption used to encrypt the inventory results.</p> pub encryption: Option<InventoryEncryption>, /// <p>Specifies the output format of the inventory results.</p> pub format: String, /// <p>The prefix that is prepended to all inventory results.</p> pub prefix: Option<String>, } struct InventoryS3BucketDestinationDeserializer; impl InventoryS3BucketDestinationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<InventoryS3BucketDestination, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = InventoryS3BucketDestination::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. 
})) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "AccountId" => {
                        obj.account_id =
                            Some(try!(AccountIdDeserializer::deserialize("AccountId", stack)));
                    }
                    "Bucket" => {
                        obj.bucket = try!(BucketNameDeserializer::deserialize("Bucket", stack));
                    }
                    "Encryption" => {
                        obj.encryption = Some(try!(InventoryEncryptionDeserializer::deserialize(
                            "Encryption",
                            stack
                        )));
                    }
                    "Format" => {
                        obj.format =
                            try!(InventoryFormatDeserializer::deserialize("Format", stack));
                    }
                    "Prefix" => {
                        obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes an `InventoryS3BucketDestination` into XML events under the given element `name`.
pub struct InventoryS3BucketDestinationSerializer;
impl InventoryS3BucketDestinationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &InventoryS3BucketDestination,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.account_id {
            writer.write(xml::writer::XmlEvent::start_element("AccountId"))?;
            // BUG FIX: this write's Result was previously discarded (no `?`),
            // silently swallowing XML write errors. Propagate it.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Bucket"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.bucket
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.encryption {
            // Dropped the stray `&` in front of the call: it borrowed the unit result.
            InventoryEncryptionSerializer::serialize(&mut writer, "Encryption", value)?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Format"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.format
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.prefix {
            writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
            // BUG FIX: propagate the previously-discarded write Result.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct InventorySchedule {
    /// <p>Specifies how frequently inventory results are produced.</p>
    pub frequency: String,
}

struct InventoryScheduleDeserializer;
impl InventoryScheduleDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<InventorySchedule, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = InventorySchedule::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{ "Frequency" => { obj.frequency = try!(InventoryFrequencyDeserializer::deserialize( "Frequency", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct InventoryScheduleSerializer; impl InventoryScheduleSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &InventorySchedule, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Frequency"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.frequency )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct IsEnabledDeserializer; impl IsEnabledDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<bool, XmlParseError> { try!(start_element(tag_name, stack)); let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct IsEnabledSerializer; impl IsEnabledSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &bool, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct IsLatestDeserializer; impl IsLatestDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<bool, XmlParseError> { try!(start_element(tag_name, stack)); let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } struct 
IsTruncatedDeserializer;
impl IsTruncatedDeserializer {
    /// Reads a boolean `IsTruncated` element from the XML stream.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<bool, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): `unwrap` panics on non-boolean text; assumes the
        // service always emits "true"/"false" — confirm before hardening.
        let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct JSONInput {
    /// <p>The type of JSON. Valid values: Document, Lines.</p>
    pub type_: Option<String>,
}

/// Serializes a `JSONInput` as XML.
pub struct JSONInputSerializer;
impl JSONInputSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &JSONInput,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.type_ {
            writer.write(xml::writer::XmlEvent::start_element("Type"))?;
            // BUGFIX: was missing `?`, dropping the write's Result.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct JSONOutput {
    /// <p>The value used to separate individual records in the output.</p>
    pub record_delimiter: Option<String>,
}

/// Serializes a `JSONOutput` as XML.
pub struct JSONOutputSerializer;
impl JSONOutputSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &JSONOutput,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.record_delimiter {
            writer.write(xml::writer::XmlEvent::start_element("RecordDelimiter"))?;
            // BUGFIX: was missing `?`, dropping the write's Result.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Serializes a JSON `Type` string as XML.
pub struct JSONTypeSerializer;
impl JSONTypeSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut
EventWriter<W>,
name: &str,
obj: &String,
) -> Result<(), xml::writer::Error>
where
    W: Write,
{
    writer.write(xml::writer::XmlEvent::start_element(name))?;
    writer.write(xml::writer::XmlEvent::characters(&format!(
        "{value}",
        value = obj.to_string()
    )))?;
    writer.write(xml::writer::XmlEvent::end_element())
}
}

/// Serializes a KMS encryption context string as XML.
pub struct KMSContextSerializer;
impl KMSContextSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes an integer `KeyCount` element.
struct KeyCountDeserializer;
impl KeyCountDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): `unwrap` panics on non-numeric text — assumes the
        // service always sends a valid integer here.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Deserializes a `KeyMarker` string element.
struct KeyMarkerDeserializer;
impl KeyMarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `KeyMarker` string as XML.
pub struct KeyMarkerSerializer;
impl KeyMarkerSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a `KeyPrefixEquals` string element.
struct KeyPrefixEqualsDeserializer;
impl KeyPrefixEqualsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
stack: &mut T,
) -> Result<String, XmlParseError> {
    try!(start_element(tag_name, stack));
    let obj = try!(characters(stack));
    try!(end_element(tag_name, stack));
    Ok(obj)
}
}

/// Serializes a `KeyPrefixEquals` string as XML.
pub struct KeyPrefixEqualsSerializer;
impl KeyPrefixEqualsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a `LambdaFunctionArn` string element.
struct LambdaFunctionArnDeserializer;
impl LambdaFunctionArnDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a Lambda function ARN string as XML.
pub struct LambdaFunctionArnSerializer;
impl LambdaFunctionArnSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// <p>Container for specifying the AWS Lambda notification configuration.</p>
#[derive(Default, Debug, Clone)]
pub struct LambdaFunctionConfiguration {
    pub events: Vec<String>,
    pub filter: Option<NotificationConfigurationFilter>,
    pub id: Option<String>,
    /// <p>Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type.</p>
    pub lambda_function_arn: String,
}

/// Deserializes a `LambdaFunctionConfiguration` from XML events.
struct LambdaFunctionConfigurationDeserializer;
impl LambdaFunctionConfigurationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
) -> Result<LambdaFunctionConfiguration, XmlParseError> {
    try!(start_element(tag_name, stack));
    let mut obj = LambdaFunctionConfiguration::default();
    loop {
        let next_event = match stack.peek() {
            Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
            Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                DeserializerNext::Element(name.local_name.to_owned())
            }
            _ => DeserializerNext::Skip,
        };
        match next_event {
            DeserializerNext::Element(name) => match &name[..] {
                "Event" => {
                    obj.events = try!(EventListDeserializer::deserialize("Event", stack));
                }
                "Filter" => {
                    obj.filter = Some(try!(
                        NotificationConfigurationFilterDeserializer::deserialize(
                            "Filter", stack
                        )
                    ));
                }
                "Id" => {
                    obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack)));
                }
                // Note: the ARN is carried in a tag named "CloudFunction" on the wire.
                "CloudFunction" => {
                    obj.lambda_function_arn = try!(LambdaFunctionArnDeserializer::deserialize(
                        "CloudFunction",
                        stack
                    ));
                }
                _ => skip_tree(stack),
            },
            DeserializerNext::Close => break,
            DeserializerNext::Skip => {
                stack.next();
            }
        }
    }
    try!(end_element(tag_name, stack));
    Ok(obj)
}
}

/// Serializes a `LambdaFunctionConfiguration` as XML.
pub struct LambdaFunctionConfigurationSerializer;
impl LambdaFunctionConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LambdaFunctionConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        EventListSerializer::serialize(&mut writer, "Event", &obj.events)?;
        if let Some(ref value) = obj.filter {
            // CLEANUP: dropped the pointless leading `&` on this call
            // (it borrowed and discarded the unit result).
            NotificationConfigurationFilterSerializer::serialize(&mut writer, "Filter", value)?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("Id"))?;
            // BUGFIX: was missing `?`, dropping the write's Result.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::start_element("CloudFunction"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value =
obj.lambda_function_arn
)))?;
writer.write(xml::writer::XmlEvent::end_element())?;
writer.write(xml::writer::XmlEvent::end_element())
}
}

/// Deserializes repeated `LambdaFunctionConfiguration` sibling elements
/// (no wrapper element) into a `Vec`.
struct LambdaFunctionConfigurationListDeserializer;
impl LambdaFunctionConfigurationListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<LambdaFunctionConfiguration>, XmlParseError> {
        let mut obj = vec![];
        loop {
            // Keep consuming as long as the next start tag matches.
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name,
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(LambdaFunctionConfigurationDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}

/// Serializes each list element as its own element named `name`.
pub struct LambdaFunctionConfigurationListSerializer;
impl LambdaFunctionConfigurationListSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<LambdaFunctionConfiguration>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        for element in obj {
            LambdaFunctionConfigurationSerializer::serialize(writer, name, element)?;
        }
        Ok(())
    }
}

/// Deserializes a `LastModified` element (kept as a raw string).
struct LastModifiedDeserializer;
impl LastModifiedDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct LifecycleConfiguration {
    pub rules: Vec<Rule>,
}

/// Serializes a `LifecycleConfiguration` as XML.
pub struct LifecycleConfigurationSerializer;
impl LifecycleConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LifecycleConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        RulesSerializer::serialize(&mut writer, "Rule", &obj.rules)?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct LifecycleExpiration {
    /// <p>Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.</p>
    pub date: Option<String>,
    /// <p>Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.</p>
    pub days: Option<i64>,
    /// <p>Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired; if set to false the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.</p>
    pub expired_object_delete_marker: Option<bool>,
}

/// Deserializes a `LifecycleExpiration` from XML events.
struct LifecycleExpirationDeserializer;
impl LifecycleExpirationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<LifecycleExpiration, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = LifecycleExpiration::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
    "Date" => {
        obj.date = Some(try!(DateDeserializer::deserialize("Date", stack)));
    }
    "Days" => {
        obj.days = Some(try!(DaysDeserializer::deserialize("Days", stack)));
    }
    "ExpiredObjectDeleteMarker" => {
        obj.expired_object_delete_marker =
            Some(try!(ExpiredObjectDeleteMarkerDeserializer::deserialize(
                "ExpiredObjectDeleteMarker",
                stack
            )));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}

/// Serializes a `LifecycleExpiration` as XML.
pub struct LifecycleExpirationSerializer;
impl LifecycleExpirationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LifecycleExpiration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.date {
            writer.write(xml::writer::XmlEvent::start_element("Date"))?;
            // BUGFIX: the three `characters` writes below previously dropped
            // their Results (missing `?`), swallowing serialization errors.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.days {
            writer.write(xml::writer::XmlEvent::start_element("Days"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.expired_object_delete_marker {
            writer.write(xml::writer::XmlEvent::start_element(
                "ExpiredObjectDeleteMarker",
            ))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct LifecycleRule {
    pub abort_incomplete_multipart_upload: Option<AbortIncompleteMultipartUpload>,
    pub expiration: Option<LifecycleExpiration>,
    pub filter: Option<LifecycleRuleFilter>,
    /// <p>Unique identifier for the rule. The value cannot be longer than 255 characters.</p>
    pub id: Option<String>,
    pub noncurrent_version_expiration: Option<NoncurrentVersionExpiration>,
    pub noncurrent_version_transitions: Option<Vec<NoncurrentVersionTransition>>,
    /// <p>If &#39;Enabled&#39;, the rule is currently being applied. If &#39;Disabled&#39;, the rule is not currently being applied.</p>
    pub status: String,
    pub transitions: Option<Vec<Transition>>,
}

/// Deserializes a `LifecycleRule` from XML events.
struct LifecycleRuleDeserializer;
impl LifecycleRuleDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<LifecycleRule, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = LifecycleRule::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "AbortIncompleteMultipartUpload" => {
                        obj.abort_incomplete_multipart_upload = Some(try!(
                            AbortIncompleteMultipartUploadDeserializer::deserialize(
                                "AbortIncompleteMultipartUpload",
                                stack
                            )
                        ));
                    }
                    "Expiration" => {
                        obj.expiration = Some(try!(LifecycleExpirationDeserializer::deserialize(
                            "Expiration",
                            stack
                        )));
                    }
                    "Filter" => {
                        obj.filter = Some(try!(LifecycleRuleFilterDeserializer::deserialize(
                            "Filter", stack
                        )));
                    }
                    "ID" => {
                        obj.id = Some(try!(IDDeserializer::deserialize("ID", stack)));
                    }
                    "NoncurrentVersionExpiration" => {
                        obj.noncurrent_version_expiration =
                            Some(try!(NoncurrentVersionExpirationDeserializer::deserialize(
                                "NoncurrentVersionExpiration",
                                stack
                            )));
                    }
                    "NoncurrentVersionTransition" => {
                        obj.noncurrent_version_transitions = Some(try!(
                            NoncurrentVersionTransitionListDeserializer::deserialize(
                                "NoncurrentVersionTransition",
                                stack
                            )
                        ));
                    }
                    "Status" => {
                        obj.status =
                            try!(ExpirationStatusDeserializer::deserialize("Status", stack));
                    }
                    "Transition" => {
                        obj.transitions = Some(try!(TransitionListDeserializer::deserialize(
                            "Transition",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `LifecycleRule` as XML.
pub struct LifecycleRuleSerializer;
impl LifecycleRuleSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LifecycleRule,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        // CLEANUP: removed the pointless leading `&` from the helper calls
        // below (they borrowed and discarded the unit result).
        if let Some(ref value) = obj.abort_incomplete_multipart_upload {
            AbortIncompleteMultipartUploadSerializer::serialize(
                &mut writer,
                "AbortIncompleteMultipartUpload",
                value,
            )?;
        }
        if let Some(ref value) = obj.expiration {
            LifecycleExpirationSerializer::serialize(&mut writer, "Expiration", value)?;
        }
        if let Some(ref value) = obj.filter {
            LifecycleRuleFilterSerializer::serialize(&mut writer, "Filter", value)?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("ID"))?;
            // BUGFIX: was missing `?`, dropping the write's Result.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.noncurrent_version_expiration {
            NoncurrentVersionExpirationSerializer::serialize(
                &mut writer,
                "NoncurrentVersionExpiration",
                value,
            )?;
        }
        if let Some(ref value) = obj.noncurrent_version_transitions {
            NoncurrentVersionTransitionListSerializer::serialize(
                &mut writer,
                "NoncurrentVersionTransition",
                value,
            )?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Status"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.status
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.transitions {
            TransitionListSerializer::serialize(&mut writer, "Transition", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// <p>This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the And operator.</p>
#[derive(Default, Debug, Clone)]
pub struct LifecycleRuleAndOperator {
    pub prefix: Option<String>,
    /// <p>All of these tags must exist in the object&#39;s tag set in order for the rule to apply.</p>
    pub tags: Option<Vec<Tag>>,
}

/// Deserializes a `LifecycleRuleAndOperator` from XML events.
struct LifecycleRuleAndOperatorDeserializer;
impl LifecycleRuleAndOperatorDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<LifecycleRuleAndOperator, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = LifecycleRuleAndOperator::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement {
                    ref name, ..
})) => {
    DeserializerNext::Element(name.local_name.to_owned())
}
_ => DeserializerNext::Skip,
};
match next_event {
    DeserializerNext::Element(name) => match &name[..] {
        "Prefix" => {
            obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
        }
        "Tag" => {
            obj.tags = Some(try!(TagSetDeserializer::deserialize("Tag", stack)));
        }
        _ => skip_tree(stack),
    },
    DeserializerNext::Close => break,
    DeserializerNext::Skip => {
        stack.next();
    }
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}

/// Serializes a `LifecycleRuleAndOperator` as XML.
pub struct LifecycleRuleAndOperatorSerializer;
impl LifecycleRuleAndOperatorSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LifecycleRuleAndOperator,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.prefix {
            writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
            // BUGFIX: was missing `?`, dropping the write's Result.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.tags {
            // CLEANUP: dropped the pointless leading `&` on this call.
            TagSetSerializer::serialize(&mut writer, "Tag", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// <p>The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified.</p>
#[derive(Default, Debug, Clone)]
pub struct LifecycleRuleFilter {
    pub and: Option<LifecycleRuleAndOperator>,
    /// <p>Prefix identifying one or more objects to which the rule applies.</p>
    pub prefix: Option<String>,
    /// <p>This tag must exist in the object&#39;s tag set in order for the rule to apply.</p>
    pub tag: Option<Tag>,
}

/// Deserializes a `LifecycleRuleFilter` from XML events.
struct LifecycleRuleFilterDeserializer;
impl LifecycleRuleFilterDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<LifecycleRuleFilter, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = LifecycleRuleFilter::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "And" => {
                        obj.and = Some(try!(LifecycleRuleAndOperatorDeserializer::deserialize(
                            "And", stack
                        )));
                    }
                    "Prefix" => {
                        obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
                    }
                    "Tag" => {
                        obj.tag = Some(try!(TagDeserializer::deserialize("Tag", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `LifecycleRuleFilter` as XML.
pub struct LifecycleRuleFilterSerializer;
impl LifecycleRuleFilterSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &LifecycleRuleFilter,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.and {
            // CLEANUP: dropped the pointless leading `&` on this call.
            LifecycleRuleAndOperatorSerializer::serialize(&mut writer, "And", value)?;
        }
        if let Some(ref value) = obj.prefix {
            writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
            // BUGFIX: was missing `?`, dropping the write's Result.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.tag {
            TagSerializer::serialize(&mut writer, "Tag", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes repeated `LifecycleRule` sibling elements into a `Vec`.
struct LifecycleRulesDeserializer;
impl LifecycleRulesDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<LifecycleRule>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement {
                    ref name, ..
})) => name.local_name == tag_name,
_ => false,
};
if consume_next_tag {
    obj.push(try!(LifecycleRuleDeserializer::deserialize(
        tag_name, stack
    )));
} else {
    break;
}
}
Ok(obj)
}
}

/// Serializes each `LifecycleRule` as its own element named `name`.
pub struct LifecycleRulesSerializer;
impl LifecycleRulesSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<LifecycleRule>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        for element in obj {
            LifecycleRuleSerializer::serialize(writer, name, element)?;
        }
        Ok(())
    }
}

#[derive(Default, Debug, Clone)]
pub struct ListBucketAnalyticsConfigurationsOutput {
    /// <p>The list of analytics configurations for a bucket.</p>
    pub analytics_configuration_list: Option<Vec<AnalyticsConfiguration>>,
    /// <p>The ContinuationToken that represents where this request began.</p>
    pub continuation_token: Option<String>,
    /// <p>Indicates whether the returned list of analytics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.</p>
    pub is_truncated: Option<bool>,
    /// <p>NextContinuationToken is sent when isTruncated is true, which indicates that there are more analytics configurations to list. The next request must include this NextContinuationToken. The token is obfuscated and is not a usable value.</p>
    pub next_continuation_token: Option<String>,
}

/// Deserializes a `ListBucketAnalyticsConfigurationsOutput` from XML events.
struct ListBucketAnalyticsConfigurationsOutputDeserializer;
impl ListBucketAnalyticsConfigurationsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListBucketAnalyticsConfigurationsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListBucketAnalyticsConfigurationsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement {
                    ref name, ..
})) => {
    DeserializerNext::Element(name.local_name.to_owned())
}
_ => DeserializerNext::Skip,
};
match next_event {
    DeserializerNext::Element(name) => match &name[..] {
        "AnalyticsConfiguration" => {
            obj.analytics_configuration_list =
                Some(try!(AnalyticsConfigurationListDeserializer::deserialize(
                    "AnalyticsConfiguration",
                    stack
                )));
        }
        "ContinuationToken" => {
            obj.continuation_token = Some(try!(TokenDeserializer::deserialize(
                "ContinuationToken",
                stack
            )));
        }
        "IsTruncated" => {
            obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
                "IsTruncated",
                stack
            )));
        }
        "NextContinuationToken" => {
            obj.next_continuation_token = Some(try!(
                NextTokenDeserializer::deserialize("NextContinuationToken", stack)
            ));
        }
        _ => skip_tree(stack),
    },
    DeserializerNext::Close => break,
    DeserializerNext::Skip => {
        stack.next();
    }
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}

#[derive(Default, Debug, Clone)]
pub struct ListBucketAnalyticsConfigurationsRequest {
    /// <p>The name of the bucket from which analytics configurations are retrieved.</p>
    pub bucket: String,
    /// <p>The ContinuationToken that represents a placeholder from where this request should begin.</p>
    pub continuation_token: Option<String>,
}

#[derive(Default, Debug, Clone)]
pub struct ListBucketInventoryConfigurationsOutput {
    /// <p>If sent in the request, the marker that is used as a starting point for this inventory configuration list response.</p>
    pub continuation_token: Option<String>,
    /// <p>The list of inventory configurations for a bucket.</p>
    pub inventory_configuration_list: Option<Vec<InventoryConfiguration>>,
    /// <p>Indicates whether the returned list of inventory configurations is truncated in this response. A value of true indicates that the list is truncated.</p>
    pub is_truncated: Option<bool>,
    /// <p>The marker used to continue this inventory configuration listing. Use the NextContinuationToken from this response to continue the listing in a subsequent request. The continuation token is an opaque value that Amazon S3 understands.</p>
    pub next_continuation_token: Option<String>,
}

/// Deserializes a `ListBucketInventoryConfigurationsOutput` from XML events.
struct ListBucketInventoryConfigurationsOutputDeserializer;
impl ListBucketInventoryConfigurationsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListBucketInventoryConfigurationsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListBucketInventoryConfigurationsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "ContinuationToken" => {
                        obj.continuation_token = Some(try!(TokenDeserializer::deserialize(
                            "ContinuationToken",
                            stack
                        )));
                    }
                    "InventoryConfiguration" => {
                        obj.inventory_configuration_list =
                            Some(try!(InventoryConfigurationListDeserializer::deserialize(
                                "InventoryConfiguration",
                                stack
                            )));
                    }
                    "IsTruncated" => {
                        obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
                            "IsTruncated",
                            stack
                        )));
                    }
                    "NextContinuationToken" => {
                        obj.next_continuation_token = Some(try!(
                            NextTokenDeserializer::deserialize("NextContinuationToken", stack)
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct ListBucketInventoryConfigurationsRequest {
    /// <p>The name of the bucket containing the inventory configurations to retrieve.</p>
    pub bucket: String,
    /// <p>The marker used to continue an inventory configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.</p>
    pub continuation_token: Option<String>,
}

#[derive(Default, Debug, Clone)]
pub struct ListBucketMetricsConfigurationsOutput {
    /// <p>The marker that is used as a starting point for this metrics configuration list response. This value is present if it was sent in the request.</p>
    pub continuation_token: Option<String>,
    /// <p>Indicates whether the returned list of metrics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.</p>
    pub is_truncated: Option<bool>,
    /// <p>The list of metrics configurations for a bucket.</p>
    pub metrics_configuration_list: Option<Vec<MetricsConfiguration>>,
    /// <p>The marker used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.</p>
    pub next_continuation_token: Option<String>,
}

/// Deserializes a `ListBucketMetricsConfigurationsOutput` from XML events.
struct ListBucketMetricsConfigurationsOutputDeserializer;
impl ListBucketMetricsConfigurationsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListBucketMetricsConfigurationsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListBucketMetricsConfigurationsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
    "ContinuationToken" => {
        obj.continuation_token = Some(try!(TokenDeserializer::deserialize(
            "ContinuationToken",
            stack
        )));
    }
    "IsTruncated" => {
        obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
            "IsTruncated",
            stack
        )));
    }
    "MetricsConfiguration" => {
        obj.metrics_configuration_list =
            Some(try!(MetricsConfigurationListDeserializer::deserialize(
                "MetricsConfiguration",
                stack
            )));
    }
    "NextContinuationToken" => {
        obj.next_continuation_token = Some(try!(
            NextTokenDeserializer::deserialize("NextContinuationToken", stack)
        ));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}

#[derive(Default, Debug, Clone)]
pub struct ListBucketMetricsConfigurationsRequest {
    /// <p>The name of the bucket containing the metrics configurations to retrieve.</p>
    pub bucket: String,
    /// <p>The marker that is used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.</p>
    pub continuation_token: Option<String>,
}

#[derive(Default, Debug, Clone)]
pub struct ListBucketsOutput {
    pub buckets: Option<Vec<Bucket>>,
    pub owner: Option<Owner>,
}

/// Deserializes a `ListBucketsOutput` from XML events.
struct ListBucketsOutputDeserializer;
impl ListBucketsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListBucketsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListBucketsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
    "Buckets" => {
        obj.buckets = Some(try!(BucketsDeserializer::deserialize("Buckets", stack)));
    }
    "Owner" => {
        obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack)));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}

#[derive(Default, Debug, Clone)]
pub struct ListMultipartUploadsOutput {
    /// <p>Name of the bucket to which the multipart upload was initiated.</p>
    pub bucket: Option<String>,
    pub common_prefixes: Option<Vec<CommonPrefix>>,
    pub delimiter: Option<String>,
    /// <p>Encoding type used by Amazon S3 to encode object keys in the response.</p>
    pub encoding_type: Option<String>,
    /// <p>Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by max uploads.</p>
    pub is_truncated: Option<bool>,
    /// <p>The key at or after which the listing began.</p>
    pub key_marker: Option<String>,
    /// <p>Maximum number of multipart uploads that could have been included in the response.</p>
    pub max_uploads: Option<i64>,
    /// <p>When a list is truncated, this element specifies the value that should be used for the key-marker request parameter in a subsequent request.</p>
    pub next_key_marker: Option<String>,
    /// <p>When a list is truncated, this element specifies the value that should be used for the upload-id-marker request parameter in a subsequent request.</p>
    pub next_upload_id_marker: Option<String>,
    /// <p>When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix.</p>
    pub prefix: Option<String>,
    /// <p>Upload ID after which listing began.</p>
    pub upload_id_marker: Option<String>,
    pub uploads: Option<Vec<MultipartUpload>>,
}

/// Deserializes a `ListMultipartUploadsOutput` from XML events.
struct ListMultipartUploadsOutputDeserializer;
impl ListMultipartUploadsOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ListMultipartUploadsOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ListMultipartUploadsOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Bucket" => {
                        obj.bucket =
                            Some(try!(BucketNameDeserializer::deserialize("Bucket", stack)));
                    }
                    "CommonPrefixes" => {
                        obj.common_prefixes = Some(try!(
                            CommonPrefixListDeserializer::deserialize("CommonPrefixes", stack)
                        ));
                    }
                    "Delimiter" => {
                        obj.delimiter =
                            Some(try!(DelimiterDeserializer::deserialize("Delimiter", stack)));
                    }
                    "EncodingType" => {
                        obj.encoding_type = Some(try!(EncodingTypeDeserializer::deserialize(
                            "EncodingType",
                            stack
                        )));
                    }
                    "IsTruncated" => {
                        obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize(
                            "IsTruncated",
                            stack
                        )));
                    }
                    "KeyMarker" => {
                        obj.key_marker =
                            Some(try!(KeyMarkerDeserializer::deserialize("KeyMarker", stack)));
                    }
                    "MaxUploads" => {
                        obj.max_uploads = Some(try!(MaxUploadsDeserializer::deserialize(
                            "MaxUploads",
                            stack
                        )));
                    }
                    "NextKeyMarker" => {
                        obj.next_key_marker = Some(try!(NextKeyMarkerDeserializer::deserialize(
                            "NextKeyMarker",
                            stack
                        )));
                    }
                    "NextUploadIdMarker" => {
                        obj.next_upload_id_marker =
                            Some(try!(NextUploadIdMarkerDeserializer::deserialize(
                                "NextUploadIdMarker",
                                stack
                            )));
                    }
                    "Prefix" => {
                        obj.prefix =
Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } "UploadIdMarker" => { obj.upload_id_marker = Some(try!(UploadIdMarkerDeserializer::deserialize( "UploadIdMarker", stack ))); } "Upload" => { obj.uploads = Some(try!(MultipartUploadListDeserializer::deserialize( "Upload", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct ListMultipartUploadsRequest { pub bucket: String, /// <p>Character you use to group keys.</p> pub delimiter: Option<String>, pub encoding_type: Option<String>, /// <p>Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.</p> pub key_marker: Option<String>, /// <p>Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the response body. 1,000 is the maximum number of uploads that can be returned in a response.</p> pub max_uploads: Option<i64>, /// <p>Lists in-progress uploads only for those keys that begin with the specified prefix.</p> pub prefix: Option<String>, /// <p>Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored.</p> pub upload_id_marker: Option<String>, } #[derive(Default, Debug, Clone)] pub struct ListObjectVersionsOutput { pub common_prefixes: Option<Vec<CommonPrefix>>, pub delete_markers: Option<Vec<DeleteMarkerEntry>>, pub delimiter: Option<String>, /// <p>Encoding type used by Amazon S3 to encode object keys in the response.</p> pub encoding_type: Option<String>, /// <p>A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria. 
If your results were truncated, you can make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker response parameters as a starting place in another request to return the rest of the results.</p> pub is_truncated: Option<bool>, /// <p>Marks the last Key returned in a truncated response.</p> pub key_marker: Option<String>, pub max_keys: Option<i64>, pub name: Option<String>, /// <p>Use this value for the key marker request parameter in a subsequent request.</p> pub next_key_marker: Option<String>, /// <p>Use this value for the next version id marker parameter in a subsequent request.</p> pub next_version_id_marker: Option<String>, pub prefix: Option<String>, pub version_id_marker: Option<String>, pub versions: Option<Vec<ObjectVersion>>, } struct ListObjectVersionsOutputDeserializer; impl ListObjectVersionsOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ListObjectVersionsOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ListObjectVersionsOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "CommonPrefixes" => { obj.common_prefixes = Some(try!( CommonPrefixListDeserializer::deserialize("CommonPrefixes", stack) )); } "DeleteMarker" => { obj.delete_markers = Some(try!(DeleteMarkersDeserializer::deserialize( "DeleteMarker", stack ))); } "Delimiter" => { obj.delimiter = Some(try!(DelimiterDeserializer::deserialize("Delimiter", stack))); } "EncodingType" => { obj.encoding_type = Some(try!(EncodingTypeDeserializer::deserialize( "EncodingType", stack ))); } "IsTruncated" => { obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize( "IsTruncated", stack ))); } "KeyMarker" => { obj.key_marker = Some(try!(KeyMarkerDeserializer::deserialize("KeyMarker", stack))); } "MaxKeys" => { obj.max_keys = Some(try!(MaxKeysDeserializer::deserialize("MaxKeys", stack))); } "Name" => { obj.name = Some(try!(BucketNameDeserializer::deserialize("Name", stack))); } "NextKeyMarker" => { obj.next_key_marker = Some(try!(NextKeyMarkerDeserializer::deserialize( "NextKeyMarker", stack ))); } "NextVersionIdMarker" => { obj.next_version_id_marker = Some(try!(NextVersionIdMarkerDeserializer::deserialize( "NextVersionIdMarker", stack ))); } "Prefix" => { obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } "VersionIdMarker" => { obj.version_id_marker = Some(try!( VersionIdMarkerDeserializer::deserialize("VersionIdMarker", stack) )); } "Version" => { obj.versions = Some(try!(ObjectVersionListDeserializer::deserialize( "Version", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct ListObjectVersionsRequest { pub bucket: String, /// <p>A delimiter is a character you use to group keys.</p> pub delimiter: Option<String>, pub encoding_type: Option<String>, /// <p>Specifies the key to start with when listing objects in a bucket.</p> pub key_marker: Option<String>, /// <p>Sets the maximum number of keys 
returned in the response. The response might contain fewer keys but will never contain more.</p> pub max_keys: Option<i64>, /// <p>Limits the response to keys that begin with the specified prefix.</p> pub prefix: Option<String>, /// <p>Specifies the object version you want to start listing from.</p> pub version_id_marker: Option<String>, } #[derive(Default, Debug, Clone)] pub struct ListObjectsOutput { pub common_prefixes: Option<Vec<CommonPrefix>>, pub contents: Option<Vec<Object>>, pub delimiter: Option<String>, /// <p>Encoding type used by Amazon S3 to encode object keys in the response.</p> pub encoding_type: Option<String>, /// <p>A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.</p> pub is_truncated: Option<bool>, pub marker: Option<String>, pub max_keys: Option<i64>, pub name: Option<String>, /// <p>When response is truncated (the IsTruncated element value in the response is true), you can use the key name in this field as marker in the subsequent request to get next set of objects. Amazon S3 lists objects in alphabetical order Note: This element is returned only if you have delimiter request parameter specified. If response does not include the NextMaker and it is truncated, you can use the value of the last Key in the response as the marker in the subsequent request to get the next set of object keys.</p> pub next_marker: Option<String>, pub prefix: Option<String>, } struct ListObjectsOutputDeserializer; impl ListObjectsOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ListObjectsOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ListObjectsOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "CommonPrefixes" => { obj.common_prefixes = Some(try!( CommonPrefixListDeserializer::deserialize("CommonPrefixes", stack) )); } "Contents" => { obj.contents = Some(try!(ObjectListDeserializer::deserialize("Contents", stack))); } "Delimiter" => { obj.delimiter = Some(try!(DelimiterDeserializer::deserialize("Delimiter", stack))); } "EncodingType" => { obj.encoding_type = Some(try!(EncodingTypeDeserializer::deserialize( "EncodingType", stack ))); } "IsTruncated" => { obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize( "IsTruncated", stack ))); } "Marker" => { obj.marker = Some(try!(MarkerDeserializer::deserialize("Marker", stack))); } "MaxKeys" => { obj.max_keys = Some(try!(MaxKeysDeserializer::deserialize("MaxKeys", stack))); } "Name" => { obj.name = Some(try!(BucketNameDeserializer::deserialize("Name", stack))); } "NextMarker" => { obj.next_marker = Some(try!(NextMarkerDeserializer::deserialize( "NextMarker", stack ))); } "Prefix" => { obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct ListObjectsRequest { pub bucket: String, /// <p>A delimiter is a character you use to group keys.</p> pub delimiter: Option<String>, pub encoding_type: Option<String>, /// <p>Specifies the key to start with when listing objects in a bucket.</p> pub marker: Option<String>, /// <p>Sets the maximum number of keys returned in the response. 
The response might contain fewer keys but will never contain more.</p> pub max_keys: Option<i64>, /// <p>Limits the response to keys that begin with the specified prefix.</p> pub prefix: Option<String>, /// <p>Confirms that the requester knows that she or he will be charged for the list objects request. Bucket owners need not specify this parameter in their requests.</p> pub request_payer: Option<String>, } #[derive(Default, Debug, Clone)] pub struct ListObjectsV2Output { /// <p>CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by delimiter</p> pub common_prefixes: Option<Vec<CommonPrefix>>, /// <p>Metadata about each object returned.</p> pub contents: Option<Vec<Object>>, /// <p>ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key</p> pub continuation_token: Option<String>, /// <p>A delimiter is a character you use to group keys.</p> pub delimiter: Option<String>, /// <p>Encoding type used by Amazon S3 to encode object keys in the response.</p> pub encoding_type: Option<String>, /// <p>A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.</p> pub is_truncated: Option<bool>, /// <p>KeyCount is the number of keys returned with this request. KeyCount will always be less than equals to MaxKeys field. Say you ask for 50 keys, your result will include less than equals 50 keys</p> pub key_count: Option<i64>, /// <p>Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.</p> pub max_keys: Option<i64>, /// <p>Name of the bucket to list.</p> pub name: Option<String>, /// <p>NextContinuationToken is sent when isTruncated is true which means there are more keys in the bucket that can be listed. The next list requests to Amazon S3 can be continued with this NextContinuationToken. 
NextContinuationToken is obfuscated and is not a real key</p> pub next_continuation_token: Option<String>, /// <p>Limits the response to keys that begin with the specified prefix.</p> pub prefix: Option<String>, /// <p>StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket</p> pub start_after: Option<String>, } struct ListObjectsV2OutputDeserializer; impl ListObjectsV2OutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ListObjectsV2Output, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ListObjectsV2Output::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "CommonPrefixes" => { obj.common_prefixes = Some(try!( CommonPrefixListDeserializer::deserialize("CommonPrefixes", stack) )); } "Contents" => { obj.contents = Some(try!(ObjectListDeserializer::deserialize("Contents", stack))); } "ContinuationToken" => { obj.continuation_token = Some(try!(TokenDeserializer::deserialize( "ContinuationToken", stack ))); } "Delimiter" => { obj.delimiter = Some(try!(DelimiterDeserializer::deserialize("Delimiter", stack))); } "EncodingType" => { obj.encoding_type = Some(try!(EncodingTypeDeserializer::deserialize( "EncodingType", stack ))); } "IsTruncated" => { obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize( "IsTruncated", stack ))); } "KeyCount" => { obj.key_count = Some(try!(KeyCountDeserializer::deserialize("KeyCount", stack))); } "MaxKeys" => { obj.max_keys = Some(try!(MaxKeysDeserializer::deserialize("MaxKeys", stack))); } "Name" => { obj.name = Some(try!(BucketNameDeserializer::deserialize("Name", stack))); } "NextContinuationToken" => { obj.next_continuation_token = Some(try!( NextTokenDeserializer::deserialize("NextContinuationToken", stack) )); } "Prefix" => { obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack))); } "StartAfter" => { obj.start_after = Some(try!(StartAfterDeserializer::deserialize( "StartAfter", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct ListObjectsV2Request { /// <p>Name of the bucket to list.</p> pub bucket: String, /// <p>ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. 
ContinuationToken is obfuscated and is not a real key</p> pub continuation_token: Option<String>, /// <p>A delimiter is a character you use to group keys.</p> pub delimiter: Option<String>, /// <p>Encoding type used by Amazon S3 to encode object keys in the response.</p> pub encoding_type: Option<String>, /// <p>The owner field is not present in listV2 by default, if you want to return owner field with each key in the result then set the fetch owner field to true</p> pub fetch_owner: Option<bool>, /// <p>Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.</p> pub max_keys: Option<i64>, /// <p>Limits the response to keys that begin with the specified prefix.</p> pub prefix: Option<String>, /// <p>Confirms that the requester knows that she or he will be charged for the list objects request in V2 style. Bucket owners need not specify this parameter in their requests.</p> pub request_payer: Option<String>, /// <p>StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. 
StartAfter can be any key in the bucket</p> pub start_after: Option<String>, } #[derive(Default, Debug, Clone)] pub struct ListPartsOutput { /// <p>Date when multipart upload will become eligible for abort operation by lifecycle.</p> pub abort_date: Option<String>, /// <p>Id of the lifecycle rule that makes a multipart upload eligible for abort operation.</p> pub abort_rule_id: Option<String>, /// <p>Name of the bucket to which the multipart upload was initiated.</p> pub bucket: Option<String>, /// <p>Identifies who initiated the multipart upload.</p> pub initiator: Option<Initiator>, /// <p>Indicates whether the returned list of parts is truncated.</p> pub is_truncated: Option<bool>, /// <p>Object key for which the multipart upload was initiated.</p> pub key: Option<String>, /// <p>Maximum number of parts that were allowed in the response.</p> pub max_parts: Option<i64>, /// <p>When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request.</p> pub next_part_number_marker: Option<i64>, pub owner: Option<Owner>, /// <p>Part number after which listing begins.</p> pub part_number_marker: Option<i64>, pub parts: Option<Vec<Part>>, pub request_charged: Option<String>, /// <p>The class of storage used to store the object.</p> pub storage_class: Option<String>, /// <p>Upload ID identifying the multipart upload whose parts are being listed.</p> pub upload_id: Option<String>, } struct ListPartsOutputDeserializer; impl ListPartsOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ListPartsOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ListPartsOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Bucket" => { obj.bucket = Some(try!(BucketNameDeserializer::deserialize("Bucket", stack))); } "Initiator" => { obj.initiator = Some(try!(InitiatorDeserializer::deserialize("Initiator", stack))); } "IsTruncated" => { obj.is_truncated = Some(try!(IsTruncatedDeserializer::deserialize( "IsTruncated", stack ))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "MaxParts" => { obj.max_parts = Some(try!(MaxPartsDeserializer::deserialize("MaxParts", stack))); } "NextPartNumberMarker" => { obj.next_part_number_marker = Some(try!(NextPartNumberMarkerDeserializer::deserialize( "NextPartNumberMarker", stack ))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } "PartNumberMarker" => { obj.part_number_marker = Some(try!( PartNumberMarkerDeserializer::deserialize("PartNumberMarker", stack) )); } "Part" => { obj.parts = Some(try!(PartsDeserializer::deserialize("Part", stack))); } "StorageClass" => { obj.storage_class = Some(try!(StorageClassDeserializer::deserialize( "StorageClass", stack ))); } "UploadId" => { obj.upload_id = Some(try!(MultipartUploadIdDeserializer::deserialize( "UploadId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct ListPartsRequest { pub bucket: String, pub key: String, /// <p>Sets the maximum number of parts to return.</p> pub max_parts: Option<i64>, /// <p>Specifies the part after which listing should begin. 
Only parts with higher part numbers will be listed.</p> pub part_number_marker: Option<i64>, pub request_payer: Option<String>, /// <p>Upload ID identifying the multipart upload whose parts are being listed.</p> pub upload_id: String, } struct LocationDeserializer; impl LocationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct LocationPrefixSerializer; impl LocationPrefixSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for logging information. Presence of this element indicates that logging is enabled. Parameters TargetBucket and TargetPrefix are required in this case.</p> #[derive(Default, Debug, Clone)] pub struct LoggingEnabled { /// <p>Specifies the bucket where you want Amazon S3 to store server access logs. You can have your logs delivered to any bucket that you own, including the same bucket that is being logged. You can also configure multiple buckets to deliver their logs to the same target bucket. 
In this case you should choose a different TargetPrefix for each source bucket so that the delivered log files can be distinguished by key.</p> pub target_bucket: String, pub target_grants: Option<Vec<TargetGrant>>, /// <p>This element lets you specify a prefix for the keys that the log files will be stored under.</p> pub target_prefix: String, } struct LoggingEnabledDeserializer; impl LoggingEnabledDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<LoggingEnabled, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = LoggingEnabled::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "TargetBucket" => { obj.target_bucket = try!(TargetBucketDeserializer::deserialize("TargetBucket", stack)); } "TargetGrants" => { obj.target_grants = Some(try!(TargetGrantsDeserializer::deserialize( "TargetGrants", stack ))); } "TargetPrefix" => { obj.target_prefix = try!(TargetPrefixDeserializer::deserialize("TargetPrefix", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct LoggingEnabledSerializer; impl LoggingEnabledSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &LoggingEnabled, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("TargetBucket"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.target_bucket )))?; writer.write(xml::writer::XmlEvent::end_element())?; if 
let Some(ref value) = obj.target_grants {
            // Optional grant list for the log-delivery target bucket.
            // FIX: dropped the needless `&` borrow of the unit result
            // (clippy::needless_borrow); `?` already propagates the error.
            TargetGrantsSerializer::serialize(&mut writer, "TargetGrants", value)?;
        }
        writer.write(xml::writer::XmlEvent::start_element("TargetPrefix"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.target_prefix
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Serializes an MFA-delete status string ("Enabled"/"Disabled") as an XML element.
pub struct MFADeleteSerializer;
impl MFADeleteSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes the text content of an MFA-delete status element.
struct MFADeleteStatusDeserializer;
impl MFADeleteStatusDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Deserializes a `Marker` element into its string content.
struct MarkerDeserializer;
impl MarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `Marker` string as an XML element.
pub struct MarkerSerializer;
impl MarkerSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a `MaxAgeSeconds` element into an i64.
struct MaxAgeSecondsDeserializer;
impl MaxAgeSecondsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): `unwrap()` panics on non-numeric element content — TODO
        // confirm whether this should map into `XmlParseError` instead.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `MaxAgeSeconds` i64 as an XML element.
pub struct MaxAgeSecondsSerializer;
impl MaxAgeSecondsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a `MaxKeys` element into an i64.
struct MaxKeysDeserializer;
impl MaxKeysDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): `unwrap()` panics on non-numeric element content.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `MaxKeys` i64 as an XML element.
pub struct MaxKeysSerializer;
impl MaxKeysSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a `MaxParts` element into an i64.
struct MaxPartsDeserializer;
impl MaxPartsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): `unwrap()` panics on non-numeric element content.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `MaxParts` i64 as an XML element.
pub struct MaxPartsSerializer;
impl MaxPartsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(),
xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a `MaxUploads` element into an i64.
struct MaxUploadsDeserializer;
impl MaxUploadsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): `unwrap()` panics on non-numeric element content — TODO
        // confirm whether this should map into `XmlParseError` instead.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `MaxUploads` i64 as an XML element.
pub struct MaxUploadsSerializer;
impl MaxUploadsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a `Message` element into its string content.
struct MessageDeserializer;
impl MessageDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// <p>A metadata key-value pair to store with an object.</p>
#[derive(Default, Debug, Clone)]
pub struct MetadataEntry {
    pub name: Option<String>,
    pub value: Option<String>,
}

/// Serializes a `MetadataEntry` as `<Name>`/`<Value>` child elements.
pub struct MetadataEntrySerializer;
impl MetadataEntrySerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &MetadataEntry,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.name {
            writer.write(xml::writer::XmlEvent::start_element("Name"))?;
            // BUG FIX: the generated code dropped the write Result here
            // (missing `?`), silently ignoring write failures.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.value {
            writer.write(xml::writer::XmlEvent::start_element("Value"))?;
            // BUG FIX: missing `?` as above; propagate the write error.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Serializes a metadata key string as an XML element.
pub struct MetadataKeySerializer;
impl MetadataKeySerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Serializes a metadata value string as an XML element.
pub struct MetadataValueSerializer;
impl MetadataValueSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct MetricsAndOperator {
    /// <p>The prefix used when evaluating an AND predicate.</p>
    pub prefix: Option<String>,
    /// <p>The list of tags used when evaluating an AND predicate.</p>
    pub tags: Option<Vec<Tag>>,
}

struct MetricsAndOperatorDeserializer;
impl MetricsAndOperatorDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<MetricsAndOperator, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = MetricsAndOperator::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, ..
})) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Prefix" => {
                        obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
                    }
                    "Tag" => {
                        obj.tags = Some(try!(TagSetDeserializer::deserialize("Tag", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `MetricsAndOperator` conjunction (`Prefix` + `Tag` list) to XML.
pub struct MetricsAndOperatorSerializer;
impl MetricsAndOperatorSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &MetricsAndOperator,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.prefix {
            writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
            // BUG FIX: the generated code dropped the write Result here
            // (missing `?`), silently ignoring write failures.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.tags {
            // FIX: dropped the needless `&` borrow of the unit result
            // (clippy::needless_borrow).
            TagSetSerializer::serialize(&mut writer, "Tag", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct MetricsConfiguration {
    /// <p>Specifies a metrics configuration filter. The metrics configuration will only include objects that meet the filter&#39;s criteria. A filter must be a prefix, a tag, or a conjunction (MetricsAndOperator).</p>
    pub filter: Option<MetricsFilter>,
    /// <p>The ID used to identify the metrics configuration.</p>
    pub id: String,
}

struct MetricsConfigurationDeserializer;
impl MetricsConfigurationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<MetricsConfiguration, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = MetricsConfiguration::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Filter" => {
                        obj.filter =
                            Some(try!(MetricsFilterDeserializer::deserialize("Filter", stack)));
                    }
                    "Id" => {
                        obj.id = try!(MetricsIdDeserializer::deserialize("Id", stack));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `MetricsConfiguration` (optional `Filter` + required `Id`) to XML.
pub struct MetricsConfigurationSerializer;
impl MetricsConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &MetricsConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.filter {
            // FIX: dropped the needless `&` borrow of the unit result
            // (clippy::needless_borrow).
            MetricsFilterSerializer::serialize(&mut writer, "Filter", value)?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Id"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.id
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct MetricsConfigurationListDeserializer;
impl MetricsConfigurationListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T:
Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<MetricsConfiguration>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(MetricsConfigurationDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct MetricsFilter { /// <p>A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.</p> pub and: Option<MetricsAndOperator>, /// <p>The prefix used when evaluating a metrics filter.</p> pub prefix: Option<String>, /// <p>The tag used when evaluating a metrics filter.</p> pub tag: Option<Tag>, } struct MetricsFilterDeserializer; impl MetricsFilterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<MetricsFilter, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = MetricsFilter::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
                    "And" => {
                        obj.and = Some(try!(MetricsAndOperatorDeserializer::deserialize(
                            "And", stack
                        )));
                    }
                    "Prefix" => {
                        obj.prefix = Some(try!(PrefixDeserializer::deserialize("Prefix", stack)));
                    }
                    "Tag" => {
                        obj.tag = Some(try!(TagDeserializer::deserialize("Tag", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `MetricsFilter` (an `And` conjunction, a `Prefix`, or a `Tag`)
/// as the XML element `name`. Only the fields that are `Some` are emitted.
pub struct MetricsFilterSerializer;
impl MetricsFilterSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &MetricsFilter,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.and {
            // Dropped the useless leading `&` — serialize returns `()` on success.
            MetricsAndOperatorSerializer::serialize(&mut writer, "And", value)?;
        }
        if let Some(ref value) = obj.prefix {
            writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
            // Fixed: propagate the write error with `?` — previously this Result
            // was silently discarded while the sibling writes used `?`.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.tag {
            TagSerializer::serialize(&mut writer, "Tag", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a metrics configuration ID as plain element text.
struct MetricsIdDeserializer;
impl MetricsIdDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a metrics configuration ID (a plain `String`) as the XML element `name`.
pub struct MetricsIdSerializer;
impl MetricsIdSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default,
Debug, Clone)] pub struct MultipartUpload { /// <p>Date and time at which the multipart upload was initiated.</p> pub initiated: Option<String>, /// <p>Identifies who initiated the multipart upload.</p> pub initiator: Option<Initiator>, /// <p>Key of the object for which the multipart upload was initiated.</p> pub key: Option<String>, pub owner: Option<Owner>, /// <p>The class of storage used to store the object.</p> pub storage_class: Option<String>, /// <p>Upload ID that identifies the multipart upload.</p> pub upload_id: Option<String>, } struct MultipartUploadDeserializer; impl MultipartUploadDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<MultipartUpload, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = MultipartUpload::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Initiated" => { obj.initiated = Some(try!(InitiatedDeserializer::deserialize("Initiated", stack))); } "Initiator" => { obj.initiator = Some(try!(InitiatorDeserializer::deserialize("Initiator", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } "StorageClass" => { obj.storage_class = Some(try!(StorageClassDeserializer::deserialize( "StorageClass", stack ))); } "UploadId" => { obj.upload_id = Some(try!(MultipartUploadIdDeserializer::deserialize( "UploadId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct MultipartUploadIdDeserializer; impl MultipartUploadIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct MultipartUploadIdSerializer; impl MultipartUploadIdSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct MultipartUploadListDeserializer; impl MultipartUploadListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<MultipartUpload>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(MultipartUploadDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } struct NextKeyMarkerDeserializer; impl NextKeyMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } struct NextMarkerDeserializer; impl NextMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } struct NextPartNumberMarkerDeserializer; impl NextPartNumberMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<i64, XmlParseError> { try!(start_element(tag_name, stack)); let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } struct NextTokenDeserializer; impl NextTokenDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } struct NextUploadIdMarkerDeserializer; impl NextUploadIdMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } struct NextVersionIdMarkerDeserializer; impl NextVersionIdMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> 
Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object&#39;s lifetime.</p> #[derive(Default, Debug, Clone)] pub struct NoncurrentVersionExpiration { /// <p>Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. For information about the noncurrent days calculations, see <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html">How Amazon S3 Calculates When an Object Became Noncurrent</a> in the Amazon Simple Storage Service Developer Guide.</p> pub noncurrent_days: Option<i64>, } struct NoncurrentVersionExpirationDeserializer; impl NoncurrentVersionExpirationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<NoncurrentVersionExpiration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = NoncurrentVersionExpiration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "NoncurrentDays" => { obj.noncurrent_days = Some(try!(DaysDeserializer::deserialize("NoncurrentDays", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct NoncurrentVersionExpirationSerializer; impl NoncurrentVersionExpirationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &NoncurrentVersionExpiration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.noncurrent_days { writer.write(xml::writer::XmlEvent::start_element("NoncurrentDays"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for the transition rule that describes when noncurrent objects transition to the STANDARD<em>IA, ONEZONE</em>IA or GLACIER storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD<em>IA, ONEZONE</em>IA or GLACIER storage class at a specific period in the object&#39;s lifetime.</p> #[derive(Default, Debug, Clone)] pub struct NoncurrentVersionTransition { /// <p>Specifies the number of days an object is noncurrent before Amazon S3 can perform the associated action. 
For information about the noncurrent days calculations, see <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html">How Amazon S3 Calculates When an Object Became Noncurrent</a> in the Amazon Simple Storage Service Developer Guide.</p> pub noncurrent_days: Option<i64>, /// <p>The class of storage used to store the object.</p> pub storage_class: Option<String>, } struct NoncurrentVersionTransitionDeserializer; impl NoncurrentVersionTransitionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<NoncurrentVersionTransition, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = NoncurrentVersionTransition::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{
                    "NoncurrentDays" => {
                        obj.noncurrent_days =
                            Some(try!(DaysDeserializer::deserialize("NoncurrentDays", stack)));
                    }
                    "StorageClass" => {
                        obj.storage_class = Some(try!(
                            TransitionStorageClassDeserializer::deserialize("StorageClass", stack)
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a `NoncurrentVersionTransition` (optional `NoncurrentDays` and
/// `StorageClass` children) as the XML element `name`.
pub struct NoncurrentVersionTransitionSerializer;
impl NoncurrentVersionTransitionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &NoncurrentVersionTransition,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.noncurrent_days {
            writer.write(xml::writer::XmlEvent::start_element("NoncurrentDays"))?;
            // Fixed: propagate the write error with `?` — previously this Result
            // was silently discarded while the sibling writes used `?`.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.storage_class {
            writer.write(xml::writer::XmlEvent::start_element("StorageClass"))?;
            // Fixed: same dropped-Result defect as above.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Deserializes a repeated sequence of `NoncurrentVersionTransition` elements
/// that all share the tag `tag_name`.
struct NoncurrentVersionTransitionListDeserializer;
impl NoncurrentVersionTransitionListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<NoncurrentVersionTransition>, XmlParseError> {
        let mut obj = vec![];
        loop {
            // Keep consuming while the next start element matches `tag_name`.
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, ..
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(NoncurrentVersionTransitionDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } pub struct NoncurrentVersionTransitionListSerializer; impl NoncurrentVersionTransitionListSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<NoncurrentVersionTransition>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { NoncurrentVersionTransitionSerializer::serialize(writer, name, element)?; } Ok(()) } } /// <p>Container for specifying the notification configuration of the bucket. If this element is empty, notifications are turned off on the bucket.</p> #[derive(Default, Debug, Clone)] pub struct NotificationConfiguration { pub lambda_function_configurations: Option<Vec<LambdaFunctionConfiguration>>, pub queue_configurations: Option<Vec<QueueConfiguration>>, pub topic_configurations: Option<Vec<TopicConfiguration>>, } struct NotificationConfigurationDeserializer; impl NotificationConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<NotificationConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = NotificationConfiguration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "CloudFunctionConfiguration" => { obj.lambda_function_configurations = Some(try!( LambdaFunctionConfigurationListDeserializer::deserialize( "CloudFunctionConfiguration", stack ) )); } "QueueConfiguration" => { obj.queue_configurations = Some(try!(QueueConfigurationListDeserializer::deserialize( "QueueConfiguration", stack ))); } "TopicConfiguration" => { obj.topic_configurations = Some(try!(TopicConfigurationListDeserializer::deserialize( "TopicConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct NotificationConfigurationSerializer; impl NotificationConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &NotificationConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.lambda_function_configurations { &LambdaFunctionConfigurationListSerializer::serialize( &mut writer, "CloudFunctionConfiguration", value, )?; } if let Some(ref value) = obj.queue_configurations { &QueueConfigurationListSerializer::serialize(&mut writer, "QueueConfiguration", value)?; } if let Some(ref value) = obj.topic_configurations { &TopicConfigurationListSerializer::serialize(&mut writer, "TopicConfiguration", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct NotificationConfigurationDeprecated { pub cloud_function_configuration: Option<CloudFunctionConfiguration>, pub queue_configuration: Option<QueueConfigurationDeprecated>, pub topic_configuration: Option<TopicConfigurationDeprecated>, } struct NotificationConfigurationDeprecatedDeserializer; impl NotificationConfigurationDeprecatedDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> 
Result<NotificationConfigurationDeprecated, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = NotificationConfigurationDeprecated::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "CloudFunctionConfiguration" => { obj.cloud_function_configuration = Some(try!(CloudFunctionConfigurationDeserializer::deserialize( "CloudFunctionConfiguration", stack ))); } "QueueConfiguration" => { obj.queue_configuration = Some(try!(QueueConfigurationDeprecatedDeserializer::deserialize( "QueueConfiguration", stack ))); } "TopicConfiguration" => { obj.topic_configuration = Some(try!(TopicConfigurationDeprecatedDeserializer::deserialize( "TopicConfiguration", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct NotificationConfigurationDeprecatedSerializer; impl NotificationConfigurationDeprecatedSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &NotificationConfigurationDeprecated, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.cloud_function_configuration { &CloudFunctionConfigurationSerializer::serialize( &mut writer, "CloudFunctionConfiguration", value, )?; } if let Some(ref value) = obj.queue_configuration { &QueueConfigurationDeprecatedSerializer::serialize( &mut writer, "QueueConfiguration", value, )?; } if let Some(ref value) = obj.topic_configuration { &TopicConfigurationDeprecatedSerializer::serialize( &mut writer, "TopicConfiguration", value, )?; } 
writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for object key name filtering rules. For information about key name filtering, go to <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Configuring Event Notifications</a> in the Amazon Simple Storage Service Developer Guide.</p> #[derive(Default, Debug, Clone)] pub struct NotificationConfigurationFilter { pub key: Option<S3KeyFilter>, } struct NotificationConfigurationFilterDeserializer; impl NotificationConfigurationFilterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<NotificationConfigurationFilter, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = NotificationConfigurationFilter::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "S3Key" => { obj.key = Some(try!(S3KeyFilterDeserializer::deserialize("S3Key", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct NotificationConfigurationFilterSerializer; impl NotificationConfigurationFilterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &NotificationConfigurationFilter, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.key { &S3KeyFilterSerializer::serialize(&mut writer, "S3Key", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct NotificationIdDeserializer; impl NotificationIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct NotificationIdSerializer; impl NotificationIdSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct Object { pub e_tag: Option<String>, pub key: Option<String>, pub last_modified: Option<String>, pub owner: Option<Owner>, pub size: Option<i64>, /// <p>The class of storage used to store the object.</p> pub storage_class: Option<String>, } struct ObjectDeserializer; impl ObjectDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Object, 
XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Object::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "ETag" => { obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack))); } "Key" => { obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack))); } "LastModified" => { obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize( "LastModified", stack ))); } "Owner" => { obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack))); } "Size" => { obj.size = Some(try!(SizeDeserializer::deserialize("Size", stack))); } "StorageClass" => { obj.storage_class = Some(try!( ObjectStorageClassDeserializer::deserialize("StorageClass", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ObjectCannedACLSerializer; impl ObjectCannedACLSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct ObjectIdentifier { /// <p>Key name of the object to delete.</p> pub key: String, /// <p>VersionId for the specific version of the object to delete.</p> pub version_id: Option<String>, } pub struct ObjectIdentifierSerializer; impl ObjectIdentifierSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut 
EventWriter<W>, name: &str, obj: &ObjectIdentifier, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Key"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.key )))?; writer.write(xml::writer::XmlEvent::end_element())?; if let Some(ref value) = obj.version_id { writer.write(xml::writer::XmlEvent::start_element("VersionId"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } pub struct ObjectIdentifierListSerializer; impl ObjectIdentifierListSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<ObjectIdentifier>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { ObjectIdentifierSerializer::serialize(writer, name, element)?; } Ok(()) } } struct ObjectKeyDeserializer; impl ObjectKeyDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ObjectKeySerializer; impl ObjectKeySerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct ObjectListDeserializer; impl ObjectListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<Object>, XmlParseError> { let mut obj 
= vec![];
// Tail of the preceding list deserializer (its definition starts before this
// chunk; reproduced unchanged): consume consecutive siblings named `tag_name`.
loop {
    let consume_next_tag = match stack.peek() {
        Some(&Ok(XmlEvent::StartElement { ref name, .. })) => name.local_name == tag_name,
        _ => false,
    };
    if consume_next_tag {
        obj.push(try!(ObjectDeserializer::deserialize(tag_name, stack)));
    } else {
        break;
    }
}
Ok(obj)
}
}
/// Deserializes a bare `StorageClass` string element for an object listing.
struct ObjectStorageClassDeserializer;
impl ObjectStorageClassDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct ObjectVersion {
    pub e_tag: Option<String>,
    /// <p>Specifies whether the object is (true) or is not (false) the latest version of an object.</p>
    pub is_latest: Option<bool>,
    /// <p>The object key.</p>
    pub key: Option<String>,
    /// <p>Date and time the object was last modified.</p>
    pub last_modified: Option<String>,
    pub owner: Option<Owner>,
    /// <p>Size in bytes of the object.</p>
    pub size: Option<i64>,
    /// <p>The class of storage used to store the object.</p>
    pub storage_class: Option<String>,
    /// <p>Version ID of an object.</p>
    pub version_id: Option<String>,
}
/// Deserializes an `ObjectVersion` element: reads child elements until the
/// matching end tag, skipping any unrecognized children via `skip_tree`.
struct ObjectVersionDeserializer;
impl ObjectVersionDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ObjectVersion, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ObjectVersion::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "ETag" => {
                        obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack)));
                    }
                    "IsLatest" => {
                        obj.is_latest =
                            Some(try!(IsLatestDeserializer::deserialize("IsLatest", stack)));
                    }
                    "Key" => {
                        obj.key = Some(try!(ObjectKeyDeserializer::deserialize("Key", stack)));
                    }
                    "LastModified" => {
                        obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize(
                            "LastModified",
                            stack
                        )));
                    }
                    "Owner" => {
                        obj.owner = Some(try!(OwnerDeserializer::deserialize("Owner", stack)));
                    }
                    "Size" => {
                        obj.size = Some(try!(SizeDeserializer::deserialize("Size", stack)));
                    }
                    "StorageClass" => {
                        obj.storage_class =
                            Some(try!(ObjectVersionStorageClassDeserializer::deserialize(
                                "StorageClass",
                                stack
                            )));
                    }
                    "VersionId" => {
                        obj.version_id = Some(try!(ObjectVersionIdDeserializer::deserialize(
                            "VersionId",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Deserializes a `VersionId` string element.
struct ObjectVersionIdDeserializer;
impl ObjectVersionIdDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a version-id string as `<name>value</name>`.
pub struct ObjectVersionIdSerializer;
impl ObjectVersionIdSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Deserializes a flattened list of `ObjectVersion` sibling elements.
struct ObjectVersionListDeserializer;
impl ObjectVersionListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<ObjectVersion>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    name.local_name == tag_name
                }
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(ObjectVersionDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}
/// Deserializes a `StorageClass` string element of an object version.
struct ObjectVersionStorageClassDeserializer;
impl ObjectVersionStorageClassDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// <p>Describes the location where the restore job&#39;s output is stored.</p>
#[derive(Default, Debug, Clone)]
pub struct OutputLocation {
    /// <p>Describes an S3 location that will receive the results of the restore request.</p>
    pub s3: Option<S3Location>,
}
/// Serializes an `OutputLocation` as `<name><S3>…</S3></name>`.
pub struct OutputLocationSerializer;
impl OutputLocationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &OutputLocation,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.s3 {
            // Fix: dropped a stray `&` that took a reference to the discarded
            // `()` result of the `?`-expression.
            S3LocationSerializer::serialize(&mut writer, "S3", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// <p>Describes how results of the Select job are serialized.</p>
#[derive(Default, Debug, Clone)]
pub struct OutputSerialization {
    /// <p>Describes the serialization of CSV-encoded Select results.</p>
    pub csv: Option<CSVOutput>,
    /// <p>Specifies JSON as request&#39;s output serialization format.</p>
    pub json: Option<JSONOutput>,
}
/// Serializes an `OutputSerialization` with optional `CSV`/`JSON` children.
pub struct OutputSerializationSerializer;
impl OutputSerializationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &OutputSerialization,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.csv {
            // Fix: removed stray `&` in front of the serialize call.
            CSVOutputSerializer::serialize(&mut writer, "CSV", value)?;
        }
        if let Some(ref value) = obj.json {
            // Fix: removed stray `&` in front of the serialize call.
            JSONOutputSerializer::serialize(&mut writer, "JSON", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct Owner {
    pub display_name: Option<String>,
    pub id: Option<String>,
}
/// Deserializes an `Owner` element (`DisplayName` / `ID` children).
struct OwnerDeserializer;
impl OwnerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Owner, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Owner::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "DisplayName" => {
                        obj.display_name = Some(try!(DisplayNameDeserializer::deserialize(
                            "DisplayName",
                            stack
                        )));
                    }
                    "ID" => {
                        obj.id = Some(try!(IDDeserializer::deserialize("ID", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes an `Owner` with optional `DisplayName` / `ID` children.
pub struct OwnerSerializer;
impl OwnerSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Owner,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.display_name {
            writer.write(xml::writer::XmlEvent::start_element("DisplayName"))?;
            // Fix: the Result of this write was silently dropped (no `?`),
            // swallowing any XML write error.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("ID"))?;
            // Fix: propagate write errors (was missing `?`).
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Deserializes an `OwnerOverride` string element.
struct OwnerOverrideDeserializer;
impl OwnerOverrideDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes an `OwnerOverride` string as `<name>value</name>`.
pub struct OwnerOverrideSerializer;
impl OwnerOverrideSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct Part {
    /// <p>Entity tag returned when the part was uploaded.</p>
    pub e_tag: Option<String>,
    /// <p>Date and time at which the part was uploaded.</p>
    pub last_modified: Option<String>,
    /// <p>Part number identifying the part. This is a positive integer between 1 and 10,000.</p>
    pub part_number: Option<i64>,
    /// <p>Size of the uploaded part data.</p>
    pub size: Option<i64>,
}
/// Deserializes a `Part` element of a multipart-upload listing.
struct PartDeserializer;
impl PartDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Part, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Part::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "ETag" => {
                        obj.e_tag = Some(try!(ETagDeserializer::deserialize("ETag", stack)));
                    }
                    "LastModified" => {
                        obj.last_modified = Some(try!(LastModifiedDeserializer::deserialize(
                            "LastModified",
                            stack
                        )));
                    }
                    "PartNumber" => {
                        obj.part_number = Some(try!(PartNumberDeserializer::deserialize(
                            "PartNumber",
                            stack
                        )));
                    }
                    "Size" => {
                        obj.size = Some(try!(SizeDeserializer::deserialize("Size", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Deserializes a `PartNumber` integer element.
struct PartNumberDeserializer;
impl PartNumberDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): this panics on non-numeric XML content instead of
        // returning an XmlParseError — consider mapping the parse error.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a part number as `<name>value</name>`.
pub struct PartNumberSerializer;
impl PartNumberSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Deserializes a `PartNumberMarker` integer element.
struct PartNumberMarkerDeserializer;
impl PartNumberMarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // NOTE(review): panics on malformed numeric content; see
        // PartNumberDeserializer.
        let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a part-number marker as `<name>value</name>`.
pub struct PartNumberMarkerSerializer;
impl PartNumberMarkerSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &i64,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Deserializes a flattened list of `Part` sibling elements.
struct PartsDeserializer;
impl PartsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<Part>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    name.local_name == tag_name
                }
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(PartDeserializer::deserialize(tag_name, stack)));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}
/// Deserializes a `Payer` string element.
struct PayerDeserializer;
impl PayerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a `Payer` string as `<name>value</name>`.
pub struct PayerSerializer;
impl PayerSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Deserializes a `Permission` string element.
struct PermissionDeserializer;
impl PermissionDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a `Permission` string as `<name>value</name>`.
pub struct PermissionSerializer;
impl PermissionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Serializes a bucket policy string as `<name>value</name>`.
pub struct PolicySerializer;
impl PolicySerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Deserializes a `Prefix` string element.
struct PrefixDeserializer;
impl PrefixDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a `Prefix` string as `<name>value</name>`.
pub struct PrefixSerializer;
impl PrefixSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct Progress {
    /// <p>Current number of uncompressed object bytes processed.</p>
    pub bytes_processed: Option<i64>,
    /// <p>Current number of object bytes scanned.</p>
    pub bytes_scanned: Option<i64>,
}
/// Deserializes a `Progress` element of a Select progress event.
struct ProgressDeserializer;
impl ProgressDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Progress, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Progress::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "BytesProcessed" => {
                        obj.bytes_processed = Some(try!(BytesProcessedDeserializer::deserialize(
                            "BytesProcessed",
                            stack
                        )));
                    }
                    "BytesScanned" => {
                        obj.bytes_scanned = Some(try!(BytesScannedDeserializer::deserialize(
                            "BytesScanned",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct ProgressEvent {
    /// <p>The Progress event details.</p>
    pub details: Option<Progress>,
}
/// Deserializes a `ProgressEvent` element (`Details` child).
struct ProgressEventDeserializer;
impl ProgressEventDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ProgressEvent, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ProgressEvent::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Details" => {
                        obj.details =
                            Some(try!(ProgressDeserializer::deserialize("Details", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Deserializes a `Protocol` string element.
struct ProtocolDeserializer;
impl ProtocolDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a `Protocol` string as `<name>value</name>`.
pub struct ProtocolSerializer;
impl ProtocolSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketAccelerateConfigurationRequest {
    /// <p>Specifies the Accelerate Configuration you want to set for the bucket.</p>
    pub accelerate_configuration: AccelerateConfiguration,
    /// <p>Name of the bucket for which the accelerate configuration is set.</p>
    pub bucket: String,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketAclRequest {
    /// <p>The canned ACL to apply to the bucket.</p>
    pub acl: Option<String>,
    pub access_control_policy: Option<AccessControlPolicy>,
    pub bucket: String,
    pub content_md5: Option<String>,
    /// <p>Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.</p>
    pub grant_full_control: Option<String>,
    /// <p>Allows grantee to list the objects in the bucket.</p>
    pub grant_read: Option<String>,
    /// <p>Allows grantee to read the bucket ACL.</p>
    pub grant_read_acp: Option<String>,
    /// <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
    pub grant_write: Option<String>,
    /// <p>Allows grantee to write the ACL for the applicable bucket.</p>
    pub grant_write_acp: Option<String>,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketAnalyticsConfigurationRequest {
    /// <p>The configuration and any analyses for the analytics filter.</p>
    pub analytics_configuration: AnalyticsConfiguration,
    /// <p>The name of the bucket to which an analytics configuration is stored.</p>
    pub bucket: String,
    /// <p>The identifier used to represent an analytics configuration.</p>
    pub id: String,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketCorsRequest {
    pub bucket: String,
    pub cors_configuration: CORSConfiguration,
    pub content_md5: Option<String>,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketEncryptionRequest {
    /// <p>The name of the bucket for which the server-side encryption configuration is set.</p>
    pub bucket: String,
    /// <p>The base64-encoded 128-bit MD5 digest of the server-side encryption configuration.</p>
    pub content_md5: Option<String>,
    pub server_side_encryption_configuration: ServerSideEncryptionConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketInventoryConfigurationRequest {
    /// <p>The name of the bucket where the inventory configuration will be stored.</p>
    pub bucket: String,
    /// <p>The ID used to identify the inventory configuration.</p>
    pub id: String,
    /// <p>Specifies the inventory configuration.</p>
    pub inventory_configuration: InventoryConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketLifecycleConfigurationRequest {
    pub bucket: String,
    pub lifecycle_configuration: Option<BucketLifecycleConfiguration>,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketLifecycleRequest {
    pub bucket: String,
    pub content_md5: Option<String>,
    pub lifecycle_configuration: Option<LifecycleConfiguration>,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketLoggingRequest {
    pub bucket: String,
    pub bucket_logging_status: BucketLoggingStatus,
    pub content_md5: Option<String>,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketMetricsConfigurationRequest {
    /// <p>The name of the bucket for which the metrics configuration is set.</p>
    pub bucket: String,
    /// <p>The ID used to identify the metrics configuration.</p>
    pub id: String,
    /// <p>Specifies the metrics configuration.</p>
    pub metrics_configuration: MetricsConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketNotificationConfigurationRequest {
    /// Name of the bucket whose notification configuration is replaced.
    pub bucket: String,
    /// The new notification configuration to apply.
    pub notification_configuration: NotificationConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketNotificationRequest {
    /// Name of the target bucket.
    pub bucket: String,
    /// Optional Content-MD5 of the request body.
    pub content_md5: Option<String>,
    /// Deprecated-style notification configuration payload.
    pub notification_configuration: NotificationConfigurationDeprecated,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketPolicyRequest {
    /// Name of the target bucket.
    pub bucket: String,
    /// <p>Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future.</p>
    pub confirm_remove_self_bucket_access: Option<bool>,
    /// Optional Content-MD5 of the request body.
    pub content_md5: Option<String>,
    /// <p>The bucket policy as a JSON document.</p>
    pub policy: String,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketReplicationRequest {
    /// Name of the target bucket.
    pub bucket: String,
    /// Optional Content-MD5 of the request body.
    pub content_md5: Option<String>,
    /// Replication rules to apply to the bucket.
    pub replication_configuration: ReplicationConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketRequestPaymentRequest {
    /// Name of the target bucket.
    pub bucket: String,
    /// Optional Content-MD5 of the request body.
    pub content_md5: Option<String>,
    /// Request-payment (payer) configuration to apply.
    pub request_payment_configuration: RequestPaymentConfiguration,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketTaggingRequest {
    /// Name of the target bucket.
    pub bucket: String,
    /// Optional Content-MD5 of the request body.
    pub content_md5: Option<String>,
    /// Tag set to apply to the bucket.
    pub tagging: Tagging,
}
#[derive(Default, Debug, Clone)]
pub struct PutBucketVersioningRequest {
    /// Name of the target bucket.
    pub bucket: String,
    /// Optional Content-MD5 of the request body.
    pub content_md5: Option<String>,
    /// <p>The concatenation of the authentication device&#39;s serial number, a space, and the value that is displayed on your authentication device.</p>
    pub mfa: Option<String>,
    /// Versioning state to apply to the bucket.
    pub versioning_configuration: VersioningConfiguration,
}
#[derive(Default, Debug, Clone)] pub struct PutBucketWebsiteRequest { pub bucket: String, pub content_md5: Option<String>, pub website_configuration: WebsiteConfiguration, } #[derive(Default, Debug, Clone)] pub struct PutObjectAclOutput { pub request_charged: Option<String>, } struct PutObjectAclOutputDeserializer; impl PutObjectAclOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<PutObjectAclOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = PutObjectAclOutput::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct PutObjectAclRequest { /// <p>The canned ACL to apply to the object.</p> pub acl: Option<String>, pub access_control_policy: Option<AccessControlPolicy>, pub bucket: String, pub content_md5: Option<String>, /// <p>Allows grantee the read, write, read ACP, and write ACP permissions on the bucket.</p> pub grant_full_control: Option<String>, /// <p>Allows grantee to list the objects in the bucket.</p> pub grant_read: Option<String>, /// <p>Allows grantee to read the bucket ACL.</p> pub grant_read_acp: Option<String>, /// <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p> pub grant_write: Option<String>, /// <p>Allows grantee to write the ACL for the applicable bucket.</p> pub grant_write_acp: Option<String>, pub key: String, pub request_payer: Option<String>, /// <p>VersionId used to reference a specific version of the object.</p> pub version_id: Option<String>, } #[derive(Default, Debug, Clone)] pub struct PutObjectOutput { /// <p>Entity tag for the uploaded object.</p> pub e_tag: Option<String>, /// <p>If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). 
The value of rule-id is URL encoded.</p> pub expiration: Option<String>, pub request_charged: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p> pub sse_customer_algorithm: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p> pub sse_customer_key_md5: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>Version of the object.</p> pub version_id: Option<String>, } struct PutObjectOutputDeserializer; impl PutObjectOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<PutObjectOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = PutObjectOutput::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug)] pub struct PutObjectRequest { /// <p>The canned ACL to apply to the object.</p> pub acl: Option<String>, /// <p>Object data.</p> pub body: Option<StreamingBody>, /// <p>Name of the bucket to which the PUT operation was initiated.</p> pub bucket: String, /// <p>Specifies caching behavior along the request/reply chain.</p> pub cache_control: Option<String>, /// <p>Specifies presentational information for the object.</p> pub content_disposition: Option<String>, /// <p>Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.</p> 
pub content_encoding: Option<String>, /// <p>The language the content is in.</p> pub content_language: Option<String>, /// <p>Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.</p> pub content_length: Option<i64>, /// <p>The base64-encoded 128-bit MD5 digest of the part data.</p> pub content_md5: Option<String>, /// <p>A standard MIME type describing the format of the object data.</p> pub content_type: Option<String>, /// <p>The date and time at which the object is no longer cacheable.</p> pub expires: Option<String>, /// <p>Gives the grantee READ, READ<em>ACP, and WRITE</em>ACP permissions on the object.</p> pub grant_full_control: Option<String>, /// <p>Allows grantee to read the object data and its metadata.</p> pub grant_read: Option<String>, /// <p>Allows grantee to read the object ACL.</p> pub grant_read_acp: Option<String>, /// <p>Allows grantee to write the ACL for the applicable object.</p> pub grant_write_acp: Option<String>, /// <p>Object key for which the PUT operation was initiated.</p> pub key: String, /// <p>A map of metadata to store with the object in S3.</p> pub metadata: Option<::std::collections::HashMap<String, String>>, pub request_payer: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub server_side_encryption: Option<String>, /// <p>The type of storage to use for the object. Defaults to &#39;STANDARD&#39;.</p> pub storage_class: Option<String>, /// <p>The tag-set for the object. The tag-set must be encoded as URL Query parameters</p> pub tagging: Option<String>, /// <p>If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. 
Amazon S3 stores the value of this header in the object metadata.</p> pub website_redirect_location: Option<String>, } #[derive(Default, Debug, Clone)] pub struct PutObjectTaggingOutput { pub version_id: Option<String>, } struct PutObjectTaggingOutputDeserializer; impl PutObjectTaggingOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<PutObjectTaggingOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = PutObjectTaggingOutput::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct PutObjectTaggingRequest { pub bucket: String, pub content_md5: Option<String>, pub key: String, pub tagging: Tagging, pub version_id: Option<String>, } struct QueueArnDeserializer; impl QueueArnDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct QueueArnSerializer; impl QueueArnSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for specifying an configuration when you want Amazon S3 to publish events to an Amazon Simple Queue Service (Amazon SQS) queue.</p> #[derive(Default, Debug, Clone)] pub struct QueueConfiguration { pub events: Vec<String>, pub filter: Option<NotificationConfigurationFilter>, pub id: Option<String>, /// <p>Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects events of specified type.</p> pub queue_arn: String, } struct 
QueueConfigurationDeserializer; impl QueueConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<QueueConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = QueueConfiguration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Event" => { obj.events = try!(EventListDeserializer::deserialize("Event", stack)); } "Filter" => { obj.filter = Some(try!( NotificationConfigurationFilterDeserializer::deserialize( "Filter", stack ) )); } "Id" => { obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack))); } "Queue" => { obj.queue_arn = try!(QueueArnDeserializer::deserialize("Queue", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct QueueConfigurationSerializer; impl QueueConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &QueueConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; EventListSerializer::serialize(&mut writer, "Event", &obj.events)?; if let Some(ref value) = obj.filter { &NotificationConfigurationFilterSerializer::serialize(&mut writer, "Filter", value)?; } if let Some(ref value) = obj.id { writer.write(xml::writer::XmlEvent::start_element("Id"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::start_element("Queue"))?; 
writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.queue_arn )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct QueueConfigurationDeprecated { pub events: Option<Vec<String>>, pub id: Option<String>, pub queue: Option<String>, } struct QueueConfigurationDeprecatedDeserializer; impl QueueConfigurationDeprecatedDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<QueueConfigurationDeprecated, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = QueueConfigurationDeprecated::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Event" => { obj.events = Some(try!(EventListDeserializer::deserialize("Event", stack))); } "Id" => { obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack))); } "Queue" => { obj.queue = Some(try!(QueueArnDeserializer::deserialize("Queue", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct QueueConfigurationDeprecatedSerializer; impl QueueConfigurationDeprecatedSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &QueueConfigurationDeprecated, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.events { &EventListSerializer::serialize(&mut writer, "Event", value)?; } if let Some(ref value) = obj.id { writer.write(xml::writer::XmlEvent::start_element("Id"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } if let Some(ref value) = obj.queue { writer.write(xml::writer::XmlEvent::start_element("Queue"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = value ))); writer.write(xml::writer::XmlEvent::end_element())?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct QueueConfigurationListDeserializer; impl QueueConfigurationListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<QueueConfiguration>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
// Continuation of QueueConfigurationListDeserializer::deserialize: keep
// consuming sibling elements while the next start tag matches `tag_name`.
})) => name.local_name == tag_name,
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(QueueConfigurationDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}

pub struct QueueConfigurationListSerializer;
impl QueueConfigurationListSerializer {
    // Serializes the list as repeated sibling elements (no wrapper element).
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<QueueConfiguration>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        for element in obj {
            QueueConfigurationSerializer::serialize(writer, name, element)?;
        }
        Ok(())
    }
}

pub struct QuietSerializer;
impl QuietSerializer {
    // Writes a bool as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &bool,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct QuoteCharacterSerializer;
impl QuoteCharacterSerializer {
    // Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct QuoteEscapeCharacterSerializer;
impl QuoteEscapeCharacterSerializer {
    // Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct QuoteFieldsSerializer;
impl QuoteFieldsSerializer {
    // Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct RecordDelimiterSerializer;
impl RecordDelimiterSerializer {
    // Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct RecordsEvent {
    /// <p>The byte array of partial, one or more result records.</p>
    pub payload: Option<Vec<u8>>,
}

struct RecordsEventDeserializer;
impl RecordsEventDeserializer {
    // Parses a `<RecordsEvent>`-shaped element from the XML event stack.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<RecordsEvent, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = RecordsEvent::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
                    "Payload" => {
                        obj.payload = Some(try!(BodyDeserializer::deserialize("Payload", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct Redirect {
    /// <p>The host name to use in the redirect request.</p>
    pub host_name: Option<String>,
    /// <p>The HTTP redirect code to use on the response. Not required if one of the siblings is present.</p>
    pub http_redirect_code: Option<String>,
    /// <p>Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request.</p>
    pub protocol: Option<String>,
    /// <p>The object key prefix to use in the redirect request. For example, to redirect requests for all pages with prefix docs/ (objects in the docs/ folder) to documents/, you can set a condition block with KeyPrefixEquals set to docs/ and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of the siblings is present. Can be present only if ReplaceKeyWith is not provided.</p>
    pub replace_key_prefix_with: Option<String>,
    /// <p>The specific object key to use in the redirect request. For example, redirect request to error.html. Not required if one of the sibling is present. Can be present only if ReplaceKeyPrefixWith is not provided.</p>
    pub replace_key_with: Option<String>,
}

struct RedirectDeserializer;
impl RedirectDeserializer {
    /// Parses a `<Redirect>` element from the XML event stack.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Redirect, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Redirect::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "HostName" => {
                        obj.host_name =
                            Some(try!(HostNameDeserializer::deserialize("HostName", stack)));
                    }
                    "HttpRedirectCode" => {
                        obj.http_redirect_code = Some(try!(
                            HttpRedirectCodeDeserializer::deserialize("HttpRedirectCode", stack)
                        ));
                    }
                    "Protocol" => {
                        obj.protocol =
                            Some(try!(ProtocolDeserializer::deserialize("Protocol", stack)));
                    }
                    "ReplaceKeyPrefixWith" => {
                        obj.replace_key_prefix_with =
                            Some(try!(ReplaceKeyPrefixWithDeserializer::deserialize(
                                "ReplaceKeyPrefixWith",
                                stack
                            )));
                    }
                    "ReplaceKeyWith" => {
                        obj.replace_key_with = Some(try!(ReplaceKeyWithDeserializer::deserialize(
                            "ReplaceKeyWith",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct RedirectSerializer;
impl RedirectSerializer {
    /// Writes `obj` as an XML element named `name`.
    ///
    /// BUGFIX: every `characters()` write below was missing `?`, so failures
    /// while writing text nodes were silently discarded; they now propagate.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Redirect,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.host_name {
            writer.write(xml::writer::XmlEvent::start_element("HostName"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.http_redirect_code {
            writer.write(xml::writer::XmlEvent::start_element("HttpRedirectCode"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.protocol {
            writer.write(xml::writer::XmlEvent::start_element("Protocol"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.replace_key_prefix_with {
            writer.write(xml::writer::XmlEvent::start_element("ReplaceKeyPrefixWith"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.replace_key_with {
            writer.write(xml::writer::XmlEvent::start_element("ReplaceKeyWith"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct RedirectAllRequestsTo {
    /// <p>Name of the host where requests will be redirected.</p>
    pub host_name: String,
    /// <p>Protocol to use (http, https) when redirecting requests. The default is the protocol that is used in the original request.</p>
    pub protocol: Option<String>,
}

struct RedirectAllRequestsToDeserializer;
impl RedirectAllRequestsToDeserializer {
    /// Parses a `<RedirectAllRequestsTo>` element from the XML event stack.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<RedirectAllRequestsTo, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = RedirectAllRequestsTo::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
                    "HostName" => {
                        obj.host_name = try!(HostNameDeserializer::deserialize("HostName", stack));
                    }
                    "Protocol" => {
                        obj.protocol =
                            Some(try!(ProtocolDeserializer::deserialize("Protocol", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct RedirectAllRequestsToSerializer;
impl RedirectAllRequestsToSerializer {
    /// Writes `obj` as an XML element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &RedirectAllRequestsTo,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::start_element("HostName"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.host_name
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.protocol {
            writer.write(xml::writer::XmlEvent::start_element("Protocol"))?;
            // BUGFIX: `?` was missing here; a failed text-node write was ignored.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct ReplaceKeyPrefixWithDeserializer;
impl ReplaceKeyPrefixWithDeserializer {
    /// Reads the text content of a simple string element.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct ReplaceKeyPrefixWithSerializer;
impl ReplaceKeyPrefixWithSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct ReplaceKeyWithDeserializer;
impl ReplaceKeyWithDeserializer {
    /// Reads the text content of a simple string element.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct ReplaceKeyWithSerializer;
impl ReplaceKeyWithSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct ReplicaKmsKeyIDDeserializer;
impl ReplicaKmsKeyIDDeserializer {
    /// Reads the text content of a simple string element.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct ReplicaKmsKeyIDSerializer;
impl ReplicaKmsKeyIDSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// <p>Container for replication rules. You can add as many as 1,000 rules. Total replication configuration size can be up to 2 MB.</p>
#[derive(Default, Debug, Clone)]
pub struct ReplicationConfiguration {
    /// <p>Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating the objects.</p>
    pub role: String,
    /// <p>Container for information about a particular replication rule. Replication configuration must have at least one rule and can contain up to 1,000 rules.</p>
    pub rules: Vec<ReplicationRule>,
}

struct ReplicationConfigurationDeserializer;
impl ReplicationConfigurationDeserializer {
    /// Parses a `<ReplicationConfiguration>` element from the XML event stack.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ReplicationConfiguration, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ReplicationConfiguration::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Role" => {
                        obj.role = try!(RoleDeserializer::deserialize("Role", stack));
                    }
                    "Rule" => {
                        obj.rules = try!(ReplicationRulesDeserializer::deserialize("Rule", stack));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct ReplicationConfigurationSerializer;
impl ReplicationConfigurationSerializer {
    /// Writes `obj` as an XML element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &ReplicationConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::start_element("Role"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.role
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        ReplicationRulesSerializer::serialize(&mut writer, "Rule", &obj.rules)?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// <p>Container for information about a particular replication rule.</p>
#[derive(Default, Debug, Clone)]
pub struct ReplicationRule {
    /// <p>Container for replication destination information.</p>
    pub
destination: Destination,
    /// <p>Unique identifier for the rule. The value cannot be longer than 255 characters.</p>
    pub id: Option<String>,
    /// <p>Object keyname prefix identifying one or more objects to which the rule applies. Maximum prefix length can be up to 1,024 characters. Overlapping prefixes are not supported.</p>
    pub prefix: String,
    /// <p>Container for filters that define which source objects should be replicated.</p>
    pub source_selection_criteria: Option<SourceSelectionCriteria>,
    /// <p>The rule is ignored if status is not Enabled.</p>
    pub status: String,
}

struct ReplicationRuleDeserializer;
impl ReplicationRuleDeserializer {
    /// Parses a `<ReplicationRule>`-shaped element from the XML event stack.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ReplicationRule, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ReplicationRule::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Destination" => {
                        obj.destination =
                            try!(DestinationDeserializer::deserialize("Destination", stack));
                    }
                    "ID" => {
                        obj.id = Some(try!(IDDeserializer::deserialize("ID", stack)));
                    }
                    "Prefix" => {
                        obj.prefix = try!(PrefixDeserializer::deserialize("Prefix", stack));
                    }
                    "SourceSelectionCriteria" => {
                        obj.source_selection_criteria =
                            Some(try!(SourceSelectionCriteriaDeserializer::deserialize(
                                "SourceSelectionCriteria",
                                stack
                            )));
                    }
                    "Status" => {
                        obj.status = try!(ReplicationRuleStatusDeserializer::deserialize(
                            "Status", stack
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct ReplicationRuleSerializer;
impl ReplicationRuleSerializer {
    /// Writes `obj` as an XML element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &ReplicationRule,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        DestinationSerializer::serialize(&mut writer, "Destination", &obj.destination)?;
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("ID"))?;
            // BUGFIX: `?` was missing here; a failed text-node write was ignored.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.prefix
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        if let Some(ref value) = obj.source_selection_criteria {
            // was `&Serializer::…?;` — dropped the useless reference to `()`
            SourceSelectionCriteriaSerializer::serialize(
                &mut writer,
                "SourceSelectionCriteria",
                value,
            )?;
        }
        writer.write(xml::writer::XmlEvent::start_element("Status"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.status
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct ReplicationRuleStatusDeserializer;
impl
ReplicationRuleStatusDeserializer {
    /// Reads the text content of a simple string element.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct ReplicationRuleStatusSerializer;
impl ReplicationRuleStatusSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct ReplicationRulesDeserializer;
impl ReplicationRulesDeserializer {
    /// Parses a flattened run of sibling `<Rule>` elements.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<ReplicationRule>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    name.local_name == tag_name
                }
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(ReplicationRuleDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}

pub struct ReplicationRulesSerializer;
impl ReplicationRulesSerializer {
    /// Serializes the list as repeated sibling elements (no wrapper element).
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<ReplicationRule>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        for element in obj {
            ReplicationRuleSerializer::serialize(writer, name, element)?;
        }
        Ok(())
    }
}

#[derive(Default, Debug, Clone)]
pub struct RequestPaymentConfiguration {
    /// <p>Specifies who pays for the download and request fees.</p>
    pub payer: String,
}

pub struct RequestPaymentConfigurationSerializer;
impl RequestPaymentConfigurationSerializer {
    /// Writes `obj` as an XML element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &RequestPaymentConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::start_element("Payer"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.payer
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct RequestProgress {
    /// <p>Specifies whether periodic QueryProgress frames should be sent. Valid values: TRUE, FALSE. Default value: FALSE.</p>
    pub enabled: Option<bool>,
}

pub struct RequestProgressSerializer;
impl RequestProgressSerializer {
    /// Writes `obj` as an XML element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &RequestProgress,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.enabled {
            writer.write(xml::writer::XmlEvent::start_element("Enabled"))?;
            // BUGFIX: `?` was missing here; a failed text-node write was ignored.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct ResponseCacheControlSerializer;
impl ResponseCacheControlSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct ResponseContentDispositionSerializer;
impl ResponseContentDispositionSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct ResponseContentEncodingSerializer;
impl ResponseContentEncodingSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct ResponseContentLanguageSerializer;
impl ResponseContentLanguageSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct ResponseContentTypeSerializer;
impl ResponseContentTypeSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct ResponseExpiresSerializer;
impl ResponseExpiresSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

#[derive(Default, Debug, Clone)]
pub struct RestoreObjectOutput {
    pub request_charged: Option<String>,
    /// <p>Indicates the path in the provided S3 output location where Select results will be restored to.</p>
    pub restore_output_path: Option<String>,
}

struct RestoreObjectOutputDeserializer;
impl RestoreObjectOutputDeserializer {
    /// Consumes an empty `<RestoreObjectOutput>` element (fields come from headers).
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<RestoreObjectOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj =
RestoreObjectOutput::default();
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

#[derive(Default, Debug, Clone)]
pub struct RestoreObjectRequest {
    pub bucket: String,
    pub key: String,
    pub request_payer: Option<String>,
    pub restore_request: Option<RestoreRequest>,
    pub version_id: Option<String>,
}

/// <p>Container for restore job parameters.</p>
#[derive(Default, Debug, Clone)]
pub struct RestoreRequest {
    /// <p>Lifetime of the active copy in days. Do not use with restores that specify OutputLocation.</p>
    pub days: Option<i64>,
    /// <p>The optional description for the job.</p>
    pub description: Option<String>,
    /// <p>Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation.</p>
    pub glacier_job_parameters: Option<GlacierJobParameters>,
    /// <p>Describes the location where the restore job&#39;s output is stored.</p>
    pub output_location: Option<OutputLocation>,
    /// <p>Describes the parameters for Select job types.</p>
    pub select_parameters: Option<SelectParameters>,
    /// <p>Glacier retrieval tier at which the restore will be processed.</p>
    pub tier: Option<String>,
    /// <p>Type of restore request.</p>
    pub type_: Option<String>,
}

pub struct RestoreRequestSerializer;
impl RestoreRequestSerializer {
    /// Writes `obj` as an XML element named `name`.
    ///
    /// BUGFIX: the `characters()` writes for Days/Description/Tier/Type were
    /// missing `?`, silently dropping write failures; they now propagate. The
    /// useless `&` on the nested serializer calls is also removed.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &RestoreRequest,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.days {
            writer.write(xml::writer::XmlEvent::start_element("Days"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.description {
            writer.write(xml::writer::XmlEvent::start_element("Description"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.glacier_job_parameters {
            GlacierJobParametersSerializer::serialize(&mut writer, "GlacierJobParameters", value)?;
        }
        if let Some(ref value) = obj.output_location {
            OutputLocationSerializer::serialize(&mut writer, "OutputLocation", value)?;
        }
        if let Some(ref value) = obj.select_parameters {
            SelectParametersSerializer::serialize(&mut writer, "SelectParameters", value)?;
        }
        if let Some(ref value) = obj.tier {
            writer.write(xml::writer::XmlEvent::start_element("Tier"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.type_ {
            writer.write(xml::writer::XmlEvent::start_element("Type"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

pub struct RestoreRequestTypeSerializer;
impl RestoreRequestTypeSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct RoleDeserializer;
impl RoleDeserializer {
    /// Reads the text content of a simple string element.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct RoleSerializer;
impl RoleSerializer {
    /// Writes a string as the text content of an element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
// Tail of RoleSerializer::serialize: write the text node and close up.
writer.write(xml::writer::XmlEvent::characters(&format!(
    "{value}",
    value = obj.to_string()
)))?;
writer.write(xml::writer::XmlEvent::end_element())
}
}

#[derive(Default, Debug, Clone)]
pub struct RoutingRule {
    /// <p>A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error.</p>
    pub condition: Option<Condition>,
    /// <p>Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can can specify a different error code to return.</p>
    pub redirect: Redirect,
}

struct RoutingRuleDeserializer;
impl RoutingRuleDeserializer {
    /// Parses a `<RoutingRule>` element from the XML event stack.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<RoutingRule, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = RoutingRule::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Condition" => {
                        obj.condition =
                            Some(try!(ConditionDeserializer::deserialize("Condition", stack)));
                    }
                    "Redirect" => {
                        obj.redirect = try!(RedirectDeserializer::deserialize("Redirect", stack));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

pub struct RoutingRuleSerializer;
impl RoutingRuleSerializer {
    /// Writes `obj` as an XML element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &RoutingRule,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.condition {
            // was `&ConditionSerializer::…?;` — dropped the useless reference to `()`
            ConditionSerializer::serialize(&mut writer, "Condition", value)?;
        }
        RedirectSerializer::serialize(&mut writer, "Redirect", &obj.redirect)?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

struct RoutingRulesDeserializer;
impl RoutingRulesDeserializer {
    /// Parses a wrapped `<RoutingRules>` list: consumes the wrapper element
    /// and collects its `<RoutingRule>` children.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<RoutingRule>, XmlParseError> {
        let mut obj = vec![];
        try!(start_element(tag_name, stack));
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => {
                    if name == "RoutingRule" {
                        obj.push(try!(RoutingRuleDeserializer::deserialize(
                            "RoutingRule",
                            stack
                        )));
                    } else {
                        skip_tree(stack);
                    }
                }
                DeserializerNext::Close => {
                    try!(end_element(tag_name, stack));
                    break;
                }
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        Ok(obj)
    }
}

pub struct RoutingRulesSerializer;
impl RoutingRulesSerializer {
    /// Serializes the list inside a wrapper element named `name`.
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<RoutingRule>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        for element in obj {
            RoutingRuleSerializer::serialize(writer, "RoutingRule", element)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())?;
        Ok(())
    }
}

#[derive(Default, Debug, Clone)]
pub struct Rule {
    pub abort_incomplete_multipart_upload: Option<AbortIncompleteMultipartUpload>,
    pub expiration: Option<LifecycleExpiration>,
    /// <p>Unique identifier for the rule. The value cannot be longer than 255 characters.</p>
    pub id: Option<String>,
    pub noncurrent_version_expiration: Option<NoncurrentVersionExpiration>,
    pub noncurrent_version_transition: Option<NoncurrentVersionTransition>,
    /// <p>Prefix identifying one or more objects to which the rule applies.</p>
    pub prefix: String,
    /// <p>If &#39;Enabled&#39;, the rule is currently being applied. If &#39;Disabled&#39;, the rule is not currently being applied.</p>
    pub status: String,
    pub transition: Option<Transition>,
}

struct RuleDeserializer;
impl RuleDeserializer {
    /// Parses a lifecycle `<Rule>` element from the XML event stack.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Rule, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Rule::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, ..
})) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "AbortIncompleteMultipartUpload" => { obj.abort_incomplete_multipart_upload = Some(try!( AbortIncompleteMultipartUploadDeserializer::deserialize( "AbortIncompleteMultipartUpload", stack ) )); } "Expiration" => { obj.expiration = Some(try!(LifecycleExpirationDeserializer::deserialize( "Expiration", stack ))); } "ID" => { obj.id = Some(try!(IDDeserializer::deserialize("ID", stack))); } "NoncurrentVersionExpiration" => { obj.noncurrent_version_expiration = Some(try!(NoncurrentVersionExpirationDeserializer::deserialize( "NoncurrentVersionExpiration", stack ))); } "NoncurrentVersionTransition" => { obj.noncurrent_version_transition = Some(try!(NoncurrentVersionTransitionDeserializer::deserialize( "NoncurrentVersionTransition", stack ))); } "Prefix" => { obj.prefix = try!(PrefixDeserializer::deserialize("Prefix", stack)); } "Status" => { obj.status = try!(ExpirationStatusDeserializer::deserialize("Status", stack)); } "Transition" => { obj.transition = Some(try!(TransitionDeserializer::deserialize( "Transition", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct RuleSerializer; impl RuleSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Rule, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.abort_incomplete_multipart_upload { &AbortIncompleteMultipartUploadSerializer::serialize( &mut writer, "AbortIncompleteMultipartUpload", value, )?; } if let Some(ref value) = obj.expiration { &LifecycleExpirationSerializer::serialize(&mut writer, 
// Tail of RuleSerializer::serialize (the LifecycleExpiration call was opened
// on the previous chunk).
"Expiration", value)?;
}
if let Some(ref value) = obj.id {
    writer.write(xml::writer::XmlEvent::start_element("ID"))?;
    // Bug fix: this write previously discarded its Result (no `?`), silently
    // swallowing I/O errors; propagate like every other write in this file.
    writer.write(xml::writer::XmlEvent::characters(&format!(
        "{value}",
        value = value
    )))?;
    writer.write(xml::writer::XmlEvent::end_element())?;
}
if let Some(ref value) = obj.noncurrent_version_expiration {
    // Fixed: removed the stray `&` borrow of the unit result of `?`.
    NoncurrentVersionExpirationSerializer::serialize(
        &mut writer,
        "NoncurrentVersionExpiration",
        value,
    )?;
}
if let Some(ref value) = obj.noncurrent_version_transition {
    NoncurrentVersionTransitionSerializer::serialize(
        &mut writer,
        "NoncurrentVersionTransition",
        value,
    )?;
}
writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
writer.write(xml::writer::XmlEvent::characters(&format!(
    "{value}",
    value = obj.prefix
)))?;
writer.write(xml::writer::XmlEvent::end_element())?;
writer.write(xml::writer::XmlEvent::start_element("Status"))?;
writer.write(xml::writer::XmlEvent::characters(&format!(
    "{value}",
    value = obj.status
)))?;
writer.write(xml::writer::XmlEvent::end_element())?;
if let Some(ref value) = obj.transition {
    TransitionSerializer::serialize(&mut writer, "Transition", value)?;
}
writer.write(xml::writer::XmlEvent::end_element())
}
}

/// Deserializes a flattened (unwrapped) list of `<Rule>` elements: consumes
/// consecutive sibling elements named `tag_name` until a different tag or a
/// non-start event is seen. No container element is consumed here.
struct RulesDeserializer;
impl RulesDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<Rule>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement {
                    ref name, ..
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(RuleDeserializer::deserialize(tag_name, stack))); } else { break; } } Ok(obj) } } pub struct RulesSerializer; impl RulesSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<Rule>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { RuleSerializer::serialize(writer, name, element)?; } Ok(()) } } /// <p>Container for object key name prefix and suffix filtering rules.</p> #[derive(Default, Debug, Clone)] pub struct S3KeyFilter { pub filter_rules: Option<Vec<FilterRule>>, } struct S3KeyFilterDeserializer; impl S3KeyFilterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<S3KeyFilter, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = S3KeyFilter::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "FilterRule" => { obj.filter_rules = Some(try!(FilterRuleListDeserializer::deserialize( "FilterRule", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct S3KeyFilterSerializer; impl S3KeyFilterSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &S3KeyFilter, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.filter_rules { &FilterRuleListSerializer::serialize(&mut writer, "FilterRule", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Describes an S3 location that will receive the results of the restore request.</p> #[derive(Default, Debug, Clone)] pub struct S3Location { /// <p>A list of grants that control access to the staged results.</p> pub access_control_list: Option<Vec<Grant>>, /// <p>The name of the bucket where the restore results will be placed.</p> pub bucket_name: String, /// <p>The canned ACL to apply to the restore results.</p> pub canned_acl: Option<String>, pub encryption: Option<Encryption>, /// <p>The prefix that is prepended to the restore results for this request.</p> pub prefix: String, /// <p>The class of storage used to store the restore results.</p> pub storage_class: Option<String>, /// <p>The tag-set that is applied to the restore results.</p> pub tagging: Option<Tagging>, /// <p>A list of metadata to store with the restore results in S3.</p> pub user_metadata: Option<Vec<MetadataEntry>>, } pub struct S3LocationSerializer; impl S3LocationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &S3Location, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.access_control_list { 
// Tail of S3LocationSerializer::serialize (the `if let` guarding this call
// was opened on the previous chunk). Fixed: stray `&` borrows removed.
GrantsSerializer::serialize(&mut writer, "AccessControlList", value)?;
}
writer.write(xml::writer::XmlEvent::start_element("BucketName"))?;
writer.write(xml::writer::XmlEvent::characters(&format!(
    "{value}",
    value = obj.bucket_name
)))?;
writer.write(xml::writer::XmlEvent::end_element())?;
if let Some(ref value) = obj.canned_acl {
    writer.write(xml::writer::XmlEvent::start_element("CannedACL"))?;
    // Bug fix: propagate write errors with `?`; the Result was silently dropped.
    writer.write(xml::writer::XmlEvent::characters(&format!(
        "{value}",
        value = value
    )))?;
    writer.write(xml::writer::XmlEvent::end_element())?;
}
if let Some(ref value) = obj.encryption {
    EncryptionSerializer::serialize(&mut writer, "Encryption", value)?;
}
writer.write(xml::writer::XmlEvent::start_element("Prefix"))?;
writer.write(xml::writer::XmlEvent::characters(&format!(
    "{value}",
    value = obj.prefix
)))?;
writer.write(xml::writer::XmlEvent::end_element())?;
if let Some(ref value) = obj.storage_class {
    writer.write(xml::writer::XmlEvent::start_element("StorageClass"))?;
    // Bug fix: propagate write errors with `?`; the Result was silently dropped.
    writer.write(xml::writer::XmlEvent::characters(&format!(
        "{value}",
        value = value
    )))?;
    writer.write(xml::writer::XmlEvent::end_element())?;
}
if let Some(ref value) = obj.tagging {
    TaggingSerializer::serialize(&mut writer, "Tagging", value)?;
}
if let Some(ref value) = obj.user_metadata {
    UserMetadataSerializer::serialize(&mut writer, "UserMetadata", value)?;
}
writer.write(xml::writer::XmlEvent::end_element())
}
}

/// <p>Specifies the use of SSE-KMS to encrypt delivered Inventory reports.</p>
#[derive(Default, Debug, Clone)]
pub struct SSEKMS {
    /// <p>Specifies the ID of the AWS Key Management Service (KMS) master encryption key to use for encrypting Inventory reports.</p>
    pub key_id: String,
}

/// Deserializes an `SSEKMS` element by reading its single `<KeyId>` child;
/// unknown children are skipped.
struct SSEKMSDeserializer;
impl SSEKMSDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<SSEKMS, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = SSEKMS::default();

        loop {
            let next_event = match stack.peek() {
Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "KeyId" => { obj.key_id = try!(SSEKMSKeyIdDeserializer::deserialize("KeyId", stack)); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct SSEKMSSerializer; impl SSEKMSSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &SSEKMS, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("KeyId"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.key_id )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct SSEKMSKeyIdDeserializer; impl SSEKMSKeyIdDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct SSEKMSKeyIdSerializer; impl SSEKMSKeyIdSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Specifies the use of SSE-S3 to encrypt delievered Inventory reports.</p> #[derive(Default, Debug, Clone)] pub struct SSES3 {} struct 
SSES3Deserializer; impl SSES3Deserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<SSES3, XmlParseError> { try!(start_element(tag_name, stack)); let obj = SSES3::default(); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct SSES3Serializer; impl SSES3Serializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &SSES3, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::end_element()) } } #[derive(Default, Debug, Clone)] pub struct SelectObjectContentEventStream { /// <p>The Continuation Event.</p> pub cont: Option<ContinuationEvent>, /// <p>The End Event.</p> pub end: Option<EndEvent>, /// <p>The Progress Event.</p> pub progress: Option<ProgressEvent>, /// <p>The Records Event.</p> pub records: Option<RecordsEvent>, /// <p>The Stats Event.</p> pub stats: Option<StatsEvent>, } struct SelectObjectContentEventStreamDeserializer; impl SelectObjectContentEventStreamDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<SelectObjectContentEventStream, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = SelectObjectContentEventStream::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Cont" => { obj.cont = Some(try!(ContinuationEventDeserializer::deserialize( "Cont", stack ))); } "End" => { obj.end = Some(try!(EndEventDeserializer::deserialize("End", stack))); } "Progress" => { obj.progress = Some(try!(ProgressEventDeserializer::deserialize( "Progress", stack ))); } "Records" => { obj.records = Some(try!(RecordsEventDeserializer::deserialize( "Records", stack ))); } "Stats" => { obj.stats = Some(try!(StatsEventDeserializer::deserialize("Stats", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone)] pub struct SelectObjectContentOutput { pub payload: Option<SelectObjectContentEventStream>, } struct SelectObjectContentOutputDeserializer; impl SelectObjectContentOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<SelectObjectContentOutput, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = SelectObjectContentOutput::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Payload" => { obj.payload = Some(try!( SelectObjectContentEventStreamDeserializer::deserialize( "Payload", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Request to filter the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. 
Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. For more information, go to <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html">S3Select API Documentation</a>.</p> #[derive(Default, Debug, Clone)] pub struct SelectObjectContentRequest { /// <p>The S3 Bucket.</p> pub bucket: String, /// <p>The expression that is used to query the object.</p> pub expression: String, /// <p>The type of the provided expression (e.g., SQL).</p> pub expression_type: String, /// <p>Describes the format of the data in the object that is being queried.</p> pub input_serialization: InputSerialization, /// <p>The Object Key.</p> pub key: String, /// <p>Describes the format of the data that you want Amazon S3 to return in response.</p> pub output_serialization: OutputSerialization, /// <p>Specifies if periodic request progress information should be enabled.</p> pub request_progress: Option<RequestProgress>, /// <p>The SSE Algorithm used to encrypt the object. For more information, go to <a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html"> Server-Side Encryption (Using Customer-Provided Encryption Keys</a>.</p> pub sse_customer_algorithm: Option<String>, /// <p>The SSE Customer Key. For more information, go to <a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html"> Server-Side Encryption (Using Customer-Provided Encryption Keys</a>.</p> pub sse_customer_key: Option<String>, /// <p>The SSE Customer Key MD5. 
For more information, go to <a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html"> Server-Side Encryption (Using Customer-Provided Encryption Keys</a>.</p> pub sse_customer_key_md5: Option<String>, } pub struct SelectObjectContentRequestSerializer; impl SelectObjectContentRequestSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &SelectObjectContentRequest, xmlns: &str, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name).default_ns(xmlns))?; ExpressionSerializer::serialize(&mut writer, "Expression", &obj.expression)?; ExpressionTypeSerializer::serialize(&mut writer, "ExpressionType", &obj.expression_type)?; InputSerializationSerializer::serialize( &mut writer, "InputSerialization", &obj.input_serialization, )?; OutputSerializationSerializer::serialize( &mut writer, "OutputSerialization", &obj.output_serialization, )?; if let Some(ref value) = obj.request_progress { &RequestProgressSerializer::serialize(&mut writer, "RequestProgress", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Describes the parameters for Select job types.</p> #[derive(Default, Debug, Clone)] pub struct SelectParameters { /// <p>The expression that is used to query the object.</p> pub expression: String, /// <p>The type of the provided expression (e.g., SQL).</p> pub expression_type: String, /// <p>Describes the serialization format of the object.</p> pub input_serialization: InputSerialization, /// <p>Describes how the results of the Select job are serialized.</p> pub output_serialization: OutputSerialization, } pub struct SelectParametersSerializer; impl SelectParametersSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &SelectParameters, ) -> Result<(), xml::writer::Error> where W: Write, { 
// Body of SelectParametersSerializer::serialize (signature on previous chunk):
// writes Expression, ExpressionType, then the nested serialization configs.
writer.write(xml::writer::XmlEvent::start_element(name))?;
writer.write(xml::writer::XmlEvent::start_element("Expression"))?;
writer.write(xml::writer::XmlEvent::characters(&format!(
    "{value}",
    value = obj.expression
)))?;
writer.write(xml::writer::XmlEvent::end_element())?;
writer.write(xml::writer::XmlEvent::start_element("ExpressionType"))?;
writer.write(xml::writer::XmlEvent::characters(&format!(
    "{value}",
    value = obj.expression_type
)))?;
writer.write(xml::writer::XmlEvent::end_element())?;
InputSerializationSerializer::serialize(
    &mut writer,
    "InputSerialization",
    &obj.input_serialization,
)?;
OutputSerializationSerializer::serialize(
    &mut writer,
    "OutputSerialization",
    &obj.output_serialization,
)?;
writer.write(xml::writer::XmlEvent::end_element())
}
}

/// Deserializes a server-side-encryption algorithm name (plain text element).
struct ServerSideEncryptionDeserializer;
impl ServerSideEncryptionDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}

/// Serializes a server-side-encryption algorithm name as a text element.
pub struct ServerSideEncryptionSerializer;
impl ServerSideEncryptionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// <p>Describes the default server-side encryption to apply to new objects in the bucket. If Put Object request does not specify any server-side encryption, this default encryption will be applied.</p>
#[derive(Default, Debug, Clone)]
pub struct ServerSideEncryptionByDefault {
    /// <p>KMS master key ID to use for the default encryption. This parameter is allowed if SSEAlgorithm is aws:kms.</p>
    pub kms_master_key_id: Option<String>,
    /// <p>Server-side encryption algorithm to use for the default encryption.</p>
    pub sse_algorithm: String,
}

/// Deserializes a `ServerSideEncryptionByDefault` element:
/// `KMSMasterKeyID` is optional, `SSEAlgorithm` is required.
struct ServerSideEncryptionByDefaultDeserializer;
impl ServerSideEncryptionByDefaultDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ServerSideEncryptionByDefault, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = ServerSideEncryptionByDefault::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "KMSMasterKeyID" => {
                        obj.kms_master_key_id = Some(try!(SSEKMSKeyIdDeserializer::deserialize(
                            "KMSMasterKeyID",
                            stack
                        )));
                    }
                    "SSEAlgorithm" => {
                        obj.sse_algorithm = try!(ServerSideEncryptionDeserializer::deserialize(
                            "SSEAlgorithm",
                            stack
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serializes a `ServerSideEncryptionByDefault` value (optional
/// `KMSMasterKeyID`, required `SSEAlgorithm`).
pub struct ServerSideEncryptionByDefaultSerializer;
impl ServerSideEncryptionByDefaultSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &ServerSideEncryptionByDefault,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.kms_master_key_id {
            writer.write(xml::writer::XmlEvent::start_element("KMSMasterKeyID"))?;
            // Bug fix: propagate write errors with `?`; the Result was dropped.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::start_element("SSEAlgorithm"))?;
writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.sse_algorithm )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for server-side encryption configuration rules. Currently S3 supports one rule only.</p> #[derive(Default, Debug, Clone)] pub struct ServerSideEncryptionConfiguration { /// <p>Container for information about a particular server-side encryption configuration rule.</p> pub rules: Vec<ServerSideEncryptionRule>, } struct ServerSideEncryptionConfigurationDeserializer; impl ServerSideEncryptionConfigurationDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ServerSideEncryptionConfiguration, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ServerSideEncryptionConfiguration::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Rule" => { obj.rules = try!(ServerSideEncryptionRulesDeserializer::deserialize( "Rule", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ServerSideEncryptionConfigurationSerializer; impl ServerSideEncryptionConfigurationSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &ServerSideEncryptionConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; ServerSideEncryptionRulesSerializer::serialize(&mut writer, "Rule", &obj.rules)?; writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for information about a particular server-side encryption configuration rule.</p> #[derive(Default, Debug, Clone)] pub struct ServerSideEncryptionRule { /// <p>Describes the default server-side encryption to apply to new objects in the bucket. If Put Object request does not specify any server-side encryption, this default encryption will be applied.</p> pub apply_server_side_encryption_by_default: Option<ServerSideEncryptionByDefault>, } struct ServerSideEncryptionRuleDeserializer; impl ServerSideEncryptionRuleDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ServerSideEncryptionRule, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ServerSideEncryptionRule::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "ApplyServerSideEncryptionByDefault" => { obj.apply_server_side_encryption_by_default = Some(try!( ServerSideEncryptionByDefaultDeserializer::deserialize( "ApplyServerSideEncryptionByDefault", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ServerSideEncryptionRuleSerializer; impl ServerSideEncryptionRuleSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &ServerSideEncryptionRule, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.apply_server_side_encryption_by_default { &ServerSideEncryptionByDefaultSerializer::serialize( &mut writer, "ApplyServerSideEncryptionByDefault", value, )?; } writer.write(xml::writer::XmlEvent::end_element()) } } struct ServerSideEncryptionRulesDeserializer; impl ServerSideEncryptionRulesDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<ServerSideEncryptionRule>, XmlParseError> { let mut obj = vec![]; loop { let consume_next_tag = match stack.peek() { Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => name.local_name == tag_name, _ => false, }; if consume_next_tag { obj.push(try!(ServerSideEncryptionRuleDeserializer::deserialize( tag_name, stack ))); } else { break; } } Ok(obj) } } pub struct ServerSideEncryptionRulesSerializer; impl ServerSideEncryptionRulesSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<ServerSideEncryptionRule>, ) -> Result<(), xml::writer::Error> where W: Write, { for element in obj { ServerSideEncryptionRuleSerializer::serialize(writer, name, element)?; } Ok(()) } } struct SizeDeserializer; impl SizeDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<i64, XmlParseError> { try!(start_element(tag_name, stack)); let obj = i64::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Container for filters that define which source objects should be replicated.</p> #[derive(Default, Debug, Clone)] pub struct SourceSelectionCriteria { /// <p>Container for filter information of selection of KMS Encrypted S3 objects.</p> pub sse_kms_encrypted_objects: Option<SseKmsEncryptedObjects>, } struct SourceSelectionCriteriaDeserializer; impl SourceSelectionCriteriaDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<SourceSelectionCriteria, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = SourceSelectionCriteria::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "SseKmsEncryptedObjects" => { obj.sse_kms_encrypted_objects = Some(try!(SseKmsEncryptedObjectsDeserializer::deserialize( "SseKmsEncryptedObjects", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct SourceSelectionCriteriaSerializer; impl SourceSelectionCriteriaSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &SourceSelectionCriteria, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.sse_kms_encrypted_objects { &SseKmsEncryptedObjectsSerializer::serialize( &mut writer, "SseKmsEncryptedObjects", value, )?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// <p>Container for filter information of selection of KMS Encrypted S3 objects.</p> #[derive(Default, Debug, Clone)] pub struct SseKmsEncryptedObjects { /// <p>The replication for KMS encrypted S3 objects is disabled if status is not Enabled.</p> pub status: String, } struct SseKmsEncryptedObjectsDeserializer; impl SseKmsEncryptedObjectsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<SseKmsEncryptedObjects, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = SseKmsEncryptedObjects::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Status" => { obj.status = try!(SseKmsEncryptedObjectsStatusDeserializer::deserialize( "Status", stack )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } pub struct SseKmsEncryptedObjectsSerializer; impl SseKmsEncryptedObjectsSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &SseKmsEncryptedObjects, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::start_element("Status"))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.status )))?; writer.write(xml::writer::XmlEvent::end_element())?; writer.write(xml::writer::XmlEvent::end_element()) } } struct SseKmsEncryptedObjectsStatusDeserializer; impl SseKmsEncryptedObjectsStatusDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct SseKmsEncryptedObjectsStatusSerializer; impl SseKmsEncryptedObjectsStatusSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct StartAfterDeserializer; impl StartAfterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) 
}
}
/// Writes a plain string as `<name>value</name>`.
pub struct StartAfterSerializer;
impl StartAfterSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct Stats {
    /// <p>Total number of uncompressed object bytes processed.</p>
    pub bytes_processed: Option<i64>,
    /// <p>Total number of object bytes scanned.</p>
    pub bytes_scanned: Option<i64>,
}
/// Decodes a `Stats` structure from its XML element.
struct StatsDeserializer;
impl StatsDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Stats, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Stats::default();
        // Peek at each event: a start element is dispatched by name below,
        // the matching end element terminates the loop, anything else is skipped.
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
    // Known children of <Stats>; unknown subtrees are skipped wholesale.
    "BytesProcessed" => {
        obj.bytes_processed = Some(try!(BytesProcessedDeserializer::deserialize(
            "BytesProcessed",
            stack
        )));
    }
    "BytesScanned" => {
        obj.bytes_scanned = Some(try!(BytesScannedDeserializer::deserialize(
            "BytesScanned",
            stack
        )));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
#[derive(Default, Debug, Clone)]
pub struct StatsEvent {
    /// <p>The Stats event details.</p>
    pub details: Option<Stats>,
}
/// Decodes a `StatsEvent` structure from its XML element.
struct StatsEventDeserializer;
impl StatsEventDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<StatsEvent, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = StatsEvent::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
    "Details" => {
        obj.details = Some(try!(StatsDeserializer::deserialize("Details", stack)));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Reads the character content of a `StorageClass` element into a `String`.
struct StorageClassDeserializer;
impl StorageClassDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a plain string as `<name>value</name>`.
pub struct StorageClassSerializer;
impl StorageClassSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct StorageClassAnalysis {
    /// <p>A container used to describe how data related to the storage class analysis should be exported.</p>
    pub data_export: Option<StorageClassAnalysisDataExport>,
}
/// Decodes a `StorageClassAnalysis` structure from its XML element.
struct StorageClassAnalysisDeserializer;
impl StorageClassAnalysisDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<StorageClassAnalysis, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = StorageClassAnalysis::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
    "DataExport" => {
        obj.data_export = Some(try!(
            StorageClassAnalysisDataExportDeserializer::deserialize(
                "DataExport",
                stack
            )
        ));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Serializes a `StorageClassAnalysis` value as an XML element named `name`.
pub struct StorageClassAnalysisSerializer;
impl StorageClassAnalysisSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &StorageClassAnalysis,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.data_export {
            // Fixed: the generated code took a useless `&` reference of the `()` result
            // of `serialize(...)?` (clippy: unused borrow). Propagate the error directly.
            StorageClassAnalysisDataExportSerializer::serialize(&mut writer, "DataExport", value)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct StorageClassAnalysisDataExport {
    /// <p>The place to store the data for an analysis.</p>
    pub destination: AnalyticsExportDestination,
    /// <p>The version of the output schema to use when exporting data. Must be V_1.</p>
    pub output_schema_version: String,
}
/// Decodes a `StorageClassAnalysisDataExport` structure from its XML element.
struct StorageClassAnalysisDataExportDeserializer;
impl StorageClassAnalysisDataExportDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<StorageClassAnalysisDataExport, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = StorageClassAnalysisDataExport::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
    "Destination" => {
        obj.destination = try!(AnalyticsExportDestinationDeserializer::deserialize(
            "Destination",
            stack
        ));
    }
    "OutputSchemaVersion" => {
        obj.output_schema_version =
            try!(StorageClassAnalysisSchemaVersionDeserializer::deserialize(
                "OutputSchemaVersion",
                stack
            ));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Serializes a `StorageClassAnalysisDataExport` value as an XML element named `name`.
pub struct StorageClassAnalysisDataExportSerializer;
impl StorageClassAnalysisDataExportSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &StorageClassAnalysisDataExport,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        // Both members are required, so both are always emitted.
        AnalyticsExportDestinationSerializer::serialize(
            &mut writer,
            "Destination",
            &obj.destination,
        )?;
        writer.write(xml::writer::XmlEvent::start_element("OutputSchemaVersion"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.output_schema_version
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Reads the character content of a schema-version element into a `String`.
struct StorageClassAnalysisSchemaVersionDeserializer;
impl StorageClassAnalysisSchemaVersionDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a plain string as `<name>value</name>`.
pub struct StorageClassAnalysisSchemaVersionSerializer;
impl StorageClassAnalysisSchemaVersionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
writer.write(xml::writer::XmlEvent::end_element())
}
}
/// Reads the character content of a `Suffix` element into a `String`.
struct SuffixDeserializer;
impl SuffixDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a plain string as `<name>value</name>`.
pub struct SuffixSerializer;
impl SuffixSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct Tag {
    /// <p>Name of the tag.</p>
    pub key: String,
    /// <p>Value of the tag.</p>
    pub value: String,
}
/// Decodes a `Tag` structure from its XML element.
struct TagDeserializer;
impl TagDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Tag, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Tag::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
    "Key" => {
        obj.key = try!(ObjectKeyDeserializer::deserialize("Key", stack));
    }
    "Value" => {
        obj.value = try!(ValueDeserializer::deserialize("Value", stack));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Serializes a `Tag` as `<name><Key>…</Key><Value>…</Value></name>`.
pub struct TagSerializer;
impl TagSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Tag,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::start_element("Key"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.key
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::start_element("Value"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.value
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Decodes a wrapped list of `<Tag>` children into a `Vec<Tag>`.
struct TagSetDeserializer;
impl TagSetDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<Tag>, XmlParseError> {
        let mut obj = vec![];
        try!(start_element(tag_name, stack));
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement {
                    ref name, ..
})) => {
    DeserializerNext::Element(name.local_name.to_owned())
}
_ => DeserializerNext::Skip,
};
match next_event {
    DeserializerNext::Element(name) => {
        if name == "Tag" {
            obj.push(try!(TagDeserializer::deserialize("Tag", stack)));
        } else {
            skip_tree(stack);
        }
    }
    DeserializerNext::Close => {
        try!(end_element(tag_name, stack));
        break;
    }
    DeserializerNext::Skip => {
        stack.next();
    }
}
}
Ok(obj)
}
}
/// Serializes a `Vec<Tag>` as `<name><Tag>…</Tag>…</name>`.
pub struct TagSetSerializer;
impl TagSetSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<Tag>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        for element in obj {
            TagSerializer::serialize(writer, "Tag", element)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())?;
        Ok(())
    }
}
#[derive(Default, Debug, Clone)]
pub struct Tagging {
    pub tag_set: Vec<Tag>,
}
/// Serializes a `Tagging` value as an XML element named `name`.
pub struct TaggingSerializer;
impl TaggingSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Tagging,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        TagSetSerializer::serialize(&mut writer, "TagSet", &obj.tag_set)?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Reads the character content of a `TargetBucket` element into a `String`.
struct TargetBucketDeserializer;
impl TargetBucketDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a plain string as `<name>value</name>`.
pub struct TargetBucketSerializer;
impl TargetBucketSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value =
obj.to_string()
)))?;
writer.write(xml::writer::XmlEvent::end_element())
}
}
#[derive(Default, Debug, Clone)]
pub struct TargetGrant {
    pub grantee: Option<Grantee>,
    /// <p>Logging permissions assigned to the Grantee for the bucket.</p>
    pub permission: Option<String>,
}
/// Decodes a `TargetGrant` structure from its XML element.
struct TargetGrantDeserializer;
impl TargetGrantDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<TargetGrant, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = TargetGrant::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Grantee" => {
                        obj.grantee =
                            Some(try!(GranteeDeserializer::deserialize("Grantee", stack)));
                    }
                    "Permission" => {
                        obj.permission = Some(try!(BucketLogsPermissionDeserializer::deserialize(
                            "Permission",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Serializes a `TargetGrant` value as an XML element named `name`.
pub struct TargetGrantSerializer;
impl TargetGrantSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &TargetGrant,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.grantee {
            // Fixed: dropped the useless `&` borrow of the `()` produced by `serialize(...)?`.
            GranteeSerializer::serialize(&mut writer, "Grantee", value)?;
        }
        if let Some(ref value) = obj.permission {
            writer.write(xml::writer::XmlEvent::start_element("Permission"))?;
            // Fixed: the characters write previously discarded its Result, silently
            // ignoring writer errors; propagate with `?` like every other write here.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
struct TargetGrantsDeserializer;
impl
TargetGrantsDeserializer {
    // Decodes a wrapped list of <Grant> children into a Vec<TargetGrant>.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<TargetGrant>, XmlParseError> {
        let mut obj = vec![];
        try!(start_element(tag_name, stack));
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => {
                    if name == "Grant" {
                        obj.push(try!(TargetGrantDeserializer::deserialize("Grant", stack)));
                    } else {
                        skip_tree(stack);
                    }
                }
                DeserializerNext::Close => {
                    try!(end_element(tag_name, stack));
                    break;
                }
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        Ok(obj)
    }
}
/// Serializes a `Vec<TargetGrant>` as `<name><Grant>…</Grant>…</name>`.
pub struct TargetGrantsSerializer;
impl TargetGrantsSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<TargetGrant>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        for element in obj {
            TargetGrantSerializer::serialize(writer, "Grant", element)?;
        }
        writer.write(xml::writer::XmlEvent::end_element())?;
        Ok(())
    }
}
/// Reads the character content of a `TargetPrefix` element into a `String`.
struct TargetPrefixDeserializer;
impl TargetPrefixDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a plain string as `<name>value</name>`.
pub struct TargetPrefixSerializer;
impl TargetPrefixSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
writer.write(xml::writer::XmlEvent::end_element())
}
}
/// Writes a plain string as `<name>value</name>` (Glacier restore tier).
pub struct TierSerializer;
impl TierSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Reads the character content of a `Token` element into a `String`.
struct TokenDeserializer;
impl TokenDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a plain string as `<name>value</name>`.
pub struct TokenSerializer;
impl TokenSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Reads the character content of a `TopicArn` element into a `String`.
struct TopicArnDeserializer;
impl TopicArnDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a plain string as `<name>value</name>`.
pub struct TopicArnSerializer;
impl TopicArnSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// <p>Container for
specifying the configuration when you want Amazon S3 to publish events to an Amazon Simple Notification Service (Amazon SNS) topic.</p>
#[derive(Default, Debug, Clone)]
pub struct TopicConfiguration {
    pub events: Vec<String>,
    pub filter: Option<NotificationConfigurationFilter>,
    pub id: Option<String>,
    /// <p>Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects events of specified type.</p>
    pub topic_arn: String,
}
/// Decodes a `TopicConfiguration` structure from its XML element.
struct TopicConfigurationDeserializer;
impl TopicConfigurationDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<TopicConfiguration, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = TopicConfiguration::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
    "Event" => {
        obj.events = try!(EventListDeserializer::deserialize("Event", stack));
    }
    "Filter" => {
        obj.filter = Some(try!(
            NotificationConfigurationFilterDeserializer::deserialize("Filter", stack)
        ));
    }
    "Id" => {
        obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack)));
    }
    "Topic" => {
        obj.topic_arn = try!(TopicArnDeserializer::deserialize("Topic", stack));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Serializes a `TopicConfiguration` value as an XML element named `name`.
pub struct TopicConfigurationSerializer;
impl TopicConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &TopicConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        EventListSerializer::serialize(&mut writer, "Event", &obj.events)?;
        if let Some(ref value) = obj.filter {
            // Fixed: dropped the useless `&` borrow of the `()` produced by `serialize(...)?`.
            NotificationConfigurationFilterSerializer::serialize(&mut writer, "Filter", value)?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("Id"))?;
            // Fixed: this characters write previously discarded its Result, silently
            // ignoring writer errors; propagate with `?` like the surrounding writes.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        // `Topic` is required, so it is always emitted.
        writer.write(xml::writer::XmlEvent::start_element("Topic"))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.topic_arn
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct TopicConfigurationDeprecated {
    pub events: Option<Vec<String>>,
    pub id: Option<String>,
    /// <p>Amazon SNS topic to which Amazon S3 will publish a message to report the specified events for the bucket.</p>
    pub topic: Option<String>,
}
struct TopicConfigurationDeprecatedDeserializer;
impl TopicConfigurationDeprecatedDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
tag_name: &str,
stack: &mut T,
) -> Result<TopicConfigurationDeprecated, XmlParseError> {
    try!(start_element(tag_name, stack));
    let mut obj = TopicConfigurationDeprecated::default();
    loop {
        let next_event = match stack.peek() {
            Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
            Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                DeserializerNext::Element(name.local_name.to_owned())
            }
            _ => DeserializerNext::Skip,
        };
        match next_event {
            DeserializerNext::Element(name) => match &name[..] {
                "Event" => {
                    obj.events = Some(try!(EventListDeserializer::deserialize("Event", stack)));
                }
                "Id" => {
                    obj.id = Some(try!(NotificationIdDeserializer::deserialize("Id", stack)));
                }
                "Topic" => {
                    obj.topic = Some(try!(TopicArnDeserializer::deserialize("Topic", stack)));
                }
                _ => skip_tree(stack),
            },
            DeserializerNext::Close => break,
            DeserializerNext::Skip => {
                stack.next();
            }
        }
    }
    try!(end_element(tag_name, stack));
    Ok(obj)
}
}
/// Serializes a `TopicConfigurationDeprecated` value as an XML element named `name`.
pub struct TopicConfigurationDeprecatedSerializer;
impl TopicConfigurationDeprecatedSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &TopicConfigurationDeprecated,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.events {
            // Fixed: dropped the useless `&` borrow of the `()` produced by `serialize(...)?`.
            EventListSerializer::serialize(&mut writer, "Event", value)?;
        }
        if let Some(ref value) = obj.id {
            writer.write(xml::writer::XmlEvent::start_element("Id"))?;
            // Fixed: the two characters writes below previously discarded their Results,
            // silently ignoring writer errors; propagate with `?`.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.topic {
            writer.write(xml::writer::XmlEvent::start_element("Topic"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
struct TopicConfigurationListDeserializer;
impl
TopicConfigurationListDeserializer {
    // Decodes a flattened (unwrapped) list: repeated sibling elements all named
    // `tag_name`; stops at the first non-matching event.
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<TopicConfiguration>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    name.local_name == tag_name
                }
                _ => false,
            };
            if consume_next_tag {
                obj.push(try!(TopicConfigurationDeserializer::deserialize(
                    tag_name, stack
                )));
            } else {
                break;
            }
        }
        Ok(obj)
    }
}
/// Serializes each element of the list as a sibling element named `name` (flattened list).
pub struct TopicConfigurationListSerializer;
impl TopicConfigurationListSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<TopicConfiguration>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        for element in obj {
            TopicConfigurationSerializer::serialize(writer, name, element)?;
        }
        Ok(())
    }
}
#[derive(Default, Debug, Clone)]
pub struct Transition {
    /// <p>Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.</p>
    pub date: Option<String>,
    /// <p>Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer.</p>
    pub days: Option<i64>,
    /// <p>The class of storage used to store the object.</p>
    pub storage_class: Option<String>,
}
/// Decodes a `Transition` structure from its XML element.
struct TransitionDeserializer;
impl TransitionDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Transition, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = Transition::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..]
{
    "Date" => {
        obj.date = Some(try!(DateDeserializer::deserialize("Date", stack)));
    }
    "Days" => {
        obj.days = Some(try!(DaysDeserializer::deserialize("Days", stack)));
    }
    "StorageClass" => {
        obj.storage_class = Some(try!(
            TransitionStorageClassDeserializer::deserialize("StorageClass", stack)
        ));
    }
    _ => skip_tree(stack),
},
DeserializerNext::Close => break,
DeserializerNext::Skip => {
    stack.next();
}
}
}
try!(end_element(tag_name, stack));
Ok(obj)
}
}
/// Serializes a `Transition` value as an XML element named `name`.
pub struct TransitionSerializer;
impl TransitionSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Transition,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        // Fixed: all three optional-member characters writes below previously
        // discarded their Results, silently ignoring writer errors; propagate with `?`.
        if let Some(ref value) = obj.date {
            writer.write(xml::writer::XmlEvent::start_element("Date"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.days {
            writer.write(xml::writer::XmlEvent::start_element("Days"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.storage_class {
            writer.write(xml::writer::XmlEvent::start_element("StorageClass"))?;
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Decodes a flattened list of `Transition` sibling elements.
struct TransitionListDeserializer;
impl TransitionListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<Transition>, XmlParseError> {
        let mut obj = vec![];
        loop {
            let consume_next_tag = match stack.peek() {
                Some(&Ok(XmlEvent::StartElement {
                    ref name, ..
})) => name.local_name == tag_name,
_ => false,
};
if consume_next_tag {
    obj.push(try!(TransitionDeserializer::deserialize(tag_name, stack)));
} else {
    break;
}
}
Ok(obj)
}
}
/// Serializes each element of the list as a sibling element named `name` (flattened list).
pub struct TransitionListSerializer;
impl TransitionListSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &Vec<Transition>,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        for element in obj {
            TransitionSerializer::serialize(writer, name, element)?;
        }
        Ok(())
    }
}
/// Reads the character content of a transition storage-class element into a `String`.
struct TransitionStorageClassDeserializer;
impl TransitionStorageClassDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a plain string as `<name>value</name>`.
pub struct TransitionStorageClassSerializer;
impl TransitionStorageClassSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Reads the character content of a `Type` element into a `String`.
struct TypeDeserializer;
impl TypeDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a plain string as `<name>value</name>`.
pub struct TypeSerializer;
impl TypeSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
writer.write(xml::writer::XmlEvent::end_element())
}
}
/// Reads the character content of a `URI` element into a `String`.
struct URIDeserializer;
impl URIDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a plain string as `<name>value</name>`.
pub struct URISerializer;
impl URISerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
/// Reads the character content of an `UploadIdMarker` element into a `String`.
struct UploadIdMarkerDeserializer;
impl UploadIdMarkerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
/// Writes a plain string as `<name>value</name>`.
pub struct UploadIdMarkerSerializer;
impl UploadIdMarkerSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &String,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}
#[derive(Default, Debug, Clone)]
pub struct UploadPartCopyOutput {
    pub copy_part_result: Option<CopyPartResult>,
    /// <p>The version of the source object that was copied, if you have enabled versioning on the source bucket.</p>
    pub copy_source_version_id: Option<String>,
    pub request_charged: Option<String>,
    /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header
confirming the encryption algorithm used.</p>
    pub sse_customer_algorithm: Option<String>,
    /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p>
    pub sse_customer_key_md5: Option<String>,
    /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p>
    pub ssekms_key_id: Option<String>,
    /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p>
    pub server_side_encryption: Option<String>,
}
/// Decodes an `UploadPartCopyOutput` from the response XML body.
/// Only `CopyPartResult` appears in the body; the other members come from HTTP headers
/// and are filled in elsewhere, which is why this match has a single arm.
struct UploadPartCopyOutputDeserializer;
impl UploadPartCopyOutputDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<UploadPartCopyOutput, XmlParseError> {
        try!(start_element(tag_name, stack));
        let mut obj = UploadPartCopyOutput::default();
        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };
            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "CopyPartResult" => {
                        obj.copy_part_result = Some(try!(CopyPartResultDeserializer::deserialize(
                            "CopyPartResult",
                            stack
                        )));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }
        try!(end_element(tag_name, stack));
        Ok(obj)
    }
}
#[derive(Default, Debug, Clone)]
pub struct UploadPartCopyRequest {
    pub bucket: String,
    /// <p>The name of the source bucket and key name of the source object, separated by a slash (/).
Must be URL-encoded.</p> pub copy_source: String, /// <p>Copies the object if its entity tag (ETag) matches the specified tag.</p> pub copy_source_if_match: Option<String>, /// <p>Copies the object if it has been modified since the specified time.</p> pub copy_source_if_modified_since: Option<String>, /// <p>Copies the object if its entity tag (ETag) is different than the specified ETag.</p> pub copy_source_if_none_match: Option<String>, /// <p>Copies the object if it hasn&#39;t been modified since the specified time.</p> pub copy_source_if_unmodified_since: Option<String>, /// <p>The range of bytes to copy from the source object. The range value must use the form bytes=first-last, where the first and last are the zero-based byte offsets to copy. For example, bytes=0-9 indicates that you want to copy the first ten bytes of the source. You can copy a range only if the source object is greater than 5 GB.</p> pub copy_source_range: Option<String>, /// <p>Specifies the algorithm to use when decrypting the source object (e.g., AES256).</p> pub copy_source_sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use to decrypt the source object. The encryption key provided in this header must be one that was used when the source object was created.</p> pub copy_source_sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub copy_source_sse_customer_key_md5: Option<String>, pub key: String, /// <p>Part number of part being copied. 
This is a positive integer between 1 and 10,000.</p> pub part_number: i64, pub request_payer: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>Upload ID identifying the multipart upload whose part is being copied.</p> pub upload_id: String, } #[derive(Default, Debug, Clone)] pub struct UploadPartOutput { /// <p>Entity tag for the uploaded object.</p> pub e_tag: Option<String>, pub request_charged: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.</p> pub sse_customer_algorithm: Option<String>, /// <p>If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round trip message integrity verification of the customer-provided encryption key.</p> pub sse_customer_key_md5: Option<String>, /// <p>If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.</p> pub ssekms_key_id: Option<String>, /// <p>The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).</p> pub 
server_side_encryption: Option<String>, } struct UploadPartOutputDeserializer; impl UploadPartOutputDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<UploadPartOutput, XmlParseError> { try!(start_element(tag_name, stack)); let obj = UploadPartOutput::default(); try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug)] pub struct UploadPartRequest { /// <p>Object data.</p> pub body: Option<StreamingBody>, /// <p>Name of the bucket to which the multipart upload was initiated.</p> pub bucket: String, /// <p>Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.</p> pub content_length: Option<i64>, /// <p>The base64-encoded 128-bit MD5 digest of the part data.</p> pub content_md5: Option<String>, /// <p>Object key for which the multipart upload was initiated.</p> pub key: String, /// <p>Part number of part being uploaded. This is a positive integer between 1 and 10,000.</p> pub part_number: i64, pub request_payer: Option<String>, /// <p>Specifies the algorithm to use to when encrypting the object (e.g., AES256).</p> pub sse_customer_algorithm: Option<String>, /// <p>Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm header. This must be the same encryption key specified in the initiate multipart upload request.</p> pub sse_customer_key: Option<String>, /// <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
Amazon S3 uses this header for a message integrity check to ensure the encryption key was transmitted without error.</p> pub sse_customer_key_md5: Option<String>, /// <p>Upload ID identifying the multipart upload whose part is being uploaded.</p> pub upload_id: String, } pub struct UserMetadataSerializer; impl UserMetadataSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &Vec<MetadataEntry>, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; for element in obj { MetadataEntrySerializer::serialize(writer, "MetadataEntry", element)?; } writer.write(xml::writer::XmlEvent::end_element())?; Ok(()) } } struct ValueDeserializer; impl ValueDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct ValueSerializer; impl ValueSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; writer.write(xml::writer::XmlEvent::characters(&format!( "{value}", value = obj.to_string() )))?; writer.write(xml::writer::XmlEvent::end_element()) } } struct VersionIdMarkerDeserializer; impl VersionIdMarkerDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } pub struct VersionIdMarkerSerializer; impl VersionIdMarkerSerializer { #[allow(unused_variables, warnings)] pub fn serialize<W>( mut writer: &mut EventWriter<W>, name: &str, obj: &String, ) -> 
Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        writer.write(xml::writer::XmlEvent::characters(&format!(
            "{value}",
            value = obj.to_string()
        )))?;
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Bucket versioning state, as carried in the PutBucketVersioning request body.
#[derive(Default, Debug, Clone)]
pub struct VersioningConfiguration {
    /// <p>Specifies whether MFA delete is enabled in the bucket versioning configuration. This element is only returned if the bucket has been configured with MFA delete. If the bucket has never been so configured, this element is not returned.</p>
    pub mfa_delete: Option<String>,
    /// <p>The versioning state of the bucket.</p>
    pub status: Option<String>,
}

/// Serializes a `VersioningConfiguration` into XML writer events
/// (`<MfaDelete>` / `<Status>` children under the element `name`).
pub struct VersioningConfigurationSerializer;
impl VersioningConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut EventWriter<W>,
        name: &str,
        obj: &VersioningConfiguration,
    ) -> Result<(), xml::writer::Error>
    where
        W: Write,
    {
        writer.write(xml::writer::XmlEvent::start_element(name))?;
        if let Some(ref value) = obj.mfa_delete {
            writer.write(xml::writer::XmlEvent::start_element("MfaDelete"))?;
            // Fix: this write's Result was previously dropped (missing `?`),
            // silently swallowing writer errors; propagate it like every
            // other write in this file's serializers.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        if let Some(ref value) = obj.status {
            writer.write(xml::writer::XmlEvent::start_element("Status"))?;
            // Fix: propagate the previously-dropped write Result here as well.
            writer.write(xml::writer::XmlEvent::characters(&format!(
                "{value}",
                value = value
            )))?;
            writer.write(xml::writer::XmlEvent::end_element())?;
        }
        writer.write(xml::writer::XmlEvent::end_element())
    }
}

/// Bucket static-website configuration (PutBucketWebsite request body).
#[derive(Default, Debug, Clone)]
pub struct WebsiteConfiguration {
    pub error_document: Option<ErrorDocument>,
    pub index_document: Option<IndexDocument>,
    pub redirect_all_requests_to: Option<RedirectAllRequestsTo>,
    pub routing_rules: Option<Vec<RoutingRule>>,
}

pub struct WebsiteConfigurationSerializer;
impl WebsiteConfigurationSerializer {
    #[allow(unused_variables, warnings)]
    pub fn serialize<W>(
        mut writer: &mut
EventWriter<W>, name: &str, obj: &WebsiteConfiguration, ) -> Result<(), xml::writer::Error> where W: Write, { writer.write(xml::writer::XmlEvent::start_element(name))?; if let Some(ref value) = obj.error_document { &ErrorDocumentSerializer::serialize(&mut writer, "ErrorDocument", value)?; } if let Some(ref value) = obj.index_document { &IndexDocumentSerializer::serialize(&mut writer, "IndexDocument", value)?; } if let Some(ref value) = obj.redirect_all_requests_to { &RedirectAllRequestsToSerializer::serialize( &mut writer, "RedirectAllRequestsTo", value, )?; } if let Some(ref value) = obj.routing_rules { &RoutingRulesSerializer::serialize(&mut writer, "RoutingRules", value)?; } writer.write(xml::writer::XmlEvent::end_element()) } } /// Errors returned by AbortMultipartUpload #[derive(Debug, PartialEq)] pub enum AbortMultipartUploadError { /// <p>The specified multipart upload does not exist.</p> NoSuchUpload(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl AbortMultipartUploadError { pub fn from_body(body: &str) -> AbortMultipartUploadError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "NoSuchUpload" => { AbortMultipartUploadError::NoSuchUpload(String::from(parsed_error.message)) } _ => AbortMultipartUploadError::Unknown(String::from(body)), }, Err(_) => AbortMultipartUploadError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for AbortMultipartUploadError { fn from(err: XmlParseError) -> AbortMultipartUploadError { let XmlParseError(message) = err; AbortMultipartUploadError::Unknown(message.to_string()) } } impl From<CredentialsError> for AbortMultipartUploadError { fn from(err: CredentialsError) -> AbortMultipartUploadError { AbortMultipartUploadError::Credentials(err) } } impl From<HttpDispatchError> for AbortMultipartUploadError { fn from(err: HttpDispatchError) -> AbortMultipartUploadError { AbortMultipartUploadError::HttpDispatch(err) } } impl From<io::Error> for AbortMultipartUploadError { fn from(err: io::Error) -> AbortMultipartUploadError { AbortMultipartUploadError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for AbortMultipartUploadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for AbortMultipartUploadError { fn description(&self) -> &str { match *self { AbortMultipartUploadError::NoSuchUpload(ref cause) => cause, AbortMultipartUploadError::Validation(ref cause) => cause, AbortMultipartUploadError::Credentials(ref err) => err.description(), AbortMultipartUploadError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } AbortMultipartUploadError::Unknown(ref cause) => cause, } } } /// Errors returned by CompleteMultipartUpload #[derive(Debug, PartialEq)] pub enum CompleteMultipartUploadError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. 
Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CompleteMultipartUploadError { pub fn from_body(body: &str) -> CompleteMultipartUploadError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => CompleteMultipartUploadError::Unknown(String::from(body)), }, Err(_) => CompleteMultipartUploadError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CompleteMultipartUploadError { fn from(err: XmlParseError) -> CompleteMultipartUploadError { let XmlParseError(message) = err; CompleteMultipartUploadError::Unknown(message.to_string()) } } impl From<CredentialsError> for CompleteMultipartUploadError { fn from(err: CredentialsError) -> CompleteMultipartUploadError { CompleteMultipartUploadError::Credentials(err) } } impl From<HttpDispatchError> for CompleteMultipartUploadError { fn from(err: HttpDispatchError) -> CompleteMultipartUploadError { CompleteMultipartUploadError::HttpDispatch(err) } } impl From<io::Error> for CompleteMultipartUploadError { fn from(err: io::Error) -> CompleteMultipartUploadError { CompleteMultipartUploadError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CompleteMultipartUploadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CompleteMultipartUploadError { fn description(&self) -> &str { match *self { CompleteMultipartUploadError::Validation(ref cause) => cause, CompleteMultipartUploadError::Credentials(ref err) => err.description(), 
CompleteMultipartUploadError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CompleteMultipartUploadError::Unknown(ref cause) => cause, } } } /// Errors returned by CopyObject #[derive(Debug, PartialEq)] pub enum CopyObjectError { /// <p>The source object of the COPY operation is not in the active tier and is only stored in Amazon Glacier.</p> ObjectNotInActiveTierError(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CopyObjectError { pub fn from_body(body: &str) -> CopyObjectError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "ObjectNotInActiveTierError" => { CopyObjectError::ObjectNotInActiveTierError(String::from(parsed_error.message)) } _ => CopyObjectError::Unknown(String::from(body)), }, Err(_) => CopyObjectError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CopyObjectError { fn from(err: XmlParseError) -> CopyObjectError { let XmlParseError(message) = err; CopyObjectError::Unknown(message.to_string()) } } impl From<CredentialsError> for CopyObjectError { fn from(err: CredentialsError) -> CopyObjectError { CopyObjectError::Credentials(err) } } impl From<HttpDispatchError> for CopyObjectError { fn from(err: HttpDispatchError) -> CopyObjectError { CopyObjectError::HttpDispatch(err) } } impl From<io::Error> for CopyObjectError { fn from(err: io::Error) -> CopyObjectError { CopyObjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CopyObjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CopyObjectError { fn description(&self) -> &str { match *self { CopyObjectError::ObjectNotInActiveTierError(ref cause) => cause, CopyObjectError::Validation(ref cause) => cause, CopyObjectError::Credentials(ref err) => err.description(), CopyObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), CopyObjectError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateBucket #[derive(Debug, PartialEq)] pub enum CreateBucketError { /// <p>The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.</p> BucketAlreadyExists(String), BucketAlreadyOwnedByYou(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. 
Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CreateBucketError { pub fn from_body(body: &str) -> CreateBucketError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "BucketAlreadyExists" => { CreateBucketError::BucketAlreadyExists(String::from(parsed_error.message)) } "BucketAlreadyOwnedByYou" => { CreateBucketError::BucketAlreadyOwnedByYou(String::from(parsed_error.message)) } _ => CreateBucketError::Unknown(String::from(body)), }, Err(_) => CreateBucketError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CreateBucketError { fn from(err: XmlParseError) -> CreateBucketError { let XmlParseError(message) = err; CreateBucketError::Unknown(message.to_string()) } } impl From<CredentialsError> for CreateBucketError { fn from(err: CredentialsError) -> CreateBucketError { CreateBucketError::Credentials(err) } } impl From<HttpDispatchError> for CreateBucketError { fn from(err: HttpDispatchError) -> CreateBucketError { CreateBucketError::HttpDispatch(err) } } impl From<io::Error> for CreateBucketError { fn from(err: io::Error) -> CreateBucketError { CreateBucketError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateBucketError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateBucketError { fn description(&self) -> &str { match *self { CreateBucketError::BucketAlreadyExists(ref cause) => cause, CreateBucketError::BucketAlreadyOwnedByYou(ref cause) => cause, 
CreateBucketError::Validation(ref cause) => cause, CreateBucketError::Credentials(ref err) => err.description(), CreateBucketError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), CreateBucketError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateMultipartUpload #[derive(Debug, PartialEq)] pub enum CreateMultipartUploadError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CreateMultipartUploadError { pub fn from_body(body: &str) -> CreateMultipartUploadError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => CreateMultipartUploadError::Unknown(String::from(body)), }, Err(_) => CreateMultipartUploadError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CreateMultipartUploadError { fn from(err: XmlParseError) -> CreateMultipartUploadError { let XmlParseError(message) = err; CreateMultipartUploadError::Unknown(message.to_string()) } } impl From<CredentialsError> for CreateMultipartUploadError { fn from(err: CredentialsError) -> CreateMultipartUploadError { CreateMultipartUploadError::Credentials(err) } } impl From<HttpDispatchError> for CreateMultipartUploadError { fn from(err: HttpDispatchError) -> CreateMultipartUploadError { CreateMultipartUploadError::HttpDispatch(err) } } impl From<io::Error> for CreateMultipartUploadError { fn from(err: io::Error) -> CreateMultipartUploadError { CreateMultipartUploadError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateMultipartUploadError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateMultipartUploadError { fn description(&self) -> &str { match *self { CreateMultipartUploadError::Validation(ref cause) => cause, CreateMultipartUploadError::Credentials(ref err) => err.description(), CreateMultipartUploadError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CreateMultipartUploadError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucket #[derive(Debug, PartialEq)] pub enum DeleteBucketError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl DeleteBucketError { pub fn from_body(body: &str) -> DeleteBucketError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketError::Unknown(String::from(body)), }, Err(_) => DeleteBucketError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketError { fn from(err: XmlParseError) -> DeleteBucketError { let XmlParseError(message) = err; DeleteBucketError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketError { fn from(err: CredentialsError) -> DeleteBucketError { DeleteBucketError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketError { fn from(err: HttpDispatchError) -> DeleteBucketError { DeleteBucketError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketError { fn from(err: io::Error) -> DeleteBucketError { DeleteBucketError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketError { fn description(&self) -> &str { match *self { DeleteBucketError::Validation(ref cause) => cause, DeleteBucketError::Credentials(ref err) => err.description(), DeleteBucketError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DeleteBucketError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketAnalyticsConfiguration #[derive(Debug, PartialEq)] pub enum DeleteBucketAnalyticsConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. 
Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketAnalyticsConfigurationError { pub fn from_body(body: &str) -> DeleteBucketAnalyticsConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketAnalyticsConfigurationError::Unknown(String::from(body)), }, Err(_) => DeleteBucketAnalyticsConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketAnalyticsConfigurationError { fn from(err: XmlParseError) -> DeleteBucketAnalyticsConfigurationError { let XmlParseError(message) = err; DeleteBucketAnalyticsConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketAnalyticsConfigurationError { fn from(err: CredentialsError) -> DeleteBucketAnalyticsConfigurationError { DeleteBucketAnalyticsConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketAnalyticsConfigurationError { fn from(err: HttpDispatchError) -> DeleteBucketAnalyticsConfigurationError { DeleteBucketAnalyticsConfigurationError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketAnalyticsConfigurationError { fn from(err: io::Error) -> DeleteBucketAnalyticsConfigurationError { DeleteBucketAnalyticsConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketAnalyticsConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketAnalyticsConfigurationError { fn 
description(&self) -> &str { match *self { DeleteBucketAnalyticsConfigurationError::Validation(ref cause) => cause, DeleteBucketAnalyticsConfigurationError::Credentials(ref err) => err.description(), DeleteBucketAnalyticsConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketAnalyticsConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketCors #[derive(Debug, PartialEq)] pub enum DeleteBucketCorsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketCorsError { pub fn from_body(body: &str) -> DeleteBucketCorsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => DeleteBucketCorsError::Unknown(String::from(body)), }, Err(_) => DeleteBucketCorsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketCorsError { fn from(err: XmlParseError) -> DeleteBucketCorsError { let XmlParseError(message) = err; DeleteBucketCorsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketCorsError { fn from(err: CredentialsError) -> DeleteBucketCorsError { DeleteBucketCorsError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketCorsError { fn from(err: HttpDispatchError) -> DeleteBucketCorsError { DeleteBucketCorsError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketCorsError { fn from(err: io::Error) -> DeleteBucketCorsError { DeleteBucketCorsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketCorsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketCorsError { fn description(&self) -> &str { match *self { DeleteBucketCorsError::Validation(ref cause) => cause, DeleteBucketCorsError::Credentials(ref err) => err.description(), DeleteBucketCorsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DeleteBucketCorsError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketEncryption #[derive(Debug, PartialEq)] pub enum DeleteBucketEncryptionError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl DeleteBucketEncryptionError { pub fn from_body(body: &str) -> DeleteBucketEncryptionError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketEncryptionError::Unknown(String::from(body)), }, Err(_) => DeleteBucketEncryptionError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketEncryptionError { fn from(err: XmlParseError) -> DeleteBucketEncryptionError { let XmlParseError(message) = err; DeleteBucketEncryptionError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketEncryptionError { fn from(err: CredentialsError) -> DeleteBucketEncryptionError { DeleteBucketEncryptionError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketEncryptionError { fn from(err: HttpDispatchError) -> DeleteBucketEncryptionError { DeleteBucketEncryptionError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketEncryptionError { fn from(err: io::Error) -> DeleteBucketEncryptionError { DeleteBucketEncryptionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketEncryptionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketEncryptionError { fn description(&self) -> &str { match *self { DeleteBucketEncryptionError::Validation(ref cause) => cause, DeleteBucketEncryptionError::Credentials(ref err) => err.description(), DeleteBucketEncryptionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketEncryptionError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketInventoryConfiguration #[derive(Debug, 
PartialEq)] pub enum DeleteBucketInventoryConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketInventoryConfigurationError { pub fn from_body(body: &str) -> DeleteBucketInventoryConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketInventoryConfigurationError::Unknown(String::from(body)), }, Err(_) => DeleteBucketInventoryConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketInventoryConfigurationError { fn from(err: XmlParseError) -> DeleteBucketInventoryConfigurationError { let XmlParseError(message) = err; DeleteBucketInventoryConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketInventoryConfigurationError { fn from(err: CredentialsError) -> DeleteBucketInventoryConfigurationError { DeleteBucketInventoryConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketInventoryConfigurationError { fn from(err: HttpDispatchError) -> DeleteBucketInventoryConfigurationError { DeleteBucketInventoryConfigurationError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketInventoryConfigurationError { fn from(err: io::Error) -> DeleteBucketInventoryConfigurationError { DeleteBucketInventoryConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for 
DeleteBucketInventoryConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketInventoryConfigurationError { fn description(&self) -> &str { match *self { DeleteBucketInventoryConfigurationError::Validation(ref cause) => cause, DeleteBucketInventoryConfigurationError::Credentials(ref err) => err.description(), DeleteBucketInventoryConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketInventoryConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketLifecycle #[derive(Debug, PartialEq)] pub enum DeleteBucketLifecycleError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketLifecycleError { pub fn from_body(body: &str) -> DeleteBucketLifecycleError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => DeleteBucketLifecycleError::Unknown(String::from(body)), }, Err(_) => DeleteBucketLifecycleError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketLifecycleError { fn from(err: XmlParseError) -> DeleteBucketLifecycleError { let XmlParseError(message) = err; DeleteBucketLifecycleError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketLifecycleError { fn from(err: CredentialsError) -> DeleteBucketLifecycleError { DeleteBucketLifecycleError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketLifecycleError { fn from(err: HttpDispatchError) -> DeleteBucketLifecycleError { DeleteBucketLifecycleError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketLifecycleError { fn from(err: io::Error) -> DeleteBucketLifecycleError { DeleteBucketLifecycleError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketLifecycleError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketLifecycleError { fn description(&self) -> &str { match *self { DeleteBucketLifecycleError::Validation(ref cause) => cause, DeleteBucketLifecycleError::Credentials(ref err) => err.description(), DeleteBucketLifecycleError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketLifecycleError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketMetricsConfiguration #[derive(Debug, PartialEq)] pub enum DeleteBucketMetricsConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. 
The raw HTTP response is provided. Unknown(String), } impl DeleteBucketMetricsConfigurationError { pub fn from_body(body: &str) -> DeleteBucketMetricsConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketMetricsConfigurationError::Unknown(String::from(body)), }, Err(_) => DeleteBucketMetricsConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketMetricsConfigurationError { fn from(err: XmlParseError) -> DeleteBucketMetricsConfigurationError { let XmlParseError(message) = err; DeleteBucketMetricsConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketMetricsConfigurationError { fn from(err: CredentialsError) -> DeleteBucketMetricsConfigurationError { DeleteBucketMetricsConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketMetricsConfigurationError { fn from(err: HttpDispatchError) -> DeleteBucketMetricsConfigurationError { DeleteBucketMetricsConfigurationError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketMetricsConfigurationError { fn from(err: io::Error) -> DeleteBucketMetricsConfigurationError { DeleteBucketMetricsConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketMetricsConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketMetricsConfigurationError { fn description(&self) -> &str { match *self { DeleteBucketMetricsConfigurationError::Validation(ref cause) => cause, DeleteBucketMetricsConfigurationError::Credentials(ref err) => err.description(), 
DeleteBucketMetricsConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketMetricsConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketPolicy #[derive(Debug, PartialEq)] pub enum DeleteBucketPolicyError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketPolicyError { pub fn from_body(body: &str) -> DeleteBucketPolicyError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketPolicyError::Unknown(String::from(body)), }, Err(_) => DeleteBucketPolicyError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketPolicyError { fn from(err: XmlParseError) -> DeleteBucketPolicyError { let XmlParseError(message) = err; DeleteBucketPolicyError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketPolicyError { fn from(err: CredentialsError) -> DeleteBucketPolicyError { DeleteBucketPolicyError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketPolicyError { fn from(err: HttpDispatchError) -> DeleteBucketPolicyError { DeleteBucketPolicyError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketPolicyError { fn from(err: io::Error) -> DeleteBucketPolicyError { DeleteBucketPolicyError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketPolicyError { fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketPolicyError { fn description(&self) -> &str { match *self { DeleteBucketPolicyError::Validation(ref cause) => cause, DeleteBucketPolicyError::Credentials(ref err) => err.description(), DeleteBucketPolicyError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketPolicyError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketReplication #[derive(Debug, PartialEq)] pub enum DeleteBucketReplicationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketReplicationError { pub fn from_body(body: &str) -> DeleteBucketReplicationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => DeleteBucketReplicationError::Unknown(String::from(body)), }, Err(_) => DeleteBucketReplicationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketReplicationError { fn from(err: XmlParseError) -> DeleteBucketReplicationError { let XmlParseError(message) = err; DeleteBucketReplicationError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketReplicationError { fn from(err: CredentialsError) -> DeleteBucketReplicationError { DeleteBucketReplicationError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketReplicationError { fn from(err: HttpDispatchError) -> DeleteBucketReplicationError { DeleteBucketReplicationError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketReplicationError { fn from(err: io::Error) -> DeleteBucketReplicationError { DeleteBucketReplicationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketReplicationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketReplicationError { fn description(&self) -> &str { match *self { DeleteBucketReplicationError::Validation(ref cause) => cause, DeleteBucketReplicationError::Credentials(ref err) => err.description(), DeleteBucketReplicationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketReplicationError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketTagging #[derive(Debug, PartialEq)] pub enum DeleteBucketTaggingError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. 
The raw HTTP response is provided. Unknown(String), } impl DeleteBucketTaggingError { pub fn from_body(body: &str) -> DeleteBucketTaggingError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketTaggingError::Unknown(String::from(body)), }, Err(_) => DeleteBucketTaggingError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketTaggingError { fn from(err: XmlParseError) -> DeleteBucketTaggingError { let XmlParseError(message) = err; DeleteBucketTaggingError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketTaggingError { fn from(err: CredentialsError) -> DeleteBucketTaggingError { DeleteBucketTaggingError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketTaggingError { fn from(err: HttpDispatchError) -> DeleteBucketTaggingError { DeleteBucketTaggingError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketTaggingError { fn from(err: io::Error) -> DeleteBucketTaggingError { DeleteBucketTaggingError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketTaggingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketTaggingError { fn description(&self) -> &str { match *self { DeleteBucketTaggingError::Validation(ref cause) => cause, DeleteBucketTaggingError::Credentials(ref err) => err.description(), DeleteBucketTaggingError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketTaggingError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteBucketWebsite #[derive(Debug, PartialEq)] pub enum DeleteBucketWebsiteError { 
/// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteBucketWebsiteError { pub fn from_body(body: &str) -> DeleteBucketWebsiteError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteBucketWebsiteError::Unknown(String::from(body)), }, Err(_) => DeleteBucketWebsiteError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteBucketWebsiteError { fn from(err: XmlParseError) -> DeleteBucketWebsiteError { let XmlParseError(message) = err; DeleteBucketWebsiteError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteBucketWebsiteError { fn from(err: CredentialsError) -> DeleteBucketWebsiteError { DeleteBucketWebsiteError::Credentials(err) } } impl From<HttpDispatchError> for DeleteBucketWebsiteError { fn from(err: HttpDispatchError) -> DeleteBucketWebsiteError { DeleteBucketWebsiteError::HttpDispatch(err) } } impl From<io::Error> for DeleteBucketWebsiteError { fn from(err: io::Error) -> DeleteBucketWebsiteError { DeleteBucketWebsiteError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteBucketWebsiteError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteBucketWebsiteError { fn description(&self) -> &str { match *self { DeleteBucketWebsiteError::Validation(ref cause) => cause, DeleteBucketWebsiteError::Credentials(ref err) => 
err.description(), DeleteBucketWebsiteError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DeleteBucketWebsiteError::Unknown(ref cause) => cause, } } } /// Errors returned by DeleteObject #[derive(Debug, PartialEq)] pub enum DeleteObjectError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DeleteObjectError { pub fn from_body(body: &str) -> DeleteObjectError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => DeleteObjectError::Unknown(String::from(body)), }, Err(_) => DeleteObjectError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DeleteObjectError { fn from(err: XmlParseError) -> DeleteObjectError { let XmlParseError(message) = err; DeleteObjectError::Unknown(message.to_string()) } } impl From<CredentialsError> for DeleteObjectError { fn from(err: CredentialsError) -> DeleteObjectError { DeleteObjectError::Credentials(err) } } impl From<HttpDispatchError> for DeleteObjectError { fn from(err: HttpDispatchError) -> DeleteObjectError { DeleteObjectError::HttpDispatch(err) } } impl From<io::Error> for DeleteObjectError { fn from(err: io::Error) -> DeleteObjectError { DeleteObjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DeleteObjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DeleteObjectError { fn 
description(&self) -> &str {
        match *self {
            DeleteObjectError::Validation(ref cause) => cause,
            DeleteObjectError::Credentials(ref err) => err.description(),
            DeleteObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            DeleteObjectError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by DeleteObjectTagging
#[derive(Debug, PartialEq)]
pub enum DeleteObjectTaggingError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DeleteObjectTaggingError {
    /// Builds a `DeleteObjectTaggingError` from the raw XML error body of an
    /// S3 response.
    pub fn from_body(body: &str) -> DeleteObjectTaggingError {
        let mut stack =
            XmlResponse::new(EventReader::new(body.as_bytes()).into_iter().peekable());
        find_start_element(&mut stack);
        // No service-specific error codes are modeled for this operation, so
        // both a successfully parsed error and a parse failure map to
        // `Unknown` carrying the raw body.
        match Self::deserialize(&mut stack) {
            Ok(_) => DeleteObjectTaggingError::Unknown(String::from(body)),
            Err(_) => DeleteObjectTaggingError::Unknown(body.to_string()),
        }
    }

    /// Deserializes the standard `<Error>` element from the XML event stream.
    fn deserialize<T: Peek + Next>(stack: &mut T) -> Result<XmlError, XmlParseError> {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for DeleteObjectTaggingError {
    fn from(source: XmlParseError) -> DeleteObjectTaggingError {
        let XmlParseError(msg) = source;
        DeleteObjectTaggingError::Unknown(msg.to_string())
    }
}

impl From<CredentialsError> for DeleteObjectTaggingError {
    fn from(source: CredentialsError) -> DeleteObjectTaggingError {
        DeleteObjectTaggingError::Credentials(source)
    }
}

impl From<HttpDispatchError> for DeleteObjectTaggingError {
    fn from(source: HttpDispatchError) -> DeleteObjectTaggingError {
        DeleteObjectTaggingError::HttpDispatch(source)
    }
}

impl From<io::Error> for DeleteObjectTaggingError {
    // I/O failures are folded into the HTTP-dispatch variant.
    fn from(source: io::Error) -> DeleteObjectTaggingError {
        DeleteObjectTaggingError::HttpDispatch(HttpDispatchError::from(source))
    }
}

impl fmt::Display for DeleteObjectTaggingError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(self.description())
    }
}

impl Error for DeleteObjectTaggingError {
    /// Returns the stored message (or delegates to the wrapped error).
    fn description(&self) -> &str {
        match *self {
            DeleteObjectTaggingError::Credentials(ref creds) => creds.description(),
            DeleteObjectTaggingError::HttpDispatch(ref dispatch) => dispatch.description(),
            DeleteObjectTaggingError::Validation(ref msg) => msg,
            DeleteObjectTaggingError::Unknown(ref body) => body,
        }
    }
}

/// Errors returned by DeleteObjects
#[derive(Debug, PartialEq)]
pub enum DeleteObjectsError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
Unknown(String), }

impl DeleteObjectsError {
    /// Builds a `DeleteObjectsError` from the raw XML error body of an S3
    /// response.
    pub fn from_body(body: &str) -> DeleteObjectsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            // No operation-specific error codes are modeled; any parsed code
            // falls through to `Unknown` carrying the raw response body.
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => DeleteObjectsError::Unknown(String::from(body)),
            },
            // XML parse failure: still surface the raw body for diagnostics.
            Err(_) => DeleteObjectsError::Unknown(body.to_string()),
        }
    }

    /// Deserializes the standard `<Error>` element from the XML event stream.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for DeleteObjectsError {
    fn from(err: XmlParseError) -> DeleteObjectsError {
        let XmlParseError(message) = err;
        DeleteObjectsError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for DeleteObjectsError {
    fn from(err: CredentialsError) -> DeleteObjectsError {
        DeleteObjectsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for DeleteObjectsError {
    fn from(err: HttpDispatchError) -> DeleteObjectsError {
        DeleteObjectsError::HttpDispatch(err)
    }
}

impl From<io::Error> for DeleteObjectsError {
    // I/O failures are folded into the HTTP-dispatch variant.
    fn from(err: io::Error) -> DeleteObjectsError {
        DeleteObjectsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for DeleteObjectsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for DeleteObjectsError {
    /// Returns the stored message (or delegates to the wrapped error).
    fn description(&self) -> &str {
        match *self {
            DeleteObjectsError::Validation(ref cause) => cause,
            DeleteObjectsError::Credentials(ref err) => err.description(),
            DeleteObjectsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            DeleteObjectsError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by GetBucketAccelerateConfiguration
#[derive(Debug, PartialEq)]
pub enum GetBucketAccelerateConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetBucketAccelerateConfigurationError { pub fn from_body(body: &str) -> GetBucketAccelerateConfigurationError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => GetBucketAccelerateConfigurationError::Unknown(String::from(body)), }, Err(_) => GetBucketAccelerateConfigurationError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetBucketAccelerateConfigurationError { fn from(err: XmlParseError) -> GetBucketAccelerateConfigurationError { let XmlParseError(message) = err; GetBucketAccelerateConfigurationError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetBucketAccelerateConfigurationError { fn from(err: CredentialsError) -> GetBucketAccelerateConfigurationError { GetBucketAccelerateConfigurationError::Credentials(err) } } impl From<HttpDispatchError> for GetBucketAccelerateConfigurationError { fn from(err: HttpDispatchError) -> GetBucketAccelerateConfigurationError { GetBucketAccelerateConfigurationError::HttpDispatch(err) } } impl From<io::Error> for GetBucketAccelerateConfigurationError { fn from(err: io::Error) -> GetBucketAccelerateConfigurationError { GetBucketAccelerateConfigurationError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetBucketAccelerateConfigurationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetBucketAccelerateConfigurationError { fn description(&self) -> &str { match *self { 
GetBucketAccelerateConfigurationError::Validation(ref cause) => cause, GetBucketAccelerateConfigurationError::Credentials(ref err) => err.description(), GetBucketAccelerateConfigurationError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetBucketAccelerateConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by GetBucketAcl #[derive(Debug, PartialEq)] pub enum GetBucketAclError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetBucketAclError { pub fn from_body(body: &str) -> GetBucketAclError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => GetBucketAclError::Unknown(String::from(body)), }, Err(_) => GetBucketAclError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetBucketAclError { fn from(err: XmlParseError) -> GetBucketAclError { let XmlParseError(message) = err; GetBucketAclError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetBucketAclError { fn from(err: CredentialsError) -> GetBucketAclError { GetBucketAclError::Credentials(err) } } impl From<HttpDispatchError> for GetBucketAclError { fn from(err: HttpDispatchError) -> GetBucketAclError { GetBucketAclError::HttpDispatch(err) } } impl From<io::Error> for GetBucketAclError { fn from(err: io::Error) -> GetBucketAclError { GetBucketAclError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetBucketAclError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetBucketAclError { fn description(&self) -> &str { match *self { GetBucketAclError::Validation(ref cause) => cause, GetBucketAclError::Credentials(ref err) => err.description(), GetBucketAclError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetBucketAclError::Unknown(ref cause) => cause, } } } /// Errors returned by GetBucketAnalyticsConfiguration #[derive(Debug, PartialEq)] pub enum GetBucketAnalyticsConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), }

impl GetBucketAnalyticsConfigurationError {
    /// Builds a `GetBucketAnalyticsConfigurationError` from the raw XML error
    /// body of an S3 response.
    pub fn from_body(body: &str) -> GetBucketAnalyticsConfigurationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            // No operation-specific error codes are modeled; any parsed code
            // falls through to `Unknown` carrying the raw response body.
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketAnalyticsConfigurationError::Unknown(String::from(body)),
            },
            // XML parse failure: still surface the raw body for diagnostics.
            Err(_) => GetBucketAnalyticsConfigurationError::Unknown(body.to_string()),
        }
    }

    /// Deserializes the standard `<Error>` element from the XML event stream.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for GetBucketAnalyticsConfigurationError {
    fn from(err: XmlParseError) -> GetBucketAnalyticsConfigurationError {
        let XmlParseError(message) = err;
        GetBucketAnalyticsConfigurationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for GetBucketAnalyticsConfigurationError {
    fn from(err: CredentialsError) -> GetBucketAnalyticsConfigurationError {
        GetBucketAnalyticsConfigurationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for GetBucketAnalyticsConfigurationError {
    fn from(err: HttpDispatchError) -> GetBucketAnalyticsConfigurationError {
        GetBucketAnalyticsConfigurationError::HttpDispatch(err)
    }
}

impl From<io::Error> for GetBucketAnalyticsConfigurationError {
    // I/O failures are folded into the HTTP-dispatch variant.
    fn from(err: io::Error) -> GetBucketAnalyticsConfigurationError {
        GetBucketAnalyticsConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for GetBucketAnalyticsConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for GetBucketAnalyticsConfigurationError {
    /// Returns the stored message (or delegates to the wrapped error).
    fn description(&self) -> &str {
        match *self {
            GetBucketAnalyticsConfigurationError::Validation(ref cause) => cause,
            GetBucketAnalyticsConfigurationError::Credentials(ref err) => err.description(),
            GetBucketAnalyticsConfigurationError::HttpDispatch(ref
dispatch_error) => { dispatch_error.description() } GetBucketAnalyticsConfigurationError::Unknown(ref cause) => cause, } } } /// Errors returned by GetBucketCors #[derive(Debug, PartialEq)] pub enum GetBucketCorsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetBucketCorsError { pub fn from_body(body: &str) -> GetBucketCorsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => GetBucketCorsError::Unknown(String::from(body)), }, Err(_) => GetBucketCorsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetBucketCorsError { fn from(err: XmlParseError) -> GetBucketCorsError { let XmlParseError(message) = err; GetBucketCorsError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetBucketCorsError { fn from(err: CredentialsError) -> GetBucketCorsError { GetBucketCorsError::Credentials(err) } } impl From<HttpDispatchError> for GetBucketCorsError { fn from(err: HttpDispatchError) -> GetBucketCorsError { GetBucketCorsError::HttpDispatch(err) } } impl From<io::Error> for GetBucketCorsError { fn from(err: io::Error) -> GetBucketCorsError { GetBucketCorsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetBucketCorsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetBucketCorsError { fn description(&self) -> &str { match 
*self { GetBucketCorsError::Validation(ref cause) => cause, GetBucketCorsError::Credentials(ref err) => err.description(), GetBucketCorsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), GetBucketCorsError::Unknown(ref cause) => cause, } } } /// Errors returned by GetBucketEncryption #[derive(Debug, PartialEq)] pub enum GetBucketEncryptionError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetBucketEncryptionError { pub fn from_body(body: &str) -> GetBucketEncryptionError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => GetBucketEncryptionError::Unknown(String::from(body)), }, Err(_) => GetBucketEncryptionError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetBucketEncryptionError { fn from(err: XmlParseError) -> GetBucketEncryptionError { let XmlParseError(message) = err; GetBucketEncryptionError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetBucketEncryptionError { fn from(err: CredentialsError) -> GetBucketEncryptionError { GetBucketEncryptionError::Credentials(err) } } impl From<HttpDispatchError> for GetBucketEncryptionError { fn from(err: HttpDispatchError) -> GetBucketEncryptionError { GetBucketEncryptionError::HttpDispatch(err) } } impl From<io::Error> for GetBucketEncryptionError { fn from(err: io::Error) -> GetBucketEncryptionError { GetBucketEncryptionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetBucketEncryptionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetBucketEncryptionError { fn description(&self) -> &str { match *self { GetBucketEncryptionError::Validation(ref cause) => cause, GetBucketEncryptionError::Credentials(ref err) => err.description(), GetBucketEncryptionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } GetBucketEncryptionError::Unknown(ref cause) => cause, } } } /// Errors returned by GetBucketInventoryConfiguration #[derive(Debug, PartialEq)] pub enum GetBucketInventoryConfigurationError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
// (continuation) final variant of `GetBucketInventoryConfigurationError`,
// whose doc comment and earlier variants appear on the preceding lines.
Unknown(String),
}
impl GetBucketInventoryConfigurationError {
    /// Parses the XML error body of a failed GetBucketInventoryConfiguration
    /// call. No operation-specific codes exist, so every outcome maps to
    /// `Unknown` carrying the raw body.
    pub fn from_body(body: &str) -> GetBucketInventoryConfigurationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketInventoryConfigurationError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketInventoryConfigurationError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketInventoryConfigurationError {
    fn from(err: XmlParseError) -> GetBucketInventoryConfigurationError {
        let XmlParseError(message) = err;
        GetBucketInventoryConfigurationError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketInventoryConfigurationError {
    fn from(err: CredentialsError) -> GetBucketInventoryConfigurationError {
        GetBucketInventoryConfigurationError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketInventoryConfigurationError {
    fn from(err: HttpDispatchError) -> GetBucketInventoryConfigurationError {
        GetBucketInventoryConfigurationError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketInventoryConfigurationError {
    fn from(err: io::Error) -> GetBucketInventoryConfigurationError {
        GetBucketInventoryConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketInventoryConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketInventoryConfigurationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketInventoryConfigurationError::Validation(ref cause) => cause,
            GetBucketInventoryConfigurationError::Credentials(ref err) => err.description(),
            GetBucketInventoryConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketInventoryConfigurationError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketLifecycle
#[derive(Debug, PartialEq)]
pub enum GetBucketLifecycleError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetBucketLifecycleError {
    /// Parses the XML error body of a failed GetBucketLifecycle call.
    /// No operation-specific codes exist, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> GetBucketLifecycleError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketLifecycleError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketLifecycleError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketLifecycleError {
    fn from(err: XmlParseError) -> GetBucketLifecycleError {
        let XmlParseError(message) = err;
        GetBucketLifecycleError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketLifecycleError {
    fn from(err: CredentialsError) -> GetBucketLifecycleError {
        GetBucketLifecycleError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketLifecycleError {
    fn from(err: HttpDispatchError) -> GetBucketLifecycleError {
        GetBucketLifecycleError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketLifecycleError {
    fn from(err: io::Error) -> GetBucketLifecycleError {
        GetBucketLifecycleError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketLifecycleError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // (this `write!` call continues on the following lines)
        write!(f, "{}",
// (continuation) argument and closing of the `write!` call begun on the
// preceding lines, then the end of `impl fmt::Display for GetBucketLifecycleError`.
self.description()) } }
impl Error for GetBucketLifecycleError {
    fn description(&self) -> &str {
        match *self {
            GetBucketLifecycleError::Validation(ref cause) => cause,
            GetBucketLifecycleError::Credentials(ref err) => err.description(),
            GetBucketLifecycleError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketLifecycleError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketLifecycleConfiguration
#[derive(Debug, PartialEq)]
pub enum GetBucketLifecycleConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetBucketLifecycleConfigurationError {
    /// Parses the XML error body of a failed GetBucketLifecycleConfiguration
    /// call. No operation-specific codes exist, so every outcome maps to
    /// `Unknown` carrying the raw body.
    pub fn from_body(body: &str) -> GetBucketLifecycleConfigurationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketLifecycleConfigurationError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketLifecycleConfigurationError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketLifecycleConfigurationError {
    fn from(err: XmlParseError) -> GetBucketLifecycleConfigurationError {
        let XmlParseError(message) = err;
        GetBucketLifecycleConfigurationError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketLifecycleConfigurationError {
    fn from(err: CredentialsError) -> GetBucketLifecycleConfigurationError {
        GetBucketLifecycleConfigurationError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketLifecycleConfigurationError {
    fn from(err: HttpDispatchError) -> GetBucketLifecycleConfigurationError {
        GetBucketLifecycleConfigurationError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketLifecycleConfigurationError {
    fn from(err: io::Error) -> GetBucketLifecycleConfigurationError {
        GetBucketLifecycleConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketLifecycleConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketLifecycleConfigurationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketLifecycleConfigurationError::Validation(ref cause) => cause,
            GetBucketLifecycleConfigurationError::Credentials(ref err) => err.description(),
            GetBucketLifecycleConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketLifecycleConfigurationError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketLocation
#[derive(Debug, PartialEq)]
pub enum GetBucketLocationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
// (continuation) remaining variants of `GetBucketLocationError`; the enum
// header and the `HttpDispatch` variant appear on the preceding lines.
Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetBucketLocationError {
    /// Parses the XML error body of a failed GetBucketLocation call.
    /// No operation-specific codes exist, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> GetBucketLocationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketLocationError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketLocationError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketLocationError {
    fn from(err: XmlParseError) -> GetBucketLocationError {
        let XmlParseError(message) = err;
        GetBucketLocationError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketLocationError {
    fn from(err: CredentialsError) -> GetBucketLocationError {
        GetBucketLocationError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketLocationError {
    fn from(err: HttpDispatchError) -> GetBucketLocationError {
        GetBucketLocationError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketLocationError {
    fn from(err: io::Error) -> GetBucketLocationError {
        GetBucketLocationError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketLocationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketLocationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketLocationError::Validation(ref cause) => cause,
            GetBucketLocationError::Credentials(ref err) => err.description(),
            GetBucketLocationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketLocationError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketLogging
#[derive(Debug, PartialEq)]
pub enum GetBucketLoggingError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetBucketLoggingError {
    /// Parses the XML error body of a failed GetBucketLogging call.
    /// No operation-specific codes exist, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> GetBucketLoggingError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketLoggingError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketLoggingError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketLoggingError {
    fn from(err: XmlParseError) -> GetBucketLoggingError {
        let XmlParseError(message) = err;
        GetBucketLoggingError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketLoggingError {
    fn from(err: CredentialsError) -> GetBucketLoggingError {
        GetBucketLoggingError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketLoggingError {
    fn from(err: HttpDispatchError) -> GetBucketLoggingError {
        GetBucketLoggingError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketLoggingError {
    fn from(err: io::Error) -> GetBucketLoggingError {
        GetBucketLoggingError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketLoggingError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketLoggingError {
    fn description(&self) -> &str {
        match *self {
            // (this match continues on the following lines)
            GetBucketLoggingError::Validation(ref cause) => cause,
// (continuation) remaining arms of `GetBucketLoggingError::description`,
// begun on the preceding lines.
GetBucketLoggingError::Credentials(ref err) => err.description(),
            GetBucketLoggingError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            GetBucketLoggingError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketMetricsConfiguration
#[derive(Debug, PartialEq)]
pub enum GetBucketMetricsConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetBucketMetricsConfigurationError {
    /// Parses the XML error body of a failed GetBucketMetricsConfiguration
    /// call. No operation-specific codes exist, so every outcome maps to
    /// `Unknown` carrying the raw body.
    pub fn from_body(body: &str) -> GetBucketMetricsConfigurationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketMetricsConfigurationError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketMetricsConfigurationError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketMetricsConfigurationError {
    fn from(err: XmlParseError) -> GetBucketMetricsConfigurationError {
        let XmlParseError(message) = err;
        GetBucketMetricsConfigurationError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketMetricsConfigurationError {
    fn from(err: CredentialsError) -> GetBucketMetricsConfigurationError {
        GetBucketMetricsConfigurationError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketMetricsConfigurationError {
    fn from(err: HttpDispatchError) -> GetBucketMetricsConfigurationError {
        GetBucketMetricsConfigurationError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketMetricsConfigurationError {
    fn from(err: io::Error) -> GetBucketMetricsConfigurationError {
        GetBucketMetricsConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketMetricsConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketMetricsConfigurationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketMetricsConfigurationError::Validation(ref cause) => cause,
            GetBucketMetricsConfigurationError::Credentials(ref err) => err.description(),
            GetBucketMetricsConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketMetricsConfigurationError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketNotification
#[derive(Debug, PartialEq)]
pub enum GetBucketNotificationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
// (continuation) remaining variants of `GetBucketNotificationError`; the enum
// header and the `HttpDispatch` variant appear on the preceding lines.
Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetBucketNotificationError {
    /// Parses the XML error body of a failed GetBucketNotification call.
    /// No operation-specific codes exist, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> GetBucketNotificationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketNotificationError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketNotificationError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketNotificationError {
    fn from(err: XmlParseError) -> GetBucketNotificationError {
        let XmlParseError(message) = err;
        GetBucketNotificationError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketNotificationError {
    fn from(err: CredentialsError) -> GetBucketNotificationError {
        GetBucketNotificationError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketNotificationError {
    fn from(err: HttpDispatchError) -> GetBucketNotificationError {
        GetBucketNotificationError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketNotificationError {
    fn from(err: io::Error) -> GetBucketNotificationError {
        GetBucketNotificationError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketNotificationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketNotificationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketNotificationError::Validation(ref cause) => cause,
            GetBucketNotificationError::Credentials(ref err) => err.description(),
            GetBucketNotificationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description() }
            GetBucketNotificationError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketNotificationConfiguration
#[derive(Debug, PartialEq)]
pub enum GetBucketNotificationConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetBucketNotificationConfigurationError {
    /// Parses the XML error body of a failed GetBucketNotificationConfiguration
    /// call. No operation-specific codes exist, so every outcome maps to
    /// `Unknown` carrying the raw body.
    pub fn from_body(body: &str) -> GetBucketNotificationConfigurationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketNotificationConfigurationError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketNotificationConfigurationError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketNotificationConfigurationError {
    fn from(err: XmlParseError) -> GetBucketNotificationConfigurationError {
        let XmlParseError(message) = err;
        GetBucketNotificationConfigurationError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketNotificationConfigurationError {
    fn from(err: CredentialsError) -> GetBucketNotificationConfigurationError {
        GetBucketNotificationConfigurationError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketNotificationConfigurationError {
    fn from(err: HttpDispatchError) -> GetBucketNotificationConfigurationError {
        GetBucketNotificationConfigurationError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketNotificationConfigurationError {
    // (this `from` impl continues on the following lines)
    fn from(err: io::Error) ->
// (continuation) return type and body of `From<io::Error>` for
// `GetBucketNotificationConfigurationError`, begun on the preceding lines.
GetBucketNotificationConfigurationError {
        GetBucketNotificationConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketNotificationConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketNotificationConfigurationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketNotificationConfigurationError::Validation(ref cause) => cause,
            GetBucketNotificationConfigurationError::Credentials(ref err) => err.description(),
            GetBucketNotificationConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketNotificationConfigurationError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketPolicy
#[derive(Debug, PartialEq)]
pub enum GetBucketPolicyError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetBucketPolicyError {
    /// Parses the XML error body of a failed GetBucketPolicy call.
    /// No operation-specific codes exist, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> GetBucketPolicyError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketPolicyError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketPolicyError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketPolicyError {
    fn from(err: XmlParseError) -> GetBucketPolicyError {
        let XmlParseError(message) = err;
        GetBucketPolicyError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketPolicyError {
    fn from(err: CredentialsError) -> GetBucketPolicyError {
        GetBucketPolicyError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketPolicyError {
    fn from(err: HttpDispatchError) -> GetBucketPolicyError {
        GetBucketPolicyError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketPolicyError {
    fn from(err: io::Error) -> GetBucketPolicyError {
        GetBucketPolicyError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketPolicyError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketPolicyError {
    fn description(&self) -> &str {
        match *self {
            GetBucketPolicyError::Validation(ref cause) => cause,
            GetBucketPolicyError::Credentials(ref err) => err.description(),
            GetBucketPolicyError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            GetBucketPolicyError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketReplication
#[derive(Debug, PartialEq)]
pub enum GetBucketReplicationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
// (continuation) final variant of `GetBucketReplicationError`, whose doc
// comment and earlier variants appear on the preceding lines.
Unknown(String),
}
impl GetBucketReplicationError {
    /// Parses the XML error body of a failed GetBucketReplication call.
    /// No operation-specific codes exist, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> GetBucketReplicationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketReplicationError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketReplicationError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketReplicationError {
    fn from(err: XmlParseError) -> GetBucketReplicationError {
        let XmlParseError(message) = err;
        GetBucketReplicationError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketReplicationError {
    fn from(err: CredentialsError) -> GetBucketReplicationError {
        GetBucketReplicationError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketReplicationError {
    fn from(err: HttpDispatchError) -> GetBucketReplicationError {
        GetBucketReplicationError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketReplicationError {
    fn from(err: io::Error) -> GetBucketReplicationError {
        GetBucketReplicationError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketReplicationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketReplicationError {
    fn description(&self) -> &str {
        match *self {
            GetBucketReplicationError::Validation(ref cause) => cause,
            GetBucketReplicationError::Credentials(ref err) => err.description(),
            GetBucketReplicationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketReplicationError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketRequestPayment
#[derive(Debug, PartialEq)]
pub enum GetBucketRequestPaymentError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetBucketRequestPaymentError {
    /// Parses the XML error body of a failed GetBucketRequestPayment call.
    /// No operation-specific codes exist, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> GetBucketRequestPaymentError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketRequestPaymentError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketRequestPaymentError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketRequestPaymentError {
    fn from(err: XmlParseError) -> GetBucketRequestPaymentError {
        let XmlParseError(message) = err;
        GetBucketRequestPaymentError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketRequestPaymentError {
    fn from(err: CredentialsError) -> GetBucketRequestPaymentError {
        GetBucketRequestPaymentError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketRequestPaymentError {
    fn from(err: HttpDispatchError) -> GetBucketRequestPaymentError {
        GetBucketRequestPaymentError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketRequestPaymentError {
    fn from(err: io::Error) -> GetBucketRequestPaymentError {
        GetBucketRequestPaymentError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketRequestPaymentError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketRequestPaymentError {
    fn description(&self) -> &str {
        // (this match continues on the following lines)
        match *self {
// (continuation) arms of `GetBucketRequestPaymentError::description`,
// whose `match *self {` opens on the preceding lines.
GetBucketRequestPaymentError::Validation(ref cause) => cause,
            GetBucketRequestPaymentError::Credentials(ref err) => err.description(),
            GetBucketRequestPaymentError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketRequestPaymentError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketTagging
#[derive(Debug, PartialEq)]
pub enum GetBucketTaggingError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetBucketTaggingError {
    /// Parses the XML error body of a failed GetBucketTagging call.
    /// No operation-specific codes exist, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> GetBucketTaggingError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketTaggingError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketTaggingError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketTaggingError {
    fn from(err: XmlParseError) -> GetBucketTaggingError {
        let XmlParseError(message) = err;
        GetBucketTaggingError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketTaggingError {
    fn from(err: CredentialsError) -> GetBucketTaggingError {
        GetBucketTaggingError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketTaggingError {
    fn from(err: HttpDispatchError) -> GetBucketTaggingError {
        GetBucketTaggingError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketTaggingError {
    fn from(err: io::Error) -> GetBucketTaggingError {
        GetBucketTaggingError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketTaggingError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketTaggingError {
    fn description(&self) -> &str {
        match *self {
            GetBucketTaggingError::Validation(ref cause) => cause,
            GetBucketTaggingError::Credentials(ref err) => err.description(),
            GetBucketTaggingError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            GetBucketTaggingError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketVersioning
#[derive(Debug, PartialEq)]
pub enum GetBucketVersioningError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
// (continuation) final variant of `GetBucketVersioningError`, whose doc
// comment and earlier variants appear on the preceding lines.
Unknown(String),
}
impl GetBucketVersioningError {
    /// Parses the XML error body of a failed GetBucketVersioning call.
    /// No operation-specific codes exist, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> GetBucketVersioningError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketVersioningError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketVersioningError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketVersioningError {
    fn from(err: XmlParseError) -> GetBucketVersioningError {
        let XmlParseError(message) = err;
        GetBucketVersioningError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketVersioningError {
    fn from(err: CredentialsError) -> GetBucketVersioningError {
        GetBucketVersioningError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketVersioningError {
    fn from(err: HttpDispatchError) -> GetBucketVersioningError {
        GetBucketVersioningError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketVersioningError {
    fn from(err: io::Error) -> GetBucketVersioningError {
        GetBucketVersioningError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketVersioningError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketVersioningError {
    fn description(&self) -> &str {
        match *self {
            GetBucketVersioningError::Validation(ref cause) => cause,
            GetBucketVersioningError::Credentials(ref err) => err.description(),
            GetBucketVersioningError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            GetBucketVersioningError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by GetBucketWebsite
#[derive(Debug, PartialEq)]
pub enum GetBucketWebsiteError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetBucketWebsiteError {
    /// Parses the XML error body of a failed GetBucketWebsite call.
    /// No operation-specific codes exist, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> GetBucketWebsiteError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetBucketWebsiteError::Unknown(String::from(body)),
            },
            Err(_) => GetBucketWebsiteError::Unknown(body.to_string()),
        }
    }

    /// Reads the standard `<Error>` element from the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
// Conversions from parse, credential, and transport failures.
impl From<XmlParseError> for GetBucketWebsiteError {
    fn from(err: XmlParseError) -> GetBucketWebsiteError {
        let XmlParseError(message) = err;
        GetBucketWebsiteError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for GetBucketWebsiteError {
    fn from(err: CredentialsError) -> GetBucketWebsiteError {
        GetBucketWebsiteError::Credentials(err)
    }
}
impl From<HttpDispatchError> for GetBucketWebsiteError {
    fn from(err: HttpDispatchError) -> GetBucketWebsiteError {
        GetBucketWebsiteError::HttpDispatch(err)
    }
}
impl From<io::Error> for GetBucketWebsiteError {
    fn from(err: io::Error) -> GetBucketWebsiteError {
        GetBucketWebsiteError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for GetBucketWebsiteError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for GetBucketWebsiteError {
    fn description(&self) -> &str {
        match *self {
            GetBucketWebsiteError::Validation(ref cause) => cause,
            GetBucketWebsiteError::Credentials(ref err) => err.description(),
            // (this arm's expression continues on the following lines)
            GetBucketWebsiteError::HttpDispatch(ref dispatch_error) =>
dispatch_error.description(), GetBucketWebsiteError::Unknown(ref cause) => cause, } } } /// Errors returned by GetObject #[derive(Debug, PartialEq)] pub enum GetObjectError { /// <p>The specified key does not exist.</p> NoSuchKey(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl GetObjectError { pub fn from_body(body: &str) -> GetObjectError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "NoSuchKey" => GetObjectError::NoSuchKey(String::from(parsed_error.message)), _ => GetObjectError::Unknown(String::from(body)), }, Err(_) => GetObjectError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for GetObjectError { fn from(err: XmlParseError) -> GetObjectError { let XmlParseError(message) = err; GetObjectError::Unknown(message.to_string()) } } impl From<CredentialsError> for GetObjectError { fn from(err: CredentialsError) -> GetObjectError { GetObjectError::Credentials(err) } } impl From<HttpDispatchError> for GetObjectError { fn from(err: HttpDispatchError) -> GetObjectError { GetObjectError::HttpDispatch(err) } } impl From<io::Error> for GetObjectError { fn from(err: io::Error) -> GetObjectError { GetObjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for GetObjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for GetObjectError { fn 
// Completion of `impl Error for GetObjectError` (its header sits on the previous line).
    description(&self) -> &str {
        match *self {
            GetObjectError::NoSuchKey(ref cause) => cause,
            GetObjectError::Validation(ref cause) => cause,
            GetObjectError::Credentials(ref err) => err.description(),
            GetObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            GetObjectError::Unknown(ref cause) => cause,
        }
    }
}

// ---- GetObjectAclError: same generated pattern, with `NoSuchKey` ----
/// Errors returned by GetObjectAcl
#[derive(Debug, PartialEq)]
pub enum GetObjectAclError {
    /// <p>The specified key does not exist.</p>
    NoSuchKey(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetObjectAclError {
    // Recognizes the "NoSuchKey" code; anything else becomes `Unknown`.
    pub fn from_body(body: &str) -> GetObjectAclError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "NoSuchKey" => GetObjectAclError::NoSuchKey(String::from(parsed_error.message)),
                _ => GetObjectAclError::Unknown(String::from(body)),
            },
            Err(_) => GetObjectAclError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for GetObjectAclError {
    fn from(err: XmlParseError) -> GetObjectAclError { let XmlParseError(message) = err; GetObjectAclError::Unknown(message.to_string()) }
}
impl From<CredentialsError> for GetObjectAclError {
    fn from(err: CredentialsError) -> GetObjectAclError { GetObjectAclError::Credentials(err) }
}
impl From<HttpDispatchError> for GetObjectAclError {
    fn from(err: HttpDispatchError) -> GetObjectAclError { GetObjectAclError::HttpDispatch(err) }
}
impl From<io::Error> for GetObjectAclError {
    fn from(err: io::Error) -> GetObjectAclError { GetObjectAclError::HttpDispatch(HttpDispatchError::from(err)) }
}
impl fmt::Display for GetObjectAclError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) }
}
impl Error for GetObjectAclError {
    fn description(&self) -> &str {
        match *self {
            GetObjectAclError::NoSuchKey(ref cause) => cause,
            GetObjectAclError::Validation(ref cause) => cause,
            GetObjectAclError::Credentials(ref err) => err.description(),
            GetObjectAclError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            GetObjectAclError::Unknown(ref cause) => cause,
        }
    }
}

// ---- GetObjectTaggingError: same generated pattern, wildcard-only ----
/// Errors returned by GetObjectTagging
#[derive(Debug, PartialEq)]
pub enum GetObjectTaggingError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetObjectTaggingError {
    pub fn from_body(body: &str) -> GetObjectTaggingError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetObjectTaggingError::Unknown(String::from(body)),
            },
            Err(_) => GetObjectTaggingError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for GetObjectTaggingError {
    fn from(err: XmlParseError) -> GetObjectTaggingError { let XmlParseError(message) = err; GetObjectTaggingError::Unknown(message.to_string()) }
}
impl From<CredentialsError> for GetObjectTaggingError {
    fn from(err: CredentialsError) -> GetObjectTaggingError { GetObjectTaggingError::Credentials(err) }
}
impl From<HttpDispatchError> for GetObjectTaggingError {
    fn from(err: HttpDispatchError) -> GetObjectTaggingError { GetObjectTaggingError::HttpDispatch(err) }
}
impl From<io::Error> for GetObjectTaggingError {
    fn from(err: io::Error) -> GetObjectTaggingError { GetObjectTaggingError::HttpDispatch(HttpDispatchError::from(err)) }
}
impl fmt::Display for GetObjectTaggingError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) }
}
impl Error for GetObjectTaggingError {
    fn description(&self) -> &str {
        match *self {
            GetObjectTaggingError::Validation(ref cause) => cause,
            GetObjectTaggingError::Credentials(ref err) => err.description(),
            GetObjectTaggingError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            GetObjectTaggingError::Unknown(ref cause) => cause,
        }
    }
}

// ---- GetObjectTorrentError: same generated pattern, wildcard-only ----
/// Errors returned by GetObjectTorrent
#[derive(Debug, PartialEq)]
pub enum GetObjectTorrentError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl GetObjectTorrentError {
    pub fn from_body(body: &str) -> GetObjectTorrentError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => GetObjectTorrentError::Unknown(String::from(body)),
            },
            Err(_) => GetObjectTorrentError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for GetObjectTorrentError {
    fn from(err: XmlParseError) -> GetObjectTorrentError { let XmlParseError(message) = err; GetObjectTorrentError::Unknown(message.to_string()) }
}
impl From<CredentialsError> for GetObjectTorrentError {
    fn from(err: CredentialsError) -> GetObjectTorrentError { GetObjectTorrentError::Credentials(err) }
}
impl From<HttpDispatchError> for GetObjectTorrentError {
    fn from(err: HttpDispatchError) -> GetObjectTorrentError { GetObjectTorrentError::HttpDispatch(err) }
}
impl From<io::Error> for GetObjectTorrentError {
    fn from(err: io::Error) -> GetObjectTorrentError { GetObjectTorrentError::HttpDispatch(HttpDispatchError::from(err)) }
}
impl fmt::Display for GetObjectTorrentError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) }
}
// `fn description` and this impl are closed on the next original line.
impl Error for GetObjectTorrentError {
    fn description(&self) -> &str {
        match *self {
            GetObjectTorrentError::Validation(ref cause) => cause,
            GetObjectTorrentError::Credentials(ref err) => err.description(),
            GetObjectTorrentError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            GetObjectTorrentError::Unknown(ref cause) => cause,
        }
// Closes `fn description` and `impl Error for GetObjectTorrentError` begun on earlier lines.
    }
}

// ---- HeadBucketError: same generated pattern, with `NoSuchBucket` ----
/// Errors returned by HeadBucket
#[derive(Debug, PartialEq)]
pub enum HeadBucketError {
    /// <p>The specified bucket does not exist.</p>
    NoSuchBucket(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl HeadBucketError {
    // Recognizes the "NoSuchBucket" code; anything else becomes `Unknown`.
    pub fn from_body(body: &str) -> HeadBucketError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "NoSuchBucket" => HeadBucketError::NoSuchBucket(String::from(parsed_error.message)),
                _ => HeadBucketError::Unknown(String::from(body)),
            },
            Err(_) => HeadBucketError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for HeadBucketError {
    fn from(err: XmlParseError) -> HeadBucketError { let XmlParseError(message) = err; HeadBucketError::Unknown(message.to_string()) }
}
impl From<CredentialsError> for HeadBucketError {
    fn from(err: CredentialsError) -> HeadBucketError { HeadBucketError::Credentials(err) }
}
impl From<HttpDispatchError> for HeadBucketError {
    fn from(err: HttpDispatchError) -> HeadBucketError { HeadBucketError::HttpDispatch(err) }
}
impl From<io::Error> for HeadBucketError {
    fn from(err: io::Error) -> HeadBucketError { HeadBucketError::HttpDispatch(HttpDispatchError::from(err)) }
}
impl fmt::Display for HeadBucketError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) }
}
impl Error for HeadBucketError {
    fn description(&self) -> &str {
        match *self {
            HeadBucketError::NoSuchBucket(ref cause) => cause,
            HeadBucketError::Validation(ref cause) => cause,
            HeadBucketError::Credentials(ref err) => err.description(),
            HeadBucketError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            HeadBucketError::Unknown(ref cause) => cause,
        }
    }
}

// ---- HeadObjectError: same generated pattern, with `NoSuchKey` ----
/// Errors returned by HeadObject
#[derive(Debug, PartialEq)]
pub enum HeadObjectError {
    /// <p>The specified key does not exist.</p>
    NoSuchKey(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl HeadObjectError {
    pub fn from_body(body: &str) -> HeadObjectError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "NoSuchKey" => HeadObjectError::NoSuchKey(String::from(parsed_error.message)),
                _ => HeadObjectError::Unknown(String::from(body)),
            },
            Err(_) => HeadObjectError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for HeadObjectError {
    fn from(err: XmlParseError) -> HeadObjectError { let XmlParseError(message) = err; HeadObjectError::Unknown(message.to_string()) }
}
impl From<CredentialsError> for HeadObjectError {
    fn from(err: CredentialsError) -> HeadObjectError { HeadObjectError::Credentials(err) }
}
impl From<HttpDispatchError> for HeadObjectError {
    fn from(err: HttpDispatchError) -> HeadObjectError { HeadObjectError::HttpDispatch(err) }
}
impl From<io::Error> for HeadObjectError {
    fn from(err: io::Error) -> HeadObjectError { HeadObjectError::HttpDispatch(HttpDispatchError::from(err)) }
}
impl fmt::Display for HeadObjectError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) }
}
impl Error for HeadObjectError {
    fn description(&self) -> &str {
        match *self {
            HeadObjectError::NoSuchKey(ref cause) => cause,
            HeadObjectError::Validation(ref cause) => cause,
            HeadObjectError::Credentials(ref err) => err.description(),
            HeadObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            HeadObjectError::Unknown(ref cause) => cause,
        }
    }
}

// ---- ListBucketAnalyticsConfigurationsError ----
// The `Unknown` variant and the enum's closing brace are on the next original line.
/// Errors returned by ListBucketAnalyticsConfigurations
#[derive(Debug, PartialEq)]
pub enum ListBucketAnalyticsConfigurationsError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
// `Unknown` variant and closing brace of the enum declared on the previous lines.
    Unknown(String),
}

impl ListBucketAnalyticsConfigurationsError {
    // Wildcard-only code match: every body maps to `Unknown`.
    pub fn from_body(body: &str) -> ListBucketAnalyticsConfigurationsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => ListBucketAnalyticsConfigurationsError::Unknown(String::from(body)),
            },
            Err(_) => ListBucketAnalyticsConfigurationsError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for ListBucketAnalyticsConfigurationsError {
    fn from(err: XmlParseError) -> ListBucketAnalyticsConfigurationsError {
        let XmlParseError(message) = err;
        ListBucketAnalyticsConfigurationsError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for ListBucketAnalyticsConfigurationsError {
    fn from(err: CredentialsError) -> ListBucketAnalyticsConfigurationsError {
        ListBucketAnalyticsConfigurationsError::Credentials(err)
    }
}
impl From<HttpDispatchError> for ListBucketAnalyticsConfigurationsError {
    fn from(err: HttpDispatchError) -> ListBucketAnalyticsConfigurationsError {
        ListBucketAnalyticsConfigurationsError::HttpDispatch(err)
    }
}
impl From<io::Error> for ListBucketAnalyticsConfigurationsError {
    fn from(err: io::Error) -> ListBucketAnalyticsConfigurationsError {
        ListBucketAnalyticsConfigurationsError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for ListBucketAnalyticsConfigurationsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) }
}
impl Error for ListBucketAnalyticsConfigurationsError {
    fn description(&self) -> &str {
        match *self {
            ListBucketAnalyticsConfigurationsError::Validation(ref cause) => cause,
            ListBucketAnalyticsConfigurationsError::Credentials(ref err) => err.description(),
            ListBucketAnalyticsConfigurationsError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            ListBucketAnalyticsConfigurationsError::Unknown(ref cause) => cause,
        }
    }
}

// ---- ListBucketInventoryConfigurationsError: same generated pattern ----
/// Errors returned by ListBucketInventoryConfigurations
#[derive(Debug, PartialEq)]
pub enum ListBucketInventoryConfigurationsError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl ListBucketInventoryConfigurationsError {
    pub fn from_body(body: &str) -> ListBucketInventoryConfigurationsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => ListBucketInventoryConfigurationsError::Unknown(String::from(body)),
            },
            Err(_) => ListBucketInventoryConfigurationsError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for ListBucketInventoryConfigurationsError {
    fn from(err: XmlParseError) -> ListBucketInventoryConfigurationsError {
        let XmlParseError(message) = err;
        ListBucketInventoryConfigurationsError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for ListBucketInventoryConfigurationsError {
    fn from(err: CredentialsError) -> ListBucketInventoryConfigurationsError {
        ListBucketInventoryConfigurationsError::Credentials(err)
    }
}
impl From<HttpDispatchError> for ListBucketInventoryConfigurationsError {
    fn from(err: HttpDispatchError) -> ListBucketInventoryConfigurationsError {
        ListBucketInventoryConfigurationsError::HttpDispatch(err)
    }
}
impl From<io::Error> for ListBucketInventoryConfigurationsError {
    fn from(err: io::Error) -> ListBucketInventoryConfigurationsError {
        ListBucketInventoryConfigurationsError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for ListBucketInventoryConfigurationsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) }
}
impl Error for ListBucketInventoryConfigurationsError {
    fn description(&self) -> &str {
        match *self {
            ListBucketInventoryConfigurationsError::Validation(ref cause) => cause,
            ListBucketInventoryConfigurationsError::Credentials(ref err) => err.description(),
            ListBucketInventoryConfigurationsError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            ListBucketInventoryConfigurationsError::Unknown(ref cause) => cause,
        }
    }
}

// ---- ListBucketMetricsConfigurationsError: same generated pattern ----
/// Errors returned by ListBucketMetricsConfigurations
#[derive(Debug, PartialEq)]
pub enum ListBucketMetricsConfigurationsError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl ListBucketMetricsConfigurationsError {
    pub fn from_body(body: &str) -> ListBucketMetricsConfigurationsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => ListBucketMetricsConfigurationsError::Unknown(String::from(body)),
            },
            Err(_) => ListBucketMetricsConfigurationsError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for ListBucketMetricsConfigurationsError {
    fn from(err: XmlParseError) -> ListBucketMetricsConfigurationsError {
        let XmlParseError(message) = err;
        ListBucketMetricsConfigurationsError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for ListBucketMetricsConfigurationsError {
    fn from(err: CredentialsError) -> ListBucketMetricsConfigurationsError {
        ListBucketMetricsConfigurationsError::Credentials(err)
    }
}
impl From<HttpDispatchError> for ListBucketMetricsConfigurationsError {
    fn from(err: HttpDispatchError) -> ListBucketMetricsConfigurationsError {
        ListBucketMetricsConfigurationsError::HttpDispatch(err)
    }
}
impl From<io::Error> for ListBucketMetricsConfigurationsError {
    fn from(err: io::Error) -> ListBucketMetricsConfigurationsError {
        ListBucketMetricsConfigurationsError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for ListBucketMetricsConfigurationsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) }
}
impl Error for ListBucketMetricsConfigurationsError {
    fn description(&self) -> &str {
        match *self {
            ListBucketMetricsConfigurationsError::Validation(ref cause) => cause,
            ListBucketMetricsConfigurationsError::Credentials(ref err) => err.description(),
            ListBucketMetricsConfigurationsError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            ListBucketMetricsConfigurationsError::Unknown(ref cause) => cause,
        }
    }
}

// ---- ListBucketsError ----
// The code-match arms of `from_body` continue on the next original line.
/// Errors returned by ListBuckets
#[derive(Debug, PartialEq)]
pub enum ListBucketsError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl ListBucketsError {
    pub fn from_body(body: &str) -> ListBucketsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..]
// Arms of the `from_body` code match begun on the previous line (wildcard-only).
            {
                _ => ListBucketsError::Unknown(String::from(body)),
            },
            Err(_) => ListBucketsError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for ListBucketsError {
    fn from(err: XmlParseError) -> ListBucketsError { let XmlParseError(message) = err; ListBucketsError::Unknown(message.to_string()) }
}
impl From<CredentialsError> for ListBucketsError {
    fn from(err: CredentialsError) -> ListBucketsError { ListBucketsError::Credentials(err) }
}
impl From<HttpDispatchError> for ListBucketsError {
    fn from(err: HttpDispatchError) -> ListBucketsError { ListBucketsError::HttpDispatch(err) }
}
impl From<io::Error> for ListBucketsError {
    fn from(err: io::Error) -> ListBucketsError { ListBucketsError::HttpDispatch(HttpDispatchError::from(err)) }
}
impl fmt::Display for ListBucketsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) }
}
impl Error for ListBucketsError {
    fn description(&self) -> &str {
        match *self {
            ListBucketsError::Validation(ref cause) => cause,
            ListBucketsError::Credentials(ref err) => err.description(),
            ListBucketsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListBucketsError::Unknown(ref cause) => cause,
        }
    }
}

// ---- ListMultipartUploadsError: same generated pattern, wildcard-only ----
/// Errors returned by ListMultipartUploads
#[derive(Debug, PartialEq)]
pub enum ListMultipartUploadsError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl ListMultipartUploadsError {
    pub fn from_body(body: &str) -> ListMultipartUploadsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => ListMultipartUploadsError::Unknown(String::from(body)),
            },
            Err(_) => ListMultipartUploadsError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for ListMultipartUploadsError {
    fn from(err: XmlParseError) -> ListMultipartUploadsError { let XmlParseError(message) = err; ListMultipartUploadsError::Unknown(message.to_string()) }
}
impl From<CredentialsError> for ListMultipartUploadsError {
    fn from(err: CredentialsError) -> ListMultipartUploadsError { ListMultipartUploadsError::Credentials(err) }
}
impl From<HttpDispatchError> for ListMultipartUploadsError {
    fn from(err: HttpDispatchError) -> ListMultipartUploadsError { ListMultipartUploadsError::HttpDispatch(err) }
}
impl From<io::Error> for ListMultipartUploadsError {
    fn from(err: io::Error) -> ListMultipartUploadsError { ListMultipartUploadsError::HttpDispatch(HttpDispatchError::from(err)) }
}
impl fmt::Display for ListMultipartUploadsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) }
}
impl Error for ListMultipartUploadsError {
    fn description(&self) -> &str {
        match *self {
            ListMultipartUploadsError::Validation(ref cause) => cause,
            ListMultipartUploadsError::Credentials(ref err) => err.description(),
            ListMultipartUploadsError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            ListMultipartUploadsError::Unknown(ref cause) => cause,
        }
    }
}

// ---- ListObjectVersionsError: same generated pattern, wildcard-only ----
/// Errors returned by ListObjectVersions
#[derive(Debug, PartialEq)]
pub enum ListObjectVersionsError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl ListObjectVersionsError {
    pub fn from_body(body: &str) -> ListObjectVersionsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => ListObjectVersionsError::Unknown(String::from(body)),
            },
            Err(_) => ListObjectVersionsError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for ListObjectVersionsError {
    fn from(err: XmlParseError) -> ListObjectVersionsError { let XmlParseError(message) = err; ListObjectVersionsError::Unknown(message.to_string()) }
}
impl From<CredentialsError> for ListObjectVersionsError {
    fn from(err: CredentialsError) -> ListObjectVersionsError { ListObjectVersionsError::Credentials(err) }
}
impl From<HttpDispatchError> for ListObjectVersionsError {
    fn from(err: HttpDispatchError) -> ListObjectVersionsError { ListObjectVersionsError::HttpDispatch(err) }
}
impl From<io::Error> for ListObjectVersionsError {
    fn from(err: io::Error) -> ListObjectVersionsError { ListObjectVersionsError::HttpDispatch(HttpDispatchError::from(err)) }
}
impl fmt::Display for ListObjectVersionsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) }
}
impl Error for ListObjectVersionsError {
    fn description(&self) -> &str {
        match *self {
            ListObjectVersionsError::Validation(ref cause) => cause,
            ListObjectVersionsError::Credentials(ref err) => err.description(),
            ListObjectVersionsError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            ListObjectVersionsError::Unknown(ref cause) => cause,
        }
    }
}

// ---- ListObjectsError: same generated pattern, with `NoSuchBucket` ----
// The `Display` impl is completed on the next original line.
/// Errors returned by ListObjects
#[derive(Debug, PartialEq)]
pub enum ListObjectsError {
    /// <p>The specified bucket does not exist.</p>
    NoSuchBucket(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}
impl ListObjectsError {
    // Recognizes the "NoSuchBucket" code; anything else becomes `Unknown`.
    pub fn from_body(body: &str) -> ListObjectsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "NoSuchBucket" => {
                    ListObjectsError::NoSuchBucket(String::from(parsed_error.message))
                }
                _ => ListObjectsError::Unknown(String::from(body)),
            },
            Err(_) => ListObjectsError::Unknown(body.to_string()),
        }
    }
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for ListObjectsError {
    fn from(err: XmlParseError) -> ListObjectsError { let XmlParseError(message) = err; ListObjectsError::Unknown(message.to_string()) }
}
impl From<CredentialsError> for ListObjectsError {
    fn from(err: CredentialsError) -> ListObjectsError { ListObjectsError::Credentials(err) }
}
impl From<HttpDispatchError> for ListObjectsError {
    fn from(err: HttpDispatchError) -> ListObjectsError { ListObjectsError::HttpDispatch(err) }
}
impl From<io::Error> for ListObjectsError {
    fn from(err: io::Error) -> ListObjectsError { ListObjectsError::HttpDispatch(HttpDispatchError::from(err)) }
}
impl fmt::Display for ListObjectsError {
    fn fmt(&self, f: &mut
// --- continuation: closing half of `impl fmt::Display for ListObjectsError`;
// the `fn fmt(&self, f: &mut` prefix lies in the preceding chunk ---
fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

// `description()` exposes the raw cause string per variant, delegating to the
// wrapped error for the credentials / HTTP-dispatch cases.
impl Error for ListObjectsError {
    fn description(&self) -> &str {
        match *self {
            ListObjectsError::NoSuchBucket(ref cause) => cause,
            ListObjectsError::Validation(ref cause) => cause,
            ListObjectsError::Credentials(ref err) => err.description(),
            ListObjectsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListObjectsError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListObjectsV2
// NOTE(review): this section appears to be machine-generated (rusoto-style)
// S3 error boilerplate — one enum per API operation with identical
// scaffolding. If the pattern must change, prefer fixing the generator over
// hand-editing each copy.
#[derive(Debug, PartialEq)]
pub enum ListObjectsV2Error {
    /// <p>The specified bucket does not exist.</p>
    NoSuchBucket(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListObjectsV2Error {
    // Parses an XML <Error> response body: a recognized error code maps to a
    // typed variant; any other code — or a body that fails to parse — falls
    // back to `Unknown` carrying the raw body text.
    pub fn from_body(body: &str) -> ListObjectsV2Error {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "NoSuchBucket" => {
                    ListObjectsV2Error::NoSuchBucket(String::from(parsed_error.message))
                }
                _ => ListObjectsV2Error::Unknown(String::from(body)),
            },
            Err(_) => ListObjectsV2Error::Unknown(body.to_string()),
        }
    }

    // Pulls the <Error> element off the XML event stack.
    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for ListObjectsV2Error {
    fn from(err: XmlParseError) -> ListObjectsV2Error {
        let XmlParseError(message) = err;
        ListObjectsV2Error::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for ListObjectsV2Error {
    fn from(err: CredentialsError) -> ListObjectsV2Error {
        ListObjectsV2Error::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListObjectsV2Error {
    fn from(err: HttpDispatchError) -> ListObjectsV2Error {
        ListObjectsV2Error::HttpDispatch(err)
    }
}

impl From<io::Error> for ListObjectsV2Error {
    fn from(err: io::Error) -> ListObjectsV2Error {
        ListObjectsV2Error::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListObjectsV2Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListObjectsV2Error {
    fn description(&self) -> &str {
        match *self {
            ListObjectsV2Error::NoSuchBucket(ref cause) => cause,
            ListObjectsV2Error::Validation(ref cause) => cause,
            ListObjectsV2Error::Credentials(ref err) => err.description(),
            ListObjectsV2Error::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListObjectsV2Error::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ListParts
#[derive(Debug, PartialEq)]
pub enum ListPartsError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ListPartsError {
    // No operation-specific error codes are distinguished here: every parsed
    // code (the lone `_` arm) and every parse failure becomes `Unknown`.
    pub fn from_body(body: &str) -> ListPartsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => ListPartsError::Unknown(String::from(body)),
            },
            Err(_) => ListPartsError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for ListPartsError {
    fn from(err: XmlParseError) -> ListPartsError {
        let XmlParseError(message) = err;
        ListPartsError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for ListPartsError {
    fn from(err: CredentialsError) -> ListPartsError {
        ListPartsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ListPartsError {
    fn from(err: HttpDispatchError) -> ListPartsError {
        ListPartsError::HttpDispatch(err)
    }
}

impl From<io::Error> for ListPartsError {
    fn from(err: io::Error) -> ListPartsError {
        ListPartsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ListPartsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ListPartsError {
    fn description(&self) -> &str {
        match *self {
            ListPartsError::Validation(ref cause) => cause,
            ListPartsError::Credentials(ref err) => err.description(),
            ListPartsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ListPartsError::Unknown(ref cause) => cause,
        }
    }
}

// --- start of PutBucketAccelerateConfigurationError; the remaining enum
// variants continue on the next chunk line ---
/// Errors returned by PutBucketAccelerateConfiguration
#[derive(Debug, PartialEq)]
pub enum PutBucketAccelerateConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    // --- continuation of the PutBucketAccelerateConfigurationError enum
    // begun on the previous chunk line ---
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

// Generated boilerplate: parse the XML <Error> body; no operation-specific
// codes are distinguished, so every outcome maps to `Unknown`.
impl PutBucketAccelerateConfigurationError {
    pub fn from_body(body: &str) -> PutBucketAccelerateConfigurationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketAccelerateConfigurationError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketAccelerateConfigurationError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketAccelerateConfigurationError {
    fn from(err: XmlParseError) -> PutBucketAccelerateConfigurationError {
        let XmlParseError(message) = err;
        PutBucketAccelerateConfigurationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketAccelerateConfigurationError {
    fn from(err: CredentialsError) -> PutBucketAccelerateConfigurationError {
        PutBucketAccelerateConfigurationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketAccelerateConfigurationError {
    fn from(err: HttpDispatchError) -> PutBucketAccelerateConfigurationError {
        PutBucketAccelerateConfigurationError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketAccelerateConfigurationError {
    fn from(err: io::Error) -> PutBucketAccelerateConfigurationError {
        PutBucketAccelerateConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketAccelerateConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketAccelerateConfigurationError {
    fn description(&self) -> &str {
        match *self {
            PutBucketAccelerateConfigurationError::Validation(ref cause) => cause,
            PutBucketAccelerateConfigurationError::Credentials(ref err) => err.description(),
            PutBucketAccelerateConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            PutBucketAccelerateConfigurationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by PutBucketAcl
#[derive(Debug, PartialEq)]
pub enum PutBucketAclError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketAclError {
    // Same generated pattern: all parsed codes and parse failures → `Unknown`.
    pub fn from_body(body: &str) -> PutBucketAclError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketAclError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketAclError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketAclError {
    fn from(err: XmlParseError) -> PutBucketAclError {
        let XmlParseError(message) = err;
        PutBucketAclError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketAclError {
    fn from(err: CredentialsError) -> PutBucketAclError {
        PutBucketAclError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketAclError {
    fn from(err: HttpDispatchError) -> PutBucketAclError {
        PutBucketAclError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketAclError {
    fn from(err: io::Error) -> PutBucketAclError {
        PutBucketAclError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketAclError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketAclError {
    fn description(&self) -> &str {
        match *self {
            PutBucketAclError::Validation(ref cause) => cause,
            PutBucketAclError::Credentials(ref err) => err.description(),
            PutBucketAclError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            PutBucketAclError::Unknown(ref cause) => cause,
        }
    }
}

// --- start of PutBucketAnalyticsConfigurationError; `from_body` is cut off
// mid-match and continues on the next chunk line ---
/// Errors returned by PutBucketAnalyticsConfiguration
#[derive(Debug, PartialEq)]
pub enum PutBucketAnalyticsConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketAnalyticsConfigurationError {
    pub fn from_body(body: &str) -> PutBucketAnalyticsConfigurationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..]
            // --- continuation of PutBucketAnalyticsConfigurationError::from_body
            // begun on the previous chunk line: the lone `_` arm maps every
            // parsed code to `Unknown`, as does a parse failure ---
            {
                _ => PutBucketAnalyticsConfigurationError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketAnalyticsConfigurationError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketAnalyticsConfigurationError {
    fn from(err: XmlParseError) -> PutBucketAnalyticsConfigurationError {
        let XmlParseError(message) = err;
        PutBucketAnalyticsConfigurationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketAnalyticsConfigurationError {
    fn from(err: CredentialsError) -> PutBucketAnalyticsConfigurationError {
        PutBucketAnalyticsConfigurationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketAnalyticsConfigurationError {
    fn from(err: HttpDispatchError) -> PutBucketAnalyticsConfigurationError {
        PutBucketAnalyticsConfigurationError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketAnalyticsConfigurationError {
    fn from(err: io::Error) -> PutBucketAnalyticsConfigurationError {
        PutBucketAnalyticsConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketAnalyticsConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketAnalyticsConfigurationError {
    fn description(&self) -> &str {
        match *self {
            PutBucketAnalyticsConfigurationError::Validation(ref cause) => cause,
            PutBucketAnalyticsConfigurationError::Credentials(ref err) => err.description(),
            PutBucketAnalyticsConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            PutBucketAnalyticsConfigurationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by PutBucketCors
#[derive(Debug, PartialEq)]
pub enum PutBucketCorsError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketCorsError {
    // Generated boilerplate: all parsed codes and parse failures → `Unknown`.
    pub fn from_body(body: &str) -> PutBucketCorsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketCorsError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketCorsError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketCorsError {
    fn from(err: XmlParseError) -> PutBucketCorsError {
        let XmlParseError(message) = err;
        PutBucketCorsError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketCorsError {
    fn from(err: CredentialsError) -> PutBucketCorsError {
        PutBucketCorsError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketCorsError {
    fn from(err: HttpDispatchError) -> PutBucketCorsError {
        PutBucketCorsError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketCorsError {
    fn from(err: io::Error) -> PutBucketCorsError {
        PutBucketCorsError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketCorsError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketCorsError {
    fn description(&self) -> &str {
        match *self {
            PutBucketCorsError::Validation(ref cause) => cause,
            PutBucketCorsError::Credentials(ref err) => err.description(),
            PutBucketCorsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            PutBucketCorsError::Unknown(ref cause) => cause,
        }
    }
}

// --- PutBucketEncryptionError: its `Error::description` match is cut off at
// the end of this span and continues on the next chunk line ---
/// Errors returned by PutBucketEncryption
#[derive(Debug, PartialEq)]
pub enum PutBucketEncryptionError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketEncryptionError {
    pub fn from_body(body: &str) -> PutBucketEncryptionError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketEncryptionError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketEncryptionError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketEncryptionError {
    fn from(err: XmlParseError) -> PutBucketEncryptionError {
        let XmlParseError(message) = err;
        PutBucketEncryptionError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketEncryptionError {
    fn from(err: CredentialsError) -> PutBucketEncryptionError {
        PutBucketEncryptionError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketEncryptionError {
    fn from(err: HttpDispatchError) -> PutBucketEncryptionError {
        PutBucketEncryptionError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketEncryptionError {
    fn from(err: io::Error) -> PutBucketEncryptionError {
        PutBucketEncryptionError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketEncryptionError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketEncryptionError {
    fn description(&self) -> &str {
        match *self {
            PutBucketEncryptionError::Validation(ref cause) => cause,
            // --- continuation of `Error::description` for
            // PutBucketEncryptionError begun on the previous chunk line ---
            PutBucketEncryptionError::Credentials(ref err) => err.description(),
            PutBucketEncryptionError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            PutBucketEncryptionError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by PutBucketInventoryConfiguration
#[derive(Debug, PartialEq)]
pub enum PutBucketInventoryConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketInventoryConfigurationError {
    // Generated boilerplate: parse the XML <Error> body; no specific error
    // codes are distinguished, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> PutBucketInventoryConfigurationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketInventoryConfigurationError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketInventoryConfigurationError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketInventoryConfigurationError {
    fn from(err: XmlParseError) -> PutBucketInventoryConfigurationError {
        let XmlParseError(message) = err;
        PutBucketInventoryConfigurationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketInventoryConfigurationError {
    fn from(err: CredentialsError) -> PutBucketInventoryConfigurationError {
        PutBucketInventoryConfigurationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketInventoryConfigurationError {
    fn from(err: HttpDispatchError) -> PutBucketInventoryConfigurationError {
        PutBucketInventoryConfigurationError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketInventoryConfigurationError {
    fn from(err: io::Error) -> PutBucketInventoryConfigurationError {
        PutBucketInventoryConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketInventoryConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketInventoryConfigurationError {
    fn description(&self) -> &str {
        match *self {
            PutBucketInventoryConfigurationError::Validation(ref cause) => cause,
            PutBucketInventoryConfigurationError::Credentials(ref err) => err.description(),
            PutBucketInventoryConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            PutBucketInventoryConfigurationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by PutBucketLifecycle
#[derive(Debug, PartialEq)]
pub enum PutBucketLifecycleError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketLifecycleError {
    pub fn from_body(body: &str) -> PutBucketLifecycleError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketLifecycleError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketLifecycleError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketLifecycleError {
    fn from(err: XmlParseError) -> PutBucketLifecycleError {
        let XmlParseError(message) = err;
        PutBucketLifecycleError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketLifecycleError {
    fn from(err: CredentialsError) -> PutBucketLifecycleError {
        PutBucketLifecycleError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketLifecycleError {
    fn from(err: HttpDispatchError) -> PutBucketLifecycleError {
        PutBucketLifecycleError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketLifecycleError {
    fn from(err: io::Error) -> PutBucketLifecycleError {
        PutBucketLifecycleError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketLifecycleError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketLifecycleError {
    fn description(&self) -> &str {
        match *self {
            PutBucketLifecycleError::Validation(ref cause) => cause,
            PutBucketLifecycleError::Credentials(ref err) => err.description(),
            PutBucketLifecycleError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            PutBucketLifecycleError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by PutBucketLifecycleConfiguration
#[derive(Debug, PartialEq)]
pub enum PutBucketLifecycleConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketLifecycleConfigurationError {
    pub fn from_body(body: &str) -> PutBucketLifecycleConfigurationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketLifecycleConfigurationError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketLifecycleConfigurationError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketLifecycleConfigurationError {
    fn from(err: XmlParseError) -> PutBucketLifecycleConfigurationError {
        let XmlParseError(message) = err;
        PutBucketLifecycleConfigurationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketLifecycleConfigurationError {
    fn from(err: CredentialsError) -> PutBucketLifecycleConfigurationError {
        PutBucketLifecycleConfigurationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketLifecycleConfigurationError {
    fn from(err: HttpDispatchError) -> PutBucketLifecycleConfigurationError {
        PutBucketLifecycleConfigurationError::HttpDispatch(err)
    }
}

// --- this `From<io::Error>` impl is cut off mid-function and continues on
// the next chunk line ---
impl From<io::Error> for PutBucketLifecycleConfigurationError {
    fn from(err: io::Error) -> PutBucketLifecycleConfigurationError {
        // --- continuation of `From<io::Error> for
        // PutBucketLifecycleConfigurationError` begun on the previous chunk
        // line ---
        PutBucketLifecycleConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketLifecycleConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketLifecycleConfigurationError {
    fn description(&self) -> &str {
        match *self {
            PutBucketLifecycleConfigurationError::Validation(ref cause) => cause,
            PutBucketLifecycleConfigurationError::Credentials(ref err) => err.description(),
            PutBucketLifecycleConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            PutBucketLifecycleConfigurationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by PutBucketLogging
#[derive(Debug, PartialEq)]
pub enum PutBucketLoggingError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketLoggingError {
    // Generated boilerplate: parse the XML <Error> body; no specific error
    // codes are distinguished, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> PutBucketLoggingError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketLoggingError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketLoggingError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketLoggingError {
    fn from(err: XmlParseError) -> PutBucketLoggingError {
        let XmlParseError(message) = err;
        PutBucketLoggingError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketLoggingError {
    fn from(err: CredentialsError) -> PutBucketLoggingError {
        PutBucketLoggingError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketLoggingError {
    fn from(err: HttpDispatchError) -> PutBucketLoggingError {
        PutBucketLoggingError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketLoggingError {
    fn from(err: io::Error) -> PutBucketLoggingError {
        PutBucketLoggingError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketLoggingError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketLoggingError {
    fn description(&self) -> &str {
        match *self {
            PutBucketLoggingError::Validation(ref cause) => cause,
            PutBucketLoggingError::Credentials(ref err) => err.description(),
            PutBucketLoggingError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            PutBucketLoggingError::Unknown(ref cause) => cause,
        }
    }
}

// --- PutBucketMetricsConfigurationError: its `Error::description` match is
// cut off at the end of this span and continues on the next chunk line ---
/// Errors returned by PutBucketMetricsConfiguration
#[derive(Debug, PartialEq)]
pub enum PutBucketMetricsConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketMetricsConfigurationError {
    pub fn from_body(body: &str) -> PutBucketMetricsConfigurationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketMetricsConfigurationError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketMetricsConfigurationError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketMetricsConfigurationError {
    fn from(err: XmlParseError) -> PutBucketMetricsConfigurationError {
        let XmlParseError(message) = err;
        PutBucketMetricsConfigurationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketMetricsConfigurationError {
    fn from(err: CredentialsError) -> PutBucketMetricsConfigurationError {
        PutBucketMetricsConfigurationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketMetricsConfigurationError {
    fn from(err: HttpDispatchError) -> PutBucketMetricsConfigurationError {
        PutBucketMetricsConfigurationError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketMetricsConfigurationError {
    fn from(err: io::Error) -> PutBucketMetricsConfigurationError {
        PutBucketMetricsConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketMetricsConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketMetricsConfigurationError {
    fn description(&self) -> &str {
        match *self {
            PutBucketMetricsConfigurationError::Validation(ref cause) => cause,
            PutBucketMetricsConfigurationError::Credentials(ref err) => err.description(),
            PutBucketMetricsConfigurationError::HttpDispatch(ref dispatch_error) => {
                // --- continuation of `Error::description` for
                // PutBucketMetricsConfigurationError begun on the previous
                // chunk line ---
                dispatch_error.description()
            }
            PutBucketMetricsConfigurationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by PutBucketNotification
#[derive(Debug, PartialEq)]
pub enum PutBucketNotificationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketNotificationError {
    // Generated boilerplate: parse the XML <Error> body; no specific error
    // codes are distinguished, so every outcome maps to `Unknown`.
    pub fn from_body(body: &str) -> PutBucketNotificationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketNotificationError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketNotificationError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketNotificationError {
    fn from(err: XmlParseError) -> PutBucketNotificationError {
        let XmlParseError(message) = err;
        PutBucketNotificationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketNotificationError {
    fn from(err: CredentialsError) -> PutBucketNotificationError {
        PutBucketNotificationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketNotificationError {
    fn from(err: HttpDispatchError) -> PutBucketNotificationError {
        PutBucketNotificationError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketNotificationError {
    fn from(err: io::Error) -> PutBucketNotificationError {
        PutBucketNotificationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketNotificationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketNotificationError {
    fn description(&self) -> &str {
        match *self {
            PutBucketNotificationError::Validation(ref cause) => cause,
            PutBucketNotificationError::Credentials(ref err) => err.description(),
            PutBucketNotificationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            PutBucketNotificationError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by PutBucketNotificationConfiguration
#[derive(Debug, PartialEq)]
pub enum PutBucketNotificationConfigurationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketNotificationConfigurationError {
    pub fn from_body(body: &str) -> PutBucketNotificationConfigurationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketNotificationConfigurationError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketNotificationConfigurationError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketNotificationConfigurationError {
    fn from(err: XmlParseError) -> PutBucketNotificationConfigurationError {
        let XmlParseError(message) = err;
        PutBucketNotificationConfigurationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketNotificationConfigurationError {
    fn from(err: CredentialsError) -> PutBucketNotificationConfigurationError {
        PutBucketNotificationConfigurationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketNotificationConfigurationError {
    fn from(err: HttpDispatchError) -> PutBucketNotificationConfigurationError {
        PutBucketNotificationConfigurationError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketNotificationConfigurationError {
    fn from(err: io::Error) -> PutBucketNotificationConfigurationError {
        PutBucketNotificationConfigurationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketNotificationConfigurationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketNotificationConfigurationError {
    fn description(&self) -> &str {
        match *self {
            PutBucketNotificationConfigurationError::Validation(ref cause) => cause,
            PutBucketNotificationConfigurationError::Credentials(ref err) => err.description(),
            PutBucketNotificationConfigurationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            PutBucketNotificationConfigurationError::Unknown(ref cause) => cause,
        }
    }
}

// --- start of PutBucketPolicyError; the remaining enum variants continue on
// the next chunk line ---
/// Errors returned by PutBucketPolicy
#[derive(Debug, PartialEq)]
pub enum PutBucketPolicyError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    // --- continuation of the PutBucketPolicyError enum begun on the previous
    // chunk line ---
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

// Generated boilerplate: parse the XML <Error> body; no specific error codes
// are distinguished, so every outcome maps to `Unknown`.
impl PutBucketPolicyError {
    pub fn from_body(body: &str) -> PutBucketPolicyError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketPolicyError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketPolicyError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketPolicyError {
    fn from(err: XmlParseError) -> PutBucketPolicyError {
        let XmlParseError(message) = err;
        PutBucketPolicyError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketPolicyError {
    fn from(err: CredentialsError) -> PutBucketPolicyError {
        PutBucketPolicyError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketPolicyError {
    fn from(err: HttpDispatchError) -> PutBucketPolicyError {
        PutBucketPolicyError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketPolicyError {
    fn from(err: io::Error) -> PutBucketPolicyError {
        PutBucketPolicyError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketPolicyError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketPolicyError {
    fn description(&self) -> &str {
        match *self {
            PutBucketPolicyError::Validation(ref cause) => cause,
            PutBucketPolicyError::Credentials(ref err) => err.description(),
            PutBucketPolicyError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            PutBucketPolicyError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by PutBucketReplication
#[derive(Debug, PartialEq)]
pub enum PutBucketReplicationError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketReplicationError {
    pub fn from_body(body: &str) -> PutBucketReplicationError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => PutBucketReplicationError::Unknown(String::from(body)),
            },
            Err(_) => PutBucketReplicationError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for PutBucketReplicationError {
    fn from(err: XmlParseError) -> PutBucketReplicationError {
        let XmlParseError(message) = err;
        PutBucketReplicationError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for PutBucketReplicationError {
    fn from(err: CredentialsError) -> PutBucketReplicationError {
        PutBucketReplicationError::Credentials(err)
    }
}

impl From<HttpDispatchError> for PutBucketReplicationError {
    fn from(err: HttpDispatchError) -> PutBucketReplicationError {
        PutBucketReplicationError::HttpDispatch(err)
    }
}

impl From<io::Error> for PutBucketReplicationError {
    fn from(err: io::Error) -> PutBucketReplicationError {
        PutBucketReplicationError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for PutBucketReplicationError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for PutBucketReplicationError {
    fn description(&self) -> &str {
        match *self {
            PutBucketReplicationError::Validation(ref cause) => cause,
            PutBucketReplicationError::Credentials(ref err) => err.description(),
            PutBucketReplicationError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            PutBucketReplicationError::Unknown(ref cause) => cause,
        }
    }
}

// --- start of PutBucketRequestPaymentError; `from_body` is cut off at the
// chunk boundary and continues past the end of this view ---
/// Errors returned by PutBucketRequestPayment
#[derive(Debug, PartialEq)]
pub enum PutBucketRequestPaymentError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl PutBucketRequestPaymentError {
    pub fn from_body(body: &str) -> PutBucketRequestPaymentError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..]
{ _ => PutBucketRequestPaymentError::Unknown(String::from(body)), }, Err(_) => PutBucketRequestPaymentError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketRequestPaymentError { fn from(err: XmlParseError) -> PutBucketRequestPaymentError { let XmlParseError(message) = err; PutBucketRequestPaymentError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketRequestPaymentError { fn from(err: CredentialsError) -> PutBucketRequestPaymentError { PutBucketRequestPaymentError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketRequestPaymentError { fn from(err: HttpDispatchError) -> PutBucketRequestPaymentError { PutBucketRequestPaymentError::HttpDispatch(err) } } impl From<io::Error> for PutBucketRequestPaymentError { fn from(err: io::Error) -> PutBucketRequestPaymentError { PutBucketRequestPaymentError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketRequestPaymentError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketRequestPaymentError { fn description(&self) -> &str { match *self { PutBucketRequestPaymentError::Validation(ref cause) => cause, PutBucketRequestPaymentError::Credentials(ref err) => err.description(), PutBucketRequestPaymentError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketRequestPaymentError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketTagging #[derive(Debug, PartialEq)] pub enum PutBucketTaggingError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. 
The raw HTTP response is provided. Unknown(String), } impl PutBucketTaggingError { pub fn from_body(body: &str) -> PutBucketTaggingError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketTaggingError::Unknown(String::from(body)), }, Err(_) => PutBucketTaggingError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketTaggingError { fn from(err: XmlParseError) -> PutBucketTaggingError { let XmlParseError(message) = err; PutBucketTaggingError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketTaggingError { fn from(err: CredentialsError) -> PutBucketTaggingError { PutBucketTaggingError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketTaggingError { fn from(err: HttpDispatchError) -> PutBucketTaggingError { PutBucketTaggingError::HttpDispatch(err) } } impl From<io::Error> for PutBucketTaggingError { fn from(err: io::Error) -> PutBucketTaggingError { PutBucketTaggingError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketTaggingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketTaggingError { fn description(&self) -> &str { match *self { PutBucketTaggingError::Validation(ref cause) => cause, PutBucketTaggingError::Credentials(ref err) => err.description(), PutBucketTaggingError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutBucketTaggingError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketVersioning #[derive(Debug, PartialEq)] pub enum PutBucketVersioningError { /// An error occurred dispatching the HTTP request 
HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketVersioningError { pub fn from_body(body: &str) -> PutBucketVersioningError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketVersioningError::Unknown(String::from(body)), }, Err(_) => PutBucketVersioningError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketVersioningError { fn from(err: XmlParseError) -> PutBucketVersioningError { let XmlParseError(message) = err; PutBucketVersioningError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketVersioningError { fn from(err: CredentialsError) -> PutBucketVersioningError { PutBucketVersioningError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketVersioningError { fn from(err: HttpDispatchError) -> PutBucketVersioningError { PutBucketVersioningError::HttpDispatch(err) } } impl From<io::Error> for PutBucketVersioningError { fn from(err: io::Error) -> PutBucketVersioningError { PutBucketVersioningError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketVersioningError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutBucketVersioningError { fn description(&self) -> &str { match *self { PutBucketVersioningError::Validation(ref cause) => cause, PutBucketVersioningError::Credentials(ref err) => err.description(), 
PutBucketVersioningError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PutBucketVersioningError::Unknown(ref cause) => cause, } } } /// Errors returned by PutBucketWebsite #[derive(Debug, PartialEq)] pub enum PutBucketWebsiteError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutBucketWebsiteError { pub fn from_body(body: &str) -> PutBucketWebsiteError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutBucketWebsiteError::Unknown(String::from(body)), }, Err(_) => PutBucketWebsiteError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutBucketWebsiteError { fn from(err: XmlParseError) -> PutBucketWebsiteError { let XmlParseError(message) = err; PutBucketWebsiteError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutBucketWebsiteError { fn from(err: CredentialsError) -> PutBucketWebsiteError { PutBucketWebsiteError::Credentials(err) } } impl From<HttpDispatchError> for PutBucketWebsiteError { fn from(err: HttpDispatchError) -> PutBucketWebsiteError { PutBucketWebsiteError::HttpDispatch(err) } } impl From<io::Error> for PutBucketWebsiteError { fn from(err: io::Error) -> PutBucketWebsiteError { PutBucketWebsiteError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutBucketWebsiteError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", 
self.description()) } } impl Error for PutBucketWebsiteError { fn description(&self) -> &str { match *self { PutBucketWebsiteError::Validation(ref cause) => cause, PutBucketWebsiteError::Credentials(ref err) => err.description(), PutBucketWebsiteError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutBucketWebsiteError::Unknown(ref cause) => cause, } } } /// Errors returned by PutObject #[derive(Debug, PartialEq)] pub enum PutObjectError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutObjectError { pub fn from_body(body: &str) -> PutObjectError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => PutObjectError::Unknown(String::from(body)), }, Err(_) => PutObjectError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutObjectError { fn from(err: XmlParseError) -> PutObjectError { let XmlParseError(message) = err; PutObjectError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutObjectError { fn from(err: CredentialsError) -> PutObjectError { PutObjectError::Credentials(err) } } impl From<HttpDispatchError> for PutObjectError { fn from(err: HttpDispatchError) -> PutObjectError { PutObjectError::HttpDispatch(err) } } impl From<io::Error> for PutObjectError { fn from(err: io::Error) -> PutObjectError { PutObjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutObjectError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutObjectError { fn description(&self) -> &str { match *self { PutObjectError::Validation(ref cause) => cause, PutObjectError::Credentials(ref err) => err.description(), PutObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutObjectError::Unknown(ref cause) => cause, } } } /// Errors returned by PutObjectAcl #[derive(Debug, PartialEq)] pub enum PutObjectAclError { /// <p>The specified key does not exist.</p> NoSuchKey(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl PutObjectAclError { pub fn from_body(body: &str) -> PutObjectAclError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "NoSuchKey" => PutObjectAclError::NoSuchKey(String::from(parsed_error.message)), _ => PutObjectAclError::Unknown(String::from(body)), }, Err(_) => PutObjectAclError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutObjectAclError { fn from(err: XmlParseError) -> PutObjectAclError { let XmlParseError(message) = err; PutObjectAclError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutObjectAclError { fn from(err: CredentialsError) -> PutObjectAclError { PutObjectAclError::Credentials(err) } } impl From<HttpDispatchError> for PutObjectAclError { fn from(err: HttpDispatchError) -> PutObjectAclError { PutObjectAclError::HttpDispatch(err) } } impl From<io::Error> for PutObjectAclError { fn from(err: io::Error) -> PutObjectAclError { PutObjectAclError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutObjectAclError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutObjectAclError { fn description(&self) -> &str { match *self { PutObjectAclError::NoSuchKey(ref cause) => cause, PutObjectAclError::Validation(ref cause) => cause, PutObjectAclError::Credentials(ref err) => err.description(), PutObjectAclError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), PutObjectAclError::Unknown(ref cause) => cause, } } } /// Errors returned by PutObjectTagging #[derive(Debug, PartialEq)] pub enum PutObjectTaggingError { /// An error occurred dispatching the HTTP request 
HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl PutObjectTaggingError { pub fn from_body(body: &str) -> PutObjectTaggingError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => PutObjectTaggingError::Unknown(String::from(body)), }, Err(_) => PutObjectTaggingError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PutObjectTaggingError { fn from(err: XmlParseError) -> PutObjectTaggingError { let XmlParseError(message) = err; PutObjectTaggingError::Unknown(message.to_string()) } } impl From<CredentialsError> for PutObjectTaggingError { fn from(err: CredentialsError) -> PutObjectTaggingError { PutObjectTaggingError::Credentials(err) } } impl From<HttpDispatchError> for PutObjectTaggingError { fn from(err: HttpDispatchError) -> PutObjectTaggingError { PutObjectTaggingError::HttpDispatch(err) } } impl From<io::Error> for PutObjectTaggingError { fn from(err: io::Error) -> PutObjectTaggingError { PutObjectTaggingError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PutObjectTaggingError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PutObjectTaggingError { fn description(&self) -> &str { match *self { PutObjectTaggingError::Validation(ref cause) => cause, PutObjectTaggingError::Credentials(ref err) => err.description(), PutObjectTaggingError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), 
PutObjectTaggingError::Unknown(ref cause) => cause, } } } /// Errors returned by RestoreObject #[derive(Debug, PartialEq)] pub enum RestoreObjectError { /// <p>This operation is not allowed against this storage tier</p> ObjectAlreadyInActiveTierError(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl RestoreObjectError { pub fn from_body(body: &str) -> RestoreObjectError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "ObjectAlreadyInActiveTierError" => { RestoreObjectError::ObjectAlreadyInActiveTierError(String::from( parsed_error.message, )) } _ => RestoreObjectError::Unknown(String::from(body)), }, Err(_) => RestoreObjectError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for RestoreObjectError { fn from(err: XmlParseError) -> RestoreObjectError { let XmlParseError(message) = err; RestoreObjectError::Unknown(message.to_string()) } } impl From<CredentialsError> for RestoreObjectError { fn from(err: CredentialsError) -> RestoreObjectError { RestoreObjectError::Credentials(err) } } impl From<HttpDispatchError> for RestoreObjectError { fn from(err: HttpDispatchError) -> RestoreObjectError { RestoreObjectError::HttpDispatch(err) } } impl From<io::Error> for RestoreObjectError { fn from(err: io::Error) -> RestoreObjectError { RestoreObjectError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for RestoreObjectError { fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for RestoreObjectError { fn description(&self) -> &str { match *self { RestoreObjectError::ObjectAlreadyInActiveTierError(ref cause) => cause, RestoreObjectError::Validation(ref cause) => cause, RestoreObjectError::Credentials(ref err) => err.description(), RestoreObjectError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), RestoreObjectError::Unknown(ref cause) => cause, } } } /// Errors returned by SelectObjectContent #[derive(Debug, PartialEq)] pub enum SelectObjectContentError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl SelectObjectContentError { pub fn from_body(body: &str) -> SelectObjectContentError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => SelectObjectContentError::Unknown(String::from(body)), }, Err(_) => SelectObjectContentError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for SelectObjectContentError { fn from(err: XmlParseError) -> SelectObjectContentError { let XmlParseError(message) = err; SelectObjectContentError::Unknown(message.to_string()) } } impl From<CredentialsError> for SelectObjectContentError { fn from(err: CredentialsError) -> SelectObjectContentError { SelectObjectContentError::Credentials(err) } } impl From<HttpDispatchError> for SelectObjectContentError { fn from(err: HttpDispatchError) -> SelectObjectContentError { SelectObjectContentError::HttpDispatch(err) } } impl From<io::Error> for SelectObjectContentError { fn from(err: io::Error) -> SelectObjectContentError { SelectObjectContentError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for SelectObjectContentError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for SelectObjectContentError { fn description(&self) -> &str { match *self { SelectObjectContentError::Validation(ref cause) => cause, SelectObjectContentError::Credentials(ref err) => err.description(), SelectObjectContentError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } SelectObjectContentError::Unknown(ref cause) => cause, } } } /// Errors returned by UploadPart #[derive(Debug, PartialEq)] pub enum UploadPartError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl UploadPartError { pub fn from_body(body: &str) -> UploadPartError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => UploadPartError::Unknown(String::from(body)), }, Err(_) => UploadPartError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for UploadPartError { fn from(err: XmlParseError) -> UploadPartError { let XmlParseError(message) = err; UploadPartError::Unknown(message.to_string()) } } impl From<CredentialsError> for UploadPartError { fn from(err: CredentialsError) -> UploadPartError { UploadPartError::Credentials(err) } } impl From<HttpDispatchError> for UploadPartError { fn from(err: HttpDispatchError) -> UploadPartError { UploadPartError::HttpDispatch(err) } } impl From<io::Error> for UploadPartError { fn from(err: io::Error) -> UploadPartError { UploadPartError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for UploadPartError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for UploadPartError { fn description(&self) -> &str { match *self { UploadPartError::Validation(ref cause) => cause, UploadPartError::Credentials(ref err) => err.description(), UploadPartError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), UploadPartError::Unknown(ref cause) => cause, } } } /// Errors returned by UploadPartCopy #[derive(Debug, PartialEq)] pub enum UploadPartCopyError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. 
Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl UploadPartCopyError { pub fn from_body(body: &str) -> UploadPartCopyError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { _ => UploadPartCopyError::Unknown(String::from(body)), }, Err(_) => UploadPartCopyError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for UploadPartCopyError { fn from(err: XmlParseError) -> UploadPartCopyError { let XmlParseError(message) = err; UploadPartCopyError::Unknown(message.to_string()) } } impl From<CredentialsError> for UploadPartCopyError { fn from(err: CredentialsError) -> UploadPartCopyError { UploadPartCopyError::Credentials(err) } } impl From<HttpDispatchError> for UploadPartCopyError { fn from(err: HttpDispatchError) -> UploadPartCopyError { UploadPartCopyError::HttpDispatch(err) } } impl From<io::Error> for UploadPartCopyError { fn from(err: io::Error) -> UploadPartCopyError { UploadPartCopyError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for UploadPartCopyError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for UploadPartCopyError { fn description(&self) -> &str { match *self { UploadPartCopyError::Validation(ref cause) => cause, UploadPartCopyError::Credentials(ref err) => err.description(), UploadPartCopyError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), UploadPartCopyError::Unknown(ref cause) => cause, } } } /// Trait representing the capabilities of the Amazon S3 API. Amazon S3 clients implement this trait. 
pub trait S3 { /// <p>Aborts a multipart upload.</p><p>To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.</p> fn abort_multipart_upload( &self, input: AbortMultipartUploadRequest, ) -> RusotoFuture<AbortMultipartUploadOutput, AbortMultipartUploadError>; /// <p>Completes a multipart upload by assembling previously uploaded parts.</p> fn complete_multipart_upload( &self, input: CompleteMultipartUploadRequest, ) -> RusotoFuture<CompleteMultipartUploadOutput, CompleteMultipartUploadError>; /// <p>Creates a copy of an object that is already stored in Amazon S3.</p> fn copy_object( &self, input: CopyObjectRequest, ) -> RusotoFuture<CopyObjectOutput, CopyObjectError>; /// <p>Creates a new bucket.</p> fn create_bucket( &self, input: CreateBucketRequest, ) -> RusotoFuture<CreateBucketOutput, CreateBucketError>; /// <p>Initiates a multipart upload and returns an upload ID.</p><p><b>Note:</b> After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.</p> fn create_multipart_upload( &self, input: CreateMultipartUploadRequest, ) -> RusotoFuture<CreateMultipartUploadOutput, CreateMultipartUploadError>; /// <p>Deletes the bucket. 
All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.</p> fn delete_bucket(&self, input: DeleteBucketRequest) -> RusotoFuture<(), DeleteBucketError>; /// <p>Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).</p> fn delete_bucket_analytics_configuration( &self, input: DeleteBucketAnalyticsConfigurationRequest, ) -> RusotoFuture<(), DeleteBucketAnalyticsConfigurationError>; /// <p>Deletes the cors configuration information set for the bucket.</p> fn delete_bucket_cors( &self, input: DeleteBucketCorsRequest, ) -> RusotoFuture<(), DeleteBucketCorsError>; /// <p>Deletes the server-side encryption configuration from the bucket.</p> fn delete_bucket_encryption( &self, input: DeleteBucketEncryptionRequest, ) -> RusotoFuture<(), DeleteBucketEncryptionError>; /// <p>Deletes an inventory configuration (identified by the inventory ID) from the bucket.</p> fn delete_bucket_inventory_configuration( &self, input: DeleteBucketInventoryConfigurationRequest, ) -> RusotoFuture<(), DeleteBucketInventoryConfigurationError>; /// <p>Deletes the lifecycle configuration from the bucket.</p> fn delete_bucket_lifecycle( &self, input: DeleteBucketLifecycleRequest, ) -> RusotoFuture<(), DeleteBucketLifecycleError>; /// <p>Deletes a metrics configuration (specified by the metrics configuration ID) from the bucket.</p> fn delete_bucket_metrics_configuration( &self, input: DeleteBucketMetricsConfigurationRequest, ) -> RusotoFuture<(), DeleteBucketMetricsConfigurationError>; /// <p>Deletes the policy from the bucket.</p> fn delete_bucket_policy( &self, input: DeleteBucketPolicyRequest, ) -> RusotoFuture<(), DeleteBucketPolicyError>; /// <p>Deletes the replication configuration from the bucket.</p> fn delete_bucket_replication( &self, input: DeleteBucketReplicationRequest, ) -> RusotoFuture<(), DeleteBucketReplicationError>; /// <p>Deletes the tags from the bucket.</p> fn 
delete_bucket_tagging(
        &self,
        input: DeleteBucketTaggingRequest,
    ) -> RusotoFuture<(), DeleteBucketTaggingError>;

    /// <p>This operation removes the website configuration from the bucket.</p>
    fn delete_bucket_website(
        &self,
        input: DeleteBucketWebsiteRequest,
    ) -> RusotoFuture<(), DeleteBucketWebsiteError>;

    /// <p>Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn&#39;t a null version, Amazon S3 does not remove any objects.</p>
    fn delete_object(
        &self,
        input: DeleteObjectRequest,
    ) -> RusotoFuture<DeleteObjectOutput, DeleteObjectError>;

    /// <p>Removes the tag-set from an existing object.</p>
    fn delete_object_tagging(
        &self,
        input: DeleteObjectTaggingRequest,
    ) -> RusotoFuture<DeleteObjectTaggingOutput, DeleteObjectTaggingError>;

    /// <p>This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.</p>
    fn delete_objects(
        &self,
        input: DeleteObjectsRequest,
    ) -> RusotoFuture<DeleteObjectsOutput, DeleteObjectsError>;

    // ---- Bucket-level getters (GET on bucket subresources) ----

    /// <p>Returns the accelerate configuration of a bucket.</p>
    fn get_bucket_accelerate_configuration(
        &self,
        input: GetBucketAccelerateConfigurationRequest,
    ) -> RusotoFuture<GetBucketAccelerateConfigurationOutput, GetBucketAccelerateConfigurationError>;

    /// <p>Gets the access control policy for the bucket.</p>
    fn get_bucket_acl(
        &self,
        input: GetBucketAclRequest,
    ) -> RusotoFuture<GetBucketAclOutput, GetBucketAclError>;

    /// <p>Gets an analytics configuration for the bucket (specified by the analytics configuration ID).</p>
    fn get_bucket_analytics_configuration(
        &self,
        input: GetBucketAnalyticsConfigurationRequest,
    ) -> RusotoFuture<GetBucketAnalyticsConfigurationOutput, GetBucketAnalyticsConfigurationError>;

    /// <p>Returns the cors configuration for the bucket.</p>
    fn get_bucket_cors(
        &self,
        input: GetBucketCorsRequest,
    ) -> RusotoFuture<GetBucketCorsOutput, GetBucketCorsError>;

    /// <p>Returns the server-side encryption configuration of a bucket.</p>
    fn get_bucket_encryption(
        &self,
        input: GetBucketEncryptionRequest,
    ) -> RusotoFuture<GetBucketEncryptionOutput, GetBucketEncryptionError>;

    /// <p>Returns an inventory configuration (identified by the inventory ID) from the bucket.</p>
    fn get_bucket_inventory_configuration(
        &self,
        input: GetBucketInventoryConfigurationRequest,
    ) -> RusotoFuture<GetBucketInventoryConfigurationOutput, GetBucketInventoryConfigurationError>;

    /// <p>Deprecated, see the GetBucketLifecycleConfiguration operation.</p>
    fn get_bucket_lifecycle(
        &self,
        input: GetBucketLifecycleRequest,
    ) -> RusotoFuture<GetBucketLifecycleOutput, GetBucketLifecycleError>;

    /// <p>Returns the lifecycle configuration information set on the bucket.</p>
    fn get_bucket_lifecycle_configuration(
        &self,
        input: GetBucketLifecycleConfigurationRequest,
    ) -> RusotoFuture<GetBucketLifecycleConfigurationOutput, GetBucketLifecycleConfigurationError>;

    /// <p>Returns the region the bucket resides in.</p>
    fn get_bucket_location(
        &self,
        input: GetBucketLocationRequest,
    ) -> RusotoFuture<GetBucketLocationOutput, GetBucketLocationError>;

    /// <p>Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.</p>
    fn get_bucket_logging(
        &self,
        input: GetBucketLoggingRequest,
    ) -> RusotoFuture<GetBucketLoggingOutput, GetBucketLoggingError>;

    /// <p>Gets a metrics configuration (specified by the metrics configuration ID) from the bucket.</p>
    fn get_bucket_metrics_configuration(
        &self,
        input: GetBucketMetricsConfigurationRequest,
    ) -> RusotoFuture<GetBucketMetricsConfigurationOutput, GetBucketMetricsConfigurationError>;

    // NOTE: the deprecated and current notification getters share the same
    // request type; only the output type differs.

    /// <p>Deprecated, see the GetBucketNotificationConfiguration operation.</p>
    fn get_bucket_notification(
        &self,
        input: GetBucketNotificationConfigurationRequest,
    ) -> RusotoFuture<NotificationConfigurationDeprecated, GetBucketNotificationError>;

    /// <p>Returns the notification configuration of a bucket.</p>
    fn get_bucket_notification_configuration(
        &self,
        input: GetBucketNotificationConfigurationRequest,
    ) -> RusotoFuture<NotificationConfiguration, GetBucketNotificationConfigurationError>;

    /// <p>Returns the policy of a specified bucket.</p>
    fn get_bucket_policy(
        &self,
        input: GetBucketPolicyRequest,
    ) -> RusotoFuture<GetBucketPolicyOutput, GetBucketPolicyError>;

    /// <p>Returns the replication configuration of a bucket.</p>
    fn get_bucket_replication(
        &self,
        input: GetBucketReplicationRequest,
    ) -> RusotoFuture<GetBucketReplicationOutput, GetBucketReplicationError>;

    /// <p>Returns the request payment configuration of a bucket.</p>
    fn get_bucket_request_payment(
        &self,
        input: GetBucketRequestPaymentRequest,
    ) -> RusotoFuture<GetBucketRequestPaymentOutput, GetBucketRequestPaymentError>;

    /// <p>Returns the tag set associated with the bucket.</p>
    fn get_bucket_tagging(
        &self,
        input: GetBucketTaggingRequest,
    ) -> RusotoFuture<GetBucketTaggingOutput, GetBucketTaggingError>;

    /// <p>Returns the versioning state of a bucket.</p>
    fn get_bucket_versioning(
        &self,
        input: GetBucketVersioningRequest,
    ) -> RusotoFuture<GetBucketVersioningOutput, GetBucketVersioningError>;

    /// <p>Returns the website configuration for a bucket.</p>
    fn get_bucket_website(
        &self,
        input: GetBucketWebsiteRequest,
    ) -> RusotoFuture<GetBucketWebsiteOutput, GetBucketWebsiteError>;

    // ---- Object-level getters ----

    /// <p>Retrieves objects from Amazon S3.</p>
    fn get_object(&self, input: GetObjectRequest) -> RusotoFuture<GetObjectOutput, GetObjectError>;

    /// <p>Returns the access control list (ACL) of an object.</p>
    fn get_object_acl(
        &self,
        input: GetObjectAclRequest,
    ) -> RusotoFuture<GetObjectAclOutput, GetObjectAclError>;

    /// <p>Returns the tag-set of an object.</p>
    fn get_object_tagging(
        &self,
        input: GetObjectTaggingRequest,
    ) -> RusotoFuture<GetObjectTaggingOutput, GetObjectTaggingError>;

    /// <p>Return torrent files from a bucket.</p>
    fn get_object_torrent(
        &self,
        input: GetObjectTorrentRequest,
    ) -> RusotoFuture<GetObjectTorrentOutput, GetObjectTorrentError>;

    /// <p>This operation is useful to determine if a bucket exists and you have permission to access it.</p>
    fn head_bucket(&self, input: HeadBucketRequest) -> RusotoFuture<(), HeadBucketError>;

    /// <p>The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you&#39;re only interested in an object&#39;s metadata. To use HEAD, you must have READ access to the object.</p>
    fn head_object(
        &self,
        input: HeadObjectRequest,
    ) -> RusotoFuture<HeadObjectOutput, HeadObjectError>;

    // ---- Listing operations ----

    /// <p>Lists the analytics configurations for the bucket.</p>
    fn list_bucket_analytics_configurations(
        &self,
        input: ListBucketAnalyticsConfigurationsRequest,
    ) -> RusotoFuture<ListBucketAnalyticsConfigurationsOutput, ListBucketAnalyticsConfigurationsError>;

    /// <p>Returns a list of inventory configurations for the bucket.</p>
    fn list_bucket_inventory_configurations(
        &self,
        input: ListBucketInventoryConfigurationsRequest,
    ) -> RusotoFuture<ListBucketInventoryConfigurationsOutput, ListBucketInventoryConfigurationsError>;

    /// <p>Lists the metrics configurations for the bucket.</p>
    fn list_bucket_metrics_configurations(
        &self,
        input: ListBucketMetricsConfigurationsRequest,
    ) -> RusotoFuture<ListBucketMetricsConfigurationsOutput, ListBucketMetricsConfigurationsError>;

    /// <p>Returns a list of all buckets owned by the authenticated sender of the request.</p>
    fn list_buckets(&self) -> RusotoFuture<ListBucketsOutput, ListBucketsError>;

    /// <p>This operation lists in-progress multipart uploads.</p>
    fn list_multipart_uploads(
        &self,
        input: ListMultipartUploadsRequest,
    ) -> RusotoFuture<ListMultipartUploadsOutput, ListMultipartUploadsError>;

    /// <p>Returns metadata about all of the versions of objects in a bucket.</p>
    fn list_object_versions(
        &self,
        input: ListObjectVersionsRequest,
    ) -> RusotoFuture<ListObjectVersionsOutput, ListObjectVersionsError>;

    /// <p>Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.</p>
    fn list_objects(
        &self,
        input: ListObjectsRequest,
    ) -> RusotoFuture<ListObjectsOutput, ListObjectsError>;

    /// <p>Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend you use this revised API for new application development.</p>
    fn list_objects_v2(
        &self,
        input: ListObjectsV2Request,
    ) -> RusotoFuture<ListObjectsV2Output, ListObjectsV2Error>;

    /// <p>Lists the parts that have been uploaded for a specific multipart upload.</p>
    fn list_parts(&self, input: ListPartsRequest) -> RusotoFuture<ListPartsOutput, ListPartsError>;

    // ---- Bucket-level setters (PUT on bucket subresources) ----

    /// <p>Sets the accelerate configuration of an existing bucket.</p>
    fn put_bucket_accelerate_configuration(
        &self,
        input: PutBucketAccelerateConfigurationRequest,
    ) -> RusotoFuture<(), PutBucketAccelerateConfigurationError>;

    /// <p>Sets the permissions on a bucket using access control lists (ACL).</p>
    fn put_bucket_acl(&self, input: PutBucketAclRequest) -> RusotoFuture<(), PutBucketAclError>;

    /// <p>Sets an analytics configuration for the bucket (specified by the analytics configuration ID).</p>
    fn put_bucket_analytics_configuration(
        &self,
        input: PutBucketAnalyticsConfigurationRequest,
    ) -> RusotoFuture<(), PutBucketAnalyticsConfigurationError>;

    /// <p>Sets the cors configuration for a bucket.</p>
    fn put_bucket_cors(&self, input: PutBucketCorsRequest) -> RusotoFuture<(), PutBucketCorsError>;

    /// <p>Creates a new server-side encryption configuration (or replaces an existing one, if present).</p>
    fn put_bucket_encryption(
        &self,
        input: PutBucketEncryptionRequest,
    ) -> RusotoFuture<(), PutBucketEncryptionError>;

    /// <p>Adds an inventory configuration (identified by the inventory ID) from the bucket.</p>
    fn put_bucket_inventory_configuration(
        &self,
        input: PutBucketInventoryConfigurationRequest,
    ) -> RusotoFuture<(), PutBucketInventoryConfigurationError>;

    /// <p>Deprecated, see the PutBucketLifecycleConfiguration operation.</p>
    fn put_bucket_lifecycle(
        &self,
        input: PutBucketLifecycleRequest,
    ) -> RusotoFuture<(), PutBucketLifecycleError>;

    /// <p>Sets lifecycle configuration for your bucket. If a lifecycle configuration exists, it replaces it.</p>
    fn put_bucket_lifecycle_configuration(
        &self,
        input: PutBucketLifecycleConfigurationRequest,
    ) -> RusotoFuture<(), PutBucketLifecycleConfigurationError>;

    /// <p>Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner.</p>
    fn put_bucket_logging(
        &self,
        input: PutBucketLoggingRequest,
    ) -> RusotoFuture<(), PutBucketLoggingError>;

    /// <p>Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.</p>
    fn put_bucket_metrics_configuration(
        &self,
        input: PutBucketMetricsConfigurationRequest,
    ) -> RusotoFuture<(), PutBucketMetricsConfigurationError>;

    /// <p>Deprecated, see the PutBucketNotificationConfiguration operation.</p>
    fn put_bucket_notification(
        &self,
        input: PutBucketNotificationRequest,
    ) -> RusotoFuture<(), PutBucketNotificationError>;

    /// <p>Enables notifications of specified events for a bucket.</p>
    fn put_bucket_notification_configuration(
        &self,
        input: PutBucketNotificationConfigurationRequest,
    ) -> RusotoFuture<(), PutBucketNotificationConfigurationError>;

    /// <p>Replaces a policy on a bucket. If the bucket already has a policy, the one in this request completely replaces it.</p>
    fn put_bucket_policy(
        &self,
        input: PutBucketPolicyRequest,
    ) -> RusotoFuture<(), PutBucketPolicyError>;

    /// <p>Creates a new replication configuration (or replaces an existing one, if present).</p>
    fn put_bucket_replication(
        &self,
        input: PutBucketReplicationRequest,
    ) -> RusotoFuture<(), PutBucketReplicationError>;

    /// <p>Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html</p>
    fn put_bucket_request_payment(
        &self,
        input: PutBucketRequestPaymentRequest,
    ) -> RusotoFuture<(), PutBucketRequestPaymentError>;

    /// <p>Sets the tags for a bucket.</p>
    fn put_bucket_tagging(
        &self,
        input: PutBucketTaggingRequest,
    ) -> RusotoFuture<(), PutBucketTaggingError>;

    /// <p>Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.</p>
    fn put_bucket_versioning(
        &self,
        input: PutBucketVersioningRequest,
    ) -> RusotoFuture<(), PutBucketVersioningError>;

    /// <p>Set the website configuration for a bucket.</p>
    fn put_bucket_website(
        &self,
        input: PutBucketWebsiteRequest,
    ) -> RusotoFuture<(), PutBucketWebsiteError>;

    // ---- Object writes and misc ----

    /// <p>Adds an object to a bucket.</p>
    fn put_object(&self, input: PutObjectRequest) -> RusotoFuture<PutObjectOutput, PutObjectError>;

    /// <p>uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket</p>
    fn put_object_acl(
        &self,
        input: PutObjectAclRequest,
    ) -> RusotoFuture<PutObjectAclOutput, PutObjectAclError>;

    /// <p>Sets the supplied tag-set to an object that already exists in a bucket</p>
    fn put_object_tagging(
        &self,
        input: PutObjectTaggingRequest,
    ) -> RusotoFuture<PutObjectTaggingOutput, PutObjectTaggingError>;

    /// <p>Restores an archived copy of an object back into Amazon S3</p>
    fn restore_object(
        &self,
        input: RestoreObjectRequest,
    ) -> RusotoFuture<RestoreObjectOutput, RestoreObjectError>;

    /// <p>This operation filters the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. 
You must also specify the data serialization format for the response.</p>
    fn select_object_content(
        &self,
        input: SelectObjectContentRequest,
    ) -> RusotoFuture<SelectObjectContentOutput, SelectObjectContentError>;

    /// <p>Uploads a part in a multipart upload.</p><p><b>Note:</b> After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.</p>
    fn upload_part(
        &self,
        input: UploadPartRequest,
    ) -> RusotoFuture<UploadPartOutput, UploadPartError>;

    /// <p>Uploads a part by copying data from an existing object as data source.</p>
    fn upload_part_copy(
        &self,
        input: UploadPartCopyRequest,
    ) -> RusotoFuture<UploadPartCopyOutput, UploadPartCopyError>;
}

/// A client for the Amazon S3 API.
pub struct S3Client<P = CredentialsProvider, D = RequestDispatcher>
where
    P: ProvideAwsCredentials,
    D: DispatchSignedRequest,
{
    // Signs each request with credentials from `P` and sends it via `D`.
    inner: ClientInner<P, D>,
    // Region used both for endpoint resolution and request signing.
    region: region::Region,
}

impl S3Client {
    /// Creates a simple client backed by an implicit event loop.
    ///
    /// The client will use the default credentials provider and tls client.
    ///
    /// See the `rusoto_core::reactor` module for more details.
    pub fn simple(region: region::Region) -> S3Client {
        S3Client::new(
            RequestDispatcher::default(),
            CredentialsProvider::default(),
            region,
        )
    }
}

impl<P, D> S3Client<P, D>
where
    P: ProvideAwsCredentials,
    D: DispatchSignedRequest,
{
    // Generic constructor: caller supplies the dispatcher and credentials
    // provider explicitly (useful for testing with a mock dispatcher).
    pub fn new(request_dispatcher: D, credentials_provider: P, region: region::Region) -> Self {
        S3Client {
            inner: ClientInner::new(credentials_provider, request_dispatcher),
            region: region,
        }
    }
}

// Generated implementation of the `S3` trait. Every method follows the same
// pattern: build a `SignedRequest`, map optional input fields onto headers or
// query params, dispatch, then decode the XML body and copy response headers
// into the output struct.
impl<P, D> S3 for S3Client<P, D>
where
    P: ProvideAwsCredentials + 'static,
    D: DispatchSignedRequest + 'static,
{
    /// <p>Aborts a multipart upload.</p><p>To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.</p>
    #[allow(unused_variables, warnings)]
    fn abort_multipart_upload(
        &self,
        input: AbortMultipartUploadRequest,
    ) -> RusotoFuture<AbortMultipartUploadOutput, AbortMultipartUploadError> {
        let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);
        let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri);
        if let Some(ref request_payer) = input.request_payer {
            request.add_header("x-amz-request-payer", &request_payer.to_string());
        }
        let mut params = Params::new();
        params.put("uploadId", &input.upload_id);
        request.set_params(params);
        let future = self.inner.sign_and_dispatch(request, |response| {
            // 200/204/206 count as success; anything else is decoded into a
            // typed service error from the response body.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(AbortMultipartUploadError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = AbortMultipartUploadOutput::default();
                } else {
                    // Deserialize the XML payload into the output struct.
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(AbortMultipartUploadOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
                    let value = request_charged.to_owned();
                    result.request_charged = Some(value)
                };
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    /// <p>Completes a multipart upload by assembling previously uploaded parts.</p>
    #[allow(unused_variables, warnings)]
    fn complete_multipart_upload(
        &self,
        input: CompleteMultipartUploadRequest,
    ) -> RusotoFuture<CompleteMultipartUploadOutput, CompleteMultipartUploadError> {
        let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);
        let mut request = SignedRequest::new("POST", "s3", &self.region, &request_uri);
        if let Some(ref request_payer) = input.request_payer {
            request.add_header("x-amz-request-payer", &request_payer.to_string());
        }
        let mut params = Params::new();
        params.put("uploadId", &input.upload_id);
        request.set_params(params);
        // Serialize the completed-parts manifest as the XML request payload
        // (an empty payload is sent when no manifest was provided).
        if input.multipart_upload.is_some() {
            let mut writer = EventWriter::new(Vec::new());
            CompletedMultipartUploadSerializer::serialize(
                &mut writer,
                "CompleteMultipartUpload",
                input.multipart_upload.as_ref().unwrap(),
            );
            request.set_payload(Some(writer.into_inner()));
        } else {
            request.set_payload(Some(Vec::new()));
        }
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(CompleteMultipartUploadError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result =
                        CompleteMultipartUploadOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(CompleteMultipartUploadOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                // Merge header-carried fields into the XML-decoded result.
                if let Some(expiration) = response.headers.get("x-amz-expiration") {
                    let value = expiration.to_owned();
                    result.expiration = Some(value)
                };
                if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
                    let value = request_charged.to_owned();
                    result.request_charged = Some(value)
                };
                if let Some(ssekms_key_id) = response
                    .headers
                    .get("x-amz-server-side-encryption-aws-kms-key-id")
                {
                    let value = ssekms_key_id.to_owned();
                    result.ssekms_key_id = Some(value)
                };
                if let Some(server_side_encryption) =
                    response.headers.get("x-amz-server-side-encryption")
                {
                    let value = server_side_encryption.to_owned();
                    result.server_side_encryption = Some(value)
                };
                if let Some(version_id) = response.headers.get("x-amz-version-id") {
                    let value = version_id.to_owned();
                    result.version_id = Some(value)
                };
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    /// <p>Creates a copy of an object that is already stored in Amazon S3.</p>
    #[allow(unused_variables, warnings)]
    fn copy_object(
        &self,
        input: CopyObjectRequest,
    ) -> RusotoFuture<CopyObjectOutput, CopyObjectError> {
        let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        // Each optional input field below maps 1:1 onto an HTTP request header.
        if let Some(ref acl) = input.acl {
            request.add_header("x-amz-acl", &acl.to_string());
        }
        if let Some(ref cache_control) = input.cache_control {
            request.add_header("Cache-Control", &cache_control.to_string());
        }
        if let Some(ref content_disposition) = input.content_disposition {
            request.add_header("Content-Disposition",
&content_disposition.to_string());
        }
        if let Some(ref content_encoding) = input.content_encoding {
            request.add_header("Content-Encoding", &content_encoding.to_string());
        }
        if let Some(ref content_language) = input.content_language {
            request.add_header("Content-Language", &content_language.to_string());
        }
        if let Some(ref content_type) = input.content_type {
            request.add_header("Content-Type", &content_type.to_string());
        }
        // The copy source header is required; every conditional below is optional.
        request.add_header("x-amz-copy-source", &input.copy_source);
        if let Some(ref copy_source_if_match) = input.copy_source_if_match {
            request.add_header(
                "x-amz-copy-source-if-match",
                &copy_source_if_match.to_string(),
            );
        }
        if let Some(ref copy_source_if_modified_since) = input.copy_source_if_modified_since {
            request.add_header(
                "x-amz-copy-source-if-modified-since",
                &copy_source_if_modified_since.to_string(),
            );
        }
        if let Some(ref copy_source_if_none_match) = input.copy_source_if_none_match {
            request.add_header(
                "x-amz-copy-source-if-none-match",
                &copy_source_if_none_match.to_string(),
            );
        }
        if let Some(ref copy_source_if_unmodified_since) = input.copy_source_if_unmodified_since {
            request.add_header(
                "x-amz-copy-source-if-unmodified-since",
                &copy_source_if_unmodified_since.to_string(),
            );
        }
        if let Some(ref copy_source_sse_customer_algorithm) =
            input.copy_source_sse_customer_algorithm
        {
            request.add_header(
                "x-amz-copy-source-server-side-encryption-customer-algorithm",
                &copy_source_sse_customer_algorithm.to_string(),
            );
        }
        if let Some(ref copy_source_sse_customer_key) = input.copy_source_sse_customer_key {
            request.add_header(
                "x-amz-copy-source-server-side-encryption-customer-key",
                &copy_source_sse_customer_key.to_string(),
            );
        }
        if let Some(ref copy_source_sse_customer_key_md5) = input.copy_source_sse_customer_key_md5 {
            request.add_header(
                "x-amz-copy-source-server-side-encryption-customer-key-MD5",
                &copy_source_sse_customer_key_md5.to_string(),
            );
        }
        if let Some(ref expires) = input.expires {
            request.add_header("Expires", &expires.to_string());
        }
        if let Some(ref grant_full_control) = input.grant_full_control {
            request.add_header("x-amz-grant-full-control", &grant_full_control.to_string());
        }
        if let Some(ref grant_read) = input.grant_read {
            request.add_header("x-amz-grant-read", &grant_read.to_string());
        }
        if let Some(ref grant_read_acp) = input.grant_read_acp {
            request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string());
        }
        if let Some(ref grant_write_acp) = input.grant_write_acp {
            request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string());
        }
        if let Some(ref metadata) = input.metadata {
            // User metadata is flattened into x-amz-meta-* headers.
            for (header_name, header_value) in metadata.iter() {
                let header = format!("x-amz-meta-{}", header_name);
                request.add_header(header, header_value);
            }
        }
        if let Some(ref metadata_directive) = input.metadata_directive {
            request.add_header("x-amz-metadata-directive", &metadata_directive.to_string());
        }
        if let Some(ref request_payer) = input.request_payer {
            request.add_header("x-amz-request-payer", &request_payer.to_string());
        }
        if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm {
            request.add_header(
                "x-amz-server-side-encryption-customer-algorithm",
                &sse_customer_algorithm.to_string(),
            );
        }
        if let Some(ref sse_customer_key) = input.sse_customer_key {
            request.add_header(
                "x-amz-server-side-encryption-customer-key",
                &sse_customer_key.to_string(),
            );
        }
        if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 {
            request.add_header(
                "x-amz-server-side-encryption-customer-key-MD5",
                &sse_customer_key_md5.to_string(),
            );
        }
        if let Some(ref ssekms_key_id) = input.ssekms_key_id {
            request.add_header(
                "x-amz-server-side-encryption-aws-kms-key-id",
                &ssekms_key_id.to_string(),
            );
        }
        if let Some(ref server_side_encryption) = input.server_side_encryption {
            request.add_header(
                "x-amz-server-side-encryption",
                &server_side_encryption.to_string(),
            );
        }
        if let Some(ref storage_class) = input.storage_class {
            request.add_header("x-amz-storage-class", &storage_class.to_string());
        }
        if let Some(ref tagging) = input.tagging {
            request.add_header("x-amz-tagging", &tagging.to_string());
        }
        if let Some(ref tagging_directive) = input.tagging_directive {
            request.add_header("x-amz-tagging-directive", &tagging_directive.to_string());
        }
        if let Some(ref website_redirect_location) = input.website_redirect_location {
            request.add_header(
                "x-amz-website-redirect-location",
                &website_redirect_location.to_string(),
            );
        }
        let future = self.inner.sign_and_dispatch(request, |response| {
            // 200/204/206 count as success; otherwise decode a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(CopyObjectError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = CopyObjectOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(CopyObjectOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                // Merge header-carried fields into the XML-decoded result.
                if let Some(copy_source_version_id) =
                    response.headers.get("x-amz-copy-source-version-id")
                {
                    let value = copy_source_version_id.to_owned();
                    result.copy_source_version_id = Some(value)
                };
                if let Some(expiration) = response.headers.get("x-amz-expiration") {
                    let value = expiration.to_owned();
                    result.expiration = Some(value)
                };
                if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
                    let value = request_charged.to_owned();
                    result.request_charged = Some(value)
                };
                if let Some(sse_customer_algorithm) = response
                    .headers
                    .get("x-amz-server-side-encryption-customer-algorithm")
                {
                    let value = sse_customer_algorithm.to_owned();
                    result.sse_customer_algorithm = Some(value)
                };
                if let Some(sse_customer_key_md5) = response
                    .headers
                    .get("x-amz-server-side-encryption-customer-key-MD5")
                {
                    let value = sse_customer_key_md5.to_owned();
                    result.sse_customer_key_md5 = Some(value)
                };
                if let Some(ssekms_key_id) = response
                    .headers
                    .get("x-amz-server-side-encryption-aws-kms-key-id")
                {
                    let value = ssekms_key_id.to_owned();
                    result.ssekms_key_id = Some(value)
                };
                if let Some(server_side_encryption) =
                    response.headers.get("x-amz-server-side-encryption")
                {
                    let value = server_side_encryption.to_owned();
                    result.server_side_encryption = Some(value)
                };
                if let Some(version_id) = response.headers.get("x-amz-version-id") {
                    let value = version_id.to_owned();
                    result.version_id = Some(value)
                };
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    /// <p>Creates a new bucket.</p>
    #[allow(unused_variables, warnings)]
    fn create_bucket(
        &self,
        input: CreateBucketRequest,
    ) -> RusotoFuture<CreateBucketOutput, CreateBucketError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
        if let Some(ref acl) = input.acl {
            request.add_header("x-amz-acl", &acl.to_string());
        }
        if let Some(ref grant_full_control) = input.grant_full_control {
            request.add_header("x-amz-grant-full-control", &grant_full_control.to_string());
        }
        if let Some(ref grant_read) = input.grant_read {
            request.add_header("x-amz-grant-read", &grant_read.to_string());
        }
        if let Some(ref grant_read_acp) = input.grant_read_acp {
            request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string());
        }
        if let Some(ref grant_write) = input.grant_write {
            request.add_header("x-amz-grant-write", &grant_write.to_string());
        }
        if let Some(ref grant_write_acp) = input.grant_write_acp {
            request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string());
        }
        // Serialize the optional bucket configuration (e.g. location constraint)
        // as the XML payload; an empty payload is sent otherwise.
        if input.create_bucket_configuration.is_some() {
            let mut writer = EventWriter::new(Vec::new());
            CreateBucketConfigurationSerializer::serialize(
                &mut writer,
                "CreateBucketConfiguration",
                input.create_bucket_configuration.as_ref().unwrap(),
            );
            request.set_payload(Some(writer.into_inner()));
        } else {
            request.set_payload(Some(Vec::new()));
        }
        let future = self.inner.sign_and_dispatch(request, |response| {
            // 200/204/206 count as success; otherwise decode a typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(CreateBucketError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = CreateBucketOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(CreateBucketOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                if let Some(location) = response.headers.get("Location") {
                    let value = location.to_owned();
                    result.location = Some(value)
                };
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    /// <p>Initiates a multipart upload and returns an upload ID.</p><p><b>Note:</b> After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.</p>
    #[allow(unused_variables, warnings)]
    fn create_multipart_upload(
        &self,
        input: CreateMultipartUploadRequest,
    ) -> RusotoFuture<CreateMultipartUploadOutput, CreateMultipartUploadError> {
        let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);
        let mut request = SignedRequest::new("POST", "s3", &self.region, &request_uri);
        // Each optional input field below maps 1:1 onto an HTTP request header.
        if let Some(ref acl) = input.acl {
            request.add_header("x-amz-acl", &acl.to_string());
        }
        if let Some(ref cache_control) = input.cache_control {
            request.add_header("Cache-Control", &cache_control.to_string());
        }
        if let Some(ref content_disposition) = input.content_disposition {
            request.add_header("Content-Disposition", &content_disposition.to_string());
        }
        if let Some(ref content_encoding) = input.content_encoding {
            request.add_header("Content-Encoding", &content_encoding.to_string());
        }
        if let Some(ref content_language) = input.content_language {
            request.add_header("Content-Language", &content_language.to_string());
        }
        if let Some(ref content_type) = input.content_type {
            request.add_header("Content-Type", &content_type.to_string());
        }
        if let Some(ref expires) = input.expires {
            request.add_header("Expires", &expires.to_string());
        }
        if let Some(ref grant_full_control) = input.grant_full_control {
            request.add_header("x-amz-grant-full-control", &grant_full_control.to_string());
        }
        if let Some(ref grant_read) = input.grant_read {
            request.add_header("x-amz-grant-read", &grant_read.to_string());
        }
        if let Some(ref grant_read_acp) = input.grant_read_acp {
            request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string());
        }
        if let Some(ref grant_write_acp) = input.grant_write_acp {
            request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string());
        }
        if let Some(ref metadata) = input.metadata {
            // User metadata is flattened into x-amz-meta-* headers.
            for (header_name, header_value) in metadata.iter() {
                let header = format!("x-amz-meta-{}", header_name);
                request.add_header(header, header_value);
            }
        }
        if let Some(ref request_payer) = input.request_payer {
            request.add_header("x-amz-request-payer", &request_payer.to_string());
        }
        if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm {
            request.add_header(
                "x-amz-server-side-encryption-customer-algorithm",
                &sse_customer_algorithm.to_string(),
            );
        }
        if let Some(ref sse_customer_key) = input.sse_customer_key {
            request.add_header(
                "x-amz-server-side-encryption-customer-key",
                &sse_customer_key.to_string(),
            );
        }
        if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 {
            request.add_header(
                "x-amz-server-side-encryption-customer-key-MD5",
                &sse_customer_key_md5.to_string(),
            );
        }
        if let Some(ref ssekms_key_id) = input.ssekms_key_id {
            request.add_header(
                "x-amz-server-side-encryption-aws-kms-key-id",
                &ssekms_key_id.to_string(),
            );
        }
        if let Some(ref server_side_encryption) = input.server_side_encryption {
            request.add_header(
                "x-amz-server-side-encryption",
                &server_side_encryption.to_string(),
            );
        }
        if let Some(ref storage_class) = input.storage_class {
            request.add_header("x-amz-storage-class", &storage_class.to_string());
        }
        if let Some(ref tagging) = input.tagging {
            request.add_header("x-amz-tagging", &tagging.to_string());
        }
        if let Some(ref website_redirect_location) = input.website_redirect_location {
            request.add_header(
                "x-amz-website-redirect-location",
                &website_redirect_location.to_string(),
            );
        }
        // The bare "uploads" query key selects the initiate-multipart-upload API.
        let mut params = Params::new();
        params.put_key("uploads");
        request.set_params(params);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(CreateMultipartUploadError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;
                if response.body.is_empty() {
                    result = CreateMultipartUploadOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(CreateMultipartUploadOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                // Merge header-carried fields into the XML-decoded result.
                if let Some(abort_date) = response.headers.get("x-amz-abort-date") {
                    let value = abort_date.to_owned();
                    result.abort_date = Some(value)
                };
                if let Some(abort_rule_id) = response.headers.get("x-amz-abort-rule-id") {
                    let value = abort_rule_id.to_owned();
                    result.abort_rule_id = Some(value)
                };
                if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
                    let value = request_charged.to_owned();
                    result.request_charged = Some(value)
                };
                if let Some(sse_customer_algorithm) = response
                    .headers
                    .get("x-amz-server-side-encryption-customer-algorithm")
                {
                    let value = sse_customer_algorithm.to_owned();
                    result.sse_customer_algorithm = Some(value)
                };
                if let Some(sse_customer_key_md5) = response
                    .headers
                    .get("x-amz-server-side-encryption-customer-key-MD5")
                {
                    let value = sse_customer_key_md5.to_owned();
                    result.sse_customer_key_md5 = Some(value)
                };
                if let Some(ssekms_key_id) = response
                    .headers
                    .get("x-amz-server-side-encryption-aws-kms-key-id")
                {
                    let value = ssekms_key_id.to_owned();
                    result.ssekms_key_id = Some(value)
                };
                if let Some(server_side_encryption) =
                    response.headers.get("x-amz-server-side-encryption")
                {
                    let value = server_side_encryption.to_owned();
                    result.server_side_encryption = Some(value)
                };
                Ok(result)
            }))
        });
        RusotoFuture::new(future)
    }

    /// <p>Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.</p>
    #[allow(unused_variables, warnings)]
    fn delete_bucket(&self, input: DeleteBucketRequest) -> RusotoFuture<(), DeleteBucketError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(DeleteBucketError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            // Success carries no payload; drop the response and yield ().
            future::Either::A(future::ok(::std::mem::drop(response)))
        });
        RusotoFuture::new(future)
    }

    /// <p>Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).</p>
    #[allow(unused_variables, warnings)]
    fn delete_bucket_analytics_configuration(
        &self,
        input: DeleteBucketAnalyticsConfigurationRequest,
    ) -> RusotoFuture<(), DeleteBucketAnalyticsConfigurationError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);
        let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri);
        // "analytics" (bare key) selects the subresource; "id" picks the config.
        let mut params = Params::new();
        params.put("id", &input.id);
        params.put_key("analytics");
        request.set_params(params);
        let future = self.inner.sign_and_dispatch(request, |response| {
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(DeleteBucketAnalyticsConfigurationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }
            // Success carries no payload; drop the response and yield ().
            future::Either::A(future::ok(::std::mem::drop(response)))
        });
        RusotoFuture::new(future)
    }

    /// <p>Deletes the cors configuration information set for the
bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_cors( &self, input: DeleteBucketCorsRequest, ) -> RusotoFuture<(), DeleteBucketCorsError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("cors"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketCorsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes the server-side encryption configuration from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_encryption( &self, input: DeleteBucketEncryptionRequest, ) -> RusotoFuture<(), DeleteBucketEncryptionError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("encryption"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketEncryptionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes an inventory configuration (identified by the inventory ID) from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_inventory_configuration( &self, input: 
DeleteBucketInventoryConfigurationRequest, ) -> RusotoFuture<(), DeleteBucketInventoryConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put("id", &input.id); params.put_key("inventory"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketInventoryConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes the lifecycle configuration from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_lifecycle( &self, input: DeleteBucketLifecycleRequest, ) -> RusotoFuture<(), DeleteBucketLifecycleError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("lifecycle"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketLifecycleError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes a metrics configuration (specified by the metrics configuration ID) from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_metrics_configuration( &self, input: 
DeleteBucketMetricsConfigurationRequest, ) -> RusotoFuture<(), DeleteBucketMetricsConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put("id", &input.id); params.put_key("metrics"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketMetricsConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes the policy from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_policy( &self, input: DeleteBucketPolicyRequest, ) -> RusotoFuture<(), DeleteBucketPolicyError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("policy"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketPolicyError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes the replication configuration from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_replication( &self, input: DeleteBucketReplicationRequest, ) -> RusotoFuture<(), DeleteBucketReplicationError> { let request_uri = 
format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("replication"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketReplicationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Deletes the tags from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_tagging( &self, input: DeleteBucketTaggingRequest, ) -> RusotoFuture<(), DeleteBucketTaggingError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("tagging"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketTaggingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>This operation removes the website configuration from the bucket.</p> #[allow(unused_variables, warnings)] fn delete_bucket_website( &self, input: DeleteBucketWebsiteRequest, ) -> RusotoFuture<(), DeleteBucketWebsiteError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = 
Params::new(); params.put_key("website"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteBucketWebsiteError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn&#39;t a null version, Amazon S3 does not remove any objects.</p> #[allow(unused_variables, warnings)] fn delete_object( &self, input: DeleteObjectRequest, ) -> RusotoFuture<DeleteObjectOutput, DeleteObjectError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); if let Some(ref mfa) = input.mfa { request.add_header("x-amz-mfa", &mfa.to_string()); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteObjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = DeleteObjectOutput::default(); } else { let reader = 
EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(DeleteObjectOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(delete_marker) = response.headers.get("x-amz-delete-marker") { let value = delete_marker.to_owned(); result.delete_marker = Some(value.parse::<bool>().unwrap()) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(version_id) = response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Removes the tag-set from an existing object.</p> #[allow(unused_variables, warnings)] fn delete_object_tagging( &self, input: DeleteObjectTaggingRequest, ) -> RusotoFuture<DeleteObjectTaggingOutput, DeleteObjectTaggingError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("DELETE", "s3", &self.region, &request_uri); let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } params.put_key("tagging"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteObjectTaggingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = DeleteObjectTaggingOutput::default(); } else { let reader 
= EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(DeleteObjectTaggingOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(version_id) = response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.</p> #[allow(unused_variables, warnings)] fn delete_objects( &self, input: DeleteObjectsRequest, ) -> RusotoFuture<DeleteObjectsOutput, DeleteObjectsError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("POST", "s3", &self.region, &request_uri); if let Some(ref mfa) = input.mfa { request.add_header("x-amz-mfa", &mfa.to_string()); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let mut params = Params::new(); params.put_key("delete"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); DeleteSerializer::serialize(&mut writer, "Delete", &input.delete); request.set_payload(Some(writer.into_inner())); request.set_content_md5_header(); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(DeleteObjectsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = 
DeleteObjectsOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(DeleteObjectsOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the accelerate configuration of a bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_accelerate_configuration( &self, input: GetBucketAccelerateConfigurationRequest, ) -> RusotoFuture<GetBucketAccelerateConfigurationOutput, GetBucketAccelerateConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("accelerate"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketAccelerateConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketAccelerateConfigurationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = 
try!( GetBucketAccelerateConfigurationOutputDeserializer::deserialize( &actual_tag_name, &mut stack ) ); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Gets the access control policy for the bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_acl( &self, input: GetBucketAclRequest, ) -> RusotoFuture<GetBucketAclOutput, GetBucketAclError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("acl"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketAclError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketAclOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketAclOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Gets an analytics configuration for the bucket (specified by the analytics configuration ID).</p> #[allow(unused_variables, warnings)] fn get_bucket_analytics_configuration( &self, input: GetBucketAnalyticsConfigurationRequest, ) -> RusotoFuture<GetBucketAnalyticsConfigurationOutput, GetBucketAnalyticsConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, 
&request_uri); let mut params = Params::new(); params.put("id", &input.id); params.put_key("analytics"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketAnalyticsConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketAnalyticsConfigurationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!( GetBucketAnalyticsConfigurationOutputDeserializer::deserialize( &actual_tag_name, &mut stack ) ); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the cors configuration for the bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_cors( &self, input: GetBucketCorsRequest, ) -> RusotoFuture<GetBucketCorsOutput, GetBucketCorsError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("cors"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketCorsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } 
future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketCorsOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketCorsOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the server-side encryption configuration of a bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_encryption( &self, input: GetBucketEncryptionRequest, ) -> RusotoFuture<GetBucketEncryptionOutput, GetBucketEncryptionError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("encryption"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketEncryptionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketEncryptionOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketEncryptionOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } 
Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns an inventory configuration (identified by the inventory ID) from the bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_inventory_configuration( &self, input: GetBucketInventoryConfigurationRequest, ) -> RusotoFuture<GetBucketInventoryConfigurationOutput, GetBucketInventoryConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put("id", &input.id); params.put_key("inventory"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketInventoryConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketInventoryConfigurationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!( GetBucketInventoryConfigurationOutputDeserializer::deserialize( &actual_tag_name, &mut stack ) ); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Deprecated, see the GetBucketLifecycleConfiguration operation.</p> #[allow(unused_variables, warnings)] fn get_bucket_lifecycle( &self, input: GetBucketLifecycleRequest, ) -> RusotoFuture<GetBucketLifecycleOutput, GetBucketLifecycleError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", 
"s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("lifecycle"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketLifecycleError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketLifecycleOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketLifecycleOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the lifecycle configuration information set on the bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_lifecycle_configuration( &self, input: GetBucketLifecycleConfigurationRequest, ) -> RusotoFuture<GetBucketLifecycleConfigurationOutput, GetBucketLifecycleConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("lifecycle"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketLifecycleConfigurationError::from_body( 
String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketLifecycleConfigurationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!( GetBucketLifecycleConfigurationOutputDeserializer::deserialize( &actual_tag_name, &mut stack ) ); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the region the bucket resides in.</p> #[allow(unused_variables, warnings)] fn get_bucket_location( &self, input: GetBucketLocationRequest, ) -> RusotoFuture<GetBucketLocationOutput, GetBucketLocationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("location"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketLocationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketLocationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = 
try!(GetBucketLocationOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.</p> #[allow(unused_variables, warnings)] fn get_bucket_logging( &self, input: GetBucketLoggingRequest, ) -> RusotoFuture<GetBucketLoggingOutput, GetBucketLoggingError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("logging"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketLoggingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketLoggingOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketLoggingOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Gets a metrics configuration (specified by the metrics configuration ID) from the bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_metrics_configuration( &self, input: GetBucketMetricsConfigurationRequest, ) -> RusotoFuture<GetBucketMetricsConfigurationOutput, GetBucketMetricsConfigurationError> { let request_uri = 
format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put("id", &input.id); params.put_key("metrics"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketMetricsConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketMetricsConfigurationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!( GetBucketMetricsConfigurationOutputDeserializer::deserialize( &actual_tag_name, &mut stack ) ); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Deprecated, see the GetBucketNotificationConfiguration operation.</p> #[allow(unused_variables, warnings)] fn get_bucket_notification( &self, input: GetBucketNotificationConfigurationRequest, ) -> RusotoFuture<NotificationConfigurationDeprecated, GetBucketNotificationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("notification"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return 
future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketNotificationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = NotificationConfigurationDeprecated::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!( NotificationConfigurationDeprecatedDeserializer::deserialize( &actual_tag_name, &mut stack ) ); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the notification configuration of a bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_notification_configuration( &self, input: GetBucketNotificationConfigurationRequest, ) -> RusotoFuture<NotificationConfiguration, GetBucketNotificationConfigurationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("notification"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketNotificationConfigurationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = NotificationConfiguration::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), 
// NOTE(review): machine-generated rusoto S3 client code, whitespace-collapsed in the
// original; tokens are unchanged below — only formatting and comments were added.
// Tail of get_bucket_notification_configuration(): finish building the
// whitespace-trimming XML reader and deserialize the notification configuration.
ParserConfig::new().trim_whitespace(true),
);
let mut stack = XmlResponse::new(reader.into_iter().peekable());
// Consume the StartDocument event before peeking at the root tag.
let _start_document = stack.next();
let actual_tag_name = try!(peek_at_name(&mut stack));
result = try!(NotificationConfigurationDeserializer::deserialize(
    &actual_tag_name,
    &mut stack
));
} // end of the non-empty-body branch
Ok(result)
}))
});
RusotoFuture::new(future)
}

/// <p>Returns the policy of a specified bucket.</p>
#[allow(unused_variables, warnings)]
fn get_bucket_policy(
    &self,
    input: GetBucketPolicyRequest,
) -> RusotoFuture<GetBucketPolicyOutput, GetBucketPolicyError> {
    // GET /{bucket}?policy
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    params.put_key("policy");
    request.set_params(params);
    let future = self.inner.sign_and_dispatch(request, |response| {
        // Any status other than 200/204/206 becomes a typed error parsed from the body.
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(GetBucketPolicyError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        // Unlike the sibling operations, the policy body is NOT XML-decoded: it is
        // stored verbatim (lossy UTF-8) as the `policy` string on the output.
        future::Either::A(response.buffer().from_err().map(move |response| {
            let mut result = GetBucketPolicyOutput::default();
            result.policy = Some(String::from_utf8_lossy(response.body.as_ref()).into());
            result
        }))
    });

    RusotoFuture::new(future)
}

/// <p>Returns the replication configuration of a bucket.</p>
#[allow(unused_variables, warnings)]
fn get_bucket_replication(
    &self,
    input: GetBucketReplicationRequest,
) -> RusotoFuture<GetBucketReplicationOutput, GetBucketReplicationError> {
    // GET /{bucket}?replication
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    params.put_key("replication");
    request.set_params(params);
    let future = self.inner.sign_and_dispatch(request, |response| {
        // Status check continues on the next span.
        if response.status != StatusCode::Ok &&
response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketReplicationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketReplicationOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketReplicationOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the request payment configuration of a bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_request_payment( &self, input: GetBucketRequestPaymentRequest, ) -> RusotoFuture<GetBucketRequestPaymentOutput, GetBucketRequestPaymentError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("requestPayment"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketRequestPaymentError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketRequestPaymentOutput::default(); } else { let reader = EventReader::new_with_config( 
// NOTE(review): machine-generated rusoto S3 client code, whitespace-collapsed in the
// original; tokens are unchanged below — only formatting and comments were added.
// Tail of get_bucket_request_payment(): XML-deserialize the buffered body.
response.body.as_slice(),
ParserConfig::new().trim_whitespace(true),
);
let mut stack = XmlResponse::new(reader.into_iter().peekable());
// Consume the StartDocument event before peeking at the root tag.
let _start_document = stack.next();
let actual_tag_name = try!(peek_at_name(&mut stack));
result = try!(GetBucketRequestPaymentOutputDeserializer::deserialize(
    &actual_tag_name,
    &mut stack
));
} // end of the non-empty-body branch
Ok(result)
}))
});
RusotoFuture::new(future)
}

/// <p>Returns the tag set associated with the bucket.</p>
#[allow(unused_variables, warnings)]
fn get_bucket_tagging(
    &self,
    input: GetBucketTaggingRequest,
) -> RusotoFuture<GetBucketTaggingOutput, GetBucketTaggingError> {
    // GET /{bucket}?tagging
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    params.put_key("tagging");
    request.set_params(params);
    let future = self.inner.sign_and_dispatch(request, |response| {
        // Any status other than 200/204/206 becomes a typed error parsed from the body.
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(GetBucketTaggingError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        // Success path: XML-deserialize the body; empty body => default output.
        future::Either::A(response.buffer().from_err().and_then(move |response| {
            let mut result;
            if response.body.is_empty() {
                result = GetBucketTaggingOutput::default();
            } else {
                let reader = EventReader::new_with_config(
                    response.body.as_slice(),
                    ParserConfig::new().trim_whitespace(true),
                );
                let mut stack = XmlResponse::new(reader.into_iter().peekable());
                let _start_document = stack.next();
                let actual_tag_name = try!(peek_at_name(&mut stack));
                result = try!(GetBucketTaggingOutputDeserializer::deserialize(
                    &actual_tag_name,
                    &mut stack
                ));
            }
            Ok(result)
        }))
    });

    RusotoFuture::new(future)
}

/// <p>Returns the versioning state of a bucket.</p>
#[allow(unused_variables, warnings)]
fn get_bucket_versioning(
    &self,
    input: GetBucketVersioningRequest,
) ->
RusotoFuture<GetBucketVersioningOutput, GetBucketVersioningError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("versioning"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetBucketVersioningError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketVersioningOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketVersioningOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the website configuration for a bucket.</p> #[allow(unused_variables, warnings)] fn get_bucket_website( &self, input: GetBucketWebsiteRequest, ) -> RusotoFuture<GetBucketWebsiteOutput, GetBucketWebsiteError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); params.put_key("website"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| 
{ Err(GetBucketWebsiteError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetBucketWebsiteOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetBucketWebsiteOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } Ok(result) })) }); RusotoFuture::new(future) } /// <p>Retrieves objects from Amazon S3.</p> #[allow(unused_variables, warnings)] fn get_object(&self, input: GetObjectRequest) -> RusotoFuture<GetObjectOutput, GetObjectError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); if let Some(ref if_match) = input.if_match { request.add_header("If-Match", &if_match.to_string()); } if let Some(ref if_modified_since) = input.if_modified_since { request.add_header("If-Modified-Since", &if_modified_since.to_string()); } if let Some(ref if_none_match) = input.if_none_match { request.add_header("If-None-Match", &if_none_match.to_string()); } if let Some(ref if_unmodified_since) = input.if_unmodified_since { request.add_header("If-Unmodified-Since", &if_unmodified_since.to_string()); } if let Some(ref range) = input.range { request.add_header("Range", &range.to_string()); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm { request.add_header( "x-amz-server-side-encryption-customer-algorithm", &sse_customer_algorithm.to_string(), ); } if let Some(ref sse_customer_key) = 
input.sse_customer_key { request.add_header( "x-amz-server-side-encryption-customer-key", &sse_customer_key.to_string(), ); } if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 { request.add_header( "x-amz-server-side-encryption-customer-key-MD5", &sse_customer_key_md5.to_string(), ); } let mut params = Params::new(); if let Some(ref x) = input.part_number { params.put("partNumber", x); } if let Some(ref x) = input.response_cache_control { params.put("response-cache-control", x); } if let Some(ref x) = input.response_content_disposition { params.put("response-content-disposition", x); } if let Some(ref x) = input.response_content_encoding { params.put("response-content-encoding", x); } if let Some(ref x) = input.response_content_language { params.put("response-content-language", x); } if let Some(ref x) = input.response_content_type { params.put("response-content-type", x); } if let Some(ref x) = input.response_expires { params.put("response-expires", x); } if let Some(ref x) = input.version_id { params.put("versionId", x); } request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetObjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } let mut result = GetObjectOutput::default(); result.body = Some(StreamingBody { len: None, inner: response.body, }); if let Some(accept_ranges) = response.headers.get("accept-ranges") { let value = accept_ranges.to_owned(); result.accept_ranges = Some(value) }; if let Some(cache_control) = response.headers.get("Cache-Control") { let value = cache_control.to_owned(); result.cache_control = Some(value) }; if let Some(content_disposition) = response.headers.get("Content-Disposition") { let value = content_disposition.to_owned(); 
result.content_disposition = Some(value) }; if let Some(content_encoding) = response.headers.get("Content-Encoding") { let value = content_encoding.to_owned(); result.content_encoding = Some(value) }; if let Some(content_language) = response.headers.get("Content-Language") { let value = content_language.to_owned(); result.content_language = Some(value) }; if let Some(content_length) = response.headers.get("Content-Length") { let value = content_length.to_owned(); result.content_length = Some(value.parse::<i64>().unwrap()) }; if let Some(content_range) = response.headers.get("Content-Range") { let value = content_range.to_owned(); result.content_range = Some(value) }; if let Some(content_type) = response.headers.get("Content-Type") { let value = content_type.to_owned(); result.content_type = Some(value) }; if let Some(delete_marker) = response.headers.get("x-amz-delete-marker") { let value = delete_marker.to_owned(); result.delete_marker = Some(value.parse::<bool>().unwrap()) }; if let Some(e_tag) = response.headers.get("ETag") { let value = e_tag.to_owned(); result.e_tag = Some(value) }; if let Some(expiration) = response.headers.get("x-amz-expiration") { let value = expiration.to_owned(); result.expiration = Some(value) }; if let Some(expires) = response.headers.get("Expires") { let value = expires.to_owned(); result.expires = Some(value) }; if let Some(last_modified) = response.headers.get("Last-Modified") { let value = last_modified.to_owned(); result.last_modified = Some(value) }; let mut values = ::std::collections::HashMap::new(); for (key, value) in response.headers.iter() { if key.starts_with("x-amz-meta-") { values.insert(key["x-amz-meta-".len()..].to_owned(), value.to_owned()); } } result.metadata = Some(values); if let Some(missing_meta) = response.headers.get("x-amz-missing-meta") { let value = missing_meta.to_owned(); result.missing_meta = Some(value.parse::<i64>().unwrap()) }; if let Some(parts_count) = response.headers.get("x-amz-mp-parts-count") { 
let value = parts_count.to_owned(); result.parts_count = Some(value.parse::<i64>().unwrap()) }; if let Some(replication_status) = response.headers.get("x-amz-replication-status") { let value = replication_status.to_owned(); result.replication_status = Some(value) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(restore) = response.headers.get("x-amz-restore") { let value = restore.to_owned(); result.restore = Some(value) }; if let Some(sse_customer_algorithm) = response .headers .get("x-amz-server-side-encryption-customer-algorithm") { let value = sse_customer_algorithm.to_owned(); result.sse_customer_algorithm = Some(value) }; if let Some(sse_customer_key_md5) = response .headers .get("x-amz-server-side-encryption-customer-key-MD5") { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") { let value = ssekms_key_id.to_owned(); result.ssekms_key_id = Some(value) }; if let Some(server_side_encryption) = response.headers.get("x-amz-server-side-encryption") { let value = server_side_encryption.to_owned(); result.server_side_encryption = Some(value) }; if let Some(storage_class) = response.headers.get("x-amz-storage-class") { let value = storage_class.to_owned(); result.storage_class = Some(value) }; if let Some(tag_count) = response.headers.get("x-amz-tagging-count") { let value = tag_count.to_owned(); result.tag_count = Some(value.parse::<i64>().unwrap()) }; if let Some(version_id) = response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; if let Some(website_redirect_location) = response.headers.get("x-amz-website-redirect-location") { let value = website_redirect_location.to_owned(); result.website_redirect_location = Some(value) }; 
future::Either::A(future::ok(result)) }); RusotoFuture::new(future) } /// <p>Returns the access control list (ACL) of an object.</p> #[allow(unused_variables, warnings)] fn get_object_acl( &self, input: GetObjectAclRequest, ) -> RusotoFuture<GetObjectAclOutput, GetObjectAclError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } params.put_key("acl"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetObjectAclError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetObjectAclOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetObjectAclOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Returns the tag-set of an object.</p> #[allow(unused_variables, warnings)] fn get_object_tagging( &self, input: GetObjectTaggingRequest, ) -> 
RusotoFuture<GetObjectTaggingOutput, GetObjectTaggingError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } params.put_key("tagging"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetObjectTaggingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = GetObjectTaggingOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(GetObjectTaggingOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(version_id) = response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Return torrent files from a bucket.</p> #[allow(unused_variables, warnings)] fn get_object_torrent( &self, input: GetObjectTorrentRequest, ) -> RusotoFuture<GetObjectTorrentOutput, GetObjectTorrentError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri); if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let 
mut params = Params::new(); params.put_key("torrent"); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(GetObjectTorrentError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } let mut result = GetObjectTorrentOutput::default(); result.body = Some(StreamingBody { len: None, inner: response.body, }); if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; future::Either::A(future::ok(result)) }); RusotoFuture::new(future) } /// <p>This operation is useful to determine if a bucket exists and you have permission to access it.</p> #[allow(unused_variables, warnings)] fn head_bucket(&self, input: HeadBucketRequest) -> RusotoFuture<(), HeadBucketError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("HEAD", "s3", &self.region, &request_uri); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(HeadBucketError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you&#39;re only interested in an object&#39;s metadata. 
To use HEAD, you must have READ access to the object.</p> #[allow(unused_variables, warnings)] fn head_object( &self, input: HeadObjectRequest, ) -> RusotoFuture<HeadObjectOutput, HeadObjectError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("HEAD", "s3", &self.region, &request_uri); if let Some(ref if_match) = input.if_match { request.add_header("If-Match", &if_match.to_string()); } if let Some(ref if_modified_since) = input.if_modified_since { request.add_header("If-Modified-Since", &if_modified_since.to_string()); } if let Some(ref if_none_match) = input.if_none_match { request.add_header("If-None-Match", &if_none_match.to_string()); } if let Some(ref if_unmodified_since) = input.if_unmodified_since { request.add_header("If-Unmodified-Since", &if_unmodified_since.to_string()); } if let Some(ref range) = input.range { request.add_header("Range", &range.to_string()); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm { request.add_header( "x-amz-server-side-encryption-customer-algorithm", &sse_customer_algorithm.to_string(), ); } if let Some(ref sse_customer_key) = input.sse_customer_key { request.add_header( "x-amz-server-side-encryption-customer-key", &sse_customer_key.to_string(), ); } if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 { request.add_header( "x-amz-server-side-encryption-customer-key-MD5", &sse_customer_key_md5.to_string(), ); } let mut params = Params::new(); if let Some(ref x) = input.part_number { params.put("partNumber", x); } if let Some(ref x) = input.version_id { params.put("versionId", x); } request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != 
// --- tail of the HeadObject response handler (the function begins earlier in the file) ---
// Continuation of the status check: anything other than 200/204/206 is turned
// into a HeadObjectError built from the raw response body.
StatusCode::PartialContent
{
    return future::Either::B(response.buffer().from_err().and_then(|response| {
        Err(HeadObjectError::from_body(
            String::from_utf8_lossy(response.body.as_ref()).as_ref(),
        ))
    }));
}
// Success path: deserialize the XML body (if any), then copy each documented
// HeadObject response header onto the corresponding output field.
future::Either::A(response.buffer().from_err().and_then(move |response| {
    let mut result;
    if response.body.is_empty() {
        result = HeadObjectOutput::default();
    } else {
        let reader = EventReader::new_with_config(
            response.body.as_slice(),
            ParserConfig::new().trim_whitespace(true),
        );
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        let _start_document = stack.next();
        let actual_tag_name = try!(peek_at_name(&mut stack));
        result = try!(HeadObjectOutputDeserializer::deserialize(
            &actual_tag_name,
            &mut stack
        ));
    }
    if let Some(accept_ranges) = response.headers.get("accept-ranges") {
        let value = accept_ranges.to_owned();
        result.accept_ranges = Some(value)
    };
    if let Some(cache_control) = response.headers.get("Cache-Control") {
        let value = cache_control.to_owned();
        result.cache_control = Some(value)
    };
    if let Some(content_disposition) = response.headers.get("Content-Disposition") {
        let value = content_disposition.to_owned();
        result.content_disposition = Some(value)
    };
    if let Some(content_encoding) = response.headers.get("Content-Encoding") {
        let value = content_encoding.to_owned();
        result.content_encoding = Some(value)
    };
    if let Some(content_language) = response.headers.get("Content-Language") {
        let value = content_language.to_owned();
        result.content_language = Some(value)
    };
    // NOTE(review): the `.unwrap()` calls below panic on a malformed numeric /
    // boolean header value; generated code, left as-is.
    if let Some(content_length) = response.headers.get("Content-Length") {
        let value = content_length.to_owned();
        result.content_length = Some(value.parse::<i64>().unwrap())
    };
    if let Some(content_type) = response.headers.get("Content-Type") {
        let value = content_type.to_owned();
        result.content_type = Some(value)
    };
    if let Some(delete_marker) = response.headers.get("x-amz-delete-marker") {
        let value = delete_marker.to_owned();
        result.delete_marker = Some(value.parse::<bool>().unwrap())
    };
    if let Some(e_tag) = response.headers.get("ETag") {
        let value = e_tag.to_owned();
        result.e_tag = Some(value)
    };
    if let Some(expiration) = response.headers.get("x-amz-expiration") {
        let value = expiration.to_owned();
        result.expiration = Some(value)
    };
    if let Some(expires) = response.headers.get("Expires") {
        let value = expires.to_owned();
        result.expires = Some(value)
    };
    if let Some(last_modified) = response.headers.get("Last-Modified") {
        let value = last_modified.to_owned();
        result.last_modified = Some(value)
    };
    // User metadata: collect every "x-amz-meta-*" header, prefix stripped.
    let mut values = ::std::collections::HashMap::new();
    for (key, value) in response.headers.iter() {
        if key.starts_with("x-amz-meta-") {
            values.insert(key["x-amz-meta-".len()..].to_owned(), value.to_owned());
        }
    }
    result.metadata = Some(values);
    if let Some(missing_meta) = response.headers.get("x-amz-missing-meta") {
        let value = missing_meta.to_owned();
        result.missing_meta = Some(value.parse::<i64>().unwrap())
    };
    if let Some(parts_count) = response.headers.get("x-amz-mp-parts-count") {
        let value = parts_count.to_owned();
        result.parts_count = Some(value.parse::<i64>().unwrap())
    };
    if let Some(replication_status) = response.headers.get("x-amz-replication-status") {
        let value = replication_status.to_owned();
        result.replication_status = Some(value)
    };
    if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
        let value = request_charged.to_owned();
        result.request_charged = Some(value)
    };
    if let Some(restore) = response.headers.get("x-amz-restore") {
        let value = restore.to_owned();
        result.restore = Some(value)
    };
    if let Some(sse_customer_algorithm) = response
        .headers
        .get("x-amz-server-side-encryption-customer-algorithm")
    {
        let value = sse_customer_algorithm.to_owned();
        result.sse_customer_algorithm = Some(value)
    };
    if let Some(sse_customer_key_md5) = response
        .headers
        .get("x-amz-server-side-encryption-customer-key-MD5")
    {
        let value = sse_customer_key_md5.to_owned();
        result.sse_customer_key_md5 = Some(value)
    };
    // (statement continues on the next line of the file)
    if let Some(ssekms_key_id) =
        // (continuation of the HeadObject header mapping begun on the previous line)
        response
        .headers
        .get("x-amz-server-side-encryption-aws-kms-key-id")
    {
        let value = ssekms_key_id.to_owned();
        result.ssekms_key_id = Some(value)
    };
    if let Some(server_side_encryption) = response.headers.get("x-amz-server-side-encryption")
    {
        let value = server_side_encryption.to_owned();
        result.server_side_encryption = Some(value)
    };
    if let Some(storage_class) = response.headers.get("x-amz-storage-class") {
        let value = storage_class.to_owned();
        result.storage_class = Some(value)
    };
    if let Some(version_id) = response.headers.get("x-amz-version-id") {
        let value = version_id.to_owned();
        result.version_id = Some(value)
    };
    if let Some(website_redirect_location) =
        response.headers.get("x-amz-website-redirect-location")
    {
        let value = website_redirect_location.to_owned();
        result.website_redirect_location = Some(value)
    };
    Ok(result)
}))
});
RusotoFuture::new(future)
}
/// <p>Lists the analytics configurations for the bucket.</p>
#[allow(unused_variables, warnings)]
fn list_bucket_analytics_configurations(
    &self,
    input: ListBucketAnalyticsConfigurationsRequest,
) -> RusotoFuture<ListBucketAnalyticsConfigurationsOutput, ListBucketAnalyticsConfigurationsError>
{
    // GET /{bucket}?analytics[&continuation-token=...]
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    if let Some(ref x) = input.continuation_token {
        params.put("continuation-token", x);
    }
    params.put_key("analytics");
    request.set_params(params);
    let future = self.inner.sign_and_dispatch(request, |response| {
        // Anything other than 200/204/206 is converted into the operation error.
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(ListBucketAnalyticsConfigurationsError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        // Success: parse the XML payload (or default on an empty body).
        future::Either::A(response.buffer().from_err().and_then(move |response| {
            let mut result;
            if response.body.is_empty() {
                result = ListBucketAnalyticsConfigurationsOutput::default();
            } else {
                let reader = EventReader::new_with_config(
                    response.body.as_slice(),
                    ParserConfig::new().trim_whitespace(true),
                );
                let mut stack = XmlResponse::new(reader.into_iter().peekable());
                let _start_document = stack.next();
                let actual_tag_name = try!(peek_at_name(&mut stack));
                result = try!(
                    ListBucketAnalyticsConfigurationsOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    )
                );
            }
            Ok(result)
        }))
    });
    RusotoFuture::new(future)
}
/// <p>Returns a list of inventory configurations for the bucket.</p>
#[allow(unused_variables, warnings)]
fn list_bucket_inventory_configurations(
    &self,
    input: ListBucketInventoryConfigurationsRequest,
) -> RusotoFuture<ListBucketInventoryConfigurationsOutput, ListBucketInventoryConfigurationsError>
{
    // GET /{bucket}?inventory[&continuation-token=...]
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    if let Some(ref x) = input.continuation_token {
        params.put("continuation-token", x);
    }
    params.put_key("inventory");
    request.set_params(params);
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(ListBucketInventoryConfigurationsError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(response.buffer().from_err().and_then(move |response| {
            let mut result;
            if response.body.is_empty() {
                result = ListBucketInventoryConfigurationsOutput::default();
            } else {
                let reader = EventReader::new_with_config(
                    response.body.as_slice(),
                    ParserConfig::new().trim_whitespace(true),
                );
                let mut stack = XmlResponse::new(reader.into_iter().peekable());
                let _start_document = stack.next();
                let actual_tag_name = try!(peek_at_name(&mut stack));
                // (expression continues on the next line of the file)
                result = try!(
                    // (continuation: finish deserializing the inventory-configurations list)
                    ListBucketInventoryConfigurationsOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    )
                );
            }
            Ok(result)
        }))
    });
    RusotoFuture::new(future)
}
/// <p>Lists the metrics configurations for the bucket.</p>
#[allow(unused_variables, warnings)]
fn list_bucket_metrics_configurations(
    &self,
    input: ListBucketMetricsConfigurationsRequest,
) -> RusotoFuture<ListBucketMetricsConfigurationsOutput, ListBucketMetricsConfigurationsError>
{
    // GET /{bucket}?metrics[&continuation-token=...]
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    if let Some(ref x) = input.continuation_token {
        params.put("continuation-token", x);
    }
    params.put_key("metrics");
    request.set_params(params);
    let future = self.inner.sign_and_dispatch(request, |response| {
        // Anything other than 200/204/206 is converted into the operation error.
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(ListBucketMetricsConfigurationsError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(response.buffer().from_err().and_then(move |response| {
            let mut result;
            if response.body.is_empty() {
                result = ListBucketMetricsConfigurationsOutput::default();
            } else {
                let reader = EventReader::new_with_config(
                    response.body.as_slice(),
                    ParserConfig::new().trim_whitespace(true),
                );
                let mut stack = XmlResponse::new(reader.into_iter().peekable());
                let _start_document = stack.next();
                let actual_tag_name = try!(peek_at_name(&mut stack));
                result = try!(
                    ListBucketMetricsConfigurationsOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    )
                );
            }
            Ok(result)
        }))
    });
    RusotoFuture::new(future)
}
/// <p>Returns a list of all buckets owned by the authenticated sender of the request.</p>
#[allow(unused_variables, warnings)]
fn list_buckets(&self) -> RusotoFuture<ListBucketsOutput, ListBucketsError> {
    // GET / — no parameters; lists every bucket owned by the caller.
    let request_uri = "/";
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(ListBucketsError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(response.buffer().from_err().and_then(move |response| {
            let mut result;
            if response.body.is_empty() {
                result = ListBucketsOutput::default();
            } else {
                let reader = EventReader::new_with_config(
                    response.body.as_slice(),
                    ParserConfig::new().trim_whitespace(true),
                );
                let mut stack = XmlResponse::new(reader.into_iter().peekable());
                let _start_document = stack.next();
                let actual_tag_name = try!(peek_at_name(&mut stack));
                result = try!(ListBucketsOutputDeserializer::deserialize(
                    &actual_tag_name,
                    &mut stack
                ));
            }
            Ok(result)
        }))
    });
    RusotoFuture::new(future)
}
/// <p>This operation lists in-progress multipart uploads.</p>
#[allow(unused_variables, warnings)]
fn list_multipart_uploads(
    &self,
    input: ListMultipartUploadsRequest,
) -> RusotoFuture<ListMultipartUploadsOutput, ListMultipartUploadsError> {
    // GET /{bucket}?uploads with the optional pagination / filtering params.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    if let Some(ref x) = input.delimiter {
        params.put("delimiter", x);
    }
    if let Some(ref x) = input.encoding_type {
        params.put("encoding-type", x);
    }
    if let Some(ref x) = input.key_marker {
        params.put("key-marker", x);
    }
    if let Some(ref x) = input.max_uploads {
        params.put("max-uploads", x);
    }
    if let Some(ref x) = input.prefix {
        params.put("prefix", x);
    }
    if let Some(ref x) = input.upload_id_marker {
        params.put("upload-id-marker", x);
    }
    params.put_key("uploads");
    request.set_params(params);
    // (closure body continues on the next line of the file)
    let future = self.inner.sign_and_dispatch(request, |response| {
        // (continuation of the ListMultipartUploads dispatch closure)
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(ListMultipartUploadsError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(response.buffer().from_err().and_then(move |response| {
            let mut result;
            if response.body.is_empty() {
                result = ListMultipartUploadsOutput::default();
            } else {
                let reader = EventReader::new_with_config(
                    response.body.as_slice(),
                    ParserConfig::new().trim_whitespace(true),
                );
                let mut stack = XmlResponse::new(reader.into_iter().peekable());
                let _start_document = stack.next();
                let actual_tag_name = try!(peek_at_name(&mut stack));
                result = try!(ListMultipartUploadsOutputDeserializer::deserialize(
                    &actual_tag_name,
                    &mut stack
                ));
            }
            Ok(result)
        }))
    });
    RusotoFuture::new(future)
}
/// <p>Returns metadata about all of the versions of objects in a bucket.</p>
#[allow(unused_variables, warnings)]
fn list_object_versions(
    &self,
    input: ListObjectVersionsRequest,
) -> RusotoFuture<ListObjectVersionsOutput, ListObjectVersionsError> {
    // GET /{bucket}?versions with the optional pagination / filtering params.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    if let Some(ref x) = input.delimiter {
        params.put("delimiter", x);
    }
    if let Some(ref x) = input.encoding_type {
        params.put("encoding-type", x);
    }
    if let Some(ref x) = input.key_marker {
        params.put("key-marker", x);
    }
    if let Some(ref x) = input.max_keys {
        params.put("max-keys", x);
    }
    if let Some(ref x) = input.prefix {
        params.put("prefix", x);
    }
    if let Some(ref x) = input.version_id_marker {
        params.put("version-id-marker", x);
    }
    params.put_key("versions");
    request.set_params(params);
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status !=
            StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(ListObjectVersionsError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(response.buffer().from_err().and_then(move |response| {
            let mut result;
            if response.body.is_empty() {
                result = ListObjectVersionsOutput::default();
            } else {
                let reader = EventReader::new_with_config(
                    response.body.as_slice(),
                    ParserConfig::new().trim_whitespace(true),
                );
                let mut stack = XmlResponse::new(reader.into_iter().peekable());
                let _start_document = stack.next();
                let actual_tag_name = try!(peek_at_name(&mut stack));
                result = try!(ListObjectVersionsOutputDeserializer::deserialize(
                    &actual_tag_name,
                    &mut stack
                ));
            }
            Ok(result)
        }))
    });
    RusotoFuture::new(future)
}
/// <p>Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.</p>
#[allow(unused_variables, warnings)]
fn list_objects(
    &self,
    input: ListObjectsRequest,
) -> RusotoFuture<ListObjectsOutput, ListObjectsError> {
    // GET /{bucket} (V1 listing) with optional request-payer header and params.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    if let Some(ref request_payer) = input.request_payer {
        request.add_header("x-amz-request-payer", &request_payer.to_string());
    }
    let mut params = Params::new();
    if let Some(ref x) = input.delimiter {
        params.put("delimiter", x);
    }
    if let Some(ref x) = input.encoding_type {
        params.put("encoding-type", x);
    }
    if let Some(ref x) = input.marker {
        params.put("marker", x);
    }
    if let Some(ref x) = input.max_keys {
        params.put("max-keys", x);
    }
    if let Some(ref x) = input.prefix {
        params.put("prefix", x);
    }
    request.set_params(params);
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            // (return expression continues on the next line of the file)
            return
            // (continuation of the ListObjects error branch begun on the previous line)
            future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(ListObjectsError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(response.buffer().from_err().and_then(move |response| {
            let mut result;
            if response.body.is_empty() {
                result = ListObjectsOutput::default();
            } else {
                let reader = EventReader::new_with_config(
                    response.body.as_slice(),
                    ParserConfig::new().trim_whitespace(true),
                );
                let mut stack = XmlResponse::new(reader.into_iter().peekable());
                let _start_document = stack.next();
                let actual_tag_name = try!(peek_at_name(&mut stack));
                result = try!(ListObjectsOutputDeserializer::deserialize(
                    &actual_tag_name,
                    &mut stack
                ));
            }
            Ok(result)
        }))
    });
    RusotoFuture::new(future)
}
/// <p>Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend you use this revised API for new application development.</p>
#[allow(unused_variables, warnings)]
fn list_objects_v2(
    &self,
    input: ListObjectsV2Request,
) -> RusotoFuture<ListObjectsV2Output, ListObjectsV2Error> {
    // GET /{bucket}?list-type=2 with optional request-payer header and params.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    if let Some(ref request_payer) = input.request_payer {
        request.add_header("x-amz-request-payer", &request_payer.to_string());
    }
    let mut params = Params::new();
    if let Some(ref x) = input.continuation_token {
        params.put("continuation-token", x);
    }
    if let Some(ref x) = input.delimiter {
        params.put("delimiter", x);
    }
    if let Some(ref x) = input.encoding_type {
        params.put("encoding-type", x);
    }
    if let Some(ref x) = input.fetch_owner {
        params.put("fetch-owner", x);
    }
    if let Some(ref x) = input.max_keys {
        params.put("max-keys", x);
    }
    if let Some(ref x) = input.prefix {
        params.put("prefix", x);
    }
    if let Some(ref x) = input.start_after {
        params.put("start-after", x);
    }
    // list-type=2 selects the V2 listing API.
    params.put("list-type", "2");
    request.set_params(params);
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(ListObjectsV2Error::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(response.buffer().from_err().and_then(move |response| {
            let mut result;
            if response.body.is_empty() {
                result = ListObjectsV2Output::default();
            } else {
                let reader = EventReader::new_with_config(
                    response.body.as_slice(),
                    ParserConfig::new().trim_whitespace(true),
                );
                let mut stack = XmlResponse::new(reader.into_iter().peekable());
                let _start_document = stack.next();
                let actual_tag_name = try!(peek_at_name(&mut stack));
                result = try!(ListObjectsV2OutputDeserializer::deserialize(
                    &actual_tag_name,
                    &mut stack
                ));
            }
            Ok(result)
        }))
    });
    RusotoFuture::new(future)
}
/// <p>Lists the parts that have been uploaded for a specific multipart upload.</p>
#[allow(unused_variables, warnings)]
fn list_parts(&self, input: ListPartsRequest) -> RusotoFuture<ListPartsOutput, ListPartsError> {
    // GET /{bucket}/{key}?uploadId=... with optional pagination params.
    let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);
    let mut request = SignedRequest::new("GET", "s3", &self.region, &request_uri);
    if let Some(ref request_payer) = input.request_payer {
        request.add_header("x-amz-request-payer", &request_payer.to_string());
    }
    let mut params = Params::new();
    if let Some(ref x) = input.max_parts {
        params.put("max-parts", x);
    }
    if let Some(ref x) = input.part_number_marker {
        params.put("part-number-marker", x);
    }
    params.put("uploadId", &input.upload_id);
    request.set_params(params);
    let future = self.inner.sign_and_dispatch(request, |response| {
        // (condition continues on the next line of the file)
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status !=
            // (continuation of the ListParts status check begun on the previous line)
            StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(ListPartsError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(response.buffer().from_err().and_then(move |response| {
            let mut result;
            if response.body.is_empty() {
                result = ListPartsOutput::default();
            } else {
                let reader = EventReader::new_with_config(
                    response.body.as_slice(),
                    ParserConfig::new().trim_whitespace(true),
                );
                let mut stack = XmlResponse::new(reader.into_iter().peekable());
                let _start_document = stack.next();
                let actual_tag_name = try!(peek_at_name(&mut stack));
                result = try!(ListPartsOutputDeserializer::deserialize(
                    &actual_tag_name,
                    &mut stack
                ));
            }
            // Copy the documented ListParts response headers onto the output.
            if let Some(abort_date) = response.headers.get("x-amz-abort-date") {
                let value = abort_date.to_owned();
                result.abort_date = Some(value)
            };
            if let Some(abort_rule_id) = response.headers.get("x-amz-abort-rule-id") {
                let value = abort_rule_id.to_owned();
                result.abort_rule_id = Some(value)
            };
            if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
                let value = request_charged.to_owned();
                result.request_charged = Some(value)
            };
            Ok(result)
        }))
    });
    RusotoFuture::new(future)
}
/// <p>Sets the accelerate configuration of an existing bucket.</p>
#[allow(unused_variables, warnings)]
fn put_bucket_accelerate_configuration(
    &self,
    input: PutBucketAccelerateConfigurationRequest,
) -> RusotoFuture<(), PutBucketAccelerateConfigurationError> {
    // PUT /{bucket}?accelerate with an XML AccelerateConfiguration payload.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    params.put_key("accelerate");
    request.set_params(params);
    let mut writer = EventWriter::new(Vec::new());
    AccelerateConfigurationSerializer::serialize(
        &mut writer,
        "AccelerateConfiguration",
        &input.accelerate_configuration,
    );
    request.set_payload(Some(writer.into_inner()));
    let future = self.inner.sign_and_dispatch(request,
        |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(PutBucketAccelerateConfigurationError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        // Success carries no payload; drop the response.
        future::Either::A(future::ok(::std::mem::drop(response)))
    });
    RusotoFuture::new(future)
}
/// <p>Sets the permissions on a bucket using access control lists (ACL).</p>
#[allow(unused_variables, warnings)]
fn put_bucket_acl(&self, input: PutBucketAclRequest) -> RusotoFuture<(), PutBucketAclError> {
    // PUT /{bucket}?acl — ACL may be given via headers, a policy body, or both.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    if let Some(ref acl) = input.acl {
        request.add_header("x-amz-acl", &acl.to_string());
    }
    if let Some(ref content_md5) = input.content_md5 {
        request.add_header("Content-MD5", &content_md5.to_string());
    }
    if let Some(ref grant_full_control) = input.grant_full_control {
        request.add_header("x-amz-grant-full-control", &grant_full_control.to_string());
    }
    if let Some(ref grant_read) = input.grant_read {
        request.add_header("x-amz-grant-read", &grant_read.to_string());
    }
    if let Some(ref grant_read_acp) = input.grant_read_acp {
        request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string());
    }
    if let Some(ref grant_write) = input.grant_write {
        request.add_header("x-amz-grant-write", &grant_write.to_string());
    }
    if let Some(ref grant_write_acp) = input.grant_write_acp {
        request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string());
    }
    let mut params = Params::new();
    params.put_key("acl");
    request.set_params(params);
    // Serialize the explicit access-control policy if one was supplied;
    // otherwise send an empty body (headers alone carry the grants).
    if input.access_control_policy.is_some() {
        let mut writer = EventWriter::new(Vec::new());
        AccessControlPolicySerializer::serialize(
            &mut writer,
            "AccessControlPolicy",
            input.access_control_policy.as_ref().unwrap(),
        );
        request.set_payload(Some(writer.into_inner()));
    // (else-branch body continues on the next line of the file)
    } else {
        // (continuation of the PutBucketAcl else-branch: no policy => empty payload)
        request.set_payload(Some(Vec::new()));
    }
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(PutBucketAclError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(future::ok(::std::mem::drop(response)))
    });
    RusotoFuture::new(future)
}
/// <p>Sets an analytics configuration for the bucket (specified by the analytics configuration ID).</p>
#[allow(unused_variables, warnings)]
fn put_bucket_analytics_configuration(
    &self,
    input: PutBucketAnalyticsConfigurationRequest,
) -> RusotoFuture<(), PutBucketAnalyticsConfigurationError> {
    // PUT /{bucket}?analytics&id=... with an XML AnalyticsConfiguration payload.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    params.put("id", &input.id);
    params.put_key("analytics");
    request.set_params(params);
    let mut writer = EventWriter::new(Vec::new());
    AnalyticsConfigurationSerializer::serialize(
        &mut writer,
        "AnalyticsConfiguration",
        &input.analytics_configuration,
    );
    request.set_payload(Some(writer.into_inner()));
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(PutBucketAnalyticsConfigurationError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(future::ok(::std::mem::drop(response)))
    });
    RusotoFuture::new(future)
}
/// <p>Sets the cors configuration for a bucket.</p>
#[allow(unused_variables, warnings)]
fn put_bucket_cors(&self, input: PutBucketCorsRequest) -> RusotoFuture<(), PutBucketCorsError> {
    // PUT /{bucket}?cors with an XML CORSConfiguration payload.
    let request_uri = format!("/{bucket}", bucket =
        input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    if let Some(ref content_md5) = input.content_md5 {
        request.add_header("Content-MD5", &content_md5.to_string());
    }
    let mut params = Params::new();
    params.put_key("cors");
    request.set_params(params);
    let mut writer = EventWriter::new(Vec::new());
    CORSConfigurationSerializer::serialize(
        &mut writer,
        "CORSConfiguration",
        &input.cors_configuration,
    );
    request.set_payload(Some(writer.into_inner()));
    // CORS PUTs require a Content-MD5 header; computed from the payload here.
    request.set_content_md5_header();
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(PutBucketCorsError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(future::ok(::std::mem::drop(response)))
    });
    RusotoFuture::new(future)
}
/// <p>Creates a new server-side encryption configuration (or replaces an existing one, if present).</p>
#[allow(unused_variables, warnings)]
fn put_bucket_encryption(
    &self,
    input: PutBucketEncryptionRequest,
) -> RusotoFuture<(), PutBucketEncryptionError> {
    // PUT /{bucket}?encryption with an XML ServerSideEncryptionConfiguration payload.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    if let Some(ref content_md5) = input.content_md5 {
        request.add_header("Content-MD5", &content_md5.to_string());
    }
    let mut params = Params::new();
    params.put_key("encryption");
    request.set_params(params);
    let mut writer = EventWriter::new(Vec::new());
    ServerSideEncryptionConfigurationSerializer::serialize(
        &mut writer,
        "ServerSideEncryptionConfiguration",
        &input.server_side_encryption_configuration,
    );
    request.set_payload(Some(writer.into_inner()));
    let future = self.inner.sign_and_dispatch(request, |response| {
        // (condition continues on the next line of the file)
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            &&
            // (continuation of the PutBucketEncryption status check)
            response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(PutBucketEncryptionError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(future::ok(::std::mem::drop(response)))
    });
    RusotoFuture::new(future)
}
/// <p>Adds an inventory configuration (identified by the inventory ID) from the bucket.</p>
#[allow(unused_variables, warnings)]
fn put_bucket_inventory_configuration(
    &self,
    input: PutBucketInventoryConfigurationRequest,
) -> RusotoFuture<(), PutBucketInventoryConfigurationError> {
    // PUT /{bucket}?inventory&id=... with an XML InventoryConfiguration payload.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    params.put("id", &input.id);
    params.put_key("inventory");
    request.set_params(params);
    let mut writer = EventWriter::new(Vec::new());
    InventoryConfigurationSerializer::serialize(
        &mut writer,
        "InventoryConfiguration",
        &input.inventory_configuration,
    );
    request.set_payload(Some(writer.into_inner()));
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(PutBucketInventoryConfigurationError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(future::ok(::std::mem::drop(response)))
    });
    RusotoFuture::new(future)
}
/// <p>Deprecated, see the PutBucketLifecycleConfiguration operation.</p>
#[allow(unused_variables, warnings)]
fn put_bucket_lifecycle(
    &self,
    input: PutBucketLifecycleRequest,
) -> RusotoFuture<(), PutBucketLifecycleError> {
    // PUT /{bucket}?lifecycle (deprecated API) with an optional XML payload.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    if let Some(ref content_md5) = input.content_md5 {
        request.add_header("Content-MD5", &content_md5.to_string());
    }
    let mut params = Params::new();
    params.put_key("lifecycle");
    request.set_params(params);
    // Serialize the configuration if supplied, otherwise send an empty body.
    if input.lifecycle_configuration.is_some() {
        let mut writer = EventWriter::new(Vec::new());
        LifecycleConfigurationSerializer::serialize(
            &mut writer,
            "LifecycleConfiguration",
            input.lifecycle_configuration.as_ref().unwrap(),
        );
        request.set_payload(Some(writer.into_inner()));
    } else {
        request.set_payload(Some(Vec::new()));
    }
    // Lifecycle PUTs require a Content-MD5 header; computed from the payload here.
    request.set_content_md5_header();
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(PutBucketLifecycleError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(future::ok(::std::mem::drop(response)))
    });
    RusotoFuture::new(future)
}
/// <p>Sets lifecycle configuration for your bucket.
/// If a lifecycle configuration exists, it replaces it.</p>
#[allow(unused_variables, warnings)]
fn put_bucket_lifecycle_configuration(
    &self,
    input: PutBucketLifecycleConfigurationRequest,
) -> RusotoFuture<(), PutBucketLifecycleConfigurationError> {
    // PUT /{bucket}?lifecycle (current API) with an optional XML payload.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    params.put_key("lifecycle");
    request.set_params(params);
    // Serialize the configuration if supplied, otherwise send an empty body.
    if input.lifecycle_configuration.is_some() {
        let mut writer = EventWriter::new(Vec::new());
        BucketLifecycleConfigurationSerializer::serialize(
            &mut writer,
            "LifecycleConfiguration",
            input.lifecycle_configuration.as_ref().unwrap(),
        );
        request.set_payload(Some(writer.into_inner()));
    } else {
        request.set_payload(Some(Vec::new()));
    }
    // Lifecycle PUTs require a Content-MD5 header; computed from the payload here.
    request.set_content_md5_header();
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(PutBucketLifecycleConfigurationError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(future::ok(::std::mem::drop(response)))
    });
    RusotoFuture::new(future)
}
/// <p>Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters.
/// To set the logging status of a bucket, you must be the bucket owner.</p>
#[allow(unused_variables, warnings)]
fn put_bucket_logging(
    &self,
    input: PutBucketLoggingRequest,
) -> RusotoFuture<(), PutBucketLoggingError> {
    // PUT /{bucket}?logging with an XML BucketLoggingStatus payload.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    if let Some(ref content_md5) = input.content_md5 {
        request.add_header("Content-MD5", &content_md5.to_string());
    }
    let mut params = Params::new();
    params.put_key("logging");
    request.set_params(params);
    let mut writer = EventWriter::new(Vec::new());
    BucketLoggingStatusSerializer::serialize(
        &mut writer,
        "BucketLoggingStatus",
        &input.bucket_logging_status,
    );
    request.set_payload(Some(writer.into_inner()));
    let future = self.inner.sign_and_dispatch(request, |response| {
        if response.status != StatusCode::Ok
            && response.status != StatusCode::NoContent
            && response.status != StatusCode::PartialContent
        {
            return future::Either::B(response.buffer().from_err().and_then(|response| {
                Err(PutBucketLoggingError::from_body(
                    String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                ))
            }));
        }
        future::Either::A(future::ok(::std::mem::drop(response)))
    });
    RusotoFuture::new(future)
}
/// <p>Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.</p>
#[allow(unused_variables, warnings)]
fn put_bucket_metrics_configuration(
    &self,
    input: PutBucketMetricsConfigurationRequest,
) -> RusotoFuture<(), PutBucketMetricsConfigurationError> {
    // PUT /{bucket}?metrics&id=... with an XML MetricsConfiguration payload.
    let request_uri = format!("/{bucket}", bucket = input.bucket);
    let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);
    let mut params = Params::new();
    params.put("id", &input.id);
    params.put_key("metrics");
    request.set_params(params);
    let mut writer = EventWriter::new(Vec::new());
    MetricsConfigurationSerializer::serialize(
        &mut writer,
        "MetricsConfiguration",
        &input.metrics_configuration,
    );
    request.set_payload(Some(writer.into_inner()));
    // (statement continues on the next line of the file)
    let future
= self.inner.sign_and_dispatch(request, |response| {
            // Anything other than 200/204/206 is surfaced as a typed error
            // built from the (buffered) response body.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketMetricsConfigurationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Success responses carry no payload for this operation; drop the
            // raw response and resolve with `()`.
            future::Either::A(future::ok(::std::mem::drop(response)))
        });

        RusotoFuture::new(future)
    }

    /// <p>Deprecated, see the PutBucketNotificationConfiguration operation.</p>
    #[allow(unused_variables, warnings)]
    fn put_bucket_notification(
        &self,
        input: PutBucketNotificationRequest,
    ) -> RusotoFuture<(), PutBucketNotificationError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);

        if let Some(ref content_md5) = input.content_md5 {
            request.add_header("Content-MD5", &content_md5.to_string());
        }
        let mut params = Params::new();
        // Targets the bucket's `?notification` subresource (value-less key).
        params.put_key("notification");
        request.set_params(params);
        // The deprecated-style notification configuration is sent as an XML
        // document in the request body.
        let mut writer = EventWriter::new(Vec::new());
        NotificationConfigurationDeprecatedSerializer::serialize(
            &mut writer,
            "NotificationConfigurationDeprecated",
            &input.notification_configuration,
        );
        request.set_payload(Some(writer.into_inner()));

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Non-2xx (other than the accepted 200/204/206) => typed error.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketNotificationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // No payload on success; resolve with `()`.
            future::Either::A(future::ok(::std::mem::drop(response)))
        });

        RusotoFuture::new(future)
    }

    /// <p>Enables notifications of specified events for a bucket.</p>
    #[allow(unused_variables, warnings)]
    fn put_bucket_notification_configuration(
        &self,
        input: PutBucketNotificationConfigurationRequest,
    ) -> RusotoFuture<(),
PutBucketNotificationConfigurationError> {
        let request_uri = format!("/{bucket}", bucket = input.bucket);

        let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri);

        let mut params = Params::new();
        // Targets the bucket's `?notification` subresource (value-less key).
        params.put_key("notification");
        request.set_params(params);
        // The notification configuration is serialized as an XML document and
        // sent as the request body.
        let mut writer = EventWriter::new(Vec::new());
        NotificationConfigurationSerializer::serialize(
            &mut writer,
            "NotificationConfiguration",
            &input.notification_configuration,
        );
        request.set_payload(Some(writer.into_inner()));

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Anything other than 200/204/206 is surfaced as a typed error
            // built from the (buffered) response body.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(PutBucketNotificationConfigurationError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            // Success responses carry no payload; drop the raw response and
            // resolve with `()`.
            future::Either::A(future::ok(::std::mem::drop(response)))
        });

        RusotoFuture::new(future)
    }

    /// <p>Replaces a policy on a bucket.
If the bucket already has a policy, the one in this request completely replaces it.</p> #[allow(unused_variables, warnings)] fn put_bucket_policy( &self, input: PutBucketPolicyRequest, ) -> RusotoFuture<(), PutBucketPolicyError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref confirm_remove_self_bucket_access) = input.confirm_remove_self_bucket_access { request.add_header( "x-amz-confirm-remove-self-bucket-access", &confirm_remove_self_bucket_access.to_string(), ); } if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } let mut params = Params::new(); params.put_key("policy"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); PolicySerializer::serialize(&mut writer, "Policy", &input.policy); request.set_payload(Some(writer.into_inner())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutBucketPolicyError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Creates a new replication configuration (or replaces an existing one, if present).</p> #[allow(unused_variables, warnings)] fn put_bucket_replication( &self, input: PutBucketReplicationRequest, ) -> RusotoFuture<(), PutBucketReplicationError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } let mut params = Params::new(); params.put_key("replication"); 
request.set_params(params); let mut writer = EventWriter::new(Vec::new()); ReplicationConfigurationSerializer::serialize( &mut writer, "ReplicationConfiguration", &input.replication_configuration, ); request.set_payload(Some(writer.into_inner())); request.set_content_md5_header(); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutBucketReplicationError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. 
Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html</p> #[allow(unused_variables, warnings)] fn put_bucket_request_payment( &self, input: PutBucketRequestPaymentRequest, ) -> RusotoFuture<(), PutBucketRequestPaymentError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } let mut params = Params::new(); params.put_key("requestPayment"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); RequestPaymentConfigurationSerializer::serialize( &mut writer, "RequestPaymentConfiguration", &input.request_payment_configuration, ); request.set_payload(Some(writer.into_inner())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutBucketRequestPaymentError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Sets the tags for a bucket.</p> #[allow(unused_variables, warnings)] fn put_bucket_tagging( &self, input: PutBucketTaggingRequest, ) -> RusotoFuture<(), PutBucketTaggingError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } let mut params = Params::new(); params.put_key("tagging"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); TaggingSerializer::serialize(&mut writer, "Tagging", 
&input.tagging); request.set_payload(Some(writer.into_inner())); request.set_content_md5_header(); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutBucketTaggingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.</p> #[allow(unused_variables, warnings)] fn put_bucket_versioning( &self, input: PutBucketVersioningRequest, ) -> RusotoFuture<(), PutBucketVersioningError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } if let Some(ref mfa) = input.mfa { request.add_header("x-amz-mfa", &mfa.to_string()); } let mut params = Params::new(); params.put_key("versioning"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); VersioningConfigurationSerializer::serialize( &mut writer, "VersioningConfiguration", &input.versioning_configuration, ); request.set_payload(Some(writer.into_inner())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutBucketVersioningError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Set the website configuration 
for a bucket.</p> #[allow(unused_variables, warnings)] fn put_bucket_website( &self, input: PutBucketWebsiteRequest, ) -> RusotoFuture<(), PutBucketWebsiteError> { let request_uri = format!("/{bucket}", bucket = input.bucket); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } let mut params = Params::new(); params.put_key("website"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); WebsiteConfigurationSerializer::serialize( &mut writer, "WebsiteConfiguration", &input.website_configuration, ); request.set_payload(Some(writer.into_inner())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutBucketWebsiteError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(future::ok(::std::mem::drop(response))) }); RusotoFuture::new(future) } /// <p>Adds an object to a bucket.</p> #[allow(unused_variables, warnings)] fn put_object(&self, input: PutObjectRequest) -> RusotoFuture<PutObjectOutput, PutObjectError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref acl) = input.acl { request.add_header("x-amz-acl", &acl.to_string()); } if let Some(ref cache_control) = input.cache_control { request.add_header("Cache-Control", &cache_control.to_string()); } if let Some(ref content_disposition) = input.content_disposition { request.add_header("Content-Disposition", &content_disposition.to_string()); } if let Some(ref content_encoding) = input.content_encoding { request.add_header("Content-Encoding", 
&content_encoding.to_string()); } if let Some(ref content_language) = input.content_language { request.add_header("Content-Language", &content_language.to_string()); } if let Some(ref content_length) = input.content_length { request.add_header("Content-Length", &content_length.to_string()); } if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } if let Some(ref content_type) = input.content_type { request.add_header("Content-Type", &content_type.to_string()); } if let Some(ref expires) = input.expires { request.add_header("Expires", &expires.to_string()); } if let Some(ref grant_full_control) = input.grant_full_control { request.add_header("x-amz-grant-full-control", &grant_full_control.to_string()); } if let Some(ref grant_read) = input.grant_read { request.add_header("x-amz-grant-read", &grant_read.to_string()); } if let Some(ref grant_read_acp) = input.grant_read_acp { request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string()); } if let Some(ref grant_write_acp) = input.grant_write_acp { request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string()); } if let Some(ref metadata) = input.metadata { for (header_name, header_value) in metadata.iter() { let header = format!("x-amz-meta-{}", header_name); request.add_header(header, header_value); } } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm { request.add_header( "x-amz-server-side-encryption-customer-algorithm", &sse_customer_algorithm.to_string(), ); } if let Some(ref sse_customer_key) = input.sse_customer_key { request.add_header( "x-amz-server-side-encryption-customer-key", &sse_customer_key.to_string(), ); } if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 { request.add_header( "x-amz-server-side-encryption-customer-key-MD5", 
&sse_customer_key_md5.to_string(), ); } if let Some(ref ssekms_key_id) = input.ssekms_key_id { request.add_header( "x-amz-server-side-encryption-aws-kms-key-id", &ssekms_key_id.to_string(), ); } if let Some(ref server_side_encryption) = input.server_side_encryption { request.add_header( "x-amz-server-side-encryption", &server_side_encryption.to_string(), ); } if let Some(ref storage_class) = input.storage_class { request.add_header("x-amz-storage-class", &storage_class.to_string()); } if let Some(ref tagging) = input.tagging { request.add_header("x-amz-tagging", &tagging.to_string()); } if let Some(ref website_redirect_location) = input.website_redirect_location { request.add_header( "x-amz-website-redirect-location", &website_redirect_location.to_string(), ); } if let Some(__body) = input.body { request.set_payload_stream(__body.len, __body.inner); } let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutObjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = PutObjectOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(PutObjectOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(e_tag) = response.headers.get("ETag") { let value = e_tag.to_owned(); result.e_tag = Some(value) }; if let Some(expiration) = response.headers.get("x-amz-expiration") { let value = expiration.to_owned(); result.expiration = 
Some(value) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(sse_customer_algorithm) = response .headers .get("x-amz-server-side-encryption-customer-algorithm") { let value = sse_customer_algorithm.to_owned(); result.sse_customer_algorithm = Some(value) }; if let Some(sse_customer_key_md5) = response .headers .get("x-amz-server-side-encryption-customer-key-MD5") { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") { let value = ssekms_key_id.to_owned(); result.ssekms_key_id = Some(value) }; if let Some(server_side_encryption) = response.headers.get("x-amz-server-side-encryption") { let value = server_side_encryption.to_owned(); result.server_side_encryption = Some(value) }; if let Some(version_id) = response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket</p> #[allow(unused_variables, warnings)] fn put_object_acl( &self, input: PutObjectAclRequest, ) -> RusotoFuture<PutObjectAclOutput, PutObjectAclError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref acl) = input.acl { request.add_header("x-amz-acl", &acl.to_string()); } if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } if let Some(ref grant_full_control) = input.grant_full_control { request.add_header("x-amz-grant-full-control", &grant_full_control.to_string()); } if let Some(ref grant_read) = input.grant_read { 
request.add_header("x-amz-grant-read", &grant_read.to_string()); } if let Some(ref grant_read_acp) = input.grant_read_acp { request.add_header("x-amz-grant-read-acp", &grant_read_acp.to_string()); } if let Some(ref grant_write) = input.grant_write { request.add_header("x-amz-grant-write", &grant_write.to_string()); } if let Some(ref grant_write_acp) = input.grant_write_acp { request.add_header("x-amz-grant-write-acp", &grant_write_acp.to_string()); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } params.put_key("acl"); request.set_params(params); if input.access_control_policy.is_some() { let mut writer = EventWriter::new(Vec::new()); AccessControlPolicySerializer::serialize( &mut writer, "AccessControlPolicy", input.access_control_policy.as_ref().unwrap(), ); request.set_payload(Some(writer.into_inner())); } else { request.set_payload(Some(Vec::new())); } let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutObjectAclError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = PutObjectAclOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(PutObjectAclOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(request_charged) = 
response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Sets the supplied tag-set to an object that already exists in a bucket</p> #[allow(unused_variables, warnings)] fn put_object_tagging( &self, input: PutObjectTaggingRequest, ) -> RusotoFuture<PutObjectTaggingOutput, PutObjectTaggingError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } params.put_key("tagging"); request.set_params(params); let mut writer = EventWriter::new(Vec::new()); TaggingSerializer::serialize(&mut writer, "Tagging", &input.tagging); request.set_payload(Some(writer.into_inner())); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(PutObjectTaggingError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = PutObjectTaggingOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(PutObjectTaggingOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(version_id) = 
response.headers.get("x-amz-version-id") { let value = version_id.to_owned(); result.version_id = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Restores an archived copy of an object back into Amazon S3</p> #[allow(unused_variables, warnings)] fn restore_object( &self, input: RestoreObjectRequest, ) -> RusotoFuture<RestoreObjectOutput, RestoreObjectError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("POST", "s3", &self.region, &request_uri); if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } let mut params = Params::new(); if let Some(ref x) = input.version_id { params.put("versionId", x); } params.put_key("restore"); request.set_params(params); if input.restore_request.is_some() { let mut writer = EventWriter::new(Vec::new()); RestoreRequestSerializer::serialize( &mut writer, "RestoreRequest", input.restore_request.as_ref().unwrap(), ); request.set_payload(Some(writer.into_inner())); } else { request.set_payload(Some(Vec::new())); } let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(RestoreObjectError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = RestoreObjectOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = 
try!(RestoreObjectOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                // Header-sourced fields augment the XML-decoded body.
                if let Some(request_charged) = response.headers.get("x-amz-request-charged") {
                    let value = request_charged.to_owned();
                    result.request_charged = Some(value)
                };
                if let Some(restore_output_path) = response.headers.get("x-amz-restore-output-path")
                {
                    let value = restore_output_path.to_owned();
                    result.restore_output_path = Some(value)
                };
                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>This operation filters the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.</p>
    #[allow(unused_variables, warnings)]
    fn select_object_content(
        &self,
        input: SelectObjectContentRequest,
    ) -> RusotoFuture<SelectObjectContentOutput, SelectObjectContentError> {
        let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key);

        let mut request = SignedRequest::new("POST", "s3", &self.region, &request_uri);

        // Customer-provided encryption key headers, forwarded only when set.
        if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm {
            request.add_header(
                "x-amz-server-side-encryption-customer-algorithm",
                &sse_customer_algorithm.to_string(),
            );
        }
        if let Some(ref sse_customer_key) = input.sse_customer_key {
            request.add_header(
                "x-amz-server-side-encryption-customer-key",
                &sse_customer_key.to_string(),
            );
        }
        if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 {
            request.add_header(
                "x-amz-server-side-encryption-customer-key-MD5",
                &sse_customer_key_md5.to_string(),
            );
        }
        let mut params = Params::new();
        // BUGFIX: the REST target is `?select&select-type=2` — two distinct
        // query items: a value-less `select` key plus `select-type=2`. The
        // previous code stuffed the literal string "select&select-type" into a
        // single param key, which gets percent-encoded to
        // `select%26select-type=2` when the query string is built and is
        // rejected by S3. Split it into the two items, matching the
        // `put_key(...)` subresource pattern used by every sibling method.
        params.put("select-type", "2");
        params.put_key("select");
        request.set_params(params);
        // The SelectObjectContent request body is an XML document in the
        // 2006-03-01 S3 namespace.
        let mut writer = EventWriter::new(Vec::new());
        SelectObjectContentRequestSerializer::serialize(
            &mut writer,
            "SelectObjectContentRequest",
            &input,
            "http://s3.amazonaws.com/doc/2006-03-01/",
        );
        request.set_payload(Some(writer.into_inner()));

        let future = self.inner.sign_and_dispatch(request, |response| {
            // Anything other than 200/204/206 is surfaced as a typed error
            // built from the (buffered) response body.
            if response.status != StatusCode::Ok
                && response.status != StatusCode::NoContent
                && response.status != StatusCode::PartialContent
            {
                return future::Either::B(response.buffer().from_err().and_then(|response| {
                    Err(SelectObjectContentError::from_body(
                        String::from_utf8_lossy(response.body.as_ref()).as_ref(),
                    ))
                }));
            }

            future::Either::A(response.buffer().from_err().and_then(move |response| {
                let mut result;

                // An empty body decodes to the default output; otherwise parse
                // the XML payload into the output struct.
                if response.body.is_empty() {
                    result = SelectObjectContentOutput::default();
                } else {
                    let reader = EventReader::new_with_config(
                        response.body.as_slice(),
                        ParserConfig::new().trim_whitespace(true),
                    );
                    let mut stack = XmlResponse::new(reader.into_iter().peekable());
                    let _start_document = stack.next();
                    let actual_tag_name = try!(peek_at_name(&mut stack));
                    result = try!(SelectObjectContentOutputDeserializer::deserialize(
                        &actual_tag_name,
                        &mut stack
                    ));
                }
                Ok(result)
            }))
        });

        RusotoFuture::new(future)
    }

    /// <p>Uploads a part in a multipart upload.</p><p><b>Note:</b> After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts.
Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.</p> #[allow(unused_variables, warnings)] fn upload_part( &self, input: UploadPartRequest, ) -> RusotoFuture<UploadPartOutput, UploadPartError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("PUT", "s3", &self.region, &request_uri); if let Some(ref content_length) = input.content_length { request.add_header("Content-Length", &content_length.to_string()); } if let Some(ref content_md5) = input.content_md5 { request.add_header("Content-MD5", &content_md5.to_string()); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm { request.add_header( "x-amz-server-side-encryption-customer-algorithm", &sse_customer_algorithm.to_string(), ); } if let Some(ref sse_customer_key) = input.sse_customer_key { request.add_header( "x-amz-server-side-encryption-customer-key", &sse_customer_key.to_string(), ); } if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 { request.add_header( "x-amz-server-side-encryption-customer-key-MD5", &sse_customer_key_md5.to_string(), ); } let mut params = Params::new(); params.put("partNumber", &input.part_number); params.put("uploadId", &input.upload_id); request.set_params(params); if let Some(__body) = input.body { request.set_payload_stream(__body.len, __body.inner); } let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(UploadPartError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } 
future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = UploadPartOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(UploadPartOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(e_tag) = response.headers.get("ETag") { let value = e_tag.to_owned(); result.e_tag = Some(value) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(sse_customer_algorithm) = response .headers .get("x-amz-server-side-encryption-customer-algorithm") { let value = sse_customer_algorithm.to_owned(); result.sse_customer_algorithm = Some(value) }; if let Some(sse_customer_key_md5) = response .headers .get("x-amz-server-side-encryption-customer-key-MD5") { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") { let value = ssekms_key_id.to_owned(); result.ssekms_key_id = Some(value) }; if let Some(server_side_encryption) = response.headers.get("x-amz-server-side-encryption") { let value = server_side_encryption.to_owned(); result.server_side_encryption = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } /// <p>Uploads a part by copying data from an existing object as data source.</p> #[allow(unused_variables, warnings)] fn upload_part_copy( &self, input: UploadPartCopyRequest, ) -> RusotoFuture<UploadPartCopyOutput, UploadPartCopyError> { let request_uri = format!("/{bucket}/{key}", bucket = input.bucket, key = input.key); let mut request = SignedRequest::new("PUT", "s3", 
&self.region, &request_uri); request.add_header("x-amz-copy-source", &input.copy_source); if let Some(ref copy_source_if_match) = input.copy_source_if_match { request.add_header( "x-amz-copy-source-if-match", &copy_source_if_match.to_string(), ); } if let Some(ref copy_source_if_modified_since) = input.copy_source_if_modified_since { request.add_header( "x-amz-copy-source-if-modified-since", &copy_source_if_modified_since.to_string(), ); } if let Some(ref copy_source_if_none_match) = input.copy_source_if_none_match { request.add_header( "x-amz-copy-source-if-none-match", &copy_source_if_none_match.to_string(), ); } if let Some(ref copy_source_if_unmodified_since) = input.copy_source_if_unmodified_since { request.add_header( "x-amz-copy-source-if-unmodified-since", &copy_source_if_unmodified_since.to_string(), ); } if let Some(ref copy_source_range) = input.copy_source_range { request.add_header("x-amz-copy-source-range", &copy_source_range.to_string()); } if let Some(ref copy_source_sse_customer_algorithm) = input.copy_source_sse_customer_algorithm { request.add_header( "x-amz-copy-source-server-side-encryption-customer-algorithm", &copy_source_sse_customer_algorithm.to_string(), ); } if let Some(ref copy_source_sse_customer_key) = input.copy_source_sse_customer_key { request.add_header( "x-amz-copy-source-server-side-encryption-customer-key", &copy_source_sse_customer_key.to_string(), ); } if let Some(ref copy_source_sse_customer_key_md5) = input.copy_source_sse_customer_key_md5 { request.add_header( "x-amz-copy-source-server-side-encryption-customer-key-MD5", &copy_source_sse_customer_key_md5.to_string(), ); } if let Some(ref request_payer) = input.request_payer { request.add_header("x-amz-request-payer", &request_payer.to_string()); } if let Some(ref sse_customer_algorithm) = input.sse_customer_algorithm { request.add_header( "x-amz-server-side-encryption-customer-algorithm", &sse_customer_algorithm.to_string(), ); } if let Some(ref sse_customer_key) = 
input.sse_customer_key { request.add_header( "x-amz-server-side-encryption-customer-key", &sse_customer_key.to_string(), ); } if let Some(ref sse_customer_key_md5) = input.sse_customer_key_md5 { request.add_header( "x-amz-server-side-encryption-customer-key-MD5", &sse_customer_key_md5.to_string(), ); } let mut params = Params::new(); params.put("partNumber", &input.part_number); params.put("uploadId", &input.upload_id); request.set_params(params); let future = self.inner.sign_and_dispatch(request, |response| { if response.status != StatusCode::Ok && response.status != StatusCode::NoContent && response.status != StatusCode::PartialContent { return future::Either::B(response.buffer().from_err().and_then(|response| { Err(UploadPartCopyError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } future::Either::A(response.buffer().from_err().and_then(move |response| { let mut result; if response.body.is_empty() { result = UploadPartCopyOutput::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); result = try!(UploadPartCopyOutputDeserializer::deserialize( &actual_tag_name, &mut stack )); } if let Some(copy_source_version_id) = response.headers.get("x-amz-copy-source-version-id") { let value = copy_source_version_id.to_owned(); result.copy_source_version_id = Some(value) }; if let Some(request_charged) = response.headers.get("x-amz-request-charged") { let value = request_charged.to_owned(); result.request_charged = Some(value) }; if let Some(sse_customer_algorithm) = response .headers .get("x-amz-server-side-encryption-customer-algorithm") { let value = sse_customer_algorithm.to_owned(); result.sse_customer_algorithm = Some(value) }; if let Some(sse_customer_key_md5) = response .headers 
.get("x-amz-server-side-encryption-customer-key-MD5") { let value = sse_customer_key_md5.to_owned(); result.sse_customer_key_md5 = Some(value) }; if let Some(ssekms_key_id) = response .headers .get("x-amz-server-side-encryption-aws-kms-key-id") { let value = ssekms_key_id.to_owned(); result.ssekms_key_id = Some(value) }; if let Some(server_side_encryption) = response.headers.get("x-amz-server-side-encryption") { let value = server_side_encryption.to_owned(); result.server_side_encryption = Some(value) }; Ok(result) })) }); RusotoFuture::new(future) } } #[cfg(test)] mod protocol_tests { extern crate rusoto_mock; use self::rusoto_mock::*; use super::*; use rusoto_core::Region as rusoto_region; #[test] fn test_parse_error_s3_create_bucket() { let mock_response = MockResponseReader::read_response( "test_resources/generated/error", "s3-create-bucket.xml", ); let mock = MockRequestDispatcher::with_status(400).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = CreateBucketRequest::default(); let result = client.create_bucket(request).sync(); assert!(!result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_error_s3_list_objects() { let mock_response = MockResponseReader::read_response( "test_resources/generated/error", "s3-list-objects.xml", ); let mock = MockRequestDispatcher::with_status(400).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = ListObjectsRequest::default(); let result = client.list_objects(request).sync(); assert!(!result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_get_bucket_acl() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-get-bucket-acl.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = 
GetBucketAclRequest::default(); let result = client.get_bucket_acl(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_get_bucket_location() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-get-bucket-location.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = GetBucketLocationRequest::default(); let result = client.get_bucket_location(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_get_bucket_logging() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-get-bucket-logging.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = GetBucketLoggingRequest::default(); let result = client.get_bucket_logging(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_get_bucket_policy() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-get-bucket-policy.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = GetBucketPolicyRequest::default(); let result = client.get_bucket_policy(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_list_buckets() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-list-buckets.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let result = 
client.list_buckets().sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_list_multipart_uploads() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-list-multipart-uploads.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = ListMultipartUploadsRequest::default(); let result = client.list_multipart_uploads(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_list_object_versions() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-list-object-versions.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = ListObjectVersionsRequest::default(); let result = client.list_object_versions(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } #[test] fn test_parse_valid_s3_list_objects() { let mock_response = MockResponseReader::read_response( "test_resources/generated/valid", "s3-list-objects.xml", ); let mock = MockRequestDispatcher::with_status(200).with_body(&mock_response); let client = S3Client::new(mock, MockCredentialsProvider, rusoto_region::UsEast1); let request = ListObjectsRequest::default(); let result = client.list_objects(request).sync(); assert!(result.is_ok(), "parse error: {:?}", result); } }
use specs::{ReadStorage, System, WriteStorage}; use crate::combat::components::intent::{XAxis, YAxis}; use crate::combat::components::{Action, Command, Facing, Intent, State, Velocity, WalkingState}; pub const X_STEP_SIZES: [[i32; 4]; 3] = [[-25, -3, -23, -4], [0, 0, 0, 0], [25, 3, 23, 4]]; pub const Y_STEP_SIZES: [[i32; 4]; 3] = [[-2, -9, -2, -9], [0, 0, 0, 0], [8, 2, 9, 2]]; pub struct VelocitySystem; impl<'a> System<'a> for VelocitySystem { type SystemData = ( ReadStorage<'a, Intent>, WriteStorage<'a, State>, WriteStorage<'a, Velocity>, WriteStorage<'a, WalkingState>, ); fn run(&mut self, (intent, mut state, mut velocity, mut walking_state): Self::SystemData) { use specs::Join; for (intent, state, velocity, walking_state) in (&intent, &mut state, &mut velocity, &mut walking_state).join() { match state.action { Action::Idle | Action::Move { .. } => match intent.command { Command::Move { x, y } => { // match x { // XAxis::Right => state.direction = Facing::Right, // XAxis::Left => state.direction = Facing::Left, // _ => (), // } if x == XAxis::Centre && y == YAxis::Centre { state.action = Action::Idle; velocity.x = 0; velocity.y = 0; } else { state.action = Action::Move { x, y }; let step = (walking_state.step + 1) % 4; velocity.x = X_STEP_SIZES[(x as i32 + 1) as usize][step as usize]; velocity.y = Y_STEP_SIZES[(y as i32 + 1) as usize][step as usize]; } } _ => { state.action = Action::Idle; velocity.x = 0; velocity.y = 0; } }, _ => { velocity.x = 0; velocity.y = 0; } } } } } pub struct ConfirmVelocity; // If an entity has a non zero velocity, set movement in State, // update the walking state step // This is used after VelocitySystem is piped through RestrictMovementBoundry // and check entity collision, those two systems might prevent movement // so we want to update the final velocity here. 
impl<'a> System<'a> for ConfirmVelocity { type SystemData = ( ReadStorage<'a, Velocity>, WriteStorage<'a, State>, WriteStorage<'a, WalkingState>, ); fn run(&mut self, (velocity, mut state, mut walking_state): Self::SystemData) { use specs::Join; for (velocity, state, walking_state) in (&velocity, &mut state, &mut walking_state).join() { match state.action { Action::Move { mut x, mut y } => { if velocity.x == 0 { x = XAxis::Centre; } if velocity.y == 0 { y = YAxis::Centre; } if x == XAxis::Centre && y == YAxis::Centre { state.action = Action::Idle; } else { walking_state.step = (walking_state.step + 1) % 4; } } _ => (), } } } } fix black knight facing step sizes. When walking backwards and facing the player, the black knight step sizes were incorrect causing the animation to be "jumpy", in the original the forward facing animations were "jumpy", we fix this so both forward and back animations look smooth. use specs::{ReadStorage, System, WriteStorage}; use crate::combat::components::intent::{XAxis, YAxis}; use crate::combat::components::{Action, Command, Facing, Intent, State, Velocity, WalkingState}; pub const X_STEP_SIZES: [[i32; 4]; 3] = [[-25, -3, -23, -4], [0, 0, 0, 0], [25, 3, 23, 4]]; pub const Y_STEP_SIZES: [[i32; 4]; 3] = [[-2, -9, -2, -9], [0, 0, 0, 0], [8, 2, 9, 2]]; pub struct VelocitySystem; impl<'a> System<'a> for VelocitySystem { type SystemData = ( ReadStorage<'a, Intent>, WriteStorage<'a, State>, WriteStorage<'a, Velocity>, WriteStorage<'a, WalkingState>, ); fn run(&mut self, (intent, mut state, mut velocity, mut walking_state): Self::SystemData) { use specs::Join; for (intent, state, velocity, walking_state) in (&intent, &mut state, &mut velocity, &mut walking_state).join() { match state.action { Action::Idle | Action::Move { .. 
} => match intent.command { Command::Move { x, y } => { if x == XAxis::Centre && y == YAxis::Centre { state.action = Action::Idle; velocity.x = 0; velocity.y = 0; } else { let walking_direction = match x { XAxis::Right => Facing::Right, XAxis::Left => Facing::Left, _ => state.direction, }; state.action = Action::Move { x, y }; let mut step = (walking_state.step + 1) % 4; if walking_direction != state.direction { // if we have an ai player, then they are always // facing the player, so the step sizes don't // match the animation, shifting this fixes that // this wasn't fixed in the original and the // forward facing animations were off by 1 step = (step + 1) % 4; } velocity.x = X_STEP_SIZES[(x as i32 + 1) as usize][step as usize]; velocity.y = Y_STEP_SIZES[(y as i32 + 1) as usize][step as usize]; } } _ => { state.action = Action::Idle; velocity.x = 0; velocity.y = 0; } }, _ => { velocity.x = 0; velocity.y = 0; } } } } } pub struct ConfirmVelocity; // If an entity has a non zero velocity, set movement in State, // update the walking state step // This is used after VelocitySystem is piped through RestrictMovementBoundry // and check entity collision, those two systems might prevent movement // so we want to update the final velocity here. impl<'a> System<'a> for ConfirmVelocity { type SystemData = ( ReadStorage<'a, Velocity>, WriteStorage<'a, State>, WriteStorage<'a, WalkingState>, ); fn run(&mut self, (velocity, mut state, mut walking_state): Self::SystemData) { use specs::Join; for (velocity, state, walking_state) in (&velocity, &mut state, &mut walking_state).join() { match state.action { Action::Move { mut x, mut y } => { if velocity.x == 0 { x = XAxis::Centre; } if velocity.y == 0 { y = YAxis::Centre; } if x == XAxis::Centre && y == YAxis::Centre { state.action = Action::Idle; } else { walking_state.step = (walking_state.step + 1) % 4; } } _ => (), } } } }
//! Dummy backend implementation to test the code for compile errors //! outside of the graphics development environment. extern crate gfx_hal as core; use std::ops::Range; use core::{buffer, command, device, format, image, target, mapping, memory, pass, pool, pso}; /// Dummy backend. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum Backend { } impl core::Backend for Backend { type Adapter = Adapter; type Device = Device; type Surface = Surface; type Swapchain = Swapchain; type CommandQueue = CommandQueue; type CommandBuffer = RawCommandBuffer; type SubpassCommandBuffer = SubpassCommandBuffer; type QueueFamily = QueueFamily; type Memory = (); type CommandPool = RawCommandPool; type SubpassCommandPool = SubpassCommandPool; type ShaderModule = (); type RenderPass = (); type Framebuffer = (); type UnboundBuffer = (); type Buffer = (); type BufferView = (); type UnboundImage = (); type Image = (); type ImageView = (); type Sampler = (); type ComputePipeline = (); type GraphicsPipeline = (); type PipelineLayout = (); type DescriptorSetLayout = (); type DescriptorPool = DescriptorPool; type DescriptorSet = (); type Fence = (); type Semaphore = (); } /// Dummy adapter. pub struct Adapter; impl core::Adapter<Backend> for Adapter { fn open(&self, _: &[(&QueueFamily, core::QueueType, u32)]) -> core::Gpu<Backend> { unimplemented!() } fn info(&self) -> &core::AdapterInfo { unimplemented!() } fn queue_families(&self) -> &[(QueueFamily, core::QueueType)] { unimplemented!() } } /// Dummy command queue doing nothing. pub struct CommandQueue; impl core::RawCommandQueue<Backend> for CommandQueue { unsafe fn submit_raw(&mut self, _: core::RawSubmission<Backend>, _: Option<&()>) { unimplemented!() } } /// Dummy device doing nothing. 
#[derive(Clone)] pub struct Device; impl core::Device<Backend> for Device { fn get_features(&self) -> &core::Features { unimplemented!() } fn get_limits(&self) -> &core::Limits { unimplemented!() } fn allocate_memory(&mut self, _: &core::MemoryType, _: u64) -> Result<(), device::OutOfMemory> { unimplemented!() } fn create_render_pass(&mut self, _: &[pass::Attachment], _: &[pass::SubpassDesc], _: &[pass::SubpassDependency]) -> () { unimplemented!() } fn create_pipeline_layout(&mut self, _: &[&()]) -> () { unimplemented!() } fn create_graphics_pipelines<'a>( &mut self, _: &[(pso::GraphicsShaderSet<'a, Backend>, &(), pass::Subpass<'a, Backend>, &pso::GraphicsPipelineDesc)], ) -> Vec<Result<(), pso::CreationError>> { unimplemented!() } fn create_compute_pipelines<'a>( &mut self, _: &[(pso::EntryPoint<'a, Backend>, &())], ) -> Vec<Result<(), pso::CreationError>> { unimplemented!() } fn create_framebuffer( &mut self, _: &(), _: &[&()], _: device::Extent, ) -> Result<(), device::FramebufferError> { unimplemented!() } fn create_shader_module(&mut self, _: &[u8]) -> Result<(), device::ShaderError> { unimplemented!() } fn create_sampler(&mut self, _: image::SamplerInfo) -> () { unimplemented!() } fn create_buffer(&mut self, _: u64, _: u64, _: buffer::Usage) -> Result<(), buffer::CreationError> { unimplemented!() } fn get_buffer_requirements(&mut self, _: &()) -> memory::Requirements { unimplemented!() } fn bind_buffer_memory(&mut self, _: &(), _: u64, _: ()) -> Result<(), device::BindError> { unimplemented!() } fn create_buffer_view(&mut self, _: &(), _: format::Format, _: Range<u64>) -> Result<(), buffer::ViewError> { unimplemented!() } fn create_image(&mut self, _: image::Kind, _: image::Level, _: format::Format, _: image::Usage) -> Result<(), image::CreationError> { unimplemented!() } fn get_image_requirements(&mut self, _: &()) -> memory::Requirements { unimplemented!() } fn bind_image_memory(&mut self, _: &(), _: u64, _: ()) -> Result<(), device::BindError> { 
unimplemented!() } fn create_image_view(& mut self, _: &(), _: format::Format, _: format::Swizzle, _: image::SubresourceRange, ) -> Result<(), image::ViewError> { unimplemented!() } fn create_descriptor_pool(&mut self, _: usize, _: &[pso::DescriptorRangeDesc]) -> DescriptorPool { unimplemented!() } fn create_descriptor_set_layout(&mut self, _: &[pso::DescriptorSetLayoutBinding]) -> () { unimplemented!() } fn update_descriptor_sets(&mut self, _: &[pso::DescriptorSetWrite<Backend>]) { unimplemented!() } fn acquire_mapping_raw(&mut self, _: &(), _: Option<Range<u64>>) -> Result<*mut u8, mapping::Error> { unimplemented!() } fn release_mapping_raw(&mut self, _: &(), _: Option<Range<u64>>) { unimplemented!() } fn create_semaphore(&mut self) -> () { unimplemented!() } fn create_fence(&mut self, _: bool) -> () { unimplemented!() } fn reset_fences(&mut self, _: &[&()]) { unimplemented!() } fn wait_for_fences(&mut self, _: &[&()], _: device::WaitFor, _: u32) -> bool { unimplemented!() } fn free_memory(&mut self, _: ()) { unimplemented!() } fn destroy_shader_module(&mut self, _: ()) { unimplemented!() } fn destroy_renderpass(&mut self, _: ()) { unimplemented!() } fn destroy_pipeline_layout(&mut self, _: ()) { unimplemented!() } fn destroy_graphics_pipeline(&mut self, _: ()) { unimplemented!() } fn destroy_compute_pipeline(&mut self, _: ()) { unimplemented!() } fn destroy_framebuffer(&mut self, _: ()) { unimplemented!() } fn destroy_buffer(&mut self, _: ()) { unimplemented!() } fn destroy_buffer_view(&mut self, _: ()) { unimplemented!() } fn destroy_image(&mut self, _: ()) { unimplemented!() } fn destroy_image_view(&mut self, _: ()) { unimplemented!() } fn destroy_sampler(&mut self, _: ()) { unimplemented!() } fn destroy_descriptor_pool(&mut self, _: DescriptorPool) { unimplemented!() } fn destroy_descriptor_set_layout(&mut self, _: ()) { unimplemented!() } fn destroy_fence(&mut self, _: ()) { unimplemented!() } fn destroy_semaphore(&mut self, _: ()) { unimplemented!() } } /// 
Dummy queue family; pub struct QueueFamily; impl core::QueueFamily for QueueFamily { fn num_queues(&self) -> u32 { unimplemented!() } } /// Dummy subpass command buffer. pub struct SubpassCommandBuffer; /// Dummy raw command pool. pub struct RawCommandPool; impl core::RawCommandPool<Backend> for RawCommandPool { fn reset(&mut self) { unimplemented!() } unsafe fn from_queue(_: &CommandQueue, _: pool::CommandPoolCreateFlags) -> Self { unimplemented!() } fn allocate(&mut self, _: usize) -> Vec<RawCommandBuffer> { unimplemented!() } unsafe fn free(&mut self, _: Vec<RawCommandBuffer>) { unimplemented!() } } /// Dummy subpass command pool. pub struct SubpassCommandPool; impl core::SubpassCommandPool<Backend> for SubpassCommandPool { } /// Dummy command buffer, which ignores all the calls. #[derive(Clone)] pub struct RawCommandBuffer; impl core::RawCommandBuffer<Backend> for RawCommandBuffer { fn begin(&mut self) { unimplemented!() } fn finish(&mut self) { unimplemented!() } fn reset(&mut self, _: bool) { unimplemented!() } fn pipeline_barrier( &mut self, _: Range<pso::PipelineStage>, _: &[memory::Barrier<Backend>], ) { unimplemented!() } fn fill_buffer(&mut self, _: &(), _: Range<u64>, _: u32) { unimplemented!() } fn update_buffer(&mut self, _: &(), _: u64, _: &[u8]) { unimplemented!() } fn clear_color_image( &mut self, _: &(), _: image::ImageLayout, _: image::SubresourceRange, _: command::ClearColor, ) { unimplemented!() } fn clear_depth_stencil_image( &mut self, _: &(), _: image::ImageLayout, _: image::SubresourceRange, _: command::ClearDepthStencil, ) { unimplemented!() } fn clear_attachments(&mut self, _: &[command::AttachmentClear], _: &[target::Rect]) { unimplemented!() } fn resolve_image( &mut self, _: &(), _: image::ImageLayout, _: &(), _: image::ImageLayout, _: &[command::ImageResolve], ) { unimplemented!() } fn bind_index_buffer(&mut self, _: buffer::IndexBufferView<Backend>) { unimplemented!() } fn bind_vertex_buffers(&mut self, _: 
pso::VertexBufferSet<Backend>) { unimplemented!() } fn set_viewports(&mut self, _: &[core::Viewport]) { } fn set_scissors(&mut self, _: &[target::Rect]) { unimplemented!() } fn set_stencil_reference(&mut self, _: target::Stencil, _: target::Stencil) { unimplemented!() } fn set_blend_constants(&mut self, _: target::ColorValue) { unimplemented!() } fn begin_renderpass( &mut self, _: &(), _: &(), _: target::Rect, _: &[command::ClearValue], _: command::SubpassContents, ) { unimplemented!() } fn next_subpass(&mut self, _: command::SubpassContents) { unimplemented!() } fn end_renderpass(&mut self) { unimplemented!() } fn bind_graphics_pipeline(&mut self, _: &()) { unimplemented!() } fn bind_graphics_descriptor_sets( &mut self, _: &(), _: usize, _: &[&()], ) { unimplemented!() } fn bind_compute_pipeline(&mut self, _: &()) { unimplemented!() } fn bind_compute_descriptor_sets( &mut self, _: &(), _: usize, _: &[&()], ) { unimplemented!() } fn dispatch(&mut self, _: u32, _: u32, _: u32) { unimplemented!() } fn dispatch_indirect(&mut self, _: &(), _: u64) { unimplemented!() } fn copy_buffer(&mut self, _: &(), _: &(), _: &[command::BufferCopy]) { unimplemented!() } fn copy_image( &mut self, _: &(), _: image::ImageLayout, _: &(), _: image::ImageLayout, _: &[command::ImageCopy], ) { unimplemented!() } fn copy_buffer_to_image( &mut self, _: &(), _: &(), _: image::ImageLayout, _: &[command::BufferImageCopy], ) { unimplemented!() } fn copy_image_to_buffer( &mut self, _: &(), _: image::ImageLayout, _: &(), _: &[command::BufferImageCopy], ) { unimplemented!() } fn draw(&mut self, _: Range<core::VertexCount>, _: Range<core::InstanceCount>, ) { unimplemented!() } fn draw_indexed( &mut self, _: Range<core::IndexCount>, _: core::VertexOffset, _: Range<core::InstanceCount>, ) { unimplemented!() } fn draw_indirect(&mut self, _: &(), _: u64, _: u32, _: u32) { unimplemented!() } fn draw_indexed_indirect( &mut self, _: &(), _: u64, _: u32, _: u32, ) { unimplemented!() } } // Dummy descriptor 
pool. #[derive(Debug)] pub struct DescriptorPool; impl core::DescriptorPool<Backend> for DescriptorPool { fn allocate_sets(&mut self, _: &[&()]) -> Vec<()> { unimplemented!() } fn reset(&mut self) { unimplemented!() } } /// Dummy surface. pub struct Surface; impl core::Surface<Backend> for Surface { fn get_kind(&self) -> core::image::Kind { unimplemented!() } fn surface_capabilities(&self, _: &Adapter) -> core::SurfaceCapabilities { unimplemented!() } fn supports_queue(&self, _: &QueueFamily) -> bool { unimplemented!() } fn build_swapchain<C>(&mut self, _: core::SwapchainConfig, _: &core::CommandQueue<Backend, C> ) -> (Swapchain, core::Backbuffer<Backend>) { unimplemented!() } } /// Dummy swapchain. pub struct Swapchain; impl core::Swapchain<Backend> for Swapchain { fn acquire_frame(&mut self, _: core::FrameSync<Backend>) -> core::Frame { unimplemented!() } fn present<C>( &mut self, _: &mut core::CommandQueue<Backend, C>, _: &[&()], ) { unimplemented!() } } pub struct Instance; impl core::Instance for Instance { type Backend = Backend; fn enumerate_adapters(&self) -> Vec<Adapter> { Vec::new() } } [empty] update for RawQueueFamily //! Dummy backend implementation to test the code for compile errors //! outside of the graphics development environment. extern crate gfx_hal as core; use std::ops::Range; use core::{buffer, command, device, format, image, target, mapping, memory, pass, pool, pso, queue}; /// Dummy backend. 
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum Backend { } impl core::Backend for Backend { type Adapter = Adapter; type Device = Device; type Surface = Surface; type Swapchain = Swapchain; type CommandQueue = RawCommandQueue; type CommandBuffer = RawCommandBuffer; type SubpassCommandBuffer = SubpassCommandBuffer; type QueueFamily = RawQueueFamily; type Memory = (); type CommandPool = RawCommandPool; type SubpassCommandPool = SubpassCommandPool; type ShaderModule = (); type RenderPass = (); type Framebuffer = (); type UnboundBuffer = (); type Buffer = (); type BufferView = (); type UnboundImage = (); type Image = (); type ImageView = (); type Sampler = (); type ComputePipeline = (); type GraphicsPipeline = (); type PipelineLayout = (); type DescriptorSetLayout = (); type DescriptorPool = DescriptorPool; type DescriptorSet = (); type Fence = (); type Semaphore = (); } /// Dummy adapter. pub struct Adapter; impl core::Adapter<Backend> for Adapter { fn open(&self) -> core::Gpu<Backend> { unimplemented!() } fn info(&self) -> &core::AdapterInfo { unimplemented!() } fn queue_families(&self) -> &[&RawQueueFamily] { unimplemented!() } } /// Dummy command queue doing nothing. pub struct RawCommandQueue; impl queue::RawCommandQueue<Backend> for RawCommandQueue { unsafe fn submit_raw(&mut self, _: queue::RawSubmission<Backend>, _: Option<&()>) { unimplemented!() } } /// Dummy device doing nothing. 
#[derive(Clone)] pub struct Device; impl core::Device<Backend> for Device { fn get_features(&self) -> &core::Features { unimplemented!() } fn get_limits(&self) -> &core::Limits { unimplemented!() } fn allocate_memory(&mut self, _: &core::MemoryType, _: u64) -> Result<(), device::OutOfMemory> { unimplemented!() } fn create_render_pass(&mut self, _: &[pass::Attachment], _: &[pass::SubpassDesc], _: &[pass::SubpassDependency]) -> () { unimplemented!() } fn create_pipeline_layout(&mut self, _: &[&()]) -> () { unimplemented!() } fn create_graphics_pipelines<'a>( &mut self, _: &[(pso::GraphicsShaderSet<'a, Backend>, &(), pass::Subpass<'a, Backend>, &pso::GraphicsPipelineDesc)], ) -> Vec<Result<(), pso::CreationError>> { unimplemented!() } fn create_compute_pipelines<'a>( &mut self, _: &[(pso::EntryPoint<'a, Backend>, &())], ) -> Vec<Result<(), pso::CreationError>> { unimplemented!() } fn create_framebuffer( &mut self, _: &(), _: &[&()], _: device::Extent, ) -> Result<(), device::FramebufferError> { unimplemented!() } fn create_shader_module(&mut self, _: &[u8]) -> Result<(), device::ShaderError> { unimplemented!() } fn create_sampler(&mut self, _: image::SamplerInfo) -> () { unimplemented!() } fn create_buffer(&mut self, _: u64, _: u64, _: buffer::Usage) -> Result<(), buffer::CreationError> { unimplemented!() } fn get_buffer_requirements(&mut self, _: &()) -> memory::Requirements { unimplemented!() } fn bind_buffer_memory(&mut self, _: &(), _: u64, _: ()) -> Result<(), device::BindError> { unimplemented!() } fn create_buffer_view(&mut self, _: &(), _: format::Format, _: Range<u64>) -> Result<(), buffer::ViewError> { unimplemented!() } fn create_image(&mut self, _: image::Kind, _: image::Level, _: format::Format, _: image::Usage) -> Result<(), image::CreationError> { unimplemented!() } fn get_image_requirements(&mut self, _: &()) -> memory::Requirements { unimplemented!() } fn bind_image_memory(&mut self, _: &(), _: u64, _: ()) -> Result<(), device::BindError> { 
unimplemented!() } fn create_image_view(& mut self, _: &(), _: format::Format, _: format::Swizzle, _: image::SubresourceRange, ) -> Result<(), image::ViewError> { unimplemented!() } fn create_descriptor_pool(&mut self, _: usize, _: &[pso::DescriptorRangeDesc]) -> DescriptorPool { unimplemented!() } fn create_descriptor_set_layout(&mut self, _: &[pso::DescriptorSetLayoutBinding]) -> () { unimplemented!() } fn update_descriptor_sets(&mut self, _: &[pso::DescriptorSetWrite<Backend>]) { unimplemented!() } fn acquire_mapping_raw(&mut self, _: &(), _: Option<Range<u64>>) -> Result<*mut u8, mapping::Error> { unimplemented!() } fn release_mapping_raw(&mut self, _: &(), _: Option<Range<u64>>) { unimplemented!() } fn create_semaphore(&mut self) -> () { unimplemented!() } fn create_fence(&mut self, _: bool) -> () { unimplemented!() } fn reset_fences(&mut self, _: &[&()]) { unimplemented!() } fn wait_for_fences(&mut self, _: &[&()], _: device::WaitFor, _: u32) -> bool { unimplemented!() } fn free_memory(&mut self, _: ()) { unimplemented!() } fn destroy_shader_module(&mut self, _: ()) { unimplemented!() } fn destroy_renderpass(&mut self, _: ()) { unimplemented!() } fn destroy_pipeline_layout(&mut self, _: ()) { unimplemented!() } fn destroy_graphics_pipeline(&mut self, _: ()) { unimplemented!() } fn destroy_compute_pipeline(&mut self, _: ()) { unimplemented!() } fn destroy_framebuffer(&mut self, _: ()) { unimplemented!() } fn destroy_buffer(&mut self, _: ()) { unimplemented!() } fn destroy_buffer_view(&mut self, _: ()) { unimplemented!() } fn destroy_image(&mut self, _: ()) { unimplemented!() } fn destroy_image_view(&mut self, _: ()) { unimplemented!() } fn destroy_sampler(&mut self, _: ()) { unimplemented!() } fn destroy_descriptor_pool(&mut self, _: DescriptorPool) { unimplemented!() } fn destroy_descriptor_set_layout(&mut self, _: ()) { unimplemented!() } fn destroy_fence(&mut self, _: ()) { unimplemented!() } fn destroy_semaphore(&mut self, _: ()) { unimplemented!() } } /// 
Dummy queue family; pub struct RawQueueFamily; impl queue::RawQueueFamily<Backend> for RawQueueFamily { fn queue_type(&self) -> core::QueueType { unimplemented!() } fn max_queues(&self) -> usize { unimplemented!() } fn create_queue(&mut self) -> RawCommandQueue { unimplemented!() } fn create_pool(&mut self, _: pool::CommandPoolCreateFlags) -> RawCommandPool { unimplemented!() } } /// Dummy subpass command buffer. pub struct SubpassCommandBuffer; /// Dummy raw command pool. pub struct RawCommandPool; impl pool::RawCommandPool<Backend> for RawCommandPool { fn reset(&mut self) { unimplemented!() } fn allocate(&mut self, _: usize) -> Vec<RawCommandBuffer> { unimplemented!() } unsafe fn free(&mut self, _: Vec<RawCommandBuffer>) { unimplemented!() } } /// Dummy subpass command pool. pub struct SubpassCommandPool; impl pool::SubpassCommandPool<Backend> for SubpassCommandPool { } /// Dummy command buffer, which ignores all the calls. #[derive(Clone)] pub struct RawCommandBuffer; impl command::RawCommandBuffer<Backend> for RawCommandBuffer { fn begin(&mut self) { unimplemented!() } fn finish(&mut self) { unimplemented!() } fn reset(&mut self, _: bool) { unimplemented!() } fn pipeline_barrier( &mut self, _: Range<pso::PipelineStage>, _: &[memory::Barrier<Backend>], ) { unimplemented!() } fn fill_buffer(&mut self, _: &(), _: Range<u64>, _: u32) { unimplemented!() } fn update_buffer(&mut self, _: &(), _: u64, _: &[u8]) { unimplemented!() } fn clear_color_image( &mut self, _: &(), _: image::ImageLayout, _: image::SubresourceRange, _: command::ClearColor, ) { unimplemented!() } fn clear_depth_stencil_image( &mut self, _: &(), _: image::ImageLayout, _: image::SubresourceRange, _: command::ClearDepthStencil, ) { unimplemented!() } fn clear_attachments(&mut self, _: &[command::AttachmentClear], _: &[target::Rect]) { unimplemented!() } fn resolve_image( &mut self, _: &(), _: image::ImageLayout, _: &(), _: image::ImageLayout, _: &[command::ImageResolve], ) { unimplemented!() } fn 
bind_index_buffer(&mut self, _: buffer::IndexBufferView<Backend>) { unimplemented!() } fn bind_vertex_buffers(&mut self, _: pso::VertexBufferSet<Backend>) { unimplemented!() } fn set_viewports(&mut self, _: &[core::Viewport]) { } fn set_scissors(&mut self, _: &[target::Rect]) { unimplemented!() } fn set_stencil_reference(&mut self, _: target::Stencil, _: target::Stencil) { unimplemented!() } fn set_blend_constants(&mut self, _: target::ColorValue) { unimplemented!() } fn begin_renderpass( &mut self, _: &(), _: &(), _: target::Rect, _: &[command::ClearValue], _: command::SubpassContents, ) { unimplemented!() } fn next_subpass(&mut self, _: command::SubpassContents) { unimplemented!() } fn end_renderpass(&mut self) { unimplemented!() } fn bind_graphics_pipeline(&mut self, _: &()) { unimplemented!() } fn bind_graphics_descriptor_sets( &mut self, _: &(), _: usize, _: &[&()], ) { unimplemented!() } fn bind_compute_pipeline(&mut self, _: &()) { unimplemented!() } fn bind_compute_descriptor_sets( &mut self, _: &(), _: usize, _: &[&()], ) { unimplemented!() } fn dispatch(&mut self, _: u32, _: u32, _: u32) { unimplemented!() } fn dispatch_indirect(&mut self, _: &(), _: u64) { unimplemented!() } fn copy_buffer(&mut self, _: &(), _: &(), _: &[command::BufferCopy]) { unimplemented!() } fn copy_image( &mut self, _: &(), _: image::ImageLayout, _: &(), _: image::ImageLayout, _: &[command::ImageCopy], ) { unimplemented!() } fn copy_buffer_to_image( &mut self, _: &(), _: &(), _: image::ImageLayout, _: &[command::BufferImageCopy], ) { unimplemented!() } fn copy_image_to_buffer( &mut self, _: &(), _: image::ImageLayout, _: &(), _: &[command::BufferImageCopy], ) { unimplemented!() } fn draw(&mut self, _: Range<core::VertexCount>, _: Range<core::InstanceCount>, ) { unimplemented!() } fn draw_indexed( &mut self, _: Range<core::IndexCount>, _: core::VertexOffset, _: Range<core::InstanceCount>, ) { unimplemented!() } fn draw_indirect(&mut self, _: &(), _: u64, _: u32, _: u32) { 
unimplemented!() } fn draw_indexed_indirect( &mut self, _: &(), _: u64, _: u32, _: u32, ) { unimplemented!() } } // Dummy descriptor pool. #[derive(Debug)] pub struct DescriptorPool; impl core::DescriptorPool<Backend> for DescriptorPool { fn allocate_sets(&mut self, _: &[&()]) -> Vec<()> { unimplemented!() } fn reset(&mut self) { unimplemented!() } } /// Dummy surface. pub struct Surface; impl core::Surface<Backend> for Surface { fn get_kind(&self) -> core::image::Kind { unimplemented!() } fn surface_capabilities(&self, _: &Adapter) -> core::SurfaceCapabilities { unimplemented!() } fn supports_queue(&self, _: &RawQueueFamily) -> bool { unimplemented!() } fn build_swapchain<C>(&mut self, _: core::SwapchainConfig, _: &core::CommandQueue<Backend, C> ) -> (Swapchain, core::Backbuffer<Backend>) { unimplemented!() } } /// Dummy swapchain. pub struct Swapchain; impl core::Swapchain<Backend> for Swapchain { fn acquire_frame(&mut self, _: core::FrameSync<Backend>) -> core::Frame { unimplemented!() } fn present<C>( &mut self, _: &mut core::CommandQueue<Backend, C>, _: &[&()], ) { unimplemented!() } } pub struct Instance; impl core::Instance for Instance { type Backend = Backend; fn enumerate_adapters(&self) -> Vec<Adapter> { Vec::new() } }
// Copyright (c) 2013-2015 Sandstorm Development Group, Inc. and contributors // Licensed under the MIT License: // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
use capnp_rpc::{rpc, twoparty, rpc_twoparty_capnp}; use calculator_capnp::calculator; use gj::Promise; pub fn main() { let args: Vec<String> = ::std::env::args().collect(); if args.len() != 3 { println!("usage: {} client HOST:PORT", args[0]); return; } ::gj::EventLoop::top_level(move |wait_scope| { use std::net::ToSocketAddrs; let addr = try!(args[2].to_socket_addrs()).next().expect("could not parse address"); let stream = try!(::gj::io::tcp::Stream::connect(addr).wait(wait_scope)); let stream2 = try!(stream.try_clone()); let connection: Box<::capnp_rpc::VatNetwork<twoparty::VatId>> = Box::new(twoparty::VatNetwork::new(stream, stream2, Default::default())); let mut rpc_system = rpc::System::new(connection, None); let calculator = calculator::Client { client: rpc_system.bootstrap(rpc_twoparty_capnp::Side::Server) }; { println!("Evaluating a literal..."); let mut request = calculator.evaluate_request(); request.init().init_expression().set_literal(11.0); request.send().promise.then(|response| { let value = pry!(pry!(response.get()).get_value()); let request = value.read_request(); request.send().promise.then(|response|{ assert_eq!(pry!(response.get()).get_value(), 11.0); Promise::ok(()) }) }).wait(wait_scope).unwrap(); println!("PASS"); } { println!("Evaluating a literal using pipelining..."); let mut request = calculator.evaluate_request(); request.init().init_expression().set_literal(23.0); let value = request.send().pipeline.get_value(); let request = value.read_request(); request.send().promise.then(|response|{ assert_eq!(pry!(response.get()).get_value(), 23.0); Promise::ok(()) }).wait(wait_scope).unwrap(); println!("PASS"); } Ok(()) }).expect("top level error"); } 123 + 45 - 67 case. Doesn't quite work yet. // Copyright (c) 2013-2015 Sandstorm Development Group, Inc. 
and contributors // Licensed under the MIT License: // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
use capnp_rpc::{rpc, twoparty, rpc_twoparty_capnp}; use calculator_capnp::calculator; use gj::Promise; pub fn main() { let args: Vec<String> = ::std::env::args().collect(); if args.len() != 3 { println!("usage: {} client HOST:PORT", args[0]); return; } ::gj::EventLoop::top_level(move |wait_scope| { use std::net::ToSocketAddrs; let addr = try!(args[2].to_socket_addrs()).next().expect("could not parse address"); let stream = try!(::gj::io::tcp::Stream::connect(addr).wait(wait_scope)); let stream2 = try!(stream.try_clone()); let connection: Box<::capnp_rpc::VatNetwork<twoparty::VatId>> = Box::new(twoparty::VatNetwork::new(stream, stream2, Default::default())); let mut rpc_system = rpc::System::new(connection, None); let calculator = calculator::Client { client: rpc_system.bootstrap(rpc_twoparty_capnp::Side::Server) }; { println!("Evaluating a literal..."); let mut request = calculator.evaluate_request(); request.init().init_expression().set_literal(11.0); request.send().promise.then(|response| { let value = pry!(pry!(response.get()).get_value()); let request = value.read_request(); request.send().promise.then(|response|{ assert_eq!(pry!(response.get()).get_value(), 11.0); Promise::ok(()) }) }).wait(wait_scope).unwrap(); println!("PASS"); } { println!("Evaluating a literal using pipelining..."); let mut request = calculator.evaluate_request(); request.init().init_expression().set_literal(23.0); let value = request.send().pipeline.get_value(); let request = value.read_request(); request.send().promise.then(|response|{ assert_eq!(pry!(response.get()).get_value(), 23.0); Promise::ok(()) }).wait(wait_scope).unwrap(); println!("PASS"); } { // Make a request to evaluate 123 + 45 - 67. // // The Calculator interface requires that we first call getOperator() to // get the addition and subtraction functions, then call evaluate() to use // them. 
But, once again, we can get both functions, call evaluate(), and // then read() the result -- four RPCs -- in the time of *one* network // round trip, because of promise pipelining. println!("Using add and subtract... "); let add = { // Get the "add" function from the server. let mut request = calculator.get_operator_request(); request.init().set_op(calculator::Operator::Add); request.send().pipeline.get_func() }; let subtract = { // Get the "subtract" function from the server. let mut request = calculator.get_operator_request(); request.init().set_op(calculator::Operator::Subtract); request.send().pipeline.get_func() }; // Build the request to evaluate 123 + 45 - 67. let mut request = calculator.evaluate_request(); { let mut subtract_call = request.init().init_expression().init_call(); subtract_call.set_function(subtract); let mut subtract_params = subtract_call.init_params(2); subtract_params.borrow().get(1).set_literal(67.0); let mut add_call = subtract_params.get(0).init_call(); add_call.set_function(add); let mut add_params = add_call.init_params(2); add_params.borrow().get(0).set_literal(123.0); add_params.get(1).set_literal(45.0); } // Send the evaluate() request, read() the result, and wait for read() to // finish. let eval_promise = request.send(); let read_promise = eval_promise.pipeline.get_value().read_request().send(); let response = try!(read_promise.promise.wait(wait_scope)); assert_eq!(try!(response.get()).get_value(), 101.0); println!("PASS"); } Ok(()) }).expect("top level error"); }
// Copyright © 2018 Cormac O'Brien // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
extern crate cgmath; extern crate chrono; extern crate env_logger; extern crate failure; extern crate flame; extern crate gfx; extern crate gfx_device_gl; extern crate gfx_window_glutin; extern crate glutin; extern crate richter; extern crate rodio; mod game; use std::cell::RefCell; use std::env; use std::fs::File; use std::net::ToSocketAddrs; use std::path::Path; use std::process::exit; use std::rc::Rc; use richter::client::input::game::MouseWheel; use richter::client::input::{Input, InputFocus}; use richter::client::render::{self, GraphicsPackage}; use richter::client::{self, Client}; use richter::common; use richter::common::console::{CmdRegistry, Console, CvarRegistry}; use richter::common::host::{Host, Program}; use richter::common::vfs::Vfs; use game::Game; use cgmath::{Matrix4, SquareMatrix}; use chrono::Duration; use gfx::Encoder; use gfx_device_gl::{CommandBuffer, Device, Resources}; use glutin::{CursorState, Event, EventsLoop, GlContext, GlWindow, MouseCursor, WindowEvent}; use rodio::Endpoint; enum TitleState { Menu, Console, } enum ProgramState { Title, Game(Game), } struct ClientProgram { vfs: Rc<Vfs>, cvars: Rc<RefCell<CvarRegistry>>, cmds: Rc<RefCell<CmdRegistry>>, console: Rc<RefCell<Console>>, events_loop: RefCell<EventsLoop>, window: RefCell<GlWindow>, gfx_pkg: Rc<RefCell<GraphicsPackage>>, device: RefCell<Device>, encoder: RefCell<Encoder<Resources, CommandBuffer>>, data: RefCell<render::pipe::Data<Resources>>, endpoint: Rc<Endpoint>, state: RefCell<ProgramState>, input: Rc<RefCell<Input>>, } impl ClientProgram { pub fn new() -> ClientProgram { let mut vfs = Vfs::new(); // add basedir first vfs.add_directory(common::DEFAULT_BASEDIR).unwrap(); // then add PAK archives for vfs_id in 0..common::MAX_PAKFILES { // TODO: check `-basedir` command line argument let basedir = common::DEFAULT_BASEDIR; let path_string = format!("{}/pak{}.pak", basedir, vfs_id); let path = Path::new(&path_string); // keep adding PAKs until we don't find one or we hit 
MAX_PAKFILES
            if !path.exists() {
                break;
            }

            vfs.add_pakfile(path).unwrap();
        }

        let cvars = Rc::new(RefCell::new(CvarRegistry::new()));
        client::register_cvars(&cvars.borrow_mut());

        let cmds = Rc::new(RefCell::new(CmdRegistry::new()));
        // TODO: register commands as other subsystems come online

        let console = Rc::new(RefCell::new(Console::new(cmds.clone(), cvars.clone())));
        let input = Rc::new(RefCell::new(Input::new(InputFocus::Game, console.clone())));
        input.borrow_mut().bind_defaults();

        let events_loop = glutin::EventsLoop::new();
        let window_builder = glutin::WindowBuilder::new()
            .with_title("Richter client")
            .with_dimensions(1600, 900);
        let context_builder = glutin::ContextBuilder::new()
            .with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 3)))
            .with_vsync(false);
        let (window, device, mut factory, color, depth) =
            gfx_window_glutin::init::<render::ColorFormat, render::DepthFormat>(
                window_builder,
                context_builder,
                &events_loop,
            );

        use gfx::traits::FactoryExt;
        use gfx::Factory;

        // BUGFIX: previously created with Kind::D2(0, 0, ...); a zero-sized
        // texture is invalid, so use a 1x1 placeholder instead.
        let (_, dummy_texture) = factory
            .create_texture_immutable_u8::<render::ColorFormat>(
                gfx::texture::Kind::D2(1, 1, gfx::texture::AaMode::Single),
                gfx::texture::Mipmap::Allocated,
                &[&[]],
            )
            .expect("dummy texture generation failed");
        let sampler = factory.create_sampler(gfx::texture::SamplerInfo::new(
            gfx::texture::FilterMethod::Scale,
            gfx::texture::WrapMode::Tile,
        ));

        let data = render::pipe::Data {
            vertex_buffer: factory.create_vertex_buffer(&[]),
            transform: Matrix4::identity().into(),
            sampler: (dummy_texture.clone(), sampler.clone()),
            out_color: color.clone(),
            out_depth: depth.clone(),
        };

        let encoder = factory.create_command_buffer().into();
        let endpoint = Rc::new(rodio::get_endpoints_list().next().unwrap());
        let gfx_pkg = Rc::new(RefCell::new(GraphicsPackage::new(
            &vfs,
            factory,
            color,
            depth,
            console.clone(),
        )));

        // this will also execute config.cfg and autoexec.cfg (assuming an unmodified quake.rc)
        console.borrow().stuff_text("exec quake.rc\n");

        ClientProgram {
            vfs: Rc::new(vfs),
cvars, cmds, console, events_loop: RefCell::new(events_loop), window: RefCell::new(window), gfx_pkg, device: RefCell::new(device), encoder: RefCell::new(encoder), data: RefCell::new(data), endpoint, state: RefCell::new(ProgramState::Title), input, } } fn connect<A>(&mut self, server_addrs: A) where A: ToSocketAddrs, { let cl = Client::connect( server_addrs, self.vfs.clone(), self.cvars.clone(), self.cmds.clone(), self.console.clone(), self.endpoint.clone(), ).unwrap(); cl.register_cmds(&mut self.cmds.borrow_mut()); self.state.replace(ProgramState::Game( Game::new( self.vfs.clone(), self.cvars.clone(), self.cmds.clone(), self.gfx_pkg.clone(), self.input.clone(), cl, ).unwrap(), )); } fn render(&mut self) { self.encoder .borrow_mut() .clear(&self.gfx_pkg.borrow().color_target(), [0.0, 0.0, 0.0, 1.0]); self.encoder .borrow_mut() .clear_depth(&self.gfx_pkg.borrow().depth_stencil(), 1.0); let (win_w, win_h) = self.window.borrow().get_inner_size().unwrap(); match *self.state.borrow_mut() { ProgramState::Title => unimplemented!(), ProgramState::Game(ref mut game) => { game.render( &mut self.encoder.borrow_mut(), &mut self.data.borrow_mut(), win_w, win_h, ); } } use std::ops::DerefMut; flame::start("Encoder::flush"); self.encoder .borrow_mut() .flush(self.device.borrow_mut().deref_mut()); flame::end("Encoder::flush"); flame::start("Window::swap_buffers"); self.window.borrow_mut().swap_buffers().unwrap(); flame::end("Window::swap_buffers"); use gfx::Device; flame::start("Device::cleanup"); self.device.borrow_mut().cleanup(); flame::end("Device::cleanup"); } } impl Program for ClientProgram { fn frame(&mut self, frame_duration: Duration) { let _guard = flame::start_guard("ClientProgram::frame"); match *self.state.borrow_mut() { ProgramState::Title => unimplemented!(), ProgramState::Game(ref mut game) => { game.frame(frame_duration); } } flame::start("EventsLoop::poll_events"); self.events_loop .borrow_mut() .poll_events(|event| match event { Event::WindowEvent { event: 
WindowEvent::Closed, .. } => { // TODO: handle quit properly flame::dump_html(File::create("flame.html").unwrap()).unwrap(); std::process::exit(0); } e => match *self.state.borrow_mut() { ProgramState::Title => unimplemented!(), ProgramState::Game(ref mut game) => game.handle_input(e), }, }); flame::end("EventsLoop::poll_events"); match self.input.borrow().current_focus() { InputFocus::Game => { self.window .borrow_mut() .set_cursor_state(CursorState::Grab) .unwrap(); self.window.borrow_mut().set_cursor(MouseCursor::NoneCursor); } _ => { self.window .borrow_mut() .set_cursor_state(CursorState::Normal) .unwrap(); self.window.borrow_mut().set_cursor(MouseCursor::Default); } } // run console commands self.console.borrow().execute(); self.render(); } } fn main() { env_logger::init(); let args: Vec<String> = env::args().collect(); if args.len() != 2 { println!("Usage: {} <server_address>", args[0]); exit(1); } let mut client_program = ClientProgram::new(); client_program.connect(&args[1]); let mut host = Host::new(client_program); loop { host.frame(); } } Change 0x0 dummy texture to 1x1 // Copyright © 2018 Cormac O'Brien // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. extern crate cgmath; extern crate chrono; extern crate env_logger; extern crate failure; extern crate flame; extern crate gfx; extern crate gfx_device_gl; extern crate gfx_window_glutin; extern crate glutin; extern crate richter; extern crate rodio; mod game; use std::cell::RefCell; use std::env; use std::fs::File; use std::net::ToSocketAddrs; use std::path::Path; use std::process::exit; use std::rc::Rc; use richter::client::input::game::MouseWheel; use richter::client::input::{Input, InputFocus}; use richter::client::render::{self, GraphicsPackage}; use richter::client::{self, Client}; use richter::common; use richter::common::console::{CmdRegistry, Console, CvarRegistry}; use richter::common::host::{Host, Program}; use richter::common::vfs::Vfs; use game::Game; use cgmath::{Matrix4, SquareMatrix}; use chrono::Duration; use gfx::Encoder; use gfx_device_gl::{CommandBuffer, Device, Resources}; use glutin::{CursorState, Event, EventsLoop, GlContext, GlWindow, MouseCursor, WindowEvent}; use rodio::Endpoint; enum TitleState { Menu, Console, } enum ProgramState { Title, Game(Game), } struct ClientProgram { vfs: Rc<Vfs>, cvars: Rc<RefCell<CvarRegistry>>, cmds: Rc<RefCell<CmdRegistry>>, console: Rc<RefCell<Console>>, events_loop: RefCell<EventsLoop>, window: RefCell<GlWindow>, gfx_pkg: Rc<RefCell<GraphicsPackage>>, device: RefCell<Device>, encoder: RefCell<Encoder<Resources, CommandBuffer>>, data: RefCell<render::pipe::Data<Resources>>, endpoint: Rc<Endpoint>, state: RefCell<ProgramState>, input: Rc<RefCell<Input>>, } impl ClientProgram { pub fn new() -> ClientProgram { let mut vfs = Vfs::new(); // add basedir first vfs.add_directory(common::DEFAULT_BASEDIR).unwrap(); // then add PAK archives for vfs_id in 
0..common::MAX_PAKFILES { // TODO: check `-basedir` command line argument let basedir = common::DEFAULT_BASEDIR; let path_string = format!("{}/pak{}.pak", basedir, vfs_id); let path = Path::new(&path_string); // keep adding PAKs until we don't find one or we hit MAX_PAKFILES if !path.exists() { break; } vfs.add_pakfile(path).unwrap(); } let cvars = Rc::new(RefCell::new(CvarRegistry::new())); client::register_cvars(&cvars.borrow_mut()); let cmds = Rc::new(RefCell::new(CmdRegistry::new())); // TODO: register commands as other subsystems come online let console = Rc::new(RefCell::new(Console::new(cmds.clone(), cvars.clone()))); let input = Rc::new(RefCell::new(Input::new(InputFocus::Game, console.clone()))); input.borrow_mut().bind_defaults(); let events_loop = glutin::EventsLoop::new(); let window_builder = glutin::WindowBuilder::new() .with_title("Richter client") .with_dimensions(1600, 900); let context_builder = glutin::ContextBuilder::new() .with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 3))) .with_vsync(false); let (window, device, mut factory, color, depth) = gfx_window_glutin::init::<render::ColorFormat, render::DepthFormat>( window_builder, context_builder, &events_loop, ); use gfx::traits::FactoryExt; use gfx::Factory; let (_, dummy_texture) = factory .create_texture_immutable_u8::<render::ColorFormat>( gfx::texture::Kind::D2(1, 1, gfx::texture::AaMode::Single), gfx::texture::Mipmap::Allocated, &[&[]], ) .expect("dummy texture generation failed"); let sampler = factory.create_sampler(gfx::texture::SamplerInfo::new( gfx::texture::FilterMethod::Scale, gfx::texture::WrapMode::Tile, )); let data = render::pipe::Data { vertex_buffer: factory.create_vertex_buffer(&[]), transform: Matrix4::identity().into(), sampler: (dummy_texture.clone(), sampler.clone()), out_color: color.clone(), out_depth: depth.clone(), }; let encoder = factory.create_command_buffer().into(); let endpoint = Rc::new(rodio::get_endpoints_list().next().unwrap()); let gfx_pkg = 
Rc::new(RefCell::new(GraphicsPackage::new( &vfs, factory, color, depth, console.clone(), ))); // this will also execute config.cfg and autoexec.cfg (assuming an unmodified quake.rc) console.borrow().stuff_text("exec quake.rc\n"); ClientProgram { vfs: Rc::new(vfs), cvars, cmds, console, events_loop: RefCell::new(events_loop), window: RefCell::new(window), gfx_pkg, device: RefCell::new(device), encoder: RefCell::new(encoder), data: RefCell::new(data), endpoint, state: RefCell::new(ProgramState::Title), input, } } fn connect<A>(&mut self, server_addrs: A) where A: ToSocketAddrs, { let cl = Client::connect( server_addrs, self.vfs.clone(), self.cvars.clone(), self.cmds.clone(), self.console.clone(), self.endpoint.clone(), ).unwrap(); cl.register_cmds(&mut self.cmds.borrow_mut()); self.state.replace(ProgramState::Game( Game::new( self.vfs.clone(), self.cvars.clone(), self.cmds.clone(), self.gfx_pkg.clone(), self.input.clone(), cl, ).unwrap(), )); } fn render(&mut self) { self.encoder .borrow_mut() .clear(&self.gfx_pkg.borrow().color_target(), [0.0, 0.0, 0.0, 1.0]); self.encoder .borrow_mut() .clear_depth(&self.gfx_pkg.borrow().depth_stencil(), 1.0); let (win_w, win_h) = self.window.borrow().get_inner_size().unwrap(); match *self.state.borrow_mut() { ProgramState::Title => unimplemented!(), ProgramState::Game(ref mut game) => { game.render( &mut self.encoder.borrow_mut(), &mut self.data.borrow_mut(), win_w, win_h, ); } } use std::ops::DerefMut; flame::start("Encoder::flush"); self.encoder .borrow_mut() .flush(self.device.borrow_mut().deref_mut()); flame::end("Encoder::flush"); flame::start("Window::swap_buffers"); self.window.borrow_mut().swap_buffers().unwrap(); flame::end("Window::swap_buffers"); use gfx::Device; flame::start("Device::cleanup"); self.device.borrow_mut().cleanup(); flame::end("Device::cleanup"); } } impl Program for ClientProgram { fn frame(&mut self, frame_duration: Duration) { let _guard = flame::start_guard("ClientProgram::frame"); match 
*self.state.borrow_mut() { ProgramState::Title => unimplemented!(), ProgramState::Game(ref mut game) => { game.frame(frame_duration); } } flame::start("EventsLoop::poll_events"); self.events_loop .borrow_mut() .poll_events(|event| match event { Event::WindowEvent { event: WindowEvent::Closed, .. } => { // TODO: handle quit properly flame::dump_html(File::create("flame.html").unwrap()).unwrap(); std::process::exit(0); } e => match *self.state.borrow_mut() { ProgramState::Title => unimplemented!(), ProgramState::Game(ref mut game) => game.handle_input(e), }, }); flame::end("EventsLoop::poll_events"); match self.input.borrow().current_focus() { InputFocus::Game => { self.window .borrow_mut() .set_cursor_state(CursorState::Grab) .unwrap(); self.window.borrow_mut().set_cursor(MouseCursor::NoneCursor); } _ => { self.window .borrow_mut() .set_cursor_state(CursorState::Normal) .unwrap(); self.window.borrow_mut().set_cursor(MouseCursor::Default); } } // run console commands self.console.borrow().execute(); self.render(); } } fn main() { env_logger::init(); let args: Vec<String> = env::args().collect(); if args.len() != 2 { println!("Usage: {} <server_address>", args[0]); exit(1); } let mut client_program = ClientProgram::new(); client_program.connect(&args[1]); let mut host = Host::new(client_program); loop { host.frame(); } }
// Copyright 2019 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{ control_request_type, descriptor, ConfigDescriptorTree, ControlRequestDataPhaseTransferDirection, ControlRequestRecipient, ControlRequestType, DeviceDescriptor, DeviceDescriptorTree, Error, Result, StandardControlRequest, }; use base::{handle_eintr_errno, AsRawDescriptor, IoctlNr, RawDescriptor}; use data_model::vec_with_array_field; use libc::{EAGAIN, ENODEV, ENOENT}; use std::convert::TryInto; use std::fs::File; use std::io::{Read, Seek, SeekFrom}; use std::mem::size_of_val; use std::os::raw::{c_int, c_uint, c_void}; use std::sync::Arc; /// Device represents a USB device. pub struct Device { fd: Arc<File>, device_descriptor_tree: DeviceDescriptorTree, } /// Transfer contains the information necessary to submit a USB request /// and, once it has been submitted and completed, contains the response. pub struct Transfer { // NOTE: This Vec is actually a single URB with a trailing // variable-length field created by vec_with_array_field(). urb: Vec<usb_sys::usbdevfs_urb>, pub buffer: Vec<u8>, callback: Option<Box<dyn Fn(Transfer) + Send + Sync>>, } /// TransferHandle is a handle that allows cancellation of in-flight transfers /// between submit_transfer() and get_completed_transfer(). /// Attempting to cancel a transfer that has already completed is safe and will /// return an error. pub struct TransferHandle { weak_transfer: std::sync::Weak<Transfer>, fd: std::sync::Weak<File>, } #[derive(PartialEq)] pub enum TransferStatus { Completed, Error, Cancelled, NoDevice, } impl Device { /// Create a new `Device` from a file descriptor. /// `fd` should be a file in usbdevfs (e.g. `/dev/bus/usb/001/002`). 
pub fn new(mut fd: File) -> Result<Self> { fd.seek(SeekFrom::Start(0)).map_err(Error::DescriptorRead)?; let mut descriptor_data = Vec::new(); fd.read_to_end(&mut descriptor_data) .map_err(Error::DescriptorRead)?; let device_descriptor_tree = descriptor::parse_usbfs_descriptors(&descriptor_data)?; let device = Device { fd: Arc::new(fd), device_descriptor_tree, }; Ok(device) } pub fn fd(&self) -> Arc<File> { self.fd.clone() } unsafe fn ioctl(&self, nr: IoctlNr) -> Result<i32> { let ret = handle_eintr_errno!(base::ioctl(&*self.fd, nr)); if ret < 0 { return Err(Error::IoctlFailed(nr, base::Error::last())); } Ok(ret) } unsafe fn ioctl_with_ref<T>(&self, nr: IoctlNr, arg: &T) -> Result<i32> { let ret = handle_eintr_errno!(base::ioctl_with_ref(&*self.fd, nr, arg)); if ret < 0 { return Err(Error::IoctlFailed(nr, base::Error::last())); } Ok(ret) } unsafe fn ioctl_with_mut_ref<T>(&self, nr: IoctlNr, arg: &mut T) -> Result<i32> { let ret = handle_eintr_errno!(base::ioctl_with_mut_ref(&*self.fd, nr, arg)); if ret < 0 { return Err(Error::IoctlFailed(nr, base::Error::last())); } Ok(ret) } unsafe fn ioctl_with_mut_ptr<T>(&self, nr: IoctlNr, arg: *mut T) -> Result<i32> { let ret = handle_eintr_errno!(base::ioctl_with_mut_ptr(&*self.fd, nr, arg)); if ret < 0 { return Err(Error::IoctlFailed(nr, base::Error::last())); } Ok(ret) } /// Submit a transfer to the device. /// The transfer will be processed asynchronously by the device. /// Call `poll_transfers()` on this device to check for completed transfers. pub fn submit_transfer(&mut self, transfer: Transfer) -> Result<TransferHandle> { let mut rc_transfer = Arc::new(transfer); // Technically, Arc::from_raw() should only be called on pointers returned // from Arc::into_raw(). However, we need to stash this value inside the // Arc<Transfer> itself, so we manually calculate the address that would be // returned from Arc::into_raw() via Deref and then call Arc::into_raw() // to forget the Arc without dropping its contents. 
// Do not remove the into_raw() call! let raw_transfer = (&*rc_transfer) as *const Transfer as usize; match Arc::get_mut(&mut rc_transfer) { Some(t) => t.urb_mut().usercontext = raw_transfer, None => { // This should never happen, since there is only one strong reference // at this point. return Err(Error::RcGetMutFailed); } } let _ = Arc::into_raw(rc_transfer.clone()); let urb_ptr = rc_transfer.urb.as_ptr() as *mut usb_sys::usbdevfs_urb; // Safe because we control the lifetime of the URB via Arc::into_raw() and // Arc::from_raw() in poll_transfers(). unsafe { self.ioctl_with_mut_ptr(usb_sys::USBDEVFS_SUBMITURB(), urb_ptr)?; } let weak_transfer = Arc::downgrade(&rc_transfer); Ok(TransferHandle { weak_transfer, fd: Arc::downgrade(&self.fd), }) } /// Check for completed asynchronous transfers submitted via `submit_transfer()`. /// The callback for each completed transfer will be called. pub fn poll_transfers(&self) -> Result<()> { // Reap completed transfers until we get EAGAIN. loop { let mut urb_ptr: *mut usb_sys::usbdevfs_urb = std::ptr::null_mut(); // Safe because we provide a valid urb_ptr to be filled by the kernel. let result = unsafe { self.ioctl_with_mut_ref(usb_sys::USBDEVFS_REAPURBNDELAY(), &mut urb_ptr) }; match result { Err(Error::IoctlFailed(_nr, e)) => { if e.errno() == EAGAIN { // No more completed transfers right now. break; } } Err(e) => return Err(e), Ok(_) => {} } if urb_ptr.is_null() { break; } // Safe because the URB usercontext field is always set to the result of // Arc::into_raw() in submit_transfer(). let rc_transfer: Arc<Transfer> = unsafe { Arc::from_raw((*urb_ptr).usercontext as *const Transfer) }; // There should always be exactly one strong reference to rc_transfer, // so try_unwrap() should never fail. let mut transfer = Arc::try_unwrap(rc_transfer).map_err(|_| Error::RcUnwrapFailed)?; if let Some(cb) = transfer.callback.take() { cb(transfer); } } Ok(()) } /// Perform a USB port reset to reinitialize a device. 
pub fn reset(&self) -> Result<()> { // TODO(dverkamp): re-enable reset once crbug.com/1058059 is resolved. // Skip reset for all non-Edge TPU devices. let vid = self.device_descriptor_tree.idVendor; let pid = self.device_descriptor_tree.idProduct; match (vid, pid) { (0x1a6e, 0x089a) => (), _ => return Ok(()), } // Safe because self.fd is a valid usbdevfs file descriptor. let result = unsafe { self.ioctl(usb_sys::USBDEVFS_RESET()) }; if let Err(Error::IoctlFailed(_nr, errno_err)) = result { // The device may disappear after a reset if e.g. its firmware changed. // Treat that as success. if errno_err.errno() == libc::ENODEV { return Ok(()); } } result?; Ok(()) } /// Claim an interface on this device. pub fn claim_interface(&self, interface_number: u8) -> Result<()> { let disconnect_claim = usb_sys::usbdevfs_disconnect_claim { interface: interface_number.into(), flags: 0, driver: [0u8; 256], }; // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevs_disconnect_claim structure. unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_DISCONNECT_CLAIM(), &disconnect_claim)?; } Ok(()) } /// Release an interface previously claimed with `claim_interface()`. pub fn release_interface(&self, interface_number: u8) -> Result<()> { let ifnum: c_uint = interface_number.into(); // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to unsigned int. unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_RELEASEINTERFACE(), &ifnum)?; } Ok(()) } /// Activate an alternate setting for an interface. pub fn set_interface_alt_setting( &self, interface_number: u8, alternative_setting: u8, ) -> Result<()> { let setinterface = usb_sys::usbdevfs_setinterface { interface: interface_number.into(), altsetting: alternative_setting.into(), }; // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevfs_setinterface structure. 
unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_SETINTERFACE(), &setinterface)?; } Ok(()) } /// Set active configuration for this device. pub fn set_active_configuration(&mut self, config: u8) -> Result<()> { let config: c_int = config.into(); // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to int. unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_SETCONFIGURATION(), &config)?; } Ok(()) } /// Get the device descriptor of this device. pub fn get_device_descriptor(&self) -> Result<DeviceDescriptor> { Ok(*self.device_descriptor_tree) } pub fn get_device_descriptor_tree(&self) -> &DeviceDescriptorTree { &self.device_descriptor_tree } /// Get active config descriptor of this device. pub fn get_config_descriptor(&self, config: u8) -> Result<ConfigDescriptorTree> { match self.device_descriptor_tree.get_config_descriptor(config) { Some(config_descriptor) => Ok(config_descriptor.clone()), None => Err(Error::NoSuchDescriptor), } } /// Get a configuration descriptor by its index within the list of descriptors returned /// by the device. pub fn get_config_descriptor_by_index(&self, config_index: u8) -> Result<ConfigDescriptorTree> { match self .device_descriptor_tree .get_config_descriptor_by_index(config_index) { Some(config_descriptor) => Ok(config_descriptor.clone()), None => Err(Error::NoSuchDescriptor), } } /// Get bConfigurationValue of the currently active configuration. pub fn get_active_configuration(&self) -> Result<u8> { // If the device only exposes a single configuration, bypass the control transfer below // by looking up the configuration value from the descriptor. if self.device_descriptor_tree.bNumConfigurations == 1 { if let Some(config_descriptor) = self .device_descriptor_tree .get_config_descriptor_by_index(0) { return Ok(config_descriptor.bConfigurationValue); } } // Send a synchronous control transfer to get the active configuration. 
let mut active_config: u8 = 0; let ctrl_transfer = usb_sys::usbdevfs_ctrltransfer { bRequestType: control_request_type( ControlRequestType::Standard, ControlRequestDataPhaseTransferDirection::DeviceToHost, ControlRequestRecipient::Device, ), bRequest: StandardControlRequest::GetConfiguration as u8, wValue: 0, wIndex: 0, wLength: size_of_val(&active_config) as u16, timeout: 5000, // milliseconds data: &mut active_config as *mut u8 as *mut c_void, }; // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevfs_ctrltransfer structure. unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_CONTROL(), &ctrl_transfer)?; } Ok(active_config) } /// Get the total number of configurations for this device. pub fn get_num_configurations(&self) -> u8 { self.device_descriptor_tree.bNumConfigurations } /// Clear the halt/stall condition for an endpoint. pub fn clear_halt(&self, ep_addr: u8) -> Result<()> { let endpoint: c_uint = ep_addr.into(); // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to unsigned int. unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_CLEAR_HALT(), &endpoint)?; } Ok(()) } } impl AsRawDescriptor for Device { fn as_raw_descriptor(&self) -> RawDescriptor { self.fd.as_raw_descriptor() } } impl Transfer { fn urb(&self) -> &usb_sys::usbdevfs_urb { // self.urb is a Vec created with `vec_with_array_field`; the first entry is // the URB itself. 
&self.urb[0] } fn urb_mut(&mut self) -> &mut usb_sys::usbdevfs_urb { &mut self.urb[0] } fn new( transfer_type: u8, endpoint: u8, buffer: Vec<u8>, iso_packets: &[usb_sys::usbdevfs_iso_packet_desc], ) -> Result<Transfer> { let mut transfer = Transfer { urb: vec_with_array_field::<usb_sys::usbdevfs_urb, usb_sys::usbdevfs_iso_packet_desc>( iso_packets.len(), ), buffer, callback: None, }; transfer.urb_mut().urb_type = transfer_type; transfer.urb_mut().endpoint = endpoint; transfer.urb_mut().buffer = transfer.buffer.as_mut_ptr() as *mut c_void; transfer.urb_mut().buffer_length = transfer .buffer .len() .try_into() .map_err(Error::InvalidBufferLength)?; // Safe because we ensured there is enough space in transfer.urb to hold the number of // isochronous frames required. let iso_frame_desc = unsafe { transfer .urb_mut() .iso_frame_desc .as_mut_slice(iso_packets.len()) }; iso_frame_desc.copy_from_slice(iso_packets); Ok(transfer) } /// Create a control transfer. pub fn new_control(buffer: Vec<u8>) -> Result<Transfer> { let endpoint = 0; Self::new(usb_sys::USBDEVFS_URB_TYPE_CONTROL, endpoint, buffer, &[]) } /// Create an interrupt transfer. pub fn new_interrupt(endpoint: u8, buffer: Vec<u8>) -> Result<Transfer> { Self::new(usb_sys::USBDEVFS_URB_TYPE_INTERRUPT, endpoint, buffer, &[]) } /// Create a bulk transfer. pub fn new_bulk(endpoint: u8, buffer: Vec<u8>) -> Result<Transfer> { Self::new(usb_sys::USBDEVFS_URB_TYPE_BULK, endpoint, buffer, &[]) } /// Create an isochronous transfer. pub fn new_isochronous(endpoint: u8, buffer: Vec<u8>) -> Result<Transfer> { // TODO(dverkamp): allow user to specify iso descriptors Self::new(usb_sys::USBDEVFS_URB_TYPE_ISO, endpoint, buffer, &[]) } /// Get the status of a completed transfer. 
pub fn status(&self) -> TransferStatus { let status = self.urb().status; if status == 0 { TransferStatus::Completed } else if status == -ENODEV { TransferStatus::NoDevice } else if status == -ENOENT { TransferStatus::Cancelled } else { TransferStatus::Error } } /// Get the actual amount of data transferred, which may be less than /// the original length. pub fn actual_length(&self) -> usize { self.urb().actual_length as usize } /// Set callback function for transfer completion. pub fn set_callback<C: 'static + Fn(Transfer) + Send + Sync>(&mut self, cb: C) { self.callback = Some(Box::new(cb)); } } impl TransferHandle { /// Attempt to cancel the transfer associated with this `TransferHandle`. /// Safe to call even if the transfer has already completed; /// `Error::TransferAlreadyCompleted` will be returned in this case. pub fn cancel(self) -> Result<()> { let rc_transfer = match self.weak_transfer.upgrade() { None => return Err(Error::TransferAlreadyCompleted), Some(rc_transfer) => rc_transfer, }; let urb_ptr = rc_transfer.urb.as_ptr() as *mut usb_sys::usbdevfs_urb; let fd = match self.fd.upgrade() { None => return Err(Error::NoDevice), Some(fd) => fd, }; // Safe because fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevfs_urb structure. if unsafe { handle_eintr_errno!(base::ioctl_with_mut_ptr( &*fd, usb_sys::USBDEVFS_DISCARDURB(), urb_ptr )) } < 0 { return Err(Error::IoctlFailed( usb_sys::USBDEVFS_DISCARDURB(), base::Error::last(), )); } Ok(()) } } usb_util: do not silently drop non-EAGAIN errors Fix the error check in the poll_transfers() loop so that ioctl errors other than EAGAIN are propagated to the caller. 
BUG=chromium:1278424 TEST=Connect USB device to Crostini Change-Id: I5508550011101a98e4d32099ac0044a3d9137018 Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/crosvm/+/3335303 Reviewed-by: Dmitry Torokhov <10a8c465cefc9bdd6c925e26964d23c90f1141cc@chromium.org> Tested-by: kokoro <2ac7b1f3fa578934c95181d4272be0d3bca00121@google.com> Reviewed-by: Chirantan Ekbote <35787c0e27de5c48735e07f8324823b765e8bcbc@chromium.org> Commit-Queue: Daniel Verkamp <72bc170b46ec491f7bdd4359a1c0bfed274de40c@chromium.org> // Copyright 2019 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{ control_request_type, descriptor, ConfigDescriptorTree, ControlRequestDataPhaseTransferDirection, ControlRequestRecipient, ControlRequestType, DeviceDescriptor, DeviceDescriptorTree, Error, Result, StandardControlRequest, }; use base::{handle_eintr_errno, AsRawDescriptor, IoctlNr, RawDescriptor}; use data_model::vec_with_array_field; use libc::{EAGAIN, ENODEV, ENOENT}; use std::convert::TryInto; use std::fs::File; use std::io::{Read, Seek, SeekFrom}; use std::mem::size_of_val; use std::os::raw::{c_int, c_uint, c_void}; use std::sync::Arc; /// Device represents a USB device. pub struct Device { fd: Arc<File>, device_descriptor_tree: DeviceDescriptorTree, } /// Transfer contains the information necessary to submit a USB request /// and, once it has been submitted and completed, contains the response. pub struct Transfer { // NOTE: This Vec is actually a single URB with a trailing // variable-length field created by vec_with_array_field(). urb: Vec<usb_sys::usbdevfs_urb>, pub buffer: Vec<u8>, callback: Option<Box<dyn Fn(Transfer) + Send + Sync>>, } /// TransferHandle is a handle that allows cancellation of in-flight transfers /// between submit_transfer() and get_completed_transfer(). 
/// Attempting to cancel a transfer that has already completed is safe and will /// return an error. pub struct TransferHandle { weak_transfer: std::sync::Weak<Transfer>, fd: std::sync::Weak<File>, } #[derive(PartialEq)] pub enum TransferStatus { Completed, Error, Cancelled, NoDevice, } impl Device { /// Create a new `Device` from a file descriptor. /// `fd` should be a file in usbdevfs (e.g. `/dev/bus/usb/001/002`). pub fn new(mut fd: File) -> Result<Self> { fd.seek(SeekFrom::Start(0)).map_err(Error::DescriptorRead)?; let mut descriptor_data = Vec::new(); fd.read_to_end(&mut descriptor_data) .map_err(Error::DescriptorRead)?; let device_descriptor_tree = descriptor::parse_usbfs_descriptors(&descriptor_data)?; let device = Device { fd: Arc::new(fd), device_descriptor_tree, }; Ok(device) } pub fn fd(&self) -> Arc<File> { self.fd.clone() } unsafe fn ioctl(&self, nr: IoctlNr) -> Result<i32> { let ret = handle_eintr_errno!(base::ioctl(&*self.fd, nr)); if ret < 0 { return Err(Error::IoctlFailed(nr, base::Error::last())); } Ok(ret) } unsafe fn ioctl_with_ref<T>(&self, nr: IoctlNr, arg: &T) -> Result<i32> { let ret = handle_eintr_errno!(base::ioctl_with_ref(&*self.fd, nr, arg)); if ret < 0 { return Err(Error::IoctlFailed(nr, base::Error::last())); } Ok(ret) } unsafe fn ioctl_with_mut_ref<T>(&self, nr: IoctlNr, arg: &mut T) -> Result<i32> { let ret = handle_eintr_errno!(base::ioctl_with_mut_ref(&*self.fd, nr, arg)); if ret < 0 { return Err(Error::IoctlFailed(nr, base::Error::last())); } Ok(ret) } unsafe fn ioctl_with_mut_ptr<T>(&self, nr: IoctlNr, arg: *mut T) -> Result<i32> { let ret = handle_eintr_errno!(base::ioctl_with_mut_ptr(&*self.fd, nr, arg)); if ret < 0 { return Err(Error::IoctlFailed(nr, base::Error::last())); } Ok(ret) } /// Submit a transfer to the device. /// The transfer will be processed asynchronously by the device. /// Call `poll_transfers()` on this device to check for completed transfers. 
pub fn submit_transfer(&mut self, transfer: Transfer) -> Result<TransferHandle> { let mut rc_transfer = Arc::new(transfer); // Technically, Arc::from_raw() should only be called on pointers returned // from Arc::into_raw(). However, we need to stash this value inside the // Arc<Transfer> itself, so we manually calculate the address that would be // returned from Arc::into_raw() via Deref and then call Arc::into_raw() // to forget the Arc without dropping its contents. // Do not remove the into_raw() call! let raw_transfer = (&*rc_transfer) as *const Transfer as usize; match Arc::get_mut(&mut rc_transfer) { Some(t) => t.urb_mut().usercontext = raw_transfer, None => { // This should never happen, since there is only one strong reference // at this point. return Err(Error::RcGetMutFailed); } } let _ = Arc::into_raw(rc_transfer.clone()); let urb_ptr = rc_transfer.urb.as_ptr() as *mut usb_sys::usbdevfs_urb; // Safe because we control the lifetime of the URB via Arc::into_raw() and // Arc::from_raw() in poll_transfers(). unsafe { self.ioctl_with_mut_ptr(usb_sys::USBDEVFS_SUBMITURB(), urb_ptr)?; } let weak_transfer = Arc::downgrade(&rc_transfer); Ok(TransferHandle { weak_transfer, fd: Arc::downgrade(&self.fd), }) } /// Check for completed asynchronous transfers submitted via `submit_transfer()`. /// The callback for each completed transfer will be called. pub fn poll_transfers(&self) -> Result<()> { // Reap completed transfers until we get EAGAIN. loop { let mut urb_ptr: *mut usb_sys::usbdevfs_urb = std::ptr::null_mut(); // Safe because we provide a valid urb_ptr to be filled by the kernel. let result = unsafe { self.ioctl_with_mut_ref(usb_sys::USBDEVFS_REAPURBNDELAY(), &mut urb_ptr) }; match result { // EAGAIN indicates no more completed transfers right now. 
Err(Error::IoctlFailed(_nr, e)) if e.errno() == EAGAIN => break, Err(e) => return Err(e), Ok(_) => {} } if urb_ptr.is_null() { break; } // Safe because the URB usercontext field is always set to the result of // Arc::into_raw() in submit_transfer(). let rc_transfer: Arc<Transfer> = unsafe { Arc::from_raw((*urb_ptr).usercontext as *const Transfer) }; // There should always be exactly one strong reference to rc_transfer, // so try_unwrap() should never fail. let mut transfer = Arc::try_unwrap(rc_transfer).map_err(|_| Error::RcUnwrapFailed)?; if let Some(cb) = transfer.callback.take() { cb(transfer); } } Ok(()) } /// Perform a USB port reset to reinitialize a device. pub fn reset(&self) -> Result<()> { // TODO(dverkamp): re-enable reset once crbug.com/1058059 is resolved. // Skip reset for all non-Edge TPU devices. let vid = self.device_descriptor_tree.idVendor; let pid = self.device_descriptor_tree.idProduct; match (vid, pid) { (0x1a6e, 0x089a) => (), _ => return Ok(()), } // Safe because self.fd is a valid usbdevfs file descriptor. let result = unsafe { self.ioctl(usb_sys::USBDEVFS_RESET()) }; if let Err(Error::IoctlFailed(_nr, errno_err)) = result { // The device may disappear after a reset if e.g. its firmware changed. // Treat that as success. if errno_err.errno() == libc::ENODEV { return Ok(()); } } result?; Ok(()) } /// Claim an interface on this device. pub fn claim_interface(&self, interface_number: u8) -> Result<()> { let disconnect_claim = usb_sys::usbdevfs_disconnect_claim { interface: interface_number.into(), flags: 0, driver: [0u8; 256], }; // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevs_disconnect_claim structure. unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_DISCONNECT_CLAIM(), &disconnect_claim)?; } Ok(()) } /// Release an interface previously claimed with `claim_interface()`. 
pub fn release_interface(&self, interface_number: u8) -> Result<()> { let ifnum: c_uint = interface_number.into(); // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to unsigned int. unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_RELEASEINTERFACE(), &ifnum)?; } Ok(()) } /// Activate an alternate setting for an interface. pub fn set_interface_alt_setting( &self, interface_number: u8, alternative_setting: u8, ) -> Result<()> { let setinterface = usb_sys::usbdevfs_setinterface { interface: interface_number.into(), altsetting: alternative_setting.into(), }; // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevfs_setinterface structure. unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_SETINTERFACE(), &setinterface)?; } Ok(()) } /// Set active configuration for this device. pub fn set_active_configuration(&mut self, config: u8) -> Result<()> { let config: c_int = config.into(); // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to int. unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_SETCONFIGURATION(), &config)?; } Ok(()) } /// Get the device descriptor of this device. pub fn get_device_descriptor(&self) -> Result<DeviceDescriptor> { Ok(*self.device_descriptor_tree) } pub fn get_device_descriptor_tree(&self) -> &DeviceDescriptorTree { &self.device_descriptor_tree } /// Get active config descriptor of this device. pub fn get_config_descriptor(&self, config: u8) -> Result<ConfigDescriptorTree> { match self.device_descriptor_tree.get_config_descriptor(config) { Some(config_descriptor) => Ok(config_descriptor.clone()), None => Err(Error::NoSuchDescriptor), } } /// Get a configuration descriptor by its index within the list of descriptors returned /// by the device. 
pub fn get_config_descriptor_by_index(&self, config_index: u8) -> Result<ConfigDescriptorTree> { match self .device_descriptor_tree .get_config_descriptor_by_index(config_index) { Some(config_descriptor) => Ok(config_descriptor.clone()), None => Err(Error::NoSuchDescriptor), } } /// Get bConfigurationValue of the currently active configuration. pub fn get_active_configuration(&self) -> Result<u8> { // If the device only exposes a single configuration, bypass the control transfer below // by looking up the configuration value from the descriptor. if self.device_descriptor_tree.bNumConfigurations == 1 { if let Some(config_descriptor) = self .device_descriptor_tree .get_config_descriptor_by_index(0) { return Ok(config_descriptor.bConfigurationValue); } } // Send a synchronous control transfer to get the active configuration. let mut active_config: u8 = 0; let ctrl_transfer = usb_sys::usbdevfs_ctrltransfer { bRequestType: control_request_type( ControlRequestType::Standard, ControlRequestDataPhaseTransferDirection::DeviceToHost, ControlRequestRecipient::Device, ), bRequest: StandardControlRequest::GetConfiguration as u8, wValue: 0, wIndex: 0, wLength: size_of_val(&active_config) as u16, timeout: 5000, // milliseconds data: &mut active_config as *mut u8 as *mut c_void, }; // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevfs_ctrltransfer structure. unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_CONTROL(), &ctrl_transfer)?; } Ok(active_config) } /// Get the total number of configurations for this device. pub fn get_num_configurations(&self) -> u8 { self.device_descriptor_tree.bNumConfigurations } /// Clear the halt/stall condition for an endpoint. pub fn clear_halt(&self, ep_addr: u8) -> Result<()> { let endpoint: c_uint = ep_addr.into(); // Safe because self.fd is a valid usbdevfs file descriptor and we pass a valid // pointer to unsigned int. 
unsafe { self.ioctl_with_ref(usb_sys::USBDEVFS_CLEAR_HALT(), &endpoint)?; } Ok(()) } } impl AsRawDescriptor for Device { fn as_raw_descriptor(&self) -> RawDescriptor { self.fd.as_raw_descriptor() } } impl Transfer { fn urb(&self) -> &usb_sys::usbdevfs_urb { // self.urb is a Vec created with `vec_with_array_field`; the first entry is // the URB itself. &self.urb[0] } fn urb_mut(&mut self) -> &mut usb_sys::usbdevfs_urb { &mut self.urb[0] } fn new( transfer_type: u8, endpoint: u8, buffer: Vec<u8>, iso_packets: &[usb_sys::usbdevfs_iso_packet_desc], ) -> Result<Transfer> { let mut transfer = Transfer { urb: vec_with_array_field::<usb_sys::usbdevfs_urb, usb_sys::usbdevfs_iso_packet_desc>( iso_packets.len(), ), buffer, callback: None, }; transfer.urb_mut().urb_type = transfer_type; transfer.urb_mut().endpoint = endpoint; transfer.urb_mut().buffer = transfer.buffer.as_mut_ptr() as *mut c_void; transfer.urb_mut().buffer_length = transfer .buffer .len() .try_into() .map_err(Error::InvalidBufferLength)?; // Safe because we ensured there is enough space in transfer.urb to hold the number of // isochronous frames required. let iso_frame_desc = unsafe { transfer .urb_mut() .iso_frame_desc .as_mut_slice(iso_packets.len()) }; iso_frame_desc.copy_from_slice(iso_packets); Ok(transfer) } /// Create a control transfer. pub fn new_control(buffer: Vec<u8>) -> Result<Transfer> { let endpoint = 0; Self::new(usb_sys::USBDEVFS_URB_TYPE_CONTROL, endpoint, buffer, &[]) } /// Create an interrupt transfer. pub fn new_interrupt(endpoint: u8, buffer: Vec<u8>) -> Result<Transfer> { Self::new(usb_sys::USBDEVFS_URB_TYPE_INTERRUPT, endpoint, buffer, &[]) } /// Create a bulk transfer. pub fn new_bulk(endpoint: u8, buffer: Vec<u8>) -> Result<Transfer> { Self::new(usb_sys::USBDEVFS_URB_TYPE_BULK, endpoint, buffer, &[]) } /// Create an isochronous transfer. 
pub fn new_isochronous(endpoint: u8, buffer: Vec<u8>) -> Result<Transfer> { // TODO(dverkamp): allow user to specify iso descriptors Self::new(usb_sys::USBDEVFS_URB_TYPE_ISO, endpoint, buffer, &[]) } /// Get the status of a completed transfer. pub fn status(&self) -> TransferStatus { let status = self.urb().status; if status == 0 { TransferStatus::Completed } else if status == -ENODEV { TransferStatus::NoDevice } else if status == -ENOENT { TransferStatus::Cancelled } else { TransferStatus::Error } } /// Get the actual amount of data transferred, which may be less than /// the original length. pub fn actual_length(&self) -> usize { self.urb().actual_length as usize } /// Set callback function for transfer completion. pub fn set_callback<C: 'static + Fn(Transfer) + Send + Sync>(&mut self, cb: C) { self.callback = Some(Box::new(cb)); } } impl TransferHandle { /// Attempt to cancel the transfer associated with this `TransferHandle`. /// Safe to call even if the transfer has already completed; /// `Error::TransferAlreadyCompleted` will be returned in this case. pub fn cancel(self) -> Result<()> { let rc_transfer = match self.weak_transfer.upgrade() { None => return Err(Error::TransferAlreadyCompleted), Some(rc_transfer) => rc_transfer, }; let urb_ptr = rc_transfer.urb.as_ptr() as *mut usb_sys::usbdevfs_urb; let fd = match self.fd.upgrade() { None => return Err(Error::NoDevice), Some(fd) => fd, }; // Safe because fd is a valid usbdevfs file descriptor and we pass a valid // pointer to a usbdevfs_urb structure. if unsafe { handle_eintr_errno!(base::ioctl_with_mut_ptr( &*fd, usb_sys::USBDEVFS_DISCARDURB(), urb_ptr )) } < 0 { return Err(Error::IoctlFailed( usb_sys::USBDEVFS_DISCARDURB(), base::Error::last(), )); } Ok(()) } }
extern crate document; use document::Nodeset; use super::XPathEvaluationContext; use super::XPathNodeTest; use super::XPathValue; use super::{Boolean,Number,String,Nodes}; use super::axis::XPathAxis; pub trait XPathExpression { fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue; } type SubExpression = Box<XPathExpression + 'static>; pub struct ExpressionAnd { pub left: SubExpression, pub right: SubExpression, } impl XPathExpression for ExpressionAnd { fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue { Boolean(self.left.evaluate(context).boolean() && self.right.evaluate(context).boolean()) } } pub struct ExpressionContextNode; impl XPathExpression for ExpressionContextNode { fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue { let mut result = Nodeset::new(); result.add(context.node().clone()); Nodes(result.clone()) } } pub struct ExpressionEqual { pub left: SubExpression, pub right: SubExpression, } impl ExpressionEqual { fn boolean_evaluate(& self, context: &XPathEvaluationContext) -> bool { let left_val = self.left.evaluate(context); let right_val = self.right.evaluate(context); match (&left_val, &right_val) { (&Boolean(_), _) | (_, &Boolean(_)) => left_val.boolean() == right_val.boolean(), (&Number(_), _) | (_, &Number(_)) => left_val.number() == right_val.number(), _ => left_val.string() == right_val.string() } } } impl XPathExpression for ExpressionEqual { fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue { Boolean(self.boolean_evaluate(context)) } } pub struct ExpressionNotEqual { equal: ExpressionEqual, } impl ExpressionNotEqual { pub fn new(left: SubExpression, right: SubExpression) -> ExpressionNotEqual { ExpressionNotEqual { equal: ExpressionEqual{left: left, right: right} } } } impl XPathExpression for ExpressionNotEqual { fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue { Boolean(!self.equal.boolean_evaluate(context)) } } pub struct ExpressionFunction { pub name: 
String, pub arguments: Vec<SubExpression>, } impl XPathExpression for ExpressionFunction { fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue { match context.function_for_name(self.name.as_slice()) { Some(fun) => { // TODO: Error when argument count mismatch let args = self.arguments.iter().map(|ref arg| arg.evaluate(context)).collect(); fun.evaluate(context, args) }, None => fail!("throw UnknownXPathFunctionException(_name)"), } } } pub struct ExpressionLiteral { pub value: XPathValue, } impl XPathExpression for ExpressionLiteral { fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue { self.value.clone() } } pub struct ExpressionMath { left: SubExpression, right: SubExpression, operation: fn(f64, f64) -> f64, } fn add(a: f64, b: f64) -> f64 {a + b} fn subtract(a: f64, b: f64) -> f64 {a - b} fn multiply(a: f64, b: f64) -> f64 {a * b} fn divide(a: f64, b: f64) -> f64 {a / b} fn modulus(a: f64, b: f64) -> f64 {a % b} impl ExpressionMath { pub fn addition(left: SubExpression, right: SubExpression) -> ExpressionMath { ExpressionMath{left: left, right: right, operation: add} } pub fn subtraction(left: SubExpression, right: SubExpression) -> ExpressionMath { ExpressionMath{left: left, right: right, operation: subtract} } pub fn multiplication(left: SubExpression, right: SubExpression) -> ExpressionMath { ExpressionMath{left: left, right: right, operation: multiply} } pub fn division(left: SubExpression, right: SubExpression) -> ExpressionMath { ExpressionMath{left: left, right: right, operation: divide} } pub fn remainder(left: SubExpression, right: SubExpression) -> ExpressionMath { ExpressionMath{left: left, right: right, operation: modulus} } } impl XPathExpression for ExpressionMath { fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue { let left = self.left.evaluate(context); let right = self.right.evaluate(context); let op = self.operation; return Number(op(left.number(), right.number())); } } pub struct 
ExpressionNegation { expression: SubExpression, } impl XPathExpression for ExpressionNegation { fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue { let result = self.expression.evaluate(context); return Number(-result.number()); } } pub struct ExpressionOr { left: SubExpression, right: SubExpression, } impl XPathExpression for ExpressionOr { fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue { return Boolean(self.left.evaluate(context).boolean() || self.right.evaluate(context).boolean()) } } pub struct ExpressionPath { start_point: SubExpression, steps: Vec<SubExpression>, } impl XPathExpression for ExpressionPath { fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue { let mut result = self.start_point.evaluate(context).nodeset(); for step in self.steps.iter() { let mut step_result = Nodeset::new(); let mut sub_context = context.new_context_for(result.size()); for current_node in result.iter() { sub_context.next(current_node.clone()); let selected = step.evaluate(&sub_context); // TODO: What if it is not a nodeset? 
step_result.add_nodeset(&selected.nodeset()); } result = step_result; } Nodes(result) } } pub struct ExpressionPredicate { pub node_selector: SubExpression, pub predicate: SubExpression, } impl ExpressionPredicate { fn include(value: &XPathValue, context: &XPathEvaluationContext) -> bool { match value { &Number(v) => context.position() == v as uint, _ => value.boolean() } } } impl XPathExpression for ExpressionPredicate { fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue { let mut selected = Nodeset::new(); let nodes = self.node_selector.evaluate(context).nodeset(); let mut sub_context = context.new_context_for(nodes.size()); for current_node in nodes.iter() { sub_context.next(current_node.clone()); let value = self.predicate.evaluate(&sub_context); if ExpressionPredicate::include(&value, &sub_context) { selected.add(current_node.clone()); } } Nodes(selected) } } pub struct ExpressionRelational { pub left: SubExpression, pub right: SubExpression, pub operation: fn(f64, f64) -> bool, } fn less_than(left: f64, right: f64) -> bool { left < right } fn less_than_or_equal(left: f64, right: f64) -> bool { left <= right } fn greater_than(left: f64, right: f64) -> bool { left > right } fn greater_than_or_equal(left: f64, right: f64) -> bool { left >= right } impl ExpressionRelational { pub fn less_than(left: SubExpression, right: SubExpression) -> ExpressionRelational { ExpressionRelational{left: left, right: right, operation: less_than} } pub fn less_than_or_equal(left: SubExpression, right: SubExpression) -> ExpressionRelational { ExpressionRelational{left: left, right: right, operation: less_than_or_equal} } pub fn greater_than(left: SubExpression, right: SubExpression) -> ExpressionRelational { ExpressionRelational{left: left, right: right, operation: greater_than} } pub fn greater_than_or_equal(left: SubExpression, right: SubExpression) -> ExpressionRelational { ExpressionRelational{left: left, right: right, operation: greater_than_or_equal} } } impl 
XPathExpression for ExpressionRelational { fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue { let left_val = self.left.evaluate(context); let right_val = self.right.evaluate(context); let op = self.operation; Boolean(op(left_val.number(), right_val.number())) } } pub struct ExpressionRootNode; impl XPathExpression for ExpressionRootNode { fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue { let n = &context.node; let mut result = Nodeset::new(); result.add(n.document().root()); Nodes(result) } } type StepAxis = Box<XPathAxis + 'static>; type StepTest = Box<XPathNodeTest + 'static>; pub struct ExpressionStep { axis: StepAxis, node_test: StepTest, } impl ExpressionStep { pub fn new(axis: StepAxis, node_test: StepTest) -> ExpressionStep { ExpressionStep {axis: axis, node_test: node_test} } } impl XPathExpression for ExpressionStep { fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue { let mut result = Nodeset::new(); self.axis.select_nodes(context, self.node_test, & mut result); Nodes(result) } } pub struct ExpressionUnion { pub left: SubExpression, pub right: SubExpression, } impl XPathExpression for ExpressionUnion { fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue { let mut left_val = self.left.evaluate(context).nodeset(); let right_val = self.right.evaluate(context).nodeset(); left_val.add_nodeset(&right_val); Nodes(left_val) } } pub struct ExpressionVariable { pub name: String, } impl XPathExpression for ExpressionVariable { fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue { match context.value_of(self.name.as_slice()) { Some(v) => v.clone(), None => fail!("throw UnknownVariableException(_name)"), } } } Fix warnings in expressions extern crate document; use document::Nodeset; use super::XPathEvaluationContext; use super::XPathNodeTest; use super::XPathValue; use super::{Boolean,Number,Nodes}; use super::axis::XPathAxis; pub trait XPathExpression { fn evaluate(& self, context: 
&XPathEvaluationContext) -> XPathValue; }

// NOTE(review): pre-1.0 Rust (`fail!`, `uint`, bare `Box<Trait + 'static>` trait
// objects). Documentation-only pass; code tokens are unchanged. The fragment
// above completes a trait definition whose start lies outside this chunk.

/// An owned, boxed node of the parsed XPath expression tree.
pub type SubExpression = Box<XPathExpression + 'static>;

/// Logical conjunction of two sub-expressions (XPath `and`).
pub struct ExpressionAnd {
    pub left: SubExpression,
    pub right: SubExpression,
}

impl XPathExpression for ExpressionAnd {
    fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue {
        // `&&` short-circuits: the right side is only evaluated when the left is true.
        Boolean(self.left.evaluate(context).boolean() && self.right.evaluate(context).boolean())
    }
}

/// Evaluates to a nodeset containing only the context node (XPath `.`).
pub struct ExpressionContextNode;

impl XPathExpression for ExpressionContextNode {
    fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue {
        let mut result = Nodeset::new();
        result.add(context.node().clone());
        Nodes(result.clone())
    }
}

/// XPath `=` comparison.
pub struct ExpressionEqual {
    pub left: SubExpression,
    pub right: SubExpression,
}

impl ExpressionEqual {
    // Coercion order: if either side is a boolean compare as booleans, else if
    // either side is a number compare as numbers, otherwise compare as strings.
    fn boolean_evaluate(& self, context: &XPathEvaluationContext) -> bool {
        let left_val = self.left.evaluate(context);
        let right_val = self.right.evaluate(context);
        match (&left_val, &right_val) {
            (&Boolean(_), _) | (_, &Boolean(_)) => left_val.boolean() == right_val.boolean(),
            (&Number(_), _) | (_, &Number(_)) => left_val.number() == right_val.number(),
            _ => left_val.string() == right_val.string()
        }
    }
}

impl XPathExpression for ExpressionEqual {
    fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue {
        Boolean(self.boolean_evaluate(context))
    }
}

/// XPath `!=`; implemented as the negation of `ExpressionEqual`.
pub struct ExpressionNotEqual {
    equal: ExpressionEqual,
}

impl ExpressionNotEqual {
    pub fn new(left: SubExpression, right: SubExpression) -> ExpressionNotEqual {
        ExpressionNotEqual { equal: ExpressionEqual{left: left, right: right} }
    }
}

impl XPathExpression for ExpressionNotEqual {
    fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue {
        Boolean(!self.equal.boolean_evaluate(context))
    }
}

/// A call to a named XPath function with already-parsed argument expressions.
pub struct ExpressionFunction {
    pub name: String,
    pub arguments: Vec<SubExpression>,
}

impl XPathExpression for ExpressionFunction {
    fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue {
        // The function is looked up by name in the evaluation context.
        match context.function_for_name(self.name.as_slice()) {
            Some(fun) => {
                // TODO: Error when argument count mismatch
                let args = self.arguments.iter().map(|ref arg| arg.evaluate(context)).collect();
                fun.evaluate(context, args)
            },
            None => fail!("throw UnknownXPathFunctionException(_name)"),
        }
    }
}

/// A literal value appearing directly in the expression text.
pub struct ExpressionLiteral {
    pub value: XPathValue,
}

impl XPathExpression for ExpressionLiteral {
    fn evaluate(& self, _: &XPathEvaluationContext) -> XPathValue {
        self.value.clone()
    }
}

/// Binary arithmetic; the operator is stored as a plain fn pointer.
pub struct ExpressionMath {
    left: SubExpression,
    right: SubExpression,
    operation: fn(f64, f64) -> f64,
}

// Free functions used as the `operation` pointers above.
fn add(a: f64, b: f64) -> f64 {a + b}
fn subtract(a: f64, b: f64) -> f64 {a - b}
fn multiply(a: f64, b: f64) -> f64 {a * b}
fn divide(a: f64, b: f64) -> f64 {a / b}
fn modulus(a: f64, b: f64) -> f64 {a % b}

impl ExpressionMath {
    pub fn addition(left: SubExpression, right: SubExpression) -> ExpressionMath {
        ExpressionMath{left: left, right: right, operation: add}
    }
    pub fn subtraction(left: SubExpression, right: SubExpression) -> ExpressionMath {
        ExpressionMath{left: left, right: right, operation: subtract}
    }
    pub fn multiplication(left: SubExpression, right: SubExpression) -> ExpressionMath {
        ExpressionMath{left: left, right: right, operation: multiply}
    }
    pub fn division(left: SubExpression, right: SubExpression) -> ExpressionMath {
        ExpressionMath{left: left, right: right, operation: divide}
    }
    pub fn remainder(left: SubExpression, right: SubExpression) -> ExpressionMath {
        ExpressionMath{left: left, right: right, operation: modulus}
    }
}

impl XPathExpression for ExpressionMath {
    fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue {
        // Both operands are coerced to numbers before applying the operator.
        let left = self.left.evaluate(context);
        let right = self.right.evaluate(context);
        let op = self.operation;
        return Number(op(left.number(), right.number()));
    }
}

/// Unary minus: negates the numeric value of the inner expression.
pub struct ExpressionNegation {
    expression: SubExpression,
}

impl XPathExpression for ExpressionNegation {
    fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue {
        let result = self.expression.evaluate(context);
        return Number(-result.number());
    }
}

/// Logical disjunction of two sub-expressions (XPath `or`).
pub struct ExpressionOr {
    left: SubExpression,
    right: SubExpression,
}

impl XPathExpression for ExpressionOr {
    fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue {
        // `||` short-circuits: the right side is only evaluated when the left is false.
        return Boolean(self.left.evaluate(context).boolean() || self.right.evaluate(context).boolean())
    }
}

/// A location path: a starting expression plus a sequence of steps.
pub struct ExpressionPath {
    start_point: SubExpression,
    steps: Vec<SubExpression>,
}

impl XPathExpression for ExpressionPath {
    fn evaluate(& self, context: &XPathEvaluationContext) -> XPathValue {
        let mut result = self.start_point.evaluate(context).nodeset();
        // Each step is evaluated once per node selected by the previous step;
        // the union of the per-node results feeds the next step.
        for step in self.steps.iter() {
            let mut step_result = Nodeset::new();
            let mut sub_context = context.new_context_for(result.size());
            for current_node in result.iter() {
                sub_context.next(current_node.clone());
                let selected = step.evaluate(&sub_context);
                // TODO: What if it is not a nodeset?
                step_result.add_nodeset(&selected.nodeset());
            }
            result = step_result;
        }
        Nodes(result)
    }
}

/// Filters the nodes chosen by `node_selector` through `predicate`.
pub struct ExpressionPredicate {
    pub node_selector: SubExpression,
    pub predicate: SubExpression,
}

impl ExpressionPredicate {
    // Numeric predicate values select by position; any other value is a boolean test.
    fn include(value: &XPathValue, context: &XPathEvaluationContext) -> bool {
        match value {
            &Number(v) => context.position() == v as uint,
            _ => value.boolean()
        }
    }
}

impl XPathExpression for ExpressionPredicate {
    fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue {
        let mut selected = Nodeset::new();
        let nodes = self.node_selector.evaluate(context).nodeset();
        let mut sub_context = context.new_context_for(nodes.size());
        for current_node in nodes.iter() {
            sub_context.next(current_node.clone());
            let value = self.predicate.evaluate(&sub_context);
            if ExpressionPredicate::include(&value, &sub_context) {
                selected.add(current_node.clone());
            }
        }
        Nodes(selected)
    }
}

/// `<`, `<=`, `>`, `>=`; the comparison is stored as a plain fn pointer.
pub struct ExpressionRelational {
    pub left: SubExpression,
    pub right: SubExpression,
    pub operation: fn(f64, f64) -> bool,
}

// Free functions used as the `operation` pointers above.
fn less_than(left: f64, right: f64) -> bool { left < right }
fn less_than_or_equal(left: f64, right: f64) -> bool { left <= right }
fn greater_than(left: f64, right: f64) -> bool { left > right }
fn greater_than_or_equal(left: f64, right: f64) -> bool { left >= right }

impl ExpressionRelational {
    pub fn less_than(left: SubExpression, right: SubExpression) -> ExpressionRelational {
        ExpressionRelational{left: left, right: right, operation: less_than}
    }
    pub fn less_than_or_equal(left: SubExpression, right: SubExpression) -> ExpressionRelational {
        ExpressionRelational{left: left, right: right, operation: less_than_or_equal}
    }
    pub fn greater_than(left: SubExpression, right: SubExpression) -> ExpressionRelational {
        ExpressionRelational{left: left, right: right, operation: greater_than}
    }
    pub fn greater_than_or_equal(left: SubExpression, right: SubExpression) -> ExpressionRelational {
        ExpressionRelational{left: left, right: right, operation: greater_than_or_equal}
    }
}

impl XPathExpression for ExpressionRelational {
    fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue {
        // Both operands are coerced to numbers before comparison.
        let left_val = self.left.evaluate(context);
        let right_val = self.right.evaluate(context);
        let op = self.operation;
        Boolean(op(left_val.number(), right_val.number()))
    }
}

/// Evaluates to the document root of the context node (XPath `/`).
pub struct ExpressionRootNode;

impl XPathExpression for ExpressionRootNode {
    fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue {
        let n = &context.node;
        let mut result = Nodeset::new();
        result.add(n.document().root());
        Nodes(result)
    }
}

/// Owned, boxed axis / node-test implementations for a location step.
pub type StepAxis = Box<XPathAxis + 'static>;
pub type StepTest = Box<XPathNodeTest + 'static>;

/// One location step: an axis plus a node test.
pub struct ExpressionStep {
    axis: StepAxis,
    node_test: StepTest,
}

impl ExpressionStep {
    pub fn new(axis: StepAxis, node_test: StepTest) -> ExpressionStep {
        ExpressionStep {axis: axis, node_test: node_test}
    }
}

impl XPathExpression for ExpressionStep {
    fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue {
        let mut result = Nodeset::new();
        self.axis.select_nodes(context, self.node_test, & mut result);
        Nodes(result)
    }
}

/// XPath `|`: union of the nodesets produced by both sides.
pub struct ExpressionUnion {
    pub left: SubExpression,
    pub right: SubExpression,
}

impl XPathExpression for ExpressionUnion {
    fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue {
        let mut left_val = self.left.evaluate(context).nodeset();
        let right_val = self.right.evaluate(context).nodeset();
        left_val.add_nodeset(&right_val);
        Nodes(left_val)
    }
}

/// A `$variable` reference, resolved against the evaluation context.
pub struct ExpressionVariable {
    pub name: String,
}

impl XPathExpression for ExpressionVariable {
    fn evaluate(&self, context: &XPathEvaluationContext) -> XPathValue {
        match context.value_of(self.name.as_slice()) {
            Some(v) => v.clone(),
            None => fail!("throw UnknownVariableException(_name)"),
        }
    }
}
use crate::{nameutil::mangle_keywords, Env};
use once_cell::sync::Lazy;
use regex::{Captures, Regex};
use std::{
    fmt::{self, Display, Formatter},
    str::FromStr,
};

/// Errors produced while parsing a GI-docgen link such as `[class@Gtk.Widget]`.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum GiDocgenError {
    // The `<kind>` before the `@` is not one of the known link kinds.
    InvalidLinkType(String),
    // The link kind is known but the details after the `@` are malformed.
    BrokenLinkType(String),
    // The string is not of the `<kind>@<details>` shape at all.
    InvalidLink,
}

impl Display for GiDocgenError {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            Self::InvalidLinkType(e) => f.write_str(&format!("Invalid link type \"{}\"", e)),
            Self::BrokenLinkType(e) => {
                f.write_str(&format!("Broken link syntax for type \"{}\"", e))
            }
            Self::InvalidLink => f.write_str("Invalid link syntax"),
        }
    }
}

impl std::error::Error for GiDocgenError {}

/// Convert a "Namespace.Type" to (Option<Namespace>, Type)
fn namespace_type_from_details(
    link_details: &str,
    link_type: &str,
) -> Result<(Option<String>, String), GiDocgenError> {
    let res: Vec<&str> = link_details.split('.').collect();
    let len = res.len();
    if len == 1 {
        Ok((None, res[0].to_string()))
    } else if len == 2 {
        if res[1].is_empty() {
            // e.g. "Gtk." — a dangling dot is a broken link
            Err(GiDocgenError::BrokenLinkType(link_type.to_string()))
        } else {
            Ok((Some(res[0].to_string()), res[1].to_string()))
        }
    } else {
        Err(GiDocgenError::BrokenLinkType(link_type.to_string()))
    }
}

/// Convert a "Namespace.Type.method_name" to (Option<Namespace>, Option<Type>, name)
/// Type is only optional for global functions; the meaning of a two-part link
/// is selected by the `is_global_func` parameter.
fn namespace_type_method_from_details(
    link_details: &str,
    link_type: &str,
    is_global_func: bool,
) -> Result<(Option<String>, Option<String>, String), GiDocgenError> {
    let res: Vec<&str> = link_details.split('.').collect();
    let len = res.len();
    if len == 1 {
        Ok((None, None, res[0].to_string()))
    } else if len == 2 {
        if res[1].is_empty() {
            Err(GiDocgenError::BrokenLinkType(link_type.to_string()))
        } else if is_global_func {
            // "Ns.func": the first segment is the namespace
            Ok((Some(res[0].to_string()), None, res[1].to_string()))
        } else {
            // "Type.method": the first segment is the type
            Ok((None, Some(res[0].to_string()), res[1].to_string()))
        }
    } else if len == 3 {
        if res[2].is_empty() {
            Err(GiDocgenError::BrokenLinkType(link_type.to_string()))
        } else {
            Ok((
                Some(res[0].to_string()),
                Some(res[1].to_string()),
                res[2].to_string(),
            ))
        }
    } else {
        Err(GiDocgenError::BrokenLinkType(link_type.to_string()))
    }
}

// Matches "[<kind>@<ident>(.sub)?(.sub)?]"-style GI-docgen links inside doc text.
static GI_DOCGEN_SYMBOLS: Lazy<Regex> = Lazy::new(|| {
    Regex::new(r"\[(callback|id|alias|class|const|ctor|enum|error|flags|func|iface|method|property|signal|struct|vfunc)[@](\w+\b)([:.]+[\w-]+\b)?([:.]+[\w-]+\b)?\]?").unwrap()
});

// Rewrites every GI-docgen link in `entry` to its Rust documentation equivalent.
pub(crate) fn replace_c_types(entry: &str, env: &Env, _in_type: &str) -> String {
    GI_DOCGEN_SYMBOLS
        .replace_all(entry, |caps: &Captures<'_>| {
            if let Ok(gi_type) = GiDocgen::from_str(&caps[0]) {
                gi_type.rust_link(env)
            } else {
                // otherwise fallback to the original string
                caps[0].to_string()
            }
        })
        .to_string()
}

/// A representation of the various ways to link items using GI-docgen
///
/// See <https://gnome.pages.gitlab.gnome.org/gi-docgen/linking.html> for details.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum GiDocgen {
    // C-identifier
    Id(String),
    // Alias to another type
    Alias(String),
    // Object Class
    Class { namespace: Option<String>, type_: String, },
    Const { namespace: Option<String>, type_: String, },
    Constructor { namespace: Option<String>, type_: String, name: String, },
    Callback { namespace: Option<String>, name: String, },
    Enum { namespace: Option<String>, type_: String, },
    Error { namespace: Option<String>, type_: String, },
    Flag { namespace: Option<String>, type_: String, },
    Func { namespace: Option<String>, type_: Option<String>, name: String, },
    Interface { namespace: Option<String>, type_: String, },
    Method {
        namespace: Option<String>,
        type_: String,
        name: String,
        is_instance: bool, // Whether `type_` ends with Class
    },
    Property { namespace: Option<String>, type_: String, name: String, },
    Signal { namespace: Option<String>, type_: String, name: String, },
    Struct { namespace: Option<String>, type_: String, },
    VFunc { namespace: Option<String>, type_: String, name: String, },
}
// Renders "Ns::Type" when a namespace is present, otherwise just the type name.
fn ns_type_to_doc(namespace: &Option<String>, type_: &str) -> String {
    if let Some(ns) = namespace {
        format!("{}::{}", ns, type_)
    } else {
        type_.to_string()
    }
}

impl GiDocgen {
    /// Renders this GI-docgen link as Rust documentation markup.
    ///
    /// Items found in the analysis resolve to `(crate::…)`-style doc links via
    /// the symbol table; anything unresolved degrades to plain inline code.
    pub fn rust_link(&self, env: &Env) -> String {
        let symbols = env.symbols.borrow();
        match self {
            GiDocgen::Enum { namespace, type_ } | GiDocgen::Error { namespace, type_ } => {
                if let Some(enum_info) =
                    env.analysis.enumerations.iter().find(|e| &e.name == type_)
                {
                    let sym = symbols.by_tid(enum_info.type_id).unwrap();
                    format!("[`{name}`](crate::{name})", name = sym.full_rust_name())
                } else {
                    format!("`{}`", ns_type_to_doc(namespace, type_))
                }
            }
            GiDocgen::Class { namespace, type_ } | GiDocgen::Interface { namespace, type_ } => {
                if let Some((_, class_info)) =
                    env.analysis.objects.iter().find(|(_, o)| &o.name == type_)
                {
                    let sym = symbols.by_tid(class_info.type_id).unwrap();
                    format!("[{name}](crate::{name})", name = sym.full_rust_name())
                } else {
                    format!("`{}`", ns_type_to_doc(namespace, type_))
                }
            }
            GiDocgen::Flag { namespace, type_ } => {
                if let Some(flag_info) = env.analysis.flags.iter().find(|e| &e.name == type_) {
                    let sym = symbols.by_tid(flag_info.type_id).unwrap();
                    format!("[`{name}`](crate::{name})", name = sym.full_rust_name())
                } else {
                    format!("`{}`", ns_type_to_doc(namespace, type_))
                }
            }
            GiDocgen::Const { namespace, type_ } => {
                if let Some(const_info) = env.analysis.constants.iter().find(|c| &c.name == type_) {
                    // NOTE(review): constants use `.typ`, not `.type_id`, unlike the other arms.
                    let sym = symbols.by_tid(const_info.typ).unwrap();
                    format!("[{name}](crate::{name})", name = sym.full_rust_name())
                } else {
                    format!("`{}`", ns_type_to_doc(namespace, type_))
                }
            }
            GiDocgen::Property {
                namespace,
                type_,
                name,
            } => {
                if let Some((_, class_info)) =
                    env.analysis.objects.iter().find(|(_, o)| &o.name == type_)
                {
                    let sym = symbols.by_tid(class_info.type_id).unwrap();
                    format!("`{}:{}`", sym.full_rust_name(), name)
                } else {
                    format!("`{}:{}`", ns_type_to_doc(namespace, type_), name)
                }
            }
            GiDocgen::Signal {
                namespace,
                type_,
                name,
            } => {
                if let Some((_, class_info)) =
                    env.analysis.objects.iter().find(|(_, o)| &o.name == type_)
                {
                    let sym = symbols.by_tid(class_info.type_id).unwrap();
                    format!("`{}::{}`", sym.full_rust_name(), name)
                } else {
                    format!("`{}::{}`", ns_type_to_doc(namespace, type_), name)
                }
            }
            GiDocgen::Id(c_name) => {
                if let Some(sym) = symbols.by_c_name(c_name) {
                    format!("[{name}](crate::{name})", name = sym.full_rust_name())
                } else {
                    format!("`{}`", c_name)
                }
            }
            GiDocgen::Struct { namespace, type_ } => {
                if let Some((_, record_info)) =
                    env.analysis.records.iter().find(|(_, r)| &r.name == type_)
                {
                    let sym = symbols.by_tid(record_info.type_id).unwrap();
                    format!("[{name}](crate::{name})", name = sym.full_rust_name())
                } else {
                    format!("`{}`", ns_type_to_doc(namespace, type_))
                }
            }
            GiDocgen::Constructor {
                namespace,
                type_,
                name,
            } => {
                if let Some((class_info, fn_info)) = env.analysis.find_object_by_function(
                    env,
                    |o| &o.name == type_,
                    |f| f.name == mangle_keywords(name),
                ) {
                    let sym = symbols.by_tid(class_info.type_id).unwrap();
                    let parent = sym.full_rust_name();
                    fn_info.doc_link(Some(&parent), None)
                } else {
                    format!("`{}::{}`", ns_type_to_doc(namespace, type_), name)
                }
            }
            GiDocgen::Func {
                namespace: _,
                type_,
                name,
            } => {
                // With an associated type, look the function up on that object;
                // otherwise try the global functions.
                if let Some(ty) = type_ {
                    if let Some((obj_info, fn_info)) = env.analysis.find_object_by_function(
                        env,
                        |o| &o.name == ty,
                        |f| f.name == mangle_keywords(name),
                    ) {
                        let sym = symbols.by_tid(obj_info.type_id).unwrap();
                        let parent = sym.full_rust_name();
                        fn_info.doc_link(Some(&parent), None)
                    } else {
                        format!("`{}`", name)
                    }
                } else if let Some(fn_info) =
                    env.analysis.find_global_function(env, |f| &f.name == name)
                {
                    fn_info.doc_link(None, None)
                } else {
                    format!("`{}`", name)
                }
            }
            GiDocgen::Alias(alias) => {
                if let Some((_, record_info)) =
                    env.analysis.records.iter().find(|(_, r)| &r.name == alias)
                {
                    let sym = symbols.by_tid(record_info.type_id).unwrap();
                    format!(
                        "{alias} alias [{name}](crate::{name})",
                        alias = alias,
                        name = sym.full_rust_name()
                    )
                } else {
                    format!("`{}`", alias)
                }
            }
            GiDocgen::Method {
                namespace,
                type_,
                name,
                is_instance: _,
            } => {
                // Methods may live on an object or on a record (boxed type).
                if let Some((obj_info, fn_info)) = env.analysis.find_object_by_function(
                    env,
                    |o| &o.name == type_,
                    |f| f.name == mangle_keywords(name),
                ) {
                    let sym = symbols.by_tid(obj_info.type_id).unwrap();
                    let (type_name, visible_type_name) = obj_info.generate_doc_link_info(fn_info);
                    fn_info.doc_link(
                        Some(&sym.full_rust_name().replace(type_, &type_name)),
                        Some(&visible_type_name),
                    )
                } else if let Some((record_info, fn_info)) = env.analysis.find_record_by_function(
                    env,
                    |r| &r.name == type_,
                    |f| f.name == mangle_keywords(name),
                ) {
                    let sym = symbols.by_tid(record_info.type_id).unwrap();
                    let parent = sym.full_rust_name();
                    fn_info.doc_link(Some(&parent), None)
                } else {
                    format!("`{}::{}()`", ns_type_to_doc(namespace, type_), name)
                }
            }
            GiDocgen::Callback { namespace, name } => {
                format!("`{}`", ns_type_to_doc(namespace, name))
            }
            GiDocgen::VFunc {
                namespace,
                type_,
                name,
            } => {
                format!("`virtual:{}::{}`", ns_type_to_doc(namespace, type_), name)
            }
        }
    }
}

impl FromStr for GiDocgen {
    type Err = GiDocgenError;

    // We assume the string is contained inside a []
    fn from_str(item_link: &str) -> Result<Self, Self::Err> {
        let item_link = item_link.trim_start_matches('[').trim_end_matches(']');
        if let Some((link_type, link_details)) = item_link.split_once('@') {
            match link_type {
                "alias" => Ok(GiDocgen::Alias(link_details.to_string())),
                "class" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "class")?;
                    Ok(GiDocgen::Class { namespace, type_ })
                }
                "const" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "const")?;
                    Ok(GiDocgen::Const { namespace, type_ })
                }
                "ctor" => {
                    let (namespace, type_, name) =
                        namespace_type_method_from_details(link_details, "ctor", false)?;
                    Ok(GiDocgen::Constructor {
                        namespace,
                        type_: type_
                            .ok_or_else(|| GiDocgenError::BrokenLinkType("ctor".to_string()))?,
                        name,
                    })
                }
                "enum" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "enum")?;
                    Ok(GiDocgen::Enum { namespace, type_ })
                }
                "error" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "error")?;
                    Ok(GiDocgen::Error { namespace, type_ })
                }
                "flags" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "flags")?;
                    Ok(GiDocgen::Flag { namespace, type_ })
                }
                "func" => {
                    let (namespace, type_, name) =
                        namespace_type_method_from_details(link_details, "func", true)?;
                    Ok(GiDocgen::Func {
                        namespace,
                        type_,
                        name,
                    })
                }
                "iface" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "iface")?;
                    Ok(GiDocgen::Interface { namespace, type_ })
                }
                "callback" => {
                    let (namespace, name) = namespace_type_from_details(link_details, "callback")?;
                    Ok(GiDocgen::Callback { namespace, name })
                }
                "method" => {
                    let (namespace, type_, name) =
                        namespace_type_method_from_details(link_details, "method", false)?;
                    let type_ =
                        type_.ok_or_else(|| GiDocgenError::BrokenLinkType("method".to_string()))?;
                    Ok(GiDocgen::Method {
                        namespace,
                        is_instance: type_.ends_with("Class"),
                        type_,
                        name,
                    })
                }
                "property" => {
                    // Property links use "Type:prop-name".
                    let (namespace, type_) = namespace_type_from_details(link_details, "property")?;
                    let type_details: Vec<_> = type_.split(':').collect();
                    if type_details.len() < 2 || type_details[1].is_empty() {
                        Err(GiDocgenError::BrokenLinkType("property".to_string()))
                    } else {
                        Ok(GiDocgen::Property {
                            namespace,
                            type_: type_details[0].to_string(),
                            name: type_details[1].to_string(),
                        })
                    }
                }
                "signal" => {
                    // Signal links use "Type::signal-name".
                    let (namespace, type_) = namespace_type_from_details(link_details, "signal")?;
                    let type_details: Vec<_> = type_.split("::").collect();
                    if type_details.len() < 2 || type_details[1].is_empty() {
                        Err(GiDocgenError::BrokenLinkType("signal".to_string()))
                    } else {
                        Ok(GiDocgen::Signal {
                            namespace,
                            type_: type_details[0].to_string(),
                            name: type_details[1].to_string(),
                        })
                    }
                }
                "struct" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "struct")?;
                    Ok(GiDocgen::Struct { namespace, type_ })
                }
                "vfunc" => {
                    let (namespace, type_, name) =
                        namespace_type_method_from_details(link_details, "vfunc", false)?;
                    Ok(GiDocgen::VFunc {
                        namespace,
                        type_: type_
                            .ok_or_else(|| GiDocgenError::BrokenLinkType("vfunc".to_string()))?,
                        name,
                    })
                }
                "id" => Ok(GiDocgen::Id(link_details.to_string())),
                e => Err(GiDocgenError::InvalidLinkType(e.to_string())),
            }
        } else {
            Err(GiDocgenError::InvalidLink)
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_link_alias() {
        assert_eq!(
            GiDocgen::from_str("[alias@Allocation]"),
            Ok(GiDocgen::Alias("Allocation".to_string()))
        );
    }

    #[test]
    fn test_link_class() {
        assert_eq!(
            GiDocgen::from_str("[class@Widget]"),
            Ok(GiDocgen::Class {
                namespace: None,
                type_: "Widget".to_string(),
            })
        );
        assert_eq!(
            GiDocgen::from_str("[class@Gdk.Surface]"),
            Ok(GiDocgen::Class {
                namespace: Some("Gdk".to_string()),
                type_: "Surface".to_string(),
            })
        );
        assert_eq!(
            GiDocgen::from_str("[class@Gsk.RenderNode]"),
            Ok(GiDocgen::Class {
                namespace: Some("Gsk".to_string()),
                type_: "RenderNode".to_string(),
            })
        );
        assert_eq!(
            GiDocgen::from_str("[class@Gsk.RenderNode.test]"),
            Err(GiDocgenError::BrokenLinkType("class".to_string()))
        );
        assert_eq!(
            GiDocgen::from_str("[class@Gsk.]"),
            Err(GiDocgenError::BrokenLinkType("class".to_string()))
        );
    }

    #[test]
    fn test_link_id() {
        assert_eq!(
            GiDocgen::from_str("[id@gtk_widget_show]"),
            Ok(GiDocgen::Id("gtk_widget_show".to_string()))
        );
    }

    #[test]
    fn test_link_const() {
        assert_eq!(
            GiDocgen::from_str("[const@Gdk.KEY_q]"),
            Ok(GiDocgen::Const {
                namespace: Some("Gdk".to_string()),
                type_: "KEY_q".to_string()
            })
        );
    }

    #[test]
    fn test_link_callback() {
        assert_eq!(
            GiDocgen::from_str("[callback@Gtk.MapListModelMapFunc]"),
            Ok(GiDocgen::Callback {
                namespace: Some("Gtk".to_string()),
                name: "MapListModelMapFunc".to_string()
            })
        )
    }

    #[test]
    fn test_link_enum() {
        assert_eq!(
            GiDocgen::from_str("[enum@Orientation]"),
            Ok(GiDocgen::Enum {
                namespace: None,
                type_: "Orientation".to_string()
            })
        );
    }

    #[test]
    fn test_link_error() {
        assert_eq!(
            GiDocgen::from_str("[error@Gtk.BuilderParseError]"),
            Ok(GiDocgen::Error {
                namespace: Some("Gtk".to_string()),
                type_: "BuilderParseError".to_string()
            })
        );
    }

    #[test]
    fn test_link_flags() {
        assert_eq!(
            GiDocgen::from_str("[flags@Gdk.ModifierType]"),
            Ok(GiDocgen::Flag {
                namespace: Some("Gdk".to_string()),
                type_: "ModifierType".to_string()
            })
        );
    }

    #[test]
    fn test_link_iface() {
        assert_eq!(
            GiDocgen::from_str("[iface@Gtk.Buildable]"),
            Ok(GiDocgen::Interface {
                namespace: Some("Gtk".to_string()),
                type_: "Buildable".to_string()
            })
        );
    }

    #[test]
    fn test_link_struct() {
        assert_eq!(
            GiDocgen::from_str("[struct@Gtk.TextIter]"),
            Ok(GiDocgen::Struct {
                namespace: Some("Gtk".to_string()),
                type_: "TextIter".to_string()
            })
        );
    }

    #[test]
    fn test_link_property() {
        assert_eq!(
            GiDocgen::from_str("[property@Gtk.Orientable:orientation]"),
            Ok(GiDocgen::Property {
                namespace: Some("Gtk".to_string()),
                type_: "Orientable".to_string(),
                name: "orientation".to_string(),
            })
        );
        assert_eq!(
            GiDocgen::from_str("[property@Gtk.Orientable]"),
            Err(GiDocgenError::BrokenLinkType("property".to_string()))
        );
        assert_eq!(
            GiDocgen::from_str("[property@Gtk.Orientable:]"),
            Err(GiDocgenError::BrokenLinkType("property".to_string()))
        );
    }

    #[test]
    fn test_link_signal() {
        assert_eq!(
            GiDocgen::from_str("[signal@Gtk.RecentManager::changed]"),
            Ok(GiDocgen::Signal {
                namespace: Some("Gtk".to_string()),
                type_: "RecentManager".to_string(),
                name: "changed".to_string(),
            })
        );
        assert_eq!(
            GiDocgen::from_str("[signal@Gtk.RecentManager]"),
            Err(GiDocgenError::BrokenLinkType("signal".to_string()))
        );
        assert_eq!(
            GiDocgen::from_str("[signal@Gtk.RecentManager::]"),
            Err(GiDocgenError::BrokenLinkType("signal".to_string()))
        );
        assert_eq!(
            GiDocgen::from_str("[signal@Gtk.RecentManager:]"),
            Err(GiDocgenError::BrokenLinkType("signal".to_string()))
        );
    }

    #[test]
    fn test_link_vfunc() {
        assert_eq!(
            GiDocgen::from_str("[vfunc@Gtk.Widget.measure]"),
            Ok(GiDocgen::VFunc {
                namespace: Some("Gtk".to_string()),
                type_: "Widget".to_string(),
                name: "measure".to_string(),
            })
        );
        assert_eq!(
GiDocgen::from_str("[vfunc@Widget.snapshot]"),
            Ok(GiDocgen::VFunc {
                namespace: None,
                type_: "Widget".to_string(),
                name: "snapshot".to_string(),
            })
        );
    }

    #[test]
    fn test_link_ctor() {
        assert_eq!(
            GiDocgen::from_str("[ctor@Gtk.Box.new]"),
            Ok(GiDocgen::Constructor {
                namespace: Some("Gtk".to_string()),
                type_: "Box".to_string(),
                name: "new".to_string(),
            })
        );
        assert_eq!(
            GiDocgen::from_str("[ctor@Button.new_with_label]"),
            Ok(GiDocgen::Constructor {
                namespace: None,
                type_: "Button".to_string(),
                name: "new_with_label".to_string(),
            })
        );
    }

    #[test]
    fn test_link_func() {
        assert_eq!(
            GiDocgen::from_str("[func@Gtk.init]"),
            Ok(GiDocgen::Func {
                namespace: Some("Gtk".to_string()),
                type_: None,
                name: "init".to_string(),
            })
        );
        assert_eq!(
            GiDocgen::from_str("[func@show_uri]"),
            Ok(GiDocgen::Func {
                namespace: None,
                type_: None,
                name: "show_uri".to_string(),
            })
        );
        assert_eq!(
            GiDocgen::from_str("[func@Gtk.Window.list_toplevels]"),
            Ok(GiDocgen::Func {
                namespace: Some("Gtk".to_string()),
                type_: Some("Window".to_string()),
                name: "list_toplevels".to_string(),
            })
        );
    }

    #[test]
    fn test_link_method() {
        assert_eq!(
            GiDocgen::from_str("[method@Gtk.Widget.show]"),
            Ok(GiDocgen::Method {
                namespace: Some("Gtk".to_string()),
                type_: "Widget".to_string(),
                name: "show".to_string(),
                is_instance: false,
            })
        );
        assert_eq!(
            GiDocgen::from_str("[method@WidgetClass.add_binding]"),
            Ok(GiDocgen::Method {
                namespace: None,
                type_: "WidgetClass".to_string(),
                name: "add_binding".to_string(),
                is_instance: true,
            })
        );
    }
}

docs/gi-docgen: use [][] for doc links

// NOTE(review): the bare text above and the duplicated module below look like a
// fused commit message plus a second revision of this same file — presumably a
// concatenated dump rather than a single compilation unit; verify upstream.
// The revision below switches generated doc links to the `[…][crate::…]`
// reference style and prefixed plain-code forms (property::/signal::/alias::).

use crate::{nameutil::mangle_keywords, Env};
use once_cell::sync::Lazy;
use regex::{Captures, Regex};
use std::{
    fmt::{self, Display, Formatter},
    str::FromStr,
};

/// Errors produced while parsing a GI-docgen link such as `[class@Gtk.Widget]`.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum GiDocgenError {
    InvalidLinkType(String),
    BrokenLinkType(String),
    InvalidLink,
}

impl Display for GiDocgenError {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            Self::InvalidLinkType(e) => f.write_str(&format!("Invalid link type \"{}\"", e)),
            Self::BrokenLinkType(e) => {
                f.write_str(&format!("Broken link syntax for type \"{}\"", e))
            }
            Self::InvalidLink => f.write_str("Invalid link syntax"),
        }
    }
}

impl std::error::Error for GiDocgenError {}

/// Convert a "Namespace.Type" to (Option<Namespace>, Type)
fn namespace_type_from_details(
    link_details: &str,
    link_type: &str,
) -> Result<(Option<String>, String), GiDocgenError> {
    let res: Vec<&str> = link_details.split('.').collect();
    let len = res.len();
    if len == 1 {
        Ok((None, res[0].to_string()))
    } else if len == 2 {
        if res[1].is_empty() {
            Err(GiDocgenError::BrokenLinkType(link_type.to_string()))
        } else {
            Ok((Some(res[0].to_string()), res[1].to_string()))
        }
    } else {
        Err(GiDocgenError::BrokenLinkType(link_type.to_string()))
    }
}

/// Convert a "Namespace.Type.method_name" to (Option<Namespace>, Option<Type>, name)
/// Type is only optional for global functions; the meaning of a two-part link
/// is selected by the `is_global_func` parameter.
fn namespace_type_method_from_details(
    link_details: &str,
    link_type: &str,
    is_global_func: bool,
) -> Result<(Option<String>, Option<String>, String), GiDocgenError> {
    let res: Vec<&str> = link_details.split('.').collect();
    let len = res.len();
    if len == 1 {
        Ok((None, None, res[0].to_string()))
    } else if len == 2 {
        if res[1].is_empty() {
            Err(GiDocgenError::BrokenLinkType(link_type.to_string()))
        } else if is_global_func {
            Ok((Some(res[0].to_string()), None, res[1].to_string()))
        } else {
            Ok((None, Some(res[0].to_string()), res[1].to_string()))
        }
    } else if len == 3 {
        if res[2].is_empty() {
            Err(GiDocgenError::BrokenLinkType(link_type.to_string()))
        } else {
            Ok((
                Some(res[0].to_string()),
                Some(res[1].to_string()),
                res[2].to_string(),
            ))
        }
    } else {
        Err(GiDocgenError::BrokenLinkType(link_type.to_string()))
    }
}

// Matches "[<kind>@<ident>(.sub)?(.sub)?]"-style GI-docgen links inside doc text.
static GI_DOCGEN_SYMBOLS: Lazy<Regex> = Lazy::new(|| {
    Regex::new(r"\[(callback|id|alias|class|const|ctor|enum|error|flags|func|iface|method|property|signal|struct|vfunc)[@](\w+\b)([:.]+[\w-]+\b)?([:.]+[\w-]+\b)?\]?").unwrap()
});

// Rewrites every GI-docgen link in `entry` to its Rust documentation equivalent.
pub(crate) fn replace_c_types(entry: &str, env: &Env, _in_type: &str) -> String {
    GI_DOCGEN_SYMBOLS
        .replace_all(entry, |caps: &Captures<'_>| {
            if let Ok(gi_type) = GiDocgen::from_str(&caps[0]) {
                gi_type.rust_link(env)
            } else {
                // otherwise fallback to the original string
                caps[0].to_string()
            }
        })
        .to_string()
}

/// A representation of the various ways to link items using GI-docgen
///
/// See <https://gnome.pages.gitlab.gnome.org/gi-docgen/linking.html> for details.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum GiDocgen {
    // C-identifier
    Id(String),
    // Alias to another type
    Alias(String),
    // Object Class
    Class { namespace: Option<String>, type_: String, },
    Const { namespace: Option<String>, type_: String, },
    Constructor { namespace: Option<String>, type_: String, name: String, },
    Callback { namespace: Option<String>, name: String, },
    Enum { namespace: Option<String>, type_: String, },
    Error { namespace: Option<String>, type_: String, },
    Flag { namespace: Option<String>, type_: String, },
    Func { namespace: Option<String>, type_: Option<String>, name: String, },
    Interface { namespace: Option<String>, type_: String, },
    Method {
        namespace: Option<String>,
        type_: String,
        name: String,
        is_instance: bool, // Whether `type_` ends with Class
    },
    Property { namespace: Option<String>, type_: String, name: String, },
    Signal { namespace: Option<String>, type_: String, name: String, },
    Struct { namespace: Option<String>, type_: String, },
    VFunc { namespace: Option<String>, type_: String, name: String, },
}

// Renders "Ns::Type" when a namespace is present, otherwise just the type name.
fn ns_type_to_doc(namespace: &Option<String>, type_: &str) -> String {
    if let Some(ns) = namespace {
        format!("{}::{}", ns, type_)
    } else {
        type_.to_string()
    }
}

impl GiDocgen {
    /// Renders this GI-docgen link as Rust documentation markup
    /// (`[`Name`][crate::Name]` reference-style links in this revision).
    pub fn rust_link(&self, env: &Env) -> String {
        let symbols = env.symbols.borrow();
        match self {
            GiDocgen::Enum { namespace, type_ } | GiDocgen::Error { namespace, type_ } => {
                if let Some(enum_info) =
                    env.analysis.enumerations.iter().find(|e| &e.name == type_)
                {
                    let sym = symbols.by_tid(enum_info.type_id).unwrap();
                    format!("[`{name}`][crate::{name}]", name = sym.full_rust_name())
                } else {
                    format!("`{}`", ns_type_to_doc(namespace, type_))
                }
            }
            GiDocgen::Class { namespace, type_ } | GiDocgen::Interface { namespace, type_ } => {
                if let Some((_, class_info)) =
                    env.analysis.objects.iter().find(|(_, o)| &o.name == type_)
                {
                    let sym = symbols.by_tid(class_info.type_id).unwrap();
                    format!("[`{name}`][crate::{name}]", name = sym.full_rust_name())
                } else {
                    format!("`{}`", ns_type_to_doc(namespace, type_))
                }
            }
            GiDocgen::Flag { namespace, type_ } => {
                if let Some(flag_info) = env.analysis.flags.iter().find(|e| &e.name == type_) {
                    let sym = symbols.by_tid(flag_info.type_id).unwrap();
                    format!("[`{name}`][crate::{name}]", name = sym.full_rust_name())
                } else {
                    format!("`{}`", ns_type_to_doc(namespace, type_))
                }
            }
            GiDocgen::Const { namespace, type_ } => {
                if let Some(const_info) = env.analysis.constants.iter().find(|c| &c.name == type_) {
                    let sym = symbols.by_tid(const_info.typ).unwrap();
                    format!("[`{name}`][crate::{name}]", name = sym.full_rust_name())
                } else {
                    format!("`{}`", ns_type_to_doc(namespace, type_))
                }
            }
            GiDocgen::Property {
                namespace,
                type_,
                name,
            } => {
                if let Some((_, class_info)) =
                    env.analysis.objects.iter().find(|(_, o)| &o.name == type_)
                {
                    let sym = symbols.by_tid(class_info.type_id).unwrap();
                    format!("`property::{}::{}`", sym.full_rust_name(), name)
                } else {
                    format!("`property::{}::{}`", ns_type_to_doc(namespace, type_), name)
                }
            }
            GiDocgen::Signal {
                namespace,
                type_,
                name,
            } => {
                if let Some((_, class_info)) =
                    env.analysis.objects.iter().find(|(_, o)| &o.name == type_)
                {
                    let sym = symbols.by_tid(class_info.type_id).unwrap();
                    format!("`signal::{}::{}`", sym.full_rust_name(), name)
                } else {
                    format!("`signal::{}::{}`", ns_type_to_doc(namespace, type_), name)
                }
            }
            GiDocgen::Id(c_name) => {
                if let Some(sym) = symbols.by_c_name(c_name) {
                    format!("[`{name}`][crate::{name}]", name = sym.full_rust_name())
                } else {
                    format!("`{}`", c_name)
                }
            }
            GiDocgen::Struct { namespace, type_ } => {
                if let Some((_, record_info)) =
                    env.analysis.records.iter().find(|(_, r)| &r.name == type_)
                {
                    let sym = symbols.by_tid(record_info.type_id).unwrap();
                    format!("[`{name}`][crate::{name}]", name = sym.full_rust_name())
                } else {
                    format!("`{}`", ns_type_to_doc(namespace, type_))
                }
            }
            GiDocgen::Constructor {
                namespace,
                type_,
                name,
            } => {
                if let Some((class_info, fn_info)) = env.analysis.find_object_by_function(
                    env,
                    |o| &o.name == type_,
                    |f| f.name == mangle_keywords(name),
                ) {
                    let sym = symbols.by_tid(class_info.type_id).unwrap();
                    let parent = sym.full_rust_name();
                    fn_info.doc_link(Some(&parent), None)
                } else {
                    format!("`{}::{}`", ns_type_to_doc(namespace, type_), name)
                }
            }
            GiDocgen::Func {
                namespace: _,
                type_,
                name,
            } => {
                // With an associated type, look the function up on that object;
                // otherwise try the global functions.
                if let Some(ty) = type_ {
                    if let Some((obj_info, fn_info)) = env.analysis.find_object_by_function(
                        env,
                        |o| &o.name == ty,
                        |f| f.name == mangle_keywords(name),
                    ) {
                        let sym = symbols.by_tid(obj_info.type_id).unwrap();
                        let parent = sym.full_rust_name();
                        fn_info.doc_link(Some(&parent), None)
                    } else {
                        format!("`{}`", name)
                    }
                } else if let Some(fn_info) =
                    env.analysis.find_global_function(env, |f| &f.name == name)
                {
                    fn_info.doc_link(None, None)
                } else {
                    format!("`{}`", name)
                }
            }
            GiDocgen::Alias(alias) => {
                if let Some((_, record_info)) =
                    env.analysis.records.iter().find(|(_, r)| &r.name == alias)
                {
                    let sym = symbols.by_tid(record_info.type_id).unwrap();
                    format!(
                        "alias::[`{name}`][crate::{name}]",
                        name = sym.full_rust_name()
                    )
                } else {
                    format!("`alias::{}`", alias)
                }
            }
            GiDocgen::Method {
                namespace,
                type_,
                name,
                is_instance: _,
            } => {
                // Methods may live on an object or on a record (boxed type).
                if let Some((obj_info, fn_info)) = env.analysis.find_object_by_function(
                    env,
                    |o| &o.name == type_,
                    |f| f.name == mangle_keywords(name),
                ) {
                    let sym = symbols.by_tid(obj_info.type_id).unwrap();
                    let (type_name, visible_type_name) = obj_info.generate_doc_link_info(fn_info);
                    fn_info.doc_link(
                        Some(&sym.full_rust_name().replace(type_, &type_name)),
                        Some(&visible_type_name),
                    )
                } else if let Some((record_info, fn_info)) = env.analysis.find_record_by_function(
                    env,
                    |r| &r.name == type_,
                    |f| f.name == mangle_keywords(name),
                ) {
                    let sym = symbols.by_tid(record_info.type_id).unwrap();
                    let parent = sym.full_rust_name();
                    fn_info.doc_link(Some(&parent), None)
                } else {
                    format!("`{}::{}()`", ns_type_to_doc(namespace, type_), name)
                }
            }
            GiDocgen::Callback { namespace, name } => {
                format!("`callback::{}`", ns_type_to_doc(namespace, name))
            }
            GiDocgen::VFunc {
                namespace,
                type_,
                name,
            } => {
                format!(
                    "`virtual-function::{}::{}`",
                    ns_type_to_doc(namespace, type_),
                    name
                )
            }
        }
    }
}

impl FromStr for GiDocgen {
    type Err = GiDocgenError;

    // We assume the string is contained inside a []
    fn from_str(item_link: &str) -> Result<Self, Self::Err> {
        let item_link = item_link.trim_start_matches('[').trim_end_matches(']');
        if let Some((link_type, link_details)) = item_link.split_once('@') {
            match link_type {
                "alias" => Ok(GiDocgen::Alias(link_details.to_string())),
                "class" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "class")?;
                    Ok(GiDocgen::Class { namespace, type_ })
                }
                "const" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "const")?;
                    Ok(GiDocgen::Const { namespace, type_ })
                }
                "ctor" => {
                    let (namespace, type_, name) =
                        namespace_type_method_from_details(link_details, "ctor", false)?;
                    Ok(GiDocgen::Constructor {
                        namespace,
                        type_: type_
                            .ok_or_else(|| GiDocgenError::BrokenLinkType("ctor".to_string()))?,
                        name,
                    })
                }
                "enum" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "enum")?;
                    Ok(GiDocgen::Enum { namespace, type_ })
                }
                "error" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "error")?;
                    Ok(GiDocgen::Error { namespace, type_ })
                }
                "flags" => {
                    let (namespace, type_) = namespace_type_from_details(link_details, "flags")?;
                    Ok(GiDocgen::Flag { namespace, type_ })
                }
                "func" => {
                    let (namespace, type_, name) =
                        namespace_type_method_from_details(link_details, "func", true)?;
                    Ok(GiDocgen::Func {
namespace, type_, name, }) } "iface" => { let (namespace, type_) = namespace_type_from_details(link_details, "iface")?; Ok(GiDocgen::Interface { namespace, type_ }) } "callback" => { let (namespace, name) = namespace_type_from_details(link_details, "callback")?; Ok(GiDocgen::Callback { namespace, name }) } "method" => { let (namespace, type_, name) = namespace_type_method_from_details(link_details, "method", false)?; let type_ = type_.ok_or_else(|| GiDocgenError::BrokenLinkType("method".to_string()))?; Ok(GiDocgen::Method { namespace, is_instance: type_.ends_with("Class"), type_, name, }) } "property" => { let (namespace, type_) = namespace_type_from_details(link_details, "property")?; let type_details: Vec<_> = type_.split(':').collect(); if type_details.len() < 2 || type_details[1].is_empty() { Err(GiDocgenError::BrokenLinkType("property".to_string())) } else { Ok(GiDocgen::Property { namespace, type_: type_details[0].to_string(), name: type_details[1].to_string(), }) } } "signal" => { let (namespace, type_) = namespace_type_from_details(link_details, "signal")?; let type_details: Vec<_> = type_.split("::").collect(); if type_details.len() < 2 || type_details[1].is_empty() { Err(GiDocgenError::BrokenLinkType("signal".to_string())) } else { Ok(GiDocgen::Signal { namespace, type_: type_details[0].to_string(), name: type_details[1].to_string(), }) } } "struct" => { let (namespace, type_) = namespace_type_from_details(link_details, "struct")?; Ok(GiDocgen::Struct { namespace, type_ }) } "vfunc" => { let (namespace, type_, name) = namespace_type_method_from_details(link_details, "vfunc", false)?; Ok(GiDocgen::VFunc { namespace, type_: type_ .ok_or_else(|| GiDocgenError::BrokenLinkType("vfunc".to_string()))?, name, }) } "id" => Ok(GiDocgen::Id(link_details.to_string())), e => Err(GiDocgenError::InvalidLinkType(e.to_string())), } } else { Err(GiDocgenError::InvalidLink) } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_link_alias() { assert_eq!( 
GiDocgen::from_str("[alias@Allocation]"), Ok(GiDocgen::Alias("Allocation".to_string())) ); } #[test] fn test_link_class() { assert_eq!( GiDocgen::from_str("[class@Widget]"), Ok(GiDocgen::Class { namespace: None, type_: "Widget".to_string(), }) ); assert_eq!( GiDocgen::from_str("[class@Gdk.Surface]"), Ok(GiDocgen::Class { namespace: Some("Gdk".to_string()), type_: "Surface".to_string(), }) ); assert_eq!( GiDocgen::from_str("[class@Gsk.RenderNode]"), Ok(GiDocgen::Class { namespace: Some("Gsk".to_string()), type_: "RenderNode".to_string(), }) ); assert_eq!( GiDocgen::from_str("[class@Gsk.RenderNode.test]"), Err(GiDocgenError::BrokenLinkType("class".to_string())) ); assert_eq!( GiDocgen::from_str("[class@Gsk.]"), Err(GiDocgenError::BrokenLinkType("class".to_string())) ); } #[test] fn test_link_id() { assert_eq!( GiDocgen::from_str("[id@gtk_widget_show]"), Ok(GiDocgen::Id("gtk_widget_show".to_string())) ); } #[test] fn test_link_const() { assert_eq!( GiDocgen::from_str("[const@Gdk.KEY_q]"), Ok(GiDocgen::Const { namespace: Some("Gdk".to_string()), type_: "KEY_q".to_string() }) ); } #[test] fn test_link_callback() { assert_eq!( GiDocgen::from_str("[callback@Gtk.MapListModelMapFunc]"), Ok(GiDocgen::Callback { namespace: Some("Gtk".to_string()), name: "MapListModelMapFunc".to_string() }) ) } #[test] fn test_link_enum() { assert_eq!( GiDocgen::from_str("[enum@Orientation]"), Ok(GiDocgen::Enum { namespace: None, type_: "Orientation".to_string() }) ); } #[test] fn test_link_error() { assert_eq!( GiDocgen::from_str("[error@Gtk.BuilderParseError]"), Ok(GiDocgen::Error { namespace: Some("Gtk".to_string()), type_: "BuilderParseError".to_string() }) ); } #[test] fn test_link_flags() { assert_eq!( GiDocgen::from_str("[flags@Gdk.ModifierType]"), Ok(GiDocgen::Flag { namespace: Some("Gdk".to_string()), type_: "ModifierType".to_string() }) ); } #[test] fn test_link_iface() { assert_eq!( GiDocgen::from_str("[iface@Gtk.Buildable]"), Ok(GiDocgen::Interface { namespace: 
Some("Gtk".to_string()), type_: "Buildable".to_string() }) ); } #[test] fn test_link_struct() { assert_eq!( GiDocgen::from_str("[struct@Gtk.TextIter]"), Ok(GiDocgen::Struct { namespace: Some("Gtk".to_string()), type_: "TextIter".to_string() }) ); } #[test] fn test_link_property() { assert_eq!( GiDocgen::from_str("[property@Gtk.Orientable:orientation]"), Ok(GiDocgen::Property { namespace: Some("Gtk".to_string()), type_: "Orientable".to_string(), name: "orientation".to_string(), }) ); assert_eq!( GiDocgen::from_str("[property@Gtk.Orientable]"), Err(GiDocgenError::BrokenLinkType("property".to_string())) ); assert_eq!( GiDocgen::from_str("[property@Gtk.Orientable:]"), Err(GiDocgenError::BrokenLinkType("property".to_string())) ); } #[test] fn test_link_signal() { assert_eq!( GiDocgen::from_str("[signal@Gtk.RecentManager::changed]"), Ok(GiDocgen::Signal { namespace: Some("Gtk".to_string()), type_: "RecentManager".to_string(), name: "changed".to_string(), }) ); assert_eq!( GiDocgen::from_str("[signal@Gtk.RecentManager]"), Err(GiDocgenError::BrokenLinkType("signal".to_string())) ); assert_eq!( GiDocgen::from_str("[signal@Gtk.RecentManager::]"), Err(GiDocgenError::BrokenLinkType("signal".to_string())) ); assert_eq!( GiDocgen::from_str("[signal@Gtk.RecentManager:]"), Err(GiDocgenError::BrokenLinkType("signal".to_string())) ); } #[test] fn test_link_vfunc() { assert_eq!( GiDocgen::from_str("[vfunc@Gtk.Widget.measure]"), Ok(GiDocgen::VFunc { namespace: Some("Gtk".to_string()), type_: "Widget".to_string(), name: "measure".to_string(), }) ); assert_eq!( GiDocgen::from_str("[vfunc@Widget.snapshot]"), Ok(GiDocgen::VFunc { namespace: None, type_: "Widget".to_string(), name: "snapshot".to_string(), }) ); } #[test] fn test_link_ctor() { assert_eq!( GiDocgen::from_str("[ctor@Gtk.Box.new]"), Ok(GiDocgen::Constructor { namespace: Some("Gtk".to_string()), type_: "Box".to_string(), name: "new".to_string(), }) ); assert_eq!( GiDocgen::from_str("[ctor@Button.new_with_label]"), 
Ok(GiDocgen::Constructor { namespace: None, type_: "Button".to_string(), name: "new_with_label".to_string(), }) ); } #[test] fn test_link_func() { assert_eq!( GiDocgen::from_str("[func@Gtk.init]"), Ok(GiDocgen::Func { namespace: Some("Gtk".to_string()), type_: None, name: "init".to_string(), }) ); assert_eq!( GiDocgen::from_str("[func@show_uri]"), Ok(GiDocgen::Func { namespace: None, type_: None, name: "show_uri".to_string(), }) ); assert_eq!( GiDocgen::from_str("[func@Gtk.Window.list_toplevels]"), Ok(GiDocgen::Func { namespace: Some("Gtk".to_string()), type_: Some("Window".to_string()), name: "list_toplevels".to_string(), }) ); } #[test] fn test_link_method() { assert_eq!( GiDocgen::from_str("[method@Gtk.Widget.show]"), Ok(GiDocgen::Method { namespace: Some("Gtk".to_string()), type_: "Widget".to_string(), name: "show".to_string(), is_instance: false, }) ); assert_eq!( GiDocgen::from_str("[method@WidgetClass.add_binding]"), Ok(GiDocgen::Method { namespace: None, type_: "WidgetClass".to_string(), name: "add_binding".to_string(), is_instance: true, }) ); } }
mod ed25519; pub mod types; use self::ed25519::ED25519CryptoType; use self::types::{ KeyInfo, MyDidInfo, TheirDidInfo, Key, Did }; use utils::crypto::base58::Base58; use utils::crypto::verkey_builder::build_full_verkey; use errors::common::CommonError; use errors::signus::SignusError; use std::collections::HashMap; use std::str; pub const DEFAULT_CRYPTO_TYPE: &'static str = "ed25519"; trait CryptoType { fn encrypt(&self, private_key: &[u8], public_key: &[u8], doc: &[u8], nonce: &[u8]) -> Result<Vec<u8>, CommonError>; fn decrypt(&self, private_key: &[u8], public_key: &[u8], doc: &[u8], nonce: &[u8]) -> Result<Vec<u8>, CommonError>; fn gen_nonce(&self) -> Vec<u8>; fn create_key(&self, seed: Option<&[u8]>) -> Result<(Vec<u8>, Vec<u8>), CommonError>; fn validate_key(&self, vk: &[u8]) -> Result<(), CommonError>; fn sign(&self, sk: &[u8], doc: &[u8]) -> Result<Vec<u8>, CommonError>; fn verify(&self, vk: &[u8], doc: &[u8], signature: &[u8]) -> Result<bool, CommonError>; fn encrypt_sealed(&self, vk: &[u8], doc: &[u8]) -> Result<Vec<u8>, CommonError>; fn decrypt_sealed(&self, vk: &[u8], sk: &[u8], doc: &[u8]) -> Result<Vec<u8>, CommonError>; } pub struct SignusService { crypto_types: HashMap<&'static str, Box<CryptoType>> } impl SignusService { pub fn new() -> SignusService { let mut crypto_types: HashMap<&str, Box<CryptoType>> = HashMap::new(); crypto_types.insert(DEFAULT_CRYPTO_TYPE, Box::new(ED25519CryptoType::new())); SignusService { crypto_types: crypto_types } } pub fn create_key(&self, key_info: &KeyInfo) -> Result<Key, SignusError> { let crypto_type_name = key_info.crypto_type .as_ref() .map(String::as_str) .unwrap_or(DEFAULT_CRYPTO_TYPE); if !self.crypto_types.contains_key(crypto_type_name) { return Err( SignusError::UnknownCryptoError( format!("KeyInfo contains unknown crypto: {}", crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let seed = key_info.seed.as_ref().map(String::as_bytes); let (vk, sk) = 
crypto_type.create_key(seed)?; let vk = Base58::encode(&vk); let sk = Base58::encode(&sk); if !crypto_type_name.eq(DEFAULT_CRYPTO_TYPE) { // Use suffix with crypto type name to store crypto type inside of vk let vk = format!("{}:{}", vk, crypto_type_name); } Ok(Key::new(vk, sk)) } pub fn create_my_did(&self, my_did_info: &MyDidInfo) -> Result<(Did, Key), SignusError> { let crypto_type_name = my_did_info.crypto_type .as_ref() .map(String::as_str) .unwrap_or(DEFAULT_CRYPTO_TYPE); if !self.crypto_types.contains_key(crypto_type_name) { return Err( SignusError::UnknownCryptoError( format!("MyDidInfo info contains unknown crypto: {}", crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let seed = my_did_info.seed.as_ref().map(String::as_bytes); let (vk, sk) = crypto_type.create_key(seed)?; let did = match my_did_info.did { Some(ref did) => Base58::decode(did)?, _ if my_did_info.cid == Some(true) => vk.clone(), _ => vk[0..16].to_vec() }; let did = Base58::encode(&did); let vk = Base58::encode(&vk); let sk = Base58::encode(&sk); if !crypto_type_name.eq(DEFAULT_CRYPTO_TYPE) { // Use suffix with crypto type name to store crypto type inside of vk let vk = format!("{}:{}", vk, crypto_type_name); } Ok((Did::new(did, vk.clone()), Key::new(vk, sk))) } pub fn create_their_did(&self, their_did_info: &TheirDidInfo) -> Result<Did, SignusError> { // Check did is correct Base58 Base58::decode(&their_did_info.did)?; let verkey = build_full_verkey(their_did_info.did.as_str(), their_did_info.verkey.as_ref().map(String::as_str))?; self.validate_key(&verkey)?; let did = Did::new(their_did_info.did.clone(), verkey); Ok(did) } pub fn sign(&self, my_key: &Key, doc: &[u8]) -> Result<Vec<u8>, SignusError> { let crypto_type_name = if my_key.verkey.contains(":") { let splits: Vec<&str> = my_key.verkey.split(":").collect(); splits[1] } else { DEFAULT_CRYPTO_TYPE }; if !self.crypto_types.contains_key(crypto_type_name) { return Err( 
SignusError::UnknownCryptoError( format!("Trying to sign message with unknown crypto: {}", crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let my_sk = Base58::decode(my_key.signkey.as_str())?; let signature = crypto_type.sign(&my_sk, doc)?; Ok(signature) } pub fn verify(&self, their_vk: &str, msg: &[u8], signature: &[u8]) -> Result<bool, SignusError> { let (their_vk, crypto_type_name) = if their_vk.contains(":") { let splits: Vec<&str> = their_vk.split(":").collect(); (splits[0], splits[1]) } else { (their_vk, DEFAULT_CRYPTO_TYPE) }; if !self.crypto_types.contains_key(crypto_type_name) { return Err(SignusError::UnknownCryptoError( format!("Trying to verify message with unknown crypto: {}", crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let their_vk = Base58::decode(&their_vk)?; Ok(crypto_type.verify(&their_vk, msg, signature)?) } pub fn encrypt(&self, my_key: &Key, their_vk: &str, doc: &[u8]) -> Result<(Vec<u8>, Vec<u8>), SignusError> { let (my_vk, crypto_type_name) = if my_key.verkey.contains(":") { let splits: Vec<&str> = my_key.verkey.split(":").collect(); (splits[0], splits[1]) } else { (my_key.verkey.as_str(), DEFAULT_CRYPTO_TYPE) }; let (their_vk, their_crypto_type_name) = if their_vk.contains(":") { let splits: Vec<&str> = their_vk.split(":").collect(); (splits[0], splits[1]) } else { (their_vk, DEFAULT_CRYPTO_TYPE) }; if !self.crypto_types.contains_key(&crypto_type_name) { return Err(SignusError::UnknownCryptoError(format!("Trying to encrypt message with unknown crypto: {}", crypto_type_name))); } if !crypto_type_name.eq(their_crypto_type_name) { // TODO: FIXME: Use dedicated error code return Err(SignusError::UnknownCryptoError( format!("My key crypto type is incompatible with their key crypto type: {} {}", crypto_type_name, their_crypto_type_name))); } let crypto_type = self.crypto_types.get(&crypto_type_name).unwrap(); let my_sk = 
Base58::decode(my_key.signkey.as_str())?; let their_vk = Base58::decode(their_vk)?; let nonce = crypto_type.gen_nonce(); let encrypted_doc = crypto_type.encrypt(&my_sk, &their_vk, doc, &nonce)?; Ok((encrypted_doc, nonce)) } pub fn decrypt(&self, my_key: &Key, their_vk: &str, doc: &[u8], nonce: &[u8]) -> Result<Vec<u8>, SignusError> { let (my_vk, crypto_type_name) = if my_key.verkey.contains(":") { let splits: Vec<&str> = my_key.verkey.split(":").collect(); (splits[0], splits[1]) } else { (my_key.verkey.as_str(), DEFAULT_CRYPTO_TYPE) }; let (their_vk, their_crypto_type_name) = if their_vk.contains(":") { let splits: Vec<&str> = their_vk.split(":").collect(); (splits[0], splits[1]) } else { (their_vk, DEFAULT_CRYPTO_TYPE) }; if !self.crypto_types.contains_key(&crypto_type_name) { return Err(SignusError::UnknownCryptoError( format!("Trying to decrypt message with unknown crypto: {}", crypto_type_name))); } if !crypto_type_name.eq(their_crypto_type_name) { // TODO: FIXME: Use dedicated error code return Err(SignusError::UnknownCryptoError( format!("My key crypto type is incompatible with their key crypto type: {} {}", crypto_type_name, their_crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let my_sk = Base58::decode(&my_key.signkey)?; let their_vk = Base58::decode(their_vk)?; let decrypted_doc = crypto_type.decrypt(&my_sk, &their_vk, &doc, &nonce)?; Ok(decrypted_doc) } pub fn encrypt_sealed(&self, their_vk: &str, doc: &[u8]) -> Result<Vec<u8>, SignusError> { let (their_vk, crypto_type_name) = if their_vk.contains(":") { let splits: Vec<&str> = their_vk.split(":").collect(); (splits[0], splits[1]) } else { (their_vk, DEFAULT_CRYPTO_TYPE) }; if !self.crypto_types.contains_key(&crypto_type_name) { return Err(SignusError::UnknownCryptoError(format!("Trying to encrypt sealed message with unknown crypto: {}", crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let their_vk = 
Base58::decode(their_vk)?; let encrypted_doc = crypto_type.encrypt_sealed(&their_vk, doc)?; Ok(encrypted_doc) } pub fn decrypt_sealed(&self, my_key: &Key, doc: &[u8]) -> Result<Vec<u8>, SignusError> { let (my_vk, crypto_type_name) = if my_key.verkey.contains(":") { let splits: Vec<&str> = my_key.verkey.split(":").collect(); (splits[0], splits[1]) } else { (my_key.verkey.as_str(), DEFAULT_CRYPTO_TYPE) }; if !self.crypto_types.contains_key(&crypto_type_name) { return Err(SignusError::UnknownCryptoError( format!("Trying to decrypt sealed message with unknown crypto: {}", crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let my_vk = Base58::decode(my_vk)?; let my_sk = Base58::decode(my_key.signkey.as_str())?; let decrypted_doc = crypto_type.decrypt_sealed(&my_vk, &my_sk, doc)?; Ok(decrypted_doc) } pub fn validate_key(&self, vk: &str) -> Result<(), SignusError> { let (vk, crypto_type_name) = if vk.contains(":") { let splits: Vec<&str> = vk.split(":").collect(); (splits[0], splits[1]) } else { (vk, DEFAULT_CRYPTO_TYPE) }; if !self.crypto_types.contains_key(&crypto_type_name) { return Err(SignusError::UnknownCryptoError(format!("Trying to use key with unknown crypto: {}", crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let vk = Base58::decode(vk)?; crypto_type.validate_key(&vk)?; Ok(()) } pub fn validate_did(&self, did: &str) -> Result<(), SignusError> { let did = Base58::decode(did)?; if did.len() != 16 && did.len() != 32 { return Err(SignusError::CommonError( CommonError::InvalidStructure( format!("Trying to use did with unexpected len: {}", did.len())))); } Ok(()) } } #[cfg(test)] mod tests { use super::*; use services::signus::types::MyDidInfo; #[test] fn create_my_did_with_works_for_empty_info() { let service = SignusService::new(); let did_info = MyDidInfo::new(None, None, None, None); service.create_my_did(&did_info).unwrap(); } #[test] fn create_my_did_works_for_passed_did() { let 
service = SignusService::new(); let did = "NcYxiDXkpYi6ov5FcYDi1e"; let did_info = MyDidInfo::new(Some(did.clone().to_string()), None, None, None); let (my_did, _) = service.create_my_did(&did_info).unwrap(); assert_eq!(did, my_did.did); } #[test] fn create_my_did_not_works_for_invalid_crypto_type() { let service = SignusService::new(); let did = Some("NcYxiDXkpYi6ov5FcYDi1e".to_string()); let crypto_type = Some("type".to_string()); let did_info = MyDidInfo::new(did.clone(), None, crypto_type, None); assert!(service.create_my_did(&did_info).is_err()); } #[test] fn create_my_did_works_for_seed() { let service = SignusService::new(); let did = Some("NcYxiDXkpYi6ov5FcYDi1e".to_string()); let seed = Some("00000000000000000000000000000My1".to_string()); let did_info_with_seed = MyDidInfo::new(did.clone(), seed, None, None); let did_info_without_seed = MyDidInfo::new(did.clone(), None, None, None); let (did_with_seed, _) = service.create_my_did(&did_info_with_seed).unwrap(); let (did_without_seed, _) = service.create_my_did(&did_info_without_seed).unwrap(); assert_ne!(did_with_seed.verkey, did_without_seed.verkey) } #[test] fn create_their_did_works_without_verkey() { let service = SignusService::new(); let did = "CnEDk9HrMnmiHXEV1WFgbVCRteYnPqsJwrTdcZaNhFVW"; let their_did_info = TheirDidInfo::new(did.to_string(), None); let their_did = service.create_their_did(&their_did_info).unwrap(); assert_eq!(did.to_string(), their_did.did); assert_eq!(did.to_string(), their_did.verkey); } #[test] fn create_their_did_works_for_full_verkey() { let service = SignusService::new(); let did = "8wZcEriaNLNKtteJvx7f8i"; let verkey = "5L2HBnzbu6Auh2pkDRbFt5f4prvgE2LzknkuYLsKkacp"; let their_did_info = TheirDidInfo::new(did.to_string(), Some(verkey.to_string())); let their_did = service.create_their_did(&their_did_info).unwrap(); assert_eq!(did.to_string(), their_did.did); assert_eq!(verkey, their_did.verkey); } #[test] fn create_their_did_works_for_abbreviated_verkey() { let service = 
SignusService::new(); let did = "8wZcEriaNLNKtteJvx7f8i"; let their_did_info = TheirDidInfo::new(did.to_string(), Some("~NcYxiDXkpYi6ov5FcYDi1e".to_string())); let their_did = service.create_their_did(&their_did_info).unwrap(); assert_eq!(did.to_string(), their_did.did); assert_eq!("5L2HBnzbu6Auh2pkDRbFt5f4prvgE2LzknkuYLsKkacp", their_did.verkey); } #[test] fn sign_works() { let service = SignusService::new(); let did_info = MyDidInfo::new(None, None, None, None); let message = r#"message"#; let (my_did, my_key) = service.create_my_did(&did_info).unwrap(); service.sign(&my_key, message.as_bytes()).unwrap(); } #[test] fn sign_works_for_invalid_signkey() { let service = SignusService::new(); let message = r#"message"#; let my_key = Key::new("8wZcEriaNLNKtteJvx7f8i".to_string(), "5L2HBnzbu6Auh2pkDRbFt5f4prvgE2LzknkuYLsKkacp".to_string()); assert!(service.sign(&my_key, message.as_bytes()).is_err()); } #[test] fn sign_verify_works() { let service = SignusService::new(); let did_info = MyDidInfo::new(None, None, None, None); let message = r#"message"#; let (my_did, my_key) = service.create_my_did(&did_info).unwrap(); let signature = service.sign(&my_key, message.as_bytes()).unwrap(); let valid = service.verify(&my_did.verkey, message.as_bytes(), &signature).unwrap(); assert!(valid); } #[test] fn sign_verify_works_for_verkey_contained_crypto_type() { let service = SignusService::new(); let did_info = MyDidInfo::new(None, None, None, None); let message = r#"message"#; let (my_did, my_key) = service.create_my_did(&did_info).unwrap(); let signature = service.sign(&my_key, message.as_bytes()).unwrap(); let verkey = my_did.verkey + ":ed25519"; let valid = service.verify(&verkey, message.as_bytes(), &signature).unwrap(); assert!(valid); } #[test] fn sign_verify_works_for_verkey_contained_invalid_crypto_type() { let service = SignusService::new(); let did_info = MyDidInfo::new(None, None, None, None); let message = r#"message"#; let (my_did, my_key) = 
service.create_my_did(&did_info).unwrap(); let signature = service.sign(&my_key, message.as_bytes()).unwrap(); let verkey = format!("crypto_type:{}", my_did.verkey); assert!(service.verify(&verkey, message.as_bytes(), &signature).is_err()); } #[test] fn verify_not_works_for_invalid_verkey() { let service = SignusService::new(); let did_info = MyDidInfo::new(None, None, None, None); let message = r#"message"#; let (my_did, my_key) = service.create_my_did(&did_info).unwrap(); let signature = service.sign(&my_key, message.as_bytes()).unwrap(); let verkey = "AnnxV4t3LUHKZaxVQDWoVaG44NrGmeDYMA4Gz6C2tCZd"; let valid = service.verify(verkey, message.as_bytes(), &signature).unwrap(); assert_eq!(false, valid); } #[test] fn encrypt_works() { let service = SignusService::new(); let msg = "some message"; let did_info = MyDidInfo::new(None, None, None, None); let (my_did, my_key) = service.create_my_did(&did_info).unwrap(); let (their_did, their_key) = service.create_my_did(&did_info.clone()).unwrap(); let their_did = Did::new(their_did.did, their_did.verkey); service.encrypt(&my_key, &their_did.verkey, msg.as_bytes()).unwrap(); } #[test] fn encrypt_decrypt_works() { let service = SignusService::new(); let msg = "some message"; let did_info = MyDidInfo::new(None, None, None, None); let (my_did, my_key) = service.create_my_did(&did_info).unwrap(); let my_did_for_encrypt = my_did.clone(); let my_key_for_encrypt = my_key.clone(); let their_did_for_decrypt = Did::new(my_did.did, my_did.verkey); let (their_did, their_key) = service.create_my_did(&did_info.clone()).unwrap(); let my_did_for_decrypt = their_did.clone(); let my_key_for_decrypt = their_key.clone(); let their_did_for_encrypt = Did::new(their_did.did, their_did.verkey); let (encrypted_message, noce) = service.encrypt(&my_key_for_encrypt, &their_did_for_encrypt.verkey, msg.as_bytes()).unwrap(); let decrypted_message = service.decrypt(&my_key_for_decrypt, &their_did_for_decrypt.verkey, &encrypted_message, &noce).unwrap(); 
assert_eq!(msg.as_bytes().to_vec(), decrypted_message); } #[test] fn encrypt_decrypt_works_for_verkey_contained_crypto_type() { let service = SignusService::new(); let msg = "some message"; let did_info = MyDidInfo::new(None, None, None, None); let (my_did, my_key) = service.create_my_did(&did_info).unwrap(); let my_did_for_encrypt = my_did.clone(); let my_key_for_encrypt = my_key.clone(); let their_did_for_decrypt = Did::new(my_did.did, my_did.verkey); let (their_did, their_key) = service.create_my_did(&did_info.clone()).unwrap(); let my_did_for_decrypt = their_did.clone(); let my_key_for_decrypt = their_key.clone(); let their_did_for_encrypt = Did::new(their_did.did, their_did.verkey); let (encrypted_message, noce) = service.encrypt(&my_key_for_encrypt, &their_did_for_encrypt.verkey, msg.as_bytes()).unwrap(); let verkey = their_did_for_decrypt.verkey + ":ed25519"; let decrypted_message = service.decrypt(&my_key_for_decrypt, &verkey, &encrypted_message, &noce).unwrap(); assert_eq!(msg.as_bytes().to_vec(), decrypted_message); } #[test] fn encrypt_sealed_works() { let service = SignusService::new(); let msg = "some message"; let did_info = MyDidInfo::new(None, None, None, None); let (did, key) = service.create_my_did(&did_info.clone()).unwrap(); let did = Did::new(did.did, did.verkey); service.encrypt_sealed(&did.verkey, msg.as_bytes()).unwrap(); } #[test] fn encrypt_decrypt_sealed_works() { let service = SignusService::new(); let msg = "some message".as_bytes(); let did_info = MyDidInfo::new(None, None, None, None); let (did, key) = service.create_my_did(&did_info.clone()).unwrap(); let encrypt_did = Did::new(did.did.clone(), did.verkey.clone()); let encrypted_message = service.encrypt_sealed(&encrypt_did.verkey, msg).unwrap(); let decrypted_message = service.decrypt_sealed(&key, &encrypted_message).unwrap(); assert_eq!(msg, decrypted_message.as_slice()); } } Temporary disable checking key in create_their_did. 
Signed-off-by: Sergey Minaev <322af3f2df10918c6ef5280f56be0b711278b1ae@dsr-company.com> mod ed25519; pub mod types; use self::ed25519::ED25519CryptoType; use self::types::{ KeyInfo, MyDidInfo, TheirDidInfo, Key, Did }; use utils::crypto::base58::Base58; use utils::crypto::verkey_builder::build_full_verkey; use errors::common::CommonError; use errors::signus::SignusError; use std::collections::HashMap; use std::str; pub const DEFAULT_CRYPTO_TYPE: &'static str = "ed25519"; trait CryptoType { fn encrypt(&self, private_key: &[u8], public_key: &[u8], doc: &[u8], nonce: &[u8]) -> Result<Vec<u8>, CommonError>; fn decrypt(&self, private_key: &[u8], public_key: &[u8], doc: &[u8], nonce: &[u8]) -> Result<Vec<u8>, CommonError>; fn gen_nonce(&self) -> Vec<u8>; fn create_key(&self, seed: Option<&[u8]>) -> Result<(Vec<u8>, Vec<u8>), CommonError>; fn validate_key(&self, vk: &[u8]) -> Result<(), CommonError>; fn sign(&self, sk: &[u8], doc: &[u8]) -> Result<Vec<u8>, CommonError>; fn verify(&self, vk: &[u8], doc: &[u8], signature: &[u8]) -> Result<bool, CommonError>; fn encrypt_sealed(&self, vk: &[u8], doc: &[u8]) -> Result<Vec<u8>, CommonError>; fn decrypt_sealed(&self, vk: &[u8], sk: &[u8], doc: &[u8]) -> Result<Vec<u8>, CommonError>; } pub struct SignusService { crypto_types: HashMap<&'static str, Box<CryptoType>> } impl SignusService { pub fn new() -> SignusService { let mut crypto_types: HashMap<&str, Box<CryptoType>> = HashMap::new(); crypto_types.insert(DEFAULT_CRYPTO_TYPE, Box::new(ED25519CryptoType::new())); SignusService { crypto_types: crypto_types } } pub fn create_key(&self, key_info: &KeyInfo) -> Result<Key, SignusError> { let crypto_type_name = key_info.crypto_type .as_ref() .map(String::as_str) .unwrap_or(DEFAULT_CRYPTO_TYPE); if !self.crypto_types.contains_key(crypto_type_name) { return Err( SignusError::UnknownCryptoError( format!("KeyInfo contains unknown crypto: {}", crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let 
seed = key_info.seed.as_ref().map(String::as_bytes); let (vk, sk) = crypto_type.create_key(seed)?; let vk = Base58::encode(&vk); let sk = Base58::encode(&sk); if !crypto_type_name.eq(DEFAULT_CRYPTO_TYPE) { // Use suffix with crypto type name to store crypto type inside of vk let vk = format!("{}:{}", vk, crypto_type_name); } Ok(Key::new(vk, sk)) } pub fn create_my_did(&self, my_did_info: &MyDidInfo) -> Result<(Did, Key), SignusError> { let crypto_type_name = my_did_info.crypto_type .as_ref() .map(String::as_str) .unwrap_or(DEFAULT_CRYPTO_TYPE); if !self.crypto_types.contains_key(crypto_type_name) { return Err( SignusError::UnknownCryptoError( format!("MyDidInfo info contains unknown crypto: {}", crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let seed = my_did_info.seed.as_ref().map(String::as_bytes); let (vk, sk) = crypto_type.create_key(seed)?; let did = match my_did_info.did { Some(ref did) => Base58::decode(did)?, _ if my_did_info.cid == Some(true) => vk.clone(), _ => vk[0..16].to_vec() }; let did = Base58::encode(&did); let vk = Base58::encode(&vk); let sk = Base58::encode(&sk); if !crypto_type_name.eq(DEFAULT_CRYPTO_TYPE) { // Use suffix with crypto type name to store crypto type inside of vk let vk = format!("{}:{}", vk, crypto_type_name); } Ok((Did::new(did, vk.clone()), Key::new(vk, sk))) } pub fn create_their_did(&self, their_did_info: &TheirDidInfo) -> Result<Did, SignusError> { // Check did is correct Base58 Base58::decode(&their_did_info.did)?; let verkey = build_full_verkey(their_did_info.did.as_str(), their_did_info.verkey.as_ref().map(String::as_str))?; //TODO FIXME self.validate_key(&verkey)?; let did = Did::new(their_did_info.did.clone(), verkey); Ok(did) } pub fn sign(&self, my_key: &Key, doc: &[u8]) -> Result<Vec<u8>, SignusError> { let crypto_type_name = if my_key.verkey.contains(":") { let splits: Vec<&str> = my_key.verkey.split(":").collect(); splits[1] } else { DEFAULT_CRYPTO_TYPE }; if 
!self.crypto_types.contains_key(crypto_type_name) { return Err( SignusError::UnknownCryptoError( format!("Trying to sign message with unknown crypto: {}", crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let my_sk = Base58::decode(my_key.signkey.as_str())?; let signature = crypto_type.sign(&my_sk, doc)?; Ok(signature) } pub fn verify(&self, their_vk: &str, msg: &[u8], signature: &[u8]) -> Result<bool, SignusError> { let (their_vk, crypto_type_name) = if their_vk.contains(":") { let splits: Vec<&str> = their_vk.split(":").collect(); (splits[0], splits[1]) } else { (their_vk, DEFAULT_CRYPTO_TYPE) }; if !self.crypto_types.contains_key(crypto_type_name) { return Err(SignusError::UnknownCryptoError( format!("Trying to verify message with unknown crypto: {}", crypto_type_name))); } let crypto_type = self.crypto_types.get(crypto_type_name).unwrap(); let their_vk = Base58::decode(&their_vk)?; Ok(crypto_type.verify(&their_vk, msg, signature)?) } pub fn encrypt(&self, my_key: &Key, their_vk: &str, doc: &[u8]) -> Result<(Vec<u8>, Vec<u8>), SignusError> { let (my_vk, crypto_type_name) = if my_key.verkey.contains(":") { let splits: Vec<&str> = my_key.verkey.split(":").collect(); (splits[0], splits[1]) } else { (my_key.verkey.as_str(), DEFAULT_CRYPTO_TYPE) }; let (their_vk, their_crypto_type_name) = if their_vk.contains(":") { let splits: Vec<&str> = their_vk.split(":").collect(); (splits[0], splits[1]) } else { (their_vk, DEFAULT_CRYPTO_TYPE) }; if !self.crypto_types.contains_key(&crypto_type_name) { return Err(SignusError::UnknownCryptoError(format!("Trying to encrypt message with unknown crypto: {}", crypto_type_name))); } if !crypto_type_name.eq(their_crypto_type_name) { // TODO: FIXME: Use dedicated error code return Err(SignusError::UnknownCryptoError( format!("My key crypto type is incompatible with their key crypto type: {} {}", crypto_type_name, their_crypto_type_name))); } let crypto_type = 
self.crypto_types.get(&crypto_type_name).unwrap();

        let my_sk = Base58::decode(my_key.signkey.as_str())?;
        let their_vk = Base58::decode(their_vk)?;
        let nonce = crypto_type.gen_nonce();

        let encrypted_doc = crypto_type.encrypt(&my_sk, &their_vk, doc, &nonce)?;

        Ok((encrypted_doc, nonce))
    }

    // Authenticated decryption: the mirror of `encrypt`. `my_key` is the
    // recipient's key pair, `their_vk` the sender's verkey, `nonce` the value
    // produced during encryption.
    pub fn decrypt(&self, my_key: &Key, their_vk: &str, doc: &[u8], nonce: &[u8]) -> Result<Vec<u8>, SignusError> {
        let (my_vk, crypto_type_name) = if my_key.verkey.contains(":") {
            let splits: Vec<&str> = my_key.verkey.split(":").collect();
            (splits[0], splits[1])
        } else {
            (my_key.verkey.as_str(), DEFAULT_CRYPTO_TYPE)
        };

        let (their_vk, their_crypto_type_name) = if their_vk.contains(":") {
            let splits: Vec<&str> = their_vk.split(":").collect();
            (splits[0], splits[1])
        } else {
            (their_vk, DEFAULT_CRYPTO_TYPE)
        };

        if !self.crypto_types.contains_key(&crypto_type_name) {
            return Err(SignusError::UnknownCryptoError(
                format!("Trying to decrypt message with unknown crypto: {}", crypto_type_name)));
        }

        if !crypto_type_name.eq(their_crypto_type_name) {
            // TODO: FIXME: Use dedicated error code
            return Err(SignusError::UnknownCryptoError(
                format!("My key crypto type is incompatible with their key crypto type: {} {}",
                        crypto_type_name,
                        their_crypto_type_name)));
        }

        let crypto_type = self.crypto_types.get(crypto_type_name).unwrap();

        let my_sk = Base58::decode(&my_key.signkey)?;
        let their_vk = Base58::decode(their_vk)?;

        let decrypted_doc = crypto_type.decrypt(&my_sk, &their_vk, &doc, &nonce)?;

        Ok(decrypted_doc)
    }

    // Anonymous ("sealed box") encryption to `their_vk`: no sender key, no nonce.
    pub fn encrypt_sealed(&self, their_vk: &str, doc: &[u8]) -> Result<Vec<u8>, SignusError> {
        let (their_vk, crypto_type_name) = if their_vk.contains(":") {
            let splits: Vec<&str> = their_vk.split(":").collect();
            (splits[0], splits[1])
        } else {
            (their_vk, DEFAULT_CRYPTO_TYPE)
        };

        if !self.crypto_types.contains_key(&crypto_type_name) {
            return Err(SignusError::UnknownCryptoError(format!("Trying to encrypt sealed message with unknown crypto: {}", crypto_type_name)));
        }

        let crypto_type =
self.crypto_types.get(crypto_type_name).unwrap();

        let their_vk = Base58::decode(their_vk)?;

        let encrypted_doc = crypto_type.encrypt_sealed(&their_vk, doc)?;

        Ok(encrypted_doc)
    }

    // Anonymous ("sealed box") decryption with the recipient's own key pair.
    pub fn decrypt_sealed(&self, my_key: &Key, doc: &[u8]) -> Result<Vec<u8>, SignusError> {
        let (my_vk, crypto_type_name) = if my_key.verkey.contains(":") {
            let splits: Vec<&str> = my_key.verkey.split(":").collect();
            (splits[0], splits[1])
        } else {
            (my_key.verkey.as_str(), DEFAULT_CRYPTO_TYPE)
        };

        if !self.crypto_types.contains_key(&crypto_type_name) {
            return Err(SignusError::UnknownCryptoError(
                format!("Trying to decrypt sealed message with unknown crypto: {}", crypto_type_name)));
        }

        let crypto_type = self.crypto_types.get(crypto_type_name).unwrap();

        let my_vk = Base58::decode(my_vk)?;
        let my_sk = Base58::decode(my_key.signkey.as_str())?;

        let decrypted_doc = crypto_type.decrypt_sealed(&my_vk, &my_sk, doc)?;

        Ok(decrypted_doc)
    }

    // Checks that `vk` is valid Base58 and acceptable to the selected crypto type.
    pub fn validate_key(&self, vk: &str) -> Result<(), SignusError> {
        let (vk, crypto_type_name) = if vk.contains(":") {
            let splits: Vec<&str> = vk.split(":").collect();
            (splits[0], splits[1])
        } else {
            (vk, DEFAULT_CRYPTO_TYPE)
        };

        if !self.crypto_types.contains_key(&crypto_type_name) {
            return Err(SignusError::UnknownCryptoError(format!("Trying to use key with unknown crypto: {}", crypto_type_name)));
        }

        let crypto_type = self.crypto_types.get(crypto_type_name).unwrap();

        let vk = Base58::decode(vk)?;
        crypto_type.validate_key(&vk)?;
        Ok(())
    }

    // A DID must decode to exactly 16 bytes (abbreviated form) or 32 bytes
    // (full verkey form).
    pub fn validate_did(&self, did: &str) -> Result<(), SignusError> {
        let did = Base58::decode(did)?;

        if did.len() != 16 && did.len() != 32 {
            return Err(SignusError::CommonError(
                CommonError::InvalidStructure(
                    format!("Trying to use did with unexpected len: {}", did.len()))));
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use services::signus::types::MyDidInfo;

    #[test]
    fn create_my_did_with_works_for_empty_info() {
        let service = SignusService::new();
        let did_info = MyDidInfo::new(None, None, None, None);
service.create_my_did(&did_info).unwrap();
    }

    #[test]
    fn create_my_did_works_for_passed_did() {
        let service = SignusService::new();

        let did = "NcYxiDXkpYi6ov5FcYDi1e";
        let did_info = MyDidInfo::new(Some(did.clone().to_string()), None, None, None);

        let (my_did, _) = service.create_my_did(&did_info).unwrap();
        assert_eq!(did, my_did.did);
    }

    #[test]
    fn create_my_did_not_works_for_invalid_crypto_type() {
        let service = SignusService::new();

        let did = Some("NcYxiDXkpYi6ov5FcYDi1e".to_string());
        let crypto_type = Some("type".to_string());

        let did_info = MyDidInfo::new(did.clone(), None, crypto_type, None);
        assert!(service.create_my_did(&did_info).is_err());
    }

    #[test]
    fn create_my_did_works_for_seed() {
        let service = SignusService::new();

        let did = Some("NcYxiDXkpYi6ov5FcYDi1e".to_string());
        let seed = Some("00000000000000000000000000000My1".to_string());

        let did_info_with_seed = MyDidInfo::new(did.clone(), seed, None, None);
        let did_info_without_seed = MyDidInfo::new(did.clone(), None, None, None);

        let (did_with_seed, _) = service.create_my_did(&did_info_with_seed).unwrap();
        let (did_without_seed, _) = service.create_my_did(&did_info_without_seed).unwrap();

        // Same DID, different seeds: the derived verkeys must differ.
        assert_ne!(did_with_seed.verkey, did_without_seed.verkey)
    }

    #[test]
    fn create_their_did_works_without_verkey() {
        let service = SignusService::new();
        let did = "CnEDk9HrMnmiHXEV1WFgbVCRteYnPqsJwrTdcZaNhFVW";

        let their_did_info = TheirDidInfo::new(did.to_string(), None);
        let their_did = service.create_their_did(&their_did_info).unwrap();

        // With no explicit verkey, the DID itself serves as the verkey.
        assert_eq!(did.to_string(), their_did.did);
        assert_eq!(did.to_string(), their_did.verkey);
    }

    #[test]
    fn create_their_did_works_for_full_verkey() {
        let service = SignusService::new();
        let did = "8wZcEriaNLNKtteJvx7f8i";
        let verkey = "5L2HBnzbu6Auh2pkDRbFt5f4prvgE2LzknkuYLsKkacp";

        let their_did_info = TheirDidInfo::new(did.to_string(), Some(verkey.to_string()));
        let their_did = service.create_their_did(&their_did_info).unwrap();

        assert_eq!(did.to_string(), their_did.did);
        assert_eq!(verkey, their_did.verkey);
    }

    #[test]
    fn create_their_did_works_for_abbreviated_verkey() {
        let service = SignusService::new();
        let did = "8wZcEriaNLNKtteJvx7f8i";

        // A leading '~' marks an abbreviated verkey that is expanded via the DID.
        let their_did_info = TheirDidInfo::new(did.to_string(), Some("~NcYxiDXkpYi6ov5FcYDi1e".to_string()));
        let their_did = service.create_their_did(&their_did_info).unwrap();

        assert_eq!(did.to_string(), their_did.did);
        assert_eq!("5L2HBnzbu6Auh2pkDRbFt5f4prvgE2LzknkuYLsKkacp", their_did.verkey);
    }

    #[test]
    fn sign_works() {
        let service = SignusService::new();
        let did_info = MyDidInfo::new(None, None, None, None);
        let message = r#"message"#;

        let (my_did, my_key) = service.create_my_did(&did_info).unwrap();
        service.sign(&my_key, message.as_bytes()).unwrap();
    }

    #[test]
    fn sign_works_for_invalid_signkey() {
        let service = SignusService::new();
        let message = r#"message"#;
        // Key built from a too-short signkey: signing must fail.
        let my_key = Key::new("8wZcEriaNLNKtteJvx7f8i".to_string(), "5L2HBnzbu6Auh2pkDRbFt5f4prvgE2LzknkuYLsKkacp".to_string());
        assert!(service.sign(&my_key, message.as_bytes()).is_err());
    }

    #[test]
    fn sign_verify_works() {
        let service = SignusService::new();
        let did_info = MyDidInfo::new(None, None, None, None);
        let message = r#"message"#;

        let (my_did, my_key) = service.create_my_did(&did_info).unwrap();
        let signature = service.sign(&my_key, message.as_bytes()).unwrap();

        let valid = service.verify(&my_did.verkey, message.as_bytes(), &signature).unwrap();
        assert!(valid);
    }

    #[test]
    fn sign_verify_works_for_verkey_contained_crypto_type() {
        let service = SignusService::new();
        let did_info = MyDidInfo::new(None, None, None, None);
        let message = r#"message"#;

        let (my_did, my_key) = service.create_my_did(&did_info).unwrap();
        let signature = service.sign(&my_key, message.as_bytes()).unwrap();

        // Verkey carrying an explicit (valid) crypto-type suffix still verifies.
        let verkey = my_did.verkey + ":ed25519";
        let valid = service.verify(&verkey, message.as_bytes(), &signature).unwrap();
        assert!(valid);
    }

    #[test]
    fn sign_verify_works_for_verkey_contained_invalid_crypto_type() {
        let service = SignusService::new();
        let did_info = MyDidInfo::new(None, None, None, None);
        let message = r#"message"#;

        let (my_did, my_key) = service.create_my_did(&did_info).unwrap();
        let signature = service.sign(&my_key, message.as_bytes()).unwrap();

        // Unknown crypto-type prefix must be rejected with an error.
        let verkey = format!("crypto_type:{}", my_did.verkey);
        assert!(service.verify(&verkey, message.as_bytes(), &signature).is_err());
    }

    #[test]
    fn verify_not_works_for_invalid_verkey() {
        let service = SignusService::new();
        let did_info = MyDidInfo::new(None, None, None, None);
        let message = r#"message"#;

        let (my_did, my_key) = service.create_my_did(&did_info).unwrap();
        let signature = service.sign(&my_key, message.as_bytes()).unwrap();

        // Wrong (but well-formed) verkey: verification completes but returns false.
        let verkey = "AnnxV4t3LUHKZaxVQDWoVaG44NrGmeDYMA4Gz6C2tCZd";
        let valid = service.verify(verkey, message.as_bytes(), &signature).unwrap();
        assert_eq!(false, valid);
    }

    #[test]
    fn encrypt_works() {
        let service = SignusService::new();
        let msg = "some message";

        let did_info = MyDidInfo::new(None, None, None, None);

        let (my_did, my_key) = service.create_my_did(&did_info).unwrap();

        let (their_did, their_key) = service.create_my_did(&did_info.clone()).unwrap();
        let their_did = Did::new(their_did.did, their_did.verkey);

        service.encrypt(&my_key, &their_did.verkey, msg.as_bytes()).unwrap();
    }

    #[test]
    fn encrypt_decrypt_works() {
        let service = SignusService::new();
        let msg = "some message";

        let did_info = MyDidInfo::new(None, None, None, None);

        let (my_did, my_key) = service.create_my_did(&did_info).unwrap();
        let my_did_for_encrypt = my_did.clone();
        let my_key_for_encrypt = my_key.clone();
        let their_did_for_decrypt = Did::new(my_did.did, my_did.verkey);

        let (their_did, their_key) = service.create_my_did(&did_info.clone()).unwrap();
        let my_did_for_decrypt = their_did.clone();
        let my_key_for_decrypt = their_key.clone();
        let their_did_for_encrypt = Did::new(their_did.did, their_did.verkey);

        // NOTE(review): `noce` is presumably a typo for `nonce`; kept as-is.
        let (encrypted_message, noce) = service.encrypt(&my_key_for_encrypt, &their_did_for_encrypt.verkey, msg.as_bytes()).unwrap();

        let decrypted_message = service.decrypt(&my_key_for_decrypt, &their_did_for_decrypt.verkey, &encrypted_message, &noce).unwrap();

        assert_eq!(msg.as_bytes().to_vec(), decrypted_message);
    }

    #[test]
    fn encrypt_decrypt_works_for_verkey_contained_crypto_type() {
        let service = SignusService::new();
        let msg = "some message";

        let did_info = MyDidInfo::new(None, None, None, None);

        let (my_did, my_key) = service.create_my_did(&did_info).unwrap();
        let my_did_for_encrypt = my_did.clone();
        let my_key_for_encrypt = my_key.clone();
        let their_did_for_decrypt = Did::new(my_did.did, my_did.verkey);

        let (their_did, their_key) = service.create_my_did(&did_info.clone()).unwrap();
        let my_did_for_decrypt = their_did.clone();
        let my_key_for_decrypt = their_key.clone();
        let their_did_for_encrypt = Did::new(their_did.did, their_did.verkey);

        let (encrypted_message, noce) = service.encrypt(&my_key_for_encrypt, &their_did_for_encrypt.verkey, msg.as_bytes()).unwrap();

        // Decryption succeeds when the sender verkey carries the crypto-type suffix.
        let verkey = their_did_for_decrypt.verkey + ":ed25519";
        let decrypted_message = service.decrypt(&my_key_for_decrypt, &verkey, &encrypted_message, &noce).unwrap();

        assert_eq!(msg.as_bytes().to_vec(), decrypted_message);
    }

    #[test]
    fn encrypt_sealed_works() {
        let service = SignusService::new();
        let msg = "some message";

        let did_info = MyDidInfo::new(None, None, None, None);

        let (did, key) = service.create_my_did(&did_info.clone()).unwrap();
        let did = Did::new(did.did, did.verkey);

        service.encrypt_sealed(&did.verkey, msg.as_bytes()).unwrap();
    }

    #[test]
    fn encrypt_decrypt_sealed_works() {
        let service = SignusService::new();
        let msg = "some message".as_bytes();

        let did_info = MyDidInfo::new(None, None, None, None);

        let (did, key) = service.create_my_did(&did_info.clone()).unwrap();
        let encrypt_did = Did::new(did.did.clone(), did.verkey.clone());

        let encrypted_message = service.encrypt_sealed(&encrypt_did.verkey, msg).unwrap();
        let decrypted_message = service.decrypt_sealed(&key, &encrypted_message).unwrap();

        assert_eq!(msg, decrypted_message.as_slice());
    }
}
//! This is NOT a real example. This is a test designed to see if we can actually run the turtle //! process use std::process; use turtle::Drawing; fn main() { let mut drawing = Drawing::new(); let mut turtle = drawing.add_turtle(); turtle.set_speed(2); turtle.right(90.0); turtle.forward(50.0); process::exit(0); } Noting that runtest doesn't currently work since it can't close the window //! This is NOT a real example. This is a test designed to see if we can actually run the turtle //! process use std::process; use turtle::Turtle; fn main() { let mut turtle = Turtle::new(); turtle.set_speed(2); turtle.right(90.0); turtle.forward(50.0); //TODO: Exiting the process currently doesn't cause the window to get closed. We should add a // `close(self)` or `quit(self)` method to `Drawing` that closes the window explicitly. process::exit(0); }
#![no_std]

extern crate cortex_m;
extern crate stm32f30x_hal as hal;
extern crate embedded_hal;
#[macro_use(block)]
extern crate nb; // TODO Remove this dependency

use embedded_hal::spi::{Mode, Phase, Polarity};
use cortex_m::asm;
use hal::prelude::*;
use hal::spi::Spi;
use hal::stm32f30x;
use hal::delay::Delay;

// Drives a memory LCD over SPI1: enables the display, then pushes a clear
// command, one line of pixel data, and a display-update command, with a
// breakpoint between each transfer for inspection under a debugger.
fn main() {
    let cp = cortex_m::Peripherals::take().unwrap();
    let p = stm32f30x::Peripherals::take().unwrap();

    let mut flash = p.FLASH.constrain();
    let mut rcc = p.RCC.constrain();
    let mut gpioa = p.GPIOA.split(&mut rcc.ahb);

    // clock configuration using the default settings (all clocks run at 8 MHz)
    let clocks = rcc.cfgr.freeze(&mut flash.acr);

    // Set up delay
    let mut delay = Delay::new(cp.SYST, clocks);

    // Set up DISP_EN (Active high)
    let mut pa3 = gpioa
        .pa3
        .into_push_pull_output(&mut gpioa.moder, &mut gpioa.otyper); // DISP_EN
    pa3.set_high();

    // Set up our CS (Active high)
    let mut pa2 = gpioa
        .pa2
        .into_push_pull_output(&mut gpioa.moder, &mut gpioa.otyper); // CS
    pa2.set_low();

    // Set up SPI
    let pa5 = gpioa
        .pa5
        .into_af5(&mut gpioa.moder, &mut gpioa.afrl); // SCK
    let pa6 = gpioa
        .pa6
        .into_af5(&mut gpioa.moder, &mut gpioa.afrl); // MISO
    let pa7 = gpioa
        .pa7
        .into_af5(&mut gpioa.moder, &mut gpioa.afrl); // MOSI

    //let mode = Mode { polarity: Polarity::IdleLow, phase: Phase::CaptureOnFirstTransition };
    let mode = Mode { polarity: Polarity::IdleLow, phase: Phase::CaptureOnSecondTransition };
    let mut spi = Spi::spi1(p.SPI1, (pa5, pa6, pa7), mode, 1.mhz(), clocks, &mut rcc.apb2);

    // Wait to let everything set up
    delay.delay_ms(200_u16);

    asm::bkpt();

    //let clear_data = [0b0010_0000, 0x00];
    let clear_data = [0x20, 0x00];
    let set_line_value = [
        0x80, 0xba, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33,
        0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x00, 0x00
    ];
    let set_display = [0x00, 0x00];

    // Send data
    pa2.set_high();
    spi.write(&clear_data);
    //delay.delay_ms(5_u16);
    pa2.set_low();

    asm::bkpt();

    // Send data
    pa2.set_high();
    spi.write(&set_line_value);
//delay.delay_ms(5_u16); pa2.set_low(); asm::bkpt(); // Send data pa2.set_high(); spi.write(&set_display); //delay.delay_ms(5_u16); pa2.set_low(); asm::bkpt() } Basic display functional #![no_std] extern crate cortex_m; extern crate stm32f30x_hal as hal; extern crate embedded_hal; extern crate ls010b7dh01; #[macro_use(block)] extern crate nb; // TODO Remove this dependancy use embedded_hal::spi::{Mode, Phase, Polarity}; use cortex_m::asm; use hal::prelude::*; use hal::spi::Spi; use hal::stm32f30x; use hal::delay::Delay; fn main() { let cp = cortex_m::Peripherals::take().unwrap(); let p = stm32f30x::Peripherals::take().unwrap(); let mut flash = p.FLASH.constrain(); let mut rcc = p.RCC.constrain(); let mut gpioa = p.GPIOA.split(&mut rcc.ahb); let mut gpiob = p.GPIOB.split(&mut rcc.ahb); // clock configuration using the default settings (all clocks run at 8 MHz) let clocks = rcc.cfgr.freeze(&mut flash.acr); // Set up delay let mut delay = Delay::new(cp.SYST, clocks); // Set up DISP_EN (Active high) let mut disp_en = gpiob .pb14 .into_push_pull_output(&mut gpiob.moder, &mut gpiob.otyper); // DISP_EN disp_en.set_high(); // Set up our CS (Active high) let mut cs = gpiob .pb2 .into_push_pull_output(&mut gpiob.moder, &mut gpiob.otyper); // CS cs.set_low(); // Set up 5V_en let mut v5_en = gpioa .pa1 .into_push_pull_output(&mut gpioa.moder, &mut gpioa.otyper); // 5V_en v5_en.set_high(); // Set up SPI let pa5 = gpioa .pa5 .into_af5(&mut gpioa.moder, &mut gpioa.afrl); // SCK let pa6 = gpioa .pa6 .into_af5(&mut gpioa.moder, &mut gpioa.afrl); // MISO let pa7 = gpioa .pa7 .into_af5(&mut gpioa.moder, &mut gpioa.afrl); // MOSI //let mode = Mode { polarity: Polarity::IdleLow, phase: Phase::CaptureOnFirstTransition }; let mode = Mode { polarity: Polarity::IdleLow, phase: Phase::CaptureOnSecondTransition }; let mut spi = Spi::spi1(p.SPI1, (pa5, pa6, pa7), mode, 1.mhz(), clocks, &mut rcc.apb2); // Wait to let everything set up delay.delay_ms(200_u16); asm::bkpt(); //let clear_data = 
    [0b0010_0000, 0x00];
    let clear_data = [0x20, 0x00];
    let set_line_value = [
        0x80, 0xba, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33,
        0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x00, 0x00
    ];
    let set_display = [0x00, 0x00];

    // Send data: CS raised around each transfer (active high), breakpoint after.
    cs.set_high();
    spi.write(&clear_data);
    //delay.delay_ms(5_u16);
    cs.set_low();

    asm::bkpt();

    // Send data
    cs.set_high();
    spi.write(&set_line_value);
    //delay.delay_ms(5_u16);
    cs.set_low();

    asm::bkpt();

    // Send data
    cs.set_high();
    spi.write(&set_display);
    //delay.delay_ms(5_u16);
    cs.set_low();

    asm::bkpt()
}
// Test-support helpers: structural equality over rustc AST types that ignores
// all `Span`/id-style position information ("spanless" comparison).

extern crate rustc_ast;
extern crate rustc_data_structures;
extern crate rustc_span;
extern crate rustc_target;

use rustc_ast::ast::{
    AngleBracketedArg, AngleBracketedArgs, AnonConst, Arm, AssocItemKind, AssocTyConstraint,
    AssocTyConstraintKind, Async, AttrId, AttrItem, AttrKind, AttrStyle, Attribute, BareFnTy,
    BinOpKind, BindingMode, Block, BlockCheckMode, BorrowKind, CaptureBy, Const, Crate, CrateSugar,
    Defaultness, EnumDef, Expr, ExprKind, Extern, Field, FieldPat, FloatTy, FnDecl, FnHeader,
    FnRetTy, FnSig, ForeignItemKind, ForeignMod, GenericArg, GenericArgs, GenericBound,
    GenericParam, GenericParamKind, Generics, GlobalAsm, ImplPolarity, InlineAsm, InlineAsmOperand,
    InlineAsmOptions, InlineAsmRegOrRegClass, InlineAsmTemplatePiece, IntTy, IsAuto, Item,
    ItemKind, Label, Lifetime, Lit, LitFloatType, LitIntType, LitKind, LlvmAsmDialect,
    LlvmInlineAsm, LlvmInlineAsmOutput, Local, MacArgs, MacCall, MacCallStmt, MacDelimiter,
    MacStmtStyle, MacroDef, Mod, Movability, MutTy, Mutability, NodeId, Param, ParenthesizedArgs,
    Pat, PatKind, Path, PathSegment, PolyTraitRef, QSelf, RangeEnd, RangeLimits, RangeSyntax,
    Stmt, StmtKind, StrLit, StrStyle, StructField, StructRest, TraitBoundModifier,
    TraitObjectSyntax, TraitRef, Ty, TyKind, UintTy, UnOp, Unsafe, UnsafeSource, UseTree,
    UseTreeKind, Variant, VariantData, Visibility, VisibilityKind, WhereBoundPredicate,
    WhereClause, WhereEqPredicate, WherePredicate, WhereRegionPredicate,
};
use rustc_ast::ptr::P;
use rustc_ast::token::{self, CommentKind, DelimToken, Token, TokenKind};
use rustc_ast::tokenstream::{DelimSpan, LazyTokenStream, TokenStream, TokenTree};
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_span::source_map::Spanned;
use rustc_span::symbol::Ident;
use rustc_span::{Span, Symbol, SyntaxContext};
use std::mem;

/// Like `PartialEq`, but insensitive to source positions (spans, ids, contexts).
pub trait SpanlessEq {
    fn eq(&self, other: &Self) -> bool;
}

// Owning pointer wrapper: compare what the box points at.
impl<T: SpanlessEq> SpanlessEq for P<T> {
    fn eq(&self, other: &Self) -> bool {
        SpanlessEq::eq(&**self,
                       &**other)
    }
}

// Shared pointer wrapper: compare the pointed-to values.
impl<T: ?Sized + SpanlessEq> SpanlessEq for Lrc<T> {
    fn eq(&self, other: &Self) -> bool {
        SpanlessEq::eq(&**self, &**other)
    }
}

impl<T: SpanlessEq> SpanlessEq for Option<T> {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (None, None) => true,
            (Some(this), Some(other)) => SpanlessEq::eq(this, other),
            _ => false,
        }
    }
}

// Slices compare element-wise after a length check.
impl<T: SpanlessEq> SpanlessEq for [T] {
    fn eq(&self, other: &Self) -> bool {
        self.len() == other.len() && self.iter().zip(other).all(|(a, b)| SpanlessEq::eq(a, b))
    }
}

impl<T: SpanlessEq> SpanlessEq for Vec<T> {
    fn eq(&self, other: &Self) -> bool {
        <[T] as SpanlessEq>::eq(self, other)
    }
}

impl<T: SpanlessEq> SpanlessEq for ThinVec<T> {
    fn eq(&self, other: &Self) -> bool {
        self.len() == other.len()
            && self
                .iter()
                .zip(other.iter())
                .all(|(a, b)| SpanlessEq::eq(a, b))
    }
}

// Spanned<T>: ignore the span, compare the node.
impl<T: SpanlessEq> SpanlessEq for Spanned<T> {
    fn eq(&self, other: &Self) -> bool {
        SpanlessEq::eq(&self.node, &other.node)
    }
}

impl<A: SpanlessEq, B: SpanlessEq> SpanlessEq for (A, B) {
    fn eq(&self, other: &Self) -> bool {
        SpanlessEq::eq(&self.0, &other.0) && SpanlessEq::eq(&self.1, &other.1)
    }
}

// Types that carry ONLY position information: always considered equal.
macro_rules! spanless_eq_true {
    ($name:ident) => {
        impl SpanlessEq for $name {
            fn eq(&self, _other: &Self) -> bool {
                true
            }
        }
    };
}

spanless_eq_true!(Span);
spanless_eq_true!(DelimSpan);
spanless_eq_true!(AttrId);
spanless_eq_true!(NodeId);
spanless_eq_true!(SyntaxContext);

// Leaf types containing no spans: defer to PartialEq.
macro_rules! spanless_eq_partial_eq {
    ($name:ident) => {
        impl SpanlessEq for $name {
            fn eq(&self, other: &Self) -> bool {
                PartialEq::eq(self, other)
            }
        }
    };
}

spanless_eq_partial_eq!(bool);
spanless_eq_partial_eq!(u8);
spanless_eq_partial_eq!(u16);
spanless_eq_partial_eq!(u128);
spanless_eq_partial_eq!(usize);
spanless_eq_partial_eq!(char);
spanless_eq_partial_eq!(String);
spanless_eq_partial_eq!(Symbol);
spanless_eq_partial_eq!(CommentKind);
spanless_eq_partial_eq!(DelimToken);
spanless_eq_partial_eq!(InlineAsmOptions);

macro_rules!
spanless_eq_struct {
    // Terminal rule: all fields sorted into compared `[$field $other]` pairs
    // and ignored `![$ignore]` entries; emit the impl.
    {
        $name:ident $(<$param:ident>)?;
        $([$field:ident $other:ident])*
        $(![$ignore:ident])*
    } => {
        impl $(<$param: SpanlessEq>)* SpanlessEq for $name $(<$param>)* {
            fn eq(&self, other: &Self) -> bool {
                let $name { $($field,)* $($ignore: _,)* } = self;
                let $name { $($field: $other,)* $($ignore: _,)* } = other;
                $(SpanlessEq::eq($field, $other))&&*
            }
        }
    };

    // Munch a plain field name into a compared pair.
    {
        $name:ident $(<$param:ident>)?;
        $([$field:ident $other:ident])*
        $next:ident
        $($rest:ident)*
        $(!$ignore:ident)*
    } => {
        spanless_eq_struct! {
            $name $(<$param>)*;
            $([$field $other])*
            [$next other]
            $($rest)*
            $(!$ignore)*
        }
    };

    // Munch a `!field` name into an ignored entry.
    {
        $name:ident $(<$param:ident>)?;
        $([$field:ident $other:ident])*
        $(![$ignore:ident])*
        !$next:ident
        $(!$rest:ident)*
    } => {
        spanless_eq_struct! {
            $name $(<$param>)*;
            $([$field $other])*
            $(![$ignore])*
            ![$next]
            $(!$rest)*
        }
    };
}

// Counterpart for enums: variants are written as `Variant(field-names…)`,
// where tuple fields are referred to by index. The first `match self` is only
// there to force a compile error when a variant is missing from the list.
macro_rules! spanless_eq_enum {
    {
        $name:ident;
        $([$variant:ident $([$field:tt $this:ident $other:ident])*])*
    } => {
        impl SpanlessEq for $name {
            fn eq(&self, other: &Self) -> bool {
                match self {
                    $(
                        $name::$variant { .. } => {}
                    )*
                }
                #[allow(unreachable_patterns)]
                match (self, other) {
                    $(
                        (
                            $name::$variant { $($field: $this),* },
                            $name::$variant { $($field: $other),* },
                        ) => {
                            true $(&& SpanlessEq::eq($this, $other))*
                        }
                    )*
                    _ => false,
                }
            }
        }
    };

    // Munch one field of the current variant into a named triple.
    {
        $name:ident;
        $([$variant:ident $($fields:tt)*])*
        $next:ident [$($named:tt)*] ( $i:tt $($field:tt)* ) $($rest:tt)*
    } => {
        spanless_eq_enum! {
            $name;
            $([$variant $($fields)*])*
            $next [$($named)* [$i this other]] ( $($field)* ) $($rest)*
        }
    };

    // Variant's field list exhausted: commit the variant.
    {
        $name:ident;
        $([$variant:ident $($fields:tt)*])*
        $next:ident [$($named:tt)*] () $($rest:tt)*
    } => {
        spanless_eq_enum! {
            $name;
            $([$variant $($fields)*])*
            [$next $($named)*]
            $($rest)*
        }
    };

    // Start processing a variant with a parenthesized field list.
    {
        $name:ident;
        $([$variant:ident $($fields:tt)*])*
        $next:ident ( $($field:tt)* ) $($rest:tt)*
    } => {
        spanless_eq_enum! {
            $name;
            $([$variant $($fields)*])*
            $next [] ( $($field)* ) $($rest)*
        }
    };

    // Fieldless (unit-like) variant.
    {
        $name:ident;
        $([$variant:ident $($fields:tt)*])*
        $next:ident $($rest:tt)*
    } => {
        spanless_eq_enum!
{ $name; $([$variant $($fields)*])* [$next] $($rest)* }
    };
}

// One invocation per AST type, listing the fields to compare; `!field` marks
// fields excluded from the comparison (e.g. cached token streams).
spanless_eq_struct!(AngleBracketedArgs; span args);
spanless_eq_struct!(AnonConst; id value);
spanless_eq_struct!(Arm; attrs pat guard body span id is_placeholder);
spanless_eq_struct!(AssocTyConstraint; id ident kind span);
spanless_eq_struct!(AttrItem; path args tokens);
spanless_eq_struct!(Attribute; kind id style span);
spanless_eq_struct!(BareFnTy; unsafety ext generic_params decl);
spanless_eq_struct!(Block; stmts id rules span tokens);
spanless_eq_struct!(Crate; module attrs span proc_macros);
spanless_eq_struct!(EnumDef; variants);
spanless_eq_struct!(Expr; id kind span attrs !tokens);
spanless_eq_struct!(Field; attrs id span ident expr is_shorthand is_placeholder);
spanless_eq_struct!(FieldPat; ident pat is_shorthand attrs id span is_placeholder);
spanless_eq_struct!(FnDecl; inputs output);
spanless_eq_struct!(FnHeader; constness asyncness unsafety ext);
spanless_eq_struct!(FnSig; header decl span);
spanless_eq_struct!(ForeignMod; unsafety abi items);
spanless_eq_struct!(GenericParam; id ident attrs bounds is_placeholder kind);
spanless_eq_struct!(Generics; params where_clause span);
spanless_eq_struct!(GlobalAsm; asm);
spanless_eq_struct!(InlineAsm; template operands options line_spans);
spanless_eq_struct!(Item<K>; attrs id span vis ident kind !tokens);
spanless_eq_struct!(Label; ident);
spanless_eq_struct!(Lifetime; id ident);
spanless_eq_struct!(Lit; token kind span);
spanless_eq_struct!(LlvmInlineAsm; asm asm_str_style outputs inputs clobbers volatile alignstack dialect);
spanless_eq_struct!(LlvmInlineAsmOutput; constraint expr is_rw is_indirect);
spanless_eq_struct!(Local; pat ty init id span attrs);
spanless_eq_struct!(MacCall; path args prior_type_ascription);
spanless_eq_struct!(MacCallStmt; mac style attrs);
spanless_eq_struct!(MacroDef; body macro_rules);
spanless_eq_struct!(Mod; inner unsafety items inline);
spanless_eq_struct!(MutTy; ty mutbl);
spanless_eq_struct!(Param; attrs ty pat id span is_placeholder);
spanless_eq_struct!(ParenthesizedArgs; span inputs output);
spanless_eq_struct!(Pat; id kind span tokens);
spanless_eq_struct!(Path; span segments tokens);
spanless_eq_struct!(PathSegment; ident id args);
spanless_eq_struct!(PolyTraitRef; bound_generic_params trait_ref span);
spanless_eq_struct!(QSelf; ty path_span position);
spanless_eq_struct!(Stmt; id kind span tokens);
spanless_eq_struct!(StrLit; style symbol suffix span symbol_unescaped);
spanless_eq_struct!(StructField; attrs id span vis ident ty is_placeholder);
spanless_eq_struct!(Token; kind span);
spanless_eq_struct!(TraitRef; path ref_id);
spanless_eq_struct!(Ty; id kind span tokens);
spanless_eq_struct!(UseTree; prefix kind span);
spanless_eq_struct!(Variant; attrs id span vis ident data disr_expr is_placeholder);
spanless_eq_struct!(Visibility; kind span tokens);
spanless_eq_struct!(WhereBoundPredicate; span bound_generic_params bounded_ty bounds);
spanless_eq_struct!(WhereClause; has_where_token predicates span);
spanless_eq_struct!(WhereEqPredicate; id span lhs_ty rhs_ty);
spanless_eq_struct!(WhereRegionPredicate; span lifetime bounds);

spanless_eq_enum!(AngleBracketedArg; Arg(0) Constraint(0));
spanless_eq_enum!(AssocItemKind; Const(0 1 2) Fn(0 1 2 3) TyAlias(0 1 2 3) MacCall(0));
spanless_eq_enum!(AssocTyConstraintKind; Equality(ty) Bound(bounds));
spanless_eq_enum!(Async; Yes(span closure_id return_impl_trait_id) No);
spanless_eq_enum!(AttrKind; Normal(0 1) DocComment(0 1));
spanless_eq_enum!(AttrStyle; Outer Inner);
spanless_eq_enum!(BinOpKind; Add Sub Mul Div Rem And Or BitXor BitAnd BitOr Shl Shr Eq Lt Le Ne Ge Gt);
spanless_eq_enum!(BindingMode; ByRef(0) ByValue(0));
spanless_eq_enum!(BlockCheckMode; Default Unsafe(0));
spanless_eq_enum!(BorrowKind; Ref Raw);
spanless_eq_enum!(CaptureBy; Value Ref);
spanless_eq_enum!(Const; Yes(0) No);
spanless_eq_enum!(CrateSugar; PubCrate JustCrate);
spanless_eq_enum!(Defaultness; Default(0) Final);
spanless_eq_enum!(Extern; None Implicit Explicit(0));
spanless_eq_enum!(FloatTy; F32 F64);
spanless_eq_enum!(FnRetTy; Default(0) Ty(0));
spanless_eq_enum!(ForeignItemKind; Static(0 1 2) Fn(0 1 2 3) TyAlias(0 1 2 3) MacCall(0));
spanless_eq_enum!(GenericArg; Lifetime(0) Type(0) Const(0));
spanless_eq_enum!(GenericArgs; AngleBracketed(0) Parenthesized(0));
spanless_eq_enum!(GenericBound; Trait(0 1) Outlives(0));
spanless_eq_enum!(GenericParamKind; Lifetime Type(default) Const(ty kw_span));
spanless_eq_enum!(ImplPolarity; Positive Negative(0));
spanless_eq_enum!(InlineAsmRegOrRegClass; Reg(0) RegClass(0));
spanless_eq_enum!(InlineAsmTemplatePiece; String(0) Placeholder(operand_idx modifier span));
spanless_eq_enum!(IntTy; Isize I8 I16 I32 I64 I128);
spanless_eq_enum!(IsAuto; Yes No);
spanless_eq_enum!(LitFloatType; Suffixed(0) Unsuffixed);
spanless_eq_enum!(LitIntType; Signed(0) Unsigned(0) Unsuffixed);
spanless_eq_enum!(LlvmAsmDialect; Att Intel);
spanless_eq_enum!(MacArgs; Empty Delimited(0 1 2) Eq(0 1));
spanless_eq_enum!(MacDelimiter; Parenthesis Bracket Brace);
spanless_eq_enum!(MacStmtStyle; Semicolon Braces NoBraces);
spanless_eq_enum!(Movability; Static Movable);
spanless_eq_enum!(Mutability; Mut Not);
spanless_eq_enum!(RangeEnd; Included(0) Excluded);
spanless_eq_enum!(RangeLimits; HalfOpen Closed);
spanless_eq_enum!(StmtKind; Local(0) Item(0) Expr(0) Semi(0) Empty MacCall(0));
spanless_eq_enum!(StrStyle; Cooked Raw(0));
spanless_eq_enum!(StructRest; Base(0) Rest(0) None);
spanless_eq_enum!(TokenTree; Token(0) Delimited(0 1 2));
spanless_eq_enum!(TraitBoundModifier; None Maybe MaybeConst MaybeConstMaybe);
spanless_eq_enum!(TraitObjectSyntax; Dyn None);
spanless_eq_enum!(UintTy; Usize U8 U16 U32 U64 U128);
spanless_eq_enum!(UnOp; Deref Not Neg);
spanless_eq_enum!(Unsafe; Yes(0) No);
spanless_eq_enum!(UnsafeSource; CompilerGenerated UserProvided);
spanless_eq_enum!(UseTreeKind; Simple(0 1 2) Nested(0) Glob);
spanless_eq_enum!(VariantData; Struct(0 1) Tuple(0 1) Unit(0));
spanless_eq_enum!(VisibilityKind; Public Crate(0) Restricted(path id) Inherited);
spanless_eq_enum!(WherePredicate; BoundPredicate(0) RegionPredicate(0) EqPredicate(0));
spanless_eq_enum!(ExprKind; Box(0) Array(0) ConstBlock(0) Call(0 1) MethodCall(0 1 2) Tup(0)
    Binary(0 1 2) Unary(0 1) Lit(0) Cast(0 1) Type(0 1) Let(0 1) If(0 1 2) While(0 1 2)
    ForLoop(0 1 2 3) Loop(0 1) Match(0 1) Closure(0 1 2 3 4 5) Block(0 1) Async(0 1 2) Await(0)
    TryBlock(0) Assign(0 1 2) AssignOp(0 1 2) Field(0 1) Index(0 1) Underscore Range(0 1 2)
    Path(0 1) AddrOf(0 1 2) Break(0 1) Continue(0) Ret(0) InlineAsm(0) LlvmInlineAsm(0)
    MacCall(0) Struct(0 1 2) Repeat(0 1) Paren(0) Try(0) Yield(0) Err);
spanless_eq_enum!(InlineAsmOperand; In(reg expr) Out(reg late expr) InOut(reg late expr)
    SplitInOut(reg late in_expr out_expr) Const(expr) Sym(expr));
spanless_eq_enum!(ItemKind; ExternCrate(0) Use(0) Static(0 1 2) Const(0 1 2) Fn(0 1 2 3) Mod(0)
    ForeignMod(0) GlobalAsm(0) TyAlias(0 1 2 3) Enum(0 1) Struct(0 1) Union(0 1)
    Trait(0 1 2 3 4) TraitAlias(0 1)
    Impl(unsafety polarity defaultness constness generics of_trait self_ty items)
    MacCall(0) MacroDef(0));
spanless_eq_enum!(LitKind; Str(0 1) ByteStr(0) Byte(0) Char(0) Int(0 1) Float(0 1) Bool(0) Err(0));
spanless_eq_enum!(PatKind; Wild Ident(0 1 2) Struct(0 1 2) TupleStruct(0 1) Or(0) Path(0 1)
    Tuple(0) Box(0) Ref(0 1) Lit(0) Range(0 1 2) Slice(0) Rest Paren(0) MacCall(0));
spanless_eq_enum!(TyKind; Slice(0) Array(0 1) Ptr(0) Rptr(0 1) BareFn(0) Never Tup(0) Path(0 1)
    TraitObject(0 1) ImplTrait(0 1) Paren(0) Typeof(0) Infer ImplicitSelf MacCall(0) Err
    CVarArgs);

// Identifiers compare by text rather than by interned symbol + span.
impl SpanlessEq for Ident {
    fn eq(&self, other: &Self) -> bool {
        self.as_str() == other.as_str()
    }
}

// Give up on comparing literals inside of macros because there are so many
// equivalent representations of the same literal; they are tested elsewhere
impl SpanlessEq for token::Lit {
    fn eq(&self, other: &Self) -> bool {
        mem::discriminant(self) == mem::discriminant(other)
    }
}

impl SpanlessEq
for RangeSyntax {
    fn eq(&self, _other: &Self) -> bool {
        match self {
            RangeSyntax::DotDotDot | RangeSyntax::DotDotEq => true,
        }
    }
}

// `..=` and `...` tokens are treated as equivalent; literal tokens defer to
// the dedicated `token::Lit` impl above, everything else to PartialEq.
impl SpanlessEq for TokenKind {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (TokenKind::Literal(this), TokenKind::Literal(other)) => SpanlessEq::eq(this, other),
            (TokenKind::DotDotEq, _) | (TokenKind::DotDotDot, _) => match other {
                TokenKind::DotDotEq | TokenKind::DotDotDot => true,
                _ => false,
            },
            _ => self == other,
        }
    }
}

// Walk both streams in lockstep; unequal lengths or any unequal tree => false.
impl SpanlessEq for TokenStream {
    fn eq(&self, other: &Self) -> bool {
        let mut this = self.clone().into_trees();
        let mut other = other.clone().into_trees();
        loop {
            let this = match this.next() {
                None => return other.next().is_none(),
                Some(val) => val,
            };
            let other = match other.next() {
                None => return false,
                Some(val) => val,
            };
            if !SpanlessEq::eq(&this, &other) {
                return false;
            }
        }
    }
}

// Lazy streams are forced and compared as ordinary token streams.
impl SpanlessEq for LazyTokenStream {
    fn eq(&self, other: &Self) -> bool {
        let this = self.create_token_stream();
        let other = other.create_token_stream();
        SpanlessEq::eq(&this, &other)
    }
}

Support field skip on fields of enum variants

extern crate rustc_ast;
extern crate rustc_data_structures;
extern crate rustc_span;
extern crate rustc_target;

use rustc_ast::ast::{
    AngleBracketedArg, AngleBracketedArgs, AnonConst, Arm, AssocItemKind, AssocTyConstraint,
    AssocTyConstraintKind, Async, AttrId, AttrItem, AttrKind, AttrStyle, Attribute, BareFnTy,
    BinOpKind, BindingMode, Block, BlockCheckMode, BorrowKind, CaptureBy, Const, Crate, CrateSugar,
    Defaultness, EnumDef, Expr, ExprKind, Extern, Field, FieldPat, FloatTy, FnDecl, FnHeader,
    FnRetTy, FnSig, ForeignItemKind, ForeignMod, GenericArg, GenericArgs, GenericBound,
    GenericParam, GenericParamKind, Generics, GlobalAsm, ImplPolarity, InlineAsm, InlineAsmOperand,
    InlineAsmOptions, InlineAsmRegOrRegClass, InlineAsmTemplatePiece, IntTy, IsAuto, Item,
    ItemKind, Label, Lifetime, Lit, LitFloatType, LitIntType, LitKind, LlvmAsmDialect,
    LlvmInlineAsm, LlvmInlineAsmOutput, Local, MacArgs,
    MacCall, MacCallStmt, MacDelimiter, MacStmtStyle, MacroDef, Mod, Movability, MutTy,
    Mutability, NodeId, Param, ParenthesizedArgs, Pat, PatKind, Path, PathSegment, PolyTraitRef,
    QSelf, RangeEnd, RangeLimits, RangeSyntax, Stmt, StmtKind, StrLit, StrStyle, StructField,
    StructRest, TraitBoundModifier, TraitObjectSyntax, TraitRef, Ty, TyKind, UintTy, UnOp, Unsafe,
    UnsafeSource, UseTree, UseTreeKind, Variant, VariantData, Visibility, VisibilityKind,
    WhereBoundPredicate, WhereClause, WhereEqPredicate, WherePredicate, WhereRegionPredicate,
};
use rustc_ast::ptr::P;
use rustc_ast::token::{self, CommentKind, DelimToken, Token, TokenKind};
use rustc_ast::tokenstream::{DelimSpan, LazyTokenStream, TokenStream, TokenTree};
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_span::source_map::Spanned;
use rustc_span::symbol::Ident;
use rustc_span::{Span, Symbol, SyntaxContext};
use std::mem;

/// Span-insensitive structural equality. Second revision: the struct macro's
/// munchers are reworked so ignored (`!field`) entries can appear anywhere,
/// paving the way for field skipping on enum variants as well.
pub trait SpanlessEq {
    fn eq(&self, other: &Self) -> bool;
}

impl<T: SpanlessEq> SpanlessEq for P<T> {
    fn eq(&self, other: &Self) -> bool {
        SpanlessEq::eq(&**self, &**other)
    }
}

impl<T: ?Sized + SpanlessEq> SpanlessEq for Lrc<T> {
    fn eq(&self, other: &Self) -> bool {
        SpanlessEq::eq(&**self, &**other)
    }
}

impl<T: SpanlessEq> SpanlessEq for Option<T> {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (None, None) => true,
            (Some(this), Some(other)) => SpanlessEq::eq(this, other),
            _ => false,
        }
    }
}

impl<T: SpanlessEq> SpanlessEq for [T] {
    fn eq(&self, other: &Self) -> bool {
        self.len() == other.len() && self.iter().zip(other).all(|(a, b)| SpanlessEq::eq(a, b))
    }
}

impl<T: SpanlessEq> SpanlessEq for Vec<T> {
    fn eq(&self, other: &Self) -> bool {
        <[T] as SpanlessEq>::eq(self, other)
    }
}

impl<T: SpanlessEq> SpanlessEq for ThinVec<T> {
    fn eq(&self, other: &Self) -> bool {
        self.len() == other.len()
            && self
                .iter()
                .zip(other.iter())
                .all(|(a, b)| SpanlessEq::eq(a, b))
    }
}

impl<T: SpanlessEq> SpanlessEq for Spanned<T> {
    fn eq(&self, other: &Self) -> bool {
        SpanlessEq::eq(&self.node, &other.node)
    }
}

impl<A: SpanlessEq, B: SpanlessEq> SpanlessEq for (A, B) {
    fn eq(&self, other: &Self) -> bool {
        SpanlessEq::eq(&self.0, &other.0) && SpanlessEq::eq(&self.1, &other.1)
    }
}

// Position-only types: always equal.
macro_rules! spanless_eq_true {
    ($name:ident) => {
        impl SpanlessEq for $name {
            fn eq(&self, _other: &Self) -> bool {
                true
            }
        }
    };
}

spanless_eq_true!(Span);
spanless_eq_true!(DelimSpan);
spanless_eq_true!(AttrId);
spanless_eq_true!(NodeId);
spanless_eq_true!(SyntaxContext);

// Span-free leaf types: defer to PartialEq.
macro_rules! spanless_eq_partial_eq {
    ($name:ident) => {
        impl SpanlessEq for $name {
            fn eq(&self, other: &Self) -> bool {
                PartialEq::eq(self, other)
            }
        }
    };
}

spanless_eq_partial_eq!(bool);
spanless_eq_partial_eq!(u8);
spanless_eq_partial_eq!(u16);
spanless_eq_partial_eq!(u128);
spanless_eq_partial_eq!(usize);
spanless_eq_partial_eq!(char);
spanless_eq_partial_eq!(String);
spanless_eq_partial_eq!(Symbol);
spanless_eq_partial_eq!(CommentKind);
spanless_eq_partial_eq!(DelimToken);
spanless_eq_partial_eq!(InlineAsmOptions);

// Revised struct macro: ignored entries may interleave with compared fields,
// and the comparison accumulator starts from `true` (so an all-ignored struct
// still expands to a valid expression).
macro_rules! spanless_eq_struct {
    {
        $name:ident $(<$param:ident>)?;
        $([$field:ident $other:ident])*
        $(![$ignore:ident])*
    } => {
        impl $(<$param: SpanlessEq>)* SpanlessEq for $name $(<$param>)* {
            fn eq(&self, other: &Self) -> bool {
                let $name { $($field,)* $($ignore: _,)* } = self;
                let $name { $($field: $other,)* $($ignore: _,)* } = other;
                true $(&& SpanlessEq::eq($field, $other))*
            }
        }
    };

    {
        $name:ident $(<$param:ident>)?;
        $([$field:ident $other:ident])*
        $(![$ignore:ident])*
        $next:ident
        $($rest:tt)*
    } => {
        spanless_eq_struct! {
            $name $(<$param>)*;
            $([$field $other])*
            [$next other]
            $(![$ignore])*
            $($rest)*
        }
    };

    {
        $name:ident $(<$param:ident>)?;
        $([$field:ident $other:ident])*
        $(![$ignore:ident])*
        !$next:ident
        $($rest:tt)*
    } => {
        spanless_eq_struct! {
            $name $(<$param>)*;
            $([$field $other])*
            $(![$ignore])*
            ![$next]
            $($rest)*
        }
    };
}

macro_rules!
spanless_eq_enum { { $name:ident; $([$variant:ident $([$field:tt $this:ident $other:ident])* $(![$ignore:tt])*])* } => { impl SpanlessEq for $name { fn eq(&self, other: &Self) -> bool { match self { $( $name::$variant { .. } => {} )* } #[allow(unreachable_patterns)] match (self, other) { $( ( $name::$variant { $($field: $this,)* $($ignore: _,)* }, $name::$variant { $($field: $other,)* $($ignore: _,)* }, ) => { true $(&& SpanlessEq::eq($this, $other))* } )* _ => false, } } } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident [$([$($named:tt)*])* $(![$ignore:tt])*] (!$i:tt $($field:tt)*) $($rest:tt)* } => { spanless_eq_enum! { $name; $([$variant $($fields)*])* $next [$([$($named)*])* $(![$ignore])* ![$i]] ($($field)*) $($rest)* } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident [$([$($named:tt)*])* $(![$ignore:tt])*] ($i:tt $($field:tt)*) $($rest:tt)* } => { spanless_eq_enum! { $name; $([$variant $($fields)*])* $next [$([$($named)*])* [$i this other] $(![$ignore])*] ($($field)*) $($rest)* } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident [$($named:tt)*] () $($rest:tt)* } => { spanless_eq_enum! { $name; $([$variant $($fields)*])* [$next $($named)*] $($rest)* } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident ($($field:tt)*) $($rest:tt)* } => { spanless_eq_enum! { $name; $([$variant $($fields)*])* $next [] ($($field)*) $($rest)* } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident $($rest:tt)* } => { spanless_eq_enum! 
{ $name; $([$variant $($fields)*])* [$next] $($rest)* } }; } spanless_eq_struct!(AngleBracketedArgs; span args); spanless_eq_struct!(AnonConst; id value); spanless_eq_struct!(Arm; attrs pat guard body span id is_placeholder); spanless_eq_struct!(AssocTyConstraint; id ident kind span); spanless_eq_struct!(AttrItem; path args tokens); spanless_eq_struct!(Attribute; kind id style span); spanless_eq_struct!(BareFnTy; unsafety ext generic_params decl); spanless_eq_struct!(Block; stmts id rules span tokens); spanless_eq_struct!(Crate; module attrs span proc_macros); spanless_eq_struct!(EnumDef; variants); spanless_eq_struct!(Expr; id kind span attrs !tokens); spanless_eq_struct!(Field; attrs id span ident expr is_shorthand is_placeholder); spanless_eq_struct!(FieldPat; ident pat is_shorthand attrs id span is_placeholder); spanless_eq_struct!(FnDecl; inputs output); spanless_eq_struct!(FnHeader; constness asyncness unsafety ext); spanless_eq_struct!(FnSig; header decl span); spanless_eq_struct!(ForeignMod; unsafety abi items); spanless_eq_struct!(GenericParam; id ident attrs bounds is_placeholder kind); spanless_eq_struct!(Generics; params where_clause span); spanless_eq_struct!(GlobalAsm; asm); spanless_eq_struct!(InlineAsm; template operands options line_spans); spanless_eq_struct!(Item<K>; attrs id span vis ident kind !tokens); spanless_eq_struct!(Label; ident); spanless_eq_struct!(Lifetime; id ident); spanless_eq_struct!(Lit; token kind span); spanless_eq_struct!(LlvmInlineAsm; asm asm_str_style outputs inputs clobbers volatile alignstack dialect); spanless_eq_struct!(LlvmInlineAsmOutput; constraint expr is_rw is_indirect); spanless_eq_struct!(Local; pat ty init id span attrs); spanless_eq_struct!(MacCall; path args prior_type_ascription); spanless_eq_struct!(MacCallStmt; mac style attrs); spanless_eq_struct!(MacroDef; body macro_rules); spanless_eq_struct!(Mod; inner unsafety items inline); spanless_eq_struct!(MutTy; ty mutbl); spanless_eq_struct!(Param; attrs ty pat 
id span is_placeholder); spanless_eq_struct!(ParenthesizedArgs; span inputs output); spanless_eq_struct!(Pat; id kind span tokens); spanless_eq_struct!(Path; span segments tokens); spanless_eq_struct!(PathSegment; ident id args); spanless_eq_struct!(PolyTraitRef; bound_generic_params trait_ref span); spanless_eq_struct!(QSelf; ty path_span position); spanless_eq_struct!(Stmt; id kind span tokens); spanless_eq_struct!(StrLit; style symbol suffix span symbol_unescaped); spanless_eq_struct!(StructField; attrs id span vis ident ty is_placeholder); spanless_eq_struct!(Token; kind span); spanless_eq_struct!(TraitRef; path ref_id); spanless_eq_struct!(Ty; id kind span tokens); spanless_eq_struct!(UseTree; prefix kind span); spanless_eq_struct!(Variant; attrs id span vis ident data disr_expr is_placeholder); spanless_eq_struct!(Visibility; kind span tokens); spanless_eq_struct!(WhereBoundPredicate; span bound_generic_params bounded_ty bounds); spanless_eq_struct!(WhereClause; has_where_token predicates span); spanless_eq_struct!(WhereEqPredicate; id span lhs_ty rhs_ty); spanless_eq_struct!(WhereRegionPredicate; span lifetime bounds); spanless_eq_enum!(AngleBracketedArg; Arg(0) Constraint(0)); spanless_eq_enum!(AssocItemKind; Const(0 1 2) Fn(0 1 2 3) TyAlias(0 1 2 3) MacCall(0)); spanless_eq_enum!(AssocTyConstraintKind; Equality(ty) Bound(bounds)); spanless_eq_enum!(Async; Yes(span closure_id return_impl_trait_id) No); spanless_eq_enum!(AttrKind; Normal(0 1) DocComment(0 1)); spanless_eq_enum!(AttrStyle; Outer Inner); spanless_eq_enum!(BinOpKind; Add Sub Mul Div Rem And Or BitXor BitAnd BitOr Shl Shr Eq Lt Le Ne Ge Gt); spanless_eq_enum!(BindingMode; ByRef(0) ByValue(0)); spanless_eq_enum!(BlockCheckMode; Default Unsafe(0)); spanless_eq_enum!(BorrowKind; Ref Raw); spanless_eq_enum!(CaptureBy; Value Ref); spanless_eq_enum!(Const; Yes(0) No); spanless_eq_enum!(CrateSugar; PubCrate JustCrate); spanless_eq_enum!(Defaultness; Default(0) Final); spanless_eq_enum!(Extern; None 
Implicit Explicit(0)); spanless_eq_enum!(FloatTy; F32 F64); spanless_eq_enum!(FnRetTy; Default(0) Ty(0)); spanless_eq_enum!(ForeignItemKind; Static(0 1 2) Fn(0 1 2 3) TyAlias(0 1 2 3) MacCall(0)); spanless_eq_enum!(GenericArg; Lifetime(0) Type(0) Const(0)); spanless_eq_enum!(GenericArgs; AngleBracketed(0) Parenthesized(0)); spanless_eq_enum!(GenericBound; Trait(0 1) Outlives(0)); spanless_eq_enum!(GenericParamKind; Lifetime Type(default) Const(ty kw_span)); spanless_eq_enum!(ImplPolarity; Positive Negative(0)); spanless_eq_enum!(InlineAsmRegOrRegClass; Reg(0) RegClass(0)); spanless_eq_enum!(InlineAsmTemplatePiece; String(0) Placeholder(operand_idx modifier span)); spanless_eq_enum!(IntTy; Isize I8 I16 I32 I64 I128); spanless_eq_enum!(IsAuto; Yes No); spanless_eq_enum!(LitFloatType; Suffixed(0) Unsuffixed); spanless_eq_enum!(LitIntType; Signed(0) Unsigned(0) Unsuffixed); spanless_eq_enum!(LlvmAsmDialect; Att Intel); spanless_eq_enum!(MacArgs; Empty Delimited(0 1 2) Eq(0 1)); spanless_eq_enum!(MacDelimiter; Parenthesis Bracket Brace); spanless_eq_enum!(MacStmtStyle; Semicolon Braces NoBraces); spanless_eq_enum!(Movability; Static Movable); spanless_eq_enum!(Mutability; Mut Not); spanless_eq_enum!(RangeEnd; Included(0) Excluded); spanless_eq_enum!(RangeLimits; HalfOpen Closed); spanless_eq_enum!(StmtKind; Local(0) Item(0) Expr(0) Semi(0) Empty MacCall(0)); spanless_eq_enum!(StrStyle; Cooked Raw(0)); spanless_eq_enum!(StructRest; Base(0) Rest(0) None); spanless_eq_enum!(TokenTree; Token(0) Delimited(0 1 2)); spanless_eq_enum!(TraitBoundModifier; None Maybe MaybeConst MaybeConstMaybe); spanless_eq_enum!(TraitObjectSyntax; Dyn None); spanless_eq_enum!(UintTy; Usize U8 U16 U32 U64 U128); spanless_eq_enum!(UnOp; Deref Not Neg); spanless_eq_enum!(Unsafe; Yes(0) No); spanless_eq_enum!(UnsafeSource; CompilerGenerated UserProvided); spanless_eq_enum!(UseTreeKind; Simple(0 1 2) Nested(0) Glob); spanless_eq_enum!(VariantData; Struct(0 1) Tuple(0 1) Unit(0)); 
// Tail of the same file version: macro-generated impls for the large AST
// enums (ExprKind/ItemKind/PatKind/TyKind etc.), then hand-written impls for
// the cases the macros cannot express. NOTE(review): the `// Give up on
// comparing literals...` text embedded mid-line below was a line comment in
// the original multi-line file; collapsed onto one physical line here it
// lexically comments out the rest of that line — left byte-identical anyway.
spanless_eq_enum!(VisibilityKind; Public Crate(0) Restricted(path id) Inherited); spanless_eq_enum!(WherePredicate; BoundPredicate(0) RegionPredicate(0) EqPredicate(0)); spanless_eq_enum!(ExprKind; Box(0) Array(0) ConstBlock(0) Call(0 1) MethodCall(0 1 2) Tup(0) Binary(0 1 2) Unary(0 1) Lit(0) Cast(0 1) Type(0 1) Let(0 1) If(0 1 2) While(0 1 2) ForLoop(0 1 2 3) Loop(0 1) Match(0 1) Closure(0 1 2 3 4 5) Block(0 1) Async(0 1 2) Await(0) TryBlock(0) Assign(0 1 2) AssignOp(0 1 2) Field(0 1) Index(0 1) Underscore Range(0 1 2) Path(0 1) AddrOf(0 1 2) Break(0 1) Continue(0) Ret(0) InlineAsm(0) LlvmInlineAsm(0) MacCall(0) Struct(0 1 2) Repeat(0 1) Paren(0) Try(0) Yield(0) Err); spanless_eq_enum!(InlineAsmOperand; In(reg expr) Out(reg late expr) InOut(reg late expr) SplitInOut(reg late in_expr out_expr) Const(expr) Sym(expr)); spanless_eq_enum!(ItemKind; ExternCrate(0) Use(0) Static(0 1 2) Const(0 1 2) Fn(0 1 2 3) Mod(0) ForeignMod(0) GlobalAsm(0) TyAlias(0 1 2 3) Enum(0 1) Struct(0 1) Union(0 1) Trait(0 1 2 3 4) TraitAlias(0 1) Impl(unsafety polarity defaultness constness generics of_trait self_ty items) MacCall(0) MacroDef(0)); spanless_eq_enum!(LitKind; Str(0 1) ByteStr(0) Byte(0) Char(0) Int(0 1) Float(0 1) Bool(0) Err(0)); spanless_eq_enum!(PatKind; Wild Ident(0 1 2) Struct(0 1 2) TupleStruct(0 1) Or(0) Path(0 1) Tuple(0) Box(0) Ref(0 1) Lit(0) Range(0 1 2) Slice(0) Rest Paren(0) MacCall(0)); spanless_eq_enum!(TyKind; Slice(0) Array(0 1) Ptr(0) Rptr(0 1) BareFn(0) Never Tup(0) Path(0 1) TraitObject(0 1) ImplTrait(0 1) Paren(0) Typeof(0) Infer ImplicitSelf MacCall(0) Err CVarArgs); impl SpanlessEq for Ident { fn eq(&self, other: &Self) -> bool { self.as_str() == other.as_str() } } // Give up on comparing literals inside of macros because there are so many // equivalent representations of the same literal; they are tested elsewhere impl SpanlessEq for token::Lit { fn eq(&self, other: &Self) -> bool { mem::discriminant(self) == mem::discriminant(other) } } impl SpanlessEq
// Hand-written impls: Ident compares by string (ignoring hygiene context);
// token::Lit compares discriminants only; RangeSyntax variants are treated as
// interchangeable, as are the DotDotEq/DotDotDot token kinds; TokenStream is
// walked tree-by-tree via into_trees(); LazyTokenStream is forced with
// create_token_stream() and then compared as a TokenStream.
for RangeSyntax { fn eq(&self, _other: &Self) -> bool { match self { RangeSyntax::DotDotDot | RangeSyntax::DotDotEq => true, } } } impl SpanlessEq for TokenKind { fn eq(&self, other: &Self) -> bool { match (self, other) { (TokenKind::Literal(this), TokenKind::Literal(other)) => SpanlessEq::eq(this, other), (TokenKind::DotDotEq, _) | (TokenKind::DotDotDot, _) => match other { TokenKind::DotDotEq | TokenKind::DotDotDot => true, _ => false, }, _ => self == other, } } } impl SpanlessEq for TokenStream { fn eq(&self, other: &Self) -> bool { let mut this = self.clone().into_trees(); let mut other = other.clone().into_trees(); loop { let this = match this.next() { None => return other.next().is_none(), Some(val) => val, }; let other = match other.next() { None => return false, Some(val) => val, }; if !SpanlessEq::eq(&this, &other) { return false; } } } } impl SpanlessEq for LazyTokenStream { fn eq(&self, other: &Self) -> bool { let this = self.create_token_stream(); let other = other.create_token_stream(); SpanlessEq::eq(&this, &other) } }
extern crate rustc_ast; extern crate rustc_data_structures; extern crate rustc_span; extern crate rustc_target; use std::mem; use rustc_ast::ast::{ AngleBracketedArgs, AnonConst, Arm, AssocItemKind, AssocTyConstraint, AssocTyConstraintKind, Async, AttrId, AttrItem, AttrKind, AttrStyle, Attribute, BareFnTy, BinOpKind, BindingMode, Block, BlockCheckMode, BorrowKind, CaptureBy, Const, Crate, CrateSugar, Defaultness, EnumDef, Expr, ExprKind, Extern, Field, FieldPat, FloatTy, FnDecl, FnHeader, FnRetTy, FnSig, ForeignItemKind, ForeignMod, GenericArg, GenericArgs, GenericBound, GenericParam, GenericParamKind, Generics, GlobalAsm, Ident, ImplPolarity, IntTy, IsAuto, Item, ItemKind, Label, Lifetime, Lit, LitFloatType, LitIntType, LitKind, LlvmAsmDialect, LlvmInlineAsm, LlvmInlineAsmOutput, Local, MacArgs, MacCall, MacDelimiter, MacStmtStyle, MacroDef, Mod, Movability, MutTy, Mutability, NodeId, Param, ParenthesizedArgs, Pat, PatKind, Path, PathSegment, PolyTraitRef, QSelf, RangeEnd, RangeLimits, RangeSyntax, Stmt, StmtKind, StrLit, StrStyle, StructField, TraitBoundModifier, TraitObjectSyntax, TraitRef, Ty, TyKind, UintTy, UnOp, Unsafe, UnsafeSource, UseTree, UseTreeKind, Variant, VariantData, VisibilityKind, WhereBoundPredicate, WhereClause, WhereEqPredicate, WherePredicate, WhereRegionPredicate, }; use rustc_ast::ptr::P; use rustc_ast::token::{self, DelimToken, Token, TokenKind}; use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree}; use rustc_ast::util::comments; use rustc_data_structures::sync::Lrc; use rustc_data_structures::thin_vec::ThinVec; use rustc_span::source_map::Spanned; use rustc_span::{sym, Span, Symbol, SyntaxContext, DUMMY_SP}; pub trait SpanlessEq { fn eq(&self, other: &Self) -> bool; } impl<T: SpanlessEq> SpanlessEq for P<T> { fn eq(&self, other: &Self) -> bool { SpanlessEq::eq(&**self, &**other) } } impl<T: SpanlessEq> SpanlessEq for Lrc<T> { fn eq(&self, other: &Self) -> bool { SpanlessEq::eq(&**self, &**other) } } impl<T: SpanlessEq> 
SpanlessEq for Option<T> { fn eq(&self, other: &Self) -> bool { match (self, other) { (None, None) => true, (Some(this), Some(other)) => SpanlessEq::eq(this, other), _ => false, } } } impl<T: SpanlessEq> SpanlessEq for Vec<T> { fn eq(&self, other: &Self) -> bool { self.len() == other.len() && self.iter().zip(other).all(|(a, b)| SpanlessEq::eq(a, b)) } } impl<T: SpanlessEq> SpanlessEq for ThinVec<T> { fn eq(&self, other: &Self) -> bool { self.len() == other.len() && self .iter() .zip(other.iter()) .all(|(a, b)| SpanlessEq::eq(a, b)) } } impl<T: SpanlessEq> SpanlessEq for Spanned<T> { fn eq(&self, other: &Self) -> bool { SpanlessEq::eq(&self.node, &other.node) } } impl<A: SpanlessEq, B: SpanlessEq> SpanlessEq for (A, B) { fn eq(&self, other: &Self) -> bool { SpanlessEq::eq(&self.0, &other.0) && SpanlessEq::eq(&self.1, &other.1) } } impl<A: SpanlessEq, B: SpanlessEq, C: SpanlessEq> SpanlessEq for (A, B, C) { fn eq(&self, other: &Self) -> bool { SpanlessEq::eq(&self.0, &other.0) && SpanlessEq::eq(&self.1, &other.1) && SpanlessEq::eq(&self.2, &other.2) } } macro_rules! spanless_eq_true { ($name:ident) => { impl SpanlessEq for $name { fn eq(&self, _other: &Self) -> bool { true } } }; } spanless_eq_true!(Span); spanless_eq_true!(DelimSpan); spanless_eq_true!(AttrId); spanless_eq_true!(NodeId); spanless_eq_true!(SyntaxContext); macro_rules! spanless_eq_partial_eq { ($name:ident) => { impl SpanlessEq for $name { fn eq(&self, other: &Self) -> bool { PartialEq::eq(self, other) } } }; } spanless_eq_partial_eq!(bool); spanless_eq_partial_eq!(u8); spanless_eq_partial_eq!(u16); spanless_eq_partial_eq!(u128); spanless_eq_partial_eq!(usize); spanless_eq_partial_eq!(char); spanless_eq_partial_eq!(Symbol); spanless_eq_partial_eq!(DelimToken); macro_rules! 
spanless_eq_struct { { $name:ident $(<$param:ident>)?; $([$field:ident $other:ident])* $(![$ignore:ident])* } => { impl $(<$param: SpanlessEq>)* SpanlessEq for $name $(<$param>)* { fn eq(&self, other: &Self) -> bool { let $name { $($field,)* $($ignore: _,)* } = self; let $name { $($field: $other,)* $($ignore: _,)* } = other; $(SpanlessEq::eq($field, $other))&&* } } }; { $name:ident $(<$param:ident>)?; $([$field:ident $other:ident])* $next:ident $($rest:ident)* $(!$ignore:ident)* } => { spanless_eq_struct! { $name $(<$param>)*; $([$field $other])* [$next other] $($rest)* $(!$ignore)* } }; { $name:ident $(<$param:ident>)?; $([$field:ident $other:ident])* $(![$ignore:ident])* !$next:ident $(!$rest:ident)* } => { spanless_eq_struct! { $name $(<$param>)*; $([$field $other])* $(![$ignore])* ![$next] $(!$rest)* } }; } macro_rules! spanless_eq_enum { { $name:ident; $([$variant:ident $([$field:tt $this:ident $other:ident])*])* } => { impl SpanlessEq for $name { fn eq(&self, other: &Self) -> bool { match self { $( $name::$variant { .. } => {} )* } #[allow(unreachable_patterns)] match (self, other) { $( ( $name::$variant { $($field: $this),* }, $name::$variant { $($field: $other),* }, ) => { true $(&& SpanlessEq::eq($this, $other))* } )* _ => false, } } } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident [$($named:tt)*] ( $i:tt $($field:tt)* ) $($rest:tt)* } => { spanless_eq_enum! { $name; $([$variant $($fields)*])* $next [$($named)* [$i this other]] ( $($field)* ) $($rest)* } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident [$($named:tt)*] () $($rest:tt)* } => { spanless_eq_enum! { $name; $([$variant $($fields)*])* [$next $($named)*] $($rest)* } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident ( $($field:tt)* ) $($rest:tt)* } => { spanless_eq_enum! { $name; $([$variant $($fields)*])* $next [] ( $($field)* ) $($rest)* } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident $($rest:tt)* } => { spanless_eq_enum! 
{ $name; $([$variant $($fields)*])* [$next] $($rest)* } }; } spanless_eq_struct!(AngleBracketedArgs; span args constraints); spanless_eq_struct!(AnonConst; id value); spanless_eq_struct!(Arm; attrs pat guard body span id is_placeholder); spanless_eq_struct!(AssocTyConstraint; id ident kind span); spanless_eq_struct!(AttrItem; path args); spanless_eq_struct!(Attribute; kind id style span); spanless_eq_struct!(BareFnTy; unsafety ext generic_params decl); spanless_eq_struct!(Block; stmts id rules span); spanless_eq_struct!(Crate; module attrs span proc_macros); spanless_eq_struct!(EnumDef; variants); spanless_eq_struct!(Expr; id kind span attrs); spanless_eq_struct!(Field; attrs id span ident expr is_shorthand is_placeholder); spanless_eq_struct!(FieldPat; ident pat is_shorthand attrs id span is_placeholder); spanless_eq_struct!(FnDecl; inputs output); spanless_eq_struct!(FnHeader; constness asyncness unsafety ext); spanless_eq_struct!(FnSig; header decl); spanless_eq_struct!(ForeignMod; abi items); spanless_eq_struct!(GenericParam; id ident attrs bounds is_placeholder kind); spanless_eq_struct!(Generics; params where_clause span); spanless_eq_struct!(GlobalAsm; asm); spanless_eq_struct!(Item<K>; attrs id span vis ident kind !tokens); spanless_eq_struct!(Label; ident); spanless_eq_struct!(Lifetime; id ident); spanless_eq_struct!(Lit; token kind span); spanless_eq_struct!(LlvmInlineAsm; asm asm_str_style outputs inputs clobbers volatile alignstack dialect); spanless_eq_struct!(LlvmInlineAsmOutput; constraint expr is_rw is_indirect); spanless_eq_struct!(Local; pat ty init id span attrs); spanless_eq_struct!(MacCall; path args prior_type_ascription); spanless_eq_struct!(MacroDef; body macro_rules); spanless_eq_struct!(Mod; inner items inline); spanless_eq_struct!(MutTy; ty mutbl); spanless_eq_struct!(Param; attrs ty pat id span is_placeholder); spanless_eq_struct!(ParenthesizedArgs; span inputs output); spanless_eq_struct!(Pat; id kind span); spanless_eq_struct!(Path; 
span segments); spanless_eq_struct!(PathSegment; ident id args); spanless_eq_struct!(PolyTraitRef; bound_generic_params trait_ref span); spanless_eq_struct!(QSelf; ty path_span position); spanless_eq_struct!(Stmt; id kind span); spanless_eq_struct!(StrLit; style symbol suffix span symbol_unescaped); spanless_eq_struct!(StructField; attrs id span vis ident ty is_placeholder); spanless_eq_struct!(Token; kind span); spanless_eq_struct!(TraitRef; path ref_id); spanless_eq_struct!(Ty; id kind span); spanless_eq_struct!(UseTree; prefix kind span); spanless_eq_struct!(Variant; attrs id span vis ident data disr_expr is_placeholder); spanless_eq_struct!(WhereBoundPredicate; span bound_generic_params bounded_ty bounds); spanless_eq_struct!(WhereClause; predicates span); spanless_eq_struct!(WhereEqPredicate; id span lhs_ty rhs_ty); spanless_eq_struct!(WhereRegionPredicate; span lifetime bounds); spanless_eq_enum!(AssocItemKind; Const(0 1 2) Fn(0 1 2 3) TyAlias(0 1 2 3) MacCall(0)); spanless_eq_enum!(AssocTyConstraintKind; Equality(ty) Bound(bounds)); spanless_eq_enum!(Async; Yes(span closure_id return_impl_trait_id) No); spanless_eq_enum!(AttrKind; Normal(0) DocComment(0)); spanless_eq_enum!(AttrStyle; Outer Inner); spanless_eq_enum!(BinOpKind; Add Sub Mul Div Rem And Or BitXor BitAnd BitOr Shl Shr Eq Lt Le Ne Ge Gt); spanless_eq_enum!(BindingMode; ByRef(0) ByValue(0)); spanless_eq_enum!(BlockCheckMode; Default Unsafe(0)); spanless_eq_enum!(BorrowKind; Ref Raw); spanless_eq_enum!(CaptureBy; Value Ref); spanless_eq_enum!(Const; Yes(0) No); spanless_eq_enum!(CrateSugar; PubCrate JustCrate); spanless_eq_enum!(Defaultness; Default(0) Final); spanless_eq_enum!(Extern; None Implicit Explicit(0)); spanless_eq_enum!(FloatTy; F32 F64); spanless_eq_enum!(FnRetTy; Default(0) Ty(0)); spanless_eq_enum!(ForeignItemKind; Static(0 1 2) Fn(0 1 2 3) TyAlias(0 1 2 3) MacCall(0)); spanless_eq_enum!(GenericArg; Lifetime(0) Type(0) Const(0)); spanless_eq_enum!(GenericArgs; AngleBracketed(0) 
Parenthesized(0)); spanless_eq_enum!(GenericBound; Trait(0 1) Outlives(0)); spanless_eq_enum!(GenericParamKind; Lifetime Type(default) Const(ty)); spanless_eq_enum!(ImplPolarity; Positive Negative(0)); spanless_eq_enum!(IntTy; Isize I8 I16 I32 I64 I128); spanless_eq_enum!(IsAuto; Yes No); spanless_eq_enum!(LitFloatType; Suffixed(0) Unsuffixed); spanless_eq_enum!(LitIntType; Signed(0) Unsigned(0) Unsuffixed); spanless_eq_enum!(LlvmAsmDialect; Att Intel); spanless_eq_enum!(MacArgs; Empty Delimited(0 1 2) Eq(0 1)); spanless_eq_enum!(MacDelimiter; Parenthesis Bracket Brace); spanless_eq_enum!(MacStmtStyle; Semicolon Braces NoBraces); spanless_eq_enum!(Movability; Static Movable); spanless_eq_enum!(Mutability; Mut Not); spanless_eq_enum!(RangeEnd; Included(0) Excluded); spanless_eq_enum!(RangeLimits; HalfOpen Closed); spanless_eq_enum!(StmtKind; Local(0) Item(0) Expr(0) Semi(0) Empty MacCall(0)); spanless_eq_enum!(StrStyle; Cooked Raw(0)); spanless_eq_enum!(TokenTree; Token(0) Delimited(0 1 2)); spanless_eq_enum!(TraitBoundModifier; None Maybe MaybeConst MaybeConstMaybe); spanless_eq_enum!(TraitObjectSyntax; Dyn None); spanless_eq_enum!(UintTy; Usize U8 U16 U32 U64 U128); spanless_eq_enum!(UnOp; Deref Not Neg); spanless_eq_enum!(Unsafe; Yes(0) No); spanless_eq_enum!(UnsafeSource; CompilerGenerated UserProvided); spanless_eq_enum!(UseTreeKind; Simple(0 1 2) Nested(0) Glob); spanless_eq_enum!(VariantData; Struct(0 1) Tuple(0 1) Unit(0)); spanless_eq_enum!(VisibilityKind; Public Crate(0) Restricted(path id) Inherited); spanless_eq_enum!(WherePredicate; BoundPredicate(0) RegionPredicate(0) EqPredicate(0)); spanless_eq_enum!(ExprKind; Box(0) Array(0) Call(0 1) MethodCall(0 1) Tup(0) Binary(0 1 2) Unary(0 1) Lit(0) Cast(0 1) Type(0 1) Let(0 1) If(0 1 2) While(0 1 2) ForLoop(0 1 2 3) Loop(0 1) Match(0 1) Closure(0 1 2 3 4 5) Block(0 1) Async(0 1 2) Await(0) TryBlock(0) Assign(0 1 2) AssignOp(0 1 2) Field(0 1) Index(0 1) Range(0 1 2) Path(0 1) AddrOf(0 1 2) Break(0 1) 
Continue(0) Ret(0) LlvmInlineAsm(0) MacCall(0) Struct(0 1 2) Repeat(0 1) Paren(0) Try(0) Yield(0) Err); spanless_eq_enum!(ItemKind; ExternCrate(0) Use(0) Static(0 1 2) Const(0 1 2) Fn(0 1 2 3) Mod(0) ForeignMod(0) GlobalAsm(0) TyAlias(0 1 2 3) Enum(0 1) Struct(0 1) Union(0 1) Trait(0 1 2 3 4) TraitAlias(0 1) Impl(unsafety polarity defaultness constness generics of_trait self_ty items) MacCall(0) MacroDef(0)); spanless_eq_enum!(LitKind; Str(0 1) ByteStr(0) Byte(0) Char(0) Int(0 1) Float(0 1) Bool(0) Err(0)); spanless_eq_enum!(PatKind; Wild Ident(0 1 2) Struct(0 1 2) TupleStruct(0 1) Or(0) Path(0 1) Tuple(0) Box(0) Ref(0 1) Lit(0) Range(0 1 2) Slice(0) Rest Paren(0) MacCall(0)); spanless_eq_enum!(TyKind; Slice(0) Array(0 1) Ptr(0) Rptr(0 1) BareFn(0) Never Tup(0) Path(0 1) TraitObject(0 1) ImplTrait(0 1) Paren(0) Typeof(0) Infer ImplicitSelf MacCall(0) Err CVarArgs); impl SpanlessEq for Ident { fn eq(&self, other: &Self) -> bool { self.as_str() == other.as_str() } } // Give up on comparing literals inside of macros because there are so many // equivalent representations of the same literal; they are tested elsewhere impl SpanlessEq for token::Lit { fn eq(&self, other: &Self) -> bool { mem::discriminant(self) == mem::discriminant(other) } } impl SpanlessEq for RangeSyntax { fn eq(&self, _other: &Self) -> bool { match self { RangeSyntax::DotDotDot | RangeSyntax::DotDotEq => true, } } } impl SpanlessEq for TokenKind { fn eq(&self, other: &Self) -> bool { match (self, other) { (TokenKind::Literal(this), TokenKind::Literal(other)) => SpanlessEq::eq(this, other), (TokenKind::DotDotEq, _) | (TokenKind::DotDotDot, _) => match other { TokenKind::DotDotEq | TokenKind::DotDotDot => true, _ => false, }, _ => self == other, } } } impl SpanlessEq for TokenStream { fn eq(&self, other: &Self) -> bool { SpanlessEq::eq(&expand_tts(self), &expand_tts(other)) } } fn expand_tts(tts: &TokenStream) -> Vec<TokenTree> { let mut tokens = Vec::new(); for tt in tts.clone().into_trees() { let c 
= match tt { TokenTree::Token(Token { kind: TokenKind::DocComment(c), .. }) => c, _ => { tokens.push(tt); continue; } }; let contents = comments::strip_doc_comment_decoration(&c.as_str()); let style = comments::doc_comment_style(&c.as_str()); tokens.push(TokenTree::token(TokenKind::Pound, DUMMY_SP)); if style == AttrStyle::Inner { tokens.push(TokenTree::token(TokenKind::Not, DUMMY_SP)); } let lit = token::Lit { kind: token::LitKind::Str, symbol: Symbol::intern(&contents), suffix: None, }; let tts = vec![ TokenTree::token(TokenKind::Ident(sym::doc, false), DUMMY_SP), TokenTree::token(TokenKind::Eq, DUMMY_SP), TokenTree::token(TokenKind::Literal(lit), DUMMY_SP), ]; tokens.push(TokenTree::Delimited( DelimSpan::dummy(), DelimToken::Bracket, tts.into_iter().collect::<TokenStream>().into(), )); } tokens } Update test suite to nightly-2020-03-29 extern crate rustc_ast; extern crate rustc_data_structures; extern crate rustc_span; extern crate rustc_target; use std::mem; use rustc_ast::ast::{ AngleBracketedArg, AngleBracketedArgs, AnonConst, Arm, AssocItemKind, AssocTyConstraint, AssocTyConstraintKind, Async, AttrId, AttrItem, AttrKind, AttrStyle, Attribute, BareFnTy, BinOpKind, BindingMode, Block, BlockCheckMode, BorrowKind, CaptureBy, Const, Crate, CrateSugar, Defaultness, EnumDef, Expr, ExprKind, Extern, Field, FieldPat, FloatTy, FnDecl, FnHeader, FnRetTy, FnSig, ForeignItemKind, ForeignMod, GenericArg, GenericArgs, GenericBound, GenericParam, GenericParamKind, Generics, GlobalAsm, Ident, ImplPolarity, IntTy, IsAuto, Item, ItemKind, Label, Lifetime, Lit, LitFloatType, LitIntType, LitKind, LlvmAsmDialect, LlvmInlineAsm, LlvmInlineAsmOutput, Local, MacArgs, MacCall, MacDelimiter, MacStmtStyle, MacroDef, Mod, Movability, MutTy, Mutability, NodeId, Param, ParenthesizedArgs, Pat, PatKind, Path, PathSegment, PolyTraitRef, QSelf, RangeEnd, RangeLimits, RangeSyntax, Stmt, StmtKind, StrLit, StrStyle, StructField, TraitBoundModifier, TraitObjectSyntax, TraitRef, Ty, TyKind, 
UintTy, UnOp, Unsafe, UnsafeSource, UseTree, UseTreeKind, Variant, VariantData, VisibilityKind, WhereBoundPredicate, WhereClause, WhereEqPredicate, WherePredicate, WhereRegionPredicate, }; use rustc_ast::ptr::P; use rustc_ast::token::{self, DelimToken, Token, TokenKind}; use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree}; use rustc_ast::util::comments; use rustc_data_structures::sync::Lrc; use rustc_data_structures::thin_vec::ThinVec; use rustc_span::source_map::Spanned; use rustc_span::{sym, Span, Symbol, SyntaxContext, DUMMY_SP}; pub trait SpanlessEq { fn eq(&self, other: &Self) -> bool; } impl<T: SpanlessEq> SpanlessEq for P<T> { fn eq(&self, other: &Self) -> bool { SpanlessEq::eq(&**self, &**other) } } impl<T: SpanlessEq> SpanlessEq for Lrc<T> { fn eq(&self, other: &Self) -> bool { SpanlessEq::eq(&**self, &**other) } } impl<T: SpanlessEq> SpanlessEq for Option<T> { fn eq(&self, other: &Self) -> bool { match (self, other) { (None, None) => true, (Some(this), Some(other)) => SpanlessEq::eq(this, other), _ => false, } } } impl<T: SpanlessEq> SpanlessEq for Vec<T> { fn eq(&self, other: &Self) -> bool { self.len() == other.len() && self.iter().zip(other).all(|(a, b)| SpanlessEq::eq(a, b)) } } impl<T: SpanlessEq> SpanlessEq for ThinVec<T> { fn eq(&self, other: &Self) -> bool { self.len() == other.len() && self .iter() .zip(other.iter()) .all(|(a, b)| SpanlessEq::eq(a, b)) } } impl<T: SpanlessEq> SpanlessEq for Spanned<T> { fn eq(&self, other: &Self) -> bool { SpanlessEq::eq(&self.node, &other.node) } } impl<A: SpanlessEq, B: SpanlessEq> SpanlessEq for (A, B) { fn eq(&self, other: &Self) -> bool { SpanlessEq::eq(&self.0, &other.0) && SpanlessEq::eq(&self.1, &other.1) } } impl<A: SpanlessEq, B: SpanlessEq, C: SpanlessEq> SpanlessEq for (A, B, C) { fn eq(&self, other: &Self) -> bool { SpanlessEq::eq(&self.0, &other.0) && SpanlessEq::eq(&self.1, &other.1) && SpanlessEq::eq(&self.2, &other.2) } } macro_rules! 
spanless_eq_true { ($name:ident) => { impl SpanlessEq for $name { fn eq(&self, _other: &Self) -> bool { true } } }; } spanless_eq_true!(Span); spanless_eq_true!(DelimSpan); spanless_eq_true!(AttrId); spanless_eq_true!(NodeId); spanless_eq_true!(SyntaxContext); macro_rules! spanless_eq_partial_eq { ($name:ident) => { impl SpanlessEq for $name { fn eq(&self, other: &Self) -> bool { PartialEq::eq(self, other) } } }; } spanless_eq_partial_eq!(bool); spanless_eq_partial_eq!(u8); spanless_eq_partial_eq!(u16); spanless_eq_partial_eq!(u128); spanless_eq_partial_eq!(usize); spanless_eq_partial_eq!(char); spanless_eq_partial_eq!(Symbol); spanless_eq_partial_eq!(DelimToken); macro_rules! spanless_eq_struct { { $name:ident $(<$param:ident>)?; $([$field:ident $other:ident])* $(![$ignore:ident])* } => { impl $(<$param: SpanlessEq>)* SpanlessEq for $name $(<$param>)* { fn eq(&self, other: &Self) -> bool { let $name { $($field,)* $($ignore: _,)* } = self; let $name { $($field: $other,)* $($ignore: _,)* } = other; $(SpanlessEq::eq($field, $other))&&* } } }; { $name:ident $(<$param:ident>)?; $([$field:ident $other:ident])* $next:ident $($rest:ident)* $(!$ignore:ident)* } => { spanless_eq_struct! { $name $(<$param>)*; $([$field $other])* [$next other] $($rest)* $(!$ignore)* } }; { $name:ident $(<$param:ident>)?; $([$field:ident $other:ident])* $(![$ignore:ident])* !$next:ident $(!$rest:ident)* } => { spanless_eq_struct! { $name $(<$param>)*; $([$field $other])* $(![$ignore])* ![$next] $(!$rest)* } }; } macro_rules! spanless_eq_enum { { $name:ident; $([$variant:ident $([$field:tt $this:ident $other:ident])*])* } => { impl SpanlessEq for $name { fn eq(&self, other: &Self) -> bool { match self { $( $name::$variant { .. 
} => {} )* } #[allow(unreachable_patterns)] match (self, other) { $( ( $name::$variant { $($field: $this),* }, $name::$variant { $($field: $other),* }, ) => { true $(&& SpanlessEq::eq($this, $other))* } )* _ => false, } } } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident [$($named:tt)*] ( $i:tt $($field:tt)* ) $($rest:tt)* } => { spanless_eq_enum! { $name; $([$variant $($fields)*])* $next [$($named)* [$i this other]] ( $($field)* ) $($rest)* } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident [$($named:tt)*] () $($rest:tt)* } => { spanless_eq_enum! { $name; $([$variant $($fields)*])* [$next $($named)*] $($rest)* } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident ( $($field:tt)* ) $($rest:tt)* } => { spanless_eq_enum! { $name; $([$variant $($fields)*])* $next [] ( $($field)* ) $($rest)* } }; { $name:ident; $([$variant:ident $($fields:tt)*])* $next:ident $($rest:tt)* } => { spanless_eq_enum! { $name; $([$variant $($fields)*])* [$next] $($rest)* } }; } spanless_eq_struct!(AngleBracketedArgs; span args); spanless_eq_struct!(AnonConst; id value); spanless_eq_struct!(Arm; attrs pat guard body span id is_placeholder); spanless_eq_struct!(AssocTyConstraint; id ident kind span); spanless_eq_struct!(AttrItem; path args); spanless_eq_struct!(Attribute; kind id style span); spanless_eq_struct!(BareFnTy; unsafety ext generic_params decl); spanless_eq_struct!(Block; stmts id rules span); spanless_eq_struct!(Crate; module attrs span proc_macros); spanless_eq_struct!(EnumDef; variants); spanless_eq_struct!(Expr; id kind span attrs); spanless_eq_struct!(Field; attrs id span ident expr is_shorthand is_placeholder); spanless_eq_struct!(FieldPat; ident pat is_shorthand attrs id span is_placeholder); spanless_eq_struct!(FnDecl; inputs output); spanless_eq_struct!(FnHeader; constness asyncness unsafety ext); spanless_eq_struct!(FnSig; header decl); spanless_eq_struct!(ForeignMod; abi items); spanless_eq_struct!(GenericParam; id ident 
// (continuation: completes the `spanless_eq_struct!(GenericParam; ...)`
// invocation begun on the previous line)
attrs bounds is_placeholder kind);
spanless_eq_struct!(Generics; params where_clause span);
spanless_eq_struct!(GlobalAsm; asm);
// `!tokens`: the cached token stream is ignored when comparing items.
spanless_eq_struct!(Item<K>; attrs id span vis ident kind !tokens);
spanless_eq_struct!(Label; ident);
spanless_eq_struct!(Lifetime; id ident);
spanless_eq_struct!(Lit; token kind span);
spanless_eq_struct!(LlvmInlineAsm; asm asm_str_style outputs inputs clobbers
                    volatile alignstack dialect);
spanless_eq_struct!(LlvmInlineAsmOutput; constraint expr is_rw is_indirect);
spanless_eq_struct!(Local; pat ty init id span attrs);
spanless_eq_struct!(MacCall; path args prior_type_ascription);
spanless_eq_struct!(MacroDef; body macro_rules);
spanless_eq_struct!(Mod; inner items inline);
spanless_eq_struct!(MutTy; ty mutbl);
spanless_eq_struct!(Param; attrs ty pat id span is_placeholder);
spanless_eq_struct!(ParenthesizedArgs; span inputs output);
spanless_eq_struct!(Pat; id kind span);
spanless_eq_struct!(Path; span segments);
spanless_eq_struct!(PathSegment; ident id args);
spanless_eq_struct!(PolyTraitRef; bound_generic_params trait_ref span);
spanless_eq_struct!(QSelf; ty path_span position);
spanless_eq_struct!(Stmt; id kind span);
spanless_eq_struct!(StrLit; style symbol suffix span symbol_unescaped);
spanless_eq_struct!(StructField; attrs id span vis ident ty is_placeholder);
spanless_eq_struct!(Token; kind span);
spanless_eq_struct!(TraitRef; path ref_id);
spanless_eq_struct!(Ty; id kind span);
spanless_eq_struct!(UseTree; prefix kind span);
spanless_eq_struct!(Variant; attrs id span vis ident data disr_expr is_placeholder);
spanless_eq_struct!(WhereBoundPredicate; span bound_generic_params bounded_ty bounds);
spanless_eq_struct!(WhereClause; predicates span);
spanless_eq_struct!(WhereEqPredicate; id span lhs_ty rhs_ty);
spanless_eq_struct!(WhereRegionPredicate; span lifetime bounds);

// Enum impls: `Variant(0 1 ...)` lists tuple-field indices to compare,
// `Variant(name ...)` lists struct-variant fields; bare variants are units.
spanless_eq_enum!(AngleBracketedArg; Arg(0) Constraint(0));
spanless_eq_enum!(AssocItemKind; Const(0 1 2) Fn(0 1 2 3) TyAlias(0 1 2 3) MacCall(0));
spanless_eq_enum!(AssocTyConstraintKind; Equality(ty) Bound(bounds));
spanless_eq_enum!(Async; Yes(span closure_id return_impl_trait_id) No);
spanless_eq_enum!(AttrKind; Normal(0) DocComment(0));
spanless_eq_enum!(AttrStyle; Outer Inner);
spanless_eq_enum!(BinOpKind; Add Sub Mul Div Rem And Or BitXor BitAnd BitOr
                  Shl Shr Eq Lt Le Ne Ge Gt);
spanless_eq_enum!(BindingMode; ByRef(0) ByValue(0));
spanless_eq_enum!(BlockCheckMode; Default Unsafe(0));
spanless_eq_enum!(BorrowKind; Ref Raw);
spanless_eq_enum!(CaptureBy; Value Ref);
spanless_eq_enum!(Const; Yes(0) No);
spanless_eq_enum!(CrateSugar; PubCrate JustCrate);
spanless_eq_enum!(Defaultness; Default(0) Final);
spanless_eq_enum!(Extern; None Implicit Explicit(0));
spanless_eq_enum!(FloatTy; F32 F64);
spanless_eq_enum!(FnRetTy; Default(0) Ty(0));
spanless_eq_enum!(ForeignItemKind; Static(0 1 2) Fn(0 1 2 3) TyAlias(0 1 2 3) MacCall(0));
spanless_eq_enum!(GenericArg; Lifetime(0) Type(0) Const(0));
spanless_eq_enum!(GenericArgs; AngleBracketed(0) Parenthesized(0));
spanless_eq_enum!(GenericBound; Trait(0 1) Outlives(0));
spanless_eq_enum!(GenericParamKind; Lifetime Type(default) Const(ty));
spanless_eq_enum!(ImplPolarity; Positive Negative(0));
spanless_eq_enum!(IntTy; Isize I8 I16 I32 I64 I128);
spanless_eq_enum!(IsAuto; Yes No);
spanless_eq_enum!(LitFloatType; Suffixed(0) Unsuffixed);
spanless_eq_enum!(LitIntType; Signed(0) Unsigned(0) Unsuffixed);
spanless_eq_enum!(LlvmAsmDialect; Att Intel);
spanless_eq_enum!(MacArgs; Empty Delimited(0 1 2) Eq(0 1));
spanless_eq_enum!(MacDelimiter; Parenthesis Bracket Brace);
spanless_eq_enum!(MacStmtStyle; Semicolon Braces NoBraces);
spanless_eq_enum!(Movability; Static Movable);
spanless_eq_enum!(Mutability; Mut Not);
spanless_eq_enum!(RangeEnd; Included(0) Excluded);
spanless_eq_enum!(RangeLimits; HalfOpen Closed);
spanless_eq_enum!(StmtKind; Local(0) Item(0) Expr(0) Semi(0) Empty MacCall(0));
spanless_eq_enum!(StrStyle; Cooked Raw(0));
spanless_eq_enum!(TokenTree; Token(0)
// (continuation: completes the `spanless_eq_enum!(TokenTree; ...)` invocation
// begun on the previous line)
Delimited(0 1 2));
spanless_eq_enum!(TraitBoundModifier; None Maybe MaybeConst MaybeConstMaybe);
spanless_eq_enum!(TraitObjectSyntax; Dyn None);
spanless_eq_enum!(UintTy; Usize U8 U16 U32 U64 U128);
spanless_eq_enum!(UnOp; Deref Not Neg);
spanless_eq_enum!(Unsafe; Yes(0) No);
spanless_eq_enum!(UnsafeSource; CompilerGenerated UserProvided);
spanless_eq_enum!(UseTreeKind; Simple(0 1 2) Nested(0) Glob);
spanless_eq_enum!(VariantData; Struct(0 1) Tuple(0 1) Unit(0));
spanless_eq_enum!(VisibilityKind; Public Crate(0) Restricted(path id) Inherited);
spanless_eq_enum!(WherePredicate; BoundPredicate(0) RegionPredicate(0) EqPredicate(0));
spanless_eq_enum!(ExprKind; Box(0) Array(0) Call(0 1) MethodCall(0 1) Tup(0)
                  Binary(0 1 2) Unary(0 1) Lit(0) Cast(0 1) Type(0 1) Let(0 1)
                  If(0 1 2) While(0 1 2) ForLoop(0 1 2 3) Loop(0 1) Match(0 1)
                  Closure(0 1 2 3 4 5) Block(0 1) Async(0 1 2) Await(0)
                  TryBlock(0) Assign(0 1 2) AssignOp(0 1 2) Field(0 1)
                  Index(0 1) Range(0 1 2) Path(0 1) AddrOf(0 1 2) Break(0 1)
                  Continue(0) Ret(0) LlvmInlineAsm(0) MacCall(0) Struct(0 1 2)
                  Repeat(0 1) Paren(0) Try(0) Yield(0) Err);
spanless_eq_enum!(ItemKind; ExternCrate(0) Use(0) Static(0 1 2) Const(0 1 2)
                  Fn(0 1 2 3) Mod(0) ForeignMod(0) GlobalAsm(0)
                  TyAlias(0 1 2 3) Enum(0 1) Struct(0 1) Union(0 1)
                  Trait(0 1 2 3 4) TraitAlias(0 1)
                  Impl(unsafety polarity defaultness constness generics
                       of_trait self_ty items)
                  MacCall(0) MacroDef(0));
spanless_eq_enum!(LitKind; Str(0 1) ByteStr(0) Byte(0) Char(0) Int(0 1)
                  Float(0 1) Bool(0) Err(0));
spanless_eq_enum!(PatKind; Wild Ident(0 1 2) Struct(0 1 2) TupleStruct(0 1)
                  Or(0) Path(0 1) Tuple(0) Box(0) Ref(0 1) Lit(0) Range(0 1 2)
                  Slice(0) Rest Paren(0) MacCall(0));
spanless_eq_enum!(TyKind; Slice(0) Array(0 1) Ptr(0) Rptr(0 1) BareFn(0)
                  Never Tup(0) Path(0 1) TraitObject(0 1) ImplTrait(0 1)
                  Paren(0) Typeof(0) Infer ImplicitSelf MacCall(0) Err CVarArgs);

// Identifiers compare by text, never by span or hygiene context.
impl SpanlessEq for Ident {
    fn eq(&self, other: &Self) -> bool {
        self.as_str() == other.as_str()
    }
}

// Give up on comparing literals inside of macros because there are so many
// equivalent representations of the same literal; they are tested elsewhere
impl SpanlessEq for token::Lit {
    fn eq(&self, other: &Self) -> bool {
        // Only the literal *kind* is compared (see note above).
        mem::discriminant(self) == mem::discriminant(other)
    }
}

// `...` and `..=` range syntax are treated as equivalent spellings.
impl SpanlessEq for RangeSyntax {
    fn eq(&self, _other: &Self) -> bool {
        match self {
            RangeSyntax::DotDotDot | RangeSyntax::DotDotEq => true,
        }
    }
}

impl SpanlessEq for TokenKind {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (TokenKind::Literal(this), TokenKind::Literal(other)) => SpanlessEq::eq(this, other),
            // `..=` and `...` tokens are interchangeable for this comparison.
            (TokenKind::DotDotEq, _) | (TokenKind::DotDotDot, _) => match other {
                TokenKind::DotDotEq | TokenKind::DotDotDot => true,
                _ => false,
            },
            _ => self == other,
        }
    }
}

// Token streams are compared after normalizing doc comments into their
// `#[doc = "..."]` attribute form (see `expand_tts`).
impl SpanlessEq for TokenStream {
    fn eq(&self, other: &Self) -> bool {
        SpanlessEq::eq(&expand_tts(self), &expand_tts(other))
    }
}

/// Flattens a `TokenStream` into a `Vec<TokenTree>`, rewriting every doc
/// comment token (`/// ...` or `//! ...`) into the equivalent
/// `#[doc = "..."]` / `#![doc = "..."]` token sequence so that the two
/// spellings compare equal.  All other tokens pass through unchanged.
fn expand_tts(tts: &TokenStream) -> Vec<TokenTree> {
    let mut tokens = Vec::new();
    for tt in tts.clone().into_trees() {
        let c = match tt {
            TokenTree::Token(Token {
                kind: TokenKind::DocComment(c),
                ..
            }) => c,
            _ => {
                // Not a doc comment: forward verbatim.
                tokens.push(tt);
                continue;
            }
        };
        let contents = comments::strip_doc_comment_decoration(&c.as_str());
        let style = comments::doc_comment_style(&c.as_str());
        // Emit `#` (and `!` for inner doc comments) ...
        tokens.push(TokenTree::token(TokenKind::Pound, DUMMY_SP));
        if style == AttrStyle::Inner {
            tokens.push(TokenTree::token(TokenKind::Not, DUMMY_SP));
        }
        // ... followed by `[doc = "<contents>"]`.
        let lit = token::Lit {
            kind: token::LitKind::Str,
            symbol: Symbol::intern(&contents),
            suffix: None,
        };
        let tts = vec![
            TokenTree::token(TokenKind::Ident(sym::doc, false), DUMMY_SP),
            TokenTree::token(TokenKind::Eq, DUMMY_SP),
            TokenTree::token(TokenKind::Literal(lit), DUMMY_SP),
        ];
        tokens.push(TokenTree::Delimited(
            DelimSpan::dummy(),
            DelimToken::Bracket,
            tts.into_iter().collect::<TokenStream>().into(),
        ));
    }
    tokens
}
//! Tests from HistogramTest.java extern crate hdrsample; extern crate num; extern crate rand; extern crate ieee754; use self::rand::Rng; use hdrsample::{Histogram, SubtractionError}; use hdrsample::serialization::{V2Serializer, Deserializer}; use std::borrow::Borrow; use std::cmp; use std::fmt; use num::Saturating; use ieee754::Ieee754; macro_rules! assert_near { ($a: expr, $b: expr, $tolerance: expr) => {{ let a = $a as f64; let b = $b as f64; let tol = $tolerance as f64; assert!((a - b).abs() <= b * tol, "assertion failed: `(left ~= right) (left: `{}`, right: `{}`, tolerance: `{:.5}%`)", a, b, 100.0 * tol); }} } fn verify_max<T: hdrsample::Counter, B: Borrow<Histogram<T>>>(hist: B) -> bool { let hist = hist.borrow(); if let Some(mx) = hist.iter_recorded() .map(|v| v.value()) .map(|v| hist.highest_equivalent(v)) .last() { hist.max() == mx } else { hist.max() == 0 } } fn assert_min_max_count<T: hdrsample::Counter, B: Borrow<Histogram<T>>>(hist: B) { let h = hist.borrow(); let mut min = None; let mut max = None; let mut total = 0; for i in 0..h.len() { let value = h.value_for(i); let count = h.count_at(value).unwrap(); if count == T::zero() { continue; } min = Some(cmp::min(min.unwrap_or(u64::max_value()), value)); max = Some(cmp::max(max.unwrap_or(0), value)); total = total.saturating_add(count.to_u64().unwrap()); } let min = min.map(|m| h.lowest_equivalent(m)).unwrap_or(0); let max = max.map(|m| h.highest_equivalent(m)).unwrap_or(0); assert_eq!(min, h.min()); assert_eq!(max, h.max()); assert_eq!(total, h.count()); } const TRACKABLE_MAX: u64 = 3600 * 1000 * 1000; // Store up to 2 * 10^3 in single-unit precision. Can be 5 at most. 
// Number of significant value digits used by most tests below.
const SIGFIG: u8 = 3;
// Arbitrary small value recorded throughout the tests.
const TEST_VALUE_LEVEL: u64 = 4;

#[test]
fn construction_arg_ranges() {
    // A max of 1 and more than 5 significant figures are both rejected.
    assert!(Histogram::<u64>::new_with_max(1, SIGFIG).is_err());
    assert!(Histogram::<u64>::new_with_max(TRACKABLE_MAX, 6).is_err());
}

#[test]
fn empty_histogram() {
    let h = Histogram::<u64>::new(SIGFIG).unwrap();
    assert_eq!(h.min(), 0);
    assert_eq!(h.max(), 0);
    assert_near!(h.mean(), 0.0, 0.0000000000001);
    assert_near!(h.stdev(), 0.0, 0.0000000000001);
    assert_near!(h.percentile_below(0).unwrap(), 100.0, 0.0000000000001);
    assert!(verify_max(h));
}

#[test]
fn construction_arg_gets() {
    let h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.low(), 1);
    assert_eq!(h.high(), TRACKABLE_MAX);
    assert_eq!(h.sigfig(), SIGFIG);
    let h = Histogram::<u64>::new_with_bounds(1000, TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.low(), 1000);
}

#[test]
fn record() {
    let mut h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h += TEST_VALUE_LEVEL;
    assert_eq!(h.count_at(TEST_VALUE_LEVEL), Ok(1));
    assert_eq!(h.count(), 1);
    assert!(verify_max(h));
}

#[test]
fn record_past_trackable_max() {
    let mut h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    assert!(h.record(3 * TRACKABLE_MAX).is_err());
}

// `record_correct` back-fills evenly spaced samples at the given interval to
// compensate for coordinated omission; plain recording does not.
#[test]
fn record_in_interval() {
    let mut h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h.record_correct(TEST_VALUE_LEVEL, TEST_VALUE_LEVEL / 4).unwrap();
    let mut r = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    r += TEST_VALUE_LEVEL;
    // The data will include corrected samples:
    assert_eq!(h.count_at((TEST_VALUE_LEVEL * 1) / 4), Ok(1));
    assert_eq!(h.count_at((TEST_VALUE_LEVEL * 2) / 4), Ok(1));
    assert_eq!(h.count_at((TEST_VALUE_LEVEL * 3) / 4), Ok(1));
    assert_eq!(h.count_at((TEST_VALUE_LEVEL * 4) / 4), Ok(1));
    assert_eq!(h.count(), 4);
    // But the raw data will not:
    assert_eq!(r.count_at((TEST_VALUE_LEVEL * 1) / 4), Ok(0));
    assert_eq!(r.count_at((TEST_VALUE_LEVEL * 2) / 4), Ok(0));
    assert_eq!(r.count_at((TEST_VALUE_LEVEL * 3) / 4), Ok(0));
    assert_eq!(r.count_at((TEST_VALUE_LEVEL * 4) / 4), Ok(1));
    assert_eq!(r.count(), 1);
    assert!(verify_max(h));
}

#[test]
fn reset() {
    let mut h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h += TEST_VALUE_LEVEL;
    h.reset();
    assert_eq!(h.count_at(TEST_VALUE_LEVEL), Ok(0));
    assert_eq!(h.count(), 0);
    assert!(verify_max(h));
}

#[test]
fn add() {
    let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h1 += TEST_VALUE_LEVEL;
    h1 += 1000 * TEST_VALUE_LEVEL;
    h2 += TEST_VALUE_LEVEL;
    h2 += 1000 * TEST_VALUE_LEVEL;
    h1 += &h2;
    assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(2));
    assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(2));
    assert_eq!(h1.count(), 4);
    let mut big = Histogram::<u64>::new_with_max(2 * TRACKABLE_MAX, SIGFIG).unwrap();
    big += TEST_VALUE_LEVEL;
    big += 1000 * TEST_VALUE_LEVEL;
    big += 2 * TRACKABLE_MAX;
    // Adding the smaller histogram to the bigger one should work:
    big += &h1;
    assert_eq!(big.count_at(TEST_VALUE_LEVEL), Ok(3));
    assert_eq!(big.count_at(1000 * TEST_VALUE_LEVEL), Ok(3));
    assert_eq!(big.count_at(2 * TRACKABLE_MAX), Ok(1)); // overflow smaller hist...
// (continuation: completes `fn add()` begun on the previous line)
    assert_eq!(big.count(), 7);
    // But trying to add a larger histogram into a smaller one should throw an AIOOB:
    assert!(h1.add(&big).is_err());
    assert!(verify_max(h1));
    assert!(verify_max(h2));
    assert!(verify_max(big));
}

#[test]
fn subtract_after_add() {
    let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h1 += TEST_VALUE_LEVEL;
    h1 += 1000 * TEST_VALUE_LEVEL;
    h2 += TEST_VALUE_LEVEL;
    h2 += 1000 * TEST_VALUE_LEVEL;
    h1.add(&h2).unwrap();
    assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(2));
    assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(2));
    assert_eq!(h1.count(), 4);
    h1 += &h2;
    assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(3));
    assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(3));
    assert_eq!(h1.count(), 6);
    // Subtracting h2 once should undo exactly one of the two additions.
    h1.subtract(&h2).unwrap();
    assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(2));
    assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(2));
    assert_eq!(h1.count(), 4);
    assert_min_max_count(h1);
    assert_min_max_count(h2);
}

#[test]
fn subtract_to_zero_counts() {
    let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h1 += TEST_VALUE_LEVEL;
    h1 += 1000 * TEST_VALUE_LEVEL;
    assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(1));
    assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(1));
    assert_eq!(h1.count(), 2);
    // Subtracting a histogram from its own clone must empty it.
    let clone = h1.clone();
    h1.subtract(&clone).unwrap();
    assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(0));
    assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(0));
    assert_eq!(h1.count(), 0);
    assert_min_max_count(h1);
}

#[test]
fn subtract_to_negative_counts_error() {
    let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h1 += TEST_VALUE_LEVEL;
    h1 += 1000 * TEST_VALUE_LEVEL;
    // h2 holds more counts than h1 at each value, so subtraction must fail.
    h2.record_n(TEST_VALUE_LEVEL, 2).unwrap();
    h2.record_n(1000 * TEST_VALUE_LEVEL, 2).unwrap();
    assert_eq!(SubtractionError::SubtrahendCountExceedsMinuendCount,
               h1.subtract(&h2).unwrap_err());
    assert_min_max_count(h1);
    assert_min_max_count(h2);
}

#[test]
fn subtract_subtrahend_values_outside_minuend_range_error() {
    let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h1 += TEST_VALUE_LEVEL;
    h1 += 1000 * TEST_VALUE_LEVEL;
    // `big` contains a value h1 cannot represent at all.
    let mut big = Histogram::<u64>::new_with_max(2 * TRACKABLE_MAX, SIGFIG).unwrap();
    big += TEST_VALUE_LEVEL;
    big += 1000 * TEST_VALUE_LEVEL;
    big += 2 * TRACKABLE_MAX;
    assert_eq!(SubtractionError::SubtrahendValueExceedsMinuendRange,
               h1.subtract(&big).unwrap_err());
    assert_min_max_count(h1);
    assert_min_max_count(big);
}

#[test]
fn subtract_values_inside_minuend_range_works() {
    let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h1 += TEST_VALUE_LEVEL;
    h1 += 1000 * TEST_VALUE_LEVEL;
    let mut big = Histogram::<u64>::new_with_max(2 * TRACKABLE_MAX, SIGFIG).unwrap();
    big += TEST_VALUE_LEVEL;
    big += 1000 * TEST_VALUE_LEVEL;
    big += 2 * TRACKABLE_MAX;
    // Triple every count in `big`.
    let big2 = big.clone();
    big += &big2;
    big += &big2;
    assert_eq!(big.count_at(TEST_VALUE_LEVEL), Ok(3));
    assert_eq!(big.count_at(1000 * TEST_VALUE_LEVEL), Ok(3));
    assert_eq!(big.count_at(2 * TRACKABLE_MAX), Ok(3)); // overflow smaller hist...
    assert_eq!(big.count(), 9);
    // Subtracting the smaller histogram from the bigger one should work:
    big -= &h1;
    assert_eq!(big.count_at(TEST_VALUE_LEVEL), Ok(2));
    assert_eq!(big.count_at(1000 * TEST_VALUE_LEVEL), Ok(2));
    assert_eq!(big.count_at(2 * TRACKABLE_MAX), Ok(3)); // overflow smaller hist...
// (continuation: completes `fn subtract_values_inside_minuend_range_works()`
// begun on the previous line)
    assert_eq!(big.count(), 7);
    assert_min_max_count(h1);
    assert_min_max_count(big);
}

#[test]
fn subtract_values_strictly_inside_minuend_range_yields_same_min_max_no_restat() {
    let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h1 += 1;
    h1 += 10;
    h1 += 100;
    h1 += 1000;
    h2 += 10;
    h2 += 100;
    // will not require a restat
    h1.subtract(&h2).unwrap();
    assert_eq!(1, h1.min());
    assert_eq!(1000, h1.max());
    assert_eq!(2, h1.count());
    assert_min_max_count(h1);
    assert_min_max_count(h2);
}

#[test]
fn subtract_values_at_extent_of_minuend_zero_count_range_recalculates_min_max() {
    let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h1 += 1;
    h1 += 10;
    h1 += 100;
    h1 += 1000;
    // Remove the single counts at the current min (1) and max (1000).
    h2 += 1;
    h2 += 1000;
    // will trigger a restat because min/max values are having counts subtracted
    h1.subtract(&h2).unwrap();
    assert_eq!(10, h1.min());
    assert_eq!(100, h1.max());
    assert_min_max_count(h1);
    assert_min_max_count(h2);
}

#[test]
fn subtract_values_at_extent_of_minuend_nonzero_count_range_recalculates_same_min_max() {
    let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    // Two counts at each value, so removing one leaves min/max unchanged.
    h1.record_n(1, 2).unwrap();
    h1.record_n(10, 2).unwrap();
    h1.record_n(100, 2).unwrap();
    h1.record_n(1000, 2).unwrap();
    h2 += 1;
    h2 += 1000;
    // will trigger a restat because min/max values are having counts subtracted
    h1.subtract(&h2).unwrap();
    assert_eq!(1, h1.min());
    assert_eq!(1000, h1.max());
    assert_min_max_count(h1);
    assert_min_max_count(h2);
}

#[test]
fn subtract_values_within_bucket_precision_of_of_minuend_min_recalculates_min_max() {
    let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 5).unwrap();
    // sub bucket size is 2 above 2048 with 3 sigfigs
    h1.record(3000).unwrap();
    h1.record(3100).unwrap();
    h1.record(3200).unwrap();
    h1.record(3300).unwrap();
    // h2 has 5 sigfigs, so bucket size is 1 still
    h2 += 3001;
    // will trigger a restat because min/max values are having counts subtracted
    h1.subtract(&h2).unwrap();
    assert_eq!(3100, h1.min());
    assert_eq!(3301, h1.max());
    assert_min_max_count(h1);
    assert_min_max_count(h2);
}

#[test]
fn subtract_values_at_minuend_min_recalculates_min_max() {
    let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 5).unwrap();
    // sub bucket size is 2 above 2048 with 3 sigfigs
    h1.record(3000).unwrap();
    h1.record(3100).unwrap();
    h1.record(3200).unwrap();
    h1.record(3300).unwrap();
    // h2 has 5 sigfigs, so bucket size is 1 still
    h2 += 3000;
    // will trigger a restat because min/max values are having counts subtracted
    h1.subtract(&h2).unwrap();
    assert_eq!(3100, h1.min());
    assert_eq!(3301, h1.max());
    assert_min_max_count(h1);
    assert_min_max_count(h2);
}

#[test]
fn subtract_values_within_bucket_precision_of_of_minuend_max_recalculates_min_max() {
    let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 5).unwrap();
    // sub bucket size is 2 above 2048 with 3 sigfigs
    h1.record(3000).unwrap();
    h1.record(3100).unwrap();
    h1.record(3200).unwrap();
    h1.record(3300).unwrap();
    // h2 has 5 sigfigs, so bucket size is 1 still
    h2 += 3301;
    // will trigger a restat because min/max values are having counts subtracted
    h1.subtract(&h2).unwrap();
    assert_eq!(3000, h1.min());
    assert_eq!(3201, h1.max());
    assert_min_max_count(h1);
    assert_min_max_count(h2);
}

#[test]
fn subtract_values_at_minuend_max_recalculates_min_max() {
    let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 5).unwrap();
    // sub bucket size is 2 above 2048 with 3 sigfigs
    h1.record(3000).unwrap();
    h1.record(3100).unwrap();
    h1.record(3200).unwrap();
    h1.record(3300).unwrap();
    // h2 has 5 sigfigs, so bucket size is 1 still
    h2 += 3300;
    // will trigger a restat because min/max values are having counts subtracted
    h1.subtract(&h2).unwrap();
    assert_eq!(3000, h1.min());
    assert_eq!(3201, h1.max());
    assert_min_max_count(h1);
    assert_min_max_count(h2);
}

#[test]
fn subtract_values_minuend_saturated_total_recalculates_saturated() {
    let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap();
    h1.record_n(1, u64::max_value()).unwrap();
    h1.record_n(10, u64::max_value()).unwrap();
    h1.record_n(100, u64::max_value()).unwrap();
    h1.record_n(1000, u64::max_value()).unwrap();
    h2.record(10).unwrap();
    h2.record(100).unwrap();
    // will trigger a restat - total count is saturated
    h1.subtract(&h2).unwrap();
    // min, max haven't changed
    assert_eq!(1, h1.min());
    assert_eq!(1000, h1.max());
    // still saturated
    assert_eq!(u64::max_value(), h1.count());
    assert_min_max_count(h1);
    assert_min_max_count(h2);
}

#[test]
fn subtract_values_minuend_saturated_total_recalculates_not_saturated() {
    let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap();
    // 3 of these is just under u64::max_value()
    let chunk = (u64::max_value() / 16) * 5;
    h1.record_n(1, chunk).unwrap();
    h1.record_n(10, chunk).unwrap();
    h1.record_n(100, chunk).unwrap();
    h1.record_n(1000, chunk).unwrap();
    h2.record_n(10, chunk).unwrap();
    // will trigger a restat - total count is saturated
    h1.subtract(&h2).unwrap();
    // min, max haven't changed
    assert_eq!(1, h1.min());
    assert_eq!(1000, h1.max());
    // not saturated
    assert_eq!(u64::max_value() / 16 * 15, h1.count());
    assert_min_max_count(h1);
    assert_min_max_count(h2);
}

// Expected ranges below reflect the histogram's bucket geometry at SIGFIG=3;
// they are crate-specific regression values.
#[test]
fn equivalent_range() {
    let h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.equivalent_range(1), 1);
// (continuation: completes `fn equivalent_range()` begun on the previous line)
    assert_eq!(h.equivalent_range(2500), 2);
    assert_eq!(h.equivalent_range(8191), 4);
    assert_eq!(h.equivalent_range(8192), 8);
    assert_eq!(h.equivalent_range(10000), 8);
}

// "scaled" variants repeat the same checks with a non-unit lowest value
// (1024), so all quantities scale by that factor.
#[test]
fn scaled_equivalent_range() {
    let h = Histogram::<u64>::new_with_bounds(1024, TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.equivalent_range(1 * 1024), 1 * 1024);
    assert_eq!(h.equivalent_range(2500 * 1024), 2 * 1024);
    assert_eq!(h.equivalent_range(8191 * 1024), 4 * 1024);
    assert_eq!(h.equivalent_range(8192 * 1024), 8 * 1024);
    assert_eq!(h.equivalent_range(10000 * 1024), 8 * 1024);
}

#[test]
fn lowest_equivalent() {
    let h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.lowest_equivalent(10007), 10000);
    assert_eq!(h.lowest_equivalent(10009), 10008);
}

#[test]
fn scaled_lowest_equivalent() {
    let h = Histogram::<u64>::new_with_bounds(1024, TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.lowest_equivalent(10007 * 1024), 10000 * 1024);
    assert_eq!(h.lowest_equivalent(10009 * 1024), 10008 * 1024);
}

#[test]
fn highest_equivalent() {
    let h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.highest_equivalent(8180), 8183);
    assert_eq!(h.highest_equivalent(8191), 8191);
    assert_eq!(h.highest_equivalent(8193), 8199);
    assert_eq!(h.highest_equivalent(9995), 9999);
    assert_eq!(h.highest_equivalent(10007), 10007);
    assert_eq!(h.highest_equivalent(10008), 10015);
}

#[test]
fn scaled_highest_equivalent() {
    let h = Histogram::<u64>::new_with_bounds(1024, TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.highest_equivalent(8180 * 1024), 8183 * 1024 + 1023);
    assert_eq!(h.highest_equivalent(8191 * 1024), 8191 * 1024 + 1023);
    assert_eq!(h.highest_equivalent(8193 * 1024), 8199 * 1024 + 1023);
    assert_eq!(h.highest_equivalent(9995 * 1024), 9999 * 1024 + 1023);
    assert_eq!(h.highest_equivalent(10007 * 1024), 10007 * 1024 + 1023);
    assert_eq!(h.highest_equivalent(10008 * 1024), 10015 * 1024 + 1023);
}

#[test]
fn median_equivalent() {
    let h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.median_equivalent(4), 4);
    assert_eq!(h.median_equivalent(5), 5);
    assert_eq!(h.median_equivalent(4000), 4001);
    assert_eq!(h.median_equivalent(8000), 8002);
    assert_eq!(h.median_equivalent(10007), 10004);
}

// Only checks for absence of arithmetic panics at the value-range extremes.
#[test]
fn median_equivalent_doesnt_panic_at_extremes() {
    let h = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap();
    let _ = h.median_equivalent(u64::max_value());
    let _ = h.median_equivalent(u64::max_value() - 1);
    let _ = h.median_equivalent(0);
    let _ = h.median_equivalent(1);
}

#[test]
fn scaled_median_equivalent() {
    let h = Histogram::<u64>::new_with_bounds(1024, TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.median_equivalent(1024 * 4), 1024 * 4 + 512);
    assert_eq!(h.median_equivalent(1024 * 5), 1024 * 5 + 512);
    assert_eq!(h.median_equivalent(1024 * 4000), 1024 * 4001);
    assert_eq!(h.median_equivalent(1024 * 8000), 1024 * 8002);
    assert_eq!(h.median_equivalent(1024 * 10007), 1024 * 10004);
}

/// Asserts two histograms are equal both via `PartialEq` and via a spot-check
/// of per-value counts, total count and cached max.
fn are_equal<T, B1, B2>(actual: B1, expected: B2)
    where T: hdrsample::Counter + fmt::Debug,
          B1: Borrow<Histogram<T>>,
          B2: Borrow<Histogram<T>>
{
    let actual = actual.borrow();
    let expected = expected.borrow();
    assert!(actual == expected);
    assert_eq!(actual.count_at(TEST_VALUE_LEVEL),
               expected.count_at(TEST_VALUE_LEVEL));
    assert_eq!(actual.count_at(10 * TEST_VALUE_LEVEL),
               expected.count_at(10 * TEST_VALUE_LEVEL));
    assert_eq!(actual.count(), expected.count());
    assert!(verify_max(expected));
    assert!(verify_max(actual));
}

#[test]
fn clone() {
    let mut h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h += TEST_VALUE_LEVEL;
    h += 10 * TEST_VALUE_LEVEL;
    let max = h.high();
    h.record_correct(max - 1, 31000).unwrap();
    are_equal(h.clone(), h);
}

#[test]
fn scaled_clone() {
    let mut h = Histogram::<u64>::new_with_bounds(1000, TRACKABLE_MAX, SIGFIG).unwrap();
    h += TEST_VALUE_LEVEL;
    h += 10 * TEST_VALUE_LEVEL;
    let max = h.high();
    h.record_correct(max - 1, 31000).unwrap();
    are_equal(h.clone(), h);
}

#[test]
fn set_to() {
    let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h1 += TEST_VALUE_LEVEL;
    h1 += 10 * TEST_VALUE_LEVEL;
    let max = h1.high();
    h1.record_correct(max - 1, 31000).unwrap();
    h2.set_to(&h1).unwrap();
    are_equal(&h1, &h2);
    // `set_to` overwrites, so a second call after more recording still matches.
    h1 += 20 * TEST_VALUE_LEVEL;
    h2.set_to(&h1).unwrap();
    are_equal(&h1, &h2);
}

#[test]
fn scaled_set_to() {
    let mut h1 = Histogram::<u64>::new_with_bounds(1000, TRACKABLE_MAX, SIGFIG).unwrap();
    let mut h2 = Histogram::<u64>::new_with_bounds(1000, TRACKABLE_MAX, SIGFIG).unwrap();
    h1 += TEST_VALUE_LEVEL;
    h1 += 10 * TEST_VALUE_LEVEL;
    let max = h1.high();
    h1.record_correct(max - 1, 31000).unwrap();
    h2.set_to(&h1).unwrap();
    are_equal(&h1, &h2);
    h1 += 20 * TEST_VALUE_LEVEL;
    h2.set_to(&h1).unwrap();
    are_equal(&h1, &h2);
}

// The three random-write tests only assert absence of panics/errors across a
// large random sample of the value range.
#[test]
fn random_write_full_value_range_precision_5_no_panic() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 5).unwrap();
    let mut rng = rand::weak_rng();
    for _ in 0..1_000_000 {
        let mut r: u64 = rng.gen();
        if r == 0 {
            // 0 is below the histogram's lowest discernible value (1).
            r = 1;
        }
        h.record(r).unwrap();
    }
}

#[test]
fn random_write_full_value_range_precision_0_no_panic() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 0).unwrap();
    let mut rng = rand::weak_rng();
    for _ in 0..1_000_000 {
        let mut r: u64 = rng.gen();
        if r == 0 {
            r = 1;
        }
        h.record(r).unwrap();
    }
}

#[test]
fn random_write_middle_of_value_range_precision_3_no_panic() {
    let low = 1_000;
    let high = 1_000_000_000;
    let mut h = Histogram::<u64>::new_with_bounds(low, high, 3).unwrap();
    let mut rng = rand::weak_rng();
    for _ in 0..1_000_000 {
        h.record(rng.gen_range(low, high + 1)).unwrap();
    }
}

#[test]
fn value_count_overflow_from_record_saturates_u16() {
    let mut h = Histogram::<u16>::new_with_max(TRACKABLE_MAX, 2).unwrap();
    h.record_n(3, u16::max_value() - 1).unwrap();
    h.record_n(3, u16::max_value() - 1).unwrap();
    // individual count has saturated
// (continuation: completes `fn value_count_overflow_from_record_saturates_u16()`
// begun on the previous line)
    assert_eq!(u16::max_value(), h.count_at(3).unwrap());
    // total is a u64 though
    assert_eq!((u16::max_value() - 1) as u64 * 2, h.count());
}

#[test]
fn value_count_overflow_from_record_saturates_u64() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    h.record_n(1, u64::max_value() - 1).unwrap();
    h.record_n(1, u64::max_value() - 1).unwrap();
    assert_eq!(u64::max_value(), h.count_at(1).unwrap());
    assert_eq!(u64::max_value(), h.count());
}

#[test]
fn value_count_overflow_from_record_autoresize_doesnt_panic_saturates() {
    let mut h = Histogram::<u64>::new_with_bounds(1, 10000, 3).unwrap();
    h.auto(true);
    h.record_n(1, u64::max_value() - 1).unwrap();
    h.record_n(1, u64::max_value() - 1).unwrap();
    // forces resize
    h.record_n(1_000_000_000, u64::max_value() - 1).unwrap();
    h.record_n(1_000_000_000, u64::max_value() - 1).unwrap();
    assert_eq!(u64::max_value(), h.count_at(1).unwrap());
    assert_eq!(u64::max_value(), h.count_at(1_000_000_000).unwrap());
    assert_eq!(u64::max_value(), h.count());
}

#[test]
fn value_count_overflow_from_add_same_dimensions_saturates() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    h.record_n(1, u64::max_value() - 1).unwrap();
    h2.record_n(1, u64::max_value() - 1).unwrap();
    h.add(h2).unwrap();
    assert_eq!(u64::max_value(), h.count_at(1).unwrap());
    assert_eq!(u64::max_value(), h.count());
}

#[test]
fn value_count_overflow_from_add_different_precision_saturates() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    // different precision
    let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 4).unwrap();
    h.record_n(1, u64::max_value() - 1).unwrap();
    h2.record_n(1, u64::max_value() - 1).unwrap();
    h.add(h2).unwrap();
    assert_eq!(u64::max_value(), h.count_at(1).unwrap());
    assert_eq!(u64::max_value(), h.count());
}

#[test]
fn value_count_overflow_from_add_with_resize_to_same_dimensions_saturates() {
    let mut h = Histogram::<u64>::new_with_bounds(1, 10_000, 3).unwrap();
    h.auto(true);
    let mut h2 = Histogram::<u64>::new_with_bounds(1, 10_000_000_000, 3).unwrap();
    h.record_n(1, u64::max_value() - 1).unwrap();
    h2.record_n(1, u64::max_value() - 1).unwrap();
    // recording at value == h2 max should trigger h to resize to the same dimensions when added
    h2.record_n(10_000_000_000, u64::max_value() - 1).unwrap();
    h.add(h2).unwrap();
    assert_eq!(u64::max_value(), h.count_at(1).unwrap());
    assert_eq!(u64::max_value(), h.count());
}

#[test]
fn total_count_overflow_from_record_saturates() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    // Each per-value count fits; only the total across values overflows.
    h.record_n(1, u64::max_value() - 1).unwrap();
    h.record_n(10, u64::max_value() - 1).unwrap();
    assert_eq!(u64::max_value() - 1, h.count_at(1).unwrap());
    assert_eq!(u64::max_value() - 1, h.count_at(10).unwrap());
    assert_eq!(u64::max_value(), h.count());
}

#[test]
fn total_count_overflow_from_add_same_dimensions_saturates_calculating_other_addend_total() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    h.record_n(1, u64::max_value() - 10).unwrap();
    h2.record_n(10, u64::max_value() - 1).unwrap();
    h2.record_n(20, 10).unwrap();
    // just h2's total would overflow
    h.add(h2).unwrap();
    assert_eq!(u64::max_value() - 10, h.count_at(1).unwrap());
    assert_eq!(10, h.count_at(20).unwrap());
    // if accumulating total count for h2 had overflowed, we would see max_value - 1000 + 9 here
    assert_eq!(u64::max_value(), h.count());
}

#[test]
fn total_count_overflow_from_add_same_dimensions_saturates_when_added_to_orig_total_count() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    h.record_n(1, u64::max_value() - 10).unwrap();
    h2.record_n(10, 9).unwrap();
    h2.record_n(20, 9).unwrap();
    // h2's total wouldn't overflow, but it would when added to h1
    h.add(h2).unwrap();
    assert_eq!(u64::max_value() - 10, h.count_at(1).unwrap());
    assert_eq!(9, h.count_at(20).unwrap());
    assert_eq!(u64::max_value(), h.count());
}

#[test]
fn total_count_overflow_from_add_different_precision_saturates() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    // different precision
    let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 4).unwrap();
    h.record_n(1, u64::max_value() - 1).unwrap();
    h2.record_n(20, u64::max_value() - 1).unwrap();
    h.add(h2).unwrap();
    assert_eq!(u64::max_value() - 1, h.count_at(1).unwrap());
    assert_eq!(u64::max_value() - 1, h.count_at(20).unwrap());
    assert_eq!(u64::max_value(), h.count());
}

#[test]
fn total_count_overflow_from_add_with_resize_saturates() {
    let mut h = Histogram::<u64>::new_with_bounds(1, 10_000, 3).unwrap();
    h.auto(true);
    let mut h2 = Histogram::<u64>::new_with_bounds(1, 10_000_000_000, 3).unwrap();
    h.record_n(1, u64::max_value() - 1).unwrap();
    h2.record_n(1, u64::max_value() - 1).unwrap();
    h2.record_n(10_000_000_000, u64::max_value() - 1).unwrap();
    h.add(h2).unwrap();
    assert_eq!(u64::max_value(), h.count_at(1).unwrap());
    assert_eq!(u64::max_value(), h.count());
}

#[test]
fn total_count_overflow_from_deserialize_saturates() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    // can't go bigger than i64 max because it will be serialized
    h.record_n(1, i64::max_value() as u64).unwrap();
    h.record_n(1000, i64::max_value() as u64).unwrap();
    h.record_n(1000_000, i64::max_value() as u64).unwrap();
    assert_eq!(u64::max_value(), h.count());
    // Round-trip through the V2 wire format must preserve the saturated total.
    let mut vec = Vec::new();
    V2Serializer::new().serialize(&h, &mut vec).unwrap();
    let deser_h: Histogram<u64> = Deserializer::new().deserialize(&mut vec.as_slice()).unwrap();
    assert_eq!(u64::max_value(), deser_h.count());
}

// NOTE(review): this function continues past the end of this chunk; the
// remainder of its body is outside the visible range.
#[test]
fn subtract_underflow_guarded_by_per_value_count_check() { let mut
// (continuation) Completes `subtract_underflow_guarded_by_per_value_count_check`,
// whose `let mut` opener sits at the end of the previous physical line of this dump.
// Subtracting a larger per-value count than the minuend holds must return an error
// instead of silently underflowing the counter.
h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    h.record_n(1, 1).unwrap();
    h2.record_n(1, 100).unwrap();
    assert_eq!(SubtractionError::SubtrahendCountExceedsMinuendCount,
               h.subtract(h2).unwrap_err());
}

/// With exactly two recorded values, quantiles at and below 0.5 resolve to the
/// first value; only a quantile strictly above 0.5 (by more than one ulp)
/// reaches the second value.
#[test]
fn quantile_2_values() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    h.record(1).unwrap();
    h.record(2).unwrap();

    assert_eq!(1, h.value_at_quantile(0.25));
    assert_eq!(1, h.value_at_quantile(0.5));

    // Two f64 values exactly one ulp above 0.5, verified via Ieee754::next().
    let almost_half = 0.5000000000000001;
    let next = 0.5000000000000002;
    // one ulp apart
    assert_eq!(almost_half, 0.5_f64.next());
    assert_eq!(next, almost_half.next());

    assert_eq!(1, h.value_at_quantile(0.5));
    // ideally this would return 2, not 1
    assert_eq!(1, h.value_at_quantile(almost_half));
    assert_eq!(2, h.value_at_quantile(next));
}

/// One low outlier among four identical values: the 25th and 30th percentiles
/// both land on the dominant value.
#[test]
fn quantile_5_values() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    h.record(1).unwrap();
    h.record(2).unwrap();
    h.record(2).unwrap();
    h.record(2).unwrap();
    h.record(2).unwrap();

    assert_eq!(2, h.value_at_quantile(0.25));
    assert_eq!(2, h.value_at_quantile(0.3));
}

/// A dense sequence of 20k distinct values: a high quantile should resolve to a
/// value equivalent (same bucket) to the exact order statistic.
#[test]
fn quantile_20k() {
    let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap();
    for i in 1..20_001 {
        h.record(i).unwrap();
    }
    assert_eq!(20_000, h.count());
    assert!(h.equivalent(19961, h.value_at_quantile(0.99805)));
}

/// Quantile lookups on a range-restricted, high-precision histogram with values
/// in the tens of millions.
#[test]
fn quantile_large_numbers() {
    let mut h = Histogram::<u64>::new_with_bounds(20_000_000, 100_000_000, 5).unwrap();
    h.record(100_000_000).unwrap();
    h.record(20_000_000).unwrap();
    h.record(30_000_000).unwrap();

    assert!(h.equivalent(20_000_000, h.value_at_quantile(0.5)));
    assert!(h.equivalent(30_000_000, h.value_at_quantile(0.5)));
    assert!(h.equivalent(100_000_000, h.value_at_quantile(0.8333)));
    assert!(h.equivalent(100_000_000, h.value_at_quantile(0.8334)));
    assert!(h.equivalent(100_000_000, h.value_at_quantile(0.99)));
}

#[test]
fn
value_at_quantile_matches_pctile_iter_sequence() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); let lengths = vec![1, 5, 10, 50, 100, 500, 1_000, 5_000, 10_000, 50_000, 100_000]; for length in lengths { h.reset(); for i in 1..(length + 1) { h.record(i).unwrap(); } assert_eq!(length, h.count()); let iter = h.iter_percentiles(1000); for iter_val in iter { let calculated_value = h.value_at_quantile(iter_val.quantile()); let v = iter_val.value(); assert_eq!(v, calculated_value, "len {} iter quantile {} q count {} iter val {} -> {} calc val {} -> {}", length, iter_val.quantile(), iter_val.quantile() * length as f64, v, h.highest_equivalent(v), calculated_value, h.highest_equivalent(calculated_value)); } } } #[test] fn value_at_quantile_matches_value_sequence() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); let lengths = vec![1, 5, 10, 50, 100, 500, 1_000, 5_000, 10_000, 50_000, 100_000]; for length in lengths { h.reset(); for i in 1..(length + 1) { h.record(i).unwrap(); } assert_eq!(length, h.count()); for v in 1..(length + 1) { let quantile = v as f64 / length as f64; let calculated_value = h.value_at_quantile(quantile); if !h.equivalent(v, calculated_value) { assert_eq!(h.highest_equivalent(v), calculated_value, "len {} quantile {} q count {} actual {} -> {} calc {} -> {}", length, quantile, quantile * length as f64, v, h.highest_equivalent(v), calculated_value, h.highest_equivalent(calculated_value)); } } } } #[test] fn value_at_quantile_matches_pctile_iter_random() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); let lengths = vec![1, 5, 10, 50, 100, 500, 1_000, 5_000, 10_000, 50_000, 100_000]; let mut rng = rand::weak_rng(); for length in lengths { h.reset(); for _ in 1..(length + 1) { h.record(rng.gen()).unwrap(); } assert_eq!(length, h.count()); let iter = h.iter_percentiles(1000); for iter_val in iter { let calculated_value = 
h.value_at_quantile(iter_val.quantile()); let v = iter_val.value(); assert_eq!(v, calculated_value, "len {} iter quantile {} q count {} iter val {} -> {} calc val {} -> {}", length, iter_val.quantile(), iter_val.quantile() * length as f64, v, h.highest_equivalent(v), calculated_value, h.highest_equivalent(calculated_value)); } } } #[test] fn value_at_quantile_matches_value_random() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); let mut values = Vec::new(); let lengths = vec![1, 5, 10, 50, 100, 500, 1_000, 5_000, 10_000, 50_000, 100_000]; let mut rng = rand::weak_rng(); for length in lengths { h.reset(); for _ in 1..(length + 1) { let v = rng.gen(); h.record(v).unwrap(); values.push(v); } values.sort(); assert_eq!(length, h.count()); for (index, &v) in values.iter().enumerate() { let quantile = (index + 1) as f64 / length as f64; let calculated_value = h.value_at_quantile(quantile); if !h.equivalent(v, calculated_value) { // TODO this fails quickly // assert_eq!(h.highest_equivalent(v), calculated_value, // "len {} quantile {} q count {} actual {} -> {} calc {} -> {}", // length, quantile, quantile * length as f64, v, h.highest_equivalent(v), calculated_value, h.highest_equivalent(calculated_value)); } } } } Make randomized tests use smaller histogram sizes because the tests are taking annoyingly long. //! Tests from HistogramTest.java extern crate hdrsample; extern crate num; extern crate rand; extern crate ieee754; use self::rand::Rng; use hdrsample::{Histogram, SubtractionError}; use hdrsample::serialization::{V2Serializer, Deserializer}; use std::borrow::Borrow; use std::cmp; use std::fmt; use num::Saturating; use ieee754::Ieee754; macro_rules! 
assert_near { ($a: expr, $b: expr, $tolerance: expr) => {{ let a = $a as f64; let b = $b as f64; let tol = $tolerance as f64; assert!((a - b).abs() <= b * tol, "assertion failed: `(left ~= right) (left: `{}`, right: `{}`, tolerance: `{:.5}%`)", a, b, 100.0 * tol); }} } fn verify_max<T: hdrsample::Counter, B: Borrow<Histogram<T>>>(hist: B) -> bool { let hist = hist.borrow(); if let Some(mx) = hist.iter_recorded() .map(|v| v.value()) .map(|v| hist.highest_equivalent(v)) .last() { hist.max() == mx } else { hist.max() == 0 } } fn assert_min_max_count<T: hdrsample::Counter, B: Borrow<Histogram<T>>>(hist: B) { let h = hist.borrow(); let mut min = None; let mut max = None; let mut total = 0; for i in 0..h.len() { let value = h.value_for(i); let count = h.count_at(value).unwrap(); if count == T::zero() { continue; } min = Some(cmp::min(min.unwrap_or(u64::max_value()), value)); max = Some(cmp::max(max.unwrap_or(0), value)); total = total.saturating_add(count.to_u64().unwrap()); } let min = min.map(|m| h.lowest_equivalent(m)).unwrap_or(0); let max = max.map(|m| h.highest_equivalent(m)).unwrap_or(0); assert_eq!(min, h.min()); assert_eq!(max, h.max()); assert_eq!(total, h.count()); } const TRACKABLE_MAX: u64 = 3600 * 1000 * 1000; // Store up to 2 * 10^3 in single-unit precision. Can be 5 at most. 
const SIGFIG: u8 = 3;
// Canonical value recorded by most of the simple tests below.
const TEST_VALUE_LEVEL: u64 = 4;

/// Constructor argument validation: a max below the minimum representable
/// range and a precision above 5 significant figures must both be rejected.
#[test]
fn construction_arg_ranges() {
    assert!(Histogram::<u64>::new_with_max(1, SIGFIG).is_err());
    assert!(Histogram::<u64>::new_with_max(TRACKABLE_MAX, 6).is_err());
}

/// A freshly constructed histogram reports zeroed statistics.
#[test]
fn empty_histogram() {
    let h = Histogram::<u64>::new(SIGFIG).unwrap();
    assert_eq!(h.min(), 0);
    assert_eq!(h.max(), 0);
    assert_near!(h.mean(), 0.0, 0.0000000000001);
    assert_near!(h.stdev(), 0.0, 0.0000000000001);
    // With no samples, everything is below any value.
    assert_near!(h.percentile_below(0).unwrap(), 100.0, 0.0000000000001);
    assert!(verify_max(h));
}

/// Accessors echo back the construction parameters.
#[test]
fn construction_arg_gets() {
    let h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.low(), 1);
    assert_eq!(h.high(), TRACKABLE_MAX);
    assert_eq!(h.sigfig(), SIGFIG);

    let h = Histogram::<u64>::new_with_bounds(1000, TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.low(), 1000);
}

/// A single `+=` records one sample at the given value.
#[test]
fn record() {
    let mut h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h += TEST_VALUE_LEVEL;
    assert_eq!(h.count_at(TEST_VALUE_LEVEL), Ok(1));
    assert_eq!(h.count(), 1);
    assert!(verify_max(h));
}

/// Recording beyond the configured trackable maximum fails (no auto-resize).
#[test]
fn record_past_trackable_max() {
    let mut h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    assert!(h.record(3 * TRACKABLE_MAX).is_err());
}

/// Coordinated-omission correction: `record_correct` back-fills synthetic
/// samples at each expected interval, while a plain record does not.
#[test]
fn record_in_interval() {
    let mut h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    h.record_correct(TEST_VALUE_LEVEL, TEST_VALUE_LEVEL / 4).unwrap();
    let mut r = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    r += TEST_VALUE_LEVEL;

    // The data will include corrected samples:
    assert_eq!(h.count_at((TEST_VALUE_LEVEL * 1) / 4), Ok(1));
    assert_eq!(h.count_at((TEST_VALUE_LEVEL * 2) / 4), Ok(1));
    assert_eq!(h.count_at((TEST_VALUE_LEVEL * 3) / 4), Ok(1));
    assert_eq!(h.count_at((TEST_VALUE_LEVEL * 4) / 4), Ok(1));
    assert_eq!(h.count(), 4);
    // But the raw data will not:
    assert_eq!(r.count_at((TEST_VALUE_LEVEL * 1) / 4), Ok(0));
    assert_eq!(r.count_at((TEST_VALUE_LEVEL * 2) / 4), Ok(0));
assert_eq!(r.count_at((TEST_VALUE_LEVEL * 3) / 4), Ok(0)); assert_eq!(r.count_at((TEST_VALUE_LEVEL * 4) / 4), Ok(1)); assert_eq!(r.count(), 1); assert!(verify_max(h)); } #[test] fn reset() { let mut h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h += TEST_VALUE_LEVEL; h.reset(); assert_eq!(h.count_at(TEST_VALUE_LEVEL), Ok(0)); assert_eq!(h.count(), 0); assert!(verify_max(h)); } #[test] fn add() { let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h1 += TEST_VALUE_LEVEL; h1 += 1000 * TEST_VALUE_LEVEL; h2 += TEST_VALUE_LEVEL; h2 += 1000 * TEST_VALUE_LEVEL; h1 += &h2; assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(2)); assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(2)); assert_eq!(h1.count(), 4); let mut big = Histogram::<u64>::new_with_max(2 * TRACKABLE_MAX, SIGFIG).unwrap(); big += TEST_VALUE_LEVEL; big += 1000 * TEST_VALUE_LEVEL; big += 2 * TRACKABLE_MAX; // Adding the smaller histogram to the bigger one should work: big += &h1; assert_eq!(big.count_at(TEST_VALUE_LEVEL), Ok(3)); assert_eq!(big.count_at(1000 * TEST_VALUE_LEVEL), Ok(3)); assert_eq!(big.count_at(2 * TRACKABLE_MAX), Ok(1)); // overflow smaller hist... 
assert_eq!(big.count(), 7); // But trying to add a larger histogram into a smaller one should throw an AIOOB: assert!(h1.add(&big).is_err()); assert!(verify_max(h1)); assert!(verify_max(h2)); assert!(verify_max(big)); } #[test] fn subtract_after_add() { let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h1 += TEST_VALUE_LEVEL; h1 += 1000 * TEST_VALUE_LEVEL; h2 += TEST_VALUE_LEVEL; h2 += 1000 * TEST_VALUE_LEVEL; h1.add(&h2).unwrap(); assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(2)); assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(2)); assert_eq!(h1.count(), 4); h1 += &h2; assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(3)); assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(3)); assert_eq!(h1.count(), 6); h1.subtract(&h2).unwrap(); assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(2)); assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(2)); assert_eq!(h1.count(), 4); assert_min_max_count(h1); assert_min_max_count(h2); } #[test] fn subtract_to_zero_counts() { let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h1 += TEST_VALUE_LEVEL; h1 += 1000 * TEST_VALUE_LEVEL; assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(1)); assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(1)); assert_eq!(h1.count(), 2); let clone = h1.clone(); h1.subtract(&clone).unwrap(); assert_eq!(h1.count_at(TEST_VALUE_LEVEL), Ok(0)); assert_eq!(h1.count_at(1000 * TEST_VALUE_LEVEL), Ok(0)); assert_eq!(h1.count(), 0); assert_min_max_count(h1); } #[test] fn subtract_to_negative_counts_error() { let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h1 += TEST_VALUE_LEVEL; h1 += 1000 * TEST_VALUE_LEVEL; h2.record_n(TEST_VALUE_LEVEL, 2).unwrap(); h2.record_n(1000 * TEST_VALUE_LEVEL, 2).unwrap(); assert_eq!(SubtractionError::SubtrahendCountExceedsMinuendCount, 
h1.subtract(&h2).unwrap_err()); assert_min_max_count(h1); assert_min_max_count(h2); } #[test] fn subtract_subtrahend_values_outside_minuend_range_error() { let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h1 += TEST_VALUE_LEVEL; h1 += 1000 * TEST_VALUE_LEVEL; let mut big = Histogram::<u64>::new_with_max(2 * TRACKABLE_MAX, SIGFIG).unwrap(); big += TEST_VALUE_LEVEL; big += 1000 * TEST_VALUE_LEVEL; big += 2 * TRACKABLE_MAX; assert_eq!(SubtractionError::SubtrahendValueExceedsMinuendRange, h1.subtract(&big).unwrap_err()); assert_min_max_count(h1); assert_min_max_count(big); } #[test] fn subtract_values_inside_minuend_range_works() { let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h1 += TEST_VALUE_LEVEL; h1 += 1000 * TEST_VALUE_LEVEL; let mut big = Histogram::<u64>::new_with_max(2 * TRACKABLE_MAX, SIGFIG).unwrap(); big += TEST_VALUE_LEVEL; big += 1000 * TEST_VALUE_LEVEL; big += 2 * TRACKABLE_MAX; let big2 = big.clone(); big += &big2; big += &big2; assert_eq!(big.count_at(TEST_VALUE_LEVEL), Ok(3)); assert_eq!(big.count_at(1000 * TEST_VALUE_LEVEL), Ok(3)); assert_eq!(big.count_at(2 * TRACKABLE_MAX), Ok(3)); // overflow smaller hist... assert_eq!(big.count(), 9); // Subtracting the smaller histogram from the bigger one should work: big -= &h1; assert_eq!(big.count_at(TEST_VALUE_LEVEL), Ok(2)); assert_eq!(big.count_at(1000 * TEST_VALUE_LEVEL), Ok(2)); assert_eq!(big.count_at(2 * TRACKABLE_MAX), Ok(3)); // overflow smaller hist... 
assert_eq!(big.count(), 7); assert_min_max_count(h1); assert_min_max_count(big); } #[test] fn subtract_values_strictly_inside_minuend_range_yields_same_min_max_no_restat() { let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h1 += 1; h1 += 10; h1 += 100; h1 += 1000; h2 += 10; h2 += 100; // will not require a restat h1.subtract(&h2).unwrap(); assert_eq!(1, h1.min()); assert_eq!(1000, h1.max()); assert_eq!(2, h1.count()); assert_min_max_count(h1); assert_min_max_count(h2); } #[test] fn subtract_values_at_extent_of_minuend_zero_count_range_recalculates_min_max() { let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h1 += 1; h1 += 10; h1 += 100; h1 += 1000; h2 += 1; h2 += 1000; // will trigger a restat because min/max values are having counts subtracted h1.subtract(&h2).unwrap(); assert_eq!(10, h1.min()); assert_eq!(100, h1.max()); assert_min_max_count(h1); assert_min_max_count(h2); } #[test] fn subtract_values_at_extent_of_minuend_nonzero_count_range_recalculates_same_min_max() { let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h1.record_n(1, 2).unwrap(); h1.record_n(10, 2).unwrap(); h1.record_n(100, 2).unwrap(); h1.record_n(1000, 2).unwrap(); h2 += 1; h2 += 1000; // will trigger a restat because min/max values are having counts subtracted h1.subtract(&h2).unwrap(); assert_eq!(1, h1.min()); assert_eq!(1000, h1.max()); assert_min_max_count(h1); assert_min_max_count(h2); } #[test] fn subtract_values_within_bucket_precision_of_of_minuend_min_recalculates_min_max() { let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 5).unwrap(); // sub bucket size is 2 above 2048 with 3 sigfits 
h1.record(3000).unwrap(); h1.record(3100).unwrap(); h1.record(3200).unwrap(); h1.record(3300).unwrap(); // h2 has 5 sigfits, so bucket size is 1 still h2 += 3001; // will trigger a restat because min/max values are having counts subtracted h1.subtract(&h2).unwrap(); assert_eq!(3100, h1.min()); assert_eq!(3301, h1.max()); assert_min_max_count(h1); assert_min_max_count(h2); } #[test] fn subtract_values_at_minuend_min_recalculates_min_max() { let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 5).unwrap(); // sub bucket size is 2 above 2048 with 3 sigfits h1.record(3000).unwrap(); h1.record(3100).unwrap(); h1.record(3200).unwrap(); h1.record(3300).unwrap(); // h2 has 5 sigfits, so bucket size is 1 still h2 += 3000; // will trigger a restat because min/max values are having counts subtracted h1.subtract(&h2).unwrap(); assert_eq!(3100, h1.min()); assert_eq!(3301, h1.max()); assert_min_max_count(h1); assert_min_max_count(h2); } #[test] fn subtract_values_within_bucket_precision_of_of_minuend_max_recalculates_min_max() { let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 5).unwrap(); // sub bucket size is 2 above 2048 with 3 sigfits h1.record(3000).unwrap(); h1.record(3100).unwrap(); h1.record(3200).unwrap(); h1.record(3300).unwrap(); // h2 has 5 sigfits, so bucket size is 1 still h2 += 3301; // will trigger a restat because min/max values are having counts subtracted h1.subtract(&h2).unwrap(); assert_eq!(3000, h1.min()); assert_eq!(3201, h1.max()); assert_min_max_count(h1); assert_min_max_count(h2); } #[test] fn subtract_values_at_minuend_max_recalculates_min_max() { let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 5).unwrap(); // sub bucket size is 2 above 2048 with 3 sigfits h1.record(3000).unwrap(); 
h1.record(3100).unwrap(); h1.record(3200).unwrap(); h1.record(3300).unwrap(); // h2 has 5 sigfits, so bucket size is 1 still h2 += 3300; // will trigger a restat because min/max values are having counts subtracted h1.subtract(&h2).unwrap(); assert_eq!(3000, h1.min()); assert_eq!(3201, h1.max()); assert_min_max_count(h1); assert_min_max_count(h2); } #[test] fn subtract_values_minuend_saturated_total_recalculates_saturated() { let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap(); h1.record_n(1, u64::max_value()).unwrap(); h1.record_n(10, u64::max_value()).unwrap(); h1.record_n(100, u64::max_value()).unwrap(); h1.record_n(1000, u64::max_value()).unwrap(); h2.record(10).unwrap(); h2.record(100).unwrap(); // will trigger a restat - total count is saturated h1.subtract(&h2).unwrap(); // min, max haven't changed assert_eq!(1, h1.min()); assert_eq!(1000, h1.max()); // still saturated assert_eq!(u64::max_value(), h1.count()); assert_min_max_count(h1); assert_min_max_count(h2); } #[test] fn subtract_values_minuend_saturated_total_recalculates_not_saturated() { let mut h1 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap(); // 3 of these is just under u64::max_value() let chunk = (u64::max_value() / 16) * 5; h1.record_n(1, chunk).unwrap(); h1.record_n(10, chunk).unwrap(); h1.record_n(100, chunk).unwrap(); h1.record_n(1000, chunk).unwrap(); h2.record_n(10, chunk).unwrap(); // will trigger a restat - total count is saturated h1.subtract(&h2).unwrap(); // min, max haven't changed assert_eq!(1, h1.min()); assert_eq!(1000, h1.max()); // not saturated assert_eq!(u64::max_value() / 16 * 15, h1.count()); assert_min_max_count(h1); assert_min_max_count(h2); } #[test] fn equivalent_range() { let h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); assert_eq!(h.equivalent_range(1), 1); 
// (continuation) Completes `equivalent_range`, opened at the end of the previous
// physical line of this dump: bucket widths double as values cross sub-bucket
// boundaries (width 2 at 2500, 4 at 8191, 8 from 8192 onward with 3 sigfigs).
assert_eq!(h.equivalent_range(2500), 2);
    assert_eq!(h.equivalent_range(8191), 4);
    assert_eq!(h.equivalent_range(8192), 8);
    assert_eq!(h.equivalent_range(10000), 8);
}

/// Same bucket-width progression as `equivalent_range`, but with a lowest
/// discernible value of 1024: every width is scaled by that unit magnitude.
#[test]
fn scaled_equivalent_range() {
    let h = Histogram::<u64>::new_with_bounds(1024, TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.equivalent_range(1 * 1024), 1 * 1024);
    assert_eq!(h.equivalent_range(2500 * 1024), 2 * 1024);
    assert_eq!(h.equivalent_range(8191 * 1024), 4 * 1024);
    assert_eq!(h.equivalent_range(8192 * 1024), 8 * 1024);
    assert_eq!(h.equivalent_range(10000 * 1024), 8 * 1024);
}

/// `lowest_equivalent` rounds a value down to the start of its bucket.
#[test]
fn lowest_equivalent() {
    let h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.lowest_equivalent(10007), 10000);
    assert_eq!(h.lowest_equivalent(10009), 10008);
}

/// `lowest_equivalent` with a 1024 lowest discernible value: bucket starts are
/// scaled by the unit magnitude.
#[test]
fn scaled_lowest_equivalent() {
    let h = Histogram::<u64>::new_with_bounds(1024, TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.lowest_equivalent(10007 * 1024), 10000 * 1024);
    assert_eq!(h.lowest_equivalent(10009 * 1024), 10008 * 1024);
}

/// `highest_equivalent` rounds a value up to the last value in its bucket.
#[test]
fn highest_equivalent() {
    let h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.highest_equivalent(8180), 8183);
    assert_eq!(h.highest_equivalent(8191), 8191);
    assert_eq!(h.highest_equivalent(8193), 8199);
    assert_eq!(h.highest_equivalent(9995), 9999);
    assert_eq!(h.highest_equivalent(10007), 10007);
    assert_eq!(h.highest_equivalent(10008), 10015);
}

/// `highest_equivalent` with a 1024 unit: bucket ends are scaled and extend to
/// the last representable value within the unit (`* 1024 + 1023`).
#[test]
fn scaled_highest_equivalent() {
    let h = Histogram::<u64>::new_with_bounds(1024, TRACKABLE_MAX, SIGFIG).unwrap();
    assert_eq!(h.highest_equivalent(8180 * 1024), 8183 * 1024 + 1023);
    assert_eq!(h.highest_equivalent(8191 * 1024), 8191 * 1024 + 1023);
    assert_eq!(h.highest_equivalent(8193 * 1024), 8199 * 1024 + 1023);
    assert_eq!(h.highest_equivalent(9995 * 1024), 9999 * 1024 + 1023);
    assert_eq!(h.highest_equivalent(10007 * 1024), 10007 * 1024 + 1023);
    assert_eq!(h.highest_equivalent(10008 * 1024), 10015 * 1024 + 1023);
}

#[test]
fn median_equivalent() {
    let h =
Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); assert_eq!(h.median_equivalent(4), 4); assert_eq!(h.median_equivalent(5), 5); assert_eq!(h.median_equivalent(4000), 4001); assert_eq!(h.median_equivalent(8000), 8002); assert_eq!(h.median_equivalent(10007), 10004); } #[test] fn median_equivalent_doesnt_panic_at_extremes() { let h = Histogram::<u64>::new_with_max(u64::max_value(), 3).unwrap(); let _ = h.median_equivalent(u64::max_value()); let _ = h.median_equivalent(u64::max_value() - 1); let _ = h.median_equivalent(0); let _ = h.median_equivalent(1); } #[test] fn scaled_median_equivalent() { let h = Histogram::<u64>::new_with_bounds(1024, TRACKABLE_MAX, SIGFIG).unwrap(); assert_eq!(h.median_equivalent(1024 * 4), 1024 * 4 + 512); assert_eq!(h.median_equivalent(1024 * 5), 1024 * 5 + 512); assert_eq!(h.median_equivalent(1024 * 4000), 1024 * 4001); assert_eq!(h.median_equivalent(1024 * 8000), 1024 * 8002); assert_eq!(h.median_equivalent(1024 * 10007), 1024 * 10004); } fn are_equal<T, B1, B2>(actual: B1, expected: B2) where T: hdrsample::Counter + fmt::Debug, B1: Borrow<Histogram<T>>, B2: Borrow<Histogram<T>> { let actual = actual.borrow(); let expected = expected.borrow(); assert!(actual == expected); assert_eq!(actual.count_at(TEST_VALUE_LEVEL), expected.count_at(TEST_VALUE_LEVEL)); assert_eq!(actual.count_at(10 * TEST_VALUE_LEVEL), expected.count_at(10 * TEST_VALUE_LEVEL)); assert_eq!(actual.count(), expected.count()); assert!(verify_max(expected)); assert!(verify_max(actual)); } #[test] fn clone() { let mut h = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h += TEST_VALUE_LEVEL; h += 10 * TEST_VALUE_LEVEL; let max = h.high(); h.record_correct(max - 1, 31000).unwrap(); are_equal(h.clone(), h); } #[test] fn scaled_clone() { let mut h = Histogram::<u64>::new_with_bounds(1000, TRACKABLE_MAX, SIGFIG).unwrap(); h += TEST_VALUE_LEVEL; h += 10 * TEST_VALUE_LEVEL; let max = h.high(); h.record_correct(max - 1, 31000).unwrap(); 
are_equal(h.clone(), h); } #[test] fn set_to() { let mut h1 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); let mut h2 = Histogram::<u64>::new_with_max(TRACKABLE_MAX, SIGFIG).unwrap(); h1 += TEST_VALUE_LEVEL; h1 += 10 * TEST_VALUE_LEVEL; let max = h1.high(); h1.record_correct(max - 1, 31000).unwrap(); h2.set_to(&h1).unwrap(); are_equal(&h1, &h2); h1 += 20 * TEST_VALUE_LEVEL; h2.set_to(&h1).unwrap(); are_equal(&h1, &h2); } #[test] fn scaled_set_to() { let mut h1 = Histogram::<u64>::new_with_bounds(1000, TRACKABLE_MAX, SIGFIG).unwrap(); let mut h2 = Histogram::<u64>::new_with_bounds(1000, TRACKABLE_MAX, SIGFIG).unwrap(); h1 += TEST_VALUE_LEVEL; h1 += 10 * TEST_VALUE_LEVEL; let max = h1.high(); h1.record_correct(max - 1, 31000).unwrap(); h2.set_to(&h1).unwrap(); are_equal(&h1, &h2); h1 += 20 * TEST_VALUE_LEVEL; h2.set_to(&h1).unwrap(); are_equal(&h1, &h2); } #[test] fn random_write_full_value_range_precision_5_no_panic() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 5).unwrap(); let mut rng = rand::weak_rng(); for _ in 0..1_000_000 { let mut r: u64 = rng.gen(); if r == 0 { r = 1; } h.record(r).unwrap(); } } #[test] fn random_write_full_value_range_precision_0_no_panic() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 0).unwrap(); let mut rng = rand::weak_rng(); for _ in 0..1_000_000 { let mut r: u64 = rng.gen(); if r == 0 { r = 1; } h.record(r).unwrap(); } } #[test] fn random_write_middle_of_value_range_precision_3_no_panic() { let low = 1_000; let high = 1_000_000_000; let mut h = Histogram::<u64>::new_with_bounds(low, high, 3).unwrap(); let mut rng = rand::weak_rng(); for _ in 0..1_000_000 { h.record(rng.gen_range(low, high + 1)).unwrap(); } } #[test] fn value_count_overflow_from_record_saturates_u16() { let mut h = Histogram::<u16>::new_with_max(TRACKABLE_MAX, 2).unwrap(); h.record_n(3, u16::max_value() - 1).unwrap(); h.record_n(3, u16::max_value() - 1).unwrap(); // individual count has saturated 
assert_eq!(u16::max_value(), h.count_at(3).unwrap()); // total is a u64 though assert_eq!((u16::max_value() - 1) as u64 * 2, h.count()); } #[test] fn value_count_overflow_from_record_saturates_u64() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); h.record_n(1, u64::max_value() - 1).unwrap(); h.record_n(1, u64::max_value() - 1).unwrap(); assert_eq!(u64::max_value(), h.count_at(1).unwrap()); assert_eq!(u64::max_value(), h.count()); } #[test] fn value_count_overflow_from_record_autoresize_doesnt_panic_saturates() { let mut h = Histogram::<u64>::new_with_bounds(1, 10000, 3).unwrap(); h.auto(true); h.record_n(1, u64::max_value() - 1).unwrap(); h.record_n(1, u64::max_value() - 1).unwrap(); // forces resize h.record_n(1_000_000_000, u64::max_value() - 1).unwrap(); h.record_n(1_000_000_000, u64::max_value() - 1).unwrap(); assert_eq!(u64::max_value(), h.count_at(1).unwrap()); assert_eq!(u64::max_value(), h.count_at(1_000_000_000).unwrap()); assert_eq!(u64::max_value(), h.count()); } #[test] fn value_count_overflow_from_add_same_dimensions_saturates() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); h.record_n(1, u64::max_value() - 1).unwrap(); h2.record_n(1, u64::max_value() - 1).unwrap(); h.add(h2).unwrap(); assert_eq!(u64::max_value(), h.count_at(1).unwrap()); assert_eq!(u64::max_value(), h.count()); } #[test] fn value_count_overflow_from_add_different_precision_saturates() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); // different precision let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 4).unwrap(); h.record_n(1, u64::max_value() - 1).unwrap(); h2.record_n(1, u64::max_value() - 1).unwrap(); h.add(h2).unwrap(); assert_eq!(u64::max_value(), h.count_at(1).unwrap()); assert_eq!(u64::max_value(), h.count()); } #[test] fn 
value_count_overflow_from_add_with_resize_to_same_dimensions_saturates() { let mut h = Histogram::<u64>::new_with_bounds(1, 10_000, 3).unwrap(); h.auto(true); let mut h2 = Histogram::<u64>::new_with_bounds(1, 10_000_000_000, 3).unwrap(); h.record_n(1, u64::max_value() - 1).unwrap(); h2.record_n(1, u64::max_value() - 1).unwrap(); // recording at value == h2 max should trigger h to resize to the same dimensions when added h2.record_n(10_000_000_000, u64::max_value() - 1).unwrap(); h.add(h2).unwrap(); assert_eq!(u64::max_value(), h.count_at(1).unwrap()); assert_eq!(u64::max_value(), h.count()); } #[test] fn total_count_overflow_from_record_saturates() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); h.record_n(1, u64::max_value() - 1).unwrap(); h.record_n(10, u64::max_value() - 1).unwrap(); assert_eq!(u64::max_value() - 1, h.count_at(1).unwrap()); assert_eq!(u64::max_value() - 1, h.count_at(10).unwrap()); assert_eq!(u64::max_value(), h.count()); } #[test] fn total_count_overflow_from_add_same_dimensions_saturates_calculating_other_addend_total() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); h.record_n(1, u64::max_value() - 10).unwrap(); h2.record_n(10, u64::max_value() - 1).unwrap(); h2.record_n(20, 10).unwrap(); // just h2's total would overflow h.add(h2).unwrap(); assert_eq!(u64::max_value() - 10, h.count_at(1).unwrap()); assert_eq!(10, h.count_at(20).unwrap()); // if accumulating total count for h2 had overflowed, we would see max_value - 1000 + 9 here assert_eq!(u64::max_value(), h.count()); } #[test] fn total_count_overflow_from_add_same_dimensions_saturates_when_added_to_orig_total_count() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); h.record_n(1, u64::max_value() - 10).unwrap(); h2.record_n(10, 
9).unwrap(); h2.record_n(20, 9).unwrap(); // h2's total wouldn't overflow, but it would when added to h1 h.add(h2).unwrap(); assert_eq!(u64::max_value() - 10, h.count_at(1).unwrap()); assert_eq!(9, h.count_at(20).unwrap()); assert_eq!(u64::max_value(), h.count()); } #[test] fn total_count_overflow_from_add_different_precision_saturates() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); // different precision let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 4).unwrap(); h.record_n(1, u64::max_value() - 1).unwrap(); h2.record_n(20, u64::max_value() - 1).unwrap(); h.add(h2).unwrap(); assert_eq!(u64::max_value() - 1, h.count_at(1).unwrap()); assert_eq!(u64::max_value() - 1, h.count_at(20).unwrap()); assert_eq!(u64::max_value(), h.count()); } #[test] fn total_count_overflow_from_add_with_resize_saturates() { let mut h = Histogram::<u64>::new_with_bounds(1, 10_000, 3).unwrap(); h.auto(true); let mut h2 = Histogram::<u64>::new_with_bounds(1, 10_000_000_000, 3).unwrap(); h.record_n(1, u64::max_value() - 1).unwrap(); h2.record_n(1, u64::max_value() - 1).unwrap(); h2.record_n(10_000_000_000, u64::max_value() - 1).unwrap(); h.add(h2).unwrap(); assert_eq!(u64::max_value(), h.count_at(1).unwrap()); assert_eq!(u64::max_value(), h.count()); } #[test] fn total_count_overflow_from_deserialize_saturates() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); // can't go bigger than i64 max because it will be serialized h.record_n(1, i64::max_value() as u64).unwrap(); h.record_n(1000, i64::max_value() as u64).unwrap(); h.record_n(1000_000, i64::max_value() as u64).unwrap(); assert_eq!(u64::max_value(), h.count()); let mut vec = Vec::new(); V2Serializer::new().serialize(&h, &mut vec).unwrap(); let deser_h: Histogram<u64> = Deserializer::new().deserialize(&mut vec.as_slice()).unwrap(); assert_eq!(u64::max_value(), deser_h.count()); } #[test] fn subtract_underflow_guarded_by_per_value_count_check() { let mut 
h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); let mut h2 = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); h.record_n(1, 1).unwrap(); h2.record_n(1, 100).unwrap(); assert_eq!(SubtractionError::SubtrahendCountExceedsMinuendCount, h.subtract(h2).unwrap_err()); } #[test] fn quantile_2_values() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); h.record(1).unwrap(); h.record(2).unwrap(); assert_eq!(1, h.value_at_quantile(0.25)); assert_eq!(1, h.value_at_quantile(0.5)); let almost_half = 0.5000000000000001; let next = 0.5000000000000002; // one ulp apart assert_eq!(almost_half, 0.5_f64.next()); assert_eq!(next, almost_half.next()); assert_eq!(1, h.value_at_quantile(0.5)); // ideally this would return 2, not 1 assert_eq!(1, h.value_at_quantile(almost_half)); assert_eq!(2, h.value_at_quantile(next)); } #[test] fn quantile_5_values() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); h.record(1).unwrap(); h.record(2).unwrap(); h.record(2).unwrap(); h.record(2).unwrap(); h.record(2).unwrap(); assert_eq!(2, h.value_at_quantile(0.25)); assert_eq!(2, h.value_at_quantile(0.3)); } #[test] fn quantile_20k() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); for i in 1..20_001 { h.record(i).unwrap(); } assert_eq!(20_000, h.count()); assert!(h.equivalent(19961, h.value_at_quantile(0.99805))); } #[test] fn quantile_large_numbers() { let mut h = Histogram::<u64>::new_with_bounds(20_000_000, 100_000_000, 5).unwrap(); h.record(100_000_000).unwrap(); h.record(20_000_000).unwrap(); h.record(30_000_000).unwrap(); assert!(h.equivalent(20_000_000, h.value_at_quantile(0.5))); assert!(h.equivalent(30_000_000, h.value_at_quantile(0.5))); assert!(h.equivalent(100_000_000, h.value_at_quantile(0.8333))); assert!(h.equivalent(100_000_000, h.value_at_quantile(0.8334))); assert!(h.equivalent(100_000_000, h.value_at_quantile(0.99))); } #[test] fn 
value_at_quantile_matches_pctile_iter_sequence() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); let lengths = vec![1, 5, 10, 50, 100, 500, 1_000, 5_000, 10_000, 50_000, 100_000]; for length in lengths { h.reset(); for i in 1..(length + 1) { h.record(i).unwrap(); } assert_eq!(length, h.count()); let iter = h.iter_percentiles(1000); for iter_val in iter { let calculated_value = h.value_at_quantile(iter_val.quantile()); let v = iter_val.value(); assert_eq!(v, calculated_value, "len {} iter quantile {} q count {} iter val {} -> {} calc val {} -> {}", length, iter_val.quantile(), iter_val.quantile() * length as f64, v, h.highest_equivalent(v), calculated_value, h.highest_equivalent(calculated_value)); } } } #[test] fn value_at_quantile_matches_value_sequence() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); let lengths = vec![1, 5, 10, 50, 100, 500, 1_000, 5_000, 10_000, 50_000, 100_000]; for length in lengths { h.reset(); for i in 1..(length + 1) { h.record(i).unwrap(); } assert_eq!(length, h.count()); for v in 1..(length + 1) { let quantile = v as f64 / length as f64; let calculated_value = h.value_at_quantile(quantile); if !h.equivalent(v, calculated_value) { assert_eq!(h.highest_equivalent(v), calculated_value, "len {} quantile {} q count {} actual {} -> {} calc {} -> {}", length, quantile, quantile * length as f64, v, h.highest_equivalent(v), calculated_value, h.highest_equivalent(calculated_value)); } } } } #[test] fn value_at_quantile_matches_pctile_iter_random() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); // random u64s tend to be pretty darn big, so percentile calculations have to scan more. 
let lengths = vec![1, 5, 10, 50, 100, 500, 1_000, 5_000, 10_000]; let mut rng = rand::weak_rng(); for length in lengths { h.reset(); for _ in 1..(length + 1) { h.record(rng.gen()).unwrap(); } assert_eq!(length, h.count()); let iter = h.iter_percentiles(1000); for iter_val in iter { let calculated_value = h.value_at_quantile(iter_val.quantile()); let v = iter_val.value(); assert_eq!(v, calculated_value, "len {} iter quantile {} q count {} iter val {} -> {} calc val {} -> {}", length, iter_val.quantile(), iter_val.quantile() * length as f64, v, h.highest_equivalent(v), calculated_value, h.highest_equivalent(calculated_value)); } } } #[test] fn value_at_quantile_matches_value_random() { let mut h = Histogram::<u64>::new_with_bounds(1, u64::max_value(), 3).unwrap(); let mut values = Vec::new(); let lengths = vec![1, 5, 10, 50, 100, 500, 1_000, 5_000, 10_000]; let mut rng = rand::weak_rng(); for length in lengths { h.reset(); for _ in 1..(length + 1) { let v = rng.gen(); h.record(v).unwrap(); values.push(v); } values.sort(); assert_eq!(length, h.count()); for (index, &v) in values.iter().enumerate() { let quantile = (index + 1) as f64 / length as f64; let calculated_value = h.value_at_quantile(quantile); if !h.equivalent(v, calculated_value) { // TODO this fails quickly // assert_eq!(h.highest_equivalent(v), calculated_value, // "len {} quantile {} q count {} actual {} -> {} calc {} -> {}", // length, quantile, quantile * length as f64, v, h.highest_equivalent(v), calculated_value, h.highest_equivalent(calculated_value)); } } } }
extern crate ndarray; extern crate itertools; use ndarray::Array; use ndarray::{Ix, Si, S}; use ndarray::{ ArrayBase, Data, Dimension, }; use itertools::assert_equal; #[test] fn double_ended() { let a = Array::linspace(0., 7., 8); let mut it = a.iter().map(|x| *x); assert_eq!(it.next(), Some(0.)); assert_eq!(it.next_back(), Some(7.)); assert_eq!(it.next(), Some(1.)); assert_eq!(it.rev().last(), Some(2.)); } #[test] fn indexed() { let a = Array::linspace(0., 7., 8); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt as Ix); } let a = a.reshape((2, 4, 1)); let (mut i, mut j, k) = (0, 0, 0); for (idx, elt) in a.indexed_iter() { assert_eq!(idx, (i, j, k)); j += 1; if j == 4 { j = 0; i += 1; } println!("{:?}", (idx, elt)); } } fn assert_slice_correct<A, S, D>(v: &ArrayBase<S, D>) where S: Data<Elem=A>, D: Dimension, A: PartialEq + std::fmt::Debug, { let slc = v.as_slice(); assert!(slc.is_some()); let slc = slc.unwrap(); assert_eq!(v.len(), slc.len()); assert_equal(v.iter(), slc); } #[test] fn as_slice() { let a = Array::linspace(0., 7., 8); let a = a.reshape((2, 4, 1)); assert_slice_correct(&a); let a = a.reshape((2, 4)); assert_slice_correct(&a); assert!(a.view().subview(1, 0).as_slice().is_none()); let v = a.view(); assert_slice_correct(&v); assert_slice_correct(&v.subview(0, 0)); assert_slice_correct(&v.subview(0, 1)); assert!(v.slice(&[S, Si(0, Some(1), 1)]).as_slice().is_none()); println!("{:?}", v.slice(&[Si(0, Some(1), 2), S])); assert!(v.slice(&[Si(0, Some(1), 2), S]).as_slice().is_some()); // `u` is contiguous, because the column stride of `2` doesn't matter // when the result is just one row anyway -- length of that dimension is 1 let u = v.slice(&[Si(0, Some(1), 2), S]); println!("{:?}", u.shape()); println!("{:?}", u.strides()); println!("{:?}", v.slice(&[Si(0, Some(1), 2), S])); assert!(u.as_slice().is_some()); assert_slice_correct(&u); let a = a.reshape((8, 1)); assert_slice_correct(&a); let u = a.view().slice(&[Si(0, None, 2), S]); println!("u={:?}, 
shape={:?}, strides={:?}", u, u.shape(), u.strides()); assert!(u.as_slice().is_none()); } Add test for outer_iter extern crate ndarray; extern crate itertools; use ndarray::Array; use ndarray::{Ix, Si, S}; use ndarray::{ ArrayBase, Data, Dimension, }; use itertools::assert_equal; #[test] fn double_ended() { let a = Array::linspace(0., 7., 8); let mut it = a.iter().map(|x| *x); assert_eq!(it.next(), Some(0.)); assert_eq!(it.next_back(), Some(7.)); assert_eq!(it.next(), Some(1.)); assert_eq!(it.rev().last(), Some(2.)); } #[test] fn indexed() { let a = Array::linspace(0., 7., 8); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt as Ix); } let a = a.reshape((2, 4, 1)); let (mut i, mut j, k) = (0, 0, 0); for (idx, elt) in a.indexed_iter() { assert_eq!(idx, (i, j, k)); j += 1; if j == 4 { j = 0; i += 1; } println!("{:?}", (idx, elt)); } } fn assert_slice_correct<A, S, D>(v: &ArrayBase<S, D>) where S: Data<Elem=A>, D: Dimension, A: PartialEq + std::fmt::Debug, { let slc = v.as_slice(); assert!(slc.is_some()); let slc = slc.unwrap(); assert_eq!(v.len(), slc.len()); assert_equal(v.iter(), slc); } #[test] fn as_slice() { let a = Array::linspace(0., 7., 8); let a = a.reshape((2, 4, 1)); assert_slice_correct(&a); let a = a.reshape((2, 4)); assert_slice_correct(&a); assert!(a.view().subview(1, 0).as_slice().is_none()); let v = a.view(); assert_slice_correct(&v); assert_slice_correct(&v.subview(0, 0)); assert_slice_correct(&v.subview(0, 1)); assert!(v.slice(&[S, Si(0, Some(1), 1)]).as_slice().is_none()); println!("{:?}", v.slice(&[Si(0, Some(1), 2), S])); assert!(v.slice(&[Si(0, Some(1), 2), S]).as_slice().is_some()); // `u` is contiguous, because the column stride of `2` doesn't matter // when the result is just one row anyway -- length of that dimension is 1 let u = v.slice(&[Si(0, Some(1), 2), S]); println!("{:?}", u.shape()); println!("{:?}", u.strides()); println!("{:?}", v.slice(&[Si(0, Some(1), 2), S])); assert!(u.as_slice().is_some()); assert_slice_correct(&u); let a 
= a.reshape((8, 1)); assert_slice_correct(&a); let u = a.view().slice(&[Si(0, None, 2), S]); println!("u={:?}, shape={:?}, strides={:?}", u, u.shape(), u.strides()); assert!(u.as_slice().is_none()); } #[test] fn outer_iter() { let a = Array::from_iter(0..12); let a = a.reshape((2, 3, 2)); // [[[0, 1], // [2, 3], // [4, 5]], // [[6, 7], // [8, 9], // ... assert_equal(a.outer_iter().map(|v| v.iter().cloned().collect::<Vec<_>>()), vec![vec![0, 1], vec![2, 3], vec![4, 5], vec![6, 7], vec![8, 9], vec![10, 11]]); let mut b = Array::zeros((2, 3, 2)); b.swap_axes(0, 2); b.assign(&a); assert_equal(b.outer_iter().map(|v| v.iter().cloned().collect::<Vec<_>>()), vec![vec![0, 1], vec![2, 3], vec![4, 5], vec![6, 7], vec![8, 9], vec![10, 11]]); }
use syntax::ast; use syntax::codemap::{DUMMY_SP, Spanned, respan}; use syntax::ptr::P; use syntax::util::ThinVec; use aster::AstBuilder; #[test] fn test_lit() { let builder = AstBuilder::new(); fn check(expr: P<ast::Expr>, lit: P<ast::Lit>) { assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Lit(lit), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } check(builder.expr().bool(true), builder.lit().bool(true)); check(builder.expr().true_(), builder.lit().true_()); check(builder.expr().false_(), builder.lit().false_()); check(builder.expr().int(5), builder.lit().int(5)); check(builder.expr().i8(5), builder.lit().i8(5)); check(builder.expr().i16(5), builder.lit().i16(5)); check(builder.expr().i32(5), builder.lit().i32(5)); check(builder.expr().i64(5), builder.lit().i64(5)); check(builder.expr().isize(5), builder.lit().isize(5)); check(builder.expr().u8(5), builder.lit().u8(5)); check(builder.expr().u16(5), builder.lit().u16(5)); check(builder.expr().u32(5), builder.lit().u32(5)); check(builder.expr().u64(5), builder.lit().u64(5)); check(builder.expr().usize(5), builder.lit().usize(5)); // Doesn't crash. 
assert_eq!(builder.expr().i64(::std::i64::MIN), builder.expr().neg().lit().i64(1 << 63)); check(builder.expr().str("string"), builder.lit().str("string")); } #[test] fn test_path() { let builder = AstBuilder::new(); let expr = builder.expr().path() .id("x") .build(); assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Path( None, builder.path().id("x").build(), ), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_qpath() { let builder = AstBuilder::new(); let expr = builder.expr().qpath() .ty().slice().infer() .id("into_vec"); assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Path( Some(ast::QSelf { ty: builder.ty().slice().infer(), position: 0, }), builder.path().id("into_vec").build(), ), span: DUMMY_SP, attrs: ThinVec::new(), }) ); let expr: P<ast::Expr> = builder.expr().qpath() .ty().slice().infer() .as_().id("Slice").build() .id("into_vec"); assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Path( Some(ast::QSelf { ty: builder.ty().slice().infer(), position: 1, }), builder.path() .id("Slice") .id("into_vec") .build(), ), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_bin() { let builder = AstBuilder::new(); assert_eq!( builder.expr().add().i8(1).i8(2), P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Binary( Spanned { span: DUMMY_SP, node: ast::BinOpKind::Add, }, builder.expr().i8(1), builder.expr().i8(2), ), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_unit() { let builder = AstBuilder::new(); assert_eq!( builder.expr().unit(), P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Tup(vec![]), span: DUMMY_SP, attrs: ThinVec::new(), }) ); assert_eq!( builder.expr().tuple().build(), P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Tup(vec![]), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_tuple() { let builder = AstBuilder::new(); let expr = builder.expr().tuple() .expr().i8(1) .expr().tuple() 
.expr().unit() .expr().isize(2) .build() .build(); assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Tup(vec![ builder.expr().i8(1), P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Tup(vec![ builder.expr().unit(), builder.expr().isize(2), ]), span: DUMMY_SP, attrs: ThinVec::new(), }) ]), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_slice() { let builder = AstBuilder::new(); let expr = builder.expr().slice() .expr().i8(1) .expr().i8(2) .expr().i8(3) .build(); assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Vec(vec![ builder.expr().i8(1), builder.expr().i8(2), builder.expr().i8(3), ]), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_vec() { let builder = AstBuilder::new(); let expr = builder.expr().vec() .expr().i8(1) .expr().i8(2) .expr().i8(3) .build(); assert_eq!( expr, builder.expr().call() .qpath().ty().slice().infer().id("into_vec") .arg().box_().slice() .expr().i8(1) .expr().i8(2) .expr().i8(3) .build() .build() ); } #[test] fn test_break() { let builder = AstBuilder::new(); let expr = builder.expr().break_(); assert_eq!( expr, builder.expr().build_expr_kind(ast::ExprKind::Break(None)) ); let expr = builder.expr().break_to("'a"); let id = respan(DUMMY_SP, builder.id("'a")); assert_eq!( expr, builder.expr().build_expr_kind(ast::ExprKind::Break(Some(id))) ); } #[test] fn test_continue() { let builder = AstBuilder::new(); let expr = builder.expr().continue_(); assert_eq!( expr, builder.expr().build_expr_kind(ast::ExprKind::Continue(None)) ); let expr = builder.expr().continue_to("'a"); let id = respan(DUMMY_SP, builder.id("'a")); assert_eq!( expr, builder.expr().build_expr_kind(ast::ExprKind::Continue(Some(id))) ); } #[test] fn test_return() { let builder = AstBuilder::new(); let expr = builder.expr().return_(); assert_eq!( expr, builder.expr().build_expr_kind(ast::ExprKind::Ret(None)) ); let expr = builder.expr().return_expr().unit(); assert_eq!( expr, 
builder.expr().build_expr_kind(ast::ExprKind::Ret(Some(builder.expr().unit()))) ); } #[test] fn test_loop() { let builder = AstBuilder::new(); let expr = builder.expr().loop_() .block() .build(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Loop( builder.block().build(), None, ) ) ); let expr = builder.expr().loop_() .label("'a") .block() .build(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Loop( builder.block().build(), Some(respan(DUMMY_SP, builder.id("'a"))), ) ) ); } #[test] fn test_if() { let builder = AstBuilder::new(); let expr = builder.expr().if_() .true_() .then().expr().u32(1) .build(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::If( builder.expr().true_(), builder.block().expr().u32(1), None, ) ) ); let expr = builder.expr().if_() .true_() .then().expr().u32(1) .else_().expr().u32(2); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::If( builder.expr().true_(), builder.block().expr().u32(1), Some(builder.expr().block().expr().u32(2)) ) ) ); let expr = builder.expr() .if_() .eq().id("x").u32(1) .then() .expr().u32(1) .else_if() .eq().id("x").u32(2) .then() .expr().u32(2) .else_if() .eq().id("x").u32(3) .then() .expr().u32(3) .else_() .expr().u32(4); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::If( builder.expr() .eq().id("x").u32(1), builder.block() .expr().u32(1), Some( builder.expr().build_expr_kind( ast::ExprKind::If( builder.expr() .eq().id("x").u32(2), builder.block() .expr().u32(2), Some( builder.expr() .if_() .eq().id("x").u32(3) .then() .expr().u32(3) .else_() .expr().u32(4) ) ) ) ) ) ) ); } #[test] fn test_match() { let builder = AstBuilder::new(); let expr = builder.expr().match_().u32(0) .arm() .pat().expr().u32(0) .body().unit() .arm() .pat().expr().u32(1) .body().unit() .arm() .pat().wild() .body().unit() .build(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Match( builder.expr().u32(0), vec![ builder.arm() 
.pat().expr().u32(0) .body().unit(), builder.arm() .pat().expr().u32(1) .body().unit(), builder.arm() .pat().wild() .body().unit(), ] ) ) ); } #[test] fn test_index() { let builder = AstBuilder::new(); let expr = builder.expr().index() .id("x") .usize(2); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Index( builder.expr().id("x"), builder.expr().usize(2) ) ) ); } #[test] fn test_range() { let builder = AstBuilder::new(); assert_eq!( builder.expr().range().build(), builder.expr().build_expr_kind( ast::ExprKind::Range( None, None, ast::RangeLimits::HalfOpen) ) ); assert_eq!( builder.expr().range().from() .usize(0) .build(), builder.expr().build_expr_kind( ast::ExprKind::Range( Some(builder.expr().usize(0)), None, ast::RangeLimits::HalfOpen) ) ); assert_eq!( builder.expr().range() .to().usize(10), builder.expr().build_expr_kind( ast::ExprKind::Range( None, Some(builder.expr().usize(10)), ast::RangeLimits::HalfOpen) ) ); assert_eq!( builder.expr().range() .from().usize(0) .to().usize(10), builder.expr().build_expr_kind( ast::ExprKind::Range( Some(builder.expr().usize(0)), Some(builder.expr().usize(10)), ast::RangeLimits::HalfOpen) ) ); assert_eq!( builder.expr().range() .from().usize(0) .to_inclusive().usize(10), builder.expr().build_expr_kind( ast::ExprKind::Range( Some(builder.expr().usize(0)), Some(builder.expr().usize(10)), ast::RangeLimits::Closed) ) ); } #[test] fn test_repeat() { let builder = AstBuilder::new(); let expr = builder.expr().repeat() .u16(1024) .usize(16); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Repeat( builder.expr().u16(1024), builder.expr().usize(16) ) ) ); } #[test] fn test_trivial_closure() { let builder = AstBuilder::new(); let expr = builder.expr().closure() .by_value() .fn_decl().default_return() .expr().usize(1); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Closure( ast::CaptureBy::Value, builder.fn_decl().default_return(), builder.expr().usize(1), DUMMY_SP, ) ) ); } #[test] fn 
test_closure_by_ref() { let builder = AstBuilder::new(); let expr = builder.expr().closure() .by_ref() .fn_decl().default_return() .expr().usize(2); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Closure( ast::CaptureBy::Ref, builder.fn_decl().default_return(), builder.expr().usize(2), DUMMY_SP, ) ) ); } #[test] fn test_closure_block() { let builder = AstBuilder::new(); let expr = builder.expr().closure() .by_ref() .fn_decl().default_return() .expr().usize(3); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Closure( ast::CaptureBy::Ref, builder.fn_decl().default_return(), builder.expr().usize(3), DUMMY_SP, ) ) ); } #[test] fn test_while_loop() { let builder = AstBuilder::new(); let expr = builder.expr().while_().true_().block().expr().unit(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::While( builder.expr().true_(), builder.block().expr().unit(), None, ) ) ); } #[test] fn test_while_loop_label() { let builder = AstBuilder::new(); let expr = builder.expr().while_().true_().label("'lab").block().expr().unit(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::While( builder.expr().true_(), builder.block().expr().unit(), Some(respan(DUMMY_SP, builder.id("'lab"))), ) ) ); } #[test] fn test_while_let_loop() { let builder = AstBuilder::new(); let expr = builder.expr().while_().unit().pat().expr().unit().block().expr().unit(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::WhileLet( builder.pat().expr().unit(), builder.expr().unit(), builder.block().expr().unit(), None, ) ) ); } #[test] fn test_while_let_loop_label() { let builder = AstBuilder::new(); let expr = builder.expr().while_().unit().label("'lab").pat().expr().unit().block().expr().unit(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::WhileLet( builder.pat().expr().unit(), builder.expr().unit(), builder.block().expr().unit(), Some(respan(DUMMY_SP, builder.id("'lab"))), ) ) ); } #[test] fn 
test_type_ascription() { let builder = AstBuilder::new(); let expr = builder.expr().type_().u8(1).u8(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Type( builder.expr().u8(1), builder.ty().u8(), ) ) ); } Break-with in tests use syntax::ast; use syntax::codemap::{DUMMY_SP, Spanned, respan}; use syntax::ptr::P; use syntax::util::ThinVec; use aster::AstBuilder; #[test] fn test_lit() { let builder = AstBuilder::new(); fn check(expr: P<ast::Expr>, lit: P<ast::Lit>) { assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Lit(lit), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } check(builder.expr().bool(true), builder.lit().bool(true)); check(builder.expr().true_(), builder.lit().true_()); check(builder.expr().false_(), builder.lit().false_()); check(builder.expr().int(5), builder.lit().int(5)); check(builder.expr().i8(5), builder.lit().i8(5)); check(builder.expr().i16(5), builder.lit().i16(5)); check(builder.expr().i32(5), builder.lit().i32(5)); check(builder.expr().i64(5), builder.lit().i64(5)); check(builder.expr().isize(5), builder.lit().isize(5)); check(builder.expr().u8(5), builder.lit().u8(5)); check(builder.expr().u16(5), builder.lit().u16(5)); check(builder.expr().u32(5), builder.lit().u32(5)); check(builder.expr().u64(5), builder.lit().u64(5)); check(builder.expr().usize(5), builder.lit().usize(5)); // Doesn't crash. 
assert_eq!(builder.expr().i64(::std::i64::MIN), builder.expr().neg().lit().i64(1 << 63)); check(builder.expr().str("string"), builder.lit().str("string")); } #[test] fn test_path() { let builder = AstBuilder::new(); let expr = builder.expr().path() .id("x") .build(); assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Path( None, builder.path().id("x").build(), ), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_qpath() { let builder = AstBuilder::new(); let expr = builder.expr().qpath() .ty().slice().infer() .id("into_vec"); assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Path( Some(ast::QSelf { ty: builder.ty().slice().infer(), position: 0, }), builder.path().id("into_vec").build(), ), span: DUMMY_SP, attrs: ThinVec::new(), }) ); let expr: P<ast::Expr> = builder.expr().qpath() .ty().slice().infer() .as_().id("Slice").build() .id("into_vec"); assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Path( Some(ast::QSelf { ty: builder.ty().slice().infer(), position: 1, }), builder.path() .id("Slice") .id("into_vec") .build(), ), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_bin() { let builder = AstBuilder::new(); assert_eq!( builder.expr().add().i8(1).i8(2), P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Binary( Spanned { span: DUMMY_SP, node: ast::BinOpKind::Add, }, builder.expr().i8(1), builder.expr().i8(2), ), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_unit() { let builder = AstBuilder::new(); assert_eq!( builder.expr().unit(), P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Tup(vec![]), span: DUMMY_SP, attrs: ThinVec::new(), }) ); assert_eq!( builder.expr().tuple().build(), P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Tup(vec![]), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_tuple() { let builder = AstBuilder::new(); let expr = builder.expr().tuple() .expr().i8(1) .expr().tuple() 
.expr().unit() .expr().isize(2) .build() .build(); assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Tup(vec![ builder.expr().i8(1), P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Tup(vec![ builder.expr().unit(), builder.expr().isize(2), ]), span: DUMMY_SP, attrs: ThinVec::new(), }) ]), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_slice() { let builder = AstBuilder::new(); let expr = builder.expr().slice() .expr().i8(1) .expr().i8(2) .expr().i8(3) .build(); assert_eq!( expr, P(ast::Expr { id: ast::DUMMY_NODE_ID, node: ast::ExprKind::Vec(vec![ builder.expr().i8(1), builder.expr().i8(2), builder.expr().i8(3), ]), span: DUMMY_SP, attrs: ThinVec::new(), }) ); } #[test] fn test_vec() { let builder = AstBuilder::new(); let expr = builder.expr().vec() .expr().i8(1) .expr().i8(2) .expr().i8(3) .build(); assert_eq!( expr, builder.expr().call() .qpath().ty().slice().infer().id("into_vec") .arg().box_().slice() .expr().i8(1) .expr().i8(2) .expr().i8(3) .build() .build() ); } #[test] fn test_break() { let builder = AstBuilder::new(); let expr = builder.expr().break_(); assert_eq!( expr, builder.expr().build_expr_kind(ast::ExprKind::Break(None, None)) ); let expr = builder.expr().break_to("'a"); let id = respan(DUMMY_SP, builder.id("'a")); assert_eq!( expr, builder.expr().build_expr_kind(ast::ExprKind::Break(Some(id), None)) ); } #[test] fn test_continue() { let builder = AstBuilder::new(); let expr = builder.expr().continue_(); assert_eq!( expr, builder.expr().build_expr_kind(ast::ExprKind::Continue(None)) ); let expr = builder.expr().continue_to("'a"); let id = respan(DUMMY_SP, builder.id("'a")); assert_eq!( expr, builder.expr().build_expr_kind(ast::ExprKind::Continue(Some(id))) ); } #[test] fn test_return() { let builder = AstBuilder::new(); let expr = builder.expr().return_(); assert_eq!( expr, builder.expr().build_expr_kind(ast::ExprKind::Ret(None)) ); let expr = builder.expr().return_expr().unit(); assert_eq!( expr, 
builder.expr().build_expr_kind(ast::ExprKind::Ret(Some(builder.expr().unit()))) ); } #[test] fn test_loop() { let builder = AstBuilder::new(); let expr = builder.expr().loop_() .block() .build(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Loop( builder.block().build(), None, ) ) ); let expr = builder.expr().loop_() .label("'a") .block() .build(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Loop( builder.block().build(), Some(respan(DUMMY_SP, builder.id("'a"))), ) ) ); } #[test] fn test_if() { let builder = AstBuilder::new(); let expr = builder.expr().if_() .true_() .then().expr().u32(1) .build(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::If( builder.expr().true_(), builder.block().expr().u32(1), None, ) ) ); let expr = builder.expr().if_() .true_() .then().expr().u32(1) .else_().expr().u32(2); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::If( builder.expr().true_(), builder.block().expr().u32(1), Some(builder.expr().block().expr().u32(2)) ) ) ); let expr = builder.expr() .if_() .eq().id("x").u32(1) .then() .expr().u32(1) .else_if() .eq().id("x").u32(2) .then() .expr().u32(2) .else_if() .eq().id("x").u32(3) .then() .expr().u32(3) .else_() .expr().u32(4); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::If( builder.expr() .eq().id("x").u32(1), builder.block() .expr().u32(1), Some( builder.expr().build_expr_kind( ast::ExprKind::If( builder.expr() .eq().id("x").u32(2), builder.block() .expr().u32(2), Some( builder.expr() .if_() .eq().id("x").u32(3) .then() .expr().u32(3) .else_() .expr().u32(4) ) ) ) ) ) ) ); } #[test] fn test_match() { let builder = AstBuilder::new(); let expr = builder.expr().match_().u32(0) .arm() .pat().expr().u32(0) .body().unit() .arm() .pat().expr().u32(1) .body().unit() .arm() .pat().wild() .body().unit() .build(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Match( builder.expr().u32(0), vec![ builder.arm() 
.pat().expr().u32(0) .body().unit(), builder.arm() .pat().expr().u32(1) .body().unit(), builder.arm() .pat().wild() .body().unit(), ] ) ) ); } #[test] fn test_index() { let builder = AstBuilder::new(); let expr = builder.expr().index() .id("x") .usize(2); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Index( builder.expr().id("x"), builder.expr().usize(2) ) ) ); } #[test] fn test_range() { let builder = AstBuilder::new(); assert_eq!( builder.expr().range().build(), builder.expr().build_expr_kind( ast::ExprKind::Range( None, None, ast::RangeLimits::HalfOpen) ) ); assert_eq!( builder.expr().range().from() .usize(0) .build(), builder.expr().build_expr_kind( ast::ExprKind::Range( Some(builder.expr().usize(0)), None, ast::RangeLimits::HalfOpen) ) ); assert_eq!( builder.expr().range() .to().usize(10), builder.expr().build_expr_kind( ast::ExprKind::Range( None, Some(builder.expr().usize(10)), ast::RangeLimits::HalfOpen) ) ); assert_eq!( builder.expr().range() .from().usize(0) .to().usize(10), builder.expr().build_expr_kind( ast::ExprKind::Range( Some(builder.expr().usize(0)), Some(builder.expr().usize(10)), ast::RangeLimits::HalfOpen) ) ); assert_eq!( builder.expr().range() .from().usize(0) .to_inclusive().usize(10), builder.expr().build_expr_kind( ast::ExprKind::Range( Some(builder.expr().usize(0)), Some(builder.expr().usize(10)), ast::RangeLimits::Closed) ) ); } #[test] fn test_repeat() { let builder = AstBuilder::new(); let expr = builder.expr().repeat() .u16(1024) .usize(16); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Repeat( builder.expr().u16(1024), builder.expr().usize(16) ) ) ); } #[test] fn test_trivial_closure() { let builder = AstBuilder::new(); let expr = builder.expr().closure() .by_value() .fn_decl().default_return() .expr().usize(1); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Closure( ast::CaptureBy::Value, builder.fn_decl().default_return(), builder.expr().usize(1), DUMMY_SP, ) ) ); } #[test] fn 
test_closure_by_ref() { let builder = AstBuilder::new(); let expr = builder.expr().closure() .by_ref() .fn_decl().default_return() .expr().usize(2); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Closure( ast::CaptureBy::Ref, builder.fn_decl().default_return(), builder.expr().usize(2), DUMMY_SP, ) ) ); } #[test] fn test_closure_block() { let builder = AstBuilder::new(); let expr = builder.expr().closure() .by_ref() .fn_decl().default_return() .expr().usize(3); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Closure( ast::CaptureBy::Ref, builder.fn_decl().default_return(), builder.expr().usize(3), DUMMY_SP, ) ) ); } #[test] fn test_while_loop() { let builder = AstBuilder::new(); let expr = builder.expr().while_().true_().block().expr().unit(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::While( builder.expr().true_(), builder.block().expr().unit(), None, ) ) ); } #[test] fn test_while_loop_label() { let builder = AstBuilder::new(); let expr = builder.expr().while_().true_().label("'lab").block().expr().unit(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::While( builder.expr().true_(), builder.block().expr().unit(), Some(respan(DUMMY_SP, builder.id("'lab"))), ) ) ); } #[test] fn test_while_let_loop() { let builder = AstBuilder::new(); let expr = builder.expr().while_().unit().pat().expr().unit().block().expr().unit(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::WhileLet( builder.pat().expr().unit(), builder.expr().unit(), builder.block().expr().unit(), None, ) ) ); } #[test] fn test_while_let_loop_label() { let builder = AstBuilder::new(); let expr = builder.expr().while_().unit().label("'lab").pat().expr().unit().block().expr().unit(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::WhileLet( builder.pat().expr().unit(), builder.expr().unit(), builder.block().expr().unit(), Some(respan(DUMMY_SP, builder.id("'lab"))), ) ) ); } #[test] fn 
test_type_ascription() { let builder = AstBuilder::new(); let expr = builder.expr().type_().u8(1).u8(); assert_eq!( expr, builder.expr().build_expr_kind( ast::ExprKind::Type( builder.expr().u8(1), builder.ty().u8(), ) ) ); }
//! An implementation of `MemoryDB` database. use profiler; use leveldb::database::Database as _LevelDB; use leveldb::iterator::{Iterator as _Iterator, Iterable}; use leveldb::error::Error as _Error; use leveldb::database::snapshots::Snapshot as _Snapshot; use leveldb::options::{WriteOptions, ReadOptions}; use leveldb::database::batch::Writebatch; use leveldb::batch::Batch; use leveldb::snapshots::Snapshots; use std::fs; use std::io; use std::mem; use std::path::Path; use std::error; use std::sync::Arc; /// Options to consider when opening a new or pre-existing LevelDB database. pub use leveldb::options::Options as LevelDBOptions; /// Represents a LevelDB cache. pub use leveldb::database::cache::Cache as LevelDBCache; use super::{Database, Iterator, Iter, Snapshot, Error, Patch, Change, Result}; const LEVELDB_READ_OPTIONS: ReadOptions<'static> = ReadOptions { verify_checksums: false, fill_cache: true, snapshot: None, }; const LEVELDB_WRITE_OPTIONS: WriteOptions = WriteOptions { sync: false }; /// Database implementation on the top of LevelDB backend. #[derive(Clone)] pub struct LevelDB { db: Arc<_LevelDB>, } /// A snapshot of a `LevelDB`. struct LevelDBSnapshot { _db: Arc<_LevelDB>, snapshot: _Snapshot<'static>, } /// An iterator over an entries of a `LevelDB`. struct LevelDBIterator<'a> { iter: _Iterator<'a>, } impl From<_Error> for Error { fn from(err: _Error) -> Self { Error::new(error::Error::description(&err)) } } impl From<io::Error> for Error { fn from(err: io::Error) -> Self { Error::new(error::Error::description(&err)) } } impl LevelDB { /// Open a database stored in the specified path with the specified options. 
pub fn open<P: AsRef<Path>>(path: P, options: LevelDBOptions) -> Result<LevelDB> { // TODO: configurate LRU cache if options.create_if_missing { fs::create_dir_all(path.as_ref())?; } let database = _LevelDB::open(path.as_ref(), options)?; Ok(LevelDB { db: Arc::new(database) }) } } impl Database for LevelDB { fn clone(&self) -> Box<Database> { Box::new(Clone::clone(self)) } fn snapshot(&self) -> Box<Snapshot> { let _p = profiler::ProfilerSpan::new("LevelDB::snapshot"); Box::new(LevelDBSnapshot { _db: self.db.clone(), snapshot: unsafe { mem::transmute(self.db.snapshot()) }, }) } fn merge(&mut self, patch: Patch) -> Result<()> { let _p = profiler::ProfilerSpan::new("LevelDB::merge"); let mut batch = Writebatch::new(); for (key, change) in patch { match change { Change::Put(ref v) => batch.put(key, v), Change::Delete => batch.delete(key), } } self.db .write(LEVELDB_WRITE_OPTIONS, &batch) .map_err(Into::into) } } impl Snapshot for LevelDBSnapshot { fn get(&self, key: &[u8]) -> Option<Vec<u8>> { let _p = profiler::ProfilerSpan::new("LevelDBSnapshot::get"); match self.snapshot.get(LEVELDB_READ_OPTIONS, key) { Ok(value) => value, Err(err) => panic!(err), } } fn iter<'a>(&'a self, from: &[u8]) -> Iter<'a> { let _p = profiler::ProfilerSpan::new("LevelDBSnapshot::iter"); let mut iter = self.snapshot.iter(LEVELDB_READ_OPTIONS); iter.seek(from); Box::new(LevelDBIterator { iter: iter }) } } impl<'a> Iterator for LevelDBIterator<'a> { fn next(&mut self) -> Option<(&[u8], &[u8])> { let _p = profiler::ProfilerSpan::new("LevelDBIterator::next"); self.iter.next() } fn peek(&mut self) -> Option<(&[u8], &[u8])> { let _p = profiler::ProfilerSpan::new("LevelDBIterator::peek"); self.iter.peek() } } impl ::std::fmt::Debug for LevelDB { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "LevelDB(..)") } } impl ::std::fmt::Debug for LevelDBSnapshot { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "LevelDBSnapshot(..)") } } I should 
put `LevelDB` between ticks in the documentation Former-commit-id: b424cc58dfc3111a2f9325bf4a63b093a127493c //! An implementation of `MemoryDB` database. use profiler; use leveldb::database::Database as _LevelDB; use leveldb::iterator::{Iterator as _Iterator, Iterable}; use leveldb::error::Error as _Error; use leveldb::database::snapshots::Snapshot as _Snapshot; use leveldb::options::{WriteOptions, ReadOptions}; use leveldb::database::batch::Writebatch; use leveldb::batch::Batch; use leveldb::snapshots::Snapshots; use std::fs; use std::io; use std::mem; use std::path::Path; use std::error; use std::sync::Arc; /// Options to consider when opening a new or pre-existing `LevelDB` database. pub use leveldb::options::Options as LevelDBOptions; /// Represents a `LevelDB` cache. pub use leveldb::database::cache::Cache as LevelDBCache; use super::{Database, Iterator, Iter, Snapshot, Error, Patch, Change, Result}; const LEVELDB_READ_OPTIONS: ReadOptions<'static> = ReadOptions { verify_checksums: false, fill_cache: true, snapshot: None, }; const LEVELDB_WRITE_OPTIONS: WriteOptions = WriteOptions { sync: false }; /// Database implementation on the top of `LevelDB` backend. #[derive(Clone)] pub struct LevelDB { db: Arc<_LevelDB>, } /// A snapshot of a `LevelDB`. struct LevelDBSnapshot { _db: Arc<_LevelDB>, snapshot: _Snapshot<'static>, } /// An iterator over an entries of a `LevelDB`. struct LevelDBIterator<'a> { iter: _Iterator<'a>, } impl From<_Error> for Error { fn from(err: _Error) -> Self { Error::new(error::Error::description(&err)) } } impl From<io::Error> for Error { fn from(err: io::Error) -> Self { Error::new(error::Error::description(&err)) } } impl LevelDB { /// Open a database stored in the specified path with the specified options. 
pub fn open<P: AsRef<Path>>(path: P, options: LevelDBOptions) -> Result<LevelDB> { // TODO: configurate LRU cache if options.create_if_missing { fs::create_dir_all(path.as_ref())?; } let database = _LevelDB::open(path.as_ref(), options)?; Ok(LevelDB { db: Arc::new(database) }) } } impl Database for LevelDB { fn clone(&self) -> Box<Database> { Box::new(Clone::clone(self)) } fn snapshot(&self) -> Box<Snapshot> { let _p = profiler::ProfilerSpan::new("LevelDB::snapshot"); Box::new(LevelDBSnapshot { _db: self.db.clone(), snapshot: unsafe { mem::transmute(self.db.snapshot()) }, }) } fn merge(&mut self, patch: Patch) -> Result<()> { let _p = profiler::ProfilerSpan::new("LevelDB::merge"); let mut batch = Writebatch::new(); for (key, change) in patch { match change { Change::Put(ref v) => batch.put(key, v), Change::Delete => batch.delete(key), } } self.db .write(LEVELDB_WRITE_OPTIONS, &batch) .map_err(Into::into) } } impl Snapshot for LevelDBSnapshot { fn get(&self, key: &[u8]) -> Option<Vec<u8>> { let _p = profiler::ProfilerSpan::new("LevelDBSnapshot::get"); match self.snapshot.get(LEVELDB_READ_OPTIONS, key) { Ok(value) => value, Err(err) => panic!(err), } } fn iter<'a>(&'a self, from: &[u8]) -> Iter<'a> { let _p = profiler::ProfilerSpan::new("LevelDBSnapshot::iter"); let mut iter = self.snapshot.iter(LEVELDB_READ_OPTIONS); iter.seek(from); Box::new(LevelDBIterator { iter: iter }) } } impl<'a> Iterator for LevelDBIterator<'a> { fn next(&mut self) -> Option<(&[u8], &[u8])> { let _p = profiler::ProfilerSpan::new("LevelDBIterator::next"); self.iter.next() } fn peek(&mut self) -> Option<(&[u8], &[u8])> { let _p = profiler::ProfilerSpan::new("LevelDBIterator::peek"); self.iter.peek() } } impl ::std::fmt::Debug for LevelDB { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "LevelDB(..)") } } impl ::std::fmt::Debug for LevelDBSnapshot { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "LevelDBSnapshot(..)") } }
import std::{vec, str, option, unsafe, fs, sys, ctypes}; import std::map::hashmap; import lib::llvm::llvm; import lib::llvm::llvm::ValueRef; import middle::trans_common::*; import middle::ty; import syntax::{ast, codemap}; import ast::ty; import util::ppaux::ty_to_str; const LLVMDebugVersion: int = (9 << 16); const DW_LANG_RUST: int = 0x9000; const DW_VIRTUALITY_none: int = 0; const CompileUnitTag: int = 17; const FileDescriptorTag: int = 41; const SubprogramTag: int = 46; const SubroutineTag: int = 21; const BasicTypeDescriptorTag: int = 36; const AutoVariableTag: int = 256; const ArgVariableTag: int = 257; const ReturnVariableTag: int = 258; const LexicalBlockTag: int = 11; const PointerTypeTag: int = 15; const StructureTypeTag: int = 19; const MemberTag: int = 13; const DW_ATE_boolean: int = 0x02; const DW_ATE_float: int = 0x04; const DW_ATE_signed: int = 0x05; const DW_ATE_signed_char: int = 0x06; const DW_ATE_unsigned: int = 0x07; const DW_ATE_unsigned_char: int = 0x08; fn as_buf(s: str) -> str::sbuf { str::as_buf(s, {|sbuf| sbuf}) } fn llstr(s: str) -> ValueRef { llvm::LLVMMDString(as_buf(s), str::byte_len(s)) } fn lltag(lltag: int) -> ValueRef { lli32(LLVMDebugVersion | lltag) } fn lli32(val: int) -> ValueRef { C_i32(val as i32) } fn lli64(val: int) -> ValueRef { C_i64(val as i64) } fn lli1(bval: bool) -> ValueRef { C_bool(bval) } fn llmdnode(elems: [ValueRef]) -> ValueRef unsafe { llvm::LLVMMDNode(vec::unsafe::to_ptr(elems), vec::len(elems)) } fn llunused() -> ValueRef { lli32(0x0) } fn llnull() -> ValueRef unsafe { unsafe::reinterpret_cast(std::ptr::null::<ValueRef>()) } fn update_cache(cache: metadata_cache, mdtag: int, val: debug_metadata) { let existing = if cache.contains_key(mdtag) { cache.get(mdtag) } else { [] }; cache.insert(mdtag, existing + [val]); } //////////////// type debug_ctxt = { llmetadata: metadata_cache, //llmod: ValueRef, //opt: bool, names: trans_common::namegen }; //////////////// type metadata<T> = {node: ValueRef, data: T}; type 
file_md = {path: str}; type compile_unit_md = {path: str}; type subprogram_md = {name: str, file: str}; type local_var_md = {id: ast::node_id}; type tydesc_md = {hash: uint}; type block_md = {start: codemap::loc, end: codemap::loc}; type argument_md = {id: ast::node_id}; type retval_md = {id: ast::node_id}; type metadata_cache = hashmap<int, [debug_metadata]>; tag debug_metadata { file_metadata(@metadata<file_md>); compile_unit_metadata(@metadata<compile_unit_md>); subprogram_metadata(@metadata<subprogram_md>); local_var_metadata(@metadata<local_var_md>); tydesc_metadata(@metadata<tydesc_md>); block_metadata(@metadata<block_md>); argument_metadata(@metadata<argument_md>); retval_metadata(@metadata<retval_md>); } fn cast_safely<copy T, U>(val: T) -> U unsafe { let val2 = val; let val3 = unsafe::reinterpret_cast(val2); unsafe::leak(val2); ret val3; } fn md_from_metadata<T>(val: debug_metadata) -> T unsafe { alt val { file_metadata(md) { cast_safely(md) } compile_unit_metadata(md) { cast_safely(md) } subprogram_metadata(md) { cast_safely(md) } local_var_metadata(md) { cast_safely(md) } tydesc_metadata(md) { cast_safely(md) } block_metadata(md) { cast_safely(md) } argument_metadata(md) { cast_safely(md) } retval_metadata(md) { cast_safely(md) } } } fn cached_metadata<copy T>(cache: metadata_cache, mdtag: int, eq: block(md: T) -> bool) -> option::t<T> unsafe { if cache.contains_key(mdtag) { let items = cache.get(mdtag); for item in items { let md: T = md_from_metadata::<T>(item); if eq(md) { ret option::some(md); } } } ret option::none; } fn get_compile_unit_metadata(cx: @crate_ctxt, full_path: str) -> @metadata<compile_unit_md> { let cache = get_cache(cx); alt cached_metadata::<@metadata<compile_unit_md>>(cache, CompileUnitTag, {|md| md.data.path == full_path}) { option::some(md) { ret md; } option::none. 
{} } let fname = fs::basename(full_path); let path = fs::dirname(full_path); let unit_metadata = [lltag(CompileUnitTag), llunused(), lli32(DW_LANG_RUST), llstr(fname), llstr(path), llstr(#env["CFG_VERSION"]), lli1(false), // main compile unit lli1(cx.sess.get_opts().optimize != 0u), llstr(""), // flags (???) lli32(0) // runtime version (???) // list of enum types // list of retained values // list of subprograms // list of global variables ]; let unit_node = llmdnode(unit_metadata); llvm::LLVMAddNamedMetadataOperand(cx.llmod, as_buf("llvm.dbg.cu"), str::byte_len("llvm.dbg.cu"), unit_node); let mdval = @{node: unit_node, data: {path: full_path}}; update_cache(cache, CompileUnitTag, compile_unit_metadata(mdval)); ret mdval; } fn get_cache(cx: @crate_ctxt) -> metadata_cache { option::get(cx.dbg_cx).llmetadata } fn get_file_metadata(cx: @crate_ctxt, full_path: str) -> @metadata<file_md> { let cache = get_cache(cx);; let tg = FileDescriptorTag; alt cached_metadata::<@metadata<file_md>>( cache, tg, {|md| md.data.path == full_path}) { option::some(md) { ret md; } option::none. 
{} } let fname = fs::basename(full_path); let path = fs::dirname(full_path); let unit_node = get_compile_unit_metadata(cx, full_path).node; let file_md = [lltag(tg), llstr(fname), llstr(path), unit_node]; let val = llmdnode(file_md); let mdval = @{node: val, data: {path: full_path}}; update_cache(cache, tg, file_metadata(mdval)); ret mdval; } fn line_from_span(cm: codemap::codemap, sp: codemap::span) -> uint { codemap::lookup_char_pos(cm, sp.lo).line } fn get_block_metadata(cx: @block_ctxt) -> @metadata<block_md> { let cache = get_cache(bcx_ccx(cx)); let start = codemap::lookup_char_pos(bcx_ccx(cx).sess.get_codemap(), cx.sp.lo); let fname = start.filename; let end = codemap::lookup_char_pos(bcx_ccx(cx).sess.get_codemap(), cx.sp.hi); let tg = LexicalBlockTag; alt cached_metadata::<@metadata<block_md>>( cache, tg, {|md| start == md.data.start && end == md.data.end}) { option::some(md) { ret md; } option::none. {} } let parent = alt cx.parent { trans_common::parent_none. { function_metadata_from_block(cx).node } trans_common::parent_some(bcx) { get_block_metadata(cx).node } }; let file_node = get_file_metadata(bcx_ccx(cx), fname); let unique_id = alt cache.find(LexicalBlockTag) { option::some(v) { vec::len(v) as int } option::none. { 0 } }; let lldata = [lltag(tg), parent, lli32(start.line as int), lli32(start.col as int), file_node.node, lli32(unique_id) ]; let val = llmdnode(lldata); let mdval = @{node: val, data: {start: start, end: end}}; update_cache(cache, tg, block_metadata(mdval)); ret mdval; } fn size_and_align_of<T>() -> (int, int) { (sys::size_of::<T>() as int, sys::align_of::<T>() as int) } fn get_basic_type_metadata(cx: @crate_ctxt, t: ty::t, ty: @ast::ty) -> @metadata<tydesc_md> { let cache = get_cache(cx); let tg = BasicTypeDescriptorTag; alt cached_metadata::<@metadata<tydesc_md>>( cache, tg, {|md| ty::hash_ty(t) == ty::hash_ty(md.data.hash)}) { option::some(md) { ret md; } option::none. 
{} } let (name, (size, align), encoding) = alt ty.node { ast::ty_bool. {("bool", size_and_align_of::<bool>(), DW_ATE_boolean)} ast::ty_int(m) { alt m { ast::ty_char. {("char", size_and_align_of::<char>(), DW_ATE_unsigned)} ast::ty_i. {("int", size_and_align_of::<int>(), DW_ATE_signed)} ast::ty_i8. {("i8", size_and_align_of::<i8>(), DW_ATE_signed_char)} ast::ty_i16. {("i16", size_and_align_of::<i16>(), DW_ATE_signed)} ast::ty_i32. {("i32", size_and_align_of::<i32>(), DW_ATE_signed)} ast::ty_i64. {("i64", size_and_align_of::<i64>(), DW_ATE_signed)} }} ast::ty_uint(m) { alt m { ast::ty_u. {("uint", size_and_align_of::<uint>(), DW_ATE_unsigned)} ast::ty_u8. {("u8", size_and_align_of::<u8>(), DW_ATE_unsigned_char)} ast::ty_u16. {("u16", size_and_align_of::<u16>(), DW_ATE_unsigned)} ast::ty_u32. {("u32", size_and_align_of::<u32>(), DW_ATE_unsigned)} ast::ty_u64. {("u64", size_and_align_of::<u64>(), DW_ATE_unsigned)} }} ast::ty_float(m) { alt m { ast::ty_f. {("float", size_and_align_of::<float>(), DW_ATE_float)} ast::ty_f32. {("f32", size_and_align_of::<f32>(), DW_ATE_float)} ast::ty_f64. {("f64", size_and_align_of::<f64>(), DW_ATE_float)} }} }; let fname = filename_from_span(cx, ty.span); let file_node = get_file_metadata(cx, fname); let cu_node = get_compile_unit_metadata(cx, fname); let lldata = [lltag(tg), cu_node.node, llstr(name), file_node.node, lli32(0), //XXX source line lli64(size * 8), // size in bits lli64(align * 8), // alignment in bits lli64(0), //XXX offset? lli32(0), //XXX flags? 
lli32(encoding)]; let llnode = llmdnode(lldata); let mdval = @{node: llnode, data: {hash: ty::hash_ty(t)}}; update_cache(cache, tg, tydesc_metadata(mdval)); llvm::LLVMAddNamedMetadataOperand(cx.llmod, as_buf("llvm.dbg.ty"), str::byte_len("llvm.dbg.ty"), llnode); ret mdval; } fn get_pointer_type_metadata(cx: @crate_ctxt, t: ty::t, span: codemap::span, pointee: @metadata<tydesc_md>) -> @metadata<tydesc_md> { let tg = PointerTypeTag; /*let cache = cx.llmetadata; alt cached_metadata::<@metadata<tydesc_md>>( cache, tg, {|md| ty::hash_ty(t) == ty::hash_ty(md.data.hash)}) { option::some(md) { ret md; } option::none. {} }*/ let (size, align) = size_and_align_of::<ctypes::intptr_t>(); let fname = filename_from_span(cx, span); let file_node = get_file_metadata(cx, fname); //let cu_node = get_compile_unit_metadata(cx, fname); let lldata = [lltag(tg), file_node.node, llstr(""), file_node.node, lli32(0), //XXX source line lli64(size * 8), // size in bits lli64(align * 8), // alignment in bits lli64(0), //XXX offset? 
lli32(0), pointee.node]; let llnode = llmdnode(lldata); let mdval = @{node: llnode, data: {hash: ty::hash_ty(t)}}; //update_cache(cache, tg, tydesc_metadata(mdval)); llvm::LLVMAddNamedMetadataOperand(cx.llmod, as_buf("llvm.dbg.ty"), str::byte_len("llvm.dbg.ty"), llnode); ret mdval; } type struct_ctxt = { file: ValueRef, name: str, line: int, mutable members: [ValueRef], mutable total_size: int, align: int }; fn finish_structure(cx: @struct_ctxt) -> ValueRef { let lldata = [lltag(StructureTypeTag), cx.file, llstr(cx.name), // type name cx.file, // source file definition lli32(cx.line), // source line definition lli64(cx.total_size), // size of members lli64(cx.align), // align lli64(0), // offset lli32(0), // flags llnull(), // derived from llmdnode(cx.members), // members lli32(0), // runtime language llnull() ]; ret llmdnode(lldata); } fn create_structure(file: @metadata<file_md>, name: str, line: int) -> @struct_ctxt { let cx = @{file: file.node, name: name, line: line, mutable members: [], mutable total_size: 0, align: 64 //XXX different alignment per arch? 
}; ret cx; } fn add_member(cx: @struct_ctxt, name: str, line: int, size: int, align: int, ty: ValueRef) { let lldata = [lltag(MemberTag), cx.file, llstr(name), cx.file, lli32(line), lli64(size * 8), lli64(align * 8), lli64(cx.total_size), lli32(0), ty]; cx.total_size += size * 8; cx.members += [llmdnode(lldata)]; } fn get_record_metadata(cx: @crate_ctxt, t: ty::t, fields: [ast::ty_field], span: codemap::span) -> @metadata<tydesc_md> { let fname = filename_from_span(cx, span); let file_node = get_file_metadata(cx, fname); let scx = create_structure(file_node, option::get(cx.dbg_cx).names.next("rec"), line_from_span(cx.sess.get_codemap(), span) as int); for field in fields { //let field_t = option::get(ccx_tcx(cx).ast_ty_to_ty_cache.get(field.node.mt.ty)); let field_t = ty::get_field(ccx_tcx(cx), t, field.node.ident).mt.ty; let ty_md = get_ty_metadata(cx, field_t, field.node.mt.ty); let (size, align) = member_size_and_align(field.node.mt.ty); add_member(scx, field.node.ident, line_from_span(cx.sess.get_codemap(), field.span) as int, size as int, align as int, ty_md.node); } let mdval = @{node: finish_structure(scx), data:{hash: t}}; ret mdval; } fn get_boxed_type_metadata(cx: @crate_ctxt, outer: ty::t, inner: ty::t, span: codemap::span, boxed: @metadata<tydesc_md>) -> @metadata<tydesc_md> { let tg = StructureTypeTag; /*let cache = cx.llmetadata; alt cached_metadata::<@metadata<tydesc_md>>( cache, tg, {|md| ty::hash_ty(outer) == ty::hash_ty(md.data.hash)}) { option::some(md) { ret md; } option::none. 
{} }*/ let (size, align) = size_and_align_of::<@int>(); let fname = filename_from_span(cx, span); let file_node = get_file_metadata(cx, fname); //let cu_node = get_compile_unit_metadata(cx, fname); let tcx = ccx_tcx(cx); let uint_t = ty::mk_uint(tcx); let uint_ty = @{node: ast::ty_uint(ast::ty_u), span: span}; let refcount_type = get_basic_type_metadata(cx, uint_t, uint_ty); /*let refcount_ptr_type = get_pointer_type_metadata(cx, ty::mk_imm_uniq(tcx, uint_t), span, refcount_type);*/ /*let boxed_ptr_type = get_pointer_type_metadata(cx, ty::mk_imm_uniq(tcx, inner), span, boxed);*/ //let ptr_size = sys::size_of::<ctypes::intptr_t>() as int; //let ptr_align = sys::align_of::<ctypes::intptr_t>() as int; let size = sys::size_of::<uint>() as int * 8; let total_size = size; let refcount = [lltag(MemberTag), file_node.node, llstr("refcnt"), file_node.node, lli32(0), lli64(size), lli64(sys::align_of::<uint>() as int * 8), lli64(0), lli32(0), refcount_type.node]; let size = 64; //XXX member_size_and_align(???) let boxed_member = [lltag(MemberTag), file_node.node, llstr("boxed"), file_node.node, lli32(0), lli64(size), lli64(64), //XXX align of inner lli64(total_size), lli32(0), boxed.node]; total_size += size; let members = [llmdnode(refcount), llmdnode(boxed_member)]; let lldata = [lltag(tg), file_node.node, llstr(ty_to_str(ccx_tcx(cx), outer)), file_node.node, lli32(0), //XXX source line lli64(total_size), // size in bits lli64(align * 8), // alignment in bits lli64(0), //XXX offset? lli32(0), //XXX flags llnull(), // derived from llmdnode(members), // members lli32(0) // runtime language ]; let llnode = llmdnode(lldata); let mdval = @{node: llnode, data: {hash: outer}}; //update_cache(cache, tg, tydesc_metadata(mdval)); llvm::LLVMAddNamedMetadataOperand(cx.llmod, as_buf("llvm.dbg.ty"), str::byte_len("llvm.dbg.ty"), llnode); ret mdval; } fn member_size_and_align(ty: @ast::ty) -> (int, int) { alt ty.node { ast::ty_bool. 
{ size_and_align_of::<bool>() } ast::ty_int(m) { alt m { ast::ty_char. { size_and_align_of::<char>() } ast::ty_i. { size_and_align_of::<int>() } ast::ty_i8. { size_and_align_of::<i8>() } ast::ty_i16. { size_and_align_of::<i16>() } ast::ty_i32. { size_and_align_of::<i32>() } }} ast::ty_uint(m) { alt m { ast::ty_u. { size_and_align_of::<uint>() } ast::ty_u8. { size_and_align_of::<i8>() } ast::ty_u16. { size_and_align_of::<u16>() } ast::ty_u32. { size_and_align_of::<u32>() } }} ast::ty_float(m) { alt m { ast::ty_f. { size_and_align_of::<float>() } ast::ty_f32. { size_and_align_of::<f32>() } ast::ty_f64. { size_and_align_of::<f64>() } }} ast::ty_box(_) | ast::ty_uniq(_) { size_and_align_of::<ctypes::uintptr_t>() } ast::ty_rec(fields) { let total_size = 0; for field in fields { let (size, _) = member_size_and_align(field.node.mt.ty); total_size += size; } (total_size, 64) //XXX different align for other arches? } } } fn get_ty_metadata(cx: @crate_ctxt, t: ty::t, ty: @ast::ty) -> @metadata<tydesc_md> { /*let cache = get_cache(cx); alt cached_metadata::<@metadata<tydesc_md>>( cache, tg, {|md| t == md.data.hash}) { option::some(md) { ret md; } option::none. {} }*/ fn t_to_ty(cx: @crate_ctxt, t: ty::t, span: codemap::span) -> @ast::ty { let ty = alt ty::struct(ccx_tcx(cx), t) { ty::ty_nil. { ast::ty_nil } ty::ty_bot. { ast::ty_bot } ty::ty_bool. 
{ ast::ty_bool } ty::ty_int(t) { ast::ty_int(t) } ty::ty_float(t) { ast::ty_float(t) } ty::ty_uint(t) { ast::ty_uint(t) } ty::ty_box(mt) { ast::ty_box({ty: t_to_ty(cx, mt.ty, span), mut: mt.mut}) } ty::ty_uniq(mt) { ast::ty_uniq({ty: t_to_ty(cx, mt.ty, span), mut: mt.mut}) } ty::ty_rec(fields) { let fs = []; for field in fields { fs += [{node: {ident: field.ident, mt: {ty: t_to_ty(cx, field.mt.ty, span), mut: field.mt.mut}}, span: span}]; } ast::ty_rec(fs) } }; ret @{node: ty, span: span}; } alt ty.node { ast::ty_box(mt) { let inner_t = alt ty::struct(ccx_tcx(cx), t) { ty::ty_box(boxed) { boxed.ty } }; let md = get_ty_metadata(cx, inner_t, mt.ty); let box = get_boxed_type_metadata(cx, t, inner_t, ty.span, md); ret get_pointer_type_metadata(cx, t, ty.span, box); } ast::ty_uniq(mt) { let inner_t = alt ty::struct(ccx_tcx(cx), t) { ty::ty_uniq(boxed) { boxed.ty } }; let md = get_ty_metadata(cx, inner_t, mt.ty); ret get_pointer_type_metadata(cx, t, ty.span, md); } ast::ty_infer. { let inferred = t_to_ty(cx, t, ty.span); ret get_ty_metadata(cx, t, inferred); } ast::ty_rec(fields) { ret get_record_metadata(cx, t, fields, ty.span); } _ { ret get_basic_type_metadata(cx, t, ty); } }; } fn function_metadata_from_block(bcx: @block_ctxt) -> @metadata<subprogram_md> { let cx = bcx_ccx(bcx); let fcx = bcx_fcx(bcx); let fn_node = cx.ast_map.get(fcx.id); let fn_item = alt fn_node { ast_map::node_item(item) { item } }; get_function_metadata(fcx, fn_item, fcx.llfn) } fn filename_from_span(cx: @crate_ctxt, sp: codemap::span) -> str { codemap::lookup_char_pos(cx.sess.get_codemap(), sp.lo).filename } fn get_local_var_metadata(bcx: @block_ctxt, local: @ast::local) -> @metadata<local_var_md> unsafe { let cx = bcx_ccx(bcx); let cache = get_cache(cx); alt cached_metadata::<@metadata<local_var_md>>( cache, AutoVariableTag, {|md| md.data.id == local.node.id}) { option::some(md) { ret md; } option::none. 
{} } let name = alt local.node.pat.node { ast::pat_bind(ident) { ident } }; let loc = codemap::lookup_char_pos(cx.sess.get_codemap(), local.span.lo); let ty = trans::node_id_type(cx, local.node.id); let tymd = get_ty_metadata(cx, ty, local.node.ty); let filemd = get_file_metadata(cx, loc.filename); let context = alt bcx.parent { trans_common::parent_none. { function_metadata_from_block(bcx).node } trans_common::parent_some(_) { get_block_metadata(bcx).node } }; let lldata = [lltag(AutoVariableTag), context, // context llstr(name), // name filemd.node, lli32(loc.line as int), // line tymd.node, lli32(0) //XXX flags ]; let mdnode = llmdnode(lldata); let mdval = @{node: mdnode, data: {id: local.node.id}}; update_cache(cache, AutoVariableTag, local_var_metadata(mdval)); let llptr = alt bcx.fcx.lllocals.find(local.node.id) { option::some(local_mem(v)) { v } option::none. { alt bcx.fcx.lllocals.get(local.node.pat.id) { local_imm(v) { v } } } }; let declargs = [llmdnode([llptr]), mdnode]; trans_build::Call(bcx, cx.intrinsics.get("llvm.dbg.declare"), declargs); ret mdval; } //FIXME: consolidate with get_local_var_metadata /*fn get_retval_metadata(bcx: @block_ctxt) -> @metadata<retval_md> unsafe { let fcx = bcx_fcx(bcx); let cx = fcx_ccx(fcx); let cache = cx.llmetadata; alt cached_metadata::<@metadata<retval_md>>( cache, ReturnVariableTag, {|md| md.data.id == fcx.id}) { option::some(md) { ret md; } option::none. {} } let item = alt option::get(cx.ast_map.find(fcx.id)) { ast_map::node_item(item) { item } }; let loc = codemap::lookup_char_pos(cx.sess.get_codemap(), fcx.sp.lo); let ret_ty = alt item.node { ast::item_fn(f, _) { f.decl.output } }; let ty_node = alt ret_ty.node { ast::ty_nil. 
{ llnull() } _ { get_ty_metadata(cx, ty::node_id_to_type(ccx_tcx(cx), item.id), ret_ty).node } }; /*let ty_node = get_ty_metadata(cx, ty::node_id_to_type(ccx_tcx(cx), fcx.id), ty).node;*/ //let ty = trans::node_id_type(cx, arg.id); //let tymd = get_ty_metadata(cx, ty, arg.ty); let filemd = get_file_metadata(cx, loc.filename); let fn_node = cx.ast_map.get(fcx.id); let fn_item = alt fn_node { ast_map::node_item(item) { item } }; let context = get_function_metadata(fcx, fn_item, fcx.llfn); let lldata = [lltag(ReturnVariableTag), context.node, // context llstr("%0"), // name filemd.node, lli32(loc.line as int), // line ty_node, lli32(0) //XXX flags ]; let mdnode = llmdnode(lldata); let mdval = @{node: mdnode, data: {id: fcx.id}}; update_cache(cache, ReturnVariableTag, retval_metadata(mdval)); let llptr = fcx.llretptr; let declargs = [llmdnode([llptr]), mdnode]; trans_build::Call(bcx, cx.intrinsics.get("llvm.dbg.declare"), declargs); ret mdval; }*/ //FIXME: consolidate with get_local_var_metadata fn get_arg_metadata(bcx: @block_ctxt, arg: ast::arg) -> @metadata<argument_md> unsafe { let fcx = bcx_fcx(bcx); let cx = fcx_ccx(fcx); let cache = get_cache(cx); alt cached_metadata::<@metadata<argument_md>>( cache, ArgVariableTag, {|md| md.data.id == arg.id}) { option::some(md) { ret md; } option::none. 
{} } let arg_n = alt cx.ast_map.get(arg.id) { ast_map::node_arg(_, n) { n - 2u } }; let loc = codemap::lookup_char_pos(cx.sess.get_codemap(), fcx.sp.lo); let ty = trans::node_id_type(cx, arg.id); let tymd = get_ty_metadata(cx, ty, arg.ty); let filemd = get_file_metadata(cx, loc.filename); let fn_node = cx.ast_map.get(fcx.id); let fn_item = alt fn_node { ast_map::node_item(item) { item } }; let context = get_function_metadata(fcx, fn_item, fcx.llfn); let lldata = [lltag(ArgVariableTag), context.node, // context llstr(arg.ident), // name filemd.node, lli32(loc.line as int), // line tymd.node, lli32(0) //XXX flags ]; let mdnode = llmdnode(lldata); let mdval = @{node: mdnode, data: {id: arg.id}}; update_cache(cache, ArgVariableTag, argument_metadata(mdval)); let llptr = alt fcx.llargs.get(arg.id) { local_mem(v) | local_imm(v) { v } }; let declargs = [llmdnode([llptr]), mdnode]; trans_build::Call(bcx, cx.intrinsics.get("llvm.dbg.declare"), declargs); ret mdval; } fn update_source_pos(cx: @block_ctxt, s: codemap::span) -> @debug_source_pos { let dsp = @debug_source_pos(cx); if !bcx_ccx(cx).sess.get_opts().debuginfo { ret dsp; } let cm = bcx_ccx(cx).sess.get_codemap(); if vec::is_empty(cx.source_pos.pos) { cx.source_pos.usable = true; } cx.source_pos.pos += [codemap::lookup_char_pos(cm, s.lo)]; //XXX maybe hi ret dsp; } fn invalidate_source_pos(cx: @block_ctxt) -> @invalidated_source_pos { let isp = @invalidated_source_pos(cx); if !bcx_ccx(cx).sess.get_opts().debuginfo { ret isp; } cx.source_pos.usable = false; ret isp; } fn revalidate_source_pos(cx: @block_ctxt) { if !bcx_ccx(cx).sess.get_opts().debuginfo { ret; } cx.source_pos.usable = true; } fn reset_source_pos(cx: @block_ctxt) { if !bcx_ccx(cx).sess.get_opts().debuginfo { ret; } vec::pop(cx.source_pos.pos); } resource debug_source_pos(bcx: @block_ctxt) { reset_source_pos(bcx); } resource invalidated_source_pos(bcx: @block_ctxt) { revalidate_source_pos(bcx); } fn add_line_info(cx: @block_ctxt, llinstr: ValueRef) { if 
!bcx_ccx(cx).sess.get_opts().debuginfo || !cx.source_pos.usable || vec::is_empty(cx.source_pos.pos) { ret; } let loc = option::get(vec::last(cx.source_pos.pos)); let blockmd = get_block_metadata(cx); let kind_id = llvm::LLVMGetMDKindID(as_buf("dbg"), str::byte_len("dbg")); let scopedata = [lli32(loc.line as int), lli32(loc.col as int), blockmd.node, llnull()]; let dbgscope = llmdnode(scopedata); llvm::LLVMSetMetadata(llinstr, kind_id, dbgscope); } fn get_function_metadata(fcx: @fn_ctxt, item: @ast::item, llfndecl: ValueRef) -> @metadata<subprogram_md> { let cx = fcx_ccx(fcx); let cache = get_cache(cx); alt cached_metadata::<@metadata<subprogram_md>>( cache, SubprogramTag, {|md| md.data.name == item.ident && /*sub.path == ??*/ true}) { option::some(md) { ret md; } option::none. {} } let loc = codemap::lookup_char_pos(cx.sess.get_codemap(), item.span.lo); let file_node = get_file_metadata(cx, loc.filename).node; let mangled = cx.item_symbols.get(item.id); let ret_ty = alt item.node { ast::item_fn(f, _) { f.decl.output } }; let ty_node = alt ret_ty.node { ast::ty_nil. { llnull() } _ { get_ty_metadata(cx, ty::node_id_to_type(ccx_tcx(cx), item.id), ret_ty).node } }; let sub_type = llmdnode([ty_node]); let sub_metadata = [lltag(SubroutineTag), file_node, llstr(""), file_node, lli32(0), lli64(0), lli64(0), lli64(0), lli32(0), llnull(), sub_type, lli32(0), llnull()]; let sub_node = llmdnode(sub_metadata); let fn_metadata = [lltag(SubprogramTag), llunused(), file_node, llstr(item.ident), llstr(item.ident), //XXX fully-qualified C++ name llstr(mangled), //XXX MIPS name????? 
file_node, lli32(loc.line as int), sub_node, lli1(false), //XXX static (check export) lli1(true), // not extern lli32(DW_VIRTUALITY_none), // virtual-ness lli32(0i), //index into virt func llnull(), // base type with vtbl lli1(false), // artificial lli1(cx.sess.get_opts().optimize != 0u), llfndecl //list of template params //func decl descriptor //list of func vars ]; let val = llmdnode(fn_metadata); llvm::LLVMAddNamedMetadataOperand(cx.llmod, as_buf("llvm.dbg.sp"), str::byte_len("llvm.dbg.sp"), val); let mdval = @{node: val, data: {name: item.ident, file: loc.filename}}; update_cache(cache, SubprogramTag, subprogram_metadata(mdval)); /*alt ret_ty.node { ast::ty_nil. {} _ { let _ = get_retval_metadata(fcx, ret_ty); } }*/ ret mdval; } Add support for vectors. import std::{vec, str, option, unsafe, fs, sys, ctypes}; import std::map::hashmap; import lib::llvm::llvm; import lib::llvm::llvm::ValueRef; import middle::trans_common::*; import middle::ty; import syntax::{ast, codemap}; import ast::ty; import util::ppaux::ty_to_str; const LLVMDebugVersion: int = (9 << 16); const DW_LANG_RUST: int = 0x9000; const DW_VIRTUALITY_none: int = 0; const CompileUnitTag: int = 17; const FileDescriptorTag: int = 41; const SubprogramTag: int = 46; const SubroutineTag: int = 21; const BasicTypeDescriptorTag: int = 36; const AutoVariableTag: int = 256; const ArgVariableTag: int = 257; const ReturnVariableTag: int = 258; const LexicalBlockTag: int = 11; const PointerTypeTag: int = 15; const StructureTypeTag: int = 19; const MemberTag: int = 13; const ArrayTypeTag: int = 1; const SubrangeTag: int = 33; const DW_ATE_boolean: int = 0x02; const DW_ATE_float: int = 0x04; const DW_ATE_signed: int = 0x05; const DW_ATE_signed_char: int = 0x06; const DW_ATE_unsigned: int = 0x07; const DW_ATE_unsigned_char: int = 0x08; fn as_buf(s: str) -> str::sbuf { str::as_buf(s, {|sbuf| sbuf}) } fn llstr(s: str) -> ValueRef { llvm::LLVMMDString(as_buf(s), str::byte_len(s)) } fn lltag(lltag: int) -> ValueRef { 
lli32(LLVMDebugVersion | lltag) } fn lli32(val: int) -> ValueRef { C_i32(val as i32) } fn lli64(val: int) -> ValueRef { C_i64(val as i64) } fn lli1(bval: bool) -> ValueRef { C_bool(bval) } fn llmdnode(elems: [ValueRef]) -> ValueRef unsafe { llvm::LLVMMDNode(vec::unsafe::to_ptr(elems), vec::len(elems)) } fn llunused() -> ValueRef { lli32(0x0) } fn llnull() -> ValueRef unsafe { unsafe::reinterpret_cast(std::ptr::null::<ValueRef>()) } fn update_cache(cache: metadata_cache, mdtag: int, val: debug_metadata) { let existing = if cache.contains_key(mdtag) { cache.get(mdtag) } else { [] }; cache.insert(mdtag, existing + [val]); } //////////////// type debug_ctxt = { llmetadata: metadata_cache, //llmod: ValueRef, //opt: bool, names: trans_common::namegen }; //////////////// type metadata<T> = {node: ValueRef, data: T}; type file_md = {path: str}; type compile_unit_md = {path: str}; type subprogram_md = {name: str, file: str}; type local_var_md = {id: ast::node_id}; type tydesc_md = {hash: uint}; type block_md = {start: codemap::loc, end: codemap::loc}; type argument_md = {id: ast::node_id}; type retval_md = {id: ast::node_id}; type metadata_cache = hashmap<int, [debug_metadata]>; tag debug_metadata { file_metadata(@metadata<file_md>); compile_unit_metadata(@metadata<compile_unit_md>); subprogram_metadata(@metadata<subprogram_md>); local_var_metadata(@metadata<local_var_md>); tydesc_metadata(@metadata<tydesc_md>); block_metadata(@metadata<block_md>); argument_metadata(@metadata<argument_md>); retval_metadata(@metadata<retval_md>); } fn cast_safely<copy T, U>(val: T) -> U unsafe { let val2 = val; let val3 = unsafe::reinterpret_cast(val2); unsafe::leak(val2); ret val3; } fn md_from_metadata<T>(val: debug_metadata) -> T unsafe { alt val { file_metadata(md) { cast_safely(md) } compile_unit_metadata(md) { cast_safely(md) } subprogram_metadata(md) { cast_safely(md) } local_var_metadata(md) { cast_safely(md) } tydesc_metadata(md) { cast_safely(md) } block_metadata(md) { 
cast_safely(md) } argument_metadata(md) { cast_safely(md) } retval_metadata(md) { cast_safely(md) } } } fn cached_metadata<copy T>(cache: metadata_cache, mdtag: int, eq: block(md: T) -> bool) -> option::t<T> unsafe { if cache.contains_key(mdtag) { let items = cache.get(mdtag); for item in items { let md: T = md_from_metadata::<T>(item); if eq(md) { ret option::some(md); } } } ret option::none; } fn get_compile_unit_metadata(cx: @crate_ctxt, full_path: str) -> @metadata<compile_unit_md> { let cache = get_cache(cx); alt cached_metadata::<@metadata<compile_unit_md>>(cache, CompileUnitTag, {|md| md.data.path == full_path}) { option::some(md) { ret md; } option::none. {} } let fname = fs::basename(full_path); let path = fs::dirname(full_path); let unit_metadata = [lltag(CompileUnitTag), llunused(), lli32(DW_LANG_RUST), llstr(fname), llstr(path), llstr(#env["CFG_VERSION"]), lli1(false), // main compile unit lli1(cx.sess.get_opts().optimize != 0u), llstr(""), // flags (???) lli32(0) // runtime version (???) // list of enum types // list of retained values // list of subprograms // list of global variables ]; let unit_node = llmdnode(unit_metadata); llvm::LLVMAddNamedMetadataOperand(cx.llmod, as_buf("llvm.dbg.cu"), str::byte_len("llvm.dbg.cu"), unit_node); let mdval = @{node: unit_node, data: {path: full_path}}; update_cache(cache, CompileUnitTag, compile_unit_metadata(mdval)); ret mdval; } fn get_cache(cx: @crate_ctxt) -> metadata_cache { option::get(cx.dbg_cx).llmetadata } fn get_file_metadata(cx: @crate_ctxt, full_path: str) -> @metadata<file_md> { let cache = get_cache(cx);; let tg = FileDescriptorTag; alt cached_metadata::<@metadata<file_md>>( cache, tg, {|md| md.data.path == full_path}) { option::some(md) { ret md; } option::none. 
{} } let fname = fs::basename(full_path); let path = fs::dirname(full_path); let unit_node = get_compile_unit_metadata(cx, full_path).node; let file_md = [lltag(tg), llstr(fname), llstr(path), unit_node]; let val = llmdnode(file_md); let mdval = @{node: val, data: {path: full_path}}; update_cache(cache, tg, file_metadata(mdval)); ret mdval; } fn line_from_span(cm: codemap::codemap, sp: codemap::span) -> uint { codemap::lookup_char_pos(cm, sp.lo).line } fn get_block_metadata(cx: @block_ctxt) -> @metadata<block_md> { let cache = get_cache(bcx_ccx(cx)); let start = codemap::lookup_char_pos(bcx_ccx(cx).sess.get_codemap(), cx.sp.lo); let fname = start.filename; let end = codemap::lookup_char_pos(bcx_ccx(cx).sess.get_codemap(), cx.sp.hi); let tg = LexicalBlockTag; alt cached_metadata::<@metadata<block_md>>( cache, tg, {|md| start == md.data.start && end == md.data.end}) { option::some(md) { ret md; } option::none. {} } let parent = alt cx.parent { trans_common::parent_none. { function_metadata_from_block(cx).node } trans_common::parent_some(bcx) { get_block_metadata(cx).node } }; let file_node = get_file_metadata(bcx_ccx(cx), fname); let unique_id = alt cache.find(LexicalBlockTag) { option::some(v) { vec::len(v) as int } option::none. { 0 } }; let lldata = [lltag(tg), parent, lli32(start.line as int), lli32(start.col as int), file_node.node, lli32(unique_id) ]; let val = llmdnode(lldata); let mdval = @{node: val, data: {start: start, end: end}}; update_cache(cache, tg, block_metadata(mdval)); ret mdval; } fn size_and_align_of<T>() -> (int, int) { (sys::size_of::<T>() as int, sys::align_of::<T>() as int) } fn get_basic_type_metadata(cx: @crate_ctxt, t: ty::t, ty: @ast::ty) -> @metadata<tydesc_md> { let cache = get_cache(cx); let tg = BasicTypeDescriptorTag; alt cached_metadata::<@metadata<tydesc_md>>( cache, tg, {|md| ty::hash_ty(t) == ty::hash_ty(md.data.hash)}) { option::some(md) { ret md; } option::none. 
{} } let (name, (size, align), encoding) = alt ty.node { ast::ty_bool. {("bool", size_and_align_of::<bool>(), DW_ATE_boolean)} ast::ty_int(m) { alt m { ast::ty_char. {("char", size_and_align_of::<char>(), DW_ATE_unsigned)} ast::ty_i. {("int", size_and_align_of::<int>(), DW_ATE_signed)} ast::ty_i8. {("i8", size_and_align_of::<i8>(), DW_ATE_signed_char)} ast::ty_i16. {("i16", size_and_align_of::<i16>(), DW_ATE_signed)} ast::ty_i32. {("i32", size_and_align_of::<i32>(), DW_ATE_signed)} ast::ty_i64. {("i64", size_and_align_of::<i64>(), DW_ATE_signed)} }} ast::ty_uint(m) { alt m { ast::ty_u. {("uint", size_and_align_of::<uint>(), DW_ATE_unsigned)} ast::ty_u8. {("u8", size_and_align_of::<u8>(), DW_ATE_unsigned_char)} ast::ty_u16. {("u16", size_and_align_of::<u16>(), DW_ATE_unsigned)} ast::ty_u32. {("u32", size_and_align_of::<u32>(), DW_ATE_unsigned)} ast::ty_u64. {("u64", size_and_align_of::<u64>(), DW_ATE_unsigned)} }} ast::ty_float(m) { alt m { ast::ty_f. {("float", size_and_align_of::<float>(), DW_ATE_float)} ast::ty_f32. {("f32", size_and_align_of::<f32>(), DW_ATE_float)} ast::ty_f64. {("f64", size_and_align_of::<f64>(), DW_ATE_float)} }} }; let fname = filename_from_span(cx, ty.span); let file_node = get_file_metadata(cx, fname); let cu_node = get_compile_unit_metadata(cx, fname); let lldata = [lltag(tg), cu_node.node, llstr(name), file_node.node, lli32(0), //XXX source line lli64(size * 8), // size in bits lli64(align * 8), // alignment in bits lli64(0), //XXX offset? lli32(0), //XXX flags? 
lli32(encoding)]; let llnode = llmdnode(lldata); let mdval = @{node: llnode, data: {hash: ty::hash_ty(t)}}; update_cache(cache, tg, tydesc_metadata(mdval)); llvm::LLVMAddNamedMetadataOperand(cx.llmod, as_buf("llvm.dbg.ty"), str::byte_len("llvm.dbg.ty"), llnode); ret mdval; } fn get_pointer_type_metadata(cx: @crate_ctxt, t: ty::t, span: codemap::span, pointee: @metadata<tydesc_md>) -> @metadata<tydesc_md> { let tg = PointerTypeTag; /*let cache = cx.llmetadata; alt cached_metadata::<@metadata<tydesc_md>>( cache, tg, {|md| ty::hash_ty(t) == ty::hash_ty(md.data.hash)}) { option::some(md) { ret md; } option::none. {} }*/ let (size, align) = size_and_align_of::<ctypes::intptr_t>(); let fname = filename_from_span(cx, span); let file_node = get_file_metadata(cx, fname); //let cu_node = get_compile_unit_metadata(cx, fname); let lldata = [lltag(tg), file_node.node, llstr(""), file_node.node, lli32(0), //XXX source line lli64(size * 8), // size in bits lli64(align * 8), // alignment in bits lli64(0), //XXX offset? lli32(0), pointee.node]; let llnode = llmdnode(lldata); let mdval = @{node: llnode, data: {hash: ty::hash_ty(t)}}; //update_cache(cache, tg, tydesc_metadata(mdval)); llvm::LLVMAddNamedMetadataOperand(cx.llmod, as_buf("llvm.dbg.ty"), str::byte_len("llvm.dbg.ty"), llnode); ret mdval; } type struct_ctxt = { file: ValueRef, name: str, line: int, mutable members: [ValueRef], mutable total_size: int, align: int }; fn finish_structure(cx: @struct_ctxt) -> ValueRef { ret create_composite_type(StructureTypeTag, cx.name, cx.file, cx.line, cx.total_size, cx.align, 0, option::none, option::some(cx.members)); } fn create_structure(file: @metadata<file_md>, name: str, line: int) -> @struct_ctxt { let cx = @{file: file.node, name: name, line: line, mutable members: [], mutable total_size: 0, align: 64 //XXX different alignment per arch? 
}; ret cx; } fn add_member(cx: @struct_ctxt, name: str, line: int, size: int, align: int, ty: ValueRef) { let lldata = [lltag(MemberTag), cx.file, llstr(name), cx.file, lli32(line), lli64(size * 8), lli64(align * 8), lli64(cx.total_size), lli32(0), ty]; cx.total_size += size * 8; cx.members += [llmdnode(lldata)]; } fn get_record_metadata(cx: @crate_ctxt, t: ty::t, fields: [ast::ty_field], span: codemap::span) -> @metadata<tydesc_md> { let fname = filename_from_span(cx, span); let file_node = get_file_metadata(cx, fname); let scx = create_structure(file_node, option::get(cx.dbg_cx).names.next("rec"), line_from_span(cx.sess.get_codemap(), span) as int); for field in fields { let field_t = ty::get_field(ccx_tcx(cx), t, field.node.ident).mt.ty; let ty_md = get_ty_metadata(cx, field_t, field.node.mt.ty); let (size, align) = member_size_and_align(field.node.mt.ty); add_member(scx, field.node.ident, line_from_span(cx.sess.get_codemap(), field.span) as int, size as int, align as int, ty_md.node); } let mdval = @{node: finish_structure(scx), data:{hash: t}}; ret mdval; } fn get_boxed_type_metadata(cx: @crate_ctxt, outer: ty::t, inner: ty::t, span: codemap::span, boxed: @metadata<tydesc_md>) -> @metadata<tydesc_md> { let tg = StructureTypeTag; /*let cache = cx.llmetadata; alt cached_metadata::<@metadata<tydesc_md>>( cache, tg, {|md| ty::hash_ty(outer) == ty::hash_ty(md.data.hash)}) { option::some(md) { ret md; } option::none. 
{} }*/ let fname = filename_from_span(cx, span); let file_node = get_file_metadata(cx, fname); //let cu_node = get_compile_unit_metadata(cx, fname); let tcx = ccx_tcx(cx); let uint_t = ty::mk_uint(tcx); let uint_ty = @{node: ast::ty_uint(ast::ty_u), span: span}; let refcount_type = get_basic_type_metadata(cx, uint_t, uint_ty); let scx = create_structure(file_node, ty_to_str(ccx_tcx(cx), outer), 0); add_member(scx, "refcnt", 0, sys::size_of::<uint>() as int, sys::align_of::<uint>() as int, refcount_type.node); add_member(scx, "boxed", 0, 8, //XXX member_size_and_align(??) 8, //XXX just a guess boxed.node); let llnode = finish_structure(scx); let mdval = @{node: llnode, data: {hash: outer}}; //update_cache(cache, tg, tydesc_metadata(mdval)); llvm::LLVMAddNamedMetadataOperand(cx.llmod, as_buf("llvm.dbg.ty"), str::byte_len("llvm.dbg.ty"), llnode); ret mdval; } fn create_composite_type(type_tag: int, name: str, file: ValueRef, line: int, size: int, align: int, offset: int, derived: option::t<ValueRef>, members: option::t<[ValueRef]>) -> ValueRef { let lldata = [lltag(type_tag), file, llstr(name), // type name file, // source file definition lli32(line), // source line definition lli64(size), // size of members lli64(align), // align lli64(offset), // offset lli32(0), // flags option::is_none(derived) ? llnull() : // derived from option::get(derived), option::is_none(members) ? 
llnull() : // members llmdnode(option::get(members)), lli32(0), // runtime language llnull() ]; ret llmdnode(lldata); } fn get_vec_metadata(cx: @crate_ctxt, vec_t: ty::t, elem_t: ty::t, vec_ty: @ast::ty) -> @metadata<tydesc_md> { let fname = filename_from_span(cx, vec_ty.span); let file_node = get_file_metadata(cx, fname); let elem_ty = alt vec_ty.node { ast::ty_vec(mt) { mt.ty } }; let elem_ty_md = get_ty_metadata(cx, elem_t, elem_ty); let tcx = ccx_tcx(cx); let scx = create_structure(file_node, ty_to_str(tcx, vec_t), 0); let uint_ty = @{node: ast::ty_uint(ast::ty_u), span: vec_ty.span}; let size_t_type = get_basic_type_metadata(cx, ty::mk_uint(tcx), uint_ty); add_member(scx, "fill", 0, sys::size_of::<ctypes::size_t>() as int, sys::align_of::<ctypes::size_t>() as int, size_t_type.node); add_member(scx, "alloc", 0, sys::size_of::<ctypes::size_t>() as int, sys::align_of::<ctypes::size_t>() as int, size_t_type.node); let subrange = llmdnode([lltag(SubrangeTag), lli64(0), lli64(0)]); let (arr_size, arr_align) = member_size_and_align(elem_ty); let data_ptr = create_composite_type(ArrayTypeTag, "", file_node.node, 0, arr_size, arr_align, 0, option::some(elem_ty_md.node), option::some([subrange])); add_member(scx, "data", 0, 0, // according to an equivalent clang dump, the size should be 0 sys::align_of::<u8>() as int, data_ptr); let llnode = finish_structure(scx); ret @{node: llnode, data: {hash: vec_t}}; } fn member_size_and_align(ty: @ast::ty) -> (int, int) { alt ty.node { ast::ty_bool. { size_and_align_of::<bool>() } ast::ty_int(m) { alt m { ast::ty_char. { size_and_align_of::<char>() } ast::ty_i. { size_and_align_of::<int>() } ast::ty_i8. { size_and_align_of::<i8>() } ast::ty_i16. { size_and_align_of::<i16>() } ast::ty_i32. { size_and_align_of::<i32>() } }} ast::ty_uint(m) { alt m { ast::ty_u. { size_and_align_of::<uint>() } ast::ty_u8. { size_and_align_of::<i8>() } ast::ty_u16. { size_and_align_of::<u16>() } ast::ty_u32. 
{ size_and_align_of::<u32>() } }} ast::ty_float(m) { alt m { ast::ty_f. { size_and_align_of::<float>() } ast::ty_f32. { size_and_align_of::<f32>() } ast::ty_f64. { size_and_align_of::<f64>() } }} ast::ty_box(_) | ast::ty_uniq(_) { size_and_align_of::<ctypes::uintptr_t>() } ast::ty_rec(fields) { let total_size = 0; for field in fields { let (size, _) = member_size_and_align(field.node.mt.ty); total_size += size; } (total_size, 64) //XXX different align for other arches? } ast::ty_vec(_) { size_and_align_of::<ctypes::uintptr_t>() } } } fn get_ty_metadata(cx: @crate_ctxt, t: ty::t, ty: @ast::ty) -> @metadata<tydesc_md> { /*let cache = get_cache(cx); alt cached_metadata::<@metadata<tydesc_md>>( cache, tg, {|md| t == md.data.hash}) { option::some(md) { ret md; } option::none. {} }*/ fn t_to_ty(cx: @crate_ctxt, t: ty::t, span: codemap::span) -> @ast::ty { let ty = alt ty::struct(ccx_tcx(cx), t) { ty::ty_nil. { ast::ty_nil } ty::ty_bot. { ast::ty_bot } ty::ty_bool. { ast::ty_bool } ty::ty_int(t) { ast::ty_int(t) } ty::ty_float(t) { ast::ty_float(t) } ty::ty_uint(t) { ast::ty_uint(t) } ty::ty_box(mt) { ast::ty_box({ty: t_to_ty(cx, mt.ty, span), mut: mt.mut}) } ty::ty_uniq(mt) { ast::ty_uniq({ty: t_to_ty(cx, mt.ty, span), mut: mt.mut}) } ty::ty_rec(fields) { let fs = []; for field in fields { fs += [{node: {ident: field.ident, mt: {ty: t_to_ty(cx, field.mt.ty, span), mut: field.mt.mut}}, span: span}]; } ast::ty_rec(fs) } ty::ty_vec(mt) { ast::ty_vec({ty: t_to_ty(cx, mt.ty, span), mut: mt.mut}) } }; ret @{node: ty, span: span}; } alt ty.node { ast::ty_box(mt) { let inner_t = alt ty::struct(ccx_tcx(cx), t) { ty::ty_box(boxed) { boxed.ty } }; let md = get_ty_metadata(cx, inner_t, mt.ty); let box = get_boxed_type_metadata(cx, t, inner_t, ty.span, md); ret get_pointer_type_metadata(cx, t, ty.span, box); } ast::ty_uniq(mt) { let inner_t = alt ty::struct(ccx_tcx(cx), t) { ty::ty_uniq(boxed) { boxed.ty } }; let md = get_ty_metadata(cx, inner_t, mt.ty); ret 
get_pointer_type_metadata(cx, t, ty.span, md); } ast::ty_infer. { let inferred = t_to_ty(cx, t, ty.span); ret get_ty_metadata(cx, t, inferred); } ast::ty_rec(fields) { ret get_record_metadata(cx, t, fields, ty.span); } ast::ty_vec(mt) { let inner_t = ty::sequence_element_type(ccx_tcx(cx), t); let v = get_vec_metadata(cx, t, inner_t, ty); ret get_pointer_type_metadata(cx, t, ty.span, v); } _ { ret get_basic_type_metadata(cx, t, ty); } }; } fn function_metadata_from_block(bcx: @block_ctxt) -> @metadata<subprogram_md> { let cx = bcx_ccx(bcx); let fcx = bcx_fcx(bcx); let fn_node = cx.ast_map.get(fcx.id); let fn_item = alt fn_node { ast_map::node_item(item) { item } }; get_function_metadata(fcx, fn_item, fcx.llfn) } fn filename_from_span(cx: @crate_ctxt, sp: codemap::span) -> str { codemap::lookup_char_pos(cx.sess.get_codemap(), sp.lo).filename } fn get_local_var_metadata(bcx: @block_ctxt, local: @ast::local) -> @metadata<local_var_md> unsafe { let cx = bcx_ccx(bcx); let cache = get_cache(cx); alt cached_metadata::<@metadata<local_var_md>>( cache, AutoVariableTag, {|md| md.data.id == local.node.id}) { option::some(md) { ret md; } option::none. {} } let name = alt local.node.pat.node { ast::pat_bind(ident) { ident } }; let loc = codemap::lookup_char_pos(cx.sess.get_codemap(), local.span.lo); let ty = trans::node_id_type(cx, local.node.id); let tymd = get_ty_metadata(cx, ty, local.node.ty); let filemd = get_file_metadata(cx, loc.filename); let context = alt bcx.parent { trans_common::parent_none. 
{ function_metadata_from_block(bcx).node } trans_common::parent_some(_) { get_block_metadata(bcx).node } }; let lldata = [lltag(AutoVariableTag), context, // context llstr(name), // name filemd.node, lli32(loc.line as int), // line tymd.node, lli32(0) //XXX flags ]; let mdnode = llmdnode(lldata); let mdval = @{node: mdnode, data: {id: local.node.id}}; update_cache(cache, AutoVariableTag, local_var_metadata(mdval)); let llptr = alt bcx.fcx.lllocals.find(local.node.id) { option::some(local_mem(v)) { v } option::none. { alt bcx.fcx.lllocals.get(local.node.pat.id) { local_imm(v) { v } } } }; let declargs = [llmdnode([llptr]), mdnode]; trans_build::Call(bcx, cx.intrinsics.get("llvm.dbg.declare"), declargs); ret mdval; } //FIXME: consolidate with get_local_var_metadata /*fn get_retval_metadata(bcx: @block_ctxt) -> @metadata<retval_md> unsafe { let fcx = bcx_fcx(bcx); let cx = fcx_ccx(fcx); let cache = cx.llmetadata; alt cached_metadata::<@metadata<retval_md>>( cache, ReturnVariableTag, {|md| md.data.id == fcx.id}) { option::some(md) { ret md; } option::none. {} } let item = alt option::get(cx.ast_map.find(fcx.id)) { ast_map::node_item(item) { item } }; let loc = codemap::lookup_char_pos(cx.sess.get_codemap(), fcx.sp.lo); let ret_ty = alt item.node { ast::item_fn(f, _) { f.decl.output } }; let ty_node = alt ret_ty.node { ast::ty_nil. 
{ llnull() } _ { get_ty_metadata(cx, ty::node_id_to_type(ccx_tcx(cx), item.id), ret_ty).node } }; /*let ty_node = get_ty_metadata(cx, ty::node_id_to_type(ccx_tcx(cx), fcx.id), ty).node;*/ //let ty = trans::node_id_type(cx, arg.id); //let tymd = get_ty_metadata(cx, ty, arg.ty); let filemd = get_file_metadata(cx, loc.filename); let fn_node = cx.ast_map.get(fcx.id); let fn_item = alt fn_node { ast_map::node_item(item) { item } }; let context = get_function_metadata(fcx, fn_item, fcx.llfn); let lldata = [lltag(ReturnVariableTag), context.node, // context llstr("%0"), // name filemd.node, lli32(loc.line as int), // line ty_node, lli32(0) //XXX flags ]; let mdnode = llmdnode(lldata); let mdval = @{node: mdnode, data: {id: fcx.id}}; update_cache(cache, ReturnVariableTag, retval_metadata(mdval)); let llptr = fcx.llretptr; let declargs = [llmdnode([llptr]), mdnode]; trans_build::Call(bcx, cx.intrinsics.get("llvm.dbg.declare"), declargs); ret mdval; }*/ //FIXME: consolidate with get_local_var_metadata fn get_arg_metadata(bcx: @block_ctxt, arg: ast::arg) -> @metadata<argument_md> unsafe { let fcx = bcx_fcx(bcx); let cx = fcx_ccx(fcx); let cache = get_cache(cx); alt cached_metadata::<@metadata<argument_md>>( cache, ArgVariableTag, {|md| md.data.id == arg.id}) { option::some(md) { ret md; } option::none. 
{} } let arg_n = alt cx.ast_map.get(arg.id) { ast_map::node_arg(_, n) { n - 2u } }; let loc = codemap::lookup_char_pos(cx.sess.get_codemap(), fcx.sp.lo); let ty = trans::node_id_type(cx, arg.id); let tymd = get_ty_metadata(cx, ty, arg.ty); let filemd = get_file_metadata(cx, loc.filename); let fn_node = cx.ast_map.get(fcx.id); let fn_item = alt fn_node { ast_map::node_item(item) { item } }; let context = get_function_metadata(fcx, fn_item, fcx.llfn); let lldata = [lltag(ArgVariableTag), context.node, // context llstr(arg.ident), // name filemd.node, lli32(loc.line as int), // line tymd.node, lli32(0) //XXX flags ]; let mdnode = llmdnode(lldata); let mdval = @{node: mdnode, data: {id: arg.id}}; update_cache(cache, ArgVariableTag, argument_metadata(mdval)); let llptr = alt fcx.llargs.get(arg.id) { local_mem(v) | local_imm(v) { v } }; let declargs = [llmdnode([llptr]), mdnode]; trans_build::Call(bcx, cx.intrinsics.get("llvm.dbg.declare"), declargs); ret mdval; } fn update_source_pos(cx: @block_ctxt, s: codemap::span) -> @debug_source_pos { let dsp = @debug_source_pos(cx); if !bcx_ccx(cx).sess.get_opts().debuginfo { ret dsp; } let cm = bcx_ccx(cx).sess.get_codemap(); if vec::is_empty(cx.source_pos.pos) { cx.source_pos.usable = true; } cx.source_pos.pos += [codemap::lookup_char_pos(cm, s.lo)]; //XXX maybe hi ret dsp; } fn invalidate_source_pos(cx: @block_ctxt) -> @invalidated_source_pos { let isp = @invalidated_source_pos(cx); if !bcx_ccx(cx).sess.get_opts().debuginfo { ret isp; } cx.source_pos.usable = false; ret isp; } fn revalidate_source_pos(cx: @block_ctxt) { if !bcx_ccx(cx).sess.get_opts().debuginfo { ret; } cx.source_pos.usable = true; } fn reset_source_pos(cx: @block_ctxt) { if !bcx_ccx(cx).sess.get_opts().debuginfo { ret; } vec::pop(cx.source_pos.pos); } resource debug_source_pos(bcx: @block_ctxt) { reset_source_pos(bcx); } resource invalidated_source_pos(bcx: @block_ctxt) { revalidate_source_pos(bcx); } fn add_line_info(cx: @block_ctxt, llinstr: ValueRef) { if 
!bcx_ccx(cx).sess.get_opts().debuginfo || !cx.source_pos.usable || vec::is_empty(cx.source_pos.pos) { ret; } let loc = option::get(vec::last(cx.source_pos.pos)); let blockmd = get_block_metadata(cx); let kind_id = llvm::LLVMGetMDKindID(as_buf("dbg"), str::byte_len("dbg")); let scopedata = [lli32(loc.line as int), lli32(loc.col as int), blockmd.node, llnull()]; let dbgscope = llmdnode(scopedata); llvm::LLVMSetMetadata(llinstr, kind_id, dbgscope); } fn get_function_metadata(fcx: @fn_ctxt, item: @ast::item, llfndecl: ValueRef) -> @metadata<subprogram_md> { let cx = fcx_ccx(fcx); let cache = get_cache(cx); alt cached_metadata::<@metadata<subprogram_md>>( cache, SubprogramTag, {|md| md.data.name == item.ident && /*sub.path == ??*/ true}) { option::some(md) { ret md; } option::none. {} } let loc = codemap::lookup_char_pos(cx.sess.get_codemap(), item.span.lo); let file_node = get_file_metadata(cx, loc.filename).node; let mangled = cx.item_symbols.get(item.id); let ret_ty = alt item.node { ast::item_fn(f, _) { f.decl.output } }; let ty_node = alt ret_ty.node { ast::ty_nil. { llnull() } _ { get_ty_metadata(cx, ty::node_id_to_type(ccx_tcx(cx), item.id), ret_ty).node } }; let sub_type = llmdnode([ty_node]); let sub_metadata = [lltag(SubroutineTag), file_node, llstr(""), file_node, lli32(0), lli64(0), lli64(0), lli64(0), lli32(0), llnull(), sub_type, lli32(0), llnull()]; let sub_node = llmdnode(sub_metadata); let fn_metadata = [lltag(SubprogramTag), llunused(), file_node, llstr(item.ident), llstr(item.ident), //XXX fully-qualified C++ name llstr(mangled), //XXX MIPS name????? 
file_node, lli32(loc.line as int), sub_node, lli1(false), //XXX static (check export) lli1(true), // not extern lli32(DW_VIRTUALITY_none), // virtual-ness lli32(0i), //index into virt func llnull(), // base type with vtbl lli1(false), // artificial lli1(cx.sess.get_opts().optimize != 0u), llfndecl //list of template params //func decl descriptor //list of func vars ]; let val = llmdnode(fn_metadata); llvm::LLVMAddNamedMetadataOperand(cx.llmod, as_buf("llvm.dbg.sp"), str::byte_len("llvm.dbg.sp"), val); let mdval = @{node: val, data: {name: item.ident, file: loc.filename}}; update_cache(cache, SubprogramTag, subprogram_metadata(mdval)); /*alt ret_ty.node { ast::ty_nil. {} _ { let _ = get_retval_metadata(fcx, ret_ty); } }*/ ret mdval; }
import std::{str, vec, option}; import option::{some, none}; import std::map::hashmap; import lib::llvm::llvm; import lib::llvm::llvm::{ValueRef, TypeRef, BasicBlockRef}; import trans_build::*; import trans::{new_sub_block_ctxt, new_scope_block_ctxt, load_if_immediate}; import ty::pat_ty; import syntax::ast; import syntax::ast_util; import syntax::ast_util::dummy_sp; import syntax::ast::def_id; import syntax::codemap::span; import util::common::lit_eq; import trans_common::*; // An option identifying a branch (either a literal or a tag variant) tag opt { lit(@ast::lit); var(/* variant id */uint, /* variant dids */{tg: def_id, var: def_id}); } fn opt_eq(a: opt, b: opt) -> bool { alt a { lit(la) { ret alt b { lit(lb) { lit_eq(la, lb) } var(_, _) { false } }; } var(ida, _) { ret alt b { lit(_) { false } var(idb, _) { ida == idb } }; } } } fn trans_opt(bcx: @block_ctxt, o: opt) -> result { alt o { lit(l) { ret trans::trans_lit(bcx, *l); } var(id, _) { ret rslt(bcx, C_int(id as int)); } } } fn variant_opt(ccx: @crate_ctxt, pat_id: ast::node_id) -> opt { let vdef = ast_util::variant_def_ids(ccx.tcx.def_map.get(pat_id)); let variants = ty::tag_variants(ccx.tcx, vdef.tg); let i = 0u; for v: ty::variant_info in variants { if vdef.var == v.id { ret var(i, vdef); } i += 1u; } fail; } type bind_map = [{ident: ast::ident, val: ValueRef}]; fn assoc(key: str, list: bind_map) -> option::t<ValueRef> { for elt: {ident: ast::ident, val: ValueRef} in list { if str::eq(elt.ident, key) { ret some(elt.val); } } ret none; } type match_branch = @{pats: [@ast::pat], bound: bind_map, data: @{body: BasicBlockRef, guard: option::t<@ast::expr>, id_map: ast_util::pat_id_map}}; type match = [match_branch]; fn matches_always(p: @ast::pat) -> bool { ret alt p.node { ast::pat_wild. 
{ true } ast::pat_bind(_) { true } ast::pat_rec(_, _) { true } ast::pat_tup(_) { true } _ { false } }; } type enter_pat = fn(@ast::pat) -> option::t<[@ast::pat]>; fn enter_match(m: match, col: uint, val: ValueRef, e: enter_pat) -> match { let result = []; for br: match_branch in m { alt e(br.pats[col]) { some(sub) { let pats = vec::slice(br.pats, 0u, col) + sub + vec::slice(br.pats, col + 1u, vec::len(br.pats)); let new_br = @{pats: pats, bound: alt br.pats[col].node { ast::pat_bind(name) { br.bound + [{ident: name, val: val}] } _ { br.bound } } with *br}; result += [new_br]; } none. { } } } ret result; } fn enter_default(m: match, col: uint, val: ValueRef) -> match { fn e(p: @ast::pat) -> option::t<[@ast::pat]> { ret if matches_always(p) { some([]) } else { none }; } ret enter_match(m, col, val, e); } fn enter_opt(ccx: @crate_ctxt, m: match, opt: opt, col: uint, tag_size: uint, val: ValueRef) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; fn e(ccx: @crate_ctxt, dummy: @ast::pat, opt: opt, size: uint, p: @ast::pat) -> option::t<[@ast::pat]> { alt p.node { ast::pat_tag(ctor, subpats) { ret if opt_eq(variant_opt(ccx, p.id), opt) { some(subpats) } else { none }; } ast::pat_lit(l) { ret if opt_eq(lit(l), opt) { some([]) } else { none }; } _ { ret some(vec::init_elt(dummy, size)); } } } ret enter_match(m, col, val, bind e(ccx, dummy, opt, tag_size, _)); } fn enter_rec(m: match, col: uint, fields: [ast::ident], val: ValueRef) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; fn e(dummy: @ast::pat, fields: [ast::ident], p: @ast::pat) -> option::t<[@ast::pat]> { alt p.node { ast::pat_rec(fpats, _) { let pats = []; for fname: ast::ident in fields { let pat = dummy; for fpat: ast::field_pat in fpats { if str::eq(fpat.ident, fname) { pat = fpat.pat; break; } } pats += [pat]; } ret some(pats); } _ { ret some(vec::init_elt(dummy, vec::len(fields))); } } } ret enter_match(m, col, val, bind e(dummy, fields, _)); } fn enter_tup(m: 
match, col: uint, val: ValueRef, n_elts: uint) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; fn e(dummy: @ast::pat, n_elts: uint, p: @ast::pat) -> option::t<[@ast::pat]> { alt p.node { ast::pat_tup(elts) { ret some(elts); } _ { ret some(vec::init_elt(dummy, n_elts)); } } } ret enter_match(m, col, val, bind e(dummy, n_elts, _)); } fn enter_box(m: match, col: uint, val: ValueRef) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; fn e(dummy: @ast::pat, p: @ast::pat) -> option::t<[@ast::pat]> { alt p.node { ast::pat_box(sub) { ret some([sub]); } _ { ret some([dummy]); } } } ret enter_match(m, col, val, bind e(dummy, _)); } fn get_options(ccx: @crate_ctxt, m: match, col: uint) -> [opt] { fn add_to_set(&set: [opt], val: opt) { for l: opt in set { if opt_eq(l, val) { ret; } } set += [val]; } let found = []; for br: match_branch in m { alt br.pats[col].node { ast::pat_lit(l) { add_to_set(found, lit(l)); } ast::pat_tag(_, _) { add_to_set(found, variant_opt(ccx, br.pats[col].id)); } _ { } } } ret found; } fn extract_variant_args(bcx: @block_ctxt, pat_id: ast::node_id, vdefs: {tg: def_id, var: def_id}, val: ValueRef) -> {vals: [ValueRef], bcx: @block_ctxt} { let ccx = bcx.fcx.lcx.ccx; let ty_param_substs = ty::node_id_to_type_params(ccx.tcx, pat_id); let blobptr = val; let variants = ty::tag_variants(ccx.tcx, vdefs.tg); let args = []; let size = vec::len(ty::tag_variant_with_id(ccx.tcx, vdefs.tg, vdefs.var).args); if size > 0u && vec::len(variants) != 1u { let tagptr = PointerCast(bcx, val, trans_common::T_opaque_tag_ptr(ccx.tn)); blobptr = GEP(bcx, tagptr, [C_int(0), C_int(1)]); } let i = 0u; let vdefs_tg = vdefs.tg; let vdefs_var = vdefs.var; while i < size { check (valid_variant_index(i, bcx, vdefs_tg, vdefs_var)); let r = trans::GEP_tag(bcx, blobptr, vdefs_tg, vdefs_var, ty_param_substs, i); bcx = r.bcx; args += [r.val]; i += 1u; } ret {vals: args, bcx: bcx}; } fn collect_record_fields(m: match, col: uint) -> [ast::ident] { 
let fields = []; for br: match_branch in m { alt br.pats[col].node { ast::pat_rec(fs, _) { for f: ast::field_pat in fs { if !vec::any(bind str::eq(f.ident, _), fields) { fields += [f.ident]; } } } _ { } } } ret fields; } fn any_box_pat(m: match, col: uint) -> bool { for br: match_branch in m { alt br.pats[col].node { ast::pat_box(_) { ret true; } _ { } } } ret false; } fn any_tup_pat(m: match, col: uint) -> bool { for br: match_branch in m { alt br.pats[col].node { ast::pat_tup(_) { ret true; } _ { } } } ret false; } type exit_node = {bound: bind_map, from: BasicBlockRef, to: BasicBlockRef}; type mk_fail = fn() -> BasicBlockRef; fn pick_col(m: match) -> uint { let scores = vec::init_elt_mut(0u, vec::len(m[0].pats)); for br: match_branch in m { let i = 0u; for p: @ast::pat in br.pats { alt p.node { ast::pat_lit(_) | ast::pat_tag(_, _) { scores[i] += 1u; } _ { } } i += 1u; } } let max_score = 0u; let best_col = 0u; let i = 0u; for score: uint in scores { // Irrefutable columns always go first, they'd only be duplicated in // the branches. if score == 0u { ret i; } // If no irrefutable ones are found, we pick the one with the biggest // branching factor. if score > max_score { max_score = score; best_col = i; } i += 1u; } ret best_col; } fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail, &exits: [exit_node]) { if vec::len(m) == 0u { Br(bcx, f()); ret; } if vec::len(m[0].pats) == 0u { let data = m[0].data; alt data.guard { some(e) { let guard_cx = new_scope_block_ctxt(bcx, "submatch_guard"); let next_cx = new_sub_block_ctxt(bcx, "submatch_next"); let else_cx = new_sub_block_ctxt(bcx, "submatch_else"); Br(bcx, guard_cx.llbb); // Temporarily set bindings. They'll be rewritten to PHI nodes for // the actual arm block. 
for each @{key: key, val: val} in data.id_map.items() { bcx.fcx.lllocals.insert(val, option::get(assoc(key, m[0].bound))); } let {bcx: guard_bcx, val: guard_val} = trans::trans_expr(guard_cx, e); guard_bcx = trans::trans_block_cleanups(guard_bcx, guard_cx); CondBr(guard_bcx, guard_val, next_cx.llbb, else_cx.llbb); compile_submatch(else_cx, vec::slice(m, 1u, vec::len(m)), vals, f, exits); bcx = next_cx; } _ { } } exits += [{bound: m[0].bound, from: bcx.llbb, to: data.body}]; Br(bcx, data.body); ret; } let col = pick_col(m); let val = vals[col]; let vals_left = vec::slice(vals, 0u, col) + vec::slice(vals, col + 1u, vec::len(vals)); let ccx = bcx.fcx.lcx.ccx; let pat_id = 0; for br: match_branch in m { // Find a real id (we're adding placeholder wildcard patterns, but // each column is guaranteed to have at least one real pattern) if pat_id == 0 { pat_id = br.pats[col].id; } } let rec_fields = collect_record_fields(m, col); // Separate path for extracting and binding record fields if vec::len(rec_fields) > 0u { let rec_ty = ty::node_id_to_monotype(ccx.tcx, pat_id); let fields = alt ty::struct(ccx.tcx, rec_ty) { ty::ty_rec(fields) { fields } }; let rec_vals = []; for field_name: ast::ident in rec_fields { let ix: uint = ty::field_idx(ccx.sess, dummy_sp(), field_name, fields); let r = trans::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]); rec_vals += [r.val]; bcx = r.bcx; } compile_submatch(bcx, enter_rec(m, col, rec_fields, val), rec_vals + vals_left, f, exits); ret; } if any_tup_pat(m, col) { let tup_ty = ty::node_id_to_monotype(ccx.tcx, pat_id); let n_tup_elts = alt ty::struct(ccx.tcx, tup_ty) { ty::ty_tup(elts) { vec::len(elts) } }; let tup_vals = [], i = 0u; while i < n_tup_elts { let r = trans::GEP_tup_like(bcx, tup_ty, val, [0, i as int]); tup_vals += [r.val]; bcx = r.bcx; i += 1u; } compile_submatch(bcx, enter_tup(m, col, val, n_tup_elts), tup_vals + vals_left, f, exits); ret; } // Unbox in case of a box field if any_box_pat(m, col) { let box = Load(bcx, val); 
let unboxed = InBoundsGEP(bcx, box, [C_int(0), C_int(back::abi::box_rc_field_body)]); compile_submatch(bcx, enter_box(m, col, val), [unboxed] + vals_left, f, exits); ret; } // Decide what kind of branch we need let opts = get_options(ccx, m, col); tag branch_kind { no_branch; single; switch; compare; } let kind = no_branch; let test_val = val; if vec::len(opts) > 0u { alt opts[0] { var(_, vdef) { if vec::len(ty::tag_variants(ccx.tcx, vdef.tg)) == 1u { kind = single; } else { let tagptr = PointerCast(bcx, val, trans_common::T_opaque_tag_ptr(ccx.tn)); let discrimptr = GEP(bcx, tagptr, [C_int(0), C_int(0)]); test_val = Load(bcx, discrimptr); kind = switch; } } lit(l) { kind = alt l.node { ast::lit_str(_) { compare } _ { test_val = Load(bcx, val); switch } }; } } } let else_cx = alt kind { no_branch. | single. { bcx } _ { new_sub_block_ctxt(bcx, "match_else") } }; let sw = if kind == switch { Switch(bcx, test_val, else_cx.llbb, vec::len(opts)) } else { C_int(0) }; // Placeholder for when not using a switch // Compile subtrees for each option for opt: opt in opts { let opt_cx = new_sub_block_ctxt(bcx, "match_case"); alt kind { single. { Br(bcx, opt_cx.llbb); } switch. { let r = trans_opt(bcx, opt); bcx = r.bcx; llvm::LLVMAddCase(sw, r.val, opt_cx.llbb); } compare. 
{ let compare_cx = new_scope_block_ctxt(bcx, "compare_scope"); Br(bcx, compare_cx.llbb); bcx = compare_cx; let r = trans_opt(bcx, opt); bcx = r.bcx; let t = ty::node_id_to_type(ccx.tcx, pat_id); let eq = trans::trans_compare(bcx, ast::eq, test_val, t, r.val, t); let cleanup_cx = trans::trans_block_cleanups(bcx, compare_cx); bcx = new_sub_block_ctxt(bcx, "compare_next"); CondBr(cleanup_cx, eq.val, opt_cx.llbb, bcx.llbb); } _ { } } let size = 0u; let unpacked = []; alt opt { var(_, vdef) { let args = extract_variant_args(opt_cx, pat_id, vdef, val); size = vec::len(args.vals); unpacked = args.vals; opt_cx = args.bcx; } lit(_) { } } compile_submatch(opt_cx, enter_opt(ccx, m, opt, col, size, val), unpacked + vals_left, f, exits); } // Compile the fall-through case if kind == compare { Br(bcx, else_cx.llbb); } if kind != single { compile_submatch(else_cx, enter_default(m, col, val), vals_left, f, exits); } } // Returns false for unreachable blocks fn make_phi_bindings(bcx: @block_ctxt, map: [exit_node], ids: ast_util::pat_id_map) -> bool { let our_block = bcx.llbb as uint; let success = true; for each @{key: name, val: node_id} in ids.items() { let llbbs = []; let vals = []; for ex: exit_node in map { if ex.to as uint == our_block { alt assoc(name, ex.bound) { some(val) { llbbs += [ex.from]; vals += [val]; } none. 
{ } } } } if vec::len(vals) > 0u { let local = Phi(bcx, val_ty(vals[0]), vals, llbbs); bcx.fcx.lllocals.insert(node_id, local); } else { success = false; } } if success { // Copy references that the alias analysis considered unsafe for each @{val: node_id, _} in ids.items() { if bcx_ccx(bcx).copy_map.contains_key(node_id) { let local = bcx.fcx.lllocals.get(node_id); let e_ty = ty::node_id_to_type(bcx_tcx(bcx), node_id); let {bcx: abcx, val: alloc} = trans::alloc_ty(bcx, e_ty); bcx = trans::copy_val(abcx, trans::INIT, alloc, load_if_immediate(abcx, local, e_ty), e_ty); add_clean(bcx, alloc, e_ty); bcx.fcx.lllocals.insert(node_id, alloc); } } } ret success; } fn trans_alt(cx: @block_ctxt, expr: @ast::expr, arms: [ast::arm], output: trans::out_method) -> result { let bodies = []; let match: match = []; let er = trans::trans_expr(cx, expr); if ty::type_is_bot(bcx_tcx(cx), ty::expr_ty(bcx_tcx(cx), expr)) { // No need to generate code for alt, // since the disc diverges. if !is_terminated(cx) { ret rslt(cx, Unreachable(cx)); } else { ret er; } } for a: ast::arm in arms { let body = new_scope_block_ctxt(cx, "case_body"); let id_map = ast_util::pat_id_map(a.pats[0]); bodies += [body]; for p: @ast::pat in a.pats { match += [@{pats: [p], bound: [], data: @{body: body.llbb, guard: a.guard, id_map: id_map}}]; } } // Cached fail-on-fallthrough block let fail_cx = @mutable none; fn mk_fail(cx: @block_ctxt, sp: span, done: @mutable option::t<BasicBlockRef>) -> BasicBlockRef { alt *done { some(bb) { ret bb; } _ { } } let fail_cx = new_sub_block_ctxt(cx, "case_fallthrough"); trans::trans_fail(fail_cx, some(sp), "non-exhaustive match failure");; *done = some(fail_cx.llbb); ret fail_cx.llbb; } let exit_map = []; let t = trans::node_id_type(cx.fcx.lcx.ccx, expr.id); let vr = trans::spill_if_immediate(er.bcx, er.val, t); compile_submatch(vr.bcx, match, [vr.val], bind mk_fail(cx, expr.span, fail_cx), exit_map); let i = 0u; let arm_results = []; for a: ast::arm in arms { let body_cx = 
bodies[i]; if make_phi_bindings(body_cx, exit_map, ast_util::pat_id_map(a.pats[0])) { let block_res = trans::trans_block(body_cx, a.body, output); arm_results += [block_res]; } else { // Unreachable arm_results += [rslt(body_cx, C_nil())]; } i += 1u; } ret rslt(trans::join_branches(cx, arm_results), C_nil()); } // Not alt-related, but similar to the pattern-munging code above fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef, table: hashmap<ast::node_id, ValueRef>, make_copy: bool) -> @block_ctxt { let ccx = bcx.fcx.lcx.ccx; alt pat.node { ast::pat_bind(_) { if make_copy || ccx.copy_map.contains_key(pat.id) { let ty = ty::node_id_to_monotype(ccx.tcx, pat.id); // FIXME: Could constrain pat_bind to make this // check unnecessary. check (type_has_static_size(ccx, ty)); let llty = trans::type_of(ccx, pat.span, ty); let alloc = trans::alloca(bcx, llty); bcx = trans::copy_val(bcx, trans::INIT, alloc, trans::load_if_immediate(bcx, val, ty), ty); table.insert(pat.id, alloc); trans_common::add_clean(bcx, alloc, ty); } else { table.insert(pat.id, val); } } ast::pat_tag(_, sub) { if vec::len(sub) == 0u { ret bcx; } let vdefs = ast_util::variant_def_ids(ccx.tcx.def_map.get(pat.id)); let args = extract_variant_args(bcx, pat.id, vdefs, val); let i = 0; for argval: ValueRef in args.vals { bcx = bind_irrefutable_pat(bcx, sub[i], argval, table, make_copy); i += 1; } } ast::pat_rec(fields, _) { let rec_ty = ty::node_id_to_monotype(ccx.tcx, pat.id); let rec_fields = alt ty::struct(ccx.tcx, rec_ty) { ty::ty_rec(fields) { fields } }; for f: ast::field_pat in fields { let ix: uint = ty::field_idx(ccx.sess, pat.span, f.ident, rec_fields); let r = trans::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]); bcx = bind_irrefutable_pat(r.bcx, f.pat, r.val, table, make_copy); } } ast::pat_tup(elems) { let tup_ty = ty::node_id_to_monotype(ccx.tcx, pat.id); let i = 0u; for elem in elems { let r = trans::GEP_tup_like(bcx, tup_ty, val, [0, i as int]); bcx = 
bind_irrefutable_pat(r.bcx, elem, r.val, table, make_copy); i += 1u; } } ast::pat_box(inner) { let box = Load(bcx, val); let unboxed = InBoundsGEP(bcx, box, [C_int(0), C_int(back::abi::box_rc_field_body)]); bcx = bind_irrefutable_pat(bcx, inner, unboxed, table, true); } ast::pat_wild. | ast::pat_lit(_) { } } ret bcx; } // Local Variables: // fill-column: 78; // indent-tabs-mode: nil // c-basic-offset: 4 // buffer-file-coding-system: utf-8-unix // compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'"; // End: Added an extra check in trans_alt Gratuitous right now, but I'm going to change the type of trans::type_of import std::{str, vec, option}; import option::{some, none}; import std::map::hashmap; import lib::llvm::llvm; import lib::llvm::llvm::{ValueRef, TypeRef, BasicBlockRef}; import trans_build::*; import trans::{new_sub_block_ctxt, new_scope_block_ctxt, load_if_immediate}; import ty::pat_ty; import syntax::ast; import syntax::ast_util; import syntax::ast_util::dummy_sp; import syntax::ast::def_id; import syntax::codemap::span; import util::common::lit_eq; import trans_common::*; // An option identifying a branch (either a literal or a tag variant) tag opt { lit(@ast::lit); var(/* variant id */uint, /* variant dids */{tg: def_id, var: def_id}); } fn opt_eq(a: opt, b: opt) -> bool { alt a { lit(la) { ret alt b { lit(lb) { lit_eq(la, lb) } var(_, _) { false } }; } var(ida, _) { ret alt b { lit(_) { false } var(idb, _) { ida == idb } }; } } } fn trans_opt(bcx: @block_ctxt, o: opt) -> result { alt o { lit(l) { ret trans::trans_lit(bcx, *l); } var(id, _) { ret rslt(bcx, C_int(id as int)); } } } fn variant_opt(ccx: @crate_ctxt, pat_id: ast::node_id) -> opt { let vdef = ast_util::variant_def_ids(ccx.tcx.def_map.get(pat_id)); let variants = ty::tag_variants(ccx.tcx, vdef.tg); let i = 0u; for v: ty::variant_info in variants { if vdef.var == v.id { ret var(i, vdef); } i += 1u; } fail; } type bind_map = [{ident: ast::ident, val: ValueRef}]; fn 
assoc(key: str, list: bind_map) -> option::t<ValueRef> { for elt: {ident: ast::ident, val: ValueRef} in list { if str::eq(elt.ident, key) { ret some(elt.val); } } ret none; } type match_branch = @{pats: [@ast::pat], bound: bind_map, data: @{body: BasicBlockRef, guard: option::t<@ast::expr>, id_map: ast_util::pat_id_map}}; type match = [match_branch]; fn matches_always(p: @ast::pat) -> bool { ret alt p.node { ast::pat_wild. { true } ast::pat_bind(_) { true } ast::pat_rec(_, _) { true } ast::pat_tup(_) { true } _ { false } }; } type enter_pat = fn(@ast::pat) -> option::t<[@ast::pat]>; fn enter_match(m: match, col: uint, val: ValueRef, e: enter_pat) -> match { let result = []; for br: match_branch in m { alt e(br.pats[col]) { some(sub) { let pats = vec::slice(br.pats, 0u, col) + sub + vec::slice(br.pats, col + 1u, vec::len(br.pats)); let new_br = @{pats: pats, bound: alt br.pats[col].node { ast::pat_bind(name) { br.bound + [{ident: name, val: val}] } _ { br.bound } } with *br}; result += [new_br]; } none. 
{ } } } ret result; } fn enter_default(m: match, col: uint, val: ValueRef) -> match { fn e(p: @ast::pat) -> option::t<[@ast::pat]> { ret if matches_always(p) { some([]) } else { none }; } ret enter_match(m, col, val, e); } fn enter_opt(ccx: @crate_ctxt, m: match, opt: opt, col: uint, tag_size: uint, val: ValueRef) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; fn e(ccx: @crate_ctxt, dummy: @ast::pat, opt: opt, size: uint, p: @ast::pat) -> option::t<[@ast::pat]> { alt p.node { ast::pat_tag(ctor, subpats) { ret if opt_eq(variant_opt(ccx, p.id), opt) { some(subpats) } else { none }; } ast::pat_lit(l) { ret if opt_eq(lit(l), opt) { some([]) } else { none }; } _ { ret some(vec::init_elt(dummy, size)); } } } ret enter_match(m, col, val, bind e(ccx, dummy, opt, tag_size, _)); } fn enter_rec(m: match, col: uint, fields: [ast::ident], val: ValueRef) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; fn e(dummy: @ast::pat, fields: [ast::ident], p: @ast::pat) -> option::t<[@ast::pat]> { alt p.node { ast::pat_rec(fpats, _) { let pats = []; for fname: ast::ident in fields { let pat = dummy; for fpat: ast::field_pat in fpats { if str::eq(fpat.ident, fname) { pat = fpat.pat; break; } } pats += [pat]; } ret some(pats); } _ { ret some(vec::init_elt(dummy, vec::len(fields))); } } } ret enter_match(m, col, val, bind e(dummy, fields, _)); } fn enter_tup(m: match, col: uint, val: ValueRef, n_elts: uint) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; fn e(dummy: @ast::pat, n_elts: uint, p: @ast::pat) -> option::t<[@ast::pat]> { alt p.node { ast::pat_tup(elts) { ret some(elts); } _ { ret some(vec::init_elt(dummy, n_elts)); } } } ret enter_match(m, col, val, bind e(dummy, n_elts, _)); } fn enter_box(m: match, col: uint, val: ValueRef) -> match { let dummy = @{id: 0, node: ast::pat_wild, span: dummy_sp()}; fn e(dummy: @ast::pat, p: @ast::pat) -> option::t<[@ast::pat]> { alt p.node { ast::pat_box(sub) { ret some([sub]); 
} _ { ret some([dummy]); } } } ret enter_match(m, col, val, bind e(dummy, _)); } fn get_options(ccx: @crate_ctxt, m: match, col: uint) -> [opt] { fn add_to_set(&set: [opt], val: opt) { for l: opt in set { if opt_eq(l, val) { ret; } } set += [val]; } let found = []; for br: match_branch in m { alt br.pats[col].node { ast::pat_lit(l) { add_to_set(found, lit(l)); } ast::pat_tag(_, _) { add_to_set(found, variant_opt(ccx, br.pats[col].id)); } _ { } } } ret found; } fn extract_variant_args(bcx: @block_ctxt, pat_id: ast::node_id, vdefs: {tg: def_id, var: def_id}, val: ValueRef) -> {vals: [ValueRef], bcx: @block_ctxt} { let ccx = bcx.fcx.lcx.ccx; let ty_param_substs = ty::node_id_to_type_params(ccx.tcx, pat_id); let blobptr = val; let variants = ty::tag_variants(ccx.tcx, vdefs.tg); let args = []; let size = vec::len(ty::tag_variant_with_id(ccx.tcx, vdefs.tg, vdefs.var).args); if size > 0u && vec::len(variants) != 1u { let tagptr = PointerCast(bcx, val, trans_common::T_opaque_tag_ptr(ccx.tn)); blobptr = GEP(bcx, tagptr, [C_int(0), C_int(1)]); } let i = 0u; let vdefs_tg = vdefs.tg; let vdefs_var = vdefs.var; while i < size { check (valid_variant_index(i, bcx, vdefs_tg, vdefs_var)); let r = trans::GEP_tag(bcx, blobptr, vdefs_tg, vdefs_var, ty_param_substs, i); bcx = r.bcx; args += [r.val]; i += 1u; } ret {vals: args, bcx: bcx}; } fn collect_record_fields(m: match, col: uint) -> [ast::ident] { let fields = []; for br: match_branch in m { alt br.pats[col].node { ast::pat_rec(fs, _) { for f: ast::field_pat in fs { if !vec::any(bind str::eq(f.ident, _), fields) { fields += [f.ident]; } } } _ { } } } ret fields; } fn any_box_pat(m: match, col: uint) -> bool { for br: match_branch in m { alt br.pats[col].node { ast::pat_box(_) { ret true; } _ { } } } ret false; } fn any_tup_pat(m: match, col: uint) -> bool { for br: match_branch in m { alt br.pats[col].node { ast::pat_tup(_) { ret true; } _ { } } } ret false; } type exit_node = {bound: bind_map, from: BasicBlockRef, to: 
BasicBlockRef}; type mk_fail = fn() -> BasicBlockRef; fn pick_col(m: match) -> uint { let scores = vec::init_elt_mut(0u, vec::len(m[0].pats)); for br: match_branch in m { let i = 0u; for p: @ast::pat in br.pats { alt p.node { ast::pat_lit(_) | ast::pat_tag(_, _) { scores[i] += 1u; } _ { } } i += 1u; } } let max_score = 0u; let best_col = 0u; let i = 0u; for score: uint in scores { // Irrefutable columns always go first, they'd only be duplicated in // the branches. if score == 0u { ret i; } // If no irrefutable ones are found, we pick the one with the biggest // branching factor. if score > max_score { max_score = score; best_col = i; } i += 1u; } ret best_col; } fn compile_submatch(bcx: @block_ctxt, m: match, vals: [ValueRef], f: mk_fail, &exits: [exit_node]) { if vec::len(m) == 0u { Br(bcx, f()); ret; } if vec::len(m[0].pats) == 0u { let data = m[0].data; alt data.guard { some(e) { let guard_cx = new_scope_block_ctxt(bcx, "submatch_guard"); let next_cx = new_sub_block_ctxt(bcx, "submatch_next"); let else_cx = new_sub_block_ctxt(bcx, "submatch_else"); Br(bcx, guard_cx.llbb); // Temporarily set bindings. They'll be rewritten to PHI nodes for // the actual arm block. 
for each @{key: key, val: val} in data.id_map.items() { bcx.fcx.lllocals.insert(val, option::get(assoc(key, m[0].bound))); } let {bcx: guard_bcx, val: guard_val} = trans::trans_expr(guard_cx, e); guard_bcx = trans::trans_block_cleanups(guard_bcx, guard_cx); CondBr(guard_bcx, guard_val, next_cx.llbb, else_cx.llbb); compile_submatch(else_cx, vec::slice(m, 1u, vec::len(m)), vals, f, exits); bcx = next_cx; } _ { } } exits += [{bound: m[0].bound, from: bcx.llbb, to: data.body}]; Br(bcx, data.body); ret; } let col = pick_col(m); let val = vals[col]; let vals_left = vec::slice(vals, 0u, col) + vec::slice(vals, col + 1u, vec::len(vals)); let ccx = bcx.fcx.lcx.ccx; let pat_id = 0; for br: match_branch in m { // Find a real id (we're adding placeholder wildcard patterns, but // each column is guaranteed to have at least one real pattern) if pat_id == 0 { pat_id = br.pats[col].id; } } let rec_fields = collect_record_fields(m, col); // Separate path for extracting and binding record fields if vec::len(rec_fields) > 0u { let rec_ty = ty::node_id_to_monotype(ccx.tcx, pat_id); let fields = alt ty::struct(ccx.tcx, rec_ty) { ty::ty_rec(fields) { fields } }; let rec_vals = []; for field_name: ast::ident in rec_fields { let ix: uint = ty::field_idx(ccx.sess, dummy_sp(), field_name, fields); let r = trans::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]); rec_vals += [r.val]; bcx = r.bcx; } compile_submatch(bcx, enter_rec(m, col, rec_fields, val), rec_vals + vals_left, f, exits); ret; } if any_tup_pat(m, col) { let tup_ty = ty::node_id_to_monotype(ccx.tcx, pat_id); let n_tup_elts = alt ty::struct(ccx.tcx, tup_ty) { ty::ty_tup(elts) { vec::len(elts) } }; let tup_vals = [], i = 0u; while i < n_tup_elts { let r = trans::GEP_tup_like(bcx, tup_ty, val, [0, i as int]); tup_vals += [r.val]; bcx = r.bcx; i += 1u; } compile_submatch(bcx, enter_tup(m, col, val, n_tup_elts), tup_vals + vals_left, f, exits); ret; } // Unbox in case of a box field if any_box_pat(m, col) { let box = Load(bcx, val); 
let unboxed = InBoundsGEP(bcx, box, [C_int(0), C_int(back::abi::box_rc_field_body)]); compile_submatch(bcx, enter_box(m, col, val), [unboxed] + vals_left, f, exits); ret; } // Decide what kind of branch we need let opts = get_options(ccx, m, col); tag branch_kind { no_branch; single; switch; compare; } let kind = no_branch; let test_val = val; if vec::len(opts) > 0u { alt opts[0] { var(_, vdef) { if vec::len(ty::tag_variants(ccx.tcx, vdef.tg)) == 1u { kind = single; } else { let tagptr = PointerCast(bcx, val, trans_common::T_opaque_tag_ptr(ccx.tn)); let discrimptr = GEP(bcx, tagptr, [C_int(0), C_int(0)]); test_val = Load(bcx, discrimptr); kind = switch; } } lit(l) { kind = alt l.node { ast::lit_str(_) { compare } _ { test_val = Load(bcx, val); switch } }; } } } let else_cx = alt kind { no_branch. | single. { bcx } _ { new_sub_block_ctxt(bcx, "match_else") } }; let sw = if kind == switch { Switch(bcx, test_val, else_cx.llbb, vec::len(opts)) } else { C_int(0) }; // Placeholder for when not using a switch // Compile subtrees for each option for opt: opt in opts { let opt_cx = new_sub_block_ctxt(bcx, "match_case"); alt kind { single. { Br(bcx, opt_cx.llbb); } switch. { let r = trans_opt(bcx, opt); bcx = r.bcx; llvm::LLVMAddCase(sw, r.val, opt_cx.llbb); } compare. 
{ let compare_cx = new_scope_block_ctxt(bcx, "compare_scope"); Br(bcx, compare_cx.llbb); bcx = compare_cx; let r = trans_opt(bcx, opt); bcx = r.bcx; let t = ty::node_id_to_type(ccx.tcx, pat_id); let eq = trans::trans_compare(bcx, ast::eq, test_val, t, r.val, t); let cleanup_cx = trans::trans_block_cleanups(bcx, compare_cx); bcx = new_sub_block_ctxt(bcx, "compare_next"); CondBr(cleanup_cx, eq.val, opt_cx.llbb, bcx.llbb); } _ { } } let size = 0u; let unpacked = []; alt opt { var(_, vdef) { let args = extract_variant_args(opt_cx, pat_id, vdef, val); size = vec::len(args.vals); unpacked = args.vals; opt_cx = args.bcx; } lit(_) { } } compile_submatch(opt_cx, enter_opt(ccx, m, opt, col, size, val), unpacked + vals_left, f, exits); } // Compile the fall-through case if kind == compare { Br(bcx, else_cx.llbb); } if kind != single { compile_submatch(else_cx, enter_default(m, col, val), vals_left, f, exits); } } // Returns false for unreachable blocks fn make_phi_bindings(bcx: @block_ctxt, map: [exit_node], ids: ast_util::pat_id_map) -> bool { let our_block = bcx.llbb as uint; let success = true; for each @{key: name, val: node_id} in ids.items() { let llbbs = []; let vals = []; for ex: exit_node in map { if ex.to as uint == our_block { alt assoc(name, ex.bound) { some(val) { llbbs += [ex.from]; vals += [val]; } none. 
{ } } } } if vec::len(vals) > 0u { let local = Phi(bcx, val_ty(vals[0]), vals, llbbs); bcx.fcx.lllocals.insert(node_id, local); } else { success = false; } } if success { // Copy references that the alias analysis considered unsafe for each @{val: node_id, _} in ids.items() { if bcx_ccx(bcx).copy_map.contains_key(node_id) { let local = bcx.fcx.lllocals.get(node_id); let e_ty = ty::node_id_to_type(bcx_tcx(bcx), node_id); let {bcx: abcx, val: alloc} = trans::alloc_ty(bcx, e_ty); bcx = trans::copy_val(abcx, trans::INIT, alloc, load_if_immediate(abcx, local, e_ty), e_ty); add_clean(bcx, alloc, e_ty); bcx.fcx.lllocals.insert(node_id, alloc); } } } ret success; } fn trans_alt(cx: @block_ctxt, expr: @ast::expr, arms: [ast::arm], output: trans::out_method) -> result { let bodies = []; let match: match = []; let er = trans::trans_expr(cx, expr); if ty::type_is_bot(bcx_tcx(cx), ty::expr_ty(bcx_tcx(cx), expr)) { // No need to generate code for alt, // since the disc diverges. if !is_terminated(cx) { ret rslt(cx, Unreachable(cx)); } else { ret er; } } for a: ast::arm in arms { let body = new_scope_block_ctxt(cx, "case_body"); let id_map = ast_util::pat_id_map(a.pats[0]); bodies += [body]; for p: @ast::pat in a.pats { match += [@{pats: [p], bound: [], data: @{body: body.llbb, guard: a.guard, id_map: id_map}}]; } } // Cached fail-on-fallthrough block let fail_cx = @mutable none; fn mk_fail(cx: @block_ctxt, sp: span, done: @mutable option::t<BasicBlockRef>) -> BasicBlockRef { alt *done { some(bb) { ret bb; } _ { } } let fail_cx = new_sub_block_ctxt(cx, "case_fallthrough"); trans::trans_fail(fail_cx, some(sp), "non-exhaustive match failure");; *done = some(fail_cx.llbb); ret fail_cx.llbb; } let exit_map = []; let t = trans::node_id_type(cx.fcx.lcx.ccx, expr.id); let vr = trans::spill_if_immediate(er.bcx, er.val, t); compile_submatch(vr.bcx, match, [vr.val], bind mk_fail(cx, expr.span, fail_cx), exit_map); let i = 0u; let arm_results = []; for a: ast::arm in arms { let body_cx = 
bodies[i]; if make_phi_bindings(body_cx, exit_map, ast_util::pat_id_map(a.pats[0])) { let block_res = trans::trans_block(body_cx, a.body, output); arm_results += [block_res]; } else { // Unreachable arm_results += [rslt(body_cx, C_nil())]; } i += 1u; } ret rslt(trans::join_branches(cx, arm_results), C_nil()); } // Not alt-related, but similar to the pattern-munging code above fn bind_irrefutable_pat(bcx: @block_ctxt, pat: @ast::pat, val: ValueRef, table: hashmap<ast::node_id, ValueRef>, make_copy: bool) -> @block_ctxt { let ccx = bcx.fcx.lcx.ccx; alt pat.node { ast::pat_bind(_) { if make_copy || ccx.copy_map.contains_key(pat.id) { let ty = ty::node_id_to_monotype(ccx.tcx, pat.id); // FIXME: Could constrain pat_bind to make this // check unnecessary. check (type_has_static_size(ccx, ty)); check non_ty_var(ccx, ty); let llty = trans::type_of(ccx, pat.span, ty); let alloc = trans::alloca(bcx, llty); bcx = trans::copy_val(bcx, trans::INIT, alloc, trans::load_if_immediate(bcx, val, ty), ty); table.insert(pat.id, alloc); trans_common::add_clean(bcx, alloc, ty); } else { table.insert(pat.id, val); } } ast::pat_tag(_, sub) { if vec::len(sub) == 0u { ret bcx; } let vdefs = ast_util::variant_def_ids(ccx.tcx.def_map.get(pat.id)); let args = extract_variant_args(bcx, pat.id, vdefs, val); let i = 0; for argval: ValueRef in args.vals { bcx = bind_irrefutable_pat(bcx, sub[i], argval, table, make_copy); i += 1; } } ast::pat_rec(fields, _) { let rec_ty = ty::node_id_to_monotype(ccx.tcx, pat.id); let rec_fields = alt ty::struct(ccx.tcx, rec_ty) { ty::ty_rec(fields) { fields } }; for f: ast::field_pat in fields { let ix: uint = ty::field_idx(ccx.sess, pat.span, f.ident, rec_fields); let r = trans::GEP_tup_like(bcx, rec_ty, val, [0, ix as int]); bcx = bind_irrefutable_pat(r.bcx, f.pat, r.val, table, make_copy); } } ast::pat_tup(elems) { let tup_ty = ty::node_id_to_monotype(ccx.tcx, pat.id); let i = 0u; for elem in elems { let r = trans::GEP_tup_like(bcx, tup_ty, val, [0, i as int]); 
bcx = bind_irrefutable_pat(r.bcx, elem, r.val, table, make_copy); i += 1u; } } ast::pat_box(inner) { let box = Load(bcx, val); let unboxed = InBoundsGEP(bcx, box, [C_int(0), C_int(back::abi::box_rc_field_body)]); bcx = bind_irrefutable_pat(bcx, inner, unboxed, table, true); } ast::pat_wild. | ast::pat_lit(_) { } } ret bcx; } // Local Variables: // fill-column: 78; // indent-tabs-mode: nil // c-basic-offset: 4 // buffer-file-coding-system: utf-8-unix // compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'"; // End:
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use common::{Diagnostic, DiagnosticsResult, NamedItem}; use graphql_ir::{ FragmentDefinition, LinkedField, Program, ScalarField, Selection, Transformed, Transformer, ValidationMessage, }; use crate::connections::ConnectionInterface; use crate::handle_fields::{build_handle_field_directive, HandleFieldDirectiveValues}; use interner::{Intern, StringKey}; use lazy_static::lazy_static; use schema::{Schema, Type}; use std::sync::Arc; pub fn transform_declarative_connection( program: &Program, connection_interface: &ConnectionInterface, ) -> DiagnosticsResult<Program> { let mut transform = DeclarativeConnectionMutationTransform::new(program, connection_interface); let next_program = transform .transform_program(program) .replace_or_else(|| program.clone()); if transform.errors.is_empty() { Ok(next_program) } else { Err(transform.errors) } } lazy_static! 
{ static ref APPEND_EDGE: StringKey = "appendEdge".intern(); static ref APPEND_NODE: StringKey = "appendNode".intern(); static ref CONNECTIONS_ARG_NAME: StringKey = "connections".intern(); static ref DELETE_RECORD: StringKey = "deleteRecord".intern(); static ref DELETE_EDGE: StringKey = "deleteEdge".intern(); static ref PREPEND_EDGE: StringKey = "prependEdge".intern(); static ref PREPEND_NODE: StringKey = "prependNode".intern(); static ref EDGE_TYPENAME_ARG: StringKey = "edgeTypeName".intern(); } struct DeclarativeConnectionMutationTransform<'s, 'c> { program: &'s Program, errors: Vec<Diagnostic>, connection_interface: &'c ConnectionInterface, } impl<'s, 'c> DeclarativeConnectionMutationTransform<'s, 'c> { fn new(program: &'s Program, connection_interface: &'c ConnectionInterface) -> Self { Self { program, connection_interface, errors: vec![], } } } impl<'s, 'c> Transformer for DeclarativeConnectionMutationTransform<'s, 'c> { const NAME: &'static str = "DeclarativeConnectionMutationTransform"; const VISIT_ARGUMENTS: bool = false; const VISIT_DIRECTIVES: bool = false; fn transform_fragment(&mut self, _: &FragmentDefinition) -> Transformed<FragmentDefinition> { Transformed::Keep } fn transform_scalar_field(&mut self, field: &ScalarField) -> Transformed<Selection> { let linked_field_directive = field.directives.iter().find(|directive| { directive.name.item == *APPEND_EDGE || directive.name.item == *PREPEND_EDGE || directive.name.item == *APPEND_NODE || directive.name.item == *PREPEND_NODE }); if let Some(linked_field_directive) = linked_field_directive { self.errors.push(Diagnostic::error( ValidationMessage::ConnectionMutationDirectiveOnScalarField { directive_name: linked_field_directive.name.item, field_name: field.alias_or_name(&self.program.schema), }, field.definition.location, )); } let delete_directive = field.directives.iter().find(|directive| { directive.name.item == *DELETE_RECORD || directive.name.item == *DELETE_EDGE }); let field_definition = 
self.program.schema.field(field.definition.item); match delete_directive { None => Transformed::Keep, Some(delete_directive) => { let is_id = self.program.schema.is_id(field_definition.type_.inner()); if !is_id { self.errors.push(Diagnostic::error( ValidationMessage::DeleteRecordDirectiveOnUnsupportedType { directive_name: delete_directive.name.item, field_name: field.alias_or_name(&self.program.schema), current_type: self .program .schema .get_type_string(&field_definition.type_), }, delete_directive.name.location, )); Transformed::Keep } else { let connections_arg = delete_directive.arguments.named(*CONNECTIONS_ARG_NAME); let handle_directive = build_handle_field_directive(HandleFieldDirectiveValues { handle: delete_directive.name.item, key: "".intern(), dynamic_key: None, filters: None, handle_args: if let Some(connections_arg) = connections_arg { Some(vec![connections_arg.clone()]) } else { None }, }); let mut next_directives: Vec<_> = field .directives .iter() .filter(|directive| directive.name.item != *DELETE_RECORD) .cloned() .collect(); next_directives.push(handle_directive); Transformed::Replace(Selection::ScalarField(Arc::new(ScalarField { directives: next_directives, ..field.clone() }))) } } } } fn transform_linked_field(&mut self, field: &LinkedField) -> Transformed<Selection> { let transformed_field = self.default_transform_linked_field(field); let delete_directive = field.directives.named(*DELETE_RECORD); if let Some(delete_directive) = delete_directive { self.errors.push(Diagnostic::error( ValidationMessage::DeleteRecordDirectiveOnLinkedField { directive_name: delete_directive.name.item, field_name: field.alias_or_name(&self.program.schema), }, field.definition.location, )); } let edge_directive = field.directives.iter().find(|directive| { directive.name.item == *APPEND_EDGE || directive.name.item == *PREPEND_EDGE }); let node_directive = field.directives.iter().find(|directive| { directive.name.item == *APPEND_NODE || directive.name.item == 
*PREPEND_NODE }); match (edge_directive, node_directive) { (Some(edge_directive), Some(node_directive)) => { self.errors.push(Diagnostic::error( ValidationMessage::ConflictingEdgeAndNodeDirectives { edge_directive_name: edge_directive.name.item, node_directive_name: node_directive.name.item, field_name: field.alias_or_name(&self.program.schema), }, edge_directive.name.location, )); transformed_field } (None, None) => transformed_field, (Some(edge_directive), None) => { let connections_arg = edge_directive.arguments.named(*CONNECTIONS_ARG_NAME); match connections_arg { None => { self.errors.push(Diagnostic::error( ValidationMessage::ConnectionsArgumentRequired { directive_name: edge_directive.name.item, }, edge_directive.name.location, )); transformed_field } Some(connections_arg) => { let field_definition = self.program.schema.field(field.definition.item); let mut has_cursor_field = false; let mut has_node_field = false; if let Type::Object(id) = field_definition.type_.inner() { let object = self.program.schema.object(id); for field_id in &object.fields { let current_field = self.program.schema.field(*field_id); if current_field.name == self.connection_interface.cursor { has_cursor_field = true; } else if current_field.name == self.connection_interface.node { has_node_field = true; } } } if has_cursor_field && has_node_field { let handle_directive = build_handle_field_directive(HandleFieldDirectiveValues { handle: edge_directive.name.item, key: "".intern(), dynamic_key: None, filters: None, handle_args: Some(vec![connections_arg.clone()]), }); let mut next_field = match transformed_field { Transformed::Replace(Selection::LinkedField(linked_field)) => { (*linked_field).clone() } Transformed::Keep => field.clone(), _ => panic!( "DeclarativeConnection got unexpected transform result: `{:?}`.", transformed_field ), }; let index = next_field .directives .iter() .position(|directive| { directive.name.item == edge_directive.name.item }) .expect("Expected the edge 
directive to exist."); next_field.directives[index] = handle_directive; Transformed::Replace(Selection::LinkedField(Arc::new(next_field))) } else { self.errors.push(Diagnostic::error( ValidationMessage::EdgeDirectiveOnUnsupportedType { directive_name: edge_directive.name.item, field_name: field.alias_or_name(&self.program.schema), }, edge_directive.name.location, )); Transformed::Keep } } } } (None, Some(node_directive)) => { let connections_arg = node_directive.arguments.named(*CONNECTIONS_ARG_NAME); match connections_arg { None => { self.errors.push(Diagnostic::error( ValidationMessage::ConnectionsArgumentRequired { directive_name: node_directive.name.item, }, node_directive.name.location, )); transformed_field } Some(connections_arg) => { let edge_typename_arg = node_directive.arguments.named(*EDGE_TYPENAME_ARG); if let Some(edge_typename_arg) = edge_typename_arg { let field_definition = self.program.schema.field(field.definition.item); match field_definition.type_.inner() { Type::Object(_) | Type::Interface(_) => { let handle_directive = build_handle_field_directive(HandleFieldDirectiveValues { handle: node_directive.name.item, key: "".intern(), dynamic_key: None, filters: None, handle_args: Some(vec![ connections_arg.clone(), edge_typename_arg.clone(), ]), }); let mut next_field = match transformed_field { Transformed::Replace(Selection::LinkedField( linked_field, )) => (*linked_field).clone(), Transformed::Keep => field.clone(), _ => panic!( "DeclarativeConnection got unexpected transform result: `{:?}`.", transformed_field ), }; let index = next_field .directives .iter() .position(|directive| { directive.name.item == node_directive.name.item }) .expect("Expected the edge directive to exist."); next_field.directives[index] = handle_directive; Transformed::Replace(Selection::LinkedField(Arc::new( next_field, ))) } _ => { self.errors.push(Diagnostic::error( ValidationMessage::NodeDirectiveOnUnsupportedType { directive_name: node_directive.name.item, field_name: 
field.alias_or_name(&self.program.schema), current_type: self .program .schema .get_type_string(&field_definition.type_), }, node_directive.name.location, )); Transformed::Keep } } } else { self.errors.push(Diagnostic::error( ValidationMessage::NodeDirectiveMissesRequiredEdgeTypeName { directive_name: node_directive.name.item, field_name: field.alias_or_name(&self.program.schema), }, node_directive.name.location, )); Transformed::Keep } } } } } } } Allow @appendEdge @prependNode on unions Reviewed By: alunyov Differential Revision: D30852008 fbshipit-source-id: 9d9c1235c7c9c986a4364601c98646b03f3e5cb5 /* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use common::{Diagnostic, DiagnosticsResult, NamedItem}; use graphql_ir::{ FragmentDefinition, LinkedField, Program, ScalarField, Selection, Transformed, Transformer, ValidationMessage, }; use crate::connections::ConnectionInterface; use crate::handle_fields::{build_handle_field_directive, HandleFieldDirectiveValues}; use interner::{Intern, StringKey}; use lazy_static::lazy_static; use schema::{Schema, Type}; use std::sync::Arc; pub fn transform_declarative_connection( program: &Program, connection_interface: &ConnectionInterface, ) -> DiagnosticsResult<Program> { let mut transform = DeclarativeConnectionMutationTransform::new(program, connection_interface); let next_program = transform .transform_program(program) .replace_or_else(|| program.clone()); if transform.errors.is_empty() { Ok(next_program) } else { Err(transform.errors) } } lazy_static! 
{ static ref APPEND_EDGE: StringKey = "appendEdge".intern(); static ref APPEND_NODE: StringKey = "appendNode".intern(); static ref CONNECTIONS_ARG_NAME: StringKey = "connections".intern(); static ref DELETE_RECORD: StringKey = "deleteRecord".intern(); static ref DELETE_EDGE: StringKey = "deleteEdge".intern(); static ref PREPEND_EDGE: StringKey = "prependEdge".intern(); static ref PREPEND_NODE: StringKey = "prependNode".intern(); static ref EDGE_TYPENAME_ARG: StringKey = "edgeTypeName".intern(); } struct DeclarativeConnectionMutationTransform<'s, 'c> { program: &'s Program, errors: Vec<Diagnostic>, connection_interface: &'c ConnectionInterface, } impl<'s, 'c> DeclarativeConnectionMutationTransform<'s, 'c> { fn new(program: &'s Program, connection_interface: &'c ConnectionInterface) -> Self { Self { program, connection_interface, errors: vec![], } } } impl<'s, 'c> Transformer for DeclarativeConnectionMutationTransform<'s, 'c> { const NAME: &'static str = "DeclarativeConnectionMutationTransform"; const VISIT_ARGUMENTS: bool = false; const VISIT_DIRECTIVES: bool = false; fn transform_fragment(&mut self, _: &FragmentDefinition) -> Transformed<FragmentDefinition> { Transformed::Keep } fn transform_scalar_field(&mut self, field: &ScalarField) -> Transformed<Selection> { let linked_field_directive = field.directives.iter().find(|directive| { directive.name.item == *APPEND_EDGE || directive.name.item == *PREPEND_EDGE || directive.name.item == *APPEND_NODE || directive.name.item == *PREPEND_NODE }); if let Some(linked_field_directive) = linked_field_directive { self.errors.push(Diagnostic::error( ValidationMessage::ConnectionMutationDirectiveOnScalarField { directive_name: linked_field_directive.name.item, field_name: field.alias_or_name(&self.program.schema), }, field.definition.location, )); } let delete_directive = field.directives.iter().find(|directive| { directive.name.item == *DELETE_RECORD || directive.name.item == *DELETE_EDGE }); let field_definition = 
self.program.schema.field(field.definition.item); match delete_directive { None => Transformed::Keep, Some(delete_directive) => { let is_id = self.program.schema.is_id(field_definition.type_.inner()); if !is_id { self.errors.push(Diagnostic::error( ValidationMessage::DeleteRecordDirectiveOnUnsupportedType { directive_name: delete_directive.name.item, field_name: field.alias_or_name(&self.program.schema), current_type: self .program .schema .get_type_string(&field_definition.type_), }, delete_directive.name.location, )); Transformed::Keep } else { let connections_arg = delete_directive.arguments.named(*CONNECTIONS_ARG_NAME); let handle_directive = build_handle_field_directive(HandleFieldDirectiveValues { handle: delete_directive.name.item, key: "".intern(), dynamic_key: None, filters: None, handle_args: if let Some(connections_arg) = connections_arg { Some(vec![connections_arg.clone()]) } else { None }, }); let mut next_directives: Vec<_> = field .directives .iter() .filter(|directive| directive.name.item != *DELETE_RECORD) .cloned() .collect(); next_directives.push(handle_directive); Transformed::Replace(Selection::ScalarField(Arc::new(ScalarField { directives: next_directives, ..field.clone() }))) } } } } fn transform_linked_field(&mut self, field: &LinkedField) -> Transformed<Selection> { let transformed_field = self.default_transform_linked_field(field); let delete_directive = field.directives.named(*DELETE_RECORD); if let Some(delete_directive) = delete_directive { self.errors.push(Diagnostic::error( ValidationMessage::DeleteRecordDirectiveOnLinkedField { directive_name: delete_directive.name.item, field_name: field.alias_or_name(&self.program.schema), }, field.definition.location, )); } let edge_directive = field.directives.iter().find(|directive| { directive.name.item == *APPEND_EDGE || directive.name.item == *PREPEND_EDGE }); let node_directive = field.directives.iter().find(|directive| { directive.name.item == *APPEND_NODE || directive.name.item == 
*PREPEND_NODE }); match (edge_directive, node_directive) { (Some(edge_directive), Some(node_directive)) => { self.errors.push(Diagnostic::error( ValidationMessage::ConflictingEdgeAndNodeDirectives { edge_directive_name: edge_directive.name.item, node_directive_name: node_directive.name.item, field_name: field.alias_or_name(&self.program.schema), }, edge_directive.name.location, )); transformed_field } (None, None) => transformed_field, (Some(edge_directive), None) => { let connections_arg = edge_directive.arguments.named(*CONNECTIONS_ARG_NAME); match connections_arg { None => { self.errors.push(Diagnostic::error( ValidationMessage::ConnectionsArgumentRequired { directive_name: edge_directive.name.item, }, edge_directive.name.location, )); transformed_field } Some(connections_arg) => { let field_definition = self.program.schema.field(field.definition.item); let mut has_cursor_field = false; let mut has_node_field = false; if let Type::Object(id) = field_definition.type_.inner() { let object = self.program.schema.object(id); for field_id in &object.fields { let current_field = self.program.schema.field(*field_id); if current_field.name == self.connection_interface.cursor { has_cursor_field = true; } else if current_field.name == self.connection_interface.node { has_node_field = true; } } } if has_cursor_field && has_node_field { let handle_directive = build_handle_field_directive(HandleFieldDirectiveValues { handle: edge_directive.name.item, key: "".intern(), dynamic_key: None, filters: None, handle_args: Some(vec![connections_arg.clone()]), }); let mut next_field = match transformed_field { Transformed::Replace(Selection::LinkedField(linked_field)) => { (*linked_field).clone() } Transformed::Keep => field.clone(), _ => panic!( "DeclarativeConnection got unexpected transform result: `{:?}`.", transformed_field ), }; let index = next_field .directives .iter() .position(|directive| { directive.name.item == edge_directive.name.item }) .expect("Expected the edge 
directive to exist."); next_field.directives[index] = handle_directive; Transformed::Replace(Selection::LinkedField(Arc::new(next_field))) } else { self.errors.push(Diagnostic::error( ValidationMessage::EdgeDirectiveOnUnsupportedType { directive_name: edge_directive.name.item, field_name: field.alias_or_name(&self.program.schema), }, edge_directive.name.location, )); Transformed::Keep } } } } (None, Some(node_directive)) => { let connections_arg = node_directive.arguments.named(*CONNECTIONS_ARG_NAME); match connections_arg { None => { self.errors.push(Diagnostic::error( ValidationMessage::ConnectionsArgumentRequired { directive_name: node_directive.name.item, }, node_directive.name.location, )); transformed_field } Some(connections_arg) => { let edge_typename_arg = node_directive.arguments.named(*EDGE_TYPENAME_ARG); if let Some(edge_typename_arg) = edge_typename_arg { let field_definition = self.program.schema.field(field.definition.item); match field_definition.type_.inner() { Type::Object(_) | Type::Interface(_) | Type::Union(_) => { let handle_directive = build_handle_field_directive(HandleFieldDirectiveValues { handle: node_directive.name.item, key: "".intern(), dynamic_key: None, filters: None, handle_args: Some(vec![ connections_arg.clone(), edge_typename_arg.clone(), ]), }); let mut next_field = match transformed_field { Transformed::Replace(Selection::LinkedField( linked_field, )) => (*linked_field).clone(), Transformed::Keep => field.clone(), _ => panic!( "DeclarativeConnection got unexpected transform result: `{:?}`.", transformed_field ), }; let index = next_field .directives .iter() .position(|directive| { directive.name.item == node_directive.name.item }) .expect("Expected the edge directive to exist."); next_field.directives[index] = handle_directive; Transformed::Replace(Selection::LinkedField(Arc::new( next_field, ))) } _ => { self.errors.push(Diagnostic::error( ValidationMessage::NodeDirectiveOnUnsupportedType { directive_name: 
node_directive.name.item, field_name: field.alias_or_name(&self.program.schema), current_type: self .program .schema .get_type_string(&field_definition.type_), }, node_directive.name.location, )); Transformed::Keep } } } else { self.errors.push(Diagnostic::error( ValidationMessage::NodeDirectiveMissesRequiredEdgeTypeName { directive_name: node_directive.name.item, field_name: field.alias_or_name(&self.program.schema), }, node_directive.name.location, )); Transformed::Keep } } } } } } }
//! Checking that constant values used in types can be successfully evaluated. //! //! For concrete constants, this is fairly simple as we can just try and evaluate it. //! //! When dealing with polymorphic constants, for example `std::mem::size_of::<T>() - 1`, //! this is not as easy. //! //! In this case we try to build an abstract representation of this constant using //! `mir_abstract_const` which can then be checked for structural equality with other //! generic constants mentioned in the `caller_bounds` of the current environment. use crate::traits::ty::subst::GenericArg; use rustc_errors::ErrorReported; use rustc_hir::def::DefKind; use rustc_index::bit_set::BitSet; use rustc_index::vec::IndexVec; use rustc_infer::infer::InferCtxt; use rustc_middle::mir::abstract_const::{Node, NodeId, NotConstEvaluatable}; use rustc_middle::mir::interpret::ErrorHandled; use rustc_middle::mir::{self, Rvalue, StatementKind, TerminatorKind}; use rustc_middle::ty::subst::{Subst, SubstsRef}; use rustc_middle::ty::{self, TyCtxt, TypeFoldable}; use rustc_session::lint; use rustc_span::def_id::LocalDefId; use rustc_span::Span; use std::cmp; use std::iter; use std::ops::ControlFlow; /// Check if a given constant can be evaluated. pub fn is_const_evaluatable<'cx, 'tcx>( infcx: &InferCtxt<'cx, 'tcx>, uv: ty::Unevaluated<'tcx, ()>, param_env: ty::ParamEnv<'tcx>, span: Span, ) -> Result<(), NotConstEvaluatable> { debug!("is_const_evaluatable({:?})", uv); if infcx.tcx.features().generic_const_exprs { let tcx = infcx.tcx; match AbstractConst::new(tcx, uv)? { // We are looking at a generic abstract constant. Some(ct) => { for pred in param_env.caller_bounds() { match pred.kind().skip_binder() { ty::PredicateKind::ConstEvaluatable(uv) => { if let Some(b_ct) = AbstractConst::new(tcx, uv)? 
{ // Try to unify with each subtree in the AbstractConst to allow for // `N + 1` being const evaluatable even if theres only a `ConstEvaluatable` // predicate for `(N + 1) * 2` let result = walk_abstract_const(tcx, b_ct, |b_ct| { match try_unify(tcx, ct, b_ct) { true => ControlFlow::BREAK, false => ControlFlow::CONTINUE, } }); if let ControlFlow::Break(()) = result { debug!("is_const_evaluatable: abstract_const ~~> ok"); return Ok(()); } } } _ => {} // don't care } } // We were unable to unify the abstract constant with // a constant found in the caller bounds, there are // now three possible cases here. #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] enum FailureKind { /// The abstract const still references an inference /// variable, in this case we return `TooGeneric`. MentionsInfer, /// The abstract const references a generic parameter, /// this means that we emit an error here. MentionsParam, /// The substs are concrete enough that we can simply /// try and evaluate the given constant. 
Concrete, } let mut failure_kind = FailureKind::Concrete; walk_abstract_const::<!, _>(tcx, ct, |node| match node.root(tcx, ct.substs) { Node::Leaf(leaf) => { if leaf.has_infer_types_or_consts() { failure_kind = FailureKind::MentionsInfer; } else if leaf.definitely_has_param_types_or_consts(tcx) { failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam); } ControlFlow::CONTINUE } Node::Cast(_, _, ty) => { if ty.has_infer_types_or_consts() { failure_kind = FailureKind::MentionsInfer; } else if ty.definitely_has_param_types_or_consts(tcx) { failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam); } ControlFlow::CONTINUE } Node::Binop(_, _, _) | Node::UnaryOp(_, _) | Node::FunctionCall(_, _) => { ControlFlow::CONTINUE } }); match failure_kind { FailureKind::MentionsInfer => { return Err(NotConstEvaluatable::MentionsInfer); } FailureKind::MentionsParam => { return Err(NotConstEvaluatable::MentionsParam); } FailureKind::Concrete => { // Dealt with below by the same code which handles this // without the feature gate. } } } None => { // If we are dealing with a concrete constant, we can // reuse the old code path and try to evaluate // the constant. } } } let future_compat_lint = || { if let Some(local_def_id) = uv.def.did.as_local() { infcx.tcx.struct_span_lint_hir( lint::builtin::CONST_EVALUATABLE_UNCHECKED, infcx.tcx.hir().local_def_id_to_hir_id(local_def_id), span, |err| { err.build("cannot use constants which depend on generic parameters in types") .emit(); }, ); } }; // FIXME: We should only try to evaluate a given constant here if it is fully concrete // as we don't want to allow things like `[u8; std::mem::size_of::<*mut T>()]`. // // We previously did not check this, so we only emit a future compat warning if // const evaluation succeeds and the given constant is still polymorphic for now // and hopefully soon change this to an error. // // See #74595 for more details about this. 
let concrete = infcx.const_eval_resolve(param_env, uv.expand(), Some(span)); if concrete.is_ok() && uv.substs(infcx.tcx).definitely_has_param_types_or_consts(infcx.tcx) { match infcx.tcx.def_kind(uv.def.did) { DefKind::AnonConst => { let mir_body = infcx.tcx.mir_for_ctfe_opt_const_arg(uv.def); if mir_body.is_polymorphic { future_compat_lint(); } } _ => future_compat_lint(), } } debug!(?concrete, "is_const_evaluatable"); match concrete { Err(ErrorHandled::TooGeneric) => Err(match uv.has_infer_types_or_consts() { true => NotConstEvaluatable::MentionsInfer, false => NotConstEvaluatable::MentionsParam, }), Err(ErrorHandled::Linted) => { infcx.tcx.sess.delay_span_bug(span, "constant in type had error reported as lint"); Err(NotConstEvaluatable::Error(ErrorReported)) } Err(ErrorHandled::Reported(e)) => Err(NotConstEvaluatable::Error(e)), Ok(_) => Ok(()), } } /// A tree representing an anonymous constant. /// /// This is only able to represent a subset of `MIR`, /// and should not leak any information about desugarings. #[derive(Debug, Clone, Copy)] pub struct AbstractConst<'tcx> { // FIXME: Consider adding something like `IndexSlice` // and use this here. 
pub inner: &'tcx [Node<'tcx>], pub substs: SubstsRef<'tcx>, } impl<'tcx> AbstractConst<'tcx> { pub fn new( tcx: TyCtxt<'tcx>, uv: ty::Unevaluated<'tcx, ()>, ) -> Result<Option<AbstractConst<'tcx>>, ErrorReported> { let inner = tcx.mir_abstract_const_opt_const_arg(uv.def)?; debug!("AbstractConst::new({:?}) = {:?}", uv, inner); Ok(inner.map(|inner| AbstractConst { inner, substs: uv.substs(tcx) })) } pub fn from_const( tcx: TyCtxt<'tcx>, ct: &ty::Const<'tcx>, ) -> Result<Option<AbstractConst<'tcx>>, ErrorReported> { match ct.val { ty::ConstKind::Unevaluated(uv) => AbstractConst::new(tcx, uv.shrink()), ty::ConstKind::Error(_) => Err(ErrorReported), _ => Ok(None), } } #[inline] pub fn subtree(self, node: NodeId) -> AbstractConst<'tcx> { AbstractConst { inner: &self.inner[..=node.index()], substs: self.substs } } #[inline] pub fn root(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Node<'tcx> { let mut node = self.inner.last().copied().unwrap(); if let Node::Leaf(leaf) = node { node = Node::Leaf(leaf.subst(tcx, substs)); } node } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] struct WorkNode<'tcx> { node: Node<'tcx>, span: Span, used: bool, } struct AbstractConstBuilder<'a, 'tcx> { tcx: TyCtxt<'tcx>, body: &'a mir::Body<'tcx>, /// The current WIP node tree. /// /// We require all nodes to be used in the final abstract const, /// so we store this here. Note that we also consider nodes as used /// if they are mentioned in an assert, so some used nodes are never /// actually reachable by walking the [`AbstractConst`]. nodes: IndexVec<NodeId, WorkNode<'tcx>>, locals: IndexVec<mir::Local, NodeId>, /// We only allow field accesses if they access /// the result of a checked operation. 
checked_op_locals: BitSet<mir::Local>, } impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> { fn error(&mut self, span: Option<Span>, msg: &str) -> Result<!, ErrorReported> { self.tcx .sess .struct_span_err(self.body.span, "overly complex generic constant") .span_label(span.unwrap_or(self.body.span), msg) .help("consider moving this anonymous constant into a `const` function") .emit(); Err(ErrorReported) } fn new( tcx: TyCtxt<'tcx>, body: &'a mir::Body<'tcx>, ) -> Result<Option<AbstractConstBuilder<'a, 'tcx>>, ErrorReported> { let mut builder = AbstractConstBuilder { tcx, body, nodes: IndexVec::new(), locals: IndexVec::from_elem(NodeId::MAX, &body.local_decls), checked_op_locals: BitSet::new_empty(body.local_decls.len()), }; // We don't have to look at concrete constants, as we // can just evaluate them. if !body.is_polymorphic { return Ok(None); } // We only allow consts without control flow, so // we check for cycles here which simplifies the // rest of this implementation. if body.is_cfg_cyclic() { builder.error(None, "cyclic anonymous constants are forbidden")?; } Ok(Some(builder)) } fn add_node(&mut self, node: Node<'tcx>, span: Span) -> NodeId { // Mark used nodes. match node { Node::Leaf(_) => (), Node::Binop(_, lhs, rhs) => { self.nodes[lhs].used = true; self.nodes[rhs].used = true; } Node::UnaryOp(_, input) => { self.nodes[input].used = true; } Node::FunctionCall(func, nodes) => { self.nodes[func].used = true; nodes.iter().for_each(|&n| self.nodes[n].used = true); } Node::Cast(_, operand, _) => { self.nodes[operand].used = true; } } // Nodes start as unused. self.nodes.push(WorkNode { node, span, used: false }) } fn place_to_local( &mut self, span: Span, p: &mir::Place<'tcx>, ) -> Result<mir::Local, ErrorReported> { const ZERO_FIELD: mir::Field = mir::Field::from_usize(0); // Do not allow any projections. // // One exception are field accesses on the result of checked operations, // which are required to support things like `1 + 2`. 
if let Some(p) = p.as_local() { debug_assert!(!self.checked_op_locals.contains(p)); Ok(p) } else if let &[mir::ProjectionElem::Field(ZERO_FIELD, _)] = p.projection.as_ref() { // Only allow field accesses if the given local // contains the result of a checked operation. if self.checked_op_locals.contains(p.local) { Ok(p.local) } else { self.error(Some(span), "unsupported projection")?; } } else { self.error(Some(span), "unsupported projection")?; } } fn operand_to_node( &mut self, span: Span, op: &mir::Operand<'tcx>, ) -> Result<NodeId, ErrorReported> { debug!("operand_to_node: op={:?}", op); match op { mir::Operand::Copy(p) | mir::Operand::Move(p) => { let local = self.place_to_local(span, p)?; Ok(self.locals[local]) } mir::Operand::Constant(ct) => match ct.literal { mir::ConstantKind::Ty(ct) => Ok(self.add_node(Node::Leaf(ct), span)), mir::ConstantKind::Val(..) => self.error(Some(span), "unsupported constant")?, }, } } /// We do not allow all binary operations in abstract consts, so filter disallowed ones. fn check_binop(op: mir::BinOp) -> bool { use mir::BinOp::*; match op { Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Shl | Shr | Eq | Lt | Le | Ne | Ge | Gt => true, Offset => false, } } /// While we currently allow all unary operations, we still want to explicitly guard against /// future changes here. 
fn check_unop(op: mir::UnOp) -> bool { use mir::UnOp::*; match op { Not | Neg => true, } } fn build_statement(&mut self, stmt: &mir::Statement<'tcx>) -> Result<(), ErrorReported> { debug!("AbstractConstBuilder: stmt={:?}", stmt); let span = stmt.source_info.span; match stmt.kind { StatementKind::Assign(box (ref place, ref rvalue)) => { let local = self.place_to_local(span, place)?; match *rvalue { Rvalue::Use(ref operand) => { self.locals[local] = self.operand_to_node(span, operand)?; Ok(()) } Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) if Self::check_binop(op) => { let lhs = self.operand_to_node(span, lhs)?; let rhs = self.operand_to_node(span, rhs)?; self.locals[local] = self.add_node(Node::Binop(op, lhs, rhs), span); if op.is_checkable() { bug!("unexpected unchecked checkable binary operation"); } else { Ok(()) } } Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) if Self::check_binop(op) => { let lhs = self.operand_to_node(span, lhs)?; let rhs = self.operand_to_node(span, rhs)?; self.locals[local] = self.add_node(Node::Binop(op, lhs, rhs), span); self.checked_op_locals.insert(local); Ok(()) } Rvalue::UnaryOp(op, ref operand) if Self::check_unop(op) => { let operand = self.operand_to_node(span, operand)?; self.locals[local] = self.add_node(Node::UnaryOp(op, operand), span); Ok(()) } Rvalue::Cast(cast_kind, ref operand, ty) => { let operand = self.operand_to_node(span, operand)?; self.locals[local] = self.add_node(Node::Cast(cast_kind, operand, ty), span); Ok(()) } _ => self.error(Some(span), "unsupported rvalue")?, } } // These are not actually relevant for us here, so we can ignore them. StatementKind::AscribeUserType(..) 
| StatementKind::StorageLive(_) | StatementKind::StorageDead(_) => Ok(()), _ => self.error(Some(stmt.source_info.span), "unsupported statement")?, } } /// Possible return values: /// /// - `None`: unsupported terminator, stop building /// - `Some(None)`: supported terminator, finish building /// - `Some(Some(block))`: support terminator, build `block` next fn build_terminator( &mut self, terminator: &mir::Terminator<'tcx>, ) -> Result<Option<mir::BasicBlock>, ErrorReported> { debug!("AbstractConstBuilder: terminator={:?}", terminator); match terminator.kind { TerminatorKind::Goto { target } => Ok(Some(target)), TerminatorKind::Return => Ok(None), TerminatorKind::Call { ref func, ref args, destination: Some((ref place, target)), // We do not care about `cleanup` here. Any branch which // uses `cleanup` will fail const-eval and they therefore // do not matter when checking for const evaluatability. // // Do note that even if `panic::catch_unwind` is made const, // we still do not have to care about this, as we do not look // into functions. cleanup: _, // Do not allow overloaded operators for now, // we probably do want to allow this in the future. // // This is currently fairly irrelevant as it requires `const Trait`s. from_hir_call: true, fn_span, } => { let local = self.place_to_local(fn_span, place)?; let func = self.operand_to_node(fn_span, func)?; let args = self.tcx.arena.alloc_from_iter( args.iter() .map(|arg| self.operand_to_node(terminator.source_info.span, arg)) .collect::<Result<Vec<NodeId>, _>>()?, ); self.locals[local] = self.add_node(Node::FunctionCall(func, args), fn_span); Ok(Some(target)) } TerminatorKind::Assert { ref cond, expected: false, target, .. 
} => { let p = match cond { mir::Operand::Copy(p) | mir::Operand::Move(p) => p, mir::Operand::Constant(_) => bug!("unexpected assert"), }; const ONE_FIELD: mir::Field = mir::Field::from_usize(1); debug!("proj: {:?}", p.projection); if let Some(p) = p.as_local() { debug_assert!(!self.checked_op_locals.contains(p)); // Mark locals directly used in asserts as used. // // This is needed because division does not use `CheckedBinop` but instead // adds an explicit assert for `divisor != 0`. self.nodes[self.locals[p]].used = true; return Ok(Some(target)); } else if let &[mir::ProjectionElem::Field(ONE_FIELD, _)] = p.projection.as_ref() { // Only allow asserts checking the result of a checked operation. if self.checked_op_locals.contains(p.local) { return Ok(Some(target)); } } self.error(Some(terminator.source_info.span), "unsupported assertion")?; } _ => self.error(Some(terminator.source_info.span), "unsupported terminator")?, } } /// Builds the abstract const by walking the mir from start to finish /// and bailing out when encountering an unsupported operation. fn build(mut self) -> Result<&'tcx [Node<'tcx>], ErrorReported> { let mut block = &self.body.basic_blocks()[mir::START_BLOCK]; // We checked for a cyclic cfg above, so this should terminate. loop { debug!("AbstractConstBuilder: block={:?}", block); for stmt in block.statements.iter() { self.build_statement(stmt)?; } if let Some(next) = self.build_terminator(block.terminator())? { block = &self.body.basic_blocks()[next]; } else { break; } } assert_eq!(self.locals[mir::RETURN_PLACE], self.nodes.last().unwrap()); for n in self.nodes.iter() { if let Node::Leaf(ty::Const { val: ty::ConstKind::Unevaluated(ct), ty: _ }) = n.node { // `AbstractConst`s should not contain any promoteds as they require references which // are not allowed. 
assert_eq!(ct.promoted, None); } } self.nodes[self.locals[mir::RETURN_PLACE]].used = true; if let Some(&unused) = self.nodes.iter().find(|n| !n.used) { self.error(Some(unused.span), "dead code")?; } Ok(self.tcx.arena.alloc_from_iter(self.nodes.into_iter().map(|n| n.node))) } } /// Builds an abstract const, do not use this directly, but use `AbstractConst::new` instead. pub(super) fn mir_abstract_const<'tcx>( tcx: TyCtxt<'tcx>, def: ty::WithOptConstParam<LocalDefId>, ) -> Result<Option<&'tcx [mir::abstract_const::Node<'tcx>]>, ErrorReported> { if tcx.features().generic_const_exprs { match tcx.def_kind(def.did) { // FIXME(generic_const_exprs): We currently only do this for anonymous constants, // meaning that we do not look into associated constants. I(@lcnr) am not yet sure whether // we want to look into them or treat them as opaque projections. // // Right now we do neither of that and simply always fail to unify them. DefKind::AnonConst => (), _ => return Ok(None), } let body = tcx.mir_const(def).borrow(); AbstractConstBuilder::new(tcx, &body)?.map(AbstractConstBuilder::build).transpose() } else { Ok(None) } } pub(super) fn try_unify_abstract_consts<'tcx>( tcx: TyCtxt<'tcx>, (a, b): (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>), ) -> bool { (|| { if let Some(a) = AbstractConst::new(tcx, a)? { if let Some(b) = AbstractConst::new(tcx, b)? { return Ok(try_unify(tcx, a, b)); } } Ok(false) })() .unwrap_or_else(|ErrorReported| true) // FIXME(generic_const_exprs): We should instead have this // method return the resulting `ty::Const` and return `ConstKind::Error` // on `ErrorReported`. 
} pub fn walk_abstract_const<'tcx, R, F>( tcx: TyCtxt<'tcx>, ct: AbstractConst<'tcx>, mut f: F, ) -> ControlFlow<R> where F: FnMut(AbstractConst<'tcx>) -> ControlFlow<R>, { fn recurse<'tcx, R>( tcx: TyCtxt<'tcx>, ct: AbstractConst<'tcx>, f: &mut dyn FnMut(AbstractConst<'tcx>) -> ControlFlow<R>, ) -> ControlFlow<R> { f(ct)?; let root = ct.root(tcx, ct.substs); match root { Node::Leaf(_) => ControlFlow::CONTINUE, Node::Binop(_, l, r) => { recurse(tcx, ct.subtree(l), f)?; recurse(tcx, ct.subtree(r), f) } Node::UnaryOp(_, v) => recurse(tcx, ct.subtree(v), f), Node::FunctionCall(func, args) => { recurse(tcx, ct.subtree(func), f)?; args.iter().try_for_each(|&arg| recurse(tcx, ct.subtree(arg), f)) } Node::Cast(_, operand, _) => recurse(tcx, ct.subtree(operand), f), } } recurse(tcx, ct, &mut f) } /// Tries to unify two abstract constants using structural equality. pub(super) fn try_unify<'tcx>( tcx: TyCtxt<'tcx>, mut a: AbstractConst<'tcx>, mut b: AbstractConst<'tcx>, ) -> bool { // We substitute generics repeatedly to allow AbstractConsts to unify where a // ConstKind::Unevalated could be turned into an AbstractConst that would unify e.g. // Param(N) should unify with Param(T), substs: [Unevaluated("T2", [Unevaluated("T3", [Param(N)])])] while let Node::Leaf(a_ct) = a.root(tcx, a.substs) { match AbstractConst::from_const(tcx, a_ct) { Ok(Some(a_act)) => a = a_act, Ok(None) => break, Err(_) => return true, } } while let Node::Leaf(b_ct) = b.root(tcx, b.substs) { match AbstractConst::from_const(tcx, b_ct) { Ok(Some(b_act)) => b = b_act, Ok(None) => break, Err(_) => return true, } } match (a.root(tcx, a.substs), b.root(tcx, b.substs)) { (Node::Leaf(a_ct), Node::Leaf(b_ct)) => { if a_ct.ty != b_ct.ty { return false; } match (a_ct.val, b_ct.val) { // We can just unify errors with everything to reduce the amount of // emitted errors here. 
(ty::ConstKind::Error(_), _) | (_, ty::ConstKind::Error(_)) => true, (ty::ConstKind::Param(a_param), ty::ConstKind::Param(b_param)) => { a_param == b_param } (ty::ConstKind::Value(a_val), ty::ConstKind::Value(b_val)) => a_val == b_val, // If we have `fn a<const N: usize>() -> [u8; N + 1]` and `fn b<const M: usize>() -> [u8; 1 + M]` // we do not want to use `assert_eq!(a(), b())` to infer that `N` and `M` have to be `1`. This // means that we only allow inference variables if they are equal. (ty::ConstKind::Infer(a_val), ty::ConstKind::Infer(b_val)) => a_val == b_val, // We expand generic anonymous constants at the start of this function, so this // branch should only be taking when dealing with associated constants, at // which point directly comparing them seems like the desired behavior. // // FIXME(generic_const_exprs): This isn't actually the case. // We also take this branch for concrete anonymous constants and // expand generic anonymous constants with concrete substs. (ty::ConstKind::Unevaluated(a_uv), ty::ConstKind::Unevaluated(b_uv)) => { a_uv == b_uv } // FIXME(generic_const_exprs): We may want to either actually try // to evaluate `a_ct` and `b_ct` if they are are fully concrete or something like // this, for now we just return false here. 
_ => false, } } (Node::Binop(a_op, al, ar), Node::Binop(b_op, bl, br)) if a_op == b_op => { try_unify(tcx, a.subtree(al), b.subtree(bl)) && try_unify(tcx, a.subtree(ar), b.subtree(br)) } (Node::UnaryOp(a_op, av), Node::UnaryOp(b_op, bv)) if a_op == b_op => { try_unify(tcx, a.subtree(av), b.subtree(bv)) } (Node::FunctionCall(a_f, a_args), Node::FunctionCall(b_f, b_args)) if a_args.len() == b_args.len() => { try_unify(tcx, a.subtree(a_f), b.subtree(b_f)) && iter::zip(a_args, b_args) .all(|(&an, &bn)| try_unify(tcx, a.subtree(an), b.subtree(bn))) } (Node::Cast(a_cast_kind, a_operand, a_ty), Node::Cast(b_cast_kind, b_operand, b_ty)) if (a_ty == b_ty) && (a_cast_kind == b_cast_kind) => { try_unify(tcx, a.subtree(a_operand), b.subtree(b_operand)) } _ => false, } } Minor cleanup: make imports more consistent //! Checking that constant values used in types can be successfully evaluated. //! //! For concrete constants, this is fairly simple as we can just try and evaluate it. //! //! When dealing with polymorphic constants, for example `std::mem::size_of::<T>() - 1`, //! this is not as easy. //! //! In this case we try to build an abstract representation of this constant using //! `mir_abstract_const` which can then be checked for structural equality with other //! generic constants mentioned in the `caller_bounds` of the current environment. 
use rustc_errors::ErrorReported;
use rustc_hir::def::DefKind;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::IndexVec;
use rustc_infer::infer::InferCtxt;
use rustc_middle::mir::abstract_const::{Node, NodeId, NotConstEvaluatable};
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::mir::{self, Rvalue, StatementKind, TerminatorKind};
use rustc_middle::ty::subst::{GenericArg, Subst, SubstsRef};
use rustc_middle::ty::{self, TyCtxt, TypeFoldable};
use rustc_session::lint;
use rustc_span::def_id::LocalDefId;
use rustc_span::Span;

use std::cmp;
use std::iter;
use std::ops::ControlFlow;

/// Check if a given constant can be evaluated.
///
/// With `generic_const_exprs` enabled, first tries to structurally unify the
/// constant's abstract const with one mentioned in the caller bounds of
/// `param_env`; otherwise (or as a fallback for concrete constants) the
/// constant is simply evaluated via `const_eval_resolve`.
pub fn is_const_evaluatable<'cx, 'tcx>(
    infcx: &InferCtxt<'cx, 'tcx>,
    uv: ty::Unevaluated<'tcx, ()>,
    param_env: ty::ParamEnv<'tcx>,
    span: Span,
) -> Result<(), NotConstEvaluatable> {
    debug!("is_const_evaluatable({:?})", uv);
    if infcx.tcx.features().generic_const_exprs {
        let tcx = infcx.tcx;
        match AbstractConst::new(tcx, uv)? {
            // We are looking at a generic abstract constant.
            Some(ct) => {
                for pred in param_env.caller_bounds() {
                    match pred.kind().skip_binder() {
                        ty::PredicateKind::ConstEvaluatable(uv) => {
                            if let Some(b_ct) = AbstractConst::new(tcx, uv)? {
                                // Try to unify with each subtree in the AbstractConst to allow for
                                // `N + 1` being const evaluatable even if theres only a `ConstEvaluatable`
                                // predicate for `(N + 1) * 2`
                                let result = walk_abstract_const(tcx, b_ct, |b_ct| {
                                    match try_unify(tcx, ct, b_ct) {
                                        true => ControlFlow::BREAK,
                                        false => ControlFlow::CONTINUE,
                                    }
                                });

                                // A `Break` from the walk means some subtree unified.
                                if let ControlFlow::Break(()) = result {
                                    debug!("is_const_evaluatable: abstract_const ~~> ok");
                                    return Ok(());
                                }
                            }
                        }
                        _ => {} // don't care
                    }
                }

                // We were unable to unify the abstract constant with
                // a constant found in the caller bounds, there are
                // now three possible cases here.
                #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
                enum FailureKind {
                    /// The abstract const still references an inference
                    /// variable, in this case we return `TooGeneric`.
                    MentionsInfer,
                    /// The abstract const references a generic parameter,
                    /// this means that we emit an error here.
                    MentionsParam,
                    /// The substs are concrete enough that we can simply
                    /// try and evaluate the given constant.
                    Concrete,
                }
                // Note: the `Ord` derive above makes `MentionsInfer` the
                // minimum, so `cmp::min` below never downgrades it to
                // `MentionsParam`.
                let mut failure_kind = FailureKind::Concrete;
                walk_abstract_const::<!, _>(tcx, ct, |node| match node.root(tcx, ct.substs) {
                    Node::Leaf(leaf) => {
                        if leaf.has_infer_types_or_consts() {
                            failure_kind = FailureKind::MentionsInfer;
                        } else if leaf.definitely_has_param_types_or_consts(tcx) {
                            failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam);
                        }

                        ControlFlow::CONTINUE
                    }
                    Node::Cast(_, _, ty) => {
                        if ty.has_infer_types_or_consts() {
                            failure_kind = FailureKind::MentionsInfer;
                        } else if ty.definitely_has_param_types_or_consts(tcx) {
                            failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam);
                        }

                        ControlFlow::CONTINUE
                    }
                    Node::Binop(_, _, _) | Node::UnaryOp(_, _) | Node::FunctionCall(_, _) => {
                        ControlFlow::CONTINUE
                    }
                });

                match failure_kind {
                    FailureKind::MentionsInfer => {
                        return Err(NotConstEvaluatable::MentionsInfer);
                    }
                    FailureKind::MentionsParam => {
                        return Err(NotConstEvaluatable::MentionsParam);
                    }
                    FailureKind::Concrete => {
                        // Dealt with below by the same code which handles this
                        // without the feature gate.
                    }
                }
            }
            None => {
                // If we are dealing with a concrete constant, we can
                // reuse the old code path and try to evaluate
                // the constant.
            }
        }
    }

    // Emits the future-compatibility lint for constants that evaluate
    // successfully but still mention generic parameters (see FIXME below).
    let future_compat_lint = || {
        if let Some(local_def_id) = uv.def.did.as_local() {
            infcx.tcx.struct_span_lint_hir(
                lint::builtin::CONST_EVALUATABLE_UNCHECKED,
                infcx.tcx.hir().local_def_id_to_hir_id(local_def_id),
                span,
                |err| {
                    err.build("cannot use constants which depend on generic parameters in types")
                        .emit();
                },
            );
        }
    };

    // FIXME: We should only try to evaluate a given constant here if it is fully concrete
    // as we don't want to allow things like `[u8; std::mem::size_of::<*mut T>()]`.
    //
    // We previously did not check this, so we only emit a future compat warning if
    // const evaluation succeeds and the given constant is still polymorphic for now
    // and hopefully soon change this to an error.
    //
    // See #74595 for more details about this.
    let concrete = infcx.const_eval_resolve(param_env, uv.expand(), Some(span));

    if concrete.is_ok() && uv.substs(infcx.tcx).definitely_has_param_types_or_consts(infcx.tcx) {
        match infcx.tcx.def_kind(uv.def.did) {
            DefKind::AnonConst => {
                let mir_body = infcx.tcx.mir_for_ctfe_opt_const_arg(uv.def);

                if mir_body.is_polymorphic {
                    future_compat_lint();
                }
            }
            _ => future_compat_lint(),
        }
    }

    debug!(?concrete, "is_const_evaluatable");
    match concrete {
        // `TooGeneric` is mapped onto the two "generic" failure kinds
        // depending on whether inference variables are still present.
        Err(ErrorHandled::TooGeneric) => Err(match uv.has_infer_types_or_consts() {
            true => NotConstEvaluatable::MentionsInfer,
            false => NotConstEvaluatable::MentionsParam,
        }),
        Err(ErrorHandled::Linted) => {
            infcx.tcx.sess.delay_span_bug(span, "constant in type had error reported as lint");
            Err(NotConstEvaluatable::Error(ErrorReported))
        }
        Err(ErrorHandled::Reported(e)) => Err(NotConstEvaluatable::Error(e)),
        Ok(_) => Ok(()),
    }
}

/// A tree representing an anonymous constant.
///
/// This is only able to represent a subset of `MIR`,
/// and should not leak any information about desugarings.
#[derive(Debug, Clone, Copy)]
pub struct AbstractConst<'tcx> {
    // FIXME: Consider adding something like `IndexSlice`
    // and use this here.
    /// The nodes of the tree; `root` reads the last element, and
    /// `subtree` truncates this slice to select a different root.
    pub inner: &'tcx [Node<'tcx>],
    /// Substitutions applied to leaves when reading them via `root`.
    pub substs: SubstsRef<'tcx>,
}

impl<'tcx> AbstractConst<'tcx> {
    /// Looks up the abstract const built for `uv.def` (via the
    /// `mir_abstract_const_opt_const_arg` query) and pairs it with `uv`'s
    /// substs. Returns `Ok(None)` when no abstract const exists for it.
    pub fn new(
        tcx: TyCtxt<'tcx>,
        uv: ty::Unevaluated<'tcx, ()>,
    ) -> Result<Option<AbstractConst<'tcx>>, ErrorReported> {
        let inner = tcx.mir_abstract_const_opt_const_arg(uv.def)?;
        debug!("AbstractConst::new({:?}) = {:?}", uv, inner);
        Ok(inner.map(|inner| AbstractConst { inner, substs: uv.substs(tcx) }))
    }

    /// Like [`AbstractConst::new`], but starting from a `ty::Const`.
    /// Only unevaluated constants can have an abstract const; an error
    /// constant propagates `ErrorReported`, anything else yields `None`.
    pub fn from_const(
        tcx: TyCtxt<'tcx>,
        ct: &ty::Const<'tcx>,
    ) -> Result<Option<AbstractConst<'tcx>>, ErrorReported> {
        match ct.val {
            ty::ConstKind::Unevaluated(uv) => AbstractConst::new(tcx, uv.shrink()),
            ty::ConstKind::Error(_) => Err(ErrorReported),
            _ => Ok(None),
        }
    }

    /// Returns the view of this constant whose root is `node`, by
    /// truncating `inner` at `node` (the root is always the last node).
    #[inline]
    pub fn subtree(self, node: NodeId) -> AbstractConst<'tcx> {
        AbstractConst { inner: &self.inner[..=node.index()], substs: self.substs }
    }

    /// Returns the root node, applying `substs` to leaf constants.
    #[inline]
    pub fn root(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Node<'tcx> {
        let mut node = self.inner.last().copied().unwrap();
        if let Node::Leaf(leaf) = node {
            node = Node::Leaf(leaf.subst(tcx, substs));
        }
        node
    }
}

/// A [`Node`] under construction, tracking whether anything references it
/// yet (see the `used` handling in [`AbstractConstBuilder`]).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct WorkNode<'tcx> {
    node: Node<'tcx>,
    span: Span,
    // Set when another node — or an assert terminator — references this node.
    used: bool,
}

/// Builds the node tree of an [`AbstractConst`] by walking the MIR body of
/// an anonymous constant (see [`mir_abstract_const`]).
struct AbstractConstBuilder<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    body: &'a mir::Body<'tcx>,
    /// The current WIP node tree.
    ///
    /// We require all nodes to be used in the final abstract const,
    /// so we store this here. Note that we also consider nodes as used
    /// if they are mentioned in an assert, so some used nodes are never
    /// actually reachable by walking the [`AbstractConst`].
    nodes: IndexVec<NodeId, WorkNode<'tcx>>,
    /// Maps each MIR local to the node currently assigned to it
    /// (initialized to `NodeId::MAX`, i.e. "no node yet").
    locals: IndexVec<mir::Local, NodeId>,
    /// We only allow field accesses if they access
    /// the result of a checked operation.
    checked_op_locals: BitSet<mir::Local>,
}

impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
    /// Emits the "overly complex generic constant" diagnostic with `msg`
    /// as the label and always returns `Err(ErrorReported)`.
    fn error(&mut self, span: Option<Span>, msg: &str) -> Result<!, ErrorReported> {
        self.tcx
            .sess
            .struct_span_err(self.body.span, "overly complex generic constant")
            .span_label(span.unwrap_or(self.body.span), msg)
            .help("consider moving this anonymous constant into a `const` function")
            .emit();

        Err(ErrorReported)
    }

    /// Creates a builder for `body`. Returns `Ok(None)` for concrete
    /// (non-polymorphic) bodies and errors on cyclic control flow.
    fn new(
        tcx: TyCtxt<'tcx>,
        body: &'a mir::Body<'tcx>,
    ) -> Result<Option<AbstractConstBuilder<'a, 'tcx>>, ErrorReported> {
        let mut builder = AbstractConstBuilder {
            tcx,
            body,
            nodes: IndexVec::new(),
            locals: IndexVec::from_elem(NodeId::MAX, &body.local_decls),
            checked_op_locals: BitSet::new_empty(body.local_decls.len()),
        };

        // We don't have to look at concrete constants, as we
        // can just evaluate them.
        if !body.is_polymorphic {
            return Ok(None);
        }

        // We only allow consts without control flow, so
        // we check for cycles here which simplifies the
        // rest of this implementation.
        if body.is_cfg_cyclic() {
            builder.error(None, "cyclic anonymous constants are forbidden")?;
        }

        Ok(Some(builder))
    }

    /// Appends `node` to the tree, marking its operands as used.
    fn add_node(&mut self, node: Node<'tcx>, span: Span) -> NodeId {
        // Mark used nodes.
        match node {
            Node::Leaf(_) => (),
            Node::Binop(_, lhs, rhs) => {
                self.nodes[lhs].used = true;
                self.nodes[rhs].used = true;
            }
            Node::UnaryOp(_, input) => {
                self.nodes[input].used = true;
            }
            Node::FunctionCall(func, nodes) => {
                self.nodes[func].used = true;
                nodes.iter().for_each(|&n| self.nodes[n].used = true);
            }
            Node::Cast(_, operand, _) => {
                self.nodes[operand].used = true;
            }
        }

        // Nodes start as unused.
        self.nodes.push(WorkNode { node, span, used: false })
    }

    /// Extracts the base local from a place, rejecting projections except
    /// for field 0 of a checked-operation result (the value component).
    fn place_to_local(
        &mut self,
        span: Span,
        p: &mir::Place<'tcx>,
    ) -> Result<mir::Local, ErrorReported> {
        const ZERO_FIELD: mir::Field = mir::Field::from_usize(0);
        // Do not allow any projections.
        //
        // One exception are field accesses on the result of checked operations,
        // which are required to support things like `1 + 2`.
        if let Some(p) = p.as_local() {
            debug_assert!(!self.checked_op_locals.contains(p));
            Ok(p)
        } else if let &[mir::ProjectionElem::Field(ZERO_FIELD, _)] = p.projection.as_ref() {
            // Only allow field accesses if the given local
            // contains the result of a checked operation.
            if self.checked_op_locals.contains(p.local) {
                Ok(p.local)
            } else {
                self.error(Some(span), "unsupported projection")?;
            }
        } else {
            self.error(Some(span), "unsupported projection")?;
        }
    }

    /// Resolves an operand to a node: copies/moves reuse the node of the
    /// referenced local, constants become fresh leaves.
    fn operand_to_node(
        &mut self,
        span: Span,
        op: &mir::Operand<'tcx>,
    ) -> Result<NodeId, ErrorReported> {
        debug!("operand_to_node: op={:?}", op);
        match op {
            mir::Operand::Copy(p) | mir::Operand::Move(p) => {
                let local = self.place_to_local(span, p)?;
                Ok(self.locals[local])
            }
            mir::Operand::Constant(ct) => match ct.literal {
                mir::ConstantKind::Ty(ct) => Ok(self.add_node(Node::Leaf(ct), span)),
                mir::ConstantKind::Val(..) => self.error(Some(span), "unsupported constant")?,
            },
        }
    }

    /// We do not allow all binary operations in abstract consts, so filter disallowed ones.
    fn check_binop(op: mir::BinOp) -> bool {
        use mir::BinOp::*;
        match op {
            Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Shl | Shr | Eq | Lt | Le
            | Ne | Ge | Gt => true,
            Offset => false,
        }
    }

    /// While we currently allow all unary operations, we still want to explicitly guard against
    /// future changes here.
    fn check_unop(op: mir::UnOp) -> bool {
        use mir::UnOp::*;
        match op {
            Not | Neg => true,
        }
    }

    /// Lowers one MIR statement. Only assignments of supported rvalues are
    /// turned into nodes; storage markers and type ascriptions are ignored,
    /// everything else is an error.
    fn build_statement(&mut self, stmt: &mir::Statement<'tcx>) -> Result<(), ErrorReported> {
        debug!("AbstractConstBuilder: stmt={:?}", stmt);
        let span = stmt.source_info.span;
        match stmt.kind {
            StatementKind::Assign(box (ref place, ref rvalue)) => {
                let local = self.place_to_local(span, place)?;
                match *rvalue {
                    Rvalue::Use(ref operand) => {
                        self.locals[local] = self.operand_to_node(span, operand)?;
                        Ok(())
                    }
                    Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) if Self::check_binop(op) => {
                        let lhs = self.operand_to_node(span, lhs)?;
                        let rhs = self.operand_to_node(span, rhs)?;
                        self.locals[local] = self.add_node(Node::Binop(op, lhs, rhs), span);
                        // Checkable ops are expected to arrive as `CheckedBinaryOp`.
                        if op.is_checkable() {
                            bug!("unexpected unchecked checkable binary operation");
                        } else {
                            Ok(())
                        }
                    }
                    Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs))
                        if Self::check_binop(op) =>
                    {
                        let lhs = self.operand_to_node(span, lhs)?;
                        let rhs = self.operand_to_node(span, rhs)?;
                        self.locals[local] = self.add_node(Node::Binop(op, lhs, rhs), span);
                        // Remember this local so field-0 projections and
                        // overflow asserts on it are accepted later.
                        self.checked_op_locals.insert(local);
                        Ok(())
                    }
                    Rvalue::UnaryOp(op, ref operand) if Self::check_unop(op) => {
                        let operand = self.operand_to_node(span, operand)?;
                        self.locals[local] = self.add_node(Node::UnaryOp(op, operand), span);
                        Ok(())
                    }
                    Rvalue::Cast(cast_kind, ref operand, ty) => {
                        let operand = self.operand_to_node(span, operand)?;
                        self.locals[local] =
                            self.add_node(Node::Cast(cast_kind, operand, ty), span);
                        Ok(())
                    }
                    _ => self.error(Some(span), "unsupported rvalue")?,
                }
            }
            // These are not actually relevant for us here, so we can ignore them.
            StatementKind::AscribeUserType(..)
            | StatementKind::StorageLive(_)
            | StatementKind::StorageDead(_) => Ok(()),
            _ => self.error(Some(stmt.source_info.span), "unsupported statement")?,
        }
    }

    /// Possible return values:
    ///
    /// - `None`: unsupported terminator, stop building
    /// - `Some(None)`: supported terminator, finish building
    /// - `Some(Some(block))`: support terminator, build `block` next
    fn build_terminator(
        &mut self,
        terminator: &mir::Terminator<'tcx>,
    ) -> Result<Option<mir::BasicBlock>, ErrorReported> {
        debug!("AbstractConstBuilder: terminator={:?}", terminator);
        match terminator.kind {
            TerminatorKind::Goto { target } => Ok(Some(target)),
            TerminatorKind::Return => Ok(None),
            TerminatorKind::Call {
                ref func,
                ref args,
                destination: Some((ref place, target)),
                // We do not care about `cleanup` here. Any branch which
                // uses `cleanup` will fail const-eval and they therefore
                // do not matter when checking for const evaluatability.
                //
                // Do note that even if `panic::catch_unwind` is made const,
                // we still do not have to care about this, as we do not look
                // into functions.
                cleanup: _,
                // Do not allow overloaded operators for now,
                // we probably do want to allow this in the future.
                //
                // This is currently fairly irrelevant as it requires `const Trait`s.
                from_hir_call: true,
                fn_span,
            } => {
                let local = self.place_to_local(fn_span, place)?;
                let func = self.operand_to_node(fn_span, func)?;
                let args = self.tcx.arena.alloc_from_iter(
                    args.iter()
                        .map(|arg| self.operand_to_node(terminator.source_info.span, arg))
                        .collect::<Result<Vec<NodeId>, _>>()?,
                );
                self.locals[local] = self.add_node(Node::FunctionCall(func, args), fn_span);
                Ok(Some(target))
            }
            TerminatorKind::Assert { ref cond, expected: false, target, .. } => {
                let p = match cond {
                    mir::Operand::Copy(p) | mir::Operand::Move(p) => p,
                    mir::Operand::Constant(_) => bug!("unexpected assert"),
                };

                // Field 1 of a checked-operation pair is the overflow flag.
                const ONE_FIELD: mir::Field = mir::Field::from_usize(1);
                debug!("proj: {:?}", p.projection);
                if let Some(p) = p.as_local() {
                    debug_assert!(!self.checked_op_locals.contains(p));
                    // Mark locals directly used in asserts as used.
                    //
                    // This is needed because division does not use `CheckedBinop` but instead
                    // adds an explicit assert for `divisor != 0`.
                    self.nodes[self.locals[p]].used = true;
                    return Ok(Some(target));
                } else if let &[mir::ProjectionElem::Field(ONE_FIELD, _)] = p.projection.as_ref() {
                    // Only allow asserts checking the result of a checked operation.
                    if self.checked_op_locals.contains(p.local) {
                        return Ok(Some(target));
                    }
                }

                self.error(Some(terminator.source_info.span), "unsupported assertion")?;
            }
            _ => self.error(Some(terminator.source_info.span), "unsupported terminator")?,
        }
    }

    /// Builds the abstract const by walking the mir from start to finish
    /// and bailing out when encountering an unsupported operation.
    fn build(mut self) -> Result<&'tcx [Node<'tcx>], ErrorReported> {
        let mut block = &self.body.basic_blocks()[mir::START_BLOCK];
        // We checked for a cyclic cfg above, so this should terminate.
        loop {
            debug!("AbstractConstBuilder: block={:?}", block);
            for stmt in block.statements.iter() {
                self.build_statement(stmt)?;
            }

            if let Some(next) = self.build_terminator(block.terminator())? {
                block = &self.body.basic_blocks()[next];
            } else {
                break;
            }
        }

        // The return place must hold the root, i.e. the node added last.
        assert_eq!(self.locals[mir::RETURN_PLACE], self.nodes.last().unwrap());
        for n in self.nodes.iter() {
            if let Node::Leaf(ty::Const { val: ty::ConstKind::Unevaluated(ct), ty: _ }) = n.node {
                // `AbstractConst`s should not contain any promoteds as they require references which
                // are not allowed.
                assert_eq!(ct.promoted, None);
            }
        }

        self.nodes[self.locals[mir::RETURN_PLACE]].used = true;
        if let Some(&unused) = self.nodes.iter().find(|n| !n.used) {
            self.error(Some(unused.span), "dead code")?;
        }

        Ok(self.tcx.arena.alloc_from_iter(self.nodes.into_iter().map(|n| n.node)))
    }
}

/// Builds an abstract const, do not use this directly, but use `AbstractConst::new` instead.
pub(super) fn mir_abstract_const<'tcx>(
    tcx: TyCtxt<'tcx>,
    def: ty::WithOptConstParam<LocalDefId>,
) -> Result<Option<&'tcx [mir::abstract_const::Node<'tcx>]>, ErrorReported> {
    if tcx.features().generic_const_exprs {
        match tcx.def_kind(def.did) {
            // FIXME(generic_const_exprs): We currently only do this for anonymous constants,
            // meaning that we do not look into associated constants. I(@lcnr) am not yet sure whether
            // we want to look into them or treat them as opaque projections.
            //
            // Right now we do neither of that and simply always fail to unify them.
            DefKind::AnonConst => (),
            _ => return Ok(None),
        }
        let body = tcx.mir_const(def).borrow();

        AbstractConstBuilder::new(tcx, &body)?.map(AbstractConstBuilder::build).transpose()
    } else {
        Ok(None)
    }
}

/// Checks whether the abstract consts of `a` and `b` unify structurally.
/// If building either abstract const errored, conservatively returns `true`.
pub(super) fn try_unify_abstract_consts<'tcx>(
    tcx: TyCtxt<'tcx>,
    (a, b): (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>),
) -> bool {
    (|| {
        if let Some(a) = AbstractConst::new(tcx, a)? {
            if let Some(b) = AbstractConst::new(tcx, b)? {
                return Ok(try_unify(tcx, a, b));
            }
        }

        Ok(false)
    })()
    .unwrap_or_else(|ErrorReported| true)
    // FIXME(generic_const_exprs): We should instead have this
    // method return the resulting `ty::Const` and return `ConstKind::Error`
    // on `ErrorReported`.
}

/// Walks `ct` depth-first, calling `f` on each subtree before descending
/// into its children; `f` can short-circuit the walk via `ControlFlow::Break`.
pub fn walk_abstract_const<'tcx, R, F>(
    tcx: TyCtxt<'tcx>,
    ct: AbstractConst<'tcx>,
    mut f: F,
) -> ControlFlow<R>
where
    F: FnMut(AbstractConst<'tcx>) -> ControlFlow<R>,
{
    // Inner worker taking `f` as `&mut dyn FnMut` so the recursion does not
    // instantiate a new copy of itself per closure type.
    fn recurse<'tcx, R>(
        tcx: TyCtxt<'tcx>,
        ct: AbstractConst<'tcx>,
        f: &mut dyn FnMut(AbstractConst<'tcx>) -> ControlFlow<R>,
    ) -> ControlFlow<R> {
        f(ct)?;
        let root = ct.root(tcx, ct.substs);
        match root {
            Node::Leaf(_) => ControlFlow::CONTINUE,
            Node::Binop(_, l, r) => {
                recurse(tcx, ct.subtree(l), f)?;
                recurse(tcx, ct.subtree(r), f)
            }
            Node::UnaryOp(_, v) => recurse(tcx, ct.subtree(v), f),
            Node::FunctionCall(func, args) => {
                recurse(tcx, ct.subtree(func), f)?;
                args.iter().try_for_each(|&arg| recurse(tcx, ct.subtree(arg), f))
            }
            Node::Cast(_, operand, _) => recurse(tcx, ct.subtree(operand), f),
        }
    }

    recurse(tcx, ct, &mut f)
}

/// Tries to unify two abstract constants using structural equality.
pub(super) fn try_unify<'tcx>(
    tcx: TyCtxt<'tcx>,
    mut a: AbstractConst<'tcx>,
    mut b: AbstractConst<'tcx>,
) -> bool {
    // We substitute generics repeatedly to allow AbstractConsts to unify where a
    // ConstKind::Unevalated could be turned into an AbstractConst that would unify e.g.
    // Param(N) should unify with Param(T), substs: [Unevaluated("T2", [Unevaluated("T3", [Param(N)])])]
    while let Node::Leaf(a_ct) = a.root(tcx, a.substs) {
        match AbstractConst::from_const(tcx, a_ct) {
            Ok(Some(a_act)) => a = a_act,
            Ok(None) => break,
            Err(_) => return true,
        }
    }
    while let Node::Leaf(b_ct) = b.root(tcx, b.substs) {
        match AbstractConst::from_const(tcx, b_ct) {
            Ok(Some(b_act)) => b = b_act,
            Ok(None) => break,
            Err(_) => return true,
        }
    }

    match (a.root(tcx, a.substs), b.root(tcx, b.substs)) {
        (Node::Leaf(a_ct), Node::Leaf(b_ct)) => {
            if a_ct.ty != b_ct.ty {
                return false;
            }

            match (a_ct.val, b_ct.val) {
                // We can just unify errors with everything to reduce the amount of
                // emitted errors here.
                (ty::ConstKind::Error(_), _) | (_, ty::ConstKind::Error(_)) => true,
                (ty::ConstKind::Param(a_param), ty::ConstKind::Param(b_param)) => {
                    a_param == b_param
                }
                (ty::ConstKind::Value(a_val), ty::ConstKind::Value(b_val)) => a_val == b_val,
                // If we have `fn a<const N: usize>() -> [u8; N + 1]` and `fn b<const M: usize>() -> [u8; 1 + M]`
                // we do not want to use `assert_eq!(a(), b())` to infer that `N` and `M` have to be `1`. This
                // means that we only allow inference variables if they are equal.
                (ty::ConstKind::Infer(a_val), ty::ConstKind::Infer(b_val)) => a_val == b_val,
                // We expand generic anonymous constants at the start of this function, so this
                // branch should only be taking when dealing with associated constants, at
                // which point directly comparing them seems like the desired behavior.
                //
                // FIXME(generic_const_exprs): This isn't actually the case.
                // We also take this branch for concrete anonymous constants and
                // expand generic anonymous constants with concrete substs.
                (ty::ConstKind::Unevaluated(a_uv), ty::ConstKind::Unevaluated(b_uv)) => {
                    a_uv == b_uv
                }
                // FIXME(generic_const_exprs): We may want to either actually try
                // to evaluate `a_ct` and `b_ct` if they are are fully concrete or something like
                // this, for now we just return false here.
                _ => false,
            }
        }
        (Node::Binop(a_op, al, ar), Node::Binop(b_op, bl, br)) if a_op == b_op => {
            try_unify(tcx, a.subtree(al), b.subtree(bl))
                && try_unify(tcx, a.subtree(ar), b.subtree(br))
        }
        (Node::UnaryOp(a_op, av), Node::UnaryOp(b_op, bv)) if a_op == b_op => {
            try_unify(tcx, a.subtree(av), b.subtree(bv))
        }
        (Node::FunctionCall(a_f, a_args), Node::FunctionCall(b_f, b_args))
            if a_args.len() == b_args.len() =>
        {
            try_unify(tcx, a.subtree(a_f), b.subtree(b_f))
                && iter::zip(a_args, b_args)
                    .all(|(&an, &bn)| try_unify(tcx, a.subtree(an), b.subtree(bn)))
        }
        (Node::Cast(a_cast_kind, a_operand, a_ty), Node::Cast(b_cast_kind, b_operand, b_ty))
            if (a_ty == b_ty) && (a_cast_kind == b_cast_kind) =>
        {
            try_unify(tcx, a.subtree(a_operand), b.subtree(b_operand))
        }
        _ => false,
    }
}
use ra_syntax::{
    ast::{self, BlockExpr, Expr, LoopBodyOwner},
    AstNode, SyntaxNode,
};

use crate::{AssistContext, AssistId, Assists};

// Assist: change_return_type_to_result
//
// Change the function's return type to Result.
//
// ```
// fn foo() -> i32<|> { 42i32 }
// ```
// ->
// ```
// fn foo() -> Result<i32, ${0:_}> { Ok(42i32) }
// ```
pub(crate) fn change_return_type_to_result(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
    let ret_type = ctx.find_node_at_offset::<ast::RetType>()?;
    // FIXME: extend to lambdas as well
    let fn_def = ret_type.syntax().parent().and_then(ast::FnDef::cast)?;

    let type_ref = &ret_type.type_ref()?;
    // Bail out if the return type is already a `Result`.
    if type_ref.syntax().text().to_string().starts_with("Result<") {
        return None;
    }

    let block_expr = &fn_def.body()?;

    acc.add(
        AssistId("change_return_type_to_result"),
        "Change return type to Result",
        type_ref.syntax().text_range(),
        |builder| {
            // Gather every expression that leaves the function (returns and
            // tail expressions) so each can be wrapped in `Ok(...)`.
            let mut tail_return_expr_collector = TailReturnCollector::new();
            tail_return_expr_collector.collect_jump_exprs(block_expr, false);
            tail_return_expr_collector.collect_tail_exprs(block_expr);

            for ret_expr_arg in tail_return_expr_collector.exprs_to_wrap {
                builder.replace_node_and_indent(&ret_expr_arg, format!("Ok({})", ret_expr_arg));
            }

            // With snippet support the error type becomes a `${0:_}`
            // placeholder; otherwise a plain `_` is inserted.
            match ctx.config.snippet_cap {
                Some(cap) => {
                    let snippet = format!("Result<{}, ${{0:_}}>", type_ref);
                    builder.replace_snippet(cap, type_ref.syntax().text_range(), snippet)
                }
                None => builder
                    .replace(type_ref.syntax().text_range(), format!("Result<{}, _>", type_ref)),
            }
        },
    )
}

/// Accumulates the syntax nodes of all expressions that need to be
/// wrapped in `Ok(...)` by the assist above.
struct TailReturnCollector {
    exprs_to_wrap: Vec<SyntaxNode>,
}

impl TailReturnCollector {
    fn new() -> Self {
        Self { exprs_to_wrap: vec![] }
    }

    /// Collect all`return` expression
    fn collect_jump_exprs(&mut self, block_expr: &BlockExpr, collect_break: bool) {
        let statements = block_expr.statements();
        for stmt in statements {
            // Both expression statements and `let` initializers can
            // contain nested blocks with `return`s.
            let expr = match &stmt {
                ast::Stmt::ExprStmt(stmt) => stmt.expr(),
                ast::Stmt::LetStmt(stmt) => stmt.initializer(),
            };
            if let Some(expr) = &expr {
                self.handle_exprs(expr, collect_break);
            }
        }

        // Browse tail expressions for each block
        if let Some(expr) = block_expr.expr() {
            if let Some(last_exprs) = get_tail_expr_from_block(&expr) {
                for last_expr in last_exprs {
                    let last_expr = match last_expr {
                        NodeType::Node(expr) | NodeType::Leaf(expr) => expr,
                    };

                    if let Some(last_expr) = Expr::cast(last_expr.clone()) {
                        self.handle_exprs(&last_expr, collect_break);
                    } else if let Some(expr_stmt) = ast::Stmt::cast(last_expr) {
                        let expr_stmt = match &expr_stmt {
                            ast::Stmt::ExprStmt(stmt) => stmt.expr(),
                            ast::Stmt::LetStmt(stmt) => stmt.initializer(),
                        };
                        if let Some(expr) = &expr_stmt {
                            self.handle_exprs(expr, collect_break);
                        }
                    }
                }
            }
        }
    }

    /// Dispatches on the expression kind: records `return` arguments (and
    /// `break` arguments when `collect_break` is set) and recurses into the
    /// bodies of blocks, `if`, loops, and `match` arms.
    fn handle_exprs(&mut self, expr: &Expr, collect_break: bool) {
        match expr {
            Expr::BlockExpr(block_expr) => {
                self.collect_jump_exprs(&block_expr, collect_break);
            }
            Expr::ReturnExpr(ret_expr) => {
                if let Some(ret_expr_arg) = &ret_expr.expr() {
                    self.exprs_to_wrap.push(ret_expr_arg.syntax().clone());
                }
            }
            Expr::BreakExpr(break_expr) if collect_break => {
                if let Some(break_expr_arg) = &break_expr.expr() {
                    self.exprs_to_wrap.push(break_expr_arg.syntax().clone());
                }
            }
            Expr::IfExpr(if_expr) => {
                for block in if_expr.blocks() {
                    self.collect_jump_exprs(&block, collect_break);
                }
            }
            Expr::LoopExpr(loop_expr) => {
                if let Some(block_expr) = loop_expr.loop_body() {
                    self.collect_jump_exprs(&block_expr, collect_break);
                }
            }
            Expr::ForExpr(for_expr) => {
                if let Some(block_expr) = for_expr.loop_body() {
                    self.collect_jump_exprs(&block_expr, collect_break);
                }
            }
            Expr::WhileExpr(while_expr) => {
                if let Some(block_expr) = while_expr.loop_body() {
                    self.collect_jump_exprs(&block_expr, collect_break);
                }
            }
            Expr::MatchExpr(match_expr) => {
                if let Some(arm_list) = match_expr.match_arm_list() {
                    arm_list.arms().filter_map(|match_arm| match_arm.expr()).for_each(|expr| {
                        self.handle_exprs(&expr, collect_break);
                    });
                }
            }
            _ => {}
        }
    }

    /// Collects the wrappable expressions of the function's tail position.
    /// `collect_break` is true here: a `break` in tail position (e.g. a
    /// trailing `loop`) leaves the function, so its argument is wrapped too.
    fn collect_tail_exprs(&mut self, block: &BlockExpr) {
        if let Some(expr) = block.expr() {
            self.handle_exprs(&expr, true);
            self.fetch_tail_exprs(&expr);
        }
    }

    /// Recursively resolves tail expressions: `Leaf`s are wrapped directly,
    /// `Node`s are descended into when they are themselves expressions.
    fn fetch_tail_exprs(&mut self, expr: &Expr) {
        if let Some(exprs) = get_tail_expr_from_block(expr) {
            for node_type in &exprs {
                match node_type {
                    NodeType::Leaf(expr) => {
                        self.exprs_to_wrap.push(expr.clone());
                    }
                    NodeType::Node(expr) => match &Expr::cast(expr.clone()) {
                        Some(last_expr) => {
                            self.fetch_tail_exprs(last_expr);
                        }
                        None => {
                            self.exprs_to_wrap.push(expr.clone());
                        }
                    },
                }
            }
        }
    }
}

/// Classification of a tail-position syntax node.
#[derive(Debug)]
enum NodeType {
    // A final expression that should be wrapped in `Ok(...)` as-is.
    Leaf(SyntaxNode),
    // A node that may contain further tail expressions and needs descent.
    Node(SyntaxNode),
}

/// Get a tail expression inside a block
fn get_tail_expr_from_block(expr: &Expr) -> Option<Vec<NodeType>> {
    match expr {
        Expr::IfExpr(if_expr) => {
            // Collect the tail of every branch of the `if`/`else` chain.
            let mut nodes = vec![];
            for block in if_expr.blocks() {
                if let Some(block_expr) = block.expr() {
                    if let Some(tail_exprs) = get_tail_expr_from_block(&block_expr) {
                        nodes.extend(tail_exprs);
                    }
                } else if let Some(last_expr) = block.syntax().last_child() {
                    nodes.push(NodeType::Node(last_expr));
                } else {
                    nodes.push(NodeType::Node(block.syntax().clone()));
                }
            }
            Some(nodes)
        }
        Expr::LoopExpr(loop_expr) => {
            loop_expr.syntax().last_child().map(|lc| vec![NodeType::Node(lc)])
        }
        Expr::ForExpr(for_expr) => {
            for_expr.syntax().last_child().map(|lc| vec![NodeType::Node(lc)])
        }
        Expr::WhileExpr(while_expr) => {
            while_expr.syntax().last_child().map(|lc| vec![NodeType::Node(lc)])
        }
        Expr::BlockExpr(block_expr) => {
            block_expr.expr().map(|lc| vec![NodeType::Node(lc.syntax().clone())])
        }
        Expr::MatchExpr(match_expr) => {
            // Each arm contributes its own tail; `return`/`break` arms are
            // kept whole so their arguments are found on descent.
            let arm_list = match_expr.match_arm_list()?;
            let arms: Vec<NodeType> = arm_list
                .arms()
                .filter_map(|match_arm| match_arm.expr())
                .map(|expr| match expr {
                    Expr::ReturnExpr(ret_expr) => NodeType::Node(ret_expr.syntax().clone()),
                    Expr::BreakExpr(break_expr) => NodeType::Node(break_expr.syntax().clone()),
                    _ => match expr.syntax().last_child() {
                        Some(last_expr) => NodeType::Node(last_expr),
                        None => NodeType::Node(expr.syntax().clone()),
                    },
                })
                .collect();
            Some(arms)
        }
        Expr::BreakExpr(expr) => expr.expr().map(|e| vec![NodeType::Leaf(e.syntax().clone())]),
        Expr::ReturnExpr(ret_expr) => Some(vec![NodeType::Node(ret_expr.syntax().clone())]),
        // All remaining expression kinds are terminal: wrap them directly.
        Expr::CallExpr(call_expr) => Some(vec![NodeType::Leaf(call_expr.syntax().clone())]),
        Expr::Literal(lit_expr) => Some(vec![NodeType::Leaf(lit_expr.syntax().clone())]),
        Expr::TupleExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::ArrayExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::ParenExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::PathExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::Label(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::RecordLit(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::IndexExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::MethodCallExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::AwaitExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::CastExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::RefExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::PrefixExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::RangeExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::BinExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::MacroCall(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        Expr::BoxExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]),
        _ => None,
    }
}

#[cfg(test)]
mod tests {
    use crate::tests::{check_assist, check_assist_not_applicable};

    use super::*;

    #[test]
    fn change_return_type_to_result_simple() {
        check_assist(
            change_return_type_to_result,
            r#"fn foo() -> i3<|>2 { let test = "test"; return 42i32; }"#,
            r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; return Ok(42i32); }"#,
        );
    }

    #[test]
    fn change_return_type_to_result_simple_return_type() {
        check_assist(
            change_return_type_to_result,
            r#"fn foo() -> i32<|> { let
test = "test"; return 42i32; }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; return Ok(42i32); }"#, ); } #[test] fn change_return_type_to_result_simple_return_type_bad_cursor() { check_assist_not_applicable( change_return_type_to_result, r#"fn foo() -> i32 { let test = "test";<|> return 42i32; }"#, ); } #[test] fn change_return_type_to_result_simple_with_cursor() { check_assist( change_return_type_to_result, r#"fn foo() -> <|>i32 { let test = "test"; return 42i32; }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; return Ok(42i32); }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail() { check_assist( change_return_type_to_result, r#"fn foo() -><|> i32 { let test = "test"; 42i32 }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; Ok(42i32) }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_only() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { 42i32 }"#, r#"fn foo() -> Result<i32, ${0:_}> { Ok(42i32) }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_block_like() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { if true { 42i32 } else { 24i32 } }"#, r#"fn foo() -> Result<i32, ${0:_}> { if true { Ok(42i32) } else { Ok(24i32) } }"#, ); } #[test] fn change_return_type_to_result_simple_with_nested_if() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { if true { if false { 1 } else { 2 } } else { 24i32 } }"#, r#"fn foo() -> Result<i32, ${0:_}> { if true { if false { Ok(1) } else { Ok(2) } } else { Ok(24i32) } }"#, ); } #[test] fn change_return_type_to_result_simple_with_await() { check_assist( change_return_type_to_result, r#"async fn foo() -> i<|>32 { if true { if false { 1.await } else { 2.await } } else { 24i32.await } }"#, r#"async fn foo() -> Result<i32, ${0:_}> { if true { if false { Ok(1.await) } else { Ok(2.await) } } else { Ok(24i32.await) } }"#, ); } #[test] fn change_return_type_to_result_simple_with_array() { 
check_assist( change_return_type_to_result, r#"fn foo() -> [i32;<|> 3] { [1, 2, 3] }"#, r#"fn foo() -> Result<[i32; 3], ${0:_}> { Ok([1, 2, 3]) }"#, ); } #[test] fn change_return_type_to_result_simple_with_cast() { check_assist( change_return_type_to_result, r#"fn foo() -<|>> i32 { if true { if false { 1 as i32 } else { 2 as i32 } } else { 24 as i32 } }"#, r#"fn foo() -> Result<i32, ${0:_}> { if true { if false { Ok(1 as i32) } else { Ok(2 as i32) } } else { Ok(24 as i32) } }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_block_like_match() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = 5; match my_var { 5 => 42i32, _ => 24i32, } }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = 5; match my_var { 5 => Ok(42i32), _ => Ok(24i32), } }"#, ); } #[test] fn change_return_type_to_result_simple_with_loop_with_tail() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = 5; loop { println!("test"); 5 } my_var }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = 5; loop { println!("test"); 5 } Ok(my_var) }"#, ); } #[test] fn change_return_type_to_result_simple_with_loop_in_let_stmt() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = let x = loop { break 1; }; my_var }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = let x = loop { break 1; }; Ok(my_var) }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_block_like_match_return_expr() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = 5; let res = match my_var { 5 => 42i32, _ => return 24i32, }; res }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = 5; let res = match my_var { 5 => 42i32, _ => return Ok(24i32), }; Ok(res) }"#, ); check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = 5; let res = if my_var == 5 { 42i32 } else { return 24i32; }; res }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = 5; let res = if my_var 
== 5 { 42i32 } else { return Ok(24i32); }; Ok(res) }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_block_like_match_deeper() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = 5; match my_var { 5 => { if true { 42i32 } else { 25i32 } }, _ => { let test = "test"; if test == "test" { return bar(); } 53i32 }, } }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = 5; match my_var { 5 => { if true { Ok(42i32) } else { Ok(25i32) } }, _ => { let test = "test"; if test == "test" { return Ok(bar()); } Ok(53i32) }, } }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_block_like_early_return() { check_assist( change_return_type_to_result, r#"fn foo() -> i<|>32 { let test = "test"; if test == "test" { return 24i32; } 53i32 }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; if test == "test" { return Ok(24i32); } Ok(53i32) }"#, ); } #[test] fn change_return_type_to_result_simple_with_closure() { check_assist( change_return_type_to_result, r#"fn foo(the_field: u32) -><|> u32 { let true_closure = || { return true; }; if the_field < 5 { let mut i = 0; if true_closure() { return 99; } else { return 0; } } the_field }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { let true_closure = || { return true; }; if the_field < 5 { let mut i = 0; if true_closure() { return Ok(99); } else { return Ok(0); } } Ok(the_field) }"#, ); check_assist( change_return_type_to_result, r#"fn foo(the_field: u32) -> u32<|> { let true_closure = || { return true; }; if the_field < 5 { let mut i = 0; if true_closure() { return 99; } else { return 0; } } let t = None; t.unwrap_or_else(|| the_field) }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { let true_closure = || { return true; }; if the_field < 5 { let mut i = 0; if true_closure() { return Ok(99); } else { return Ok(0); } } let t = None; Ok(t.unwrap_or_else(|| the_field)) }"#, ); } #[test] fn change_return_type_to_result_simple_with_weird_forms() { check_assist( 
change_return_type_to_result, r#"fn foo() -> i32<|> { let test = "test"; if test == "test" { return 24i32; } let mut i = 0; loop { if i == 1 { break 55; } i += 1; } }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; if test == "test" { return Ok(24i32); } let mut i = 0; loop { if i == 1 { break Ok(55); } i += 1; } }"#, ); check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let test = "test"; if test == "test" { return 24i32; } let mut i = 0; loop { loop { if i == 1 { break 55; } i += 1; } } }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; if test == "test" { return Ok(24i32); } let mut i = 0; loop { loop { if i == 1 { break Ok(55); } i += 1; } } }"#, ); check_assist( change_return_type_to_result, r#"fn foo() -> i3<|>2 { let test = "test"; let other = 5; if test == "test" { let res = match other { 5 => 43, _ => return 56, }; } let mut i = 0; loop { loop { if i == 1 { break 55; } i += 1; } } }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; let other = 5; if test == "test" { let res = match other { 5 => 43, _ => return Ok(56), }; } let mut i = 0; loop { loop { if i == 1 { break Ok(55); } i += 1; } } }"#, ); check_assist( change_return_type_to_result, r#"fn foo(the_field: u32) -> u32<|> { if the_field < 5 { let mut i = 0; loop { if i > 5 { return 55u32; } i += 3; } match i { 5 => return 99, _ => return 0, }; } the_field }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { if the_field < 5 { let mut i = 0; loop { if i > 5 { return Ok(55u32); } i += 3; } match i { 5 => return Ok(99), _ => return Ok(0), }; } Ok(the_field) }"#, ); check_assist( change_return_type_to_result, r#"fn foo(the_field: u32) -> u3<|>2 { if the_field < 5 { let mut i = 0; match i { 5 => return 99, _ => return 0, } } the_field }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { if the_field < 5 { let mut i = 0; match i { 5 => return Ok(99), _ => return Ok(0), } } Ok(the_field) }"#, ); check_assist( change_return_type_to_result, r#"fn 
foo(the_field: u32) -> u32<|> { if the_field < 5 { let mut i = 0; if i == 5 { return 99 } else { return 0 } } the_field }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { if the_field < 5 { let mut i = 0; if i == 5 { return Ok(99) } else { return Ok(0) } } Ok(the_field) }"#, ); check_assist( change_return_type_to_result, r#"fn foo(the_field: u32) -> <|>u32 { if the_field < 5 { let mut i = 0; if i == 5 { return 99; } else { return 0; } } the_field }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { if the_field < 5 { let mut i = 0; if i == 5 { return Ok(99); } else { return Ok(0); } } Ok(the_field) }"#, ); } } do not suggest assist for return type to result in bad case #4826 Signed-off-by: Benjamin Coenen <481f8cf7e6db3368f7a1a2be85b2c44f6c1b4e1e@users.noreply.github.com> use ra_syntax::{ ast::{self, BlockExpr, Expr, LoopBodyOwner}, AstNode, SyntaxNode, }; use crate::{AssistContext, AssistId, Assists}; // Assist: change_return_type_to_result // // Change the function's return type to Result. 
// // ``` // fn foo() -> i32<|> { 42i32 } // ``` // -> // ``` // fn foo() -> Result<i32, ${0:_}> { Ok(42i32) } // ``` pub(crate) fn change_return_type_to_result(acc: &mut Assists, ctx: &AssistContext) -> Option<()> { let ret_type = ctx.find_node_at_offset::<ast::RetType>()?; // FIXME: extend to lambdas as well let fn_def = ret_type.syntax().parent().and_then(ast::FnDef::cast)?; let type_ref = &ret_type.type_ref()?; let ret_type_str = type_ref.syntax().text().to_string(); let first_part_ret_type = ret_type_str.splitn(2, '<').next(); if let Some(ret_type_first_part) = first_part_ret_type { if ret_type_first_part.ends_with("Result") { return None; } } let block_expr = &fn_def.body()?; acc.add( AssistId("change_return_type_to_result"), "Change return type to Result", type_ref.syntax().text_range(), |builder| { let mut tail_return_expr_collector = TailReturnCollector::new(); tail_return_expr_collector.collect_jump_exprs(block_expr, false); tail_return_expr_collector.collect_tail_exprs(block_expr); for ret_expr_arg in tail_return_expr_collector.exprs_to_wrap { builder.replace_node_and_indent(&ret_expr_arg, format!("Ok({})", ret_expr_arg)); } match ctx.config.snippet_cap { Some(cap) => { let snippet = format!("Result<{}, ${{0:_}}>", type_ref); builder.replace_snippet(cap, type_ref.syntax().text_range(), snippet) } None => builder .replace(type_ref.syntax().text_range(), format!("Result<{}, _>", type_ref)), } }, ) } struct TailReturnCollector { exprs_to_wrap: Vec<SyntaxNode>, } impl TailReturnCollector { fn new() -> Self { Self { exprs_to_wrap: vec![] } } /// Collect all`return` expression fn collect_jump_exprs(&mut self, block_expr: &BlockExpr, collect_break: bool) { let statements = block_expr.statements(); for stmt in statements { let expr = match &stmt { ast::Stmt::ExprStmt(stmt) => stmt.expr(), ast::Stmt::LetStmt(stmt) => stmt.initializer(), }; if let Some(expr) = &expr { self.handle_exprs(expr, collect_break); } } // Browse tail expressions for each block if let 
Some(expr) = block_expr.expr() { if let Some(last_exprs) = get_tail_expr_from_block(&expr) { for last_expr in last_exprs { let last_expr = match last_expr { NodeType::Node(expr) | NodeType::Leaf(expr) => expr, }; if let Some(last_expr) = Expr::cast(last_expr.clone()) { self.handle_exprs(&last_expr, collect_break); } else if let Some(expr_stmt) = ast::Stmt::cast(last_expr) { let expr_stmt = match &expr_stmt { ast::Stmt::ExprStmt(stmt) => stmt.expr(), ast::Stmt::LetStmt(stmt) => stmt.initializer(), }; if let Some(expr) = &expr_stmt { self.handle_exprs(expr, collect_break); } } } } } } fn handle_exprs(&mut self, expr: &Expr, collect_break: bool) { match expr { Expr::BlockExpr(block_expr) => { self.collect_jump_exprs(&block_expr, collect_break); } Expr::ReturnExpr(ret_expr) => { if let Some(ret_expr_arg) = &ret_expr.expr() { self.exprs_to_wrap.push(ret_expr_arg.syntax().clone()); } } Expr::BreakExpr(break_expr) if collect_break => { if let Some(break_expr_arg) = &break_expr.expr() { self.exprs_to_wrap.push(break_expr_arg.syntax().clone()); } } Expr::IfExpr(if_expr) => { for block in if_expr.blocks() { self.collect_jump_exprs(&block, collect_break); } } Expr::LoopExpr(loop_expr) => { if let Some(block_expr) = loop_expr.loop_body() { self.collect_jump_exprs(&block_expr, collect_break); } } Expr::ForExpr(for_expr) => { if let Some(block_expr) = for_expr.loop_body() { self.collect_jump_exprs(&block_expr, collect_break); } } Expr::WhileExpr(while_expr) => { if let Some(block_expr) = while_expr.loop_body() { self.collect_jump_exprs(&block_expr, collect_break); } } Expr::MatchExpr(match_expr) => { if let Some(arm_list) = match_expr.match_arm_list() { arm_list.arms().filter_map(|match_arm| match_arm.expr()).for_each(|expr| { self.handle_exprs(&expr, collect_break); }); } } _ => {} } } fn collect_tail_exprs(&mut self, block: &BlockExpr) { if let Some(expr) = block.expr() { self.handle_exprs(&expr, true); self.fetch_tail_exprs(&expr); } } fn fetch_tail_exprs(&mut self, expr: 
&Expr) { if let Some(exprs) = get_tail_expr_from_block(expr) { for node_type in &exprs { match node_type { NodeType::Leaf(expr) => { self.exprs_to_wrap.push(expr.clone()); } NodeType::Node(expr) => match &Expr::cast(expr.clone()) { Some(last_expr) => { self.fetch_tail_exprs(last_expr); } None => { self.exprs_to_wrap.push(expr.clone()); } }, } } } } } #[derive(Debug)] enum NodeType { Leaf(SyntaxNode), Node(SyntaxNode), } /// Get a tail expression inside a block fn get_tail_expr_from_block(expr: &Expr) -> Option<Vec<NodeType>> { match expr { Expr::IfExpr(if_expr) => { let mut nodes = vec![]; for block in if_expr.blocks() { if let Some(block_expr) = block.expr() { if let Some(tail_exprs) = get_tail_expr_from_block(&block_expr) { nodes.extend(tail_exprs); } } else if let Some(last_expr) = block.syntax().last_child() { nodes.push(NodeType::Node(last_expr)); } else { nodes.push(NodeType::Node(block.syntax().clone())); } } Some(nodes) } Expr::LoopExpr(loop_expr) => { loop_expr.syntax().last_child().map(|lc| vec![NodeType::Node(lc)]) } Expr::ForExpr(for_expr) => { for_expr.syntax().last_child().map(|lc| vec![NodeType::Node(lc)]) } Expr::WhileExpr(while_expr) => { while_expr.syntax().last_child().map(|lc| vec![NodeType::Node(lc)]) } Expr::BlockExpr(block_expr) => { block_expr.expr().map(|lc| vec![NodeType::Node(lc.syntax().clone())]) } Expr::MatchExpr(match_expr) => { let arm_list = match_expr.match_arm_list()?; let arms: Vec<NodeType> = arm_list .arms() .filter_map(|match_arm| match_arm.expr()) .map(|expr| match expr { Expr::ReturnExpr(ret_expr) => NodeType::Node(ret_expr.syntax().clone()), Expr::BreakExpr(break_expr) => NodeType::Node(break_expr.syntax().clone()), _ => match expr.syntax().last_child() { Some(last_expr) => NodeType::Node(last_expr), None => NodeType::Node(expr.syntax().clone()), }, }) .collect(); Some(arms) } Expr::BreakExpr(expr) => expr.expr().map(|e| vec![NodeType::Leaf(e.syntax().clone())]), Expr::ReturnExpr(ret_expr) => 
Some(vec![NodeType::Node(ret_expr.syntax().clone())]), Expr::CallExpr(call_expr) => Some(vec![NodeType::Leaf(call_expr.syntax().clone())]), Expr::Literal(lit_expr) => Some(vec![NodeType::Leaf(lit_expr.syntax().clone())]), Expr::TupleExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::ArrayExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::ParenExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::PathExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::Label(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::RecordLit(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::IndexExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::MethodCallExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::AwaitExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::CastExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::RefExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::PrefixExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::RangeExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::BinExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::MacroCall(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), Expr::BoxExpr(expr) => Some(vec![NodeType::Leaf(expr.syntax().clone())]), _ => None, } } #[cfg(test)] mod tests { use crate::tests::{check_assist, check_assist_not_applicable}; use super::*; #[test] fn change_return_type_to_result_simple() { check_assist( change_return_type_to_result, r#"fn foo() -> i3<|>2 { let test = "test"; return 42i32; }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; return Ok(42i32); }"#, ); } #[test] fn change_return_type_to_result_simple_return_type() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let test = "test"; return 42i32; }"#, r#"fn foo() -> Result<i32, ${0:_}> { let 
test = "test"; return Ok(42i32); }"#, ); } #[test] fn change_return_type_to_result_simple_return_type_bad_cursor() { check_assist_not_applicable( change_return_type_to_result, r#"fn foo() -> i32 { let test = "test";<|> return 42i32; }"#, ); } #[test] fn change_return_type_to_result_simple_return_type_already_result_std() { check_assist_not_applicable( change_return_type_to_result, r#"fn foo() -> std::result::Result<i32<|>, String> { let test = "test"; return 42i32; }"#, ); } #[test] fn change_return_type_to_result_simple_return_type_already_result() { check_assist_not_applicable( change_return_type_to_result, r#"fn foo() -> Result<i32<|>, String> { let test = "test"; return 42i32; }"#, ); } #[test] fn change_return_type_to_result_simple_with_cursor() { check_assist( change_return_type_to_result, r#"fn foo() -> <|>i32 { let test = "test"; return 42i32; }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; return Ok(42i32); }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail() { check_assist( change_return_type_to_result, r#"fn foo() -><|> i32 { let test = "test"; 42i32 }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; Ok(42i32) }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_only() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { 42i32 }"#, r#"fn foo() -> Result<i32, ${0:_}> { Ok(42i32) }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_block_like() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { if true { 42i32 } else { 24i32 } }"#, r#"fn foo() -> Result<i32, ${0:_}> { if true { Ok(42i32) } else { Ok(24i32) } }"#, ); } #[test] fn change_return_type_to_result_simple_with_nested_if() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { if true { if false { 1 } else { 2 } } else { 24i32 } }"#, r#"fn foo() -> Result<i32, ${0:_}> { if true { if false { Ok(1) } else { Ok(2) } } else { Ok(24i32) } }"#, ); } #[test] fn 
change_return_type_to_result_simple_with_await() { check_assist( change_return_type_to_result, r#"async fn foo() -> i<|>32 { if true { if false { 1.await } else { 2.await } } else { 24i32.await } }"#, r#"async fn foo() -> Result<i32, ${0:_}> { if true { if false { Ok(1.await) } else { Ok(2.await) } } else { Ok(24i32.await) } }"#, ); } #[test] fn change_return_type_to_result_simple_with_array() { check_assist( change_return_type_to_result, r#"fn foo() -> [i32;<|> 3] { [1, 2, 3] }"#, r#"fn foo() -> Result<[i32; 3], ${0:_}> { Ok([1, 2, 3]) }"#, ); } #[test] fn change_return_type_to_result_simple_with_cast() { check_assist( change_return_type_to_result, r#"fn foo() -<|>> i32 { if true { if false { 1 as i32 } else { 2 as i32 } } else { 24 as i32 } }"#, r#"fn foo() -> Result<i32, ${0:_}> { if true { if false { Ok(1 as i32) } else { Ok(2 as i32) } } else { Ok(24 as i32) } }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_block_like_match() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = 5; match my_var { 5 => 42i32, _ => 24i32, } }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = 5; match my_var { 5 => Ok(42i32), _ => Ok(24i32), } }"#, ); } #[test] fn change_return_type_to_result_simple_with_loop_with_tail() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = 5; loop { println!("test"); 5 } my_var }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = 5; loop { println!("test"); 5 } Ok(my_var) }"#, ); } #[test] fn change_return_type_to_result_simple_with_loop_in_let_stmt() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = let x = loop { break 1; }; my_var }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = let x = loop { break 1; }; Ok(my_var) }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_block_like_match_return_expr() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = 5; let res = match my_var { 5 
=> 42i32, _ => return 24i32, }; res }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = 5; let res = match my_var { 5 => 42i32, _ => return Ok(24i32), }; Ok(res) }"#, ); check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = 5; let res = if my_var == 5 { 42i32 } else { return 24i32; }; res }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = 5; let res = if my_var == 5 { 42i32 } else { return Ok(24i32); }; Ok(res) }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_block_like_match_deeper() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let my_var = 5; match my_var { 5 => { if true { 42i32 } else { 25i32 } }, _ => { let test = "test"; if test == "test" { return bar(); } 53i32 }, } }"#, r#"fn foo() -> Result<i32, ${0:_}> { let my_var = 5; match my_var { 5 => { if true { Ok(42i32) } else { Ok(25i32) } }, _ => { let test = "test"; if test == "test" { return Ok(bar()); } Ok(53i32) }, } }"#, ); } #[test] fn change_return_type_to_result_simple_with_tail_block_like_early_return() { check_assist( change_return_type_to_result, r#"fn foo() -> i<|>32 { let test = "test"; if test == "test" { return 24i32; } 53i32 }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; if test == "test" { return Ok(24i32); } Ok(53i32) }"#, ); } #[test] fn change_return_type_to_result_simple_with_closure() { check_assist( change_return_type_to_result, r#"fn foo(the_field: u32) -><|> u32 { let true_closure = || { return true; }; if the_field < 5 { let mut i = 0; if true_closure() { return 99; } else { return 0; } } the_field }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { let true_closure = || { return true; }; if the_field < 5 { let mut i = 0; if true_closure() { return Ok(99); } else { return Ok(0); } } Ok(the_field) }"#, ); check_assist( change_return_type_to_result, r#"fn foo(the_field: u32) -> u32<|> { let true_closure = || { return true; }; if the_field < 5 { let mut i = 0; if true_closure() { return 99; } 
else { return 0; } } let t = None; t.unwrap_or_else(|| the_field) }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { let true_closure = || { return true; }; if the_field < 5 { let mut i = 0; if true_closure() { return Ok(99); } else { return Ok(0); } } let t = None; Ok(t.unwrap_or_else(|| the_field)) }"#, ); } #[test] fn change_return_type_to_result_simple_with_weird_forms() { check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let test = "test"; if test == "test" { return 24i32; } let mut i = 0; loop { if i == 1 { break 55; } i += 1; } }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; if test == "test" { return Ok(24i32); } let mut i = 0; loop { if i == 1 { break Ok(55); } i += 1; } }"#, ); check_assist( change_return_type_to_result, r#"fn foo() -> i32<|> { let test = "test"; if test == "test" { return 24i32; } let mut i = 0; loop { loop { if i == 1 { break 55; } i += 1; } } }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; if test == "test" { return Ok(24i32); } let mut i = 0; loop { loop { if i == 1 { break Ok(55); } i += 1; } } }"#, ); check_assist( change_return_type_to_result, r#"fn foo() -> i3<|>2 { let test = "test"; let other = 5; if test == "test" { let res = match other { 5 => 43, _ => return 56, }; } let mut i = 0; loop { loop { if i == 1 { break 55; } i += 1; } } }"#, r#"fn foo() -> Result<i32, ${0:_}> { let test = "test"; let other = 5; if test == "test" { let res = match other { 5 => 43, _ => return Ok(56), }; } let mut i = 0; loop { loop { if i == 1 { break Ok(55); } i += 1; } } }"#, ); check_assist( change_return_type_to_result, r#"fn foo(the_field: u32) -> u32<|> { if the_field < 5 { let mut i = 0; loop { if i > 5 { return 55u32; } i += 3; } match i { 5 => return 99, _ => return 0, }; } the_field }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { if the_field < 5 { let mut i = 0; loop { if i > 5 { return Ok(55u32); } i += 3; } match i { 5 => return Ok(99), _ => return Ok(0), }; } Ok(the_field) }"#, 
); check_assist( change_return_type_to_result, r#"fn foo(the_field: u32) -> u3<|>2 { if the_field < 5 { let mut i = 0; match i { 5 => return 99, _ => return 0, } } the_field }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { if the_field < 5 { let mut i = 0; match i { 5 => return Ok(99), _ => return Ok(0), } } Ok(the_field) }"#, ); check_assist( change_return_type_to_result, r#"fn foo(the_field: u32) -> u32<|> { if the_field < 5 { let mut i = 0; if i == 5 { return 99 } else { return 0 } } the_field }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { if the_field < 5 { let mut i = 0; if i == 5 { return Ok(99) } else { return Ok(0) } } Ok(the_field) }"#, ); check_assist( change_return_type_to_result, r#"fn foo(the_field: u32) -> <|>u32 { if the_field < 5 { let mut i = 0; if i == 5 { return 99; } else { return 0; } } the_field }"#, r#"fn foo(the_field: u32) -> Result<u32, ${0:_}> { if the_field < 5 { let mut i = 0; if i == 5 { return Ok(99); } else { return Ok(0); } } Ok(the_field) }"#, ); } }
extern crate owning_ref; extern crate sodiumoxide; extern crate r2d2; extern crate r2d2_postgres; use ::std::sync::RwLock; use postgres; use self::r2d2_postgres::{TlsMode, PostgresConnectionManager}; use serde_json; use self::owning_ref::OwningHandle; use std::rc::Rc; use std::time::Duration; use errors::wallet::WalletStorageError; use errors::common::CommonError; use wql::language; use wql::query; use wql::transaction; use wql::storage::{StorageIterator, WalletStorage, StorageRecord, EncryptedValue, Tag, TagName}; use self::r2d2_postgres::r2d2::Pool; fn default_true() -> bool { true } fn default_false() -> bool { false } #[derive(Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "camelCase")] pub struct RecordOptions { #[serde(default = "default_false")] retrieve_type: bool, #[serde(default = "default_true")] retrieve_value: bool, #[serde(default = "default_false")] retrieve_tags: bool, } impl RecordOptions { pub fn id() -> String { let options = RecordOptions { retrieve_type: false, retrieve_value: false, retrieve_tags: false, }; serde_json::to_string(&options).unwrap() } pub fn id_value() -> String { let options = RecordOptions { retrieve_type: false, retrieve_value: true, retrieve_tags: false, }; serde_json::to_string(&options).unwrap() } } impl Default for RecordOptions { fn default() -> RecordOptions { RecordOptions { retrieve_type: false, retrieve_value: true, retrieve_tags: false, } } } #[derive(Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "camelCase")] pub struct SearchOptions { #[serde(default = "default_true")] retrieve_records: bool, #[serde(default = "default_false")] retrieve_total_count: bool, #[serde(default = "default_false")] retrieve_type: bool, #[serde(default = "default_true")] retrieve_value: bool, #[serde(default = "default_false")] retrieve_tags: bool, } impl SearchOptions { pub fn id_value() -> String { let options = SearchOptions { retrieve_records: true, retrieve_total_count: true, retrieve_type: true, 
retrieve_value: true, retrieve_tags: false, }; serde_json::to_string(&options).unwrap() } } impl Default for SearchOptions { fn default() -> SearchOptions { SearchOptions { retrieve_records: true, retrieve_total_count: false, retrieve_type: false, retrieve_value: true, retrieve_tags: false, } } } const _POSTGRES_DB: &str = "postgres"; const _WALLETS_DB: &str = "wallets"; const _PLAIN_TAGS_QUERY: &str = "SELECT name, value from tags_plaintext where item_id = $1"; const _ENCRYPTED_TAGS_QUERY: &str = "SELECT name, value from tags_encrypted where item_id = $1"; const _PLAIN_TAGS_QUERY_MULTI: &str = "SELECT name, value from tags_plaintext where item_id = $1 and wallet_id = $2"; const _ENCRYPTED_TAGS_QUERY_MULTI: &str = "SELECT name, value from tags_encrypted where item_id = $1 and wallet_id = $2"; const _CREATE_WALLET_DATABASE: &str = "CREATE DATABASE \"$1\""; const _CREATE_WALLETS_DATABASE: &str = "CREATE DATABASE wallets"; // Note: wallet id length was constrained before by postgres database name length to 64 characters, keeping the same restrictions const _CREATE_SCHEMA: [&str; 12] = [ "CREATE TABLE IF NOT EXISTS metadata ( id BIGSERIAL PRIMARY KEY, value BYTEA NOT NULL )", "CREATE UNIQUE INDEX IF NOT EXISTS ux_metadata_values ON metadata(value)", "CREATE TABLE IF NOT EXISTS items( id BIGSERIAL PRIMARY KEY, type BYTEA NOT NULL, name BYTEA NOT NULL, value BYTEA NOT NULL, key BYTEA NOT NULL )", "CREATE UNIQUE INDEX IF NOT EXISTS ux_items_type_name ON items(type, name)", "CREATE TABLE IF NOT EXISTS tags_encrypted( name BYTEA NOT NULL, value BYTEA NOT NULL, item_id BIGINT NOT NULL, PRIMARY KEY(name, item_id), FOREIGN KEY(item_id) REFERENCES items(id) ON DELETE CASCADE ON UPDATE CASCADE )", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_name ON tags_encrypted(name)", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_value ON tags_encrypted(value)", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_item_id ON tags_encrypted(item_id)", "CREATE TABLE IF NOT EXISTS tags_plaintext( 
name BYTEA NOT NULL, value TEXT NOT NULL, item_id BIGINT NOT NULL, PRIMARY KEY(name, item_id), FOREIGN KEY(item_id) REFERENCES items(id) ON DELETE CASCADE ON UPDATE CASCADE )", "CREATE INDEX IF NOT EXISTS ix_tags_plaintext_name ON tags_plaintext(name)", "CREATE INDEX IF NOT EXISTS ix_tags_plaintext_value ON tags_plaintext(value)", "CREATE INDEX IF NOT EXISTS ix_tags_plaintext_item_id ON tags_plaintext(item_id)" ]; const _CREATE_SCHEMA_MULTI: [&str; 14] = [ "CREATE TABLE IF NOT EXISTS metadata ( wallet_id VARCHAR(64) NOT NULL, value BYTEA NOT NULL, PRIMARY KEY(wallet_id) )", "CREATE UNIQUE INDEX IF NOT EXISTS ux_metadata_wallet_id_id ON metadata(wallet_id)", "CREATE UNIQUE INDEX IF NOT EXISTS ux_metadata_values ON metadata(wallet_id, value)", "CREATE TABLE IF NOT EXISTS items( wallet_id VARCHAR(64) NOT NULL, id BIGSERIAL NOT NULL, type BYTEA NOT NULL, name BYTEA NOT NULL, value BYTEA NOT NULL, key BYTEA NOT NULL, PRIMARY KEY(wallet_id, id) )", "CREATE UNIQUE INDEX IF NOT EXISTS ux_items_wallet_id_id ON items(wallet_id, id)", "CREATE UNIQUE INDEX IF NOT EXISTS ux_items_type_name ON items(wallet_id, type, name)", "CREATE TABLE IF NOT EXISTS tags_encrypted( wallet_id VARCHAR(64) NOT NULL, name BYTEA NOT NULL, value BYTEA NOT NULL, item_id BIGINT NOT NULL, PRIMARY KEY(wallet_id, name, item_id), FOREIGN KEY(wallet_id, item_id) REFERENCES items(wallet_id, id) ON DELETE CASCADE ON UPDATE CASCADE )", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_name ON tags_encrypted(wallet_id, name)", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_value ON tags_encrypted(wallet_id, value)", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_wallet_id_item_id ON tags_encrypted(wallet_id, item_id)", "CREATE TABLE IF NOT EXISTS tags_plaintext( wallet_id VARCHAR(64) NOT NULL, name BYTEA NOT NULL, value TEXT NOT NULL, item_id BIGINT NOT NULL, PRIMARY KEY(wallet_id, name, item_id), FOREIGN KEY(wallet_id, item_id) REFERENCES items(wallet_id, id) ON DELETE CASCADE ON UPDATE CASCADE )", "CREATE INDEX IF 
NOT EXISTS ix_tags_plaintext_name ON tags_plaintext(wallet_id, name)", "CREATE INDEX IF NOT EXISTS ix_tags_plaintext_value ON tags_plaintext(wallet_id, value)", "CREATE INDEX IF NOT EXISTS ix_tags_plaintext_wallet_id_item_id ON tags_plaintext(wallet_id, item_id)" ]; const _DROP_WALLET_DATABASE: &str = "DROP DATABASE \"$1\""; const _DROP_SCHEMA: [&str; 4] = [ "DROP TABLE tags_plaintext", "DROP TABLE tags_encrypted", "DROP TABLE items", "DROP TABLE metadata" ]; const _DELETE_WALLET_MULTI: [&str; 4] = [ "DELETE FROM tags_plaintext WHERE wallet_id = $1", "DELETE FROM tags_encrypted WHERE wallet_id = $1", "DELETE FROM items WHERE wallet_id = $1", "DELETE FROM metadata WHERE wallet_id = $1" ]; #[derive(Debug)] struct TagRetriever<'a> { plain_tags_stmt: postgres::stmt::Statement<'a>, encrypted_tags_stmt: postgres::stmt::Statement<'a>, wallet_id: Option<String>, } type TagRetrieverOwned = OwningHandle<Rc<r2d2::PooledConnection<PostgresConnectionManager>>, Box<TagRetriever<'static>>>; impl<'a> TagRetriever<'a> { fn new_owned(conn: Rc<r2d2::PooledConnection<PostgresConnectionManager>>, wallet_id: Option<String>) -> Result<TagRetrieverOwned, WalletStorageError> { OwningHandle::try_new(conn.clone(), |conn| -> Result<_, postgres::Error> { let (plain_tags_stmt, encrypted_tags_stmt) = unsafe { match wallet_id { Some(_) => ((*conn).prepare(_PLAIN_TAGS_QUERY_MULTI)?, (*conn).prepare(_ENCRYPTED_TAGS_QUERY_MULTI)?), None => ((*conn).prepare(_PLAIN_TAGS_QUERY)?, (*conn).prepare(_ENCRYPTED_TAGS_QUERY)?) } }; let tr = TagRetriever { plain_tags_stmt, encrypted_tags_stmt, wallet_id, }; Ok(Box::new(tr)) }).map_err(WalletStorageError::from) } fn retrieve(&mut self, id: i64) -> Result<Vec<Tag>, WalletStorageError> { let mut tags = Vec::new(); let plain_results = match self.wallet_id { Some(ref w_id) => self.plain_tags_stmt.query(&[&id, &w_id])?, None => self.plain_tags_stmt.query(&[&id])? 
    };
    // collect plaintext tags for the item
    let mut iter_plain = plain_results.iter();
    while let Some(res) = iter_plain.next() {
        let row = res;
        tags.push(Tag::PlainText(row.get(0), row.get(1)));
    }
    // collect encrypted tags, scoping by wallet_id when one is configured
    let encrypted_results = match self.wallet_id {
        Some(ref w_id) => self.encrypted_tags_stmt.query(&[&id, &w_id])?,
        None => self.encrypted_tags_stmt.query(&[&id])?
    };
    let mut iter_encrypted = encrypted_results.iter();
    while let Some(res) = iter_encrypted.next() {
        let row = res;
        tags.push(Tag::Encrypted(row.get(0), row.get(1)));
    }
    Ok(tags)
    }
}

/// Iterator over the records produced by a wallet search query.
///
/// `rows` owns the whole borrow chain pooled-connection -> prepared statement
/// -> result rows via nested `OwningHandle`s, so the `Rows` (which borrow the
/// statement) can be stored next to what they borrow from.
struct PostgresStorageIterator {
    rows: Option<
        OwningHandle<
            OwningHandle<
                Rc<r2d2::PooledConnection<PostgresConnectionManager>>,
                Box<postgres::stmt::Statement<'static>>>,
            Box<postgres::rows::Rows<>>>>,
    // used to fetch tags per record when options.retrieve_tags is set
    tag_retriever: Option<TagRetrieverOwned>,
    // which parts of each record (value/tags/type) to materialize
    options: RecordOptions,
    // precomputed total, if the search asked for a count
    total_count: Option<usize>,
    // number of rows already yielded (cursor position)
    iter_count: usize,
}

impl PostgresStorageIterator {
    /// Builds an iterator over the given prepared statement; when `stmt` is
    /// `None` the iterator is empty. The statement is executed eagerly and
    /// the rows are stored inside an `OwningHandle` together with it.
    fn new(stmt: Option<OwningHandle<Rc<r2d2::PooledConnection<PostgresConnectionManager>>, Box<postgres::stmt::Statement<'static>>>>,
           args: &[&dyn postgres::types::ToSql],
           options: RecordOptions,
           tag_retriever: Option<TagRetrieverOwned>,
           total_count: Option<usize>) -> Result<PostgresStorageIterator, WalletStorageError> {
        let mut iter = PostgresStorageIterator {
            rows: None,
            tag_retriever,
            options,
            total_count,
            iter_count: 0,
        };

        if let Some(stmt) = stmt {
            // NOTE(review): the raw-pointer cast lets `query` borrow from the
            // owned statement inside OwningHandle; soundness relies on
            // OwningHandle keeping the statement alive and at a stable
            // address for as long as the rows are held -- confirm.
            iter.rows = Some(OwningHandle::try_new(
                stmt,
                |stmt| unsafe { (*(stmt as *mut postgres::stmt::Statement)).query(args).map(Box::new) },
            )?);
        }

        Ok(iter)
    }
}

impl StorageIterator for PostgresStorageIterator {
    /// Yields the next record, or `Ok(None)` when exhausted (or when the
    /// iterator was constructed without a statement).
    fn next(&mut self) -> Result<Option<StorageRecord>, WalletStorageError> {
        // if records are not requested.
        if self.rows.is_none() {
            return Ok(None);
        }

        // TODO not sure if iter().nth() is the most efficient way to iterate through the result set
        // TODO investigate if the Iter object can be cached between calls to next()
        // NOTE(review): rebuilding the iterator and skipping `iter_count` rows
        // on every call makes draining the whole result set O(n^2).
        match self.rows.as_mut().unwrap().iter().nth(self.iter_count) {
            Some(row) => {
                self.iter_count = self.iter_count + 1;
                // row layout: 0 = item id, 1 = name, 2 = value, 3 = key, 4 = type
                let name = row.get(1);
                let value = if self.options.retrieve_value {
                    Some(EncryptedValue::new(row.get(2), row.get(3)))
                } else {
                    None
                };
                let tags = if self.options.retrieve_tags {
                    match self.tag_retriever {
                        Some(ref mut tag_retriever) => Some(tag_retriever.retrieve(row.get(0))?),
                        None => return Err(WalletStorageError::CommonError(
                            CommonError::InvalidState("Fetch tags option set and tag retriever is None".to_string())
                        ))
                    }
                } else {
                    None
                };
                let type_ = if self.options.retrieve_type {
                    Some(row.get(4))
                } else {
                    None
                };
                Ok(Some(StorageRecord::new(name, value, type_, tags)))
            }
            //Some(Err(err)) => Err(WalletStorageError::from(err)),
            None => Ok(None)
        }
    }

    /// Total number of matching records, if the search computed it up front.
    fn get_total_count(&self) -> Result<Option<usize>, WalletStorageError> {
        Ok(self.total_count)
    }
}

/// User-supplied storage configuration, deserialized from the JSON `config`
/// string handed to the plugin.
#[derive(Deserialize, Debug)]
pub struct PostgresConfig {
    url: String,
    tls: Option<String>, // default off
    max_connections: Option<u32>, // default 5
    min_idle_time: Option<u32>, // default 0, deprecated
    min_idle_count: Option<u32>, // default 0
    connection_timeout: Option<u64>, // default 5
    wallet_scheme: Option<WalletScheme>, // default DatabasePerWallet
}

impl PostgresConfig {
    /// TLS mode for direct `postgres` connections; only "None" is supported,
    /// any other value silently falls back to no TLS.
    fn tls(&self) -> postgres::TlsMode {
        match &self.tls {
            Some(tls) => match tls.as_ref() {
                "None" => postgres::TlsMode::None,
                // TODO add tls support for connecting to postgres db
                //"Prefer" => postgres::TlsMode::Prefer(&postgres::Connection),
                //"Require" => postgres::TlsMode::Require(&postgres::Connection),
                _ => postgres::TlsMode::None
            },
            None => postgres::TlsMode::None
        }
    }

    /// TLS mode for the r2d2 connection manager; mirrors `tls()` above.
    fn r2d2_tls(&self) -> TlsMode {
        match &self.tls {
            Some(tls) => match tls.as_ref() {
                "None" => TlsMode::None,
                // TODO add tls support for connecting to postgres db
                //"Prefer" => TlsMode::Prefer(&postgres::Connection),
                //"Require" => TlsMode::Require(&postgres::Connection),
                _ => TlsMode::None
            },
            None => TlsMode::None
        }
    }

    /// Sets the maximum number of connections managed by the pool.
    fn max_connections(&self) -> u32 {
        match &self.max_connections {
            Some(conn) => *conn,
            None => 5
        }
    }

    /// Sets the minimum idle connection count maintained by the pool.
    /// Falls back to the deprecated `min_idle_time` option, then to 0.
    fn min_idle_count(&self) -> u32 {
        match self.min_idle_count {
            Some(idle_count) => idle_count,
            None => match self.min_idle_time {
                Some(idle_count_deprecated) => {
                    warn!("Configuration option min_idle_time is deprecated. Use min_idle_count instead.");
                    idle_count_deprecated
                }
                None => 0
            }
        }
    }

    /// Sets the idle timeout used by the pool.
    fn connection_timeout(&self) -> u64 {
        match &self.connection_timeout {
            Some(timeout) => *timeout,
            None => 5
        }
    }
}

/// Database credentials; the `admin_*` pair is optional and only required by
/// operations that create or drop databases/tables.
#[derive(Deserialize, Debug)]
pub struct PostgresCredentials {
    account: String,
    password: String,
    admin_account: Option<String>,
    admin_password: Option<String>,
}

/// An opened wallet: a connection pool plus the wallet id used by the
/// multi-wallet strategies to scope queries.
#[derive(Debug)]
pub struct PostgresStorage {
    pool: r2d2::Pool<PostgresConnectionManager>,
    wallet_id: String,
}

/// Public entry points of the storage plugin; `config`/`credentials` arrive
/// as raw JSON strings.
pub trait WalletStorageType {
    fn init_storage(&self, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError>;
    fn create_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>, metadata: &[u8]) -> Result<(), WalletStorageError>;
    fn open_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<Box<PostgresStorage>, WalletStorageError>;
    fn delete_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError>;
}

/// How wallets are mapped onto physical databases/tables.
#[derive(Deserialize, Debug)]
#[derive(Copy, Clone)]
enum WalletScheme {
    DatabasePerWallet,
    MultiWalletSingleTable,
    MultiWalletSingleTableSharedPool,
    MultiWalletMultiTable,
}

/// Strategy interface: each `WalletScheme` variant gets one implementation.
trait WalletStrategy {
    // initialize storage based on wallet storage strategy
    fn init_storage(&self, config: &PostgresConfig, credentials: &PostgresCredentials) ->
Result<(), WalletStorageError>;
    // initialize a single wallet based on wallet storage strategy
    fn create_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials, metadata: &[u8]) -> Result<(), WalletStorageError>;
    // open a wallet based on wallet storage strategy
    fn open_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError>;
    // delete a single wallet based on wallet storage strategy
    fn delete_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError>;
    // determine physical table name based on wallet strategy
    fn table_name(&self, id: &str, base_name: &str) -> String;
    // determine additional query parameters based on wallet strategy
    fn query_qualifier(&self) -> Option<String>;
}

pub struct PostgresStorageType {}

// Marker types implementing `WalletStrategy` for each `WalletScheme` variant.
struct DatabasePerWalletStrategy {}
struct MultiWalletSingleTableStrategy {}
struct MultiWalletMultiTableStrategy {}

/// Same schema as `MultiWalletSingleTableStrategy`, but every opened wallet
/// shares this single r2d2 pool instead of building its own.
struct MultiWalletSingleTableStrategySharedPool {
    pool: r2d2::Pool<PostgresConnectionManager>
}

impl WalletStrategy for MultiWalletSingleTableStrategySharedPool {
    // initialize storage based on wallet storage strategy
    fn init_storage(&self, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
        // create database and tables for storage
        // if admin user and password aren't provided then bail
        if credentials.admin_account == None || credentials.admin_password == None {
            return Ok(());
        }
        let url_base = PostgresStorageType::_admin_postgres_url(&config, &credentials);
        let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials);

        let conn = postgres::Connection::connect(&url_base[..], postgres::TlsMode::None)?;
        if let Err(error) = conn.execute(&_CREATE_WALLETS_DATABASE, &[]) {
            if error.code() != Some(&postgres::error::DUPLICATE_DATABASE) {
                conn.finish()?;
                return Err(WalletStorageError::IOError(format!("Error occurred while creating the database: {}", error)));
            } else {
                // if database already exists, assume tables are created already and return
                conn.finish()?;
                return Ok(());
            }
        }
        conn.finish()?;

        // fresh database: create the shared multi-wallet schema
        let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) {
            Ok(conn) => conn,
            Err(error) => {
                return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error)));
            }
        };
        for sql in &_CREATE_SCHEMA_MULTI {
            if let Err(error) = conn.execute(sql, &[]) {
                conn.finish()?;
                return Err(WalletStorageError::IOError(format!("Error occurred while creating wallet schema: {}", error)));
            }
        }
        conn.finish()?;
        Ok(())
    }

    // initialize a single wallet based on wallet storage strategy
    fn create_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials, metadata: &[u8]) -> Result<(), WalletStorageError> {
        // insert metadata
        let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials);
        let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) {
            Ok(conn) => conn,
            Err(error) => {
                return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error)));
            }
        };
        // We allow error on conflict since this indicates AlreadyExists error
        let ret = match conn.execute("INSERT INTO metadata(wallet_id, value) VALUES($1, $2)", &[&id, &metadata]) {
            Ok(_) => Ok(()),
            Err(error) => {
                if error.code() == Some(&postgres::error::UNIQUE_VIOLATION) {
                    Err(WalletStorageError::AlreadyExists)
                } else {
                    Err(WalletStorageError::IOError(format!("Error occurred while inserting into metadata: {}", error)))
                }
            }
        };
        conn.finish()?;
        ret
    }

    // open a wallet based on wallet storage strategy
    fn open_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError> {
        debug!(" MultiWalletSingleTableStrategySharedPool open >> ");
        let _url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials);
        debug!("MultiWalletSingleTableStrategySharedPool open <<");
        // NOTE(review): unlike MultiWalletSingleTableStrategy::open_wallet,
        // this variant does not verify that metadata exists for `id` before
        // handing out a storage handle -- confirm this is intentional.
        Ok(Box::new(PostgresStorage {
            pool: self.pool.clone(),
            wallet_id: id.to_string(),
        }))
    }

    // delete a single wallet based on wallet storage strategy
    fn delete_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
        let url = PostgresStorageType::_postgres_url(&_WALLETS_DB, &config, &credentials);
        let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) {
            Ok(conn) => conn,
            Err(error) => {
                return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error)));
            }
        };
        let mut ret = Ok(());
        // NOTE(review): `ret` keeps only the outcome of the LAST statement in
        // _DELETE_WALLET_MULTI; failures of earlier statements are overwritten.
        for sql in &_DELETE_WALLET_MULTI {
            ret = match conn.execute(sql, &[&id]) {
                Ok(row_count) => {
                    if row_count == 0 {
                        Err(WalletStorageError::NotFound)
                    } else {
                        Ok(())
                    }
                }
                Err(error) => {
                    Err(WalletStorageError::IOError(format!("Error occurred while deleting wallet: {}", error)))
                }
            }
        };
        conn.finish()?;
        return ret;
    }

    // determine physical table name based on wallet strategy
    fn table_name(&self, _id: &str, base_name: &str) -> String {
        // TODO
        base_name.to_owned()
    }

    // determine additional query parameters based on wallet strategy
    fn query_qualifier(&self) -> Option<String> {
        // TODO
        Some("AND wallet_id = $$".to_owned())
    }
}

impl WalletStrategy for DatabasePerWalletStrategy {
    // initialize storage based on wallet storage strategy
    fn init_storage(&self, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
        // no-op
        debug!("Initializing storage strategy DatabasePerWalletStrategy.");
        Ok(())
    }

    // initialize a single wallet based on wallet storage strategy
    fn create_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials, metadata: &[u8]) -> Result<(), WalletStorageError> {
        // create database for wallet
        // if admin user and password aren't provided then bail
        if credentials.admin_account == None || credentials.admin_password == None { return
Ok(()); }
        let url_base = PostgresStorageType::_admin_postgres_url(&config, &credentials);
        let url = PostgresStorageType::_postgres_url(id, &config, &credentials);

        let conn = postgres::Connection::connect(&url_base[..], config.tls())?;
        let create_db_sql = str::replace(_CREATE_WALLET_DATABASE, "$1", id);
        // any create-database failure is reported as AlreadyExists
        let mut schema_result = match conn.execute(&create_db_sql, &[]) {
            Ok(_) => Ok(()),
            Err(_error) => {
                Err(WalletStorageError::AlreadyExists)
            }
        };
        conn.finish()?;

        let conn = match postgres::Connection::connect(&url[..], config.tls()) {
            Ok(conn) => conn,
            Err(error) => {
                return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error)));
            }
        };
        // run the schema DDL only while no previous step has failed
        for sql in &_CREATE_SCHEMA {
            match schema_result {
                Ok(_) => schema_result = match conn.execute(sql, &[]) {
                    Ok(_) => Ok(()),
                    Err(error) => {
                        Err(WalletStorageError::IOError(format!("Error occurred while creating wallet schema: {}", error)))
                    }
                },
                _ => ()
            }
        };
        // finally store the wallet metadata (keys) in the new database
        let ret = match schema_result {
            Ok(_) => {
                match conn.execute("INSERT INTO metadata(value) VALUES($1) ON CONFLICT (value) DO UPDATE SET value = excluded.value", &[&metadata]) {
                    Ok(_) => Ok(()),
                    Err(error) => {
                        //std::fs::remove_file(db_path)?;
                        Err(WalletStorageError::IOError(format!("Error occurred while inserting the keys: {}", error)))
                    }
                }
            }
            Err(error) => Err(error)
        };
        conn.finish()?;
        ret
    }

    // open a wallet based on wallet storage strategy
    fn open_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError> {
        let url = PostgresStorageType::_postgres_url(id, &config, &credentials);
        // don't need a connection, but connect just to verify we can
        let _conn = match postgres::Connection::connect(&url[..], config.tls()) {
            Ok(conn) => conn,
            Err(_) => return Err(WalletStorageError::NotFound)
        };
        // TODO close _conn
        let manager = match PostgresConnectionManager::new(&url[..], config.r2d2_tls()) {
            Ok(manager) => manager,
            Err(_) => return Err(WalletStorageError::NotFound)
        };
        // build a dedicated pool for this wallet's database
        let pool = match r2d2::Pool::builder()
            .min_idle(Some(config.min_idle_count()))
            .max_size(config.max_connections())
            .idle_timeout(Some(Duration::new(config.connection_timeout(), 0)))
            .build(manager) {
            Ok(pool) => pool,
            Err(_) => return Err(WalletStorageError::NotFound)
        };
        Ok(Box::new(PostgresStorage {
            pool: pool,
            wallet_id: id.to_string(),
        }))
    }

    // delete a single wallet based on wallet storage strategy
    fn delete_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
        // if admin user and password aren't provided then bail
        if credentials.admin_account == None || credentials.admin_password == None {
            return Ok(());
        }
        let url_base = PostgresStorageType::_admin_postgres_url(&config, &credentials);
        let url = PostgresStorageType::_postgres_url(id, &config, &credentials);

        // best effort: drop schema objects first, ignoring individual failures
        match postgres::Connection::connect(&url[..], config.tls()) {
            Ok(conn) => {
                for sql in &_DROP_SCHEMA {
                    match conn.execute(sql, &[]) {
                        Ok(_) => (),
                        Err(_) => ()
                    };
                }
                let _ret = conn.finish();
                ()
            }
            Err(_) => return Err(WalletStorageError::NotFound)
        };

        // then drop the wallet database itself; drop errors are ignored
        let conn = postgres::Connection::connect(url_base, config.tls())?;
        let drop_db_sql = str::replace(_DROP_WALLET_DATABASE, "$1", id);
        let ret = match conn.execute(&drop_db_sql, &[]) {
            Ok(_) => Ok(()),
            Err(_) => Ok(())
        };
        conn.finish()?;
        ret
    }

    // determine physical table name based on wallet strategy
    fn table_name(&self, _id: &str, base_name: &str) -> String {
        // TODO
        base_name.to_owned()
    }

    // determine additional query parameters based on wallet strategy
    fn query_qualifier(&self) -> Option<String> {
        // TODO
        None
    }
}

impl WalletStrategy for MultiWalletSingleTableStrategy {
    // initialize storage based on wallet storage strategy
    fn init_storage(&self, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
        // create database and tables for storage
        // if admin user and password aren't provided then bail
        debug!("Initializing storage strategy
error.code() == Some(&postgres::error::UNIQUE_VIOLATION) {
                    Err(WalletStorageError::AlreadyExists)
                } else {
                    Err(WalletStorageError::IOError(format!("Error occurred while inserting into metadata: {}", error)))
                }
            }
        };
        conn.finish()?;
        ret
    }

    // open a wallet based on wallet storage strategy
    fn open_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError> {
        let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials);
        // don't need a connection, but connect just to verify we can
        let conn = match postgres::Connection::connect(&url[..], config.tls()) {
            Ok(conn) => conn,
            Err(_) => return Err(WalletStorageError::NotFound)
        };
        // select metadata for this wallet to ensure it exists
        let res: Result<Vec<u8>, WalletStorageError> = {
            let mut rows = conn.query(
                "SELECT value FROM metadata WHERE wallet_id = $1",
                &[&id]);
            // NOTE(review): `rows` is a Result and is unwrap()ed here -- a
            // failed query panics instead of surfacing as an error.
            match rows.as_mut().unwrap().iter().next() {
                Some(row) => Ok(row.get(0)),
                None => Err(WalletStorageError::ItemNotFound)
            }
        };
        match res {
            Ok(_entity) => (),
            Err(_) => return Err(WalletStorageError::NotFound)
        };
        // TODO close conn
        let manager = match PostgresConnectionManager::new(&url[..], config.r2d2_tls()) {
            Ok(manager) => manager,
            Err(_) => return Err(WalletStorageError::NotFound)
        };
        // one pool per opened wallet, all pointing at the shared database
        let pool = match r2d2::Pool::builder()
            .min_idle(Some(config.min_idle_count()))
            .max_size(config.max_connections())
            .idle_timeout(Some(Duration::new(config.connection_timeout(), 0)))
            .build(manager) {
            Ok(pool) => pool,
            Err(_) => return Err(WalletStorageError::NotFound)
        };
        Ok(Box::new(PostgresStorage {
            pool: pool,
            wallet_id: id.to_string(),
        }))
    }

    // delete a single wallet based on wallet storage strategy
    fn delete_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
        let url = PostgresStorageType::_postgres_url(&_WALLETS_DB, &config, &credentials);
        let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) {
            Ok(conn) => conn,
            Err(error) => {
                return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error)));
            }
        };
        let mut ret = Ok(());
        // NOTE(review): only the outcome of the LAST statement in
        // _DELETE_WALLET_MULTI survives; earlier errors are overwritten.
        for sql in &_DELETE_WALLET_MULTI {
            ret = match conn.execute(sql, &[&id]) {
                Ok(row_count) => {
                    if row_count == 0 {
                        Err(WalletStorageError::NotFound)
                    } else {
                        Ok(())
                    }
                }
                Err(error) => {
                    Err(WalletStorageError::IOError(format!("Error occurred while deleting wallet: {}", error)))
                }
            }
        };
        conn.finish()?;
        return ret;
    }

    // determine physical table name based on wallet strategy
    fn table_name(&self, _id: &str, base_name: &str) -> String {
        // TODO
        base_name.to_owned()
    }

    // determine additional query parameters based on wallet strategy
    fn query_qualifier(&self) -> Option<String> {
        // TODO
        Some("AND wallet_id = $$".to_owned())
    }
}

/// Placeholder strategy: one set of tables per wallet. Not implemented yet --
/// every method is a TODO stub.
impl WalletStrategy for MultiWalletMultiTableStrategy {
    // initialize storage based on wallet storage strategy
    fn init_storage(&self, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
        // create database for storage
        // TODO
        Ok(())
    }
    // initialize a single wallet based on wallet storage strategy
    fn create_wallet(&self, _id: &str, _config: &PostgresConfig, _credentials: &PostgresCredentials, _metadata: &[u8]) -> Result<(), WalletStorageError> {
        // create tables for wallet storage
        // TODO
        Ok(())
    }
    // open a wallet based on wallet storage strategy
    fn open_wallet(&self, _id: &str, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError> {
        // TODO
        Err(WalletStorageError::NotFound)
    }
    // delete a single wallet based on wallet storage strategy
    fn delete_wallet(&self, _id: &str, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
        // TODO
        Ok(())
    }
    // determine physical table name based on wallet strategy
    fn table_name(&self, _id: &str, base_name: &str) -> String {
        // TODO
        base_name.to_owned()
    }
    // determine additional query parameters based on wallet strategy
    fn query_qualifier(&self) -> Option<String> {
        // TODO
        None
    }
}

// The strategy selected at init time; consulted by all later storage calls.
lazy_static! {
    static ref SELECTED_STRATEGY: RwLock< Option<Box<dyn WalletStrategy + Send + Sync>> > = RwLock::new(None);
}

impl PostgresStorageType {
    pub fn new() -> PostgresStorageType {
        PostgresStorageType {}
    }

    /// Connection URL using the admin credentials, without a database path
    /// (used for server-level operations such as CREATE/DROP DATABASE).
    fn _admin_postgres_url(config: &PostgresConfig, credentials: &PostgresCredentials) -> String {
        let mut url_base = "postgresql://".to_owned();
        match credentials.admin_account {
            Some(ref account) => url_base.push_str(&account[..]),
            None => ()
        }
        url_base.push_str(":");
        match credentials.admin_password {
            Some(ref password) => url_base.push_str(&password[..]),
            None => ()
        }
        url_base.push_str("@");
        url_base.push_str(&config.url[..]);
        url_base
    }

    /// Connection URL using the regular account, without a database path.
    fn _base_postgres_url(config: &PostgresConfig, credentials: &PostgresCredentials) -> String {
        let mut url_base = "postgresql://".to_owned();
        url_base.push_str(&credentials.account[..]);
        url_base.push_str(":");
        url_base.push_str(&credentials.password[..]);
        url_base.push_str("@");
        url_base.push_str(&config.url[..]);
        url_base
    }

    /// Full connection URL for database `id`.
    fn _postgres_url(id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> String {
        let mut url_base = PostgresStorageType::_base_postgres_url(config, credentials);
        url_base.push_str("/");
        url_base.push_str(id);
        url_base
    }
}

impl WalletStorage for PostgresStorage {
    ///
    /// Tries to fetch values and/or tags from the storage.
    /// Returns Result with StorageEntity object which holds requested data in case of success or
    /// Result with WalletStorageError in case of failure.
    ///
    ///
    /// # Arguments
    ///
    /// * `type_` - type_ of the item in storage
    /// * `id` - id of the item in storage
    /// * `options` - JSon containing what needs to be fetched.
    /// Example: {"retrieveValue": true, "retrieveTags": true}
    ///
    /// # Returns
    ///
    /// Result that can be either:
    ///
    /// * `StorageEntity` - Contains name, optional value and optional tags
    /// * `WalletStorageError`
    ///
    /// # Errors
    ///
    /// Any of the following `WalletStorageError` type_ of errors can be throw by this method:
    ///
    /// * `WalletStorageError::Closed` - Storage is closed
    /// * `WalletStorageError::ItemNotFound` - Item is not found in database
    /// * `IOError("IO error during storage operation:...")` - Failed connection or SQL query
    ///
    fn get(&self, type_: &[u8], id: &[u8], options: &str) -> Result<StorageRecord, WalletStorageError> {
        let options: RecordOptions = if options == "{}" {
            // FIXME:
            RecordOptions::default()
        } else {
            serde_json::from_str(options)?
        };
        let pool = self.pool.clone();
        // NOTE(review): pool.get() is unwrap()ed -- pool exhaustion panics.
        let conn = pool.get().unwrap();
        let query_qualifier = get_wallet_strategy_qualifier()?;

        // fetch id, value and key of the item; scope by wallet_id when the
        // active strategy shares tables between wallets
        let res: Result<(i64, Vec<u8>, Vec<u8>), WalletStorageError> = {
            let mut rows = match query_qualifier {
                Some(_) => conn.query(
                    "SELECT id, value, key FROM items where type = $1 AND name = $2 AND wallet_id = $3",
                    &[&type_.to_vec(), &id.to_vec(), &self.wallet_id]),
                None => conn.query(
                    "SELECT id, value, key FROM items where type = $1 AND name = $2",
                    &[&type_.to_vec(), &id.to_vec()])
            };
            // NOTE(review): query Result is unwrap()ed -- a SQL error panics.
            match rows.as_mut().unwrap().iter().next() {
                Some(row) => Ok((row.get(0), row.get(1), row.get(2))),
                None => Err(WalletStorageError::ItemNotFound)
            }
        };
        let item = match res {
            Ok(entity) => entity,
            Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
            Err(err) => return Err(WalletStorageError::from(err))
        };
        let value = if options.retrieve_value {
            Some(EncryptedValue::new(item.1, item.2))
        } else {
            None
        };
        let type_ = if options.retrieve_type {
            Some(type_.clone())
        } else {
            None
        };
        let tags = if options.retrieve_tags {
            let mut tags = Vec::new();

            // get all encrypted.
            let rows = match query_qualifier {
                Some(_) => {
                    let stmt = conn.prepare_cached("SELECT name, value FROM tags_encrypted WHERE item_id = $1 AND wallet_id = $2")?;
                    stmt.query(&[&item.0, &self.wallet_id])?
                }
                None => {
                    let stmt = conn.prepare_cached("SELECT name, value FROM tags_encrypted WHERE item_id = $1")?;
                    stmt.query(&[&item.0])?
                }
            };
            let mut iter = rows.iter();
            while let Some(res) = iter.next() {
                let row = res;
                //let tag_name: Vec<u8> = row.get(0);
                //let tag_value: Vec<u8> = row.get(1);
                tags.push(Tag::Encrypted(row.get(0), row.get(1)));
            }

            // get all plain
            let rows = match query_qualifier {
                Some(_) => {
                    let stmt = conn.prepare_cached("SELECT name, value FROM tags_plaintext WHERE item_id = $1 AND wallet_id = $2")?;
                    stmt.query(&[&item.0, &self.wallet_id])?
                }
                None => {
                    let stmt = conn.prepare_cached("SELECT name, value FROM tags_plaintext WHERE item_id = $1")?;
                    stmt.query(&[&item.0])?
                }
            };
            let mut iter = rows.iter();
            while let Some(res) = iter.next() {
                let row = res;
                //let tag_name: Vec<u8> = row.get(0);
                //let tag_value: String = row.get(1);
                tags.push(Tag::PlainText(row.get(0), row.get(1)));
            }
            Some(tags)
        } else {
            None
        };

        Ok(StorageRecord::new(id.to_vec(), value, type_.map(|val| val.to_vec()), tags))
    }

    ///
    /// inserts value and tags into storage.
    /// Returns Result with () on success or
    /// Result with WalletStorageError in case of failure.
    ///
    ///
    /// # Arguments
    ///
    /// * `type_` - type of the item in storage
    /// * `id` - id of the item in storage
    /// * `value` - value of the item in storage
    /// * `value_key` - key used to encrypt the value
    /// * `tags` - tags assigned to the value
    ///
    /// # Returns
    ///
    /// Result that can be either:
    ///
    /// * `()`
    /// * `WalletStorageError`
    ///
    /// # Errors
    ///
    /// Any of the following `WalletStorageError` class of errors can be throw by this method:
    ///
    /// * `WalletStorageError::Closed` - Storage is closed
    /// * `WalletStorageError::ItemAlreadyExists` - Item is already present in database
    /// * `IOError("IO error during storage operation:...")` - Failed connection or SQL query
    ///
    fn add(&self, type_: &[u8], id: &[u8], value: &EncryptedValue, tags: &[Tag]) -> Result<(), WalletStorageError> {
        let pool = self.pool.clone();
        // NOTE(review): pool.get() is unwrap()ed -- pool exhaustion panics.
        let conn = pool.get().unwrap();
        let query_qualifier = get_wallet_strategy_qualifier()?;
        // one transaction covers the item row plus all of its tag rows
        let tx: transaction::Transaction = transaction::Transaction::new(&conn)?;

        let res = match query_qualifier {
            Some(_) => tx.prepare_cached("INSERT INTO items (type, name, value, key, wallet_id) VALUES ($1, $2, $3, $4, $5) RETURNING id")?
                .query(&[&type_.to_vec(), &id.to_vec(), &value.data, &value.key, &self.wallet_id]),
            None => tx.prepare_cached("INSERT INTO items (type, name, value, key) VALUES ($1, $2, $3, $4) RETURNING id")?
                .query(&[&type_.to_vec(), &id.to_vec(), &value.data, &value.key])
        };
        let item_id = match res {
            Ok(rows) => {
                let res = match rows.iter().next() {
                    Some(row) => Ok(row.get(0)),
                    None => Err(WalletStorageError::ItemNotFound)
                };
                let item_id: i64 = match res {
                    Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
                    Err(err) => return Err(WalletStorageError::from(err)),
                    Ok(id) => id
                };
                item_id
            }
            Err(err) => {
                // a unique/integrity violation means the item already exists
                if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) || err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) {
                    return Err(WalletStorageError::ItemAlreadyExists);
                } else {
                    return Err(WalletStorageError::from(err));
                }
            }
        };
        let item_id = item_id as i64;

        if !tags.is_empty() {
            let stmt_e = match query_qualifier {
                Some(_) => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?,
                None => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value) VALUES ($1, $2, $3)")?
            };
            let stmt_p = match query_qualifier {
                Some(_) => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?,
                None => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value) VALUES ($1, $2, $3)")?
            };

            for tag in tags {
                match tag {
                    &Tag::Encrypted(ref tag_name, ref tag_data) => {
                        let res = match query_qualifier {
                            Some(_) => stmt_e.execute(&[&item_id, tag_name, tag_data, &self.wallet_id]),
                            None => stmt_e.execute(&[&item_id, tag_name, tag_data])
                        };
                        match res {
                            Ok(_) => (),
                            Err(err) => {
                                if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) || err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) {
                                    return Err(WalletStorageError::ItemAlreadyExists);
                                } else {
                                    return Err(WalletStorageError::from(err));
                                }
                            }
                        }
                    }
                    &Tag::PlainText(ref tag_name, ref tag_data) => {
                        let res = match query_qualifier {
                            Some(_) => stmt_p.execute(&[&item_id, tag_name, tag_data, &self.wallet_id]),
                            None => stmt_p.execute(&[&item_id, tag_name, tag_data])
                        };
                        match res {
                            Ok(_) => (),
                            Err(err) => {
                                if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) || err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) {
                                    return Err(WalletStorageError::ItemAlreadyExists);
                                } else {
                                    return Err(WalletStorageError::from(err));
                                }
                            }
                        }
                    }
                };
            }
        }
        tx.commit()?;

        Ok(())
    }

    /// Replaces the value (data + key) of an existing item; exactly one row
    /// must be affected, any other row count is an error.
    fn update(&self, type_: &[u8], id: &[u8], value: &EncryptedValue) -> Result<(), WalletStorageError> {
        let pool = self.pool.clone();
        let conn = pool.get().unwrap();
        let query_qualifier = get_wallet_strategy_qualifier()?;
        let res = match query_qualifier {
            Some(_) => conn.prepare_cached("UPDATE items SET value = $1, key = $2 WHERE type = $3 AND name = $4 AND wallet_id = $5")?
                .execute(&[&value.data, &value.key, &type_.to_vec(), &id.to_vec(), &self.wallet_id]),
            None => conn.prepare_cached("UPDATE items SET value = $1, key = $2 WHERE type = $3 AND name = $4")?
                .execute(&[&value.data, &value.key, &type_.to_vec(), &id.to_vec()])
        };
        match res {
            Ok(1) => Ok(()),
            Ok(0) => Err(WalletStorageError::ItemNotFound),
            Ok(count) => Err(WalletStorageError::CommonError(CommonError::InvalidState(format!("Postgres returned update row count: {}", count)))),
            Err(err) => Err(WalletStorageError::from(err)),
        }
    }

    /// Adds (upserts) tags on an existing item: tags with the same name are
    /// overwritten, other existing tags are left untouched.
    fn add_tags(&self, type_: &[u8], id: &[u8], tags: &[Tag]) -> Result<(), WalletStorageError> {
        let pool = self.pool.clone();
        // NOTE(review): pool.get() is unwrap()ed -- pool exhaustion panics.
        let conn = pool.get().unwrap();
        let query_qualifier = get_wallet_strategy_qualifier()?;
        let tx: transaction::Transaction = transaction::Transaction::new(&conn)?;
        // NOTE(review): both branches run the same un-scoped SELECT -- unlike
        // update_tags/delete_tags, the Some(_) branch does not filter on
        // wallet_id; confirm whether shared-table lookups should be scoped.
        let res = match query_qualifier {
            Some(_) => {
                let mut rows = tx.prepare_cached("SELECT id FROM items WHERE type = $1 AND name = $2")?
                    .query(&[&type_.to_vec(), &id.to_vec()]);
                match rows.as_mut().unwrap().iter().next() {
                    Some(row) => Ok(row.get(0)),
                    None => Err(WalletStorageError::ItemNotFound)
                }
            }
            None => {
                let mut rows = tx.prepare_cached("SELECT id FROM items WHERE type = $1 AND name = $2")?
                    .query(&[&type_.to_vec(), &id.to_vec()]);
                match rows.as_mut().unwrap().iter().next() {
                    Some(row) => Ok(row.get(0)),
                    None => Err(WalletStorageError::ItemNotFound)
                }
            }
        };
        let item_id: i64 = match res {
            Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
            Err(err) => return Err(WalletStorageError::from(err)),
            Ok(id) => id
        };

        if !tags.is_empty() {
            let enc_tag_insert_stmt = match query_qualifier {
                Some(_) => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4) ON CONFLICT (name, item_id, wallet_id) DO UPDATE SET value = excluded.value")?,
                None => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value) VALUES ($1, $2, $3) ON CONFLICT (name, item_id) DO UPDATE SET value = excluded.value")?
            };
            let plain_tag_insert_stmt = match query_qualifier {
                Some(_) => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4) ON CONFLICT (name, item_id, wallet_id) DO UPDATE SET value = excluded.value")?,
                None => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value) VALUES ($1, $2, $3) ON CONFLICT (name, item_id) DO UPDATE SET value = excluded.value")?
            };

            for tag in tags {
                match tag {
                    &Tag::Encrypted(ref tag_name, ref tag_data) => {
                        let res = match query_qualifier {
                            Some(_) => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id]),
                            None => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data])
                        };
                        match res {
                            Ok(_) => (),
                            Err(err) => {
                                if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) || err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) {
                                    return Err(WalletStorageError::ItemAlreadyExists);
                                } else {
                                    return Err(WalletStorageError::from(err));
                                }
                            }
                        }
                    }
                    &Tag::PlainText(ref tag_name, ref tag_data) => {
                        let res = match query_qualifier {
                            Some(_) => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id]),
                            None => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data])
                        };
                        match res {
                            Ok(_) => (),
                            Err(err) => {
                                if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) || err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) {
                                    return Err(WalletStorageError::ItemAlreadyExists);
                                } else {
                                    return Err(WalletStorageError::from(err));
                                }
                            }
                        }
                    }
                };
            }
        }
        tx.commit()?;

        Ok(())
    }

    /// Replaces ALL tags of an item with the supplied set: delete everything,
    /// then insert, inside a single transaction.
    fn update_tags(&self, type_: &[u8], id: &[u8], tags: &[Tag]) -> Result<(), WalletStorageError> {
        let pool = self.pool.clone();
        let conn = pool.get().unwrap();
        let query_qualifier = get_wallet_strategy_qualifier()?;
        let tx: transaction::Transaction = transaction::Transaction::new(&conn)?;
        let res = match query_qualifier {
            Some(_) => {
                let mut rows = tx.prepare_cached("SELECT id FROM items WHERE type = $1 AND name = $2 AND wallet_id = $3")?
                    .query(&[&type_.to_vec(), &id.to_vec(), &self.wallet_id]);
                // NOTE(review): query Result is unwrap()ed -- SQL errors panic.
                match rows.as_mut().unwrap().iter().next() {
                    Some(row) => Ok(row.get(0)),
                    None => Err(WalletStorageError::ItemNotFound)
                }
            }
            None => {
                let mut rows = tx.prepare_cached("SELECT id FROM items WHERE type = $1 AND name = $2")?
                    .query(&[&type_.to_vec(), &id.to_vec()]);
                match rows.as_mut().unwrap().iter().next() {
                    Some(row) => Ok(row.get(0)),
                    None => Err(WalletStorageError::ItemNotFound)
                }
            }
        };
        let item_id: i64 = match res {
            Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
            Err(err) => return Err(WalletStorageError::from(err)),
            Ok(id) => id
        };

        // drop every existing tag of the item before re-inserting the new set
        match query_qualifier {
            Some(_) => {
                tx.execute("DELETE FROM tags_encrypted WHERE item_id = $1 AND wallet_id = $2", &[&item_id, &self.wallet_id])?;
                tx.execute("DELETE FROM tags_plaintext WHERE item_id = $1 AND wallet_id = $2", &[&item_id, &self.wallet_id])?;
            }
            None => {
                tx.execute("DELETE FROM tags_encrypted WHERE item_id = $1", &[&item_id])?;
                tx.execute("DELETE FROM tags_plaintext WHERE item_id = $1", &[&item_id])?;
            }
        };

        if !tags.is_empty() {
            let enc_tag_insert_stmt = match query_qualifier {
                Some(_) => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?,
                None => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value) VALUES ($1, $2, $3)")?
            };
            let plain_tag_insert_stmt = match query_qualifier {
                Some(_) => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?,
                None => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value) VALUES ($1, $2, $3)")?
            };

            for tag in tags {
                match query_qualifier {
                    Some(_) => {
                        match tag {
                            &Tag::Encrypted(ref tag_name, ref tag_data) => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id])?,
                            &Tag::PlainText(ref tag_name, ref tag_data) => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id])?
                        }
                    }
                    None => {
                        match tag {
                            &Tag::Encrypted(ref tag_name, ref tag_data) => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data])?,
                            &Tag::PlainText(ref tag_name, ref tag_data) => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data])?
                        }
                    }
                };
            }
        }
        tx.commit()?;

        Ok(())
    }

    /// Deletes the named tags from an item; other tags are kept.
    fn delete_tags(&self, type_: &[u8], id: &[u8], tag_names: &[TagName]) -> Result<(), WalletStorageError> {
        let pool = self.pool.clone();
        let conn = pool.get().unwrap();
        let query_qualifier = get_wallet_strategy_qualifier()?;
        let res = match query_qualifier {
            Some(_) => {
                let mut rows = conn.prepare_cached("SELECT id FROM items WHERE type =$1 AND name = $2 AND wallet_id = $3")?
                    .query(&[&type_.to_vec(), &id.to_vec(), &self.wallet_id]);
                match rows.as_mut().unwrap().iter().next() {
                    Some(row) => Ok(row.get(0)),
                    None => Err(WalletStorageError::ItemNotFound)
                }
            }
            None => {
                let mut rows = conn.prepare_cached("SELECT id FROM items WHERE type =$1 AND name = $2")?
                    .query(&[&type_.to_vec(), &id.to_vec()]);
                match rows.as_mut().unwrap().iter().next() {
                    Some(row) => Ok(row.get(0)),
                    None => Err(WalletStorageError::ItemNotFound)
                }
            }
        };
        let item_id: i64 = match res {
            Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
            Err(err) => return Err(WalletStorageError::from(err)),
            Ok(id) => id
        };

        let tx: transaction::Transaction = transaction::Transaction::new(&conn)?;
        {
            let enc_tag_delete_stmt = match query_qualifier {
                Some(_) => tx.prepare_cached("DELETE FROM tags_encrypted WHERE item_id = $1 AND name = $2 AND wallet_id = $3")?,
                None => tx.prepare_cached("DELETE FROM tags_encrypted WHERE item_id = $1 AND name = $2")?
            };
            let plain_tag_delete_stmt = match query_qualifier {
                Some(_) => tx.prepare_cached("DELETE FROM tags_plaintext WHERE item_id = $1 AND name = $2 AND wallet_id = $3")?,
                None => tx.prepare_cached("DELETE FROM tags_plaintext WHERE item_id = $1 AND name = $2")?
};
// Run the matching delete statement for each requested tag name.
for tag_name in tag_names {
    match query_qualifier {
        Some(_) => match tag_name {
            &TagName::OfEncrypted(ref tag_name) => enc_tag_delete_stmt.execute(&[&item_id, tag_name, &self.wallet_id])?,
            &TagName::OfPlain(ref tag_name) => plain_tag_delete_stmt.execute(&[&item_id, tag_name, &self.wallet_id])?,
        },
        None => match tag_name {
            &TagName::OfEncrypted(ref tag_name) => enc_tag_delete_stmt.execute(&[&item_id, tag_name])?,
            &TagName::OfPlain(ref tag_name) => plain_tag_delete_stmt.execute(&[&item_id, tag_name])?,
        }
    };
}
}
tx.commit()?;
Ok(())
}

///
/// Deletes an item (its value and tags) from storage.
/// Returns Result with () on success or
/// Result with WalletStorageError in case of failure.
///
///
/// # Arguments
///
/// * `type_` - type of the item in storage
/// * `id` - id of the item in storage
///
/// # Returns
///
/// Result that can be either:
///
/// * `()`
/// * `WalletStorageError`
///
/// # Errors
///
/// Any of the following `WalletStorageError` type_ of errors can be thrown by this method:
///
/// * `WalletStorageError::Closed` - Storage is closed
/// * `WalletStorageError::ItemNotFound` - Item is not found in database
/// * `IOError("IO error during storage operation:...")` - Failed connection or SQL query
///
fn delete(&self, type_: &[u8], id: &[u8]) -> Result<(), WalletStorageError> {
    let pool = self.pool.clone();
    let conn = pool.get().unwrap();
    let query_qualifier = get_wallet_strategy_qualifier()?;
    // Tag rows are removed implicitly; presumably via FK ON DELETE CASCADE — confirm against the schema DDL.
    let row_count = match query_qualifier {
        Some(_) => conn.execute(
            "DELETE FROM items where type = $1 AND name = $2 AND wallet_id = $3",
            &[&type_.to_vec(), &id.to_vec(), &self.wallet_id],
        )?,
        None => conn.execute(
            "DELETE FROM items where type = $1 AND name = $2",
            &[&type_.to_vec(), &id.to_vec()],
        )?
    };
    // Exactly one row must have been deleted for the item to have existed.
    if row_count == 1 {
        Ok(())
    } else {
        Err(WalletStorageError::ItemNotFound)
    }
}

/// Fetches the wallet metadata blob (the stored key material) for this storage.
///
/// # Errors
///
/// * `WalletStorageError::ItemNotFound` - no metadata row exists
fn get_storage_metadata(&self) -> Result<Vec<u8>, WalletStorageError> {
    let pool = self.pool.clone();
    let conn = pool.get().unwrap();
    let query_qualifier = get_wallet_strategy_qualifier()?;
    let res: Result<Vec<u8>, WalletStorageError> = {
        let mut rows = match query_qualifier {
            Some(_) => conn.query(
                "SELECT value FROM metadata WHERE wallet_id = $1",
                &[&self.wallet_id]),
            None => conn.query(
                "SELECT value FROM metadata",
                &[])
        };
        match rows.as_mut().unwrap().iter().next() {
            Some(row) => Ok(row.get(0)),
            None => Err(WalletStorageError::ItemNotFound)
        }
    };
    match res {
        Ok(entity) => Ok(entity),
        Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
        Err(err) => return Err(WalletStorageError::from(err))
    }
}

/// Overwrites the wallet metadata blob with `metadata`.
fn set_storage_metadata(&self, metadata: &[u8]) -> Result<(), WalletStorageError> {
    let pool = self.pool.clone();
    let conn = pool.get().unwrap();
    let query_qualifier = get_wallet_strategy_qualifier()?;
    let res = match query_qualifier {
        Some(_) => conn.execute("UPDATE metadata SET value = $1 WHERE wallet_id = $2", &[&metadata.to_vec(), &self.wallet_id]),
        None => conn.execute("UPDATE metadata SET value = $1", &[&metadata.to_vec()])
    };
    match res {
        Ok(_) => Ok(()),
        Err(error) => {
            Err(WalletStorageError::IOError(format!("Error occurred while inserting the keys: {}", error)))
        }
    }
}

/// Returns an iterator over every item in this wallet, with type, value and tags retrieved.
fn get_all(&self) -> Result<Box<dyn StorageIterator>, WalletStorageError> {
    let query_qualifier = get_wallet_strategy_qualifier()?;
    let statement = match query_qualifier {
        Some(_) => self._prepare_statement("SELECT id, name, value, key, type FROM items WHERE wallet_id = $1")?,
        None => self._prepare_statement("SELECT id, name, value, key, type FROM items")?
};
let fetch_options = RecordOptions {
    retrieve_type: true,
    retrieve_value: true,
    retrieve_tags: true,
};
let pool = self.pool.clone();
// The tag retriever holds its own pooled connection for per-record tag lookups.
let tag_retriever = match query_qualifier {
    Some(_) => Some(TagRetriever::new_owned(Rc::new(pool.get().unwrap()).clone(), Some(self.wallet_id.clone()))?),
    None => Some(TagRetriever::new_owned(Rc::new(pool.get().unwrap()).clone(), None)?)
};
let storage_iterator = match query_qualifier {
    Some(_) => PostgresStorageIterator::new(Some(statement), &[&self.wallet_id], fetch_options, tag_retriever, None)?,
    None => PostgresStorageIterator::new(Some(statement), &[], fetch_options, tag_retriever, None)?
};
Ok(Box::new(storage_iterator))
}

/// Searches items of `type_` matching the WQL `query`, honouring the JSON `options`
/// (which records/fields to retrieve and whether to compute a total count).
///
/// For wallet-qualified strategies the generated SQL is decorated with an
/// `i.wallet_id` filter and, when the query touches the tag tables, with WITH
/// (CTE) clauses that pre-filter those tables by wallet_id.
fn search(&self, type_: &[u8], query: &language::Operator, options: Option<&str>) -> Result<Box<dyn StorageIterator>, WalletStorageError> {
    let type_ = type_.to_vec(); // FIXME
    let search_options = match options {
        None => SearchOptions::default(),
        Some(option_str) => serde_json::from_str(option_str)?
    };
    let pool = self.pool.clone();
    let conn = pool.get().unwrap();
    let query_qualifier = get_wallet_strategy_qualifier()?;
    let wallet_id_arg = self.wallet_id.to_owned();
    // Optional COUNT(*) pass over the same WQL query.
    let total_count: Option<usize> = if search_options.retrieve_total_count {
        let (query_string, query_arguments) = match query_qualifier {
            Some(_) => {
                let (mut query_string, mut query_arguments) = query::wql_to_sql_count(&type_, query)?;
                // Restrict the items scan to this wallet.
                query_arguments.push(&wallet_id_arg);
                let arg_str = format!(" AND i.wallet_id = ${}", query_arguments.len());
                query_string.push_str(&arg_str);
                // Wrap referenced tag tables in wallet-scoped CTEs.
                let mut with_clause = false;
                if query_string.contains("tags_plaintext") {
                    query_arguments.push(&wallet_id_arg);
                    query_string = format!("tags_plaintext as (select * from tags_plaintext where wallet_id = ${}) {}", query_arguments.len(), query_string);
                    with_clause = true;
                }
                if query_string.contains("tags_encrypted") {
                    if with_clause {
                        // A previous CTE exists; separate with a comma.
                        query_string = format!(", {}", query_string);
                    }
                    query_arguments.push(&wallet_id_arg);
                    query_string = format!("tags_encrypted as (select * from tags_encrypted where wallet_id = ${}) {}", query_arguments.len(), query_string);
                    with_clause = true;
                }
                if with_clause {
                    query_string = format!("WITH {}", query_string);
                }
                (query_string, query_arguments)
            }
            None => query::wql_to_sql_count(&type_, query)?
        };
        let mut rows = conn.query(
            &query_string,
            &query_arguments[..]);
        match rows.as_mut().unwrap().iter().next() {
            Some(row) => {
                let x: i64 = row.get(0);
                Some(x as usize)
            }
            None => None
        }
    } else {
        None
    };
    if search_options.retrieve_records {
        let fetch_options = RecordOptions {
            retrieve_value: search_options.retrieve_value,
            retrieve_tags: search_options.retrieve_tags,
            retrieve_type: search_options.retrieve_type,
        };
        // Same wallet-qualifier decoration as the count pass, applied to the record query.
        let (query_string, query_arguments) = match query_qualifier {
            Some(_) => {
                let (mut query_string, mut query_arguments) = query::wql_to_sql(&type_, query, options)?;
                query_arguments.push(&wallet_id_arg);
                let arg_str = format!(" AND i.wallet_id = ${}", query_arguments.len());
                query_string.push_str(&arg_str);
                let mut with_clause = false;
                if query_string.contains("tags_plaintext") {
                    query_arguments.push(&wallet_id_arg);
                    query_string = format!("tags_plaintext as (select * from tags_plaintext where wallet_id = ${}) {}", query_arguments.len(), query_string);
                    with_clause = true;
                }
                if query_string.contains("tags_encrypted") {
                    if with_clause {
                        query_string = format!(", {}", query_string);
                    }
                    query_arguments.push(&wallet_id_arg);
                    query_string = format!("tags_encrypted as (select * from tags_encrypted where wallet_id = ${}) {}", query_arguments.len(), query_string);
                    with_clause = true;
                }
                if with_clause {
                    query_string = format!("WITH {}", query_string);
                }
                (query_string, query_arguments)
            }
            None => query::wql_to_sql(&type_, query, options)?
};
let statement = self._prepare_statement(&query_string)?;
// Only build a tag retriever (and take an extra pooled connection) when tags are requested.
let tag_retriever = if fetch_options.retrieve_tags {
    let pool = self.pool.clone();
    match query_qualifier {
        Some(_) => Some(TagRetriever::new_owned(Rc::new(pool.get().unwrap()).clone(), Some(self.wallet_id.clone()))?),
        None => Some(TagRetriever::new_owned(Rc::new(pool.get().unwrap()).clone(), None)?)
    }
} else {
    None
};
let storage_iterator = PostgresStorageIterator::new(Some(statement), &query_arguments[..], fetch_options, tag_retriever, total_count)?;
Ok(Box::new(storage_iterator))
} else {
    // Caller asked only for the count: return an empty iterator that carries it.
    let storage_iterator = PostgresStorageIterator::new(None, &[], RecordOptions::default(), None, total_count)?;
    Ok(Box::new(storage_iterator))
}
}

/// Closes the storage. Currently a no-op (see TODO below).
fn close(&mut self) -> Result<(), WalletStorageError> {
    // TODO throws a borrow error if we try to close the connection here; temporary workaround is to rely on idle connection timeout
    Ok(())
}
}

impl PostgresStorage {
    /// Prepares `sql` on a freshly checked-out pooled connection, returning an
    /// OwningHandle that keeps the connection alive for as long as the statement is used.
    fn _prepare_statement(&self, sql: &str) -> Result<
        OwningHandle<Rc<r2d2::PooledConnection<PostgresConnectionManager>>, Box<postgres::stmt::Statement<'static>>>,
        WalletStorageError> {
        let pool = self.pool.clone();
        OwningHandle::try_new(Rc::new(pool.get().unwrap()).clone(), |conn| {
            // SAFETY relies on OwningHandle: `conn` points into the Rc owned by the handle,
            // so it stays valid for the lifetime of the boxed statement.
            unsafe { (*conn).prepare(sql) }.map(Box::new).map_err(WalletStorageError::from)
        })
    }
}

/// Builds the shared r2d2 connection pool used by the
/// MultiWalletSingleTableSharedPool strategy, sized from `config`.
fn create_connection_pool(config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<Pool<PostgresConnectionManager>, WalletStorageError> {
    let _url_base = PostgresStorageType::_admin_postgres_url(&config, &credentials);
    let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials);
    let manager = match PostgresConnectionManager::new(&url[..], config.r2d2_tls()) {
        Ok(manager) => manager,
        Err(e) => {
            return Err(WalletStorageError::GenericError(format!("Problem creating PostgresConnectionManager. Details {:?}", e)));
        }
    };
    debug!("MultiWalletSingleTableStrategySharedPool open >> building connection pool");
    match r2d2::Pool::builder()
        .min_idle(Some(config.min_idle_count()))
        .max_size(config.max_connections())
        .idle_timeout(Some(Duration::new(config.connection_timeout(), 0)))
        .build(manager) {
        Ok(pool) => Ok(pool),
        Err(e) => Err(WalletStorageError::GenericError(format!("Problem creating PostgresConnectionManager. Details {:?}", e)))
    }
}

/// Installs the process-wide wallet strategy (write-locks SELECTED_STRATEGY).
fn set_wallet_strategy(strategy: Box<dyn WalletStrategy + Send + Sync>) {
    let mut write_strategy = SELECTED_STRATEGY.write().unwrap();
    *write_strategy = Some(strategy);
}

/// Returns the active strategy's query qualifier (Some(..) when queries must be
/// wallet-scoped), or an error if init_storage has not been called yet.
fn get_wallet_strategy_qualifier() -> Result<Option<String>, WalletStorageError> {
    let read_strategy = SELECTED_STRATEGY.read().unwrap();
    read_strategy.as_ref()
        .map_or(Err(WalletStorageError::GenericError(format!("Storage was not yet initialized."))),
                |strategy| Ok(strategy.query_qualifier()),
        )
}

impl WalletStorageType for PostgresStorageType {
    ///
    /// Initializes the wallets database and creates the necessary tables for all wallets
    /// This needs to be called once at the very beginning, I'm not entirely sure the best way to enforce it
    ///
    /// # Arguments
    ///
    /// * `storage_config` - config containing the location of Postgres DB files
    /// * `storage_credentials` - DB credentials
    ///
    /// # Returns
    ///
    /// Result that can be either:
    ///
    /// * `()`
    /// * `WalletStorageError`
    ///
    /// # Errors
    ///
    /// Any of the following `WalletStorageError` type_ of errors can be thrown by this method:
    ///
    /// * `WalletStorageError::NotFound` - File with the provided id not found
    /// * `IOError(..)` - Deletion of the file form the file-system failed
    ///
    fn init_storage(&self, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError> {
        // Both config and credentials are mandatory JSON blobs.
        let config = config
            .map(serde_json::from_str::<PostgresConfig>)
            .map_or(Ok(None), |v| v.map(Some))
            .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?;
        let credentials = credentials
            .map(serde_json::from_str::<PostgresCredentials>)
            .map_or(Ok(None), |v| v.map(Some))
            .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize credentials: {:?}", err)))?;
        let config = match config {
            Some(config) => config,
            None => return Err(WalletStorageError::ConfigError)
        };
        let credentials = match credentials {
            Some(credentials) => credentials,
            None => return Err(WalletStorageError::ConfigError)
        };
        // Guard against double initialization of the global strategy.
        {
            let r1 = SELECTED_STRATEGY.read().unwrap();
            match r1.as_ref() {
                Some(_) => {
                    return Err(WalletStorageError::GenericError(format!("Storage was already initialized.")));
                }
                None => info!("Initializing postgresql storage for the first time")
            };
        };
        // Pick and install the strategy requested by the configuration.
        match config.wallet_scheme {
            Some(scheme) => match scheme {
                WalletScheme::DatabasePerWallet => {
                    debug!("Initialising postgresql using DatabasePerWallet strategy.");
                    set_wallet_strategy(Box::new(DatabasePerWalletStrategy {}));
                }
                WalletScheme::MultiWalletSingleTable => {
                    debug!("Initialising postgresql using MultiWalletSingleTable strategy.");
                    set_wallet_strategy(Box::new(MultiWalletSingleTableStrategy {}));
                }
                WalletScheme::MultiWalletMultiTable => {
                    debug!("Initialising postgresql using MultiWalletMultiTable strategy.");
                    set_wallet_strategy(Box::new(MultiWalletMultiTableStrategy {}));
                }
                WalletScheme::MultiWalletSingleTableSharedPool => {
                    debug!("Initialising postgresql using MultiWalletSingleTableSharedPool strategy.");
                    // This strategy owns a single pool shared across wallets.
                    let pool = create_connection_pool(&config, &credentials)?;
                    set_wallet_strategy(Box::new(MultiWalletSingleTableStrategySharedPool { pool }));
                }
            },
            None => {
                debug!("Initialising postgresql but strategy was not specified in storage \
                configuration. Using DatabasePerWallet strategy by default.");
                set_wallet_strategy(Box::new(DatabasePerWalletStrategy {}));
            }
        };
        // Delegate table/database creation to the freshly installed strategy.
        let r1 = SELECTED_STRATEGY.read().unwrap();
        match r1.as_ref() {
            Some(strategy) => {
                strategy.init_storage(&config, &credentials)
            }
            None => panic!("Was about to initialize postgresql storage strategy, but not strategy \
            was yet set. You should never see this error.")
        }
    }

    ///
    /// Deletes the Postgres database file with the provided id from the path specified in the
    /// config file.
    ///
    /// # Arguments
    ///
    /// * `id` - the wallet id
    /// * `storage_config` - Postgres DB connection config
    /// * `storage_credentials` - DB credentials
    ///
    /// # Returns
    ///
    /// Result that can be either:
    ///
    /// * `()`
    /// * `WalletStorageError`
    ///
    /// # Errors
    ///
    /// Any of the following `WalletStorageError` type_ of errors can be thrown by this method:
    ///
    /// * `WalletStorageError::NotFound` - File with the provided id not found
    /// * `IOError(..)` - Deletion of the file form the file-system failed
    ///
    fn delete_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError> {
        let config = config
            .map(serde_json::from_str::<PostgresConfig>)
            .map_or(Ok(None), |v| v.map(Some))
            .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?;
        let credentials = credentials
            .map(serde_json::from_str::<PostgresCredentials>)
            .map_or(Ok(None), |v| v.map(Some))
            .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize credentials: {:?}", err)))?;
        let config = match config {
            Some(config) => config,
            None => return Err(WalletStorageError::ConfigError)
        };
        let credentials = match credentials {
            Some(credentials) => credentials,
            None => return Err(WalletStorageError::ConfigError)
        };
        // Delegate to the active strategy; init_storage must have run first.
        let strategy_read_lock = SELECTED_STRATEGY.read().unwrap();
        strategy_read_lock
            .as_ref()
            .expect("Should never happen")
            .delete_wallet(id, &config, &credentials)
    }

    ///
    /// Creates the Postgres DB schema with the provided name in the id specified in the config file,
    /// and initializes the encryption keys needed for encryption and decryption of data.
    ///
    /// # Arguments
    ///
    /// * `id` - name of the Postgres DB schema
    /// * `config` - config containing the location of postgres db
    /// * `credentials` - DB credentials
    /// * `metadata` - encryption keys that need to be stored in the newly created DB
    ///
    /// # Returns
    ///
    /// Result that can be either:
    ///
    /// * `()`
    /// * `WalletStorageError`
    ///
    /// # Errors
    ///
    /// Any of the following `WalletStorageError` type_ of errors can be thrown by this method:
    ///
    /// * `AlreadyExists` - Schema with a given name already exists in the database
    /// * `IOError("IO error during storage operation:...")` - Connection to the DB failed
    /// * `IOError("Error occurred while creating wallet file:..)"` - Creation of schema failed
    /// * `IOError("Error occurred while inserting the keys...")` - Insertion of keys failed
    ///
    fn create_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>, metadata: &[u8]) -> Result<(), WalletStorageError> {
        let config = config
            .map(serde_json::from_str::<PostgresConfig>)
            .map_or(Ok(None), |v| v.map(Some))
            .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?;
        let credentials = credentials
            .map(serde_json::from_str::<PostgresCredentials>)
            .map_or(Ok(None), |v| v.map(Some))
            .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize credentials: {:?}", err)))?;
        let config = match config {
            Some(config) => config,
            None => return Err(WalletStorageError::ConfigError)
        };
        let credentials = match credentials {
            Some(credentials) => credentials,
            None => return Err(WalletStorageError::ConfigError)
        };
        // initialize using the global selected_strategy object
        let r1 = SELECTED_STRATEGY.read().unwrap();
        match r1.as_ref() {
            Some(strategy) => {
                strategy.create_wallet(id, &config, &credentials, metadata)
            }
            None => panic!("Should never happen")
        }
    }

    ///
    /// Establishes a connection to the Postgres DB with the provided id using the
    /// connection details in the config. In case of a successful connection returns a Storage object
    /// embedding the connection and the encryption keys that will be used for encryption and
    /// decryption operations.
    ///
    ///
    /// # Arguments
    ///
    /// * `id` - id of the wallet
    /// * `config` - config containing the connection details of the Postgres DB
    /// * `credentials` - DB credentials
    ///
    /// # Returns
    ///
    /// Result that can be either:
    ///
    /// * `(Box<Storage>, Vec<u8>)` - Tuple of `PostgresStorage` and `encryption keys`
    /// * `WalletStorageError`
    ///
    /// # Errors
    ///
    /// Any of the following `WalletStorageError` type_ of errors can be thrown by this method:
    ///
    /// * `WalletStorageError::NotFound` - Wallet with the provided id not found
    /// * `IOError("IO error during storage operation:...")` - Failed connection or SQL query
    ///
    fn open_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<Box<PostgresStorage>, WalletStorageError> {
        let config = config
            .map(serde_json::from_str::<PostgresConfig>)
            .map_or(Ok(None), |v| v.map(Some))
            .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?;
        let credentials = credentials
            .map(serde_json::from_str::<PostgresCredentials>)
            .map_or(Ok(None), |v| v.map(Some))
            .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize credentials: {:?}", err)))?;
        let config = match config {
            Some(config) => config,
            None => return Err(WalletStorageError::ConfigError)
        };
        let credentials = match credentials {
            Some(credentials) => credentials,
            None => return Err(WalletStorageError::ConfigError)
        };
        // initialize using the global selected_strategy object
        let r1 = SELECTED_STRATEGY.read().unwrap();
        match r1.as_ref() {
            Some(strategy) => {
                strategy.open_wallet(id, &config, &credentials)
            }
            None => panic!("Should never happen")
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::env;
    use utils::test;

    // NOTE(review): these tests require a reachable Postgres instance configured
    // via the _wallet_config()/_wallet_credentials() helpers (defined outside this chunk).
    #[test]
    fn
postgres_storage_type_create_works() {
        _cleanup();
        let storage_type = PostgresStorageType::new();
        storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap();
    }

    // Creating the same wallet twice must fail with AlreadyExists.
    #[test]
    fn postgres_storage_type_create_works_for_twice() {
        _cleanup();
        let storage_type = PostgresStorageType::new();
        storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap();
        let res = storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata());
        assert_match!(Err(WalletStorageError::AlreadyExists), res);
    }

    #[test]
    fn postgres_storage_get_storage_metadata_works() {
        _cleanup();
        let storage = _storage();
        let metadata = storage.get_storage_metadata().unwrap();
        assert_eq!(metadata, _metadata());
    }

    #[test]
    fn postgres_storage_type_delete_works() {
        _cleanup();
        let storage_type = PostgresStorageType::new();
        storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap();
        storage_type.delete_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap();
    }

    // Deleting an unknown wallet fails with NotFound; the real wallet can still be deleted after.
    #[test]
    fn postgres_storage_type_delete_works_for_non_existing() {
        _cleanup();
        let storage_type = PostgresStorageType::new();
        storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap();
        let res = storage_type.delete_storage("unknown", Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]));
        assert_match!(Err(WalletStorageError::NotFound), res);
        storage_type.delete_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap();
    }

    #[test]
    fn postgres_storage_type_open_works() {
        _cleanup();
        _storage();
    }

    #[test]
    fn postgres_storage_type_open_works_for_not_created() {
        _cleanup();
        let storage_type = PostgresStorageType::new();
        let res = storage_type.open_storage("unknown", Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]));
        assert_match!(Err(WalletStorageError::NotFound), res);
    }

    #[test]
    fn postgres_storage_add_works_with_config() {
        _cleanup();
        let storage = _storage_db_pool();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
    }

    // Duplicate adds must consistently report ItemAlreadyExists (regression for IS-802).
    #[test]
    fn postgres_storage_add_works_for_is_802() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let res = storage.add(&_type1(), &_id1(), &_value1(), &_tags());
        assert_match!(Err(WalletStorageError::ItemAlreadyExists), res);
        let res = storage.add(&_type1(), &_id1(), &_value1(), &_tags());
        assert_match!(Err(WalletStorageError::ItemAlreadyExists), res);
    }

    #[test]
    fn postgres_storage_set_get_works() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.value.unwrap(), _value1());
        assert_eq!(_sort(record.tags.unwrap()), _sort(_tags()));
    }

    #[test]
    fn postgres_storage_set_get_works_for_twice() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let res = storage.add(&_type1(), &_id1(), &_value2(), &_tags());
        assert_match!(Err(WalletStorageError::ItemAlreadyExists), res);
    }

    // Data written through one storage handle must be visible after reopening the wallet.
    #[test]
    fn postgres_storage_set_get_works_for_reopen() {
        _cleanup();
        {
            _storage().add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        }
        let storage_type = PostgresStorageType::new();
        let storage = storage_type.open_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.value.unwrap(), _value1());
        assert_eq!(_sort(record.tags.unwrap()), _sort(_tags()));
    }

    #[test]
    fn postgres_storage_get_works_for_wrong_key() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let res = storage.get(&_type1(), &_id2(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##);
        assert_match!(Err(WalletStorageError::ItemNotFound), res)
    }

    #[test]
    fn postgres_storage_delete_works() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.value.unwrap(), _value1());
        assert_eq!(_sort(record.tags.unwrap()), _sort(_tags()));
        storage.delete(&_type1(), &_id1()).unwrap();
        let res = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##);
        assert_match!(Err(WalletStorageError::ItemNotFound), res);
    }

    #[test]
    fn postgres_storage_delete_works_for_non_existing() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let res = storage.delete(&_type1(), &_id2());
        assert_match!(Err(WalletStorageError::ItemNotFound), res);
    }

    #[test]
    fn postgres_storage_create_and_find_multiple_works() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let record1 = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record1.value.unwrap(), _value1());
        assert_eq!(_sort(record1.tags.unwrap()), _sort(_tags()));
        storage.add(&_type2(), &_id2(), &_value2(), &_tags()).unwrap();
        let record2 = storage.get(&_type2(), &_id2(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record2.value.unwrap(), _value2());
        assert_eq!(_sort(record2.tags.unwrap()), _sort(_tags()));
    }

    #[test]
    fn postgres_storage_get_all_workss() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        storage.add(&_type2(), &_id2(), &_value2(), &_tags()).unwrap();
        let mut storage_iterator = storage.get_all().unwrap();
        let record = storage_iterator.next().unwrap().unwrap();
        assert_eq!(record.type_.unwrap(), _type1());
        assert_eq!(record.value.unwrap(), _value1());
        assert_eq!(_sort(record.tags.unwrap()), _sort(_tags()));
        let record = storage_iterator.next().unwrap().unwrap();
        assert_eq!(record.type_.unwrap(), _type2());
        assert_eq!(record.value.unwrap(), _value2());
        assert_eq!(_sort(record.tags.unwrap()), _sort(_tags()));
        let record = storage_iterator.next().unwrap();
        assert!(record.is_none());
    }

    #[test]
    fn postgres_storage_get_all_works_for_empty() {
        _cleanup();
        let storage = _storage();
        let mut storage_iterator = storage.get_all().unwrap();
        let record = storage_iterator.next().unwrap();
        assert!(record.is_none());
    }

    #[test]
    fn postgres_storage_update_works() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.value.unwrap(), _value1());
        storage.update(&_type1(), &_id1(), &_value2()).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.value.unwrap(), _value2());
    }

    #[test]
    fn postgres_storage_update_works_for_non_existing_id() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.value.unwrap(), _value1());
        let res = storage.update(&_type1(), &_id2(), &_value2());
        assert_match!(Err(WalletStorageError::ItemNotFound), res)
    }

    #[test]
    fn postgres_storage_update_works_for_non_existing_type() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.value.unwrap(), _value1());
        let res = storage.update(&_type2(), &_id1(), &_value2());
        assert_match!(Err(WalletStorageError::ItemNotFound), res)
    }

    // add_tags merges new tags with the existing set.
    #[test]
    fn postgres_storage_add_tags_works() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        storage.add_tags(&_type1(), &_id1(), &_new_tags()).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.value.unwrap(), _value1());
        let expected_tags = {
            let mut tags = _tags();
            tags.extend(_new_tags());
            _sort(tags)
        };
        assert_eq!(_sort(record.tags.unwrap()), expected_tags);
    }

    #[test]
    fn postgres_storage_add_tags_works_for_non_existing_id() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let res = storage.add_tags(&_type1(), &_id2(), &_new_tags());
        assert_match!(Err(WalletStorageError::ItemNotFound), res)
    }

    #[test]
    fn postgres_storage_add_tags_works_for_non_existing_type() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let res = storage.add_tags(&_type2(), &_id1(), &_new_tags());
        assert_match!(Err(WalletStorageError::ItemNotFound), res)
    }

    // Re-adding an already-present tag must upsert, not duplicate or fail.
    #[test]
    fn postgres_storage_add_tags_works_for_already_existing() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let tags_with_existing = {
            let mut tags = _tags();
            tags.extend(_new_tags());
            tags
        };
        storage.add_tags(&_type1(), &_id1(), &tags_with_existing).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.value.unwrap(), _value1());
        let expected_tags = {
            let mut tags = _tags();
            tags.extend(_new_tags());
            _sort(tags)
        };
        assert_eq!(_sort(record.tags.unwrap()), expected_tags);
    }

    // update_tags replaces the whole tag set.
    #[test]
    fn postgres_storage_update_tags_works() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        storage.update_tags(&_type1(), &_id1(), &_new_tags()).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.value.unwrap(), _value1());
        assert_eq!(_sort(record.tags.unwrap()), _sort(_new_tags()));
    }

    #[test]
    fn postgres_storage_update_tags_works_for_non_existing_id() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let res = storage.update_tags(&_type1(), &_id2(), &_new_tags());
        assert_match!(Err(WalletStorageError::ItemNotFound), res);
    }

    #[test]
    fn postgres_storage_update_tags_works_for_non_existing_type() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let res = storage.update_tags(&_type1(), &_id2(), &_new_tags());
        assert_match!(Err(WalletStorageError::ItemNotFound), res);
    }

    #[test]
    fn postgres_storage_update_tags_works_for_already_existing() {
        _cleanup();
        let storage = _storage();
        storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap();
        let tags_with_existing = {
            let mut tags = _tags();
            tags.extend(_new_tags());
            tags
        };
        storage.update_tags(&_type1(), &_id1(), &tags_with_existing).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.value.unwrap(), _value1());
        let expected_tags = {
            let mut tags = _tags();
            tags.extend(_new_tags());
            _sort(tags)
        };
        assert_eq!(_sort(record.tags.unwrap()), expected_tags);
    }

    // delete_tags removes only the named tags and leaves the rest.
    #[test]
    fn postgres_storage_delete_tags_works() {
        _cleanup();
        let storage = _storage();
        let tag_name1 = vec![0, 0, 0];
        let tag_name2 = vec![1, 1, 1];
        let tag_name3 = vec![2, 2, 2];
        let tag1 = Tag::Encrypted(tag_name1.clone(), vec![0, 0, 0]);
        let tag2 = Tag::PlainText(tag_name2.clone(), "tag_value_2".to_string());
        let tag3 = Tag::Encrypted(tag_name3.clone(), vec![2, 2, 2]);
        let tags = vec![tag1.clone(), tag2.clone(), tag3.clone()];
        storage.add(&_type1(), &_id1(), &_value1(), &tags).unwrap();
        let tag_names = vec![TagName::OfEncrypted(tag_name1.clone()), TagName::OfPlain(tag_name2.clone())];
        storage.delete_tags(&_type1(), &_id1(), &tag_names).unwrap();
        let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap();
        assert_eq!(record.tags.unwrap(), vec![tag3]);
    }

    #[test]
    fn postgres_storage_delete_tags_works_for_non_existing_type() {
        _cleanup();
        let storage = _storage();
        let tag_name1 = vec![0, 0, 0];
        let tag_name2 = vec![1, 1, 1];
        let tag_name3 = vec![2, 2, 2];
        let tag1 = Tag::Encrypted(tag_name1.clone(), vec![0, 0, 0]);
        let tag2 = Tag::PlainText(tag_name2.clone(), "tag_value_2".to_string());
        let tag3 = Tag::Encrypted(tag_name3.clone(), vec![2, 2, 2]);
        let tags = vec![tag1.clone(), tag2.clone(), tag3.clone()];
        storage.add(&_type1(), &_id1(), &_value1(), &tags).unwrap();
        let tag_names = vec![TagName::OfEncrypted(tag_name1.clone()), TagName::OfPlain(tag_name2.clone())];
        let res = storage.delete_tags(&_type2(), &_id1(), &tag_names);
        assert_match!(Err(WalletStorageError::ItemNotFound), res);
    }

    #[test]
    fn postgres_storage_delete_tags_works_for_non_existing_id() {
        _cleanup();
        let storage = _storage();
        let tag_name1 = vec![0, 0, 0];
        let tag_name2 = vec![1, 1, 1];
        let tag_name3 = vec![2, 2, 2];
        let tag1 = Tag::Encrypted(tag_name1.clone(), vec![0, 0, 0]);
        let tag2 = Tag::PlainText(tag_name2.clone(), "tag_value_2".to_string());
        let tag3 = Tag::Encrypted(tag_name3.clone(), vec![2, 2, 2]);
        let tags = vec![tag1.clone(), tag2.clone(), tag3.clone()];
        storage.add(&_type1(), &_id1(), &_value1(), &tags).unwrap();
        let tag_names = vec![TagName::OfEncrypted(tag_name1.clone()),
TagName::OfPlain(tag_name2.clone())]; let res = storage.delete_tags(&_type1(), &_id2(), &tag_names); assert_match!(Err(WalletStorageError::ItemNotFound), res); } fn _cleanup() { let storage_type = PostgresStorageType::new(); { let mut write_strategy = SELECTED_STRATEGY.write().unwrap(); *write_strategy = None; } let _res = storage_type.init_storage(Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap(); let _ret = storage_type.delete_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])); let res = test::cleanup_storage(); res } fn _wallet_id() -> &'static str { "walle1" } fn _storage() -> Box<WalletStorage> { let storage_type = PostgresStorageType::new(); storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap(); let res = storage_type.open_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap(); res } fn _storage_db_pool() -> Box<WalletStorage> { let storage_type = PostgresStorageType::new(); storage_type.create_storage(_wallet_id(), Some(&_wallet_config_db_pool()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap(); let res = storage_type.open_storage(_wallet_id(), Some(&_wallet_config_db_pool()[..]), Some(&_wallet_credentials()[..])).unwrap(); res } fn _wallet_config() -> String { let wallet_scheme = env::var("WALLET_SCHEME"); match wallet_scheme { Ok(scheme) => { if scheme == "MultiWalletSingleTable" { return _wallet_config_multi(); } } Err(_) => () }; let config = json!({ "url": "localhost:5432".to_owned() }).to_string(); config } fn _wallet_config_multi() -> String { let config = json!({ "url": "localhost:5432".to_owned(), "wallet_scheme": "MultiWalletSingleTable".to_owned() }).to_string(); config } fn _wallet_config_db_pool() -> String { let config = json!({ "url": "localhost:5432".to_owned(), "tls": "None", "max_connections": 4, "min_idle_count": 0, "connection_timeout": 10 }).to_string(); 
config }

        // ---- deterministic test fixtures ----

        /// Credentials JSON for the tests; the admin_* pair grants DDL rights.
        fn _wallet_credentials() -> String {
            json!({
                "account": "postgres".to_owned(),
                "password": "mysecretpassword".to_owned(),
                "admin_account": Some("postgres".to_owned()),
                "admin_password": Some("mysecretpassword".to_owned())
            }).to_string()
        }

        /// 64 bytes of dummy wallet metadata: the sequence 1..8 repeated eight times.
        fn _metadata() -> Vec<u8> {
            (0..8).flat_map(|_| 1u8..9).collect()
        }

        /// Item type bytes derived from `i`.
        fn _type(i: u8) -> Vec<u8> {
            vec![i, i + 1, i + 2]
        }

        fn _type1() -> Vec<u8> {
            _type(1)
        }

        fn _type2() -> Vec<u8> {
            _type(2)
        }

        /// Record id bytes derived from `i`.
        fn _id(i: u8) -> Vec<u8> {
            vec![i + 3, i + 4, i + 5]
        }

        fn _id1() -> Vec<u8> {
            _id(1)
        }

        fn _id2() -> Vec<u8> {
            _id(2)
        }

        /// Encrypted value (data + key) derived from `i`.
        fn _value(i: u8) -> EncryptedValue {
            let data = vec![i + 6, i + 7, i + 8];
            let key = vec![i + 9, i + 10, i + 11];
            EncryptedValue { data, key }
        }

        fn _value1() -> EncryptedValue {
            _value(1)
        }

        fn _value2() -> EncryptedValue {
            _value(2)
        }

        /// Default tag set: two encrypted and two plaintext tags.
        fn _tags() -> Vec<Tag> {
            vec![
                Tag::Encrypted(vec![1, 5, 8], vec![3, 5, 6]),
                Tag::PlainText(vec![1, 5, 8, 1], "Plain value 1".to_string()),
                Tag::Encrypted(vec![2, 5, 8], vec![3, 5, 7]),
                Tag::PlainText(vec![2, 5, 8, 1], "Plain value 2".to_string()),
            ]
        }

        /// Disjoint tag set used by the add/update-tags tests.
        fn _new_tags() -> Vec<Tag> {
            vec![
                Tag::Encrypted(vec![1, 1, 1], vec![2, 2, 2]),
                Tag::PlainText(vec![1, 1, 1], String::from("tag_value_3")),
            ]
        }

        /// Sorts tags in place so tag sets can be compared order-independently.
        fn _sort(mut v: Vec<Tag>) -> Vec<Tag> {
            v.sort();
            v
        }
}
Return error min_idle_count>0 while using MultiWalletSingleTableSharedPool Signed-off-by: Patrik Stas <6c4238a049060e56eb05ce2cae5c82b437da42b8@absa.co.za>
extern crate owning_ref;
extern crate sodiumoxide;
extern crate r2d2;
extern crate r2d2_postgres;

use ::std::sync::RwLock;
use postgres;
use self::r2d2_postgres::{TlsMode, PostgresConnectionManager};
use serde_json;
use self::owning_ref::OwningHandle;
use std::rc::Rc;
use std::time::Duration;

use errors::wallet::WalletStorageError;
use
errors::common::CommonError;
use wql::language;
use wql::query;
use wql::transaction;
use wql::storage::{StorageIterator, WalletStorage, StorageRecord, EncryptedValue, Tag, TagName};
use self::r2d2_postgres::r2d2::Pool;
use errors::wallet::WalletStorageError::{ConfigError};

// serde default helpers for the option structs below.
fn default_true() -> bool { true }

fn default_false() -> bool { false }

/// Per-record retrieval flags parsed from the caller-supplied options JSON.
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct RecordOptions {
    // include the record type in the result
    #[serde(default = "default_false")]
    retrieve_type: bool,
    // include the (encrypted) value in the result
    #[serde(default = "default_true")]
    retrieve_value: bool,
    // include the record tags in the result
    #[serde(default = "default_false")]
    retrieve_tags: bool,
}

impl RecordOptions {
    /// Options JSON that fetches the id only (no type, value or tags).
    pub fn id() -> String {
        let options = RecordOptions {
            retrieve_type: false,
            retrieve_value: false,
            retrieve_tags: false,
        };

        serde_json::to_string(&options).unwrap()
    }

    /// Options JSON that fetches id and value — identical to the default flag set.
    pub fn id_value() -> String {
        serde_json::to_string(&RecordOptions::default()).unwrap()
    }
}

impl Default for RecordOptions {
    fn default() -> RecordOptions {
        RecordOptions {
            retrieve_type: false,
            retrieve_value: true,
            retrieve_tags: false,
        }
    }
}

/// Search-level retrieval flags parsed from the caller-supplied options JSON.
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct SearchOptions {
    // fetch matching records (not just the count)
    #[serde(default = "default_true")]
    retrieve_records: bool,
    // fetch the total number of matches
    #[serde(default = "default_false")]
    retrieve_total_count: bool,
    #[serde(default = "default_false")]
    retrieve_type: bool,
    #[serde(default = "default_true")]
    retrieve_value: bool,
    #[serde(default = "default_false")]
    retrieve_tags: bool,
}

impl SearchOptions {
    /// Options JSON that fetches records with type and value plus the total count.
    pub fn id_value() -> String {
        let options = SearchOptions {
            retrieve_records: true,
            retrieve_total_count: true,
            retrieve_type: true,
            retrieve_value: true,
            retrieve_tags: false,
        };

        serde_json::to_string(&options).unwrap()
    }
}

impl Default for SearchOptions {
    fn default() -> SearchOptions {
        SearchOptions {
            retrieve_records: true,
            retrieve_total_count: false,
            retrieve_type: false,
            retrieve_value: true,
            retrieve_tags:
false, } } } const _POSTGRES_DB: &str = "postgres"; const _WALLETS_DB: &str = "wallets"; const _PLAIN_TAGS_QUERY: &str = "SELECT name, value from tags_plaintext where item_id = $1"; const _ENCRYPTED_TAGS_QUERY: &str = "SELECT name, value from tags_encrypted where item_id = $1"; const _PLAIN_TAGS_QUERY_MULTI: &str = "SELECT name, value from tags_plaintext where item_id = $1 and wallet_id = $2"; const _ENCRYPTED_TAGS_QUERY_MULTI: &str = "SELECT name, value from tags_encrypted where item_id = $1 and wallet_id = $2"; const _CREATE_WALLET_DATABASE: &str = "CREATE DATABASE \"$1\""; const _CREATE_WALLETS_DATABASE: &str = "CREATE DATABASE wallets"; // Note: wallet id length was constrained before by postgres database name length to 64 characters, keeping the same restrictions const _CREATE_SCHEMA: [&str; 12] = [ "CREATE TABLE IF NOT EXISTS metadata ( id BIGSERIAL PRIMARY KEY, value BYTEA NOT NULL )", "CREATE UNIQUE INDEX IF NOT EXISTS ux_metadata_values ON metadata(value)", "CREATE TABLE IF NOT EXISTS items( id BIGSERIAL PRIMARY KEY, type BYTEA NOT NULL, name BYTEA NOT NULL, value BYTEA NOT NULL, key BYTEA NOT NULL )", "CREATE UNIQUE INDEX IF NOT EXISTS ux_items_type_name ON items(type, name)", "CREATE TABLE IF NOT EXISTS tags_encrypted( name BYTEA NOT NULL, value BYTEA NOT NULL, item_id BIGINT NOT NULL, PRIMARY KEY(name, item_id), FOREIGN KEY(item_id) REFERENCES items(id) ON DELETE CASCADE ON UPDATE CASCADE )", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_name ON tags_encrypted(name)", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_value ON tags_encrypted(value)", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_item_id ON tags_encrypted(item_id)", "CREATE TABLE IF NOT EXISTS tags_plaintext( name BYTEA NOT NULL, value TEXT NOT NULL, item_id BIGINT NOT NULL, PRIMARY KEY(name, item_id), FOREIGN KEY(item_id) REFERENCES items(id) ON DELETE CASCADE ON UPDATE CASCADE )", "CREATE INDEX IF NOT EXISTS ix_tags_plaintext_name ON tags_plaintext(name)", "CREATE INDEX IF NOT EXISTS 
ix_tags_plaintext_value ON tags_plaintext(value)", "CREATE INDEX IF NOT EXISTS ix_tags_plaintext_item_id ON tags_plaintext(item_id)" ]; const _CREATE_SCHEMA_MULTI: [&str; 14] = [ "CREATE TABLE IF NOT EXISTS metadata ( wallet_id VARCHAR(64) NOT NULL, value BYTEA NOT NULL, PRIMARY KEY(wallet_id) )", "CREATE UNIQUE INDEX IF NOT EXISTS ux_metadata_wallet_id_id ON metadata(wallet_id)", "CREATE UNIQUE INDEX IF NOT EXISTS ux_metadata_values ON metadata(wallet_id, value)", "CREATE TABLE IF NOT EXISTS items( wallet_id VARCHAR(64) NOT NULL, id BIGSERIAL NOT NULL, type BYTEA NOT NULL, name BYTEA NOT NULL, value BYTEA NOT NULL, key BYTEA NOT NULL, PRIMARY KEY(wallet_id, id) )", "CREATE UNIQUE INDEX IF NOT EXISTS ux_items_wallet_id_id ON items(wallet_id, id)", "CREATE UNIQUE INDEX IF NOT EXISTS ux_items_type_name ON items(wallet_id, type, name)", "CREATE TABLE IF NOT EXISTS tags_encrypted( wallet_id VARCHAR(64) NOT NULL, name BYTEA NOT NULL, value BYTEA NOT NULL, item_id BIGINT NOT NULL, PRIMARY KEY(wallet_id, name, item_id), FOREIGN KEY(wallet_id, item_id) REFERENCES items(wallet_id, id) ON DELETE CASCADE ON UPDATE CASCADE )", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_name ON tags_encrypted(wallet_id, name)", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_value ON tags_encrypted(wallet_id, value)", "CREATE INDEX IF NOT EXISTS ix_tags_encrypted_wallet_id_item_id ON tags_encrypted(wallet_id, item_id)", "CREATE TABLE IF NOT EXISTS tags_plaintext( wallet_id VARCHAR(64) NOT NULL, name BYTEA NOT NULL, value TEXT NOT NULL, item_id BIGINT NOT NULL, PRIMARY KEY(wallet_id, name, item_id), FOREIGN KEY(wallet_id, item_id) REFERENCES items(wallet_id, id) ON DELETE CASCADE ON UPDATE CASCADE )", "CREATE INDEX IF NOT EXISTS ix_tags_plaintext_name ON tags_plaintext(wallet_id, name)", "CREATE INDEX IF NOT EXISTS ix_tags_plaintext_value ON tags_plaintext(wallet_id, value)", "CREATE INDEX IF NOT EXISTS ix_tags_plaintext_wallet_id_item_id ON tags_plaintext(wallet_id, item_id)" ]; const 
_DROP_WALLET_DATABASE: &str = "DROP DATABASE \"$1\""; const _DROP_SCHEMA: [&str; 4] = [ "DROP TABLE tags_plaintext", "DROP TABLE tags_encrypted", "DROP TABLE items", "DROP TABLE metadata" ]; const _DELETE_WALLET_MULTI: [&str; 4] = [ "DELETE FROM tags_plaintext WHERE wallet_id = $1", "DELETE FROM tags_encrypted WHERE wallet_id = $1", "DELETE FROM items WHERE wallet_id = $1", "DELETE FROM metadata WHERE wallet_id = $1" ]; #[derive(Debug)] struct TagRetriever<'a> { plain_tags_stmt: postgres::stmt::Statement<'a>, encrypted_tags_stmt: postgres::stmt::Statement<'a>, wallet_id: Option<String>, } type TagRetrieverOwned = OwningHandle<Rc<r2d2::PooledConnection<PostgresConnectionManager>>, Box<TagRetriever<'static>>>; impl<'a> TagRetriever<'a> { fn new_owned(conn: Rc<r2d2::PooledConnection<PostgresConnectionManager>>, wallet_id: Option<String>) -> Result<TagRetrieverOwned, WalletStorageError> { OwningHandle::try_new(conn.clone(), |conn| -> Result<_, postgres::Error> { let (plain_tags_stmt, encrypted_tags_stmt) = unsafe { match wallet_id { Some(_) => ((*conn).prepare(_PLAIN_TAGS_QUERY_MULTI)?, (*conn).prepare(_ENCRYPTED_TAGS_QUERY_MULTI)?), None => ((*conn).prepare(_PLAIN_TAGS_QUERY)?, (*conn).prepare(_ENCRYPTED_TAGS_QUERY)?) } }; let tr = TagRetriever { plain_tags_stmt, encrypted_tags_stmt, wallet_id, }; Ok(Box::new(tr)) }).map_err(WalletStorageError::from) } fn retrieve(&mut self, id: i64) -> Result<Vec<Tag>, WalletStorageError> { let mut tags = Vec::new(); let plain_results = match self.wallet_id { Some(ref w_id) => self.plain_tags_stmt.query(&[&id, &w_id])?, None => self.plain_tags_stmt.query(&[&id])? }; let mut iter_plain = plain_results.iter(); while let Some(res) = iter_plain.next() { let row = res; tags.push(Tag::PlainText(row.get(0), row.get(1))); } let encrypted_results = match self.wallet_id { Some(ref w_id) => self.encrypted_tags_stmt.query(&[&id, &w_id])?, None => self.encrypted_tags_stmt.query(&[&id])? 
};
        // collect the encrypted tags for this item
        for row in encrypted_results.iter() {
            tags.push(Tag::Encrypted(row.get(0), row.get(1)));
        }

        Ok(tags)
    }
}

/// Streaming iterator over wallet search results.
///
/// `rows` owns the executed statement and its result rows via nested
/// `OwningHandle`s so the pooled connection stays alive while iterating.
struct PostgresStorageIterator {
    rows: Option<
        OwningHandle<
            OwningHandle<
                Rc<r2d2::PooledConnection<PostgresConnectionManager>>,
                Box<postgres::stmt::Statement<'static>>>,
            Box<postgres::rows::Rows<>>>>,
    // present only when tags were requested in `options`
    tag_retriever: Option<TagRetrieverOwned>,
    options: RecordOptions,
    total_count: Option<usize>,
    // index of the next row to yield; see the note in `next()`
    iter_count: usize,
}

impl PostgresStorageIterator {
    /// Builds an iterator; when `stmt` is `None` only `total_count` is served.
    fn new(stmt: Option<OwningHandle<Rc<r2d2::PooledConnection<PostgresConnectionManager>>, Box<postgres::stmt::Statement<'static>>>>,
           args: &[&dyn postgres::types::ToSql],
           options: RecordOptions,
           tag_retriever: Option<TagRetrieverOwned>,
           total_count: Option<usize>) -> Result<PostgresStorageIterator, WalletStorageError> {
        let mut iterator = PostgresStorageIterator {
            rows: None,
            tag_retriever,
            options,
            total_count,
            iter_count: 0,
        };

        if let Some(stmt) = stmt {
            // SAFETY relies on OwningHandle keeping the statement alive for
            // as long as the rows handle exists.
            iterator.rows = Some(OwningHandle::try_new(
                stmt, |stmt| unsafe {
                    (*(stmt as *mut postgres::stmt::Statement)).query(args).map(Box::new)
                },
            )?);
        }

        Ok(iterator)
    }
}

impl StorageIterator for PostgresStorageIterator {
    fn next(&mut self) -> Result<Option<StorageRecord>, WalletStorageError> {
        // records were not requested => nothing to yield
        if self.rows.is_none() {
            return Ok(None);
        }

        // TODO not sure if iter().nth() is the most efficient way to iterate through the result set
        // TODO investigate if the Iter object can be cached between calls to next()
        // NOTE(review): `iter().nth(k)` restarts from row 0 on every call, so a
        // full scan is O(n^2) in the number of rows; caching the Iter between
        // calls would make it linear.
        match self.rows.as_mut().unwrap().iter().nth(self.iter_count) {
            Some(row) => {
                self.iter_count += 1;

                let name = row.get(1);

                let value = if self.options.retrieve_value {
                    Some(EncryptedValue::new(row.get(2), row.get(3)))
                } else {
                    None
                };

                let tags = if self.options.retrieve_tags {
                    match self.tag_retriever {
                        Some(ref mut tag_retriever) => Some(tag_retriever.retrieve(row.get(0))?),
                        None => return Err(WalletStorageError::CommonError(
                            CommonError::InvalidState("Fetch tags option set and tag retriever is None".to_string())
                        ))
                    }
                } else {
                    None
                };

                let type_ = if self.options.retrieve_type {
                    Some(row.get(4))
                } else {
                    None
                };

                Ok(Some(StorageRecord::new(name, value, type_, tags)))
            }
            None => Ok(None)
        }
    }

    fn get_total_count(&self) -> Result<Option<usize>, WalletStorageError> {
        Ok(self.total_count)
    }
}

/// Storage configuration parsed from the plug-in config JSON.
#[derive(Deserialize, Debug)]
pub struct PostgresConfig {
    url: String,
    tls: Option<String>, // default off
    max_connections: Option<u32>, // default 5
    min_idle_time: Option<u32>, // default 0, deprecated
    min_idle_count: Option<u32>, // default 0
    connection_timeout: Option<u64>, // default 5
    wallet_scheme: Option<WalletScheme>, // default DatabasePerWallet
}

impl PostgresConfig {
    /// TLS mode for direct `postgres` connections; only "None" is supported so far.
    fn tls(&self) -> postgres::TlsMode {
        match &self.tls {
            Some(tls) => match tls.as_ref() {
                "None" => postgres::TlsMode::None,
                // TODO add tls support for connecting to postgres db
                //"Prefer" => postgres::TlsMode::Prefer(&postgres::Connection),
                //"Require" => postgres::TlsMode::Require(&postgres::Connection),
                _ => postgres::TlsMode::None
            },
            None => postgres::TlsMode::None
        }
    }

    /// TLS mode for the r2d2 connection manager; only "None" is supported so far.
    fn r2d2_tls(&self) -> TlsMode {
        match &self.tls {
            Some(tls) => match tls.as_ref() {
                "None" => TlsMode::None,
                // TODO add tls support for connecting to postgres db
                //"Prefer" => TlsMode::Prefer(&postgres::Connection),
                //"Require" => TlsMode::Require(&postgres::Connection),
                _ => TlsMode::None
            },
            None => TlsMode::None
        }
    }

    /// Maximum number of connections managed by the pool (default 5).
    fn max_connections(&self) -> u32 {
        self.max_connections.unwrap_or(5)
    }

    /// Minimum idle connection count maintained by the pool (default 0);
    /// falls back to the deprecated `min_idle_time` option when unset.
    fn min_idle_count(&self) -> u32 {
        if let Some(count) = self.min_idle_count {
            return count;
        }
        if let Some(deprecated) = self.min_idle_time {
            warn!("Configuration option min_idle_time is deprecated. Use min_idle_count instead.");
            return deprecated;
        }
        0
    }

    /// Idle timeout used by the pool, in seconds (default 5).
    fn connection_timeout(&self) -> u64 {
        self.connection_timeout.unwrap_or(5)
    }
}

/// Credentials parsed from the plug-in credentials JSON; admin_* are
/// optional and only needed for DDL (create/drop database).
#[derive(Deserialize, Debug)]
pub struct PostgresCredentials {
    account: String,
    password: String,
    admin_account: Option<String>,
    admin_password: Option<String>,
}

/// An opened wallet: a connection pool plus the wallet's id.
#[derive(Debug)]
pub struct PostgresStorage {
    pool: r2d2::Pool<PostgresConnectionManager>,
    wallet_id: String,
}

/// Public storage-type entry points exposed by the plug-in.
pub trait WalletStorageType {
    fn init_storage(&self, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError>;
    fn create_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>, metadata: &[u8]) -> Result<(), WalletStorageError>;
    fn open_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<Box<PostgresStorage>, WalletStorageError>;
    fn delete_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError>;
}

/// How wallets map onto databases/tables; selected via the config JSON.
#[derive(Deserialize, Debug)]
#[derive(Copy, Clone)]
enum WalletScheme {
    DatabasePerWallet,
    MultiWalletSingleTable,
    MultiWalletSingleTableSharedPool,
    MultiWalletMultiTable,
}

trait WalletStrategy {
    // initialize storage based on wallet storage strategy
    fn init_storage(&self, config: &PostgresConfig, credentials: &PostgresCredentials) ->
Result<(), WalletStorageError>; // initialize a single wallet based on wallet storage strategy fn create_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials, metadata: &[u8]) -> Result<(), WalletStorageError>; // open a wallet based on wallet storage strategy fn open_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError>; // delete a single wallet based on wallet storage strategy fn delete_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError>; // determine physical table name based on wallet strategy fn table_name(&self, id: &str, base_name: &str) -> String; // determine additional query parameters based on wallet strategy fn query_qualifier(&self) -> Option<String>; } pub struct PostgresStorageType {} struct DatabasePerWalletStrategy {} struct MultiWalletSingleTableStrategy {} struct MultiWalletMultiTableStrategy {} struct MultiWalletSingleTableStrategySharedPool { pool: r2d2::Pool<PostgresConnectionManager> } impl WalletStrategy for MultiWalletSingleTableStrategySharedPool { // initialize storage based on wallet storage strategy fn init_storage(&self, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> { // create database and tables for storage // if admin user and password aren't provided then bail if credentials.admin_account == None || credentials.admin_password == None { return Ok(()); } let url_base = PostgresStorageType::_admin_postgres_url(&config, &credentials); let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials); let conn = postgres::Connection::connect(&url_base[..], postgres::TlsMode::None)?; if let Err(error) = conn.execute(&_CREATE_WALLETS_DATABASE, &[]) { if error.code() != Some(&postgres::error::DUPLICATE_DATABASE) { conn.finish()?; return Err(WalletStorageError::IOError(format!("Error occurred while 
creating the database: {}", error))); } else { // if database already exists, assume tables are created already and return conn.finish()?; return Ok(()); } } conn.finish()?; let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) { Ok(conn) => conn, Err(error) => { return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error))); } }; for sql in &_CREATE_SCHEMA_MULTI { if let Err(error) = conn.execute(sql, &[]) { conn.finish()?; return Err(WalletStorageError::IOError(format!("Error occurred while creating wallet schema: {}", error))); } } conn.finish()?; Ok(()) } // initialize a single wallet based on wallet storage strategy fn create_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials, metadata: &[u8]) -> Result<(), WalletStorageError> { // insert metadata let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials); let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) { Ok(conn) => conn, Err(error) => { return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error))); } }; // We allow error on conflict since this indicates AlreadyExists error let ret = match conn.execute("INSERT INTO metadata(wallet_id, value) VALUES($1, $2)", &[&id, &metadata]) { Ok(_) => Ok(()), Err(error) => { if error.code() == Some(&postgres::error::UNIQUE_VIOLATION) { Err(WalletStorageError::AlreadyExists) } else { Err(WalletStorageError::IOError(format!("Error occurred while inserting into metadata: {}", error))) } } }; conn.finish()?; ret } // open a wallet based on wallet storage strategy fn open_wallet(&self, id: &str, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError> { Ok(Box::new(PostgresStorage { pool: self.pool.clone(), wallet_id: id.to_string(), })) } // delete a single wallet based on wallet storage strategy fn 
delete_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> { let url = PostgresStorageType::_postgres_url(&_WALLETS_DB, &config, &credentials); let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) { Ok(conn) => conn, Err(error) => { return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error))); } }; let mut ret = Ok(()); for sql in &_DELETE_WALLET_MULTI { ret = match conn.execute(sql, &[&id]) { Ok(row_count) => { if row_count == 0 { Err(WalletStorageError::NotFound) } else { Ok(()) } } Err(error) => { Err(WalletStorageError::IOError(format!("Error occurred while deleting wallet: {}", error))) } } }; conn.finish()?; return ret; } // determine phyisical table name based on wallet strategy fn table_name(&self, _id: &str, base_name: &str) -> String { // TODO base_name.to_owned() } // determine additional query parameters based on wallet strategy fn query_qualifier(&self) -> Option<String> { // TODO Some("AND wallet_id = $$".to_owned()) } } impl WalletStrategy for DatabasePerWalletStrategy { // initialize storage based on wallet storage strategy fn init_storage(&self, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<(), WalletStorageError> { // no-op debug!("Initializing storage strategy DatabasePerWalletStrategy."); Ok(()) } // initialize a single wallet based on wallet storage strategy fn create_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials, metadata: &[u8]) -> Result<(), WalletStorageError> { // create database for wallet // if admin user and password aren't provided then bail if credentials.admin_account == None || credentials.admin_password == None { return Ok(()); } let url_base = PostgresStorageType::_admin_postgres_url(&config, &credentials); let url = PostgresStorageType::_postgres_url(id, &config, &credentials); let conn = 
postgres::Connection::connect(&url_base[..], config.tls())?; let create_db_sql = str::replace(_CREATE_WALLET_DATABASE, "$1", id); let mut schema_result = match conn.execute(&create_db_sql, &[]) { Ok(_) => Ok(()), Err(_error) => { Err(WalletStorageError::AlreadyExists) } }; conn.finish()?; let conn = match postgres::Connection::connect(&url[..], config.tls()) { Ok(conn) => conn, Err(error) => { return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error))); } }; for sql in &_CREATE_SCHEMA { match schema_result { Ok(_) => schema_result = match conn.execute(sql, &[]) { Ok(_) => Ok(()), Err(error) => { Err(WalletStorageError::IOError(format!("Error occurred while creating wallet schema: {}", error))) } }, _ => () } }; let ret = match schema_result { Ok(_) => { match conn.execute("INSERT INTO metadata(value) VALUES($1) ON CONFLICT (value) DO UPDATE SET value = excluded.value", &[&metadata]) { Ok(_) => Ok(()), Err(error) => { //std::fs::remove_file(db_path)?; Err(WalletStorageError::IOError(format!("Error occurred while inserting the keys: {}", error))) } } } Err(error) => Err(error) }; conn.finish()?; ret } // open a wallet based on wallet storage strategy fn open_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError> { let url = PostgresStorageType::_postgres_url(id, &config, &credentials); // don't need a connection, but connect just to verify we can let _conn = match postgres::Connection::connect(&url[..], config.tls()) { Ok(conn) => conn, Err(_) => return Err(WalletStorageError::NotFound) }; // TODO close _conn let manager = match PostgresConnectionManager::new(&url[..], config.r2d2_tls()) { Ok(manager) => manager, Err(_) => return Err(WalletStorageError::NotFound) }; let pool = match r2d2::Pool::builder() .min_idle(Some(config.min_idle_count())) .max_size(config.max_connections()) 
.idle_timeout(Some(Duration::new(config.connection_timeout(), 0))) .build(manager) { Ok(pool) => pool, Err(_) => return Err(WalletStorageError::NotFound) }; Ok(Box::new(PostgresStorage { pool: pool, wallet_id: id.to_string(), })) } // delete a single wallet based on wallet storage strategy fn delete_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> { // if admin user and password aren't provided then bail if credentials.admin_account == None || credentials.admin_password == None { return Ok(()); } let url_base = PostgresStorageType::_admin_postgres_url(&config, &credentials); let url = PostgresStorageType::_postgres_url(id, &config, &credentials); match postgres::Connection::connect(&url[..], config.tls()) { Ok(conn) => { for sql in &_DROP_SCHEMA { match conn.execute(sql, &[]) { Ok(_) => (), Err(_) => () }; } let _ret = conn.finish(); () } Err(_) => return Err(WalletStorageError::NotFound) }; let conn = postgres::Connection::connect(url_base, config.tls())?; let drop_db_sql = str::replace(_DROP_WALLET_DATABASE, "$1", id); let ret = match conn.execute(&drop_db_sql, &[]) { Ok(_) => Ok(()), Err(_) => Ok(()) }; conn.finish()?; ret } // determine phyisical table name based on wallet strategy fn table_name(&self, _id: &str, base_name: &str) -> String { // TODO base_name.to_owned() } // determine additional query parameters based on wallet strategy fn query_qualifier(&self) -> Option<String> { // TODO None } } impl WalletStrategy for MultiWalletSingleTableStrategy { // initialize storage based on wallet storage strategy fn init_storage(&self, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> { // create database and tables for storage // if admin user and password aren't provided then bail debug!("Initializing storage strategy MultiWalletSingleTableStrategy."); if credentials.admin_account == None || credentials.admin_password == None { return Ok(()); } let 
url_base = PostgresStorageType::_admin_postgres_url(&config, &credentials); let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials); let conn = postgres::Connection::connect(&url_base[..], postgres::TlsMode::None)?; if let Err(error) = conn.execute(&_CREATE_WALLETS_DATABASE, &[]) { if error.code() != Some(&postgres::error::DUPLICATE_DATABASE) { conn.finish()?; return Err(WalletStorageError::IOError(format!("Error occurred while creating the database: {}", error))); } else { // if database already exists, assume tables are created already and return conn.finish()?; return Ok(()); } } conn.finish()?; let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) { Ok(conn) => conn, Err(error) => { return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error))); } }; for sql in &_CREATE_SCHEMA_MULTI { if let Err(error) = conn.execute(sql, &[]) { conn.finish()?; return Err(WalletStorageError::IOError(format!("Error occurred while creating wallet schema: {}", error))); } } conn.finish()?; Ok(()) } // initialize a single wallet based on wallet storage strategy fn create_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials, metadata: &[u8]) -> Result<(), WalletStorageError> { // insert metadata let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials); let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) { Ok(conn) => conn, Err(error) => { return Err(WalletStorageError::IOError(format!("Error occurred while connecting to wallet schema: {}", error))); } }; // We allow error on conflict since this indicates AlreadyExists error let ret = match conn.execute("INSERT INTO metadata(wallet_id, value) VALUES($1, $2)", &[&id, &metadata]) { Ok(_) => Ok(()), Err(error) => { if error.code() == Some(&postgres::error::UNIQUE_VIOLATION) { Err(WalletStorageError::AlreadyExists) } else { 
Err(WalletStorageError::IOError(format!("Error occurred while inserting into metadata: {}", error))) } } }; conn.finish()?; ret } // open a wallet based on wallet storage strategy fn open_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError> { let url = PostgresStorageType::_postgres_url(_WALLETS_DB, &config, &credentials); // don't need a connection, but connect just to verify we can let conn = match postgres::Connection::connect(&url[..], config.tls()) { Ok(conn) => conn, Err(_) => return Err(WalletStorageError::NotFound) }; // select metadata for this wallet to ensure it exists let res: Result<Vec<u8>, WalletStorageError> = { let mut rows = conn.query( "SELECT value FROM metadata WHERE wallet_id = $1", &[&id]); match rows.as_mut().unwrap().iter().next() { Some(row) => Ok(row.get(0)), None => Err(WalletStorageError::ItemNotFound) } }; match res { Ok(_entity) => (), Err(_) => return Err(WalletStorageError::NotFound) }; // TODO close conn let manager = match PostgresConnectionManager::new(&url[..], config.r2d2_tls()) { Ok(manager) => manager, Err(_) => return Err(WalletStorageError::NotFound) }; let pool = match r2d2::Pool::builder() .min_idle(Some(config.min_idle_count())) .max_size(config.max_connections()) .idle_timeout(Some(Duration::new(config.connection_timeout(), 0))) .build(manager) { Ok(pool) => pool, Err(_) => return Err(WalletStorageError::NotFound) }; Ok(Box::new(PostgresStorage { pool: pool, wallet_id: id.to_string(), })) } // delete a single wallet based on wallet storage strategy fn delete_wallet(&self, id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> Result<(), WalletStorageError> { let url = PostgresStorageType::_postgres_url(&_WALLETS_DB, &config, &credentials); let conn = match postgres::Connection::connect(&url[..], postgres::TlsMode::None) { Ok(conn) => conn, Err(error) => { return Err(WalletStorageError::IOError(format!("Error occurred 
// Strategy stub: separate database AND separate tables per wallet.
// NOTE(review): every method below is an unimplemented placeholder — init/create/delete
// succeed as no-ops and open_wallet always fails with NotFound. Confirm this scheme is
// not selectable in production before relying on it.
impl WalletStrategy for MultiWalletMultiTableStrategy {
    // initialize storage based on wallet storage strategy
    fn init_storage(&self, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
        // create database for storage
        // TODO not implemented yet — succeeds without doing anything
        Ok(())
    }

    // initialize a single wallet based on wallet storage strategy
    fn create_wallet(&self, _id: &str, _config: &PostgresConfig, _credentials: &PostgresCredentials, _metadata: &[u8]) -> Result<(), WalletStorageError> {
        // create tables for wallet storage
        // TODO not implemented yet — succeeds without doing anything
        Ok(())
    }

    // open a wallet based on wallet storage strategy
    fn open_wallet(&self, _id: &str, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<Box<PostgresStorage>, WalletStorageError> {
        // TODO not implemented yet — always reports the wallet as missing
        Err(WalletStorageError::NotFound)
    }

    // delete a single wallet based on wallet storage strategy
    fn delete_wallet(&self, _id: &str, _config: &PostgresConfig, _credentials: &PostgresCredentials) -> Result<(), WalletStorageError> {
        // TODO not implemented yet — succeeds without doing anything
        Ok(())
    }

    // determine physical table name based on wallet strategy
    fn table_name(&self, _id: &str, base_name: &str) -> String {
        // TODO currently the base name is returned unchanged
        base_name.to_owned()
    }

    // determine additional query parameters based on wallet strategy
    fn query_qualifier(&self) -> Option<String> {
        // TODO no per-wallet SQL qualifier yet
        None
    }
}
{ static ref SELECTED_STRATEGY: RwLock< Option<Box<dyn WalletStrategy + Send + Sync>> > = RwLock::new(None); } impl PostgresStorageType { pub fn new() -> PostgresStorageType { PostgresStorageType {} } fn _admin_postgres_url(config: &PostgresConfig, credentials: &PostgresCredentials) -> String { let mut url_base = "postgresql://".to_owned(); match credentials.admin_account { Some(ref account) => url_base.push_str(&account[..]), None => () } url_base.push_str(":"); match credentials.admin_password { Some(ref password) => url_base.push_str(&password[..]), None => () } url_base.push_str("@"); url_base.push_str(&config.url[..]); url_base } fn _base_postgres_url(config: &PostgresConfig, credentials: &PostgresCredentials) -> String { let mut url_base = "postgresql://".to_owned(); url_base.push_str(&credentials.account[..]); url_base.push_str(":"); url_base.push_str(&credentials.password[..]); url_base.push_str("@"); url_base.push_str(&config.url[..]); url_base } fn _postgres_url(id: &str, config: &PostgresConfig, credentials: &PostgresCredentials) -> String { let mut url_base = PostgresStorageType::_base_postgres_url(config, credentials); url_base.push_str("/"); url_base.push_str(id); url_base } } impl WalletStorage for PostgresStorage { /// /// Tries to fetch values and/or tags from the storage. /// Returns Result with StorageEntity object which holds requested data in case of success or /// Result with WalletStorageError in case of failure. /// /// /// # Arguments /// /// * `type_` - type_ of the item in storage /// * `id` - id of the item in storage /// * `options` - JSon containing what needs to be fetched. 
    fn get(&self, type_: &[u8], id: &[u8], options: &str) -> Result<StorageRecord, WalletStorageError> {
        let options: RecordOptions = if options == "{}" {
            // FIXME: fast path that treats the literal empty JSON object as the
            // default options without a serde round-trip.
            RecordOptions::default()
        } else {
            serde_json::from_str(options)?
        };
        let pool = self.pool.clone();
        // NOTE(review): `unwrap` panics if the pool cannot hand out a connection
        // (e.g. checkout timeout); consider mapping to an IOError instead.
        let conn = pool.get().unwrap();
        // Some(_) => shared-table strategy: every query must also be scoped by wallet_id.
        let query_qualifier = get_wallet_strategy_qualifier()?;
        // Tuple is (item row id, encrypted value bytes, value-key bytes).
        let res: Result<(i64, Vec<u8>, Vec<u8>), WalletStorageError> = {
            let mut rows = match query_qualifier {
                Some(_) => conn.query(
                    "SELECT id, value, key FROM items where type = $1 AND name = $2 AND wallet_id = $3",
                    &[&type_.to_vec(), &id.to_vec(), &self.wallet_id]),
                None => conn.query(
                    "SELECT id, value, key FROM items where type = $1 AND name = $2",
                    &[&type_.to_vec(), &id.to_vec()])
            };
            // Only the first matching row is used; (type, name) is expected to be unique.
            match rows.as_mut().unwrap().iter().next() {
                Some(row) => Ok((row.get(0), row.get(1), row.get(2))),
                None => Err(WalletStorageError::ItemNotFound)
            }
        };
        let item = match res {
            Ok(entity) => entity,
            Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound),
            Err(err) => return Err(WalletStorageError::from(err))
        };
        let value = if options.retrieve_value {
            Some(EncryptedValue::new(item.1, item.2))
        } else {
            None
        };
        // Shadows the `type_` parameter with an Option of the same borrowed slice;
        // converted to an owned Vec only at the end.
        let type_ = if options.retrieve_type {
            Some(type_.clone())
        } else {
            None
        };
        let tags = if options.retrieve_tags {
            let mut tags = Vec::new();
            // get all encrypted.
            let rows = match query_qualifier {
                Some(_) => {
                    let stmt = conn.prepare_cached("SELECT name, value FROM tags_encrypted WHERE item_id = $1 AND wallet_id = $2")?;
                    stmt.query(&[&item.0, &self.wallet_id])?
                }
                None => {
                    let stmt = conn.prepare_cached("SELECT name, value FROM tags_encrypted WHERE item_id = $1")?;
                    stmt.query(&[&item.0])?
                }
            };
            let mut iter = rows.iter();
            while let Some(res) = iter.next() {
                let row = res;
                //let tag_name: Vec<u8> = row.get(0);
                //let tag_value: Vec<u8> = row.get(1);
                tags.push(Tag::Encrypted(row.get(0), row.get(1)));
            }
            // get all plain
            let rows = match query_qualifier {
                Some(_) => {
                    let stmt = conn.prepare_cached("SELECT name, value FROM tags_plaintext WHERE item_id = $1 AND wallet_id = $2")?;
                    stmt.query(&[&item.0, &self.wallet_id])?
                }
                None => {
                    let stmt = conn.prepare_cached("SELECT name, value FROM tags_plaintext WHERE item_id = $1")?;
                    stmt.query(&[&item.0])?
                }
            };
            let mut iter = rows.iter();
            while let Some(res) = iter.next() {
                let row = res;
                //let tag_name: Vec<u8> = row.get(0);
                //let tag_value: String = row.get(1);
                tags.push(Tag::PlainText(row.get(0), row.get(1)));
            }
            Some(tags)
        } else {
            None
        };
        Ok(StorageRecord::new(id.to_vec(), value, type_.map(|val| val.to_vec()), tags))
    }
/// /// /// # Arguments /// /// * `type_` - type of the item in storage /// * `id` - id of the item in storage /// * `value` - value of the item in storage /// * `value_key` - key used to encrypt the value /// * `tags` - tags assigned to the value /// /// # Returns /// /// Result that can be either: /// /// * `()` /// * `WalletStorageError` /// /// # Errors /// /// Any of the following `WalletStorageError` class of errors can be throw by this method: /// /// * `WalletStorageError::Closed` - Storage is closed /// * `WalletStorageError::ItemAlreadyExists` - Item is already present in database /// * `IOError("IO error during storage operation:...")` - Failed connection or SQL query /// fn add(&self, type_: &[u8], id: &[u8], value: &EncryptedValue, tags: &[Tag]) -> Result<(), WalletStorageError> { let pool = self.pool.clone(); let conn = pool.get().unwrap(); let query_qualifier = get_wallet_strategy_qualifier()?; let tx: transaction::Transaction = transaction::Transaction::new(&conn)?; let res = match query_qualifier { Some(_) => tx.prepare_cached("INSERT INTO items (type, name, value, key, wallet_id) VALUES ($1, $2, $3, $4, $5) RETURNING id")? .query(&[&type_.to_vec(), &id.to_vec(), &value.data, &value.key, &self.wallet_id]), None => tx.prepare_cached("INSERT INTO items (type, name, value, key) VALUES ($1, $2, $3, $4) RETURNING id")? 
.query(&[&type_.to_vec(), &id.to_vec(), &value.data, &value.key]) }; let item_id = match res { Ok(rows) => { let res = match rows.iter().next() { Some(row) => Ok(row.get(0)), None => Err(WalletStorageError::ItemNotFound) }; let item_id: i64 = match res { Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound), Err(err) => return Err(WalletStorageError::from(err)), Ok(id) => id }; item_id } Err(err) => { if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) || err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) { return Err(WalletStorageError::ItemAlreadyExists); } else { return Err(WalletStorageError::from(err)); } } }; let item_id = item_id as i64; if !tags.is_empty() { let stmt_e = match query_qualifier { Some(_) => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?, None => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value) VALUES ($1, $2, $3)")? }; let stmt_p = match query_qualifier { Some(_) => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?, None => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value) VALUES ($1, $2, $3)")? 
    // Replaces the value and value-key of an existing item identified by
    // (type, name), scoped by wallet_id under the shared-table strategy.
    // Exactly one row must be updated: 0 rows => ItemNotFound, >1 rows is
    // treated as a broken invariant (InvalidState).
    fn update(&self, type_: &[u8], id: &[u8], value: &EncryptedValue) -> Result<(), WalletStorageError> {
        let pool = self.pool.clone();
        // NOTE(review): `unwrap` panics on pool-checkout failure — confirm acceptable.
        let conn = pool.get().unwrap();
        let query_qualifier = get_wallet_strategy_qualifier()?;
        let res = match query_qualifier {
            Some(_) => conn.prepare_cached("UPDATE items SET value = $1, key = $2 WHERE type = $3 AND name = $4 AND wallet_id = $5")?
                .execute(&[&value.data, &value.key, &type_.to_vec(), &id.to_vec(), &self.wallet_id]),
            None => conn.prepare_cached("UPDATE items SET value = $1, key = $2 WHERE type = $3 AND name = $4")?
                .execute(&[&value.data, &value.key, &type_.to_vec(), &id.to_vec()])
        };
        match res {
            Ok(1) => Ok(()),
            Ok(0) => Err(WalletStorageError::ItemNotFound),
            // (type, name[, wallet_id]) should be unique; more than one updated
            // row means the table invariant is violated.
            Ok(count) => Err(WalletStorageError::CommonError(CommonError::InvalidState(format!("Postgres returned update row count: {}", count)))),
            Err(err) => Err(WalletStorageError::from(err)),
        }
    }
}; let plain_tag_insert_stmt = match query_qualifier { Some(_) => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4) ON CONFLICT (name, item_id, wallet_id) DO UPDATE SET value = excluded.value")?, None => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value) VALUES ($1, $2, $3) ON CONFLICT (name, item_id) DO UPDATE SET value = excluded.value")? }; for tag in tags { match tag { &Tag::Encrypted(ref tag_name, ref tag_data) => { let res = match query_qualifier { Some(_) => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id]), None => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data]) }; match res { Ok(_) => (), Err(err) => { if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) || err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) { return Err(WalletStorageError::ItemAlreadyExists); } else { return Err(WalletStorageError::from(err)); } } } } &Tag::PlainText(ref tag_name, ref tag_data) => { let res = match query_qualifier { Some(_) => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id]), None => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data]) }; match res { Ok(_) => (), Err(err) => { if err.code() == Some(&postgres::error::UNIQUE_VIOLATION) || err.code() == Some(&postgres::error::INTEGRITY_CONSTRAINT_VIOLATION) { return Err(WalletStorageError::ItemAlreadyExists); } else { return Err(WalletStorageError::from(err)); } } } } }; } } tx.commit()?; Ok(()) } fn update_tags(&self, type_: &[u8], id: &[u8], tags: &[Tag]) -> Result<(), WalletStorageError> { let pool = self.pool.clone(); let conn = pool.get().unwrap(); let query_qualifier = get_wallet_strategy_qualifier()?; let tx: transaction::Transaction = transaction::Transaction::new(&conn)?; let res = match query_qualifier { Some(_) => { let mut rows = tx.prepare_cached("SELECT id FROM items WHERE type = $1 AND name = $2 AND wallet_id = $3")? 
.query(&[&type_.to_vec(), &id.to_vec(), &self.wallet_id]); match rows.as_mut().unwrap().iter().next() { Some(row) => Ok(row.get(0)), None => Err(WalletStorageError::ItemNotFound) } } None => { let mut rows = tx.prepare_cached("SELECT id FROM items WHERE type = $1 AND name = $2")? .query(&[&type_.to_vec(), &id.to_vec()]); match rows.as_mut().unwrap().iter().next() { Some(row) => Ok(row.get(0)), None => Err(WalletStorageError::ItemNotFound) } } }; let item_id: i64 = match res { Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound), Err(err) => return Err(WalletStorageError::from(err)), Ok(id) => id }; match query_qualifier { Some(_) => { tx.execute("DELETE FROM tags_encrypted WHERE item_id = $1 AND wallet_id = $2", &[&item_id, &self.wallet_id])?; tx.execute("DELETE FROM tags_plaintext WHERE item_id = $1 AND wallet_id = $2", &[&item_id, &self.wallet_id])?; } None => { tx.execute("DELETE FROM tags_encrypted WHERE item_id = $1", &[&item_id])?; tx.execute("DELETE FROM tags_plaintext WHERE item_id = $1", &[&item_id])?; } }; if !tags.is_empty() { let enc_tag_insert_stmt = match query_qualifier { Some(_) => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?, None => tx.prepare_cached("INSERT INTO tags_encrypted (item_id, name, value) VALUES ($1, $2, $3)")? }; let plain_tag_insert_stmt = match query_qualifier { Some(_) => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value, wallet_id) VALUES ($1, $2, $3, $4)")?, None => tx.prepare_cached("INSERT INTO tags_plaintext (item_id, name, value) VALUES ($1, $2, $3)")? }; for tag in tags { match query_qualifier { Some(_) => { match tag { &Tag::Encrypted(ref tag_name, ref tag_data) => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id])?, &Tag::PlainText(ref tag_name, ref tag_data) => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data, &self.wallet_id])? 
} } None => { match tag { &Tag::Encrypted(ref tag_name, ref tag_data) => enc_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data])?, &Tag::PlainText(ref tag_name, ref tag_data) => plain_tag_insert_stmt.execute(&[&item_id, tag_name, tag_data])? } } }; } } tx.commit()?; Ok(()) } fn delete_tags(&self, type_: &[u8], id: &[u8], tag_names: &[TagName]) -> Result<(), WalletStorageError> { let pool = self.pool.clone(); let conn = pool.get().unwrap(); let query_qualifier = get_wallet_strategy_qualifier()?; let res = match query_qualifier { Some(_) => { let mut rows = conn.prepare_cached("SELECT id FROM items WHERE type =$1 AND name = $2 AND wallet_id = $3")? .query(&[&type_.to_vec(), &id.to_vec(), &self.wallet_id]); match rows.as_mut().unwrap().iter().next() { Some(row) => Ok(row.get(0)), None => Err(WalletStorageError::ItemNotFound) } } None => { let mut rows = conn.prepare_cached("SELECT id FROM items WHERE type =$1 AND name = $2")? .query(&[&type_.to_vec(), &id.to_vec()]); match rows.as_mut().unwrap().iter().next() { Some(row) => Ok(row.get(0)), None => Err(WalletStorageError::ItemNotFound) } } }; let item_id: i64 = match res { Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound), Err(err) => return Err(WalletStorageError::from(err)), Ok(id) => id }; let tx: transaction::Transaction = transaction::Transaction::new(&conn)?; { let enc_tag_delete_stmt = match query_qualifier { Some(_) => tx.prepare_cached("DELETE FROM tags_encrypted WHERE item_id = $1 AND name = $2 AND wallet_id = $3")?, None => tx.prepare_cached("DELETE FROM tags_encrypted WHERE item_id = $1 AND name = $2")? }; let plain_tag_delete_stmt = match query_qualifier { Some(_) => tx.prepare_cached("DELETE FROM tags_plaintext WHERE item_id = $1 AND name = $2 AND wallet_id = $3")?, None => tx.prepare_cached("DELETE FROM tags_plaintext WHERE item_id = $1 AND name = $2")? 
}; for tag_name in tag_names { match query_qualifier { Some(_) => match tag_name { &TagName::OfEncrypted(ref tag_name) => enc_tag_delete_stmt.execute(&[&item_id, tag_name, &self.wallet_id])?, &TagName::OfPlain(ref tag_name) => plain_tag_delete_stmt.execute(&[&item_id, tag_name, &self.wallet_id])?, }, None => match tag_name { &TagName::OfEncrypted(ref tag_name) => enc_tag_delete_stmt.execute(&[&item_id, tag_name])?, &TagName::OfPlain(ref tag_name) => plain_tag_delete_stmt.execute(&[&item_id, tag_name])?, } }; } } tx.commit()?; Ok(()) } /// /// deletes value and tags into storage. /// Returns Result with () on success or /// Result with WalletStorageError in case of failure. /// /// /// # Arguments /// /// * `type_` - type of the item in storage /// * `id` - id of the item in storage /// /// # Returns /// /// Result that can be either: /// /// * `()` /// * `WalletStorageError` /// /// # Errors /// /// Any of the following `WalletStorageError` type_ of errors can be throw by this method: /// /// * `WalletStorageError::Closed` - Storage is closed /// * `WalletStorageError::ItemNotFound` - Item is not found in database /// * `IOError("IO error during storage operation:...")` - Failed connection or SQL query /// fn delete(&self, type_: &[u8], id: &[u8]) -> Result<(), WalletStorageError> { let pool = self.pool.clone(); let conn = pool.get().unwrap(); let query_qualifier = get_wallet_strategy_qualifier()?; let row_count = match query_qualifier { Some(_) => conn.execute( "DELETE FROM items where type = $1 AND name = $2 AND wallet_id = $3", &[&type_.to_vec(), &id.to_vec(), &self.wallet_id], )?, None => conn.execute( "DELETE FROM items where type = $1 AND name = $2", &[&type_.to_vec(), &id.to_vec()], )? 
}; if row_count == 1 { Ok(()) } else { Err(WalletStorageError::ItemNotFound) } } fn get_storage_metadata(&self) -> Result<Vec<u8>, WalletStorageError> { let pool = self.pool.clone(); let conn = pool.get().unwrap(); let query_qualifier = get_wallet_strategy_qualifier()?; let res: Result<Vec<u8>, WalletStorageError> = { let mut rows = match query_qualifier { Some(_) => conn.query( "SELECT value FROM metadata WHERE wallet_id = $1", &[&self.wallet_id]), None => conn.query( "SELECT value FROM metadata", &[]) }; match rows.as_mut().unwrap().iter().next() { Some(row) => Ok(row.get(0)), None => Err(WalletStorageError::ItemNotFound) } }; match res { Ok(entity) => Ok(entity), Err(WalletStorageError::ItemNotFound) => return Err(WalletStorageError::ItemNotFound), Err(err) => return Err(WalletStorageError::from(err)) } } fn set_storage_metadata(&self, metadata: &[u8]) -> Result<(), WalletStorageError> { let pool = self.pool.clone(); let conn = pool.get().unwrap(); let query_qualifier = get_wallet_strategy_qualifier()?; let res = match query_qualifier { Some(_) => conn.execute("UPDATE metadata SET value = $1 WHERE wallet_id = $2", &[&metadata.to_vec(), &self.wallet_id]), None => conn.execute("UPDATE metadata SET value = $1", &[&metadata.to_vec()]) }; match res { Ok(_) => Ok(()), Err(error) => { Err(WalletStorageError::IOError(format!("Error occurred while inserting the keys: {}", error))) } } } fn get_all(&self) -> Result<Box<dyn StorageIterator>, WalletStorageError> { let query_qualifier = get_wallet_strategy_qualifier()?; let statement = match query_qualifier { Some(_) => self._prepare_statement("SELECT id, name, value, key, type FROM items WHERE wallet_id = $1")?, None => self._prepare_statement("SELECT id, name, value, key, type FROM items")? 
}; let fetch_options = RecordOptions { retrieve_type: true, retrieve_value: true, retrieve_tags: true, }; let pool = self.pool.clone(); let tag_retriever = match query_qualifier { Some(_) => Some(TagRetriever::new_owned(Rc::new(pool.get().unwrap()).clone(), Some(self.wallet_id.clone()))?), None => Some(TagRetriever::new_owned(Rc::new(pool.get().unwrap()).clone(), None)?) }; let storage_iterator = match query_qualifier { Some(_) => PostgresStorageIterator::new(Some(statement), &[&self.wallet_id], fetch_options, tag_retriever, None)?, None => PostgresStorageIterator::new(Some(statement), &[], fetch_options, tag_retriever, None)? }; Ok(Box::new(storage_iterator)) } fn search(&self, type_: &[u8], query: &language::Operator, options: Option<&str>) -> Result<Box<dyn StorageIterator>, WalletStorageError> { let type_ = type_.to_vec(); // FIXME let search_options = match options { None => SearchOptions::default(), Some(option_str) => serde_json::from_str(option_str)? }; let pool = self.pool.clone(); let conn = pool.get().unwrap(); let query_qualifier = get_wallet_strategy_qualifier()?; let wallet_id_arg = self.wallet_id.to_owned(); let total_count: Option<usize> = if search_options.retrieve_total_count { let (query_string, query_arguments) = match query_qualifier { Some(_) => { let (mut query_string, mut query_arguments) = query::wql_to_sql_count(&type_, query)?; query_arguments.push(&wallet_id_arg); let arg_str = format!(" AND i.wallet_id = ${}", query_arguments.len()); query_string.push_str(&arg_str); let mut with_clause = false; if query_string.contains("tags_plaintext") { query_arguments.push(&wallet_id_arg); query_string = format!("tags_plaintext as (select * from tags_plaintext where wallet_id = ${}) {}", query_arguments.len(), query_string); with_clause = true; } if query_string.contains("tags_encrypted") { if with_clause { query_string = format!(", {}", query_string); } query_arguments.push(&wallet_id_arg); query_string = format!("tags_encrypted as (select * from 
tags_encrypted where wallet_id = ${}) {}", query_arguments.len(), query_string); with_clause = true; } if with_clause { query_string = format!("WITH {}", query_string); } (query_string, query_arguments) } None => query::wql_to_sql_count(&type_, query)? }; let mut rows = conn.query( &query_string, &query_arguments[..]); match rows.as_mut().unwrap().iter().next() { Some(row) => { let x: i64 = row.get(0); Some(x as usize) } None => None } } else { None }; if search_options.retrieve_records { let fetch_options = RecordOptions { retrieve_value: search_options.retrieve_value, retrieve_tags: search_options.retrieve_tags, retrieve_type: search_options.retrieve_type, }; let (query_string, query_arguments) = match query_qualifier { Some(_) => { let (mut query_string, mut query_arguments) = query::wql_to_sql(&type_, query, options)?; query_arguments.push(&wallet_id_arg); let arg_str = format!(" AND i.wallet_id = ${}", query_arguments.len()); query_string.push_str(&arg_str); let mut with_clause = false; if query_string.contains("tags_plaintext") { query_arguments.push(&wallet_id_arg); query_string = format!("tags_plaintext as (select * from tags_plaintext where wallet_id = ${}) {}", query_arguments.len(), query_string); with_clause = true; } if query_string.contains("tags_encrypted") { if with_clause { query_string = format!(", {}", query_string); } query_arguments.push(&wallet_id_arg); query_string = format!("tags_encrypted as (select * from tags_encrypted where wallet_id = ${}) {}", query_arguments.len(), query_string); with_clause = true; } if with_clause { query_string = format!("WITH {}", query_string); } (query_string, query_arguments) } None => query::wql_to_sql(&type_, query, options)? 
impl PostgresStorage {
    // Prepares a SQL statement whose lifetime is tied to a pooled connection.
    //
    // `OwningHandle` bundles the Rc'd connection together with the boxed
    // statement, so the statement can be returned from this method even though
    // it internally borrows the connection it was prepared on.
    //
    // NOTE(review): `pool.get().unwrap()` panics on pool-checkout failure.
    fn _prepare_statement(&self, sql: &str) -> Result<
        OwningHandle<Rc<r2d2::PooledConnection<PostgresConnectionManager>>,
            Box<postgres::stmt::Statement<'static>>>,
        WalletStorageError> {
        let pool = self.pool.clone();
        OwningHandle::try_new(Rc::new(pool.get().unwrap()).clone(), |conn| {
            // SAFETY-NOTE(review): the raw-pointer deref and the 'static on
            // Statement are sound only because OwningHandle keeps the owning
            // Rc<connection> alive for as long as the handle (and thus the
            // statement) exists — confirm against the owning_ref contract.
            unsafe { (*conn).prepare(sql) }.map(Box::new).map_err(WalletStorageError::from)
        })
    }
}
Details {:?}", e))); } }; debug!("MultiWalletSingleTableStrategySharedPool open >> building connection pool"); match r2d2::Pool::builder() .min_idle(Some(config.min_idle_count())) .max_size(config.max_connections()) .idle_timeout(Some(Duration::new(config.connection_timeout(), 0))) .build(manager) { Ok(pool) => Ok(pool), Err(e) => Err(WalletStorageError::GenericError(format!("Problem creating PostgresConnectionManager. Details {:?}", e))) } } fn set_wallet_strategy(strategy: Box<dyn WalletStrategy + Send + Sync>) { let mut write_strategy = SELECTED_STRATEGY.write().unwrap(); *write_strategy = Some(strategy); } fn get_wallet_strategy_qualifier() -> Result<Option<String>, WalletStorageError> { let read_strategy = SELECTED_STRATEGY.read().unwrap(); read_strategy.as_ref() .map_or(Err(WalletStorageError::GenericError(format!("Storage was not yet initialized."))), |strategy| Ok(strategy.query_qualifier()), ) } impl WalletStorageType for PostgresStorageType { /// /// Initializes the wallets database and creates the necessary tables for all wallets /// This needs to be called once at the very beginning, I'm not entirely sure the best way to enforce it /// /// # Arguments /// /// * `storage_config` - config containing the location of Postgres DB files /// * `storage_credentials` - DB credentials /// /// # Returns /// /// Result that can be either: /// /// * `()` /// * `WalletStorageError` /// /// # Errors /// /// Any of the following `WalletStorageError` type_ of errors can be throw by this method: /// /// * `WalletStorageError::NotFound` - File with the provided id not found /// * `IOError(..)` - Deletion of the file form the file-system failed /// fn init_storage(&self, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError> { let config = config .map(serde_json::from_str::<PostgresConfig>) .map_or(Ok(None), |v| v.map(Some)) .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?; let credentials = 
credentials .map(serde_json::from_str::<PostgresCredentials>) .map_or(Ok(None), |v| v.map(Some)) .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize credentials: {:?}", err)))?; let config = match config { Some(config) => config, None => return Err(WalletStorageError::ConfigError) }; let credentials = match credentials { Some(credentials) => credentials, None => return Err(WalletStorageError::ConfigError) }; { let r1 = SELECTED_STRATEGY.read().unwrap(); match r1.as_ref() { Some(_) => { return Err(WalletStorageError::GenericError(format!("Storage was already initialized."))); } None => info!("Initializing postgresql storage for the first time") }; }; match config.wallet_scheme { Some(scheme) => match scheme { WalletScheme::DatabasePerWallet => { debug!("Initialising postgresql using DatabasePerWallet strategy."); set_wallet_strategy(Box::new(DatabasePerWalletStrategy {})); } WalletScheme::MultiWalletSingleTable => { debug!("Initialising postgresql using MultiWalletSingleTable strategy."); set_wallet_strategy(Box::new(MultiWalletSingleTableStrategy {})); } WalletScheme::MultiWalletMultiTable => { debug!("Initialising postgresql using MultiWalletMultiTable strategy."); set_wallet_strategy(Box::new(MultiWalletMultiTableStrategy {})); } WalletScheme::MultiWalletSingleTableSharedPool => { if (&config as &PostgresConfig).min_idle_count() > 0 { // TODO: This restriction can be removed but we would have make sure that // we don't attempt to construct connection pool before we have constructed // database. Currently we are creating database (if doesn't exists) in // strategy.init_storage(&config, &credentials) at the end of this function. 
error!("MultiWalletSingleTableSharedPool does not support 'min_idle_count' \ (or its deprecated equivalent 'min_idle_time') to be > 0"); return Err(ConfigError) } debug!("Initialising postgresql using MultiWalletSingleTableSharedPool strategy."); let pool = create_connection_pool(&config, &credentials)?; set_wallet_strategy(Box::new(MultiWalletSingleTableStrategySharedPool { pool })); } }, None => { debug!("Initialising postgresql but strategy was not specified in storage \ configuration. Using DatabasePerWallet strategy by default."); set_wallet_strategy(Box::new(DatabasePerWalletStrategy {})); } }; let r1 = SELECTED_STRATEGY.read().unwrap(); match r1.as_ref() { Some(strategy) => { strategy.init_storage(&config, &credentials) } None => panic!("Was about to initialize postgresql storage strategy, but not strategy \ was yet set. You should never see this error.") } } /// /// Deletes the Postgres database file with the provided id from the path specified in the /// config file. /// /// # Arguments /// /// * `id` - the wallet id /// * `storage_config` - Postgres DB connection config /// * `storage_credentials` - DB credentials /// /// # Returns /// /// Result that can be either: /// /// * `()` /// * `WalletStorageError` /// /// # Errors /// /// Any of the following `WalletStorageError` type_ of errors can be throw by this method: /// /// * `WalletStorageError::NotFound` - File with the provided id not found /// * `IOError(..)` - Deletion of the file form the file-system failed /// fn delete_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<(), WalletStorageError> { let config = config .map(serde_json::from_str::<PostgresConfig>) .map_or(Ok(None), |v| v.map(Some)) .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?; let credentials = credentials .map(serde_json::from_str::<PostgresCredentials>) .map_or(Ok(None), |v| v.map(Some)) .map_err(|err| CommonError::InvalidStructure(format!("Cannot 
deserialize credentials: {:?}", err)))?; let config = match config { Some(config) => config, None => return Err(WalletStorageError::ConfigError) }; let credentials = match credentials { Some(credentials) => credentials, None => return Err(WalletStorageError::ConfigError) }; let strategy_read_lock = SELECTED_STRATEGY.read().unwrap(); strategy_read_lock .as_ref() .expect("Should never happen") .delete_wallet(id, &config, &credentials) } /// /// Creates the Postgres DB schema with the provided name in the id specified in the config file, /// and initializes the encryption keys needed for encryption and decryption of data. /// /// # Arguments /// /// * `id` - name of the Postgres DB schema /// * `config` - config containing the location of postgres db /// * `credentials` - DB credentials /// * `metadata` - encryption keys that need to be stored in the newly created DB /// /// # Returns /// /// Result that can be either: /// /// * `()` /// * `WalletStorageError` /// /// # Errors /// /// Any of the following `WalletStorageError` type_ of errors can be throw by this method: /// /// * `AlreadyExists` - Schema with a given name already exists in the database /// * `IOError("IO error during storage operation:...")` - Connection to the DB failed /// * `IOError("Error occurred while creating wallet file:..)"` - Creation of schema failed /// * `IOError("Error occurred while inserting the keys...")` - Insertion of keys failed /// fn create_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>, metadata: &[u8]) -> Result<(), WalletStorageError> { let config = config .map(serde_json::from_str::<PostgresConfig>) .map_or(Ok(None), |v| v.map(Some)) .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?; let credentials = credentials .map(serde_json::from_str::<PostgresCredentials>) .map_or(Ok(None), |v| v.map(Some)) .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize credentials: {:?}", err)))?; let 
config = match config { Some(config) => config, None => return Err(WalletStorageError::ConfigError) }; let credentials = match credentials { Some(credentials) => credentials, None => return Err(WalletStorageError::ConfigError) }; // initialize using the global selected_strategy object let r1 = SELECTED_STRATEGY.read().unwrap(); match r1.as_ref() { Some(strategy) => { strategy.create_wallet(id, &config, &credentials, metadata) } None => panic!("Should never happen") } } /// /// Establishes a connection to the SQLite DB with the provided id located in the path /// specified in the config. In case of a successful onection returns a Storage object /// embedding the connection and the encryption keys that will be used for encryption and /// decryption operations. /// /// /// # Arguments /// /// * `id` - id of the SQLite DB file /// * `config` - config containing the location of SQLite DB files /// * `credentials` - DB credentials /// /// # Returns /// /// Result that can be either: /// /// * `(Box<Storage>, Vec<u8>)` - Tuple of `SQLiteStorage` and `encryption keys` /// * `WalletStorageError` /// /// # Errors /// /// Any of the following `WalletStorageError` type_ of errors can be throw by this method: /// /// * `WalletStorageError::NotFound` - File with the provided id not found /// * `IOError("IO error during storage operation:...")` - Failed connection or SQL query /// fn open_storage(&self, id: &str, config: Option<&str>, credentials: Option<&str>) -> Result<Box<PostgresStorage>, WalletStorageError> { let config = config .map(serde_json::from_str::<PostgresConfig>) .map_or(Ok(None), |v| v.map(Some)) .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize config: {:?}", err)))?; let credentials = credentials .map(serde_json::from_str::<PostgresCredentials>) .map_or(Ok(None), |v| v.map(Some)) .map_err(|err| CommonError::InvalidStructure(format!("Cannot deserialize credentials: {:?}", err)))?; let config = match config { Some(config) => config, None => 
return Err(WalletStorageError::ConfigError) }; let credentials = match credentials { Some(credentials) => credentials, None => return Err(WalletStorageError::ConfigError) }; // initialize using the global selected_strategy object let r1 = SELECTED_STRATEGY.read().unwrap(); match r1.as_ref() { Some(strategy) => { strategy.open_wallet(id, &config, &credentials) } None => panic!("Should never happen") } } } #[cfg(test)] mod tests { use super::*; use std::env; use utils::test; #[test] fn postgres_storage_type_create_works() { _cleanup(); let storage_type = PostgresStorageType::new(); storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap(); } #[test] fn postgres_storage_type_create_works_for_twice() { _cleanup(); let storage_type = PostgresStorageType::new(); storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap(); let res = storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()); assert_match!(Err(WalletStorageError::AlreadyExists), res); } #[test] fn postgres_storage_get_storage_metadata_works() { _cleanup(); let storage = _storage(); let metadata = storage.get_storage_metadata().unwrap(); assert_eq!(metadata, _metadata()); } #[test] fn postgres_storage_type_delete_works() { _cleanup(); let storage_type = PostgresStorageType::new(); storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap(); storage_type.delete_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap(); } #[test] fn postgres_storage_type_delete_works_for_non_existing() { _cleanup(); let storage_type = PostgresStorageType::new(); storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap(); let res = 
storage_type.delete_storage("unknown", Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])); assert_match!(Err(WalletStorageError::NotFound), res); storage_type.delete_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap(); } #[test] fn postgres_storage_type_open_works() { _cleanup(); _storage(); } #[test] fn postgres_storage_type_open_works_for_not_created() { _cleanup(); let storage_type = PostgresStorageType::new(); let res = storage_type.open_storage("unknown", Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])); assert_match!(Err(WalletStorageError::NotFound), res); } #[test] fn postgres_storage_add_works_with_config() { _cleanup(); let storage = _storage_db_pool(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); } #[test] fn postgres_storage_add_works_for_is_802() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let res = storage.add(&_type1(), &_id1(), &_value1(), &_tags()); assert_match!(Err(WalletStorageError::ItemAlreadyExists), res); let res = storage.add(&_type1(), &_id1(), &_value1(), &_tags()); assert_match!(Err(WalletStorageError::ItemAlreadyExists), res); } #[test] fn postgres_storage_set_get_works() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.value.unwrap(), _value1()); assert_eq!(_sort(record.tags.unwrap()), _sort(_tags())); } #[test] fn postgres_storage_set_get_works_for_twice() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let res = storage.add(&_type1(), &_id1(), &_value2(), &_tags()); assert_match!(Err(WalletStorageError::ItemAlreadyExists), res); } #[test] fn postgres_storage_set_get_works_for_reopen() { _cleanup(); { _storage().add(&_type1(), 
&_id1(), &_value1(), &_tags()).unwrap(); } let storage_type = PostgresStorageType::new(); let storage = storage_type.open_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.value.unwrap(), _value1()); assert_eq!(_sort(record.tags.unwrap()), _sort(_tags())); } #[test] fn postgres_storage_get_works_for_wrong_key() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let res = storage.get(&_type1(), &_id2(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##); assert_match!(Err(WalletStorageError::ItemNotFound), res) } #[test] fn postgres_storage_delete_works() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.value.unwrap(), _value1()); assert_eq!(_sort(record.tags.unwrap()), _sort(_tags())); storage.delete(&_type1(), &_id1()).unwrap(); let res = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##); assert_match!(Err(WalletStorageError::ItemNotFound), res); } #[test] fn postgres_storage_delete_works_for_non_existing() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let res = storage.delete(&_type1(), &_id2()); assert_match!(Err(WalletStorageError::ItemNotFound), res); } #[test] fn postgres_storage_create_and_find_multiple_works() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let record1 = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); 
assert_eq!(record1.value.unwrap(), _value1()); assert_eq!(_sort(record1.tags.unwrap()), _sort(_tags())); storage.add(&_type2(), &_id2(), &_value2(), &_tags()).unwrap(); let record2 = storage.get(&_type2(), &_id2(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record2.value.unwrap(), _value2()); assert_eq!(_sort(record2.tags.unwrap()), _sort(_tags())); } #[test] fn postgres_storage_get_all_workss() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); storage.add(&_type2(), &_id2(), &_value2(), &_tags()).unwrap(); let mut storage_iterator = storage.get_all().unwrap(); let record = storage_iterator.next().unwrap().unwrap(); assert_eq!(record.type_.unwrap(), _type1()); assert_eq!(record.value.unwrap(), _value1()); assert_eq!(_sort(record.tags.unwrap()), _sort(_tags())); let record = storage_iterator.next().unwrap().unwrap(); assert_eq!(record.type_.unwrap(), _type2()); assert_eq!(record.value.unwrap(), _value2()); assert_eq!(_sort(record.tags.unwrap()), _sort(_tags())); let record = storage_iterator.next().unwrap(); assert!(record.is_none()); } #[test] fn postgres_storage_get_all_works_for_empty() { _cleanup(); let storage = _storage(); let mut storage_iterator = storage.get_all().unwrap(); let record = storage_iterator.next().unwrap(); assert!(record.is_none()); } #[test] fn postgres_storage_update_works() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.value.unwrap(), _value1()); storage.update(&_type1(), &_id1(), &_value2()).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.value.unwrap(), _value2()); } #[test] fn 
postgres_storage_update_works_for_non_existing_id() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.value.unwrap(), _value1()); let res = storage.update(&_type1(), &_id2(), &_value2()); assert_match!(Err(WalletStorageError::ItemNotFound), res) } #[test] fn postgres_storage_update_works_for_non_existing_type() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.value.unwrap(), _value1()); let res = storage.update(&_type2(), &_id1(), &_value2()); assert_match!(Err(WalletStorageError::ItemNotFound), res) } #[test] fn postgres_storage_add_tags_works() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); storage.add_tags(&_type1(), &_id1(), &_new_tags()).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.value.unwrap(), _value1()); let expected_tags = { let mut tags = _tags(); tags.extend(_new_tags()); _sort(tags) }; assert_eq!(_sort(record.tags.unwrap()), expected_tags); } #[test] fn postgres_storage_add_tags_works_for_non_existing_id() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let res = storage.add_tags(&_type1(), &_id2(), &_new_tags()); assert_match!(Err(WalletStorageError::ItemNotFound), res) } #[test] fn postgres_storage_add_tags_works_for_non_existing_type() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let res = storage.add_tags(&_type2(), &_id1(), &_new_tags()); 
assert_match!(Err(WalletStorageError::ItemNotFound), res) } #[test] fn postgres_storage_add_tags_works_for_already_existing() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let tags_with_existing = { let mut tags = _tags(); tags.extend(_new_tags()); tags }; storage.add_tags(&_type1(), &_id1(), &tags_with_existing).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.value.unwrap(), _value1()); let expected_tags = { let mut tags = _tags(); tags.extend(_new_tags()); _sort(tags) }; assert_eq!(_sort(record.tags.unwrap()), expected_tags); } #[test] fn postgres_storage_update_tags_works() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); storage.update_tags(&_type1(), &_id1(), &_new_tags()).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.value.unwrap(), _value1()); assert_eq!(_sort(record.tags.unwrap()), _sort(_new_tags())); } #[test] fn postgres_storage_update_tags_works_for_non_existing_id() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let res = storage.update_tags(&_type1(), &_id2(), &_new_tags()); assert_match!(Err(WalletStorageError::ItemNotFound), res); } #[test] fn postgres_storage_update_tags_works_for_non_existing_type() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let res = storage.update_tags(&_type1(), &_id2(), &_new_tags()); assert_match!(Err(WalletStorageError::ItemNotFound), res); } #[test] fn postgres_storage_update_tags_works_for_already_existing() { _cleanup(); let storage = _storage(); storage.add(&_type1(), &_id1(), &_value1(), &_tags()).unwrap(); let tags_with_existing = { let mut tags = _tags(); 
tags.extend(_new_tags()); tags }; storage.update_tags(&_type1(), &_id1(), &tags_with_existing).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.value.unwrap(), _value1()); let expected_tags = { let mut tags = _tags(); tags.extend(_new_tags()); _sort(tags) }; assert_eq!(_sort(record.tags.unwrap()), expected_tags); } #[test] fn postgres_storage_delete_tags_works() { _cleanup(); let storage = _storage(); let tag_name1 = vec![0, 0, 0]; let tag_name2 = vec![1, 1, 1]; let tag_name3 = vec![2, 2, 2]; let tag1 = Tag::Encrypted(tag_name1.clone(), vec![0, 0, 0]); let tag2 = Tag::PlainText(tag_name2.clone(), "tag_value_2".to_string()); let tag3 = Tag::Encrypted(tag_name3.clone(), vec![2, 2, 2]); let tags = vec![tag1.clone(), tag2.clone(), tag3.clone()]; storage.add(&_type1(), &_id1(), &_value1(), &tags).unwrap(); let tag_names = vec![TagName::OfEncrypted(tag_name1.clone()), TagName::OfPlain(tag_name2.clone())]; storage.delete_tags(&_type1(), &_id1(), &tag_names).unwrap(); let record = storage.get(&_type1(), &_id1(), r##"{"retrieveType": false, "retrieveValue": true, "retrieveTags": true}"##).unwrap(); assert_eq!(record.tags.unwrap(), vec![tag3]); } #[test] fn postgres_storage_delete_tags_works_for_non_existing_type() { _cleanup(); let storage = _storage(); let tag_name1 = vec![0, 0, 0]; let tag_name2 = vec![1, 1, 1]; let tag_name3 = vec![2, 2, 2]; let tag1 = Tag::Encrypted(tag_name1.clone(), vec![0, 0, 0]); let tag2 = Tag::PlainText(tag_name2.clone(), "tag_value_2".to_string()); let tag3 = Tag::Encrypted(tag_name3.clone(), vec![2, 2, 2]); let tags = vec![tag1.clone(), tag2.clone(), tag3.clone()]; storage.add(&_type1(), &_id1(), &_value1(), &tags).unwrap(); let tag_names = vec![TagName::OfEncrypted(tag_name1.clone()), TagName::OfPlain(tag_name2.clone())]; let res = storage.delete_tags(&_type2(), &_id1(), &tag_names); assert_match!(Err(WalletStorageError::ItemNotFound), 
res); } #[test] fn postgres_storage_delete_tags_works_for_non_existing_id() { _cleanup(); let storage = _storage(); let tag_name1 = vec![0, 0, 0]; let tag_name2 = vec![1, 1, 1]; let tag_name3 = vec![2, 2, 2]; let tag1 = Tag::Encrypted(tag_name1.clone(), vec![0, 0, 0]); let tag2 = Tag::PlainText(tag_name2.clone(), "tag_value_2".to_string()); let tag3 = Tag::Encrypted(tag_name3.clone(), vec![2, 2, 2]); let tags = vec![tag1.clone(), tag2.clone(), tag3.clone()]; storage.add(&_type1(), &_id1(), &_value1(), &tags).unwrap(); let tag_names = vec![TagName::OfEncrypted(tag_name1.clone()), TagName::OfPlain(tag_name2.clone())]; let res = storage.delete_tags(&_type1(), &_id2(), &tag_names); assert_match!(Err(WalletStorageError::ItemNotFound), res); } fn _cleanup() { let storage_type = PostgresStorageType::new(); { let mut write_strategy = SELECTED_STRATEGY.write().unwrap(); *write_strategy = None; } let _res = storage_type.init_storage(Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap(); let _ret = storage_type.delete_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])); let res = test::cleanup_storage(); res } fn _wallet_id() -> &'static str { "walle1" } fn _storage() -> Box<WalletStorage> { let storage_type = PostgresStorageType::new(); storage_type.create_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap(); let res = storage_type.open_storage(_wallet_id(), Some(&_wallet_config()[..]), Some(&_wallet_credentials()[..])).unwrap(); res } fn _storage_db_pool() -> Box<WalletStorage> { let storage_type = PostgresStorageType::new(); storage_type.create_storage(_wallet_id(), Some(&_wallet_config_db_pool()[..]), Some(&_wallet_credentials()[..]), &_metadata()).unwrap(); let res = storage_type.open_storage(_wallet_id(), Some(&_wallet_config_db_pool()[..]), Some(&_wallet_credentials()[..])).unwrap(); res } fn _wallet_config() -> String { let wallet_scheme = 
env::var("WALLET_SCHEME"); match wallet_scheme { Ok(scheme) => { if scheme == "MultiWalletSingleTable" { return _wallet_config_multi(); } } Err(_) => () }; let config = json!({ "url": "localhost:5432".to_owned() }).to_string(); config } fn _wallet_config_multi() -> String { let config = json!({ "url": "localhost:5432".to_owned(), "wallet_scheme": "MultiWalletSingleTable".to_owned() }).to_string(); config } fn _wallet_config_db_pool() -> String { let config = json!({ "url": "localhost:5432".to_owned(), "tls": "None", "max_connections": 4, "min_idle_count": 0, "connection_timeout": 10 }).to_string(); config } fn _wallet_credentials() -> String { let creds = json!({ "account": "postgres".to_owned(), "password": "mysecretpassword".to_owned(), "admin_account": Some("postgres".to_owned()), "admin_password": Some("mysecretpassword".to_owned()) }).to_string(); creds } fn _metadata() -> Vec<u8> { return vec![ 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8 ]; } fn _type(i: u8) -> Vec<u8> { vec![i, 1 + i, 2 + i] } fn _type1() -> Vec<u8> { _type(1) } fn _type2() -> Vec<u8> { _type(2) } fn _id(i: u8) -> Vec<u8> { vec![3 + i, 4 + i, 5 + i] } fn _id1() -> Vec<u8> { _id(1) } fn _id2() -> Vec<u8> { _id(2) } fn _value(i: u8) -> EncryptedValue { EncryptedValue { data: vec![6 + i, 7 + i, 8 + i], key: vec![9 + i, 10 + i, 11 + i] } } fn _value1() -> EncryptedValue { _value(1) } fn _value2() -> EncryptedValue { _value(2) } fn _tags() -> Vec<Tag> { let mut tags: Vec<Tag> = Vec::new(); tags.push(Tag::Encrypted(vec![1, 5, 8], vec![3, 5, 6])); tags.push(Tag::PlainText(vec![1, 5, 8, 1], "Plain value 1".to_string())); tags.push(Tag::Encrypted(vec![2, 5, 8], vec![3, 5, 7])); tags.push(Tag::PlainText(vec![2, 5, 8, 1], "Plain value 2".to_string())); tags } fn _new_tags() -> Vec<Tag> { vec![ Tag::Encrypted(vec![1, 1, 1], vec![2, 2, 2]), 
Tag::PlainText(vec![1, 1, 1], String::from("tag_value_3")) ] } fn _sort(mut v: Vec<Tag>) -> Vec<Tag> { v.sort(); v } }
use token::Token; use lexer::Lexer; use ast::Binop; use ast::Expr; #[derive(Debug, PartialEq)] pub enum ParseError { UnexpectedEOF, UnexpectedToken(Token) } pub struct Parser<'a, I, C> { lexer: Lexer<'a, I, C> } impl<'a, I, C> Parser<'a, I, C> where I: Iterator<Item=char> { pub fn new(lexer: Lexer<'a, I, C>) -> Parser<'a, I, C> { Parser { lexer: lexer } } } impl<'a, I, C> Parser<'a, I, C> where I: Iterator<Item=char> { pub fn expr(&mut self) -> Result<Expr, ParseError> { let left = match self.lexer.next() { Some(Token::DecimalInt(_)) => Expr::Number(1.0), Some(t) => return Err(ParseError::UnexpectedToken(t)), None => return Err(ParseError::UnexpectedEOF) }; let op = match self.lexer.next() { Some(Token::Plus) => Binop::Plus, Some(t) => return Err(ParseError::UnexpectedToken(t)), None => return Err(ParseError::UnexpectedEOF) }; let right = match self.lexer.next() { Some(Token::DecimalInt(_)) => Expr::Number(1.0), Some(t) => return Err(ParseError::UnexpectedToken(t)), None => return Err(ParseError::UnexpectedEOF) }; Ok(Expr::Binop(op, Box::new(left), Box::new(right))) } } success use token::Token; use lexer::Lexer; use ast::Binop; use ast::Expr; #[derive(Debug, PartialEq)] pub enum ParseError { UnexpectedEOF, UnexpectedToken(Token) } pub struct Parser<'a, I, C: 'a> { lexer: Lexer<'a, I, C> } impl<'a, I, C> Parser<'a, I, C> where I: Iterator<Item=char> { pub fn new(lexer: Lexer<'a, I, C>) -> Parser<'a, I, C> { Parser { lexer: lexer } } } impl<'a, I, C> Parser<'a, I, C> where I: Iterator<Item=char> { pub fn expr(&mut self) -> Result<Expr, ParseError> { let left = match self.lexer.next() { Some(Token::DecimalInt(_)) => Expr::Number(1.0), Some(t) => return Err(ParseError::UnexpectedToken(t)), None => return Err(ParseError::UnexpectedEOF) }; let op = match self.lexer.next() { Some(Token::Plus) => Binop::Plus, Some(t) => return Err(ParseError::UnexpectedToken(t)), None => return Err(ParseError::UnexpectedEOF) }; let right = match self.lexer.next() { 
Some(Token::DecimalInt(_)) => Expr::Number(1.0), Some(t) => return Err(ParseError::UnexpectedToken(t)), None => return Err(ParseError::UnexpectedEOF) }; Ok(Expr::Binop(op, Box::new(left), Box::new(right))) } }
// Copyright (c) 2014-2016 Sandstorm Development Group, Inc. // Licensed under the MIT License: // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
use gj::{Promise, EventLoop}; use capnp::Error; use capnp_rpc::{RpcSystem, twoparty, rpc_twoparty_capnp}; use rustc_serialize::{base64, hex, json}; use std::collections::hash_map::HashMap; use std::cell::RefCell; use std::rc::Rc; use collections_capnp::ui_view_metadata; use web_socket; use sandstorm::powerbox_capnp::powerbox_descriptor; use sandstorm::identity_capnp::{user_info}; use sandstorm::grain_capnp::{session_context, ui_view, ui_session, sandstorm_api}; use sandstorm::grain_capnp::{static_asset}; use sandstorm::web_session_capnp::{web_session}; use sandstorm::web_session_capnp::web_session::web_socket_stream; pub struct WebSocketStream { id: u64, saved_ui_views: Rc<RefCell<SavedUiViewSet>>, } impl Drop for WebSocketStream { fn drop(&mut self) { self.saved_ui_views.borrow_mut().subscribers.remove(&self.id); } } impl WebSocketStream { fn new(id: u64, saved_ui_views: Rc<RefCell<SavedUiViewSet>>) -> WebSocketStream { WebSocketStream { id: id, saved_ui_views: saved_ui_views, } } } impl web_socket::MessageHandler for WebSocketStream { fn handle_message(&mut self, message: web_socket::Message) -> Promise<(), Error> { // TODO: move PUTs and POSTs into websocket requests? 
match message { web_socket::Message::Text(_t) => { } web_socket::Message::Data(_d) => { } } Promise::ok(()) } } #[derive(Clone)] struct SavedUiViewData { title: String, date_added: u64, added_by: Option<String>, } fn optional_string_to_json(optional_string: &Option<String>) -> String { match optional_string { &None => "null".into(), &Some(ref s) => format!("{}", json::ToJson::to_json(s)), } } impl SavedUiViewData { fn to_json(&self) -> String { format!("{{\"title\":{},\"dateAdded\": \"{}\",\"addedBy\":{}}}", json::ToJson::to_json(&self.title), self.date_added, optional_string_to_json(&self.added_by)) } } #[derive(Clone)] struct ViewInfoData { app_title: String, grain_icon_url: String, } impl ViewInfoData { fn to_json(&self) -> String { format!("{{\"appTitle\":{},\"grainIconUrl\":\"{}\"}}", json::ToJson::to_json(&self.app_title), self.grain_icon_url) } } #[derive(Clone)] enum Action { Insert { token: String, data: SavedUiViewData }, Remove { token: String }, ViewInfo { token: String, data: Result<ViewInfoData, Error> }, CanWrite(bool), UserId(Option<String>), Description(String), } impl Action { fn to_json(&self) -> String { match self { &Action::Insert { ref token, ref data } => { format!("{{\"insert\":{{\"token\":\"{}\",\"data\":{} }} }}", token, data.to_json()) } &Action::Remove { ref token } => { format!("{{\"remove\":{{\"token\":\"{}\"}}}}", token) } &Action::ViewInfo { ref token, data: Ok(ref data) } => { format!("{{\"viewInfo\":{{\"token\":\"{}\",\"data\":{} }} }}", token, data.to_json()) } &Action::ViewInfo { ref token, data: Err(ref e) } => { format!("{{\"viewInfo\":{{\"token\":\"{}\",\"failed\": {} }} }}", token, json::ToJson::to_json(&format!("{}", e))) } &Action::CanWrite(b) => { format!("{{\"canWrite\":{}}}", b) } &Action::UserId(ref s) => { format!("{{\"userId\":{}}}", optional_string_to_json(s)) } &Action::Description(ref s) => { format!("{{\"description\":{}}}", json::ToJson::to_json(s)) } } } } struct Reaper; impl ::gj::TaskReaper<(), Error> for 
Reaper { fn task_failed(&mut self, error: Error) { // TODO better message. println!("task failed: {}", error); } } pub struct SavedUiViewSet { tmp_dir: ::std::path::PathBuf, sturdyref_dir: ::std::path::PathBuf, /// Invariant: Every entry in this map has been persisted to the filesystem and has sent /// out Action::Insert messages to each subscriber. views: HashMap<String, SavedUiViewData>, view_infos: HashMap<String, Result<ViewInfoData, Error>>, next_id: u64, subscribers: HashMap<u64, web_socket_stream::Client>, tasks: ::gj::TaskSet<(), Error>, description: String, sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>, } impl SavedUiViewSet { pub fn new<P1, P2>(tmp_dir: P1, sturdyref_dir: P2, sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>) -> ::capnp::Result<Rc<RefCell<SavedUiViewSet>>> where P1: AsRef<::std::path::Path>, P2: AsRef<::std::path::Path> { let description = match ::std::fs::File::open("/var/description") { Ok(mut f) => { use std::io::Read; let mut result = String::new(); try!(f.read_to_string(&mut result)); result } Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => { use std::io::Write; let mut f = try!(::std::fs::File::create("/var/description")); let result = ""; try!(f.write_all(result.as_bytes())); result.into() } Err(e) => { return Err(e.into()); } }; let result = Rc::new(RefCell::new(SavedUiViewSet { tmp_dir: tmp_dir.as_ref().to_path_buf(), sturdyref_dir: sturdyref_dir.as_ref().to_path_buf(), views: HashMap::new(), view_infos: HashMap::new(), next_id: 0, subscribers: HashMap::new(), tasks: ::gj::TaskSet::new(Box::new(Reaper)), description: description, sandstorm_api: sandstorm_api, })); // create sturdyref directory if it does not yet exist try!(::std::fs::create_dir_all(&sturdyref_dir)); // clear and create tmp directory match ::std::fs::remove_dir_all(&tmp_dir) { Ok(()) => (), Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => (), Err(e) => return Err(e.into()), } 
try!(::std::fs::create_dir_all(&tmp_dir)); for token_file in try!(::std::fs::read_dir(&sturdyref_dir)) { let dir_entry = try!(token_file); let token: String = match dir_entry.file_name().to_str() { None => { println!("malformed token: {:?}", dir_entry.file_name()); continue } Some(s) => s.into(), }; if token.ends_with(".uploading") { // At one point, these temporary files got uploading directly into this directory. try!(::std::fs::remove_file(dir_entry.path())); } else { let mut reader = try!(::std::fs::File::open(dir_entry.path())); let message = try!(::capnp::serialize::read_message(&mut reader, Default::default())); let metadata: ui_view_metadata::Reader = try!(message.get_root()); let added_by = if metadata.has_added_by() { Some(try!(metadata.get_added_by()).into()) } else { None }; let entry = SavedUiViewData { title: try!(metadata.get_title()).into(), date_added: metadata.get_date_added(), added_by: added_by, }; result.borrow_mut().views.insert(token.clone(), entry); try!(SavedUiViewSet::retrieve_view_info(&result, token)); } } Ok(result) } fn get_saved_data<'a>(&'a self, token: &'a String) -> Option<&'a SavedUiViewData> { self.views.get(token) } fn retrieve_view_info(set_ref: &Rc<RefCell<SavedUiViewSet>>, token: String) -> ::capnp::Result<()> { // SandstormApi.restore, then call getViewInfo, // then call get_url() on the grain static asset. 
let set = set_ref.clone();

        // The token on disk is base64url (see `insert()`/`receive_request_token()`);
        // SandstormApi.restore() wants the raw bytes.
        let binary_token = match base64::FromBase64::from_base64(&token[..]) {
            Ok(b) => b,
            Err(e) => return Err(Error::failed(format!("{}", e))),
        };
        let mut req = set.borrow().sandstorm_api.restore_request();
        req.get().set_token(&binary_token);
        let task = req.send().promise.then(move |response| {
            // restore() hands back the saved UiView capability.
            let view: ui_view::Client =
                pry!(pry!(response.get()).get_cap().get_as_capability());
            view.get_view_info_request().send().promise.then(move |response| {
                let view_info = pry!(response.get());
                let app_title =
                    pry!(pry!(view_info.get_app_title()).get_default_text()).to_string();
                let asset = pry!(view_info.get_grain_icon());
                // Resolve the icon's static asset into a concrete URL.
                asset.get_url_request().send().promise.map(move |response| {
                    let result = try!(response.get());
                    let protocol = match try!(result.get_protocol()) {
                        static_asset::Protocol::Https => "https".to_string(),
                        static_asset::Protocol::Http => "http".to_string(),
                    };
                    Ok(ViewInfoData {
                        app_title: app_title,
                        grain_icon_url: format!("{}://{}",
                                                protocol,
                                                try!(result.get_host_path())),
                    })
                })
            })
        }).map_else(move |result| {
            // map_else() runs on success *and* failure: cache the outcome either
            // way and broadcast it — Action::ViewInfo carries a Result, so
            // subscribers also learn about restore failures.
            set.borrow_mut().view_infos.insert(token.clone(), result.clone());
            set.borrow_mut().send_action_to_subscribers(Action::ViewInfo {
                token: token,
                data: result,
            });
            Ok(())
        });
        set_ref.borrow_mut().tasks.add(task);
        Ok(())
    }

    /// Atomically replaces /var/description with `description` (which must be
    /// valid UTF-8) and broadcasts the new text to subscribers.
    fn update_description(&mut self, description: &[u8]) -> ::capnp::Result<()> {
        use std::io::Write;
        let desc_string: String = match ::std::str::from_utf8(description) {
            Err(e) => return Err(::capnp::Error::failed(format!("{}", e))),
            Ok(d) => d.into(),
        };
        // Write-then-rename so readers never observe a partially-written file.
        let temp_path = format!("/var/description.uploading");
        try!(try!(::std::fs::File::create(&temp_path)).write_all(description));
        try!(::std::fs::rename(temp_path, "/var/description"));
        self.description = desc_string.clone();
        self.send_action_to_subscribers(Action::Description(desc_string));
        Ok(())
    }

    /// Persists metadata for the newly-claimed sturdyref `token` and notifies
    /// subscribers. `added_by` is the adding user's identity ID, if known.
    fn insert(&mut self, token: String, title: String, added_by: Option<String>)
              -> ::capnp::Result<()>
    {
        let dur =
try!(::std::time::SystemTime::now().duration_since(::std::time::UNIX_EPOCH)
                 .map_err(|e| Error::failed(format!("{}", e))));
        // Milliseconds since the UNIX epoch.
        let date_added = dur.as_secs() * 1000 + (dur.subsec_nanos() / 1000000) as u64;

        // Final location of the persisted metadata: <sturdyref_dir>/<token>.
        let mut token_path = ::std::path::PathBuf::new();
        token_path.push(self.sturdyref_dir.clone());
        token_path.push(token.clone());

        // Stage the write in the tmp dir so a crash can never leave a
        // partially-written file at the final path.
        let mut temp_path = ::std::path::PathBuf::new();
        temp_path.push(self.tmp_dir.clone());
        temp_path.push(format!("{}.uploading", token));

        let mut writer = try!(::std::fs::File::create(&temp_path));
        let mut message = ::capnp::message::Builder::new_default();
        {
            let mut metadata: ui_view_metadata::Builder = message.init_root();
            metadata.set_title(&title);
            metadata.set_date_added(date_added);
            if let Some(ref s) = added_by {
                metadata.set_added_by(s);
            }
        }
        try!(::capnp::serialize::write_message(&mut writer, &message));

        // Flush to durable storage *before* the rename. (Previously sync_all()
        // ran after the rename, leaving a crash window in which the file could
        // appear at its final path with unflushed contents.)
        try!(writer.sync_all());
        try!(::std::fs::rename(temp_path, token_path));

        let entry = SavedUiViewData {
            title: title,
            date_added: date_added,
            added_by: added_by,
        };

        // Notify subscribers and record the entry, upholding the invariant
        // documented on the `views` field.
        self.send_action_to_subscribers(Action::Insert {
            token: token.clone(),
            data: entry.clone(),
        });
        self.views.insert(token, entry);
        Ok(())
    }

    /// Serializes `action` to JSON and pushes it to every connected websocket
    /// subscriber. Send failures are handled by the task reaper.
    fn send_action_to_subscribers(&mut self, action: Action) {
        let json_string = action.to_json();
        for (_, sub) in &self.subscribers {
            let mut req = sub.send_bytes_request();
            web_socket::encode_text_message(req.get(), &json_string);
            self.tasks.add(req.send().promise.map(|_| Ok(())));
        }
    }

    /// Deletes the persisted sturdyref file for `token` (a missing file is not
    /// an error), drops it from the in-memory map, and notifies subscribers.
    fn remove(&mut self, token: &str) -> Result<(), Error> {
        let mut path = self.sturdyref_dir.clone();
        path.push(token);
        if let Err(e) = ::std::fs::remove_file(path) {
            if e.kind() != ::std::io::ErrorKind::NotFound {
                return Err(e.into())
            }
        }
        self.send_action_to_subscribers(Action::Remove { token: token.into() });
        self.views.remove(token);
        Ok(())
    }

    /// Registers a new websocket subscriber and replays the current state
    /// (write permission, user id, description, saved views, view infos) to it.
    fn new_subscribed_websocket(set: &Rc<RefCell<SavedUiViewSet>>,
                                client_stream: web_socket_stream::Client,
                                can_write: bool,
                                user_id: Option<String>,
                                timer: &::gjio::Timer)
                                -> web_socket_stream::Client
    {
        fn
send_action(task: Promise<(), Error>, client_stream: &web_socket_stream::Client, action: Action) -> Promise<(), Error> { let json_string = action.to_json(); let mut req = client_stream.send_bytes_request(); web_socket::encode_text_message(req.get(), &json_string); let promise = req.send().promise.map(|_| Ok(())); task.then(|_| promise) } let id = set.borrow().next_id; set.borrow_mut().next_id = id + 1; set.borrow_mut().subscribers.insert(id, client_stream.clone()); let mut task = Promise::ok(()); task = send_action(task, &client_stream, Action::CanWrite(can_write)); task = send_action(task, &client_stream, Action::UserId(user_id)); task = send_action(task, &client_stream, Action::Description(set.borrow().description.clone())); for (t, v) in &set.borrow().views { task = send_action( task, &client_stream, Action::Insert { token: t.clone(), data: v.clone() } ); } for (t, vi) in &set.borrow().view_infos { task = send_action( task, &client_stream, Action::ViewInfo { token: t.clone(), data: vi.clone(), } ); } set.borrow_mut().tasks.add(task); web_socket_stream::ToClient::new( web_socket::Adapter::new( WebSocketStream::new(id, set.clone()), client_stream, timer.clone())).from_server::<::capnp_rpc::Server>() } } const ADD_GRAIN_ACTIVITY_INDEX: u16 = 0; const REMOVE_GRAIN_ACTIVITY_INDEX: u16 = 1; const EDIT_DESCRIPTION_ACTIVITY_INDEX: u16 = 2; pub struct WebSession { timer: ::gjio::Timer, can_write: bool, sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>, context: session_context::Client, saved_ui_views: Rc<RefCell<SavedUiViewSet>>, identity_id: Option<String>, } impl WebSession { pub fn new(timer: ::gjio::Timer, user_info: user_info::Reader, context: session_context::Client, _params: web_session::params::Reader, sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>, saved_ui_views: Rc<RefCell<SavedUiViewSet>>) -> ::capnp::Result<WebSession> { // Permission #0 is "write". Check if bit 0 in the PermissionSet is set. 
let permissions = try!(user_info.get_permissions()); let can_write = permissions.len() > 0 && permissions.get(0); let identity_id = if user_info.has_identity_id() { Some(hex::ToHex::to_hex(try!(user_info.get_identity_id()))) } else { None }; Ok(WebSession { timer: timer, can_write: can_write, sandstorm_api: sandstorm_api, context: context, saved_ui_views: saved_ui_views, identity_id: identity_id, }) // `UserInfo` is defined in `sandstorm/grain.capnp` and contains info like: // - A stable ID for the user, so you can correlate sessions from the same user. // - The user's display name, e.g. "Mark Miller", useful for identifying the user to other // users. // - The user's permissions (seen above). // `WebSession::Params` is defined in `sandstorm/web-session.capnp` and contains info like: // - The hostname where the grain was mapped for this user. Every time a user opens a grain, // it is mapped at a new random hostname for security reasons. // - The user's User-Agent and Accept-Languages headers. // `SessionContext` is defined in `sandstorm/grain.capnp` and implements callbacks for // sharing/access control and service publishing/discovery. } } impl ui_session::Server for WebSession {} impl web_session::Server for WebSession { fn get(&mut self, params: web_session::GetParams, mut results: web_session::GetResults) -> Promise<(), Error> { // HTTP GET request. 
let path = pry!(pry!(params.get()).get_path()); pry!(self.require_canonical_path(path)); if path == "" { let text = "<!DOCTYPE html>\ <html><head>\ <link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\">\ <script type=\"text/javascript\" src=\"script.js\" async></script> </head><body><div id=\"main\"></div></body></html>"; let mut content = results.get().init_content(); content.set_mime_type("text/html; charset=UTF-8"); content.init_body().set_bytes(text.as_bytes()); Promise::ok(()) } else if path == "script.js" { self.read_file("/script.js.gz", results, "text/javascript; charset=UTF-8", Some("gzip")) } else if path == "style.css" { self.read_file("/style.css.gz", results, "text/css; charset=UTF-8", Some("gzip")) } else { let mut error = results.get().init_client_error(); error.set_status_code(web_session::response::ClientErrorCode::NotFound); Promise::ok(()) } } fn post(&mut self, params: web_session::PostParams, mut results: web_session::PostResults) -> Promise<(), Error> { let path = { let path = pry!(pry!(params.get()).get_path()); pry!(self.require_canonical_path(path)); path.to_string() }; if path.starts_with("token/") { self.receive_request_token(path[6..].to_string(), params, results) } else if path.starts_with("offer/") { let token = path[6..].to_string(); let title = match self.saved_ui_views.borrow().get_saved_data(&token) { None => { let mut error = results.get().init_client_error(); error.set_status_code(web_session::response::ClientErrorCode::NotFound); return Promise::ok(()) } Some(saved_ui_view) => saved_ui_view.title.to_string(), }; self.offer_ui_view(token, title, params, results) } else { let mut error = results.get().init_client_error(); error.set_status_code(web_session::response::ClientErrorCode::NotFound); Promise::ok(()) } } fn put(&mut self, params: web_session::PutParams, mut results: web_session::PutResults) -> Promise<(), Error> { // HTTP PUT request. 
let params = pry!(params.get()); let path = pry!(params.get_path()); pry!(self.require_canonical_path(path)); if !self.can_write { results.get().init_client_error() .set_status_code(web_session::response::ClientErrorCode::Forbidden); Promise::ok(()) } else if path == "description" { let content = pry!(pry!(params.get_content()).get_content()); pry!(self.saved_ui_views.borrow_mut().update_description(content)); let mut req = self.context.activity_request(); req.get().init_event().set_type(EDIT_DESCRIPTION_ACTIVITY_INDEX); req.send().promise.then(move |_| { results.get().init_no_content(); Promise::ok(()) }) } else { results.get().init_client_error() .set_status_code(web_session::response::ClientErrorCode::Forbidden); Promise::ok(()) } } fn delete(&mut self, params: web_session::DeleteParams, mut results: web_session::DeleteResults) -> Promise<(), Error> { // HTTP DELETE request. let path = pry!(pry!(params.get()).get_path()); pry!(self.require_canonical_path(path)); if !path.starts_with("sturdyref/") { return Promise::err(Error::failed("DELETE only supported under sturdyref/".to_string())); } if !self.can_write { results.get().init_client_error() .set_status_code(web_session::response::ClientErrorCode::Forbidden); Promise::ok(()) } else { let token_str = &path[10..]; let binary_token = match base64::FromBase64::from_base64(token_str) { Ok(b) => b, Err(e) => { results.get().init_client_error().set_description_html(&format!("{}", e)[..]); return Promise::ok(()) } }; pry!(self.saved_ui_views.borrow_mut().remove(token_str)); let context = self.context.clone(); let mut req = self.sandstorm_api.drop_request(); req.get().set_token(&binary_token); req.send().promise.then_else(move |_| { // then_else() because drop() is currently broken. 
:( let mut req = context.activity_request(); req.get().init_event().set_type(REMOVE_GRAIN_ACTIVITY_INDEX); req.send().promise.then(move |_| { results.get().init_no_content(); Promise::ok(()) }) }) } } fn open_web_socket(&mut self, params: web_session::OpenWebSocketParams, mut results: web_session::OpenWebSocketResults) -> Promise<(), Error> { let client_stream = pry!(pry!(params.get()).get_client_stream()); results.get().set_server_stream( SavedUiViewSet::new_subscribed_websocket( &self.saved_ui_views, client_stream, self.can_write, self.identity_id.clone(), &self.timer)); Promise::ok(()) } } fn fill_in_client_error(mut results: web_session::PostResults, e: Error) { let mut client_error = results.get().init_client_error(); client_error.set_description_html(&format!("{}", e)[..]); } impl WebSession { fn offer_ui_view(&mut self, text_token: String, title: String, _params: web_session::PostParams, mut results: web_session::PostResults) -> Promise<(), Error> { let token = match base64::FromBase64::from_base64(&text_token[..]) { Ok(b) => b, Err(e) => return Promise::err(Error::failed(format!("{}", e))), }; let session_context = self.context.clone(); let mut req = self.sandstorm_api.restore_request(); req.get().set_token(&token); req.send().promise.then(move |response| { let sealed_ui_view: ui_view::Client = pry!(pry!(response.get()).get_cap().get_as_capability()); let mut req = session_context.offer_request(); req.get().get_cap().set_as_capability(sealed_ui_view.client.hook); { use capnp::traits::HasTypeId; let tags = req.get().init_descriptor().init_tags(1); let mut tag = tags.get(0); tag.set_id(ui_view::Client::type_id()); let mut value: ui_view::powerbox_tag::Builder = tag.get_value().init_as(); value.set_title(&title); } req.send().promise }).then_else(move |r| match r { Ok(_) => { results.get().init_no_content(); Promise::ok(()) } Err(e) => { fill_in_client_error(results, e); Promise::ok(()) } }) } fn read_powerbox_tag(&mut self, decoded_content: Vec<u8>) -> 
::capnp::Result<String> { let mut cursor = ::std::io::Cursor::new(decoded_content); let message = try!(::capnp::serialize_packed::read_message(&mut cursor, Default::default())); let desc: powerbox_descriptor::Reader = try!(message.get_root()); let tags = try!(desc.get_tags()); if tags.len() == 0 { Err(Error::failed("no powerbox tag".into())) } else { let value: ui_view::powerbox_tag::Reader = try!(tags.get(0).get_value().get_as()); Ok(try!(value.get_title()).into()) } } fn receive_request_token(&mut self, token: String, params: web_session::PostParams, mut results: web_session::PostResults) -> Promise<(), Error> { let content = pry!(pry!(pry!(params.get()).get_content()).get_content()); let decoded_content = match base64::FromBase64::from_base64(content) { Ok(c) => c, Err(_) => { fill_in_client_error(results, Error::failed("failed to convert from base64".into())); return Promise::ok(()) } }; let grain_title: String = match self.read_powerbox_tag(decoded_content) { Ok(t) => t, Err(e) => { fill_in_client_error(results, e); return Promise::ok(()); } }; // now let's save this thing into an actual uiview sturdyref let mut req = self.context.claim_request_request(); let sandstorm_api = self.sandstorm_api.clone(); req.get().set_request_token(&token[..]); let saved_ui_views = self.saved_ui_views.clone(); let identity_id = self.identity_id.clone(); let do_stuff = req.send().promise.then(move |response| { let sealed_ui_view: ui_view::Client = pry!(pry!(response.get()).get_cap().get_as_capability()); let mut req = sandstorm_api.save_request(); req.get().get_cap().set_as_capability(sealed_ui_view.client.hook); { let mut save_label = req.get().init_label(); save_label.set_default_text(&format!("grain with title: {}", grain_title)[..]); } req.send().promise.map(move |response| { let binary_token = try!(try!(response.get()).get_token()); let token = base64::ToBase64::to_base64(binary_token, base64::URL_SAFE); try!(saved_ui_views.borrow_mut().insert(token.clone(), grain_title, 
identity_id)); try!(SavedUiViewSet::retrieve_view_info(&saved_ui_views, token)); Ok(()) }) }); let context = self.context.clone(); do_stuff.then_else(move |r| match r { Ok(()) => { let mut req = context.activity_request(); req.get().init_event().set_type(ADD_GRAIN_ACTIVITY_INDEX); req.send().promise.then(move |_| { let mut _content = results.get().init_content(); Promise::ok(()) }) } Err(e) => { let mut error = results.get().init_client_error(); error.set_description_html(&format!("error: {:?}", e)); Promise::ok(()) } }) } fn require_canonical_path(&self, path: &str) -> Result<(), Error> { // Require that the path doesn't contain "." or ".." or consecutive slashes, to prevent path // injection attacks. // // Note that such attacks wouldn't actually accomplish much since everything outside /var // is a read-only filesystem anyway, containing the app package contents which are non-secret. for (idx, component) in path.split_terminator("/").enumerate() { if component == "." || component == ".." 
|| (component == "" && idx > 0) { return Err(Error::failed(format!("non-canonical path: {:?}", path))); } } Ok(()) } fn read_file(&self, filename: &str, mut results: web_session::GetResults, content_type: &str, encoding: Option<&str>) -> Promise<(), Error> { match ::std::fs::File::open(filename) { Ok(mut f) => { let size = pry!(f.metadata()).len(); let mut content = results.get().init_content(); content.set_status_code(web_session::response::SuccessCode::Ok); content.set_mime_type(content_type); encoding.map(|enc| content.set_encoding(enc)); let mut body = content.init_body().init_bytes(size as u32); pry!(::std::io::copy(&mut f, &mut body)); Promise::ok(()) } Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => { let mut error = results.get().init_client_error(); error.set_status_code(web_session::response::ClientErrorCode::NotFound); Promise::ok(()) } Err(e) => { Promise::err(e.into()) } } } } pub struct UiView { timer: ::gjio::Timer, sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>, saved_ui_views: Rc<RefCell<SavedUiViewSet>>, } impl UiView { fn new(timer: ::gjio::Timer, client: sandstorm_api::Client<::capnp::any_pointer::Owned>, saved_ui_views: Rc<RefCell<SavedUiViewSet>>) -> UiView { UiView { timer: timer, sandstorm_api: client, saved_ui_views: saved_ui_views, } } } impl ui_view::Server for UiView { fn get_view_info(&mut self, _params: ui_view::GetViewInfoParams, mut results: ui_view::GetViewInfoResults) -> Promise<(), Error> { let mut view_info = results.get(); // Define a "write" permission, and then define roles "editor" and "viewer" where only // "editor" has the "write" permission. This will allow people to share read-only. 
{ let perms = view_info.borrow().init_permissions(1); let mut write = perms.get(0); write.set_name("write"); write.init_title().set_default_text("write"); } { let mut roles = view_info.borrow().init_roles(2); { let mut editor = roles.borrow().get(0); editor.borrow().init_title().set_default_text("editor"); editor.borrow().init_verb_phrase().set_default_text("can edit"); editor.init_permissions(1).set(0, true); // has "write" permission } { let mut viewer = roles.get(1); viewer.set_default(true); viewer.borrow().init_title().set_default_text("viewer"); viewer.borrow().init_verb_phrase().set_default_text("can view"); viewer.init_permissions(1).set(0, false); // does not have "write" permission } } { let mut event_types = view_info.init_event_types(3); { let mut added = event_types.borrow().get(ADD_GRAIN_ACTIVITY_INDEX as u32); added.set_name("add"); added.borrow().init_verb_phrase().set_default_text("grain added"); } { let mut removed = event_types.borrow().get(REMOVE_GRAIN_ACTIVITY_INDEX as u32); removed.set_name("remove"); removed.borrow().init_verb_phrase().set_default_text("grain removed"); } { let mut removed = event_types.borrow().get(EDIT_DESCRIPTION_ACTIVITY_INDEX as u32); removed.set_name("description"); removed.borrow().init_verb_phrase().set_default_text("description edited"); } } Promise::ok(()) } fn new_session(&mut self, params: ui_view::NewSessionParams, mut results: ui_view::NewSessionResults) -> Promise<(), Error> { use ::capnp::traits::HasTypeId; let params = pry!(params.get()); if params.get_session_type() != web_session::Client::type_id() { return Promise::err(Error::failed("unsupported session type".to_string())); } let session = pry!(WebSession::new( self.timer.clone(), pry!(params.get_user_info()), pry!(params.get_context()), pry!(params.get_session_params().get_as()), self.sandstorm_api.clone(), self.saved_ui_views.clone())); let client: web_session::Client = web_session::ToClient::new(session).from_server::<::capnp_rpc::Server>(); // We need 
to do this silly dance to upcast. results.get().set_session(ui_session::Client { client : client.client}); Promise::ok(()) } } pub fn main() -> Result<(), Box<::std::error::Error>> { EventLoop::top_level(move |wait_scope| { let mut event_port = try!(::gjio::EventPort::new()); let network = event_port.get_network(); // Sandstorm launches us with a connection on file descriptor 3. let stream = try!(unsafe { network.wrap_raw_socket_descriptor(3) }); let (p, f) = Promise::and_fulfiller(); let sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned> = ::capnp_rpc::new_promise_client(p); let saved_uiviews = try!(SavedUiViewSet::new("/var/tmp", "/var/sturdyrefs", sandstorm_api.clone())); let uiview = UiView::new( event_port.get_timer(), sandstorm_api, saved_uiviews); let client = ui_view::ToClient::new(uiview).from_server::<::capnp_rpc::Server>(); let network = twoparty::VatNetwork::new(stream.clone(), stream, rpc_twoparty_capnp::Side::Client, Default::default()); let mut rpc_system = RpcSystem::new(Box::new(network), Some(client.client)); let cap = rpc_system.bootstrap::<sandstorm_api::Client<::capnp::any_pointer::Owned>>( ::capnp_rpc::rpc_twoparty_capnp::Side::Server); f.fulfill(cap.client); Promise::never_done().wait(wait_scope, &mut event_port) }) } propagate error when restore fails // Copyright (c) 2014-2016 Sandstorm Development Group, Inc. // Licensed under the MIT License: // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. 
// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use gj::{Promise, EventLoop}; use capnp::Error; use capnp_rpc::{RpcSystem, twoparty, rpc_twoparty_capnp}; use rustc_serialize::{base64, hex, json}; use std::collections::hash_map::HashMap; use std::cell::RefCell; use std::rc::Rc; use collections_capnp::ui_view_metadata; use web_socket; use sandstorm::powerbox_capnp::powerbox_descriptor; use sandstorm::identity_capnp::{user_info}; use sandstorm::grain_capnp::{session_context, ui_view, ui_session, sandstorm_api}; use sandstorm::grain_capnp::{static_asset}; use sandstorm::web_session_capnp::{web_session}; use sandstorm::web_session_capnp::web_session::web_socket_stream; pub struct WebSocketStream { id: u64, saved_ui_views: Rc<RefCell<SavedUiViewSet>>, } impl Drop for WebSocketStream { fn drop(&mut self) { self.saved_ui_views.borrow_mut().subscribers.remove(&self.id); } } impl WebSocketStream { fn new(id: u64, saved_ui_views: Rc<RefCell<SavedUiViewSet>>) -> WebSocketStream { WebSocketStream { id: id, saved_ui_views: saved_ui_views, } } } impl web_socket::MessageHandler for WebSocketStream { fn handle_message(&mut self, message: web_socket::Message) -> Promise<(), Error> { // TODO: move PUTs and POSTs into websocket requests? 
match message {
            // Incoming websocket messages are currently ignored; all state
            // changes arrive via the HTTP handlers instead.
            web_socket::Message::Text(_t) => {
            }
            web_socket::Message::Data(_d) => {
            }
        }
        Promise::ok(())
    }
}

/// Metadata about one saved grain, mirrored on disk as a serialized
/// `UiViewMetadata` message named after its sturdyref token.
#[derive(Clone)]
struct SavedUiViewData {
    // User-visible grain title at the time it was saved.
    title: String,
    // Milliseconds since the UNIX epoch.
    date_added: u64,
    // Identity ID (hex) of the user who added the grain, if known.
    added_by: Option<String>,
}

/// Renders an optional string as a JSON value: `null` when absent, an
/// escaped, quoted JSON string otherwise.
fn optional_string_to_json(optional_string: &Option<String>) -> String {
    match optional_string {
        &None => "null".into(),
        &Some(ref s) => format!("{}", json::ToJson::to_json(s)),
    }
}

impl SavedUiViewData {
    /// Hand-rolled JSON encoding. `title` and `added_by` are escaped via
    /// `json::ToJson`; `date_added` is emitted as a quoted number.
    fn to_json(&self) -> String {
        format!("{{\"title\":{},\"dateAdded\": \"{}\",\"addedBy\":{}}}",
                json::ToJson::to_json(&self.title),
                self.date_added,
                optional_string_to_json(&self.added_by))
    }
}

/// App title and grain-icon URL fetched asynchronously from a restored view
/// (see `SavedUiViewSet::retrieve_view_info`).
#[derive(Clone)]
struct ViewInfoData {
    app_title: String,
    grain_icon_url: String,
}

impl ViewInfoData {
    /// Hand-rolled JSON encoding. `grain_icon_url` is interpolated without
    /// escaping — it comes from the Sandstorm API, not from user input.
    fn to_json(&self) -> String {
        format!("{{\"appTitle\":{},\"grainIconUrl\":\"{}\"}}",
                json::ToJson::to_json(&self.app_title),
                self.grain_icon_url)
    }
}

/// A state-change event pushed to websocket subscribers, encoded as a JSON
/// object with a single key identifying the variant.
#[derive(Clone)]
enum Action {
    Insert { token: String, data: SavedUiViewData },
    Remove { token: String },
    ViewInfo { token: String, data: Result<ViewInfoData, Error> },
    CanWrite(bool),
    UserId(Option<String>),
    Description(String),
}

impl Action {
    /// Encodes the action as JSON. Tokens are base64url strings and are
    /// interpolated without further escaping.
    fn to_json(&self) -> String {
        match self {
            &Action::Insert { ref token, ref data } => {
                format!("{{\"insert\":{{\"token\":\"{}\",\"data\":{} }} }}",
                        token, data.to_json())
            }
            &Action::Remove { ref token } => {
                format!("{{\"remove\":{{\"token\":\"{}\"}}}}", token)
            }
            &Action::ViewInfo { ref token, data: Ok(ref data) } => {
                format!("{{\"viewInfo\":{{\"token\":\"{}\",\"data\":{} }} }}",
                        token, data.to_json())
            }
            &Action::ViewInfo { ref token, data: Err(ref e) } => {
                // A failed view-info fetch is reported under "failed" so the
                // client can display the error.
                format!("{{\"viewInfo\":{{\"token\":\"{}\",\"failed\": {} }} }}",
                        token, json::ToJson::to_json(&format!("{}", e)))
            }
            &Action::CanWrite(b) => {
                format!("{{\"canWrite\":{}}}", b)
            }
            &Action::UserId(ref s) => {
                format!("{{\"userId\":{}}}", optional_string_to_json(s))
            }
            &Action::Description(ref s) => {
                format!("{{\"description\":{}}}", json::ToJson::to_json(s))
            }
        }
    }
}

/// Reaps failed background tasks; failures are only logged.
struct Reaper;

impl ::gj::TaskReaper<(), Error> for
Reaper { fn task_failed(&mut self, error: Error) { // TODO better message. println!("task failed: {}", error); } } pub struct SavedUiViewSet { tmp_dir: ::std::path::PathBuf, sturdyref_dir: ::std::path::PathBuf, /// Invariant: Every entry in this map has been persisted to the filesystem and has sent /// out Action::Insert messages to each subscriber. views: HashMap<String, SavedUiViewData>, view_infos: HashMap<String, Result<ViewInfoData, Error>>, next_id: u64, subscribers: HashMap<u64, web_socket_stream::Client>, tasks: ::gj::TaskSet<(), Error>, description: String, sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>, } impl SavedUiViewSet { pub fn new<P1, P2>(tmp_dir: P1, sturdyref_dir: P2, sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>) -> ::capnp::Result<Rc<RefCell<SavedUiViewSet>>> where P1: AsRef<::std::path::Path>, P2: AsRef<::std::path::Path> { let description = match ::std::fs::File::open("/var/description") { Ok(mut f) => { use std::io::Read; let mut result = String::new(); try!(f.read_to_string(&mut result)); result } Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => { use std::io::Write; let mut f = try!(::std::fs::File::create("/var/description")); let result = ""; try!(f.write_all(result.as_bytes())); result.into() } Err(e) => { return Err(e.into()); } }; let result = Rc::new(RefCell::new(SavedUiViewSet { tmp_dir: tmp_dir.as_ref().to_path_buf(), sturdyref_dir: sturdyref_dir.as_ref().to_path_buf(), views: HashMap::new(), view_infos: HashMap::new(), next_id: 0, subscribers: HashMap::new(), tasks: ::gj::TaskSet::new(Box::new(Reaper)), description: description, sandstorm_api: sandstorm_api, })); // create sturdyref directory if it does not yet exist try!(::std::fs::create_dir_all(&sturdyref_dir)); // clear and create tmp directory match ::std::fs::remove_dir_all(&tmp_dir) { Ok(()) => (), Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => (), Err(e) => return Err(e.into()), } 
try!(::std::fs::create_dir_all(&tmp_dir)); for token_file in try!(::std::fs::read_dir(&sturdyref_dir)) { let dir_entry = try!(token_file); let token: String = match dir_entry.file_name().to_str() { None => { println!("malformed token: {:?}", dir_entry.file_name()); continue } Some(s) => s.into(), }; if token.ends_with(".uploading") { // At one point, these temporary files got uploading directly into this directory. try!(::std::fs::remove_file(dir_entry.path())); } else { let mut reader = try!(::std::fs::File::open(dir_entry.path())); let message = try!(::capnp::serialize::read_message(&mut reader, Default::default())); let metadata: ui_view_metadata::Reader = try!(message.get_root()); let added_by = if metadata.has_added_by() { Some(try!(metadata.get_added_by()).into()) } else { None }; let entry = SavedUiViewData { title: try!(metadata.get_title()).into(), date_added: metadata.get_date_added(), added_by: added_by, }; result.borrow_mut().views.insert(token.clone(), entry); try!(SavedUiViewSet::retrieve_view_info(&result, token)); } } Ok(result) } fn get_saved_data<'a>(&'a self, token: &'a String) -> Option<&'a SavedUiViewData> { self.views.get(token) } fn retrieve_view_info(set_ref: &Rc<RefCell<SavedUiViewSet>>, token: String) -> ::capnp::Result<()> { // SandstormApi.restore, then call getViewInfo, // then call get_url() on the grain static asset. 
let set = set_ref.clone(); let binary_token = match base64::FromBase64::from_base64(&token[..]) { Ok(b) => b, Err(e) => return Err(Error::failed(format!("{}", e))), }; let mut req = set.borrow().sandstorm_api.restore_request(); req.get().set_token(&binary_token); let task = req.send().promise.then(move |response| { let view: ui_view::Client = pry!(pry!(response.get()).get_cap().get_as_capability()); view.get_view_info_request().send().promise.then(move |response| { let view_info = pry!(response.get()); let app_title = pry!(pry!(view_info.get_app_title()).get_default_text()).to_string(); let asset = pry!(view_info.get_grain_icon()); asset.get_url_request().send().promise.map(move |response| { let result = try!(response.get()); let protocol = match try!(result.get_protocol()) { static_asset::Protocol::Https => "https".to_string(), static_asset::Protocol::Http => "http".to_string(), }; Ok(ViewInfoData { app_title: app_title, grain_icon_url: format!("{}://{}", protocol, try!(result.get_host_path())), }) }) }) }).map_else(move |result| { set.borrow_mut().view_infos.insert(token.clone(), result.clone()); set.borrow_mut().send_action_to_subscribers(Action::ViewInfo { token: token, data: result, }); Ok(()) }); set_ref.borrow_mut().tasks.add(task); Ok(()) } fn update_description(&mut self, description: &[u8]) -> ::capnp::Result<()> { use std::io::Write; let desc_string: String = match ::std::str::from_utf8(description) { Err(e) => return Err(::capnp::Error::failed(format!("{}", e))), Ok(d) => d.into(), }; let temp_path = format!("/var/description.uploading"); try!(try!(::std::fs::File::create(&temp_path)).write_all(description)); try!(::std::fs::rename(temp_path, "/var/description")); self.description = desc_string.clone(); self.send_action_to_subscribers(Action::Description(desc_string)); Ok(()) } fn insert(&mut self, token: String, title: String, added_by: Option<String>) -> ::capnp::Result<()> { let dur = 
try!(::std::time::SystemTime::now().duration_since(::std::time::UNIX_EPOCH)
                 .map_err(|e| Error::failed(format!("{}", e))));
        // Milliseconds since the UNIX epoch.
        let date_added = dur.as_secs() * 1000 + (dur.subsec_nanos() / 1000000) as u64;

        // Final location of the persisted metadata: <sturdyref_dir>/<token>.
        let mut token_path = ::std::path::PathBuf::new();
        token_path.push(self.sturdyref_dir.clone());
        token_path.push(token.clone());

        // Stage the write in the tmp dir so a crash can never leave a
        // partially-written file at the final path.
        let mut temp_path = ::std::path::PathBuf::new();
        temp_path.push(self.tmp_dir.clone());
        temp_path.push(format!("{}.uploading", token));

        let mut writer = try!(::std::fs::File::create(&temp_path));
        let mut message = ::capnp::message::Builder::new_default();
        {
            let mut metadata: ui_view_metadata::Builder = message.init_root();
            metadata.set_title(&title);
            metadata.set_date_added(date_added);
            if let Some(ref s) = added_by {
                metadata.set_added_by(s);
            }
        }
        try!(::capnp::serialize::write_message(&mut writer, &message));

        // Flush to durable storage *before* the rename. (Previously sync_all()
        // ran after the rename, leaving a crash window in which the file could
        // appear at its final path with unflushed contents.)
        try!(writer.sync_all());
        try!(::std::fs::rename(temp_path, token_path));

        let entry = SavedUiViewData {
            title: title,
            date_added: date_added,
            added_by: added_by,
        };

        // Notify subscribers and record the entry, upholding the invariant
        // documented on the `views` field.
        self.send_action_to_subscribers(Action::Insert {
            token: token.clone(),
            data: entry.clone(),
        });
        self.views.insert(token, entry);
        Ok(())
    }

    /// Serializes `action` to JSON and pushes it to every connected websocket
    /// subscriber. Send failures are handled by the task reaper.
    fn send_action_to_subscribers(&mut self, action: Action) {
        let json_string = action.to_json();
        for (_, sub) in &self.subscribers {
            let mut req = sub.send_bytes_request();
            web_socket::encode_text_message(req.get(), &json_string);
            self.tasks.add(req.send().promise.map(|_| Ok(())));
        }
    }

    /// Deletes the persisted sturdyref file for `token` (a missing file is not
    /// an error), drops it from the in-memory map, and notifies subscribers.
    fn remove(&mut self, token: &str) -> Result<(), Error> {
        let mut path = self.sturdyref_dir.clone();
        path.push(token);
        if let Err(e) = ::std::fs::remove_file(path) {
            if e.kind() != ::std::io::ErrorKind::NotFound {
                return Err(e.into())
            }
        }
        self.send_action_to_subscribers(Action::Remove { token: token.into() });
        self.views.remove(token);
        Ok(())
    }

    /// Registers a new websocket subscriber and replays the current state
    /// (write permission, user id, description, saved views, view infos) to it.
    fn new_subscribed_websocket(set: &Rc<RefCell<SavedUiViewSet>>,
                                client_stream: web_socket_stream::Client,
                                can_write: bool,
                                user_id: Option<String>,
                                timer: &::gjio::Timer)
                                -> web_socket_stream::Client
    {
        fn
send_action(task: Promise<(), Error>, client_stream: &web_socket_stream::Client, action: Action) -> Promise<(), Error> { let json_string = action.to_json(); let mut req = client_stream.send_bytes_request(); web_socket::encode_text_message(req.get(), &json_string); let promise = req.send().promise.map(|_| Ok(())); task.then(|_| promise) } let id = set.borrow().next_id; set.borrow_mut().next_id = id + 1; set.borrow_mut().subscribers.insert(id, client_stream.clone()); let mut task = Promise::ok(()); task = send_action(task, &client_stream, Action::CanWrite(can_write)); task = send_action(task, &client_stream, Action::UserId(user_id)); task = send_action(task, &client_stream, Action::Description(set.borrow().description.clone())); for (t, v) in &set.borrow().views { task = send_action( task, &client_stream, Action::Insert { token: t.clone(), data: v.clone() } ); } for (t, vi) in &set.borrow().view_infos { task = send_action( task, &client_stream, Action::ViewInfo { token: t.clone(), data: vi.clone(), } ); } set.borrow_mut().tasks.add(task); web_socket_stream::ToClient::new( web_socket::Adapter::new( WebSocketStream::new(id, set.clone()), client_stream, timer.clone())).from_server::<::capnp_rpc::Server>() } } const ADD_GRAIN_ACTIVITY_INDEX: u16 = 0; const REMOVE_GRAIN_ACTIVITY_INDEX: u16 = 1; const EDIT_DESCRIPTION_ACTIVITY_INDEX: u16 = 2; pub struct WebSession { timer: ::gjio::Timer, can_write: bool, sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>, context: session_context::Client, saved_ui_views: Rc<RefCell<SavedUiViewSet>>, identity_id: Option<String>, } impl WebSession { pub fn new(timer: ::gjio::Timer, user_info: user_info::Reader, context: session_context::Client, _params: web_session::params::Reader, sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>, saved_ui_views: Rc<RefCell<SavedUiViewSet>>) -> ::capnp::Result<WebSession> { // Permission #0 is "write". Check if bit 0 in the PermissionSet is set. 
        let permissions = try!(user_info.get_permissions());
        let can_write = permissions.len() > 0 && permissions.get(0);

        // Identity id is absent for anonymous users; store it hex-encoded.
        let identity_id = if user_info.has_identity_id() {
            Some(hex::ToHex::to_hex(try!(user_info.get_identity_id())))
        } else {
            None
        };

        Ok(WebSession {
            timer: timer,
            can_write: can_write,
            sandstorm_api: sandstorm_api,
            context: context,
            saved_ui_views: saved_ui_views,
            identity_id: identity_id,
        })

        // `UserInfo` is defined in `sandstorm/grain.capnp` and contains info like:
        // - A stable ID for the user, so you can correlate sessions from the same user.
        // - The user's display name, e.g. "Mark Miller", useful for identifying the user to
        //   other users.
        // - The user's permissions (seen above).

        // `WebSession::Params` is defined in `sandstorm/web-session.capnp` and contains info
        // like:
        // - The hostname where the grain was mapped for this user. Every time a user opens a
        //   grain, it is mapped at a new random hostname for security reasons.
        // - The user's User-Agent and Accept-Languages headers.

        // `SessionContext` is defined in `sandstorm/grain.capnp` and implements callbacks for
        // sharing/access control and service publishing/discovery.
    }
}

// No ui_session methods need overriding; WebSession only speaks web_session.
impl ui_session::Server for WebSession {}

impl web_session::Server for WebSession {
    // Serves the app shell at "/" and the two static assets; everything else
    // is a 404.
    fn get(&mut self,
           params: web_session::GetParams,
           mut results: web_session::GetResults)
           -> Promise<(), Error>
    {
        // HTTP GET request.
let path = pry!(pry!(params.get()).get_path()); pry!(self.require_canonical_path(path)); if path == "" { let text = "<!DOCTYPE html>\ <html><head>\ <link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\">\ <script type=\"text/javascript\" src=\"script.js\" async></script> </head><body><div id=\"main\"></div></body></html>"; let mut content = results.get().init_content(); content.set_mime_type("text/html; charset=UTF-8"); content.init_body().set_bytes(text.as_bytes()); Promise::ok(()) } else if path == "script.js" { self.read_file("/script.js.gz", results, "text/javascript; charset=UTF-8", Some("gzip")) } else if path == "style.css" { self.read_file("/style.css.gz", results, "text/css; charset=UTF-8", Some("gzip")) } else { let mut error = results.get().init_client_error(); error.set_status_code(web_session::response::ClientErrorCode::NotFound); Promise::ok(()) } } fn post(&mut self, params: web_session::PostParams, mut results: web_session::PostResults) -> Promise<(), Error> { let path = { let path = pry!(pry!(params.get()).get_path()); pry!(self.require_canonical_path(path)); path.to_string() }; if path.starts_with("token/") { self.receive_request_token(path[6..].to_string(), params, results) } else if path.starts_with("offer/") { let token = path[6..].to_string(); let title = match self.saved_ui_views.borrow().get_saved_data(&token) { None => { let mut error = results.get().init_client_error(); error.set_status_code(web_session::response::ClientErrorCode::NotFound); return Promise::ok(()) } Some(saved_ui_view) => saved_ui_view.title.to_string(), }; self.offer_ui_view(token, title, params, results) } else { let mut error = results.get().init_client_error(); error.set_status_code(web_session::response::ClientErrorCode::NotFound); Promise::ok(()) } } fn put(&mut self, params: web_session::PutParams, mut results: web_session::PutResults) -> Promise<(), Error> { // HTTP PUT request. 
let params = pry!(params.get()); let path = pry!(params.get_path()); pry!(self.require_canonical_path(path)); if !self.can_write { results.get().init_client_error() .set_status_code(web_session::response::ClientErrorCode::Forbidden); Promise::ok(()) } else if path == "description" { let content = pry!(pry!(params.get_content()).get_content()); pry!(self.saved_ui_views.borrow_mut().update_description(content)); let mut req = self.context.activity_request(); req.get().init_event().set_type(EDIT_DESCRIPTION_ACTIVITY_INDEX); req.send().promise.then(move |_| { results.get().init_no_content(); Promise::ok(()) }) } else { results.get().init_client_error() .set_status_code(web_session::response::ClientErrorCode::Forbidden); Promise::ok(()) } } fn delete(&mut self, params: web_session::DeleteParams, mut results: web_session::DeleteResults) -> Promise<(), Error> { // HTTP DELETE request. let path = pry!(pry!(params.get()).get_path()); pry!(self.require_canonical_path(path)); if !path.starts_with("sturdyref/") { return Promise::err(Error::failed("DELETE only supported under sturdyref/".to_string())); } if !self.can_write { results.get().init_client_error() .set_status_code(web_session::response::ClientErrorCode::Forbidden); Promise::ok(()) } else { let token_str = &path[10..]; let binary_token = match base64::FromBase64::from_base64(token_str) { Ok(b) => b, Err(e) => { results.get().init_client_error().set_description_html(&format!("{}", e)[..]); return Promise::ok(()) } }; pry!(self.saved_ui_views.borrow_mut().remove(token_str)); let context = self.context.clone(); let mut req = self.sandstorm_api.drop_request(); req.get().set_token(&binary_token); req.send().promise.then_else(move |_| { // then_else() because drop() is currently broken. 
:( let mut req = context.activity_request(); req.get().init_event().set_type(REMOVE_GRAIN_ACTIVITY_INDEX); req.send().promise.then(move |_| { results.get().init_no_content(); Promise::ok(()) }) }) } } fn open_web_socket(&mut self, params: web_session::OpenWebSocketParams, mut results: web_session::OpenWebSocketResults) -> Promise<(), Error> { let client_stream = pry!(pry!(params.get()).get_client_stream()); results.get().set_server_stream( SavedUiViewSet::new_subscribed_websocket( &self.saved_ui_views, client_stream, self.can_write, self.identity_id.clone(), &self.timer)); Promise::ok(()) } } fn fill_in_client_error(mut results: web_session::PostResults, e: Error) { let mut client_error = results.get().init_client_error(); client_error.set_description_html(&format!("{}", e)[..]); } impl WebSession { fn offer_ui_view(&mut self, text_token: String, title: String, _params: web_session::PostParams, mut results: web_session::PostResults) -> Promise<(), Error> { let token = match base64::FromBase64::from_base64(&text_token[..]) { Ok(b) => b, Err(e) => return Promise::err(Error::failed(format!("{}", e))), }; let session_context = self.context.clone(); let set = self.saved_ui_views.clone(); let mut req = self.sandstorm_api.restore_request(); req.get().set_token(&token); req.send().promise.then_else(move |response| match response { Ok(v) => { let sealed_ui_view: ui_view::Client = pry!(pry!(v.get()).get_cap().get_as_capability()); let mut req = session_context.offer_request(); req.get().get_cap().set_as_capability(sealed_ui_view.client.hook); { use capnp::traits::HasTypeId; let tags = req.get().init_descriptor().init_tags(1); let mut tag = tags.get(0); tag.set_id(ui_view::Client::type_id()); let mut value: ui_view::powerbox_tag::Builder = tag.get_value().init_as(); value.set_title(&title); } req.send().promise.map(|_| Ok(())) } Err(e) => { set.borrow_mut().view_infos.insert(text_token.clone(), Err(e.clone())); set.borrow_mut().send_action_to_subscribers(Action::ViewInfo { 
token: text_token, data: Err(e), }); Promise::ok(()) } }).then_else(move |r| match r { Ok(_) => { results.get().init_no_content(); Promise::ok(()) } Err(e) => { fill_in_client_error(results, e); Promise::ok(()) } }) } fn read_powerbox_tag(&mut self, decoded_content: Vec<u8>) -> ::capnp::Result<String> { let mut cursor = ::std::io::Cursor::new(decoded_content); let message = try!(::capnp::serialize_packed::read_message(&mut cursor, Default::default())); let desc: powerbox_descriptor::Reader = try!(message.get_root()); let tags = try!(desc.get_tags()); if tags.len() == 0 { Err(Error::failed("no powerbox tag".into())) } else { let value: ui_view::powerbox_tag::Reader = try!(tags.get(0).get_value().get_as()); Ok(try!(value.get_title()).into()) } } fn receive_request_token(&mut self, token: String, params: web_session::PostParams, mut results: web_session::PostResults) -> Promise<(), Error> { let content = pry!(pry!(pry!(params.get()).get_content()).get_content()); let decoded_content = match base64::FromBase64::from_base64(content) { Ok(c) => c, Err(_) => { fill_in_client_error(results, Error::failed("failed to convert from base64".into())); return Promise::ok(()) } }; let grain_title: String = match self.read_powerbox_tag(decoded_content) { Ok(t) => t, Err(e) => { fill_in_client_error(results, e); return Promise::ok(()); } }; // now let's save this thing into an actual uiview sturdyref let mut req = self.context.claim_request_request(); let sandstorm_api = self.sandstorm_api.clone(); req.get().set_request_token(&token[..]); let saved_ui_views = self.saved_ui_views.clone(); let identity_id = self.identity_id.clone(); let do_stuff = req.send().promise.then(move |response| { let sealed_ui_view: ui_view::Client = pry!(pry!(response.get()).get_cap().get_as_capability()); let mut req = sandstorm_api.save_request(); req.get().get_cap().set_as_capability(sealed_ui_view.client.hook); { let mut save_label = req.get().init_label(); save_label.set_default_text(&format!("grain 
with title: {}", grain_title)[..]); } req.send().promise.map(move |response| { let binary_token = try!(try!(response.get()).get_token()); let token = base64::ToBase64::to_base64(binary_token, base64::URL_SAFE); try!(saved_ui_views.borrow_mut().insert(token.clone(), grain_title, identity_id)); try!(SavedUiViewSet::retrieve_view_info(&saved_ui_views, token)); Ok(()) }) }); let context = self.context.clone(); do_stuff.then_else(move |r| match r { Ok(()) => { let mut req = context.activity_request(); req.get().init_event().set_type(ADD_GRAIN_ACTIVITY_INDEX); req.send().promise.then(move |_| { let mut _content = results.get().init_content(); Promise::ok(()) }) } Err(e) => { let mut error = results.get().init_client_error(); error.set_description_html(&format!("error: {:?}", e)); Promise::ok(()) } }) } fn require_canonical_path(&self, path: &str) -> Result<(), Error> { // Require that the path doesn't contain "." or ".." or consecutive slashes, to prevent path // injection attacks. // // Note that such attacks wouldn't actually accomplish much since everything outside /var // is a read-only filesystem anyway, containing the app package contents which are non-secret. for (idx, component) in path.split_terminator("/").enumerate() { if component == "." || component == ".." 
|| (component == "" && idx > 0) { return Err(Error::failed(format!("non-canonical path: {:?}", path))); } } Ok(()) } fn read_file(&self, filename: &str, mut results: web_session::GetResults, content_type: &str, encoding: Option<&str>) -> Promise<(), Error> { match ::std::fs::File::open(filename) { Ok(mut f) => { let size = pry!(f.metadata()).len(); let mut content = results.get().init_content(); content.set_status_code(web_session::response::SuccessCode::Ok); content.set_mime_type(content_type); encoding.map(|enc| content.set_encoding(enc)); let mut body = content.init_body().init_bytes(size as u32); pry!(::std::io::copy(&mut f, &mut body)); Promise::ok(()) } Err(ref e) if e.kind() == ::std::io::ErrorKind::NotFound => { let mut error = results.get().init_client_error(); error.set_status_code(web_session::response::ClientErrorCode::NotFound); Promise::ok(()) } Err(e) => { Promise::err(e.into()) } } } } pub struct UiView { timer: ::gjio::Timer, sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned>, saved_ui_views: Rc<RefCell<SavedUiViewSet>>, } impl UiView { fn new(timer: ::gjio::Timer, client: sandstorm_api::Client<::capnp::any_pointer::Owned>, saved_ui_views: Rc<RefCell<SavedUiViewSet>>) -> UiView { UiView { timer: timer, sandstorm_api: client, saved_ui_views: saved_ui_views, } } } impl ui_view::Server for UiView { fn get_view_info(&mut self, _params: ui_view::GetViewInfoParams, mut results: ui_view::GetViewInfoResults) -> Promise<(), Error> { let mut view_info = results.get(); // Define a "write" permission, and then define roles "editor" and "viewer" where only // "editor" has the "write" permission. This will allow people to share read-only. 
{ let perms = view_info.borrow().init_permissions(1); let mut write = perms.get(0); write.set_name("write"); write.init_title().set_default_text("write"); } { let mut roles = view_info.borrow().init_roles(2); { let mut editor = roles.borrow().get(0); editor.borrow().init_title().set_default_text("editor"); editor.borrow().init_verb_phrase().set_default_text("can edit"); editor.init_permissions(1).set(0, true); // has "write" permission } { let mut viewer = roles.get(1); viewer.set_default(true); viewer.borrow().init_title().set_default_text("viewer"); viewer.borrow().init_verb_phrase().set_default_text("can view"); viewer.init_permissions(1).set(0, false); // does not have "write" permission } } { let mut event_types = view_info.init_event_types(3); { let mut added = event_types.borrow().get(ADD_GRAIN_ACTIVITY_INDEX as u32); added.set_name("add"); added.borrow().init_verb_phrase().set_default_text("grain added"); } { let mut removed = event_types.borrow().get(REMOVE_GRAIN_ACTIVITY_INDEX as u32); removed.set_name("remove"); removed.borrow().init_verb_phrase().set_default_text("grain removed"); } { let mut removed = event_types.borrow().get(EDIT_DESCRIPTION_ACTIVITY_INDEX as u32); removed.set_name("description"); removed.borrow().init_verb_phrase().set_default_text("description edited"); } } Promise::ok(()) } fn new_session(&mut self, params: ui_view::NewSessionParams, mut results: ui_view::NewSessionResults) -> Promise<(), Error> { use ::capnp::traits::HasTypeId; let params = pry!(params.get()); if params.get_session_type() != web_session::Client::type_id() { return Promise::err(Error::failed("unsupported session type".to_string())); } let session = pry!(WebSession::new( self.timer.clone(), pry!(params.get_user_info()), pry!(params.get_context()), pry!(params.get_session_params().get_as()), self.sandstorm_api.clone(), self.saved_ui_views.clone())); let client: web_session::Client = web_session::ToClient::new(session).from_server::<::capnp_rpc::Server>(); // We need 
to do this silly dance to upcast. results.get().set_session(ui_session::Client { client : client.client}); Promise::ok(()) } } pub fn main() -> Result<(), Box<::std::error::Error>> { EventLoop::top_level(move |wait_scope| { let mut event_port = try!(::gjio::EventPort::new()); let network = event_port.get_network(); // Sandstorm launches us with a connection on file descriptor 3. let stream = try!(unsafe { network.wrap_raw_socket_descriptor(3) }); let (p, f) = Promise::and_fulfiller(); let sandstorm_api: sandstorm_api::Client<::capnp::any_pointer::Owned> = ::capnp_rpc::new_promise_client(p); let saved_uiviews = try!(SavedUiViewSet::new("/var/tmp", "/var/sturdyrefs", sandstorm_api.clone())); let uiview = UiView::new( event_port.get_timer(), sandstorm_api, saved_uiviews); let client = ui_view::ToClient::new(uiview).from_server::<::capnp_rpc::Server>(); let network = twoparty::VatNetwork::new(stream.clone(), stream, rpc_twoparty_capnp::Side::Client, Default::default()); let mut rpc_system = RpcSystem::new(Box::new(network), Some(client.client)); let cap = rpc_system.bootstrap::<sandstorm_api::Client<::capnp::any_pointer::Owned>>( ::capnp_rpc::rpc_twoparty_capnp::Side::Server); f.fulfill(cap.client); Promise::never_done().wait(wait_scope, &mut event_port) }) }
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Element nodes. use dom::activation::Activatable; use dom::attr::{Attr, AttrSettingType, AttrHelpers, AttrHelpersForLayout}; use dom::attr::AttrValue; use dom::namednodemap::NamedNodeMap; use dom::bindings::cell::DOMRefCell; use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods; use dom::bindings::codegen::Bindings::ElementBinding; use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods; use dom::bindings::codegen::Bindings::EventBinding::EventMethods; use dom::bindings::codegen::Bindings::HTMLInputElementBinding::HTMLInputElementMethods; use dom::bindings::codegen::Bindings::NamedNodeMapBinding::NamedNodeMapMethods; use dom::bindings::codegen::InheritTypes::{ElementCast, ElementDerived, EventTargetCast}; use dom::bindings::codegen::InheritTypes::{HTMLBodyElementDerived, HTMLInputElementCast}; use dom::bindings::codegen::InheritTypes::{HTMLInputElementDerived, HTMLTableElementCast}; use dom::bindings::codegen::InheritTypes::{HTMLTableElementDerived, HTMLTableCellElementDerived}; use dom::bindings::codegen::InheritTypes::{HTMLTableRowElementDerived, HTMLTextAreaElementDerived}; use dom::bindings::codegen::InheritTypes::{HTMLTableSectionElementDerived, NodeCast}; use dom::bindings::error::{ErrorResult, Fallible}; use dom::bindings::error::Error::{NamespaceError, InvalidCharacter, Syntax}; use dom::bindings::js::{MutNullableJS, JS, JSRef, Temporary, TemporaryPushable}; use dom::bindings::js::{OptionalRootable, Root}; use dom::bindings::utils::{Reflectable, Reflector}; use dom::bindings::utils::xml_name_type; use dom::bindings::utils::XMLName::{QName, Name, InvalidXMLName}; use dom::create::create_element; use dom::domrect::DOMRect; use dom::domrectlist::DOMRectList; use dom::document::{Document, DocumentHelpers, LayoutDocumentHelpers}; use 
dom::domtokenlist::DOMTokenList; use dom::event::Event; use dom::eventtarget::{EventTarget, EventTargetTypeId, EventTargetHelpers}; use dom::htmlbodyelement::{HTMLBodyElement, HTMLBodyElementHelpers}; use dom::htmlcollection::HTMLCollection; use dom::htmlinputelement::{HTMLInputElement, RawLayoutHTMLInputElementHelpers, HTMLInputElementHelpers}; use dom::htmlserializer::serialize; use dom::htmltableelement::{HTMLTableElement, HTMLTableElementHelpers}; use dom::htmltablecellelement::{HTMLTableCellElement, HTMLTableCellElementHelpers}; use dom::htmltablerowelement::{HTMLTableRowElement, HTMLTableRowElementHelpers}; use dom::htmltablesectionelement::{HTMLTableSectionElement, HTMLTableSectionElementHelpers}; use dom::htmltextareaelement::{HTMLTextAreaElement, RawLayoutHTMLTextAreaElementHelpers}; use dom::node::{CLICK_IN_PROGRESS, LayoutNodeHelpers, Node, NodeHelpers, NodeTypeId}; use dom::node::{NodeIterator, document_from_node, NodeDamage}; use dom::node::{window_from_node}; use dom::nodelist::NodeList; use dom::virtualmethods::{VirtualMethods, vtable_for}; use devtools_traits::AttrInfo; use style::{mod, StylesheetOrigin, SimpleColorAttribute, UnsignedIntegerAttribute}; use style::{IntegerAttribute, LengthAttribute, ParserContext, matches}; use servo_util::namespace; use servo_util::str::{DOMString, LengthOrPercentageOrAuto}; use cssparser::RGBA; use std::ascii::AsciiExt; use std::cell::{Ref, RefMut}; use std::default::Default; use std::mem; use std::sync::Arc; use string_cache::{Atom, Namespace, QualName}; use url::UrlParser; #[dom_struct] pub struct Element { node: Node, local_name: Atom, namespace: Namespace, prefix: Option<DOMString>, attrs: DOMRefCell<Vec<JS<Attr>>>, style_attribute: DOMRefCell<Option<style::PropertyDeclarationBlock>>, attr_list: MutNullableJS<NamedNodeMap>, class_list: MutNullableJS<DOMTokenList>, } impl ElementDerived for EventTarget { #[inline] fn is_element(&self) -> bool { match *self.type_id() { 
EventTargetTypeId::Node(NodeTypeId::Element(_)) => true, _ => false } } } impl Reflectable for Element { fn reflector<'a>(&'a self) -> &'a Reflector { self.node.reflector() } } #[deriving(PartialEq, Show)] #[jstraceable] pub enum ElementTypeId { HTMLElement, HTMLAnchorElement, HTMLAppletElement, HTMLAreaElement, HTMLAudioElement, HTMLBaseElement, HTMLBRElement, HTMLBodyElement, HTMLButtonElement, HTMLCanvasElement, HTMLDataElement, HTMLDataListElement, HTMLDirectoryElement, HTMLDListElement, HTMLDivElement, HTMLEmbedElement, HTMLFieldSetElement, HTMLFontElement, HTMLFormElement, HTMLFrameElement, HTMLFrameSetElement, HTMLHRElement, HTMLHeadElement, HTMLHeadingElement, HTMLHtmlElement, HTMLIFrameElement, HTMLImageElement, HTMLInputElement, HTMLLabelElement, HTMLLegendElement, HTMLLinkElement, HTMLLIElement, HTMLMapElement, HTMLMediaElement, HTMLMetaElement, HTMLMeterElement, HTMLModElement, HTMLObjectElement, HTMLOListElement, HTMLOptGroupElement, HTMLOptionElement, HTMLOutputElement, HTMLParagraphElement, HTMLParamElement, HTMLPreElement, HTMLProgressElement, HTMLQuoteElement, HTMLScriptElement, HTMLSelectElement, HTMLSourceElement, HTMLSpanElement, HTMLStyleElement, HTMLTableElement, HTMLTableCaptionElement, HTMLTableDataCellElement, HTMLTableHeaderCellElement, HTMLTableColElement, HTMLTableRowElement, HTMLTableSectionElement, HTMLTemplateElement, HTMLTextAreaElement, HTMLTimeElement, HTMLTitleElement, HTMLTrackElement, HTMLUListElement, HTMLVideoElement, HTMLUnknownElement, Element, } #[deriving(PartialEq)] pub enum ElementCreator { ParserCreated, ScriptCreated, } // // Element methods // impl Element { pub fn create(name: QualName, prefix: Option<DOMString>, document: JSRef<Document>, creator: ElementCreator) -> Temporary<Element> { create_element(name, prefix, document, creator) } pub fn new_inherited(type_id: ElementTypeId, local_name: DOMString, namespace: Namespace, prefix: Option<DOMString>, document: JSRef<Document>) -> Element { Element { node: 
Node::new_inherited(NodeTypeId::Element(type_id), document), local_name: Atom::from_slice(local_name.as_slice()), namespace: namespace, prefix: prefix, attrs: DOMRefCell::new(vec!()), attr_list: Default::default(), class_list: Default::default(), style_attribute: DOMRefCell::new(None), } } pub fn new(local_name: DOMString, namespace: Namespace, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<Element> { Node::reflect_node(box Element::new_inherited(ElementTypeId::Element, local_name, namespace, prefix, document), document, ElementBinding::Wrap) } } pub trait RawLayoutElementHelpers { unsafe fn get_attr_val_for_layout<'a>(&'a self, namespace: &Namespace, name: &Atom) -> Option<&'a str>; unsafe fn get_attr_vals_for_layout<'a>(&'a self, name: &Atom) -> Vec<&'a str>; unsafe fn get_attr_atom_for_layout(&self, namespace: &Namespace, name: &Atom) -> Option<Atom>; unsafe fn has_class_for_layout(&self, name: &Atom) -> bool; unsafe fn get_classes_for_layout(&self) -> Option<&'static [Atom]>; unsafe fn get_length_attribute_for_layout(&self, length_attribute: LengthAttribute) -> LengthOrPercentageOrAuto; unsafe fn get_integer_attribute_for_layout(&self, integer_attribute: IntegerAttribute) -> Option<i32>; unsafe fn get_checked_state_for_layout(&self) -> bool; unsafe fn get_indeterminate_state_for_layout(&self) -> bool; unsafe fn get_unsigned_integer_attribute_for_layout(&self, attribute: UnsignedIntegerAttribute) -> Option<u32>; unsafe fn get_simple_color_attribute_for_layout(&self, attribute: SimpleColorAttribute) -> Option<RGBA>; fn local_name<'a>(&'a self) -> &'a Atom; fn namespace<'a>(&'a self) -> &'a Namespace; fn style_attribute<'a>(&'a self) -> &'a DOMRefCell<Option<style::PropertyDeclarationBlock>>; } #[inline] unsafe fn get_attr_for_layout<'a>(elem: &'a Element, namespace: &Namespace, name: &Atom) -> Option<&'a JS<Attr>> { // cast to point to T in RefCell<T> directly let attrs: *const Vec<JS<Attr>> = mem::transmute(&elem.attrs); 
(*attrs).iter().find(|attr: & &JS<Attr>| { let attr = attr.unsafe_get(); *name == (*attr).local_name_atom_forever() && (*attr).namespace() == namespace }) } impl RawLayoutElementHelpers for Element { #[inline] unsafe fn get_attr_val_for_layout<'a>(&'a self, namespace: &Namespace, name: &Atom) -> Option<&'a str> { get_attr_for_layout(self, namespace, name).map(|attr| { let attr = attr.unsafe_get(); (*attr).value_ref_forever() }) } #[inline] unsafe fn get_attr_vals_for_layout<'a>(&'a self, name: &Atom) -> Vec<&'a str> { let attrs = self.attrs.borrow_for_layout(); (*attrs).iter().filter_map(|attr: &JS<Attr>| { let attr = attr.unsafe_get(); if *name == (*attr).local_name_atom_forever() { Some((*attr).value_ref_forever()) } else { None } }).collect() } #[inline] unsafe fn get_attr_atom_for_layout(&self, namespace: &Namespace, name: &Atom) -> Option<Atom> { let attrs = self.attrs.borrow_for_layout(); (*attrs).iter().find(|attr: & &JS<Attr>| { let attr = attr.unsafe_get(); *name == (*attr).local_name_atom_forever() && (*attr).namespace() == namespace }).and_then(|attr| { let attr = attr.unsafe_get(); (*attr).value_atom_forever() }) } #[inline] unsafe fn has_class_for_layout(&self, name: &Atom) -> bool { let attrs = self.attrs.borrow_for_layout(); (*attrs).iter().find(|attr: & &JS<Attr>| { let attr = attr.unsafe_get(); (*attr).local_name_atom_forever() == atom!("class") }).map_or(false, |attr| { let attr = attr.unsafe_get(); (*attr).value_tokens_forever().map(|tokens| { tokens.iter().any(|atom| atom == name) }) }.take().unwrap()) } #[inline] unsafe fn get_classes_for_layout(&self) -> Option<&'static [Atom]> { let attrs = self.attrs.borrow_for_layout(); (*attrs).iter().find(|attr: & &JS<Attr>| { let attr = attr.unsafe_get(); (*attr).local_name_atom_forever() == atom!("class") }).and_then(|attr| { let attr = attr.unsafe_get(); (*attr).value_tokens_forever() }) } #[inline] unsafe fn get_length_attribute_for_layout(&self, length_attribute: LengthAttribute) -> 
LengthOrPercentageOrAuto { match length_attribute { LengthAttribute::Width => { if self.is_htmltableelement() { let this: &HTMLTableElement = mem::transmute(self); this.get_width() } else if self.is_htmltablecellelement() { let this: &HTMLTableCellElement = mem::transmute(self); this.get_width() } else { panic!("I'm not a table or table cell!") } } } } #[inline] unsafe fn get_integer_attribute_for_layout(&self, integer_attribute: IntegerAttribute) -> Option<i32> { match integer_attribute { IntegerAttribute::Size => { if !self.is_htmlinputelement() { panic!("I'm not a form input!") } let this: &HTMLInputElement = mem::transmute(self); Some(this.get_size_for_layout() as i32) } IntegerAttribute::Cols => { if !self.is_htmltextareaelement() { panic!("I'm not a textarea element!") } let this: &HTMLTextAreaElement = mem::transmute(self); Some(this.get_cols_for_layout() as i32) } IntegerAttribute::Rows => { if !self.is_htmltextareaelement() { panic!("I'm not a textarea element!") } let this: &HTMLTextAreaElement = mem::transmute(self); Some(this.get_rows_for_layout() as i32) } } } #[inline] #[allow(unrooted_must_root)] unsafe fn get_checked_state_for_layout(&self) -> bool { // TODO option and menuitem can also have a checked state. 
if !self.is_htmlinputelement() { return false } let this: &HTMLInputElement = mem::transmute(self); this.get_checked_state_for_layout() } #[inline] #[allow(unrooted_must_root)] unsafe fn get_indeterminate_state_for_layout(&self) -> bool { // TODO progress elements can also be matched with :indeterminate if !self.is_htmlinputelement() { return false } let this: &HTMLInputElement = mem::transmute(self); this.get_indeterminate_state_for_layout() } unsafe fn get_unsigned_integer_attribute_for_layout(&self, attribute: UnsignedIntegerAttribute) -> Option<u32> { match attribute { UnsignedIntegerAttribute::Border => { if self.is_htmltableelement() { let this: &HTMLTableElement = mem::transmute(self); this.get_border() } else { // Don't panic since `:-servo-nonzero-border` can cause this to be called on // arbitrary elements. None } } UnsignedIntegerAttribute::ColSpan => { if self.is_htmltablecellelement() { let this: &HTMLTableCellElement = mem::transmute(self); this.get_colspan() } else { // Don't panic since `display` can cause this to be called on arbitrary // elements. 
None } } } } #[inline] #[allow(unrooted_must_root)] unsafe fn get_simple_color_attribute_for_layout(&self, attribute: SimpleColorAttribute) -> Option<RGBA> { match attribute { SimpleColorAttribute::BgColor => { if self.is_htmlbodyelement() { let this: &HTMLBodyElement = mem::transmute(self); this.get_background_color() } else if self.is_htmltableelement() { let this: &HTMLTableElement = mem::transmute(self); this.get_background_color() } else if self.is_htmltablecellelement() { let this: &HTMLTableCellElement = mem::transmute(self); this.get_background_color() } else if self.is_htmltablerowelement() { let this: &HTMLTableRowElement = mem::transmute(self); this.get_background_color() } else if self.is_htmltablesectionelement() { let this: &HTMLTableSectionElement = mem::transmute(self); this.get_background_color() } else { None } } } } // Getters used in components/layout/wrapper.rs fn local_name<'a>(&'a self) -> &'a Atom { &self.local_name } fn namespace<'a>(&'a self) -> &'a Namespace { &self.namespace } fn style_attribute<'a>(&'a self) -> &'a DOMRefCell<Option<style::PropertyDeclarationBlock>> { &self.style_attribute } } pub trait LayoutElementHelpers { unsafe fn html_element_in_html_document_for_layout(&self) -> bool; unsafe fn has_attr_for_layout(&self, namespace: &Namespace, name: &Atom) -> bool; } impl LayoutElementHelpers for JS<Element> { #[inline] unsafe fn html_element_in_html_document_for_layout(&self) -> bool { if (*self.unsafe_get()).namespace != ns!(HTML) { return false } let node: JS<Node> = self.transmute_copy(); node.owner_doc_for_layout().is_html_document_for_layout() } unsafe fn has_attr_for_layout(&self, namespace: &Namespace, name: &Atom) -> bool { get_attr_for_layout(&*self.unsafe_get(), namespace, name).is_some() } } #[deriving(PartialEq)] pub enum StylePriority { Important, Normal, } pub trait ElementHelpers<'a> { fn html_element_in_html_document(self) -> bool; fn local_name(self) -> &'a Atom; fn parsed_name(self, name: DOMString) -> 
DOMString; fn namespace(self) -> &'a Namespace; fn prefix(self) -> &'a Option<DOMString>; fn attrs(&self) -> Ref<Vec<JS<Attr>>>; fn attrs_mut(&self) -> RefMut<Vec<JS<Attr>>>; fn style_attribute(self) -> &'a DOMRefCell<Option<style::PropertyDeclarationBlock>>; fn summarize(self) -> Vec<AttrInfo>; fn is_void(self) -> bool; fn remove_inline_style_property(self, property: DOMString); fn update_inline_style(self, property_decl: style::PropertyDeclaration, style_priority: StylePriority); fn get_inline_style_declaration(self, property: &Atom) -> Option<style::PropertyDeclaration>; fn get_important_inline_style_declaration(self, property: &Atom) -> Option<style::PropertyDeclaration>; } impl<'a> ElementHelpers<'a> for JSRef<'a, Element> { fn html_element_in_html_document(self) -> bool { let node: JSRef<Node> = NodeCast::from_ref(self); self.namespace == ns!(HTML) && node.is_in_html_doc() } fn local_name(self) -> &'a Atom { &self.extended_deref().local_name } // https://dom.spec.whatwg.org/#concept-element-attributes-get-by-name fn parsed_name(self, name: DOMString) -> DOMString { if self.html_element_in_html_document() { name.as_slice().to_ascii_lower() } else { name } } fn namespace(self) -> &'a Namespace { &self.extended_deref().namespace } fn prefix(self) -> &'a Option<DOMString> { &self.extended_deref().prefix } fn attrs(&self) -> Ref<Vec<JS<Attr>>> { self.extended_deref().attrs.borrow() } fn attrs_mut(&self) -> RefMut<Vec<JS<Attr>>> { self.extended_deref().attrs.borrow_mut() } fn style_attribute(self) -> &'a DOMRefCell<Option<style::PropertyDeclarationBlock>> { &self.extended_deref().style_attribute } fn summarize(self) -> Vec<AttrInfo> { let attrs = self.Attributes().root(); let mut i = 0; let mut summarized = vec!(); while i < attrs.Length() { let attr = attrs.Item(i).unwrap().root(); summarized.push(attr.summarize()); i += 1; } summarized } fn is_void(self) -> bool { if self.namespace != ns!(HTML) { return false } match self.local_name.as_slice() { /* List of void 
elements from http://www.whatwg.org/specs/web-apps/current-work/multipage/the-end.html#html-fragment-serialization-algorithm */ "area" | "base" | "basefont" | "bgsound" | "br" | "col" | "embed" | "frame" | "hr" | "img" | "input" | "keygen" | "link" | "menuitem" | "meta" | "param" | "source" | "track" | "wbr" => true, _ => false } } fn remove_inline_style_property(self, property: DOMString) { let mut inline_declarations = self.style_attribute.borrow_mut(); inline_declarations.as_mut().map(|declarations| { let index = declarations.normal .iter() .position(|decl| decl.name() == property); match index { Some(index) => { declarations.normal.make_unique().remove(index); return; } None => () } let index = declarations.important .iter() .position(|decl| decl.name() == property); match index { Some(index) => { declarations.important.make_unique().remove(index); return; } None => () } }); } fn update_inline_style(self, property_decl: style::PropertyDeclaration, style_priority: StylePriority) { let mut inline_declarations = self.style_attribute().borrow_mut(); if let Some(ref mut declarations) = *inline_declarations.deref_mut() { let existing_declarations = if style_priority == StylePriority::Important { declarations.important.make_unique() } else { declarations.normal.make_unique() }; for declaration in existing_declarations.iter_mut() { if declaration.name() == property_decl.name() { *declaration = property_decl; return; } } existing_declarations.push(property_decl); return; } let (important, normal) = if style_priority == StylePriority::Important { (vec!(property_decl), vec!()) } else { (vec!(), vec!(property_decl)) }; *inline_declarations = Some(style::PropertyDeclarationBlock { important: Arc::new(important), normal: Arc::new(normal), }); } fn get_inline_style_declaration(self, property: &Atom) -> Option<style::PropertyDeclaration> { let inline_declarations = self.style_attribute.borrow(); inline_declarations.as_ref().and_then(|declarations| { declarations.normal .iter() 
                        .chain(declarations.important.iter())
                        .find(|decl| decl.matches(property.as_slice()))
                        .map(|decl| decl.clone())
        })
    }

    /// Returns a clone of the `!important` inline-style declaration for
    /// `property`, if one exists (normal declarations are ignored).
    fn get_important_inline_style_declaration(self,
                                              property: &Atom)
                                              -> Option<style::PropertyDeclaration> {
        let inline_declarations = self.style_attribute.borrow();
        inline_declarations.as_ref().and_then(|declarations| {
            declarations.important
                        .iter()
                        .find(|decl| decl.matches(property.as_slice()))
                        .map(|decl| decl.clone())
        })
    }
}

/// Helpers for reading, parsing and mutating an element's attribute list.
/// Implemented for `JSRef<Element>`; the typed setters/getters back the
/// IDL attribute reflection machinery.
pub trait AttributeHandlers {
    /// Returns the attribute with given namespace and case-sensitive local
    /// name, if any.
    fn get_attribute(self, namespace: Namespace, local_name: &Atom)
                     -> Option<Temporary<Attr>>;
    /// Returns all attributes whose local name matches, regardless of namespace.
    fn get_attributes(self, local_name: &Atom) -> Vec<Temporary<Attr>>;
    /// Parser path: sets an attribute only if it is not already present.
    fn set_attribute_from_parser(self, name: QualName,
                                 value: DOMString, prefix: Option<DOMString>);
    fn set_attribute(self, name: &Atom, value: AttrValue);
    /// Script path for author-supplied names; validates the XML name.
    fn set_custom_attribute(self, name: DOMString, value: DOMString) -> ErrorResult;
    /// Core insert/replace primitive; `cb` decides which existing attr (if
    /// any) is being replaced.
    fn do_set_attribute(self, local_name: Atom, value: AttrValue,
                        name: Atom, namespace: Namespace,
                        prefix: Option<DOMString>, cb: |JSRef<Attr>| -> bool);
    fn parse_attribute(self, namespace: &Namespace, local_name: &Atom,
                       value: DOMString) -> AttrValue;
    fn remove_attribute(self, namespace: Namespace, name: &str);
    fn has_class(&self, name: &Atom) -> bool;

    fn set_atomic_attribute(self, name: &Atom, value: DOMString);

    // http://www.whatwg.org/html/#reflecting-content-attributes-in-idl-attributes
    fn has_attribute(self, name: &Atom) -> bool;
    fn set_bool_attribute(self, name: &Atom, value: bool);
    fn get_url_attribute(self, name: &Atom) -> DOMString;
    fn set_url_attribute(self, name: &Atom, value: DOMString);
    fn get_string_attribute(self, name: &Atom) -> DOMString;
    fn set_string_attribute(self, name: &Atom, value: DOMString);
    fn set_tokenlist_attribute(self, name: &Atom, value: DOMString);
    fn set_atomic_tokenlist_attribute(self, name: &Atom, tokens: Vec<Atom>);
    fn get_uint_attribute(self, name: &Atom) -> u32;
    fn set_uint_attribute(self,
                          name: &Atom, value: u32);
}

impl<'a> AttributeHandlers for JSRef<'a, Element> {
    /// Returns the attribute matching both `local_name` and `namespace`.
    fn get_attribute(self, namespace: Namespace, local_name: &Atom)
                     -> Option<Temporary<Attr>> {
        // Narrow the name matches down to the requested namespace.
        self.get_attributes(local_name).iter().map(|attr| attr.root())
            .find(|attr| *attr.namespace() == namespace)
            .map(|x| Temporary::from_rooted(*x))
    }

    /// Returns every attribute whose local name equals `local_name`
    /// (there may be one per namespace).
    fn get_attributes(self, local_name: &Atom) -> Vec<Temporary<Attr>> {
        self.attrs.borrow().iter().map(|attr| attr.root()).filter_map(|attr| {
            if *attr.local_name() == *local_name {
                Some(Temporary::from_rooted(*attr))
            } else {
                None
            }
        }).collect()
    }

    /// Parser entry point: inserts the attribute unless one with the same
    /// (local name, namespace) pair already exists.
    fn set_attribute_from_parser(self, qname: QualName, value: DOMString,
                                 prefix: Option<DOMString>) {
        // Don't set if the attribute already exists, so we can handle add_attrs_if_missing
        if self.attrs.borrow().iter().map(|attr| attr.root())
                .any(|a| *a.local_name() == qname.local && *a.namespace() == qname.ns) {
            return;
        }

        // Qualified name is "prefix:local" when a prefix is present.
        let name = match prefix {
            None => qname.local.clone(),
            Some(ref prefix) => {
                let name = format!("{:s}:{:s}", *prefix, qname.local.as_slice());
                Atom::from_slice(name.as_slice())
            },
        };
        let value = self.parse_attribute(&qname.ns, &qname.local, value);
        // `|_| false`: never treat this as replacing an existing attr.
        self.do_set_attribute(qname.local, value, name, qname.ns, prefix, |_| false)
    }

    /// Sets a no-namespace attribute. Callers must pass an already-lowercased,
    /// colon-free name (enforced by the asserts).
    fn set_attribute(self, name: &Atom, value: AttrValue) {
        assert!(name.as_slice() == name.as_slice().to_ascii_lower().as_slice());
        assert!(!name.as_slice().contains(":"));

        self.do_set_attribute(name.clone(), value, name.clone(),
            ns!(""), None, |attr| *attr.local_name() == *name);
    }

    // https://html.spec.whatwg.org/multipage/dom.html#attr-data-*
    fn set_custom_attribute(self, name: DOMString, value: DOMString) -> ErrorResult {
        // Step 1.
        match xml_name_type(name.as_slice()) {
            InvalidXMLName => return Err(InvalidCharacter),
            _ => {}
        }

        // Steps 2-5.
        let name = Atom::from_slice(name.as_slice());
        let value = self.parse_attribute(&ns!(""), &name, value);
        self.do_set_attribute(name.clone(), value, name.clone(), ns!(""), None, |attr| {
            *attr.name() == name && *attr.namespace() == ns!("")
        });
        Ok(())
    }

    /// Core attribute write: replaces the first attr accepted by `cb`, or
    /// appends a new `Attr` node when `cb` matches nothing. Fires the
    /// appropriate first-set/replaced setter hook via `set_value`.
    fn do_set_attribute(self, local_name: Atom, value: AttrValue,
                        name: Atom, namespace: Namespace,
                        prefix: Option<DOMString>, cb: |JSRef<Attr>| -> bool) {
        let idx = self.attrs.borrow().iter()
                                     .map(|attr| attr.root())
                                     .position(|attr| cb(*attr));
        let (idx, set_type) = match idx {
            Some(idx) => (idx, AttrSettingType::ReplacedAttr),
            None => {
                let window = window_from_node(self).root();
                let attr = Attr::new(*window, local_name, value.clone(),
                                     name, namespace.clone(), prefix, Some(self));
                self.attrs.borrow_mut().push_unrooted(&attr);
                (self.attrs.borrow().len() - 1, AttrSettingType::FirstSetAttr)
            }
        };

        (*self.attrs.borrow())[idx].root().set_value(set_type, value, self);
    }

    /// Parses a raw attribute string into a typed `AttrValue`. Only
    /// no-namespace attributes go through the per-element virtual parser;
    /// namespaced ones stay plain strings.
    fn parse_attribute(self, namespace: &Namespace, local_name: &Atom,
                       value: DOMString) -> AttrValue {
        if *namespace == ns!("") {
            vtable_for(&NodeCast::from_ref(self))
                .parse_plain_attribute(local_name, value)
        } else {
            AttrValue::String(value)
        }
    }

    /// Removes the first attribute whose local name matches `name`
    /// (prefix, if any, is stripped first), notifying the virtual
    /// `before_remove_attr` hook and damaging the document as needed.
    fn remove_attribute(self, namespace: Namespace, name: &str) {
        let (_, local_name) = get_attribute_parts(name);
        let local_name = Atom::from_slice(local_name);

        // NOTE(review): the position search ignores `namespace`; only the
        // hook firing below is namespace-gated — confirm this is intended.
        let idx = self.attrs.borrow().iter().map(|attr| attr.root()).position(|attr| {
            *attr.local_name() == local_name
        });

        match idx {
            None => (),
            Some(idx) => {
                if namespace == ns!("") {
                    let attr = (*self.attrs.borrow())[idx].root();
                    vtable_for(&NodeCast::from_ref(self)).before_remove_attr(*attr);
                }

                self.attrs.borrow_mut().remove(idx);

                let node: JSRef<Node> = NodeCast::from_ref(self);
                if node.is_in_doc() {
                    let document = document_from_node(self).root();
                    // Style-attribute removal only needs a style recalc;
                    // anything else may change arbitrary things.
                    if local_name == atom!("style") {
                        document.content_changed(node, NodeDamage::NodeStyleDamaged);
                    } else {
                        document.content_changed(node, NodeDamage::OtherNodeDamage);
                    }
                }
            }
        };
    }

    /// Returns true when `name` appears in the `class` attribute's token list.
    fn has_class(&self, name: &Atom) -> bool {
        self.get_attribute(ns!(""), &atom!("class")).root().map(|attr| {
            attr.value().tokens().map(|tokens| {
                tokens.iter().any(|atom| atom == name)
            }).unwrap_or(false)
        }).unwrap_or(false)
    }

    /// Sets an attribute whose value is interned as a single `Atom`
    /// (e.g. `id`). `name` must already be lowercase.
    fn set_atomic_attribute(self, name: &Atom, value: DOMString) {
        assert!(name.as_slice().eq_ignore_ascii_case(name.as_slice()));
        let value = AttrValue::from_atomic(value);
        self.set_attribute(name, value);
    }

    /// True when a no-namespace attribute named `name` exists.
    fn has_attribute(self, name: &Atom) -> bool {
        // Callers must pass a name with no uppercase ASCII characters.
        assert!(name.as_slice().chars().all(|ch| {
            !ch.is_ascii() || ch.to_ascii().to_lowercase() == ch.to_ascii()
        }));
        self.attrs.borrow().iter().map(|attr| attr.root()).any(|attr| {
            *attr.local_name() == *name && *attr.namespace() == ns!("")
        })
    }

    /// Boolean content attribute: presence means true. Setting true writes an
    /// empty string; setting false removes the attribute. No-op when the
    /// current state already matches.
    fn set_bool_attribute(self, name: &Atom, value: bool) {
        if self.has_attribute(name) == value { return; }
        if value {
            self.set_string_attribute(name, String::new());
        } else {
            self.remove_attribute(ns!(""), name.as_slice());
        }
    }

    /// Reads a URL-valued attribute, resolved against the document base URL.
    /// Returns "" when the attribute is absent or does not parse as a URL.
    fn get_url_attribute(self, name: &Atom) -> DOMString {
        assert!(name.as_slice() == name.as_slice().to_ascii_lower().as_slice());
        if !self.has_attribute(name) {
            return "".to_string();
        }
        let url = self.get_string_attribute(name);
        let doc = document_from_node(self).root();
        let base = doc.url();
        // https://html.spec.whatwg.org/multipage/infrastructure.html#reflect
        // XXXManishearth this doesn't handle `javascript:` urls properly
        match UrlParser::new().base_url(base).parse(url.as_slice()) {
            Ok(parsed) => parsed.serialize(),
            Err(_) => "".to_string()
        }
    }
    /// URL attributes are stored as their raw string form.
    fn set_url_attribute(self, name: &Atom, value: DOMString) {
        self.set_string_attribute(name, value);
    }

    /// Returns the attribute's string value, or "" when absent.
    fn get_string_attribute(self, name: &Atom) -> DOMString {
        match self.get_attribute(ns!(""), name) {
            Some(x) => x.root().Value(),
            None => "".to_string()
        }
    }
    fn set_string_attribute(self, name: &Atom, value: DOMString) {
        assert!(name.as_slice() == name.as_slice().to_ascii_lower().as_slice());
        self.set_attribute(name, AttrValue::String(value));
    }

    /// Sets a space-separated token-list attribute (e.g. `class`) from its
    /// serialized string form.
    fn set_tokenlist_attribute(self, name: &Atom, value: DOMString) {
        assert!(name.as_slice() ==
                name.as_slice().to_ascii_lower().as_slice());
        self.set_attribute(name, AttrValue::from_serialized_tokenlist(value));
    }

    /// Sets a token-list attribute from pre-interned tokens.
    fn set_atomic_tokenlist_attribute(self, name: &Atom, tokens: Vec<Atom>) {
        assert!(name.as_slice() == name.as_slice().to_ascii_lower().as_slice());
        self.set_attribute(name, AttrValue::from_atomic_tokens(tokens));
    }

    /// Reads an unsigned-integer attribute; 0 when absent. Panics if the
    /// stored value was not parsed as `AttrValue::UInt` (i.e. the element's
    /// `parse_plain_attribute` does not handle this name).
    fn get_uint_attribute(self, name: &Atom) -> u32 {
        assert!(name.as_slice().chars().all(|ch| {
            !ch.is_ascii() || ch.to_ascii().to_lowercase() == ch.to_ascii()
        }));
        let attribute = self.get_attribute(ns!(""), name).root();
        match attribute {
            Some(attribute) => {
                match *attribute.value() {
                    AttrValue::UInt(_, value) => value,
                    _ => panic!("Expected an AttrValue::UInt: \
                                 implement parse_plain_attribute"),
                }
            }
            None => 0,
        }
    }
    /// Stores `value` together with its decimal string serialization.
    fn set_uint_attribute(self, name: &Atom, value: u32) {
        assert!(name.as_slice() == name.as_slice().to_ascii_lower().as_slice());
        self.set_attribute(name, AttrValue::UInt(value.to_string(), value));
    }
}

impl<'a> ElementMethods for JSRef<'a, Element> {
    // http://dom.spec.whatwg.org/#dom-element-namespaceuri
    fn GetNamespaceURI(self) -> Option<DOMString> {
        match self.namespace {
            // The empty namespace reflects as null.
            ns!("") => None,
            Namespace(ref ns) => Some(ns.as_slice().to_string())
        }
    }

    fn LocalName(self) -> DOMString {
        self.local_name.as_slice().to_string()
    }

    // http://dom.spec.whatwg.org/#dom-element-prefix
    fn GetPrefix(self) -> Option<DOMString> {
        self.prefix.clone()
    }

    // http://dom.spec.whatwg.org/#dom-element-tagname
    fn TagName(self) -> DOMString {
        let qualified_name = match self.prefix {
            Some(ref prefix) => {
                (format!("{:s}:{:s}",
                         prefix.as_slice(),
                         self.local_name.as_slice())).into_maybe_owned()
            },
            None => self.local_name.as_slice().into_maybe_owned()
        };
        // HTML elements in HTML documents report an uppercased tag name.
        if self.html_element_in_html_document() {
            qualified_name.as_slice().to_ascii_upper()
        } else {
            qualified_name.into_string()
        }
    }

    // http://dom.spec.whatwg.org/#dom-element-id
    fn Id(self) -> DOMString {
        self.get_string_attribute(&atom!("id"))
    }

    // http://dom.spec.whatwg.org/#dom-element-id
    fn SetId(self, id:
             DOMString) {
        self.set_atomic_attribute(&atom!("id"), id);
    }

    // http://dom.spec.whatwg.org/#dom-element-classname
    fn ClassName(self) -> DOMString {
        self.get_string_attribute(&atom!("class"))
    }

    // http://dom.spec.whatwg.org/#dom-element-classname
    fn SetClassName(self, class: DOMString) {
        self.set_tokenlist_attribute(&atom!("class"), class);
    }

    // http://dom.spec.whatwg.org/#dom-element-classlist
    fn ClassList(self) -> Temporary<DOMTokenList> {
        // Lazily created and cached on the element.
        self.class_list.or_init(|| DOMTokenList::new(self, &atom!("class")))
    }

    // http://dom.spec.whatwg.org/#dom-element-attributes
    fn Attributes(self) -> Temporary<NamedNodeMap> {
        // Lazily created and cached on the element.
        self.attr_list.or_init(|| {
            let doc = {
                let node: JSRef<Node> = NodeCast::from_ref(self);
                node.owner_doc().root()
            };
            let window = doc.window().root();
            NamedNodeMap::new(*window, self)
        })
    }

    // http://dom.spec.whatwg.org/#dom-element-getattribute
    fn GetAttribute(self, name: DOMString) -> Option<DOMString> {
        // `parsed_name` applies the HTML lowercasing rule where applicable.
        let name = self.parsed_name(name);
        self.get_attribute(ns!(""), &Atom::from_slice(name.as_slice())).root()
            .map(|s| s.Value())
    }

    // http://dom.spec.whatwg.org/#dom-element-getattributens
    fn GetAttributeNS(self,
                      namespace: Option<DOMString>,
                      local_name: DOMString) -> Option<DOMString> {
        let namespace = namespace::from_domstring(namespace);
        self.get_attribute(namespace, &Atom::from_slice(local_name.as_slice())).root()
            .map(|attr| attr.Value())
    }

    // http://dom.spec.whatwg.org/#dom-element-setattribute
    fn SetAttribute(self,
                    name: DOMString,
                    value: DOMString) -> ErrorResult {
        // Step 1.
        match xml_name_type(name.as_slice()) {
            InvalidXMLName => return Err(InvalidCharacter),
            _ => {}
        }

        // Step 2.
        let name = self.parsed_name(name);

        // Step 3-5.
        let name = Atom::from_slice(name.as_slice());
        let value = self.parse_attribute(&ns!(""), &name, value);
        self.do_set_attribute(name.clone(), value, name.clone(), ns!(""), None, |attr| {
            *attr.name() == name
        });
        Ok(())
    }

    // http://dom.spec.whatwg.org/#dom-element-setattributens
    fn SetAttributeNS(self,
                      namespace_url: Option<DOMString>,
                      name: DOMString,
                      value: DOMString) -> ErrorResult {
        // Step 1.
        let namespace = namespace::from_domstring(namespace_url);

        let name_type = xml_name_type(name.as_slice());
        match name_type {
            // Step 2.
            InvalidXMLName => return Err(InvalidCharacter),
            // Step 3. (A plain Name that is not a QName cannot be namespaced.)
            Name => return Err(NamespaceError),
            QName => {}
        }

        // Step 4.
        let (prefix, local_name) = get_attribute_parts(name.as_slice());
        match prefix {
            Some(ref prefix_str) => {
                // Step 5. A prefix requires a non-empty namespace.
                if namespace == ns!("") {
                    return Err(NamespaceError);
                }

                // Step 6. The `xml` prefix is reserved for the XML namespace.
                if "xml" == prefix_str.as_slice() && namespace != ns!(XML) {
                    return Err(NamespaceError);
                }

                // Step 7b. The `xmlns` prefix is reserved for the XMLNS namespace.
                if "xmlns" == prefix_str.as_slice() && namespace != ns!(XMLNS) {
                    return Err(NamespaceError);
                }
            },
            None => {}
        }

        let name = Atom::from_slice(name.as_slice());
        let local_name = Atom::from_slice(local_name);
        let xmlns = atom!("xmlns");

        // Step 7a. A bare `xmlns` attribute must be in the XMLNS namespace.
        if xmlns == name && namespace != ns!(XMLNS) {
            return Err(NamespaceError);
        }

        // Step 8. Conversely, the XMLNS namespace is only for xmlns attrs.
        if namespace == ns!(XMLNS) && xmlns != name && Some("xmlns") != prefix {
            return Err(NamespaceError);
        }

        // Step 9.
        let value = self.parse_attribute(&namespace, &local_name, value);
        self.do_set_attribute(local_name.clone(), value, name,
                              namespace.clone(), prefix.map(|s| s.to_string()),
                              |attr| {
            *attr.local_name() == local_name &&
            *attr.namespace() == namespace
        });
        Ok(())
    }

    // http://dom.spec.whatwg.org/#dom-element-removeattribute
    fn RemoveAttribute(self, name: DOMString) {
        let name = self.parsed_name(name);
        self.remove_attribute(ns!(""), name.as_slice())
    }

    // http://dom.spec.whatwg.org/#dom-element-removeattributens
    fn RemoveAttributeNS(self,
                         namespace: Option<DOMString>,
                         localname: DOMString) {
        let namespace = namespace::from_domstring(namespace);
        self.remove_attribute(namespace, localname.as_slice())
    }

    // http://dom.spec.whatwg.org/#dom-element-hasattribute
    fn HasAttribute(self, name: DOMString) -> bool {
        self.GetAttribute(name).is_some()
    }

    // http://dom.spec.whatwg.org/#dom-element-hasattributens
    fn HasAttributeNS(self,
                      namespace: Option<DOMString>,
                      local_name: DOMString) -> bool {
        self.GetAttributeNS(namespace, local_name).is_some()
    }

    /// Live collection of descendants with the given tag name.
    fn GetElementsByTagName(self, localname: DOMString) -> Temporary<HTMLCollection> {
        let window = window_from_node(self).root();
        HTMLCollection::by_tag_name(*window, NodeCast::from_ref(self), localname)
    }

    /// Live collection of descendants with the given tag name and namespace.
    fn GetElementsByTagNameNS(self, maybe_ns: Option<DOMString>,
                              localname: DOMString) -> Temporary<HTMLCollection> {
        let window = window_from_node(self).root();
        HTMLCollection::by_tag_name_ns(*window, NodeCast::from_ref(self),
                                       localname, maybe_ns)
    }

    /// Live collection of descendants carrying all the given classes.
    fn GetElementsByClassName(self, classes: DOMString) -> Temporary<HTMLCollection> {
        let window = window_from_node(self).root();
        HTMLCollection::by_class_name(*window, NodeCast::from_ref(self), classes)
    }

    // http://dev.w3.org/csswg/cssom-view/#dom-element-getclientrects
    fn GetClientRects(self) -> Temporary<DOMRectList> {
        let win = window_from_node(self).root();
        let node: JSRef<Node> = NodeCast::from_ref(self);
        let rects = node.get_content_boxes();
        let rects: Vec<Root<DOMRect>> = rects.iter().map(|r| {
            // DOMRect::new takes (window, top, bottom, left, right).
            DOMRect::new(
                *win,
                r.origin.y,
                r.origin.y + r.size.height,
                r.origin.x,
                r.origin.x + r.size.width).root()
        }).collect();

        DOMRectList::new(*win, rects.iter().map(|rect| rect.deref().clone()).collect())
    }

    // http://dev.w3.org/csswg/cssom-view/#dom-element-getboundingclientrect
    fn GetBoundingClientRect(self) -> Temporary<DOMRect> {
        let win = window_from_node(self).root();
        let node: JSRef<Node> = NodeCast::from_ref(self);
        let rect = node.get_bounding_content_box();
        DOMRect::new(
            *win,
            rect.origin.y,
            rect.origin.y + rect.size.height,
            rect.origin.x,
            rect.origin.x + rect.size.width)
    }

    /// Serializes the element's children (inner HTML).
    fn GetInnerHTML(self) -> Fallible<DOMString> {
        //XXX TODO: XML case
        Ok(serialize(&mut NodeIterator::new(NodeCast::from_ref(self), false, false)))
    }

    /// Serializes the element itself plus its children (outer HTML).
    fn GetOuterHTML(self) -> Fallible<DOMString> {
        Ok(serialize(&mut NodeIterator::new(NodeCast::from_ref(self), true, false)))
    }

    // http://dom.spec.whatwg.org/#dom-parentnode-children
    fn Children(self) -> Temporary<HTMLCollection> {
        let window = window_from_node(self).root();
        HTMLCollection::children(*window, NodeCast::from_ref(self))
    }

    // http://dom.spec.whatwg.org/#dom-parentnode-queryselector
    fn QuerySelector(self, selectors: DOMString) -> Fallible<Option<Temporary<Element>>> {
        let root: JSRef<Node> = NodeCast::from_ref(self);
        root.query_selector(selectors)
    }

    // http://dom.spec.whatwg.org/#dom-parentnode-queryselectorall
    fn QuerySelectorAll(self, selectors: DOMString) -> Fallible<Temporary<NodeList>> {
        let root: JSRef<Node> = NodeCast::from_ref(self);
        root.query_selector_all(selectors)
    }

    // http://dom.spec.whatwg.org/#dom-childnode-remove
    fn Remove(self) {
        let node: JSRef<Node> = NodeCast::from_ref(self);
        node.remove_self();
    }

    // http://dom.spec.whatwg.org/#dom-element-matches
    fn Matches(self, selectors: DOMString) -> Fallible<bool> {
        let parser_context = ParserContext {
            origin: StylesheetOrigin::Author,
        };
        match style::parse_selector_list_from_str(&parser_context, selectors.as_slice()) {
            Err(()) => Err(Syntax),
            Ok(ref selectors) => {
                let root:
                    JSRef<Node> = NodeCast::from_ref(self);
                Ok(matches(selectors, &root, &mut None))
            }
        }
    }
}

/// Splits a qualified attribute name into `(prefix, local_name)` at the
/// first `:` only; names without a colon get `(None, name)`.
pub fn get_attribute_parts<'a>(name: &'a str) -> (Option<&'a str>, &'a str) {
    //FIXME: Throw for XML-invalid names
    //FIXME: Throw for XMLNS-invalid names
    let (prefix, local_name) = if name.contains(":")  {
        // splitn(1, ':') yields at most two pieces: prefix and the rest.
        let mut parts = name.splitn(1, ':');
        (Some(parts.next().unwrap()), parts.next().unwrap())
    } else {
        (None, name)
    };

    (prefix, local_name)
}

impl<'a> VirtualMethods for JSRef<'a, Element> {
    fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods> {
        let node: &JSRef<Node> = NodeCast::from_borrowed_ref(self);
        Some(node as &VirtualMethods)
    }

    /// Hook run after any attribute is set: keeps the cached inline style /
    /// named-element registry in sync and records restyle/relayout damage.
    fn after_set_attr(&self, attr: JSRef<Attr>) {
        // Always run the supertype (Node) hook first.
        match self.super_type() {
            Some(ref s) => s.after_set_attr(attr),
            _ => ()
        }

        match attr.local_name() {
            &atom!("style") => {
                // Modifying the `style` attribute might change style.
                let node: JSRef<Node> = NodeCast::from_ref(*self);
                let doc = document_from_node(*self).root();
                let base_url = doc.url().clone();
                let value = attr.value();
                let style = Some(style::parse_style_attribute(value.as_slice(), &base_url));
                *self.style_attribute.borrow_mut() = style;

                if node.is_in_doc() {
                    doc.content_changed(node, NodeDamage::NodeStyleDamaged);
                }
            }
            &atom!("class") => {
                // Modifying a class can change style.
                let node: JSRef<Node> = NodeCast::from_ref(*self);
                if node.is_in_doc() {
                    let document = document_from_node(*self).root();
                    document.content_changed(node, NodeDamage::NodeStyleDamaged);
                }
            }
            &atom!("id") => {
                // Modifying an ID might change style.
                let node: JSRef<Node> = NodeCast::from_ref(*self);
                let value = attr.value();
                if node.is_in_doc() {
                    let doc = document_from_node(*self).root();
                    if !value.as_slice().is_empty() {
                        let value = Atom::from_slice(value.as_slice());
                        doc.register_named_element(*self, value);
                    }
                    doc.content_changed(node, NodeDamage::NodeStyleDamaged);
                }
            }
            _ => {
                // Modifying any other attribute might change arbitrary things.
                let node: JSRef<Node> = NodeCast::from_ref(*self);
                if node.is_in_doc() {
                    let document = document_from_node(*self).root();
                    document.content_changed(node, NodeDamage::OtherNodeDamage);
                }
            }
        }
    }

    /// Hook run before any attribute is removed: clears the cached inline
    /// style / named-element registration and records damage, mirroring
    /// `after_set_attr`.
    fn before_remove_attr(&self, attr: JSRef<Attr>) {
        // Always run the supertype (Node) hook first.
        match self.super_type() {
            Some(ref s) => s.before_remove_attr(attr),
            _ => ()
        }

        match attr.local_name() {
            &atom!("style") => {
                // Modifying the `style` attribute might change style.
                *self.style_attribute.borrow_mut() = None;

                let node: JSRef<Node> = NodeCast::from_ref(*self);
                if node.is_in_doc() {
                    let doc = document_from_node(*self).root();
                    doc.content_changed(node, NodeDamage::NodeStyleDamaged);
                }
            }
            &atom!("id") => {
                // Modifying an ID can change style.
                let node: JSRef<Node> = NodeCast::from_ref(*self);
                let value = attr.value();
                if node.is_in_doc() {
                    let doc = document_from_node(*self).root();
                    if !value.as_slice().is_empty() {
                        let value = Atom::from_slice(value.as_slice());
                        doc.unregister_named_element(*self, value);
                    }
                    doc.content_changed(node, NodeDamage::NodeStyleDamaged);
                }
            }
            &atom!("class") => {
                // Modifying a class can change style.
                let node: JSRef<Node> = NodeCast::from_ref(*self);
                if node.is_in_doc() {
                    let document = document_from_node(*self).root();
                    document.content_changed(node, NodeDamage::NodeStyleDamaged);
                }
            }
            _ => {
                // Modifying any other attribute might change arbitrary things.
                let node: JSRef<Node> = NodeCast::from_ref(*self);
                if node.is_in_doc() {
                    let doc = document_from_node(*self).root();
                    doc.content_changed(node, NodeDamage::OtherNodeDamage);
                }
            }
        }
    }

    /// Generic element-level attribute parsing: `id` becomes an atom,
    /// `class` a token list; everything else defers to the Node vtable.
    fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
        match name {
            &atom!("id") => AttrValue::from_atomic(value),
            &atom!("class") => AttrValue::from_serialized_tokenlist(value),
            _ => self.super_type().unwrap().parse_plain_attribute(name, value),
        }
    }

    /// When the element enters a document, register its `id` (if any) in
    /// the document's named-element map.
    fn bind_to_tree(&self, tree_in_doc: bool) {
        match self.super_type() {
            Some(ref s) => s.bind_to_tree(tree_in_doc),
            _ => (),
        }

        if !tree_in_doc { return; }

        match self.get_attribute(ns!(""), &atom!("id")).root() {
            Some(attr) => {
                let doc = document_from_node(*self).root();
                let value = attr.Value();
                if !value.is_empty() {
                    let value = Atom::from_slice(value.as_slice());
                    doc.register_named_element(*self, value);
                }
            }
            _ => ()
        }
    }

    /// When the element leaves a document, drop its `id` registration.
    fn unbind_from_tree(&self, tree_in_doc: bool) {
        match self.super_type() {
            Some(ref s) => s.unbind_from_tree(tree_in_doc),
            _ => (),
        }

        if !tree_in_doc { return; }

        match self.get_attribute(ns!(""), &atom!("id")).root() {
            Some(attr) => {
                let doc = document_from_node(*self).root();
                let value = attr.Value();
                if !value.is_empty() {
                    let value = Atom::from_slice(value.as_slice());
                    doc.unregister_named_element(*self, value);
                }
            }
            _ => ()
        }
    }
}

impl<'a> style::TElement<'a> for JSRef<'a, Element> {
    fn get_attr(self, namespace: &Namespace, attr: &Atom) -> Option<&'a str> {
        self.get_attribute(namespace.clone(), attr).root().map(|attr| {
            // This transmute is used to cheat the lifetime restriction.
            unsafe { mem::transmute(attr.value().as_slice()) }
        })
    }
    fn get_attrs(self, attr: &Atom) -> Vec<&'a str> {
        self.get_attributes(attr).iter().map(|attr| attr.root()).map(|attr| {
            // This transmute is used to cheat the lifetime restriction.
            unsafe { mem::transmute(attr.value().as_slice()) }
        }).collect()
    }
    fn get_link(self) -> Option<&'a str> {
        // FIXME: This is HTML only.
        let node: JSRef<Node> = NodeCast::from_ref(self);
        match node.type_id() {
            // http://www.whatwg.org/specs/web-apps/current-work/multipage/selectors.html#
            // selector-link
            NodeTypeId::Element(ElementTypeId::HTMLAnchorElement) |
            NodeTypeId::Element(ElementTypeId::HTMLAreaElement) |
            NodeTypeId::Element(ElementTypeId::HTMLLinkElement) => self.get_attr(&ns!(""), &atom!("href")),
            _ => None,
        }
    }
    fn get_local_name(self) -> &'a Atom {
        // FIXME(zwarich): Remove this when UFCS lands and there is a better way
        // of disambiguating methods.
        fn get_local_name<'a, T: ElementHelpers<'a>>(this: T) -> &'a Atom {
            this.local_name()
        }

        get_local_name(self)
    }
    fn get_namespace(self) -> &'a Namespace {
        // FIXME(zwarich): Remove this when UFCS lands and there is a better way
        // of disambiguating methods.
        fn get_namespace<'a, T: ElementHelpers<'a>>(this: T) -> &'a Namespace {
            this.namespace()
        }

        get_namespace(self)
    }
    fn get_hover_state(self) -> bool {
        let node: JSRef<Node> = NodeCast::from_ref(self);
        node.get_hover_state()
    }
    fn get_id(self) -> Option<Atom> {
        self.get_attribute(ns!(""), &atom!("id")).map(|attr| {
            let attr = attr.root();
            // `id` is always stored atomically (see parse_plain_attribute).
            match *attr.value() {
                AttrValue::Atom(ref val) => val.clone(),
                _ => panic!("`id` attribute should be AttrValue::Atom"),
            }
        })
    }
    fn get_disabled_state(self) -> bool {
        let node: JSRef<Node> = NodeCast::from_ref(self);
        node.get_disabled_state()
    }
    fn get_enabled_state(self) -> bool {
        let node: JSRef<Node> = NodeCast::from_ref(self);
        node.get_enabled_state()
    }
    fn get_checked_state(self) -> bool {
        // Only input elements can be checked.
        match HTMLInputElementCast::to_ref(self) {
            Some(input) => input.Checked(),
            None => false,
        }
    }
    fn get_indeterminate_state(self) -> bool {
        // Only input elements can be in an indeterminate state.
        match HTMLInputElementCast::to_ref(self) {
            Some(input) => input.get_indeterminate_state(),
            None => false,
        }
    }
    fn has_class(self, name: &Atom) -> bool {
        // FIXME(zwarich): Remove this when UFCS lands and there is a better way
        // of disambiguating methods.
        fn has_class<T: AttributeHandlers>(this: T, name: &Atom) -> bool {
            this.has_class(name)
        }

        has_class(self, name)
    }
    /// Invokes `callback` once per token of the `class` attribute, in order.
    fn each_class(self, callback: |&Atom|) {
        match self.get_attribute(ns!(""), &atom!("class")).root() {
            None => {}
            Some(ref attr) => {
                match attr.value().tokens() {
                    None => {}
                    Some(tokens) => {
                        for token in tokens.iter() {
                            callback(token)
                        }
                    }
                }
            }
        }
    }
    /// True for `<table>` elements whose `border` attribute is present and
    /// non-zero; false for everything else.
    fn has_nonzero_border(self) -> bool {
        match HTMLTableElementCast::to_ref(self) {
            None => false,
            Some(this) => {
                match this.get_border() {
                    None | Some(0) => false,
                    Some(_) => true,
                }
            }
        }
    }
}

/// Support for the HTML activation-behavior machinery (clicks on
/// activatable elements such as form controls).
pub trait ActivationElementHelpers<'a> {
    /// Downcasts to the `Activatable` trait object when this element type
    /// has activation behavior.
    fn as_maybe_activatable(&'a self) -> Option<&'a Activatable + 'a>;
    fn click_in_progress(self) -> bool;
    fn set_click_in_progress(self, click: bool);
    fn nearest_activable_element(self) -> Option<Temporary<Element>>;
    fn authentic_click_activation<'b>(self, event: JSRef<'b, Event>);
}

impl<'a> ActivationElementHelpers<'a> for JSRef<'a, Element> {
    fn as_maybe_activatable(&'a self) -> Option<&'a Activatable + 'a> {
        let node: JSRef<Node> = NodeCast::from_ref(*self);
        match node.type_id() {
            // Currently only <input> implements Activatable here.
            NodeTypeId::Element(ElementTypeId::HTMLInputElement) => {
                let element: &'a JSRef<'a, HTMLInputElement> = HTMLInputElementCast::to_borrowed_ref(self).unwrap();
                Some(element as &'a Activatable + 'a)
            },
            _ => {
                None
            }
        }
    }

    /// Reads the node flag guarding against nested authentic clicks.
    fn click_in_progress(self) -> bool {
        let node: JSRef<Node> = NodeCast::from_ref(self);
        node.get_flag(CLICK_IN_PROGRESS)
    }

    fn set_click_in_progress(self, click: bool) {
        let node: JSRef<Node> = NodeCast::from_ref(self);
        node.set_flag(CLICK_IN_PROGRESS, click)
    }

    // https://html.spec.whatwg.org/multipage/interaction.html#nearest-activatable-element
    fn nearest_activable_element(self) -> Option<Temporary<Element>> {
        match self.as_maybe_activatable() {
            Some(el) => Some(Temporary::from_rooted(*el.as_element().root())),
            None => {
                // Walk up the ancestor chain for the first activatable element.
                let node: JSRef<Node> = NodeCast::from_ref(self);
                node.ancestors()
                    .filter_map(|node| ElementCast::to_ref(node))
                    .filter(|e| e.as_maybe_activatable().is_some()).next()
                    .map(|r| Temporary::from_rooted(r))
            }
        }
    }

    /// Please call this method *only* for real click events
    ///
    /// https://html.spec.whatwg.org/multipage/interaction.html#run-authentic-click-activation-steps
    ///
    /// Use an element's synthetic click activation (or handle_event) for any script-triggered clicks.
    /// If the spec says otherwise, check with Manishearth first
    fn authentic_click_activation<'b>(self, event: JSRef<'b, Event>) {
        // Not explicitly part of the spec, however this helps enforce the invariants
        // required to save state between pre-activation and post-activation
        // since we cannot nest authentic clicks (unlike synthetic click activation, where
        // the script can generate more click events from the handler)
        assert!(!self.click_in_progress());

        let target: JSRef<EventTarget> = EventTargetCast::from_ref(self);
        // Step 2 (requires canvas support)
        // Step 3
        self.set_click_in_progress(true);
        // Step 4
        let e = self.nearest_activable_element().root();
        match e {
            Some(el) => match el.as_maybe_activatable() {
                Some(elem) => {
                    // Step 5-6
                    elem.pre_click_activation();
                    target.dispatch_event(event);
                    if !event.DefaultPrevented() {
                        // post click activation
                        elem.activation_behavior();
                    } else {
                        elem.canceled_activation();
                    }
                }
                // Step 6
                None => {target.dispatch_event(event);}
            },
            // Step 6
            None => {target.dispatch_event(event);}
        }
        // Step 7
        self.set_click_in_progress(false);
    }
}
Added Element::get_tokenlist_attribute
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

//! Element nodes.
use dom::activation::Activatable;
use dom::attr::{Attr, AttrSettingType, AttrHelpers, AttrHelpersForLayout};
use dom::attr::AttrValue;
use dom::namednodemap::NamedNodeMap;
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::ElementBinding;
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::HTMLInputElementBinding::HTMLInputElementMethods;
use dom::bindings::codegen::Bindings::NamedNodeMapBinding::NamedNodeMapMethods;
use dom::bindings::codegen::InheritTypes::{ElementCast, ElementDerived, EventTargetCast};
use dom::bindings::codegen::InheritTypes::{HTMLBodyElementDerived, HTMLInputElementCast};
use dom::bindings::codegen::InheritTypes::{HTMLInputElementDerived, HTMLTableElementCast};
use dom::bindings::codegen::InheritTypes::{HTMLTableElementDerived, HTMLTableCellElementDerived};
use dom::bindings::codegen::InheritTypes::{HTMLTableRowElementDerived, HTMLTextAreaElementDerived};
use dom::bindings::codegen::InheritTypes::{HTMLTableSectionElementDerived, NodeCast};
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::error::Error::{NamespaceError, InvalidCharacter, Syntax};
use dom::bindings::js::{MutNullableJS, JS, JSRef, Temporary, TemporaryPushable};
use dom::bindings::js::{OptionalRootable, Root};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::bindings::utils::xml_name_type;
use dom::bindings::utils::XMLName::{QName, Name, InvalidXMLName};
use dom::create::create_element;
use dom::domrect::DOMRect;
use dom::domrectlist::DOMRectList;
use dom::document::{Document, DocumentHelpers, LayoutDocumentHelpers};
use dom::domtokenlist::DOMTokenList;
use dom::event::Event;
use dom::eventtarget::{EventTarget, EventTargetTypeId, EventTargetHelpers};
use dom::htmlbodyelement::{HTMLBodyElement, HTMLBodyElementHelpers};
use dom::htmlcollection::HTMLCollection;
use dom::htmlinputelement::{HTMLInputElement, RawLayoutHTMLInputElementHelpers, HTMLInputElementHelpers};
use dom::htmlserializer::serialize;
use dom::htmltableelement::{HTMLTableElement, HTMLTableElementHelpers};
use dom::htmltablecellelement::{HTMLTableCellElement, HTMLTableCellElementHelpers};
use dom::htmltablerowelement::{HTMLTableRowElement, HTMLTableRowElementHelpers};
use dom::htmltablesectionelement::{HTMLTableSectionElement, HTMLTableSectionElementHelpers};
use dom::htmltextareaelement::{HTMLTextAreaElement, RawLayoutHTMLTextAreaElementHelpers};
use dom::node::{CLICK_IN_PROGRESS, LayoutNodeHelpers, Node, NodeHelpers, NodeTypeId};
use dom::node::{NodeIterator, document_from_node, NodeDamage};
use dom::node::{window_from_node};
use dom::nodelist::NodeList;
use dom::virtualmethods::{VirtualMethods, vtable_for};
use devtools_traits::AttrInfo;
use style::{mod, StylesheetOrigin, SimpleColorAttribute, UnsignedIntegerAttribute};
use style::{IntegerAttribute, LengthAttribute, ParserContext, matches};
use servo_util::namespace;
use servo_util::str::{DOMString, LengthOrPercentageOrAuto};
use cssparser::RGBA;
use std::ascii::AsciiExt;
use std::cell::{Ref, RefMut};
use std::default::Default;
use std::mem;
use std::sync::Arc;
use string_cache::{Atom, Namespace, QualName};
use url::UrlParser;

/// The DOM `Element` interface: a `Node` plus a qualified name, an attribute
/// list, and lazily-created reflected collections (attribute map, class list).
#[dom_struct]
pub struct Element {
    node: Node,
    // Qualified-name parts: local name atom, namespace, optional prefix.
    local_name: Atom,
    namespace: Namespace,
    prefix: Option<DOMString>,
    // Attribute storage; interior mutability because attributes are mutated
    // through shared JS references.
    attrs: DOMRefCell<Vec<JS<Attr>>>,
    // Parsed inline `style=""` declarations, if any.
    style_attribute: DOMRefCell<Option<style::PropertyDeclarationBlock>>,
    // Lazily-created `attributes` NamedNodeMap reflector.
    attr_list: MutNullableJS<NamedNodeMap>,
    // Lazily-created `classList` DOMTokenList reflector.
    class_list: MutNullableJS<DOMTokenList>,
}

impl ElementDerived for EventTarget {
    /// True when this event target's type id says it is (any kind of) element.
    #[inline]
    fn is_element(&self) -> bool {
        match *self.type_id() {
            EventTargetTypeId::Node(NodeTypeId::Element(_)) => true,
            _ => false
        }
    }
}

impl Reflectable for Element {
    // Delegate reflection to the embedded Node.
    fn reflector<'a>(&'a self) -> &'a Reflector {
        self.node.reflector()
    }
}

/// Concrete element kind, one variant per supported HTML element interface
/// (plus the generic `Element` fallback for non-HTML elements).
#[deriving(PartialEq, Show)]
#[jstraceable]
pub enum ElementTypeId {
    HTMLElement,
    HTMLAnchorElement,
    HTMLAppletElement,
    HTMLAreaElement,
    HTMLAudioElement,
    HTMLBaseElement,
    HTMLBRElement,
    HTMLBodyElement,
    HTMLButtonElement,
    HTMLCanvasElement,
    HTMLDataElement,
    HTMLDataListElement,
    HTMLDirectoryElement,
    HTMLDListElement,
    HTMLDivElement,
    HTMLEmbedElement,
    HTMLFieldSetElement,
    HTMLFontElement,
    HTMLFormElement,
    HTMLFrameElement,
    HTMLFrameSetElement,
    HTMLHRElement,
    HTMLHeadElement,
    HTMLHeadingElement,
    HTMLHtmlElement,
    HTMLIFrameElement,
    HTMLImageElement,
    HTMLInputElement,
    HTMLLabelElement,
    HTMLLegendElement,
    HTMLLinkElement,
    HTMLLIElement,
    HTMLMapElement,
    HTMLMediaElement,
    HTMLMetaElement,
    HTMLMeterElement,
    HTMLModElement,
    HTMLObjectElement,
    HTMLOListElement,
    HTMLOptGroupElement,
    HTMLOptionElement,
    HTMLOutputElement,
    HTMLParagraphElement,
    HTMLParamElement,
    HTMLPreElement,
    HTMLProgressElement,
    HTMLQuoteElement,
    HTMLScriptElement,
    HTMLSelectElement,
    HTMLSourceElement,
    HTMLSpanElement,
    HTMLStyleElement,
    HTMLTableElement,
    HTMLTableCaptionElement,
    HTMLTableDataCellElement,
    HTMLTableHeaderCellElement,
    HTMLTableColElement,
    HTMLTableRowElement,
    HTMLTableSectionElement,
    HTMLTemplateElement,
    HTMLTextAreaElement,
    HTMLTimeElement,
    HTMLTitleElement,
    HTMLTrackElement,
    HTMLUListElement,
    HTMLVideoElement,
    HTMLUnknownElement,
    // Generic, non-HTML element.
    Element,
}

/// Whether an element was created by the parser or by script; affects
/// element-creation behavior (e.g. for scripts).
#[deriving(PartialEq)]
pub enum ElementCreator {
    ParserCreated,
    ScriptCreated,
}

//
// Element methods
//

impl Element {
    /// Creates the appropriate concrete element for `name` by dispatching to
    /// the `create_element` factory.
    pub fn create(name: QualName, prefix: Option<DOMString>,
                  document: JSRef<Document>, creator: ElementCreator)
                  -> Temporary<Element> {
        create_element(name, prefix, document, creator)
    }

    /// Builds the plain (un-reflected) Element struct; used by subclasses'
    /// constructors as well as `Element::new` below.
    pub fn new_inherited(type_id: ElementTypeId, local_name: DOMString,
                         namespace: Namespace, prefix: Option<DOMString>,
                         document: JSRef<Document>) -> Element {
        Element {
            node: Node::new_inherited(NodeTypeId::Element(type_id), document),
            local_name: Atom::from_slice(local_name.as_slice()),
            namespace: namespace,
            prefix: prefix,
            attrs: DOMRefCell::new(vec!()),
            attr_list: Default::default(),
            class_list: Default::default(),
            style_attribute: DOMRefCell::new(None),
        }
    }

    /// Creates and reflects a generic `Element` (type id `Element`).
    pub fn new(local_name: DOMString, namespace: Namespace,
               prefix: Option<DOMString>, document: JSRef<Document>)
               -> Temporary<Element> {
        Node::reflect_node(
            box Element::new_inherited(ElementTypeId::Element, local_name,
                                       namespace, prefix, document),
            document, ElementBinding::Wrap)
    }
}

/// Attribute accessors callable from the layout thread without rooting.
/// All methods are `unsafe`: callers must guarantee the DOM is not being
/// mutated concurrently (layout-only access).
pub trait RawLayoutElementHelpers {
    unsafe fn get_attr_val_for_layout<'a>(&'a self, namespace: &Namespace, name: &Atom)
                                          -> Option<&'a str>;
    unsafe fn get_attr_vals_for_layout<'a>(&'a self, name: &Atom) -> Vec<&'a str>;
    unsafe fn get_attr_atom_for_layout(&self, namespace: &Namespace, name: &Atom)
                                       -> Option<Atom>;
    unsafe fn has_class_for_layout(&self, name: &Atom) -> bool;
    unsafe fn get_classes_for_layout(&self) -> Option<&'static [Atom]>;
    unsafe fn get_length_attribute_for_layout(&self, length_attribute: LengthAttribute)
                                              -> LengthOrPercentageOrAuto;
    unsafe fn get_integer_attribute_for_layout(&self, integer_attribute: IntegerAttribute)
                                               -> Option<i32>;
    unsafe fn get_checked_state_for_layout(&self) -> bool;
    unsafe fn get_indeterminate_state_for_layout(&self) -> bool;
    unsafe fn get_unsigned_integer_attribute_for_layout(&self,
                                                        attribute: UnsignedIntegerAttribute)
                                                        -> Option<u32>;
    unsafe fn get_simple_color_attribute_for_layout(&self, attribute: SimpleColorAttribute)
                                                    -> Option<RGBA>;
    fn local_name<'a>(&'a self) -> &'a Atom;
    fn namespace<'a>(&'a self) -> &'a Namespace;
    fn style_attribute<'a>(&'a self) -> &'a DOMRefCell<Option<style::PropertyDeclarationBlock>>;
}

/// Finds the attribute matching (namespace, local name) without rooting.
/// Unsafe: bypasses the DOMRefCell borrow flags for layout-thread access.
#[inline]
unsafe fn get_attr_for_layout<'a>(elem: &'a Element, namespace: &Namespace, name: &Atom)
                                  -> Option<&'a JS<Attr>> {
    // cast to point to T in RefCell<T> directly
    let attrs: *const Vec<JS<Attr>> = mem::transmute(&elem.attrs);
    (*attrs).iter().find(|attr: & &JS<Attr>| {
        let attr = attr.unsafe_get();
        *name == (*attr).local_name_atom_forever() &&
        (*attr).namespace() == namespace
    })
}

impl RawLayoutElementHelpers for Element {
    #[inline]
    unsafe fn get_attr_val_for_layout<'a>(&'a self, namespace:
&Namespace, name: &Atom) -> Option<&'a str> {
        // String value of the matching attribute, if present.
        get_attr_for_layout(self, namespace, name).map(|attr| {
            let attr = attr.unsafe_get();
            (*attr).value_ref_forever()
        })
    }

    #[inline]
    unsafe fn get_attr_vals_for_layout<'a>(&'a self, name: &Atom) -> Vec<&'a str> {
        // All values with the given local name, regardless of namespace.
        let attrs = self.attrs.borrow_for_layout();
        (*attrs).iter().filter_map(|attr: &JS<Attr>| {
            let attr = attr.unsafe_get();
            if *name == (*attr).local_name_atom_forever() {
                Some((*attr).value_ref_forever())
            } else {
                None
            }
        }).collect()
    }

    #[inline]
    unsafe fn get_attr_atom_for_layout(&self, namespace: &Namespace, name: &Atom)
                                       -> Option<Atom> {
        // Atom value of the matching attribute (None when the value is not
        // atom-backed).
        let attrs = self.attrs.borrow_for_layout();
        (*attrs).iter().find(|attr: & &JS<Attr>| {
            let attr = attr.unsafe_get();
            *name == (*attr).local_name_atom_forever() &&
            (*attr).namespace() == namespace
        }).and_then(|attr| {
            let attr = attr.unsafe_get();
            (*attr).value_atom_forever()
        })
    }

    #[inline]
    unsafe fn has_class_for_layout(&self, name: &Atom) -> bool {
        // Token-list membership test on the `class` attribute.
        let attrs = self.attrs.borrow_for_layout();
        (*attrs).iter().find(|attr: & &JS<Attr>| {
            let attr = attr.unsafe_get();
            (*attr).local_name_atom_forever() == atom!("class")
        }).map_or(false, |attr| {
            let attr = attr.unsafe_get();
            (*attr).value_tokens_forever().map(|tokens| {
                tokens.iter().any(|atom| atom == name)
            })
        // NOTE(review): `.take().unwrap()` assumes the `class` attr value is
        // always token-backed; it will panic otherwise.
        }.take().unwrap())
    }

    #[inline]
    unsafe fn get_classes_for_layout(&self) -> Option<&'static [Atom]> {
        let attrs = self.attrs.borrow_for_layout();
        (*attrs).iter().find(|attr: & &JS<Attr>| {
            let attr = attr.unsafe_get();
            (*attr).local_name_atom_forever() == atom!("class")
        }).and_then(|attr| {
            let attr = attr.unsafe_get();
            (*attr).value_tokens_forever()
        })
    }

    #[inline]
    unsafe fn get_length_attribute_for_layout(&self, length_attribute: LengthAttribute)
                                              -> LengthOrPercentageOrAuto {
        match length_attribute {
            LengthAttribute::Width => {
                // `width` is only meaningful on tables and table cells here;
                // any other element type is a caller bug.
                if self.is_htmltableelement() {
                    let this: &HTMLTableElement = mem::transmute(self);
                    this.get_width()
                } else if self.is_htmltablecellelement() {
                    let this: &HTMLTableCellElement = mem::transmute(self);
                    this.get_width()
                } else {
                    panic!("I'm not a table or table cell!")
                }
            }
        }
    }

    #[inline]
    unsafe fn get_integer_attribute_for_layout(&self, integer_attribute: IntegerAttribute)
                                               -> Option<i32> {
        match integer_attribute {
            IntegerAttribute::Size => {
                if !self.is_htmlinputelement() {
                    panic!("I'm not a form input!")
                }
                let this: &HTMLInputElement = mem::transmute(self);
                Some(this.get_size_for_layout() as i32)
            }
            IntegerAttribute::Cols => {
                if !self.is_htmltextareaelement() {
                    panic!("I'm not a textarea element!")
                }
                let this: &HTMLTextAreaElement = mem::transmute(self);
                Some(this.get_cols_for_layout() as i32)
            }
            IntegerAttribute::Rows => {
                if !self.is_htmltextareaelement() {
                    panic!("I'm not a textarea element!")
                }
                let this: &HTMLTextAreaElement = mem::transmute(self);
                Some(this.get_rows_for_layout() as i32)
            }
        }
    }

    #[inline]
    #[allow(unrooted_must_root)]
    unsafe fn get_checked_state_for_layout(&self) -> bool {
        // TODO option and menuitem can also have a checked state.
        if !self.is_htmlinputelement() {
            return false
        }
        let this: &HTMLInputElement = mem::transmute(self);
        this.get_checked_state_for_layout()
    }

    #[inline]
    #[allow(unrooted_must_root)]
    unsafe fn get_indeterminate_state_for_layout(&self) -> bool {
        // TODO progress elements can also be matched with :indeterminate
        if !self.is_htmlinputelement() {
            return false
        }
        let this: &HTMLInputElement = mem::transmute(self);
        this.get_indeterminate_state_for_layout()
    }

    unsafe fn get_unsigned_integer_attribute_for_layout(&self,
                                                        attribute: UnsignedIntegerAttribute)
                                                        -> Option<u32> {
        match attribute {
            UnsignedIntegerAttribute::Border => {
                if self.is_htmltableelement() {
                    let this: &HTMLTableElement = mem::transmute(self);
                    this.get_border()
                } else {
                    // Don't panic since `:-servo-nonzero-border` can cause this to be called on
                    // arbitrary elements.
                    None
                }
            }
            UnsignedIntegerAttribute::ColSpan => {
                if self.is_htmltablecellelement() {
                    let this: &HTMLTableCellElement = mem::transmute(self);
                    this.get_colspan()
                } else {
                    // Don't panic since `display` can cause this to be called on arbitrary
                    // elements.
                    None
                }
            }
        }
    }

    #[inline]
    #[allow(unrooted_must_root)]
    unsafe fn get_simple_color_attribute_for_layout(&self, attribute: SimpleColorAttribute)
                                                    -> Option<RGBA> {
        match attribute {
            SimpleColorAttribute::BgColor => {
                // `bgcolor` is honored on body, table, cell, row and section
                // elements; all others yield None.
                if self.is_htmlbodyelement() {
                    let this: &HTMLBodyElement = mem::transmute(self);
                    this.get_background_color()
                } else if self.is_htmltableelement() {
                    let this: &HTMLTableElement = mem::transmute(self);
                    this.get_background_color()
                } else if self.is_htmltablecellelement() {
                    let this: &HTMLTableCellElement = mem::transmute(self);
                    this.get_background_color()
                } else if self.is_htmltablerowelement() {
                    let this: &HTMLTableRowElement = mem::transmute(self);
                    this.get_background_color()
                } else if self.is_htmltablesectionelement() {
                    let this: &HTMLTableSectionElement = mem::transmute(self);
                    this.get_background_color()
                } else {
                    None
                }
            }
        }
    }

    // Getters used in components/layout/wrapper.rs
    fn local_name<'a>(&'a self) -> &'a Atom {
        &self.local_name
    }

    fn namespace<'a>(&'a self) -> &'a Namespace {
        &self.namespace
    }

    fn style_attribute<'a>(&'a self) -> &'a DOMRefCell<Option<style::PropertyDeclarationBlock>> {
        &self.style_attribute
    }
}

/// Layout-thread helpers on unrooted `JS<Element>` handles.
pub trait LayoutElementHelpers {
    unsafe fn html_element_in_html_document_for_layout(&self) -> bool;
    unsafe fn has_attr_for_layout(&self, namespace: &Namespace, name: &Atom) -> bool;
}

impl LayoutElementHelpers for JS<Element> {
    #[inline]
    unsafe fn html_element_in_html_document_for_layout(&self) -> bool {
        // True iff this element is in the HTML namespace AND its owner
        // document is an HTML document.
        if (*self.unsafe_get()).namespace != ns!(HTML) {
            return false
        }
        let node: JS<Node> = self.transmute_copy();
        node.owner_doc_for_layout().is_html_document_for_layout()
    }

    unsafe fn has_attr_for_layout(&self, namespace: &Namespace, name: &Atom) -> bool {
        get_attr_for_layout(&*self.unsafe_get(),
namespace, name).is_some()
    }
}

/// Whether an inline style declaration carries `!important`.
#[deriving(PartialEq)]
pub enum StylePriority {
    Important,
    Normal,
}

/// Script-thread helper methods on rooted elements.
pub trait ElementHelpers<'a> {
    fn html_element_in_html_document(self) -> bool;
    fn local_name(self) -> &'a Atom;
    fn parsed_name(self, name: DOMString) -> DOMString;
    fn namespace(self) -> &'a Namespace;
    fn prefix(self) -> &'a Option<DOMString>;
    fn attrs(&self) -> Ref<Vec<JS<Attr>>>;
    fn attrs_mut(&self) -> RefMut<Vec<JS<Attr>>>;
    fn style_attribute(self) -> &'a DOMRefCell<Option<style::PropertyDeclarationBlock>>;
    fn summarize(self) -> Vec<AttrInfo>;
    fn is_void(self) -> bool;
    fn remove_inline_style_property(self, property: DOMString);
    fn update_inline_style(self, property_decl: style::PropertyDeclaration,
                           style_priority: StylePriority);
    fn get_inline_style_declaration(self, property: &Atom)
                                    -> Option<style::PropertyDeclaration>;
    fn get_important_inline_style_declaration(self, property: &Atom)
                                              -> Option<style::PropertyDeclaration>;
}

impl<'a> ElementHelpers<'a> for JSRef<'a, Element> {
    fn html_element_in_html_document(self) -> bool {
        let node: JSRef<Node> = NodeCast::from_ref(self);
        self.namespace == ns!(HTML) && node.is_in_html_doc()
    }

    fn local_name(self) -> &'a Atom {
        &self.extended_deref().local_name
    }

    // https://dom.spec.whatwg.org/#concept-element-attributes-get-by-name
    fn parsed_name(self, name: DOMString) -> DOMString {
        // Attribute names are ASCII-lowercased for HTML elements in HTML
        // documents.
        if self.html_element_in_html_document() {
            name.as_slice().to_ascii_lower()
        } else {
            name
        }
    }

    fn namespace(self) -> &'a Namespace {
        &self.extended_deref().namespace
    }

    fn prefix(self) -> &'a Option<DOMString> {
        &self.extended_deref().prefix
    }

    fn attrs(&self) -> Ref<Vec<JS<Attr>>> {
        self.extended_deref().attrs.borrow()
    }

    fn attrs_mut(&self) -> RefMut<Vec<JS<Attr>>> {
        self.extended_deref().attrs.borrow_mut()
    }

    fn style_attribute(self) -> &'a DOMRefCell<Option<style::PropertyDeclarationBlock>> {
        &self.extended_deref().style_attribute
    }

    /// Snapshot of all attributes for the devtools protocol.
    fn summarize(self) -> Vec<AttrInfo> {
        let attrs = self.Attributes().root();
        let mut i = 0;
        let mut summarized = vec!();
        while i < attrs.Length() {
            let attr = attrs.Item(i).unwrap().root();
            summarized.push(attr.summarize());
            i += 1;
        }
        summarized
    }

    /// Whether this is an HTML void element (cannot have contents when
    /// serialized).
    fn is_void(self) -> bool {
        if self.namespace != ns!(HTML) {
            return false
        }
        match self.local_name.as_slice() {
            /* List of void elements from
            http://www.whatwg.org/specs/web-apps/current-work/multipage/the-end.html#html-fragment-serialization-algorithm */
            "area" | "base" | "basefont" | "bgsound" | "br" | "col" | "embed" |
            "frame" | "hr" | "img" | "input" | "keygen" | "link" | "menuitem" |
            "meta" | "param" | "source" | "track" | "wbr" => true,
            _ => false
        }
    }

    /// Removes `property` from the inline style block, checking the normal
    /// list first and then the important list.
    fn remove_inline_style_property(self, property: DOMString) {
        let mut inline_declarations = self.style_attribute.borrow_mut();
        inline_declarations.as_mut().map(|declarations| {
            let index = declarations.normal
                                    .iter()
                                    .position(|decl| decl.name() == property);
            match index {
                Some(index) => {
                    // make_unique: copy-on-write clone of the shared Arc'd
                    // vector before mutating.
                    declarations.normal.make_unique().remove(index);
                    return;
                }
                None => ()
            }

            let index = declarations.important
                                    .iter()
                                    .position(|decl| decl.name() == property);
            match index {
                Some(index) => {
                    declarations.important.make_unique().remove(index);
                    return;
                }
                None => ()
            }
        });
    }

    /// Inserts or replaces an inline style declaration at the given priority.
    fn update_inline_style(self, property_decl: style::PropertyDeclaration,
                           style_priority: StylePriority) {
        let mut inline_declarations = self.style_attribute().borrow_mut();
        if let Some(ref mut declarations) = *inline_declarations.deref_mut() {
            let existing_declarations = if style_priority == StylePriority::Important {
                declarations.important.make_unique()
            } else {
                declarations.normal.make_unique()
            };

            // Replace in place when a declaration with the same name exists.
            for declaration in existing_declarations.iter_mut() {
                if declaration.name() == property_decl.name() {
                    *declaration = property_decl;
                    return;
                }
            }
            existing_declarations.push(property_decl);
            return;
        }

        // No inline style block yet: create one holding just this declaration.
        let (important, normal) = if style_priority == StylePriority::Important {
            (vec!(property_decl), vec!())
        } else {
            (vec!(), vec!(property_decl))
        };

        *inline_declarations = Some(style::PropertyDeclarationBlock {
            important: Arc::new(important),
            normal: Arc::new(normal),
        });
    }

    /// Looks `property` up in normal-then-important inline declarations.
    fn get_inline_style_declaration(self, property: &Atom)
                                    -> Option<style::PropertyDeclaration> {
        let inline_declarations = self.style_attribute.borrow();
        inline_declarations.as_ref().and_then(|declarations| {
            declarations.normal
                        .iter()
                        .chain(declarations.important.iter())
                        .find(|decl| decl.matches(property.as_slice()))
                        .map(|decl| decl.clone())
        })
    }

    /// Looks `property` up among the `!important` inline declarations only.
    fn get_important_inline_style_declaration(self, property: &Atom)
                                              -> Option<style::PropertyDeclaration> {
        let inline_declarations = self.style_attribute.borrow();
        inline_declarations.as_ref().and_then(|declarations| {
            declarations.important
                        .iter()
                        .find(|decl| decl.matches(property.as_slice()))
                        .map(|decl| decl.clone())
        })
    }
}

/// Typed get/set helpers over the element's attribute list, used to implement
/// IDL attribute reflection.
pub trait AttributeHandlers {
    /// Returns the attribute with given namespace and case-sensitive local
    /// name, if any.
    fn get_attribute(self, namespace: Namespace, local_name: &Atom)
                     -> Option<Temporary<Attr>>;
    fn get_attributes(self, local_name: &Atom) -> Vec<Temporary<Attr>>;
    fn set_attribute_from_parser(self, name: QualName, value: DOMString,
                                 prefix: Option<DOMString>);
    fn set_attribute(self, name: &Atom, value: AttrValue);
    fn set_custom_attribute(self, name: DOMString, value: DOMString) -> ErrorResult;
    fn do_set_attribute(self, local_name: Atom, value: AttrValue, name: Atom,
                        namespace: Namespace, prefix: Option<DOMString>,
                        cb: |JSRef<Attr>| -> bool);
    fn parse_attribute(self, namespace: &Namespace, local_name: &Atom,
                       value: DOMString) -> AttrValue;

    fn remove_attribute(self, namespace: Namespace, name: &str);
    fn has_class(&self, name: &Atom) -> bool;

    fn set_atomic_attribute(self, name: &Atom, value: DOMString);

    // http://www.whatwg.org/html/#reflecting-content-attributes-in-idl-attributes
    fn has_attribute(self, name: &Atom) -> bool;
    fn set_bool_attribute(self, name: &Atom, value: bool);
    fn get_url_attribute(self, name: &Atom) -> DOMString;
    fn set_url_attribute(self, name: &Atom, value: DOMString);
    fn get_string_attribute(self, name: &Atom) -> DOMString;
    fn
set_string_attribute(self, name: &Atom, value: DOMString);
    fn get_tokenlist_attribute(self, name: &Atom) -> Vec<Atom>;
    fn set_tokenlist_attribute(self, name: &Atom, value: DOMString);
    fn set_atomic_tokenlist_attribute(self, name: &Atom, tokens: Vec<Atom>);
    fn get_uint_attribute(self, name: &Atom) -> u32;
    fn set_uint_attribute(self, name: &Atom, value: u32);
}

impl<'a> AttributeHandlers for JSRef<'a, Element> {
    fn get_attribute(self, namespace: Namespace, local_name: &Atom)
                     -> Option<Temporary<Attr>> {
        // Filter the same-local-name candidates down to the one in the
        // requested namespace.
        self.get_attributes(local_name).iter().map(|attr| attr.root())
            .find(|attr| *attr.namespace() == namespace)
            .map(|x| Temporary::from_rooted(*x))
    }

    /// All attributes with the given local name, across namespaces.
    fn get_attributes(self, local_name: &Atom) -> Vec<Temporary<Attr>> {
        self.attrs.borrow().iter().map(|attr| attr.root()).filter_map(|attr| {
            if *attr.local_name() == *local_name {
                Some(Temporary::from_rooted(*attr))
            } else {
                None
            }
        }).collect()
    }

    fn set_attribute_from_parser(self, qname: QualName, value: DOMString,
                                 prefix: Option<DOMString>) {
        // Don't set if the attribute already exists, so we can handle add_attrs_if_missing
        if self.attrs.borrow().iter().map(|attr| attr.root())
                .any(|a| *a.local_name() == qname.local && *a.namespace() == qname.ns) {
            return;
        }

        // The qualified name is "prefix:local" when a prefix is present.
        let name = match prefix {
            None => qname.local.clone(),
            Some(ref prefix) => {
                let name = format!("{:s}:{:s}", *prefix, qname.local.as_slice());
                Atom::from_slice(name.as_slice())
            },
        };
        let value = self.parse_attribute(&qname.ns, &qname.local, value);
        self.do_set_attribute(qname.local, value, name, qname.ns, prefix, |_| false)
    }

    fn set_attribute(self, name: &Atom, value: AttrValue) {
        // Callers must pass an already-lowercased, prefix-free name.
        assert!(name.as_slice() == name.as_slice().to_ascii_lower().as_slice());
        assert!(!name.as_slice().contains(":"));

        self.do_set_attribute(name.clone(), value, name.clone(),
                              ns!(""), None, |attr| *attr.local_name() == *name);
    }

    // https://html.spec.whatwg.org/multipage/dom.html#attr-data-*
    fn set_custom_attribute(self, name: DOMString, value: DOMString) -> ErrorResult {
        // Step 1.
        match xml_name_type(name.as_slice()) {
            InvalidXMLName => return Err(InvalidCharacter),
            _ => {}
        }

        // Steps 2-5.
        let name = Atom::from_slice(name.as_slice());
        let value = self.parse_attribute(&ns!(""), &name, value);
        self.do_set_attribute(name.clone(), value, name.clone(), ns!(""), None, |attr| {
            *attr.name() == name && *attr.namespace() == ns!("")
        });
        Ok(())
    }

    /// Core attribute setter: finds the attribute matched by `cb` (replace)
    /// or creates a new one (first set), then assigns `value` to it.
    fn do_set_attribute(self, local_name: Atom, value: AttrValue,
                        name: Atom, namespace: Namespace,
                        prefix: Option<DOMString>, cb: |JSRef<Attr>| -> bool) {
        let idx = self.attrs.borrow().iter()
                                     .map(|attr| attr.root())
                                     .position(|attr| cb(*attr));
        let (idx, set_type) = match idx {
            Some(idx) => (idx, AttrSettingType::ReplacedAttr),
            None => {
                let window = window_from_node(self).root();
                let attr = Attr::new(*window, local_name, value.clone(),
                                     name, namespace.clone(), prefix, Some(self));
                self.attrs.borrow_mut().push_unrooted(&attr);
                (self.attrs.borrow().len() - 1, AttrSettingType::FirstSetAttr)
            }
        };

        (*self.attrs.borrow())[idx].root().set_value(set_type, value, self);
    }

    /// Parses a raw attribute string into a typed AttrValue. Null-namespace
    /// attributes go through the element's virtual-method table so subclasses
    /// can supply specialized parsing.
    fn parse_attribute(self, namespace: &Namespace, local_name: &Atom,
                       value: DOMString) -> AttrValue {
        if *namespace == ns!("") {
            vtable_for(&NodeCast::from_ref(self))
                .parse_plain_attribute(local_name, value)
        } else {
            AttrValue::String(value)
        }
    }

    fn remove_attribute(self, namespace: Namespace, name: &str) {
        // NOTE(review): only the local-name part of `name` is matched here;
        // the prefix returned by get_attribute_parts is discarded.
        let (_, local_name) = get_attribute_parts(name);
        let local_name = Atom::from_slice(local_name);

        let idx = self.attrs.borrow().iter().map(|attr| attr.root()).position(|attr| {
            *attr.local_name() == local_name
        });

        match idx {
            None => (),
            Some(idx) => {
                // Give virtual methods a chance to react before removal
                // (null-namespace attributes only).
                if namespace == ns!("") {
                    let attr = (*self.attrs.borrow())[idx].root();
                    vtable_for(&NodeCast::from_ref(self)).before_remove_attr(*attr);
                }

                self.attrs.borrow_mut().remove(idx);

                // Notify the document so layout can recompute styles.
                let node: JSRef<Node> = NodeCast::from_ref(self);
                if node.is_in_doc() {
                    let document = document_from_node(self).root();
                    if local_name == atom!("style") {
                        document.content_changed(node, NodeDamage::NodeStyleDamaged);
                    } else {
                        document.content_changed(node, NodeDamage::OtherNodeDamage);
                    }
                }
            }
        };
    }

    /// Token-membership test on the `class` attribute (script-thread analog
    /// of has_class_for_layout).
    fn has_class(&self, name: &Atom) -> bool {
        self.get_attribute(ns!(""), &atom!("class")).root().map(|attr| {
            attr.value().tokens().map(|tokens| {
                tokens.iter().any(|atom| atom == name)
            }).unwrap_or(false)
        }).unwrap_or(false)
    }

    fn set_atomic_attribute(self, name: &Atom, value: DOMString) {
        assert!(name.as_slice().eq_ignore_ascii_case(name.as_slice()));
        let value = AttrValue::from_atomic(value);
        self.set_attribute(name, value);
    }

    fn has_attribute(self, name: &Atom) -> bool {
        // Name must already be ASCII-lowercase.
        assert!(name.as_slice().chars().all(|ch| {
            !ch.is_ascii() || ch.to_ascii().to_lowercase() == ch.to_ascii()
        }));
        self.attrs.borrow().iter().map(|attr| attr.root()).any(|attr| {
            *attr.local_name() == *name && *attr.namespace() == ns!("")
        })
    }

    /// Boolean content attributes reflect as presence/absence of the
    /// attribute (value itself is an empty string).
    fn set_bool_attribute(self, name: &Atom, value: bool) {
        if self.has_attribute(name) == value { return; }
        if value {
            self.set_string_attribute(name, String::new());
        } else {
            self.remove_attribute(ns!(""), name.as_slice());
        }
    }

    fn get_url_attribute(self, name: &Atom) -> DOMString {
        assert!(name.as_slice() == name.as_slice().to_ascii_lower().as_slice());
        if !self.has_attribute(name) {
            return "".to_string();
        }
        let url = self.get_string_attribute(name);
        let doc = document_from_node(self).root();
        let base = doc.url();
        // https://html.spec.whatwg.org/multipage/infrastructure.html#reflect
        // XXXManishearth this doesn't handle `javascript:` urls properly
        match UrlParser::new().base_url(base).parse(url.as_slice()) {
            Ok(parsed) => parsed.serialize(),
            Err(_) => "".to_string()
        }
    }
    fn set_url_attribute(self, name: &Atom, value: DOMString) {
        self.set_string_attribute(name, value);
    }

    fn get_string_attribute(self, name: &Atom) -> DOMString {
        match self.get_attribute(ns!(""), name) {
            Some(x) => x.root().Value(),
            None => "".to_string()
        }
    }
    fn set_string_attribute(self, name: &Atom, value: DOMString) {
        assert!(name.as_slice() == name.as_slice().to_ascii_lower().as_slice());
        self.set_attribute(name,
AttrValue::String(value));
    }

    /// Token vector of a tokenlist-valued attribute; empty when absent.
    /// Panics if the stored AttrValue is not token-backed.
    fn get_tokenlist_attribute(self, name: &Atom) -> Vec<Atom> {
        self.get_attribute(ns!(""), name).root().map(|attr| {
            attr.value()
                .tokens()
                .expect("Expected a TokenListAttrValue")
                .to_vec()
        }).unwrap_or(vec!())
    }

    fn set_tokenlist_attribute(self, name: &Atom, value: DOMString) {
        assert!(name.as_slice() == name.as_slice().to_ascii_lower().as_slice());
        self.set_attribute(name, AttrValue::from_serialized_tokenlist(value));
    }

    fn set_atomic_tokenlist_attribute(self, name: &Atom, tokens: Vec<Atom>) {
        assert!(name.as_slice() == name.as_slice().to_ascii_lower().as_slice());
        self.set_attribute(name, AttrValue::from_atomic_tokens(tokens));
    }

    /// Unsigned-int reflected attribute; 0 when the attribute is absent.
    /// Panics if the stored AttrValue is not UInt-backed (i.e. the subclass
    /// forgot to parse it in parse_plain_attribute).
    fn get_uint_attribute(self, name: &Atom) -> u32 {
        assert!(name.as_slice().chars().all(|ch| {
            !ch.is_ascii() || ch.to_ascii().to_lowercase() == ch.to_ascii()
        }));
        let attribute = self.get_attribute(ns!(""), name).root();
        match attribute {
            Some(attribute) => {
                match *attribute.value() {
                    AttrValue::UInt(_, value) => value,
                    _ => panic!("Expected an AttrValue::UInt: \
                                 implement parse_plain_attribute"),
                }
            }
            None => 0,
        }
    }
    fn set_uint_attribute(self, name: &Atom, value: u32) {
        assert!(name.as_slice() == name.as_slice().to_ascii_lower().as_slice());
        self.set_attribute(name, AttrValue::UInt(value.to_string(), value));
    }
}

impl<'a> ElementMethods for JSRef<'a, Element> {
    // http://dom.spec.whatwg.org/#dom-element-namespaceuri
    fn GetNamespaceURI(self) -> Option<DOMString> {
        match self.namespace {
            // The empty namespace reflects as null.
            ns!("") => None,
            Namespace(ref ns) => Some(ns.as_slice().to_string())
        }
    }

    fn LocalName(self) -> DOMString {
        self.local_name.as_slice().to_string()
    }

    // http://dom.spec.whatwg.org/#dom-element-prefix
    fn GetPrefix(self) -> Option<DOMString> {
        self.prefix.clone()
    }

    // http://dom.spec.whatwg.org/#dom-element-tagname
    fn TagName(self) -> DOMString {
        let qualified_name = match self.prefix {
            Some(ref prefix) => {
                (format!("{:s}:{:s}",
                         prefix.as_slice(),
                         self.local_name.as_slice())).into_maybe_owned()
            },
            None => self.local_name.as_slice().into_maybe_owned()
        };
        // HTML elements in HTML documents report an uppercased tag name.
        if self.html_element_in_html_document() {
            qualified_name.as_slice().to_ascii_upper()
        } else {
            qualified_name.into_string()
        }
    }

    // http://dom.spec.whatwg.org/#dom-element-id
    fn Id(self) -> DOMString {
        self.get_string_attribute(&atom!("id"))
    }

    // http://dom.spec.whatwg.org/#dom-element-id
    fn SetId(self, id: DOMString) {
        self.set_atomic_attribute(&atom!("id"), id);
    }

    // http://dom.spec.whatwg.org/#dom-element-classname
    fn ClassName(self) -> DOMString {
        self.get_string_attribute(&atom!("class"))
    }

    // http://dom.spec.whatwg.org/#dom-element-classname
    fn SetClassName(self, class: DOMString) {
        self.set_tokenlist_attribute(&atom!("class"), class);
    }

    // http://dom.spec.whatwg.org/#dom-element-classlist
    fn ClassList(self) -> Temporary<DOMTokenList> {
        // Created lazily and cached on the element.
        self.class_list.or_init(|| DOMTokenList::new(self, &atom!("class")))
    }

    // http://dom.spec.whatwg.org/#dom-element-attributes
    fn Attributes(self) -> Temporary<NamedNodeMap> {
        self.attr_list.or_init(|| {
            let doc = {
                let node: JSRef<Node> = NodeCast::from_ref(self);
                node.owner_doc().root()
            };
            let window = doc.window().root();
            NamedNodeMap::new(*window, self)
        })
    }

    // http://dom.spec.whatwg.org/#dom-element-getattribute
    fn GetAttribute(self, name: DOMString) -> Option<DOMString> {
        let name = self.parsed_name(name);
        self.get_attribute(ns!(""), &Atom::from_slice(name.as_slice())).root()
            .map(|s| s.Value())
    }

    // http://dom.spec.whatwg.org/#dom-element-getattributens
    fn GetAttributeNS(self,
                      namespace: Option<DOMString>,
                      local_name: DOMString) -> Option<DOMString> {
        let namespace = namespace::from_domstring(namespace);
        self.get_attribute(namespace, &Atom::from_slice(local_name.as_slice())).root()
            .map(|attr| attr.Value())
    }

    // http://dom.spec.whatwg.org/#dom-element-setattribute
    fn SetAttribute(self,
                    name: DOMString,
                    value: DOMString) -> ErrorResult {
        // Step 1.
        match xml_name_type(name.as_slice()) {
            InvalidXMLName => return Err(InvalidCharacter),
            _ => {}
        }

        // Step 2.
let name = self.parsed_name(name);

        // Step 3-5.
        let name = Atom::from_slice(name.as_slice());
        let value = self.parse_attribute(&ns!(""), &name, value);
        self.do_set_attribute(name.clone(), value, name.clone(), ns!(""), None, |attr| {
            *attr.name() == name
        });
        Ok(())
    }

    // http://dom.spec.whatwg.org/#dom-element-setattributens
    fn SetAttributeNS(self,
                      namespace_url: Option<DOMString>,
                      name: DOMString,
                      value: DOMString) -> ErrorResult {
        // Step 1.
        let namespace = namespace::from_domstring(namespace_url);

        let name_type = xml_name_type(name.as_slice());
        match name_type {
            // Step 2.
            InvalidXMLName => return Err(InvalidCharacter),
            // Step 3.
            Name => return Err(NamespaceError),
            QName => {}
        }

        // Step 4.
        let (prefix, local_name) = get_attribute_parts(name.as_slice());
        match prefix {
            Some(ref prefix_str) => {
                // Step 5: a prefixed name requires a non-empty namespace.
                if namespace == ns!("") {
                    return Err(NamespaceError);
                }

                // Step 6: "xml" prefix is reserved for the XML namespace.
                if "xml" == prefix_str.as_slice() && namespace != ns!(XML) {
                    return Err(NamespaceError);
                }

                // Step 7b: "xmlns" prefix is reserved for the XMLNS namespace.
                if "xmlns" == prefix_str.as_slice() && namespace != ns!(XMLNS) {
                    return Err(NamespaceError);
                }
            },
            None => {}
        }

        let name = Atom::from_slice(name.as_slice());
        let local_name = Atom::from_slice(local_name);
        let xmlns = atom!("xmlns");

        // Step 7a: bare "xmlns" must carry the XMLNS namespace.
        if xmlns == name && namespace != ns!(XMLNS) {
            return Err(NamespaceError);
        }

        // Step 8: conversely, the XMLNS namespace may only be used with
        // "xmlns" or an "xmlns"-prefixed name.
        if namespace == ns!(XMLNS) && xmlns != name && Some("xmlns") != prefix {
            return Err(NamespaceError);
        }

        // Step 9.
        let value = self.parse_attribute(&namespace, &local_name, value);
        self.do_set_attribute(local_name.clone(), value, name,
                              namespace.clone(), prefix.map(|s| s.to_string()),
                              |attr| {
                                  *attr.local_name() == local_name &&
                                  *attr.namespace() == namespace
                              });
        Ok(())
    }

    // http://dom.spec.whatwg.org/#dom-element-removeattribute
    fn RemoveAttribute(self, name: DOMString) {
        let name = self.parsed_name(name);
        self.remove_attribute(ns!(""), name.as_slice())
    }

    // http://dom.spec.whatwg.org/#dom-element-removeattributens
    fn RemoveAttributeNS(self,
                         namespace: Option<DOMString>,
                         localname: DOMString) {
        let namespace = namespace::from_domstring(namespace);
        self.remove_attribute(namespace, localname.as_slice())
    }

    // http://dom.spec.whatwg.org/#dom-element-hasattribute
    fn HasAttribute(self, name: DOMString) -> bool {
        self.GetAttribute(name).is_some()
    }

    // http://dom.spec.whatwg.org/#dom-element-hasattributens
    fn HasAttributeNS(self,
                      namespace: Option<DOMString>,
                      local_name: DOMString) -> bool {
        self.GetAttributeNS(namespace, local_name).is_some()
    }

    fn GetElementsByTagName(self, localname: DOMString) -> Temporary<HTMLCollection> {
        let window = window_from_node(self).root();
        HTMLCollection::by_tag_name(*window, NodeCast::from_ref(self), localname)
    }

    fn GetElementsByTagNameNS(self, maybe_ns: Option<DOMString>,
                              localname: DOMString) -> Temporary<HTMLCollection> {
        let window = window_from_node(self).root();
        HTMLCollection::by_tag_name_ns(*window, NodeCast::from_ref(self), localname, maybe_ns)
    }

    fn GetElementsByClassName(self, classes: DOMString) -> Temporary<HTMLCollection> {
        let window = window_from_node(self).root();
        HTMLCollection::by_class_name(*window, NodeCast::from_ref(self), classes)
    }

    // http://dev.w3.org/csswg/cssom-view/#dom-element-getclientrects
    fn GetClientRects(self) -> Temporary<DOMRectList> {
        let win = window_from_node(self).root();
        let node: JSRef<Node> = NodeCast::from_ref(self);
        let rects = node.get_content_boxes();
        // DOMRect::new takes (top, bottom, left, right) edges here.
        let rects: Vec<Root<DOMRect>> = rects.iter().map(|r| {
            DOMRect::new(
                *win,
                r.origin.y,
                r.origin.y + r.size.height,
                r.origin.x,
                r.origin.x + r.size.width).root()
        }).collect();

        DOMRectList::new(*win, rects.iter().map(|rect| rect.deref().clone()).collect())
    }

    // http://dev.w3.org/csswg/cssom-view/#dom-element-getboundingclientrect
    fn GetBoundingClientRect(self) -> Temporary<DOMRect> {
        let win = window_from_node(self).root();
        let node: JSRef<Node> = NodeCast::from_ref(self);
        let rect = node.get_bounding_content_box();
        DOMRect::new(
            *win,
            rect.origin.y,
            rect.origin.y + rect.size.height,
            rect.origin.x,
            rect.origin.x + rect.size.width)
    }

    fn GetInnerHTML(self) -> Fallible<DOMString> {
        //XXX TODO: XML case
        Ok(serialize(&mut NodeIterator::new(NodeCast::from_ref(self), false, false)))
    }

    fn GetOuterHTML(self) -> Fallible<DOMString> {
        Ok(serialize(&mut NodeIterator::new(NodeCast::from_ref(self), true, false)))
    }

    // http://dom.spec.whatwg.org/#dom-parentnode-children
    fn Children(self) -> Temporary<HTMLCollection> {
        let window = window_from_node(self).root();
        HTMLCollection::children(*window, NodeCast::from_ref(self))
    }

    // http://dom.spec.whatwg.org/#dom-parentnode-queryselector
    fn QuerySelector(self, selectors: DOMString) -> Fallible<Option<Temporary<Element>>> {
        let root: JSRef<Node> = NodeCast::from_ref(self);
        root.query_selector(selectors)
    }

    // http://dom.spec.whatwg.org/#dom-parentnode-queryselectorall
    fn QuerySelectorAll(self, selectors: DOMString) -> Fallible<Temporary<NodeList>> {
        let root: JSRef<Node> = NodeCast::from_ref(self);
        root.query_selector_all(selectors)
    }

    // http://dom.spec.whatwg.org/#dom-childnode-remove
    fn Remove(self) {
        let node: JSRef<Node> = NodeCast::from_ref(self);
        node.remove_self();
    }

    // http://dom.spec.whatwg.org/#dom-element-matches
    fn Matches(self, selectors: DOMString) -> Fallible<bool> {
        let parser_context = ParserContext {
            origin: StylesheetOrigin::Author,
        };
        match style::parse_selector_list_from_str(&parser_context, selectors.as_slice()) {
            // Invalid selector text is a Syntax error per spec.
            Err(()) => Err(Syntax),
            Ok(ref selectors) => {
                // NOTE(review): chunk ends mid-expression — the remainder of
                // this method lies outside this view.
                let root:
            JSRef<Node> = NodeCast::from_ref(self);
                Ok(matches(selectors, &root, &mut None))
            }
        }
    }
}

/// Splits a (possibly prefixed) attribute name into `(prefix, local_name)`:
/// `"foo:bar"` yields `(Some("foo"), "bar")`, an unprefixed name yields
/// `(None, name)`.
///
/// NOTE(review): `splitn(1, ':')` is pre-1.0 Rust semantics — perform at most
/// one split, yielding up to two pieces — so both `next()` calls below are
/// safe to unwrap once `name` contains a `':'`.
pub fn get_attribute_parts<'a>(name: &'a str) -> (Option<&'a str>, &'a str) {
    //FIXME: Throw for XML-invalid names
    //FIXME: Throw for XMLNS-invalid names
    let (prefix, local_name) = if name.contains(":") {
        let mut parts = name.splitn(1, ':');
        (Some(parts.next().unwrap()), parts.next().unwrap())
    } else {
        (None, name)
    };

    (prefix, local_name)
}

impl<'a> VirtualMethods for JSRef<'a, Element> {
    // Chains virtual-method dispatch upward by casting this element to its
    // supertype, Node.
    fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods> {
        let node: &JSRef<Node> = NodeCast::from_borrowed_ref(self);
        Some(node as &VirtualMethods)
    }

    // Runs after an attribute is set: chains to the supertype first, then
    // updates cached state (style attribute, document id registry) and marks
    // the node damaged so layout/style are recomputed.
    fn after_set_attr(&self, attr: JSRef<Attr>) {
        match self.super_type() {
            Some(ref s) => s.after_set_attr(attr),
            _ => ()
        }

        match attr.local_name() {
            &atom!("style") => {
                // Modifying the `style` attribute might change style.
                let node: JSRef<Node> = NodeCast::from_ref(*self);
                let doc = document_from_node(*self).root();
                let base_url = doc.url().clone();
                let value = attr.value();
                // Re-parse the inline style declaration and cache it on the
                // element; the document URL serves as the base for relative URLs.
                let style = Some(style::parse_style_attribute(value.as_slice(), &base_url));
                *self.style_attribute.borrow_mut() = style;
                if node.is_in_doc() {
                    doc.content_changed(node, NodeDamage::NodeStyleDamaged);
                }
            }
            &atom!("class") => {
                // Modifying a class can change style.
                let node: JSRef<Node> = NodeCast::from_ref(*self);
                if node.is_in_doc() {
                    let document = document_from_node(*self).root();
                    document.content_changed(node, NodeDamage::NodeStyleDamaged);
                }
            }
            &atom!("id") => {
                // Modifying an ID might change style.
                let node: JSRef<Node> = NodeCast::from_ref(*self);
                let value = attr.value();
                if node.is_in_doc() {
                    let doc = document_from_node(*self).root();
                    // Keep the document's id -> element registry in sync;
                    // empty ids are not registered.
                    if !value.as_slice().is_empty() {
                        let value = Atom::from_slice(value.as_slice());
                        doc.register_named_element(*self, value);
                    }
                    doc.content_changed(node, NodeDamage::NodeStyleDamaged);
                }
            }
            _ => {
                // Modifying any other attribute might change arbitrary things.
let node: JSRef<Node> = NodeCast::from_ref(*self); if node.is_in_doc() { let document = document_from_node(*self).root(); document.content_changed(node, NodeDamage::OtherNodeDamage); } } } } fn before_remove_attr(&self, attr: JSRef<Attr>) { match self.super_type() { Some(ref s) => s.before_remove_attr(attr), _ => () } match attr.local_name() { &atom!("style") => { // Modifying the `style` attribute might change style. *self.style_attribute.borrow_mut() = None; let node: JSRef<Node> = NodeCast::from_ref(*self); if node.is_in_doc() { let doc = document_from_node(*self).root(); doc.content_changed(node, NodeDamage::NodeStyleDamaged); } } &atom!("id") => { // Modifying an ID can change style. let node: JSRef<Node> = NodeCast::from_ref(*self); let value = attr.value(); if node.is_in_doc() { let doc = document_from_node(*self).root(); if !value.as_slice().is_empty() { let value = Atom::from_slice(value.as_slice()); doc.unregister_named_element(*self, value); } doc.content_changed(node, NodeDamage::NodeStyleDamaged); } } &atom!("class") => { // Modifying a class can change style. let node: JSRef<Node> = NodeCast::from_ref(*self); if node.is_in_doc() { let document = document_from_node(*self).root(); document.content_changed(node, NodeDamage::NodeStyleDamaged); } } _ => { // Modifying any other attribute might change arbitrary things. 
let node: JSRef<Node> = NodeCast::from_ref(*self); if node.is_in_doc() { let doc = document_from_node(*self).root(); doc.content_changed(node, NodeDamage::OtherNodeDamage); } } } } fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue { match name { &atom!("id") => AttrValue::from_atomic(value), &atom!("class") => AttrValue::from_serialized_tokenlist(value), _ => self.super_type().unwrap().parse_plain_attribute(name, value), } } fn bind_to_tree(&self, tree_in_doc: bool) { match self.super_type() { Some(ref s) => s.bind_to_tree(tree_in_doc), _ => (), } if !tree_in_doc { return; } match self.get_attribute(ns!(""), &atom!("id")).root() { Some(attr) => { let doc = document_from_node(*self).root(); let value = attr.Value(); if !value.is_empty() { let value = Atom::from_slice(value.as_slice()); doc.register_named_element(*self, value); } } _ => () } } fn unbind_from_tree(&self, tree_in_doc: bool) { match self.super_type() { Some(ref s) => s.unbind_from_tree(tree_in_doc), _ => (), } if !tree_in_doc { return; } match self.get_attribute(ns!(""), &atom!("id")).root() { Some(attr) => { let doc = document_from_node(*self).root(); let value = attr.Value(); if !value.is_empty() { let value = Atom::from_slice(value.as_slice()); doc.unregister_named_element(*self, value); } } _ => () } } } impl<'a> style::TElement<'a> for JSRef<'a, Element> { fn get_attr(self, namespace: &Namespace, attr: &Atom) -> Option<&'a str> { self.get_attribute(namespace.clone(), attr).root().map(|attr| { // This transmute is used to cheat the lifetime restriction. unsafe { mem::transmute(attr.value().as_slice()) } }) } fn get_attrs(self, attr: &Atom) -> Vec<&'a str> { self.get_attributes(attr).iter().map(|attr| attr.root()).map(|attr| { // This transmute is used to cheat the lifetime restriction. unsafe { mem::transmute(attr.value().as_slice()) } }).collect() } fn get_link(self) -> Option<&'a str> { // FIXME: This is HTML only. 
let node: JSRef<Node> = NodeCast::from_ref(self); match node.type_id() { // http://www.whatwg.org/specs/web-apps/current-work/multipage/selectors.html# // selector-link NodeTypeId::Element(ElementTypeId::HTMLAnchorElement) | NodeTypeId::Element(ElementTypeId::HTMLAreaElement) | NodeTypeId::Element(ElementTypeId::HTMLLinkElement) => self.get_attr(&ns!(""), &atom!("href")), _ => None, } } fn get_local_name(self) -> &'a Atom { // FIXME(zwarich): Remove this when UFCS lands and there is a better way // of disambiguating methods. fn get_local_name<'a, T: ElementHelpers<'a>>(this: T) -> &'a Atom { this.local_name() } get_local_name(self) } fn get_namespace(self) -> &'a Namespace { // FIXME(zwarich): Remove this when UFCS lands and there is a better way // of disambiguating methods. fn get_namespace<'a, T: ElementHelpers<'a>>(this: T) -> &'a Namespace { this.namespace() } get_namespace(self) } fn get_hover_state(self) -> bool { let node: JSRef<Node> = NodeCast::from_ref(self); node.get_hover_state() } fn get_id(self) -> Option<Atom> { self.get_attribute(ns!(""), &atom!("id")).map(|attr| { let attr = attr.root(); match *attr.value() { AttrValue::Atom(ref val) => val.clone(), _ => panic!("`id` attribute should be AttrValue::Atom"), } }) } fn get_disabled_state(self) -> bool { let node: JSRef<Node> = NodeCast::from_ref(self); node.get_disabled_state() } fn get_enabled_state(self) -> bool { let node: JSRef<Node> = NodeCast::from_ref(self); node.get_enabled_state() } fn get_checked_state(self) -> bool { match HTMLInputElementCast::to_ref(self) { Some(input) => input.Checked(), None => false, } } fn get_indeterminate_state(self) -> bool { match HTMLInputElementCast::to_ref(self) { Some(input) => input.get_indeterminate_state(), None => false, } } fn has_class(self, name: &Atom) -> bool { // FIXME(zwarich): Remove this when UFCS lands and there is a better way // of disambiguating methods. 
fn has_class<T: AttributeHandlers>(this: T, name: &Atom) -> bool { this.has_class(name) } has_class(self, name) } fn each_class(self, callback: |&Atom|) { match self.get_attribute(ns!(""), &atom!("class")).root() { None => {} Some(ref attr) => { match attr.value().tokens() { None => {} Some(tokens) => { for token in tokens.iter() { callback(token) } } } } } } fn has_nonzero_border(self) -> bool { match HTMLTableElementCast::to_ref(self) { None => false, Some(this) => { match this.get_border() { None | Some(0) => false, Some(_) => true, } } } } } pub trait ActivationElementHelpers<'a> { fn as_maybe_activatable(&'a self) -> Option<&'a Activatable + 'a>; fn click_in_progress(self) -> bool; fn set_click_in_progress(self, click: bool); fn nearest_activable_element(self) -> Option<Temporary<Element>>; fn authentic_click_activation<'b>(self, event: JSRef<'b, Event>); } impl<'a> ActivationElementHelpers<'a> for JSRef<'a, Element> { fn as_maybe_activatable(&'a self) -> Option<&'a Activatable + 'a> { let node: JSRef<Node> = NodeCast::from_ref(*self); match node.type_id() { NodeTypeId::Element(ElementTypeId::HTMLInputElement) => { let element: &'a JSRef<'a, HTMLInputElement> = HTMLInputElementCast::to_borrowed_ref(self).unwrap(); Some(element as &'a Activatable + 'a) }, _ => { None } } } fn click_in_progress(self) -> bool { let node: JSRef<Node> = NodeCast::from_ref(self); node.get_flag(CLICK_IN_PROGRESS) } fn set_click_in_progress(self, click: bool) { let node: JSRef<Node> = NodeCast::from_ref(self); node.set_flag(CLICK_IN_PROGRESS, click) } // https://html.spec.whatwg.org/multipage/interaction.html#nearest-activatable-element fn nearest_activable_element(self) -> Option<Temporary<Element>> { match self.as_maybe_activatable() { Some(el) => Some(Temporary::from_rooted(*el.as_element().root())), None => { let node: JSRef<Node> = NodeCast::from_ref(self); node.ancestors() .filter_map(|node| ElementCast::to_ref(node)) .filter(|e| e.as_maybe_activatable().is_some()).next() 
.map(|r| Temporary::from_rooted(r)) } } } /// Please call this method *only* for real click events /// /// https://html.spec.whatwg.org/multipage/interaction.html#run-authentic-click-activation-steps /// /// Use an element's synthetic click activation (or handle_event) for any script-triggered clicks. /// If the spec says otherwise, check with Manishearth first fn authentic_click_activation<'b>(self, event: JSRef<'b, Event>) { // Not explicitly part of the spec, however this helps enforce the invariants // required to save state between pre-activation and post-activation // since we cannot nest authentic clicks (unlike synthetic click activation, where // the script can generate more click events from the handler) assert!(!self.click_in_progress()); let target: JSRef<EventTarget> = EventTargetCast::from_ref(self); // Step 2 (requires canvas support) // Step 3 self.set_click_in_progress(true); // Step 4 let e = self.nearest_activable_element().root(); match e { Some(el) => match el.as_maybe_activatable() { Some(elem) => { // Step 5-6 elem.pre_click_activation(); target.dispatch_event(event); if !event.DefaultPrevented() { // post click activation elem.activation_behavior(); } else { elem.canceled_activation(); } } // Step 6 None => {target.dispatch_event(event);} }, // Step 6 None => {target.dispatch_event(event);} } // Step 7 self.set_click_in_progress(false); } }
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! The script task is the task that owns the DOM in memory, runs JavaScript, and spawns parsing //! and layout tasks. It's in charge of processing events for all same-origin pages in a frame //! tree, and manages the entire lifetime of pages in the frame tree from initial request to //! teardown. //! //! Page loads follow a two-step process. When a request for a new page load is received, the //! network request is initiated and the relevant data pertaining to the new page is stashed. //! While the non-blocking request is ongoing, the script task is free to process further events, //! noting when they pertain to ongoing loads (such as resizes/viewport adjustments). When the //! initial response is received for an ongoing load, the second phase starts - the frame tree //! entry is created, along with the Window and Document objects, and the appropriate parser //! takes over the response body. Once parsing is complete, the document lifecycle for loading //! a page runs its course and the script task returns to processing events in the main event //! loop. 
use devtools; use devtools_traits::ScriptToDevtoolsControlMsg; use devtools_traits::{DevtoolScriptControlMsg, DevtoolsPageInfo}; use document_loader::DocumentLoader; use dom::bindings::cell::DOMRefCell; use dom::bindings::codegen::Bindings::DocumentBinding::{DocumentMethods, DocumentReadyState}; use dom::bindings::conversions::{FromJSValConvertible, StringificationBehavior}; use dom::bindings::global::GlobalRef; use dom::bindings::inheritance::Castable; use dom::bindings::js::{JS, RootCollection, trace_roots}; use dom::bindings::js::{Root, RootCollectionPtr, RootedReference}; use dom::bindings::refcounted::{LiveDOMReferences, Trusted, TrustedReference, trace_refcounted_objects}; use dom::bindings::trace::{JSTraceable, RootedVec, trace_traceables}; use dom::bindings::utils::{DOM_CALLBACKS, WRAP_CALLBACKS}; use dom::document::{Document, DocumentProgressHandler, IsHTMLDocument}; use dom::document::{DocumentSource, MouseEventType}; use dom::element::Element; use dom::event::{Event, EventBubbles, EventCancelable}; use dom::node::{Node, NodeDamage, window_from_node}; use dom::servohtmlparser::{ParserContext, ServoHTMLParser}; use dom::uievent::UIEvent; use dom::window::{ReflowReason, ScriptHelpers, Window}; use dom::worker::TrustedWorkerAddress; use euclid::Rect; use euclid::point::Point2D; use hyper::header::{ContentType, HttpDate}; use hyper::header::{Headers, LastModified}; use hyper::method::Method; use hyper::mime::{Mime, SubLevel, TopLevel}; use ipc_channel::ipc::{self, IpcSender}; use ipc_channel::router::ROUTER; use js::glue::CollectServoSizes; use js::jsapi::{DOMProxyShadowsResult, HandleId, HandleObject, RootedValue, SetDOMProxyInformation}; use js::jsapi::{DisableIncrementalGC, JS_AddExtraGCRootsTracer, JS_SetWrapObjectCallbacks}; use js::jsapi::{GCDescription, GCProgress, JSGCInvocationKind, SetGCSliceCallback}; use js::jsapi::{JSAutoRequest, JSGCStatus, JS_GetRuntime, JS_SetGCCallback, SetDOMCallbacks}; use js::jsapi::{JSContext, JSRuntime, JSTracer}; use 
js::jsapi::{JSObject, SetPreserveWrapperCallback}; use js::jsval::UndefinedValue; use js::rust::Runtime; use layout_interface::{ReflowQueryType}; use layout_interface::{self, LayoutChan, NewLayoutTaskInfo, ReflowGoal, ScriptLayoutChan}; use libc; use mem::heap_size_of_self_and_children; use msg::compositor_msg::{EventResult, LayerId, ScriptToCompositorMsg}; use msg::constellation_msg::Msg as ConstellationMsg; use msg::constellation_msg::{ConstellationChan, FocusType, LoadData}; use msg::constellation_msg::{MozBrowserEvent, PipelineId}; use msg::constellation_msg::{PipelineNamespace}; use msg::constellation_msg::{SubpageId, WindowSizeData, WorkerId}; use msg::webdriver_msg::WebDriverScriptCommand; use net_traits::LoadData as NetLoadData; use net_traits::image_cache_task::{ImageCacheChan, ImageCacheResult, ImageCacheTask}; use net_traits::storage_task::StorageTask; use net_traits::{AsyncResponseTarget, ControlMsg, LoadConsumer, Metadata, ResourceTask}; use network_listener::NetworkListener; use page::{Frame, IterablePage, Page}; use parse::html::{ParseContext, parse_html}; use profile_traits::mem::{self, OpaqueSender, Report, ReportKind, ReportsChan}; use profile_traits::time::{self, ProfilerCategory, profile}; use script_traits::CompositorEvent::{ClickEvent, ResizeEvent}; use script_traits::CompositorEvent::{KeyEvent, MouseMoveEvent}; use script_traits::CompositorEvent::{MouseDownEvent, MouseUpEvent, TouchEvent}; use script_traits::{CompositorEvent, ConstellationControlMsg}; use script_traits::{InitialScriptState, MouseButton, NewLayoutInfo}; use script_traits::{OpaqueScriptLayoutChannel, ScriptState, ScriptTaskFactory}; use script_traits::{TimerEvent, TimerEventChan, TimerEventRequest, TimerSource}; use script_traits::{TouchEventType, TouchId}; use std::any::Any; use std::borrow::ToOwned; use std::cell::{Cell, RefCell}; use std::collections::HashSet; use std::io::{Write, stdout}; use std::marker::PhantomData; use std::mem as std_mem; use std::option::Option; use 
std::ptr; use std::rc::Rc; use std::result::Result; use std::sync::atomic::{Ordering, AtomicBool}; use std::sync::mpsc::{Receiver, Select, Sender, channel}; use std::sync::{Arc, Mutex}; use string_cache::Atom; use time::{Tm, now}; use url::{Url, UrlParser}; use util::opts; use util::str::DOMString; use util::task::spawn_named_with_send_on_failure; use util::task_state; use webdriver_handlers; thread_local!(pub static STACK_ROOTS: Cell<Option<RootCollectionPtr>> = Cell::new(None)); thread_local!(static SCRIPT_TASK_ROOT: RefCell<Option<*const ScriptTask>> = RefCell::new(None)); unsafe extern fn trace_rust_roots(tr: *mut JSTracer, _data: *mut libc::c_void) { SCRIPT_TASK_ROOT.with(|root| { if let Some(script_task) = *root.borrow() { (*script_task).trace(tr); } }); trace_traceables(tr); trace_roots(tr); } /// A document load that is in the process of fetching the requested resource. Contains /// data that will need to be present when the document and frame tree entry are created, /// but is only easily available at initiation of the load and on a push basis (so some /// data will be updated according to future resize events, viewport changes, etc.) #[derive(JSTraceable)] struct InProgressLoad { /// The pipeline which requested this load. pipeline_id: PipelineId, /// The parent pipeline and child subpage associated with this load, if any. parent_info: Option<(PipelineId, SubpageId)>, /// The current window size associated with this pipeline. window_size: Option<WindowSizeData>, /// Channel to the layout task associated with this pipeline. layout_chan: LayoutChan, /// The current viewport clipping rectangle applying to this pipeline, if any. clip_rect: Option<Rect<f32>>, /// The requested URL of the load. url: Url, } impl InProgressLoad { /// Create a new InProgressLoad object. 
fn new(id: PipelineId, parent_info: Option<(PipelineId, SubpageId)>, layout_chan: LayoutChan, window_size: Option<WindowSizeData>, url: Url) -> InProgressLoad { InProgressLoad { pipeline_id: id, parent_info: parent_info, layout_chan: layout_chan, window_size: window_size, clip_rect: None, url: url, } } } /// Encapsulated state required to create cancellable runnables from non-script threads. pub struct RunnableWrapper { pub cancelled: Arc<AtomicBool>, } impl RunnableWrapper { pub fn wrap_runnable<T: Runnable + Send + 'static>(&self, runnable: T) -> Box<Runnable + Send> { box CancellableRunnable { cancelled: self.cancelled.clone(), inner: box runnable, } } } /// A runnable that can be discarded by toggling a shared flag. pub struct CancellableRunnable<T: Runnable + Send> { cancelled: Arc<AtomicBool>, inner: Box<T>, } impl<T: Runnable + Send> Runnable for CancellableRunnable<T> { fn is_cancelled(&self) -> bool { self.cancelled.load(Ordering::Relaxed) } fn handler(self: Box<CancellableRunnable<T>>) { self.inner.handler() } } pub trait Runnable { fn is_cancelled(&self) -> bool { false } fn handler(self: Box<Self>); } pub trait MainThreadRunnable { fn handler(self: Box<Self>, script_task: &ScriptTask); } enum MixedMessage { FromConstellation(ConstellationControlMsg), FromScript(MainThreadScriptMsg), FromDevtools(DevtoolScriptControlMsg), FromImageCache(ImageCacheResult), FromScheduler(TimerEvent), } /// Common messages used to control the event loops in both the script and the worker pub enum CommonScriptMsg { /// Requests that the script task measure its memory usage. The results are sent back via the /// supplied channel. CollectReports(ReportsChan), /// A DOM object's last pinned reference was removed (dispatched to all tasks). RefcountCleanup(TrustedReference), /// Generic message that encapsulates event handling. 
    RunnableMsg(ScriptTaskEventCategory, Box<Runnable + Send>),
}

// Coarse categories attached to each enqueued runnable (see
// `CommonScriptMsg::RunnableMsg`).
// NOTE(review): the consumers are not in view; presumably used to attribute
// script-task work for profiling/tracing — confirm against the event loop.
#[derive(Clone, Copy, Debug, Eq, Hash, JSTraceable, PartialEq)]
pub enum ScriptTaskEventCategory {
    AttachLayout,
    ConstellationMsg,
    DevtoolsMsg,
    DocumentEvent,
    DomEvent,
    FileRead,
    ImageCacheMsg,
    InputEvent,
    NetworkEvent,
    Resize,
    ScriptEvent,
    TimerEvent,
    UpdateReplacedElement,
    SetViewport,
    WebSocketEvent,
    WorkerEvent,
}

/// Messages used to control the script event loop
pub enum MainThreadScriptMsg {
    /// Common variants associated with the script messages
    Common(CommonScriptMsg),
    /// Notify a document that all pending loads are complete.
    DocumentLoadsComplete(PipelineId),
    /// Notifies the script that a window associated with a particular pipeline
    /// should be closed (only dispatched to ScriptTask).
    ExitWindow(PipelineId),
    /// Generic message for running tasks in the ScriptTask
    MainThreadRunnableMsg(Box<MainThreadRunnable + Send>),
    /// Begins a content-initiated load on the specified pipeline (only
    /// dispatched to ScriptTask).
    Navigate(PipelineId, LoadData),
}

/// A cloneable interface for communicating with an event loop.
pub trait ScriptChan {
    /// Send a message to the associated event loop.
    fn send(&self, msg: CommonScriptMsg) -> Result<(), ()>;
    /// Clone this handle.
    fn clone(&self) -> Box<ScriptChan + Send>;
}

impl OpaqueSender<CommonScriptMsg> for Box<ScriptChan + Send> {
    // Panics if the underlying event loop is gone (send returns Err).
    fn send(&self, msg: CommonScriptMsg) {
        // The fully-qualified call plus double deref selects the boxed trait
        // object's `ScriptChan::send`, not this `OpaqueSender` impl.
        ScriptChan::send(&**self, msg).unwrap();
    }
}

/// An interface for receiving ScriptMsg values in an event loop. Used for synchronous DOM
/// APIs that need to abstract over multiple kinds of event loops (worker/main thread) with
/// different Receiver interfaces.
pub trait ScriptPort {
    // Blocking receive of the next common script message.
    // Implementations below panic when the channel is disconnected (unwrap).
    fn recv(&self) -> CommonScriptMsg;
}

impl ScriptPort for Receiver<CommonScriptMsg> {
    // `self.recv()` resolves to the inherent `Receiver::recv` (inherent
    // methods take precedence over trait methods), so this does not recurse.
    fn recv(&self) -> CommonScriptMsg {
        self.recv().unwrap()
    }
}

impl ScriptPort for Receiver<MainThreadScriptMsg> {
    // Only `Common` messages are expected on ports used through this trait;
    // any other main-thread variant is a logic error.
    fn recv(&self) -> CommonScriptMsg {
        match self.recv().unwrap() {
            MainThreadScriptMsg::Common(script_msg) => script_msg,
            _ => panic!("unexpected main thread event message!")
        }
    }
}

impl ScriptPort for Receiver<(TrustedWorkerAddress, CommonScriptMsg)> {
    // Worker ports pair each message with the worker's address; the address
    // is dropped here.
    fn recv(&self) -> CommonScriptMsg {
        self.recv().unwrap().1
    }
}

impl ScriptPort for Receiver<(TrustedWorkerAddress, MainThreadScriptMsg)> {
    fn recv(&self) -> CommonScriptMsg {
        match self.recv().unwrap().1 {
            MainThreadScriptMsg::Common(script_msg) => script_msg,
            _ => panic!("unexpected main thread event message!")
        }
    }
}

/// Encapsulates internal communication of shared messages within the script task.
#[derive(JSTraceable)]
pub struct SendableMainThreadScriptChan(pub Sender<CommonScriptMsg>);

impl ScriptChan for SendableMainThreadScriptChan {
    // Converts a disconnected-channel error into the trait's unit error.
    fn send(&self, msg: CommonScriptMsg) -> Result<(), ()> {
        let SendableMainThreadScriptChan(ref chan) = *self;
        chan.send(msg).map_err(|_| ())
    }

    fn clone(&self) -> Box<ScriptChan + Send> {
        let SendableMainThreadScriptChan(ref chan) = *self;
        box SendableMainThreadScriptChan((*chan).clone())
    }
}

impl SendableMainThreadScriptChan {
    /// Creates a new script chan.
    pub fn new() -> (Receiver<CommonScriptMsg>, Box<SendableMainThreadScriptChan>) {
        let (chan, port) = channel();
        (port, box SendableMainThreadScriptChan(chan))
    }
}

/// Encapsulates internal communication of main thread messages within the script task.
#[derive(JSTraceable)] pub struct MainThreadScriptChan(pub Sender<MainThreadScriptMsg>); impl ScriptChan for MainThreadScriptChan { fn send(&self, msg: CommonScriptMsg) -> Result<(), ()> { let MainThreadScriptChan(ref chan) = *self; chan.send(MainThreadScriptMsg::Common(msg)).map_err(|_| ()) } fn clone(&self) -> Box<ScriptChan + Send> { let MainThreadScriptChan(ref chan) = *self; box MainThreadScriptChan((*chan).clone()) } } impl MainThreadScriptChan { /// Creates a new script chan. pub fn new() -> (Receiver<MainThreadScriptMsg>, Box<MainThreadScriptChan>) { let (chan, port) = channel(); (port, box MainThreadScriptChan(chan)) } } pub struct MainThreadTimerEventChan(Sender<TimerEvent>); impl TimerEventChan for MainThreadTimerEventChan { fn send(&self, event: TimerEvent) -> Result<(), ()> { let MainThreadTimerEventChan(ref chan) = *self; chan.send(event).map_err(|_| ()) } fn clone(&self) -> Box<TimerEventChan + Send> { let MainThreadTimerEventChan(ref chan) = *self; box MainThreadTimerEventChan((*chan).clone()) } } pub struct StackRootTLS<'a>(PhantomData<&'a u32>); impl<'a> StackRootTLS<'a> { pub fn new(roots: &'a RootCollection) -> StackRootTLS<'a> { STACK_ROOTS.with(|ref r| { r.set(Some(RootCollectionPtr(roots as *const _))) }); StackRootTLS(PhantomData) } } impl<'a> Drop for StackRootTLS<'a> { fn drop(&mut self) { STACK_ROOTS.with(|ref r| r.set(None)); } } /// Information for an entire page. Pages are top-level browsing contexts and can contain multiple /// frames. #[derive(JSTraceable)] // ScriptTask instances are rooted on creation, so this is okay #[allow(unrooted_must_root)] pub struct ScriptTask { /// A handle to the information pertaining to page layout page: DOMRefCell<Option<Rc<Page>>>, /// A list of data pertaining to loads that have not yet received a network response incomplete_loads: DOMRefCell<Vec<InProgressLoad>>, /// A handle to the image cache task. image_cache_task: ImageCacheTask, /// A handle to the resource task. 
This is an `Arc` to avoid running out of file descriptors if /// there are many iframes. resource_task: Arc<ResourceTask>, /// A handle to the storage task. storage_task: StorageTask, /// The port on which the script task receives messages (load URL, exit, etc.) port: Receiver<MainThreadScriptMsg>, /// A channel to hand out to script task-based entities that need to be able to enqueue /// events in the event queue. chan: MainThreadScriptChan, /// A channel to hand out to tasks that need to respond to a message from the script task. control_chan: Sender<ConstellationControlMsg>, /// The port on which the constellation and layout tasks can communicate with the /// script task. control_port: Receiver<ConstellationControlMsg>, /// For communicating load url messages to the constellation constellation_chan: ConstellationChan, /// A handle to the compositor for communicating ready state messages. compositor: DOMRefCell<IpcSender<ScriptToCompositorMsg>>, /// The port on which we receive messages from the image cache image_cache_port: Receiver<ImageCacheResult>, /// The channel on which the image cache can send messages to ourself. image_cache_channel: ImageCacheChan, /// For providing contact with the time profiler. time_profiler_chan: time::ProfilerChan, /// For providing contact with the memory profiler. mem_profiler_chan: mem::ProfilerChan, /// For providing instructions to an optional devtools server. devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>, /// For receiving commands from an optional devtools server. Will be ignored if /// no such server exists. devtools_port: Receiver<DevtoolScriptControlMsg>, devtools_sender: IpcSender<DevtoolScriptControlMsg>, /// The JavaScript runtime. js_runtime: Rc<Runtime>, mouse_over_targets: DOMRefCell<Vec<JS<Element>>>, /// List of pipelines that have been owned and closed by this script task. 
closed_pipelines: DOMRefCell<HashSet<PipelineId>>, scheduler_chan: Sender<TimerEventRequest>, timer_event_chan: Sender<TimerEvent>, timer_event_port: Receiver<TimerEvent>, } /// In the event of task failure, all data on the stack runs its destructor. However, there /// are no reachable, owning pointers to the DOM memory, so it never gets freed by default /// when the script task fails. The ScriptMemoryFailsafe uses the destructor bomb pattern /// to forcibly tear down the JS compartments for pages associated with the failing ScriptTask. struct ScriptMemoryFailsafe<'a> { owner: Option<&'a ScriptTask>, } impl<'a> ScriptMemoryFailsafe<'a> { fn neuter(&mut self) { self.owner = None; } fn new(owner: &'a ScriptTask) -> ScriptMemoryFailsafe<'a> { ScriptMemoryFailsafe { owner: Some(owner), } } } impl<'a> Drop for ScriptMemoryFailsafe<'a> { #[allow(unrooted_must_root)] fn drop(&mut self) { match self.owner { Some(owner) => { unsafe { let page = owner.page.borrow_for_script_deallocation(); for page in page.iter() { let window = page.window(); window.clear_js_runtime_for_script_deallocation(); } } } None => (), } } } impl ScriptTaskFactory for ScriptTask { fn create_layout_channel(_phantom: Option<&mut ScriptTask>) -> OpaqueScriptLayoutChannel { let (chan, port) = channel(); ScriptLayoutChan::new(chan, port) } fn clone_layout_channel(_phantom: Option<&mut ScriptTask>, pair: &OpaqueScriptLayoutChannel) -> Box<Any + Send> { box pair.sender() as Box<Any + Send> } fn create(_phantom: Option<&mut ScriptTask>, state: InitialScriptState, layout_chan: &OpaqueScriptLayoutChannel, load_data: LoadData) { let ConstellationChan(const_chan) = state.constellation_chan.clone(); let (script_chan, script_port) = channel(); let layout_chan = LayoutChan(layout_chan.sender()); let failure_info = state.failure_info; spawn_named_with_send_on_failure(format!("ScriptTask {:?}", state.id), task_state::SCRIPT, move || { PipelineNamespace::install(state.pipeline_namespace_id); let roots = 
RootCollection::new();
            let _stack_roots_tls = StackRootTLS::new(&roots);
            let chan = MainThreadScriptChan(script_chan);
            let channel_for_reporter = chan.clone();
            let id = state.id;
            let parent_info = state.parent_info;
            let mem_profiler_chan = state.mem_profiler_chan.clone();
            let window_size = state.window_size;
            // `state` is consumed here, so the fields above were copied out first.
            let script_task = ScriptTask::new(state, script_port, chan);

            // Publish a raw pointer to the task in thread-local storage so the
            // static entry points (page_fetch_complete, process_event, ...) can
            // reach it from callbacks on this same thread.
            SCRIPT_TASK_ROOT.with(|root| {
                *root.borrow_mut() = Some(&script_task as *const _);
            });

            let mut failsafe = ScriptMemoryFailsafe::new(&script_task);

            let new_load = InProgressLoad::new(id, parent_info, layout_chan, window_size,
                                               load_data.url.clone());
            script_task.start_page_load(new_load, load_data);

            let reporter_name = format!("script-reporter-{}", id);
            // Run the task's event loop with a memory reporter registered for
            // its lifetime.
            mem_profiler_chan.run_with_memory_reporting(|| {
                script_task.start();
            }, reporter_name, channel_for_reporter, CommonScriptMsg::CollectReports);

            // This must always be the very last operation performed before the task completes
            failsafe.neuter();
        }, ConstellationMsg::Failure(failure_info), const_chan);
    }
}

// Per-thread timestamps used by gc_slice_callback to measure GC cycle/slice durations.
thread_local!(static GC_CYCLE_START: Cell<Option<Tm>> = Cell::new(None));
thread_local!(static GC_SLICE_START: Cell<Option<Tm>> = Cell::new(None));

/// SpiderMonkey GC slice callback: logs begin/end of GC cycles and slices
/// (with durations) to stdout. Installed only when `opts::get().gc_profile`
/// is set (see `new_rt_and_cx`).
unsafe extern "C" fn gc_slice_callback(_rt: *mut JSRuntime, progress: GCProgress, desc: *const GCDescription) {
    match progress {
        GCProgress::GC_CYCLE_BEGIN => {
            GC_CYCLE_START.with(|start| {
                start.set(Some(now()));
                println!("GC cycle began");
            })
        },
        GCProgress::GC_SLICE_BEGIN => {
            GC_SLICE_START.with(|start| {
                start.set(Some(now()));
                println!("GC slice began");
            })
        },
        GCProgress::GC_SLICE_END => {
            GC_SLICE_START.with(|start| {
                let dur = now() - start.get().unwrap();
                start.set(None);
                println!("GC slice ended: duration={}", dur);
            })
        },
        GCProgress::GC_CYCLE_END => {
            GC_CYCLE_START.with(|start| {
                let dur = now() - start.get().unwrap();
                start.set(None);
                println!("GC cycle ended: duration={}", dur);
            })
        },
    };
    if !desc.is_null() {
        let desc: &GCDescription = &*desc;
        let invocationKind = match desc.invocationKind_ {
            JSGCInvocationKind::GC_NORMAL => "GC_NORMAL",
            JSGCInvocationKind::GC_SHRINK => "GC_SHRINK",
        };
        println!("  isCompartment={}, invocationKind={}", desc.isCompartment_, invocationKind);
    }
    let _ = stdout().flush();
}

/// Debug-build GC callback: records entry/exit of GC in the task-state flags
/// so debug assertions elsewhere can check whether a GC is in progress.
unsafe extern "C" fn debug_gc_callback(_rt: *mut JSRuntime, status: JSGCStatus, _data: *mut libc::c_void) {
    match status {
        JSGCStatus::JSGC_BEGIN => task_state::enter(task_state::IN_GC),
        JSGCStatus::JSGC_END => task_state::exit(task_state::IN_GC),
    }
}

/// DOM proxy shadow-check hook; currently unimplemented and always reports failure.
unsafe extern "C" fn shadow_check_callback(_cx: *mut JSContext, _object: HandleObject,
                                           _id: HandleId) -> DOMProxyShadowsResult {
    // XXX implement me
    DOMProxyShadowsResult::ShadowCheckFailed
}

impl ScriptTask {
    /// Static entry point: forward a completed page fetch to the thread's
    /// script task via the SCRIPT_TASK_ROOT raw pointer.
    pub fn page_fetch_complete(id: PipelineId, subpage: Option<SubpageId>, metadata: Metadata)
                               -> Option<Root<ServoHTMLParser>> {
        SCRIPT_TASK_ROOT.with(|root| {
            let script_task = unsafe { &*root.borrow().unwrap() };
            script_task.handle_page_fetch_complete(id, subpage, metadata)
        })
    }

    /// Static entry point: notify the thread's script task that parsing finished.
    pub fn parsing_complete(id: PipelineId) {
        SCRIPT_TASK_ROOT.with(|root| {
            let script_task = unsafe { &*root.borrow().unwrap() };
            script_task.handle_parsing_complete(id);
        });
    }

    /// Static entry point: process a script message on the thread's script task,
    /// silently dropping it if no task is registered.
    pub fn process_event(msg: CommonScriptMsg) {
        SCRIPT_TASK_ROOT.with(|root| {
            if let Some(script_task) = *root.borrow() {
                let script_task = unsafe { &*script_task };
                script_task.handle_msg_from_script(MainThreadScriptMsg::Common(msg));
            }
        });
    }

    /// Creates a new script task.
    pub fn new(state: InitialScriptState,
               port: Receiver<MainThreadScriptMsg>,
               chan: MainThreadScriptChan)
               -> ScriptTask {
        let runtime = ScriptTask::new_rt_and_cx();

        unsafe {
            JS_SetWrapObjectCallbacks(runtime.rt(),
                                      &WRAP_CALLBACKS);
        }

        // Ask the router to proxy IPC messages from the devtools to us.
        let (ipc_devtools_sender, ipc_devtools_receiver) = ipc::channel().unwrap();
        let devtools_port = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(ipc_devtools_receiver);

        // Ask the router to proxy IPC messages from the image cache task to us.
let (ipc_image_cache_channel, ipc_image_cache_port) = ipc::channel().unwrap();
        let image_cache_port =
            ROUTER.route_ipc_receiver_to_new_mpsc_receiver(ipc_image_cache_port);

        let (timer_event_chan, timer_event_port) = channel();
        ScriptTask {
            page: DOMRefCell::new(None),
            incomplete_loads: DOMRefCell::new(vec!()),

            image_cache_task: state.image_cache_task,
            image_cache_channel: ImageCacheChan(ipc_image_cache_channel),
            image_cache_port: image_cache_port,

            resource_task: Arc::new(state.resource_task),
            storage_task: state.storage_task,

            port: port,
            chan: chan,
            control_chan: state.control_chan,
            control_port: state.control_port,
            constellation_chan: state.constellation_chan,
            compositor: DOMRefCell::new(state.compositor),
            time_profiler_chan: state.time_profiler_chan,
            mem_profiler_chan: state.mem_profiler_chan,

            devtools_chan: state.devtools_chan,
            devtools_port: devtools_port,
            devtools_sender: ipc_devtools_sender,

            js_runtime: Rc::new(runtime),
            mouse_over_targets: DOMRefCell::new(vec!()),
            closed_pipelines: DOMRefCell::new(HashSet::new()),

            scheduler_chan: state.scheduler_chan,
            timer_event_chan: timer_event_chan,
            timer_event_port: timer_event_port,
        }
    }

    /// Create and configure the SpiderMonkey runtime and context for this task:
    /// registers root tracers, optional GC debugging/profiling callbacks, the
    /// DOM callbacks, and disables incremental GC.
    pub fn new_rt_and_cx() -> Runtime {
        LiveDOMReferences::initialize();
        let runtime = Runtime::new();

        unsafe {
            JS_AddExtraGCRootsTracer(runtime.rt(), Some(trace_rust_roots), ptr::null_mut());
            JS_AddExtraGCRootsTracer(runtime.rt(), Some(trace_refcounted_objects), ptr::null_mut());
        }

        // Needed for debug assertions about whether GC is running.
        if cfg!(debug_assertions) {
            unsafe {
                JS_SetGCCallback(runtime.rt(), Some(debug_gc_callback), ptr::null_mut());
            }
        }
        if opts::get().gc_profile {
            unsafe {
                SetGCSliceCallback(runtime.rt(), Some(gc_slice_callback));
            }
        }

        unsafe {
            // Wrapper-preservation hook that unconditionally claims success.
            unsafe extern "C" fn empty_wrapper_callback(_: *mut JSContext, _: *mut JSObject) -> bool { true }
            SetDOMProxyInformation(ptr::null(), 0, Some(shadow_check_callback));
            SetDOMCallbacks(runtime.rt(), &DOM_CALLBACKS);
            SetPreserveWrapperCallback(runtime.rt(), Some(empty_wrapper_callback));
            // Pre barriers aren't working correctly at the moment
            DisableIncrementalGC(runtime.rt());
        }

        runtime
    }

    /// Return the root page in the frame tree. Panics if it doesn't exist.
    pub fn root_page(&self) -> Rc<Page> {
        self.page.borrow().as_ref().unwrap().clone()
    }

    /// Raw JSContext for this task's runtime.
    pub fn get_cx(&self) -> *mut JSContext {
        self.js_runtime.cx()
    }

    /// Starts the script task. After calling this method, the script task will loop receiving
    /// messages on its port.
    pub fn start(&self) {
        while self.handle_msgs() {
            // Go on...
        }
    }

    /// Handle incoming control messages.
    /// Returns false when the task should shut down (see handle_exit_pipeline_msg).
    fn handle_msgs(&self) -> bool {
        // Handle pending resize events.
        // Gather them first to avoid a double mut borrow on self.
        let mut resizes = vec!();

        {
            let page = self.page.borrow();
            if let Some(page) = page.as_ref() {
                for page in page.iter() {
                    // Only process a resize if layout is idle.
                    let window = page.window();
                    let resize_event = window.steal_resize_event();
                    match resize_event {
                        Some(size) => resizes.push((window.pipeline(), size)),
                        None => ()
                    }
                }
            }
        }

        for (id, size) in resizes {
            self.handle_event(id, ResizeEvent(size));
        }

        // Store new resizes, and gather all other events.
        let mut sequential = vec!();

        // Receive at least one message so we don't spinloop.
let mut event = {
            // Block on all five input sources at once; whichever fires first
            // determines which port we receive from.
            let sel = Select::new();
            let mut script_port = sel.handle(&self.port);
            let mut control_port = sel.handle(&self.control_port);
            let mut timer_event_port = sel.handle(&self.timer_event_port);
            let mut devtools_port = sel.handle(&self.devtools_port);
            let mut image_cache_port = sel.handle(&self.image_cache_port);
            unsafe {
                script_port.add();
                control_port.add();
                timer_event_port.add();
                // Only wait on devtools messages when a devtools server exists.
                if self.devtools_chan.is_some() {
                    devtools_port.add();
                }
                image_cache_port.add();
            }
            let ret = sel.wait();
            if ret == script_port.id() {
                MixedMessage::FromScript(self.port.recv().unwrap())
            } else if ret == control_port.id() {
                MixedMessage::FromConstellation(self.control_port.recv().unwrap())
            } else if ret == timer_event_port.id() {
                MixedMessage::FromScheduler(self.timer_event_port.recv().unwrap())
            } else if ret == devtools_port.id() {
                MixedMessage::FromDevtools(self.devtools_port.recv().unwrap())
            } else if ret == image_cache_port.id() {
                MixedMessage::FromImageCache(self.image_cache_port.recv().unwrap())
            } else {
                panic!("unexpected select result")
            }
        };

        // Squash any pending resize, reflow, animation tick, and mouse-move events in the queue.
        let mut mouse_move_event_index = None;
        let mut animation_ticks = HashSet::new();
        loop {
            match event {
                // This has to be handled before the ResizeMsg below,
                // otherwise the page may not have been added to the
                // child list yet, causing the find() to fail.
                MixedMessage::FromConstellation(ConstellationControlMsg::AttachLayout(
                        new_layout_info)) => {
                    self.profile_event(ScriptTaskEventCategory::AttachLayout, || {
                        self.handle_new_layout(new_layout_info);
                    })
                }
                // Resize and Viewport are handled eagerly, not queued, so that
                // only the latest geometry is acted upon.
                MixedMessage::FromConstellation(ConstellationControlMsg::Resize(id, size)) => {
                    self.profile_event(ScriptTaskEventCategory::Resize, || {
                        self.handle_resize(id, size);
                    })
                }
                MixedMessage::FromConstellation(ConstellationControlMsg::Viewport(id, rect)) => {
                    self.profile_event(ScriptTaskEventCategory::SetViewport, || {
                        self.handle_viewport(id, rect);
                    })
                }
                // At most one animation tick per pipeline per batch.
                MixedMessage::FromConstellation(ConstellationControlMsg::TickAllAnimations(
                        pipeline_id)) => {
                    if !animation_ticks.contains(&pipeline_id) {
                        animation_ticks.insert(pipeline_id);
                        sequential.push(event);
                    }
                }
                // Consecutive mouse-move events collapse into the most recent one.
                MixedMessage::FromConstellation(ConstellationControlMsg::SendEvent(
                        _,
                        MouseMoveEvent(_))) => {
                    match mouse_move_event_index {
                        None => {
                            mouse_move_event_index = Some(sequential.len());
                            sequential.push(event);
                        }
                        Some(index) => {
                            sequential[index] = event
                        }
                    }
                }
                _ => {
                    sequential.push(event);
                }
            }

            // If any of our input sources has an event pending, we'll perform another iteration
            // and check for more resize events. If there are no events pending, we'll move
            // on and execute the sequential non-resize events we've seen.
            match self.control_port.try_recv() {
                Err(_) => match self.port.try_recv() {
                    Err(_) => match self.timer_event_port.try_recv() {
                        Err(_) => match self.devtools_port.try_recv() {
                            Err(_) => match self.image_cache_port.try_recv() {
                                Err(_) => break,
                                Ok(ev) => event = MixedMessage::FromImageCache(ev),
                            },
                            Ok(ev) => event = MixedMessage::FromDevtools(ev),
                        },
                        Ok(ev) => event = MixedMessage::FromScheduler(ev),
                    },
                    Ok(ev) => event = MixedMessage::FromScript(ev),
                },
                Ok(ev) => event = MixedMessage::FromConstellation(ev),
            }
        }

        // Process the gathered events.
for msg in sequential {
            let category = self.categorize_msg(&msg);

            let result = self.profile_event(category, move || {
                match msg {
                    // An ExitPipeline that says "shut down" short-circuits the
                    // whole loop and makes handle_msgs return false.
                    MixedMessage::FromConstellation(ConstellationControlMsg::ExitPipeline(id)) => {
                        if self.handle_exit_pipeline_msg(id) {
                            return Some(false)
                        }
                    },
                    MixedMessage::FromConstellation(inner_msg) => self.handle_msg_from_constellation(inner_msg),
                    MixedMessage::FromScript(inner_msg) => self.handle_msg_from_script(inner_msg),
                    MixedMessage::FromScheduler(inner_msg) => self.handle_timer_event(inner_msg),
                    MixedMessage::FromDevtools(inner_msg) => self.handle_msg_from_devtools(inner_msg),
                    MixedMessage::FromImageCache(inner_msg) => self.handle_msg_from_image_cache(inner_msg),
                }

                None
            });

            if let Some(retval) = result {
                return retval
            }
        }

        // Issue batched reflows on any pages that require it (e.g. if images loaded)
        // TODO(gw): In the future we could probably batch other types of reflows
        // into this loop too, but for now it's only images.
        let page = self.page.borrow();
        if let Some(page) = page.as_ref() {
            for page in page.iter() {
                let window = page.window();
                let pending_reflows = window.get_pending_reflow_count();
                if pending_reflows > 0 {
                    window.reflow(ReflowGoal::ForDisplay,
                                  ReflowQueryType::NoQuery,
                                  ReflowReason::ImageLoaded);
                }
            }
        }

        true
    }

    /// Map an incoming message onto the profiling category used by profile_event.
    fn categorize_msg(&self, msg: &MixedMessage) -> ScriptTaskEventCategory {
        match *msg {
            MixedMessage::FromConstellation(ref inner_msg) => {
                match *inner_msg {
                    ConstellationControlMsg::SendEvent(_, _) =>
                        ScriptTaskEventCategory::DomEvent,
                    _ => ScriptTaskEventCategory::ConstellationMsg
                }
            },
            MixedMessage::FromDevtools(_) => ScriptTaskEventCategory::DevtoolsMsg,
            MixedMessage::FromImageCache(_) => ScriptTaskEventCategory::ImageCacheMsg,
            MixedMessage::FromScript(ref inner_msg) => {
                match *inner_msg {
                    // Runnables carry their own category with them.
                    MainThreadScriptMsg::Common(CommonScriptMsg::RunnableMsg(ref category, _)) =>
                        *category,
                    _ => ScriptTaskEventCategory::ScriptEvent
                }
            },
            MixedMessage::FromScheduler(_) => ScriptTaskEventCategory::TimerEvent,
        }
    }

    /// Run `f`, recording it under `category` in the time profiler when
    /// `--profile-script-events` is enabled; otherwise just run it.
    fn profile_event<F, R>(&self, category: ScriptTaskEventCategory, f: F) -> R
        where F: FnOnce() -> R {
        if opts::get().profile_script_events {
            let profiler_cat = match category {
                ScriptTaskEventCategory::AttachLayout => ProfilerCategory::ScriptAttachLayout,
                ScriptTaskEventCategory::ConstellationMsg => ProfilerCategory::ScriptConstellationMsg,
                ScriptTaskEventCategory::DevtoolsMsg => ProfilerCategory::ScriptDevtoolsMsg,
                ScriptTaskEventCategory::DocumentEvent => ProfilerCategory::ScriptDocumentEvent,
                ScriptTaskEventCategory::DomEvent => ProfilerCategory::ScriptDomEvent,
                ScriptTaskEventCategory::FileRead => ProfilerCategory::ScriptFileRead,
                ScriptTaskEventCategory::ImageCacheMsg => ProfilerCategory::ScriptImageCacheMsg,
                ScriptTaskEventCategory::InputEvent => ProfilerCategory::ScriptInputEvent,
                ScriptTaskEventCategory::NetworkEvent => ProfilerCategory::ScriptNetworkEvent,
                ScriptTaskEventCategory::Resize => ProfilerCategory::ScriptResize,
                ScriptTaskEventCategory::ScriptEvent => ProfilerCategory::ScriptEvent,
                ScriptTaskEventCategory::UpdateReplacedElement => ProfilerCategory::ScriptUpdateReplacedElement,
                ScriptTaskEventCategory::SetViewport => ProfilerCategory::ScriptSetViewport,
                ScriptTaskEventCategory::TimerEvent => ProfilerCategory::ScriptTimerEvent,
                ScriptTaskEventCategory::WebSocketEvent => ProfilerCategory::ScriptWebSocketEvent,
                ScriptTaskEventCategory::WorkerEvent => ProfilerCategory::ScriptWorkerEvent,
            };
            profile(profiler_cat, None, self.time_profiler_chan.clone(), f)
        } else {
            f()
        }
    }

    /// Dispatch a message from the constellation to the appropriate handler.
    /// Variants that handle_msgs must intercept earlier (AttachLayout, Viewport,
    /// Resize, ExitPipeline) panic here.
    fn handle_msg_from_constellation(&self, msg: ConstellationControlMsg) {
        match msg {
            ConstellationControlMsg::AttachLayout(_) =>
                panic!("should have handled AttachLayout already"),
            ConstellationControlMsg::Navigate(pipeline_id, subpage_id, load_data) =>
                self.handle_navigate(pipeline_id, Some(subpage_id), load_data),
            ConstellationControlMsg::SendEvent(id, event) =>
                self.handle_event(id, event),
            ConstellationControlMsg::ResizeInactive(id, new_size) =>
                self.handle_resize_inactive_msg(id, new_size),
ConstellationControlMsg::Viewport(..) =>
                panic!("should have handled Viewport already"),
            ConstellationControlMsg::Resize(..) =>
                panic!("should have handled Resize already"),
            ConstellationControlMsg::ExitPipeline(..) =>
                panic!("should have handled ExitPipeline already"),
            ConstellationControlMsg::GetTitle(pipeline_id) =>
                self.handle_get_title_msg(pipeline_id),
            ConstellationControlMsg::Freeze(pipeline_id) =>
                self.handle_freeze_msg(pipeline_id),
            ConstellationControlMsg::Thaw(pipeline_id) =>
                self.handle_thaw_msg(pipeline_id),
            ConstellationControlMsg::MozBrowserEvent(parent_pipeline_id,
                                                     subpage_id,
                                                     event) =>
                self.handle_mozbrowser_event_msg(parent_pipeline_id,
                                                 subpage_id,
                                                 event),
            ConstellationControlMsg::UpdateSubpageId(containing_pipeline_id,
                                                     old_subpage_id,
                                                     new_subpage_id) =>
                self.handle_update_subpage_id(containing_pipeline_id,
                                              old_subpage_id,
                                              new_subpage_id),
            ConstellationControlMsg::FocusIFrame(containing_pipeline_id, subpage_id) =>
                self.handle_focus_iframe_msg(containing_pipeline_id, subpage_id),
            ConstellationControlMsg::WebDriverScriptCommand(pipeline_id, msg) =>
                self.handle_webdriver_msg(pipeline_id, msg),
            ConstellationControlMsg::TickAllAnimations(pipeline_id) =>
                self.handle_tick_all_animations(pipeline_id),
            ConstellationControlMsg::WebFontLoaded(pipeline_id) =>
                self.handle_web_font_loaded(pipeline_id),
            ConstellationControlMsg::GetCurrentState(sender, pipeline_id) => {
                let state = self.handle_get_current_state(pipeline_id);
                sender.send(state).unwrap();
            }
        }
    }

    /// Dispatch a message that originated from script running on this task.
    fn handle_msg_from_script(&self, msg: MainThreadScriptMsg) {
        match msg {
            MainThreadScriptMsg::Navigate(id, load_data) =>
                self.handle_navigate(id, None, load_data),
            MainThreadScriptMsg::ExitWindow(id) =>
                self.handle_exit_window_msg(id),
            MainThreadScriptMsg::MainThreadRunnableMsg(runnable) =>
                runnable.handler(self),
            MainThreadScriptMsg::DocumentLoadsComplete(id) =>
                self.handle_loads_complete(id),
            MainThreadScriptMsg::Common(CommonScriptMsg::RunnableMsg(_, runnable)) => {
                // The category of the runnable is ignored by the pattern, however
                // it is still respected by profiling (see categorize_msg).
                if !runnable.is_cancelled() {
                    runnable.handler()
                }
            }
            MainThreadScriptMsg::Common(CommonScriptMsg::RefcountCleanup(addr)) =>
                LiveDOMReferences::cleanup(addr),
            MainThreadScriptMsg::Common(CommonScriptMsg::CollectReports(reports_chan)) =>
                self.collect_reports(reports_chan),
        }
    }

    /// Fire a scheduled timer in the window of the pipeline it targets.
    /// Panics on worker-sourced events and on unknown pipelines.
    fn handle_timer_event(&self, timer_event: TimerEvent) {
        let TimerEvent(source, id) = timer_event;

        let pipeline_id = match source {
            TimerSource::FromWindow(pipeline_id) => pipeline_id,
            TimerSource::FromWorker => panic!("Worker timeouts must not be sent to script task"),
        };

        let page = self.root_page();
        let page = page.find(pipeline_id).expect("ScriptTask: received fire timer msg for a pipeline ID not associated with this script task. This is a bug.");
        let window = page.window();

        window.handle_fire_timer(id);
    }

    /// Dispatch a devtools command to the matching devtools handler.
    fn handle_msg_from_devtools(&self, msg: DevtoolScriptControlMsg) {
        let page = self.root_page();
        match msg {
            DevtoolScriptControlMsg::EvaluateJS(id, s, reply) => {
                let window = get_page(&page, id).window();
                let global_ref = GlobalRef::Window(window.r());
                devtools::handle_evaluate_js(&global_ref, s, reply)
            },
            DevtoolScriptControlMsg::GetRootNode(id, reply) =>
                devtools::handle_get_root_node(&page, id, reply),
            DevtoolScriptControlMsg::GetDocumentElement(id, reply) =>
                devtools::handle_get_document_element(&page, id, reply),
            DevtoolScriptControlMsg::GetChildren(id, node_id, reply) =>
                devtools::handle_get_children(&page, id, node_id, reply),
            DevtoolScriptControlMsg::GetLayout(id, node_id, reply) =>
                devtools::handle_get_layout(&page, id, node_id, reply),
            DevtoolScriptControlMsg::GetCachedMessages(pipeline_id, message_types, reply) =>
                devtools::handle_get_cached_messages(pipeline_id, message_types, reply),
            DevtoolScriptControlMsg::ModifyAttribute(id, node_id, modifications) =>
                devtools::handle_modify_attribute(&page, id, node_id, modifications),
            DevtoolScriptControlMsg::WantsLiveNotifications(id, to_send) =>
{
                let window = get_page(&page, id).window();
                let global_ref = GlobalRef::Window(window.r());
                devtools::handle_wants_live_notifications(&global_ref, to_send)
            },
            DevtoolScriptControlMsg::SetTimelineMarkers(_pipeline_id, marker_types, reply) =>
                devtools::handle_set_timeline_markers(&page, marker_types, reply),
            DevtoolScriptControlMsg::DropTimelineMarkers(_pipeline_id, marker_types) =>
                devtools::handle_drop_timeline_markers(&page, marker_types),
            DevtoolScriptControlMsg::RequestAnimationFrame(pipeline_id, name) =>
                devtools::handle_request_animation_frame(&page, pipeline_id, name),
        }
    }

    /// Hand an image cache result to its registered responder.
    /// Panics if the result carries no responder.
    fn handle_msg_from_image_cache(&self, msg: ImageCacheResult) {
        msg.responder.unwrap().respond(msg.image_response);
    }

    /// Dispatch a WebDriver command to the matching webdriver handler.
    fn handle_webdriver_msg(&self, pipeline_id: PipelineId, msg: WebDriverScriptCommand) {
        let page = self.root_page();
        match msg {
            WebDriverScriptCommand::ExecuteScript(script, reply) =>
                webdriver_handlers::handle_execute_script(&page, pipeline_id, script, reply),
            WebDriverScriptCommand::FindElementCSS(selector, reply) =>
                webdriver_handlers::handle_find_element_css(&page, pipeline_id, selector, reply),
            WebDriverScriptCommand::FindElementsCSS(selector, reply) =>
                webdriver_handlers::handle_find_elements_css(&page, pipeline_id, selector, reply),
            WebDriverScriptCommand::GetActiveElement(reply) =>
                webdriver_handlers::handle_get_active_element(&page, pipeline_id, reply),
            WebDriverScriptCommand::GetElementTagName(node_id, reply) =>
                webdriver_handlers::handle_get_name(&page, pipeline_id, node_id, reply),
            WebDriverScriptCommand::GetElementText(node_id, reply) =>
                webdriver_handlers::handle_get_text(&page, pipeline_id, node_id, reply),
            WebDriverScriptCommand::GetFrameId(frame_id, reply) =>
                webdriver_handlers::handle_get_frame_id(&page, pipeline_id, frame_id, reply),
            WebDriverScriptCommand::GetUrl(reply) =>
                webdriver_handlers::handle_get_url(&page, pipeline_id, reply),
            WebDriverScriptCommand::GetTitle(reply) =>
                webdriver_handlers::handle_get_title(&page, pipeline_id, reply),
            WebDriverScriptCommand::ExecuteAsyncScript(script, reply) =>
                webdriver_handlers::handle_execute_async_script(&page, pipeline_id, script, reply),
        }
    }

    /// Record a resize on the target page's window, or stash it on the matching
    /// in-progress load when the page does not exist yet.
    fn handle_resize(&self, id: PipelineId, size: WindowSizeData) {
        let page = self.page.borrow();
        if let Some(ref page) = page.as_ref() {
            if let Some(ref page) = page.find(id) {
                let window = page.window();
                window.set_resize_event(size);
                return;
            }
        }
        let mut loads = self.incomplete_loads.borrow_mut();
        if let Some(ref mut load) = loads.iter_mut().find(|load| load.pipeline_id == id) {
            load.window_size = Some(size);
            return;
        }
        panic!("resize sent to nonexistent pipeline");
    }

    /// Update the viewport clip rect on the target page (reflowing if it changed),
    /// or stash it on the matching in-progress load when the page does not exist yet.
    fn handle_viewport(&self, id: PipelineId, rect: Rect<f32>) {
        let page = self.page.borrow();
        if let Some(page) = page.as_ref() {
            if let Some(ref inner_page) = page.find(id) {
                let window = inner_page.window();
                if window.set_page_clip_rect_with_new_viewport(rect) {
                    let page = get_page(page, id);
                    self.rebuild_and_force_reflow(&*page, ReflowReason::Viewport);
                }
                return;
            }
        }
        let mut loads = self.incomplete_loads.borrow_mut();
        if let Some(ref mut load) = loads.iter_mut().find(|load| load.pipeline_id == id) {
            load.clip_rect = Some(rect);
            return;
        }
        panic!("Page rect message sent to nonexistent pipeline");
    }

    /// Get the current state of a given pipeline.
    fn handle_get_current_state(&self, pipeline_id: PipelineId) -> ScriptState {
        // Check if the main page load is still pending
        let loads = self.incomplete_loads.borrow();
        if let Some(_) = loads.iter().find(|load| load.pipeline_id == pipeline_id) {
            return ScriptState::DocumentLoading;
        }

        // If not in pending loads, the page should exist by now.
        let page = self.root_page();
        let page = page.find(pipeline_id).expect("GetCurrentState sent to nonexistent pipeline");
        let doc = page.document();

        // Check if document load event has fired. If the document load
        // event has fired, this also guarantees that the first reflow
        // has been kicked off.
// Since the script task does a join with
        // layout, this ensures there are no race conditions that can occur
        // between load completing and the first layout completing.
        let load_pending = doc.ReadyState() != DocumentReadyState::Complete;
        if load_pending {
            return ScriptState::DocumentLoading;
        }

        // Checks if the html element has reftest-wait attribute present.
        // See http://testthewebforward.org/docs/reftests.html
        let html_element = doc.GetDocumentElement();
        let reftest_wait = html_element.map_or(false,
                                               |elem| elem.has_class(&Atom::from_slice("reftest-wait")));
        if reftest_wait {
            return ScriptState::DocumentLoading;
        }

        ScriptState::DocumentLoaded
    }

    /// Spawn a layout task for a freshly attached (sub)pipeline and start
    /// fetching its resource.
    fn handle_new_layout(&self, new_layout_info: NewLayoutInfo) {
        let NewLayoutInfo {
            containing_pipeline_id,
            new_pipeline_id,
            subpage_id,
            load_data,
            paint_chan,
            failure,
            pipeline_port,
            layout_shutdown_chan,
        } = new_layout_info;

        let layout_pair = ScriptTask::create_layout_channel(None::<&mut ScriptTask>);
        let layout_chan = LayoutChan(*ScriptTask::clone_layout_channel(
            None::<&mut ScriptTask>,
            &layout_pair).downcast::<Sender<layout_interface::Msg>>().unwrap());

        let layout_creation_info = NewLayoutTaskInfo {
            id: new_pipeline_id,
            url: load_data.url.clone(),
            is_parent: false,
            layout_pair: layout_pair,
            pipeline_port: pipeline_port,
            constellation_chan: self.constellation_chan.clone(),
            failure: failure,
            paint_chan: paint_chan,
            script_chan: self.control_chan.clone(),
            image_cache_task: self.image_cache_task.clone(),
            layout_shutdown_chan: layout_shutdown_chan,
        };

        let page = self.root_page();
        let parent_page = page.find(containing_pipeline_id).expect("ScriptTask: received a layout whose parent has a PipelineId which does not correspond to a pipeline in the script task's page tree. This is a bug.");
        let parent_window = parent_page.window();

        // Tell layout to actually spawn the task.
        parent_window.layout_chan()
                     .0
                     .send(layout_interface::Msg::CreateLayoutTask(layout_creation_info))
                     .unwrap();

        // Kick off the fetch for the new resource.
        let new_load = InProgressLoad::new(new_pipeline_id,
                                           Some((containing_pipeline_id, subpage_id)),
                                           layout_chan,
                                           parent_window.window_size(),
                                           load_data.url.clone());
        self.start_page_load(new_load, load_data);
    }

    /// All in-flight loads for a pipeline's document have finished: inhibit
    /// further load events, queue the document-progress runnable, and tell the
    /// constellation the load is complete. Bails out if the loader is still blocked.
    fn handle_loads_complete(&self, pipeline: PipelineId) {
        let page = get_page(&self.root_page(), pipeline);
        let doc = page.document();
        let doc = doc.r();
        if doc.loader().is_blocked() {
            return;
        }

        doc.mut_loader().inhibit_events();

        // https://html.spec.whatwg.org/multipage/#the-end step 7
        let addr: Trusted<Document> = Trusted::new(self.get_cx(), doc, self.chan.clone());
        let handler = box DocumentProgressHandler::new(addr.clone());
        self.chan.send(CommonScriptMsg::RunnableMsg(ScriptTaskEventCategory::DocumentEvent, handler)).unwrap();

        let ConstellationChan(ref chan) = self.constellation_chan;
        chan.send(ConstellationMsg::LoadComplete(pipeline)).unwrap();
    }

    /// Collect memory reports for the JS engine under the given path segment.
    pub fn get_reports(cx: *mut JSContext, path_seg: String) -> Vec<Report> {
        let mut reports = vec![];

        unsafe {
            let rt = JS_GetRuntime(cx);
            let mut stats = ::std::mem::zeroed();
            if CollectServoSizes(rt, &mut stats) {
                // Helper that prefixes every report path with "<path_seg>/js".
                let mut report = |mut path_suffix, kind, size| {
                    let mut path = path![path_seg, "js"];
                    path.append(&mut path_suffix);
                    reports.push(Report {
                        path: path,
                        kind: kind,
                        size: size as usize,
                    })
                };

                // A note about possibly confusing terminology: the JS GC "heap" is allocated via
                // mmap/VirtualAlloc, which means it's not on the malloc "heap", so we use
                // `ExplicitNonHeapSize` as its kind.
                report(path!["gc-heap", "used"],
                       ReportKind::ExplicitNonHeapSize,
                       stats.gcHeapUsed);

                report(path!["gc-heap", "unused"],
                       ReportKind::ExplicitNonHeapSize,
                       stats.gcHeapUnused);

                report(path!["gc-heap", "admin"],
                       ReportKind::ExplicitNonHeapSize,
                       stats.gcHeapAdmin);

                report(path!["gc-heap", "decommitted"],
                       ReportKind::ExplicitNonHeapSize,
                       stats.gcHeapDecommitted);

                // SpiderMonkey uses the system heap, not jemalloc.
report(path!["malloc-heap"],
                       ReportKind::ExplicitSystemHeapSize,
                       stats.mallocHeap);

                report(path!["non-heap"],
                       ReportKind::ExplicitNonHeapSize,
                       stats.nonHeap);
            }
        }
        reports
    }

    /// Gather DOM-tree and JS memory reports for every page this task owns and
    /// send them on `reports_chan`.
    fn collect_reports(&self, reports_chan: ReportsChan) {
        let mut urls = vec![];
        let mut dom_tree_size = 0;
        let mut reports = vec![];

        if let Some(root_page) = self.page.borrow().as_ref() {
            for it_page in root_page.iter() {
                let current_url = it_page.document().url().serialize();
                urls.push(current_url.clone());

                for child in it_page.document().upcast::<Node>().traverse_preorder() {
                    dom_tree_size += heap_size_of_self_and_children(&*child);
                }
                let window = it_page.window();
                dom_tree_size += heap_size_of_self_and_children(&*window);

                reports.push(Report {
                    path: path![format!("url({})", current_url), "dom-tree"],
                    kind: ReportKind::ExplicitJemallocHeapSize,
                    size: dom_tree_size,
                })
            }
        }
        let path_seg = format!("url({})", urls.join(", "));
        reports.extend(ScriptTask::get_reports(self.get_cx(), path_seg));
        reports_chan.send(reports);
    }

    /// Handles freeze message
    fn handle_freeze_msg(&self, id: PipelineId) {
        // Workaround for a race condition when navigating before the initial page has
        // been constructed c.f. https://github.com/servo/servo/issues/7677
        if self.page.borrow().is_none() {
            return
        };
        let page = self.root_page();
        let page = page.find(id).expect("ScriptTask: received freeze msg for a pipeline ID not associated with this script task. This is a bug.");
        let window = page.window();
        window.freeze();
    }

    /// Handles thaw message
    fn handle_thaw_msg(&self, id: PipelineId) {
        // We should only get this message when moving in history, so all pages requested
        // should exist.
        let page = self.root_page().find(id).unwrap();
        // A reflow that was deferred while frozen is performed now.
        let needed_reflow = page.set_reflow_status(false);
        if needed_reflow {
            self.rebuild_and_force_reflow(&*page, ReflowReason::CachedPageNeededReflow);
        }
        let window = page.window();
        window.thaw();
    }

    /// Move focus to the iframe identified by (parent pipeline, subpage);
    /// does nothing if no matching iframe element is found.
    fn handle_focus_iframe_msg(&self,
                               parent_pipeline_id: PipelineId,
                               subpage_id: SubpageId) {
        let borrowed_page = self.root_page();
        let page = borrowed_page.find(parent_pipeline_id).unwrap();

        let doc = page.document();
        let frame_element = doc.find_iframe(subpage_id);

        if let Some(ref frame_element) = frame_element {
            doc.begin_focus_transaction();
            doc.request_focus(frame_element.upcast());
            doc.commit_focus_transaction(FocusType::Parent);
        }
    }

    /// Handles a mozbrowser event, for example see:
    /// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowserloadstart
    fn handle_mozbrowser_event_msg(&self,
                                   parent_pipeline_id: PipelineId,
                                   subpage_id: SubpageId,
                                   event: MozBrowserEvent) {
        let borrowed_page = self.root_page();

        let frame_element = borrowed_page.find(parent_pipeline_id).and_then(|page| {
            let doc = page.document();
            doc.find_iframe(subpage_id)
        });

        if let Some(ref frame_element) = frame_element {
            frame_element.dispatch_mozbrowser_event(event);
        }
    }

    /// Rewrite the subpage id stored on an iframe element after its pipeline
    /// was replaced. Panics if the iframe cannot be found.
    fn handle_update_subpage_id(&self,
                                containing_pipeline_id: PipelineId,
                                old_subpage_id: SubpageId,
                                new_subpage_id: SubpageId) {
        let borrowed_page = self.root_page();

        let frame_element = borrowed_page.find(containing_pipeline_id).and_then(|page| {
            let doc = page.document();
            doc.find_iframe(old_subpage_id)
        });

        frame_element.unwrap().update_subpage_id(new_subpage_id);
    }

    /// Window was resized, but this script was not active, so don't reflow yet
    fn handle_resize_inactive_msg(&self, id: PipelineId, new_size: WindowSizeData) {
        let page = self.root_page();
        let page = page.find(id).expect("Received resize message for PipelineId not associated with a page in the page tree.
This is a bug.");
        let window = page.window();
        window.set_window_size(new_size);
        // Remember that a reflow is owed when this page is next thawed.
        page.set_reflow_status(true);
    }

    /// We have gotten a window.close from script, which we pass on to the compositor.
    /// We do not shut down the script task now, because the compositor will ask the
    /// constellation to shut down the pipeline, which will clean everything up
    /// normally. If we do exit, we will tear down the DOM nodes, possibly at a point
    /// where layout is still accessing them.
    fn handle_exit_window_msg(&self, _: PipelineId) {
        debug!("script task handling exit window msg");

        // TODO(tkuehn): currently there is only one window,
        // so this can afford to be naive and just shut down the
        // compositor. In the future it'll need to be smarter.
        self.compositor.borrow_mut().send(ScriptToCompositorMsg::Exit).unwrap();
    }

    /// We have received notification that the response associated with a load has completed.
    /// Kick off the document and frame tree creation process using the result.
    fn handle_page_fetch_complete(&self, id: PipelineId, subpage: Option<SubpageId>,
                                  metadata: Metadata) -> Option<Root<ServoHTMLParser>> {
        let idx = self.incomplete_loads.borrow().iter().position(|load| {
            load.pipeline_id == id && load.parent_info.map(|info| info.1) == subpage
        });
        // The matching in progress load structure may not exist if
        // the pipeline exited before the page load completed.
        match idx {
            Some(idx) => {
                let load = self.incomplete_loads.borrow_mut().remove(idx);
                Some(self.load(metadata, load))
            }
            None => {
                assert!(self.closed_pipelines.borrow().contains(&id));
                None
            }
        }
    }

    /// Handles a request for the window title.
    fn handle_get_title_msg(&self, pipeline_id: PipelineId) {
        let page = get_page(&self.root_page(), pipeline_id);
        let document = page.document();
        document.send_title_to_compositor();
    }

    /// Handles a request to exit the script task and shut down layout.
    /// Returns true if the script task should shut down and false otherwise.
    fn handle_exit_pipeline_msg(&self, id: PipelineId) -> bool {
        self.closed_pipelines.borrow_mut().insert(id);

        // Check if the exit message is for an in progress load.
        let idx = self.incomplete_loads.borrow().iter().position(|load| {
            load.pipeline_id == id
        });

        if let Some(idx) = idx {
            let load = self.incomplete_loads.borrow_mut().remove(idx);

            // Tell the layout task to begin shutting down, and wait until it
            // processed this message.
            let (response_chan, response_port) = channel();
            let LayoutChan(chan) = load.layout_chan;
            if chan.send(layout_interface::Msg::PrepareToExit(response_chan)).is_ok() {
                debug!("shutting down layout for page {:?}", id);
                response_port.recv().unwrap();
                chan.send(layout_interface::Msg::ExitNow).ok();
            }

            let has_pending_loads = self.incomplete_loads.borrow().len() > 0;
            let has_root_page = self.page.borrow().is_some();

            // Exit if no pending loads and no root page
            return !has_pending_loads && !has_root_page;
        }

        // If root is being exited, shut down all pages
        let page = self.root_page();
        let window = page.window();
        if window.pipeline() == id {
            debug!("shutting down layout for root page {:?}", id);
            shut_down_layout(&page);
            return true
        }

        // otherwise find just the matching page and exit all sub-pages
        if let Some(ref mut child_page) = page.remove(id) {
            shut_down_layout(&*child_page);
        }
        false
    }

    /// Handles when layout task finishes all animation in one tick
    fn handle_tick_all_animations(&self, id: PipelineId) {
        let page = get_page(&self.root_page(), id);
        let document = page.document();
        document.run_the_animation_frame_callbacks();
    }

    /// Handles a Web font being loaded. Does nothing if the page no longer exists.
    fn handle_web_font_loaded(&self, pipeline_id: PipelineId) {
        if let Some(page) = self.page.borrow().as_ref() {
            if let Some(page) = page.find(pipeline_id) {
                self.rebuild_and_force_reflow(&*page, ReflowReason::WebFontLoaded);
            }
        }
    }

    /// The entry point to document loading.
Defines bindings, sets up the window and document /// objects, parses HTML and CSS, and kicks off initial layout. fn load(&self, metadata: Metadata, incomplete: InProgressLoad) -> Root<ServoHTMLParser> { let final_url = metadata.final_url.clone(); debug!("ScriptTask: loading {} on page {:?}", incomplete.url.serialize(), incomplete.pipeline_id); // We should either be initializing a root page or loading a child page of an // existing one. let root_page_exists = self.page.borrow().is_some(); let frame_element = incomplete.parent_info.and_then(|(parent_id, subpage_id)| { // The root page may not exist yet, if the parent of this frame // exists in a different script task. let borrowed_page = self.page.borrow(); // In the case a parent id exists but the matching page // cannot be found, this means the page exists in a different // script task (due to origin) so it shouldn't be returned. // TODO: window.parent will continue to return self in that // case, which is wrong. We should be returning an object that // denies access to most properties (per // https://github.com/servo/servo/issues/3939#issuecomment-62287025). borrowed_page.as_ref().and_then(|borrowed_page| { borrowed_page.find(parent_id).and_then(|page| { let doc = page.document(); doc.find_iframe(subpage_id) }) }) }); // Create a new frame tree entry. let page = Rc::new(Page::new(incomplete.pipeline_id)); if !root_page_exists { // We have a new root frame tree. *self.page.borrow_mut() = Some(page.clone()); } else if let Some((parent, _)) = incomplete.parent_info { // We have a new child frame. let parent_page = self.root_page(); // TODO(gw): This find will fail when we are sharing script tasks // between cross origin iframes in the same TLD. 
parent_page.find(parent).expect("received load for subpage with missing parent"); parent_page.children.borrow_mut().push(page.clone()); } enum PageToRemove { Root, Child(PipelineId), } struct AutoPageRemover<'a> { page: PageToRemove, script_task: &'a ScriptTask, neutered: bool, } impl<'a> AutoPageRemover<'a> { fn new(script_task: &'a ScriptTask, page: PageToRemove) -> AutoPageRemover<'a> { AutoPageRemover { page: page, script_task: script_task, neutered: false, } } fn neuter(&mut self) { self.neutered = true; } } impl<'a> Drop for AutoPageRemover<'a> { fn drop(&mut self) { if !self.neutered { match self.page { PageToRemove::Root => *self.script_task.page.borrow_mut() = None, PageToRemove::Child(id) => { self.script_task.root_page().remove(id).unwrap(); } } } } } let page_to_remove = if !root_page_exists { PageToRemove::Root } else { PageToRemove::Child(incomplete.pipeline_id) }; let mut page_remover = AutoPageRemover::new(self, page_to_remove); let MainThreadScriptChan(ref sender) = self.chan; // Create the window and document objects. 
let window = Window::new(self.js_runtime.clone(), page.clone(), MainThreadScriptChan(sender.clone()), self.image_cache_channel.clone(), self.compositor.borrow_mut().clone(), self.image_cache_task.clone(), self.resource_task.clone(), self.storage_task.clone(), self.mem_profiler_chan.clone(), self.devtools_chan.clone(), self.constellation_chan.clone(), self.scheduler_chan.clone(), MainThreadTimerEventChan(self.timer_event_chan.clone()), incomplete.layout_chan, incomplete.pipeline_id, incomplete.parent_info, incomplete.window_size); let last_modified = metadata.headers.as_ref().and_then(|headers| { headers.get().map(|&LastModified(HttpDate(ref tm))| dom_last_modified(tm)) }); let content_type = match metadata.content_type { Some(ContentType(Mime(TopLevel::Text, SubLevel::Plain, _))) => { Some(DOMString("text/plain".to_owned())) } _ => None }; let loader = DocumentLoader::new_with_task(self.resource_task.clone(), Some(page.pipeline()), Some(incomplete.url.clone())); let document = Document::new(window.r(), Some(final_url.clone()), IsHTMLDocument::HTMLDocument, content_type, last_modified, DocumentSource::FromParser, loader); let frame_element = frame_element.r().map(Castable::upcast); window.init_browsing_context(document.r(), frame_element); // Create the root frame page.set_frame(Some(Frame { document: JS::from_rooted(&document), window: JS::from_rooted(&window), })); let is_javascript = incomplete.url.scheme == "javascript"; let parse_input = if is_javascript { use url::percent_encoding::percent_decode_to; // Turn javascript: URL into JS code to eval, according to the steps in // https://html.spec.whatwg.org/multipage/#javascript-protocol let _ar = JSAutoRequest::new(self.get_cx()); let mut script_source_bytes = Vec::new(); // Start with the scheme data of the parsed URL (5.), while percent-decoding (8.) 
percent_decode_to(incomplete.url.non_relative_scheme_data().unwrap().as_bytes(), &mut script_source_bytes); // Append question mark and query component, if any (6.), while percent-decoding (8.) if let Some(ref query) = incomplete.url.query { script_source_bytes.push(b'?'); percent_decode_to(query.as_bytes(), &mut script_source_bytes); } // Append number sign and fragment component if any (7.), while percent-decoding (8.) if let Some(ref fragment) = incomplete.url.fragment { script_source_bytes.push(b'#'); percent_decode_to(fragment.as_bytes(), &mut script_source_bytes); } // UTF-8 decode (9.) let script_source = String::from_utf8_lossy(&script_source_bytes); // Script source is ready to be evaluated (11.) let mut jsval = RootedValue::new(self.get_cx(), UndefinedValue()); window.evaluate_js_on_global_with_result(&script_source, jsval.handle_mut()); let strval = DOMString::from_jsval(self.get_cx(), jsval.handle(), StringificationBehavior::Empty); strval.unwrap_or(DOMString::new()) } else { DOMString::new() }; parse_html(document.r(), parse_input, final_url, ParseContext::Owner(Some(incomplete.pipeline_id))); page_remover.neuter(); document.get_current_parser().unwrap() } fn notify_devtools(&self, title: DOMString, url: Url, ids: (PipelineId, Option<WorkerId>)) { if let Some(ref chan) = self.devtools_chan { let page_info = DevtoolsPageInfo { title: title, url: url, }; chan.send(ScriptToDevtoolsControlMsg::NewGlobal( ids, self.devtools_sender.clone(), page_info)).unwrap(); } } fn scroll_fragment_point(&self, pipeline_id: PipelineId, element: &Element) { // FIXME(#8275, pcwalton): This is pretty bogus when multiple layers are involved. // Really what needs to happen is that this needs to go through layout to ask which // layer the element belongs to, and have it send the scroll message to the // compositor. 
let rect = element.upcast::<Node>().get_bounding_content_box(); // In order to align with element edges, we snap to unscaled pixel boundaries, since the // paint task currently does the same for drawing elements. This is important for pages // that require pixel perfect scroll positioning for proper display (like Acid2). Since we // don't have the device pixel ratio here, this might not be accurate, but should work as // long as the ratio is a whole number. Once #8275 is fixed this should actually take into // account the real device pixel ratio. let point = Point2D::new(rect.origin.x.to_nearest_px() as f32, rect.origin.y.to_nearest_px() as f32); self.compositor.borrow_mut().send(ScriptToCompositorMsg::ScrollFragmentPoint( pipeline_id, LayerId::null(), point, false)).unwrap(); } /// Reflows non-incrementally, rebuilding the entire layout tree in the process. fn rebuild_and_force_reflow(&self, page: &Page, reason: ReflowReason) { let document = page.document(); document.dirty_all_nodes(); let window = window_from_node(document.r()); window.reflow(ReflowGoal::ForDisplay, ReflowQueryType::NoQuery, reason); } /// This is the main entry point for receiving and dispatching DOM events. /// /// TODO: Actually perform DOM event dispatch. 
fn handle_event(&self, pipeline_id: PipelineId, event: CompositorEvent) {
    match event {
        ResizeEvent(new_size) => {
            self.handle_resize_event(pipeline_id, new_size);
        }

        ClickEvent(button, point) => {
            self.handle_mouse_event(pipeline_id, MouseEventType::Click, button, point);
        }

        MouseDownEvent(button, point) => {
            self.handle_mouse_event(pipeline_id, MouseEventType::MouseDown, button, point);
        }

        MouseUpEvent(button, point) => {
            self.handle_mouse_event(pipeline_id, MouseEventType::MouseUp, button, point);
        }

        MouseMoveEvent(point) => {
            let page = get_page(&self.root_page(), pipeline_id);
            let document = page.document();

            // Snapshot the previous mouse-over targets so they can be diffed
            // against the new set after the move event is dispatched.
            let mut prev_mouse_over_targets: RootedVec<JS<Element>> = RootedVec::new();
            for target in &*self.mouse_over_targets.borrow_mut() {
                prev_mouse_over_targets.push(target.clone());
            }

            // We temporarily steal the list of targets over which the mouse is to pass it to
            // handle_mouse_move_event() in a safe RootedVec container.
            let mut mouse_over_targets = RootedVec::new();
            std_mem::swap(&mut *self.mouse_over_targets.borrow_mut(), &mut *mouse_over_targets);
            document.handle_mouse_move_event(self.js_runtime.rt(), point, &mut mouse_over_targets);

            // Notify Constellation about anchors that are no longer mouse over targets.
            for target in &*prev_mouse_over_targets {
                if !mouse_over_targets.contains(target) {
                    if target.upcast::<Node>().is_anchor_element() {
                        // NodeStatus(None) signals "no anchor hovered"; only one
                        // such message is sent per move (note the break).
                        let event = ConstellationMsg::NodeStatus(None);
                        let ConstellationChan(ref chan) = self.constellation_chan;
                        chan.send(event).unwrap();
                        break;
                    }
                }
            }

            // Notify Constellation about the topmost anchor mouse over target.
            for target in &*mouse_over_targets {
                if target.upcast::<Node>().is_anchor_element() {
                    // Resolve href against the document URL; a href that fails
                    // to parse produces a None status.
                    let status = target.get_attribute(&ns!(""), &atom!("href"))
                        .and_then(|href| {
                            let value = href.value();
                            let url = document.url();
                            UrlParser::new().base_url(&url).parse(&value).map(|url| url.serialize()).ok()
                        });
                    let event = ConstellationMsg::NodeStatus(status);
                    let ConstellationChan(ref chan) = self.constellation_chan;
                    chan.send(event).unwrap();
                    break;
                }
            }

            // Hand the updated target list back to self for the next move event.
            std_mem::swap(&mut *self.mouse_over_targets.borrow_mut(), &mut *mouse_over_targets);
        }

        TouchEvent(event_type, identifier, point) => {
            let handled = self.handle_touch_event(pipeline_id, event_type, identifier, point);
            match event_type {
                TouchEventType::Down => {
                    // The compositor is told whether the default action may
                    // proceed, based on whether content handled the touch.
                    if handled {
                        // TODO: Wait to see if preventDefault is called on the first touchmove event.
                        self.compositor.borrow_mut()
                            .send(ScriptToCompositorMsg::TouchEventProcessed(
                                    EventResult::DefaultAllowed)).unwrap();
                    } else {
                        self.compositor.borrow_mut()
                            .send(ScriptToCompositorMsg::TouchEventProcessed(
                                    EventResult::DefaultPrevented)).unwrap();
                    }
                }
                _ => {
                    // TODO: Calling preventDefault on a touchup event should prevent clicks.
                }
            }
        }

        KeyEvent(key, state, modifiers) => {
            let page = get_page(&self.root_page(), pipeline_id);
            let document = page.document();
            document.dispatch_key_event(
                key, state, modifiers, &mut self.compositor.borrow_mut());
        }
    }
}

// Dispatches a single mouse event (click/down/up) to the page's document.
fn handle_mouse_event(&self,
                      pipeline_id: PipelineId,
                      mouse_event_type: MouseEventType,
                      button: MouseButton,
                      point: Point2D<f32>) {
    let page = get_page(&self.root_page(), pipeline_id);
    let document = page.document();
    document.handle_mouse_event(self.js_runtime.rt(), button, point, mouse_event_type);
}

// Dispatches a touch event to the document; the boolean result feeds the
// TouchEventProcessed reply in handle_event() above.
fn handle_touch_event(&self,
                      pipeline_id: PipelineId,
                      event_type: TouchEventType,
                      identifier: TouchId,
                      point: Point2D<f32>) -> bool {
    let page = get_page(&self.root_page(), pipeline_id);
    let document = page.document();
    document.handle_touch_event(self.js_runtime.rt(), event_type, identifier, point)
}

/// https://html.spec.whatwg.org/multipage/#navigating-across-documents
/// The entry point for content to notify that a new load has been requested
/// for the given pipeline (specifically the "navigate" algorithm).
fn handle_navigate(&self, pipeline_id: PipelineId, subpage_id: Option<SubpageId>, load_data: LoadData) {
    // Step 8.
{ let nurl = &load_data.url; if let Some(ref fragment) = nurl.fragment { let page = get_page(&self.root_page(), pipeline_id); let document = page.document(); let document = document.r(); let url = document.url(); if url.scheme == nurl.scheme && url.scheme_data == nurl.scheme_data && url.query == nurl.query && load_data.method == Method::Get { match document.find_fragment_node(&*fragment) { Some(ref node) => { self.scroll_fragment_point(pipeline_id, node.r()); } None => {} } return; } } } match subpage_id { Some(subpage_id) => { let borrowed_page = self.root_page(); let iframe = borrowed_page.find(pipeline_id).and_then(|page| { let doc = page.document(); doc.find_iframe(subpage_id) }); if let Some(iframe) = iframe.r() { iframe.navigate_child_browsing_context(load_data.url); } } None => { let ConstellationChan(ref const_chan) = self.constellation_chan; const_chan.send(ConstellationMsg::LoadUrl(pipeline_id, load_data)).unwrap(); } } } fn handle_resize_event(&self, pipeline_id: PipelineId, new_size: WindowSizeData) { let page = get_page(&self.root_page(), pipeline_id); let window = page.window(); window.set_window_size(new_size); window.force_reflow(ReflowGoal::ForDisplay, ReflowQueryType::NoQuery, ReflowReason::WindowResize); let document = page.document(); let fragment_node = window.steal_fragment_name() .and_then(|name| document.find_fragment_node(&*name)); match fragment_node { Some(ref node) => self.scroll_fragment_point(pipeline_id, node.r()), None => {} } // http://dev.w3.org/csswg/cssom-view/#resizing-viewports // https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#event-type-resize let uievent = UIEvent::new(window.r(), DOMString("resize".to_owned()), EventBubbles::DoesNotBubble, EventCancelable::NotCancelable, Some(window.r()), 0i32); uievent.upcast::<Event>().fire(window.upcast()); } /// Initiate a non-blocking fetch for a specified resource. Stores the InProgressLoad /// argument until a notification is received that the fetch is complete. 
fn start_page_load(&self, incomplete: InProgressLoad, mut load_data: LoadData) { let id = incomplete.pipeline_id.clone(); let subpage = incomplete.parent_info.clone().map(|p| p.1); let script_chan = self.chan.clone(); let resource_task = self.resource_task.clone(); let context = Arc::new(Mutex::new(ParserContext::new(id, subpage, script_chan.clone(), load_data.url.clone()))); let (action_sender, action_receiver) = ipc::channel().unwrap(); let listener = box NetworkListener { context: context, script_chan: script_chan.clone(), }; ROUTER.add_route(action_receiver.to_opaque(), box move |message| { listener.notify(message.to().unwrap()); }); let response_target = AsyncResponseTarget { sender: action_sender, }; if load_data.url.scheme == "javascript" { load_data.url = Url::parse("about:blank").unwrap(); } resource_task.send(ControlMsg::Load(NetLoadData { url: load_data.url, method: load_data.method, headers: Headers::new(), preserved_headers: load_data.headers, data: load_data.data, cors: None, pipeline_id: Some(id), }, LoadConsumer::Listener(response_target), None)).unwrap(); self.incomplete_loads.borrow_mut().push(incomplete); } fn handle_parsing_complete(&self, id: PipelineId) { let parent_page = self.root_page(); let page = match parent_page.find(id) { Some(page) => page, None => return, }; let document = page.document(); let final_url = document.url(); // https://html.spec.whatwg.org/multipage/#the-end step 1 document.set_ready_state(DocumentReadyState::Interactive); // TODO: Execute step 2 here. // Kick off the initial reflow of the page. debug!("kicking off initial reflow of {:?}", final_url); document.disarm_reflow_timeout(); document.content_changed(document.upcast(), NodeDamage::OtherNodeDamage); let window = window_from_node(document.r()); window.reflow(ReflowGoal::ForDisplay, ReflowQueryType::NoQuery, ReflowReason::FirstLoad); // No more reflow required page.set_reflow_status(false); // https://html.spec.whatwg.org/multipage/#the-end steps 3-4. 
document.process_deferred_scripts(); window.set_fragment_name(final_url.fragment.clone()); // Notify devtools that a new script global exists. //TODO: should this happen as soon as the global is created, or at least once the first // script runs? self.notify_devtools(document.Title(), (*final_url).clone(), (id, None)); } } impl Drop for ScriptTask { fn drop(&mut self) { SCRIPT_TASK_ROOT.with(|root| { *root.borrow_mut() = None; }); } } /// Shuts down layout for the given page tree. fn shut_down_layout(page_tree: &Rc<Page>) { let mut channels = vec!(); for page in page_tree.iter() { // Tell the layout task to begin shutting down, and wait until it // processed this message. let (response_chan, response_port) = channel(); let window = page.window(); let LayoutChan(chan) = window.layout_chan(); if chan.send(layout_interface::Msg::PrepareToExit(response_chan)).is_ok() { channels.push(chan); response_port.recv().unwrap(); } } // Drop our references to the JSContext and DOM objects. for page in page_tree.iter() { let window = page.window(); window.clear_js_runtime(); // Sever the connection between the global and the DOM tree page.set_frame(None); } // Destroy the layout task. If there were node leaks, layout will now crash safely. for chan in channels { chan.send(layout_interface::Msg::ExitNow).ok(); } } pub fn get_page(page: &Rc<Page>, pipeline_id: PipelineId) -> Rc<Page> { page.find(pipeline_id).expect("ScriptTask: received an event \ message for a layout channel that is not associated with this script task.\ This is a bug.") } fn dom_last_modified(tm: &Tm) -> String { tm.to_local().strftime("%m/%d/%Y %H:%M:%S").unwrap().to_string() } Consolidate 'subpage finding' script_task logic /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! 
The script task is the task that owns the DOM in memory, runs JavaScript, and spawns parsing //! and layout tasks. It's in charge of processing events for all same-origin pages in a frame //! tree, and manages the entire lifetime of pages in the frame tree from initial request to //! teardown. //! //! Page loads follow a two-step process. When a request for a new page load is received, the //! network request is initiated and the relevant data pertaining to the new page is stashed. //! While the non-blocking request is ongoing, the script task is free to process further events, //! noting when they pertain to ongoing loads (such as resizes/viewport adjustments). When the //! initial response is received for an ongoing load, the second phase starts - the frame tree //! entry is created, along with the Window and Document objects, and the appropriate parser //! takes over the response body. Once parsing is complete, the document lifecycle for loading //! a page runs its course and the script task returns to processing events in the main event //! loop. 
use devtools; use devtools_traits::ScriptToDevtoolsControlMsg; use devtools_traits::{DevtoolScriptControlMsg, DevtoolsPageInfo}; use document_loader::DocumentLoader; use dom::bindings::cell::DOMRefCell; use dom::bindings::codegen::Bindings::DocumentBinding::{DocumentMethods, DocumentReadyState}; use dom::bindings::conversions::{FromJSValConvertible, StringificationBehavior}; use dom::bindings::global::GlobalRef; use dom::bindings::inheritance::Castable; use dom::bindings::js::{JS, RootCollection, trace_roots}; use dom::bindings::js::{Root, RootCollectionPtr, RootedReference}; use dom::bindings::refcounted::{LiveDOMReferences, Trusted, TrustedReference, trace_refcounted_objects}; use dom::bindings::trace::{JSTraceable, RootedVec, trace_traceables}; use dom::bindings::utils::{DOM_CALLBACKS, WRAP_CALLBACKS}; use dom::document::{Document, DocumentProgressHandler, IsHTMLDocument}; use dom::document::{DocumentSource, MouseEventType}; use dom::element::Element; use dom::event::{Event, EventBubbles, EventCancelable}; use dom::node::{Node, NodeDamage, window_from_node}; use dom::servohtmlparser::{ParserContext, ServoHTMLParser}; use dom::uievent::UIEvent; use dom::window::{ReflowReason, ScriptHelpers, Window}; use dom::worker::TrustedWorkerAddress; use euclid::Rect; use euclid::point::Point2D; use hyper::header::{ContentType, HttpDate}; use hyper::header::{Headers, LastModified}; use hyper::method::Method; use hyper::mime::{Mime, SubLevel, TopLevel}; use ipc_channel::ipc::{self, IpcSender}; use ipc_channel::router::ROUTER; use js::glue::CollectServoSizes; use js::jsapi::{DOMProxyShadowsResult, HandleId, HandleObject, RootedValue, SetDOMProxyInformation}; use js::jsapi::{DisableIncrementalGC, JS_AddExtraGCRootsTracer, JS_SetWrapObjectCallbacks}; use js::jsapi::{GCDescription, GCProgress, JSGCInvocationKind, SetGCSliceCallback}; use js::jsapi::{JSAutoRequest, JSGCStatus, JS_GetRuntime, JS_SetGCCallback, SetDOMCallbacks}; use js::jsapi::{JSContext, JSRuntime, JSTracer}; use 
js::jsapi::{JSObject, SetPreserveWrapperCallback}; use js::jsval::UndefinedValue; use js::rust::Runtime; use layout_interface::{ReflowQueryType}; use layout_interface::{self, LayoutChan, NewLayoutTaskInfo, ReflowGoal, ScriptLayoutChan}; use libc; use mem::heap_size_of_self_and_children; use msg::compositor_msg::{EventResult, LayerId, ScriptToCompositorMsg}; use msg::constellation_msg::Msg as ConstellationMsg; use msg::constellation_msg::{ConstellationChan, FocusType, LoadData}; use msg::constellation_msg::{MozBrowserEvent, PipelineId}; use msg::constellation_msg::{PipelineNamespace}; use msg::constellation_msg::{SubpageId, WindowSizeData, WorkerId}; use msg::webdriver_msg::WebDriverScriptCommand; use net_traits::LoadData as NetLoadData; use net_traits::image_cache_task::{ImageCacheChan, ImageCacheResult, ImageCacheTask}; use net_traits::storage_task::StorageTask; use net_traits::{AsyncResponseTarget, ControlMsg, LoadConsumer, Metadata, ResourceTask}; use network_listener::NetworkListener; use page::{Frame, IterablePage, Page}; use parse::html::{ParseContext, parse_html}; use profile_traits::mem::{self, OpaqueSender, Report, ReportKind, ReportsChan}; use profile_traits::time::{self, ProfilerCategory, profile}; use script_traits::CompositorEvent::{ClickEvent, ResizeEvent}; use script_traits::CompositorEvent::{KeyEvent, MouseMoveEvent}; use script_traits::CompositorEvent::{MouseDownEvent, MouseUpEvent, TouchEvent}; use script_traits::{CompositorEvent, ConstellationControlMsg}; use script_traits::{InitialScriptState, MouseButton, NewLayoutInfo}; use script_traits::{OpaqueScriptLayoutChannel, ScriptState, ScriptTaskFactory}; use script_traits::{TimerEvent, TimerEventChan, TimerEventRequest, TimerSource}; use script_traits::{TouchEventType, TouchId}; use std::any::Any; use std::borrow::ToOwned; use std::cell::{Cell, RefCell}; use std::collections::HashSet; use std::io::{Write, stdout}; use std::marker::PhantomData; use std::mem as std_mem; use std::option::Option; use 
std::ptr; use std::rc::Rc; use std::result::Result; use std::sync::atomic::{Ordering, AtomicBool}; use std::sync::mpsc::{Receiver, Select, Sender, channel}; use std::sync::{Arc, Mutex}; use string_cache::Atom; use time::{Tm, now}; use url::{Url, UrlParser}; use util::opts; use util::str::DOMString; use util::task::spawn_named_with_send_on_failure; use util::task_state; use webdriver_handlers; thread_local!(pub static STACK_ROOTS: Cell<Option<RootCollectionPtr>> = Cell::new(None)); thread_local!(static SCRIPT_TASK_ROOT: RefCell<Option<*const ScriptTask>> = RefCell::new(None)); unsafe extern fn trace_rust_roots(tr: *mut JSTracer, _data: *mut libc::c_void) { SCRIPT_TASK_ROOT.with(|root| { if let Some(script_task) = *root.borrow() { (*script_task).trace(tr); } }); trace_traceables(tr); trace_roots(tr); } /// A document load that is in the process of fetching the requested resource. Contains /// data that will need to be present when the document and frame tree entry are created, /// but is only easily available at initiation of the load and on a push basis (so some /// data will be updated according to future resize events, viewport changes, etc.) #[derive(JSTraceable)] struct InProgressLoad { /// The pipeline which requested this load. pipeline_id: PipelineId, /// The parent pipeline and child subpage associated with this load, if any. parent_info: Option<(PipelineId, SubpageId)>, /// The current window size associated with this pipeline. window_size: Option<WindowSizeData>, /// Channel to the layout task associated with this pipeline. layout_chan: LayoutChan, /// The current viewport clipping rectangle applying to this pipeline, if any. clip_rect: Option<Rect<f32>>, /// The requested URL of the load. url: Url, } impl InProgressLoad { /// Create a new InProgressLoad object. 
fn new(id: PipelineId,
       parent_info: Option<(PipelineId, SubpageId)>,
       layout_chan: LayoutChan,
       window_size: Option<WindowSizeData>,
       url: Url) -> InProgressLoad {
    InProgressLoad {
        pipeline_id: id,
        parent_info: parent_info,
        layout_chan: layout_chan,
        window_size: window_size,
        // No viewport clip rect is known at load initiation; it may be set
        // by later viewport updates.
        clip_rect: None,
        url: url,
    }
}
}

/// Encapsulated state required to create cancellable runnables from non-script threads.
pub struct RunnableWrapper {
    pub cancelled: Arc<AtomicBool>,
}

impl RunnableWrapper {
    /// Pairs `runnable` with the shared cancellation flag, so consumers can
    /// query `is_cancelled()` before running it.
    pub fn wrap_runnable<T: Runnable + Send + 'static>(&self, runnable: T) -> Box<Runnable + Send> {
        box CancellableRunnable {
            cancelled: self.cancelled.clone(),
            inner: box runnable,
        }
    }
}

/// A runnable that can be discarded by toggling a shared flag.
pub struct CancellableRunnable<T: Runnable + Send> {
    // Shared with the RunnableWrapper that created this runnable.
    cancelled: Arc<AtomicBool>,
    inner: Box<T>,
}

impl<T: Runnable + Send> Runnable for CancellableRunnable<T> {
    fn is_cancelled(&self) -> bool {
        // NOTE(review): Relaxed ordering assumes no other data is published
        // via this flag — confirm against the code that sets it.
        self.cancelled.load(Ordering::Relaxed)
    }
    fn handler(self: Box<CancellableRunnable<T>>) {
        self.inner.handler()
    }
}

pub trait Runnable {
    // Default of false means ordinary runnables are never skipped.
    fn is_cancelled(&self) -> bool { false }
    fn handler(self: Box<Self>);
}

/// Like Runnable, but the handler also receives a reference to the ScriptTask.
pub trait MainThreadRunnable {
    fn handler(self: Box<Self>, script_task: &ScriptTask);
}

/// A message from one of the several channels the script task listens on,
/// tagged by its source.
enum MixedMessage {
    FromConstellation(ConstellationControlMsg),
    FromScript(MainThreadScriptMsg),
    FromDevtools(DevtoolScriptControlMsg),
    FromImageCache(ImageCacheResult),
    FromScheduler(TimerEvent),
}

/// Common messages used to control the event loops in both the script and the worker
pub enum CommonScriptMsg {
    /// Requests that the script task measure its memory usage. The results are sent back via the
    /// supplied channel.
    CollectReports(ReportsChan),
    /// A DOM object's last pinned reference was removed (dispatched to all tasks).
    RefcountCleanup(TrustedReference),
    /// Generic message that encapsulates event handling.
RunnableMsg(ScriptTaskEventCategory, Box<Runnable + Send>), } #[derive(Clone, Copy, Debug, Eq, Hash, JSTraceable, PartialEq)] pub enum ScriptTaskEventCategory { AttachLayout, ConstellationMsg, DevtoolsMsg, DocumentEvent, DomEvent, FileRead, ImageCacheMsg, InputEvent, NetworkEvent, Resize, ScriptEvent, TimerEvent, UpdateReplacedElement, SetViewport, WebSocketEvent, WorkerEvent, } /// Messages used to control the script event loop pub enum MainThreadScriptMsg { /// Common variants associated with the script messages Common(CommonScriptMsg), /// Notify a document that all pending loads are complete. DocumentLoadsComplete(PipelineId), /// Notifies the script that a window associated with a particular pipeline /// should be closed (only dispatched to ScriptTask). ExitWindow(PipelineId), /// Generic message for running tasks in the ScriptTask MainThreadRunnableMsg(Box<MainThreadRunnable + Send>), /// Begins a content-initiated load on the specified pipeline (only /// dispatched to ScriptTask). Navigate(PipelineId, LoadData), } /// A cloneable interface for communicating with an event loop. pub trait ScriptChan { /// Send a message to the associated event loop. fn send(&self, msg: CommonScriptMsg) -> Result<(), ()>; /// Clone this handle. fn clone(&self) -> Box<ScriptChan + Send>; } impl OpaqueSender<CommonScriptMsg> for Box<ScriptChan + Send> { fn send(&self, msg: CommonScriptMsg) { ScriptChan::send(&**self, msg).unwrap(); } } /// An interface for receiving ScriptMsg values in an event loop. Used for synchronous DOM /// APIs that need to abstract over multiple kinds of event loops (worker/main thread) with /// different Receiver interfaces. 
pub trait ScriptPort {
    fn recv(&self) -> CommonScriptMsg;
}

impl ScriptPort for Receiver<CommonScriptMsg> {
    fn recv(&self) -> CommonScriptMsg {
        // `self.recv()` resolves to the inherent mpsc `Receiver::recv`, not to
        // this trait method (inherent methods take precedence), so this does
        // not recurse.
        self.recv().unwrap()
    }
}

impl ScriptPort for Receiver<MainThreadScriptMsg> {
    fn recv(&self) -> CommonScriptMsg {
        // Only Common messages are expected on this port; any main-thread-only
        // variant here is a logic error.
        match self.recv().unwrap() {
            MainThreadScriptMsg::Common(script_msg) => script_msg,
            _ => panic!("unexpected main thread event message!")
        }
    }
}

impl ScriptPort for Receiver<(TrustedWorkerAddress, CommonScriptMsg)> {
    fn recv(&self) -> CommonScriptMsg {
        // The worker address in the tuple is discarded here.
        self.recv().unwrap().1
    }
}

impl ScriptPort for Receiver<(TrustedWorkerAddress, MainThreadScriptMsg)> {
    fn recv(&self) -> CommonScriptMsg {
        match self.recv().unwrap().1 {
            MainThreadScriptMsg::Common(script_msg) => script_msg,
            _ => panic!("unexpected main thread event message!")
        }
    }
}

/// Encapsulates internal communication of shared messages within the script task.
#[derive(JSTraceable)]
pub struct SendableMainThreadScriptChan(pub Sender<CommonScriptMsg>);

impl ScriptChan for SendableMainThreadScriptChan {
    fn send(&self, msg: CommonScriptMsg) -> Result<(), ()> {
        let SendableMainThreadScriptChan(ref chan) = *self;
        // Collapse the channel's SendError into the unit error required by
        // the ScriptChan trait.
        chan.send(msg).map_err(|_| ())
    }

    fn clone(&self) -> Box<ScriptChan + Send> {
        let SendableMainThreadScriptChan(ref chan) = *self;
        box SendableMainThreadScriptChan((*chan).clone())
    }
}

impl SendableMainThreadScriptChan {
    /// Creates a new script chan.
    pub fn new() -> (Receiver<CommonScriptMsg>, Box<SendableMainThreadScriptChan>) {
        let (chan, port) = channel();
        (port, box SendableMainThreadScriptChan(chan))
    }
}

/// Encapsulates internal communication of main thread messages within the script task.
#[derive(JSTraceable)] pub struct MainThreadScriptChan(pub Sender<MainThreadScriptMsg>); impl ScriptChan for MainThreadScriptChan { fn send(&self, msg: CommonScriptMsg) -> Result<(), ()> { let MainThreadScriptChan(ref chan) = *self; chan.send(MainThreadScriptMsg::Common(msg)).map_err(|_| ()) } fn clone(&self) -> Box<ScriptChan + Send> { let MainThreadScriptChan(ref chan) = *self; box MainThreadScriptChan((*chan).clone()) } } impl MainThreadScriptChan { /// Creates a new script chan. pub fn new() -> (Receiver<MainThreadScriptMsg>, Box<MainThreadScriptChan>) { let (chan, port) = channel(); (port, box MainThreadScriptChan(chan)) } } pub struct MainThreadTimerEventChan(Sender<TimerEvent>); impl TimerEventChan for MainThreadTimerEventChan { fn send(&self, event: TimerEvent) -> Result<(), ()> { let MainThreadTimerEventChan(ref chan) = *self; chan.send(event).map_err(|_| ()) } fn clone(&self) -> Box<TimerEventChan + Send> { let MainThreadTimerEventChan(ref chan) = *self; box MainThreadTimerEventChan((*chan).clone()) } } pub struct StackRootTLS<'a>(PhantomData<&'a u32>); impl<'a> StackRootTLS<'a> { pub fn new(roots: &'a RootCollection) -> StackRootTLS<'a> { STACK_ROOTS.with(|ref r| { r.set(Some(RootCollectionPtr(roots as *const _))) }); StackRootTLS(PhantomData) } } impl<'a> Drop for StackRootTLS<'a> { fn drop(&mut self) { STACK_ROOTS.with(|ref r| r.set(None)); } } /// Information for an entire page. Pages are top-level browsing contexts and can contain multiple /// frames. #[derive(JSTraceable)] // ScriptTask instances are rooted on creation, so this is okay #[allow(unrooted_must_root)] pub struct ScriptTask { /// A handle to the information pertaining to page layout page: DOMRefCell<Option<Rc<Page>>>, /// A list of data pertaining to loads that have not yet received a network response incomplete_loads: DOMRefCell<Vec<InProgressLoad>>, /// A handle to the image cache task. image_cache_task: ImageCacheTask, /// A handle to the resource task. 
This is an `Arc` to avoid running out of file descriptors if /// there are many iframes. resource_task: Arc<ResourceTask>, /// A handle to the storage task. storage_task: StorageTask, /// The port on which the script task receives messages (load URL, exit, etc.) port: Receiver<MainThreadScriptMsg>, /// A channel to hand out to script task-based entities that need to be able to enqueue /// events in the event queue. chan: MainThreadScriptChan, /// A channel to hand out to tasks that need to respond to a message from the script task. control_chan: Sender<ConstellationControlMsg>, /// The port on which the constellation and layout tasks can communicate with the /// script task. control_port: Receiver<ConstellationControlMsg>, /// For communicating load url messages to the constellation constellation_chan: ConstellationChan, /// A handle to the compositor for communicating ready state messages. compositor: DOMRefCell<IpcSender<ScriptToCompositorMsg>>, /// The port on which we receive messages from the image cache image_cache_port: Receiver<ImageCacheResult>, /// The channel on which the image cache can send messages to ourself. image_cache_channel: ImageCacheChan, /// For providing contact with the time profiler. time_profiler_chan: time::ProfilerChan, /// For providing contact with the memory profiler. mem_profiler_chan: mem::ProfilerChan, /// For providing instructions to an optional devtools server. devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>, /// For receiving commands from an optional devtools server. Will be ignored if /// no such server exists. devtools_port: Receiver<DevtoolScriptControlMsg>, devtools_sender: IpcSender<DevtoolScriptControlMsg>, /// The JavaScript runtime. js_runtime: Rc<Runtime>, mouse_over_targets: DOMRefCell<Vec<JS<Element>>>, /// List of pipelines that have been owned and closed by this script task. 
closed_pipelines: DOMRefCell<HashSet<PipelineId>>, scheduler_chan: Sender<TimerEventRequest>, timer_event_chan: Sender<TimerEvent>, timer_event_port: Receiver<TimerEvent>, } /// In the event of task failure, all data on the stack runs its destructor. However, there /// are no reachable, owning pointers to the DOM memory, so it never gets freed by default /// when the script task fails. The ScriptMemoryFailsafe uses the destructor bomb pattern /// to forcibly tear down the JS compartments for pages associated with the failing ScriptTask. struct ScriptMemoryFailsafe<'a> { owner: Option<&'a ScriptTask>, } impl<'a> ScriptMemoryFailsafe<'a> { fn neuter(&mut self) { self.owner = None; } fn new(owner: &'a ScriptTask) -> ScriptMemoryFailsafe<'a> { ScriptMemoryFailsafe { owner: Some(owner), } } } impl<'a> Drop for ScriptMemoryFailsafe<'a> { #[allow(unrooted_must_root)] fn drop(&mut self) { match self.owner { Some(owner) => { unsafe { let page = owner.page.borrow_for_script_deallocation(); for page in page.iter() { let window = page.window(); window.clear_js_runtime_for_script_deallocation(); } } } None => (), } } } impl ScriptTaskFactory for ScriptTask { fn create_layout_channel(_phantom: Option<&mut ScriptTask>) -> OpaqueScriptLayoutChannel { let (chan, port) = channel(); ScriptLayoutChan::new(chan, port) } fn clone_layout_channel(_phantom: Option<&mut ScriptTask>, pair: &OpaqueScriptLayoutChannel) -> Box<Any + Send> { box pair.sender() as Box<Any + Send> } fn create(_phantom: Option<&mut ScriptTask>, state: InitialScriptState, layout_chan: &OpaqueScriptLayoutChannel, load_data: LoadData) { let ConstellationChan(const_chan) = state.constellation_chan.clone(); let (script_chan, script_port) = channel(); let layout_chan = LayoutChan(layout_chan.sender()); let failure_info = state.failure_info; spawn_named_with_send_on_failure(format!("ScriptTask {:?}", state.id), task_state::SCRIPT, move || { PipelineNamespace::install(state.pipeline_namespace_id); let roots = 
RootCollection::new(); let _stack_roots_tls = StackRootTLS::new(&roots); let chan = MainThreadScriptChan(script_chan); let channel_for_reporter = chan.clone(); let id = state.id; let parent_info = state.parent_info; let mem_profiler_chan = state.mem_profiler_chan.clone(); let window_size = state.window_size; let script_task = ScriptTask::new(state, script_port, chan); SCRIPT_TASK_ROOT.with(|root| { *root.borrow_mut() = Some(&script_task as *const _); }); let mut failsafe = ScriptMemoryFailsafe::new(&script_task); let new_load = InProgressLoad::new(id, parent_info, layout_chan, window_size, load_data.url.clone()); script_task.start_page_load(new_load, load_data); let reporter_name = format!("script-reporter-{}", id); mem_profiler_chan.run_with_memory_reporting(|| { script_task.start(); }, reporter_name, channel_for_reporter, CommonScriptMsg::CollectReports); // This must always be the very last operation performed before the task completes failsafe.neuter(); }, ConstellationMsg::Failure(failure_info), const_chan); } } thread_local!(static GC_CYCLE_START: Cell<Option<Tm>> = Cell::new(None)); thread_local!(static GC_SLICE_START: Cell<Option<Tm>> = Cell::new(None)); unsafe extern "C" fn gc_slice_callback(_rt: *mut JSRuntime, progress: GCProgress, desc: *const GCDescription) { match progress { GCProgress::GC_CYCLE_BEGIN => { GC_CYCLE_START.with(|start| { start.set(Some(now())); println!("GC cycle began"); }) }, GCProgress::GC_SLICE_BEGIN => { GC_SLICE_START.with(|start| { start.set(Some(now())); println!("GC slice began"); }) }, GCProgress::GC_SLICE_END => { GC_SLICE_START.with(|start| { let dur = now() - start.get().unwrap(); start.set(None); println!("GC slice ended: duration={}", dur); }) }, GCProgress::GC_CYCLE_END => { GC_CYCLE_START.with(|start| { let dur = now() - start.get().unwrap(); start.set(None); println!("GC cycle ended: duration={}", dur); }) }, }; if !desc.is_null() { let desc: &GCDescription = &*desc; let invocationKind = match desc.invocationKind_ { 
JSGCInvocationKind::GC_NORMAL => "GC_NORMAL", JSGCInvocationKind::GC_SHRINK => "GC_SHRINK", }; println!(" isCompartment={}, invocationKind={}", desc.isCompartment_, invocationKind); } let _ = stdout().flush(); } unsafe extern "C" fn debug_gc_callback(_rt: *mut JSRuntime, status: JSGCStatus, _data: *mut libc::c_void) { match status { JSGCStatus::JSGC_BEGIN => task_state::enter(task_state::IN_GC), JSGCStatus::JSGC_END => task_state::exit(task_state::IN_GC), } } unsafe extern "C" fn shadow_check_callback(_cx: *mut JSContext, _object: HandleObject, _id: HandleId) -> DOMProxyShadowsResult { // XXX implement me DOMProxyShadowsResult::ShadowCheckFailed } impl ScriptTask { pub fn page_fetch_complete(id: PipelineId, subpage: Option<SubpageId>, metadata: Metadata) -> Option<Root<ServoHTMLParser>> { SCRIPT_TASK_ROOT.with(|root| { let script_task = unsafe { &*root.borrow().unwrap() }; script_task.handle_page_fetch_complete(id, subpage, metadata) }) } pub fn parsing_complete(id: PipelineId) { SCRIPT_TASK_ROOT.with(|root| { let script_task = unsafe { &*root.borrow().unwrap() }; script_task.handle_parsing_complete(id); }); } pub fn process_event(msg: CommonScriptMsg) { SCRIPT_TASK_ROOT.with(|root| { if let Some(script_task) = *root.borrow() { let script_task = unsafe { &*script_task }; script_task.handle_msg_from_script(MainThreadScriptMsg::Common(msg)); } }); } /// Creates a new script task. pub fn new(state: InitialScriptState, port: Receiver<MainThreadScriptMsg>, chan: MainThreadScriptChan) -> ScriptTask { let runtime = ScriptTask::new_rt_and_cx(); unsafe { JS_SetWrapObjectCallbacks(runtime.rt(), &WRAP_CALLBACKS); } // Ask the router to proxy IPC messages from the devtools to us. let (ipc_devtools_sender, ipc_devtools_receiver) = ipc::channel().unwrap(); let devtools_port = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(ipc_devtools_receiver); // Ask the router to proxy IPC messages from the image cache task to us. 
let (ipc_image_cache_channel, ipc_image_cache_port) = ipc::channel().unwrap(); let image_cache_port = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(ipc_image_cache_port); let (timer_event_chan, timer_event_port) = channel(); ScriptTask { page: DOMRefCell::new(None), incomplete_loads: DOMRefCell::new(vec!()), image_cache_task: state.image_cache_task, image_cache_channel: ImageCacheChan(ipc_image_cache_channel), image_cache_port: image_cache_port, resource_task: Arc::new(state.resource_task), storage_task: state.storage_task, port: port, chan: chan, control_chan: state.control_chan, control_port: state.control_port, constellation_chan: state.constellation_chan, compositor: DOMRefCell::new(state.compositor), time_profiler_chan: state.time_profiler_chan, mem_profiler_chan: state.mem_profiler_chan, devtools_chan: state.devtools_chan, devtools_port: devtools_port, devtools_sender: ipc_devtools_sender, js_runtime: Rc::new(runtime), mouse_over_targets: DOMRefCell::new(vec!()), closed_pipelines: DOMRefCell::new(HashSet::new()), scheduler_chan: state.scheduler_chan, timer_event_chan: timer_event_chan, timer_event_port: timer_event_port, } } pub fn new_rt_and_cx() -> Runtime { LiveDOMReferences::initialize(); let runtime = Runtime::new(); unsafe { JS_AddExtraGCRootsTracer(runtime.rt(), Some(trace_rust_roots), ptr::null_mut()); JS_AddExtraGCRootsTracer(runtime.rt(), Some(trace_refcounted_objects), ptr::null_mut()); } // Needed for debug assertions about whether GC is running. 
if cfg!(debug_assertions) { unsafe { JS_SetGCCallback(runtime.rt(), Some(debug_gc_callback), ptr::null_mut()); } } if opts::get().gc_profile { unsafe { SetGCSliceCallback(runtime.rt(), Some(gc_slice_callback)); } } unsafe { unsafe extern "C" fn empty_wrapper_callback(_: *mut JSContext, _: *mut JSObject) -> bool { true } SetDOMProxyInformation(ptr::null(), 0, Some(shadow_check_callback)); SetDOMCallbacks(runtime.rt(), &DOM_CALLBACKS); SetPreserveWrapperCallback(runtime.rt(), Some(empty_wrapper_callback)); // Pre barriers aren't working correctly at the moment DisableIncrementalGC(runtime.rt()); } runtime } // Return the root page in the frame tree. Panics if it doesn't exist. pub fn root_page(&self) -> Rc<Page> { self.page.borrow().as_ref().unwrap().clone() } /// Find a child page of the root page by pipeline id. Returns `None` if the root page does /// not exist or the subpage cannot be found. fn find_subpage(&self, pipeline_id: PipelineId) -> Option<Rc<Page>> { self.page.borrow().as_ref().and_then(|page| page.find(pipeline_id)) } pub fn get_cx(&self) -> *mut JSContext { self.js_runtime.cx() } /// Starts the script task. After calling this method, the script task will loop receiving /// messages on its port. pub fn start(&self) { while self.handle_msgs() { // Go on... } } /// Handle incoming control messages. fn handle_msgs(&self) -> bool { // Handle pending resize events. // Gather them first to avoid a double mut borrow on self. let mut resizes = vec!(); { let page = self.page.borrow(); if let Some(page) = page.as_ref() { for page in page.iter() { // Only process a resize if layout is idle. let window = page.window(); let resize_event = window.steal_resize_event(); match resize_event { Some(size) => resizes.push((window.pipeline(), size)), None => () } } } } for (id, size) in resizes { self.handle_event(id, ResizeEvent(size)); } // Store new resizes, and gather all other events. let mut sequential = vec!(); // Receive at least one message so we don't spinloop. 
let mut event = { let sel = Select::new(); let mut script_port = sel.handle(&self.port); let mut control_port = sel.handle(&self.control_port); let mut timer_event_port = sel.handle(&self.timer_event_port); let mut devtools_port = sel.handle(&self.devtools_port); let mut image_cache_port = sel.handle(&self.image_cache_port); unsafe { script_port.add(); control_port.add(); timer_event_port.add(); if self.devtools_chan.is_some() { devtools_port.add(); } image_cache_port.add(); } let ret = sel.wait(); if ret == script_port.id() { MixedMessage::FromScript(self.port.recv().unwrap()) } else if ret == control_port.id() { MixedMessage::FromConstellation(self.control_port.recv().unwrap()) } else if ret == timer_event_port.id() { MixedMessage::FromScheduler(self.timer_event_port.recv().unwrap()) } else if ret == devtools_port.id() { MixedMessage::FromDevtools(self.devtools_port.recv().unwrap()) } else if ret == image_cache_port.id() { MixedMessage::FromImageCache(self.image_cache_port.recv().unwrap()) } else { panic!("unexpected select result") } }; // Squash any pending resize, reflow, animation tick, and mouse-move events in the queue. let mut mouse_move_event_index = None; let mut animation_ticks = HashSet::new(); loop { match event { // This has to be handled before the ResizeMsg below, // otherwise the page may not have been added to the // child list yet, causing the find() to fail. 
MixedMessage::FromConstellation(ConstellationControlMsg::AttachLayout( new_layout_info)) => { self.profile_event(ScriptTaskEventCategory::AttachLayout, || { self.handle_new_layout(new_layout_info); }) } MixedMessage::FromConstellation(ConstellationControlMsg::Resize(id, size)) => { self.profile_event(ScriptTaskEventCategory::Resize, || { self.handle_resize(id, size); }) } MixedMessage::FromConstellation(ConstellationControlMsg::Viewport(id, rect)) => { self.profile_event(ScriptTaskEventCategory::SetViewport, || { self.handle_viewport(id, rect); }) } MixedMessage::FromConstellation(ConstellationControlMsg::TickAllAnimations( pipeline_id)) => { if !animation_ticks.contains(&pipeline_id) { animation_ticks.insert(pipeline_id); sequential.push(event); } } MixedMessage::FromConstellation(ConstellationControlMsg::SendEvent( _, MouseMoveEvent(_))) => { match mouse_move_event_index { None => { mouse_move_event_index = Some(sequential.len()); sequential.push(event); } Some(index) => { sequential[index] = event } } } _ => { sequential.push(event); } } // If any of our input sources has an event pending, we'll perform another iteration // and check for more resize events. If there are no events pending, we'll move // on and execute the sequential non-resize events we've seen. match self.control_port.try_recv() { Err(_) => match self.port.try_recv() { Err(_) => match self.timer_event_port.try_recv() { Err(_) => match self.devtools_port.try_recv() { Err(_) => match self.image_cache_port.try_recv() { Err(_) => break, Ok(ev) => event = MixedMessage::FromImageCache(ev), }, Ok(ev) => event = MixedMessage::FromDevtools(ev), }, Ok(ev) => event = MixedMessage::FromScheduler(ev), }, Ok(ev) => event = MixedMessage::FromScript(ev), }, Ok(ev) => event = MixedMessage::FromConstellation(ev), } } // Process the gathered events. 
for msg in sequential { let category = self.categorize_msg(&msg); let result = self.profile_event(category, move || { match msg { MixedMessage::FromConstellation(ConstellationControlMsg::ExitPipeline(id)) => { if self.handle_exit_pipeline_msg(id) { return Some(false) } }, MixedMessage::FromConstellation(inner_msg) => self.handle_msg_from_constellation(inner_msg), MixedMessage::FromScript(inner_msg) => self.handle_msg_from_script(inner_msg), MixedMessage::FromScheduler(inner_msg) => self.handle_timer_event(inner_msg), MixedMessage::FromDevtools(inner_msg) => self.handle_msg_from_devtools(inner_msg), MixedMessage::FromImageCache(inner_msg) => self.handle_msg_from_image_cache(inner_msg), } None }); if let Some(retval) = result { return retval } } // Issue batched reflows on any pages that require it (e.g. if images loaded) // TODO(gw): In the future we could probably batch other types of reflows // into this loop too, but for now it's only images. let page = self.page.borrow(); if let Some(page) = page.as_ref() { for page in page.iter() { let window = page.window(); let pending_reflows = window.get_pending_reflow_count(); if pending_reflows > 0 { window.reflow(ReflowGoal::ForDisplay, ReflowQueryType::NoQuery, ReflowReason::ImageLoaded); } } } true } fn categorize_msg(&self, msg: &MixedMessage) -> ScriptTaskEventCategory { match *msg { MixedMessage::FromConstellation(ref inner_msg) => { match *inner_msg { ConstellationControlMsg::SendEvent(_, _) => ScriptTaskEventCategory::DomEvent, _ => ScriptTaskEventCategory::ConstellationMsg } }, MixedMessage::FromDevtools(_) => ScriptTaskEventCategory::DevtoolsMsg, MixedMessage::FromImageCache(_) => ScriptTaskEventCategory::ImageCacheMsg, MixedMessage::FromScript(ref inner_msg) => { match *inner_msg { MainThreadScriptMsg::Common(CommonScriptMsg::RunnableMsg(ref category, _)) => *category, _ => ScriptTaskEventCategory::ScriptEvent } }, MixedMessage::FromScheduler(_) => ScriptTaskEventCategory::TimerEvent, } } fn profile_event<F, 
R>(&self, category: ScriptTaskEventCategory, f: F) -> R where F: FnOnce() -> R { if opts::get().profile_script_events { let profiler_cat = match category { ScriptTaskEventCategory::AttachLayout => ProfilerCategory::ScriptAttachLayout, ScriptTaskEventCategory::ConstellationMsg => ProfilerCategory::ScriptConstellationMsg, ScriptTaskEventCategory::DevtoolsMsg => ProfilerCategory::ScriptDevtoolsMsg, ScriptTaskEventCategory::DocumentEvent => ProfilerCategory::ScriptDocumentEvent, ScriptTaskEventCategory::DomEvent => ProfilerCategory::ScriptDomEvent, ScriptTaskEventCategory::FileRead => ProfilerCategory::ScriptFileRead, ScriptTaskEventCategory::ImageCacheMsg => ProfilerCategory::ScriptImageCacheMsg, ScriptTaskEventCategory::InputEvent => ProfilerCategory::ScriptInputEvent, ScriptTaskEventCategory::NetworkEvent => ProfilerCategory::ScriptNetworkEvent, ScriptTaskEventCategory::Resize => ProfilerCategory::ScriptResize, ScriptTaskEventCategory::ScriptEvent => ProfilerCategory::ScriptEvent, ScriptTaskEventCategory::UpdateReplacedElement => ProfilerCategory::ScriptUpdateReplacedElement, ScriptTaskEventCategory::SetViewport => ProfilerCategory::ScriptSetViewport, ScriptTaskEventCategory::TimerEvent => ProfilerCategory::ScriptTimerEvent, ScriptTaskEventCategory::WebSocketEvent => ProfilerCategory::ScriptWebSocketEvent, ScriptTaskEventCategory::WorkerEvent => ProfilerCategory::ScriptWorkerEvent, }; profile(profiler_cat, None, self.time_profiler_chan.clone(), f) } else { f() } } fn handle_msg_from_constellation(&self, msg: ConstellationControlMsg) { match msg { ConstellationControlMsg::AttachLayout(_) => panic!("should have handled AttachLayout already"), ConstellationControlMsg::Navigate(pipeline_id, subpage_id, load_data) => self.handle_navigate(pipeline_id, Some(subpage_id), load_data), ConstellationControlMsg::SendEvent(id, event) => self.handle_event(id, event), ConstellationControlMsg::ResizeInactive(id, new_size) => self.handle_resize_inactive_msg(id, new_size), 
ConstellationControlMsg::Viewport(..) => panic!("should have handled Viewport already"), ConstellationControlMsg::Resize(..) => panic!("should have handled Resize already"), ConstellationControlMsg::ExitPipeline(..) => panic!("should have handled ExitPipeline already"), ConstellationControlMsg::GetTitle(pipeline_id) => self.handle_get_title_msg(pipeline_id), ConstellationControlMsg::Freeze(pipeline_id) => self.handle_freeze_msg(pipeline_id), ConstellationControlMsg::Thaw(pipeline_id) => self.handle_thaw_msg(pipeline_id), ConstellationControlMsg::MozBrowserEvent(parent_pipeline_id, subpage_id, event) => self.handle_mozbrowser_event_msg(parent_pipeline_id, subpage_id, event), ConstellationControlMsg::UpdateSubpageId(containing_pipeline_id, old_subpage_id, new_subpage_id) => self.handle_update_subpage_id(containing_pipeline_id, old_subpage_id, new_subpage_id), ConstellationControlMsg::FocusIFrame(containing_pipeline_id, subpage_id) => self.handle_focus_iframe_msg(containing_pipeline_id, subpage_id), ConstellationControlMsg::WebDriverScriptCommand(pipeline_id, msg) => self.handle_webdriver_msg(pipeline_id, msg), ConstellationControlMsg::TickAllAnimations(pipeline_id) => self.handle_tick_all_animations(pipeline_id), ConstellationControlMsg::WebFontLoaded(pipeline_id) => self.handle_web_font_loaded(pipeline_id), ConstellationControlMsg::GetCurrentState(sender, pipeline_id) => { let state = self.handle_get_current_state(pipeline_id); sender.send(state).unwrap(); } } } fn handle_msg_from_script(&self, msg: MainThreadScriptMsg) { match msg { MainThreadScriptMsg::Navigate(id, load_data) => self.handle_navigate(id, None, load_data), MainThreadScriptMsg::ExitWindow(id) => self.handle_exit_window_msg(id), MainThreadScriptMsg::MainThreadRunnableMsg(runnable) => runnable.handler(self), MainThreadScriptMsg::DocumentLoadsComplete(id) => self.handle_loads_complete(id), MainThreadScriptMsg::Common(CommonScriptMsg::RunnableMsg(_, runnable)) => { // The category of the runnable is 
ignored by the pattern, however // it is still respected by profiling (see categorize_msg). if !runnable.is_cancelled() { runnable.handler() } } MainThreadScriptMsg::Common(CommonScriptMsg::RefcountCleanup(addr)) => LiveDOMReferences::cleanup(addr), MainThreadScriptMsg::Common(CommonScriptMsg::CollectReports(reports_chan)) => self.collect_reports(reports_chan), } } fn handle_timer_event(&self, timer_event: TimerEvent) { let TimerEvent(source, id) = timer_event; let pipeline_id = match source { TimerSource::FromWindow(pipeline_id) => pipeline_id, TimerSource::FromWorker => panic!("Worker timeouts must not be sent to script task"), }; let page = self.root_page(); let page = page.find(pipeline_id).expect("ScriptTask: received fire timer msg for a pipeline ID not associated with this script task. This is a bug."); let window = page.window(); window.handle_fire_timer(id); } fn handle_msg_from_devtools(&self, msg: DevtoolScriptControlMsg) { let page = self.root_page(); match msg { DevtoolScriptControlMsg::EvaluateJS(id, s, reply) => { let window = get_page(&page, id).window(); let global_ref = GlobalRef::Window(window.r()); devtools::handle_evaluate_js(&global_ref, s, reply) }, DevtoolScriptControlMsg::GetRootNode(id, reply) => devtools::handle_get_root_node(&page, id, reply), DevtoolScriptControlMsg::GetDocumentElement(id, reply) => devtools::handle_get_document_element(&page, id, reply), DevtoolScriptControlMsg::GetChildren(id, node_id, reply) => devtools::handle_get_children(&page, id, node_id, reply), DevtoolScriptControlMsg::GetLayout(id, node_id, reply) => devtools::handle_get_layout(&page, id, node_id, reply), DevtoolScriptControlMsg::GetCachedMessages(pipeline_id, message_types, reply) => devtools::handle_get_cached_messages(pipeline_id, message_types, reply), DevtoolScriptControlMsg::ModifyAttribute(id, node_id, modifications) => devtools::handle_modify_attribute(&page, id, node_id, modifications), DevtoolScriptControlMsg::WantsLiveNotifications(id, to_send) => 
{ let window = get_page(&page, id).window(); let global_ref = GlobalRef::Window(window.r()); devtools::handle_wants_live_notifications(&global_ref, to_send) }, DevtoolScriptControlMsg::SetTimelineMarkers(_pipeline_id, marker_types, reply) => devtools::handle_set_timeline_markers(&page, marker_types, reply), DevtoolScriptControlMsg::DropTimelineMarkers(_pipeline_id, marker_types) => devtools::handle_drop_timeline_markers(&page, marker_types), DevtoolScriptControlMsg::RequestAnimationFrame(pipeline_id, name) => devtools::handle_request_animation_frame(&page, pipeline_id, name), } } fn handle_msg_from_image_cache(&self, msg: ImageCacheResult) { msg.responder.unwrap().respond(msg.image_response); } fn handle_webdriver_msg(&self, pipeline_id: PipelineId, msg: WebDriverScriptCommand) { let page = self.root_page(); match msg { WebDriverScriptCommand::ExecuteScript(script, reply) => webdriver_handlers::handle_execute_script(&page, pipeline_id, script, reply), WebDriverScriptCommand::FindElementCSS(selector, reply) => webdriver_handlers::handle_find_element_css(&page, pipeline_id, selector, reply), WebDriverScriptCommand::FindElementsCSS(selector, reply) => webdriver_handlers::handle_find_elements_css(&page, pipeline_id, selector, reply), WebDriverScriptCommand::GetActiveElement(reply) => webdriver_handlers::handle_get_active_element(&page, pipeline_id, reply), WebDriverScriptCommand::GetElementTagName(node_id, reply) => webdriver_handlers::handle_get_name(&page, pipeline_id, node_id, reply), WebDriverScriptCommand::GetElementText(node_id, reply) => webdriver_handlers::handle_get_text(&page, pipeline_id, node_id, reply), WebDriverScriptCommand::GetFrameId(frame_id, reply) => webdriver_handlers::handle_get_frame_id(&page, pipeline_id, frame_id, reply), WebDriverScriptCommand::GetUrl(reply) => webdriver_handlers::handle_get_url(&page, pipeline_id, reply), WebDriverScriptCommand::GetTitle(reply) => webdriver_handlers::handle_get_title(&page, pipeline_id, reply), 
WebDriverScriptCommand::ExecuteAsyncScript(script, reply) => webdriver_handlers::handle_execute_async_script(&page, pipeline_id, script, reply), } } fn handle_resize(&self, id: PipelineId, size: WindowSizeData) { if let Some(ref page) = self.find_subpage(id) { let window = page.window(); window.set_resize_event(size); return; } let mut loads = self.incomplete_loads.borrow_mut(); if let Some(ref mut load) = loads.iter_mut().find(|load| load.pipeline_id == id) { load.window_size = Some(size); return; } panic!("resize sent to nonexistent pipeline"); } fn handle_viewport(&self, id: PipelineId, rect: Rect<f32>) { let page = self.page.borrow(); if let Some(page) = page.as_ref() { if let Some(ref inner_page) = page.find(id) { let window = inner_page.window(); if window.set_page_clip_rect_with_new_viewport(rect) { let page = get_page(page, id); self.rebuild_and_force_reflow(&*page, ReflowReason::Viewport); } return; } } let mut loads = self.incomplete_loads.borrow_mut(); if let Some(ref mut load) = loads.iter_mut().find(|load| load.pipeline_id == id) { load.clip_rect = Some(rect); return; } panic!("Page rect message sent to nonexistent pipeline"); } /// Get the current state of a given pipeline. fn handle_get_current_state(&self, pipeline_id: PipelineId) -> ScriptState { // Check if the main page load is still pending let loads = self.incomplete_loads.borrow(); if let Some(_) = loads.iter().find(|load| load.pipeline_id == pipeline_id) { return ScriptState::DocumentLoading; } // If not in pending loads, the page should exist by now. let page = self.root_page(); let page = page.find(pipeline_id).expect("GetCurrentState sent to nonexistent pipeline"); let doc = page.document(); // Check if document load event has fired. If the document load // event has fired, this also guarantees that the first reflow // has been kicked off. 
Since the script task does a join with // layout, this ensures there are no race conditions that can occur // between load completing and the first layout completing. let load_pending = doc.ReadyState() != DocumentReadyState::Complete; if load_pending { return ScriptState::DocumentLoading; } // Checks if the html element has reftest-wait attribute present. // See http://testthewebforward.org/docs/reftests.html let html_element = doc.GetDocumentElement(); let reftest_wait = html_element.map_or(false, |elem| elem.has_class(&Atom::from_slice("reftest-wait"))); if reftest_wait { return ScriptState::DocumentLoading; } ScriptState::DocumentLoaded } fn handle_new_layout(&self, new_layout_info: NewLayoutInfo) { let NewLayoutInfo { containing_pipeline_id, new_pipeline_id, subpage_id, load_data, paint_chan, failure, pipeline_port, layout_shutdown_chan, } = new_layout_info; let layout_pair = ScriptTask::create_layout_channel(None::<&mut ScriptTask>); let layout_chan = LayoutChan(*ScriptTask::clone_layout_channel( None::<&mut ScriptTask>, &layout_pair).downcast::<Sender<layout_interface::Msg>>().unwrap()); let layout_creation_info = NewLayoutTaskInfo { id: new_pipeline_id, url: load_data.url.clone(), is_parent: false, layout_pair: layout_pair, pipeline_port: pipeline_port, constellation_chan: self.constellation_chan.clone(), failure: failure, paint_chan: paint_chan, script_chan: self.control_chan.clone(), image_cache_task: self.image_cache_task.clone(), layout_shutdown_chan: layout_shutdown_chan, }; let page = self.root_page(); let parent_page = page.find(containing_pipeline_id).expect("ScriptTask: received a layout whose parent has a PipelineId which does not correspond to a pipeline in the script task's page tree. This is a bug."); let parent_window = parent_page.window(); // Tell layout to actually spawn the task. parent_window.layout_chan() .0 .send(layout_interface::Msg::CreateLayoutTask(layout_creation_info)) .unwrap(); // Kick off the fetch for the new resource. 
let new_load = InProgressLoad::new(new_pipeline_id, Some((containing_pipeline_id, subpage_id)), layout_chan, parent_window.window_size(), load_data.url.clone()); self.start_page_load(new_load, load_data); } fn handle_loads_complete(&self, pipeline: PipelineId) { let page = get_page(&self.root_page(), pipeline); let doc = page.document(); let doc = doc.r(); if doc.loader().is_blocked() { return; } doc.mut_loader().inhibit_events(); // https://html.spec.whatwg.org/multipage/#the-end step 7 let addr: Trusted<Document> = Trusted::new(self.get_cx(), doc, self.chan.clone()); let handler = box DocumentProgressHandler::new(addr.clone()); self.chan.send(CommonScriptMsg::RunnableMsg(ScriptTaskEventCategory::DocumentEvent, handler)).unwrap(); let ConstellationChan(ref chan) = self.constellation_chan; chan.send(ConstellationMsg::LoadComplete(pipeline)).unwrap(); } pub fn get_reports(cx: *mut JSContext, path_seg: String) -> Vec<Report> { let mut reports = vec![]; unsafe { let rt = JS_GetRuntime(cx); let mut stats = ::std::mem::zeroed(); if CollectServoSizes(rt, &mut stats) { let mut report = |mut path_suffix, kind, size| { let mut path = path![path_seg, "js"]; path.append(&mut path_suffix); reports.push(Report { path: path, kind: kind, size: size as usize, }) }; // A note about possibly confusing terminology: the JS GC "heap" is allocated via // mmap/VirtualAlloc, which means it's not on the malloc "heap", so we use // `ExplicitNonHeapSize` as its kind. report(path!["gc-heap", "used"], ReportKind::ExplicitNonHeapSize, stats.gcHeapUsed); report(path!["gc-heap", "unused"], ReportKind::ExplicitNonHeapSize, stats.gcHeapUnused); report(path!["gc-heap", "admin"], ReportKind::ExplicitNonHeapSize, stats.gcHeapAdmin); report(path!["gc-heap", "decommitted"], ReportKind::ExplicitNonHeapSize, stats.gcHeapDecommitted); // SpiderMonkey uses the system heap, not jemalloc. 
report(path!["malloc-heap"], ReportKind::ExplicitSystemHeapSize, stats.mallocHeap); report(path!["non-heap"], ReportKind::ExplicitNonHeapSize, stats.nonHeap); } } reports } fn collect_reports(&self, reports_chan: ReportsChan) { let mut urls = vec![]; let mut dom_tree_size = 0; let mut reports = vec![]; if let Some(root_page) = self.page.borrow().as_ref() { for it_page in root_page.iter() { let current_url = it_page.document().url().serialize(); urls.push(current_url.clone()); for child in it_page.document().upcast::<Node>().traverse_preorder() { dom_tree_size += heap_size_of_self_and_children(&*child); } let window = it_page.window(); dom_tree_size += heap_size_of_self_and_children(&*window); reports.push(Report { path: path![format!("url({})", current_url), "dom-tree"], kind: ReportKind::ExplicitJemallocHeapSize, size: dom_tree_size, }) } } let path_seg = format!("url({})", urls.join(", ")); reports.extend(ScriptTask::get_reports(self.get_cx(), path_seg)); reports_chan.send(reports); } /// Handles freeze message fn handle_freeze_msg(&self, id: PipelineId) { // Workaround for a race condition when navigating before the initial page has // been constructed c.f. https://github.com/servo/servo/issues/7677 if self.page.borrow().is_none() { return }; let page = self.root_page(); let page = page.find(id).expect("ScriptTask: received freeze msg for a pipeline ID not associated with this script task. This is a bug."); let window = page.window(); window.freeze(); } /// Handles thaw message fn handle_thaw_msg(&self, id: PipelineId) { // We should only get this message when moving in history, so all pages requested // should exist. 
        let page = self.root_page().find(id).unwrap();
        // A frozen (history-cached) page defers reflows; catch up now if one was needed.
        let needed_reflow = page.set_reflow_status(false);
        if needed_reflow {
            self.rebuild_and_force_reflow(&*page, ReflowReason::CachedPageNeededReflow);
        }
        let window = page.window();
        window.thaw();
    }

    /// Moves document focus into the iframe identified by (`parent_pipeline_id`, `subpage_id`).
    fn handle_focus_iframe_msg(&self,
                               parent_pipeline_id: PipelineId,
                               subpage_id: SubpageId) {
        let borrowed_page = self.root_page();
        let page = borrowed_page.find(parent_pipeline_id).unwrap();
        let doc = page.document();
        let frame_element = doc.find_iframe(subpage_id);
        if let Some(ref frame_element) = frame_element {
            doc.begin_focus_transaction();
            doc.request_focus(frame_element.upcast());
            doc.commit_focus_transaction(FocusType::Parent);
        }
    }

    /// Handles a mozbrowser event, for example see:
    /// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowserloadstart
    fn handle_mozbrowser_event_msg(&self,
                                   parent_pipeline_id: PipelineId,
                                   subpage_id: SubpageId,
                                   event: MozBrowserEvent) {
        let borrowed_page = self.root_page();
        let frame_element = borrowed_page.find(parent_pipeline_id).and_then(|page| {
            let doc = page.document();
            doc.find_iframe(subpage_id)
        });
        if let Some(ref frame_element) = frame_element {
            frame_element.dispatch_mozbrowser_event(event);
        }
    }

    /// Rewrites the subpage id stored on an iframe element after a navigation
    /// gave its content a new pipeline.
    fn handle_update_subpage_id(&self,
                                containing_pipeline_id: PipelineId,
                                old_subpage_id: SubpageId,
                                new_subpage_id: SubpageId) {
        let borrowed_page = self.root_page();
        let frame_element = borrowed_page.find(containing_pipeline_id).and_then(|page| {
            let doc = page.document();
            doc.find_iframe(old_subpage_id)
        });
        frame_element.unwrap().update_subpage_id(new_subpage_id);
    }

    /// Window was resized, but this script was not active, so don't reflow yet
    fn handle_resize_inactive_msg(&self, id: PipelineId, new_size: WindowSizeData) {
        let page = self.root_page();
        let page = page.find(id).expect("Received resize message for PipelineId not associated with a page in the page tree. This is a bug.");
        let window = page.window();
        window.set_window_size(new_size);
        // Defer the reflow until this page is thawed/displayed again.
        page.set_reflow_status(true);
    }

    /// We have gotten a window.close from script, which we pass on to the compositor.
    /// We do not shut down the script task now, because the compositor will ask the
    /// constellation to shut down the pipeline, which will clean everything up
    /// normally. If we do exit, we will tear down the DOM nodes, possibly at a point
    /// where layout is still accessing them.
    fn handle_exit_window_msg(&self, _: PipelineId) {
        debug!("script task handling exit window msg");

        // TODO(tkuehn): currently there is only one window,
        // so this can afford to be naive and just shut down the
        // compositor. In the future it'll need to be smarter.
        self.compositor.borrow_mut().send(ScriptToCompositorMsg::Exit).unwrap();
    }

    /// We have received notification that the response associated with a load has completed.
    /// Kick off the document and frame tree creation process using the result.
    fn handle_page_fetch_complete(&self, id: PipelineId, subpage: Option<SubpageId>,
                                  metadata: Metadata) -> Option<Root<ServoHTMLParser>> {
        let idx = self.incomplete_loads.borrow().iter().position(|load| {
            load.pipeline_id == id && load.parent_info.map(|info| info.1) == subpage
        });
        // The matching in progress load structure may not exist if
        // the pipeline exited before the page load completed.
        match idx {
            Some(idx) => {
                let load = self.incomplete_loads.borrow_mut().remove(idx);
                Some(self.load(metadata, load))
            }
            None => {
                // Only a closed pipeline may legitimately have no pending load.
                assert!(self.closed_pipelines.borrow().contains(&id));
                None
            }
        }
    }

    /// Handles a request for the window title.
    fn handle_get_title_msg(&self, pipeline_id: PipelineId) {
        let page = get_page(&self.root_page(), pipeline_id);
        let document = page.document();
        document.send_title_to_compositor();
    }

    /// Handles a request to exit the script task and shut down layout.
    /// Returns true if the script task should shut down and false otherwise.
    fn handle_exit_pipeline_msg(&self, id: PipelineId) -> bool {
        self.closed_pipelines.borrow_mut().insert(id);

        // Check if the exit message is for an in progress load.
        let idx = self.incomplete_loads.borrow().iter().position(|load| {
            load.pipeline_id == id
        });

        if let Some(idx) = idx {
            let load = self.incomplete_loads.borrow_mut().remove(idx);

            // Tell the layout task to begin shutting down, and wait until it
            // processed this message.
            let (response_chan, response_port) = channel();
            let LayoutChan(chan) = load.layout_chan;
            if chan.send(layout_interface::Msg::PrepareToExit(response_chan)).is_ok() {
                debug!("shutting down layout for page {:?}", id);
                response_port.recv().unwrap();
                chan.send(layout_interface::Msg::ExitNow).ok();
            }

            let has_pending_loads = self.incomplete_loads.borrow().len() > 0;
            let has_root_page = self.page.borrow().is_some();

            // Exit if no pending loads and no root page
            return !has_pending_loads && !has_root_page;
        }

        // If root is being exited, shut down all pages
        let page = self.root_page();
        let window = page.window();
        if window.pipeline() == id {
            debug!("shutting down layout for root page {:?}", id);
            shut_down_layout(&page);
            return true
        }

        // otherwise find just the matching page and exit all sub-pages
        if let Some(ref mut child_page) = page.remove(id) {
            shut_down_layout(&*child_page);
        }
        false
    }

    /// Handles when layout task finishes all animation in one tick
    fn handle_tick_all_animations(&self, id: PipelineId) {
        let page = get_page(&self.root_page(), id);
        let document = page.document();
        document.run_the_animation_frame_callbacks();
    }

    /// Handles a Web font being loaded. Does nothing if the page no longer exists.
    fn handle_web_font_loaded(&self, pipeline_id: PipelineId) {
        if let Some(ref page) = self.find_subpage(pipeline_id) {
            self.rebuild_and_force_reflow(page, ReflowReason::WebFontLoaded);
        }
    }

    /// The entry point to document loading. Defines bindings, sets up the window and document
    /// objects, parses HTML and CSS, and kicks off initial layout.
    fn load(&self, metadata: Metadata, incomplete: InProgressLoad) -> Root<ServoHTMLParser> {
        let final_url = metadata.final_url.clone();
        debug!("ScriptTask: loading {} on page {:?}",
               incomplete.url.serialize(), incomplete.pipeline_id);

        // We should either be initializing a root page or loading a child page of an
        // existing one.
        let root_page_exists = self.page.borrow().is_some();

        let frame_element = incomplete.parent_info.and_then(|(parent_id, subpage_id)| {
            // The root page may not exist yet, if the parent of this frame
            // exists in a different script task.
            let borrowed_page = self.page.borrow();

            // In the case a parent id exists but the matching page
            // cannot be found, this means the page exists in a different
            // script task (due to origin) so it shouldn't be returned.
            // TODO: window.parent will continue to return self in that
            // case, which is wrong. We should be returning an object that
            // denies access to most properties (per
            // https://github.com/servo/servo/issues/3939#issuecomment-62287025).
            borrowed_page.as_ref().and_then(|borrowed_page| {
                borrowed_page.find(parent_id).and_then(|page| {
                    let doc = page.document();
                    doc.find_iframe(subpage_id)
                })
            })
        });

        // Create a new frame tree entry.
        let page = Rc::new(Page::new(incomplete.pipeline_id));
        if !root_page_exists {
            // We have a new root frame tree.
            *self.page.borrow_mut() = Some(page.clone());
        } else if let Some((parent, _)) = incomplete.parent_info {
            // We have a new child frame.
            let parent_page = self.root_page();
            // TODO(gw): This find will fail when we are sharing script tasks
            // between cross origin iframes in the same TLD.
            parent_page.find(parent).expect("received load for subpage with missing parent");
            parent_page.children.borrow_mut().push(page.clone());
        }

        // Which page the drop guard below should remove if loading fails partway.
        enum PageToRemove {
            Root,
            Child(PipelineId),
        }
        // RAII-style guard: unless neutered before going out of scope, its Drop
        // impl unhooks the newly-inserted page so a failed load does not leave a
        // half-initialized frame in the tree.
        struct AutoPageRemover<'a> {
            page: PageToRemove,
            script_task: &'a ScriptTask,
            neutered: bool,
        }
        impl<'a> AutoPageRemover<'a> {
            fn new(script_task: &'a ScriptTask, page: PageToRemove) -> AutoPageRemover<'a> {
                AutoPageRemover {
                    page: page,
                    script_task: script_task,
                    neutered: false,
                }
            }

            // Disarms the guard once the load has fully succeeded.
            fn neuter(&mut self) {
                self.neutered = true;
            }
        }
        impl<'a> Drop for AutoPageRemover<'a> {
            fn drop(&mut self) {
                if !self.neutered {
                    match self.page {
                        PageToRemove::Root => *self.script_task.page.borrow_mut() = None,
                        PageToRemove::Child(id) => {
                            self.script_task.root_page().remove(id).unwrap();
                        }
                    }
                }
            }
        }

        let page_to_remove = if !root_page_exists {
            PageToRemove::Root
        } else {
            PageToRemove::Child(incomplete.pipeline_id)
        };
        let mut page_remover = AutoPageRemover::new(self, page_to_remove);
        let MainThreadScriptChan(ref sender) = self.chan;

        // Create the window and document objects.
        let window = Window::new(self.js_runtime.clone(),
                                 page.clone(),
                                 MainThreadScriptChan(sender.clone()),
                                 self.image_cache_channel.clone(),
                                 self.compositor.borrow_mut().clone(),
                                 self.image_cache_task.clone(),
                                 self.resource_task.clone(),
                                 self.storage_task.clone(),
                                 self.mem_profiler_chan.clone(),
                                 self.devtools_chan.clone(),
                                 self.constellation_chan.clone(),
                                 self.scheduler_chan.clone(),
                                 MainThreadTimerEventChan(self.timer_event_chan.clone()),
                                 incomplete.layout_chan,
                                 incomplete.pipeline_id,
                                 incomplete.parent_info,
                                 incomplete.window_size);

        let last_modified = metadata.headers.as_ref().and_then(|headers| {
            headers.get().map(|&LastModified(HttpDate(ref tm))| dom_last_modified(tm))
        });

        // Only text/plain is treated as an explicit content type here; everything
        // else falls through to HTML-document parsing below.
        let content_type = match metadata.content_type {
            Some(ContentType(Mime(TopLevel::Text, SubLevel::Plain, _))) => {
                Some(DOMString("text/plain".to_owned()))
            }
            _ => None
        };

        let loader = DocumentLoader::new_with_task(self.resource_task.clone(),
                                                   Some(page.pipeline()),
                                                   Some(incomplete.url.clone()));
        let document = Document::new(window.r(),
                                     Some(final_url.clone()),
                                     IsHTMLDocument::HTMLDocument,
                                     content_type,
                                     last_modified,
                                     DocumentSource::FromParser,
                                     loader);

        let frame_element = frame_element.r().map(Castable::upcast);
        window.init_browsing_context(document.r(), frame_element);

        // Create the root frame
        page.set_frame(Some(Frame {
            document: JS::from_rooted(&document),
            window: JS::from_rooted(&window),
        }));

        let is_javascript = incomplete.url.scheme == "javascript";
        let parse_input = if is_javascript {
            use url::percent_encoding::percent_decode_to;

            // Turn javascript: URL into JS code to eval, according to the steps in
            // https://html.spec.whatwg.org/multipage/#javascript-protocol
            let _ar = JSAutoRequest::new(self.get_cx());
            let mut script_source_bytes = Vec::new();

            // Start with the scheme data of the parsed URL (5.), while percent-decoding (8.)
            percent_decode_to(incomplete.url.non_relative_scheme_data().unwrap().as_bytes(),
                              &mut script_source_bytes);

            // Append question mark and query component, if any (6.), while percent-decoding (8.)
            if let Some(ref query) = incomplete.url.query {
                script_source_bytes.push(b'?');
                percent_decode_to(query.as_bytes(), &mut script_source_bytes);
            }

            // Append number sign and fragment component if any (7.), while percent-decoding (8.)
            if let Some(ref fragment) = incomplete.url.fragment {
                script_source_bytes.push(b'#');
                percent_decode_to(fragment.as_bytes(), &mut script_source_bytes);
            }

            // UTF-8 decode (9.)
            let script_source = String::from_utf8_lossy(&script_source_bytes);

            // Script source is ready to be evaluated (11.)
            let mut jsval = RootedValue::new(self.get_cx(), UndefinedValue());
            window.evaluate_js_on_global_with_result(&script_source, jsval.handle_mut());
            let strval = DOMString::from_jsval(self.get_cx(), jsval.handle(),
                                               StringificationBehavior::Empty);
            // The stringified evaluation result becomes the document's source text.
            strval.unwrap_or(DOMString::new())
        } else {
            DOMString::new()
        };

        parse_html(document.r(), parse_input, final_url,
                   ParseContext::Owner(Some(incomplete.pipeline_id)));

        // The load succeeded; keep the page in the frame tree.
        page_remover.neuter();

        document.get_current_parser().unwrap()
    }

    /// Informs the devtools server (if connected) that a new global has been created.
    fn notify_devtools(&self, title: DOMString, url: Url, ids: (PipelineId, Option<WorkerId>)) {
        if let Some(ref chan) = self.devtools_chan {
            let page_info = DevtoolsPageInfo {
                title: title,
                url: url,
            };
            chan.send(ScriptToDevtoolsControlMsg::NewGlobal(
                        ids,
                        self.devtools_sender.clone(),
                        page_info)).unwrap();
        }
    }

    /// Asks the compositor to scroll so that `element` (a fragment anchor target)
    /// is brought into view.
    fn scroll_fragment_point(&self, pipeline_id: PipelineId, element: &Element) {
        // FIXME(#8275, pcwalton): This is pretty bogus when multiple layers are involved.
        // Really what needs to happen is that this needs to go through layout to ask which
        // layer the element belongs to, and have it send the scroll message to the
        // compositor.
        let rect = element.upcast::<Node>().get_bounding_content_box();

        // In order to align with element edges, we snap to unscaled pixel boundaries, since the
        // paint task currently does the same for drawing elements. This is important for pages
        // that require pixel perfect scroll positioning for proper display (like Acid2). Since we
        // don't have the device pixel ratio here, this might not be accurate, but should work as
        // long as the ratio is a whole number. Once #8275 is fixed this should actually take into
        // account the real device pixel ratio.
        let point = Point2D::new(rect.origin.x.to_nearest_px() as f32,
                                 rect.origin.y.to_nearest_px() as f32);
        self.compositor.borrow_mut().send(ScriptToCompositorMsg::ScrollFragmentPoint(
            pipeline_id, LayerId::null(), point, false)).unwrap();
    }

    /// Reflows non-incrementally, rebuilding the entire layout tree in the process.
    fn rebuild_and_force_reflow(&self, page: &Page, reason: ReflowReason) {
        let document = page.document();
        document.dirty_all_nodes();
        let window = window_from_node(document.r());
        window.reflow(ReflowGoal::ForDisplay, ReflowQueryType::NoQuery, reason);
    }

    /// This is the main entry point for receiving and dispatching DOM events.
    ///
    /// TODO: Actually perform DOM event dispatch.
    fn handle_event(&self, pipeline_id: PipelineId, event: CompositorEvent) {
        match event {
            ResizeEvent(new_size) => {
                self.handle_resize_event(pipeline_id, new_size);
            }

            ClickEvent(button, point) => {
                self.handle_mouse_event(pipeline_id, MouseEventType::Click, button, point);
            }

            MouseDownEvent(button, point) => {
                self.handle_mouse_event(pipeline_id, MouseEventType::MouseDown, button, point);
            }

            MouseUpEvent(button, point) => {
                self.handle_mouse_event(pipeline_id, MouseEventType::MouseUp, button, point);
            }

            MouseMoveEvent(point) => {
                let page = get_page(&self.root_page(), pipeline_id);
                let document = page.document();

                // Snapshot the previous hover targets so we can diff after dispatch.
                let mut prev_mouse_over_targets: RootedVec<JS<Element>> = RootedVec::new();
                for target in &*self.mouse_over_targets.borrow_mut() {
                    prev_mouse_over_targets.push(target.clone());
                }

                // We temporarily steal the list of targets over which the mouse is to pass it to
                // handle_mouse_move_event() in a safe RootedVec container.
                let mut mouse_over_targets = RootedVec::new();
                std_mem::swap(&mut *self.mouse_over_targets.borrow_mut(),
                              &mut *mouse_over_targets);
                document.handle_mouse_move_event(self.js_runtime.rt(), point,
                                                 &mut mouse_over_targets);

                // Notify Constellation about anchors that are no longer mouse over targets.
                for target in &*prev_mouse_over_targets {
                    if !mouse_over_targets.contains(target) {
                        if target.upcast::<Node>().is_anchor_element() {
                            // `None` clears the displayed link status.
                            let event = ConstellationMsg::NodeStatus(None);
                            let ConstellationChan(ref chan) = self.constellation_chan;
                            chan.send(event).unwrap();
                            break;
                        }
                    }
                }

                // Notify Constellation about the topmost anchor mouse over target.
                for target in &*mouse_over_targets {
                    if target.upcast::<Node>().is_anchor_element() {
                        // Resolve the anchor's href against the document base URL.
                        let status = target.get_attribute(&ns!(""), &atom!("href"))
                            .and_then(|href| {
                                let value = href.value();
                                let url = document.url();
                                UrlParser::new().base_url(&url)
                                    .parse(&value).map(|url| url.serialize()).ok()
                            });
                        let event = ConstellationMsg::NodeStatus(status);
                        let ConstellationChan(ref chan) = self.constellation_chan;
                        chan.send(event).unwrap();
                        break;
                    }
                }

                // Hand the (possibly updated) target list back to the task state.
                std_mem::swap(&mut *self.mouse_over_targets.borrow_mut(),
                              &mut *mouse_over_targets);
            }

            TouchEvent(event_type, identifier, point) => {
                let handled = self.handle_touch_event(pipeline_id, event_type, identifier, point);
                match event_type {
                    TouchEventType::Down => {
                        // Tell the compositor whether page JS allowed the default action.
                        if handled {
                            // TODO: Wait to see if preventDefault is called on the first
                            // touchmove event.
                            self.compositor.borrow_mut()
                                .send(ScriptToCompositorMsg::TouchEventProcessed(
                                        EventResult::DefaultAllowed)).unwrap();
                        } else {
                            self.compositor.borrow_mut()
                                .send(ScriptToCompositorMsg::TouchEventProcessed(
                                        EventResult::DefaultPrevented)).unwrap();
                        }
                    }
                    _ => {
                        // TODO: Calling preventDefault on a touchup event should prevent clicks.
                    }
                }
            }

            KeyEvent(key, state, modifiers) => {
                let page = get_page(&self.root_page(), pipeline_id);
                let document = page.document();
                document.dispatch_key_event(
                    key, state, modifiers, &mut self.compositor.borrow_mut());
            }
        }
    }

    /// Dispatches a mouse click/down/up to the document of the given pipeline.
    fn handle_mouse_event(&self,
                          pipeline_id: PipelineId,
                          mouse_event_type: MouseEventType,
                          button: MouseButton,
                          point: Point2D<f32>) {
        let page = get_page(&self.root_page(), pipeline_id);
        let document = page.document();
        document.handle_mouse_event(self.js_runtime.rt(), button, point, mouse_event_type);
    }

    /// Dispatches a touch event; returns true if the default action is allowed
    /// (i.e. page JS did not call preventDefault).
    fn handle_touch_event(&self,
                          pipeline_id: PipelineId,
                          event_type: TouchEventType,
                          identifier: TouchId,
                          point: Point2D<f32>) -> bool {
        let page = get_page(&self.root_page(), pipeline_id);
        let document = page.document();
        document.handle_touch_event(self.js_runtime.rt(), event_type, identifier, point)
    }

    /// https://html.spec.whatwg.org/multipage/#navigating-across-documents
    /// The entry point for content to notify that a new load has been requested
    /// for the given pipeline (specifically the "navigate" algorithm).
    fn handle_navigate(&self, pipeline_id: PipelineId,
                       subpage_id: Option<SubpageId>,
                       load_data: LoadData) {
        // Step 8.
        // Fragment-only navigation within the same document: just scroll, no load.
        {
            let nurl = &load_data.url;
            if let Some(ref fragment) = nurl.fragment {
                let page = get_page(&self.root_page(), pipeline_id);
                let document = page.document();
                let document = document.r();
                let url = document.url();
                if url.scheme == nurl.scheme && url.scheme_data == nurl.scheme_data &&
                    url.query == nurl.query && load_data.method == Method::Get {
                    match document.find_fragment_node(&*fragment) {
                        Some(ref node) => {
                            self.scroll_fragment_point(pipeline_id, node.r());
                        }
                        None => {}
                    }
                    return;
                }
            }
        }

        match subpage_id {
            Some(subpage_id) => {
                // Navigation of a child frame: route through its iframe element.
                let borrowed_page = self.root_page();
                let iframe = borrowed_page.find(pipeline_id).and_then(|page| {
                    let doc = page.document();
                    doc.find_iframe(subpage_id)
                });
                if let Some(iframe) = iframe.r() {
                    iframe.navigate_child_browsing_context(load_data.url);
                }
            }
            None => {
                // Top-level navigation: let the constellation drive the load.
                let ConstellationChan(ref const_chan) = self.constellation_chan;
                const_chan.send(ConstellationMsg::LoadUrl(pipeline_id, load_data)).unwrap();
            }
        }
    }

    /// Handles a window resize for an active page: reflows, re-scrolls to any
    /// pending fragment, and fires a `resize` UI event at the window.
    fn handle_resize_event(&self, pipeline_id: PipelineId, new_size: WindowSizeData) {
        let page = get_page(&self.root_page(), pipeline_id);
        let window = page.window();
        window.set_window_size(new_size);
        window.force_reflow(ReflowGoal::ForDisplay,
                            ReflowQueryType::NoQuery,
                            ReflowReason::WindowResize);

        let document = page.document();
        let fragment_node = window.steal_fragment_name()
                                  .and_then(|name| document.find_fragment_node(&*name));
        match fragment_node {
            Some(ref node) => self.scroll_fragment_point(pipeline_id, node.r()),
            None => {}
        }

        // http://dev.w3.org/csswg/cssom-view/#resizing-viewports
        // https://dvcs.w3.org/hg/dom3events/raw-file/tip/html/DOM3-Events.html#event-type-resize
        let uievent = UIEvent::new(window.r(),
                                   DOMString("resize".to_owned()),
                                   EventBubbles::DoesNotBubble,
                                   EventCancelable::NotCancelable,
                                   Some(window.r()), 0i32);
        uievent.upcast::<Event>().fire(window.upcast());
    }

    /// Initiate a non-blocking fetch for a specified resource. Stores the InProgressLoad
    /// argument until a notification is received that the fetch is complete.
    fn start_page_load(&self, incomplete: InProgressLoad, mut load_data: LoadData) {
        let id = incomplete.pipeline_id.clone();
        let subpage = incomplete.parent_info.clone().map(|p| p.1);

        let script_chan = self.chan.clone();
        let resource_task = self.resource_task.clone();

        let context = Arc::new(Mutex::new(ParserContext::new(id, subpage, script_chan.clone(),
                                                             load_data.url.clone())));
        // Route async network notifications back to this script task's channel.
        let (action_sender, action_receiver) = ipc::channel().unwrap();
        let listener = box NetworkListener {
            context: context,
            script_chan: script_chan.clone(),
        };
        ROUTER.add_route(action_receiver.to_opaque(), box move |message| {
            listener.notify(message.to().unwrap());
        });
        let response_target = AsyncResponseTarget {
            sender: action_sender,
        };

        // javascript: URLs are evaluated in load(), not fetched; load about:blank instead.
        if load_data.url.scheme == "javascript" {
            load_data.url = Url::parse("about:blank").unwrap();
        }

        resource_task.send(ControlMsg::Load(NetLoadData {
            url: load_data.url,
            method: load_data.method,
            headers: Headers::new(),
            preserved_headers: load_data.headers,
            data: load_data.data,
            cors: None,
            pipeline_id: Some(id),
        }, LoadConsumer::Listener(response_target), None)).unwrap();

        self.incomplete_loads.borrow_mut().push(incomplete);
    }

    /// Runs the post-parse portion of the page load: readiness state, initial
    /// reflow, deferred scripts, and devtools notification
    /// (https://html.spec.whatwg.org/multipage/#the-end).
    fn handle_parsing_complete(&self, id: PipelineId) {
        let parent_page = self.root_page();
        let page = match parent_page.find(id) {
            Some(page) => page,
            None => return,
        };

        let document = page.document();
        let final_url = document.url();

        // https://html.spec.whatwg.org/multipage/#the-end step 1
        document.set_ready_state(DocumentReadyState::Interactive);

        // TODO: Execute step 2 here.

        // Kick off the initial reflow of the page.
        debug!("kicking off initial reflow of {:?}", final_url);
        document.disarm_reflow_timeout();
        document.content_changed(document.upcast(),
                                 NodeDamage::OtherNodeDamage);
        let window = window_from_node(document.r());
        window.reflow(ReflowGoal::ForDisplay, ReflowQueryType::NoQuery, ReflowReason::FirstLoad);

        // No more reflow required
        page.set_reflow_status(false);

        // https://html.spec.whatwg.org/multipage/#the-end steps 3-4.
        document.process_deferred_scripts();

        window.set_fragment_name(final_url.fragment.clone());

        // Notify devtools that a new script global exists.
        //TODO: should this happen as soon as the global is created, or at least once the first
        // script runs?
        self.notify_devtools(document.Title(), (*final_url).clone(), (id, None));
    }
}

impl Drop for ScriptTask {
    // Clears the thread-local task root so late callbacks cannot reach a dead task.
    fn drop(&mut self) {
        SCRIPT_TASK_ROOT.with(|root| {
            *root.borrow_mut() = None;
        });
    }
}

/// Shuts down layout for the given page tree.
fn shut_down_layout(page_tree: &Rc<Page>) {
    let mut channels = vec!();

    for page in page_tree.iter() {
        // Tell the layout task to begin shutting down, and wait until it
        // processed this message.
        let (response_chan, response_port) = channel();
        let window = page.window();
        let LayoutChan(chan) = window.layout_chan();
        if chan.send(layout_interface::Msg::PrepareToExit(response_chan)).is_ok() {
            channels.push(chan);
            response_port.recv().unwrap();
        }
    }

    // Drop our references to the JSContext and DOM objects.
    for page in page_tree.iter() {
        let window = page.window();
        window.clear_js_runtime();

        // Sever the connection between the global and the DOM tree
        page.set_frame(None);
    }

    // Destroy the layout task. If there were node leaks, layout will now crash safely.
    for chan in channels {
        chan.send(layout_interface::Msg::ExitNow).ok();
    }
}

/// Looks up `pipeline_id` in the given page tree; panics if it is not present.
pub fn get_page(page: &Rc<Page>, pipeline_id: PipelineId) -> Rc<Page> {
    page.find(pipeline_id).expect("ScriptTask: received an event \
        message for a layout channel that is not associated with this script task.\
        This is a bug.")
}

/// Formats a timestamp in the local timezone for the `document.lastModified` DOM API.
fn dom_last_modified(tm: &Tm) -> String {
    tm.to_local().strftime("%m/%d/%Y %H:%M:%S").unwrap().to_string()
}
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! VecDeque is a double-ended queue, which is implemented with the help of a //! growing ring buffer. //! //! This queue has `O(1)` amortized inserts and removals from both ends of the //! container. It also has `O(1)` indexing like a vector. The contained elements //! are not required to be copyable, and the queue will be sendable if the //! contained type is sendable. #![stable(feature = "rust1", since = "1.0.0")] use core::prelude::*; use core::cmp::Ordering; use core::default::Default; use core::fmt; use core::iter::{self, repeat, FromIterator, IntoIterator, RandomAccessIterator}; use core::mem; use core::num::wrapping::WrappingOps; use core::ops::{Index, IndexMut}; use core::ptr::{self, Unique}; use core::slice; use core::hash::{Hash, Hasher}; use core::cmp; use alloc::heap; #[deprecated(since = "1.0.0", reason = "renamed to VecDeque")] #[unstable(feature = "collections")] pub use VecDeque as RingBuf; const INITIAL_CAPACITY: usize = 7; // 2^3 - 1 const MINIMUM_CAPACITY: usize = 1; // 2 - 1 /// `VecDeque` is a growable ring buffer, which can be used as a /// double-ended queue efficiently. #[stable(feature = "rust1", since = "1.0.0")] pub struct VecDeque<T> { // tail and head are pointers into the buffer. Tail always points // to the first element that could be read, Head always points // to where data should be written. // If tail == head the buffer is empty. The length of the ringbuf // is defined as the distance between the two. 
tail: usize, head: usize, cap: usize, ptr: Unique<T>, } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Clone> Clone for VecDeque<T> { fn clone(&self) -> VecDeque<T> { self.iter().cloned().collect() } } #[unsafe_destructor] #[stable(feature = "rust1", since = "1.0.0")] impl<T> Drop for VecDeque<T> { fn drop(&mut self) { self.clear(); unsafe { if mem::size_of::<T>() != 0 { heap::deallocate(*self.ptr as *mut u8, self.cap * mem::size_of::<T>(), mem::min_align_of::<T>()) } } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Default for VecDeque<T> { #[inline] fn default() -> VecDeque<T> { VecDeque::new() } } impl<T> VecDeque<T> { /// Turn ptr into a slice #[inline] unsafe fn buffer_as_slice(&self) -> &[T] { slice::from_raw_parts(*self.ptr, self.cap) } /// Turn ptr into a mut slice #[inline] unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] { slice::from_raw_parts_mut(*self.ptr, self.cap) } /// Moves an element out of the buffer #[inline] unsafe fn buffer_read(&mut self, off: usize) -> T { ptr::read(self.ptr.offset(off as isize)) } /// Writes an element into the buffer, moving it. #[inline] unsafe fn buffer_write(&mut self, off: usize, t: T) { ptr::write(self.ptr.offset(off as isize), t); } /// Returns true iff the buffer is at capacity #[inline] fn is_full(&self) -> bool { self.cap - self.len() == 1 } /// Returns the index in the underlying buffer for a given logical element /// index. #[inline] fn wrap_index(&self, idx: usize) -> usize { wrap_index(idx, self.cap) } /// Returns the index in the underlying buffer for a given logical element /// index + addend. #[inline] fn wrap_add(&self, idx: usize, addend: usize) -> usize { wrap_index(idx.wrapping_add(addend), self.cap) } /// Returns the index in the underlying buffer for a given logical element /// index - subtrahend. 
#[inline] fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize { wrap_index(idx.wrapping_sub(subtrahend), self.cap) } /// Copies a contiguous block of memory len long from src to dst #[inline] unsafe fn copy(&self, dst: usize, src: usize, len: usize) { debug_assert!(dst + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len, self.cap); debug_assert!(src + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len, self.cap); ptr::copy( self.ptr.offset(dst as isize), self.ptr.offset(src as isize), len); } /// Copies a contiguous block of memory len long from src to dst #[inline] unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) { debug_assert!(dst + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len, self.cap); debug_assert!(src + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len, self.cap); ptr::copy_nonoverlapping( self.ptr.offset(dst as isize), self.ptr.offset(src as isize), len); } } impl<T> VecDeque<T> { /// Creates an empty `VecDeque`. #[stable(feature = "rust1", since = "1.0.0")] pub fn new() -> VecDeque<T> { VecDeque::with_capacity(INITIAL_CAPACITY) } /// Creates an empty `VecDeque` with space for at least `n` elements. #[stable(feature = "rust1", since = "1.0.0")] pub fn with_capacity(n: usize) -> VecDeque<T> { // +1 since the ringbuffer always leaves one space empty let cap = cmp::max(n + 1, MINIMUM_CAPACITY + 1).next_power_of_two(); assert!(cap > n, "capacity overflow"); let size = cap.checked_mul(mem::size_of::<T>()) .expect("capacity overflow"); let ptr = unsafe { if mem::size_of::<T>() != 0 { let ptr = heap::allocate(size, mem::min_align_of::<T>()) as *mut T;; if ptr.is_null() { ::alloc::oom() } Unique::new(ptr) } else { Unique::new(heap::EMPTY as *mut T) } }; VecDeque { tail: 0, head: 0, cap: cap, ptr: ptr, } } /// Retrieves an element in the `VecDeque` by index. 
/// /// # Examples /// /// ``` /// use std::collections::VecDeque; /// /// let mut buf = VecDeque::new(); /// buf.push_back(3); /// buf.push_back(4); /// buf.push_back(5); /// assert_eq!(buf.get(1).unwrap(), &4); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn get(&self, i: usize) -> Option<&T> { if i < self.len() { let idx = self.wrap_add(self.tail, i); unsafe { Some(&*self.ptr.offset(idx as isize)) } } else { None } } /// Retrieves an element in the `VecDeque` mutably by index. /// /// # Examples /// /// ``` /// use std::collections::VecDeque; /// /// let mut buf = VecDeque::new(); /// buf.push_back(3); /// buf.push_back(4); /// buf.push_back(5); /// match buf.get_mut(1) { /// None => {} /// Some(elem) => { /// *elem = 7; /// } /// } /// /// assert_eq!(buf[1], 7); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn get_mut(&mut self, i: usize) -> Option<&mut T> { if i < self.len() { let idx = self.wrap_add(self.tail, i); unsafe { Some(&mut *self.ptr.offset(idx as isize)) } } else { None } } /// Swaps elements at indices `i` and `j`. /// /// `i` and `j` may be equal. /// /// Fails if there is no element with either index. /// /// # Examples /// /// ``` /// use std::collections::VecDeque; /// /// let mut buf = VecDeque::new(); /// buf.push_back(3); /// buf.push_back(4); /// buf.push_back(5); /// buf.swap(0, 2); /// assert_eq!(buf[0], 5); /// assert_eq!(buf[2], 3); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn swap(&mut self, i: usize, j: usize) { assert!(i < self.len()); assert!(j < self.len()); let ri = self.wrap_add(self.tail, i); let rj = self.wrap_add(self.tail, j); unsafe { ptr::swap(self.ptr.offset(ri as isize), self.ptr.offset(rj as isize)) } } /// Returns the number of elements the `VecDeque` can hold without /// reallocating. 
/// /// # Examples /// /// ``` /// use std::collections::VecDeque; /// /// let buf: VecDeque<i32> = VecDeque::with_capacity(10); /// assert!(buf.capacity() >= 10); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn capacity(&self) -> usize { self.cap - 1 } /// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the /// given `VecDeque`. Does nothing if the capacity is already sufficient. /// /// Note that the allocator may give the collection more space than it requests. Therefore /// capacity can not be relied upon to be precisely minimal. Prefer `reserve` if future /// insertions are expected. /// /// # Panics /// /// Panics if the new capacity overflows `usize`. /// /// # Examples /// /// ``` /// use std::collections::VecDeque; /// /// let mut buf: VecDeque<i32> = vec![1].into_iter().collect(); /// buf.reserve_exact(10); /// assert!(buf.capacity() >= 11); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn reserve_exact(&mut self, additional: usize) { self.reserve(additional); } /// Reserves capacity for at least `additional` more elements to be inserted in the given /// `Ringbuf`. The collection may reserve more space to avoid frequent reallocations. /// /// # Panics /// /// Panics if the new capacity overflows `usize`. 
/// /// # Examples /// /// ``` /// use std::collections::VecDeque; /// /// let mut buf: VecDeque<i32> = vec![1].into_iter().collect(); /// buf.reserve(10); /// assert!(buf.capacity() >= 11); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn reserve(&mut self, additional: usize) { let new_len = self.len() + additional; assert!(new_len + 1 > self.len(), "capacity overflow"); if new_len > self.capacity() { let count = (new_len + 1).next_power_of_two(); assert!(count >= new_len + 1); if mem::size_of::<T>() != 0 { let old = self.cap * mem::size_of::<T>(); let new = count.checked_mul(mem::size_of::<T>()) .expect("capacity overflow"); unsafe { let ptr = heap::reallocate(*self.ptr as *mut u8, old, new, mem::min_align_of::<T>()) as *mut T; if ptr.is_null() { ::alloc::oom() } self.ptr = Unique::new(ptr); } } // Move the shortest contiguous section of the ring buffer // T H // [o o o o o o o . ] // T H // A [o o o o o o o . . . . . . . . . ] // H T // [o o . o o o o o ] // T H // B [. . . o o o o o o o . . . . . . ] // H T // [o o o o o . o o ] // H T // C [o o o o o . . . . . . . . . o o ] let oldcap = self.cap; self.cap = count; if self.tail <= self.head { // A // Nop } else if self.head < oldcap - self.tail { // B unsafe { self.copy_nonoverlapping(oldcap, 0, self.head); } self.head += oldcap; debug_assert!(self.head > self.tail); } else { // C let new_tail = count - (oldcap - self.tail); unsafe { self.copy_nonoverlapping(new_tail, self.tail, oldcap - self.tail); } self.tail = new_tail; debug_assert!(self.head < self.tail); } debug_assert!(self.head < self.cap); debug_assert!(self.tail < self.cap); debug_assert!(self.cap.count_ones() == 1); } } /// Shrinks the capacity of the ringbuf as much as possible. /// /// It will drop down as close as possible to the length but the allocator may still inform the /// ringbuf that there is space for a few more elements. 
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::with_capacity(15);
/// buf.extend(0..4);
/// assert_eq!(buf.capacity(), 15);
/// buf.shrink_to_fit();
/// assert!(buf.capacity() >= 4);
/// ```
pub fn shrink_to_fit(&mut self) {
    // +1 since the ringbuffer always leaves one space empty
    // len + 1 can't overflow for an existing, well-formed ringbuf.
    let target_cap = cmp::max(self.len() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
    if target_cap < self.cap {
        // There are three cases of interest:
        //   All elements are out of desired bounds
        //   Elements are contiguous, and head is out of desired bounds
        //   Elements are discontiguous, and tail is out of desired bounds
        //
        // At all other times, element positions are unaffected.
        //
        // Indicates that elements at the head should be moved.
        let head_outside = self.head == 0 || self.head >= target_cap;
        // Move elements from out of desired bounds (positions after target_cap)
        if self.tail >= target_cap && head_outside {
            //                    T             H
            //   [. . . . . . . . o o o o o o o . ]
            //    T             H
            //   [o o o o o o o . ]
            unsafe {
                self.copy_nonoverlapping(0, self.tail, self.len());
            }
            self.head = self.len();
            self.tail = 0;
        } else if self.tail != 0 && self.tail < target_cap && head_outside {
            //          T             H
            //   [. . . o o o o o o o . . . . . . ]
            //        H T
            //   [o o . o o o o o ]
            let len = self.wrap_sub(self.head, target_cap);
            unsafe {
                self.copy_nonoverlapping(0, target_cap, len);
            }
            self.head = len;
            debug_assert!(self.head < self.tail);
        } else if self.tail >= target_cap {
            //              H                 T
            //   [o o o o o . . . . . . . . . o o ]
            //              H T
            //   [o o o o o . o o ]
            debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
            let len = self.cap - self.tail;
            let new_tail = target_cap - len;
            unsafe {
                self.copy_nonoverlapping(new_tail, self.tail, len);
            }
            self.tail = new_tail;
            debug_assert!(self.head < self.tail);
        }

        if mem::size_of::<T>() != 0 {
            // Shrink the actual allocation only after elements are in bounds.
            let old = self.cap * mem::size_of::<T>();
            let new_size = target_cap * mem::size_of::<T>();
            unsafe {
                let ptr = heap::reallocate(*self.ptr as *mut u8,
                                           old,
                                           new_size,
                                           mem::min_align_of::<T>()) as *mut T;
                if ptr.is_null() { ::alloc::oom() }
                self.ptr = Unique::new(ptr);
            }
        }
        self.cap = target_cap;
        debug_assert!(self.head < self.cap);
        debug_assert!(self.tail < self.cap);
        debug_assert!(self.cap.count_ones() == 1);
    }
}

/// Shorten a ringbuf, dropping excess elements from the back.
///
/// If `len` is greater than the ringbuf's current length, this has no
/// effect.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(10);
/// buf.push_back(15);
/// buf.truncate(1);
/// assert_eq!(buf.len(), 1);
/// assert_eq!(Some(&5), buf.get(0));
/// ```
#[unstable(feature = "collections",
           reason = "matches collection reform specification; waiting on panic semantics")]
pub fn truncate(&mut self, len: usize) {
    // Pop one element at a time so each dropped element's destructor runs.
    for _ in len..self.len() {
        self.pop_back();
    }
}

/// Returns a front-to-back iterator.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(3);
/// buf.push_back(4);
/// let b: &[_] = &[&5, &3, &4];
/// assert_eq!(buf.iter().collect::<Vec<&i32>>().as_slice(), b);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<T> {
    // The iterator snapshots tail/head and walks the raw buffer slice.
    Iter {
        tail: self.tail,
        head: self.head,
        ring: unsafe { self.buffer_as_slice() }
    }
}

/// Returns a front-to-back iterator that returns mutable references.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(3);
/// buf.push_back(4);
/// for num in buf.iter_mut() {
///     *num = *num - 2;
/// }
/// let b: &[_] = &[&mut 3, &mut 1, &mut 2];
/// assert_eq!(&buf.iter_mut().collect::<Vec<&mut i32>>()[], b);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<T> {
    IterMut {
        tail: self.tail,
        head: self.head,
        ring: unsafe { self.buffer_as_mut_slice() },
    }
}

/// Consumes the list into an iterator yielding elements by value.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_iter(self) -> IntoIter<T> {
    IntoIter {
        inner: self,
    }
}

/// Returns a pair of slices which contain, in order, the contents of the
/// `VecDeque`.
#[inline]
#[unstable(feature = "collections",
           reason = "matches collection reform specification, waiting for dust to settle")]
pub fn as_slices(&self) -> (&[T], &[T]) {
    unsafe {
        let contiguous = self.is_contiguous();
        let buf = self.buffer_as_slice();
        if contiguous {
            // Contiguous: all elements live in tail..head; second slice empty.
            let (empty, buf) = buf.split_at(0);
            (&buf[self.tail..self.head], empty)
        } else {
            // Wrapped: front run is tail..cap, back run is 0..head.
            let (mid, right) = buf.split_at(self.tail);
            let (left, _) = mid.split_at(self.head);
            (right, left)
        }
    }
}

/// Returns a pair of slices which contain, in order, the contents of the
/// `VecDeque`.
#[inline]
#[unstable(feature = "collections",
           reason = "matches collection reform specification, waiting for dust to settle")]
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
    unsafe {
        let contiguous = self.is_contiguous();
        // Copy indices out before mutably borrowing the buffer.
        let head = self.head;
        let tail = self.tail;
        let buf = self.buffer_as_mut_slice();

        if contiguous {
            let (empty, buf) = buf.split_at_mut(0);
            (&mut buf[tail .. head], empty)
        } else {
            let (mid, right) = buf.split_at_mut(tail);
            let (left, _) = mid.split_at_mut(head);

            (right, left)
        }
    }
}

/// Returns the number of elements in the `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// assert_eq!(v.len(), 0);
/// v.push_back(1);
/// assert_eq!(v.len(), 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize { count(self.tail, self.head, self.cap) }

/// Returns true if the buffer contains no elements
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// assert!(v.is_empty());
/// v.push_front(1);
/// assert!(!v.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool { self.len() == 0 }

/// Creates a draining iterator that clears the `VecDeque` and iterates over
/// the removed items from start to end.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// v.push_back(1);
/// assert_eq!(v.drain().next(), Some(1));
/// assert!(v.is_empty());
/// ```
#[inline]
#[unstable(feature = "collections",
           reason = "matches collection reform specification, waiting for dust to settle")]
pub fn drain(&mut self) -> Drain<T> {
    // Elements not consumed by the caller are removed when the `Drain`
    // is dropped (see `impl Drop for Drain`).
    Drain {
        inner: self,
    }
}

/// Clears the buffer, removing all values.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// v.push_back(1);
/// v.clear();
/// assert!(v.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn clear(&mut self) {
    // Dropping the returned `Drain` immediately removes (and drops) all
    // elements.
    self.drain();
}

/// Provides a reference to the front element, or `None` if the sequence is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.front(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// assert_eq!(d.front(), Some(&1));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front(&self) -> Option<&T> {
    if !self.is_empty() { Some(&self[0]) } else { None }
}

/// Provides a mutable reference to the front element, or `None` if the
/// sequence is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.front_mut(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// match d.front_mut() {
///     Some(x) => *x = 9,
///     None => (),
/// }
/// assert_eq!(d.front(), Some(&9));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front_mut(&mut self) -> Option<&mut T> {
    if !self.is_empty() { Some(&mut self[0]) } else { None }
}

/// Provides a reference to the back element, or `None` if the sequence is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.back(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// assert_eq!(d.back(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back(&self) -> Option<&T> {
    if !self.is_empty() { Some(&self[self.len() - 1]) } else { None }
}

/// Provides a mutable reference to the back element, or `None` if the
/// sequence is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.back(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// match d.back_mut() {
///     Some(x) => *x = 9,
///     None => (),
/// }
/// assert_eq!(d.back(), Some(&9));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back_mut(&mut self) -> Option<&mut T> {
    // Read the length first: `self[...]` below takes a mutable borrow.
    let len = self.len();
    if !self.is_empty() { Some(&mut self[len - 1]) } else { None }
}

/// Removes the first element and returns it, or `None` if the sequence is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// d.push_back(1);
/// d.push_back(2);
///
/// assert_eq!(d.pop_front(), Some(1));
/// assert_eq!(d.pop_front(), Some(2));
/// assert_eq!(d.pop_front(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop_front(&mut self) -> Option<T> {
    if self.is_empty() {
        None
    } else {
        // Advance tail past the element, then move the value out of the
        // buffer slot it occupied.
        let tail = self.tail;
        self.tail = self.wrap_add(self.tail, 1);
        unsafe { Some(self.buffer_read(tail)) }
    }
}

/// Inserts an element first in the sequence.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// d.push_front(1);
/// d.push_front(2);
/// assert_eq!(d.front(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_front(&mut self, t: T) {
    if self.is_full() {
        self.reserve(1);
        debug_assert!(!self.is_full());
    }

    // Step tail backwards (wrapping) and write into the freed slot.
    self.tail = self.wrap_sub(self.tail, 1);
    let tail = self.tail;
    unsafe { self.buffer_write(tail, t); }
}

/// Appends an element to the back of a buffer
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(1);
/// buf.push_back(3);
/// assert_eq!(3, *buf.back().unwrap());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_back(&mut self, t: T) {
    if self.is_full() {
        self.reserve(1);
        debug_assert!(!self.is_full());
    }

    // Write at head, then step head forwards (wrapping).
    let head = self.head;
    self.head = self.wrap_add(self.head, 1);
    unsafe { self.buffer_write(head, t) }
}

/// Removes the last element from a buffer and returns it, or `None` if
/// it is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.pop_back(), None);
/// buf.push_back(1);
/// buf.push_back(3);
/// assert_eq!(buf.pop_back(), Some(3));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop_back(&mut self) -> Option<T> {
    if self.is_empty() {
        None
    } else {
        self.head = self.wrap_sub(self.head, 1);
        let head = self.head;
        unsafe { Some(self.buffer_read(head)) }
    }
}

/// Returns `true` when the elements occupy a single contiguous run
/// (`tail..head`) in the underlying buffer, i.e. they do not wrap around.
#[inline]
fn is_contiguous(&self) -> bool {
    self.tail <= self.head
}

/// Removes an element from anywhere in the ringbuf and returns it, replacing it with the last
/// element.
///
/// This does not preserve ordering, but is O(1).
///
/// Returns `None` if `index` is out of bounds.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_back_remove(0), None);
/// buf.push_back(5);
/// buf.push_back(99);
/// buf.push_back(15);
/// buf.push_back(20);
/// buf.push_back(10);
/// assert_eq!(buf.swap_back_remove(1), Some(99));
/// ```
#[unstable(feature = "collections",
           reason = "the naming of this function may be altered")]
pub fn swap_back_remove(&mut self, index: usize) -> Option<T> {
    let length = self.len();
    if length > 0 && index < length - 1 {
        // Swap the target to the back so it can be popped in O(1).
        self.swap(index, length - 1);
    } else if index >= length {
        return None;
    }
    self.pop_back()
}

/// Removes an element from anywhere in the ringbuf and returns it, replacing it with the first
/// element.
///
/// This does not preserve ordering, but is O(1).
///
/// Returns `None` if `index` is out of bounds.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_front_remove(0), None);
/// buf.push_back(15);
/// buf.push_back(5);
/// buf.push_back(10);
/// buf.push_back(99);
/// buf.push_back(20);
/// assert_eq!(buf.swap_front_remove(3), Some(99));
/// ```
#[unstable(feature = "collections",
           reason = "the naming of this function may be altered")]
pub fn swap_front_remove(&mut self, index: usize) -> Option<T> {
    let length = self.len();
    if length > 0 && index < length && index != 0 {
        // Swap the target to the front so it can be popped in O(1).
        self.swap(index, 0);
    } else if index >= length {
        return None;
    }
    self.pop_front()
}

/// Inserts an element at position `i` within the ringbuf. Whichever
/// end is closer to the insertion point will be moved to make room,
/// and all the affected elements will be moved to new positions.
///
/// # Panics
///
/// Panics if `i` is greater than ringbuf's length
///
/// # Examples
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(10);
/// buf.push_back(12);
/// buf.insert(1,11);
/// assert_eq!(Some(&11), buf.get(1));
/// ```
pub fn insert(&mut self, i: usize, t: T) {
    assert!(i <= self.len(), "index out of bounds");
    if self.is_full() {
        self.reserve(1);
        debug_assert!(!self.is_full());
    }

    // Move the least number of elements in the ring buffer and insert
    // the given object
    //
    // At most len/2 - 1 elements will be moved. O(min(n, n-i))
    //
    // There are three main cases:
    //  Elements are contiguous
    //      - special case when tail is 0
    //  Elements are discontiguous and the insert is in the tail section
    //  Elements are discontiguous and the insert is in the head section
    //
    // For each of those there are two more cases:
    //  Insert is closer to tail
    //  Insert is closer to head
    //
    // Key: H - self.head
    //      T - self.tail
    //      o - Valid element
    //      I - Insertion element
    //      A - The element that should be after the insertion point
    //      M - Indicates element was moved

    let idx = self.wrap_add(self.tail, i);

    let distance_to_tail = i;
    let distance_to_head = self.len() - i;

    let contiguous = self.is_contiguous();

    // Dispatch on (layout, closer end, insertion side of the buffer);
    // each arm shifts the smaller run of elements by one slot.
    match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
        (true, true, _) if i == 0 => {
            // push_front
            //
            //        T
            //        I             H
            //       [A o o o o o o . . . . . . . . .]
            //
            //                       H         T
            //       [A o o o o o o o . . . . . I]
            //
            self.tail = self.wrap_sub(self.tail, 1);
        },
        (true, true, _) => unsafe {
            // contiguous, insert closer to tail:
            //
            //             T   I         H
            //      [. . . o o A o o o o . . . . . .]
            //
            //           T               H
            //      [. . o o I A o o o o . . . . . .]
            //           M M
            //
            // contiguous, insert closer to tail and tail is 0:
            //
            //
            //       T   I         H
            //      [o o A o o o o . . . . . . . . .]
            //
            //                       H             T
            //      [o I A o o o o o . . . . . . . o]
            //       M                             M

            let new_tail = self.wrap_sub(self.tail, 1);

            self.copy(new_tail, self.tail, 1);
            // Already moved the tail, so we only copy `i - 1` elements.
            self.copy(self.tail, self.tail + 1, i - 1);

            self.tail = new_tail;
        },
        (true, false, _) => unsafe {
            // contiguous, insert closer to head:
            //
            //             T       I     H
            //      [. . . o o o o A o o . . . . . .]
            //
            //             T               H
            //      [. . . o o o o I A o o . . . . .]
            //                       M M M
            self.copy(idx + 1, idx, self.head - idx);
            self.head = self.wrap_add(self.head, 1);
        },
        (false, true, true) => unsafe {
            // discontiguous, insert closer to tail, tail section:
            //
            //                   H         T   I
            //      [o o o o o o . . . . . o o A o o]
            //
            //                   H       T
            //      [o o o o o o . . . . o o I A o o]
            //                           M M
            self.copy(self.tail - 1, self.tail, i);
            self.tail -= 1;
        },
        (false, false, true) => unsafe {
            // discontiguous, insert closer to head, tail section:
            //
            //           H             T         I
            //      [o o . . . . . . . o o o o o A o]
            //
            //             H           T
            //      [o o o . . . . . . o o o o o I A]
            //       M M M                         M

            // copy elements up to new head
            self.copy(1, 0, self.head);

            // copy last element into empty spot at bottom of buffer
            self.copy(0, self.cap - 1, 1);

            // move elements from idx to end forward not including ^ element
            self.copy(idx + 1, idx, self.cap - 1 - idx);

            self.head += 1;
        },
        (false, true, false) if idx == 0 => unsafe {
            // discontiguous, insert is closer to tail, head section,
            // and is at index zero in the internal buffer:
            //
            //       I                   H     T
            //      [A o o o o o o o o o . . . o o o]
            //
            //                           H   T
            //      [A o o o o o o o o o . . o o o I]
            //                               M M M

            // copy elements up to new tail
            self.copy(self.tail - 1, self.tail, self.cap - self.tail);

            // copy last element into empty spot at bottom of buffer
            self.copy(self.cap - 1, 0, 1);

            self.tail -= 1;
        },
        (false, true, false) => unsafe {
            // discontiguous, insert closer to tail, head section:
            //
            //             I             H     T
            //      [o o o A o o o o o o . . . o o o]
            //
            //                           H   T
            //      [o o I A o o o o o o . . o o o o]
            //       M M                     M M M M

            // copy elements up to new tail
            self.copy(self.tail - 1, self.tail, self.cap - self.tail);

            // copy last element into empty spot at bottom of buffer
            self.copy(self.cap - 1, 0, 1);

            // move elements from idx-1 to end forward not including ^ element
            self.copy(0, 1, idx - 1);

            self.tail -= 1;
        },
        (false, false, false) => unsafe {
            // discontiguous, insert closer to head, head section:
            //
            //               I     H           T
            //      [o o o o A o o . . . . . . o o o]
            //
            //                     H           T
            //      [o o o o I A o o . . . . . o o o]
            //                 M M M
            self.copy(idx + 1, idx, self.head - idx);
            self.head += 1;
        }
    }

    // tail might've been changed so we need to recalculate
    let new_idx = self.wrap_add(self.tail, i);
    unsafe {
        self.buffer_write(new_idx, t);
    }
}

/// Removes and returns the element at position `i` from the ringbuf.
/// Whichever end is closer to the removal point will be moved to make
/// room, and all the affected elements will be moved to new positions.
/// Returns `None` if `i` is out of bounds.
///
/// # Examples
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(10);
/// buf.push_back(12);
/// buf.push_back(15);
/// buf.remove(2);
/// assert_eq!(Some(&15), buf.get(2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove(&mut self, i: usize) -> Option<T> {
    if self.is_empty() || self.len() <= i {
        return None;
    }

    // There are three main cases:
    //  Elements are contiguous
    //  Elements are discontiguous and the removal is in the tail section
    //  Elements are discontiguous and the removal is in the head section
    //      - special case when elements are technically contiguous,
    //        but self.head = 0
    //
    // For each of those there are two more cases:
    //  Insert is closer to tail
    //  Insert is closer to head
    //
    // Key: H - self.head
    //      T - self.tail
    //      o - Valid element
    //      x - Element marked for removal
    //      R - Indicates element that is being removed
    //      M - Indicates element was moved

    let idx = self.wrap_add(self.tail, i);

    // Move the element out first; the copies below overwrite its slot.
    let elem = unsafe {
        Some(self.buffer_read(idx))
    };

    let distance_to_tail = i;
    let distance_to_head = self.len() - i;

    let contiguous = self.is_contiguous();

    match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
        (true, true, _) => unsafe {
            // contiguous, remove closer to tail:
            //
            //             T   R         H
            //      [. . . o o x o o o o . . . . . .]
            //
            //               T           H
            //      [. . . . o o o o o o . . . . . .]
            //               M M
            self.copy(self.tail + 1, self.tail, i);
            self.tail += 1;
        },
        (true, false, _) => unsafe {
            // contiguous, remove closer to head:
            //
            //             T       R     H
            //      [. . . o o o o x o o . . . . . .]
            //
            //             T           H
            //      [. . . o o o o o o . . . . . . .]
            //                     M M
            self.copy(idx, idx + 1, self.head - idx - 1);
            self.head -= 1;
        },
        (false, true, true) => unsafe {
            // discontiguous, remove closer to tail, tail section:
            //
            //                   H         T   R
            //      [o o o o o o . . . . . o o x o o]
            //
            //                   H           T
            //      [o o o o o o . . . . . . o o o o]
            //                               M M
            self.copy(self.tail + 1, self.tail, i);
            self.tail = self.wrap_add(self.tail, 1);
        },
        (false, false, false) => unsafe {
            // discontiguous, remove closer to head, head section:
            //
            //               R     H           T
            //      [o o o o x o o . . . . . . o o o]
            //
            //                   H             T
            //      [o o o o o o . . . . . . . o o o]
            //               M M
            self.copy(idx, idx + 1, self.head - idx - 1);
            self.head -= 1;
        },
        (false, false, true) => unsafe {
            // discontiguous, remove closer to head, tail section:
            //
            //           H             T         R
            //      [o o o . . . . . . o o o o o x o]
            //
            //          H              T
            //      [o o . . . . . . . o o o o o o o]
            //       M M                         M M
            //
            // or quasi-discontiguous, remove next to head, tail section:
            //
            //       H                 T         R
            //      [. . . . . . . . . o o o o o x o]
            //
            //                         T       H
            //      [. . . . . . . . . o o o o o o .]
            //                                   M

            // draw in elements in the tail section
            self.copy(idx, idx + 1, self.cap - idx - 1);

            // Prevents underflow.
            if self.head != 0 {
                // copy first element into empty spot
                self.copy(self.cap - 1, 0, 1);

                // move elements in the head section backwards
                self.copy(0, 1, self.head - 1);
            }

            self.head = self.wrap_sub(self.head, 1);
        },
        (false, true, false) => unsafe {
            // discontiguous, remove closer to tail, head section:
            //
            //             R     H           T
            //      [o o x o o o o o o o . . . o o o]
            //
            //                   H             T
            //      [o o o o o o o o o o . . . . o o]
            //       M M M                       M M

            // draw in elements up to idx
            self.copy(1, 0, idx);

            // copy last element into empty spot
            self.copy(0, self.cap - 1, 1);

            // move elements from tail to end forward, excluding the last one
            self.copy(self.tail + 1, self.tail, self.cap - self.tail - 1);

            self.tail = self.wrap_add(self.tail, 1);
        }
    }

    return elem;
}

/// Splits the collection into two at the given index.
///
/// Returns a newly allocated `Self`. `self` contains elements `[0, at)`,
/// and the returned `Self` contains elements `[at, len)`.
///
/// Note that the capacity of `self` does not change.
///
/// # Panics
///
/// Panics if `at > len`
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1,2,3].into_iter().collect();
/// let buf2 = buf.split_off(1);
/// // buf = [1], buf2 = [2, 3]
/// assert_eq!(buf.len(), 1);
/// assert_eq!(buf2.len(), 2);
/// ```
#[inline]
#[unstable(feature = "collections",
           reason = "new API, waiting for dust to settle")]
pub fn split_off(&mut self, at: usize) -> Self {
    let len = self.len();
    assert!(at <= len, "`at` out of bounds");

    let other_len = len - at;
    let mut other = VecDeque::with_capacity(other_len);

    unsafe {
        // The elements to move may span the two slices of the ring.
        let (first_half, second_half) = self.as_slices();

        let first_len = first_half.len();
        let second_len = second_half.len();
        if at < first_len {
            // `at` lies in the first half.
            let amount_in_first = first_len - at;

            ptr::copy_nonoverlapping(*other.ptr,
                                     first_half.as_ptr().offset(at as isize),
                                     amount_in_first);

            // just take all of the second half.
            ptr::copy_nonoverlapping(other.ptr.offset(amount_in_first as isize),
                                     second_half.as_ptr(),
                                     second_len);
        } else {
            // `at` lies in the second half, need to factor in the elements we skipped
            // in the first half.
            let offset = at - first_len;
            let amount_in_second = second_len - offset;
            ptr::copy_nonoverlapping(*other.ptr,
                                     second_half.as_ptr().offset(offset as isize),
                                     amount_in_second);
        }
    }

    // Cleanup where the ends of the buffers are
    self.head = self.wrap_sub(self.head, other_len);
    other.head = other.wrap_index(other_len);

    other
}

/// Moves all the elements of `other` into `Self`, leaving `other` empty.
///
/// # Panics
///
/// Panics if the new number of elements in self overflows a `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
/// let mut buf2: VecDeque<_> = vec![4, 5, 6].into_iter().collect();
/// buf.append(&mut buf2);
/// assert_eq!(buf.len(), 6);
/// assert_eq!(buf2.len(), 0);
/// ```
#[inline]
#[unstable(feature = "collections",
           reason = "new API, waiting for dust to settle")]
pub fn append(&mut self, other: &mut Self) {
    // naive impl: drain `other` and push each element onto `self`.
    self.extend(other.drain());
}
}

impl<T: Clone> VecDeque<T> {
    /// Modifies the ringbuf in-place so that `len()` is equal to new_len,
    /// either by removing excess elements or by appending copies of a value to the back.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(5);
    /// buf.push_back(10);
    /// buf.push_back(15);
    /// buf.resize(2, 0);
    /// buf.resize(6, 20);
    /// for (a, b) in [5, 10, 20, 20, 20, 20].iter().zip(buf.iter()) {
    ///     assert_eq!(a, b);
    /// }
    /// ```
    #[unstable(feature = "collections",
               reason = "matches collection reform specification; waiting on panic semantics")]
    pub fn resize(&mut self, new_len: usize, value: T) {
        let len = self.len();

        if new_len > len {
            // Grow by appending clones of `value`.
            self.extend(repeat(value).take(new_len - len))
        } else {
            // Shrink by dropping elements from the back.
            self.truncate(new_len);
        }
    }
}

/// Returns the index in the underlying buffer for a given logical element index.
#[inline]
fn wrap_index(index: usize, size: usize) -> usize {
    // size is always a power of 2
    index & (size - 1)
}

/// Calculate the number of elements left to be read in the buffer
#[inline]
fn count(tail: usize, head: usize, size: usize) -> usize {
    // size is always a power of 2
    (head.wrapping_sub(tail)) & (size - 1)
}

/// `VecDeque` iterator.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T:'a> {
    // Raw view of the whole backing buffer; only tail..head (wrapped) is
    // initialized and will be yielded.
    ring: &'a [T],
    tail: usize,
    head: usize
}

// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
impl<'a, T> Clone for Iter<'a, T> {
    fn clone(&self) -> Iter<'a, T> {
        Iter {
            ring: self.ring,
            tail: self.tail,
            head: self.head
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;

    #[inline]
    fn next(&mut self) -> Option<&'a T> {
        // tail == head means the iterator is exhausted.
        if self.tail == self.head {
            return None;
        }
        let tail = self.tail;
        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
        unsafe { Some(self.ring.get_unchecked(tail)) }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = count(self.tail, self.head, self.ring.len());
        (len, Some(len))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a T> {
        if self.tail == self.head {
            return None;
        }
        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
        unsafe { Some(self.ring.get_unchecked(self.head)) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> RandomAccessIterator for Iter<'a, T> {
    #[inline]
    fn indexable(&self) -> usize {
        let (len, _) = self.size_hint();
        len
    }

    #[inline]
    fn idx(&mut self, j: usize) -> Option<&'a T> {
        if j >= self.indexable() {
            None
        } else {
            // Logical index j maps to buffer index (tail + j) mod cap.
            let idx = wrap_index(self.tail.wrapping_add(j), self.ring.len());
            unsafe { Some(self.ring.get_unchecked(idx)) }
        }
    }
}

/// `VecDeque` mutable iterator.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T:'a> {
    ring: &'a mut [T],
    tail: usize,
    head: usize,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for IterMut<'a, T> {
    type Item = &'a mut T;

    #[inline]
    fn next(&mut self) -> Option<&'a mut T> {
        if self.tail == self.head {
            return None;
        }
        let tail = self.tail;
        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());

        unsafe {
            // Reborrow through a raw pointer so the returned reference gets
            // lifetime 'a instead of the shorter &mut self borrow; each slot
            // is yielded at most once, so the references never alias.
            let elem = self.ring.get_unchecked_mut(tail);
            Some(&mut *(elem as *mut _))
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = count(self.tail, self.head, self.ring.len());
        (len, Some(len))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut T> {
        if self.tail == self.head {
            return None;
        }
        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());

        unsafe {
            // Same raw-pointer reborrow as in `next` (see comment there).
            let elem = self.ring.get_unchecked_mut(self.head);
            Some(&mut *(elem as *mut _))
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}

/// A by-value VecDeque iterator
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
    inner: VecDeque<T>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        // Yields front-to-back by repeatedly popping the front.
        self.inner.pop_front()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.inner.len();
        (len, Some(len))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.inner.pop_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {}

/// A draining VecDeque iterator
#[unstable(feature = "collections",
           reason = "matches collection reform specification, waiting for dust to settle")]
pub struct Drain<'a, T: 'a> {
    inner: &'a mut VecDeque<T>,
}
#[unsafe_destructor]
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> Drop for Drain<'a, T> {
    fn drop(&mut self) {
        // Exhaust the iterator so every remaining element is read out (and
        // therefore dropped), then mark the underlying deque as empty.
        for _ in self.by_ref() {}
        self.inner.head = 0;
        self.inner.tail = 0;
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> Iterator for Drain<'a, T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop_front()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.inner.len();
        (len, Some(len))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.inner.pop_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialEq> PartialEq for VecDeque<A> {
    fn eq(&self, other: &VecDeque<A>) -> bool {
        // Length check first so `zip` can't silently truncate a mismatch.
        self.len() == other.len() &&
            self.iter().zip(other.iter()).all(|(a, b)| a.eq(b))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Eq> Eq for VecDeque<A> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialOrd> PartialOrd for VecDeque<A> {
    fn partial_cmp(&self, other: &VecDeque<A>) -> Option<Ordering> {
        // Lexicographic comparison over the front-to-back iteration order.
        iter::order::partial_cmp(self.iter(), other.iter())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Ord> Ord for VecDeque<A> {
    #[inline]
    fn cmp(&self, other: &VecDeque<A>) -> Ordering {
        iter::order::cmp(self.iter(), other.iter())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Hash> Hash for VecDeque<A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the length first, then each element in order.
        self.len().hash(state);
        for elt in self {
            elt.hash(state);
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Index<usize> for VecDeque<A> {
    type Output = A;

    #[inline]
    fn index(&self, i: &usize) -> &A {
        // NOTE: pre-1.0 `Index` took the index by reference, hence `&usize`.
        self.get(*i).expect("Out of bounds access")
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> IndexMut<usize> for VecDeque<A> {
    #[inline]
    fn index_mut(&mut self, i: &usize) -> &mut A {
        self.get_mut(*i).expect("Out of bounds access")
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> FromIterator<A> for VecDeque<A> {
    fn from_iter<T: IntoIterator<Item=A>>(iterable: T) -> VecDeque<A> {
        let iterator = iterable.into_iter();
        // Preallocate from the size hint's lower bound to limit regrowth.
        let (lower, _) = iterator.size_hint();
        let mut deq = VecDeque::with_capacity(lower);
        deq.extend(iterator);
        deq
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for VecDeque<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    fn into_iter(self) -> IntoIter<T> {
        // Delegates to the inherent `into_iter` method.
        self.into_iter()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a VecDeque<T> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a mut VecDeque<T> {
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T>;

    fn into_iter(mut self) -> IterMut<'a, T> {
        self.iter_mut()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Extend<A> for VecDeque<A> {
    fn extend<T: IntoIterator<Item=A>>(&mut self, iter: T) {
        for elt in iter {
            self.push_back(elt);
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug> fmt::Debug for VecDeque<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Renders as a comma-separated, bracketed list, e.g. `[1, 2, 3]`.
        try!(write!(f, "["));

        for (i, e) in self.iter().enumerate() {
            if i != 0 { try!(write!(f, ", ")); }
            try!(write!(f, "{:?}", *e));
        }

        write!(f, "]")
    }
}

#[cfg(test)]
mod test {
    use core::iter::{IteratorExt, self};
    use core::option::Option::Some;

    use test;

    use super::VecDeque;

    // NOTE: the benches reset `head`/`tail` directly between iterations;
    // that only logically empties the deque — element destructors are not
    // run, which is fine here because the elements are plain integers.
    #[bench]
    fn bench_push_back_100(b: &mut test::Bencher) {
        let mut deq = VecDeque::with_capacity(101);
        b.iter(|| {
            for i in 0..100 {
                deq.push_back(i);
            }
            deq.head = 0;
            deq.tail = 0;
        })
    }

    #[bench]
    fn bench_push_front_100(b: &mut test::Bencher) {
        let mut deq = VecDeque::with_capacity(101);
        b.iter(|| {
            for i in 0..100 {
                deq.push_front(i);
            }
            deq.head = 0;
            deq.tail = 0;
        })
    }

    #[bench]
    fn bench_pop_back_100(b: &mut test::Bencher) {
        let mut deq=
VecDeque::<i32>::with_capacity(101);

        b.iter(|| {
            // Pretend the deque holds 100 elements by setting the raw indices
            // (i32 is Copy, so no destructors are skipped), then drain it.
            deq.head = 100;
            deq.tail = 0;
            while !deq.is_empty() {
                test::black_box(deq.pop_back());
            }
        })
    }

    #[bench]
    fn bench_pop_front_100(b: &mut test::Bencher) {
        let mut deq = VecDeque::<i32>::with_capacity(101);

        b.iter(|| {
            deq.head = 100;
            deq.tail = 0;
            while !deq.is_empty() {
                test::black_box(deq.pop_front());
            }
        })
    }

    #[test]
    fn test_swap_front_back_remove() {
        fn test(back: bool) {
            // This test checks that every single combination of tail position and length is tested.
            // Capacity 15 should be large enough to cover every case.
            let mut tester = VecDeque::with_capacity(15);
            let usable_cap = tester.capacity();
            let final_len = usable_cap / 2;

            for len in 0..final_len {
                let expected = if back {
                    (0..len).collect()
                } else {
                    (0..len).rev().collect()
                };
                for tail_pos in 0..usable_cap {
                    tester.tail = tail_pos;
                    tester.head = tail_pos;
                    if back {
                        for i in 0..len * 2 {
                            tester.push_front(i);
                        }
                        for i in 0..len {
                            assert_eq!(tester.swap_back_remove(i), Some(len * 2 - 1 - i));
                        }
                    } else {
                        for i in 0..len * 2 {
                            tester.push_back(i);
                        }
                        for i in 0..len {
                            let idx = tester.len() - 1 - i;
                            assert_eq!(tester.swap_front_remove(idx), Some(len * 2 - 1 - i));
                        }
                    }
                    // Indices must remain in-bounds of the raw buffer.
                    assert!(tester.tail < tester.cap);
                    assert!(tester.head < tester.cap);
                    assert_eq!(tester, expected);
                }
            }
        }
        test(true);
        test(false);
    }

    #[test]
    fn test_insert() {
        // This test checks that every single combination of tail position, length, and
        // insertion position is tested. Capacity 15 should be large enough to cover every case.
        let mut tester = VecDeque::with_capacity(15);
        // can't guarantee we got 15, so have to get what we got.
        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
        // this test isn't covering what it wants to
        let cap = tester.capacity();


        // len is the length *after* insertion
        for len in 1..cap {
            // 0, 1, 2, .., len - 1
            let expected = iter::count(0, 1).take(len).collect();
            for tail_pos in 0..cap {
                for to_insert in 0..len {
                    tester.tail = tail_pos;
                    tester.head = tail_pos;
                    // Push everything except the element to be inserted, then
                    // insert it at its final position.
                    for i in 0..len {
                        if i != to_insert {
                            tester.push_back(i);
                        }
                    }
                    tester.insert(to_insert, to_insert);
                    assert!(tester.tail < tester.cap);
                    assert!(tester.head < tester.cap);
                    assert_eq!(tester, expected);
                }
            }
        }
    }

    #[test]
    fn test_remove() {
        // This test checks that every single combination of tail position, length, and
        // removal position is tested. Capacity 15 should be large enough to cover every case.
        let mut tester = VecDeque::with_capacity(15);
        // can't guarantee we got 15, so have to get what we got.
        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
        // this test isn't covering what it wants to
        let cap = tester.capacity();

        // len is the length *after* removal
        for len in 0..cap - 1 {
            // 0, 1, 2, .., len - 1
            let expected = iter::count(0, 1).take(len).collect();
            for tail_pos in 0..cap {
                for to_remove in 0..len + 1 {
                    tester.tail = tail_pos;
                    tester.head = tail_pos;
                    // Insert a sentinel (1234) at the removal position so the
                    // remaining contents equal `expected` after removal.
                    for i in 0..len {
                        if i == to_remove {
                            tester.push_back(1234);
                        }
                        tester.push_back(i);
                    }
                    if to_remove == len {
                        tester.push_back(1234);
                    }
                    tester.remove(to_remove);
                    assert!(tester.tail < tester.cap);
                    assert!(tester.head < tester.cap);
                    assert_eq!(tester, expected);
                }
            }
        }
    }

    #[test]
    fn test_shrink_to_fit() {
        // This test checks that every single combination of head and tail position,
        // is tested. Capacity 15 should be large enough to cover every case.
        let mut tester = VecDeque::with_capacity(15);
        // can't guarantee we got 15, so have to get what we got.
        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
        // this test isn't covering what it wants to
        let cap = tester.capacity();
        tester.reserve(63);
        let max_cap = tester.capacity();

        for len in 0..cap + 1 {
            // 0, 1, 2, .., len - 1
            let expected = iter::count(0, 1).take(len).collect();
            for tail_pos in 0..max_cap + 1 {
                tester.tail = tail_pos;
                tester.head = tail_pos;
                // Grow first so shrink_to_fit actually has work to do.
                tester.reserve(63);
                for i in 0..len {
                    tester.push_back(i);
                }
                tester.shrink_to_fit();
                assert!(tester.capacity() <= cap);
                assert!(tester.tail < tester.cap);
                assert!(tester.head < tester.cap);
                assert_eq!(tester, expected);
            }
        }
    }

    #[test]
    fn test_split_off() {
        // This test checks that every single combination of tail position, length, and
        // split position is tested. Capacity 15 should be large enough to cover every case.
        let mut tester = VecDeque::with_capacity(15);
        // can't guarantee we got 15, so have to get what we got.
        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
        // this test isn't covering what it wants to
        let cap = tester.capacity();

        // len is the length *before* splitting
        for len in 0..cap {
            // index to split at
            for at in 0..len + 1 {
                // 0, 1, 2, .., at - 1 (may be empty)
                let expected_self = iter::count(0, 1).take(at).collect();
                // at, at + 1, .., len - 1 (may be empty)
                let expected_other = iter::count(at, 1).take(len - at).collect();

                for tail_pos in 0..cap {
                    tester.tail = tail_pos;
                    tester.head = tail_pos;
                    for i in 0..len {
                        tester.push_back(i);
                    }
                    let result = tester.split_off(at);
                    assert!(tester.tail < tester.cap);
                    assert!(tester.head < tester.cap);
                    assert!(result.tail < result.cap);
                    assert!(result.head < result.cap);
                    assert_eq!(tester, expected_self);
                    assert_eq!(result, expected_other);
                }
            }
        }
    }
}
document iteration order for `vec_deque::IntoIter`
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! VecDeque is a double-ended queue, which is implemented with the help of a //! growing ring buffer. //! //! This queue has `O(1)` amortized inserts and removals from both ends of the //! container. It also has `O(1)` indexing like a vector. The contained elements //! are not required to be copyable, and the queue will be sendable if the //! contained type is sendable. #![stable(feature = "rust1", since = "1.0.0")] use core::prelude::*; use core::cmp::Ordering; use core::default::Default; use core::fmt; use core::iter::{self, repeat, FromIterator, IntoIterator, RandomAccessIterator}; use core::mem; use core::num::wrapping::WrappingOps; use core::ops::{Index, IndexMut}; use core::ptr::{self, Unique}; use core::slice; use core::hash::{Hash, Hasher}; use core::cmp; use alloc::heap; #[deprecated(since = "1.0.0", reason = "renamed to VecDeque")] #[unstable(feature = "collections")] pub use VecDeque as RingBuf; const INITIAL_CAPACITY: usize = 7; // 2^3 - 1 const MINIMUM_CAPACITY: usize = 1; // 2 - 1 /// `VecDeque` is a growable ring buffer, which can be used as a /// double-ended queue efficiently. #[stable(feature = "rust1", since = "1.0.0")] pub struct VecDeque<T> { // tail and head are pointers into the buffer. Tail always points // to the first element that could be read, Head always points // to where data should be written. // If tail == head the buffer is empty. The length of the ringbuf // is defined as the distance between the two. 
tail: usize,
    head: usize,
    // Total number of slots in the raw buffer; always a power of two
    // (asserted via `cap.count_ones() == 1` in reserve/shrink paths).
    cap: usize,
    ptr: Unique<T>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for VecDeque<T> {
    fn clone(&self) -> VecDeque<T> {
        // Rebuild via iteration rather than copying the raw buffer; the
        // clone ends up contiguous regardless of the source's layout.
        self.iter().cloned().collect()
    }
}

#[unsafe_destructor]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for VecDeque<T> {
    fn drop(&mut self) {
        // Drop the elements first, then free the raw buffer. Zero-sized
        // types never allocated, so there is nothing to deallocate for them.
        self.clear();
        unsafe {
            if mem::size_of::<T>() != 0 {
                heap::deallocate(*self.ptr as *mut u8,
                                 self.cap * mem::size_of::<T>(),
                                 mem::min_align_of::<T>())
            }
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for VecDeque<T> {
    #[inline]
    fn default() -> VecDeque<T> { VecDeque::new() }
}

impl<T> VecDeque<T> {
    /// Turn ptr into a slice
    #[inline]
    unsafe fn buffer_as_slice(&self) -> &[T] {
        slice::from_raw_parts(*self.ptr, self.cap)
    }

    /// Turn ptr into a mut slice
    #[inline]
    unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] {
        slice::from_raw_parts_mut(*self.ptr, self.cap)
    }

    /// Moves an element out of the buffer
    #[inline]
    unsafe fn buffer_read(&mut self, off: usize) -> T {
        ptr::read(self.ptr.offset(off as isize))
    }

    /// Writes an element into the buffer, moving it.
    #[inline]
    unsafe fn buffer_write(&mut self, off: usize, t: T) {
        ptr::write(self.ptr.offset(off as isize), t);
    }

    /// Returns true iff the buffer is at capacity
    // One slot is always left empty so that a full buffer (tail one past
    // head) is distinguishable from an empty one (tail == head).
    #[inline]
    fn is_full(&self) -> bool { self.cap - self.len() == 1 }

    /// Returns the index in the underlying buffer for a given logical element
    /// index.
    #[inline]
    fn wrap_index(&self, idx: usize) -> usize { wrap_index(idx, self.cap) }

    /// Returns the index in the underlying buffer for a given logical element
    /// index + addend.
    // wrapping_add keeps the intermediate arithmetic well-defined even when
    // it overflows; the free `wrap_index` then reduces modulo `cap`.
    #[inline]
    fn wrap_add(&self, idx: usize, addend: usize) -> usize {
        wrap_index(idx.wrapping_add(addend), self.cap)
    }

    /// Returns the index in the underlying buffer for a given logical element
    /// index - subtrahend.
#[inline]
    fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
        wrap_index(idx.wrapping_sub(subtrahend), self.cap)
    }

    /// Copies a contiguous block of memory len long from src to dst
    // Neither range may wrap around the end of the buffer; both must lie
    // entirely within `cap` (checked by the debug_asserts).
    #[inline]
    unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
        debug_assert!(dst + len <= self.cap, "dst={} src={} len={} cap={}",
                      dst, src, len, self.cap);
        debug_assert!(src + len <= self.cap, "dst={} src={} len={} cap={}",
                      dst, src, len, self.cap);
        ptr::copy(
            self.ptr.offset(dst as isize),
            self.ptr.offset(src as isize),
            len);
    }

    /// Copies a contiguous block of memory len long from src to dst
    // Like `copy`, but the caller additionally guarantees the two ranges
    // do not overlap.
    #[inline]
    unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) {
        debug_assert!(dst + len <= self.cap, "dst={} src={} len={} cap={}",
                      dst, src, len, self.cap);
        debug_assert!(src + len <= self.cap, "dst={} src={} len={} cap={}",
                      dst, src, len, self.cap);
        ptr::copy_nonoverlapping(
            self.ptr.offset(dst as isize),
            self.ptr.offset(src as isize),
            len);
    }
}

impl<T> VecDeque<T> {
    /// Creates an empty `VecDeque`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new() -> VecDeque<T> {
        VecDeque::with_capacity(INITIAL_CAPACITY)
    }

    /// Creates an empty `VecDeque` with space for at least `n` elements.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(n: usize) -> VecDeque<T> {
        // +1 since the ringbuffer always leaves one space empty
        let cap = cmp::max(n + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
        assert!(cap > n, "capacity overflow");
        let size = cap.checked_mul(mem::size_of::<T>())
                      .expect("capacity overflow");

        let ptr = unsafe {
            if mem::size_of::<T>() != 0 {
                let ptr = heap::allocate(size, mem::min_align_of::<T>()) as *mut T;;
                if ptr.is_null() { ::alloc::oom() }
                Unique::new(ptr)
            } else {
                // Zero-sized types never touch the allocator; use the
                // allocator's sentinel pointer instead.
                Unique::new(heap::EMPTY as *mut T)
            }
        };

        VecDeque {
            tail: 0,
            head: 0,
            cap: cap,
            ptr: ptr,
        }
    }

    /// Retrieves an element in the `VecDeque` by index.
///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(3);
    /// buf.push_back(4);
    /// buf.push_back(5);
    /// assert_eq!(buf.get(1).unwrap(), &4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get(&self, i: usize) -> Option<&T> {
        if i < self.len() {
            // Translate the logical index into a physical buffer offset.
            let idx = self.wrap_add(self.tail, i);
            unsafe { Some(&*self.ptr.offset(idx as isize)) }
        } else {
            None
        }
    }

    /// Retrieves an element in the `VecDeque` mutably by index.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(3);
    /// buf.push_back(4);
    /// buf.push_back(5);
    /// match buf.get_mut(1) {
    ///     None => {}
    ///     Some(elem) => {
    ///         *elem = 7;
    ///     }
    /// }
    ///
    /// assert_eq!(buf[1], 7);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self, i: usize) -> Option<&mut T> {
        if i < self.len() {
            let idx = self.wrap_add(self.tail, i);
            unsafe { Some(&mut *self.ptr.offset(idx as isize)) }
        } else {
            None
        }
    }

    /// Swaps elements at indices `i` and `j`.
    ///
    /// `i` and `j` may be equal.
    ///
    /// Fails if there is no element with either index.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(3);
    /// buf.push_back(4);
    /// buf.push_back(5);
    /// buf.swap(0, 2);
    /// assert_eq!(buf[0], 5);
    /// assert_eq!(buf[2], 3);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&mut self, i: usize, j: usize) {
        assert!(i < self.len());
        assert!(j < self.len());
        let ri = self.wrap_add(self.tail, i);
        let rj = self.wrap_add(self.tail, j);
        unsafe {
            ptr::swap(self.ptr.offset(ri as isize), self.ptr.offset(rj as isize))
        }
    }

    /// Returns the number of elements the `VecDeque` can hold without
    /// reallocating.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let buf: VecDeque<i32> = VecDeque::with_capacity(10);
    /// assert!(buf.capacity() >= 10);
    /// ```
    // `cap - 1` because one buffer slot is always kept empty.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn capacity(&self) -> usize { self.cap - 1 }

    /// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
    /// given `VecDeque`. Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it requests. Therefore
    /// capacity can not be relied upon to be precisely minimal. Prefer `reserve` if future
    /// insertions are expected.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
    /// buf.reserve_exact(10);
    /// assert!(buf.capacity() >= 11);
    /// ```
    // The ring buffer is always a power of two, so an "exact" reservation
    // cannot do better than `reserve`; simply delegate.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve_exact(&mut self, additional: usize) {
        self.reserve(additional);
    }

    /// Reserves capacity for at least `additional` more elements to be inserted in the given
    /// `Ringbuf`. The collection may reserve more space to avoid frequent reallocations.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
    /// buf.reserve(10);
    /// assert!(buf.capacity() >= 11);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve(&mut self, additional: usize) {
        let new_len = self.len() + additional;
        // Detect usize overflow of the requested length (+1 for the
        // always-empty slot).
        assert!(new_len + 1 > self.len(), "capacity overflow");
        if new_len > self.capacity() {
            // Round the buffer size up to the next power of two.
            let count = (new_len + 1).next_power_of_two();
            assert!(count >= new_len + 1);

            if mem::size_of::<T>() != 0 {
                let old = self.cap * mem::size_of::<T>();
                let new = count.checked_mul(mem::size_of::<T>())
                               .expect("capacity overflow");
                unsafe {
                    let ptr = heap::reallocate(*self.ptr as *mut u8,
                                               old,
                                               new,
                                               mem::min_align_of::<T>()) as *mut T;
                    if ptr.is_null() { ::alloc::oom() }
                    self.ptr = Unique::new(ptr);
                }
            }

            // Move the shortest contiguous section of the ring buffer
            //    T             H
            //   [o o o o o o o . ]
            //    T             H
            // A [o o o o o o o . . . . . . . . . ]
            //        H T
            //   [o o . o o o o o ]
            //          T             H
            // B [. . . o o o o o o o . . . . . . ]
            //              H T
            //   [o o o o o . o o ]
            //              H                 T
            // C [o o o o o . . . . . . . . . o o ]

            let oldcap = self.cap;
            self.cap = count;

            if self.tail <= self.head { // A
                // Nop: contiguous data is unaffected by growing at the end.
            } else if self.head < oldcap - self.tail { // B
                // Head section is shorter: move it just past the old end.
                unsafe {
                    self.copy_nonoverlapping(oldcap, 0, self.head);
                }
                self.head += oldcap;
                debug_assert!(self.head > self.tail);
            } else { // C
                // Tail section is shorter: move it to the new end.
                let new_tail = count - (oldcap - self.tail);
                unsafe {
                    self.copy_nonoverlapping(new_tail, self.tail, oldcap - self.tail);
                }
                self.tail = new_tail;
                debug_assert!(self.head < self.tail);
            }

            debug_assert!(self.head < self.cap);
            debug_assert!(self.tail < self.cap);
            debug_assert!(self.cap.count_ones() == 1);
        }
    }

    /// Shrinks the capacity of the ringbuf as much as possible.
    ///
    /// It will drop down as close as possible to the length but the allocator may still inform the
    /// ringbuf that there is space for a few more elements.
///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::with_capacity(15);
    /// buf.extend(0..4);
    /// assert_eq!(buf.capacity(), 15);
    /// buf.shrink_to_fit();
    /// assert!(buf.capacity() >= 4);
    /// ```
    pub fn shrink_to_fit(&mut self) {
        // +1 since the ringbuffer always leaves one space empty
        // len + 1 can't overflow for an existing, well-formed ringbuf.
        let target_cap = cmp::max(self.len() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
        if target_cap < self.cap {
            // There are three cases of interest:
            //   All elements are out of desired bounds
            //   Elements are contiguous, and head is out of desired bounds
            //   Elements are discontiguous, and tail is out of desired bounds
            //
            // At all other times, element positions are unaffected.
            //
            // Indicates that elements at the head should be moved.
            let head_outside = self.head == 0 || self.head >= target_cap;
            // Move elements from out of desired bounds (positions after target_cap)
            if self.tail >= target_cap && head_outside {
                //                    T             H
                //   [. . . . . . . . o o o o o o o . ]
                //    T             H
                //   [o o o o o o o . ]
                unsafe {
                    self.copy_nonoverlapping(0, self.tail, self.len());
                }
                self.head = self.len();
                self.tail = 0;
            } else if self.tail != 0 && self.tail < target_cap && head_outside {
                //          T             H
                //   [. . . o o o o o o o . . . . . . ]
                //        H T
                //   [o o . o o o o o ]
                let len = self.wrap_sub(self.head, target_cap);
                unsafe {
                    self.copy_nonoverlapping(0, target_cap, len);
                }
                self.head = len;
                debug_assert!(self.head < self.tail);
            } else if self.tail >= target_cap {
                //              H                 T
                //   [o o o o o . . . . . . . . . o o ]
                //              H T
                //   [o o o o o . o o ]
                debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
                let len = self.cap - self.tail;
                let new_tail = target_cap - len;
                unsafe {
                    self.copy_nonoverlapping(new_tail, self.tail, len);
                }
                self.tail = new_tail;
                debug_assert!(self.head < self.tail);
            }

            if mem::size_of::<T>() != 0 {
                let old = self.cap * mem::size_of::<T>();
                let new_size = target_cap * mem::size_of::<T>();
                unsafe {
                    let ptr = heap::reallocate(*self.ptr as *mut u8,
                                               old,
                                               new_size,
                                               mem::min_align_of::<T>()) as *mut T;
                    if ptr.is_null() { ::alloc::oom() }
                    self.ptr = Unique::new(ptr);
                }
            }

            self.cap = target_cap;
            debug_assert!(self.head < self.cap);
            debug_assert!(self.tail < self.cap);
            debug_assert!(self.cap.count_ones() == 1);
        }
    }

    /// Shorten a ringbuf, dropping excess elements from the back.
    ///
    /// If `len` is greater than the ringbuf's current length, this has no
    /// effect.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(5);
    /// buf.push_back(10);
    /// buf.push_back(15);
    /// buf.truncate(1);
    /// assert_eq!(buf.len(), 1);
    /// assert_eq!(Some(&5), buf.get(0));
    /// ```
    #[unstable(feature = "collections",
               reason = "matches collection reform specification; waiting on panic semantics")]
    pub fn truncate(&mut self, len: usize) {
        // Pop (and therefore drop) one element at a time from the back.
        for _ in len..self.len() {
            self.pop_back();
        }
    }

    /// Returns a front-to-back iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(5);
    /// buf.push_back(3);
    /// buf.push_back(4);
    /// let b: &[_] = &[&5, &3, &4];
    /// assert_eq!(buf.iter().collect::<Vec<&i32>>().as_slice(), b);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter(&self) -> Iter<T> {
        Iter {
            tail: self.tail,
            head: self.head,
            ring: unsafe { self.buffer_as_slice() }
        }
    }

    /// Returns a front-to-back iterator that returns mutable references.
///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(5);
    /// buf.push_back(3);
    /// buf.push_back(4);
    /// for num in buf.iter_mut() {
    ///     *num = *num - 2;
    /// }
    /// let b: &[_] = &[&mut 3, &mut 1, &mut 2];
    /// assert_eq!(&buf.iter_mut().collect::<Vec<&mut i32>>()[], b);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter_mut(&mut self) -> IterMut<T> {
        IterMut {
            tail: self.tail,
            head: self.head,
            ring: unsafe { self.buffer_as_mut_slice() },
        }
    }

    /// Consumes the list into a front-to-back iterator yielding elements by value.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_iter(self) -> IntoIter<T> {
        IntoIter {
            inner: self,
        }
    }

    /// Returns a pair of slices which contain, in order, the contents of the
    /// `VecDeque`.
    #[inline]
    #[unstable(feature = "collections",
               reason = "matches collection reform specification, waiting for dust to settle")]
    pub fn as_slices(&self) -> (&[T], &[T]) {
        unsafe {
            let contiguous = self.is_contiguous();
            let buf = self.buffer_as_slice();
            if contiguous {
                // All elements lie in [tail, head); second slice is empty.
                let (empty, buf) = buf.split_at(0);
                (&buf[self.tail..self.head], empty)
            } else {
                // Wrapped: first slice is [tail, cap), second is [0, head).
                let (mid, right) = buf.split_at(self.tail);
                let (left, _) = mid.split_at(self.head);
                (right, left)
            }
        }
    }

    /// Returns a pair of slices which contain, in order, the contents of the
    /// `VecDeque`.
    #[inline]
    #[unstable(feature = "collections",
               reason = "matches collection reform specification, waiting for dust to settle")]
    pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
        unsafe {
            let contiguous = self.is_contiguous();
            let head = self.head;
            let tail = self.tail;
            let buf = self.buffer_as_mut_slice();

            if contiguous {
                let (empty, buf) = buf.split_at_mut(0);
                (&mut buf[tail .. head], empty)
            } else {
                let (mid, right) = buf.split_at_mut(tail);
                let (left, _) = mid.split_at_mut(head);

                (right, left)
            }
        }
    }

    /// Returns the number of elements in the `VecDeque`.
///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut v = VecDeque::new();
    /// assert_eq!(v.len(), 0);
    /// v.push_back(1);
    /// assert_eq!(v.len(), 1);
    /// ```
    // Delegates to the free `count` helper: the wrapped distance from tail
    // to head within a `cap`-slot buffer.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn len(&self) -> usize { count(self.tail, self.head, self.cap) }

    /// Returns true if the buffer contains no elements
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut v = VecDeque::new();
    /// assert!(v.is_empty());
    /// v.push_front(1);
    /// assert!(!v.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_empty(&self) -> bool { self.len() == 0 }

    /// Creates a draining iterator that clears the `VecDeque` and iterates over
    /// the removed items from start to end.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut v = VecDeque::new();
    /// v.push_back(1);
    /// assert_eq!(v.drain().next(), Some(1));
    /// assert!(v.is_empty());
    /// ```
    #[inline]
    #[unstable(feature = "collections",
               reason = "matches collection reform specification, waiting for dust to settle")]
    pub fn drain(&mut self) -> Drain<T> {
        Drain {
            inner: self,
        }
    }

    /// Clears the buffer, removing all values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut v = VecDeque::new();
    /// v.push_back(1);
    /// v.clear();
    /// assert!(v.is_empty());
    /// ```
    // Dropping the returned Drain iterator removes (and drops) every element.
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn clear(&mut self) {
        self.drain();
    }

    /// Provides a reference to the front element, or `None` if the sequence is
    /// empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// assert_eq!(d.front(), None);
    ///
    /// d.push_back(1);
    /// d.push_back(2);
    /// assert_eq!(d.front(), Some(&1));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn front(&self) -> Option<&T> {
        if !self.is_empty() { Some(&self[0]) } else { None }
    }

    /// Provides a mutable reference to the front element, or `None` if the
    /// sequence is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// assert_eq!(d.front_mut(), None);
    ///
    /// d.push_back(1);
    /// d.push_back(2);
    /// match d.front_mut() {
    ///     Some(x) => *x = 9,
    ///     None => (),
    /// }
    /// assert_eq!(d.front(), Some(&9));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn front_mut(&mut self) -> Option<&mut T> {
        if !self.is_empty() { Some(&mut self[0]) } else { None }
    }

    /// Provides a reference to the back element, or `None` if the sequence is
    /// empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// assert_eq!(d.back(), None);
    ///
    /// d.push_back(1);
    /// d.push_back(2);
    /// assert_eq!(d.back(), Some(&2));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn back(&self) -> Option<&T> {
        if !self.is_empty() { Some(&self[self.len() - 1]) } else { None }
    }

    /// Provides a mutable reference to the back element, or `None` if the
    /// sequence is empty.
///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// assert_eq!(d.back(), None);
    ///
    /// d.push_back(1);
    /// d.push_back(2);
    /// match d.back_mut() {
    ///     Some(x) => *x = 9,
    ///     None => (),
    /// }
    /// assert_eq!(d.back(), Some(&9));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn back_mut(&mut self) -> Option<&mut T> {
        // Compute len before the borrow for indexing.
        let len = self.len();
        if !self.is_empty() { Some(&mut self[len - 1]) } else { None }
    }

    /// Removes the first element and returns it, or `None` if the sequence is
    /// empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// d.push_back(1);
    /// d.push_back(2);
    ///
    /// assert_eq!(d.pop_front(), Some(1));
    /// assert_eq!(d.pop_front(), Some(2));
    /// assert_eq!(d.pop_front(), None);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn pop_front(&mut self) -> Option<T> {
        if self.is_empty() {
            None
        } else {
            // Advance tail past the element, then move it out of the buffer.
            let tail = self.tail;
            self.tail = self.wrap_add(self.tail, 1);
            unsafe { Some(self.buffer_read(tail)) }
        }
    }

    /// Inserts an element first in the sequence.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// d.push_front(1);
    /// d.push_front(2);
    /// assert_eq!(d.front(), Some(&2));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn push_front(&mut self, t: T) {
        // Grow first if the single spare slot is all that remains.
        if self.is_full() {
            self.reserve(1);
            debug_assert!(!self.is_full());
        }

        self.tail = self.wrap_sub(self.tail, 1);
        let tail = self.tail;
        unsafe { self.buffer_write(tail, t); }
    }

    /// Appends an element to the back of a buffer
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(1);
    /// buf.push_back(3);
    /// assert_eq!(3, *buf.back().unwrap());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn push_back(&mut self, t: T) {
        if self.is_full() {
            self.reserve(1);
            debug_assert!(!self.is_full());
        }

        // Write at head, then advance it (head points at the next free slot).
        let head = self.head;
        self.head = self.wrap_add(self.head, 1);
        unsafe { self.buffer_write(head, t) }
    }

    /// Removes the last element from a buffer and returns it, or `None` if
    /// it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// assert_eq!(buf.pop_back(), None);
    /// buf.push_back(1);
    /// buf.push_back(3);
    /// assert_eq!(buf.pop_back(), Some(3));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn pop_back(&mut self) -> Option<T> {
        if self.is_empty() {
            None
        } else {
            self.head = self.wrap_sub(self.head, 1);
            let head = self.head;
            unsafe { Some(self.buffer_read(head)) }
        }
    }

    // True when the elements occupy a single run [tail, head) with no wrap.
    #[inline]
    fn is_contiguous(&self) -> bool {
        self.tail <= self.head
    }

    /// Removes an element from anywhere in the ringbuf and returns it, replacing it with the last
    /// element.
    ///
    /// This does not preserve ordering, but is O(1).
    ///
    /// Returns `None` if `index` is out of bounds.
///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// assert_eq!(buf.swap_back_remove(0), None);
    /// buf.push_back(5);
    /// buf.push_back(99);
    /// buf.push_back(15);
    /// buf.push_back(20);
    /// buf.push_back(10);
    /// assert_eq!(buf.swap_back_remove(1), Some(99));
    /// ```
    #[unstable(feature = "collections",
               reason = "the naming of this function may be altered")]
    pub fn swap_back_remove(&mut self, index: usize) -> Option<T> {
        let length = self.len();
        if length > 0 && index < length - 1 {
            // Move the last element into the hole, then pop it from the back.
            self.swap(index, length - 1);
        } else if index >= length {
            return None;
        }
        self.pop_back()
    }

    /// Removes an element from anywhere in the ringbuf and returns it, replacing it with the first
    /// element.
    ///
    /// This does not preserve ordering, but is O(1).
    ///
    /// Returns `None` if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// assert_eq!(buf.swap_front_remove(0), None);
    /// buf.push_back(15);
    /// buf.push_back(5);
    /// buf.push_back(10);
    /// buf.push_back(99);
    /// buf.push_back(20);
    /// assert_eq!(buf.swap_front_remove(3), Some(99));
    /// ```
    #[unstable(feature = "collections",
               reason = "the naming of this function may be altered")]
    pub fn swap_front_remove(&mut self, index: usize) -> Option<T> {
        let length = self.len();
        if length > 0 && index < length && index != 0 {
            // Move the first element into the hole, then pop it from the front.
            self.swap(index, 0);
        } else if index >= length {
            return None;
        }
        self.pop_front()
    }

    /// Inserts an element at position `i` within the ringbuf. Whichever
    /// end is closer to the insertion point will be moved to make room,
    /// and all the affected elements will be moved to new positions.
/// /// # Panics /// /// Panics if `i` is greater than ringbuf's length /// /// # Examples /// ``` /// use std::collections::VecDeque; /// /// let mut buf = VecDeque::new(); /// buf.push_back(10); /// buf.push_back(12); /// buf.insert(1,11); /// assert_eq!(Some(&11), buf.get(1)); /// ``` pub fn insert(&mut self, i: usize, t: T) { assert!(i <= self.len(), "index out of bounds"); if self.is_full() { self.reserve(1); debug_assert!(!self.is_full()); } // Move the least number of elements in the ring buffer and insert // the given object // // At most len/2 - 1 elements will be moved. O(min(n, n-i)) // // There are three main cases: // Elements are contiguous // - special case when tail is 0 // Elements are discontiguous and the insert is in the tail section // Elements are discontiguous and the insert is in the head section // // For each of those there are two more cases: // Insert is closer to tail // Insert is closer to head // // Key: H - self.head // T - self.tail // o - Valid element // I - Insertion element // A - The element that should be after the insertion point // M - Indicates element was moved let idx = self.wrap_add(self.tail, i); let distance_to_tail = i; let distance_to_head = self.len() - i; let contiguous = self.is_contiguous(); match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) { (true, true, _) if i == 0 => { // push_front // // T // I H // [A o o o o o o . . . . . . . . .] // // H T // [A o o o o o o o . . . . . I] // self.tail = self.wrap_sub(self.tail, 1); }, (true, true, _) => unsafe { // contiguous, insert closer to tail: // // T I H // [. . . o o A o o o o . . . . . .] // // T H // [. . o o I A o o o o . . . . . .] // M M // // contiguous, insert closer to tail and tail is 0: // // // T I H // [o o A o o o o . . . . . . . . .] // // H T // [o I A o o o o o . . . . . . . o] // M M let new_tail = self.wrap_sub(self.tail, 1); self.copy(new_tail, self.tail, 1); // Already moved the tail, so we only copy `i - 1` elements. 
self.copy(self.tail, self.tail + 1, i - 1); self.tail = new_tail; }, (true, false, _) => unsafe { // contiguous, insert closer to head: // // T I H // [. . . o o o o A o o . . . . . .] // // T H // [. . . o o o o I A o o . . . . .] // M M M self.copy(idx + 1, idx, self.head - idx); self.head = self.wrap_add(self.head, 1); }, (false, true, true) => unsafe { // discontiguous, insert closer to tail, tail section: // // H T I // [o o o o o o . . . . . o o A o o] // // H T // [o o o o o o . . . . o o I A o o] // M M self.copy(self.tail - 1, self.tail, i); self.tail -= 1; }, (false, false, true) => unsafe { // discontiguous, insert closer to head, tail section: // // H T I // [o o . . . . . . . o o o o o A o] // // H T // [o o o . . . . . . o o o o o I A] // M M M M // copy elements up to new head self.copy(1, 0, self.head); // copy last element into empty spot at bottom of buffer self.copy(0, self.cap - 1, 1); // move elements from idx to end forward not including ^ element self.copy(idx + 1, idx, self.cap - 1 - idx); self.head += 1; }, (false, true, false) if idx == 0 => unsafe { // discontiguous, insert is closer to tail, head section, // and is at index zero in the internal buffer: // // I H T // [A o o o o o o o o o . . . o o o] // // H T // [A o o o o o o o o o . . o o o I] // M M M // copy elements up to new tail self.copy(self.tail - 1, self.tail, self.cap - self.tail); // copy last element into empty spot at bottom of buffer self.copy(self.cap - 1, 0, 1); self.tail -= 1; }, (false, true, false) => unsafe { // discontiguous, insert closer to tail, head section: // // I H T // [o o o A o o o o o o . . . o o o] // // H T // [o o I A o o o o o o . . 
o o o o] // M M M M M M // copy elements up to new tail self.copy(self.tail - 1, self.tail, self.cap - self.tail); // copy last element into empty spot at bottom of buffer self.copy(self.cap - 1, 0, 1); // move elements from idx-1 to end forward not including ^ element self.copy(0, 1, idx - 1); self.tail -= 1; }, (false, false, false) => unsafe { // discontiguous, insert closer to head, head section: // // I H T // [o o o o A o o . . . . . . o o o] // // H T // [o o o o I A o o . . . . . o o o] // M M M self.copy(idx + 1, idx, self.head - idx); self.head += 1; } } // tail might've been changed so we need to recalculate let new_idx = self.wrap_add(self.tail, i); unsafe { self.buffer_write(new_idx, t); } } /// Removes and returns the element at position `i` from the ringbuf. /// Whichever end is closer to the removal point will be moved to make /// room, and all the affected elements will be moved to new positions. /// Returns `None` if `i` is out of bounds. /// /// # Examples /// ``` /// use std::collections::VecDeque; /// /// let mut buf = VecDeque::new(); /// buf.push_back(5); /// buf.push_back(10); /// buf.push_back(12); /// buf.push_back(15); /// buf.remove(2); /// assert_eq!(Some(&15), buf.get(2)); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn remove(&mut self, i: usize) -> Option<T> { if self.is_empty() || self.len() <= i { return None; } // There are three main cases: // Elements are contiguous // Elements are discontiguous and the removal is in the tail section // Elements are discontiguous and the removal is in the head section // - special case when elements are technically contiguous, // but self.head = 0 // // For each of those there are two more cases: // Insert is closer to tail // Insert is closer to head // // Key: H - self.head // T - self.tail // o - Valid element // x - Element marked for removal // R - Indicates element that is being removed // M - Indicates element was moved let idx = self.wrap_add(self.tail, i); let elem = unsafe 
{ Some(self.buffer_read(idx)) }; let distance_to_tail = i; let distance_to_head = self.len() - i; let contiguous = self.is_contiguous(); match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) { (true, true, _) => unsafe { // contiguous, remove closer to tail: // // T R H // [. . . o o x o o o o . . . . . .] // // T H // [. . . . o o o o o o . . . . . .] // M M self.copy(self.tail + 1, self.tail, i); self.tail += 1; }, (true, false, _) => unsafe { // contiguous, remove closer to head: // // T R H // [. . . o o o o x o o . . . . . .] // // T H // [. . . o o o o o o . . . . . . .] // M M self.copy(idx, idx + 1, self.head - idx - 1); self.head -= 1; }, (false, true, true) => unsafe { // discontiguous, remove closer to tail, tail section: // // H T R // [o o o o o o . . . . . o o x o o] // // H T // [o o o o o o . . . . . . o o o o] // M M self.copy(self.tail + 1, self.tail, i); self.tail = self.wrap_add(self.tail, 1); }, (false, false, false) => unsafe { // discontiguous, remove closer to head, head section: // // R H T // [o o o o x o o . . . . . . o o o] // // H T // [o o o o o o . . . . . . . o o o] // M M self.copy(idx, idx + 1, self.head - idx - 1); self.head -= 1; }, (false, false, true) => unsafe { // discontiguous, remove closer to head, tail section: // // H T R // [o o o . . . . . . o o o o o x o] // // H T // [o o . . . . . . . o o o o o o o] // M M M M // // or quasi-discontiguous, remove next to head, tail section: // // H T R // [. . . . . . . . . o o o o o x o] // // T H // [. . . . . . . . . o o o o o o .] // M // draw in elements in the tail section self.copy(idx, idx + 1, self.cap - idx - 1); // Prevents underflow. 
if self.head != 0 { // copy first element into empty spot self.copy(self.cap - 1, 0, 1); // move elements in the head section backwards self.copy(0, 1, self.head - 1); } self.head = self.wrap_sub(self.head, 1); }, (false, true, false) => unsafe { // discontiguous, remove closer to tail, head section: // // R H T // [o o x o o o o o o o . . . o o o] // // H T // [o o o o o o o o o o . . . . o o] // M M M M M // draw in elements up to idx self.copy(1, 0, idx); // copy last element into empty spot self.copy(0, self.cap - 1, 1); // move elements from tail to end forward, excluding the last one self.copy(self.tail + 1, self.tail, self.cap - self.tail - 1); self.tail = self.wrap_add(self.tail, 1); } } return elem; } /// Splits the collection into two at the given index. /// /// Returns a newly allocated `Self`. `self` contains elements `[0, at)`, /// and the returned `Self` contains elements `[at, len)`. /// /// Note that the capacity of `self` does not change. /// /// # Panics /// /// Panics if `at > len` /// /// # Examples /// /// ``` /// use std::collections::VecDeque; /// /// let mut buf: VecDeque<_> = vec![1,2,3].into_iter().collect(); /// let buf2 = buf.split_off(1); /// // buf = [1], buf2 = [2, 3] /// assert_eq!(buf.len(), 1); /// assert_eq!(buf2.len(), 2); /// ``` #[inline] #[unstable(feature = "collections", reason = "new API, waiting for dust to settle")] pub fn split_off(&mut self, at: usize) -> Self { let len = self.len(); assert!(at <= len, "`at` out of bounds"); let other_len = len - at; let mut other = VecDeque::with_capacity(other_len); unsafe { let (first_half, second_half) = self.as_slices(); let first_len = first_half.len(); let second_len = second_half.len(); if at < first_len { // `at` lies in the first half. let amount_in_first = first_len - at; ptr::copy_nonoverlapping(*other.ptr, first_half.as_ptr().offset(at as isize), amount_in_first); // just take all of the second half. 
ptr::copy_nonoverlapping(other.ptr.offset(amount_in_first as isize), second_half.as_ptr(), second_len); } else { // `at` lies in the second half, need to factor in the elements we skipped // in the first half. let offset = at - first_len; let amount_in_second = second_len - offset; ptr::copy_nonoverlapping(*other.ptr, second_half.as_ptr().offset(offset as isize), amount_in_second); } } // Cleanup where the ends of the buffers are self.head = self.wrap_sub(self.head, other_len); other.head = other.wrap_index(other_len); other } /// Moves all the elements of `other` into `Self`, leaving `other` empty. /// /// # Panics /// /// Panics if the new number of elements in self overflows a `usize`. /// /// # Examples /// /// ``` /// use std::collections::VecDeque; /// /// let mut buf: VecDeque<_> = vec![1, 2, 3].into_iter().collect(); /// let mut buf2: VecDeque<_> = vec![4, 5, 6].into_iter().collect(); /// buf.append(&mut buf2); /// assert_eq!(buf.len(), 6); /// assert_eq!(buf2.len(), 0); /// ``` #[inline] #[unstable(feature = "collections", reason = "new API, waiting for dust to settle")] pub fn append(&mut self, other: &mut Self) { // naive impl self.extend(other.drain()); } } impl<T: Clone> VecDeque<T> { /// Modifies the ringbuf in-place so that `len()` is equal to new_len, /// either by removing excess elements or by appending copies of a value to the back. 
/// /// # Examples /// /// ``` /// use std::collections::VecDeque; /// /// let mut buf = VecDeque::new(); /// buf.push_back(5); /// buf.push_back(10); /// buf.push_back(15); /// buf.resize(2, 0); /// buf.resize(6, 20); /// for (a, b) in [5, 10, 20, 20, 20, 20].iter().zip(buf.iter()) { /// assert_eq!(a, b); /// } /// ``` #[unstable(feature = "collections", reason = "matches collection reform specification; waiting on panic semantics")] pub fn resize(&mut self, new_len: usize, value: T) { let len = self.len(); if new_len > len { self.extend(repeat(value).take(new_len - len)) } else { self.truncate(new_len); } } } /// Returns the index in the underlying buffer for a given logical element index. #[inline] fn wrap_index(index: usize, size: usize) -> usize { // size is always a power of 2 index & (size - 1) } /// Calculate the number of elements left to be read in the buffer #[inline] fn count(tail: usize, head: usize, size: usize) -> usize { // size is always a power of 2 (head.wrapping_sub(tail)) & (size - 1) } /// `VecDeque` iterator. 
#[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, T:'a> { ring: &'a [T], tail: usize, head: usize } // FIXME(#19839) Remove in favor of `#[derive(Clone)]` impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Iter<'a, T> { Iter { ring: self.ring, tail: self.tail, head: self.head } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Iterator for Iter<'a, T> { type Item = &'a T; #[inline] fn next(&mut self) -> Option<&'a T> { if self.tail == self.head { return None; } let tail = self.tail; self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len()); unsafe { Some(self.ring.get_unchecked(tail)) } } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let len = count(self.tail, self.head, self.ring.len()); (len, Some(len)) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[inline] fn next_back(&mut self) -> Option<&'a T> { if self.tail == self.head { return None; } self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len()); unsafe { Some(self.ring.get_unchecked(self.head)) } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> RandomAccessIterator for Iter<'a, T> { #[inline] fn indexable(&self) -> usize { let (len, _) = self.size_hint(); len } #[inline] fn idx(&mut self, j: usize) -> Option<&'a T> { if j >= self.indexable() { None } else { let idx = wrap_index(self.tail.wrapping_add(j), self.ring.len()); unsafe { Some(self.ring.get_unchecked(idx)) } } } } /// `VecDeque` mutable iterator. 
#[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, T:'a> { ring: &'a mut [T], tail: usize, head: usize, } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Iterator for IterMut<'a, T> { type Item = &'a mut T; #[inline] fn next(&mut self) -> Option<&'a mut T> { if self.tail == self.head { return None; } let tail = self.tail; self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len()); unsafe { let elem = self.ring.get_unchecked_mut(tail); Some(&mut *(elem as *mut _)) } } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let len = count(self.tail, self.head, self.ring.len()); (len, Some(len)) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[inline] fn next_back(&mut self) -> Option<&'a mut T> { if self.tail == self.head { return None; } self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len()); unsafe { let elem = self.ring.get_unchecked_mut(self.head); Some(&mut *(elem as *mut _)) } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} /// A by-value VecDeque iterator #[derive(Clone)] #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter<T> { inner: VecDeque<T>, } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Iterator for IntoIter<T> { type Item = T; #[inline] fn next(&mut self) -> Option<T> { self.inner.pop_front() } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { let len = self.inner.len(); (len, Some(len)) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> DoubleEndedIterator for IntoIter<T> { #[inline] fn next_back(&mut self) -> Option<T> { self.inner.pop_back() } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> ExactSizeIterator for IntoIter<T> {} /// A draining VecDeque iterator #[unstable(feature = "collections", reason = "matches collection reform specification, waiting for dust to settle")] pub struct Drain<'a, T: 'a> { inner: &'a mut VecDeque<T>, } 
#[unsafe_destructor]
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> Drop for Drain<'a, T> {
    fn drop(&mut self) {
        // Exhaust the iterator so every remaining element is dropped, then
        // reset the deque to the canonical empty state.
        for _ in self.by_ref() {}
        self.inner.head = 0;
        self.inner.tail = 0;
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> Iterator for Drain<'a, T> {
    type Item = T;

    // Draining yields front-to-back by popping the front.
    #[inline]
    fn next(&mut self) -> Option<T> {
        self.inner.pop_front()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.inner.len();
        (len, Some(len))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.inner.pop_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialEq> PartialEq for VecDeque<A> {
    // Element-wise equality in logical (front-to-back) order; physical
    // layout (tail/head positions) does not matter.
    fn eq(&self, other: &VecDeque<A>) -> bool {
        self.len() == other.len() &&
            self.iter().zip(other.iter()).all(|(a, b)| a.eq(b))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Eq> Eq for VecDeque<A> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialOrd> PartialOrd for VecDeque<A> {
    // Lexicographic comparison over logical order.
    fn partial_cmp(&self, other: &VecDeque<A>) -> Option<Ordering> {
        iter::order::partial_cmp(self.iter(), other.iter())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Ord> Ord for VecDeque<A> {
    #[inline]
    fn cmp(&self, other: &VecDeque<A>) -> Ordering {
        iter::order::cmp(self.iter(), other.iter())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Hash> Hash for VecDeque<A> {
    // Hash length first, then each element in logical order, so equal
    // deques hash equally regardless of physical layout.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.len().hash(state);
        for elt in self {
            elt.hash(state);
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Index<usize> for VecDeque<A> {
    type Output = A;

    // Panics on out-of-bounds; `get` is the non-panicking variant.
    #[inline]
    fn index(&self, i: &usize) -> &A {
        self.get(*i).expect("Out of bounds access")
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> IndexMut<usize> for VecDeque<A> {
    #[inline]
    fn index_mut(&mut self, i: &usize) -> &mut A {
        self.get_mut(*i).expect("Out of bounds access")
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> FromIterator<A> for VecDeque<A> {
    fn from_iter<T: IntoIterator<Item=A>>(iterable: T) -> VecDeque<A> {
        // Pre-size from the iterator's lower bound to avoid regrowing.
        let iterator = iterable.into_iter();
        let (lower, _) = iterator.size_hint();
        let mut deq = VecDeque::with_capacity(lower);
        deq.extend(iterator);
        deq
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for VecDeque<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    fn into_iter(self) -> IntoIter<T> {
        // NOTE: not recursion — inherent methods take precedence over trait
        // methods, so this dispatches to the inherent `VecDeque::into_iter`.
        self.into_iter()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a VecDeque<T> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a mut VecDeque<T> {
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T>;

    fn into_iter(mut self) -> IterMut<'a, T> {
        self.iter_mut()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Extend<A> for VecDeque<A> {
    // Elements are appended at the back, preserving iterator order.
    fn extend<T: IntoIterator<Item=A>>(&mut self, iter: T) {
        for elt in iter {
            self.push_back(elt);
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug> fmt::Debug for VecDeque<T> {
    // Renders as a `[a, b, c]` style list in logical order.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        try!(write!(f, "["));

        for (i, e) in self.iter().enumerate() {
            if i != 0 { try!(write!(f, ", ")); }
            try!(write!(f, "{:?}", *e));
        }

        write!(f, "]")
    }
}

#[cfg(test)]
mod test {
    use core::iter::{IteratorExt, self};
    use core::option::Option::Some;

    use test;

    use super::VecDeque;

    // NOTE: the benches below reset `head`/`tail` directly (private fields)
    // so each b.iter() pass starts from an empty deque without reallocating.

    #[bench]
    fn bench_push_back_100(b: &mut test::Bencher) {
        let mut deq = VecDeque::with_capacity(101);
        b.iter(|| {
            for i in 0..100 {
                deq.push_back(i);
            }
            deq.head = 0;
            deq.tail = 0;
        })
    }

    #[bench]
    fn bench_push_front_100(b: &mut test::Bencher) {
        let mut deq = VecDeque::with_capacity(101);
        b.iter(|| {
            for i in 0..100 {
                deq.push_front(i);
            }
            deq.head = 0;
            deq.tail = 0;
        })
    }

    #[bench]
    fn bench_pop_back_100(b: &mut test::Bencher) {
        let mut deq= VecDeque::<i32>::with_capacity(101);

        b.iter(|| {
            // Pretend the deque holds 100 elements (indices 0..100).
            deq.head = 100;
            deq.tail = 0;
            while !deq.is_empty() {
                test::black_box(deq.pop_back());
            }
        })
    }

    #[bench]
    fn bench_pop_front_100(b: &mut test::Bencher) {
        let mut deq = VecDeque::<i32>::with_capacity(101);

        b.iter(|| {
            deq.head = 100;
            deq.tail = 0;
            while !deq.is_empty() {
                test::black_box(deq.pop_front());
            }
        })
    }

    #[test]
    fn test_swap_front_back_remove() {
        fn test(back: bool) {
            // This test checks that every single combination of tail position and length is tested.
            // Capacity 15 should be large enough to cover every case.
            let mut tester = VecDeque::with_capacity(15);
            let usable_cap = tester.capacity();
            let final_len = usable_cap / 2;

            for len in 0..final_len {
                let expected = if back {
                    (0..len).collect()
                } else {
                    (0..len).rev().collect()
                };
                for tail_pos in 0..usable_cap {
                    tester.tail = tail_pos;
                    tester.head = tail_pos;
                    if back {
                        for i in 0..len * 2 {
                            tester.push_front(i);
                        }
                        for i in 0..len {
                            assert_eq!(tester.swap_back_remove(i), Some(len * 2 - 1 - i));
                        }
                    } else {
                        for i in 0..len * 2 {
                            tester.push_back(i);
                        }
                        for i in 0..len {
                            let idx = tester.len() - 1 - i;
                            assert_eq!(tester.swap_front_remove(idx), Some(len * 2 - 1 - i));
                        }
                    }
                    assert!(tester.tail < tester.cap);
                    assert!(tester.head < tester.cap);
                    assert_eq!(tester, expected);
                }
            }
        }
        test(true);
        test(false);
    }

    #[test]
    fn test_insert() {
        // This test checks that every single combination of tail position, length, and
        // insertion position is tested. Capacity 15 should be large enough to cover every case.
        let mut tester = VecDeque::with_capacity(15);
        // can't guarantee we got 15, so have to get what we got.
        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
        // this test isn't covering what it wants to
        let cap = tester.capacity();


        // len is the length *after* insertion
        for len in 1..cap {
            // 0, 1, 2, .., len - 1
            let expected = iter::count(0, 1).take(len).collect();
            for tail_pos in 0..cap {
                for to_insert in 0..len {
                    tester.tail = tail_pos;
                    tester.head = tail_pos;
                    for i in 0..len {
                        if i != to_insert {
                            tester.push_back(i);
                        }
                    }
                    tester.insert(to_insert, to_insert);
                    assert!(tester.tail < tester.cap);
                    assert!(tester.head < tester.cap);
                    assert_eq!(tester, expected);
                }
            }
        }
    }

    #[test]
    fn test_remove() {
        // This test checks that every single combination of tail position, length, and
        // removal position is tested. Capacity 15 should be large enough to cover every case.
        let mut tester = VecDeque::with_capacity(15);
        // can't guarantee we got 15, so have to get what we got.
        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
        // this test isn't covering what it wants to
        let cap = tester.capacity();

        // len is the length *after* removal
        for len in 0..cap - 1 {
            // 0, 1, 2, .., len - 1
            let expected = iter::count(0, 1).take(len).collect();
            for tail_pos in 0..cap {
                for to_remove in 0..len + 1 {
                    tester.tail = tail_pos;
                    tester.head = tail_pos;
                    for i in 0..len {
                        if i == to_remove {
                            // Sentinel value that `remove` is expected to take out.
                            tester.push_back(1234);
                        }
                        tester.push_back(i);
                    }
                    if to_remove == len {
                        tester.push_back(1234);
                    }
                    tester.remove(to_remove);
                    assert!(tester.tail < tester.cap);
                    assert!(tester.head < tester.cap);
                    assert_eq!(tester, expected);
                }
            }
        }
    }

    #[test]
    fn test_shrink_to_fit() {
        // This test checks that every single combination of head and tail position,
        // is tested. Capacity 15 should be large enough to cover every case.
        let mut tester = VecDeque::with_capacity(15);
        // can't guarantee we got 15, so have to get what we got.
        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
        // this test isn't covering what it wants to
        let cap = tester.capacity();
        tester.reserve(63);
        let max_cap = tester.capacity();

        for len in 0..cap + 1 {
            // 0, 1, 2, .., len - 1
            let expected = iter::count(0, 1).take(len).collect();
            for tail_pos in 0..max_cap + 1 {
                tester.tail = tail_pos;
                tester.head = tail_pos;
                tester.reserve(63);
                for i in 0..len {
                    tester.push_back(i);
                }
                tester.shrink_to_fit();
                assert!(tester.capacity() <= cap);
                assert!(tester.tail < tester.cap);
                assert!(tester.head < tester.cap);
                assert_eq!(tester, expected);
            }
        }
    }

    #[test]
    fn test_split_off() {
        // This test checks that every single combination of tail position, length, and
        // split position is tested. Capacity 15 should be large enough to cover every case.
        let mut tester = VecDeque::with_capacity(15);
        // can't guarantee we got 15, so have to get what we got.
        // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
        // this test isn't covering what it wants to
        let cap = tester.capacity();

        // len is the length *before* splitting
        for len in 0..cap {
            // index to split at
            for at in 0..len + 1 {
                // 0, 1, 2, .., at - 1 (may be empty)
                let expected_self = iter::count(0, 1).take(at).collect();
                // at, at + 1, .., len - 1 (may be empty)
                let expected_other = iter::count(at, 1).take(len - at).collect();

                for tail_pos in 0..cap {
                    tester.tail = tail_pos;
                    tester.head = tail_pos;
                    for i in 0..len {
                        tester.push_back(i);
                    }
                    let result = tester.split_off(at);
                    assert!(tester.tail < tester.cap);
                    assert!(tester.head < tester.cap);
                    assert!(result.tail < result.cap);
                    assert!(result.head < result.cap);
                    assert_eq!(tester, expected_self);
                    assert_eq!(result, expected_other);
                }
            }
        }
    }
}
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! VecDeque is a double-ended queue, which is implemented with the help of a //! growing ring buffer. //! //! This queue has `O(1)` amortized inserts and removals from both ends of the //! container. It also has `O(1)` indexing like a vector. The contained elements //! are not required to be copyable, and the queue will be sendable if the //! contained type is sendable. #![stable(feature = "rust1", since = "1.0.0")] use core::cmp::Ordering; use core::fmt; use core::iter::{repeat, FromIterator}; use core::mem; use core::ops::{Index, IndexMut}; use core::ptr; use core::slice; use core::hash::{Hash, Hasher}; use core::cmp; use alloc::raw_vec::RawVec; use super::range::RangeArgument; const INITIAL_CAPACITY: usize = 7; // 2^3 - 1 const MINIMUM_CAPACITY: usize = 1; // 2 - 1 #[cfg(target_pointer_width = "32")] const MAXIMUM_ZST_CAPACITY: usize = 1 << (32 - 1); // Largest possible power of two #[cfg(target_pointer_width = "64")] const MAXIMUM_ZST_CAPACITY: usize = 1 << (64 - 1); // Largest possible power of two /// `VecDeque` is a growable ring buffer, which can be used as a double-ended /// queue efficiently. /// /// The "default" usage of this type as a queue is to use `push_back` to add to /// the queue, and `pop_front` to remove from the queue. `extend` and `append` /// push onto the back in this manner, and iterating over `VecDeque` goes front /// to back. #[stable(feature = "rust1", since = "1.0.0")] pub struct VecDeque<T> { // tail and head are pointers into the buffer. 
Tail always points // to the first element that could be read, Head always points // to where data should be written. // If tail == head the buffer is empty. The length of the ringbuffer // is defined as the distance between the two. tail: usize, head: usize, buf: RawVec<T>, } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Clone> Clone for VecDeque<T> { fn clone(&self) -> VecDeque<T> { self.iter().cloned().collect() } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Drop for VecDeque<T> { #[unsafe_destructor_blind_to_params] fn drop(&mut self) { let (front, back) = self.as_mut_slices(); unsafe { // use drop for [T] ptr::drop_in_place(front); ptr::drop_in_place(back); } // RawVec handles deallocation } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Default for VecDeque<T> { #[inline] fn default() -> VecDeque<T> { VecDeque::new() } } impl<T> VecDeque<T> { /// Marginally more convenient #[inline] fn ptr(&self) -> *mut T { self.buf.ptr() } /// Marginally more convenient #[inline] fn cap(&self) -> usize { if mem::size_of::<T>() == 0 { // For zero sized types, we are always at maximum capacity MAXIMUM_ZST_CAPACITY } else { self.buf.cap() } } /// Turn ptr into a slice #[inline] unsafe fn buffer_as_slice(&self) -> &[T] { slice::from_raw_parts(self.ptr(), self.cap()) } /// Turn ptr into a mut slice #[inline] unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] { slice::from_raw_parts_mut(self.ptr(), self.cap()) } /// Moves an element out of the buffer #[inline] unsafe fn buffer_read(&mut self, off: usize) -> T { ptr::read(self.ptr().offset(off as isize)) } /// Writes an element into the buffer, moving it. #[inline] unsafe fn buffer_write(&mut self, off: usize, value: T) { ptr::write(self.ptr().offset(off as isize), value); } /// Returns true if and only if the buffer is at capacity #[inline] fn is_full(&self) -> bool { self.cap() - self.len() == 1 } /// Returns the index in the underlying buffer for a given logical element /// index. 
#[inline] fn wrap_index(&self, idx: usize) -> usize { wrap_index(idx, self.cap()) } /// Returns the index in the underlying buffer for a given logical element /// index + addend. #[inline] fn wrap_add(&self, idx: usize, addend: usize) -> usize { wrap_index(idx.wrapping_add(addend), self.cap()) } /// Returns the index in the underlying buffer for a given logical element /// index - subtrahend. #[inline] fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize { wrap_index(idx.wrapping_sub(subtrahend), self.cap()) } /// Copies a contiguous block of memory len long from src to dst #[inline] unsafe fn copy(&self, dst: usize, src: usize, len: usize) { debug_assert!(dst + len <= self.cap(), "cpy dst={} src={} len={} cap={}", dst, src, len, self.cap()); debug_assert!(src + len <= self.cap(), "cpy dst={} src={} len={} cap={}", dst, src, len, self.cap()); ptr::copy(self.ptr().offset(src as isize), self.ptr().offset(dst as isize), len); } /// Copies a contiguous block of memory len long from src to dst #[inline] unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) { debug_assert!(dst + len <= self.cap(), "cno dst={} src={} len={} cap={}", dst, src, len, self.cap()); debug_assert!(src + len <= self.cap(), "cno dst={} src={} len={} cap={}", dst, src, len, self.cap()); ptr::copy_nonoverlapping(self.ptr().offset(src as isize), self.ptr().offset(dst as isize), len); } /// Copies a potentially wrapping block of memory len long from src to dest. /// (abs(dst - src) + len) must be no larger than cap() (There must be at /// most one continuous overlapping region between src and dest). 
// Case analysis on (does dst start after src, does the src range wrap the
// end of the buffer, does the dst range wrap). Each arm issues 1-3
// contiguous `copy` calls, ordered so that data is never overwritten
// before it has been read.
unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) {
    #[allow(dead_code)]
    fn diff(a: usize, b: usize) -> usize {
        if a <= b { b - a } else { a - b }
    }
    // Precondition from the doc comment above: at most one continuous
    // overlapping region between src and dst (debug-checked only).
    debug_assert!(cmp::min(diff(dst, src), self.cap() - diff(dst, src)) + len <= self.cap(),
                  "wrc dst={} src={} len={} cap={}",
                  dst,
                  src,
                  len,
                  self.cap());

    if src == dst || len == 0 {
        return;
    }

    let dst_after_src = self.wrap_sub(dst, src) < len;

    let src_pre_wrap_len = self.cap() - src;
    let dst_pre_wrap_len = self.cap() - dst;
    let src_wraps = src_pre_wrap_len < len;
    let dst_wraps = dst_pre_wrap_len < len;

    match (dst_after_src, src_wraps, dst_wraps) {
        (_, false, false) => {
            // src doesn't wrap, dst doesn't wrap
            //
            //        S . . .
            // 1 [_ _ A A B B C C _]
            // 2 [_ _ A A A A B B _]
            //        D . . .
            //
            self.copy(dst, src, len);
        }
        (false, false, true) => {
            // dst before src, src doesn't wrap, dst wraps
            //
            //        S . . .
            // 1 [A A B B _ _ _ C C]
            // 2 [A A B B _ _ _ A A]
            // 3 [B B B B _ _ _ A A]
            //    . .           D .
            //
            self.copy(dst, src, dst_pre_wrap_len);
            self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
        }
        (true, false, true) => {
            // src before dst, src doesn't wrap, dst wraps
            //
            //              S . . .
            // 1 [C C _ _ _ A A B B]
            // 2 [B B _ _ _ A A B B]
            // 3 [B B _ _ _ A A A A]
            //    . .           D .
            //
            // Wrapped portion first so the tail of src isn't clobbered.
            self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
            self.copy(dst, src, dst_pre_wrap_len);
        }
        (false, true, false) => {
            // dst before src, src wraps, dst doesn't wrap
            //
            //    . .           S .
            // 1 [C C _ _ _ A A B B]
            // 2 [C C _ _ _ B B B B]
            // 3 [C C _ _ _ B B C C]
            //              D . . .
            //
            self.copy(dst, src, src_pre_wrap_len);
            self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
        }
        (true, true, false) => {
            // src before dst, src wraps, dst doesn't wrap
            //
            //    . .           S .
            // 1 [A A B B _ _ _ C C]
            // 2 [A A A A _ _ _ C C]
            // 3 [C C A A _ _ _ C C]
            //    D . . .
            //
            self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
            self.copy(dst, src, src_pre_wrap_len);
        }
        (false, true, true) => {
            // dst before src, src wraps, dst wraps
            //
            //    . . .         S .
            // 1 [A B C D _ E F G H]
            // 2 [A B C D _ E G H H]
            // 3 [A B C D _ E G H A]
            // 4 [B C C D _ E G H A]
            //    . .         D . .
            //
            debug_assert!(dst_pre_wrap_len > src_pre_wrap_len);
            let delta = dst_pre_wrap_len - src_pre_wrap_len;
            self.copy(dst, src, src_pre_wrap_len);
            self.copy(dst + src_pre_wrap_len, 0, delta);
            self.copy(0, delta, len - dst_pre_wrap_len);
        }
        (true, true, true) => {
            // src before dst, src wraps, dst wraps
            //
            //    . .         S . .
            // 1 [A B C D _ E F G H]
            // 2 [A A B D _ E F G H]
            // 3 [H A B D _ E F G H]
            // 4 [H A B D _ E F F G]
            //    . . .         D .
            //
            debug_assert!(src_pre_wrap_len > dst_pre_wrap_len);
            let delta = src_pre_wrap_len - dst_pre_wrap_len;
            self.copy(delta, 0, len - src_pre_wrap_len);
            self.copy(0, self.cap() - delta, delta);
            self.copy(dst, src, dst_pre_wrap_len);
        }
    }
}

/// Frobs the head and tail sections around to handle the fact that we
/// just reallocated. Unsafe because it trusts old_cap.
// After RawVec grows (always to a larger power of two), the logically
// contiguous contents may be split across the old wrap point; move the
// shorter piece so the deque is valid again under the new capacity.
#[inline]
unsafe fn handle_cap_increase(&mut self, old_cap: usize) {
    let new_cap = self.cap();

    // Move the shortest contiguous section of the ring buffer
    //    T             H
    //   [o o o o o o o . ]
    //    T             H
    // A [o o o o o o o . . . . . . . . . ]
    //        H T
    //   [o o . o o o o o ]
    //          T             H
    // B [. . . o o o o o o o . . . . . . ]
    //              H T
    //   [o o o o o . o o ]
    //              H                 T
    // C [o o o o o . . . . . . . . . o o ]

    if self.tail <= self.head {
        // A
        // Nop
    } else if self.head < old_cap - self.tail {
        // B
        self.copy_nonoverlapping(old_cap, 0, self.head);
        self.head += old_cap;
        debug_assert!(self.head > self.tail);
    } else {
        // C
        let new_tail = new_cap - (old_cap - self.tail);
        self.copy_nonoverlapping(new_tail, self.tail, old_cap - self.tail);
        self.tail = new_tail;
        debug_assert!(self.head < self.tail);
    }
    debug_assert!(self.head < self.cap());
    debug_assert!(self.tail < self.cap());
    // Capacity must stay a power of two for the wrap_index masking to work.
    debug_assert!(self.cap().count_ones() == 1);
}
}

impl<T> VecDeque<T> {
/// Creates an empty `VecDeque`.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new() -> VecDeque<T> {
    // Delegates to with_capacity; INITIAL_CAPACITY is defined earlier in
    // this file (out of view here).
    VecDeque::with_capacity(INITIAL_CAPACITY)
}

/// Creates an empty `VecDeque` with space for at least `n` elements.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(n: usize) -> VecDeque<T> {
    // +1 since the ringbuffer always leaves one space empty
    let cap = cmp::max(n + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
    // next_power_of_two wraps to 0 on overflow in release builds, so this
    // assert is the real overflow guard.
    assert!(cap > n, "capacity overflow");

    VecDeque {
        tail: 0,
        head: 0,
        buf: RawVec::with_capacity(cap),
    }
}

/// Retrieves an element in the `VecDeque` by index.
///
/// Returns `None` if `index` is out of bounds.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// assert_eq!(buf.get(1), Some(&4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get(&self, index: usize) -> Option<&T> {
    if index < self.len() {
        // Translate the logical index into a buffer slot before the raw read.
        let idx = self.wrap_add(self.tail, index);
        unsafe { Some(&*self.ptr().offset(idx as isize)) }
    } else {
        None
    }
}

/// Retrieves an element in the `VecDeque` mutably by index.
///
/// Returns `None` if `index` is out of bounds.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// if let Some(elem) = buf.get_mut(1) {
///     *elem = 7;
/// }
///
/// assert_eq!(buf[1], 7);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
    if index < self.len() {
        let idx = self.wrap_add(self.tail, index);
        unsafe { Some(&mut *self.ptr().offset(idx as isize)) }
    } else {
        None
    }
}

/// Swaps elements at indices `i` and `j`.
///
/// `i` and `j` may be equal.
///
/// Fails if there is no element with either index.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// buf.swap(0, 2);
/// assert_eq!(buf[0], 5);
/// assert_eq!(buf[2], 3);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap(&mut self, i: usize, j: usize) {
    assert!(i < self.len());
    assert!(j < self.len());
    let ri = self.wrap_add(self.tail, i);
    let rj = self.wrap_add(self.tail, j);
    // ptr::swap is safe for ri == rj (the `i == j` case).
    unsafe { ptr::swap(self.ptr().offset(ri as isize), self.ptr().offset(rj as isize)) }
}

/// Returns the number of elements the `VecDeque` can hold without
/// reallocating.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let buf: VecDeque<i32> = VecDeque::with_capacity(10);
/// assert!(buf.capacity() >= 10);
/// ```
// -1 because one slot of the ring buffer is always kept empty to
// distinguish full from empty.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
    self.cap() - 1
}

/// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
/// given `VecDeque`. Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it requests. Therefore
/// capacity can not be relied upon to be precisely minimal. Prefer `reserve` if future
/// insertions are expected.
///
/// # Panics
///
/// Panics if the new capacity overflows `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
/// buf.reserve_exact(10);
/// assert!(buf.capacity() >= 11);
/// ```
// Capacity must stay a power of two, so "exact" cannot actually be exact;
// simply forwarding to `reserve` is the whole implementation.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
    self.reserve(additional);
}

/// Reserves capacity for at least `additional` more elements to be inserted in the given
/// `VecDeque`. The collection may reserve more space to avoid frequent reallocations.
///
/// # Panics
///
/// Panics if the new capacity overflows `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
/// buf.reserve(10);
/// assert!(buf.capacity() >= 11);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
    let old_cap = self.cap();
    // +1 for the always-empty slot of the ring buffer.
    let used_cap = self.len() + 1;
    let new_cap = used_cap.checked_add(additional)
                          .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
                          .expect("capacity overflow");

    if new_cap > self.capacity() {
        self.buf.reserve_exact(used_cap, new_cap - used_cap);
        // Reallocation may have split the contents across the old wrap
        // point; fix head/tail up for the new capacity.
        unsafe {
            self.handle_cap_increase(old_cap);
        }
    }
}

/// Shrinks the capacity of the `VecDeque` as much as possible.
///
/// It will drop down as close as possible to the length but the allocator may still inform the
/// `VecDeque` that there is space for a few more elements.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::with_capacity(15);
/// buf.extend(0..4);
/// assert_eq!(buf.capacity(), 15);
/// buf.shrink_to_fit();
/// assert!(buf.capacity() >= 4);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn shrink_to_fit(&mut self) {
    // +1 since the ringbuffer always leaves one space empty
    // len + 1 can't overflow for an existing, well-formed ringbuffer.
    let target_cap = cmp::max(self.len() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
    if target_cap < self.cap() {
        // There are three cases of interest:
        //   All elements are out of desired bounds
        //   Elements are contiguous, and head is out of desired bounds
        //   Elements are discontiguous, and tail is out of desired bounds
        //
        // At all other times, element positions are unaffected.
        //
        // Indicates that elements at the head should be moved.
        let head_outside = self.head == 0 || self.head >= target_cap;
        // Move elements from out of desired bounds (positions after target_cap)
        if self.tail >= target_cap && head_outside {
            //                    T             H
            //   [. . . . . . . . o o o o o o o . ]
            //    T             H
            //   [o o o o o o o . ]
            unsafe {
                self.copy_nonoverlapping(0, self.tail, self.len());
            }
            self.head = self.len();
            self.tail = 0;
        } else if self.tail != 0 && self.tail < target_cap && head_outside {
            //          T             H
            //   [. . . o o o o o o o . . . . . . ]
            //        H T
            //   [o o . o o o o o ]
            let len = self.wrap_sub(self.head, target_cap);
            unsafe {
                self.copy_nonoverlapping(0, target_cap, len);
            }
            self.head = len;
            debug_assert!(self.head < self.tail);
        } else if self.tail >= target_cap {
            //              H                 T
            //   [o o o o o . . . . . . . . . o o ]
            //              H T
            //   [o o o o o . o o ]
            debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
            let len = self.cap() - self.tail;
            let new_tail = target_cap - len;
            unsafe {
                self.copy_nonoverlapping(new_tail, self.tail, len);
            }
            self.tail = new_tail;
            debug_assert!(self.head < self.tail);
        }

        self.buf.shrink_to_fit(target_cap);

        debug_assert!(self.head < self.cap());
        debug_assert!(self.tail < self.cap());
        debug_assert!(self.cap().count_ones() == 1);
    }
}

/// Shortens a `VecDeque`, dropping excess elements from the back.
///
/// If `len` is greater than the `VecDeque`'s current length, this has no
/// effect.
///
/// # Examples
///
/// ```
/// #![feature(deque_extras)]
///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(10);
/// buf.push_back(15);
/// buf.truncate(1);
/// assert_eq!(buf.len(), 1);
/// assert_eq!(Some(&5), buf.get(0));
/// ```
#[unstable(feature = "deque_extras",
           reason = "matches collection reform specification; waiting on panic semantics",
           issue = "27788")]
pub fn truncate(&mut self, len: usize) {
    // Pop-based loop keeps the drop order back-to-front and reuses
    // pop_back's wrap handling; O(excess) but each step is O(1).
    for _ in len..self.len() {
        self.pop_back();
    }
}

/// Returns a front-to-back iterator.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(3);
/// buf.push_back(4);
/// let b: &[_] = &[&5, &3, &4];
/// let c: Vec<&i32> = buf.iter().collect();
/// assert_eq!(&c[..], b);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<T> {
    // The iterator borrows the whole backing buffer as a slice and walks
    // it with the same tail/head wrap logic as the deque itself.
    Iter {
        tail: self.tail,
        head: self.head,
        ring: unsafe { self.buffer_as_slice() },
    }
}

/// Returns a front-to-back iterator that returns mutable references.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(3);
/// buf.push_back(4);
/// for num in buf.iter_mut() {
///     *num = *num - 2;
/// }
/// let b: &[_] = &[&mut 3, &mut 1, &mut 2];
/// assert_eq!(&buf.iter_mut().collect::<Vec<&mut i32>>()[..], b);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<T> {
    IterMut {
        tail: self.tail,
        head: self.head,
        ring: unsafe { self.buffer_as_mut_slice() },
    }
}

/// Returns a pair of slices which contain, in order, the contents of the
/// `VecDeque`.
// Contiguous case: all elements in the first slice, second is empty
// (obtained via the `split_at(0)` trick so both slices borrow `buf`).
// Wrapped case: first slice is tail..cap, second is 0..head.
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_slices(&self) -> (&[T], &[T]) {
    unsafe {
        let contiguous = self.is_contiguous();
        let buf = self.buffer_as_slice();
        if contiguous {
            let (empty, buf) = buf.split_at(0);
            (&buf[self.tail..self.head], empty)
        } else {
            let (mid, right) = buf.split_at(self.tail);
            let (left, _) = mid.split_at(self.head);
            (right, left)
        }
    }
}

/// Returns a pair of slices which contain, in order, the contents of the
/// `VecDeque`.
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
    unsafe {
        let contiguous = self.is_contiguous();
        // Copy head/tail out first: `buffer_as_mut_slice` borrows self
        // mutably, so the fields can't be read afterwards.
        let head = self.head;
        let tail = self.tail;
        let buf = self.buffer_as_mut_slice();

        if contiguous {
            let (empty, buf) = buf.split_at_mut(0);
            (&mut buf[tail..head], empty)
        } else {
            let (mid, right) = buf.split_at_mut(tail);
            let (left, _) = mid.split_at_mut(head);

            (right, left)
        }
    }
}

/// Returns the number of elements in the `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// assert_eq!(v.len(), 0);
/// v.push_back(1);
/// assert_eq!(v.len(), 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize {
    count(self.tail, self.head, self.cap())
}

/// Returns true if the buffer contains no elements
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// assert!(v.is_empty());
/// v.push_front(1);
/// assert!(!v.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
    self.len() == 0
}

/// Create a draining iterator that removes the specified range in the
/// `VecDeque` and yields the removed items.
///
/// Note 1: The element range is removed even if the iterator is not
/// consumed until the end.
///
/// Note 2: It is unspecified how many elements are removed from the deque,
/// if the `Drain` value is not dropped, but the borrow it holds expires
/// (eg. due to mem::forget).
///
/// # Panics
///
/// Panics if the starting point is greater than the end point or if
/// the end point is greater than the length of the vector.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
/// let mut v: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
/// assert_eq!(vec![3].into_iter().collect::<VecDeque<_>>(), v.drain(2..).collect());
/// assert_eq!(vec![1, 2].into_iter().collect::<VecDeque<_>>(), v);
///
/// // A full range clears all contents
/// v.drain(..);
/// assert!(v.is_empty());
/// ```
#[inline]
#[stable(feature = "drain", since = "1.6.0")]
pub fn drain<R>(&mut self, range: R) -> Drain<T>
    where R: RangeArgument<usize>
{
    // Memory safety
    //
    // When the Drain is first created, the source deque is shortened to
    // make sure no uninitialized or moved-from elements are accessible at
    // all if the Drain's destructor never gets to run.
    //
    // Drain will ptr::read out the values to remove.
    // When finished, the remaining data will be copied back to cover the hole,
    // and the head/tail values will be restored correctly.
    //
    let len = self.len();
    // Open-ended ranges default to 0 / len respectively.
    let start = *range.start().unwrap_or(&0);
    let end = *range.end().unwrap_or(&len);
    assert!(start <= end, "drain lower bound was too large");
    assert!(end <= len, "drain upper bound was too large");

    // The deque's elements are parted into three segments:
    // * self.tail  -> drain_tail
    // * drain_tail -> drain_head
    // * drain_head -> self.head
    //
    // T = self.tail; H = self.head; t = drain_tail; h = drain_head
    //
    // We store drain_tail as self.head, and drain_head and self.head as
    // after_tail and after_head respectively on the Drain. This also
    // truncates the effective array such that if the Drain is leaked, we
    // have forgotten about the potentially moved values after the start of
    // the drain.
    //
    //        T   t   h   H
    // [. . . o o x x o o . . .]
    //
    let drain_tail = self.wrap_add(self.tail, start);
    let drain_head = self.wrap_add(self.tail, end);
    let head = self.head;

    // "forget" about the values after the start of the drain until after
    // the drain is complete and the Drain destructor is run.
    self.head = drain_tail;

    Drain {
        // Raw pointer back to the deque so Drain::drop can restore
        // head/tail and move the trailing segment into place.
        deque: self as *mut _,
        after_tail: drain_head,
        after_head: head,
        iter: Iter {
            tail: drain_tail,
            head: drain_head,
            ring: unsafe { self.buffer_as_mut_slice() },
        },
    }
}

/// Clears the buffer, removing all values.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// v.push_back(1);
/// v.clear();
/// assert!(v.is_empty());
/// ```
// Implemented as a full-range drain so every element is dropped properly.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn clear(&mut self) {
    self.drain(..);
}

/// Provides a reference to the front element, or `None` if the sequence is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.front(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// assert_eq!(d.front(), Some(&1));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front(&self) -> Option<&T> {
    if !self.is_empty() {
        Some(&self[0])
    } else {
        None
    }
}

/// Provides a mutable reference to the front element, or `None` if the
/// sequence is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.front_mut(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// match d.front_mut() {
///     Some(x) => *x = 9,
///     None => (),
/// }
/// assert_eq!(d.front(), Some(&9));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front_mut(&mut self) -> Option<&mut T> {
    if !self.is_empty() {
        Some(&mut self[0])
    } else {
        None
    }
}

/// Provides a reference to the back element, or `None` if the sequence is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.back(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// assert_eq!(d.back(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back(&self) -> Option<&T> {
    if !self.is_empty() {
        Some(&self[self.len() - 1])
    } else {
        None
    }
}

/// Provides a mutable reference to the back element, or `None` if the
/// sequence is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.back(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// match d.back_mut() {
///     Some(x) => *x = 9,
///     None => (),
/// }
/// assert_eq!(d.back(), Some(&9));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back_mut(&mut self) -> Option<&mut T> {
    // len computed up front: `&mut self[...]` borrows self mutably, so
    // self.len() can't be called inside the index expression.
    let len = self.len();
    if !self.is_empty() {
        Some(&mut self[len - 1])
    } else {
        None
    }
}

/// Removes the first element and returns it, or `None` if the sequence is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// d.push_back(1);
/// d.push_back(2);
///
/// assert_eq!(d.pop_front(), Some(1));
/// assert_eq!(d.pop_front(), Some(2));
/// assert_eq!(d.pop_front(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop_front(&mut self) -> Option<T> {
    if self.is_empty() {
        None
    } else {
        let tail = self.tail;
        self.tail = self.wrap_add(self.tail, 1);
        // buffer_read moves the value out of the (now logically vacated) slot.
        unsafe { Some(self.buffer_read(tail)) }
    }
}

/// Inserts an element first in the sequence.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// d.push_front(1);
/// d.push_front(2);
/// assert_eq!(d.front(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_front(&mut self, value: T) {
    // Grow first if the one-empty-slot invariant would be violated.
    if self.is_full() {
        let old_cap = self.cap();
        self.buf.double();
        unsafe {
            self.handle_cap_increase(old_cap);
        }
        debug_assert!(!self.is_full());
    }

    self.tail = self.wrap_sub(self.tail, 1);
    let tail = self.tail;
    unsafe {
        self.buffer_write(tail, value);
    }
}

/// Appends an element to the back of a buffer
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(1);
/// buf.push_back(3);
/// assert_eq!(3, *buf.back().unwrap());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_back(&mut self, value: T) {
    if self.is_full() {
        let old_cap = self.cap();
        self.buf.double();
        unsafe {
            self.handle_cap_increase(old_cap);
        }
        debug_assert!(!self.is_full());
    }

    // Write at the current head, then advance it.
    let head = self.head;
    self.head = self.wrap_add(self.head, 1);
    unsafe { self.buffer_write(head, value) }
}

/// Removes the last element from a buffer and returns it, or `None` if
/// it is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.pop_back(), None);
/// buf.push_back(1);
/// buf.push_back(3);
/// assert_eq!(buf.pop_back(), Some(3));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop_back(&mut self) -> Option<T> {
    if self.is_empty() {
        None
    } else {
        self.head = self.wrap_sub(self.head, 1);
        let head = self.head;
        unsafe { Some(self.buffer_read(head)) }
    }
}

// The contents are contiguous in memory iff head has not wrapped past the
// end of the buffer relative to tail.
#[inline]
fn is_contiguous(&self) -> bool {
    self.tail <= self.head
}

/// Removes an element from anywhere in the `VecDeque` and returns it, replacing it with the
/// last element.
///
/// This does not preserve ordering, but is O(1).
///
/// Returns `None` if `index` is out of bounds.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_remove_back(0), None);
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
///
/// assert_eq!(buf.swap_remove_back(0), Some(1));
/// assert_eq!(buf.len(), 2);
/// assert_eq!(buf[0], 3);
/// assert_eq!(buf[1], 2);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_back(&mut self, index: usize) -> Option<T> {
    let length = self.len();
    if length > 0 && index < length - 1 {
        // Move the last element into `index`, then pop it off the back.
        self.swap(index, length - 1);
    } else if index >= length {
        return None;
    }
    self.pop_back()
}

/// Removes an element from anywhere in the `VecDeque` and returns it,
/// replacing it with the first element.
///
/// This does not preserve ordering, but is O(1).
///
/// Returns `None` if `index` is out of bounds.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_remove_front(0), None);
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
///
/// assert_eq!(buf.swap_remove_front(2), Some(3));
/// assert_eq!(buf.len(), 2);
/// assert_eq!(buf[0], 2);
/// assert_eq!(buf[1], 1);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_front(&mut self, index: usize) -> Option<T> {
    let length = self.len();
    if length > 0 && index < length && index != 0 {
        // Mirror of swap_remove_back: swap with the front, then pop_front.
        self.swap(index, 0);
    } else if index >= length {
        return None;
    }
    self.pop_front()
}

/// Inserts an element at `index` within the `VecDeque`. Whichever
/// end is closer to the insertion point will be moved to make room,
/// and all the affected elements will be moved to new positions.
///
/// # Panics
///
/// Panics if `index` is greater than `VecDeque`'s length
///
/// # Examples
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(10);
/// buf.push_back(12);
/// buf.insert(1, 11);
/// assert_eq!(Some(&11), buf.get(1));
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn insert(&mut self, index: usize, value: T) {
    assert!(index <= self.len(), "index out of bounds");
    if self.is_full() {
        let old_cap = self.cap();
        self.buf.double();
        unsafe {
            self.handle_cap_increase(old_cap);
        }
        debug_assert!(!self.is_full());
    }

    // Move the least number of elements in the ring buffer and insert
    // the given object
    //
    // At most len/2 - 1 elements will be moved. O(min(n, n-i))
    //
    // There are three main cases:
    //  Elements are contiguous
    //      - special case when tail is 0
    //  Elements are discontiguous and the insert is in the tail section
    //  Elements are discontiguous and the insert is in the head section
    //
    // For each of those there are two more cases:
    //  Insert is closer to tail
    //  Insert is closer to head
    //
    // Key: H - self.head
    //      T - self.tail
    //      o - Valid element
    //      I - Insertion element
    //      A - The element that should be after the insertion point
    //      M - Indicates element was moved

    let idx = self.wrap_add(self.tail, index);

    let distance_to_tail = index;
    let distance_to_head = self.len() - index;

    let contiguous = self.is_contiguous();

    // Match on (layout, which-end-is-closer, is-idx-in-tail-section); each
    // arm performs the minimal block moves, then the shared epilogue below
    // writes the new value into the gap that was opened.
    match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
        (true, true, _) if index == 0 => {
            // push_front
            //
            //       T
            //       I             H
            //      [A o o o o o o . . . . . . . . .]
            //
            //                       H         T
            //      [A o o o o o o o . . . . . I]
            //
            self.tail = self.wrap_sub(self.tail, 1);
        }
        (true, true, _) => {
            unsafe {
                // contiguous, insert closer to tail:
                //
                //             T   I         H
                //      [. . . o o A o o o o . . . . . .]
                //
                //           T               H
                //      [. . o o I A o o o o . . . . . .]
                //           M M
                //
                // contiguous, insert closer to tail and tail is 0:
                //
                //
                //       T   I         H
                //      [o o A o o o o . . . . . . . . .]
                //
                //                       H             T
                //      [o I A o o o o o . . . . . . . o]
                //       M                             M
                let new_tail = self.wrap_sub(self.tail, 1);

                self.copy(new_tail, self.tail, 1);
                // Already moved the tail, so we only copy `index - 1` elements.
                self.copy(self.tail, self.tail + 1, index - 1);

                self.tail = new_tail;
            }
        }
        (true, false, _) => {
            unsafe {
                //  contiguous, insert closer to head:
                //
                //             T       I     H
                //      [. . . o o o o A o o . . . . . .]
                //
                //             T               H
                //      [. . . o o o o I A o o . . . . .]
                //                       M M M
                self.copy(idx + 1, idx, self.head - idx);
                self.head = self.wrap_add(self.head, 1);
            }
        }
        (false, true, true) => {
            unsafe {
                // discontiguous, insert closer to tail, tail section:
                //
                //                   H         T   I
                //      [o o o o o o . . . . . o o A o o]
                //
                //                   H       T
                //      [o o o o o o . . . . o o I A o o]
                //                           M M
                self.copy(self.tail - 1, self.tail, index);
                self.tail -= 1;
            }
        }
        (false, false, true) => {
            unsafe {
                // discontiguous, insert closer to head, tail section:
                //
                //           H             T         I
                //      [o o . . . . . . . o o o o o A o]
                //
                //             H           T
                //      [o o o . . . . . . o o o o o I A]
                //       M M M                         M

                // copy elements up to new head
                self.copy(1, 0, self.head);

                // copy last element into empty spot at bottom of buffer
                self.copy(0, self.cap() - 1, 1);

                // move elements from idx to end forward not including ^ element
                self.copy(idx + 1, idx, self.cap() - 1 - idx);

                self.head += 1;
            }
        }
        (false, true, false) if idx == 0 => {
            unsafe {
                // discontiguous, insert is closer to tail, head section,
                // and is at index zero in the internal buffer:
                //
                //       I                   H     T
                //      [A o o o o o o o o o . . . o o o]
                //
                //                           H   T
                //      [A o o o o o o o o o . . o o o I]
                //                               M M M

                // copy elements up to new tail
                self.copy(self.tail - 1, self.tail, self.cap() - self.tail);

                // copy last element into empty spot at bottom of buffer
                self.copy(self.cap() - 1, 0, 1);

                self.tail -= 1;
            }
        }
        (false, true, false) => {
            unsafe {
                // discontiguous, insert closer to tail, head section:
                //
                //             I             H     T
                //      [o o o A o o o o o o . . . o o o]
                //
                //                           H   T
                //      [o o I A o o o o o o . . o o o o]
                //       M M                     M M M M

                // copy elements up to new tail
                self.copy(self.tail - 1, self.tail, self.cap() - self.tail);

                // copy last element into empty spot at bottom of buffer
                self.copy(self.cap() - 1, 0, 1);

                // move elements from idx-1 to end forward not including ^ element
                self.copy(0, 1, idx - 1);

                self.tail -= 1;
            }
        }
        (false, false, false) => {
            unsafe {
                // discontiguous, insert closer to head, head section:
                //
                //               I     H           T
                //      [o o o o A o o . . . . . . o o o]
                //
                //                     H           T
                //      [o o o o I A o o . . . . . o o o]
                //                 M M M
                self.copy(idx + 1, idx, self.head - idx);
                self.head += 1;
            }
        }
    }

    // tail might've been changed so we need to recalculate
    let new_idx = self.wrap_add(self.tail, index);
    unsafe {
        self.buffer_write(new_idx, value);
    }
}

/// Removes and returns the element at `index` from the `VecDeque`.
/// Whichever end is closer to the removal point will be moved to make
/// room, and all the affected elements will be moved to new positions.
/// Returns `None` if `index` is out of bounds.
///
/// # Examples
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
///
/// assert_eq!(buf.remove(1), Some(2));
/// assert_eq!(buf.get(1), Some(&3));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove(&mut self, index: usize) -> Option<T> {
    if self.is_empty() || self.len() <= index {
        return None;
    }

    // There are three main cases:
    //  Elements are contiguous
    //  Elements are discontiguous and the removal is in the tail section
    //  Elements are discontiguous and the removal is in the head section
    //      - special case when elements are technically contiguous,
    //        but self.head = 0
    //
    // For each of those there are two more cases:
    //  Insert is closer to tail
    //  Insert is closer to head
    //
    // Key: H - self.head
    //      T - self.tail
    //      o - Valid element
    //      x - Element marked for removal
    //      R - Indicates element that is being removed
    //      M - Indicates element was moved

    let idx = self.wrap_add(self.tail, index);

    // Read the element out first; the moves below then close the gap over
    // the now-logically-uninitialized slot.
    let elem = unsafe { Some(self.buffer_read(idx)) };

    let distance_to_tail = index;
    let distance_to_head = self.len() - index;

    let contiguous = self.is_contiguous();

    match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
        (true, true, _) => {
            unsafe {
                // contiguous, remove closer to tail:
                //
                //             T   R         H
                //      [. . . o o x o o o o . . . . . .]
                //
                //               T           H
                //      [. . . . o o o o o o . . . . . .]
                //               M M
                self.copy(self.tail + 1, self.tail, index);
                self.tail += 1;
            }
        }
        (true, false, _) => {
            unsafe {
                // contiguous, remove closer to head:
                //
                //             T       R     H
                //      [. . . o o o o x o o . . . . . .]
                //
                //             T           H
                //      [. . . o o o o o o . . . . . . .]
                //                     M M
                self.copy(idx, idx + 1, self.head - idx - 1);
                self.head -= 1;
            }
        }
        (false, true, true) => {
            unsafe {
                // discontiguous, remove closer to tail, tail section:
                //
                //                   H         T   R
                //      [o o o o o o . . . . . o o x o o]
                //
                //                   H           T
                //      [o o o o o o . . . . . . o o o o]
                //                               M M
                self.copy(self.tail + 1, self.tail, index);
                self.tail = self.wrap_add(self.tail, 1);
            }
        }
        (false, false, false) => {
            unsafe {
                // discontiguous, remove closer to head, head section:
                //
                //               R     H           T
                //      [o o o o x o o . . . . . . o o o]
                //
                //                   H             T
                //      [o o o o o o . . . . . . . o o o]
                //               M M
                self.copy(idx, idx + 1, self.head - idx - 1);
                self.head -= 1;
            }
        }
        (false, false, true) => {
            unsafe {
                // discontiguous, remove closer to head, tail section:
                //
                //             H           T         R
                //      [o o o . . . . . . o o o o o x o]
                //
                //           H             T
                //      [o o . . . . . . . o o o o o o o]
                //       M M                         M M M M
                //
                // or quasi-discontiguous, remove next to head, tail section:
                //
                //       H                 T         R
                //      [. . . . . . . . . o o o o o x o]
                //
                //                         T           H
                //      [. . . . . . . . . o o o o o o .]
                //                                   M

                // draw in elements in the tail section
                self.copy(idx, idx + 1, self.cap() - idx - 1);

                // Prevents underflow.
                if self.head != 0 {
                    // copy first element into empty spot
                    self.copy(self.cap() - 1, 0, 1);

                    // move elements in the head section backwards
                    self.copy(0, 1, self.head - 1);
                }

                self.head = self.wrap_sub(self.head, 1);
            }
        }
        (false, true, false) => {
            unsafe {
                // discontiguous, remove closer to tail, head section:
                //
                //           R               H     T
                //      [o o x o o o o o o o . . . o o o]
                //
                //                           H       T
                //      [o o o o o o o o o o . . . . o o]
                //       M M M                       M M

                // draw in elements up to idx
                self.copy(1, 0, idx);

                // copy last element into empty spot
                self.copy(0, self.cap() - 1, 1);

                // move elements from tail to end forward, excluding the last one
                self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);

                self.tail = self.wrap_add(self.tail, 1);
            }
        }
    }

    return elem;
}

/// Splits the collection into two at the given index.
///
/// Returns a newly allocated `Self`. `self` contains elements `[0, at)`,
/// and the returned `Self` contains elements `[at, len)`.
///
/// Note that the capacity of `self` does not change.
///
/// # Panics
///
/// Panics if `at > len`
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1,2,3].into_iter().collect();
/// let buf2 = buf.split_off(1);
/// // buf = [1], buf2 = [2, 3]
/// assert_eq!(buf.len(), 1);
/// assert_eq!(buf2.len(), 2);
/// ```
#[inline]
#[stable(feature = "split_off", since = "1.4.0")]
pub fn split_off(&mut self, at: usize) -> Self {
    let len = self.len();
    assert!(at <= len, "`at` out of bounds");

    let other_len = len - at;
    let mut other = VecDeque::with_capacity(other_len);

    unsafe {
        // The split point may fall in either of the (up to) two
        // contiguous slices of the ring; copy the appropriate pieces
        // into the fresh (tail = 0) buffer of `other`.
        let (first_half, second_half) = self.as_slices();

        let first_len = first_half.len();
        let second_len = second_half.len();
        if at < first_len {
            // `at` lies in the first half.
            let amount_in_first = first_len - at;

            ptr::copy_nonoverlapping(first_half.as_ptr().offset(at as isize),
                                     other.ptr(),
                                     amount_in_first);

            // just take all of the second half.
            ptr::copy_nonoverlapping(second_half.as_ptr(),
                                     other.ptr().offset(amount_in_first as isize),
                                     second_len);
        } else {
            // `at` lies in the second half, need to factor in the elements we skipped
            // in the first half.
            let offset = at - first_len;
            let amount_in_second = second_len - offset;
            ptr::copy_nonoverlapping(second_half.as_ptr().offset(offset as isize),
                                     other.ptr(),
                                     amount_in_second);
        }
    }

    // Cleanup where the ends of the buffers are
    // (the moved-out elements are simply forgotten by shrinking self.head;
    // other owns the bit-copies now).
    self.head = self.wrap_sub(self.head, other_len);
    other.head = other.wrap_index(other_len);

    other
}

/// Moves all the elements of `other` into `Self`, leaving `other` empty.
///
/// # Panics
///
/// Panics if the new number of elements in self overflows a `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
/// let mut buf2: VecDeque<_> = vec![4, 5, 6].into_iter().collect();
/// buf.append(&mut buf2);
/// assert_eq!(buf.len(), 6);
/// assert_eq!(buf2.len(), 0);
/// ```
#[inline]
#[stable(feature = "append", since = "1.4.0")]
pub fn append(&mut self, other: &mut Self) {
    // naive impl
    self.extend(other.drain(..));
}

/// Retains only the elements specified by the predicate.
///
/// In other words, remove all elements `e` such that `f(&e)` returns false.
/// This method operates in place and preserves the order of the retained
/// elements.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.extend(1..5);
    /// buf.retain(|&x| x%2 == 0);
    ///
    /// let v: Vec<_> = buf.into_iter().collect();
    /// assert_eq!(&v[..], &[2, 4]);
    /// ```
    #[stable(feature = "vec_deque_retain", since = "1.4.0")]
    pub fn retain<F>(&mut self, mut f: F)
        where F: FnMut(&T) -> bool
    {
        // In-place compaction: `del` counts rejected elements seen so far;
        // every kept element is swapped `del` slots towards the front, which
        // preserves the relative order of the kept elements.
        let len = self.len();
        let mut del = 0;
        for i in 0..len {
            if !f(&self[i]) {
                del += 1;
            } else if del > 0 {
                self.swap(i - del, i);
            }
        }
        // The rejected elements have all been swapped into the last `del`
        // logical slots; truncating drops them.
        if del > 0 {
            self.truncate(len - del);
        }
    }
}

impl<T: Clone> VecDeque<T> {
    /// Modifies the `VecDeque` in-place so that `len()` is equal to new_len,
    /// either by removing excess elements or by appending copies of a value to the back.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(deque_extras)]
    ///
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(5);
    /// buf.push_back(10);
    /// buf.push_back(15);
    /// buf.resize(2, 0);
    /// buf.resize(6, 20);
    /// for (a, b) in [5, 10, 20, 20, 20, 20].iter().zip(&buf) {
    ///     assert_eq!(a, b);
    /// }
    /// ```
    #[unstable(feature = "deque_extras",
               reason = "matches collection reform specification; waiting on panic semantics",
               issue = "27788")]
    pub fn resize(&mut self, new_len: usize, value: T) {
        let len = self.len();

        if new_len > len {
            // Grow: push `new_len - len` clones of `value` onto the back.
            self.extend(repeat(value).take(new_len - len))
        } else {
            // Shrink: drop the excess elements from the back.
            self.truncate(new_len);
        }
    }
}

/// Returns the index in the underlying buffer for a given logical element index.
#[inline]
fn wrap_index(index: usize, size: usize) -> usize {
    // size is always a power of 2, so masking with `size - 1` is equivalent
    // to (the much cheaper form of) `index % size`.
    debug_assert!(size.is_power_of_two());
    index & (size - 1)
}

/// Calculate the number of elements left to be read in the buffer
#[inline]
fn count(tail: usize, head: usize, size: usize) -> usize {
    // size is always a power of 2; `wrapping_sub` keeps this correct when
    // `head` is numerically smaller than `tail` (ring has wrapped around).
    (head.wrapping_sub(tail)) & (size - 1)
}

/// `VecDeque` iterator.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
    // The whole backing buffer as a slice: its length is the capacity (a
    // power of two — see `wrap_index`), not the element count. `tail` and
    // `head` are positions into it; `tail == head` means the iterator is done.
    ring: &'a [T],
    tail: usize,
    head: usize,
}

// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
// (hand-written so the impl does not require `T: Clone`, which a derive
// would currently add as a bound)
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Iter<'a, T> {
    fn clone(&self) -> Iter<'a, T> {
        Iter {
            ring: self.ring,
            tail: self.tail,
            head: self.head,
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;

    #[inline]
    fn next(&mut self) -> Option<&'a T> {
        if self.tail == self.head {
            return None;
        }
        let tail = self.tail;
        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
        // Indices are kept in `0..ring.len()` (masked by `wrap_index` on
        // every advance, in-range at construction by the deque's invariant),
        // so the unchecked access is in bounds.
        unsafe { Some(self.ring.get_unchecked(tail)) }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact: `count` computes the remaining distance around the ring.
        let len = count(self.tail, self.head, self.ring.len());
        (len, Some(len))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a T> {
        if self.tail == self.head {
            return None;
        }
        // `head` points one past the last element, so step back before reading.
        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
        unsafe { Some(self.ring.get_unchecked(self.head)) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {}

/// `VecDeque` mutable iterator.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
    // Same layout as `Iter`, but with unique access to the buffer.
    ring: &'a mut [T],
    tail: usize,
    head: usize,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for IterMut<'a, T> {
    type Item = &'a mut T;

    #[inline]
    fn next(&mut self) -> Option<&'a mut T> {
        if self.tail == self.head {
            return None;
        }
        let tail = self.tail;
        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());

        unsafe {
            // Reborrow through a raw pointer to detach the returned reference
            // from the `&mut self` borrow, stretching its lifetime to `'a`.
            // Each index is handed out at most once, so no two live `&mut`s
            // ever alias.
            let elem = self.ring.get_unchecked_mut(tail);
            Some(&mut *(elem as *mut _))
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = count(self.tail, self.head, self.ring.len());
        (len, Some(len))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut T> {
        if self.tail == self.head {
            return None;
        }
        // `head` points one past the last element, so step back before reading.
        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());

        unsafe {
            // Same raw-pointer reborrow as `next`; indices never repeat.
            let elem = self.ring.get_unchecked_mut(self.head);
            Some(&mut *(elem as *mut _))
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}

/// A by-value VecDeque iterator
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
    inner: VecDeque<T>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        // Owning iterator: simply dequeue from the front.
        self.inner.pop_front()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.inner.len();
        (len, Some(len))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.inner.pop_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {}

/// A draining VecDeque iterator
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a, T: 'a> {
    // `after_tail`/`after_head` delimit the retained section *after* the
    // drained range; `Drop for Drain` uses them to stitch the deque back
    // together.
    after_tail: usize,
    after_head: usize,
    iter: Iter<'a, T>,
    // Raw pointer back to the source deque (raw because `iter` already
    // borrows its buffer); dereferenced only in `Drop`.
    deque: *mut VecDeque<T>,
}

#[stable(feature = "drain",
         since = "1.6.0")]
// The raw `deque` pointer suppresses the automatic Send/Sync impls. A `Drain`
// behaves like the `&mut VecDeque<T>` it was created from, so forwarding
// `T: Send` / `T: Sync` restores the expected bounds.
unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<'a, T: Send> Send for Drain<'a, T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> Drop for Drain<'a, T> {
    fn drop(&mut self) {
        // Exhaust the iterator first so any unconsumed drained elements are
        // read out (and thereby dropped) exactly once.
        for _ in self.by_ref() {}

        let source_deque = unsafe { &mut *self.deque };

        // T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head
        //
        //        T   t   h   H
        // [. . . o o x x o o . . .]
        //
        // NOTE(review): at this point `source_deque.head` is expected to hold
        // the start of the drained range (set up by `drain`, outside this
        // excerpt); the real head was stashed in `self.after_head` — confirm
        // against `drain`'s construction of this struct.
        let orig_tail = source_deque.tail;
        let drain_tail = source_deque.head;
        let drain_head = self.after_tail;
        let orig_head = self.after_head;

        let tail_len = count(orig_tail, drain_tail, source_deque.cap());
        let head_len = count(drain_head, orig_head, source_deque.cap());

        // Restore the original head value
        source_deque.head = orig_head;

        match (tail_len, head_len) {
            (0, 0) => {
                // Everything was drained: reset to the canonical empty state.
                source_deque.head = 0;
                source_deque.tail = 0;
            }
            (0, _) => {
                // Nothing retained before the drained range: advance tail past it.
                source_deque.tail = drain_head;
            }
            (_, 0) => {
                // Nothing retained after the drained range: pull head back to it.
                source_deque.head = drain_tail;
            }
            _ => {
                // Elements retained on both sides: close the gap by moving
                // whichever retained section is shorter.
                unsafe {
                    if tail_len <= head_len {
                        source_deque.tail = source_deque.wrap_sub(drain_head, tail_len);
                        source_deque.wrap_copy(source_deque.tail, orig_tail, tail_len);
                    } else {
                        source_deque.head = source_deque.wrap_add(drain_tail, head_len);
                        source_deque.wrap_copy(drain_tail, drain_head, head_len);
                    }
                }
            }
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> Iterator for Drain<'a, T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        // `ptr::read` moves the value out bitwise; the vacated slot is left
        // logically dead (the index fix-up in `drop` never re-exposes it).
        self.iter.next().map(|elt| unsafe { ptr::read(elt) })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialEq> PartialEq for VecDeque<A> {
    fn
eq(&self, other: &VecDeque<A>) -> bool { if self.len() != other.len() { return false; } let (sa, sb) = self.as_slices(); let (oa, ob) = other.as_slices(); if sa.len() == oa.len() { sa == oa && sb == ob } else if sa.len() < oa.len() { // Always divisible in three sections, for example: // self: [a b c|d e f] // other: [0 1 2 3|4 5] // front = 3, mid = 1, // [a b c] == [0 1 2] && [d] == [3] && [e f] == [4 5] let front = sa.len(); let mid = oa.len() - front; let (oa_front, oa_mid) = oa.split_at(front); let (sb_mid, sb_back) = sb.split_at(mid); debug_assert_eq!(sa.len(), oa_front.len()); debug_assert_eq!(sb_mid.len(), oa_mid.len()); debug_assert_eq!(sb_back.len(), ob.len()); sa == oa_front && sb_mid == oa_mid && sb_back == ob } else { let front = oa.len(); let mid = sa.len() - front; let (sa_front, sa_mid) = sa.split_at(front); let (ob_mid, ob_back) = ob.split_at(mid); debug_assert_eq!(sa_front.len(), oa.len()); debug_assert_eq!(sa_mid.len(), ob_mid.len()); debug_assert_eq!(sb.len(), ob_back.len()); sa_front == oa && sa_mid == ob_mid && sb == ob_back } } } #[stable(feature = "rust1", since = "1.0.0")] impl<A: Eq> Eq for VecDeque<A> {} #[stable(feature = "rust1", since = "1.0.0")] impl<A: PartialOrd> PartialOrd for VecDeque<A> { fn partial_cmp(&self, other: &VecDeque<A>) -> Option<Ordering> { self.iter().partial_cmp(other.iter()) } } #[stable(feature = "rust1", since = "1.0.0")] impl<A: Ord> Ord for VecDeque<A> { #[inline] fn cmp(&self, other: &VecDeque<A>) -> Ordering { self.iter().cmp(other.iter()) } } #[stable(feature = "rust1", since = "1.0.0")] impl<A: Hash> Hash for VecDeque<A> { fn hash<H: Hasher>(&self, state: &mut H) { self.len().hash(state); let (a, b) = self.as_slices(); Hash::hash_slice(a, state); Hash::hash_slice(b, state); } } #[stable(feature = "rust1", since = "1.0.0")] impl<A> Index<usize> for VecDeque<A> { type Output = A; #[inline] fn index(&self, index: usize) -> &A { self.get(index).expect("Out of bounds access") } } #[stable(feature = "rust1", 
since = "1.0.0")] impl<A> IndexMut<usize> for VecDeque<A> { #[inline] fn index_mut(&mut self, index: usize) -> &mut A { self.get_mut(index).expect("Out of bounds access") } } #[stable(feature = "rust1", since = "1.0.0")] impl<A> FromIterator<A> for VecDeque<A> { fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> VecDeque<A> { let iterator = iter.into_iter(); let (lower, _) = iterator.size_hint(); let mut deq = VecDeque::with_capacity(lower); deq.extend(iterator); deq } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> IntoIterator for VecDeque<T> { type Item = T; type IntoIter = IntoIter<T>; /// Consumes the list into a front-to-back iterator yielding elements by /// value. fn into_iter(self) -> IntoIter<T> { IntoIter { inner: self } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> IntoIterator for &'a VecDeque<T> { type Item = &'a T; type IntoIter = Iter<'a, T>; fn into_iter(self) -> Iter<'a, T> { self.iter() } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> IntoIterator for &'a mut VecDeque<T> { type Item = &'a mut T; type IntoIter = IterMut<'a, T>; fn into_iter(mut self) -> IterMut<'a, T> { self.iter_mut() } } #[stable(feature = "rust1", since = "1.0.0")] impl<A> Extend<A> for VecDeque<A> { fn extend<T: IntoIterator<Item = A>>(&mut self, iter: T) { for elt in iter { self.push_back(elt); } } } #[stable(feature = "extend_ref", since = "1.2.0")] impl<'a, T: 'a + Copy> Extend<&'a T> for VecDeque<T> { fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) { self.extend(iter.into_iter().cloned()); } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: fmt::Debug> fmt::Debug for VecDeque<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self).finish() } } #[cfg(test)] mod tests { use core::iter::Iterator; use core::option::Option::Some; use test; use super::VecDeque; #[bench] fn bench_push_back_100(b: &mut test::Bencher) { let mut deq = VecDeque::with_capacity(101); b.iter(|| { for i in 0..100 { 
deq.push_back(i); } deq.head = 0; deq.tail = 0; }) } #[bench] fn bench_push_front_100(b: &mut test::Bencher) { let mut deq = VecDeque::with_capacity(101); b.iter(|| { for i in 0..100 { deq.push_front(i); } deq.head = 0; deq.tail = 0; }) } #[bench] fn bench_pop_back_100(b: &mut test::Bencher) { let mut deq = VecDeque::<i32>::with_capacity(101); b.iter(|| { deq.head = 100; deq.tail = 0; while !deq.is_empty() { test::black_box(deq.pop_back()); } }) } #[bench] fn bench_pop_front_100(b: &mut test::Bencher) { let mut deq = VecDeque::<i32>::with_capacity(101); b.iter(|| { deq.head = 100; deq.tail = 0; while !deq.is_empty() { test::black_box(deq.pop_front()); } }) } #[test] fn test_swap_front_back_remove() { fn test(back: bool) { // This test checks that every single combination of tail position and length is tested. // Capacity 15 should be large enough to cover every case. let mut tester = VecDeque::with_capacity(15); let usable_cap = tester.capacity(); let final_len = usable_cap / 2; for len in 0..final_len { let expected = if back { (0..len).collect() } else { (0..len).rev().collect() }; for tail_pos in 0..usable_cap { tester.tail = tail_pos; tester.head = tail_pos; if back { for i in 0..len * 2 { tester.push_front(i); } for i in 0..len { assert_eq!(tester.swap_remove_back(i), Some(len * 2 - 1 - i)); } } else { for i in 0..len * 2 { tester.push_back(i); } for i in 0..len { let idx = tester.len() - 1 - i; assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i)); } } assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); assert_eq!(tester, expected); } } } test(true); test(false); } #[test] fn test_insert() { // This test checks that every single combination of tail position, length, and // insertion position is tested. Capacity 15 should be large enough to cover every case. let mut tester = VecDeque::with_capacity(15); // can't guarantee we got 15, so have to get what we got. 
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else // this test isn't covering what it wants to let cap = tester.capacity(); // len is the length *after* insertion for len in 1..cap { // 0, 1, 2, .., len - 1 let expected = (0..).take(len).collect(); for tail_pos in 0..cap { for to_insert in 0..len { tester.tail = tail_pos; tester.head = tail_pos; for i in 0..len { if i != to_insert { tester.push_back(i); } } tester.insert(to_insert, to_insert); assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); assert_eq!(tester, expected); } } } } #[test] fn test_remove() { // This test checks that every single combination of tail position, length, and // removal position is tested. Capacity 15 should be large enough to cover every case. let mut tester = VecDeque::with_capacity(15); // can't guarantee we got 15, so have to get what we got. // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else // this test isn't covering what it wants to let cap = tester.capacity(); // len is the length *after* removal for len in 0..cap - 1 { // 0, 1, 2, .., len - 1 let expected = (0..).take(len).collect(); for tail_pos in 0..cap { for to_remove in 0..len + 1 { tester.tail = tail_pos; tester.head = tail_pos; for i in 0..len { if i == to_remove { tester.push_back(1234); } tester.push_back(i); } if to_remove == len { tester.push_back(1234); } tester.remove(to_remove); assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); assert_eq!(tester, expected); } } } } #[test] fn test_drain() { let mut tester: VecDeque<usize> = VecDeque::with_capacity(7); let cap = tester.capacity(); for len in 0..cap + 1 { for tail in 0..cap + 1 { for drain_start in 0..len + 1 { for drain_end in drain_start..len + 1 { tester.tail = tail; tester.head = tail; for i in 0..len { tester.push_back(i); } // Check that we drain the correct values let drained: VecDeque<_> = tester.drain(drain_start..drain_end).collect(); let drained_expected: 
VecDeque<_> = (drain_start..drain_end).collect(); assert_eq!(drained, drained_expected); // We shouldn't have changed the capacity or made the // head or tail out of bounds assert_eq!(tester.capacity(), cap); assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); // We should see the correct values in the VecDeque let expected: VecDeque<_> = (0..drain_start) .chain(drain_end..len) .collect(); assert_eq!(expected, tester); } } } } } #[test] fn test_shrink_to_fit() { // This test checks that every single combination of head and tail position, // is tested. Capacity 15 should be large enough to cover every case. let mut tester = VecDeque::with_capacity(15); // can't guarantee we got 15, so have to get what we got. // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else // this test isn't covering what it wants to let cap = tester.capacity(); tester.reserve(63); let max_cap = tester.capacity(); for len in 0..cap + 1 { // 0, 1, 2, .., len - 1 let expected = (0..).take(len).collect(); for tail_pos in 0..max_cap + 1 { tester.tail = tail_pos; tester.head = tail_pos; tester.reserve(63); for i in 0..len { tester.push_back(i); } tester.shrink_to_fit(); assert!(tester.capacity() <= cap); assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); assert_eq!(tester, expected); } } } #[test] fn test_split_off() { // This test checks that every single combination of tail position, length, and // split position is tested. Capacity 15 should be large enough to cover every case. let mut tester = VecDeque::with_capacity(15); // can't guarantee we got 15, so have to get what we got. 
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else // this test isn't covering what it wants to let cap = tester.capacity(); // len is the length *before* splitting for len in 0..cap { // index to split at for at in 0..len + 1 { // 0, 1, 2, .., at - 1 (may be empty) let expected_self = (0..).take(at).collect(); // at, at + 1, .., len - 1 (may be empty) let expected_other = (at..).take(len - at).collect(); for tail_pos in 0..cap { tester.tail = tail_pos; tester.head = tail_pos; for i in 0..len { tester.push_back(i); } let result = tester.split_off(at); assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); assert!(result.tail < result.cap()); assert!(result.head < result.cap()); assert_eq!(tester, expected_self); assert_eq!(result, expected_other); } } } } } Auto merge of #32866 - davidhewitt:master, r=apasel422 Implement `From<Vec<T>>` and `Into<Vec<T>>` for `VecDeque<T>` // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! VecDeque is a double-ended queue, which is implemented with the help of a //! growing ring buffer. //! //! This queue has `O(1)` amortized inserts and removals from both ends of the //! container. It also has `O(1)` indexing like a vector. The contained elements //! are not required to be copyable, and the queue will be sendable if the //! contained type is sendable. 
#![stable(feature = "rust1", since = "1.0.0")] use core::cmp::Ordering; use core::fmt; use core::iter::{repeat, FromIterator}; use core::mem; use core::ops::{Index, IndexMut}; use core::ptr; use core::slice; use core::hash::{Hash, Hasher}; use core::cmp; use alloc::raw_vec::RawVec; use super::range::RangeArgument; use super::vec::Vec; const INITIAL_CAPACITY: usize = 7; // 2^3 - 1 const MINIMUM_CAPACITY: usize = 1; // 2 - 1 #[cfg(target_pointer_width = "32")] const MAXIMUM_ZST_CAPACITY: usize = 1 << (32 - 1); // Largest possible power of two #[cfg(target_pointer_width = "64")] const MAXIMUM_ZST_CAPACITY: usize = 1 << (64 - 1); // Largest possible power of two /// `VecDeque` is a growable ring buffer, which can be used as a double-ended /// queue efficiently. /// /// The "default" usage of this type as a queue is to use `push_back` to add to /// the queue, and `pop_front` to remove from the queue. `extend` and `append` /// push onto the back in this manner, and iterating over `VecDeque` goes front /// to back. #[stable(feature = "rust1", since = "1.0.0")] pub struct VecDeque<T> { // tail and head are pointers into the buffer. Tail always points // to the first element that could be read, Head always points // to where data should be written. // If tail == head the buffer is empty. The length of the ringbuffer // is defined as the distance between the two. 
tail: usize, head: usize, buf: RawVec<T>, } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Clone> Clone for VecDeque<T> { fn clone(&self) -> VecDeque<T> { self.iter().cloned().collect() } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Drop for VecDeque<T> { #[unsafe_destructor_blind_to_params] fn drop(&mut self) { let (front, back) = self.as_mut_slices(); unsafe { // use drop for [T] ptr::drop_in_place(front); ptr::drop_in_place(back); } // RawVec handles deallocation } } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Default for VecDeque<T> { #[inline] fn default() -> VecDeque<T> { VecDeque::new() } } impl<T> VecDeque<T> { /// Marginally more convenient #[inline] fn ptr(&self) -> *mut T { self.buf.ptr() } /// Marginally more convenient #[inline] fn cap(&self) -> usize { if mem::size_of::<T>() == 0 { // For zero sized types, we are always at maximum capacity MAXIMUM_ZST_CAPACITY } else { self.buf.cap() } } /// Turn ptr into a slice #[inline] unsafe fn buffer_as_slice(&self) -> &[T] { slice::from_raw_parts(self.ptr(), self.cap()) } /// Turn ptr into a mut slice #[inline] unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] { slice::from_raw_parts_mut(self.ptr(), self.cap()) } /// Moves an element out of the buffer #[inline] unsafe fn buffer_read(&mut self, off: usize) -> T { ptr::read(self.ptr().offset(off as isize)) } /// Writes an element into the buffer, moving it. #[inline] unsafe fn buffer_write(&mut self, off: usize, value: T) { ptr::write(self.ptr().offset(off as isize), value); } /// Returns true if and only if the buffer is at capacity #[inline] fn is_full(&self) -> bool { self.cap() - self.len() == 1 } /// Returns the index in the underlying buffer for a given logical element /// index. #[inline] fn wrap_index(&self, idx: usize) -> usize { wrap_index(idx, self.cap()) } /// Returns the index in the underlying buffer for a given logical element /// index + addend. 
#[inline] fn wrap_add(&self, idx: usize, addend: usize) -> usize { wrap_index(idx.wrapping_add(addend), self.cap()) } /// Returns the index in the underlying buffer for a given logical element /// index - subtrahend. #[inline] fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize { wrap_index(idx.wrapping_sub(subtrahend), self.cap()) } /// Copies a contiguous block of memory len long from src to dst #[inline] unsafe fn copy(&self, dst: usize, src: usize, len: usize) { debug_assert!(dst + len <= self.cap(), "cpy dst={} src={} len={} cap={}", dst, src, len, self.cap()); debug_assert!(src + len <= self.cap(), "cpy dst={} src={} len={} cap={}", dst, src, len, self.cap()); ptr::copy(self.ptr().offset(src as isize), self.ptr().offset(dst as isize), len); } /// Copies a contiguous block of memory len long from src to dst #[inline] unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) { debug_assert!(dst + len <= self.cap(), "cno dst={} src={} len={} cap={}", dst, src, len, self.cap()); debug_assert!(src + len <= self.cap(), "cno dst={} src={} len={} cap={}", dst, src, len, self.cap()); ptr::copy_nonoverlapping(self.ptr().offset(src as isize), self.ptr().offset(dst as isize), len); } /// Copies a potentially wrapping block of memory len long from src to dest. /// (abs(dst - src) + len) must be no larger than cap() (There must be at /// most one continuous overlapping region between src and dest). 
unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) { #[allow(dead_code)] fn diff(a: usize, b: usize) -> usize { if a <= b { b - a } else { a - b } } debug_assert!(cmp::min(diff(dst, src), self.cap() - diff(dst, src)) + len <= self.cap(), "wrc dst={} src={} len={} cap={}", dst, src, len, self.cap()); if src == dst || len == 0 { return; } let dst_after_src = self.wrap_sub(dst, src) < len; let src_pre_wrap_len = self.cap() - src; let dst_pre_wrap_len = self.cap() - dst; let src_wraps = src_pre_wrap_len < len; let dst_wraps = dst_pre_wrap_len < len; match (dst_after_src, src_wraps, dst_wraps) { (_, false, false) => { // src doesn't wrap, dst doesn't wrap // // S . . . // 1 [_ _ A A B B C C _] // 2 [_ _ A A A A B B _] // D . . . // self.copy(dst, src, len); } (false, false, true) => { // dst before src, src doesn't wrap, dst wraps // // S . . . // 1 [A A B B _ _ _ C C] // 2 [A A B B _ _ _ A A] // 3 [B B B B _ _ _ A A] // . . D . // self.copy(dst, src, dst_pre_wrap_len); self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len); } (true, false, true) => { // src before dst, src doesn't wrap, dst wraps // // S . . . // 1 [C C _ _ _ A A B B] // 2 [B B _ _ _ A A B B] // 3 [B B _ _ _ A A A A] // . . D . // self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len); self.copy(dst, src, dst_pre_wrap_len); } (false, true, false) => { // dst before src, src wraps, dst doesn't wrap // // . . S . // 1 [C C _ _ _ A A B B] // 2 [C C _ _ _ B B B B] // 3 [C C _ _ _ B B C C] // D . . . // self.copy(dst, src, src_pre_wrap_len); self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len); } (true, true, false) => { // src before dst, src wraps, dst doesn't wrap // // . . S . // 1 [A A B B _ _ _ C C] // 2 [A A A A _ _ _ C C] // 3 [C C A A _ _ _ C C] // D . . . // self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len); self.copy(dst, src, src_pre_wrap_len); } (false, true, true) => { // dst before src, src wraps, dst wraps // // . . . S . 
// 1 [A B C D _ E F G H] // 2 [A B C D _ E G H H] // 3 [A B C D _ E G H A] // 4 [B C C D _ E G H A] // . . D . . // debug_assert!(dst_pre_wrap_len > src_pre_wrap_len); let delta = dst_pre_wrap_len - src_pre_wrap_len; self.copy(dst, src, src_pre_wrap_len); self.copy(dst + src_pre_wrap_len, 0, delta); self.copy(0, delta, len - dst_pre_wrap_len); } (true, true, true) => { // src before dst, src wraps, dst wraps // // . . S . . // 1 [A B C D _ E F G H] // 2 [A A B D _ E F G H] // 3 [H A B D _ E F G H] // 4 [H A B D _ E F F G] // . . . D . // debug_assert!(src_pre_wrap_len > dst_pre_wrap_len); let delta = src_pre_wrap_len - dst_pre_wrap_len; self.copy(delta, 0, len - src_pre_wrap_len); self.copy(0, self.cap() - delta, delta); self.copy(dst, src, dst_pre_wrap_len); } } } /// Frobs the head and tail sections around to handle the fact that we /// just reallocated. Unsafe because it trusts old_cap. #[inline] unsafe fn handle_cap_increase(&mut self, old_cap: usize) { let new_cap = self.cap(); // Move the shortest contiguous section of the ring buffer // T H // [o o o o o o o . ] // T H // A [o o o o o o o . . . . . . . . . ] // H T // [o o . o o o o o ] // T H // B [. . . o o o o o o o . . . . . . ] // H T // [o o o o o . o o ] // H T // C [o o o o o . . . . . . . . . o o ] if self.tail <= self.head { // A // Nop } else if self.head < old_cap - self.tail { // B self.copy_nonoverlapping(old_cap, 0, self.head); self.head += old_cap; debug_assert!(self.head > self.tail); } else { // C let new_tail = new_cap - (old_cap - self.tail); self.copy_nonoverlapping(new_tail, self.tail, old_cap - self.tail); self.tail = new_tail; debug_assert!(self.head < self.tail); } debug_assert!(self.head < self.cap()); debug_assert!(self.tail < self.cap()); debug_assert!(self.cap().count_ones() == 1); } } impl<T> VecDeque<T> { /// Creates an empty `VecDeque`. 
    // NOTE(review): ring-buffer invariants used throughout this impl:
    //  - capacity (`self.cap()`) is always a power of two, so logical->physical
    //    index mapping can be done with a mask (see free fn `wrap_index` below);
    //  - one slot is always left empty so that `tail == head` unambiguously
    //    means "empty" rather than "full".
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn new() -> VecDeque<T> {
        VecDeque::with_capacity(INITIAL_CAPACITY)
    }

    /// Creates an empty `VecDeque` with space for at least `n` elements.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn with_capacity(n: usize) -> VecDeque<T> {
        // +1 since the ringbuffer always leaves one space empty
        let cap = cmp::max(n + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
        // `next_power_of_two` returns 0 on overflow in release builds, in which
        // case `cap > n` fails and we panic instead of allocating a tiny buffer.
        assert!(cap > n, "capacity overflow");

        VecDeque {
            tail: 0,
            head: 0,
            buf: RawVec::with_capacity(cap),
        }
    }

    /// Retrieves an element in the `VecDeque` by index.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(3);
    /// buf.push_back(4);
    /// buf.push_back(5);
    /// assert_eq!(buf.get(1), Some(&4));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get(&self, index: usize) -> Option<&T> {
        if index < self.len() {
            // Translate the logical index to a physical buffer slot.
            let idx = self.wrap_add(self.tail, index);
            unsafe { Some(&*self.ptr().offset(idx as isize)) }
        } else {
            None
        }
    }

    /// Retrieves an element in the `VecDeque` mutably by index.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(3);
    /// buf.push_back(4);
    /// buf.push_back(5);
    /// if let Some(elem) = buf.get_mut(1) {
    ///     *elem = 7;
    /// }
    ///
    /// assert_eq!(buf[1], 7);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
        if index < self.len() {
            let idx = self.wrap_add(self.tail, index);
            unsafe { Some(&mut *self.ptr().offset(idx as isize)) }
        } else {
            None
        }
    }

    /// Swaps elements at indices `i` and `j`.
    ///
    /// `i` and `j` may be equal.
    ///
    /// Fails if there is no element with either index.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(3);
    /// buf.push_back(4);
    /// buf.push_back(5);
    /// buf.swap(0, 2);
    /// assert_eq!(buf[0], 5);
    /// assert_eq!(buf[2], 3);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&mut self, i: usize, j: usize) {
        assert!(i < self.len());
        assert!(j < self.len());
        let ri = self.wrap_add(self.tail, i);
        let rj = self.wrap_add(self.tail, j);
        unsafe {
            // ptr::swap is safe for ri == rj (it is a read/read/write sequence
            // through two temporaries, not a copy_nonoverlapping).
            ptr::swap(self.ptr().offset(ri as isize),
                      self.ptr().offset(rj as isize))
        }
    }

    /// Returns the number of elements the `VecDeque` can hold without
    /// reallocating.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let buf: VecDeque<i32> = VecDeque::with_capacity(10);
    /// assert!(buf.capacity() >= 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn capacity(&self) -> usize {
        // -1 because one slot is always kept empty (full/empty disambiguation).
        self.cap() - 1
    }

    /// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
    /// given `VecDeque`. Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it requests. Therefore
    /// capacity can not be relied upon to be precisely minimal. Prefer `reserve` if future
    /// insertions are expected.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
    /// buf.reserve_exact(10);
    /// assert!(buf.capacity() >= 11);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve_exact(&mut self, additional: usize) {
        // The ring buffer must stay power-of-two sized, so an "exact" reserve
        // cannot do better than the rounding `reserve` already performs.
        self.reserve(additional);
    }

    /// Reserves capacity for at least `additional` more elements to be inserted in the given
    /// `VecDeque`. The collection may reserve more space to avoid frequent reallocations.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
    /// buf.reserve(10);
    /// assert!(buf.capacity() >= 11);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve(&mut self, additional: usize) {
        let old_cap = self.cap();
        let used_cap = self.len() + 1;
        // Round the needed capacity up to a power of two; either checked step
        // can overflow, in which case we panic with "capacity overflow".
        let new_cap = used_cap.checked_add(additional)
                              .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
                              .expect("capacity overflow");

        if new_cap > self.capacity() {
            self.buf.reserve_exact(used_cap, new_cap - used_cap);
            unsafe {
                // handle_cap_increase re-establishes the ring layout after the
                // buffer grew (defined earlier in this file, outside this chunk).
                self.handle_cap_increase(old_cap);
            }
        }
    }

    /// Shrinks the capacity of the `VecDeque` as much as possible.
    ///
    /// It will drop down as close as possible to the length but the allocator may still inform the
    /// `VecDeque` that there is space for a few more elements.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::with_capacity(15);
    /// buf.extend(0..4);
    /// assert_eq!(buf.capacity(), 15);
    /// buf.shrink_to_fit();
    /// assert!(buf.capacity() >= 4);
    /// ```
    #[stable(feature = "deque_extras_15", since = "1.5.0")]
    pub fn shrink_to_fit(&mut self) {
        // +1 since the ringbuffer always leaves one space empty
        // len + 1 can't overflow for an existing, well-formed ringbuffer.
        let target_cap = cmp::max(self.len() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
        if target_cap < self.cap() {
            // There are three cases of interest:
            // All elements are out of desired bounds
            // Elements are contiguous, and head is out of desired bounds
            // Elements are discontiguous, and tail is out of desired bounds
            //
            // At all other times, element positions are unaffected.
            //
            // Indicates that elements at the head should be moved.
            let head_outside = self.head == 0 || self.head >= target_cap;
            // Move elements from out of desired bounds (positions after target_cap)
            if self.tail >= target_cap && head_outside {
                // T H
                // [. . . . . . . . o o o o o o o . ]
                // T H
                // [o o o o o o o . ]
                unsafe {
                    self.copy_nonoverlapping(0, self.tail, self.len());
                }
                self.head = self.len();
                self.tail = 0;
            } else if self.tail != 0 && self.tail < target_cap && head_outside {
                // T H
                // [. . . o o o o o o o . . . . . . ]
                // H T
                // [o o . o o o o o ]
                let len = self.wrap_sub(self.head, target_cap);
                unsafe {
                    self.copy_nonoverlapping(0, target_cap, len);
                }
                self.head = len;
                debug_assert!(self.head < self.tail);
            } else if self.tail >= target_cap {
                // H T
                // [o o o o o . . . . . . . . . o o ]
                // H T
                // [o o o o o . o o ]
                debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
                let len = self.cap() - self.tail;
                let new_tail = target_cap - len;
                unsafe {
                    self.copy_nonoverlapping(new_tail, self.tail, len);
                }
                self.tail = new_tail;
                debug_assert!(self.head < self.tail);
            }

            self.buf.shrink_to_fit(target_cap);

            // Re-check the ring invariants after the reallocation.
            debug_assert!(self.head < self.cap());
            debug_assert!(self.tail < self.cap());
            debug_assert!(self.cap().count_ones() == 1);
        }
    }

    /// Shortens a `VecDeque`, dropping excess elements from the back.
    ///
    /// If `len` is greater than the `VecDeque`'s current length, this has no
    /// effect.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(deque_extras)]
    ///
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(5);
    /// buf.push_back(10);
    /// buf.push_back(15);
    /// buf.truncate(1);
    /// assert_eq!(buf.len(), 1);
    /// assert_eq!(Some(&5), buf.get(0));
    /// ```
    #[unstable(feature = "deque_extras",
               reason = "matches collection reform specification; waiting on panic semantics",
               issue = "27788")]
    pub fn truncate(&mut self, len: usize) {
        // Pop (and drop) one element at a time; `len..self.len()` is an empty
        // range when `len >= self.len()`, giving the documented no-op.
        for _ in len..self.len() {
            self.pop_back();
        }
    }

    /// Returns a front-to-back iterator.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(5);
    /// buf.push_back(3);
    /// buf.push_back(4);
    /// let b: &[_] = &[&5, &3, &4];
    /// let c: Vec<&i32> = buf.iter().collect();
    /// assert_eq!(&c[..], b);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter(&self) -> Iter<T> {
        // The iterator borrows the whole backing buffer and walks tail..head
        // with wrapping arithmetic (see the `Iter` impl below).
        Iter {
            tail: self.tail,
            head: self.head,
            ring: unsafe { self.buffer_as_slice() },
        }
    }

    /// Returns a front-to-back iterator that returns mutable references.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(5);
    /// buf.push_back(3);
    /// buf.push_back(4);
    /// for num in buf.iter_mut() {
    ///     *num = *num - 2;
    /// }
    /// let b: &[_] = &[&mut 3, &mut 1, &mut 2];
    /// assert_eq!(&buf.iter_mut().collect::<Vec<&mut i32>>()[..], b);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter_mut(&mut self) -> IterMut<T> {
        IterMut {
            tail: self.tail,
            head: self.head,
            ring: unsafe { self.buffer_as_mut_slice() },
        }
    }

    /// Returns a pair of slices which contain, in order, the contents of the
    /// `VecDeque`.
    #[inline]
    #[stable(feature = "deque_extras_15", since = "1.5.0")]
    pub fn as_slices(&self) -> (&[T], &[T]) {
        unsafe {
            let contiguous = self.is_contiguous();
            let buf = self.buffer_as_slice();
            if contiguous {
                // Contiguous: all elements are in tail..head; second slice is
                // a zero-length slice obtained via split_at(0).
                let (empty, buf) = buf.split_at(0);
                (&buf[self.tail..self.head], empty)
            } else {
                // Wrapped: front of the deque is buf[tail..], back is buf[..head].
                let (mid, right) = buf.split_at(self.tail);
                let (left, _) = mid.split_at(self.head);
                (right, left)
            }
        }
    }

    /// Returns a pair of slices which contain, in order, the contents of the
    /// `VecDeque`.
    #[inline]
    #[stable(feature = "deque_extras_15", since = "1.5.0")]
    pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
        unsafe {
            let contiguous = self.is_contiguous();
            let head = self.head;
            let tail = self.tail;
            let buf = self.buffer_as_mut_slice();

            if contiguous {
                let (empty, buf) = buf.split_at_mut(0);
                (&mut buf[tail..head], empty)
            } else {
                let (mid, right) = buf.split_at_mut(tail);
                let (left, _) = mid.split_at_mut(head);

                (right, left)
            }
        }
    }

    /// Returns the number of elements in the `VecDeque`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut v = VecDeque::new();
    /// assert_eq!(v.len(), 0);
    /// v.push_back(1);
    /// assert_eq!(v.len(), 1);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn len(&self) -> usize {
        // Delegates to the free fn `count` (wrapping subtraction + mask).
        count(self.tail, self.head, self.cap())
    }

    /// Returns true if the buffer contains no elements
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut v = VecDeque::new();
    /// assert!(v.is_empty());
    /// v.push_front(1);
    /// assert!(!v.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Create a draining iterator that removes the specified range in the
    /// `VecDeque` and yields the removed items.
    ///
    /// Note 1: The element range is removed even if the iterator is not
    /// consumed until the end.
    ///
    /// Note 2: It is unspecified how many elements are removed from the deque,
    /// if the `Drain` value is not dropped, but the borrow it holds expires
    /// (eg. due to mem::forget).
    ///
    /// # Panics
    ///
    /// Panics if the starting point is greater than the end point or if
    /// the end point is greater than the length of the vector.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut v: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
    /// assert_eq!(vec![3].into_iter().collect::<VecDeque<_>>(), v.drain(2..).collect());
    /// assert_eq!(vec![1, 2].into_iter().collect::<VecDeque<_>>(), v);
    ///
    /// // A full range clears all contents
    /// v.drain(..);
    /// assert!(v.is_empty());
    /// ```
    #[inline]
    #[stable(feature = "drain", since = "1.6.0")]
    pub fn drain<R>(&mut self, range: R) -> Drain<T>
        where R: RangeArgument<usize>
    {
        // Memory safety
        //
        // When the Drain is first created, the source deque is shortened to
        // make sure no uninitialized or moved-from elements are accessible at
        // all if the Drain's destructor never gets to run.
        //
        // Drain will ptr::read out the values to remove.
        // When finished, the remaining data will be copied back to cover the hole,
        // and the head/tail values will be restored correctly.
        //
        let len = self.len();
        let start = *range.start().unwrap_or(&0);
        let end = *range.end().unwrap_or(&len);
        assert!(start <= end, "drain lower bound was too large");
        assert!(end <= len, "drain upper bound was too large");

        // The deque's elements are parted into three segments:
        // * self.tail -> drain_tail
        // * drain_tail -> drain_head
        // * drain_head -> self.head
        //
        // T = self.tail; H = self.head; t = drain_tail; h = drain_head
        //
        // We store drain_tail as self.head, and drain_head and self.head as
        // after_tail and after_head respectively on the Drain. This also
        // truncates the effective array such that if the Drain is leaked, we
        // have forgotten about the potentially moved values after the start of
        // the drain.
        //
        // T t h H
        // [. . . o o x x o o . . .]
        //
        let drain_tail = self.wrap_add(self.tail, start);
        let drain_head = self.wrap_add(self.tail, end);
        let head = self.head;

        // "forget" about the values after the start of the drain until after
        // the drain is complete and the Drain destructor is run.
        self.head = drain_tail;

        Drain {
            deque: self as *mut _,
            after_tail: drain_head,
            after_head: head,
            iter: Iter {
                tail: drain_tail,
                head: drain_head,
                // NOTE(review): the Drain iterator only ever reads elements
                // inside drain_tail..drain_head; the mutable-slice borrow is
                // decoupled from `self` via the raw `deque` pointer above.
                ring: unsafe { self.buffer_as_mut_slice() },
            },
        }
    }

    /// Clears the buffer, removing all values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut v = VecDeque::new();
    /// v.push_back(1);
    /// v.clear();
    /// assert!(v.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn clear(&mut self) {
        // Dropping the Drain iterator drops every remaining element.
        self.drain(..);
    }

    /// Provides a reference to the front element, or `None` if the sequence is
    /// empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// assert_eq!(d.front(), None);
    ///
    /// d.push_back(1);
    /// d.push_back(2);
    /// assert_eq!(d.front(), Some(&1));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn front(&self) -> Option<&T> {
        if !self.is_empty() {
            Some(&self[0])
        } else {
            None
        }
    }

    /// Provides a mutable reference to the front element, or `None` if the
    /// sequence is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// assert_eq!(d.front_mut(), None);
    ///
    /// d.push_back(1);
    /// d.push_back(2);
    /// match d.front_mut() {
    ///     Some(x) => *x = 9,
    ///     None => (),
    /// }
    /// assert_eq!(d.front(), Some(&9));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn front_mut(&mut self) -> Option<&mut T> {
        if !self.is_empty() {
            Some(&mut self[0])
        } else {
            None
        }
    }

    /// Provides a reference to the back element, or `None` if the sequence is
    /// empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// assert_eq!(d.back(), None);
    ///
    /// d.push_back(1);
    /// d.push_back(2);
    /// assert_eq!(d.back(), Some(&2));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn back(&self) -> Option<&T> {
        if !self.is_empty() {
            Some(&self[self.len() - 1])
        } else {
            None
        }
    }

    /// Provides a mutable reference to the back element, or `None` if the
    /// sequence is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// assert_eq!(d.back(), None);
    ///
    /// d.push_back(1);
    /// d.push_back(2);
    /// match d.back_mut() {
    ///     Some(x) => *x = 9,
    ///     None => (),
    /// }
    /// assert_eq!(d.back(), Some(&9));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn back_mut(&mut self) -> Option<&mut T> {
        // `len` is read before the mutable index borrow to satisfy borrowck.
        let len = self.len();
        if !self.is_empty() {
            Some(&mut self[len - 1])
        } else {
            None
        }
    }

    /// Removes the first element and returns it, or `None` if the sequence is
    /// empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// d.push_back(1);
    /// d.push_back(2);
    ///
    /// assert_eq!(d.pop_front(), Some(1));
    /// assert_eq!(d.pop_front(), Some(2));
    /// assert_eq!(d.pop_front(), None);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn pop_front(&mut self) -> Option<T> {
        if self.is_empty() {
            None
        } else {
            let tail = self.tail;
            self.tail = self.wrap_add(self.tail, 1);
            // buffer_read moves the value out of the slot (raw read).
            unsafe { Some(self.buffer_read(tail)) }
        }
    }

    /// Inserts an element first in the sequence.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut d = VecDeque::new();
    /// d.push_front(1);
    /// d.push_front(2);
    /// assert_eq!(d.front(), Some(&2));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn push_front(&mut self, value: T) {
        // Amortized O(1): double the buffer when the ring is full.
        if self.is_full() {
            let old_cap = self.cap();
            self.buf.double();
            unsafe {
                self.handle_cap_increase(old_cap);
            }
            debug_assert!(!self.is_full());
        }

        self.tail = self.wrap_sub(self.tail, 1);
        let tail = self.tail;
        unsafe {
            self.buffer_write(tail, value);
        }
    }

    /// Appends an element to the back of a buffer
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(1);
    /// buf.push_back(3);
    /// assert_eq!(3, *buf.back().unwrap());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn push_back(&mut self, value: T) {
        if self.is_full() {
            let old_cap = self.cap();
            self.buf.double();
            unsafe {
                self.handle_cap_increase(old_cap);
            }
            debug_assert!(!self.is_full());
        }

        let head = self.head;
        self.head = self.wrap_add(self.head, 1);
        unsafe { self.buffer_write(head, value) }
    }

    /// Removes the last element from a buffer and returns it, or `None` if
    /// it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// assert_eq!(buf.pop_back(), None);
    /// buf.push_back(1);
    /// buf.push_back(3);
    /// assert_eq!(buf.pop_back(), Some(3));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn pop_back(&mut self) -> Option<T> {
        if self.is_empty() {
            None
        } else {
            self.head = self.wrap_sub(self.head, 1);
            let head = self.head;
            unsafe { Some(self.buffer_read(head)) }
        }
    }

    // True when the elements occupy a single run tail..head (no wrap-around).
    #[inline]
    fn is_contiguous(&self) -> bool {
        self.tail <= self.head
    }

    /// Removes an element from anywhere in the `VecDeque` and returns it, replacing it with the
    /// last element.
    ///
    /// This does not preserve ordering, but is O(1).
    ///
    /// Returns `None` if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// assert_eq!(buf.swap_remove_back(0), None);
    /// buf.push_back(1);
    /// buf.push_back(2);
    /// buf.push_back(3);
    ///
    /// assert_eq!(buf.swap_remove_back(0), Some(1));
    /// assert_eq!(buf.len(), 2);
    /// assert_eq!(buf[0], 3);
    /// assert_eq!(buf[1], 2);
    /// ```
    #[stable(feature = "deque_extras_15", since = "1.5.0")]
    pub fn swap_remove_back(&mut self, index: usize) -> Option<T> {
        let length = self.len();
        if length > 0 && index < length - 1 {
            self.swap(index, length - 1);
        } else if index >= length {
            return None;
        }
        self.pop_back()
    }

    /// Removes an element from anywhere in the `VecDeque` and returns it,
    /// replacing it with the first element.
    ///
    /// This does not preserve ordering, but is O(1).
    ///
    /// Returns `None` if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// assert_eq!(buf.swap_remove_front(0), None);
    /// buf.push_back(1);
    /// buf.push_back(2);
    /// buf.push_back(3);
    ///
    /// assert_eq!(buf.swap_remove_front(2), Some(3));
    /// assert_eq!(buf.len(), 2);
    /// assert_eq!(buf[0], 2);
    /// assert_eq!(buf[1], 1);
    /// ```
    #[stable(feature = "deque_extras_15", since = "1.5.0")]
    pub fn swap_remove_front(&mut self, index: usize) -> Option<T> {
        let length = self.len();
        if length > 0 && index < length && index != 0 {
            self.swap(index, 0);
        } else if index >= length {
            return None;
        }
        self.pop_front()
    }

    /// Inserts an element at `index` within the `VecDeque`. Whichever
    /// end is closer to the insertion point will be moved to make room,
    /// and all the affected elements will be moved to new positions.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than `VecDeque`'s length
    ///
    /// # Examples
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(10);
    /// buf.push_back(12);
    /// buf.insert(1, 11);
    /// assert_eq!(Some(&11), buf.get(1));
    /// ```
    #[stable(feature = "deque_extras_15", since = "1.5.0")]
    pub fn insert(&mut self, index: usize, value: T) {
        assert!(index <= self.len(), "index out of bounds");
        if self.is_full() {
            let old_cap = self.cap();
            self.buf.double();
            unsafe {
                self.handle_cap_increase(old_cap);
            }
            debug_assert!(!self.is_full());
        }

        // Move the least number of elements in the ring buffer and insert
        // the given object
        //
        // At most len/2 - 1 elements will be moved. O(min(n, n-i))
        //
        // There are three main cases:
        // Elements are contiguous
        // - special case when tail is 0
        // Elements are discontiguous and the insert is in the tail section
        // Elements are discontiguous and the insert is in the head section
        //
        // For each of those there are two more cases:
        // Insert is closer to tail
        // Insert is closer to head
        //
        // Key: H - self.head
        // T - self.tail
        // o - Valid element
        // I - Insertion element
        // A - The element that should be after the insertion point
        // M - Indicates element was moved

        let idx = self.wrap_add(self.tail, index);

        let distance_to_tail = index;
        let distance_to_head = self.len() - index;

        let contiguous = self.is_contiguous();

        // NOTE(review): `self.copy(dst, src, len)` is presumably a raw
        // (possibly overlapping) memmove within the buffer, defined earlier in
        // this file — confirm before touching any of the arms below.
        match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
            (true, true, _) if index == 0 => {
                // push_front
                //
                // T
                // I H
                // [A o o o o o o . . . . . . . . .]
                //
                // H T
                // [A o o o o o o o . . . . . I]
                //
                self.tail = self.wrap_sub(self.tail, 1);
            }
            (true, true, _) => {
                unsafe {
                    // contiguous, insert closer to tail:
                    //
                    // T I H
                    // [. . . o o A o o o o . . . . . .]
                    //
                    // T H
                    // [. . o o I A o o o o . . . . . .]
                    // M M
                    //
                    // contiguous, insert closer to tail and tail is 0:
                    //
                    //
                    // T I H
                    // [o o A o o o o . . . . . . . . .]
                    //
                    // H T
                    // [o I A o o o o o . . . . . . . o]
                    // M M

                    let new_tail = self.wrap_sub(self.tail, 1);

                    self.copy(new_tail, self.tail, 1);
                    // Already moved the tail, so we only copy `index - 1` elements.
                    self.copy(self.tail, self.tail + 1, index - 1);

                    self.tail = new_tail;
                }
            }
            (true, false, _) => {
                unsafe {
                    // contiguous, insert closer to head:
                    //
                    // T I H
                    // [. . . o o o o A o o . . . . . .]
                    //
                    // T H
                    // [. . . o o o o I A o o . . . . .]
                    // M M M
                    self.copy(idx + 1, idx, self.head - idx);
                    self.head = self.wrap_add(self.head, 1);
                }
            }
            (false, true, true) => {
                unsafe {
                    // discontiguous, insert closer to tail, tail section:
                    //
                    // H T I
                    // [o o o o o o . . . . . o o A o o]
                    //
                    // H T
                    // [o o o o o o . . . . o o I A o o]
                    // M M
                    self.copy(self.tail - 1, self.tail, index);
                    self.tail -= 1;
                }
            }
            (false, false, true) => {
                unsafe {
                    // discontiguous, insert closer to head, tail section:
                    //
                    // H T I
                    // [o o . . . . . . . o o o o o A o]
                    //
                    // H T
                    // [o o o . . . . . . o o o o o I A]
                    // M M M M

                    // copy elements up to new head
                    self.copy(1, 0, self.head);

                    // copy last element into empty spot at bottom of buffer
                    self.copy(0, self.cap() - 1, 1);

                    // move elements from idx to end forward not including ^ element
                    self.copy(idx + 1, idx, self.cap() - 1 - idx);

                    self.head += 1;
                }
            }
            (false, true, false) if idx == 0 => {
                unsafe {
                    // discontiguous, insert is closer to tail, head section,
                    // and is at index zero in the internal buffer:
                    //
                    // I H T
                    // [A o o o o o o o o o . . . o o o]
                    //
                    // H T
                    // [A o o o o o o o o o . . o o o I]
                    // M M M

                    // copy elements up to new tail
                    self.copy(self.tail - 1, self.tail, self.cap() - self.tail);

                    // copy last element into empty spot at bottom of buffer
                    self.copy(self.cap() - 1, 0, 1);

                    self.tail -= 1;
                }
            }
            (false, true, false) => {
                unsafe {
                    // discontiguous, insert closer to tail, head section:
                    //
                    // I H T
                    // [o o o A o o o o o o . . . o o o]
                    //
                    // H T
                    // [o o I A o o o o o o . . o o o o]
                    // M M M M M M

                    // copy elements up to new tail
                    self.copy(self.tail - 1, self.tail, self.cap() - self.tail);

                    // copy last element into empty spot at bottom of buffer
                    self.copy(self.cap() - 1, 0, 1);

                    // move elements from idx-1 to end forward not including ^ element
                    self.copy(0, 1, idx - 1);

                    self.tail -= 1;
                }
            }
            (false, false, false) => {
                unsafe {
                    // discontiguous, insert closer to head, head section:
                    //
                    // I H T
                    // [o o o o A o o . . . . . . o o o]
                    //
                    // H T
                    // [o o o o I A o o . . . . . o o o]
                    // M M M
                    self.copy(idx + 1, idx, self.head - idx);
                    self.head += 1;
                }
            }
        }

        // tail might've been changed so we need to recalculate
        let new_idx = self.wrap_add(self.tail, index);
        unsafe {
            self.buffer_write(new_idx, value);
        }
    }

    /// Removes and returns the element at `index` from the `VecDeque`.
    /// Whichever end is closer to the removal point will be moved to make
    /// room, and all the affected elements will be moved to new positions.
    /// Returns `None` if `index` is out of bounds.
    ///
    /// # Examples
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(1);
    /// buf.push_back(2);
    /// buf.push_back(3);
    ///
    /// assert_eq!(buf.remove(1), Some(2));
    /// assert_eq!(buf.get(1), Some(&3));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn remove(&mut self, index: usize) -> Option<T> {
        if self.is_empty() || self.len() <= index {
            return None;
        }

        // There are three main cases:
        // Elements are contiguous
        // Elements are discontiguous and the removal is in the tail section
        // Elements are discontiguous and the removal is in the head section
        // - special case when elements are technically contiguous,
        // but self.head = 0
        //
        // For each of those there are two more cases:
        // Insert is closer to tail
        // Insert is closer to head
        //
        // Key: H - self.head
        // T - self.tail
        // o - Valid element
        // x - Element marked for removal
        // R - Indicates element that is being removed
        // M - Indicates element was moved

        let idx = self.wrap_add(self.tail, index);

        // Read the value out first; the slot it occupied is then overwritten
        // by one of the shifting arms below.
        let elem = unsafe { Some(self.buffer_read(idx)) };

        let distance_to_tail = index;
        let distance_to_head = self.len() - index;

        let contiguous = self.is_contiguous();

        match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
            (true, true, _) => {
                unsafe {
                    // contiguous, remove closer to tail:
                    //
                    // T R H
                    // [. . . o o x o o o o . . . . . .]
                    //
                    // T H
                    // [. . . . o o o o o o . . . . . .]
                    // M M

                    self.copy(self.tail + 1, self.tail, index);
                    self.tail += 1;
                }
            }
            (true, false, _) => {
                unsafe {
                    // contiguous, remove closer to head:
                    //
                    // T R H
                    // [. . . o o o o x o o . . . . . .]
                    //
                    // T H
                    // [. . . o o o o o o . . . . . . .]
                    // M M

                    self.copy(idx, idx + 1, self.head - idx - 1);
                    self.head -= 1;
                }
            }
            (false, true, true) => {
                unsafe {
                    // discontiguous, remove closer to tail, tail section:
                    //
                    // H T R
                    // [o o o o o o . . . . . o o x o o]
                    //
                    // H T
                    // [o o o o o o . . . . . . o o o o]
                    // M M

                    self.copy(self.tail + 1, self.tail, index);
                    self.tail = self.wrap_add(self.tail, 1);
                }
            }
            (false, false, false) => {
                unsafe {
                    // discontiguous, remove closer to head, head section:
                    //
                    // R H T
                    // [o o o o x o o . . . . . . o o o]
                    //
                    // H T
                    // [o o o o o o . . . . . . . o o o]
                    // M M

                    self.copy(idx, idx + 1, self.head - idx - 1);
                    self.head -= 1;
                }
            }
            (false, false, true) => {
                unsafe {
                    // discontiguous, remove closer to head, tail section:
                    //
                    // H T R
                    // [o o o . . . . . . o o o o o x o]
                    //
                    // H T
                    // [o o . . . . . . . o o o o o o o]
                    // M M M M
                    //
                    // or quasi-discontiguous, remove next to head, tail section:
                    //
                    // H T R
                    // [. . . . . . . . . o o o o o x o]
                    //
                    // T H
                    // [. . . . . . . . . o o o o o o .]
                    // M

                    // draw in elements in the tail section
                    self.copy(idx, idx + 1, self.cap() - idx - 1);

                    // Prevents underflow.
                    if self.head != 0 {
                        // copy first element into empty spot
                        self.copy(self.cap() - 1, 0, 1);

                        // move elements in the head section backwards
                        self.copy(0, 1, self.head - 1);
                    }

                    self.head = self.wrap_sub(self.head, 1);
                }
            }
            (false, true, false) => {
                unsafe {
                    // discontiguous, remove closer to tail, head section:
                    //
                    // R H T
                    // [o o x o o o o o o o . . . o o o]
                    //
                    // H T
                    // [o o o o o o o o o o . . . . o o]
                    // M M M M M

                    // draw in elements up to idx
                    self.copy(1, 0, idx);

                    // copy last element into empty spot
                    self.copy(0, self.cap() - 1, 1);

                    // move elements from tail to end forward, excluding the last one
                    self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);

                    self.tail = self.wrap_add(self.tail, 1);
                }
            }
        }

        return elem;
    }

    /// Splits the collection into two at the given index.
    ///
    /// Returns a newly allocated `Self`. `self` contains elements `[0, at)`,
    /// and the returned `Self` contains elements `[at, len)`.
    ///
    /// Note that the capacity of `self` does not change.
    ///
    /// # Panics
    ///
    /// Panics if `at > len`
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf: VecDeque<_> = vec![1,2,3].into_iter().collect();
    /// let buf2 = buf.split_off(1);
    /// // buf = [1], buf2 = [2, 3]
    /// assert_eq!(buf.len(), 1);
    /// assert_eq!(buf2.len(), 2);
    /// ```
    #[inline]
    #[stable(feature = "split_off", since = "1.4.0")]
    pub fn split_off(&mut self, at: usize) -> Self {
        let len = self.len();
        assert!(at <= len, "`at` out of bounds");

        let other_len = len - at;
        let mut other = VecDeque::with_capacity(other_len);

        unsafe {
            let (first_half, second_half) = self.as_slices();

            let first_len = first_half.len();
            let second_len = second_half.len();
            if at < first_len {
                // `at` lies in the first half.
                let amount_in_first = first_len - at;

                ptr::copy_nonoverlapping(first_half.as_ptr().offset(at as isize),
                                         other.ptr(),
                                         amount_in_first);

                // just take all of the second half.
                ptr::copy_nonoverlapping(second_half.as_ptr(),
                                         other.ptr().offset(amount_in_first as isize),
                                         second_len);
            } else {
                // `at` lies in the second half, need to factor in the elements we skipped
                // in the first half.
                let offset = at - first_len;
                let amount_in_second = second_len - offset;
                ptr::copy_nonoverlapping(second_half.as_ptr().offset(offset as isize),
                                         other.ptr(),
                                         amount_in_second);
            }
        }

        // Cleanup where the ends of the buffers are
        self.head = self.wrap_sub(self.head, other_len);
        other.head = other.wrap_index(other_len);

        other
    }

    /// Moves all the elements of `other` into `Self`, leaving `other` empty.
    ///
    /// # Panics
    ///
    /// Panics if the new number of elements in self overflows a `usize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
    /// let mut buf2: VecDeque<_> = vec![4, 5, 6].into_iter().collect();
    /// buf.append(&mut buf2);
    /// assert_eq!(buf.len(), 6);
    /// assert_eq!(buf2.len(), 0);
    /// ```
    #[inline]
    #[stable(feature = "append", since = "1.4.0")]
    pub fn append(&mut self, other: &mut Self) {
        // naive impl
        self.extend(other.drain(..));
    }

    /// Retains only the elements specified by the predicate.
    ///
    /// In other words, remove all elements `e` such that `f(&e)` returns false.
    /// This method operates in place and preserves the order of the retained
    /// elements.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.extend(1..5);
    /// buf.retain(|&x| x%2 == 0);
    ///
    /// let v: Vec<_> = buf.into_iter().collect();
    /// assert_eq!(&v[..], &[2, 4]);
    /// ```
    #[stable(feature = "vec_deque_retain", since = "1.4.0")]
    pub fn retain<F>(&mut self, mut f: F)
        where F: FnMut(&T) -> bool
    {
        let len = self.len();
        let mut del = 0;
        // Compact kept elements toward the front by swapping each survivor
        // over the `del` rejected slots before it, then drop the tail.
        for i in 0..len {
            if !f(&self[i]) {
                del += 1;
            } else if del > 0 {
                self.swap(i - del, i);
            }
        }
        if del > 0 {
            self.truncate(len - del);
        }
    }
}

impl<T: Clone> VecDeque<T> {
    /// Modifies the `VecDeque` in-place so that `len()` is equal to new_len,
    /// either by removing excess elements or by appending copies of a value to the back.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(deque_extras)]
    ///
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(5);
    /// buf.push_back(10);
    /// buf.push_back(15);
    /// buf.resize(2, 0);
    /// buf.resize(6, 20);
    /// for (a, b) in [5, 10, 20, 20, 20, 20].iter().zip(&buf) {
    ///     assert_eq!(a, b);
    /// }
    /// ```
    #[unstable(feature = "deque_extras",
               reason = "matches collection reform specification; waiting on panic semantics",
               issue = "27788")]
    pub fn resize(&mut self, new_len: usize, value: T) {
        let len = self.len();

        if new_len > len {
            // Grow by cloning `value`; requires the `T: Clone` bound on this impl.
            self.extend(repeat(value).take(new_len - len))
        } else {
            self.truncate(new_len);
        }
    }
}

/// Returns the index in the underlying buffer for a given logical element index.
#[inline]
fn wrap_index(index: usize, size: usize) -> usize {
    // size is always a power of 2
    debug_assert!(size.is_power_of_two());
    // Masking is the power-of-two equivalent of `index % size`.
    index & (size - 1)
}

/// Calculate the number of elements left to be read in the buffer
#[inline]
fn count(tail: usize, head: usize, size: usize) -> usize {
    // size is always a power of 2
    // wrapping_sub handles head < tail (wrapped ring) correctly under the mask.
    (head.wrapping_sub(tail)) & (size - 1)
}

/// `VecDeque` iterator.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
    ring: &'a [T],      // the whole backing buffer, valid slots included
    tail: usize,        // next index to yield from the front
    head: usize,        // one-past the last valid index (exclusive)
}

// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Iter<'a, T> {
    fn clone(&self) -> Iter<'a, T> {
        Iter {
            ring: self.ring,
            tail: self.tail,
            head: self.head,
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;

    #[inline]
    fn next(&mut self) -> Option<&'a T> {
        // tail == head is the canonical "exhausted" state.
        if self.tail == self.head {
            return None;
        }
        let tail = self.tail;
        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
        // get_unchecked is in-bounds: wrap_index keeps tail < ring.len().
        unsafe { Some(self.ring.get_unchecked(tail)) }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = count(self.tail, self.head, self.ring.len());
        (len, Some(len))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a T> {
        if self.tail == self.head {
            return None;
        }
        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
        unsafe { Some(self.ring.get_unchecked(self.head)) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {}

/// `VecDeque` mutable iterator.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
    ring: &'a mut [T],
    tail: usize,
    head: usize,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for IterMut<'a, T> {
    type Item = &'a mut T;

    #[inline]
    fn next(&mut self) -> Option<&'a mut T> {
        if self.tail == self.head {
            return None;
        }
        let tail = self.tail;
        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());

        unsafe {
            let elem = self.ring.get_unchecked_mut(tail);
            // Raw-pointer round-trip detaches the returned &'a mut T from the
            // &mut self borrow; sound because each index is yielded only once.
            Some(&mut *(elem as *mut _))
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = count(self.tail, self.head, self.ring.len());
        (len, Some(len))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut T> {
        if self.tail == self.head {
            return None;
        }
        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());

        unsafe {
            let elem = self.ring.get_unchecked_mut(self.head);
            Some(&mut *(elem as *mut _))
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}

/// A by-value VecDeque iterator
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
    inner: VecDeque<T>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        // Owning iterator simply pops from the front of the wrapped deque.
        self.inner.pop_front()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.inner.len();
        (len, Some(len))
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.inner.pop_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {}

/// A draining VecDeque iterator
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a, T: 'a> {
    after_tail: usize,          // physical index where the kept head segment begins
    after_head: usize,          // the deque's original head, restored on drop
    iter: Iter<'a, T>,
    deque: *mut VecDeque<T>,    // raw back-pointer; fixed up in Drop
}

#[stable(feature = "drain",
         since = "1.6.0")]
unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<'a, T: Send> Send for Drain<'a, T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> Drop for Drain<'a, T> {
    fn drop(&mut self) {
        // Exhaust the iterator first so every undrained element is dropped.
        for _ in self.by_ref() {}

        let source_deque = unsafe { &mut *self.deque };

        // T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head
        //
        // T t h H
        // [. . . o o x x o o . . .]
        //
        let orig_tail = source_deque.tail;
        let drain_tail = source_deque.head;
        let drain_head = self.after_tail;
        let orig_head = self.after_head;

        let tail_len = count(orig_tail, drain_tail, source_deque.cap());
        let head_len = count(drain_head, orig_head, source_deque.cap());

        // Restore the original head value
        source_deque.head = orig_head;

        // Close the hole by moving whichever remaining segment is shorter.
        match (tail_len, head_len) {
            (0, 0) => {
                source_deque.head = 0;
                source_deque.tail = 0;
            }
            (0, _) => {
                source_deque.tail = drain_head;
            }
            (_, 0) => {
                source_deque.head = drain_tail;
            }
            _ => {
                unsafe {
                    if tail_len <= head_len {
                        source_deque.tail = source_deque.wrap_sub(drain_head, tail_len);
                        source_deque.wrap_copy(source_deque.tail, orig_tail, tail_len);
                    } else {
                        source_deque.head = source_deque.wrap_add(drain_tail, head_len);
                        source_deque.wrap_copy(drain_tail, drain_head, head_len);
                    }
                }
            }
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> Iterator for Drain<'a, T> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        // ptr::read moves the value out; the vacated slot is reclaimed when
        // the Drain is dropped (see Drop above).
        self.iter.next().map(|elt| unsafe { ptr::read(elt) })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialEq> PartialEq for VecDeque<A> {
    fn eq(&self, other: &VecDeque<A>) -> bool {
        if self.len() != other.len() {
            return false;
        }
        // Compare slice-wise to avoid a per-element wrapping-index loop.
        let (sa, sb) = self.as_slices();
        let (oa, ob) = other.as_slices();
        if sa.len() == oa.len() {
            sa == oa && sb == ob
        } else if sa.len() < oa.len() {
            // Always divisible in three sections, for example:
            // self: [a b c|d e f]
            // other: [0 1 2 3|4 5]
            // front = 3, mid = 1,
            // [a b c] == [0 1 2] && [d] == [3] && [e f] == [4 5]
            let front = sa.len();
            let mid = oa.len() - front;

            let (oa_front, oa_mid) = oa.split_at(front);
            let (sb_mid, sb_back) = sb.split_at(mid);
            debug_assert_eq!(sa.len(), oa_front.len());
            debug_assert_eq!(sb_mid.len(), oa_mid.len());
            debug_assert_eq!(sb_back.len(), ob.len());
            sa == oa_front && sb_mid == oa_mid && sb_back == ob
        } else {
            let front = oa.len();
            let mid = sa.len() - front;

            let (sa_front, sa_mid) = sa.split_at(front);
            let (ob_mid, ob_back) = ob.split_at(mid);
            debug_assert_eq!(sa_front.len(), oa.len());
            debug_assert_eq!(sa_mid.len(), ob_mid.len());
            debug_assert_eq!(sb.len(), ob_back.len());
            sa_front == oa && sa_mid == ob_mid && sb == ob_back
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Eq> Eq for VecDeque<A> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialOrd> PartialOrd for VecDeque<A> {
    fn partial_cmp(&self, other: &VecDeque<A>) -> Option<Ordering> {
        self.iter().partial_cmp(other.iter())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Ord> Ord for VecDeque<A> {
    #[inline]
    fn cmp(&self, other: &VecDeque<A>) -> Ordering {
        self.iter().cmp(other.iter())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Hash> Hash for VecDeque<A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Length prefix plus both slices: equal deques hash equally regardless
        // of where the ring's split point happens to be.
        self.len().hash(state);
        let (a, b) = self.as_slices();
        Hash::hash_slice(a, state);
        Hash::hash_slice(b, state);
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Index<usize> for VecDeque<A> {
    type Output = A;

    #[inline]
    fn index(&self, index: usize) -> &A {
        self.get(index).expect("Out of bounds access")
    }
}

#[stable(feature = "rust1",
         since = "1.0.0")]
impl<A> IndexMut<usize> for VecDeque<A> {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut A {
        self.get_mut(index).expect("Out of bounds access")
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> FromIterator<A> for VecDeque<A> {
    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> VecDeque<A> {
        let iterator = iter.into_iter();
        // Pre-size from the iterator's lower bound to limit reallocations.
        let (lower, _) = iterator.size_hint();
        let mut deq = VecDeque::with_capacity(lower);
        deq.extend(iterator);
        deq
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for VecDeque<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    /// Consumes the list into a front-to-back iterator yielding elements by
    /// value.
    fn into_iter(self) -> IntoIter<T> {
        IntoIter { inner: self }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a VecDeque<T> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a mut VecDeque<T> {
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T>;

    fn into_iter(mut self) -> IterMut<'a, T> {
        self.iter_mut()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Extend<A> for VecDeque<A> {
    fn extend<T: IntoIterator<Item = A>>(&mut self, iter: T) {
        for elt in iter {
            self.push_back(elt);
        }
    }
}

#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Copy> Extend<&'a T> for VecDeque<T> {
    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
        self.extend(iter.into_iter().cloned());
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug> fmt::Debug for VecDeque<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list().entries(self).finish()
    }
}

#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
impl<T> From<Vec<T>> for VecDeque<T> {
    fn from(mut other: Vec<T>) -> Self {
        unsafe {
            let other_buf = other.as_mut_ptr();
            let mut buf = RawVec::from_raw_parts(other_buf,
other.capacity()); let len = other.len(); mem::forget(other); // We need to extend the buf if it's not a power of two, too small // or doesn't have at least one free space if !buf.cap().is_power_of_two() || (buf.cap() < (MINIMUM_CAPACITY + 1)) || (buf.cap() == len) { let cap = cmp::max(buf.cap() + 1, MINIMUM_CAPACITY + 1).next_power_of_two(); buf.reserve_exact(len, cap - len); } VecDeque { tail: 0, head: len, buf: buf } } } } #[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")] impl<T> From<VecDeque<T>> for Vec<T> { fn from(other: VecDeque<T>) -> Self { unsafe { let buf = other.buf.ptr(); let len = other.len(); let tail = other.tail; let head = other.head; let cap = other.cap(); // Need to move the ring to the front of the buffer, as vec will expect this. if other.is_contiguous() { ptr::copy(buf.offset(tail as isize), buf, len); } else { if (tail - head) >= cmp::min((cap - tail), head) { // There is enough free space in the centre for the shortest block so we can // do this in at most three copy moves. if (cap - tail) > head { // right hand block is the long one; move that enough for the left ptr::copy( buf.offset(tail as isize), buf.offset((tail - head) as isize), cap - tail); // copy left in the end ptr::copy(buf, buf.offset((cap - head) as isize), head); // shift the new thing to the start ptr::copy(buf.offset((tail-head) as isize), buf, len); } else { // left hand block is the long one, we can do it in two! 
ptr::copy(buf, buf.offset((cap-tail) as isize), head); ptr::copy(buf.offset(tail as isize), buf, cap-tail); } } else { // Need to use N swaps to move the ring // We can use the space at the end of the ring as a temp store let mut left_edge: usize = 0; let mut right_edge: usize = tail; // The general problem looks like this // GHIJKLM...ABCDEF - before any swaps // ABCDEFM...GHIJKL - after 1 pass of swaps // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store // - then restart the algorithm with a new (smaller) store // Sometimes the temp store is reached when the right edge is at the end // of the buffer - this means we've hit the right order with fewer swaps! // E.g // EF..ABCD // ABCDEF.. - after four only swaps we've finished while left_edge < len && right_edge != cap { let mut right_offset = 0; for i in left_edge..right_edge { right_offset = (i - left_edge) % (cap - right_edge); let src: isize = (right_edge + right_offset) as isize; ptr::swap(buf.offset(i as isize), buf.offset(src)); } let n_ops = right_edge - left_edge; left_edge += n_ops; right_edge += right_offset + 1; } } } let out = Vec::from_raw_parts(buf, len, cap); mem::forget(other); out } } } #[cfg(test)] mod tests { use core::iter::Iterator; use core::option::Option::Some; use test; use super::VecDeque; #[bench] fn bench_push_back_100(b: &mut test::Bencher) { let mut deq = VecDeque::with_capacity(101); b.iter(|| { for i in 0..100 { deq.push_back(i); } deq.head = 0; deq.tail = 0; }) } #[bench] fn bench_push_front_100(b: &mut test::Bencher) { let mut deq = VecDeque::with_capacity(101); b.iter(|| { for i in 0..100 { deq.push_front(i); } deq.head = 0; deq.tail = 0; }) } #[bench] fn bench_pop_back_100(b: &mut test::Bencher) { let mut deq = VecDeque::<i32>::with_capacity(101); b.iter(|| { deq.head = 100; deq.tail = 0; while !deq.is_empty() { test::black_box(deq.pop_back()); } }) } #[bench] fn bench_pop_front_100(b: &mut test::Bencher) { let mut deq = VecDeque::<i32>::with_capacity(101); 
b.iter(|| { deq.head = 100; deq.tail = 0; while !deq.is_empty() { test::black_box(deq.pop_front()); } }) } #[test] fn test_swap_front_back_remove() { fn test(back: bool) { // This test checks that every single combination of tail position and length is tested. // Capacity 15 should be large enough to cover every case. let mut tester = VecDeque::with_capacity(15); let usable_cap = tester.capacity(); let final_len = usable_cap / 2; for len in 0..final_len { let expected = if back { (0..len).collect() } else { (0..len).rev().collect() }; for tail_pos in 0..usable_cap { tester.tail = tail_pos; tester.head = tail_pos; if back { for i in 0..len * 2 { tester.push_front(i); } for i in 0..len { assert_eq!(tester.swap_remove_back(i), Some(len * 2 - 1 - i)); } } else { for i in 0..len * 2 { tester.push_back(i); } for i in 0..len { let idx = tester.len() - 1 - i; assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i)); } } assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); assert_eq!(tester, expected); } } } test(true); test(false); } #[test] fn test_insert() { // This test checks that every single combination of tail position, length, and // insertion position is tested. Capacity 15 should be large enough to cover every case. let mut tester = VecDeque::with_capacity(15); // can't guarantee we got 15, so have to get what we got. 
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else // this test isn't covering what it wants to let cap = tester.capacity(); // len is the length *after* insertion for len in 1..cap { // 0, 1, 2, .., len - 1 let expected = (0..).take(len).collect(); for tail_pos in 0..cap { for to_insert in 0..len { tester.tail = tail_pos; tester.head = tail_pos; for i in 0..len { if i != to_insert { tester.push_back(i); } } tester.insert(to_insert, to_insert); assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); assert_eq!(tester, expected); } } } } #[test] fn test_remove() { // This test checks that every single combination of tail position, length, and // removal position is tested. Capacity 15 should be large enough to cover every case. let mut tester = VecDeque::with_capacity(15); // can't guarantee we got 15, so have to get what we got. // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else // this test isn't covering what it wants to let cap = tester.capacity(); // len is the length *after* removal for len in 0..cap - 1 { // 0, 1, 2, .., len - 1 let expected = (0..).take(len).collect(); for tail_pos in 0..cap { for to_remove in 0..len + 1 { tester.tail = tail_pos; tester.head = tail_pos; for i in 0..len { if i == to_remove { tester.push_back(1234); } tester.push_back(i); } if to_remove == len { tester.push_back(1234); } tester.remove(to_remove); assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); assert_eq!(tester, expected); } } } } #[test] fn test_drain() { let mut tester: VecDeque<usize> = VecDeque::with_capacity(7); let cap = tester.capacity(); for len in 0..cap + 1 { for tail in 0..cap + 1 { for drain_start in 0..len + 1 { for drain_end in drain_start..len + 1 { tester.tail = tail; tester.head = tail; for i in 0..len { tester.push_back(i); } // Check that we drain the correct values let drained: VecDeque<_> = tester.drain(drain_start..drain_end).collect(); let drained_expected: 
VecDeque<_> = (drain_start..drain_end).collect(); assert_eq!(drained, drained_expected); // We shouldn't have changed the capacity or made the // head or tail out of bounds assert_eq!(tester.capacity(), cap); assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); // We should see the correct values in the VecDeque let expected: VecDeque<_> = (0..drain_start) .chain(drain_end..len) .collect(); assert_eq!(expected, tester); } } } } } #[test] fn test_shrink_to_fit() { // This test checks that every single combination of head and tail position, // is tested. Capacity 15 should be large enough to cover every case. let mut tester = VecDeque::with_capacity(15); // can't guarantee we got 15, so have to get what we got. // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else // this test isn't covering what it wants to let cap = tester.capacity(); tester.reserve(63); let max_cap = tester.capacity(); for len in 0..cap + 1 { // 0, 1, 2, .., len - 1 let expected = (0..).take(len).collect(); for tail_pos in 0..max_cap + 1 { tester.tail = tail_pos; tester.head = tail_pos; tester.reserve(63); for i in 0..len { tester.push_back(i); } tester.shrink_to_fit(); assert!(tester.capacity() <= cap); assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); assert_eq!(tester, expected); } } } #[test] fn test_split_off() { // This test checks that every single combination of tail position, length, and // split position is tested. Capacity 15 should be large enough to cover every case. let mut tester = VecDeque::with_capacity(15); // can't guarantee we got 15, so have to get what we got. 
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else // this test isn't covering what it wants to let cap = tester.capacity(); // len is the length *before* splitting for len in 0..cap { // index to split at for at in 0..len + 1 { // 0, 1, 2, .., at - 1 (may be empty) let expected_self = (0..).take(at).collect(); // at, at + 1, .., len - 1 (may be empty) let expected_other = (at..).take(len - at).collect(); for tail_pos in 0..cap { tester.tail = tail_pos; tester.head = tail_pos; for i in 0..len { tester.push_back(i); } let result = tester.split_off(at); assert!(tester.tail < tester.cap()); assert!(tester.head < tester.cap()); assert!(result.tail < result.cap()); assert!(result.head < result.cap()); assert_eq!(tester, expected_self); assert_eq!(result, expected_other); } } } } #[test] fn test_from_vec() { use super::super::vec::Vec; for cap in 0..35 { for len in 0..cap + 1 { let mut vec = Vec::with_capacity(cap); vec.extend(0..len); let vd = VecDeque::from(vec.clone()); assert!(vd.cap().is_power_of_two()); assert_eq!(vd.len(), vec.len()); assert!(vd.into_iter().eq(vec)); } } } #[test] fn test_vec_from_vecdeque() { use super::super::vec::Vec; fn create_vec_and_test_convert(cap: usize, offset: usize, len: usize) { let mut vd = VecDeque::with_capacity(cap); for _ in 0..offset { vd.push_back(0); vd.pop_front(); } vd.extend(0..len); let vec: Vec<_> = Vec::from(vd.clone()); assert_eq!(vec.len(), vd.len()); assert!(vec.into_iter().eq(vd)); } for cap_pwr in 0..7 { // Make capacity as a (2^x)-1, so that the ring size is 2^x let cap = (2i32.pow(cap_pwr) - 1) as usize; // In these cases there is enough free space to solve it with copies for len in 0..((cap+1)/2) { // Test contiguous cases for offset in 0..(cap-len) { create_vec_and_test_convert(cap, offset, len) } // Test cases where block at end of buffer is bigger than block at start for offset in (cap-len)..(cap-(len/2)) { create_vec_and_test_convert(cap, offset, len) } // Test cases where block at 
start of buffer is bigger than block at end for offset in (cap-(len/2))..cap { create_vec_and_test_convert(cap, offset, len) } } // Now there's not (necessarily) space to straighten the ring with simple copies, // the ring will use swapping when: // (cap + 1 - offset) > (cap + 1 - len) && (len - (cap + 1 - offset)) > (cap + 1 - len)) // right block size > free space && left block size > free space for len in ((cap+1)/2)..cap { // Test contiguous cases for offset in 0..(cap-len) { create_vec_and_test_convert(cap, offset, len) } // Test cases where block at end of buffer is bigger than block at start for offset in (cap-len)..(cap-(len/2)) { create_vec_and_test_convert(cap, offset, len) } // Test cases where block at start of buffer is bigger than block at end for offset in (cap-(len/2))..cap { create_vec_and_test_convert(cap, offset, len) } } } } }
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use rustc_serialize::{Encodable, Decodable, Encoder, Decoder}; use rustc_data_structures::stable_hasher; use std::mem; use std::slice; #[derive(Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Clone, Copy)] pub struct Fingerprint(u64, u64); impl Fingerprint { #[inline] pub fn zero() -> Fingerprint { Fingerprint(0, 0) } #[inline] pub fn from_smaller_hash(hash: u64) -> Fingerprint { Fingerprint(hash, hash) } #[inline] pub fn to_smaller_hash(&self) -> u64 { self.0 } pub fn to_hex(&self) -> String { format!("{:x}{:x}", self.0, self.1) } } impl Encodable for Fingerprint { #[inline] fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> { s.emit_u64(self.0.to_le())?; s.emit_u64(self.1.to_le()) } } impl Decodable for Fingerprint { #[inline] fn decode<D: Decoder>(d: &mut D) -> Result<Fingerprint, D::Error> { let _0 = u64::from_le(d.read_u64()?); let _1 = u64::from_le(d.read_u64()?); Ok(Fingerprint(_0, _1)) } } impl ::std::fmt::Display for Fingerprint { fn fmt(&self, formatter: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { write!(formatter, "{:x}-{:x}", self.0, self.1) } } impl stable_hasher::StableHasherResult for Fingerprint { fn finish(mut hasher: stable_hasher::StableHasher<Self>) -> Self { let hash_bytes: &[u8] = hasher.finalize(); assert!(hash_bytes.len() >= mem::size_of::<u64>() * 2); let hash_bytes: &[u64] = unsafe { slice::from_raw_parts(hash_bytes.as_ptr() as *const u64, 2) }; // The bytes returned bytes the Blake2B hasher are always little-endian. 
Fingerprint(u64::from_le(hash_bytes[0]), u64::from_le(hash_bytes[1])) } } impl<CTX> stable_hasher::HashStable<CTX> for Fingerprint { #[inline] fn hash_stable<W: stable_hasher::StableHasherResult>(&self, _: &mut CTX, hasher: &mut stable_hasher::StableHasher<W>) { ::std::hash::Hash::hash(self, hasher); } } Don't byteswap Fingerprints when encoding Byteswapping Fingerprints when encoding is unnecessary and breaks if the Fingerprint is later decoded on a machine with different endianness to the one it was encoded on. Fix by removing the Encodable and Decodable implementations and using the ones derived from RustcEncodable and RustcDecodable. Fixes #42239 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. 
use rustc_data_structures::stable_hasher; use std::mem; use std::slice; #[derive(Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Clone, Copy, RustcEncodable, RustcDecodable)] pub struct Fingerprint(u64, u64); impl Fingerprint { #[inline] pub fn zero() -> Fingerprint { Fingerprint(0, 0) } #[inline] pub fn from_smaller_hash(hash: u64) -> Fingerprint { Fingerprint(hash, hash) } #[inline] pub fn to_smaller_hash(&self) -> u64 { self.0 } pub fn to_hex(&self) -> String { format!("{:x}{:x}", self.0, self.1) } } impl ::std::fmt::Display for Fingerprint { fn fmt(&self, formatter: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { write!(formatter, "{:x}-{:x}", self.0, self.1) } } impl stable_hasher::StableHasherResult for Fingerprint { fn finish(mut hasher: stable_hasher::StableHasher<Self>) -> Self { let hash_bytes: &[u8] = hasher.finalize(); assert!(hash_bytes.len() >= mem::size_of::<u64>() * 2); let hash_bytes: &[u64] = unsafe { slice::from_raw_parts(hash_bytes.as_ptr() as *const u64, 2) }; // The bytes returned bytes the Blake2B hasher are always little-endian. Fingerprint(u64::from_le(hash_bytes[0]), u64::from_le(hash_bytes[1])) } } impl<CTX> stable_hasher::HashStable<CTX> for Fingerprint { #[inline] fn hash_stable<W: stable_hasher::StableHasherResult>(&self, _: &mut CTX, hasher: &mut stable_hasher::StableHasher<W>) { ::std::hash::Hash::hash(self, hasher); } }
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! Managing the scope stack. The scopes are tied to lexical scopes, so as we descend the HAIR, we push a scope on the stack, translate its contents, and then pop it off. Every scope is named by a `CodeExtent`. ### SEME Regions When pushing a new scope, we record the current point in the graph (a basic block); this marks the entry to the scope. We then generate more stuff in the control-flow graph. Whenever the scope is exited, either via a `break` or `return` or just by fallthrough, that marks an exit from the scope. Each lexical scope thus corresponds to a single-entry, multiple-exit (SEME) region in the control-flow graph. For now, we keep a mapping from each `CodeExtent` to its corresponding SEME region for later reference (see caveat in next paragraph). This is because region scopes are tied to them. Eventually, when we shift to non-lexical lifetimes, there should be no need to remember this mapping. There is one additional wrinkle, actually, that I wanted to hide from you but duty compels me to mention. In the course of translating matches, it sometimes happens that certain code (namely guards) gets executed multiple times. This means that a single lexical scope may in fact correspond to multiple, disjoint SEME regions. So in fact our mapping is from one scope to a vector of SEME regions. ### Drops The primary purpose for scopes is to insert drops: while translating the contents, we also accumulate lvalues that need to be dropped upon exit from each scope. This is done by calling `schedule_drop`. 
Once a drop is scheduled, whenever we branch out we will insert drops of all those lvalues onto the outgoing edge. Note that we don't know the full set of scheduled drops up front, and so whenever we exit from the scope we only drop the values scheduled thus far. For example, consider the scope S corresponding to this loop: ``` loop { let x = ...; if cond { break; } let y = ...; } ``` When processing the `let x`, we will add one drop to the scope for `x`. The break will then insert a drop for `x`. When we process `let y`, we will add another drop (in fact, to a subscope, but let's ignore that for now); any later drops would also drop `y`. ### Early exit There are numerous "normal" ways to early exit a scope: `break`, `continue`, `return` (panics are handled separately). Whenever an early exit occurs, the method `exit_scope` is called. It is given the current point in execution where the early exit occurs, as well as the scope you want to branch to (note that all early exits go from one scope to some other enclosing scope). `exit_scope` will record this exit point and also add all drops. Panics are handled in a similar fashion, except that a panic always returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call `panic(p)` with the current point `p`. Or else you can call `diverge_cleanup`, which will produce a block that you can branch to which does the appropriate cleanup and then diverges. `panic(p)` simply calls `diverge_cleanup()` and adds an edge from `p` to the result. ### Loop scopes In addition to the normal scope stack, we track a loop scope stack that contains only loops. It tracks where a `break` and `continue` should go to. 
*/ use build::{BlockAnd, BlockAndExtension, Builder, CFG}; use rustc::middle::region::CodeExtent; use rustc::middle::lang_items; use rustc::middle::subst::{Substs, Subst, VecPerParamSpace}; use rustc::middle::ty::{self, Ty}; use rustc::mir::repr::*; use syntax::codemap::{Span, DUMMY_SP}; use syntax::parse::token::intern_and_get_ident; pub struct Scope<'tcx> { extent: CodeExtent, drops: Vec<DropData<'tcx>>, // A scope may only have one associated free, because: // 1. We require a `free` to only be scheduled in the scope of `EXPR` in `box EXPR`; // 2. It only makes sense to have it translated into the diverge-path. // // This kind of drop will be run *after* all the regular drops scheduled onto this scope, // because drops may have dependencies on the allocated memory. // // This is expected to go away once `box EXPR` becomes a sugar for placement protocol and gets // desugared in some earlier stage. free: Option<FreeData<'tcx>>, } struct DropData<'tcx> { value: Lvalue<'tcx>, // NB: per-drop “cache” is necessary for the build_scope_drops function below. /// The cached block for the cleanups-on-diverge path. This block contains code to run the /// current drop and all the preceding drops (i.e. those having lower index in Drop’s /// Scope drop array) cached_block: Option<BasicBlock> } struct FreeData<'tcx> { span: Span, /// Lvalue containing the allocated box. value: Lvalue<'tcx>, /// type of item for which the box was allocated for (i.e. the T in Box<T>). item_ty: Ty<'tcx>, /// The cached block containing code to run the free. The block will also execute all the drops /// in the scope. 
cached_block: Option<BasicBlock> } #[derive(Clone, Debug)] pub struct LoopScope { /// Extent of the loop pub extent: CodeExtent, /// Where the body of the loop begins pub continue_block: BasicBlock, /// Block to branch into when the loop terminates (either by being `break`-en out from, or by /// having its condition to become false) pub break_block: BasicBlock, // where to go on a `break /// Indicates the reachability of the break_block for this loop pub might_break: bool } impl<'tcx> Scope<'tcx> { /// Invalidate all the cached blocks in the scope. /// /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a /// larger extent of code. fn invalidate_cache(&mut self) { for dropdata in &mut self.drops { dropdata.cached_block = None; } if let Some(ref mut freedata) = self.free { freedata.cached_block = None; } } /// Returns the cached block for this scope. /// /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for /// this method to work correctly. fn cached_block(&self) -> Option<BasicBlock> { if let Some(data) = self.drops.last() { Some(data.cached_block.expect("drop cache is not filled")) } else if let Some(ref data) = self.free { Some(data.cached_block.expect("free cache is not filled")) } else { None } } } impl<'a,'tcx> Builder<'a,'tcx> { // Adding and removing scopes // ========================== /// Start a loop scope, which tracks where `continue` and `break` /// should branch to. See module comment for more details. /// /// Returns the might_break attribute of the LoopScope used. 
pub fn in_loop_scope<F>(&mut self, loop_block: BasicBlock, break_block: BasicBlock, f: F) -> bool where F: FnOnce(&mut Builder<'a, 'tcx>) { let extent = self.extent_of_innermost_scope(); let loop_scope = LoopScope { extent: extent.clone(), continue_block: loop_block, break_block: break_block, might_break: false }; self.loop_scopes.push(loop_scope); f(self); let loop_scope = self.loop_scopes.pop().unwrap(); assert!(loop_scope.extent == extent); loop_scope.might_break } /// Convenience wrapper that pushes a scope and then executes `f` /// to build its contents, popping the scope afterwards. pub fn in_scope<F, R>(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd<R> where F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R> { debug!("in_scope(extent={:?}, block={:?})", extent, block); self.push_scope(extent); let rv = unpack!(block = f(self)); unpack!(block = self.pop_scope(extent, block)); debug!("in_scope: exiting extent={:?} block={:?}", extent, block); block.and(rv) } /// Push a scope onto the stack. You can then build code in this /// scope and call `pop_scope` afterwards. Note that these two /// calls must be paired; using `in_scope` as a convenience /// wrapper maybe preferable. pub fn push_scope(&mut self, extent: CodeExtent) { debug!("push_scope({:?})", extent); self.scopes.push(Scope { extent: extent.clone(), drops: vec![], free: None }); } /// Pops a scope, which should have extent `extent`, adding any /// drops onto the end of `block` that are needed. This must /// match 1-to-1 with `push_scope`. pub fn pop_scope(&mut self, extent: CodeExtent, block: BasicBlock) -> BlockAnd<()> { debug!("pop_scope({:?}, {:?})", extent, block); // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup // to make sure all the `cached_block`s are filled in. 
self.diverge_cleanup(); let scope = self.scopes.pop().unwrap(); assert_eq!(scope.extent, extent); build_scope_drops(block, &scope, &self.scopes[..], &mut self.cfg) } /// Branch out of `block` to `target`, exiting all scopes up to /// and including `extent`. This will insert whatever drops are /// needed, as well as tracking this exit for the SEME region. See /// module comment for details. pub fn exit_scope(&mut self, span: Span, extent: CodeExtent, mut block: BasicBlock, target: BasicBlock) { let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent) .unwrap_or_else(||{ self.hir.span_bug(span, &format!("extent {:?} does not enclose", extent)) }); for (idx, ref scope) in self.scopes.iter().enumerate().rev().take(scope_count) { unpack!(block = build_scope_drops(block, scope, &self.scopes[..idx], &mut self.cfg)); } self.cfg.terminate(block, Terminator::Goto { target: target }); } // Finding scopes // ============== /// Finds the loop scope for a given label. This is used for /// resolving `break` and `continue`. pub fn find_loop_scope(&mut self, span: Span, label: Option<CodeExtent>) -> &mut LoopScope { let Builder { ref mut loop_scopes, ref mut hir, .. } = *self; match label { None => { // no label? return the innermost loop scope loop_scopes.iter_mut().rev().next() } Some(label) => { // otherwise, find the loop-scope with the correct id loop_scopes.iter_mut() .rev() .filter(|loop_scope| loop_scope.extent == label) .next() } }.unwrap_or_else(|| hir.span_bug(span, "no enclosing loop scope found?")) } pub fn extent_of_innermost_scope(&self) -> CodeExtent { self.scopes.last().map(|scope| scope.extent).unwrap() } pub fn extent_of_outermost_scope(&self) -> CodeExtent { self.scopes.first().map(|scope| scope.extent).unwrap() } // Scheduling drops // ================ /// Indicates that `lvalue` should be dropped on exit from /// `extent`. 
pub fn schedule_drop(&mut self, span: Span, extent: CodeExtent, lvalue: &Lvalue<'tcx>, lvalue_ty: Ty<'tcx>) { if !self.hir.needs_drop(lvalue_ty) { return } for scope in self.scopes.iter_mut().rev() { if scope.extent == extent { // No need to invalidate any caches here. The just-scheduled drop will branch into // the drop that comes before it in the vector. scope.drops.push(DropData { value: lvalue.clone(), cached_block: None }); return; } else { // We must invalidate all the cached_blocks leading up to the scope we’re // looking for, because all of the blocks in the chain will become incorrect. scope.invalidate_cache() } } self.hir.span_bug(span, &format!("extent {:?} not in scope to drop {:?}", extent, lvalue)); } /// Schedule dropping of a not-yet-fully-initialised box. /// /// This cleanup will only be translated into unwind branch. /// The extent should be for the `EXPR` inside `box EXPR`. /// There may only be one “free” scheduled in any given scope. pub fn schedule_box_free(&mut self, span: Span, extent: CodeExtent, value: &Lvalue<'tcx>, item_ty: Ty<'tcx>) { for scope in self.scopes.iter_mut().rev() { if scope.extent == extent { assert!(scope.free.is_none(), "scope already has a scheduled free!"); // We also must invalidate the caches in the scope for which the free is scheduled // because the drops must branch into the free we schedule here. scope.invalidate_cache(); scope.free = Some(FreeData { span: span, value: value.clone(), item_ty: item_ty, cached_block: None }); return; } else { // We must invalidate all the cached_blocks leading up to the scope we’re looking // for, because otherwise some/most of the blocks in the chain will become // incorrect. scope.invalidate_cache(); } } self.hir.span_bug(span, &format!("extent {:?} not in scope to free {:?}", extent, value)); } // Other // ===== /// Creates a path that performs all required cleanup for unwinding. /// /// This path terminates in Resume. Returns the start of the path. 
/// See module comment for more details. None indicates there’s no /// cleanup to do at this point. pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> { if self.scopes.is_empty() { return None; } let unit_temp = self.get_unit_temp(); let Builder { ref mut hir, ref mut cfg, ref mut scopes, .. } = *self; let mut next_block = None; // Given an array of scopes, we generate these from the outermost scope to the innermost // one. Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will // generate B0 <- B1 <- B2 in left-to-right order. Control flow of the generated blocks // always ends up at a block with the Resume terminator. for scope in scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some()) { next_block = Some(build_diverge_scope(hir.tcx(), cfg, unit_temp.clone(), scope, next_block)); } scopes.iter().rev().flat_map(|x| x.cached_block()).next() } /// Utility function for *non*-scope code to build their own drops pub fn build_drop(&mut self, block: BasicBlock, value: Lvalue<'tcx>) -> BlockAnd<()> { let next_target = self.cfg.start_new_block(); let diverge_target = self.diverge_cleanup(); self.cfg.terminate(block, Terminator::Drop { value: value, target: next_target, unwind: diverge_target, }); next_target.unit() } // Panicking // ========= // FIXME: should be moved into their own module pub fn panic_bounds_check(&mut self, block: BasicBlock, index: Operand<'tcx>, len: Operand<'tcx>, span: Span) { // fn(&(filename: &'static str, line: u32), index: usize, length: usize) -> ! 
let func = self.lang_function(lang_items::PanicBoundsCheckFnLangItem); let args = func.ty.fn_args(); let ref_ty = args.skip_binder()[0]; let (region, tup_ty) = if let ty::TyRef(region, tyandmut) = ref_ty.sty { (region, tyandmut.ty) } else { self.hir.span_bug(span, &format!("unexpected panic_bound_check type: {:?}", func.ty)); }; let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty)); let (file, line) = self.span_to_fileline_args(span); let elems = vec![Operand::Constant(file), Operand::Constant(line)]; // FIXME: We should have this as a constant, rather than a stack variable (to not pollute // icache with cold branch code), however to achieve that we either have to rely on rvalue // promotion or have some way, in MIR, to create constants. self.cfg.push_assign(block, DUMMY_SP, &tuple, // tuple = (file_arg, line_arg); Rvalue::Aggregate(AggregateKind::Tuple, elems)); // FIXME: is this region really correct here? self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple; Rvalue::Ref(*region, BorrowKind::Unique, tuple)); let cleanup = self.diverge_cleanup(); self.cfg.terminate(block, Terminator::Call { func: Operand::Constant(func), args: vec![Operand::Consume(tuple_ref), index, len], destination: None, cleanup: cleanup, }); } /// Create diverge cleanup and branch to it from `block`. pub fn panic(&mut self, block: BasicBlock, msg: &'static str, span: Span) { // fn(&(msg: &'static str filename: &'static str, line: u32)) -> ! 
let func = self.lang_function(lang_items::PanicFnLangItem); let args = func.ty.fn_args(); let ref_ty = args.skip_binder()[0]; let (region, tup_ty) = if let ty::TyRef(region, tyandmut) = ref_ty.sty { (region, tyandmut.ty) } else { self.hir.span_bug(span, &format!("unexpected panic type: {:?}", func.ty)); }; let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty)); let (file, line) = self.span_to_fileline_args(span); let message = Constant { span: DUMMY_SP, ty: self.hir.tcx().mk_static_str(), literal: self.hir.str_literal(intern_and_get_ident(msg)) }; let elems = vec![Operand::Constant(message), Operand::Constant(file), Operand::Constant(line)]; // FIXME: We should have this as a constant, rather than a stack variable (to not pollute // icache with cold branch code), however to achieve that we either have to rely on rvalue // promotion or have some way, in MIR, to create constants. self.cfg.push_assign(block, DUMMY_SP, &tuple, // tuple = (message_arg, file_arg, line_arg); Rvalue::Aggregate(AggregateKind::Tuple, elems)); // FIXME: is this region really correct here? 
self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple; Rvalue::Ref(*region, BorrowKind::Unique, tuple)); let cleanup = self.diverge_cleanup(); self.cfg.terminate(block, Terminator::Call { func: Operand::Constant(func), args: vec![Operand::Consume(tuple_ref)], cleanup: cleanup, destination: None, }); } fn lang_function(&mut self, lang_item: lang_items::LangItem) -> Constant<'tcx> { let funcdid = match self.hir.tcx().lang_items.require(lang_item) { Ok(d) => d, Err(m) => { self.hir.tcx().sess.fatal(&*m) } }; Constant { span: DUMMY_SP, ty: self.hir.tcx().lookup_item_type(funcdid).ty, literal: Literal::Item { def_id: funcdid, kind: ItemKind::Function, substs: self.hir.tcx().mk_substs(Substs::empty()) } } } fn span_to_fileline_args(&mut self, span: Span) -> (Constant<'tcx>, Constant<'tcx>) { let span_lines = self.hir.tcx().sess.codemap().lookup_char_pos(span.lo); (Constant { span: DUMMY_SP, ty: self.hir.tcx().mk_static_str(), literal: self.hir.str_literal(intern_and_get_ident(&span_lines.file.name)) }, Constant { span: DUMMY_SP, ty: self.hir.tcx().types.u32, literal: self.hir.usize_literal(span_lines.line) }) } } /// Builds drops for pop_scope and exit_scope. fn build_scope_drops<'tcx>(mut block: BasicBlock, scope: &Scope<'tcx>, earlier_scopes: &[Scope<'tcx>], cfg: &mut CFG<'tcx>) -> BlockAnd<()> { let mut iter = scope.drops.iter().rev().peekable(); while let Some(drop_data) = iter.next() { // Try to find the next block with its cached block for us to diverge into in case the // drop panics. let on_diverge = iter.peek().iter().flat_map(|dd| dd.cached_block.into_iter()).next(); // If there’s no `cached_block`s within current scope, we must look for one in the // enclosing scope. 
let on_diverge = on_diverge.or_else(||{ earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next() }); let next = cfg.start_new_block(); cfg.terminate(block, Terminator::Drop { value: drop_data.value.clone(), target: next, unwind: on_diverge }); block = next; } block.unit() } fn build_diverge_scope<'tcx>(tcx: &ty::ctxt<'tcx>, cfg: &mut CFG<'tcx>, unit_temp: Lvalue<'tcx>, scope: &mut Scope<'tcx>, target: Option<BasicBlock>) -> BasicBlock { debug_assert!(!scope.drops.is_empty() || scope.free.is_some()); // First, we build the drops, iterating the drops array in reverse. We do that so that as soon // as we find a `cached_block`, we know that we’re finished and don’t need to do anything else. let mut previous = None; let mut last_drop_block = None; for drop_data in scope.drops.iter_mut().rev() { if let Some(cached_block) = drop_data.cached_block { if let Some((previous_block, previous_value)) = previous { cfg.terminate(previous_block, Terminator::Drop { value: previous_value, target: cached_block, unwind: None }); return last_drop_block.unwrap(); } else { return cached_block; } } else { let block = cfg.start_new_cleanup_block(); drop_data.cached_block = Some(block); if let Some((previous_block, previous_value)) = previous { cfg.terminate(previous_block, Terminator::Drop { value: previous_value, target: block, unwind: None }); } else { last_drop_block = Some(block); } previous = Some((block, drop_data.value.clone())); } } // Prepare the end target for this chain. let mut target = target.unwrap_or_else(||{ let b = cfg.start_new_cleanup_block(); cfg.terminate(b, Terminator::Resume); b }); // Then, build the free branching into the prepared target. 
if let Some(ref mut free_data) = scope.free { target = if let Some(cached_block) = free_data.cached_block { cached_block } else { let t = build_free(tcx, cfg, unit_temp, free_data, target); free_data.cached_block = Some(t); t } }; if let Some((previous_block, previous_value)) = previous { // Finally, branch into that just-built `target` from the `previous_block`. cfg.terminate(previous_block, Terminator::Drop { value: previous_value, target: target, unwind: None }); last_drop_block.unwrap() } else { // If `previous.is_none()`, there were no drops in this scope – we return the // target. target } } fn build_free<'tcx>(tcx: &ty::ctxt<'tcx>, cfg: &mut CFG<'tcx>, unit_temp: Lvalue<'tcx>, data: &FreeData<'tcx>, target: BasicBlock) -> BasicBlock { let free_func = tcx.lang_items.box_free_fn() .expect("box_free language item is missing"); let substs = tcx.mk_substs(Substs::new( VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]), VecPerParamSpace::new(vec![], vec![], vec![]) )); let block = cfg.start_new_cleanup_block(); cfg.terminate(block, Terminator::Call { func: Operand::Constant(Constant { span: data.span, ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs), literal: Literal::Item { def_id: free_func, kind: ItemKind::Function, substs: substs } }), args: vec![Operand::Consume(data.value.clone())], destination: Some((unit_temp, target)), cleanup: None }); block } Do not forget to drop the boxes on scope exits Fixes #31463 // Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! Managing the scope stack. 
The scopes are tied to lexical scopes, so as we descend the HAIR, we push a scope on the stack, translate ite contents, and then pop it off. Every scope is named by a `CodeExtent`. ### SEME Regions When pushing a new scope, we record the current point in the graph (a basic block); this marks the entry to the scope. We then generate more stuff in the control-flow graph. Whenever the scope is exited, either via a `break` or `return` or just by fallthrough, that marks an exit from the scope. Each lexical scope thus corresponds to a single-entry, multiple-exit (SEME) region in the control-flow graph. For now, we keep a mapping from each `CodeExtent` to its corresponding SEME region for later reference (see caveat in next paragraph). This is because region scopes are tied to them. Eventually, when we shift to non-lexical lifetimes, three should be no need to remember this mapping. There is one additional wrinkle, actually, that I wanted to hide from you but duty compels me to mention. In the course of translating matches, it sometimes happen that certain code (namely guards) gets executed multiple times. This means that the scope lexical scope may in fact correspond to multiple, disjoint SEME regions. So in fact our mapping is from one scope to a vector of SEME regions. ### Drops The primary purpose for scopes is to insert drops: while translating the contents, we also accumulate lvalues that need to be dropped upon exit from each scope. This is done by calling `schedule_drop`. Once a drop is scheduled, whenever we branch out we will insert drops of all those lvalues onto the outgoing edge. Note that we don't know the full set of scheduled drops up front, and so whenever we exit from the scope we only drop the values scheduled thus far. For example, consider the scope S corresponding to this loop: ``` loop { let x = ...; if cond { break; } let y = ...; } ``` When processing the `let x`, we will add one drop to the scope for `x`. 
The break will then insert a drop for `x`. When we process `let y`, we will add another drop (in fact, to a subscope, but let's ignore that for now); any later drops would also drop `y`. ### Early exit There are numerous "normal" ways to early exit a scope: `break`, `continue`, `return` (panics are handled separately). Whenever an early exit occurs, the method `exit_scope` is called. It is given the current point in execution where the early exit occurs, as well as the scope you want to branch to (note that all early exits from to some other enclosing scope). `exit_scope` will record thid exit point and also add all drops. Panics are handled in a similar fashion, except that a panic always returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call `panic(p)` with the current point `p`. Or else you can call `diverge_cleanup`, which will produce a block that you can branch to which does the appropriate cleanup and then diverges. `panic(p)` simply calls `diverge_cleanup()` and adds an edge from `p` to the result. ### Loop scopes In addition to the normal scope stack, we track a loop scope stack that contains only loops. It tracks where a `break` and `continue` should go to. */ use build::{BlockAnd, BlockAndExtension, Builder, CFG}; use rustc::middle::region::CodeExtent; use rustc::middle::lang_items; use rustc::middle::subst::{Substs, Subst, VecPerParamSpace}; use rustc::middle::ty::{self, Ty}; use rustc::mir::repr::*; use syntax::codemap::{Span, DUMMY_SP}; use syntax::parse::token::intern_and_get_ident; pub struct Scope<'tcx> { extent: CodeExtent, drops: Vec<DropData<'tcx>>, // A scope may only have one associated free, because: // 1. We require a `free` to only be scheduled in the scope of `EXPR` in `box EXPR`; // 2. It only makes sense to have it translated into the diverge-path. // // This kind of drop will be run *after* all the regular drops scheduled onto this scope, // because drops may have dependencies on the allocated memory. 
// // This is expected to go away once `box EXPR` becomes a sugar for placement protocol and gets // desugared in some earlier stage. free: Option<FreeData<'tcx>>, } struct DropData<'tcx> { value: Lvalue<'tcx>, // NB: per-drop “cache” is necessary for the build_scope_drops function below. /// The cached block for the cleanups-on-diverge path. This block contains code to run the /// current drop and all the preceding drops (i.e. those having lower index in Drop’s /// Scope drop array) cached_block: Option<BasicBlock> } struct FreeData<'tcx> { span: Span, /// Lvalue containing the allocated box. value: Lvalue<'tcx>, /// type of item for which the box was allocated for (i.e. the T in Box<T>). item_ty: Ty<'tcx>, /// The cached block containing code to run the free. The block will also execute all the drops /// in the scope. cached_block: Option<BasicBlock> } #[derive(Clone, Debug)] pub struct LoopScope { /// Extent of the loop pub extent: CodeExtent, /// Where the body of the loop begins pub continue_block: BasicBlock, /// Block to branch into when the loop terminates (either by being `break`-en out from, or by /// having its condition to become false) pub break_block: BasicBlock, // where to go on a `break /// Indicates the reachability of the break_block for this loop pub might_break: bool } impl<'tcx> Scope<'tcx> { /// Invalidate all the cached blocks in the scope. /// /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a /// larger extent of code. fn invalidate_cache(&mut self) { for dropdata in &mut self.drops { dropdata.cached_block = None; } if let Some(ref mut freedata) = self.free { freedata.cached_block = None; } } /// Returns the cached block for this scope. /// /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for /// this method to work correctly. 
fn cached_block(&self) -> Option<BasicBlock> { if let Some(data) = self.drops.last() { Some(data.cached_block.expect("drop cache is not filled")) } else if let Some(ref data) = self.free { Some(data.cached_block.expect("free cache is not filled")) } else { None } } } impl<'a,'tcx> Builder<'a,'tcx> { // Adding and removing scopes // ========================== /// Start a loop scope, which tracks where `continue` and `break` /// should branch to. See module comment for more details. /// /// Returns the might_break attribute of the LoopScope used. pub fn in_loop_scope<F>(&mut self, loop_block: BasicBlock, break_block: BasicBlock, f: F) -> bool where F: FnOnce(&mut Builder<'a, 'tcx>) { let extent = self.extent_of_innermost_scope(); let loop_scope = LoopScope { extent: extent.clone(), continue_block: loop_block, break_block: break_block, might_break: false }; self.loop_scopes.push(loop_scope); f(self); let loop_scope = self.loop_scopes.pop().unwrap(); assert!(loop_scope.extent == extent); loop_scope.might_break } /// Convenience wrapper that pushes a scope and then executes `f` /// to build its contents, popping the scope afterwards. pub fn in_scope<F, R>(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd<R> where F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R> { debug!("in_scope(extent={:?}, block={:?})", extent, block); self.push_scope(extent); let rv = unpack!(block = f(self)); unpack!(block = self.pop_scope(extent, block)); debug!("in_scope: exiting extent={:?} block={:?}", extent, block); block.and(rv) } /// Push a scope onto the stack. You can then build code in this /// scope and call `pop_scope` afterwards. Note that these two /// calls must be paired; using `in_scope` as a convenience /// wrapper maybe preferable. 
pub fn push_scope(&mut self, extent: CodeExtent) { debug!("push_scope({:?})", extent); self.scopes.push(Scope { extent: extent.clone(), drops: vec![], free: None }); } /// Pops a scope, which should have extent `extent`, adding any /// drops onto the end of `block` that are needed. This must /// match 1-to-1 with `push_scope`. pub fn pop_scope(&mut self, extent: CodeExtent, block: BasicBlock) -> BlockAnd<()> { debug!("pop_scope({:?}, {:?})", extent, block); // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup // to make sure all the `cached_block`s are filled in. self.diverge_cleanup(); let scope = self.scopes.pop().unwrap(); assert_eq!(scope.extent, extent); build_scope_drops(&mut self.cfg, &scope, &self.scopes[..], block) } /// Branch out of `block` to `target`, exiting all scopes up to /// and including `extent`. This will insert whatever drops are /// needed, as well as tracking this exit for the SEME region. See /// module comment for details. pub fn exit_scope(&mut self, span: Span, extent: CodeExtent, mut block: BasicBlock, target: BasicBlock) { let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent) .unwrap_or_else(||{ self.hir.span_bug(span, &format!("extent {:?} does not enclose", extent)) }); let tmp = self.get_unit_temp(); for (idx, ref scope) in self.scopes.iter().enumerate().rev().take(scope_count) { unpack!(block = build_scope_drops(&mut self.cfg, scope, &self.scopes[..idx], block)); if let Some(ref free_data) = scope.free { let next = self.cfg.start_new_block(); let free = build_free(self.hir.tcx(), tmp.clone(), free_data, next); self.cfg.terminate(block, free); block = next; } } self.cfg.terminate(block, Terminator::Goto { target: target }); } // Finding scopes // ============== /// Finds the loop scope for a given label. This is used for /// resolving `break` and `continue`. 
pub fn find_loop_scope(&mut self, span: Span, label: Option<CodeExtent>) -> &mut LoopScope { let Builder { ref mut loop_scopes, ref mut hir, .. } = *self; match label { None => { // no label? return the innermost loop scope loop_scopes.iter_mut().rev().next() } Some(label) => { // otherwise, find the loop-scope with the correct id loop_scopes.iter_mut() .rev() .filter(|loop_scope| loop_scope.extent == label) .next() } }.unwrap_or_else(|| hir.span_bug(span, "no enclosing loop scope found?")) } pub fn extent_of_innermost_scope(&self) -> CodeExtent { self.scopes.last().map(|scope| scope.extent).unwrap() } pub fn extent_of_outermost_scope(&self) -> CodeExtent { self.scopes.first().map(|scope| scope.extent).unwrap() } // Scheduling drops // ================ /// Indicates that `lvalue` should be dropped on exit from /// `extent`. pub fn schedule_drop(&mut self, span: Span, extent: CodeExtent, lvalue: &Lvalue<'tcx>, lvalue_ty: Ty<'tcx>) { if !self.hir.needs_drop(lvalue_ty) { return } for scope in self.scopes.iter_mut().rev() { if scope.extent == extent { // No need to invalidate any caches here. The just-scheduled drop will branch into // the drop that comes before it in the vector. scope.drops.push(DropData { value: lvalue.clone(), cached_block: None }); return; } else { // We must invalidate all the cached_blocks leading up to the scope we’re // looking for, because all of the blocks in the chain will become incorrect. scope.invalidate_cache() } } self.hir.span_bug(span, &format!("extent {:?} not in scope to drop {:?}", extent, lvalue)); } /// Schedule dropping of a not-yet-fully-initialised box. /// /// This cleanup will only be translated into unwind branch. /// The extent should be for the `EXPR` inside `box EXPR`. /// There may only be one “free” scheduled in any given scope. 
pub fn schedule_box_free(&mut self, span: Span, extent: CodeExtent, value: &Lvalue<'tcx>, item_ty: Ty<'tcx>) { for scope in self.scopes.iter_mut().rev() { if scope.extent == extent { assert!(scope.free.is_none(), "scope already has a scheduled free!"); // We also must invalidate the caches in the scope for which the free is scheduled // because the drops must branch into the free we schedule here. scope.invalidate_cache(); scope.free = Some(FreeData { span: span, value: value.clone(), item_ty: item_ty, cached_block: None }); return; } else { // We must invalidate all the cached_blocks leading up to the scope we’re looking // for, because otherwise some/most of the blocks in the chain will become // incorrect. scope.invalidate_cache(); } } self.hir.span_bug(span, &format!("extent {:?} not in scope to free {:?}", extent, value)); } // Other // ===== /// Creates a path that performs all required cleanup for unwinding. /// /// This path terminates in Resume. Returns the start of the path. /// See module comment for more details. None indicates there’s no /// cleanup to do at this point. pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> { if self.scopes.is_empty() { return None; } let unit_temp = self.get_unit_temp(); let Builder { ref mut hir, ref mut cfg, ref mut scopes, .. } = *self; let mut next_block = None; // Given an array of scopes, we generate these from the outermost scope to the innermost // one. Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will // generate B0 <- B1 <- B2 in left-to-right order. Control flow of the generated blocks // always ends up at a block with the Resume terminator. 
for scope in scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some()) { next_block = Some(build_diverge_scope(hir.tcx(), cfg, unit_temp.clone(), scope, next_block)); } scopes.iter().rev().flat_map(|x| x.cached_block()).next() } /// Utility function for *non*-scope code to build their own drops pub fn build_drop(&mut self, block: BasicBlock, value: Lvalue<'tcx>) -> BlockAnd<()> { let next_target = self.cfg.start_new_block(); let diverge_target = self.diverge_cleanup(); self.cfg.terminate(block, Terminator::Drop { value: value, target: next_target, unwind: diverge_target, }); next_target.unit() } // Panicking // ========= // FIXME: should be moved into their own module pub fn panic_bounds_check(&mut self, block: BasicBlock, index: Operand<'tcx>, len: Operand<'tcx>, span: Span) { // fn(&(filename: &'static str, line: u32), index: usize, length: usize) -> ! let func = self.lang_function(lang_items::PanicBoundsCheckFnLangItem); let args = func.ty.fn_args(); let ref_ty = args.skip_binder()[0]; let (region, tup_ty) = if let ty::TyRef(region, tyandmut) = ref_ty.sty { (region, tyandmut.ty) } else { self.hir.span_bug(span, &format!("unexpected panic_bound_check type: {:?}", func.ty)); }; let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty)); let (file, line) = self.span_to_fileline_args(span); let elems = vec![Operand::Constant(file), Operand::Constant(line)]; // FIXME: We should have this as a constant, rather than a stack variable (to not pollute // icache with cold branch code), however to achieve that we either have to rely on rvalue // promotion or have some way, in MIR, to create constants. self.cfg.push_assign(block, DUMMY_SP, &tuple, // tuple = (file_arg, line_arg); Rvalue::Aggregate(AggregateKind::Tuple, elems)); // FIXME: is this region really correct here? 
self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple; Rvalue::Ref(*region, BorrowKind::Unique, tuple)); let cleanup = self.diverge_cleanup(); self.cfg.terminate(block, Terminator::Call { func: Operand::Constant(func), args: vec![Operand::Consume(tuple_ref), index, len], destination: None, cleanup: cleanup, }); } /// Create diverge cleanup and branch to it from `block`. pub fn panic(&mut self, block: BasicBlock, msg: &'static str, span: Span) { // fn(&(msg: &'static str filename: &'static str, line: u32)) -> ! let func = self.lang_function(lang_items::PanicFnLangItem); let args = func.ty.fn_args(); let ref_ty = args.skip_binder()[0]; let (region, tup_ty) = if let ty::TyRef(region, tyandmut) = ref_ty.sty { (region, tyandmut.ty) } else { self.hir.span_bug(span, &format!("unexpected panic type: {:?}", func.ty)); }; let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty)); let (file, line) = self.span_to_fileline_args(span); let message = Constant { span: DUMMY_SP, ty: self.hir.tcx().mk_static_str(), literal: self.hir.str_literal(intern_and_get_ident(msg)) }; let elems = vec![Operand::Constant(message), Operand::Constant(file), Operand::Constant(line)]; // FIXME: We should have this as a constant, rather than a stack variable (to not pollute // icache with cold branch code), however to achieve that we either have to rely on rvalue // promotion or have some way, in MIR, to create constants. self.cfg.push_assign(block, DUMMY_SP, &tuple, // tuple = (message_arg, file_arg, line_arg); Rvalue::Aggregate(AggregateKind::Tuple, elems)); // FIXME: is this region really correct here? 
self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple; Rvalue::Ref(*region, BorrowKind::Unique, tuple)); let cleanup = self.diverge_cleanup(); self.cfg.terminate(block, Terminator::Call { func: Operand::Constant(func), args: vec![Operand::Consume(tuple_ref)], cleanup: cleanup, destination: None, }); } fn lang_function(&mut self, lang_item: lang_items::LangItem) -> Constant<'tcx> { let funcdid = match self.hir.tcx().lang_items.require(lang_item) { Ok(d) => d, Err(m) => { self.hir.tcx().sess.fatal(&*m) } }; Constant { span: DUMMY_SP, ty: self.hir.tcx().lookup_item_type(funcdid).ty, literal: Literal::Item { def_id: funcdid, kind: ItemKind::Function, substs: self.hir.tcx().mk_substs(Substs::empty()) } } } fn span_to_fileline_args(&mut self, span: Span) -> (Constant<'tcx>, Constant<'tcx>) { let span_lines = self.hir.tcx().sess.codemap().lookup_char_pos(span.lo); (Constant { span: DUMMY_SP, ty: self.hir.tcx().mk_static_str(), literal: self.hir.str_literal(intern_and_get_ident(&span_lines.file.name)) }, Constant { span: DUMMY_SP, ty: self.hir.tcx().types.u32, literal: self.hir.usize_literal(span_lines.line) }) } } /// Builds drops for pop_scope and exit_scope. fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, scope: &Scope<'tcx>, earlier_scopes: &[Scope<'tcx>], mut block: BasicBlock) -> BlockAnd<()> { let mut iter = scope.drops.iter().rev().peekable(); while let Some(drop_data) = iter.next() { // Try to find the next block with its cached block for us to diverge into in case the // drop panics. let on_diverge = iter.peek().iter().flat_map(|dd| dd.cached_block.into_iter()).next(); // If there’s no `cached_block`s within current scope, we must look for one in the // enclosing scope. 
let on_diverge = on_diverge.or_else(||{ earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next() }); let next = cfg.start_new_block(); cfg.terminate(block, Terminator::Drop { value: drop_data.value.clone(), target: next, unwind: on_diverge }); block = next; } block.unit() } fn build_diverge_scope<'tcx>(tcx: &ty::ctxt<'tcx>, cfg: &mut CFG<'tcx>, unit_temp: Lvalue<'tcx>, scope: &mut Scope<'tcx>, target: Option<BasicBlock>) -> BasicBlock { debug_assert!(!scope.drops.is_empty() || scope.free.is_some()); // First, we build the drops, iterating the drops array in reverse. We do that so that as soon // as we find a `cached_block`, we know that we’re finished and don’t need to do anything else. let mut previous = None; let mut last_drop_block = None; for drop_data in scope.drops.iter_mut().rev() { if let Some(cached_block) = drop_data.cached_block { if let Some((previous_block, previous_value)) = previous { cfg.terminate(previous_block, Terminator::Drop { value: previous_value, target: cached_block, unwind: None }); return last_drop_block.unwrap(); } else { return cached_block; } } else { let block = cfg.start_new_cleanup_block(); drop_data.cached_block = Some(block); if let Some((previous_block, previous_value)) = previous { cfg.terminate(previous_block, Terminator::Drop { value: previous_value, target: block, unwind: None }); } else { last_drop_block = Some(block); } previous = Some((block, drop_data.value.clone())); } } // Prepare the end target for this chain. let mut target = target.unwrap_or_else(||{ let b = cfg.start_new_cleanup_block(); cfg.terminate(b, Terminator::Resume); b }); // Then, build the free branching into the prepared target. 
if let Some(ref mut free_data) = scope.free { target = if let Some(cached_block) = free_data.cached_block { cached_block } else { let into = cfg.start_new_cleanup_block(); cfg.terminate(into, build_free(tcx, unit_temp, free_data, target)); free_data.cached_block = Some(into); into } }; if let Some((previous_block, previous_value)) = previous { // Finally, branch into that just-built `target` from the `previous_block`. cfg.terminate(previous_block, Terminator::Drop { value: previous_value, target: target, unwind: None }); last_drop_block.unwrap() } else { // If `previous.is_none()`, there were no drops in this scope – we return the // target. target } } fn build_free<'tcx>(tcx: &ty::ctxt<'tcx>, unit_temp: Lvalue<'tcx>, data: &FreeData<'tcx>, target: BasicBlock) -> Terminator<'tcx> { let free_func = tcx.lang_items.box_free_fn() .expect("box_free language item is missing"); let substs = tcx.mk_substs(Substs::new( VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]), VecPerParamSpace::new(vec![], vec![], vec![]) )); Terminator::Call { func: Operand::Constant(Constant { span: data.span, ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs), literal: Literal::Item { def_id: free_func, kind: ItemKind::Function, substs: substs } }), args: vec![Operand::Consume(data.value.clone())], destination: Some((unit_temp, target)), cleanup: None } }
use std::path::PathBuf; use crate::renderer_client::RendererClient; use crate::renderer_server::{TurtleId, ExportError}; use crate::radians::Radians; use crate::{Distance, Point, Color, Speed, Event, Size, async_turtle::AngleUnit, debug}; use super::{ ConnectionError, ClientRequest, ServerResponse, ExportFormat, DrawingProp, DrawingPropValue, TurtleProp, TurtlePropValue, PenProp, PenPropValue, RotationDirection, }; /// A wrapper for `RendererClient` that encodes the the IPC protocol in a type-safe manner pub struct ProtocolClient { client: RendererClient, } impl From<RendererClient> for ProtocolClient { fn from(client: RendererClient) -> Self { Self {client} } } impl ProtocolClient { /// Spawns a new server process and creates a connection to it pub async fn new() -> Result<Self, ConnectionError> { let client = RendererClient::new().await?; Ok(client.into()) } /// Creates a new renderer client that can also communicate to the same server pub async fn split(&self) -> Self { self.client.split().await.into() } pub async fn create_turtle(&self) -> TurtleId { self.client.send(ClientRequest::CreateTurtle); let response = self.client.recv().await; match response { ServerResponse::NewTurtle(id) => id, _ => unreachable!("bug: expected to receive `NewTurtle` in response to `CreateTurtle` request"), } } pub async fn export_svg(&self, path: PathBuf) -> Result<(), ExportError> { self.client.send(ClientRequest::Export(path, ExportFormat::Svg)); let response = self.client.recv().await; match response { ServerResponse::ExportComplete(res) => res, _ => unreachable!("bug: expected to receive `ExportComplete` in response to `Export` request"), } } pub async fn poll_event(&self) -> Option<Event> { self.client.send(ClientRequest::PollEvent); let response = self.client.recv().await; match response { ServerResponse::Event(event) => event, _ => unreachable!("bug: expected to receive `Event` in response to `NextEvent` request"), } } pub async fn drawing_title(&self) -> String { 
self.client.send(ClientRequest::DrawingProp(DrawingProp::Title)); let response = self.client.recv().await; match response { ServerResponse::DrawingProp(DrawingPropValue::Title(value)) => value, _ => unreachable!("bug: expected to receive `DrawingProp` in response to `DrawingProp` request"), } } pub async fn drawing_background(&self) -> Color { self.client.send(ClientRequest::DrawingProp(DrawingProp::Background)); let response = self.client.recv().await; match response { ServerResponse::DrawingProp(DrawingPropValue::Background(value)) => value, _ => unreachable!("bug: expected to receive `DrawingProp` in response to `DrawingProp` request"), } } pub async fn drawing_center(&self) -> Point { self.client.send(ClientRequest::DrawingProp(DrawingProp::Center)); let response = self.client.recv().await; match response { ServerResponse::DrawingProp(DrawingPropValue::Center(value)) => value, _ => unreachable!("bug: expected to receive `DrawingProp` in response to `DrawingProp` request"), } } pub async fn drawing_size(&self) -> Size { self.client.send(ClientRequest::DrawingProp(DrawingProp::Size)); let response = self.client.recv().await; match response { ServerResponse::DrawingProp(DrawingPropValue::Size(value)) => value, _ => unreachable!("bug: expected to receive `DrawingProp` in response to `DrawingProp` request"), } } pub async fn drawing_is_maximized(&self) -> bool { self.client.send(ClientRequest::DrawingProp(DrawingProp::IsMaximized)); let response = self.client.recv().await; match response { ServerResponse::DrawingProp(DrawingPropValue::IsMaximized(value)) => value, _ => unreachable!("bug: expected to receive `DrawingProp` in response to `DrawingProp` request"), } } pub async fn drawing_is_fullscreen(&self) -> bool { self.client.send(ClientRequest::DrawingProp(DrawingProp::IsFullscreen)); let response = self.client.recv().await; match response { ServerResponse::DrawingProp(DrawingPropValue::IsFullscreen(value)) => value, _ => unreachable!("bug: expected to receive 
`DrawingProp` in response to `DrawingProp` request"), } } pub fn drawing_set_title(&self, value: String) { self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::Title(value))) } pub fn drawing_set_background(&self, value: Color) { debug_assert!(value.is_valid(), "bug: colors should be validated before sending to renderer server"); self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::Background(value))) } pub fn drawing_set_center(&self, value: Point) { debug_assert!(value.is_finite(), "bug: center should be validated before sending to renderer server"); self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::Center(value))) } pub fn drawing_set_size(&self, value: Size) { debug_assert!(value.width > 0 && value.height > 0, "bug: size should be validated before sending to renderer server"); self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::Size(value))) } pub fn drawing_set_is_maximized(&self, value: bool) { self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::IsMaximized(value))) } pub fn drawing_set_is_fullscreen(&self, value: bool) { self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::IsFullscreen(value))) } pub fn drawing_reset_center(&self) { self.client.send(ClientRequest::ResetDrawingProp(DrawingProp::Center)) } pub fn drawing_reset_size(&self) { self.client.send(ClientRequest::ResetDrawingProp(DrawingProp::Size)) } pub async fn turtle_pen_is_enabled(&self, id: TurtleId) -> bool { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Pen(PenProp::IsEnabled))); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::Pen(PenPropValue::IsEnabled(value))) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_pen_thickness(&self, id: TurtleId) -> f64 { 
self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Pen(PenProp::Thickness))); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::Pen(PenPropValue::Thickness(value))) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_pen_color(&self, id: TurtleId) -> Color { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Pen(PenProp::Color))); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::Pen(PenPropValue::Color(value))) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_fill_color(&self, id: TurtleId) -> Color { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::FillColor)); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::FillColor(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_is_filling(&self, id: TurtleId) -> bool { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::IsFilling)); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::IsFilling(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_position(&self, id: TurtleId) -> Point { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Position)); let response = self.client.recv().await; match response { 
ServerResponse::TurtleProp(recv_id, TurtlePropValue::Position(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_heading(&self, id: TurtleId) -> Radians { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Heading)); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::Heading(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_speed(&self, id: TurtleId) -> Speed { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Speed)); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::Speed(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_is_visible(&self, id: TurtleId) -> bool { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::IsVisible)); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::IsVisible(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub fn turtle_pen_set_is_enabled(&self, id: TurtleId, value: bool) { self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::Pen(PenPropValue::IsEnabled(value)))) } pub fn turtle_pen_set_thickness(&self, id: TurtleId, value: f64) { debug_assert!(value >= 0.0 && value.is_finite(), "bug: pen size should be validated before sending to renderer server"); 
self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::Pen(PenPropValue::Thickness(value)))) } pub fn turtle_pen_set_color(&self, id: TurtleId, value: Color) { debug_assert!(value.is_valid(), "bug: colors should be validated before sending to renderer server"); self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::Pen(PenPropValue::Color(value)))) } pub fn turtle_set_fill_color(&self, id: TurtleId, value: Color) { debug_assert!(value.is_valid(), "bug: colors should be validated before sending to renderer server"); self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::FillColor(value))) } pub fn turtle_set_speed(&self, id: TurtleId, value: Speed) { self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::Speed(value))) } pub fn turtle_set_is_visible(&self, id: TurtleId, value: bool) { self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::IsVisible(value))) } pub fn turtle_reset_heading(&self, id: TurtleId) { self.client.send(ClientRequest::ResetTurtleProp(id, TurtleProp::Heading)) } pub fn reset_turtle(&self, id: TurtleId) { self.client.send(ClientRequest::ResetTurtle(id)) } pub async fn move_forward(&self, id: TurtleId, distance: Distance) { if !distance.is_normal() { return; } self.client.send(ClientRequest::MoveForward(id, distance)); let response = self.client.recv().await; match response { ServerResponse::AnimationComplete(recv_id) => { debug_assert_eq!(id, recv_id, "bug: notified of complete animation for incorrect turtle"); }, _ => unreachable!("bug: expected to receive `AnimationComplete` in response to `MoveForward` request"), } } pub async fn move_to(&self, id: TurtleId, target: Point) { if !target.is_finite() { return; } self.client.send(ClientRequest::MoveTo(id, target)); let response = self.client.recv().await; match response { ServerResponse::AnimationComplete(recv_id) => { debug_assert_eq!(id, recv_id, "bug: notified of complete animation for incorrect turtle"); }, _ => unreachable!("bug: 
expected to receive `AnimationComplete` in response to `MoveTo` request"), } } pub async fn rotate_in_place(&self, id: TurtleId, angle: Radians, direction: RotationDirection) { if !angle.is_normal() { return; } self.client.send(ClientRequest::RotateInPlace(id, angle, direction)); let response = self.client.recv().await; match response { ServerResponse::AnimationComplete(recv_id) => { debug_assert_eq!(id, recv_id, "bug: notified of complete animation for incorrect turtle"); }, _ => unreachable!("bug: expected to receive `AnimationComplete` in response to `RotateInPlace` request"), } } pub fn begin_fill(&self, id: TurtleId) { self.client.send(ClientRequest::BeginFill(id)) } pub fn end_fill(&self, id: TurtleId) { self.client.send(ClientRequest::EndFill(id)) } pub fn clear_all(&self) { self.client.send(ClientRequest::ClearAll) } pub fn clear_turtle(&self, id: TurtleId) { self.client.send(ClientRequest::ClearTurtle(id)) } pub async fn debug_turtle(&self, id: TurtleId, angle_unit: AngleUnit) -> debug::Turtle { self.client.send(ClientRequest::DebugTurtle(id, angle_unit)); let response = self.client.recv().await; match response { ServerResponse::DebugTurtle(recv_id, state) => { debug_assert_eq!(id, recv_id, "bug: received debug turtle for incorrect turtle"); state }, _ => unreachable!("bug: expected to receive `DebugTurtle` in response to `DebugTurtle` request"), } } pub async fn debug_drawing(&self) -> debug::Drawing { self.client.send(ClientRequest::DebugDrawing); let response = self.client.recv().await; match response { ServerResponse::DebugDrawing(state) => { state }, _ => unreachable!("bug: expected to receive `DebugDrawing` in response to `DebugDrawing` request"), } } } Add circular_arc method on ProtocolClient Classical implementation for now: splits the arc in many forward movements and rotations. 
Signed-off-by: Paul Mabileau <a3aedf8c87c1af3dfeae5f57ddf6d5ae82ad981f@hotmail.fr> use std::path::PathBuf; use crate::renderer_client::RendererClient; use crate::renderer_server::{TurtleId, ExportError}; use crate::radians::Radians; use crate::{Distance, Point, Color, Speed, Event, Size, async_turtle::AngleUnit, debug}; use super::{ ConnectionError, ClientRequest, ServerResponse, ExportFormat, DrawingProp, DrawingPropValue, TurtleProp, TurtlePropValue, PenProp, PenPropValue, RotationDirection, }; /// A wrapper for `RendererClient` that encodes the the IPC protocol in a type-safe manner pub struct ProtocolClient { client: RendererClient, } impl From<RendererClient> for ProtocolClient { fn from(client: RendererClient) -> Self { Self {client} } } impl ProtocolClient { /// Spawns a new server process and creates a connection to it pub async fn new() -> Result<Self, ConnectionError> { let client = RendererClient::new().await?; Ok(client.into()) } /// Creates a new renderer client that can also communicate to the same server pub async fn split(&self) -> Self { self.client.split().await.into() } pub async fn create_turtle(&self) -> TurtleId { self.client.send(ClientRequest::CreateTurtle); let response = self.client.recv().await; match response { ServerResponse::NewTurtle(id) => id, _ => unreachable!("bug: expected to receive `NewTurtle` in response to `CreateTurtle` request"), } } pub async fn export_svg(&self, path: PathBuf) -> Result<(), ExportError> { self.client.send(ClientRequest::Export(path, ExportFormat::Svg)); let response = self.client.recv().await; match response { ServerResponse::ExportComplete(res) => res, _ => unreachable!("bug: expected to receive `ExportComplete` in response to `Export` request"), } } pub async fn poll_event(&self) -> Option<Event> { self.client.send(ClientRequest::PollEvent); let response = self.client.recv().await; match response { ServerResponse::Event(event) => event, _ => unreachable!("bug: expected to receive `Event` in response to 
`NextEvent` request"), } } pub async fn drawing_title(&self) -> String { self.client.send(ClientRequest::DrawingProp(DrawingProp::Title)); let response = self.client.recv().await; match response { ServerResponse::DrawingProp(DrawingPropValue::Title(value)) => value, _ => unreachable!("bug: expected to receive `DrawingProp` in response to `DrawingProp` request"), } } pub async fn drawing_background(&self) -> Color { self.client.send(ClientRequest::DrawingProp(DrawingProp::Background)); let response = self.client.recv().await; match response { ServerResponse::DrawingProp(DrawingPropValue::Background(value)) => value, _ => unreachable!("bug: expected to receive `DrawingProp` in response to `DrawingProp` request"), } } pub async fn drawing_center(&self) -> Point { self.client.send(ClientRequest::DrawingProp(DrawingProp::Center)); let response = self.client.recv().await; match response { ServerResponse::DrawingProp(DrawingPropValue::Center(value)) => value, _ => unreachable!("bug: expected to receive `DrawingProp` in response to `DrawingProp` request"), } } pub async fn drawing_size(&self) -> Size { self.client.send(ClientRequest::DrawingProp(DrawingProp::Size)); let response = self.client.recv().await; match response { ServerResponse::DrawingProp(DrawingPropValue::Size(value)) => value, _ => unreachable!("bug: expected to receive `DrawingProp` in response to `DrawingProp` request"), } } pub async fn drawing_is_maximized(&self) -> bool { self.client.send(ClientRequest::DrawingProp(DrawingProp::IsMaximized)); let response = self.client.recv().await; match response { ServerResponse::DrawingProp(DrawingPropValue::IsMaximized(value)) => value, _ => unreachable!("bug: expected to receive `DrawingProp` in response to `DrawingProp` request"), } } pub async fn drawing_is_fullscreen(&self) -> bool { self.client.send(ClientRequest::DrawingProp(DrawingProp::IsFullscreen)); let response = self.client.recv().await; match response { 
ServerResponse::DrawingProp(DrawingPropValue::IsFullscreen(value)) => value, _ => unreachable!("bug: expected to receive `DrawingProp` in response to `DrawingProp` request"), } } pub fn drawing_set_title(&self, value: String) { self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::Title(value))) } pub fn drawing_set_background(&self, value: Color) { debug_assert!(value.is_valid(), "bug: colors should be validated before sending to renderer server"); self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::Background(value))) } pub fn drawing_set_center(&self, value: Point) { debug_assert!(value.is_finite(), "bug: center should be validated before sending to renderer server"); self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::Center(value))) } pub fn drawing_set_size(&self, value: Size) { debug_assert!(value.width > 0 && value.height > 0, "bug: size should be validated before sending to renderer server"); self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::Size(value))) } pub fn drawing_set_is_maximized(&self, value: bool) { self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::IsMaximized(value))) } pub fn drawing_set_is_fullscreen(&self, value: bool) { self.client.send(ClientRequest::SetDrawingProp(DrawingPropValue::IsFullscreen(value))) } pub fn drawing_reset_center(&self) { self.client.send(ClientRequest::ResetDrawingProp(DrawingProp::Center)) } pub fn drawing_reset_size(&self) { self.client.send(ClientRequest::ResetDrawingProp(DrawingProp::Size)) } pub async fn turtle_pen_is_enabled(&self, id: TurtleId) -> bool { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Pen(PenProp::IsEnabled))); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::Pen(PenPropValue::IsEnabled(value))) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to 
`TurtleProp` request"), } } pub async fn turtle_pen_thickness(&self, id: TurtleId) -> f64 { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Pen(PenProp::Thickness))); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::Pen(PenPropValue::Thickness(value))) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_pen_color(&self, id: TurtleId) -> Color { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Pen(PenProp::Color))); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::Pen(PenPropValue::Color(value))) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_fill_color(&self, id: TurtleId) -> Color { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::FillColor)); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::FillColor(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_is_filling(&self, id: TurtleId) -> bool { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::IsFilling)); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::IsFilling(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_position(&self, id: TurtleId) -> Point { 
self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Position)); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::Position(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_heading(&self, id: TurtleId) -> Radians { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Heading)); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::Heading(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_speed(&self, id: TurtleId) -> Speed { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::Speed)); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::Speed(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub async fn turtle_is_visible(&self, id: TurtleId) -> bool { self.client.send(ClientRequest::TurtleProp(id, TurtleProp::IsVisible)); let response = self.client.recv().await; match response { ServerResponse::TurtleProp(recv_id, TurtlePropValue::IsVisible(value)) => { debug_assert_eq!(id, recv_id, "bug: received data for incorrect turtle"); value }, _ => unreachable!("bug: expected to receive `TurtleProp` in response to `TurtleProp` request"), } } pub fn turtle_pen_set_is_enabled(&self, id: TurtleId, value: bool) { self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::Pen(PenPropValue::IsEnabled(value)))) } pub fn turtle_pen_set_thickness(&self, id: TurtleId, value: f64) { debug_assert!(value >= 0.0 && 
value.is_finite(), "bug: pen size should be validated before sending to renderer server"); self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::Pen(PenPropValue::Thickness(value)))) } pub fn turtle_pen_set_color(&self, id: TurtleId, value: Color) { debug_assert!(value.is_valid(), "bug: colors should be validated before sending to renderer server"); self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::Pen(PenPropValue::Color(value)))) } pub fn turtle_set_fill_color(&self, id: TurtleId, value: Color) { debug_assert!(value.is_valid(), "bug: colors should be validated before sending to renderer server"); self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::FillColor(value))) } pub fn turtle_set_speed(&self, id: TurtleId, value: Speed) { self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::Speed(value))) } pub fn turtle_set_is_visible(&self, id: TurtleId, value: bool) { self.client.send(ClientRequest::SetTurtleProp(id, TurtlePropValue::IsVisible(value))) } pub fn turtle_reset_heading(&self, id: TurtleId) { self.client.send(ClientRequest::ResetTurtleProp(id, TurtleProp::Heading)) } pub fn reset_turtle(&self, id: TurtleId) { self.client.send(ClientRequest::ResetTurtle(id)) } pub async fn move_forward(&self, id: TurtleId, distance: Distance) { if !distance.is_normal() { return; } self.client.send(ClientRequest::MoveForward(id, distance)); let response = self.client.recv().await; match response { ServerResponse::AnimationComplete(recv_id) => { debug_assert_eq!(id, recv_id, "bug: notified of complete animation for incorrect turtle"); }, _ => unreachable!("bug: expected to receive `AnimationComplete` in response to `MoveForward` request"), } } pub async fn move_to(&self, id: TurtleId, target: Point) { if !target.is_finite() { return; } self.client.send(ClientRequest::MoveTo(id, target)); let response = self.client.recv().await; match response { ServerResponse::AnimationComplete(recv_id) => { debug_assert_eq!(id, recv_id, 
"bug: notified of complete animation for incorrect turtle"); }, _ => unreachable!("bug: expected to receive `AnimationComplete` in response to `MoveTo` request"), } } pub async fn rotate_in_place(&self, id: TurtleId, angle: Radians, direction: RotationDirection) { if !angle.is_normal() { return; } self.client.send(ClientRequest::RotateInPlace(id, angle, direction)); let response = self.client.recv().await; match response { ServerResponse::AnimationComplete(recv_id) => { debug_assert_eq!(id, recv_id, "bug: notified of complete animation for incorrect turtle"); }, _ => unreachable!("bug: expected to receive `AnimationComplete` in response to `RotateInPlace` request"), } } pub async fn circular_arc(&self, id: TurtleId, radius: Distance, extent: Radians, direction: RotationDirection) { if radius.is_normal() && extent.is_normal() { let steps = 250; // Arbitrary value for now. let step = radius.abs() * extent.to_radians() / steps as f64; let rotation = radius.signum() * extent / steps as f64; for _ in 0..steps { self.move_forward(id, step).await; self.rotate_in_place(id, rotation, direction).await; } } } pub fn begin_fill(&self, id: TurtleId) { self.client.send(ClientRequest::BeginFill(id)) } pub fn end_fill(&self, id: TurtleId) { self.client.send(ClientRequest::EndFill(id)) } pub fn clear_all(&self) { self.client.send(ClientRequest::ClearAll) } pub fn clear_turtle(&self, id: TurtleId) { self.client.send(ClientRequest::ClearTurtle(id)) } pub async fn debug_turtle(&self, id: TurtleId, angle_unit: AngleUnit) -> debug::Turtle { self.client.send(ClientRequest::DebugTurtle(id, angle_unit)); let response = self.client.recv().await; match response { ServerResponse::DebugTurtle(recv_id, state) => { debug_assert_eq!(id, recv_id, "bug: received debug turtle for incorrect turtle"); state }, _ => unreachable!("bug: expected to receive `DebugTurtle` in response to `DebugTurtle` request"), } } pub async fn debug_drawing(&self) -> debug::Drawing { 
self.client.send(ClientRequest::DebugDrawing); let response = self.client.recv().await; match response { ServerResponse::DebugDrawing(state) => { state }, _ => unreachable!("bug: expected to receive `DebugDrawing` in response to `DebugDrawing` request"), } } }
extern crate rustbox; extern crate yoga; extern crate yoga_rustbox; extern crate yoga_wrapper; use yoga::{Backend, Builds, Renders, Renderable}; use std::error::Error; use std::default::Default; use rustbox::RustBox; use rustbox::Key; fn main() { let rustbox = match RustBox::init(Default::default()) { Result::Ok(v) => v, Result::Err(e) => panic!("{}", e), }; let builder = yoga_rustbox::Builder::new(); let mut text = builder.text("Yo!"); text.set_height(3.0); text.set_align_self(yoga_wrapper::Align::Center); text.set_flex_grow(1.0); let mut image = builder.view(); image.set_width(8.0); image.set_margin(yoga_wrapper::Edge::End, 2.0); let mut root = builder.view(); root.set_width(50.0); root.set_height(12.0); root.set_flex_direction(yoga_wrapper::FlexDirection::Row); root.set_padding(yoga_wrapper::Edge::All, 2.0); root.insert_child(&image, 0); root.insert_child(&text, 1); root.calculate_layout(); yoga_rustbox::Backend::new(&rustbox).render(&root); loop { match rustbox.poll_event(false) { Ok(rustbox::Event::KeyEvent(key)) => { match key { Key::Char('q') => { break; } _ => {} } } Err(e) => panic!("{}", e.description()), _ => {} } } } feat(example): add bg color to c example extern crate rustbox; extern crate yoga; extern crate yoga_rustbox; extern crate yoga_wrapper; use yoga::{Backend, Builds, Renders, Renderable}; use std::error::Error; use std::default::Default; use rustbox::RustBox; use rustbox::Key; fn main() { let rustbox = match RustBox::init(Default::default()) { Result::Ok(v) => v, Result::Err(e) => panic!("{}", e), }; let builder = yoga_rustbox::Builder::new(); let mut text = builder.text("Yo!"); text.set_height(3.0); text.set_align_self(yoga_wrapper::Align::Center); text.set_flex_grow(1.0); text.set_background_color(Some(yoga::style::BackgroundColor::Color(rustbox::Color::Cyan))); let mut image = builder.view(); image.set_width(8.0); image.set_margin(yoga_wrapper::Edge::End, 2.0); 
image.set_background_color(Some(yoga::style::BackgroundColor::Color(rustbox::Color::Cyan))); let mut root = builder.view(); root.set_width(50.0); root.set_height(12.0); root.set_flex_direction(yoga_wrapper::FlexDirection::Row); root.set_padding(yoga_wrapper::Edge::All, 2.0); root.set_background_color(Some(yoga::style::BackgroundColor::Color(rustbox::Color::White))); root.insert_child(&image, 0); root.insert_child(&text, 1); root.calculate_layout(); yoga_rustbox::Backend::new(&rustbox).render(&root); loop { match rustbox.poll_event(false) { Ok(rustbox::Event::KeyEvent(key)) => { match key { Key::Char('q') => { break; } _ => {} } } Err(e) => panic!("{}", e.description()), _ => {} } } }
#![deny(unreachable_code)] use futures::{try_join, executor::block_on}; // TODO: This abuses https://github.com/rust-lang/rust/issues/58733 in order to // test behaviour of the `try_join!` macro with the never type before it is // stabilized. Once `!` is again stabilized this can be removed and replaced // with direct use of `!` below where `Never` is used. trait MyTrait { type Output; } impl<T> MyTrait for fn() -> T { type Output = T; } type Never = <fn() -> ! as MyTrait>::Output; #[test] fn try_join_never_error() { block_on(async { let future1 = async { Ok::<(), Never>(()) }; let future2 = async { Ok::<(), Never>(()) }; try_join!(future1, future2) }) .unwrap(); } #[test] fn try_join_never_ok() { block_on(async { let future1 = async { Err::<Never, ()>(()) }; let future2 = async { Err::<Never, ()>(()) }; try_join!(future1, future2) }) .unwrap_err(); } futures: tests/try_join.rs: Don't break w/o features Only really appropriate action here is to gate the whole file with `#![cfg()]` Tests now build and pass with all valid feature combinations See previous commits for details and rationale #![cfg(feature = "executor")] // executor:: #![cfg(feature = "async-await")] // try_join! #![deny(unreachable_code)] use futures::{try_join, executor::block_on}; // TODO: This abuses https://github.com/rust-lang/rust/issues/58733 in order to // test behaviour of the `try_join!` macro with the never type before it is // stabilized. Once `!` is again stabilized this can be removed and replaced // with direct use of `!` below where `Never` is used. trait MyTrait { type Output; } impl<T> MyTrait for fn() -> T { type Output = T; } type Never = <fn() -> ! 
as MyTrait>::Output; #[test] fn try_join_never_error() { block_on(async { let future1 = async { Ok::<(), Never>(()) }; let future2 = async { Ok::<(), Never>(()) }; try_join!(future1, future2) }) .unwrap(); } #[test] fn try_join_never_ok() { block_on(async { let future1 = async { Err::<Never, ()>(()) }; let future2 = async { Err::<Never, ()>(()) }; try_join!(future1, future2) }) .unwrap_err(); }
//! Deserialize JSON data to a Rust data structure. use std::{i32, u64}; use std::io; use std::marker::PhantomData; use serde::de::{self, Unexpected}; use super::error::{Error, ErrorCode, Result}; use read; pub use read::{Read, IteratorRead, SliceRead, StrRead}; ////////////////////////////////////////////////////////////////////////////// /// A structure that deserializes JSON into Rust values. pub struct Deserializer<R> { read: R, str_buf: Vec<u8>, remaining_depth: u8, } impl<R> Deserializer<R> where R: read::Read { /// Create a JSON deserializer from one of the possible serde_json input /// sources. /// /// Typically it is more convenient to use one of these methods instead: /// /// - Deserializer::from_str /// - Deserializer::from_bytes /// - Deserializer::from_iter /// - Deserializer::from_reader pub fn new(read: R) -> Self { Deserializer { read: read, str_buf: Vec::with_capacity(128), remaining_depth: 128, } } } impl<I> Deserializer<read::IteratorRead<I>> where I: Iterator<Item = io::Result<u8>> { /// Creates a JSON deserializer from a `std::iter::Iterator`. pub fn from_iter(iter: I) -> Self { Deserializer::new(read::IteratorRead::new(iter)) } } impl<R> Deserializer<read::IoRead<R>> where R: io::Read { /// Creates a JSON deserializer from an `io::Read`. pub fn from_reader(reader: R) -> Self { Deserializer::new(read::IoRead::new(reader)) } } impl<'a> Deserializer<read::SliceRead<'a>> { /// Creates a JSON deserializer from a `&[u8]`. pub fn from_slice(bytes: &'a [u8]) -> Self { Deserializer::new(read::SliceRead::new(bytes)) } } impl<'a> Deserializer<read::StrRead<'a>> { /// Creates a JSON deserializer from a `&str`. pub fn from_str(s: &'a str) -> Self { Deserializer::new(read::StrRead::new(s)) } } macro_rules! overflow { ($a:ident * 10 + $b:ident, $c:expr) => { $a >= $c / 10 && ($a > $c / 10 || $b > $c % 10) } } impl<R: Read> Deserializer<R> { /// The `Deserializer::end` method should be called after a value has been fully deserialized. 
/// This allows the `Deserializer` to validate that the input stream is at the end or that it /// only has trailing whitespace. pub fn end(&mut self) -> Result<()> { if try!(self.parse_whitespace()) { // true if eof Ok(()) } else { Err(self.peek_error(ErrorCode::TrailingCharacters)) } } /// Turn a JSON deserializer into an iterator over values of type T. pub fn into_iter<T>(self) -> StreamDeserializer<R, T> where T: de::Deserialize { // This cannot be an implementation of std::iter::IntoIterator because // we need the caller to choose what T is. StreamDeserializer { de: self, _marker: PhantomData, } } fn peek(&mut self) -> Result<Option<u8>> { self.read.peek().map_err(Into::into) } fn peek_or_null(&mut self) -> Result<u8> { Ok(try!(self.peek()).unwrap_or(b'\x00')) } fn eat_char(&mut self) { self.read.discard(); } fn next_char(&mut self) -> Result<Option<u8>> { self.read.next().map_err(Into::into) } fn next_char_or_null(&mut self) -> Result<u8> { Ok(try!(self.next_char()).unwrap_or(b'\x00')) } /// Error caused by a byte from next_char(). fn error(&mut self, reason: ErrorCode) -> Error { let pos = self.read.position(); Error::syntax(reason, pos.line, pos.column) } /// Error caused by a byte from peek(). fn peek_error(&mut self, reason: ErrorCode) -> Error { let pos = self.read.peek_position(); Error::syntax(reason, pos.line, pos.column) } /// Consume whitespace until the next non-whitespace character. /// /// Return `Ok(true)` if EOF was encountered in the process and `Ok(false)` otherwise. 
fn parse_whitespace(&mut self) -> Result<bool> { loop { match try!(self.peek()) { Some(b) => match b { b' ' | b'\n' | b'\t' | b'\r' => { self.eat_char(); } _ => { return Ok(false); } }, None => return Ok(true), } } } fn parse_value<V>(&mut self, visitor: V) -> Result<V::Value> where V: de::Visitor, { if try!(self.parse_whitespace()) { // true if eof return Err(self.peek_error(ErrorCode::EOFWhileParsingValue)); } let value = match try!(self.peek_or_null()) { b'n' => { self.eat_char(); try!(self.parse_ident(b"ull")); visitor.visit_unit() } b't' => { self.eat_char(); try!(self.parse_ident(b"rue")); visitor.visit_bool(true) } b'f' => { self.eat_char(); try!(self.parse_ident(b"alse")); visitor.visit_bool(false) } b'-' => { self.eat_char(); self.parse_integer(false, visitor) } b'0'...b'9' => self.parse_integer(true, visitor), b'"' => { self.eat_char(); self.str_buf.clear(); let s = try!(self.read.parse_str(&mut self.str_buf)); visitor.visit_str(s) } b'[' => { self.remaining_depth -= 1; if self.remaining_depth == 0 { return Err(self.peek_error(ErrorCode::RecursionLimitExceeded)); } self.eat_char(); let ret = visitor.visit_seq(SeqVisitor::new(self)); self.remaining_depth += 1; match (ret, self.end_seq()) { (Ok(ret), Ok(())) => Ok(ret), (Err(err), _) | (_, Err(err)) => Err(err), } } b'{' => { self.remaining_depth -= 1; if self.remaining_depth == 0 { return Err(self.peek_error(ErrorCode::RecursionLimitExceeded)); } self.eat_char(); let ret = visitor.visit_map(MapVisitor::new(self)); self.remaining_depth += 1; match (ret, self.end_map()) { (Ok(ret), Ok(())) => Ok(ret), (Err(err), _) | (_, Err(err)) => Err(err), } } _ => Err(self.peek_error(ErrorCode::ExpectedSomeValue)), }; match value { Ok(value) => Ok(value), // The de::Error and From<de::value::Error> impls both create errors // with unknown line and column. Fill in the position here by // looking at the current index in the input. 
There is no way to // tell whether this should call `error` or `peek_error` so pick the // one that seems correct more often. Worst case, the position is // off by one character. Err(err) => Err(err.fix_position(|code| self.error(code))), } } fn parse_ident(&mut self, ident: &[u8]) -> Result<()> { for c in ident { if Some(*c) != try!(self.next_char()) { return Err(self.error(ErrorCode::ExpectedSomeIdent)); } } Ok(()) } fn parse_integer<V>(&mut self, pos: bool, visitor: V) -> Result<V::Value> where V: de::Visitor, { match try!(self.next_char_or_null()) { b'0' => { // There can be only one leading '0'. match try!(self.peek_or_null()) { b'0'...b'9' => { Err(self.peek_error(ErrorCode::InvalidNumber)) } _ => self.parse_number(pos, 0, visitor), } } c @ b'1'...b'9' => { let mut res = (c - b'0') as u64; loop { match try!(self.peek_or_null()) { c @ b'0'...b'9' => { self.eat_char(); let digit = (c - b'0') as u64; // We need to be careful with overflow. If we can, try to keep the // number as a `u64` until we grow too large. At that point, switch to // parsing the value as a `f64`. if overflow!(res * 10 + digit, u64::MAX) { return self.parse_long_integer(pos, res, 1, // res * 10^1 visitor); } res = res * 10 + digit; } _ => { return self.parse_number(pos, res, visitor); } } } } _ => Err(self.error(ErrorCode::InvalidNumber)), } } fn parse_long_integer<V>( &mut self, pos: bool, significand: u64, mut exponent: i32, visitor: V ) -> Result<V::Value> where V: de::Visitor, { loop { match try!(self.peek_or_null()) { b'0'...b'9' => { self.eat_char(); // This could overflow... if your integer is gigabytes long. // Ignore that possibility. exponent += 1; } b'.' 
=> { return self.parse_decimal(pos, significand, exponent, visitor); } b'e' | b'E' => { return self.parse_exponent(pos, significand, exponent, visitor); } _ => { return self.visit_f64_from_parts(pos, significand, exponent, visitor); } } } } fn parse_number<V>( &mut self, pos: bool, significand: u64, visitor: V ) -> Result<V::Value> where V: de::Visitor, { match try!(self.peek_or_null()) { b'.' => self.parse_decimal(pos, significand, 0, visitor), b'e' | b'E' => self.parse_exponent(pos, significand, 0, visitor), _ => { if pos { visitor.visit_u64(significand) } else { let neg = (significand as i64).wrapping_neg(); // Convert into a float if we underflow. if neg > 0 { visitor.visit_f64(-(significand as f64)) } else { visitor.visit_i64(neg) } } } } } fn parse_decimal<V>( &mut self, pos: bool, mut significand: u64, mut exponent: i32, visitor: V ) -> Result<V::Value> where V: de::Visitor, { self.eat_char(); let mut at_least_one_digit = false; while let c @ b'0'...b'9' = try!(self.peek_or_null()) { self.eat_char(); let digit = (c - b'0') as u64; at_least_one_digit = true; if overflow!(significand * 10 + digit, u64::MAX) { // The next multiply/add would overflow, so just ignore all // further digits. while let b'0'...b'9' = try!(self.peek_or_null()) { self.eat_char(); } break; } significand = significand * 10 + digit; exponent -= 1; } if !at_least_one_digit { return Err(self.peek_error(ErrorCode::InvalidNumber)); } match try!(self.peek_or_null()) { b'e' | b'E' => { self.parse_exponent(pos, significand, exponent, visitor) } _ => self.visit_f64_from_parts(pos, significand, exponent, visitor), } } fn parse_exponent<V>( &mut self, pos: bool, significand: u64, starting_exp: i32, visitor: V ) -> Result<V::Value> where V: de::Visitor, { self.eat_char(); let pos_exp = match try!(self.peek_or_null()) { b'+' => { self.eat_char(); true } b'-' => { self.eat_char(); false } _ => true, }; // Make sure a digit follows the exponent place. 
let mut exp = match try!(self.next_char_or_null()) { c @ b'0'...b'9' => (c - b'0') as i32, _ => { return Err(self.error(ErrorCode::InvalidNumber)); } }; while let c @ b'0'...b'9' = try!(self.peek_or_null()) { self.eat_char(); let digit = (c - b'0') as i32; if overflow!(exp * 10 + digit, i32::MAX) { return self.parse_exponent_overflow(pos, significand, pos_exp, visitor); } exp = exp * 10 + digit; } let final_exp = if pos_exp { starting_exp.saturating_add(exp) } else { starting_exp.saturating_sub(exp) }; self.visit_f64_from_parts(pos, significand, final_exp, visitor) } // This cold code should not be inlined into the middle of the hot // exponent-parsing loop above. #[cold] #[inline(never)] fn parse_exponent_overflow<V>( &mut self, pos: bool, significand: u64, pos_exp: bool, visitor: V ) -> Result<V::Value> where V: de::Visitor, { // Error instead of +/- infinity. if significand != 0 && pos_exp { return Err(self.error(ErrorCode::NumberOutOfRange)); } while let b'0'...b'9' = try!(self.peek_or_null()) { self.eat_char(); } visitor.visit_f64(if pos { 0.0 } else { -0.0 }) } fn visit_f64_from_parts<V>( &mut self, pos: bool, significand: u64, mut exponent: i32, visitor: V ) -> Result<V::Value> where V: de::Visitor, { let mut f = significand as f64; loop { match POW10.get(exponent.abs() as usize) { Some(&pow) => { if exponent >= 0 { f *= pow; if f.is_infinite() { return Err(self.error(ErrorCode::NumberOutOfRange)); } } else { f /= pow; } break; } None => { if f == 0.0 { break; } if exponent >= 0 { return Err(self.error(ErrorCode::NumberOutOfRange)); } f /= 1e308; exponent += 308; } } } visitor.visit_f64(if pos { f } else { -f }) } fn parse_object_colon(&mut self) -> Result<()> { try!(self.parse_whitespace()); match try!(self.peek()) { Some(b':') => { self.eat_char(); Ok(()) } Some(_) => Err(self.peek_error(ErrorCode::ExpectedColon)), None => Err(self.peek_error(ErrorCode::EOFWhileParsingObject)), } } fn end_seq(&mut self) -> Result<()> { try!(self.parse_whitespace()); match 
try!(self.next_char()) { Some(b']') => Ok(()), Some(_) => Err(self.error(ErrorCode::TrailingCharacters)), None => Err(self.error(ErrorCode::EOFWhileParsingList)), } } fn end_map(&mut self) -> Result<()> { try!(self.parse_whitespace()); match try!(self.next_char()) { Some(b'}') => Ok(()), Some(_) => Err(self.error(ErrorCode::TrailingCharacters)), None => Err(self.error(ErrorCode::EOFWhileParsingObject)), } } } static POW10: [f64; 309] = [1e000, 1e001, 1e002, 1e003, 1e004, 1e005, 1e006, 1e007, 1e008, 1e009, 1e010, 1e011, 1e012, 1e013, 1e014, 1e015, 1e016, 1e017, 1e018, 1e019, 1e020, 1e021, 1e022, 1e023, 1e024, 1e025, 1e026, 1e027, 1e028, 1e029, 1e030, 1e031, 1e032, 1e033, 1e034, 1e035, 1e036, 1e037, 1e038, 1e039, 1e040, 1e041, 1e042, 1e043, 1e044, 1e045, 1e046, 1e047, 1e048, 1e049, 1e050, 1e051, 1e052, 1e053, 1e054, 1e055, 1e056, 1e057, 1e058, 1e059, 1e060, 1e061, 1e062, 1e063, 1e064, 1e065, 1e066, 1e067, 1e068, 1e069, 1e070, 1e071, 1e072, 1e073, 1e074, 1e075, 1e076, 1e077, 1e078, 1e079, 1e080, 1e081, 1e082, 1e083, 1e084, 1e085, 1e086, 1e087, 1e088, 1e089, 1e090, 1e091, 1e092, 1e093, 1e094, 1e095, 1e096, 1e097, 1e098, 1e099, 1e100, 1e101, 1e102, 1e103, 1e104, 1e105, 1e106, 1e107, 1e108, 1e109, 1e110, 1e111, 1e112, 1e113, 1e114, 1e115, 1e116, 1e117, 1e118, 1e119, 1e120, 1e121, 1e122, 1e123, 1e124, 1e125, 1e126, 1e127, 1e128, 1e129, 1e130, 1e131, 1e132, 1e133, 1e134, 1e135, 1e136, 1e137, 1e138, 1e139, 1e140, 1e141, 1e142, 1e143, 1e144, 1e145, 1e146, 1e147, 1e148, 1e149, 1e150, 1e151, 1e152, 1e153, 1e154, 1e155, 1e156, 1e157, 1e158, 1e159, 1e160, 1e161, 1e162, 1e163, 1e164, 1e165, 1e166, 1e167, 1e168, 1e169, 1e170, 1e171, 1e172, 1e173, 1e174, 1e175, 1e176, 1e177, 1e178, 1e179, 1e180, 1e181, 1e182, 1e183, 1e184, 1e185, 1e186, 1e187, 1e188, 1e189, 1e190, 1e191, 1e192, 1e193, 1e194, 1e195, 1e196, 1e197, 1e198, 1e199, 1e200, 1e201, 1e202, 1e203, 1e204, 1e205, 1e206, 1e207, 1e208, 1e209, 1e210, 1e211, 1e212, 1e213, 1e214, 1e215, 1e216, 1e217, 1e218, 1e219, 1e220, 1e221, 
1e222, 1e223, 1e224, 1e225, 1e226, 1e227, 1e228, 1e229, 1e230, 1e231, 1e232, 1e233, 1e234, 1e235, 1e236, 1e237, 1e238, 1e239, 1e240, 1e241, 1e242, 1e243, 1e244, 1e245, 1e246, 1e247, 1e248, 1e249, 1e250, 1e251, 1e252, 1e253, 1e254, 1e255, 1e256, 1e257, 1e258, 1e259, 1e260, 1e261, 1e262, 1e263, 1e264, 1e265, 1e266, 1e267, 1e268, 1e269, 1e270, 1e271, 1e272, 1e273, 1e274, 1e275, 1e276, 1e277, 1e278, 1e279, 1e280, 1e281, 1e282, 1e283, 1e284, 1e285, 1e286, 1e287, 1e288, 1e289, 1e290, 1e291, 1e292, 1e293, 1e294, 1e295, 1e296, 1e297, 1e298, 1e299, 1e300, 1e301, 1e302, 1e303, 1e304, 1e305, 1e306, 1e307, 1e308]; impl<'a, R: Read> de::Deserializer for &'a mut Deserializer<R> { type Error = Error; #[inline] fn deserialize<V>(self, visitor: V) -> Result<V::Value> where V: de::Visitor, { self.parse_value(visitor) } /// Parses a `null` as a None, and any other values as a `Some(...)`. #[inline] fn deserialize_option<V>(self, visitor: V) -> Result<V::Value> where V: de::Visitor, { try!(self.parse_whitespace()); match try!(self.peek_or_null()) { b'n' => { self.eat_char(); try!(self.parse_ident(b"ull")); visitor.visit_none() } _ => visitor.visit_some(self), } } /// Parses a newtype struct as the underlying value. #[inline] fn deserialize_newtype_struct<V>( self, _name: &str, visitor: V ) -> Result<V::Value> where V: de::Visitor, { visitor.visit_newtype_struct(self) } /// Parses an enum as an object like `{"$KEY":$VALUE}`, where $VALUE is either a straight /// value, a `[..]`, or a `{..}`. 
#[inline] fn deserialize_enum<V>( self, _name: &str, _variants: &'static [&'static str], visitor: V ) -> Result<V::Value> where V: de::Visitor, { try!(self.parse_whitespace()); match try!(self.peek_or_null()) { b'{' => { self.remaining_depth -= 1; if self.remaining_depth == 0 { return Err(self.peek_error(ErrorCode::RecursionLimitExceeded)); } self.eat_char(); let value = try!(visitor.visit_enum(VariantVisitor::new(self))); self.remaining_depth += 1; try!(self.parse_whitespace()); match try!(self.next_char_or_null()) { b'}' => Ok(value), _ => Err(self.error(ErrorCode::ExpectedSomeValue)), } } b'"' => visitor.visit_enum(UnitVariantVisitor::new(self)), _ => Err(self.peek_error(ErrorCode::ExpectedSomeValue)), } } /// Parses a JSON string as bytes. Note that this function does not /// check whether the bytes represent valid unicode code points. /// /// The JSON specification requires that strings only contain valid /// unicode characters. To deal with non-conforming JSON, you may use /// this function, which attempts to parse a string without checking /// whether the bytes represent valid unicode code points. /// /// Escape sequences are processed as usual, and for `\uXXXX` escapes /// it is still checked if the hex number represents a valid unicode /// code point. 
/// /// # Example usage /// /// You can use this to parse JSON strings containing non-unicode bytes: /// /// ``` /// # extern crate serde; /// # extern crate serde_json; /// # /// let bytes = serde::bytes::ByteBuf::from(b"some raw bytes: \xe5\x00\xe5".to_vec()); /// let parsed = serde_json::from_slice( b"\"some raw bytes: \xe5\x00\xe5\"").unwrap(); /// /// assert_eq!(bytes, parsed); /// ``` /// /// `\u` escape sequences with invalid unicode code points still fail to parse: /// /// ``` /// # extern crate serde; /// # extern crate serde_json; /// # /// let json = "\"invalid unicode surrogate: \\uD801\""; /// let parsed: Result<serde::bytes::ByteBuf, _> = serde_json::from_str(json); /// assert!(parsed.is_err(), "{} should not parse: {:?}", json, parsed); /// ``` fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value> where V: de::Visitor { try!(self.parse_whitespace()); let next = try!(self.peek()); let next = try!(next.ok_or(self.peek_error(ErrorCode::ExpectedSomeString))); match next { b'"' => { self.eat_char(); let slice = try!(self.read.parse_str_raw(&mut self.str_buf)); visitor.visit_bytes(slice) } _ => self.deserialize(visitor), } } fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value> where V: de::Visitor { self.deserialize_bytes(visitor) } forward_to_deserialize! 
{ bool u8 u16 u32 u64 i8 i16 i32 i64 f32 f64 char str string unit seq seq_fixed_size map unit_struct tuple_struct struct struct_field tuple ignored_any } } struct SeqVisitor<'a, R: Read + 'a> { de: &'a mut Deserializer<R>, first: bool, } impl<'a, R: Read + 'a> SeqVisitor<'a, R> { fn new(de: &'a mut Deserializer<R>) -> Self { SeqVisitor { de: de, first: true, } } } impl<'a, R: Read + 'a> de::SeqVisitor for SeqVisitor<'a, R> { type Error = Error; fn visit_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>> where T: de::DeserializeSeed, { try!(self.de.parse_whitespace()); match try!(self.de.peek()) { Some(b']') => { return Ok(None); } Some(b',') if !self.first => { self.de.eat_char(); } Some(_) => { if self.first { self.first = false; } else { return Err(self.de .peek_error(ErrorCode::ExpectedListCommaOrEnd)); } } None => { return Err(self.de.peek_error(ErrorCode::EOFWhileParsingList)); } } let value = try!(seed.deserialize(&mut *self.de)); Ok(Some(value)) } } struct MapVisitor<'a, R: Read + 'a> { de: &'a mut Deserializer<R>, first: bool, } impl<'a, R: Read + 'a> MapVisitor<'a, R> { fn new(de: &'a mut Deserializer<R>) -> Self { MapVisitor { de: de, first: true, } } } impl<'a, R: Read + 'a> de::MapVisitor for MapVisitor<'a, R> { type Error = Error; fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>> where K: de::DeserializeSeed, { try!(self.de.parse_whitespace()); match try!(self.de.peek()) { Some(b'}') => { return Ok(None); } Some(b',') if !self.first => { self.de.eat_char(); try!(self.de.parse_whitespace()); } Some(_) => { if self.first { self.first = false; } else { return Err(self.de .peek_error(ErrorCode::ExpectedObjectCommaOrEnd)); } } None => { return Err(self.de .peek_error(ErrorCode::EOFWhileParsingObject)); } } match try!(self.de.peek()) { Some(b'"') => Ok(Some(try!(seed.deserialize(&mut *self.de)))), Some(_) => Err(self.de.peek_error(ErrorCode::KeyMustBeAString)), None => Err(self.de.peek_error(ErrorCode::EOFWhileParsingValue)), } } fn 
visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value> where V: de::DeserializeSeed, { try!(self.de.parse_object_colon()); seed.deserialize(&mut *self.de) } } struct VariantVisitor<'a, R: Read + 'a> { de: &'a mut Deserializer<R>, } impl<'a, R: Read + 'a> VariantVisitor<'a, R> { fn new(de: &'a mut Deserializer<R>) -> Self { VariantVisitor { de: de, } } } impl<'a, R: Read + 'a> de::EnumVisitor for VariantVisitor<'a, R> { type Error = Error; type Variant = Self; fn visit_variant_seed<V>(self, seed: V) -> Result<(V::Value, Self)> where V: de::DeserializeSeed, { let val = try!(seed.deserialize(&mut *self.de)); try!(self.de.parse_object_colon()); Ok((val, self)) } } impl<'a, R: Read + 'a> de::VariantVisitor for VariantVisitor<'a, R> { type Error = Error; fn visit_unit(self) -> Result<()> { de::Deserialize::deserialize(self.de) } fn visit_newtype_seed<T>(self, seed: T) -> Result<T::Value> where T: de::DeserializeSeed, { seed.deserialize(self.de) } fn visit_tuple<V>(self, _len: usize, visitor: V) -> Result<V::Value> where V: de::Visitor, { de::Deserializer::deserialize(self.de, visitor) } fn visit_struct<V>( self, _fields: &'static [&'static str], visitor: V ) -> Result<V::Value> where V: de::Visitor, { de::Deserializer::deserialize(self.de, visitor) } } struct UnitVariantVisitor<'a, R: Read + 'a> { de: &'a mut Deserializer<R>, } impl<'a, R: Read + 'a> UnitVariantVisitor<'a, R> { fn new(de: &'a mut Deserializer<R>) -> Self { UnitVariantVisitor { de: de, } } } impl<'a, R: Read + 'a> de::EnumVisitor for UnitVariantVisitor<'a, R> { type Error = Error; type Variant = Self; fn visit_variant_seed<V>(self, seed: V) -> Result<(V::Value, Self)> where V: de::DeserializeSeed, { let variant = try!(seed.deserialize(&mut *self.de)); Ok((variant, self)) } } impl<'a, R: Read + 'a> de::VariantVisitor for UnitVariantVisitor<'a, R> { type Error = Error; fn visit_unit(self) -> Result<()> { Ok(()) } fn visit_newtype_seed<T>(self, _seed: T) -> Result<T::Value> where T: de::DeserializeSeed, 
{ Err(de::Error::invalid_type(Unexpected::UnitVariant, &"newtype variant")) } fn visit_tuple<V>(self, _len: usize, _visitor: V) -> Result<V::Value> where V: de::Visitor, { Err(de::Error::invalid_type(Unexpected::UnitVariant, &"tuple variant")) } fn visit_struct<V>( self, _fields: &'static [&'static str], _visitor: V ) -> Result<V::Value> where V: de::Visitor, { Err(de::Error::invalid_type(Unexpected::UnitVariant, &"struct variant")) } } ////////////////////////////////////////////////////////////////////////////// /// Iterator that deserializes a stream into multiple JSON values. /// /// A stream deserializer can be created from any JSON deserializer using the /// `Deserializer::into_iter` method. /// /// ```rust /// extern crate serde_json; /// /// use serde_json::{Deserializer, Value}; /// /// fn main() { /// let data = "1 2 {\"k\": 3}"; /// /// let stream = Deserializer::from_str(data).into_iter::<Value>(); /// /// for value in stream { /// println!("{}", value.unwrap()); /// } /// } /// ``` pub struct StreamDeserializer<R, T> { de: Deserializer<R>, _marker: PhantomData<T>, } impl<R, T> StreamDeserializer<R, T> where R: read::Read, T: de::Deserialize { /// Create a JSON stream deserializer from one of the possible serde_json /// input sources. /// /// Typically it is more convenient to use one of these methods instead: /// /// - Deserializer::from_str(...).into_iter() /// - Deserializer::from_bytes(...).into_iter() /// - Deserializer::from_iter(...).into_iter() /// - Deserializer::from_reader(...).into_iter() pub fn new(read: R) -> Self { StreamDeserializer { de: Deserializer::new(read), _marker: PhantomData, } } } impl<R, T> Iterator for StreamDeserializer<R, T> where R: Read, T: de::Deserialize, { type Item = Result<T>; fn next(&mut self) -> Option<Result<T>> { // skip whitespaces, if any // this helps with trailing whitespaces, since whitespaces between // values are handled for us. 
match self.de.parse_whitespace() { Ok(true) => None, // eof Ok(false) => { match de::Deserialize::deserialize(&mut self.de) { Ok(v) => Some(Ok(v)), Err(e) => Some(Err(e)), } } Err(e) => Some(Err(e)), } } } ////////////////////////////////////////////////////////////////////////////// fn from_trait<R, T>(read: R) -> Result<T> where R: Read, T: de::Deserialize, { let mut de = Deserializer::new(read); let value = try!(de::Deserialize::deserialize(&mut de)); // Make sure the whole stream has been consumed. try!(de.end()); Ok(value) } /// Deserialize an instance of type `T` from an iterator over bytes of JSON. /// /// This conversion can fail if the structure of the Value does not match the /// structure expected by `T`, for example if `T` is a struct type but the Value /// contains something other than a JSON map. It can also fail if the structure /// is correct but `T`'s implementation of `Deserialize` decides that something /// is wrong with the data, for example required struct fields are missing from /// the JSON map or some number is too big to fit in the expected primitive /// type. pub fn from_iter<I, T>(iter: I) -> Result<T> where I: Iterator<Item = io::Result<u8>>, T: de::Deserialize, { from_trait(read::IteratorRead::new(iter)) } /// Deserialize an instance of type `T` from an IO stream of JSON. /// /// This conversion can fail if the structure of the Value does not match the /// structure expected by `T`, for example if `T` is a struct type but the Value /// contains something other than a JSON map. It can also fail if the structure /// is correct but `T`'s implementation of `Deserialize` decides that something /// is wrong with the data, for example required struct fields are missing from /// the JSON map or some number is too big to fit in the expected primitive /// type. pub fn from_reader<R, T>(rdr: R) -> Result<T> where R: io::Read, T: de::Deserialize, { from_iter(rdr.bytes()) } /// Deserialize an instance of type `T` from bytes of JSON text. 
/// /// This conversion can fail if the structure of the Value does not match the /// structure expected by `T`, for example if `T` is a struct type but the Value /// contains something other than a JSON map. It can also fail if the structure /// is correct but `T`'s implementation of `Deserialize` decides that something /// is wrong with the data, for example required struct fields are missing from /// the JSON map or some number is too big to fit in the expected primitive /// type. pub fn from_slice<T>(v: &[u8]) -> Result<T> where T: de::Deserialize, { from_trait(read::SliceRead::new(v)) } /// Deserialize an instance of type `T` from a string of JSON text. /// /// This conversion can fail if the structure of the Value does not match the /// structure expected by `T`, for example if `T` is a struct type but the Value /// contains something other than a JSON map. It can also fail if the structure /// is correct but `T`'s implementation of `Deserialize` decides that something /// is wrong with the data, for example required struct fields are missing from /// the JSON map or some number is too big to fit in the expected primitive /// type. pub fn from_str<T>(s: &str) -> Result<T> where T: de::Deserialize, { from_trait(read::StrRead::new(s)) } Fix performance regression caused by 8123ecc474ea599d4659f20c3cfbfb7011b77628 //! Deserialize JSON data to a Rust data structure. use std::{i32, u64}; use std::io; use std::marker::PhantomData; use serde::de::{self, Unexpected}; use super::error::{Error, ErrorCode, Result}; use read; pub use read::{Read, IteratorRead, SliceRead, StrRead}; ////////////////////////////////////////////////////////////////////////////// /// A structure that deserializes JSON into Rust values. pub struct Deserializer<R> { read: R, str_buf: Vec<u8>, remaining_depth: u8, } impl<R> Deserializer<R> where R: read::Read { /// Create a JSON deserializer from one of the possible serde_json input /// sources. 
/// /// Typically it is more convenient to use one of these methods instead: /// /// - Deserializer::from_str /// - Deserializer::from_bytes /// - Deserializer::from_iter /// - Deserializer::from_reader pub fn new(read: R) -> Self { Deserializer { read: read, str_buf: Vec::with_capacity(128), remaining_depth: 128, } } } impl<I> Deserializer<read::IteratorRead<I>> where I: Iterator<Item = io::Result<u8>> { /// Creates a JSON deserializer from a `std::iter::Iterator`. pub fn from_iter(iter: I) -> Self { Deserializer::new(read::IteratorRead::new(iter)) } } impl<R> Deserializer<read::IoRead<R>> where R: io::Read { /// Creates a JSON deserializer from an `io::Read`. pub fn from_reader(reader: R) -> Self { Deserializer::new(read::IoRead::new(reader)) } } impl<'a> Deserializer<read::SliceRead<'a>> { /// Creates a JSON deserializer from a `&[u8]`. pub fn from_slice(bytes: &'a [u8]) -> Self { Deserializer::new(read::SliceRead::new(bytes)) } } impl<'a> Deserializer<read::StrRead<'a>> { /// Creates a JSON deserializer from a `&str`. pub fn from_str(s: &'a str) -> Self { Deserializer::new(read::StrRead::new(s)) } } macro_rules! overflow { ($a:ident * 10 + $b:ident, $c:expr) => { $a >= $c / 10 && ($a > $c / 10 || $b > $c % 10) } } impl<R: Read> Deserializer<R> { /// The `Deserializer::end` method should be called after a value has been fully deserialized. /// This allows the `Deserializer` to validate that the input stream is at the end or that it /// only has trailing whitespace. pub fn end(&mut self) -> Result<()> { if try!(self.parse_whitespace()) { // true if eof Ok(()) } else { Err(self.peek_error(ErrorCode::TrailingCharacters)) } } /// Turn a JSON deserializer into an iterator over values of type T. pub fn into_iter<T>(self) -> StreamDeserializer<R, T> where T: de::Deserialize { // This cannot be an implementation of std::iter::IntoIterator because // we need the caller to choose what T is. 
StreamDeserializer { de: self, _marker: PhantomData, } } fn peek(&mut self) -> Result<Option<u8>> { self.read.peek().map_err(Into::into) } fn peek_or_null(&mut self) -> Result<u8> { Ok(try!(self.peek()).unwrap_or(b'\x00')) } fn eat_char(&mut self) { self.read.discard(); } fn next_char(&mut self) -> Result<Option<u8>> { self.read.next().map_err(Into::into) } fn next_char_or_null(&mut self) -> Result<u8> { Ok(try!(self.next_char()).unwrap_or(b'\x00')) } /// Error caused by a byte from next_char(). fn error(&mut self, reason: ErrorCode) -> Error { let pos = self.read.position(); Error::syntax(reason, pos.line, pos.column) } /// Error caused by a byte from peek(). fn peek_error(&mut self, reason: ErrorCode) -> Error { let pos = self.read.peek_position(); Error::syntax(reason, pos.line, pos.column) } /// Consume whitespace until the next non-whitespace character. /// /// Return `Ok(true)` if EOF was encountered in the process and `Ok(false)` otherwise. fn parse_whitespace(&mut self) -> Result<bool> { loop { match try!(self.peek()) { Some(b) => match b { b' ' | b'\n' | b'\t' | b'\r' => { self.eat_char(); } _ => { return Ok(false); } }, None => return Ok(true), } } } fn parse_value<V>(&mut self, visitor: V) -> Result<V::Value> where V: de::Visitor, { if try!(self.parse_whitespace()) { // true if eof return Err(self.peek_error(ErrorCode::EOFWhileParsingValue)); } let value = match try!(self.peek_or_null()) { b'n' => { self.eat_char(); try!(self.parse_ident(b"ull")); visitor.visit_unit() } b't' => { self.eat_char(); try!(self.parse_ident(b"rue")); visitor.visit_bool(true) } b'f' => { self.eat_char(); try!(self.parse_ident(b"alse")); visitor.visit_bool(false) } b'-' => { self.eat_char(); self.parse_integer(false, visitor) } b'0'...b'9' => self.parse_integer(true, visitor), b'"' => { self.eat_char(); self.str_buf.clear(); let s = try!(self.read.parse_str(&mut self.str_buf)); visitor.visit_str(s) } b'[' => { self.remaining_depth -= 1; if self.remaining_depth == 0 { return 
Err(self.peek_error(ErrorCode::RecursionLimitExceeded)); } self.eat_char(); let ret = visitor.visit_seq(SeqVisitor::new(self)); self.remaining_depth += 1; match (ret, self.end_seq()) { (Ok(ret), Ok(())) => Ok(ret), (Err(err), _) | (_, Err(err)) => Err(err), } } b'{' => { self.remaining_depth -= 1; if self.remaining_depth == 0 { return Err(self.peek_error(ErrorCode::RecursionLimitExceeded)); } self.eat_char(); let ret = visitor.visit_map(MapVisitor::new(self)); self.remaining_depth += 1; match (ret, self.end_map()) { (Ok(ret), Ok(())) => Ok(ret), (Err(err), _) | (_, Err(err)) => Err(err), } } _ => Err(self.peek_error(ErrorCode::ExpectedSomeValue)), }; match value { Ok(value) => Ok(value), // The de::Error and From<de::value::Error> impls both create errors // with unknown line and column. Fill in the position here by // looking at the current index in the input. There is no way to // tell whether this should call `error` or `peek_error` so pick the // one that seems correct more often. Worst case, the position is // off by one character. Err(err) => Err(err.fix_position(|code| self.error(code))), } } fn parse_ident(&mut self, ident: &[u8]) -> Result<()> { for c in ident { if Some(*c) != try!(self.next_char()) { return Err(self.error(ErrorCode::ExpectedSomeIdent)); } } Ok(()) } fn parse_integer<V>(&mut self, pos: bool, visitor: V) -> Result<V::Value> where V: de::Visitor, { match try!(self.next_char_or_null()) { b'0' => { // There can be only one leading '0'. match try!(self.peek_or_null()) { b'0'...b'9' => { Err(self.peek_error(ErrorCode::InvalidNumber)) } _ => self.parse_number(pos, 0, visitor), } } c @ b'1'...b'9' => { let mut res = (c - b'0') as u64; loop { match try!(self.peek_or_null()) { c @ b'0'...b'9' => { self.eat_char(); let digit = (c - b'0') as u64; // We need to be careful with overflow. If we can, try to keep the // number as a `u64` until we grow too large. At that point, switch to // parsing the value as a `f64`. 
if overflow!(res * 10 + digit, u64::MAX) { return self.parse_long_integer(pos, res, 1, // res * 10^1 visitor); } res = res * 10 + digit; } _ => { return self.parse_number(pos, res, visitor); } } } } _ => Err(self.error(ErrorCode::InvalidNumber)), } } fn parse_long_integer<V>( &mut self, pos: bool, significand: u64, mut exponent: i32, visitor: V ) -> Result<V::Value> where V: de::Visitor, { loop { match try!(self.peek_or_null()) { b'0'...b'9' => { self.eat_char(); // This could overflow... if your integer is gigabytes long. // Ignore that possibility. exponent += 1; } b'.' => { return self.parse_decimal(pos, significand, exponent, visitor); } b'e' | b'E' => { return self.parse_exponent(pos, significand, exponent, visitor); } _ => { return self.visit_f64_from_parts(pos, significand, exponent, visitor); } } } } fn parse_number<V>( &mut self, pos: bool, significand: u64, visitor: V ) -> Result<V::Value> where V: de::Visitor, { match try!(self.peek_or_null()) { b'.' => self.parse_decimal(pos, significand, 0, visitor), b'e' | b'E' => self.parse_exponent(pos, significand, 0, visitor), _ => { if pos { visitor.visit_u64(significand) } else { let neg = (significand as i64).wrapping_neg(); // Convert into a float if we underflow. if neg > 0 { visitor.visit_f64(-(significand as f64)) } else { visitor.visit_i64(neg) } } } } } fn parse_decimal<V>( &mut self, pos: bool, mut significand: u64, mut exponent: i32, visitor: V ) -> Result<V::Value> where V: de::Visitor, { self.eat_char(); let mut at_least_one_digit = false; while let c @ b'0'...b'9' = try!(self.peek_or_null()) { self.eat_char(); let digit = (c - b'0') as u64; at_least_one_digit = true; if overflow!(significand * 10 + digit, u64::MAX) { // The next multiply/add would overflow, so just ignore all // further digits. 
while let b'0'...b'9' = try!(self.peek_or_null()) { self.eat_char(); } break; } significand = significand * 10 + digit; exponent -= 1; } if !at_least_one_digit { return Err(self.peek_error(ErrorCode::InvalidNumber)); } match try!(self.peek_or_null()) { b'e' | b'E' => { self.parse_exponent(pos, significand, exponent, visitor) } _ => self.visit_f64_from_parts(pos, significand, exponent, visitor), } } fn parse_exponent<V>( &mut self, pos: bool, significand: u64, starting_exp: i32, visitor: V ) -> Result<V::Value> where V: de::Visitor, { self.eat_char(); let pos_exp = match try!(self.peek_or_null()) { b'+' => { self.eat_char(); true } b'-' => { self.eat_char(); false } _ => true, }; // Make sure a digit follows the exponent place. let mut exp = match try!(self.next_char_or_null()) { c @ b'0'...b'9' => (c - b'0') as i32, _ => { return Err(self.error(ErrorCode::InvalidNumber)); } }; while let c @ b'0'...b'9' = try!(self.peek_or_null()) { self.eat_char(); let digit = (c - b'0') as i32; if overflow!(exp * 10 + digit, i32::MAX) { return self.parse_exponent_overflow(pos, significand, pos_exp, visitor); } exp = exp * 10 + digit; } let final_exp = if pos_exp { starting_exp.saturating_add(exp) } else { starting_exp.saturating_sub(exp) }; self.visit_f64_from_parts(pos, significand, final_exp, visitor) } // This cold code should not be inlined into the middle of the hot // exponent-parsing loop above. #[cold] #[inline(never)] fn parse_exponent_overflow<V>( &mut self, pos: bool, significand: u64, pos_exp: bool, visitor: V ) -> Result<V::Value> where V: de::Visitor, { // Error instead of +/- infinity. 
if significand != 0 && pos_exp { return Err(self.error(ErrorCode::NumberOutOfRange)); } while let b'0'...b'9' = try!(self.peek_or_null()) { self.eat_char(); } visitor.visit_f64(if pos { 0.0 } else { -0.0 }) } fn visit_f64_from_parts<V>( &mut self, pos: bool, significand: u64, mut exponent: i32, visitor: V ) -> Result<V::Value> where V: de::Visitor, { let mut f = significand as f64; loop { match POW10.get(exponent.abs() as usize) { Some(&pow) => { if exponent >= 0 { f *= pow; if f.is_infinite() { return Err(self.error(ErrorCode::NumberOutOfRange)); } } else { f /= pow; } break; } None => { if f == 0.0 { break; } if exponent >= 0 { return Err(self.error(ErrorCode::NumberOutOfRange)); } f /= 1e308; exponent += 308; } } } visitor.visit_f64(if pos { f } else { -f }) } fn parse_object_colon(&mut self) -> Result<()> { try!(self.parse_whitespace()); match try!(self.peek()) { Some(b':') => { self.eat_char(); Ok(()) } Some(_) => Err(self.peek_error(ErrorCode::ExpectedColon)), None => Err(self.peek_error(ErrorCode::EOFWhileParsingObject)), } } fn end_seq(&mut self) -> Result<()> { try!(self.parse_whitespace()); match try!(self.next_char()) { Some(b']') => Ok(()), Some(_) => Err(self.error(ErrorCode::TrailingCharacters)), None => Err(self.error(ErrorCode::EOFWhileParsingList)), } } fn end_map(&mut self) -> Result<()> { try!(self.parse_whitespace()); match try!(self.next_char()) { Some(b'}') => Ok(()), Some(_) => Err(self.error(ErrorCode::TrailingCharacters)), None => Err(self.error(ErrorCode::EOFWhileParsingObject)), } } } static POW10: [f64; 309] = [1e000, 1e001, 1e002, 1e003, 1e004, 1e005, 1e006, 1e007, 1e008, 1e009, 1e010, 1e011, 1e012, 1e013, 1e014, 1e015, 1e016, 1e017, 1e018, 1e019, 1e020, 1e021, 1e022, 1e023, 1e024, 1e025, 1e026, 1e027, 1e028, 1e029, 1e030, 1e031, 1e032, 1e033, 1e034, 1e035, 1e036, 1e037, 1e038, 1e039, 1e040, 1e041, 1e042, 1e043, 1e044, 1e045, 1e046, 1e047, 1e048, 1e049, 1e050, 1e051, 1e052, 1e053, 1e054, 1e055, 1e056, 1e057, 1e058, 1e059, 1e060, 1e061, 
1e062, 1e063, 1e064, 1e065, 1e066, 1e067, 1e068, 1e069, 1e070, 1e071, 1e072, 1e073, 1e074, 1e075, 1e076, 1e077, 1e078, 1e079, 1e080, 1e081, 1e082, 1e083, 1e084, 1e085, 1e086, 1e087, 1e088, 1e089, 1e090, 1e091, 1e092, 1e093, 1e094, 1e095, 1e096, 1e097, 1e098, 1e099, 1e100, 1e101, 1e102, 1e103, 1e104, 1e105, 1e106, 1e107, 1e108, 1e109, 1e110, 1e111, 1e112, 1e113, 1e114, 1e115, 1e116, 1e117, 1e118, 1e119, 1e120, 1e121, 1e122, 1e123, 1e124, 1e125, 1e126, 1e127, 1e128, 1e129, 1e130, 1e131, 1e132, 1e133, 1e134, 1e135, 1e136, 1e137, 1e138, 1e139, 1e140, 1e141, 1e142, 1e143, 1e144, 1e145, 1e146, 1e147, 1e148, 1e149, 1e150, 1e151, 1e152, 1e153, 1e154, 1e155, 1e156, 1e157, 1e158, 1e159, 1e160, 1e161, 1e162, 1e163, 1e164, 1e165, 1e166, 1e167, 1e168, 1e169, 1e170, 1e171, 1e172, 1e173, 1e174, 1e175, 1e176, 1e177, 1e178, 1e179, 1e180, 1e181, 1e182, 1e183, 1e184, 1e185, 1e186, 1e187, 1e188, 1e189, 1e190, 1e191, 1e192, 1e193, 1e194, 1e195, 1e196, 1e197, 1e198, 1e199, 1e200, 1e201, 1e202, 1e203, 1e204, 1e205, 1e206, 1e207, 1e208, 1e209, 1e210, 1e211, 1e212, 1e213, 1e214, 1e215, 1e216, 1e217, 1e218, 1e219, 1e220, 1e221, 1e222, 1e223, 1e224, 1e225, 1e226, 1e227, 1e228, 1e229, 1e230, 1e231, 1e232, 1e233, 1e234, 1e235, 1e236, 1e237, 1e238, 1e239, 1e240, 1e241, 1e242, 1e243, 1e244, 1e245, 1e246, 1e247, 1e248, 1e249, 1e250, 1e251, 1e252, 1e253, 1e254, 1e255, 1e256, 1e257, 1e258, 1e259, 1e260, 1e261, 1e262, 1e263, 1e264, 1e265, 1e266, 1e267, 1e268, 1e269, 1e270, 1e271, 1e272, 1e273, 1e274, 1e275, 1e276, 1e277, 1e278, 1e279, 1e280, 1e281, 1e282, 1e283, 1e284, 1e285, 1e286, 1e287, 1e288, 1e289, 1e290, 1e291, 1e292, 1e293, 1e294, 1e295, 1e296, 1e297, 1e298, 1e299, 1e300, 1e301, 1e302, 1e303, 1e304, 1e305, 1e306, 1e307, 1e308]; impl<'a, R: Read> de::Deserializer for &'a mut Deserializer<R> { type Error = Error; #[inline] fn deserialize<V>(self, visitor: V) -> Result<V::Value> where V: de::Visitor, { self.parse_value(visitor) } /// Parses a `null` as a None, and any other values as a 
`Some(...)`. #[inline] fn deserialize_option<V>(self, visitor: V) -> Result<V::Value> where V: de::Visitor, { try!(self.parse_whitespace()); match try!(self.peek_or_null()) { b'n' => { self.eat_char(); try!(self.parse_ident(b"ull")); visitor.visit_none() } _ => visitor.visit_some(self), } } /// Parses a newtype struct as the underlying value. #[inline] fn deserialize_newtype_struct<V>( self, _name: &str, visitor: V ) -> Result<V::Value> where V: de::Visitor, { visitor.visit_newtype_struct(self) } /// Parses an enum as an object like `{"$KEY":$VALUE}`, where $VALUE is either a straight /// value, a `[..]`, or a `{..}`. #[inline] fn deserialize_enum<V>( self, _name: &str, _variants: &'static [&'static str], visitor: V ) -> Result<V::Value> where V: de::Visitor, { try!(self.parse_whitespace()); match try!(self.peek_or_null()) { b'{' => { self.remaining_depth -= 1; if self.remaining_depth == 0 { return Err(self.peek_error(ErrorCode::RecursionLimitExceeded)); } self.eat_char(); let value = try!(visitor.visit_enum(VariantVisitor::new(self))); self.remaining_depth += 1; try!(self.parse_whitespace()); match try!(self.next_char_or_null()) { b'}' => Ok(value), _ => Err(self.error(ErrorCode::ExpectedSomeValue)), } } b'"' => visitor.visit_enum(UnitVariantVisitor::new(self)), _ => Err(self.peek_error(ErrorCode::ExpectedSomeValue)), } } /// Parses a JSON string as bytes. Note that this function does not /// check whether the bytes represent valid unicode code points. /// /// The JSON specification requires that strings only contain valid /// unicode characters. To deal with non-conforming JSON, you may use /// this function, which attempts to parse a string without checking /// whether the bytes represent valid unicode code points. /// /// Escape sequences are processed as usual, and for `\uXXXX` escapes /// it is still checked if the hex number represents a valid unicode /// code point. 
/// /// # Example usage /// /// You can use this to parse JSON strings containing non-unicode bytes: /// /// ``` /// # extern crate serde; /// # extern crate serde_json; /// # /// let bytes = serde::bytes::ByteBuf::from(b"some raw bytes: \xe5\x00\xe5".to_vec()); /// let parsed = serde_json::from_slice( b"\"some raw bytes: \xe5\x00\xe5\"").unwrap(); /// /// assert_eq!(bytes, parsed); /// ``` /// /// `\u` escape sequences with invalid unicode code points still fail to parse: /// /// ``` /// # extern crate serde; /// # extern crate serde_json; /// # /// let json = "\"invalid unicode surrogate: \\uD801\""; /// let parsed: Result<serde::bytes::ByteBuf, _> = serde_json::from_str(json); /// assert!(parsed.is_err(), "{} should not parse: {:?}", json, parsed); /// ``` fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value> where V: de::Visitor { if try!(self.parse_whitespace()) { // true if eof return Err(self.peek_error(ErrorCode::EOFWhileParsingValue)); } match try!(self.peek_or_null()) { b'"' => { self.eat_char(); let slice = try!(self.read.parse_str_raw(&mut self.str_buf)); visitor.visit_bytes(slice) } _ => self.deserialize(visitor), } } fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value> where V: de::Visitor { self.deserialize_bytes(visitor) } forward_to_deserialize! 
{ bool u8 u16 u32 u64 i8 i16 i32 i64 f32 f64 char str string unit seq seq_fixed_size map unit_struct tuple_struct struct struct_field tuple ignored_any } } struct SeqVisitor<'a, R: Read + 'a> { de: &'a mut Deserializer<R>, first: bool, } impl<'a, R: Read + 'a> SeqVisitor<'a, R> { fn new(de: &'a mut Deserializer<R>) -> Self { SeqVisitor { de: de, first: true, } } } impl<'a, R: Read + 'a> de::SeqVisitor for SeqVisitor<'a, R> { type Error = Error; fn visit_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>> where T: de::DeserializeSeed, { try!(self.de.parse_whitespace()); match try!(self.de.peek()) { Some(b']') => { return Ok(None); } Some(b',') if !self.first => { self.de.eat_char(); } Some(_) => { if self.first { self.first = false; } else { return Err(self.de .peek_error(ErrorCode::ExpectedListCommaOrEnd)); } } None => { return Err(self.de.peek_error(ErrorCode::EOFWhileParsingList)); } } let value = try!(seed.deserialize(&mut *self.de)); Ok(Some(value)) } } struct MapVisitor<'a, R: Read + 'a> { de: &'a mut Deserializer<R>, first: bool, } impl<'a, R: Read + 'a> MapVisitor<'a, R> { fn new(de: &'a mut Deserializer<R>) -> Self { MapVisitor { de: de, first: true, } } } impl<'a, R: Read + 'a> de::MapVisitor for MapVisitor<'a, R> { type Error = Error; fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>> where K: de::DeserializeSeed, { try!(self.de.parse_whitespace()); match try!(self.de.peek()) { Some(b'}') => { return Ok(None); } Some(b',') if !self.first => { self.de.eat_char(); try!(self.de.parse_whitespace()); } Some(_) => { if self.first { self.first = false; } else { return Err(self.de .peek_error(ErrorCode::ExpectedObjectCommaOrEnd)); } } None => { return Err(self.de .peek_error(ErrorCode::EOFWhileParsingObject)); } } match try!(self.de.peek()) { Some(b'"') => Ok(Some(try!(seed.deserialize(&mut *self.de)))), Some(_) => Err(self.de.peek_error(ErrorCode::KeyMustBeAString)), None => Err(self.de.peek_error(ErrorCode::EOFWhileParsingValue)), } } fn 
visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value> where V: de::DeserializeSeed, { try!(self.de.parse_object_colon()); seed.deserialize(&mut *self.de) } } struct VariantVisitor<'a, R: Read + 'a> { de: &'a mut Deserializer<R>, } impl<'a, R: Read + 'a> VariantVisitor<'a, R> { fn new(de: &'a mut Deserializer<R>) -> Self { VariantVisitor { de: de, } } } impl<'a, R: Read + 'a> de::EnumVisitor for VariantVisitor<'a, R> { type Error = Error; type Variant = Self; fn visit_variant_seed<V>(self, seed: V) -> Result<(V::Value, Self)> where V: de::DeserializeSeed, { let val = try!(seed.deserialize(&mut *self.de)); try!(self.de.parse_object_colon()); Ok((val, self)) } } impl<'a, R: Read + 'a> de::VariantVisitor for VariantVisitor<'a, R> { type Error = Error; fn visit_unit(self) -> Result<()> { de::Deserialize::deserialize(self.de) } fn visit_newtype_seed<T>(self, seed: T) -> Result<T::Value> where T: de::DeserializeSeed, { seed.deserialize(self.de) } fn visit_tuple<V>(self, _len: usize, visitor: V) -> Result<V::Value> where V: de::Visitor, { de::Deserializer::deserialize(self.de, visitor) } fn visit_struct<V>( self, _fields: &'static [&'static str], visitor: V ) -> Result<V::Value> where V: de::Visitor, { de::Deserializer::deserialize(self.de, visitor) } } struct UnitVariantVisitor<'a, R: Read + 'a> { de: &'a mut Deserializer<R>, } impl<'a, R: Read + 'a> UnitVariantVisitor<'a, R> { fn new(de: &'a mut Deserializer<R>) -> Self { UnitVariantVisitor { de: de, } } } impl<'a, R: Read + 'a> de::EnumVisitor for UnitVariantVisitor<'a, R> { type Error = Error; type Variant = Self; fn visit_variant_seed<V>(self, seed: V) -> Result<(V::Value, Self)> where V: de::DeserializeSeed, { let variant = try!(seed.deserialize(&mut *self.de)); Ok((variant, self)) } } impl<'a, R: Read + 'a> de::VariantVisitor for UnitVariantVisitor<'a, R> { type Error = Error; fn visit_unit(self) -> Result<()> { Ok(()) } fn visit_newtype_seed<T>(self, _seed: T) -> Result<T::Value> where T: de::DeserializeSeed, 
{ Err(de::Error::invalid_type(Unexpected::UnitVariant, &"newtype variant")) } fn visit_tuple<V>(self, _len: usize, _visitor: V) -> Result<V::Value> where V: de::Visitor, { Err(de::Error::invalid_type(Unexpected::UnitVariant, &"tuple variant")) } fn visit_struct<V>( self, _fields: &'static [&'static str], _visitor: V ) -> Result<V::Value> where V: de::Visitor, { Err(de::Error::invalid_type(Unexpected::UnitVariant, &"struct variant")) } } ////////////////////////////////////////////////////////////////////////////// /// Iterator that deserializes a stream into multiple JSON values. /// /// A stream deserializer can be created from any JSON deserializer using the /// `Deserializer::into_iter` method. /// /// ```rust /// extern crate serde_json; /// /// use serde_json::{Deserializer, Value}; /// /// fn main() { /// let data = "1 2 {\"k\": 3}"; /// /// let stream = Deserializer::from_str(data).into_iter::<Value>(); /// /// for value in stream { /// println!("{}", value.unwrap()); /// } /// } /// ``` pub struct StreamDeserializer<R, T> { de: Deserializer<R>, _marker: PhantomData<T>, } impl<R, T> StreamDeserializer<R, T> where R: read::Read, T: de::Deserialize { /// Create a JSON stream deserializer from one of the possible serde_json /// input sources. /// /// Typically it is more convenient to use one of these methods instead: /// /// - Deserializer::from_str(...).into_iter() /// - Deserializer::from_bytes(...).into_iter() /// - Deserializer::from_iter(...).into_iter() /// - Deserializer::from_reader(...).into_iter() pub fn new(read: R) -> Self { StreamDeserializer { de: Deserializer::new(read), _marker: PhantomData, } } } impl<R, T> Iterator for StreamDeserializer<R, T> where R: Read, T: de::Deserialize, { type Item = Result<T>; fn next(&mut self) -> Option<Result<T>> { // skip whitespaces, if any // this helps with trailing whitespaces, since whitespaces between // values are handled for us. 
match self.de.parse_whitespace() { Ok(true) => None, // eof Ok(false) => { match de::Deserialize::deserialize(&mut self.de) { Ok(v) => Some(Ok(v)), Err(e) => Some(Err(e)), } } Err(e) => Some(Err(e)), } } } ////////////////////////////////////////////////////////////////////////////// fn from_trait<R, T>(read: R) -> Result<T> where R: Read, T: de::Deserialize, { let mut de = Deserializer::new(read); let value = try!(de::Deserialize::deserialize(&mut de)); // Make sure the whole stream has been consumed. try!(de.end()); Ok(value) } /// Deserialize an instance of type `T` from an iterator over bytes of JSON. /// /// This conversion can fail if the structure of the Value does not match the /// structure expected by `T`, for example if `T` is a struct type but the Value /// contains something other than a JSON map. It can also fail if the structure /// is correct but `T`'s implementation of `Deserialize` decides that something /// is wrong with the data, for example required struct fields are missing from /// the JSON map or some number is too big to fit in the expected primitive /// type. pub fn from_iter<I, T>(iter: I) -> Result<T> where I: Iterator<Item = io::Result<u8>>, T: de::Deserialize, { from_trait(read::IteratorRead::new(iter)) } /// Deserialize an instance of type `T` from an IO stream of JSON. /// /// This conversion can fail if the structure of the Value does not match the /// structure expected by `T`, for example if `T` is a struct type but the Value /// contains something other than a JSON map. It can also fail if the structure /// is correct but `T`'s implementation of `Deserialize` decides that something /// is wrong with the data, for example required struct fields are missing from /// the JSON map or some number is too big to fit in the expected primitive /// type. pub fn from_reader<R, T>(rdr: R) -> Result<T> where R: io::Read, T: de::Deserialize, { from_iter(rdr.bytes()) } /// Deserialize an instance of type `T` from bytes of JSON text. 
///
/// This conversion can fail if the structure of the Value does not match the
/// structure expected by `T`, for example if `T` is a struct type but the Value
/// contains something other than a JSON map. It can also fail if the structure
/// is correct but `T`'s implementation of `Deserialize` decides that something
/// is wrong with the data, for example required struct fields are missing from
/// the JSON map or some number is too big to fit in the expected primitive
/// type.
pub fn from_slice<T>(v: &[u8]) -> Result<T>
    where T: de::Deserialize,
{
    from_trait(read::SliceRead::new(v))
}

/// Deserialize an instance of type `T` from a string of JSON text.
///
/// This conversion can fail if the structure of the Value does not match the
/// structure expected by `T`, for example if `T` is a struct type but the Value
/// contains something other than a JSON map. It can also fail if the structure
/// is correct but `T`'s implementation of `Deserialize` decides that something
/// is wrong with the data, for example required struct fields are missing from
/// the JSON map or some number is too big to fit in the expected primitive
/// type.
pub fn from_str<T>(s: &str) -> Result<T>
    where T: de::Deserialize,
{
    // NOTE(review): `StrRead` presumably exploits that `s` is already valid
    // UTF-8 — confirm against the `read` module.
    from_trait(read::StrRead::new(s))
}
#![allow(dead_code)] // This file contains the memory allocator used by the rust_alloc module // // The implementation in this file is a simple first-fit allocator. // // Invariants: // * All blocks will be a multiple of 16B // * All blocks will be 16B-aligned // // TODO: lock free list extern crate core; use core::mem::{transmute_copy, size_of}; static mut START: usize = 0; static mut END: usize = 0; static mut free_list: *mut Block = (0 as *mut Block); // memory block // the last word of every block is its allocation size #[repr(C, packed)] struct Block { magic: usize, // If this is a free block, then it is magical: 0xCAFEFACE size: usize, // Includes the size of the footer next: *mut Block, prev: *mut Block, } impl Block { // METHODS FOR ALL BLOCKS // returns true if this is a valid free block fn is_free(&self) -> bool { self.magic == 0xCAFEFACE && self.size % 0x10 == 0 } // get the prev block in memory // // note: this is distinct from the prev block in the free list // behavior is undefined if this is the last block. unsafe fn get_prev(&self) -> *mut Block { // check for corner cases if self.this() == START { panic!("Free block has no previous block: {}", self.this()); } // get the addr of previous block's size let prev_foot: *const usize = (self.this() - core::mem::size_of::<usize>()) as *const usize; let prev_size = *prev_foot; // get previous block's addr (self.this() - prev_size) as *mut Block } // get the next block in memory // // note: this is distinct from the next block in the free list // behavior is undefined if this is the last block. unsafe fn get_next(&self) -> *mut Block { // check for corner cases if self.this() + self.size == END { panic!("Free block has no next block: {}", self.this()); } (self.this() + self.size) as *mut Block } // Set the footer for this block. This method does not error checking, so // be careful! 
unsafe fn set_footer(&mut self, size: usize) { let footer = (self.this() + self.size - core::mem::size_of::<usize>()) as *mut usize; *footer = size; } #[inline] unsafe fn this(&self) -> usize { self as *const Block as usize } // METHODS FOR ONLY FREE BLOCKS // split the block into two blocks. The first block will be of the // given size. The block must be free unsafe fn split(&mut self, size: usize) { // check that the math works out if !self.is_free() { panic!("Attempt to split non-free block 0x{:X}", (self as *const Block) as usize); } if round_to_16(size + core::mem::size_of::<usize>()) >= self.size { panic!("Splitting block that is too small: 0x{:X}, size {}", (self as *const Block) as usize, size); } // get new block addr let new_size = round_to_16(size + core::mem::size_of::<usize>()); let new_addr = (self.this() + new_size) as *const Block; // create new block and set magic bits let mut block: Block = core::mem::transmute_copy(&*new_addr); block.magic = 0xCAFEFACE; block.size = self.size - new_size; // adjust this block's metadata self.size = new_size; // insert at tail of free list block.insert(); } // coalesce this block with the next one. The two blocks must be free unsafe fn combine(&mut self) { // check that both are free! if !self.is_free() { panic!("Attempt to coalesce non-free block 0x{:X}", (self as *const Block) as usize); } else if !(*self.get_next()).is_free() { panic!("Attempt to coalesce non-free block 0x{:X}", self.get_next() as *const Block as usize); } let next = self.get_next(); // increase the size of this block self.size += (*next).size + core::mem::size_of::<usize>(); // remove next block from free list (*next).remove(); } // remove this block from the free list. The block // must be free. After this operation, the block's magic word is // set to 0xDEADBEEF. 
unsafe fn remove(&mut self) { if !self.is_free() { panic!("Attempt to remove non-free block from free list: {}", self.this()); } // Set magic, so that this is definitely not a free block self.magic = 0xDEADBEEF; // get prev and next in free list // corner cases: // - if this is the head, prev = NULL // - if this is the tail, next = NULL if self.prev != (0 as *mut Block) { (*self.prev).next = self.next; } else { // remove the head of the list free_list = self.next; } if self.next != (0 as *mut Block) { // not the tail (*self.next).prev = self.prev; } } // METHODS FOR ONLY USED BLOCKS // inserts the given block at the head of the free list and sets // the magic bits. The block cannot already be free. unsafe fn insert(&mut self) { if self.is_free() { panic!("Attempt to insert a free block: {}", self.this()); } // set magic bits self.magic = 0xCAFEFACE; // insert at head let next = free_list; self.next = free_list; if next != (0 as *mut Block) { (*next).prev = self.this() as *mut Block; } } } // round up to the nearest multiple of 16 fn round_to_16(size: usize) -> usize { } // Init the heap pub fn init(start: usize, end: usize) { // Round to nearest multiple of 16 unsafe { // round START up START = if (start & 0xF) == 0 {start} else {(start & !0xF) + 0x10}; // round END down END = end & !0xF; // bounds check if END <= START { panic!("No heap space"); } printf! ("In heap init\nstart addr: {:x}, end addr: {:x}\n", START, END); } // TODO: create first block and free list } /// Return a pointer to `size` bytes of memory aligned to `align`. /// /// On failure, return a null pointer. /// /// Behavior is undefined if the requested size is 0 or the alignment is not a /// power of 2. The alignment must be no larger than the largest supported page /// size on the platform. pub unsafe fn malloc(size: usize, align: usize) -> *mut u8 { 0 as *mut u8 // TODO } /// Deallocates the memory referenced by `ptr`. /// /// The `ptr` parameter must not be null. 
/// /// The `old_size` and `align` parameters are the parameters that were used to
/// create the allocation referenced by `ptr`. The `old_size` parameter may be
/// any value in range_inclusive(requested_size, usable_size).
pub unsafe fn free(ptr: *mut u8, old_size: usize) {
    // TODO: not implemented yet — freed memory is currently leaked.
}

/// Returns the usable size of an allocation created with the specified
/// `size` and `align`.
pub fn usable_size(size: usize, align: usize) -> usize {
    0 // TODO
}

/// Prints implementation-defined allocator statistics.
///
/// These statistics may be inconsistent if other threads use the allocator
/// during the call.
pub fn print_stats() {
}

Implementing malloc

#![allow(dead_code)]

// This file contains the memory allocator used by the rust_alloc module
//
// The implementation in this file is a simple first-fit allocator.
//
// Invariants:
// * All blocks will be a multiple of 16B
// * All blocks will be 16B-aligned
//
// TODO: lock free list
// TODO: make this more rustic
// TODO: out of memory error

extern crate core;

use core::mem::{size_of};
use core::option::Option::{self, Some, None};

// Heap bounds, set once by `init`. Both are 16B-aligned.
static mut START: usize = 0;
static mut END: usize = 0;

// Head of the intrusive doubly-linked free list (null when empty).
static mut free_list: *mut Block = (0 as *mut Block);

// memory block
// the last word of every block is its allocation size
#[repr(C, packed)]
struct Block {
    magic: usize, // If this is a free block, then it is magical: 0xCAFEFACE
    size: usize, // Includes the size of the footer
    next: *mut Block,
    prev: *mut Block,
}

impl Block {
    // METHODS FOR ALL BLOCKS

    // returns true if this is a valid free block
    fn is_free(&self) -> bool {
        self.magic == 0xCAFEFACE && self.size % 0x10 == 0
    }

    // get the prev block in memory
    //
    // note: this is distinct from the prev block in the free list
    // behavior is undefined if this is the first block.
    unsafe fn get_prev(&self) -> *mut Block {
        // check for corner cases
        if self.this() == START {
            panic!("Free block has no previous block: {}", self.this());
        }

        // get the addr of previous block's size
        // (the previous block's footer is the usize just below this header)
        let prev_foot: *const usize = (self.this() - core::mem::size_of::<usize>()) as *const usize;
        let prev_size = *prev_foot;

        // get previous block's addr
        (self.this() - prev_size) as *mut Block
    }

    // get the next block in memory
    //
    // note: this is distinct from the next block in the free list
    // behavior is undefined if this is the last block.
    unsafe fn get_next(&self) -> *mut Block {
        // check for corner cases
        // (a block whose end touches END is the last block in the heap)
        if self.this() + self.size == END {
            panic!("Free block has no next block: {}", self.this());
        }

        (self.this() + self.size) as *mut Block
    }

    // Set the footer for this block. This method does no error checking, so
    // be careful!
    unsafe fn set_footer(&mut self, size: usize) {
        let footer = (self.this() + self.size - core::mem::size_of::<usize>()) as *mut usize;
        *footer = size;
    }

    // The address of this block's header.
    #[inline]
    unsafe fn this(&self) -> usize {
        self as *const Block as usize
    }

    // METHODS FOR ONLY FREE BLOCKS

    // split the block into two blocks. The first block will be of the
    // given size.
The block must be free unsafe fn split(&mut self, size: usize) { // check that the math works out if !self.is_free() { panic!("Attempt to split non-free block 0x{:X}", (self as *const Block) as usize); } if round_to_16(size + core::mem::size_of::<usize>()) >= self.size { panic!("Splitting block that is too small: 0x{:X}, size {}", (self as *const Block) as usize, size); } // get new block addr let new_size = round_to_16(size + core::mem::size_of::<usize>()); let new_addr = (self.this() + new_size) as *const Block; // create new block and set magic bits let mut block: Block = core::mem::transmute_copy(&*new_addr); block.magic = 0xCAFEFACE; block.size = self.size - new_size; // adjust this block's metadata self.size = new_size; // insert at tail of free list block.insert(); } // coalesce this block with the next one. The two blocks must be free unsafe fn combine(&mut self) { // check that both are free! if !self.is_free() { panic!("Attempt to coalesce non-free block 0x{:X}", (self as *const Block) as usize); } else if !(*self.get_next()).is_free() { panic!("Attempt to coalesce non-free block 0x{:X}", self.get_next() as *const Block as usize); } let next = self.get_next(); // increase the size of this block self.size += (*next).size + core::mem::size_of::<usize>(); // remove next block from free list (*next).remove(); } // remove this block from the free list. The block // must be free. After this operation, the block's magic word is // set to 0xDEADBEEF. 
unsafe fn remove(&mut self) { if !self.is_free() { panic!("Attempt to remove non-free block from free list: {}", self.this()); } // Set magic, so that this is definitely not a free block self.magic = 0xDEADBEEF; // get prev and next in free list // corner cases: // - if this is the head, prev = NULL // - if this is the tail, next = NULL if self.prev != (0 as *mut Block) { (*self.prev).next = self.next; } else { // remove the head of the list free_list = self.next; } if self.next != (0 as *mut Block) { // not the tail (*self.next).prev = self.prev; } } // METHODS FOR ONLY USED BLOCKS // inserts the given block at the head of the free list and sets // the magic bits. The block cannot already be free. unsafe fn insert(&mut self) { if self.is_free() { panic!("Attempt to insert a free block: {}", self.this()); } // set magic bits self.magic = 0xCAFEFACE; // insert at head let next = free_list; self.next = free_list; if next != (0 as *mut Block) { (*next).prev = self.this() as *mut Block; } } // STATIC // Find a block that fits the bill unsafe fn find(size: usize, align: usize) -> Option<*mut Block> { //TODO Option::None } } // round up to the nearest multiple of 16 fn round_to_16(size: usize) -> usize { if (size & 0xF) == 0 { size } else { (size & !0xF) + 0x10 } } // Init the heap pub fn init(start: usize, end: usize) { // Round to nearest multiple of 16 unsafe { // round START up START = round_to_16(start); // round END down END = end & !0xF; // bounds check if END <= START { panic!("No heap space"); } printf! ("In heap init\nstart addr: {:x}, end addr: {:x}\n", START, END); } // TODO: create first block and free list } /// Return a pointer to `size` bytes of memory aligned to `align`. /// /// On failure, return a null pointer. /// /// Behavior is undefined if the requested size is 0 or the alignment is not a /// power of 2. The alignment must be no larger than the largest supported page /// size on the platform. 
pub unsafe fn malloc(size: usize, align: usize) -> *mut u8 {
    // TODO: alignment
    // NOTE(review): `align` is currently ignored entirely; every block is
    // only guaranteed 16B alignment by the allocator invariants — confirm
    // callers never request stronger alignment until this is implemented.

    // get free block
    let block_addr = Block::find(size, align);

    match block_addr {
        None => {
            // no fit found — report failure with a null pointer
            0 as *mut u8
        }
        Some(addr) => {
            let block: &mut Block = &mut*addr;

            // Split the block if it is too big
            // (strict `>` here, so `split`'s `>=` panic cannot trigger)
            if block.size > round_to_16(size + core::mem::size_of::<usize>()) {
                block.split(size);
            }

            // Remove the block from the free list
            block.remove();

            // NOTE(review): the block's footer (`set_footer`) is never
            // written here, yet `get_prev` relies on it — verify whether
            // `find`/`split` maintain it or this is still a TODO.

            // return ptr
            addr as *mut u8
        }
    }
}

/// Deallocates the memory referenced by `ptr`.
///
/// The `ptr` parameter must not be null.
///
/// The `old_size` and `align` parameters are the parameters that were used to
/// create the allocation referenced by `ptr`. The `old_size` parameter may be
/// any value in range_inclusive(requested_size, usable_size).
pub unsafe fn free(ptr: *mut u8, old_size: usize) {
    // TODO: not implemented yet — freed memory is currently leaked.
}

/// Returns the usable size of an allocation created with the specified
/// `size` and `align`.
pub fn usable_size(size: usize, align: usize) -> usize {
    0 // TODO
}

/// Prints implementation-defined allocator statistics.
///
/// These statistics may be inconsistent if other threads use the allocator
/// during the call.
pub fn print_stats() {
}
//! Utility methods and constructors for Lua objects use std::convert::From; use std::cell; use std::marker::PhantomData; use rlua::{self, AnyUserData, FromLua, Function, Lua, MetaMethod, Table, ToLua, UserData, UserDataMethods, Value}; use super::class::Class; use super::property::Property; use super::signal; /// The ObjectStateType trait is used to constrain the generic data types in the Object and Class structs. /// They can be transferred to and from Lua user data and force type checking pub trait ObjectStateType: UserData + Default + Send {} impl<T> ObjectStateType for T where T: UserData + Default + Send {} /// All Lua objects can be cast to this. #[derive(Debug)] pub struct Object<'lua, S: ObjectStateType>{ pub obj: AnyUserData<'lua>, state: PhantomData<S>, } impl<'lua, S: ObjectStateType> Clone for Object<'lua, S> { fn clone(&self) -> Self { Object { obj: self.obj.clone(), state: PhantomData } } } impl<'lua, S: ObjectStateType> From<AnyUserData<'lua>> for Object<'lua, S> { fn from(obj: AnyUserData<'lua>) -> Self { Object { obj, state: PhantomData } } } impl<'lua, S: ObjectStateType> Into<AnyUserData<'lua>> for Object<'lua, S> { fn into(self) -> AnyUserData<'lua> { self.obj } } /// Construct a new object, used when using the default Objectable::new. pub struct ObjectBuilder<'lua, S: ObjectStateType> { lua: &'lua Lua, object: Object<'lua, S> } impl<'lua, S: ObjectStateType> ObjectBuilder<'lua, S> { pub fn add_to_meta(self, new_meta: Table<'lua>) -> rlua::Result<Self> { let meta = self.object.get_metatable()? 
.expect("Object had no meta table"); for entry in new_meta.pairs::<rlua::Value, rlua::Value>() { let (key, value) = entry?; meta.set(key, value)?; } self.object.set_metatable(meta)?; Ok(self) } #[allow(dead_code)] pub fn add_to_signals(self, name: String, func: rlua::Function) -> rlua::Result<Self> { signal::connect_signal(self.lua, self.object.clone(), name, &[func])?; Ok(self) } pub fn handle_constructor_argument(self, args: Table) -> rlua::Result<Self> { let meta = self.object .get_metatable()? .expect("Object had no meta table"); let class = meta.get::<_, AnyUserData>("__class")?; let class_table = class.get_user_value::<Table>()?; let props = class_table.get::<_, Vec<Property>>("properties")?; // Handle all table entries that correspond to known properties, // silently ignore all other keys for pair in args.pairs() { let (key, value): (Value, Value) = pair?; if let rlua::Value::String(key) = key { if let Ok(key) = key.to_str() { // Find the property for prop in props.iter() { if prop.name == key { // Property exists and has a cb_new callback if let Some(ref new) = prop.cb_new { let _: () = new.bind(self.object.clone())?.call(value)?; break } } } } } } Ok(self) } pub fn build(self) -> Object<'lua, S> { self.object } } /// Objects that represent OO lua objects. /// /// Allows casting an object gotten back from the Lua runtime /// into a concrete object so that Rust can do things with it. /// impl<'lua, S: ObjectStateType> Object<'lua, S> { pub fn cast(obj: AnyUserData<'lua>) -> rlua::Result<Self> { if obj.is::<S>()? { Ok(obj.into()) } else { use rlua::Error::RuntimeError; Err(RuntimeError("Could not cast object to concrete type".into())) } } /// Gets a reference to the internal state for the concrete object. pub fn state(&self) -> rlua::Result<cell::Ref<S>> { Ok(self.obj.borrow::<S>()?) } /// Gets a mutable reference to the internal state for the concrete object. pub fn state_mut(&mut self) -> rlua::Result<cell::RefMut<S>> { Ok(self.obj.borrow_mut::<S>()?) 
} /// Get the signals of the for this object pub fn signals(&self) -> rlua::Result<rlua::Table<'lua>> { self.get_associated_data::<Table>("signals") } /// Set a value to keep inside lua associate with the object, but /// which should not be transfered to Rust for various reason /// (e.g. reference to other objects which cause GC problems) pub fn set_associated_data<D: ToLua<'lua>>(&self, key: &str, value: D) -> rlua::Result<()> { self.obj.get_user_value::<Table<'lua>>()?.set::<_, D>(key, value) } /// Get a value to keep inside lua associate with the object pub fn get_associated_data<D: FromLua<'lua>>(&self, key: &str) -> rlua::Result<D> { self.obj.get_user_value::<Table<'lua>>()?.get::<_, D>(key) } /// Get the metatable for this object pub fn get_metatable(&self) -> rlua::Result<Option<Table<'lua>>> { Ok(self.obj.get_user_value::<Table<'lua>>()?.get_metatable()) } /// Set the metatable for this object pub fn set_metatable(&self, meta: Table<'lua>) -> rlua::Result<()> { self.obj.get_user_value::<Table<'lua>>()?.set_metatable(Some(meta)); Ok(()) } /// Lua objects in Way Cooler are just how they are in Awesome: /// * We expose the user data directly. /// * ObjectStateType for the object is stored using set_user_value in a "wrapper" /// table. * The wrapper table has a data field which hosts the data. /// * Class methods/attributes are on the meta table of the wrapper table. 
pub fn allocate(lua: &'lua Lua, class: Class<S>) -> rlua::Result<ObjectBuilder<'lua, S>> { let obj = lua.create_userdata(S::default())?; // TODO Increment the instance count let wrapper_table = lua.create_table()?; let data_table = lua.create_table()?; wrapper_table.set("data", data_table)?; let meta = lua.create_table()?; meta.set("__class", class)?; meta.set("properties", Vec::<Property>::new().to_lua(lua)?)?; meta.set("signals", lua.create_table()?)?; meta.set("connect_signal", lua.create_function(connect_signal::<S>)?)?; meta.set("disconnect_signal", lua.create_function(disconnect_signal::<S>)?)?; meta.set("emit_signal", lua.create_function(emit_signal::<S>)?)?; meta.set("__index", meta.clone())?; meta.set("__tostring", lua.create_function(default_tostring::<S>)?)?; wrapper_table.set_metatable(Some(meta)); obj.set_user_value(wrapper_table)?; // TODO Emit new signal event let object = Object { obj, state: PhantomData }; Ok(ObjectBuilder { object, lua }) } } impl<'lua, S: ObjectStateType> ToLua<'lua> for Object<'lua, S> { fn to_lua(self, _: &'lua Lua) -> rlua::Result<Value<'lua>> { Ok(Value::UserData(self.obj)) } } impl<'lua, S: ObjectStateType> FromLua<'lua> for Object<'lua, S> { fn from_lua(val: Value<'lua>, _lua: &'lua Lua) -> rlua::Result<Self> { if let Value::UserData(obj) = val { Object::cast(obj) } else { Err(rlua::Error::RuntimeError("Invalid data supplied".into())) } } } /// Can be used for implementing UserData for Lua objects. This provides some /// default metafunctions. pub fn default_add_methods<S>(methods: &mut UserDataMethods<S>) where S: ObjectStateType { methods.add_meta_function(MetaMethod::Index, default_index::<S>); methods.add_meta_function(MetaMethod::NewIndex, default_newindex::<S>); methods.add_meta_function(MetaMethod::ToString, default_tostring::<S>); } /// Default indexing of an Awesome object. 
///
/// Automatically looks up contents in meta table, so instead of overriding this
/// it's easier to just add the required data in the meta table.
pub fn default_index<'lua, S: ObjectStateType>(lua: &'lua Lua,
                                               (obj, index): (Object<'lua, S>, Value<'lua>))
                                               -> rlua::Result<Value<'lua>> {
    // A non-nil entry stored directly in the metatable shadows everything else.
    let meta = obj.get_metatable()?.expect("Object had no metatable");
    if meta.get::<_, AnyUserData>("__class").is_ok() {
        if let Ok(hit) = meta.raw_get::<_, Value>(index.clone()) {
            if let Value::Nil = hit {
                // fall through to the property lookup below
            } else {
                return Ok(hit)
            }
        }
    }

    // Non-string keys cannot name a property or builtin; treat them as absent.
    let key = match String::from_lua(index, lua) {
        Ok(k) => k,
        Err(_) => return Ok(Value::Nil)
    };

    if key.as_str() == "valid" {
        // An object is valid when its class checker accepts it (or the class
        // has no checker); objects with no class are never valid.
        let validity = match meta.get::<_, AnyUserData>("__class") {
            Ok(class) => {
                let class: Class<S> = class.into();
                class.checker()?.map(|checker| checker(obj)).unwrap_or(true)
            }
            Err(_) => false
        };
        return Ok(Value::Boolean(validity))
    }

    if key.as_str() == "data" {
        // Expose the wrapper table that holds the object's associated data.
        return obj.obj.get_user_value()
    }

    // Otherwise, look for a class property carrying an index callback.
    if let Ok(class) = meta.get::<_, AnyUserData>("__class") {
        let class_table = class.get_user_value::<Table>()?;
        let props = class_table.get::<_, Vec<Property>>("properties")?;
        for prop in props {
            if prop.name == key {
                // Property exists and has an index callback
                if let Some(cb) = prop.cb_index {
                    return cb.call(obj)
                }
            }
        }
        // No property matched: give the class's miss handler a chance.
        if let Some(class_meta) = class_table.get_metatable() {
            if let Ok(handler) = class_meta.get::<_, Function>("__index_miss_handler") {
                return handler.bind(obj)?.call(key)
            }
        }
    }
    // TODO property miss handler if index doesn't exist
    Ok(rlua::Value::Nil)
}

/// Default new indexing (assignment) of an Awesome object.
///
/// Automatically looks up contents in meta table, so instead of overriding this
/// it's easier to just add the required data in the meta table.
pub fn default_newindex<'lua, S: ObjectStateType>( _: &'lua Lua, (obj, index, val): (Object<'lua, S>, String, Value<'lua>)) -> rlua::Result<Value<'lua>> { // Look up in metatable first if let Some(meta) = obj.get_metatable()? { if let Ok(val) = meta.raw_get::<_, Value>(index.clone()) { match val { Value::Nil => {} val => return Ok(val) } } let class = meta.get::<_, AnyUserData>("__class")?; let class_table = class.get_user_value::<Table>()?; let props = class_table.get::<_, Vec<Property>>("properties")?; for prop in props { if prop.name.as_str() == index { // Property exists and has a newindex callback if let Some(newindex) = prop.cb_newindex { return newindex.bind(obj.clone())?.call(val) } } } if let Some(meta) = class_table.get_metatable() { match meta.get::<_, Function>("__newindex_miss_handler") { Ok(function) => return function.bind(obj)?.call((index, val)), Err(_) => {} } } // TODO property miss handler if index doesn't exist } Ok(Value::Nil) } pub fn default_tostring<'lua, S>(_: &'lua Lua, obj: Object<'lua, S>) -> rlua::Result<String> where S: ObjectStateType { if let Some(meta) = obj.get_metatable()? 
{ let class = meta.get::<_, AnyUserData>("__class")?; let class_table = class.get_user_value::<Table>()?; let name = class_table.get::<_, String>("name")?; return Ok(format!("{}: {:p}", name, &*obj.state()?)) } Err(rlua::Error::UserDataTypeMismatch) } fn connect_signal<S: ObjectStateType>(lua: &Lua, (obj, signal, func): (Object<S>, String, Function)) -> rlua::Result<()> { signal::connect_signal(lua, obj.into(), signal, &[func]) } fn disconnect_signal<S: ObjectStateType>(lua: &Lua, (obj, signal): (Object<S>, String)) -> rlua::Result<()> { signal::disconnect_signal(lua, obj.into(), signal) } fn emit_signal<S: ObjectStateType>(lua: &Lua, (obj, signal, args): (Object<S>, String, Value)) -> rlua::Result<()> { signal::emit_object_signal(lua, obj.into(), signal, args) } Make `obj` property private No need to make `obj` public, since it is all abstracted inside the implementation. //! Utility methods and constructors for Lua objects use std::convert::From; use std::cell; use std::marker::PhantomData; use rlua::{self, AnyUserData, FromLua, Function, Lua, MetaMethod, Table, ToLua, UserData, UserDataMethods, Value}; use super::class::Class; use super::property::Property; use super::signal; /// The ObjectStateType trait is used to constrain the generic data types in the Object and Class structs. /// They can be transferred to and from Lua user data and force type checking pub trait ObjectStateType: UserData + Default + Send {} impl<T> ObjectStateType for T where T: UserData + Default + Send {} /// All Lua objects can be cast to this. 
#[derive(Debug)] pub struct Object<'lua, S: ObjectStateType>{ obj: AnyUserData<'lua>, state: PhantomData<S>, } impl<'lua, S: ObjectStateType> Clone for Object<'lua, S> { fn clone(&self) -> Self { Object { obj: self.obj.clone(), state: PhantomData } } } impl<'lua, S: ObjectStateType> From<AnyUserData<'lua>> for Object<'lua, S> { fn from(obj: AnyUserData<'lua>) -> Self { Object { obj, state: PhantomData } } } impl<'lua, S: ObjectStateType> Into<AnyUserData<'lua>> for Object<'lua, S> { fn into(self) -> AnyUserData<'lua> { self.obj } } /// Construct a new object, used when using the default Objectable::new. pub struct ObjectBuilder<'lua, S: ObjectStateType> { lua: &'lua Lua, object: Object<'lua, S> } impl<'lua, S: ObjectStateType> ObjectBuilder<'lua, S> { pub fn add_to_meta(self, new_meta: Table<'lua>) -> rlua::Result<Self> { let meta = self.object.get_metatable()? .expect("Object had no meta table"); for entry in new_meta.pairs::<rlua::Value, rlua::Value>() { let (key, value) = entry?; meta.set(key, value)?; } self.object.set_metatable(meta)?; Ok(self) } #[allow(dead_code)] pub fn add_to_signals(self, name: String, func: rlua::Function) -> rlua::Result<Self> { signal::connect_signal(self.lua, self.object.clone(), name, &[func])?; Ok(self) } pub fn handle_constructor_argument(self, args: Table) -> rlua::Result<Self> { let meta = self.object .get_metatable()? 
.expect("Object had no meta table"); let class = meta.get::<_, AnyUserData>("__class")?; let class_table = class.get_user_value::<Table>()?; let props = class_table.get::<_, Vec<Property>>("properties")?; // Handle all table entries that correspond to known properties, // silently ignore all other keys for pair in args.pairs() { let (key, value): (Value, Value) = pair?; if let rlua::Value::String(key) = key { if let Ok(key) = key.to_str() { // Find the property for prop in props.iter() { if prop.name == key { // Property exists and has a cb_new callback if let Some(ref new) = prop.cb_new { let _: () = new.bind(self.object.clone())?.call(value)?; break } } } } } } Ok(self) } pub fn build(self) -> Object<'lua, S> { self.object } } /// Objects that represent OO lua objects. /// /// Allows casting an object gotten back from the Lua runtime /// into a concrete object so that Rust can do things with it. /// impl<'lua, S: ObjectStateType> Object<'lua, S> { pub fn cast(obj: AnyUserData<'lua>) -> rlua::Result<Self> { if obj.is::<S>()? { Ok(obj.into()) } else { use rlua::Error::RuntimeError; Err(RuntimeError("Could not cast object to concrete type".into())) } } /// Gets a reference to the internal state for the concrete object. pub fn state(&self) -> rlua::Result<cell::Ref<S>> { Ok(self.obj.borrow::<S>()?) } /// Gets a mutable reference to the internal state for the concrete object. pub fn state_mut(&mut self) -> rlua::Result<cell::RefMut<S>> { Ok(self.obj.borrow_mut::<S>()?) } /// Get the signals of the for this object pub fn signals(&self) -> rlua::Result<rlua::Table<'lua>> { self.get_associated_data::<Table>("signals") } /// Set a value to keep inside lua associate with the object, but /// which should not be transfered to Rust for various reason /// (e.g. reference to other objects which cause GC problems) pub fn set_associated_data<D: ToLua<'lua>>(&self, key: &str, value: D) -> rlua::Result<()> { self.obj .get_user_value::<Table<'lua>>()? 
.set::<_, D>(key, value) } /// Get a value to keep inside lua associate with the object pub fn get_associated_data<D: FromLua<'lua>>(&self, key: &str) -> rlua::Result<D> { self.obj .get_user_value::<Table<'lua>>()? .get::<_, D>(key) } /// Get the metatable for this object pub fn get_metatable(&self) -> rlua::Result<Option<Table<'lua>>> { Ok(self.obj.get_user_value::<Table<'lua>>()?.get_metatable()) } /// Set the metatable for this object pub fn set_metatable(&self, meta: Table<'lua>) -> rlua::Result<()> { self.obj.get_user_value::<Table<'lua>>()?.set_metatable(Some(meta)); Ok(()) } /// Lua objects in Way Cooler are just how they are in Awesome: /// * We expose the user data directly. /// * ObjectStateType for the object is stored using set_user_value in a "wrapper" /// table. * The wrapper table has a data field which hosts the data. /// * Class methods/attributes are on the meta table of the wrapper table. pub fn allocate(lua: &'lua Lua, class: Class<S>) -> rlua::Result<ObjectBuilder<'lua, S>> { let obj = lua.create_userdata(S::default())?; // TODO Increment the instance count let wrapper_table = lua.create_table()?; let data_table = lua.create_table()?; wrapper_table.set("data", data_table)?; let meta = lua.create_table()?; meta.set("__class", class)?; meta.set("properties", Vec::<Property>::new().to_lua(lua)?)?; meta.set("signals", lua.create_table()?)?; meta.set("connect_signal", lua.create_function(connect_signal::<S>)?)?; meta.set("disconnect_signal", lua.create_function(disconnect_signal::<S>)?)?; meta.set("emit_signal", lua.create_function(emit_signal::<S>)?)?; meta.set("__index", meta.clone())?; meta.set("__tostring", lua.create_function(default_tostring::<S>)?)?; wrapper_table.set_metatable(Some(meta)); obj.set_user_value(wrapper_table)?; // TODO Emit new signal event let object = Object { obj, state: PhantomData }; Ok(ObjectBuilder { object, lua }) } } impl<'lua, S: ObjectStateType> ToLua<'lua> for Object<'lua, S> { fn to_lua(self, _: &'lua Lua) -> 
rlua::Result<Value<'lua>> { Ok(Value::UserData(self.obj)) } } impl<'lua, S: ObjectStateType> FromLua<'lua> for Object<'lua, S> { fn from_lua(val: Value<'lua>, _lua: &'lua Lua) -> rlua::Result<Self> { if let Value::UserData(obj) = val { Object::cast(obj) } else { Err(rlua::Error::RuntimeError("Invalid data supplied".into())) } } } /// Can be used for implementing UserData for Lua objects. This provides some /// default metafunctions. pub fn default_add_methods<S>(methods: &mut UserDataMethods<S>) where S: ObjectStateType { methods.add_meta_function(MetaMethod::Index, default_index::<S>); methods.add_meta_function(MetaMethod::NewIndex, default_newindex::<S>); methods.add_meta_function(MetaMethod::ToString, default_tostring::<S>); } /// Default indexing of an Awesome object. /// /// Automatically looks up contents in meta table, so instead of overriding this /// it's easier to just add the required data in the meta table. pub fn default_index<'lua, S: ObjectStateType>(lua: &'lua Lua, (obj, index): (Object<'lua, S>, Value<'lua>)) -> rlua::Result<Value<'lua>> { // Look up in metatable first let meta = obj.get_metatable()?.expect("Object had no metatable"); if meta.get::<_, AnyUserData>("__class").is_ok() { if let Ok(val) = meta.raw_get::<_, Value>(index.clone()) { match val { Value::Nil => {} val => return Ok(val) } } } let index = match String::from_lua(index, lua) { Ok(s) => s, Err(_) => return Ok(Value::Nil) }; match index.as_str() { "valid" => { Ok(Value::Boolean(if let Ok(class) = meta.get::<_, AnyUserData>("__class") { let class: Class<S> = class.into(); class.checker()?.map(|checker| checker(obj)).unwrap_or(true) } else { false })) } "data" => obj.obj.get_user_value(), index => { // Try see if there is a property of the class with the name if let Ok(class) = meta.get::<_, AnyUserData>("__class") { let class_table = class.get_user_value::<Table>()?; let props = class_table.get::<_, Vec<Property>>("properties")?; for prop in props { if prop.name.as_str() == index { 
// Property exists and has an index callback if let Some(index) = prop.cb_index { return index.call(obj) } } } if let Some(meta) = class_table.get_metatable() { match meta.get::<_, Function>("__index_miss_handler") { Ok(function) => return function.bind(obj)?.call(index), Err(_) => {} } } } // TODO property miss handler if index doesn't exst Ok(rlua::Value::Nil) } } } /// Default new indexing (assignment) of an Awesome object. /// /// Automatically looks up contents in meta table, so instead of overriding this /// it's easier to just add the required data in the meta table. pub fn default_newindex<'lua, S: ObjectStateType>( _: &'lua Lua, (obj, index, val): (Object<'lua, S>, String, Value<'lua>)) -> rlua::Result<Value<'lua>> { // Look up in metatable first if let Some(meta) = obj.get_metatable()? { if let Ok(val) = meta.raw_get::<_, Value>(index.clone()) { match val { Value::Nil => {} val => return Ok(val) } } let class = meta.get::<_, AnyUserData>("__class")?; let class_table = class.get_user_value::<Table>()?; let props = class_table.get::<_, Vec<Property>>("properties")?; for prop in props { if prop.name.as_str() == index { // Property exists and has a newindex callback if let Some(newindex) = prop.cb_newindex { return newindex.bind(obj.clone())?.call(val) } } } if let Some(meta) = class_table.get_metatable() { match meta.get::<_, Function>("__newindex_miss_handler") { Ok(function) => return function.bind(obj)?.call((index, val)), Err(_) => {} } } // TODO property miss handler if index doesn't exist } Ok(Value::Nil) } pub fn default_tostring<'lua, S>(_: &'lua Lua, obj: Object<'lua, S>) -> rlua::Result<String> where S: ObjectStateType { if let Some(meta) = obj.get_metatable()? 
{ let class = meta.get::<_, AnyUserData>("__class")?; let class_table = class.get_user_value::<Table>()?; let name = class_table.get::<_, String>("name")?; return Ok(format!("{}: {:p}", name, &*obj.state()?)) } Err(rlua::Error::UserDataTypeMismatch) } fn connect_signal<S: ObjectStateType>(lua: &Lua, (obj, signal, func): (Object<S>, String, Function)) -> rlua::Result<()> { signal::connect_signal(lua, obj.into(), signal, &[func]) } fn disconnect_signal<S: ObjectStateType>(lua: &Lua, (obj, signal): (Object<S>, String)) -> rlua::Result<()> { signal::disconnect_signal(lua, obj.into(), signal) } fn emit_signal<S: ObjectStateType>(lua: &Lua, (obj, signal, args): (Object<S>, String, Value)) -> rlua::Result<()> { signal::emit_object_signal(lua, obj.into(), signal, args) }
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use api::{DeviceIntPoint, DeviceIntRect, DeviceIntSize, DeviceSize, DeviceIntSideOffsets, ImageDescriptor, ImageFormat};
#[cfg(feature = "pathfinder")]
use api::FontRenderMode;
use border::BorderCacheKey;
use box_shadow::{BoxShadowCacheKey};
use clip::{ClipItem, ClipStore, ClipNodeRange};
use clip_scroll_tree::SpatialNodeIndex;
use device::TextureFilter;
#[cfg(feature = "pathfinder")]
use euclid::{TypedPoint2D, TypedVector2D};
use freelist::{FreeList, FreeListHandle, WeakFreeListHandle};
use glyph_rasterizer::GpuGlyphCacheKey;
use gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle};
use gpu_types::{BorderInstance, ImageSource, RasterizationSpace, UvRectKind};
use internal_types::{FastHashMap, SavedTargetIndex, SourceTexture};
#[cfg(feature = "pathfinder")]
use pathfinder_partitioner::mesh::Mesh;
use picture::PictureCacheKey;
use prim_store::{PrimitiveIndex, ImageCacheKey};
#[cfg(feature = "debugger")]
use print_tree::{PrintTreePrinter};
use render_backend::FrameId;
use resource_cache::{CacheItem, ResourceCache};
use std::{cmp, ops, usize, f32, i32};
use texture_cache::{TextureCache, TextureCacheHandle, Eviction};
use tiling::{RenderPass, RenderTargetIndex};
use tiling::{RenderTargetKind};
#[cfg(feature = "pathfinder")]
use webrender_api::DevicePixel;

// A render task dimension above this is treated as a bug upstream; see
// `render_task_sanity_check` below.
const RENDER_TASK_SIZE_SANITY_CHECK: i32 = 16000;
// NOTE(review): presumably the number of f32s a packed render-task entry
// occupies on the GPU side — confirm against the shader layout.
const FLOATS_PER_RENDER_TASK_INFO: usize = 8;
pub const MAX_BLUR_STD_DEVIATION: f32 = 4.0;
pub const MIN_DOWNSCALING_RT_SIZE: i32 = 128;

// Panic (after logging) when a render task exceeds the sanity-check size in
// either dimension. Called to catch runaway task allocations early.
fn render_task_sanity_check(size: &DeviceIntSize) {
    if size.width > RENDER_TASK_SIZE_SANITY_CHECK ||
       size.height > RENDER_TASK_SIZE_SANITY_CHECK {
        error!("Attempting to create a render task of size {}x{}", size.width, size.height);
        panic!();
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture",
derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct RenderTaskId(pub u32, FrameId); // TODO(gw): Make private when using GPU cache! #[derive(Debug, Copy, Clone)] #[repr(C)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct RenderTaskAddress(pub u32); #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct RenderTaskTree { pub tasks: Vec<RenderTask>, pub task_data: Vec<RenderTaskData>, next_saved: SavedTargetIndex, frame_id: FrameId, } impl RenderTaskTree { pub fn new(frame_id: FrameId) -> Self { RenderTaskTree { tasks: Vec::new(), task_data: Vec::new(), next_saved: SavedTargetIndex(0), frame_id, } } pub fn add(&mut self, task: RenderTask) -> RenderTaskId { let id = self.tasks.len(); self.tasks.push(task); RenderTaskId(id as _, self.frame_id) } pub fn max_depth(&self, id: RenderTaskId, depth: usize, max_depth: &mut usize) { debug_assert_eq!(self.frame_id, id.1); let depth = depth + 1; *max_depth = cmp::max(*max_depth, depth); let task = &self.tasks[id.0 as usize]; for child in &task.children { self.max_depth(*child, depth, max_depth); } } pub fn assign_to_passes( &self, id: RenderTaskId, pass_index: usize, passes: &mut [RenderPass], ) { debug_assert_eq!(self.frame_id, id.1); let task = &self.tasks[id.0 as usize]; for child in &task.children { self.assign_to_passes(*child, pass_index - 1, passes); } // Sanity check - can be relaxed if needed match task.location { RenderTaskLocation::Fixed(..) => { debug_assert!(pass_index == passes.len() - 1); } RenderTaskLocation::Dynamic(..) | RenderTaskLocation::TextureCache(..) => { debug_assert!(pass_index < passes.len() - 1); } } // If this task can be shared between multiple // passes, render it in the first pass so that // it is available to all subsequent passes. 
let pass_index = if task.is_shared() { debug_assert!(task.children.is_empty()); 0 } else { pass_index }; let pass = &mut passes[pass_index]; pass.add_render_task(id, task.get_dynamic_size(), task.target_kind()); } pub fn prepare_for_render(&mut self) { for task in &mut self.tasks { task.prepare_for_render(); } } pub fn get_task_address(&self, id: RenderTaskId) -> RenderTaskAddress { debug_assert_eq!(self.frame_id, id.1); RenderTaskAddress(id.0) } pub fn write_task_data(&mut self) { for task in &self.tasks { self.task_data.push(task.write_task_data()); } } pub fn save_target(&mut self) -> SavedTargetIndex { let id = self.next_saved; self.next_saved.0 += 1; id } #[cfg(debug_assertions)] pub fn frame_id(&self) -> FrameId { self.frame_id } } impl ops::Index<RenderTaskId> for RenderTaskTree { type Output = RenderTask; fn index(&self, id: RenderTaskId) -> &RenderTask { debug_assert_eq!(self.frame_id, id.1); &self.tasks[id.0 as usize] } } impl ops::IndexMut<RenderTaskId> for RenderTaskTree { fn index_mut(&mut self, id: RenderTaskId) -> &mut RenderTask { debug_assert_eq!(self.frame_id, id.1); &mut self.tasks[id.0 as usize] } } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub enum RenderTaskLocation { Fixed(DeviceIntRect), Dynamic(Option<(DeviceIntPoint, RenderTargetIndex)>, DeviceIntSize), TextureCache(SourceTexture, i32, DeviceIntRect), } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct CacheMaskTask { actual_rect: DeviceIntRect, pub root_spatial_node_index: SpatialNodeIndex, pub clip_node_range: ClipNodeRange, } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct ClipRegionTask { pub clip_data_address: GpuCacheAddress, } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", 
derive(Deserialize))]
// (attribute list continued from the previous span)
// Parameters for a task that rasterizes a picture (a composited group
// of primitives) into a render target.
pub struct PictureTask {
    pub prim_index: PrimitiveIndex,
    // Set when the task size covers the whole unclipped picture size
    // (see `RenderTask::new_picture`, which computes this).
    pub can_merge: bool,
    pub content_origin: DeviceIntPoint,
    // GPU cache location for this task's UV rect; requested and filled
    // in by `RenderTask::write_gpu_blocks`.
    pub uv_rect_handle: GpuCacheHandle,
    pub root_spatial_node_index: SpatialNodeIndex,
    uv_rect_kind: UvRectKind,
}

// Parameters shared by the vertical and horizontal separable blur passes.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BlurTask {
    pub blur_std_deviation: f32,
    // Whether this blur draws into a color or alpha target.
    pub target_kind: RenderTargetKind,
    // GPU cache location for this task's UV rect; filled in by
    // `RenderTask::write_gpu_blocks`.
    pub uv_rect_handle: GpuCacheHandle,
    uv_rect_kind: UvRectKind,
}

impl BlurTask {
    // Debugger support: emit this task's parameters into the print tree.
    #[cfg(feature = "debugger")]
    fn print_with<T: PrintTreePrinter>(&self, pt: &mut T) {
        pt.add_item(format!("std deviation: {}", self.blur_std_deviation));
        pt.add_item(format!("target: {:?}", self.target_kind));
    }
}

// Parameters for GPU glyph rasterization (pathfinder builds only).
#[derive(Debug)]
#[cfg(feature = "pathfinder")]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct GlyphTask {
    /// After job building, this becomes `None`.
    pub mesh: Option<Mesh>,
    pub origin: DeviceIntPoint,
    pub subpixel_offset: TypedPoint2D<f32, DevicePixel>,
    pub render_mode: FontRenderMode,
    pub embolden_amount: TypedVector2D<f32, DevicePixel>,
}

// Empty placeholder so `RenderTaskKind::Glyph` still exists (but carries
// no data) when the pathfinder feature is disabled.
#[cfg(not(feature = "pathfinder"))]
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct GlyphTask;

// Where the source data for a blit task can be found.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BlitSource {
    // Blit from an image in the texture cache.
    Image {
        key: ImageCacheKey,
    },
    // Blit from the output of another render task.
    RenderTask {
        task_id: RenderTaskId,
    },
}

// Parameters for a task that draws a set of border segment instances.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BorderTask {
    pub instances: Vec<BorderInstance>,
}

// Parameters for a task that copies pixels from a source (image or
// other render task) into this task's target rect.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BlitTask {
    pub source: BlitSource,
    // Extra space added around the copied source
    // (see `RenderTask::new_blit_with_padding`).
    pub padding: DeviceIntSideOffsets,
}

// Per-task data uploaded to the GPU via a vertex texture. The layout
// must stay in sync with the GPU-side structs — see
// `RenderTask::write_task_data` for the exact field packing.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderTaskData {
    pub data: [f32; FLOATS_PER_RENDER_TASK_INFO],
}

// The union of all render task types, carrying the kind-specific
// parameters for each.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum RenderTaskKind {
    Picture(PictureTask),
    CacheMask(CacheMaskTask),
    ClipRegion(ClipRegionTask),
    VerticalBlur(BlurTask),
    HorizontalBlur(BlurTask),
    #[allow(dead_code)]
    Glyph(GlyphTask),
    Readback(DeviceIntRect),
    Scaling(RenderTargetKind),
    Blit(BlitTask),
    Border(BorderTask),
}

// How a render target is cleared before its tasks are drawn into it.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum ClearMode {
    // Applicable to color and alpha targets.
    Zero,
    One,

    // Applicable to color targets only.
Transparent, } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct RenderTask { pub location: RenderTaskLocation, pub children: Vec<RenderTaskId>, pub kind: RenderTaskKind, pub clear_mode: ClearMode, pub saved_index: Option<SavedTargetIndex>, } impl RenderTask { #[inline] pub fn with_dynamic_location( size: DeviceIntSize, children: Vec<RenderTaskId>, kind: RenderTaskKind, clear_mode: ClearMode, ) -> Self { render_task_sanity_check(&size); RenderTask { location: RenderTaskLocation::Dynamic(None, size), children, kind, clear_mode, saved_index: None, } } pub fn new_picture( location: RenderTaskLocation, unclipped_size: DeviceSize, prim_index: PrimitiveIndex, content_origin: DeviceIntPoint, children: Vec<RenderTaskId>, uv_rect_kind: UvRectKind, root_spatial_node_index: SpatialNodeIndex, ) -> Self { let size = match location { RenderTaskLocation::Dynamic(_, size) => size, RenderTaskLocation::Fixed(rect) => rect.size, RenderTaskLocation::TextureCache(_, _, rect) => rect.size, }; render_task_sanity_check(&size); let can_merge = size.width as f32 >= unclipped_size.width && size.height as f32 >= unclipped_size.height; RenderTask { location, children, kind: RenderTaskKind::Picture(PictureTask { prim_index, content_origin, can_merge, uv_rect_handle: GpuCacheHandle::new(), uv_rect_kind, root_spatial_node_index, }), clear_mode: ClearMode::Transparent, saved_index: None, } } pub fn new_readback(screen_rect: DeviceIntRect) -> Self { RenderTask::with_dynamic_location( screen_rect.size, Vec::new(), RenderTaskKind::Readback(screen_rect), ClearMode::Transparent, ) } pub fn new_blit( size: DeviceIntSize, source: BlitSource, ) -> Self { RenderTask::new_blit_with_padding(size, &DeviceIntSideOffsets::zero(), source) } pub fn new_blit_with_padding( mut size: DeviceIntSize, padding: &DeviceIntSideOffsets, source: BlitSource, ) -> Self { let mut children = Vec::new(); // If this blit uses a render task as a 
source, // ensure it's added as a child task. This will // ensure it gets allocated in the correct pass // and made available as an input when this task // executes. if let BlitSource::RenderTask { task_id } = source { children.push(task_id); } size.width += padding.horizontal(); size.height += padding.vertical(); RenderTask::with_dynamic_location( size, children, RenderTaskKind::Blit(BlitTask { source, padding: *padding, }), ClearMode::Transparent, ) } pub fn new_mask( outer_rect: DeviceIntRect, clip_node_range: ClipNodeRange, root_spatial_node_index: SpatialNodeIndex, clip_store: &mut ClipStore, gpu_cache: &mut GpuCache, resource_cache: &mut ResourceCache, render_tasks: &mut RenderTaskTree, ) -> Self { let mut children = Vec::new(); // Step through the clip sources that make up this mask. If we find // any box-shadow clip sources, request that image from the render // task cache. This allows the blurred box-shadow rect to be cached // in the texture cache across frames. // TODO(gw): Consider moving this logic outside this function, especially // as we add more clip sources that depend on render tasks. // TODO(gw): If this ever shows up in a profile, we could pre-calculate // whether a ClipSources contains any box-shadows and skip // this iteration for the majority of cases. for i in 0 .. clip_node_range.count { let (clip_node, _) = clip_store.get_node_from_range_mut(&clip_node_range, i); match clip_node.item { ClipItem::BoxShadow(ref mut info) => { let (cache_size, cache_key) = info.cache_key .as_ref() .expect("bug: no cache key set") .clone(); let blur_radius_dp = cache_key.blur_radius_dp as f32; let clip_data_address = gpu_cache.get_address(&info.clip_data_handle); // Request a cacheable render task with a blurred, minimal // sized box-shadow rect. 
info.cache_handle = Some(resource_cache.request_render_task( RenderTaskCacheKey { size: cache_size, kind: RenderTaskCacheKeyKind::BoxShadow(cache_key), }, gpu_cache, render_tasks, None, false, |render_tasks| { // Draw the rounded rect. let mask_task = RenderTask::new_rounded_rect_mask( cache_size, clip_data_address, ); let mask_task_id = render_tasks.add(mask_task); // Blur it let blur_render_task = RenderTask::new_blur( blur_radius_dp, mask_task_id, render_tasks, RenderTargetKind::Alpha, ClearMode::Zero, ); let root_task_id = render_tasks.add(blur_render_task); children.push(root_task_id); root_task_id } )); } ClipItem::Rectangle(..) | ClipItem::RoundedRectangle(..) | ClipItem::Image(..) | ClipItem::LineDecoration(..) => {} } } RenderTask::with_dynamic_location( outer_rect.size, children, RenderTaskKind::CacheMask(CacheMaskTask { actual_rect: outer_rect, clip_node_range, root_spatial_node_index, }), ClearMode::One, ) } pub fn new_rounded_rect_mask( size: DeviceIntSize, clip_data_address: GpuCacheAddress, ) -> Self { RenderTask::with_dynamic_location( size, Vec::new(), RenderTaskKind::ClipRegion(ClipRegionTask { clip_data_address, }), ClearMode::One, ) } // Construct a render task to apply a blur to a primitive. // The render task chain that is constructed looks like: // // PrimitiveCacheTask: Draw the primitives. // ^ // | // DownscalingTask(s): Each downscaling task reduces the size of render target to // ^ half. Also reduce the std deviation to half until the std // | deviation less than 4.0. // | // | // VerticalBlurTask: Apply the separable vertical blur to the primitive. // ^ // | // HorizontalBlurTask: Apply the separable horizontal blur to the vertical blur. // | // +---- This is stored as the input task to the primitive shader. // pub fn new_blur( blur_std_deviation: f32, src_task_id: RenderTaskId, render_tasks: &mut RenderTaskTree, target_kind: RenderTargetKind, clear_mode: ClearMode, ) -> Self { // Adjust large std deviation value. 
        // (continuation of `new_blur`) Repeatedly halve the std deviation,
        // inserting a downscaling task each time, until the deviation fits
        // under MAX_BLUR_STD_DEVIATION so the blur kernel stays bounded.
        // Stop early if the target would shrink below MIN_DOWNSCALING_RT_SIZE.
        let mut adjusted_blur_std_deviation = blur_std_deviation;
        let (blur_target_size, uv_rect_kind) = {
            let src_task = &render_tasks[src_task_id];
            (src_task.get_dynamic_size(), src_task.uv_rect_kind())
        };
        let mut adjusted_blur_target_size = blur_target_size;
        let mut downscaling_src_task_id = src_task_id;
        let mut scale_factor = 1.0;
        while adjusted_blur_std_deviation > MAX_BLUR_STD_DEVIATION {
            if adjusted_blur_target_size.width < MIN_DOWNSCALING_RT_SIZE ||
               adjusted_blur_target_size.height < MIN_DOWNSCALING_RT_SIZE {
                break;
            }
            adjusted_blur_std_deviation *= 0.5;
            scale_factor *= 2.0;
            // Recompute from the original size each iteration to avoid
            // accumulating integer rounding error.
            adjusted_blur_target_size = (blur_target_size.to_f32() / scale_factor).to_i32();
            let downscaling_task = RenderTask::new_scaling(
                target_kind,
                downscaling_src_task_id,
                adjusted_blur_target_size,
            );
            downscaling_src_task_id = render_tasks.add(downscaling_task);
        }

        // Vertical pass reads from the (possibly downscaled) source task.
        let blur_task_v = RenderTask::with_dynamic_location(
            adjusted_blur_target_size,
            vec![downscaling_src_task_id],
            RenderTaskKind::VerticalBlur(BlurTask {
                blur_std_deviation: adjusted_blur_std_deviation,
                target_kind,
                uv_rect_handle: GpuCacheHandle::new(),
                uv_rect_kind,
            }),
            clear_mode,
        );

        let blur_task_v_id = render_tasks.add(blur_task_v);

        // Horizontal pass reads the vertical result; this is the task
        // handed back to the caller as the blur chain's root.
        RenderTask::with_dynamic_location(
            adjusted_blur_target_size,
            vec![blur_task_v_id],
            RenderTaskKind::HorizontalBlur(BlurTask {
                blur_std_deviation: adjusted_blur_std_deviation,
                target_kind,
                uv_rect_handle: GpuCacheHandle::new(),
                uv_rect_kind,
            }),
            clear_mode,
        )
    }

    // Construct a task that rasterizes the given border segment instances
    // into a color target of the given size.
    pub fn new_border(
        size: DeviceIntSize,
        instances: Vec<BorderInstance>,
    ) -> Self {
        RenderTask::with_dynamic_location(
            size,
            Vec::new(),
            RenderTaskKind::Border(BorderTask {
                instances,
            }),
            ClearMode::Transparent,
        )
    }

    // Construct a task that rescales the output of `src_task_id` into a
    // target of `target_size`. The clear mode follows the target kind.
    pub fn new_scaling(
        target_kind: RenderTargetKind,
        src_task_id: RenderTaskId,
        target_size: DeviceIntSize,
    ) -> Self {
        RenderTask::with_dynamic_location(
            target_size,
            vec![src_task_id],
            RenderTaskKind::Scaling(target_kind),
            match target_kind {
                RenderTargetKind::Color => ClearMode::Transparent,
                RenderTargetKind::Alpha => ClearMode::One,
            },
        )
    }
#[cfg(feature = "pathfinder")] pub fn new_glyph(location: RenderTaskLocation, mesh: Mesh, origin: &DeviceIntPoint, subpixel_offset: &TypedPoint2D<f32, DevicePixel>, render_mode: FontRenderMode, embolden_amount: &TypedVector2D<f32, DevicePixel>) -> Self { RenderTask { children: vec![], location: location, kind: RenderTaskKind::Glyph(GlyphTask { mesh: Some(mesh), origin: *origin, subpixel_offset: *subpixel_offset, render_mode: render_mode, embolden_amount: *embolden_amount, }), clear_mode: ClearMode::Transparent, saved_index: None, } } fn uv_rect_kind(&self) -> UvRectKind { match self.kind { RenderTaskKind::CacheMask(..) | RenderTaskKind::Readback(..) | RenderTaskKind::Scaling(..) => { unreachable!("bug: unexpected render task"); } RenderTaskKind::Picture(ref task) => { task.uv_rect_kind } RenderTaskKind::VerticalBlur(ref task) | RenderTaskKind::HorizontalBlur(ref task) => { task.uv_rect_kind } RenderTaskKind::ClipRegion(..) | RenderTaskKind::Glyph(_) | RenderTaskKind::Border(..) | RenderTaskKind::Blit(..) => { UvRectKind::Rect } } } // Write (up to) 8 floats of data specific to the type // of render task that is provided to the GPU shaders // via a vertex texture. pub fn write_task_data(&self) -> RenderTaskData { // NOTE: The ordering and layout of these structures are // required to match both the GPU structures declared // in prim_shared.glsl, and also the uses in submit_batch() // in renderer.rs. // TODO(gw): Maybe there's a way to make this stuff a bit // more type-safe. Although, it will always need // to be kept in sync with the GLSL code anyway. let data = match self.kind { RenderTaskKind::Picture(ref task) => { // Note: has to match `PICTURE_TYPE_*` in shaders [ task.content_origin.x as f32, task.content_origin.y as f32, 0.0, ] } RenderTaskKind::CacheMask(ref task) => { [ task.actual_rect.origin.x as f32, task.actual_rect.origin.y as f32, RasterizationSpace::Screen as i32 as f32, ] } RenderTaskKind::ClipRegion(..) 
=> { [ 0.0, 0.0, RasterizationSpace::Local as i32 as f32, ] } RenderTaskKind::VerticalBlur(ref task) | RenderTaskKind::HorizontalBlur(ref task) => { [ task.blur_std_deviation, 0.0, 0.0, ] } RenderTaskKind::Glyph(_) => { [1.0, 0.0, 0.0] } RenderTaskKind::Readback(..) | RenderTaskKind::Scaling(..) | RenderTaskKind::Border(..) | RenderTaskKind::Blit(..) => { [0.0; 3] } }; let (mut target_rect, target_index) = self.get_target_rect(); // The primitives inside a fixed-location render task // are already placed to their corresponding positions, // so the shader doesn't need to shift by the origin. if let RenderTaskLocation::Fixed(_) = self.location { target_rect.origin = DeviceIntPoint::origin(); }; RenderTaskData { data: [ target_rect.origin.x as f32, target_rect.origin.y as f32, target_rect.size.width as f32, target_rect.size.height as f32, target_index.0 as f32, data[0], data[1], data[2], ] } } pub fn get_texture_address(&self, gpu_cache: &GpuCache) -> GpuCacheAddress { match self.kind { RenderTaskKind::Picture(ref info) => { gpu_cache.get_address(&info.uv_rect_handle) } RenderTaskKind::VerticalBlur(ref info) | RenderTaskKind::HorizontalBlur(ref info) => { gpu_cache.get_address(&info.uv_rect_handle) } RenderTaskKind::ClipRegion(..) | RenderTaskKind::Readback(..) | RenderTaskKind::Scaling(..) | RenderTaskKind::Blit(..) | RenderTaskKind::Border(..) | RenderTaskKind::CacheMask(..) | RenderTaskKind::Glyph(..) => { panic!("texture handle not supported for this task kind"); } } } pub fn get_dynamic_size(&self) -> DeviceIntSize { match self.location { RenderTaskLocation::Fixed(..) => DeviceIntSize::zero(), RenderTaskLocation::Dynamic(_, size) => size, RenderTaskLocation::TextureCache(_, _, rect) => rect.size, } } pub fn get_target_rect(&self) -> (DeviceIntRect, RenderTargetIndex) { match self.location { RenderTaskLocation::Fixed(rect) => { (rect, RenderTargetIndex(0)) } // Previously, we only added render tasks after the entire // primitive chain was determined visible. 
This meant that // we could assert any render task in the list was also // allocated (assigned to passes). Now, we add render // tasks earlier, and the picture they belong to may be // culled out later, so we can't assert that the task // has been allocated. // Render tasks that are created but not assigned to // passes consume a row in the render task texture, but // don't allocate any space in render targets nor // draw any pixels. // TODO(gw): Consider some kind of tag or other method // to mark a task as unused explicitly. This // would allow us to restore this debug check. RenderTaskLocation::Dynamic(Some((origin, target_index)), size) => { (DeviceIntRect::new(origin, size), target_index) } RenderTaskLocation::Dynamic(None, _) => { (DeviceIntRect::zero(), RenderTargetIndex(0)) } RenderTaskLocation::TextureCache(_, layer, rect) => { (rect, RenderTargetIndex(layer as usize)) } } } pub fn target_kind(&self) -> RenderTargetKind { match self.kind { RenderTaskKind::Readback(..) => RenderTargetKind::Color, RenderTaskKind::ClipRegion(..) | RenderTaskKind::CacheMask(..) => { RenderTargetKind::Alpha } RenderTaskKind::VerticalBlur(ref task_info) | RenderTaskKind::HorizontalBlur(ref task_info) => { task_info.target_kind } RenderTaskKind::Glyph(..) => { RenderTargetKind::Color } RenderTaskKind::Scaling(target_kind) => { target_kind } RenderTaskKind::Border(..) | RenderTaskKind::Picture(..) => { RenderTargetKind::Color } RenderTaskKind::Blit(..) => { RenderTargetKind::Color } } } // Check if this task wants to be made available as an input // to all passes (except the first) in the render task tree. // To qualify for this, the task needs to have no children / dependencies. // Currently, this is only supported for A8 targets, but it can be // trivially extended to also support RGBA8 targets in the future // if we decide that is useful. pub fn is_shared(&self) -> bool { match self.kind { RenderTaskKind::Picture(..) | RenderTaskKind::VerticalBlur(..) 
| RenderTaskKind::Readback(..) | RenderTaskKind::HorizontalBlur(..) | RenderTaskKind::Scaling(..) | RenderTaskKind::ClipRegion(..) | RenderTaskKind::Blit(..) | RenderTaskKind::Border(..) | RenderTaskKind::Glyph(..) => false, // TODO(gw): For now, we've disabled the shared clip mask // optimization. It's of dubious value in the // future once we start to cache clip tasks anyway. // I have left shared texture support here though, // just in case we want it in the future. RenderTaskKind::CacheMask(..) => false, } } // Optionally, prepare the render task for drawing. This is executed // after all resource cache items (textures and glyphs) have been // resolved and can be queried. It also allows certain render tasks // to defer calculating an exact size until now, if desired. pub fn prepare_for_render(&mut self) { } pub fn write_gpu_blocks( &mut self, gpu_cache: &mut GpuCache, ) { let (target_rect, target_index) = self.get_target_rect(); let (cache_handle, uv_rect_kind) = match self.kind { RenderTaskKind::HorizontalBlur(ref mut info) | RenderTaskKind::VerticalBlur(ref mut info) => { (&mut info.uv_rect_handle, info.uv_rect_kind) } RenderTaskKind::Picture(ref mut info) => { (&mut info.uv_rect_handle, info.uv_rect_kind) } RenderTaskKind::Readback(..) | RenderTaskKind::Scaling(..) | RenderTaskKind::Blit(..) | RenderTaskKind::ClipRegion(..) | RenderTaskKind::Border(..) | RenderTaskKind::CacheMask(..) | RenderTaskKind::Glyph(..) 
=> { return; } }; if let Some(mut request) = gpu_cache.request(cache_handle) { let p0 = target_rect.origin.to_f32(); let p1 = target_rect.bottom_right().to_f32(); let image_source = ImageSource { p0, p1, texture_layer: target_index.0 as f32, user_data: [0.0; 3], uv_rect_kind, }; image_source.write_gpu_blocks(&mut request); } } #[cfg(feature = "debugger")] pub fn print_with<T: PrintTreePrinter>(&self, pt: &mut T, tree: &RenderTaskTree) -> bool { match self.kind { RenderTaskKind::Picture(ref task) => { pt.new_level(format!("Picture of {:?}", task.prim_index)); } RenderTaskKind::CacheMask(ref task) => { pt.new_level(format!("CacheMask with {} clips", task.clip_node_range.count)); pt.add_item(format!("rect: {:?}", task.actual_rect)); } RenderTaskKind::ClipRegion(..) => { pt.new_level("ClipRegion".to_owned()); } RenderTaskKind::VerticalBlur(ref task) => { pt.new_level("VerticalBlur".to_owned()); task.print_with(pt); } RenderTaskKind::HorizontalBlur(ref task) => { pt.new_level("HorizontalBlur".to_owned()); task.print_with(pt); } RenderTaskKind::Readback(ref rect) => { pt.new_level("Readback".to_owned()); pt.add_item(format!("rect: {:?}", rect)); } RenderTaskKind::Scaling(ref kind) => { pt.new_level("Scaling".to_owned()); pt.add_item(format!("kind: {:?}", kind)); } RenderTaskKind::Border(..) => { pt.new_level("Border".to_owned()); } RenderTaskKind::Blit(ref task) => { pt.new_level("Blit".to_owned()); pt.add_item(format!("source: {:?}", task.source)); } RenderTaskKind::Glyph(..) => { pt.new_level("Glyph".to_owned()); } } pt.add_item(format!("clear to: {:?}", self.clear_mode)); for &child_id in &self.children { if tree[child_id].print_with(pt, tree) { pt.add_item(format!("self: {:?}", child_id)) } } pt.end_level(); true } /// Mark this render task for keeping the results alive up until the end of the frame. pub fn mark_for_saving(&mut self) { match self.location { RenderTaskLocation::Fixed(..) | RenderTaskLocation::Dynamic(..) 
        // (continuation of `mark_for_saving`'s match on self.location)
        => {
                self.saved_index = Some(SavedTargetIndex::PENDING);
            }
            RenderTaskLocation::TextureCache(..) => {
                panic!("Unable to mark a permanently cached task for saving!");
            }
        }
    }
}

// Key kinds that identify a cacheable render task across frames.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum RenderTaskCacheKeyKind {
    BoxShadow(BoxShadowCacheKey),
    Image(ImageCacheKey),
    #[allow(dead_code)]
    Glyph(GpuGlyphCacheKey),
    Picture(PictureCacheKey),
    Border(BorderCacheKey),
}

// Full cache key: the task's size plus its kind-specific key.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderTaskCacheKey {
    pub size: DeviceIntSize,
    pub kind: RenderTaskCacheKeyKind,
}

// Cache entry state for one cached render task.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderTaskCacheEntry {
    // Task scheduled to (re)draw this entry this frame, if any;
    // consumed by `RenderTaskCache::update`.
    pending_render_task_id: Option<RenderTaskId>,
    user_data: Option<[f32; 3]>,
    is_opaque: bool,
    pub handle: TextureCacheHandle,
}

// Uninhabited marker type used to brand the free-list handles below.
#[derive(Debug)]
pub enum RenderTaskCacheMarker {}

// A cache of render tasks that are stored in the texture
// cache for usage across frames.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderTaskCache {
    map: FastHashMap<RenderTaskCacheKey, FreeListHandle<RenderTaskCacheMarker>>,
    cache_entries: FreeList<RenderTaskCacheEntry, RenderTaskCacheMarker>,
}

pub type RenderTaskCacheEntryHandle = WeakFreeListHandle<RenderTaskCacheMarker>;

impl RenderTaskCache {
    // Create an empty cache.
    pub fn new() -> Self {
        RenderTaskCache {
            map: FastHashMap::default(),
            cache_entries: FreeList::new(),
        }
    }

    // Drop all entries and their key mappings.
    pub fn clear(&mut self) {
        self.map.clear();
        self.cache_entries.clear();
    }

    // Per-frame housekeeping: prune entries whose texture-cache storage
    // has been evicted since last frame.
    pub fn begin_frame(
        &mut self,
        texture_cache: &mut TextureCache,
    ) {
        // Drop any items from the cache that have been
        // evicted from the texture cache.
// // This isn't actually necessary for the texture // cache to be able to evict old render tasks. // It will evict render tasks as required, since // the access time in the texture cache entry will // be stale if this task hasn't been requested // for a while. // // Nonetheless, we should remove stale entries // from here so that this hash map doesn't // grow indefinitely! let mut keys_to_remove = Vec::new(); for (key, handle) in &self.map { let entry = self.cache_entries.get(handle); if !texture_cache.is_allocated(&entry.handle) { keys_to_remove.push(key.clone()) } } for key in &keys_to_remove { let handle = self.map.remove(key).unwrap(); self.cache_entries.free(handle); } } pub fn update( &mut self, gpu_cache: &mut GpuCache, texture_cache: &mut TextureCache, render_tasks: &mut RenderTaskTree, ) { // Iterate the list of render task cache entries, // and allocate / update the texture cache location // if the entry has been evicted or not yet allocated. for (_, handle) in &self.map { let entry = self.cache_entries.get_mut(handle); if let Some(pending_render_task_id) = entry.pending_render_task_id.take() { let render_task = &mut render_tasks[pending_render_task_id]; let target_kind = render_task.target_kind(); // Find out what size to alloc in the texture cache. let size = match render_task.location { RenderTaskLocation::Fixed(..) | RenderTaskLocation::TextureCache(..) => { panic!("BUG: dynamic task was expected"); } RenderTaskLocation::Dynamic(_, size) => size, }; // Select the right texture page to allocate from. let image_format = match target_kind { RenderTargetKind::Color => ImageFormat::BGRA8, RenderTargetKind::Alpha => ImageFormat::R8, }; let descriptor = ImageDescriptor::new( size.width as u32, size.height as u32, image_format, entry.is_opaque, false, ); // Allocate space in the texture cache, but don't supply // and CPU-side data to be uploaded. 
texture_cache.update( &mut entry.handle, descriptor, TextureFilter::Linear, None, entry.user_data.unwrap_or([0.0; 3]), None, gpu_cache, None, render_task.uv_rect_kind(), Eviction::Auto, ); // Get the allocation details in the texture cache, and store // this in the render task. The renderer will draw this // task into the appropriate layer and rect of the texture // cache on this frame. let (texture_id, texture_layer, uv_rect) = texture_cache.get_cache_location(&entry.handle); render_task.location = RenderTaskLocation::TextureCache( texture_id, texture_layer, uv_rect.to_i32() ); } } } pub fn request_render_task<F>( &mut self, key: RenderTaskCacheKey, texture_cache: &mut TextureCache, gpu_cache: &mut GpuCache, render_tasks: &mut RenderTaskTree, user_data: Option<[f32; 3]>, is_opaque: bool, mut f: F, ) -> Result<RenderTaskCacheEntryHandle, ()> where F: FnMut(&mut RenderTaskTree) -> Result<RenderTaskId, ()> { // Get the texture cache handle for this cache key, // or create one. let cache_entries = &mut self.cache_entries; let entry_handle = self.map .entry(key) .or_insert_with(|| { let entry = RenderTaskCacheEntry { handle: TextureCacheHandle::new(), pending_render_task_id: None, user_data, is_opaque, }; cache_entries.insert(entry) }); let cache_entry = cache_entries.get_mut(entry_handle); if cache_entry.pending_render_task_id.is_none() { // Check if this texture cache handle is valid. if texture_cache.request(&cache_entry.handle, gpu_cache) { // Invoke user closure to get render task chain // to draw this into the texture cache. 
let render_task_id = try!(f(render_tasks)); cache_entry.pending_render_task_id = Some(render_task_id); cache_entry.user_data = user_data; cache_entry.is_opaque = is_opaque; } } Ok(entry_handle.weak()) } pub fn get_cache_entry( &self, handle: &RenderTaskCacheEntryHandle, ) -> &RenderTaskCacheEntry { self.cache_entries .get_opt(handle) .expect("bug: invalid render task cache handle") } #[allow(dead_code)] pub fn get_cache_item_for_render_task(&self, texture_cache: &TextureCache, key: &RenderTaskCacheKey) -> CacheItem { // Get the texture cache handle for this cache key. let handle = self.map.get(key).unwrap(); let cache_entry = self.cache_entries.get(handle); texture_cache.get(&cache_entry.handle) } #[allow(dead_code)] pub fn cache_item_is_allocated_for_render_task(&self, texture_cache: &TextureCache, key: &RenderTaskCacheKey) -> bool { let handle = self.map.get(key).unwrap(); let cache_entry = self.cache_entries.get(handle); texture_cache.is_allocated(&cache_entry.handle) } } // TODO(gw): Rounding the content rect here to device pixels is not // technically correct. Ideally we should ceil() here, and ensure that // the extra part pixel in the case of fractional sizes is correctly // handled. For now, just use rounding which passes the existing // Gecko tests. // Note: zero-square tasks are prohibited in WR task tree, so // we ensure each dimension to be at least the length of 1 after rounding. pub fn to_cache_size(size: DeviceSize) -> DeviceIntSize { DeviceIntSize::new( 1.max(size.width.round() as i32), 1.max(size.height.round() as i32), ) } render_task: Remove a stray semicolon. /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ use api::{DeviceIntPoint, DeviceIntRect, DeviceIntSize, DeviceSize, DeviceIntSideOffsets, ImageDescriptor, ImageFormat}; #[cfg(feature = "pathfinder")] use api::FontRenderMode; use border::BorderCacheKey; use box_shadow::{BoxShadowCacheKey}; use clip::{ClipItem, ClipStore, ClipNodeRange}; use clip_scroll_tree::SpatialNodeIndex; use device::TextureFilter; #[cfg(feature = "pathfinder")] use euclid::{TypedPoint2D, TypedVector2D}; use freelist::{FreeList, FreeListHandle, WeakFreeListHandle}; use glyph_rasterizer::GpuGlyphCacheKey; use gpu_cache::{GpuCache, GpuCacheAddress, GpuCacheHandle}; use gpu_types::{BorderInstance, ImageSource, RasterizationSpace, UvRectKind}; use internal_types::{FastHashMap, SavedTargetIndex, SourceTexture}; #[cfg(feature = "pathfinder")] use pathfinder_partitioner::mesh::Mesh; use picture::PictureCacheKey; use prim_store::{PrimitiveIndex, ImageCacheKey}; #[cfg(feature = "debugger")] use print_tree::{PrintTreePrinter}; use render_backend::FrameId; use resource_cache::{CacheItem, ResourceCache}; use std::{cmp, ops, usize, f32, i32}; use texture_cache::{TextureCache, TextureCacheHandle, Eviction}; use tiling::{RenderPass, RenderTargetIndex}; use tiling::{RenderTargetKind}; #[cfg(feature = "pathfinder")] use webrender_api::DevicePixel; const RENDER_TASK_SIZE_SANITY_CHECK: i32 = 16000; const FLOATS_PER_RENDER_TASK_INFO: usize = 8; pub const MAX_BLUR_STD_DEVIATION: f32 = 4.0; pub const MIN_DOWNSCALING_RT_SIZE: i32 = 128; fn render_task_sanity_check(size: &DeviceIntSize) { if size.width > RENDER_TASK_SIZE_SANITY_CHECK || size.height > RENDER_TASK_SIZE_SANITY_CHECK { error!("Attempting to create a render task of size {}x{}", size.width, size.height); panic!(); } } #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct RenderTaskId(pub u32, FrameId); // TODO(gw): Make private when using GPU cache! 
/// Address of a task as seen by the GPU: currently just the task index.
/// `#[repr(C)]` because it is handed to the renderer as raw instance data;
/// see `RenderTaskTree::get_task_address`.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderTaskAddress(pub u32);

/// Flat arena of every render task created for a single frame, together with
/// the packed per-task GPU payload produced by `write_task_data`.
/// Tasks reference each other by `RenderTaskId` (index + frame tag).
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct RenderTaskTree {
    pub tasks: Vec<RenderTask>,
    pub task_data: Vec<RenderTaskData>,
    // Next free saved-target slot handed out by `save_target`.
    next_saved: SavedTargetIndex,
    // Frame this tree was built for; used to validate incoming ids.
    frame_id: FrameId,
}

impl RenderTaskTree {
    /// Creates an empty tree for the given frame.
    pub fn new(frame_id: FrameId) -> Self {
        RenderTaskTree {
            tasks: Vec::new(),
            task_data: Vec::new(),
            next_saved: SavedTargetIndex(0),
            frame_id,
        }
    }

    /// Appends a task and returns its id (index tagged with this frame).
    pub fn add(&mut self, task: RenderTask) -> RenderTaskId {
        let id = self.tasks.len();
        self.tasks.push(task);
        RenderTaskId(id as _, self.frame_id)
    }

    /// Recursively computes the depth of the dependency tree rooted at `id`,
    /// accumulating the maximum seen into `max_depth`.
    pub fn max_depth(&self, id: RenderTaskId, depth: usize, max_depth: &mut usize) {
        debug_assert_eq!(self.frame_id, id.1);
        let depth = depth + 1;
        *max_depth = cmp::max(*max_depth, depth);
        let task = &self.tasks[id.0 as usize];
        for child in &task.children {
            self.max_depth(*child, depth, max_depth);
        }
    }

    /// Assigns this task to `passes[pass_index]` and, recursively, each child
    /// to the preceding pass, so dependencies render before their dependents.
    pub fn assign_to_passes(
        &self,
        id: RenderTaskId,
        pass_index: usize,
        passes: &mut [RenderPass],
    ) {
        debug_assert_eq!(self.frame_id, id.1);
        let task = &self.tasks[id.0 as usize];

        for child in &task.children {
            self.assign_to_passes(*child, pass_index - 1, passes);
        }

        // Sanity check - can be relaxed if needed
        match task.location {
            RenderTaskLocation::Fixed(..) => {
                // Fixed (framebuffer) tasks must land in the final pass.
                debug_assert!(pass_index == passes.len() - 1);
            }
            RenderTaskLocation::Dynamic(..) |
            RenderTaskLocation::TextureCache(..) => {
                debug_assert!(pass_index < passes.len() - 1);
            }
        }

        // If this task can be shared between multiple
        // passes, render it in the first pass so that
        // it is available to all subsequent passes.
        let pass_index = if task.is_shared() {
            debug_assert!(task.children.is_empty());
            0
        } else {
            pass_index
        };

        let pass = &mut passes[pass_index];
        pass.add_render_task(id, task.get_dynamic_size(), task.target_kind());
    }

    /// Gives every task a chance to finalize itself before drawing.
    pub fn prepare_for_render(&mut self) {
        for task in &mut self.tasks {
            task.prepare_for_render();
        }
    }

    /// Converts a (frame-checked) task id into the address used by the GPU.
    pub fn get_task_address(&self, id: RenderTaskId) -> RenderTaskAddress {
        debug_assert_eq!(self.frame_id, id.1);
        RenderTaskAddress(id.0)
    }

    /// Packs every task's GPU payload into `task_data`, in task-index order
    /// (so `task_data[i]` corresponds to `tasks[i]`).
    pub fn write_task_data(&mut self) {
        for task in &self.tasks {
            self.task_data.push(task.write_task_data());
        }
    }

    /// Reserves and returns the next saved-target slot.
    pub fn save_target(&mut self) -> SavedTargetIndex {
        let id = self.next_saved;
        self.next_saved.0 += 1;
        id
    }

    #[cfg(debug_assertions)]
    pub fn frame_id(&self) -> FrameId {
        self.frame_id
    }
}

// Indexing a tree by a task id asserts the id belongs to this frame.
impl ops::Index<RenderTaskId> for RenderTaskTree {
    type Output = RenderTask;
    fn index(&self, id: RenderTaskId) -> &RenderTask {
        debug_assert_eq!(self.frame_id, id.1);
        &self.tasks[id.0 as usize]
    }
}

impl ops::IndexMut<RenderTaskId> for RenderTaskTree {
    fn index_mut(&mut self, id: RenderTaskId) -> &mut RenderTask {
        debug_assert_eq!(self.frame_id, id.1);
        &mut self.tasks[id.0 as usize]
    }
}

/// Where a task's output lives.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum RenderTaskLocation {
    /// A fixed rect, e.g. the main framebuffer.
    Fixed(DeviceIntRect),
    /// Dynamically allocated in a render target; the origin/target pair is
    /// `None` until allocation happens.
    Dynamic(Option<(DeviceIntPoint, RenderTargetIndex)>, DeviceIntSize),
    /// A (texture, layer, rect) slot in the persistent texture cache.
    TextureCache(SourceTexture, i32, DeviceIntRect),
}

/// Parameters for a clip-mask task (see `RenderTask::new_mask`).
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct CacheMaskTask {
    actual_rect: DeviceIntRect,
    pub root_spatial_node_index: SpatialNodeIndex,
    pub clip_node_range: ClipNodeRange,
}

/// Parameters for rendering a single clip region (rounded rect) mask.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ClipRegionTask {
    pub clip_data_address: GpuCacheAddress,
}

/// Parameters for rendering a picture (composited surface).
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct PictureTask {
    pub prim_index: PrimitiveIndex,
    // True when the task rect covers the whole unclipped picture
    // (computed in `RenderTask::new_picture`).
    pub can_merge: bool,
    pub content_origin: DeviceIntPoint,
    pub uv_rect_handle: GpuCacheHandle,
    pub root_spatial_node_index: SpatialNodeIndex,
    uv_rect_kind: UvRectKind,
}

/// Parameters shared by the vertical and horizontal blur passes.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BlurTask {
    pub blur_std_deviation: f32,
    pub target_kind: RenderTargetKind,
    pub uv_rect_handle: GpuCacheHandle,
    uv_rect_kind: UvRectKind,
}

impl BlurTask {
    // Debugger pretty-printing support only.
    #[cfg(feature = "debugger")]
    fn print_with<T: PrintTreePrinter>(&self, pt: &mut T) {
        pt.add_item(format!("std deviation: {}", self.blur_std_deviation));
        pt.add_item(format!("target: {:?}", self.target_kind));
    }
}

/// Pathfinder-only GPU glyph rasterization task.
#[derive(Debug)]
#[cfg(feature = "pathfinder")]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct GlyphTask {
    /// After job building, this becomes `None`.
    pub mesh: Option<Mesh>,
    pub origin: DeviceIntPoint,
    pub subpixel_offset: TypedPoint2D<f32, DevicePixel>,
    pub render_mode: FontRenderMode,
    pub embolden_amount: TypedVector2D<f32, DevicePixel>,
}

// Unit placeholder so `RenderTaskKind::Glyph` still exists without pathfinder.
#[cfg(not(feature = "pathfinder"))]
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct GlyphTask;

// Where the source data for a blit task can be found.
#[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub enum BlitSource { Image { key: ImageCacheKey, }, RenderTask { task_id: RenderTaskId, }, } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct BorderTask { pub instances: Vec<BorderInstance>, } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct BlitTask { pub source: BlitSource, pub padding: DeviceIntSideOffsets, } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct RenderTaskData { pub data: [f32; FLOATS_PER_RENDER_TASK_INFO], } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub enum RenderTaskKind { Picture(PictureTask), CacheMask(CacheMaskTask), ClipRegion(ClipRegionTask), VerticalBlur(BlurTask), HorizontalBlur(BlurTask), #[allow(dead_code)] Glyph(GlyphTask), Readback(DeviceIntRect), Scaling(RenderTargetKind), Blit(BlitTask), Border(BorderTask), } #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub enum ClearMode { // Applicable to color and alpha targets. Zero, One, // Applicable to color targets only. 
Transparent, } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct RenderTask { pub location: RenderTaskLocation, pub children: Vec<RenderTaskId>, pub kind: RenderTaskKind, pub clear_mode: ClearMode, pub saved_index: Option<SavedTargetIndex>, } impl RenderTask { #[inline] pub fn with_dynamic_location( size: DeviceIntSize, children: Vec<RenderTaskId>, kind: RenderTaskKind, clear_mode: ClearMode, ) -> Self { render_task_sanity_check(&size); RenderTask { location: RenderTaskLocation::Dynamic(None, size), children, kind, clear_mode, saved_index: None, } } pub fn new_picture( location: RenderTaskLocation, unclipped_size: DeviceSize, prim_index: PrimitiveIndex, content_origin: DeviceIntPoint, children: Vec<RenderTaskId>, uv_rect_kind: UvRectKind, root_spatial_node_index: SpatialNodeIndex, ) -> Self { let size = match location { RenderTaskLocation::Dynamic(_, size) => size, RenderTaskLocation::Fixed(rect) => rect.size, RenderTaskLocation::TextureCache(_, _, rect) => rect.size, }; render_task_sanity_check(&size); let can_merge = size.width as f32 >= unclipped_size.width && size.height as f32 >= unclipped_size.height; RenderTask { location, children, kind: RenderTaskKind::Picture(PictureTask { prim_index, content_origin, can_merge, uv_rect_handle: GpuCacheHandle::new(), uv_rect_kind, root_spatial_node_index, }), clear_mode: ClearMode::Transparent, saved_index: None, } } pub fn new_readback(screen_rect: DeviceIntRect) -> Self { RenderTask::with_dynamic_location( screen_rect.size, Vec::new(), RenderTaskKind::Readback(screen_rect), ClearMode::Transparent, ) } pub fn new_blit( size: DeviceIntSize, source: BlitSource, ) -> Self { RenderTask::new_blit_with_padding(size, &DeviceIntSideOffsets::zero(), source) } pub fn new_blit_with_padding( mut size: DeviceIntSize, padding: &DeviceIntSideOffsets, source: BlitSource, ) -> Self { let mut children = Vec::new(); // If this blit uses a render task as a 
source, // ensure it's added as a child task. This will // ensure it gets allocated in the correct pass // and made available as an input when this task // executes. if let BlitSource::RenderTask { task_id } = source { children.push(task_id); } size.width += padding.horizontal(); size.height += padding.vertical(); RenderTask::with_dynamic_location( size, children, RenderTaskKind::Blit(BlitTask { source, padding: *padding, }), ClearMode::Transparent, ) } pub fn new_mask( outer_rect: DeviceIntRect, clip_node_range: ClipNodeRange, root_spatial_node_index: SpatialNodeIndex, clip_store: &mut ClipStore, gpu_cache: &mut GpuCache, resource_cache: &mut ResourceCache, render_tasks: &mut RenderTaskTree, ) -> Self { let mut children = Vec::new(); // Step through the clip sources that make up this mask. If we find // any box-shadow clip sources, request that image from the render // task cache. This allows the blurred box-shadow rect to be cached // in the texture cache across frames. // TODO(gw): Consider moving this logic outside this function, especially // as we add more clip sources that depend on render tasks. // TODO(gw): If this ever shows up in a profile, we could pre-calculate // whether a ClipSources contains any box-shadows and skip // this iteration for the majority of cases. for i in 0 .. clip_node_range.count { let (clip_node, _) = clip_store.get_node_from_range_mut(&clip_node_range, i); match clip_node.item { ClipItem::BoxShadow(ref mut info) => { let (cache_size, cache_key) = info.cache_key .as_ref() .expect("bug: no cache key set") .clone(); let blur_radius_dp = cache_key.blur_radius_dp as f32; let clip_data_address = gpu_cache.get_address(&info.clip_data_handle); // Request a cacheable render task with a blurred, minimal // sized box-shadow rect. 
info.cache_handle = Some(resource_cache.request_render_task( RenderTaskCacheKey { size: cache_size, kind: RenderTaskCacheKeyKind::BoxShadow(cache_key), }, gpu_cache, render_tasks, None, false, |render_tasks| { // Draw the rounded rect. let mask_task = RenderTask::new_rounded_rect_mask( cache_size, clip_data_address, ); let mask_task_id = render_tasks.add(mask_task); // Blur it let blur_render_task = RenderTask::new_blur( blur_radius_dp, mask_task_id, render_tasks, RenderTargetKind::Alpha, ClearMode::Zero, ); let root_task_id = render_tasks.add(blur_render_task); children.push(root_task_id); root_task_id } )); } ClipItem::Rectangle(..) | ClipItem::RoundedRectangle(..) | ClipItem::Image(..) | ClipItem::LineDecoration(..) => {} } } RenderTask::with_dynamic_location( outer_rect.size, children, RenderTaskKind::CacheMask(CacheMaskTask { actual_rect: outer_rect, clip_node_range, root_spatial_node_index, }), ClearMode::One, ) } pub fn new_rounded_rect_mask( size: DeviceIntSize, clip_data_address: GpuCacheAddress, ) -> Self { RenderTask::with_dynamic_location( size, Vec::new(), RenderTaskKind::ClipRegion(ClipRegionTask { clip_data_address, }), ClearMode::One, ) } // Construct a render task to apply a blur to a primitive. // The render task chain that is constructed looks like: // // PrimitiveCacheTask: Draw the primitives. // ^ // | // DownscalingTask(s): Each downscaling task reduces the size of render target to // ^ half. Also reduce the std deviation to half until the std // | deviation less than 4.0. // | // | // VerticalBlurTask: Apply the separable vertical blur to the primitive. // ^ // | // HorizontalBlurTask: Apply the separable horizontal blur to the vertical blur. // | // +---- This is stored as the input task to the primitive shader. // pub fn new_blur( blur_std_deviation: f32, src_task_id: RenderTaskId, render_tasks: &mut RenderTaskTree, target_kind: RenderTargetKind, clear_mode: ClearMode, ) -> Self { // Adjust large std deviation value. 
let mut adjusted_blur_std_deviation = blur_std_deviation; let (blur_target_size, uv_rect_kind) = { let src_task = &render_tasks[src_task_id]; (src_task.get_dynamic_size(), src_task.uv_rect_kind()) }; let mut adjusted_blur_target_size = blur_target_size; let mut downscaling_src_task_id = src_task_id; let mut scale_factor = 1.0; while adjusted_blur_std_deviation > MAX_BLUR_STD_DEVIATION { if adjusted_blur_target_size.width < MIN_DOWNSCALING_RT_SIZE || adjusted_blur_target_size.height < MIN_DOWNSCALING_RT_SIZE { break; } adjusted_blur_std_deviation *= 0.5; scale_factor *= 2.0; adjusted_blur_target_size = (blur_target_size.to_f32() / scale_factor).to_i32(); let downscaling_task = RenderTask::new_scaling( target_kind, downscaling_src_task_id, adjusted_blur_target_size, ); downscaling_src_task_id = render_tasks.add(downscaling_task); } let blur_task_v = RenderTask::with_dynamic_location( adjusted_blur_target_size, vec![downscaling_src_task_id], RenderTaskKind::VerticalBlur(BlurTask { blur_std_deviation: adjusted_blur_std_deviation, target_kind, uv_rect_handle: GpuCacheHandle::new(), uv_rect_kind, }), clear_mode, ); let blur_task_v_id = render_tasks.add(blur_task_v); RenderTask::with_dynamic_location( adjusted_blur_target_size, vec![blur_task_v_id], RenderTaskKind::HorizontalBlur(BlurTask { blur_std_deviation: adjusted_blur_std_deviation, target_kind, uv_rect_handle: GpuCacheHandle::new(), uv_rect_kind, }), clear_mode, ) } pub fn new_border( size: DeviceIntSize, instances: Vec<BorderInstance>, ) -> Self { RenderTask::with_dynamic_location( size, Vec::new(), RenderTaskKind::Border(BorderTask { instances, }), ClearMode::Transparent, ) } pub fn new_scaling( target_kind: RenderTargetKind, src_task_id: RenderTaskId, target_size: DeviceIntSize, ) -> Self { RenderTask::with_dynamic_location( target_size, vec![src_task_id], RenderTaskKind::Scaling(target_kind), match target_kind { RenderTargetKind::Color => ClearMode::Transparent, RenderTargetKind::Alpha => ClearMode::One, }, ) } 
#[cfg(feature = "pathfinder")] pub fn new_glyph(location: RenderTaskLocation, mesh: Mesh, origin: &DeviceIntPoint, subpixel_offset: &TypedPoint2D<f32, DevicePixel>, render_mode: FontRenderMode, embolden_amount: &TypedVector2D<f32, DevicePixel>) -> Self { RenderTask { children: vec![], location: location, kind: RenderTaskKind::Glyph(GlyphTask { mesh: Some(mesh), origin: *origin, subpixel_offset: *subpixel_offset, render_mode: render_mode, embolden_amount: *embolden_amount, }), clear_mode: ClearMode::Transparent, saved_index: None, } } fn uv_rect_kind(&self) -> UvRectKind { match self.kind { RenderTaskKind::CacheMask(..) | RenderTaskKind::Readback(..) | RenderTaskKind::Scaling(..) => { unreachable!("bug: unexpected render task"); } RenderTaskKind::Picture(ref task) => { task.uv_rect_kind } RenderTaskKind::VerticalBlur(ref task) | RenderTaskKind::HorizontalBlur(ref task) => { task.uv_rect_kind } RenderTaskKind::ClipRegion(..) | RenderTaskKind::Glyph(_) | RenderTaskKind::Border(..) | RenderTaskKind::Blit(..) => { UvRectKind::Rect } } } // Write (up to) 8 floats of data specific to the type // of render task that is provided to the GPU shaders // via a vertex texture. pub fn write_task_data(&self) -> RenderTaskData { // NOTE: The ordering and layout of these structures are // required to match both the GPU structures declared // in prim_shared.glsl, and also the uses in submit_batch() // in renderer.rs. // TODO(gw): Maybe there's a way to make this stuff a bit // more type-safe. Although, it will always need // to be kept in sync with the GLSL code anyway. let data = match self.kind { RenderTaskKind::Picture(ref task) => { // Note: has to match `PICTURE_TYPE_*` in shaders [ task.content_origin.x as f32, task.content_origin.y as f32, 0.0, ] } RenderTaskKind::CacheMask(ref task) => { [ task.actual_rect.origin.x as f32, task.actual_rect.origin.y as f32, RasterizationSpace::Screen as i32 as f32, ] } RenderTaskKind::ClipRegion(..) 
=> { [ 0.0, 0.0, RasterizationSpace::Local as i32 as f32, ] } RenderTaskKind::VerticalBlur(ref task) | RenderTaskKind::HorizontalBlur(ref task) => { [ task.blur_std_deviation, 0.0, 0.0, ] } RenderTaskKind::Glyph(_) => { [1.0, 0.0, 0.0] } RenderTaskKind::Readback(..) | RenderTaskKind::Scaling(..) | RenderTaskKind::Border(..) | RenderTaskKind::Blit(..) => { [0.0; 3] } }; let (mut target_rect, target_index) = self.get_target_rect(); // The primitives inside a fixed-location render task // are already placed to their corresponding positions, // so the shader doesn't need to shift by the origin. if let RenderTaskLocation::Fixed(_) = self.location { target_rect.origin = DeviceIntPoint::origin(); } RenderTaskData { data: [ target_rect.origin.x as f32, target_rect.origin.y as f32, target_rect.size.width as f32, target_rect.size.height as f32, target_index.0 as f32, data[0], data[1], data[2], ] } } pub fn get_texture_address(&self, gpu_cache: &GpuCache) -> GpuCacheAddress { match self.kind { RenderTaskKind::Picture(ref info) => { gpu_cache.get_address(&info.uv_rect_handle) } RenderTaskKind::VerticalBlur(ref info) | RenderTaskKind::HorizontalBlur(ref info) => { gpu_cache.get_address(&info.uv_rect_handle) } RenderTaskKind::ClipRegion(..) | RenderTaskKind::Readback(..) | RenderTaskKind::Scaling(..) | RenderTaskKind::Blit(..) | RenderTaskKind::Border(..) | RenderTaskKind::CacheMask(..) | RenderTaskKind::Glyph(..) => { panic!("texture handle not supported for this task kind"); } } } pub fn get_dynamic_size(&self) -> DeviceIntSize { match self.location { RenderTaskLocation::Fixed(..) => DeviceIntSize::zero(), RenderTaskLocation::Dynamic(_, size) => size, RenderTaskLocation::TextureCache(_, _, rect) => rect.size, } } pub fn get_target_rect(&self) -> (DeviceIntRect, RenderTargetIndex) { match self.location { RenderTaskLocation::Fixed(rect) => { (rect, RenderTargetIndex(0)) } // Previously, we only added render tasks after the entire // primitive chain was determined visible. 
This meant that // we could assert any render task in the list was also // allocated (assigned to passes). Now, we add render // tasks earlier, and the picture they belong to may be // culled out later, so we can't assert that the task // has been allocated. // Render tasks that are created but not assigned to // passes consume a row in the render task texture, but // don't allocate any space in render targets nor // draw any pixels. // TODO(gw): Consider some kind of tag or other method // to mark a task as unused explicitly. This // would allow us to restore this debug check. RenderTaskLocation::Dynamic(Some((origin, target_index)), size) => { (DeviceIntRect::new(origin, size), target_index) } RenderTaskLocation::Dynamic(None, _) => { (DeviceIntRect::zero(), RenderTargetIndex(0)) } RenderTaskLocation::TextureCache(_, layer, rect) => { (rect, RenderTargetIndex(layer as usize)) } } } pub fn target_kind(&self) -> RenderTargetKind { match self.kind { RenderTaskKind::Readback(..) => RenderTargetKind::Color, RenderTaskKind::ClipRegion(..) | RenderTaskKind::CacheMask(..) => { RenderTargetKind::Alpha } RenderTaskKind::VerticalBlur(ref task_info) | RenderTaskKind::HorizontalBlur(ref task_info) => { task_info.target_kind } RenderTaskKind::Glyph(..) => { RenderTargetKind::Color } RenderTaskKind::Scaling(target_kind) => { target_kind } RenderTaskKind::Border(..) | RenderTaskKind::Picture(..) => { RenderTargetKind::Color } RenderTaskKind::Blit(..) => { RenderTargetKind::Color } } } // Check if this task wants to be made available as an input // to all passes (except the first) in the render task tree. // To qualify for this, the task needs to have no children / dependencies. // Currently, this is only supported for A8 targets, but it can be // trivially extended to also support RGBA8 targets in the future // if we decide that is useful. pub fn is_shared(&self) -> bool { match self.kind { RenderTaskKind::Picture(..) | RenderTaskKind::VerticalBlur(..) 
| RenderTaskKind::Readback(..) | RenderTaskKind::HorizontalBlur(..) | RenderTaskKind::Scaling(..) | RenderTaskKind::ClipRegion(..) | RenderTaskKind::Blit(..) | RenderTaskKind::Border(..) | RenderTaskKind::Glyph(..) => false, // TODO(gw): For now, we've disabled the shared clip mask // optimization. It's of dubious value in the // future once we start to cache clip tasks anyway. // I have left shared texture support here though, // just in case we want it in the future. RenderTaskKind::CacheMask(..) => false, } } // Optionally, prepare the render task for drawing. This is executed // after all resource cache items (textures and glyphs) have been // resolved and can be queried. It also allows certain render tasks // to defer calculating an exact size until now, if desired. pub fn prepare_for_render(&mut self) { } pub fn write_gpu_blocks( &mut self, gpu_cache: &mut GpuCache, ) { let (target_rect, target_index) = self.get_target_rect(); let (cache_handle, uv_rect_kind) = match self.kind { RenderTaskKind::HorizontalBlur(ref mut info) | RenderTaskKind::VerticalBlur(ref mut info) => { (&mut info.uv_rect_handle, info.uv_rect_kind) } RenderTaskKind::Picture(ref mut info) => { (&mut info.uv_rect_handle, info.uv_rect_kind) } RenderTaskKind::Readback(..) | RenderTaskKind::Scaling(..) | RenderTaskKind::Blit(..) | RenderTaskKind::ClipRegion(..) | RenderTaskKind::Border(..) | RenderTaskKind::CacheMask(..) | RenderTaskKind::Glyph(..) 
=> { return; } }; if let Some(mut request) = gpu_cache.request(cache_handle) { let p0 = target_rect.origin.to_f32(); let p1 = target_rect.bottom_right().to_f32(); let image_source = ImageSource { p0, p1, texture_layer: target_index.0 as f32, user_data: [0.0; 3], uv_rect_kind, }; image_source.write_gpu_blocks(&mut request); } } #[cfg(feature = "debugger")] pub fn print_with<T: PrintTreePrinter>(&self, pt: &mut T, tree: &RenderTaskTree) -> bool { match self.kind { RenderTaskKind::Picture(ref task) => { pt.new_level(format!("Picture of {:?}", task.prim_index)); } RenderTaskKind::CacheMask(ref task) => { pt.new_level(format!("CacheMask with {} clips", task.clip_node_range.count)); pt.add_item(format!("rect: {:?}", task.actual_rect)); } RenderTaskKind::ClipRegion(..) => { pt.new_level("ClipRegion".to_owned()); } RenderTaskKind::VerticalBlur(ref task) => { pt.new_level("VerticalBlur".to_owned()); task.print_with(pt); } RenderTaskKind::HorizontalBlur(ref task) => { pt.new_level("HorizontalBlur".to_owned()); task.print_with(pt); } RenderTaskKind::Readback(ref rect) => { pt.new_level("Readback".to_owned()); pt.add_item(format!("rect: {:?}", rect)); } RenderTaskKind::Scaling(ref kind) => { pt.new_level("Scaling".to_owned()); pt.add_item(format!("kind: {:?}", kind)); } RenderTaskKind::Border(..) => { pt.new_level("Border".to_owned()); } RenderTaskKind::Blit(ref task) => { pt.new_level("Blit".to_owned()); pt.add_item(format!("source: {:?}", task.source)); } RenderTaskKind::Glyph(..) => { pt.new_level("Glyph".to_owned()); } } pt.add_item(format!("clear to: {:?}", self.clear_mode)); for &child_id in &self.children { if tree[child_id].print_with(pt, tree) { pt.add_item(format!("self: {:?}", child_id)) } } pt.end_level(); true } /// Mark this render task for keeping the results alive up until the end of the frame. pub fn mark_for_saving(&mut self) { match self.location { RenderTaskLocation::Fixed(..) | RenderTaskLocation::Dynamic(..) 
=> { self.saved_index = Some(SavedTargetIndex::PENDING); } RenderTaskLocation::TextureCache(..) => { panic!("Unable to mark a permanently cached task for saving!"); } } } } #[derive(Clone, Debug, Hash, PartialEq, Eq)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub enum RenderTaskCacheKeyKind { BoxShadow(BoxShadowCacheKey), Image(ImageCacheKey), #[allow(dead_code)] Glyph(GpuGlyphCacheKey), Picture(PictureCacheKey), Border(BorderCacheKey), } #[derive(Clone, Debug, Hash, PartialEq, Eq)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct RenderTaskCacheKey { pub size: DeviceIntSize, pub kind: RenderTaskCacheKeyKind, } #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct RenderTaskCacheEntry { pending_render_task_id: Option<RenderTaskId>, user_data: Option<[f32; 3]>, is_opaque: bool, pub handle: TextureCacheHandle, } #[derive(Debug)] pub enum RenderTaskCacheMarker {} // A cache of render tasks that are stored in the texture // cache for usage across frames. #[derive(Debug)] #[cfg_attr(feature = "capture", derive(Serialize))] #[cfg_attr(feature = "replay", derive(Deserialize))] pub struct RenderTaskCache { map: FastHashMap<RenderTaskCacheKey, FreeListHandle<RenderTaskCacheMarker>>, cache_entries: FreeList<RenderTaskCacheEntry, RenderTaskCacheMarker>, } pub type RenderTaskCacheEntryHandle = WeakFreeListHandle<RenderTaskCacheMarker>; impl RenderTaskCache { pub fn new() -> Self { RenderTaskCache { map: FastHashMap::default(), cache_entries: FreeList::new(), } } pub fn clear(&mut self) { self.map.clear(); self.cache_entries.clear(); } pub fn begin_frame( &mut self, texture_cache: &mut TextureCache, ) { // Drop any items from the cache that have been // evicted from the texture cache. 
// // This isn't actually necessary for the texture // cache to be able to evict old render tasks. // It will evict render tasks as required, since // the access time in the texture cache entry will // be stale if this task hasn't been requested // for a while. // // Nonetheless, we should remove stale entries // from here so that this hash map doesn't // grow indefinitely! let mut keys_to_remove = Vec::new(); for (key, handle) in &self.map { let entry = self.cache_entries.get(handle); if !texture_cache.is_allocated(&entry.handle) { keys_to_remove.push(key.clone()) } } for key in &keys_to_remove { let handle = self.map.remove(key).unwrap(); self.cache_entries.free(handle); } } pub fn update( &mut self, gpu_cache: &mut GpuCache, texture_cache: &mut TextureCache, render_tasks: &mut RenderTaskTree, ) { // Iterate the list of render task cache entries, // and allocate / update the texture cache location // if the entry has been evicted or not yet allocated. for (_, handle) in &self.map { let entry = self.cache_entries.get_mut(handle); if let Some(pending_render_task_id) = entry.pending_render_task_id.take() { let render_task = &mut render_tasks[pending_render_task_id]; let target_kind = render_task.target_kind(); // Find out what size to alloc in the texture cache. let size = match render_task.location { RenderTaskLocation::Fixed(..) | RenderTaskLocation::TextureCache(..) => { panic!("BUG: dynamic task was expected"); } RenderTaskLocation::Dynamic(_, size) => size, }; // Select the right texture page to allocate from. let image_format = match target_kind { RenderTargetKind::Color => ImageFormat::BGRA8, RenderTargetKind::Alpha => ImageFormat::R8, }; let descriptor = ImageDescriptor::new( size.width as u32, size.height as u32, image_format, entry.is_opaque, false, ); // Allocate space in the texture cache, but don't supply // and CPU-side data to be uploaded. 
texture_cache.update( &mut entry.handle, descriptor, TextureFilter::Linear, None, entry.user_data.unwrap_or([0.0; 3]), None, gpu_cache, None, render_task.uv_rect_kind(), Eviction::Auto, ); // Get the allocation details in the texture cache, and store // this in the render task. The renderer will draw this // task into the appropriate layer and rect of the texture // cache on this frame. let (texture_id, texture_layer, uv_rect) = texture_cache.get_cache_location(&entry.handle); render_task.location = RenderTaskLocation::TextureCache( texture_id, texture_layer, uv_rect.to_i32() ); } } } pub fn request_render_task<F>( &mut self, key: RenderTaskCacheKey, texture_cache: &mut TextureCache, gpu_cache: &mut GpuCache, render_tasks: &mut RenderTaskTree, user_data: Option<[f32; 3]>, is_opaque: bool, mut f: F, ) -> Result<RenderTaskCacheEntryHandle, ()> where F: FnMut(&mut RenderTaskTree) -> Result<RenderTaskId, ()> { // Get the texture cache handle for this cache key, // or create one. let cache_entries = &mut self.cache_entries; let entry_handle = self.map .entry(key) .or_insert_with(|| { let entry = RenderTaskCacheEntry { handle: TextureCacheHandle::new(), pending_render_task_id: None, user_data, is_opaque, }; cache_entries.insert(entry) }); let cache_entry = cache_entries.get_mut(entry_handle); if cache_entry.pending_render_task_id.is_none() { // Check if this texture cache handle is valid. if texture_cache.request(&cache_entry.handle, gpu_cache) { // Invoke user closure to get render task chain // to draw this into the texture cache. 
let render_task_id = try!(f(render_tasks)); cache_entry.pending_render_task_id = Some(render_task_id); cache_entry.user_data = user_data; cache_entry.is_opaque = is_opaque; } } Ok(entry_handle.weak()) } pub fn get_cache_entry( &self, handle: &RenderTaskCacheEntryHandle, ) -> &RenderTaskCacheEntry { self.cache_entries .get_opt(handle) .expect("bug: invalid render task cache handle") } #[allow(dead_code)] pub fn get_cache_item_for_render_task(&self, texture_cache: &TextureCache, key: &RenderTaskCacheKey) -> CacheItem { // Get the texture cache handle for this cache key. let handle = self.map.get(key).unwrap(); let cache_entry = self.cache_entries.get(handle); texture_cache.get(&cache_entry.handle) } #[allow(dead_code)] pub fn cache_item_is_allocated_for_render_task(&self, texture_cache: &TextureCache, key: &RenderTaskCacheKey) -> bool { let handle = self.map.get(key).unwrap(); let cache_entry = self.cache_entries.get(handle); texture_cache.is_allocated(&cache_entry.handle) } } // TODO(gw): Rounding the content rect here to device pixels is not // technically correct. Ideally we should ceil() here, and ensure that // the extra part pixel in the case of fractional sizes is correctly // handled. For now, just use rounding which passes the existing // Gecko tests. // Note: zero-square tasks are prohibited in WR task tree, so // we ensure each dimension to be at least the length of 1 after rounding. pub fn to_cache_size(size: DeviceSize) -> DeviceIntSize { DeviceIntSize::new( 1.max(size.width.round() as i32), 1.max(size.height.round() as i32), ) }
//! This crate contains a single public function
//! [`get_path_for_executable`](fn.get_path_for_executable.html).
//! See docs there for more information.

use std::{env, iter, path::PathBuf};

pub fn cargo() -> PathBuf {
    get_path_for_executable("cargo")
}

pub fn rustc() -> PathBuf {
    get_path_for_executable("rustc")
}

pub fn rustup() -> PathBuf {
    get_path_for_executable("rustup")
}

pub fn rustfmt() -> PathBuf {
    get_path_for_executable("rustfmt")
}

/// Return a `PathBuf` to use for the given executable.
///
/// E.g., `get_path_for_executable("cargo")` may return just `cargo` if that
/// gives a valid Cargo executable; or it may return a full path to a valid
/// Cargo.
fn get_path_for_executable(executable_name: &'static str) -> PathBuf {
    // The current implementation checks three places for an executable to use:
    // 1) Appropriate environment variable (erroring if this is set but not a usable executable)
    //   example: for cargo, this checks $CARGO environment variable; for rustc, $RUSTC; etc
    // 2) `<executable_name>`
    //   example: for cargo, this tries just `cargo`, which will succeed if `cargo` is on the $PATH
    // 3) `~/.cargo/bin/<executable_name>`
    //   example: for cargo, this tries ~/.cargo/bin/cargo
    //   It seems that this is a reasonable place to try for cargo, rustc, and rustup
    let env_var = executable_name.to_ascii_uppercase();
    if let Some(path) = env::var_os(&env_var) {
        // NOTE(review): contrary to step 1 above, the override is returned
        // unchecked — no error is raised if it is not a usable executable.
        // Confirm whether validation was intended here.
        return path.into();
    }

    if lookup_in_path(executable_name) {
        return executable_name.into();
    }

    if let Some(mut path) = home::home_dir() {
        path.push(".cargo");
        path.push("bin");
        path.push(executable_name);
        if path.is_file() {
            return path;
        }
    }

    // Fall back to the bare name and let spawning fail later if it is absent.
    executable_name.into()
}

/// True if `exec` (with or without the platform exe extension, e.g. `.exe`
/// on Windows) exists as a file in any directory listed in $PATH.
fn lookup_in_path(exec: &str) -> bool {
    let paths = env::var_os("PATH").unwrap_or_default();
    let mut candidates = env::split_paths(&paths).flat_map(|path| {
        let candidate = path.join(&exec);
        let with_exe = match env::consts::EXE_EXTENSION {
            // On platforms with no exe extension there is only one candidate.
            "" => None,
            it => Some(candidate.with_extension(it)),
        };
        iter::once(candidate).chain(with_exe)
    });
    candidates.any(|it| it.is_file())
}

Better exe probing

//! This crate contains a single public function
//! [`get_path_for_executable`](fn.get_path_for_executable.html).
//! See docs there for more information.

use std::{env, iter, path::PathBuf};

pub fn cargo() -> PathBuf {
    get_path_for_executable("cargo")
}

pub fn rustc() -> PathBuf {
    get_path_for_executable("rustc")
}

pub fn rustup() -> PathBuf {
    get_path_for_executable("rustup")
}

pub fn rustfmt() -> PathBuf {
    get_path_for_executable("rustfmt")
}

/// Return a `PathBuf` to use for the given executable.
///
/// E.g., `get_path_for_executable("cargo")` may return just `cargo` if that
/// gives a valid Cargo executable; or it may return a full path to a valid
/// Cargo.
fn get_path_for_executable(executable_name: &'static str) -> PathBuf {
    // The current implementation checks three places for an executable to use:
    // 1) Appropriate environment variable (erroring if this is set but not a usable executable)
    //   example: for cargo, this checks $CARGO environment variable; for rustc, $RUSTC; etc
    // 2) `<executable_name>`
    //   example: for cargo, this tries just `cargo`, which will succeed if `cargo` is on the $PATH
    // 3) `~/.cargo/bin/<executable_name>`
    //   example: for cargo, this tries ~/.cargo/bin/cargo
    //   It seems that this is a reasonable place to try for cargo, rustc, and rustup
    let env_var = executable_name.to_ascii_uppercase();
    if let Some(path) = env::var_os(&env_var) {
        // NOTE(review): as in the previous revision, the env-var override is
        // still returned without checking it is a usable executable.
        return path.into();
    }

    if lookup_in_path(executable_name) {
        return executable_name.into();
    }

    if let Some(mut path) = home::home_dir() {
        path.push(".cargo");
        path.push("bin");
        path.push(executable_name);
        // `probe` also tries the platform exe extension (e.g. `.exe`),
        // which the previous revision's bare `is_file()` check did not.
        if let Some(path) = probe(path) {
            return path;
        }
    }

    executable_name.into()
}

/// True if `exec` resolves (via `probe`) in any directory listed in $PATH.
fn lookup_in_path(exec: &str) -> bool {
    let paths = env::var_os("PATH").unwrap_or_default();
    env::split_paths(&paths).map(|path| path.join(exec)).find_map(probe).is_some()
}

/// Returns the first existing file among `path` itself and `path` with the
/// platform executable extension appended (none on Unix, `exe` on Windows).
fn probe(path: PathBuf) -> Option<PathBuf> {
    let with_extension = match env::consts::EXE_EXTENSION {
        "" => None,
        it => Some(path.with_extension(it)),
    };
    iter::once(path).chain(with_extension).find(|it| it.is_file())
}
use std::fmt; use world::{HEX_INNER_RADIUS, HEX_OUTER_RADIUS}; use super::{AxialType, DefaultFloat, Point2f}; use std::ops::{Add, Div, Mul, Sub}; /// A 2-dimensional point in axial coordinates. See [here][hex-blog] for more /// information. /// /// [hex-blog]: http://www.redblobgames.com/grids/hexagons/#coordinates #[derive(Clone, Copy, PartialEq, Eq, Hash)] #[repr(C, packed)] pub struct AxialPoint { pub q: AxialType, pub r: AxialType, } // TODO: implement cgmath::Array // TODO: implement cgmath::MatricSpace // TODO: implement cgmath::EuclideanSpace // TODO: implement ops::{ ... } // For all of the above, see // http://bjz.github.io/cgmath/cgmath/struct.Point2.html // impl AxialPoint { pub fn new(q: AxialType, r: AxialType) -> Self { AxialPoint { q: q, r: r } } /// Returns the position of the hexagons center in the standard coordinate /// system using `world::{HEX_INNER_RADIUS, HEX_OUTER_RADIUS}`. pub fn to_real(&self) -> Point2f { Point2f { x: ((2 * self.q + self.r) as DefaultFloat) * HEX_INNER_RADIUS, y: (self.r as DefaultFloat) * (3.0 / 2.0) * HEX_OUTER_RADIUS, } } /// Returns the `s` component of corresponding cube coordinates. In cube /// coordinates 'q + r + s = 0', so saving `s` is redundant and can be /// calculated on the fly when needed. 
pub fn s(&self) -> AxialType { -self.q - self.r } } impl fmt::Debug for AxialPoint { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("") .field(&self.q) .field(&self.r) .finish() } } impl Add<AxialPoint> for AxialPoint { type Output = AxialPoint; /// adds two Points together similar to vectors /// Returns an AxialPoint fn add(self, _rhs: AxialPoint) -> AxialPoint { AxialPoint { q: self.q + _rhs.q, r: self.r + _rhs.r, } } } impl Sub<AxialPoint> for AxialPoint { type Output = AxialPoint; /// substracts two Points similar to vectors /// Returns an AxialPoint fn sub(self, _rhs: AxialPoint) -> AxialPoint { AxialPoint { q: self.q - _rhs.q, r: self.r - _rhs.r, } } } impl Mul<AxialType> for AxialPoint { type Output = AxialPoint; /// Multiplies a point and a scalar /// Returns an AxialPoint fn mul(self, _rhs: AxialType) -> AxialPoint { AxialPoint { q: self.q * _rhs, r: self.r * _rhs, } } } impl Div<AxialType> for AxialPoint { type Output = AxialPoint; /// Divides a point and a scalar /// Returns an AxialPoint fn div(self, _rhs: AxialType) -> AxialPoint { AxialPoint { q: self.q / _rhs, r: self.r / _rhs, } } } Basic functions for axial_point Basic Arithmetics: - Addition of two points - Substraction of two points - Multiplication of a point and a scalar - Division of a Point and a scalar Zero-Implementation: - Definition of the additive identity of zero - Tests if self equals the additive identity Index: - Added implementation to access the variables of the axial_point via an index Array-Implementation: - Ability to create an axial_point through a number - Functions to calculate the sum and the product of the two variables - Ability to return min and max of the two variables use std::fmt; use world::{HEX_INNER_RADIUS, HEX_OUTER_RADIUS}; use super::{AxialType, DefaultFloat, Point2f}; use std::ops::{Add, Div, Index, IndexMut, Mul, Sub}; use math::cgmath::{Array, Zero}; /// A 2-dimensional point in axial coordinates. 
See [here][hex-blog] for more /// information. /// /// [hex-blog]: http://www.redblobgames.com/grids/hexagons/#coordinates #[derive(Clone, Copy, PartialEq, Eq, Hash)] #[repr(C, packed)] pub struct AxialPoint { pub q: AxialType, pub r: AxialType, } // TODO: implement cgmath::Array // TODO: implement cgmath::MatricSpace // TODO: implement cgmath::EuclideanSpace // TODO: implement ops::{ ... } // For all of the above, see // http://bjz.github.io/cgmath/cgmath/struct.Point2.html // impl AxialPoint { pub fn new(q: AxialType, r: AxialType) -> Self { AxialPoint { q: q, r: r } } /// Returns the position of the hexagons center in the standard coordinate /// system using `world::{HEX_INNER_RADIUS, HEX_OUTER_RADIUS}`. pub fn to_real(&self) -> Point2f { Point2f { x: ((2 * self.q + self.r) as DefaultFloat) * HEX_INNER_RADIUS, y: (self.r as DefaultFloat) * (3.0 / 2.0) * HEX_OUTER_RADIUS, } } /// Returns the `s` component of corresponding cube coordinates. In cube /// coordinates 'q + r + s = 0', so saving `s` is redundant and can be /// calculated on the fly when needed. 
pub fn s(&self) -> AxialType { -self.q - self.r } } impl fmt::Debug for AxialPoint { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("") .field(&self.q) .field(&self.r) .finish() } } /// ********************Basic Arithmetics************ impl Add<AxialPoint> for AxialPoint { type Output = AxialPoint; /// adds two Points together similar to vectors /// Returns an AxialPoint fn add(self, _rhs: AxialPoint) -> AxialPoint { AxialPoint { q: self.q + _rhs.q, r: self.r + _rhs.r, } } } impl Sub<AxialPoint> for AxialPoint { type Output = AxialPoint; /// substracts two Points similar to vectors /// Returns an AxialPoint fn sub(self, _rhs: AxialPoint) -> AxialPoint { AxialPoint { q: self.q - _rhs.q, r: self.r - _rhs.r, } } } impl Mul<AxialType> for AxialPoint { type Output = AxialPoint; /// Multiplies a point and a scalar /// Returns an AxialPoint fn mul(self, _rhs: AxialType) -> AxialPoint { AxialPoint { q: self.q * _rhs, r: self.r * _rhs, } } } impl Div<AxialType> for AxialPoint { type Output = AxialPoint; /// Divides a point and a scalar /// Returns an AxialPoint fn div(self, _rhs: AxialType) -> AxialPoint { AxialPoint { q: self.q / _rhs, r: self.r / _rhs, } } } /// ********************Zero-Implementation************ impl Zero for AxialPoint { fn zero() -> AxialPoint { AxialPoint { q: 0, r: 0 } } fn is_zero(&self) -> bool { self.q == 0 && self.r == 0 } } /// ********************Index************ impl Index<usize> for AxialPoint { type Output = AxialType; fn index<'a>(&'a self, index: usize) -> &'a AxialType { match index { 0 => &self.q, 1 => &self.r, _ => panic!("Index out of bounds!"), } } } impl IndexMut<usize> for AxialPoint { fn index_mut<'a>(&'a mut self, index: usize) -> &'a mut AxialType { match index { 0 => &mut self.q, 1 => &mut self.r, _ => panic!("Index out of bounds!"), } } } /// ********************Array************ impl Array for AxialPoint { type Element = AxialType; fn from_value(x: AxialType) -> AxialPoint { AxialPoint { q: x, r: x } } 
fn sum(self) -> AxialType { self.q + self.r } fn product(self) -> AxialType { self.q * self.r } fn min(self) -> AxialType { if self.q < self.r { self.q } else { self.r } } fn max(self) -> AxialType { if self.q < self.r { self.r } else { self.q } } }
// Copyright 2018 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::proto_grpc::OctreeClient; use futures::{Future, Stream}; use grpcio::{ChannelBuilder, EnvBuilder}; use nalgebra::Point3; use point_viewer::color::Color; use point_viewer::data_provider::{DataProvider, DataProviderFactoryResult}; use point_viewer::errors::*; use point_viewer::geometry::Aabb; use point_viewer::proto::Meta; use point_viewer::Point; pub use point_viewer_grpc_proto_rust::proto; pub use point_viewer_grpc_proto_rust::proto_grpc; use std::collections::HashMap; use std::io::{Cursor, Read}; use std::sync::Arc; pub mod service; pub struct GrpcOctreeDataProvider { client: OctreeClient, octree_id: String, } impl GrpcOctreeDataProvider { pub fn from_address(addr: &str) -> Result<Self> { let mut addr_parts = addr.trim_matches('/').splitn(2, '/'); let addr = addr_parts.next().ok_or_else(|| "Invalid address.")?; let octree_id = addr_parts.next().unwrap_or_default().to_string(); let env = Arc::new(EnvBuilder::new().build()); let ch = ChannelBuilder::new(env) .max_receive_message_len(::std::i32::MAX) .connect(addr); let client = OctreeClient::new(ch); Ok(GrpcOctreeDataProvider { client, octree_id }) } pub fn get_points_in_box( &self, bounding_box: &Aabb, mut func: impl FnMut(&[Point]) -> bool, ) -> Result<()> { let mut req = proto::GetPointsInBoxRequest::new(); req.set_octree_id(self.octree_id.clone()); req.mut_bounding_box().mut_min().set_x(bounding_box.min().x); 
req.mut_bounding_box().mut_min().set_y(bounding_box.min().y); req.mut_bounding_box().mut_min().set_z(bounding_box.min().z); req.mut_bounding_box().mut_max().set_x(bounding_box.max().x); req.mut_bounding_box().mut_max().set_y(bounding_box.max().y); req.mut_bounding_box().mut_max().set_z(bounding_box.max().z); let replies = self .client .get_points_in_box(&req) .map_err(|_| point_viewer::errors::ErrorKind::Grpc)?; let mut points = Vec::new(); let mut interrupted = false; let result = replies .for_each(|reply| { let last_num_points = points.len(); for (p, color) in reply.positions.iter().zip(reply.colors.iter()) { points.push(Point { position: Point3::from(p), color: Color { red: color.red, green: color.green, blue: color.blue, alpha: color.alpha, } .to_u8(), intensity: None, }); } if reply.intensities.len() == reply.positions.len() { for (i, p) in reply.intensities.iter().zip(&mut points[last_num_points..]) { p.intensity = Some(*i); } } if !func(&points) { interrupted = true; return Err(grpcio::Error::QueueShutdown); } points.clear(); Ok(()) }) .wait() .map_err(|_| point_viewer::errors::ErrorKind::Grpc); if result.is_err() && !interrupted { result?; } Ok(()) } } impl DataProvider for GrpcOctreeDataProvider { fn meta_proto(&self) -> Result<Meta> { let mut req = proto::GetMetaRequest::new(); req.set_octree_id(self.octree_id.clone()); let reply = self .client .get_meta(&req) .map_err(|_| point_viewer::errors::ErrorKind::Grpc)?; Ok(reply.meta.unwrap()) } fn data( &self, node_id: &str, node_attributes: &[&str], ) -> Result<HashMap<String, Box<dyn Read + Send>>> { let mut req = proto::GetNodeDataRequest::new(); req.set_octree_id(self.octree_id.clone()); req.set_id(node_id.to_string()); let reply = self .client .get_node_data(&req) .map_err(|_| point_viewer::errors::ErrorKind::Grpc)?; let mut readers = HashMap::<String, Box<dyn Read + Send>>::new(); for node_attribute in node_attributes { let reader: Box<dyn Read + Send> = match *node_attribute { "position" => 
Box::new(Cursor::new(reply.position.clone())), "color" => Box::new(Cursor::new(reply.color.clone())), _ => { return Err("Unsupported node extension.".into()); } }; readers.insert((*node_attribute).to_string(), reader); } Ok(readers) } } pub fn data_provider_from_grpc_address(addr: &str) -> DataProviderFactoryResult { let prefix = "grpc://"; if !addr.starts_with(prefix) { return Err(format!("Invalid grpc address: it has to start with {}.", prefix).into()); } let addr_no_prefix: &str = &addr[prefix.len()..]; GrpcOctreeDataProvider::from_address(addr_no_prefix) .map(|provider| Box::new(provider) as Box<dyn DataProvider>) } Fix clippy. (#470) Signed-off-by: Marco Feuerstein <db343c71bfecd25e2e3204c3a54a9666c70fbafe@lyft.com> // Copyright 2018 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use crate::proto_grpc::OctreeClient; use futures::{Future, Stream}; use grpcio::{ChannelBuilder, EnvBuilder}; use nalgebra::Point3; use point_viewer::color::Color; use point_viewer::data_provider::{DataProvider, DataProviderFactoryResult}; use point_viewer::errors::*; use point_viewer::geometry::Aabb; use point_viewer::proto::Meta; use point_viewer::Point; pub use point_viewer_grpc_proto_rust::proto; pub use point_viewer_grpc_proto_rust::proto_grpc; use std::collections::HashMap; use std::io::{Cursor, Read}; use std::sync::Arc; pub mod service; pub struct GrpcOctreeDataProvider { client: OctreeClient, octree_id: String, } impl GrpcOctreeDataProvider { pub fn from_address(addr: &str) -> Result<Self> { let mut addr_parts = addr.trim_matches('/').splitn(2, '/'); let addr = addr_parts.next().ok_or("Invalid address.")?; let octree_id = addr_parts.next().unwrap_or_default().to_string(); let env = Arc::new(EnvBuilder::new().build()); let ch = ChannelBuilder::new(env) .max_receive_message_len(::std::i32::MAX) .connect(addr); let client = OctreeClient::new(ch); Ok(GrpcOctreeDataProvider { client, octree_id }) } pub fn get_points_in_box( &self, bounding_box: &Aabb, mut func: impl FnMut(&[Point]) -> bool, ) -> Result<()> { let mut req = proto::GetPointsInBoxRequest::new(); req.set_octree_id(self.octree_id.clone()); req.mut_bounding_box().mut_min().set_x(bounding_box.min().x); req.mut_bounding_box().mut_min().set_y(bounding_box.min().y); req.mut_bounding_box().mut_min().set_z(bounding_box.min().z); req.mut_bounding_box().mut_max().set_x(bounding_box.max().x); req.mut_bounding_box().mut_max().set_y(bounding_box.max().y); req.mut_bounding_box().mut_max().set_z(bounding_box.max().z); let replies = self .client .get_points_in_box(&req) .map_err(|_| point_viewer::errors::ErrorKind::Grpc)?; let mut points = Vec::new(); let mut interrupted = false; let result = replies .for_each(|reply| { let last_num_points = points.len(); for (p, color) in 
reply.positions.iter().zip(reply.colors.iter()) { points.push(Point { position: Point3::from(p), color: Color { red: color.red, green: color.green, blue: color.blue, alpha: color.alpha, } .to_u8(), intensity: None, }); } if reply.intensities.len() == reply.positions.len() { for (i, p) in reply.intensities.iter().zip(&mut points[last_num_points..]) { p.intensity = Some(*i); } } if !func(&points) { interrupted = true; return Err(grpcio::Error::QueueShutdown); } points.clear(); Ok(()) }) .wait() .map_err(|_| point_viewer::errors::ErrorKind::Grpc); if result.is_err() && !interrupted { result?; } Ok(()) } } impl DataProvider for GrpcOctreeDataProvider { fn meta_proto(&self) -> Result<Meta> { let mut req = proto::GetMetaRequest::new(); req.set_octree_id(self.octree_id.clone()); let reply = self .client .get_meta(&req) .map_err(|_| point_viewer::errors::ErrorKind::Grpc)?; Ok(reply.meta.unwrap()) } fn data( &self, node_id: &str, node_attributes: &[&str], ) -> Result<HashMap<String, Box<dyn Read + Send>>> { let mut req = proto::GetNodeDataRequest::new(); req.set_octree_id(self.octree_id.clone()); req.set_id(node_id.to_string()); let reply = self .client .get_node_data(&req) .map_err(|_| point_viewer::errors::ErrorKind::Grpc)?; let mut readers = HashMap::<String, Box<dyn Read + Send>>::new(); for node_attribute in node_attributes { let reader: Box<dyn Read + Send> = match *node_attribute { "position" => Box::new(Cursor::new(reply.position.clone())), "color" => Box::new(Cursor::new(reply.color.clone())), _ => { return Err("Unsupported node extension.".into()); } }; readers.insert((*node_attribute).to_string(), reader); } Ok(readers) } } pub fn data_provider_from_grpc_address(addr: &str) -> DataProviderFactoryResult { let prefix = "grpc://"; if !addr.starts_with(prefix) { return Err(format!("Invalid grpc address: it has to start with {}.", prefix).into()); } let addr_no_prefix: &str = &addr[prefix.len()..]; GrpcOctreeDataProvider::from_address(addr_no_prefix) .map(|provider| 
Box::new(provider) as Box<dyn DataProvider>) }
#[doc = "Generate markdown from a document tree"]; import markdown_writer::writer; import markdown_writer::writer_util; import markdown_writer::writer_factory; export mk_pass; export header_kind, header_name, header_text; fn mk_pass(writer_factory: writer_factory) -> pass { let f = fn~(srv: astsrv::srv, doc: doc::doc) -> doc::doc { run(srv, doc, writer_factory) }; { name: "markdown", f: f } } fn run( srv: astsrv::srv, doc: doc::doc, writer_factory: writer_factory ) -> doc::doc { fn mods_last(item1: doc::itemtag, item2: doc::itemtag) -> bool { fn is_mod(item: doc::itemtag) -> bool { alt item { doc::modtag(_) { true } _ { false } } } let lteq = !is_mod(item1) || is_mod(item2); lteq } // Sort the items so mods come last. All mods will be // output at the same header level so sorting mods last // makes the headers come out nested correctly. let sorted_doc = sort_pass::mk_pass( "mods last", mods_last ).f(srv, doc); write_markdown(sorted_doc, writer_factory); ret doc; } #[test] fn should_write_modules_last() { /* Because the markdown pass writes all modules at the same level of indentation (it doesn't 'nest' them), we need to make sure that we write all of the modules contained in each module after all other types of items, or else the header nesting will end up wrong, with modules appearing to contain items that they do not. 
*/ let markdown = test::render( "mod a { }\ fn b() { }\ mod c { }\ fn d() { }" ); let idx_a = option::get(str::find_str(markdown, "# Module `a`")); let idx_b = option::get(str::find_str(markdown, "## Function `b`")); let idx_c = option::get(str::find_str(markdown, "# Module `c`")); let idx_d = option::get(str::find_str(markdown, "## Function `d`")); assert idx_b < idx_d; assert idx_d < idx_a; assert idx_a < idx_c; } type ctxt = { w: writer }; fn write_markdown( doc: doc::doc, writer_factory: writer_factory ) { par::anymap(doc.pages) {|page| let ctxt = { w: writer_factory(page) }; write_page(ctxt, page) }; } fn write_page(ctxt: ctxt, page: doc::page) { alt page { doc::cratepage(doc) { write_crate(ctxt, doc); } doc::itempage(doc) { write_item(ctxt, doc); } } ctxt.w.write_done(); } #[test] fn should_request_new_writer_for_each_page() { // This port will send us a (page, str) pair for every writer // that was created let (writer_factory, po) = markdown_writer::future_writer_factory(); let (srv, doc) = test::create_doc_srv("mod a { }"); // Split the document up into pages let doc = page_pass::mk_pass(config::doc_per_mod).f(srv, doc); write_markdown(doc, writer_factory); // We expect two pages to have been written iter::repeat(2u) {|| comm::recv(po); } } enum hlvl { h1 = 1, h2 = 2, h3 = 3 } fn write_header(ctxt: ctxt, lvl: hlvl, doc: doc::itemtag) { let text = header_text(doc); write_header_(ctxt, lvl, text); } fn write_header_(ctxt: ctxt, lvl: hlvl, title: str) { let hashes = str::from_chars(vec::init_elt(lvl as uint, '#')); ctxt.w.write_line(#fmt("%s %s", hashes, title)); ctxt.w.write_line(""); } fn write_lead(ctxt: ctxt, title: str) { ctxt.w.write_str(#fmt("__%s__: ", title)) } fn header_kind(doc: doc::itemtag) -> str { alt doc { doc::modtag(_) { if doc.id() == rustc::syntax::ast::crate_node_id { "Crate" } else { "Module" } } doc::nmodtag(_) { "Native module" } doc::fntag(_) { "Function" } doc::consttag(_) { "Const" } doc::enumtag(_) { "Enum" } doc::restag(_) { 
"Resource" } doc::ifacetag(_) { "Interface" } doc::impltag(doc) { "Implementation" } doc::tytag(_) { "Type" } } } fn header_name(doc: doc::itemtag) -> str { let fullpath = str::connect(doc.path() + [doc.name()], "::"); alt doc { doc::modtag(_) if doc.id() != rustc::syntax::ast::crate_node_id { fullpath } doc::nmodtag(_) { fullpath } doc::impltag(doc) { assert option::is_some(doc.self_ty); let self_ty = option::get(doc.self_ty); alt doc.iface_ty { some(iface_ty) { #fmt("%s of %s for %s", doc.name(), iface_ty, self_ty) } none { #fmt("%s for %s", doc.name(), self_ty) } } } _ { doc.name() } } } fn header_text(doc: doc::itemtag) -> str { header_text_(header_kind(doc), header_name(doc)) } fn header_text_(kind: str, name: str) -> str { #fmt("%s `%s`", kind, name) } fn write_crate( ctxt: ctxt, doc: doc::cratedoc ) { write_header(ctxt, h1, doc::modtag(doc.topmod)); write_top_module(ctxt, doc.topmod); } fn write_top_module( ctxt: ctxt, moddoc: doc::moddoc ) { write_mod_contents(ctxt, moddoc); } fn write_mod( ctxt: ctxt, moddoc: doc::moddoc ) { write_header(ctxt, h1, doc::modtag(moddoc)); write_mod_contents(ctxt, moddoc); } #[test] fn should_write_full_path_to_mod() { let markdown = test::render("mod a { mod b { mod c { } } }"); assert str::contains(markdown, "# Module `a::b::c`"); } fn write_mod_contents( ctxt: ctxt, doc: doc::moddoc ) { write_brief(ctxt, doc.brief()); write_desc(ctxt, doc.desc()); if option::is_some(doc.index) { write_index(ctxt, option::get(doc.index)); } for itemtag in doc.items { write_item(ctxt, itemtag); } } fn write_item(ctxt: ctxt, doc: doc::itemtag) { alt doc { doc::modtag(moddoc) { write_mod(ctxt, moddoc) } doc::nmodtag(nmoddoc) { write_nmod(ctxt, nmoddoc) } doc::fntag(fndoc) { write_fn(ctxt, fndoc) } doc::consttag(constdoc) { write_const(ctxt, constdoc) } doc::enumtag(enumdoc) { write_enum(ctxt, enumdoc) } doc::restag(resdoc) { write_res(ctxt, resdoc) } doc::ifacetag(ifacedoc) { write_iface(ctxt, ifacedoc) } doc::impltag(impldoc) { 
write_impl(ctxt, impldoc) } doc::tytag(tydoc) { write_type(ctxt, tydoc) } } } #[test] fn should_write_crate_brief_description() { let markdown = test::render("#[doc(brief = \"this is the crate\")];"); assert str::contains(markdown, "this is the crate"); } #[test] fn should_write_crate_description() { let markdown = test::render("#[doc = \"this is the crate\"];"); assert str::contains(markdown, "this is the crate"); } fn write_index(ctxt: ctxt, index: doc::index) { if vec::is_empty(index.entries) { ret; } for entry in index.entries { let header = header_text_(entry.kind, entry.name); let id = entry.link; if option::is_some(entry.brief) { ctxt.w.write_line(#fmt("* [%s](%s) - %s", header, id, option::get(entry.brief))); } else { ctxt.w.write_line(#fmt("* [%s](%s)", header, id)); } } ctxt.w.write_line(""); } #[test] fn should_write_index() { let markdown = test::render("mod a { } mod b { }"); assert str::contains( markdown, "\n\n* [Module `a`](#module-a)\n\ * [Module `b`](#module-b)\n\n" ); } #[test] fn should_write_index_brief() { let markdown = test::render("#[doc(brief = \"test\")] mod a { }"); assert str::contains(markdown, "(#module-a) - test\n"); } #[test] fn should_not_write_index_if_no_entries() { let markdown = test::render(""); assert !str::contains(markdown, "\n\n\n"); } fn write_nmod(ctxt: ctxt, doc: doc::nmoddoc) { write_header(ctxt, h1, doc::nmodtag(doc)); write_brief(ctxt, doc.brief()); write_desc(ctxt, doc.desc()); for fndoc in doc.fns { write_fn(ctxt, fndoc); } } #[test] fn should_write_native_mods() { let markdown = test::render("#[doc = \"test\"] native mod a { }"); assert str::contains(markdown, "Native module `a`"); assert str::contains(markdown, "test"); } #[test] fn should_write_native_fns() { let markdown = test::render("native mod a { #[doc = \"test\"] fn a(); }"); assert str::contains(markdown, "test"); } fn write_fn( ctxt: ctxt, doc: doc::fndoc ) { write_header(ctxt, h2, doc::fntag(doc)); write_fnlike( ctxt, doc.sig, doc.brief(), doc.desc(), 
doc.args, doc.return, doc.failure ); } fn write_fnlike( ctxt: ctxt, sig: option<str>, brief: option<str>, desc: option<str>, args: [doc::argdoc], return: doc::retdoc, failure: option<str> ) { write_sig(ctxt, sig); write_brief(ctxt, brief); write_desc(ctxt, desc); write_args(ctxt, args); write_return(ctxt, return); write_failure(ctxt, failure); } fn write_sig(ctxt: ctxt, sig: option<str>) { alt sig { some(sig) { ctxt.w.write_line(code_block_indent(sig)); ctxt.w.write_line(""); } none { fail "unimplemented" } } } fn code_block_indent(s: str) -> str { let lines = str::lines_any(s); let indented = par::seqmap(lines, { |line| #fmt(" %s", line) }); str::connect(indented, "\n") } #[test] fn write_markdown_should_write_function_header() { let markdown = test::render("fn func() { }"); assert str::contains(markdown, "## Function `func`"); } #[test] fn should_write_the_function_signature() { let markdown = test::render("#[doc = \"f\"] fn a() { }"); assert str::contains(markdown, "\n fn a()\n"); } #[test] fn should_insert_blank_line_after_fn_signature() { let markdown = test::render("#[doc = \"f\"] fn a() { }"); assert str::contains(markdown, "fn a()\n\n"); } #[test] fn should_correctly_indent_fn_signature() { let doc = test::create_doc("fn a() { }"); let doc = { pages: [ doc::cratepage({ topmod: { items: [doc::fntag({ sig: some("line 1\nline 2") with doc.cratemod().fns()[0] })] with doc.cratemod() } with doc.cratedoc() }) ] }; let markdown = test::write_markdown_str(doc); assert str::contains(markdown, " line 1\n line 2"); } #[test] fn should_leave_blank_line_between_fn_header_and_sig() { let markdown = test::render("#[doc(brief = \"brief\")] fn a() { }"); assert str::contains(markdown, "Function `a`\n\n fn a()"); } fn write_brief( ctxt: ctxt, brief: option<str> ) { alt brief { some(brief) { ctxt.w.write_line(brief); ctxt.w.write_line(""); } none { } } } #[test] fn should_leave_blank_line_after_brief() { let markdown = test::render("#[doc(brief = \"brief\")] fn a() { }"); 
assert str::contains(markdown, "brief\n\n"); } #[test] fn should_leave_blank_line_between_brief_and_desc() { let markdown = test::render( "#[doc(brief = \"brief\", desc = \"desc\")] fn a() { }" ); assert str::contains(markdown, "brief\n\ndesc"); } fn write_desc( ctxt: ctxt, desc: option<str> ) { alt desc { some(desc) { ctxt.w.write_line(desc); ctxt.w.write_line(""); } none { } } } fn write_args( ctxt: ctxt, args: [doc::argdoc] ) { if vec::is_not_empty(args) { write_lead(ctxt, "Arguments"); ctxt.w.write_line(""); ctxt.w.write_line(""); vec::iter(args) {|arg| write_arg(ctxt, arg) }; ctxt.w.write_line(""); } } fn write_arg(ctxt: ctxt, arg: doc::argdoc) { ctxt.w.write_str(#fmt( "* `%s`", arg.name )); alt arg.desc { some(desc) { ctxt.w.write_str(#fmt(" - %s", desc)); } none { } } ctxt.w.write_line(""); } #[test] fn should_write_argument_list() { let source = "fn a(b: int, c: int) { }"; let markdown = test::render(source); assert str::contains( markdown, "__Arguments__: \n\ \n\ * `b`\n\ * `c`\n\ \n" ); } #[test] fn should_not_write_arguments_if_none() { let source = "fn a() { } fn b() { }"; let markdown = test::render(source); assert !str::contains(markdown, "Arguments"); } #[test] fn should_write_argument_description() { let source = "#[doc(args(a = \"milk\"))] fn f(a: bool) { }"; let markdown = test::render(source); assert str::contains(markdown, "`a` - milk"); } fn write_return( ctxt: ctxt, doc: doc::retdoc ) { alt doc.desc { some(d) { write_lead(ctxt, "Return value"); ctxt.w.write_line(d); ctxt.w.write_line(""); } none { } } } #[test] fn should_write_return_type_on_new_line() { let markdown = test::render( "#[doc(return = \"test\")] fn a() -> int { }"); assert str::contains(markdown, "\n__Return value__: test"); } #[test] fn should_write_blank_line_between_return_type_and_next_header() { let markdown = test::render( "#[doc(return = \"test\")] fn a() -> int { } \ fn b() -> int { }" ); assert str::contains(markdown, "__Return value__: test\n\n##"); } #[test] fn 
should_not_write_return_type_when_there_is_none() { let markdown = test::render("fn a() { }"); assert !str::contains(markdown, "Return value"); } #[test] fn should_write_blank_line_after_return_description() { let markdown = test::render( "#[doc(return = \"blorp\")] fn a() -> int { }" ); assert str::contains(markdown, "blorp\n\n"); } fn write_failure(ctxt: ctxt, str: option<str>) { alt str { some(str) { write_lead(ctxt, "Failure conditions"); ctxt.w.write_line(str); ctxt.w.write_line(""); } none { } } } #[test] fn should_write_failure_conditions() { let markdown = test::render( "#[doc(failure = \"it's the fail\")] fn a () { }"); assert str::contains( markdown, "\n\n__Failure conditions__: it's the fail\n\n"); } fn write_const( ctxt: ctxt, doc: doc::constdoc ) { write_header(ctxt, h2, doc::consttag(doc)); write_sig(ctxt, doc.ty); write_brief(ctxt, doc.brief()); write_desc(ctxt, doc.desc()); } #[test] fn should_write_const_header() { let markdown = test::render("const a: bool = true;"); assert str::contains(markdown, "## Const `a`\n\n"); } #[test] fn should_write_const_description() { let markdown = test::render( "#[doc(brief = \"a\", desc = \"b\")]\ const a: bool = true;"); assert str::contains(markdown, "\n\na\n\nb\n\n"); } fn write_enum( ctxt: ctxt, doc: doc::enumdoc ) { write_header(ctxt, h2, doc::enumtag(doc)); write_brief(ctxt, doc.brief()); write_desc(ctxt, doc.desc()); write_variants(ctxt, doc.variants); } #[test] fn should_write_enum_header() { let markdown = test::render("enum a { b }"); assert str::contains(markdown, "## Enum `a`\n\n"); } #[test] fn should_write_enum_description() { let markdown = test::render( "#[doc(brief = \"a\", desc = \"b\")] enum a { b }"); assert str::contains(markdown, "\n\na\n\nb\n\n"); } fn write_variants( ctxt: ctxt, docs: [doc::variantdoc] ) { if vec::is_empty(docs) { ret; } ctxt.w.write_line("Variants:"); ctxt.w.write_line(""); vec::iter(docs, {|variant| write_variant(ctxt, variant) }); ctxt.w.write_line(""); } fn 
write_variant(ctxt: ctxt, doc: doc::variantdoc) { assert option::is_some(doc.sig); let sig = option::get(doc.sig); alt doc.desc { some(desc) { ctxt.w.write_line(#fmt("* `%s` - %s", sig, desc)); } none { ctxt.w.write_line(#fmt("* `%s`", sig)); } } } #[test] fn should_write_variant_list() { let markdown = test::render( "enum a { \ #[doc = \"test\"] b, \ #[doc = \"test\"] c }"); assert str::contains( markdown, "\n\nVariants:\n\ \n* `b` - test\ \n* `c` - test\n\n"); } #[test] fn should_write_variant_list_without_descs() { let markdown = test::render("enum a { b, c }"); assert str::contains( markdown, "\n\nVariants:\n\ \n* `b`\ \n* `c`\n\n"); } #[test] fn should_write_variant_list_with_signatures() { let markdown = test::render("enum a { b(int), #[doc = \"a\"] c(int) }"); assert str::contains( markdown, "\n\nVariants:\n\ \n* `b(int)`\ \n* `c(int)` - a\n\n"); } fn write_res(ctxt: ctxt, doc: doc::resdoc) { write_header(ctxt, h2, doc::restag(doc)); write_sig(ctxt, doc.sig); write_brief(ctxt, doc.brief()); write_desc(ctxt, doc.desc()); write_args(ctxt, doc.args); } #[test] fn should_write_resource_header() { let markdown = test::render("resource r(a: bool) { }"); assert str::contains(markdown, "## Resource `r`"); } #[test] fn should_write_resource_signature() { let markdown = test::render("resource r(a: bool) { }"); assert str::contains(markdown, "\n resource r(a: bool)\n"); } #[test] fn should_write_resource_args() { let markdown = test::render("#[doc(args(a = \"b\"))]\ resource r(a: bool) { }"); assert str::contains(markdown, "__Arguments__: \n\n* `a` - b"); } fn write_iface(ctxt: ctxt, doc: doc::ifacedoc) { write_header(ctxt, h2, doc::ifacetag(doc)); write_brief(ctxt, doc.brief()); write_desc(ctxt, doc.desc()); write_methods(ctxt, doc.methods); } fn write_methods(ctxt: ctxt, docs: [doc::methoddoc]) { vec::iter(docs) {|doc| write_method(ctxt, doc) } } fn write_method(ctxt: ctxt, doc: doc::methoddoc) { write_header_(ctxt, h3, header_text_("Method", doc.name)); 
write_fnlike( ctxt, doc.sig, doc.brief, doc.desc, doc.args, doc.return, doc.failure ); } #[test] fn should_write_iface_header() { let markdown = test::render("iface i { fn a(); }"); assert str::contains(markdown, "## Interface `i`"); } #[test] fn should_write_iface_brief() { let markdown = test::render( "#[doc(brief = \"brief\")] iface i { fn a(); }"); assert str::contains(markdown, "brief"); } #[test] fn should_write_iface_desc() { let markdown = test::render( "#[doc(desc = \"desc\")] iface i { fn a(); }"); assert str::contains(markdown, "desc"); } #[test] fn should_write_iface_method_header() { let markdown = test::render( "iface i { fn a(); }"); assert str::contains(markdown, "### Method `a`"); } #[test] fn should_write_iface_method_signature() { let markdown = test::render( "iface i { fn a(); }"); assert str::contains(markdown, "\n fn a()"); } #[test] fn should_write_iface_method_argument_header() { let markdown = test::render( "iface a { fn a(b: int); }"); assert str::contains(markdown, "\n\n__Arguments__: \n\n"); } #[test] fn should_write_iface_method_arguments() { let markdown = test::render( "iface a { fn a(b: int); }"); assert str::contains(markdown, "* `b`\n"); } #[test] fn should_not_write_iface_method_arguments_if_none() { let markdown = test::render( "iface a { fn a(); }"); assert !str::contains(markdown, "Arguments"); } #[test] fn should_write_iface_method_return_info() { let markdown = test::render( "iface a { #[doc(return = \"test\")] fn a() -> int; }"); assert str::contains(markdown, "__Return value__: test"); } #[test] fn should_write_iface_method_failure_conditions() { let markdown = test::render( "iface a { #[doc(failure = \"nuked\")] fn a(); }"); assert str::contains(markdown, "__Failure conditions__: nuked"); } fn write_impl(ctxt: ctxt, doc: doc::impldoc) { write_header(ctxt, h2, doc::impltag(doc)); write_brief(ctxt, doc.brief()); write_desc(ctxt, doc.desc()); write_methods(ctxt, doc.methods); } #[test] fn should_write_impl_header() { let 
markdown = test::render("impl i for int { fn a() { } }"); assert str::contains(markdown, "## Implementation `i for int`"); } #[test] fn should_write_impl_header_with_iface() { let markdown = test::render("impl i of j for int { fn a() { } }"); assert str::contains(markdown, "## Implementation `i of j for int`"); } #[test] fn should_write_impl_brief() { let markdown = test::render( "#[doc(brief = \"brief\")] impl i for int { fn a() { } }"); assert str::contains(markdown, "brief"); } #[test] fn should_write_impl_desc() { let markdown = test::render( "#[doc(desc = \"desc\")] impl i for int { fn a() { } }"); assert str::contains(markdown, "desc"); } #[test] fn should_write_impl_method_header() { let markdown = test::render( "impl i for int { fn a() { } }"); assert str::contains(markdown, "### Method `a`"); } #[test] fn should_write_impl_method_signature() { let markdown = test::render( "impl i for int { fn a() { } }"); assert str::contains(markdown, "\n fn a()"); } #[test] fn should_write_impl_method_argument_header() { let markdown = test::render( "impl a for int { fn a(b: int) { } }"); assert str::contains(markdown, "\n\n__Arguments__: \n\n"); } #[test] fn should_write_impl_method_arguments() { let markdown = test::render( "impl a for int { fn a(b: int) { } }"); assert str::contains(markdown, "* `b`\n"); } #[test] fn should_not_write_impl_method_arguments_if_none() { let markdown = test::render( "impl a for int { fn a() { } }"); assert !str::contains(markdown, "Arguments"); } #[test] fn should_write_impl_method_return_info() { let markdown = test::render( "impl a for int { #[doc(return = \"test\")] fn a() -> int { } }"); assert str::contains(markdown, "__Return value__: test"); } #[test] fn should_write_impl_method_failure_conditions() { let markdown = test::render( "impl a for int { #[doc(failure = \"nuked\")] fn a() { } }"); assert str::contains(markdown, "__Failure conditions__: nuked"); } fn write_type( ctxt: ctxt, doc: doc::tydoc ) { write_header(ctxt, h2, 
doc::tytag(doc)); write_sig(ctxt, doc.sig); write_brief(ctxt, doc.brief()); write_desc(ctxt, doc.desc()); } #[test] fn should_write_type_header() { let markdown = test::render("type t = int;"); assert str::contains(markdown, "## Type `t`"); } #[test] fn should_write_type_brief() { let markdown = test::render( "#[doc(brief = \"brief\")] type t = int;"); assert str::contains(markdown, "\n\nbrief\n\n"); } #[test] fn should_write_type_desc() { let markdown = test::render( "#[doc(desc = \"desc\")] type t = int;"); assert str::contains(markdown, "\n\ndesc\n\n"); } #[test] fn should_write_type_signature() { let markdown = test::render("type t = int;"); assert str::contains(markdown, "\n\n type t = int\n\n"); } #[cfg(test)] mod test { fn render(source: str) -> str { let (srv, doc) = create_doc_srv(source); let markdown = write_markdown_str_srv(srv, doc); #debug("markdown: %s", markdown); markdown } fn create_doc_srv(source: str) -> (astsrv::srv, doc::doc) { astsrv::from_str(source) {|srv| let config = { output_style: config::doc_per_crate with config::default_config("whatever") }; let doc = extract::from_srv(srv, ""); #debug("doc (extract): %?", doc); let doc = tystr_pass::mk_pass().f(srv, doc); #debug("doc (tystr): %?", doc); let doc = path_pass::mk_pass().f(srv, doc); #debug("doc (path): %?", doc); let doc = attr_pass::mk_pass().f(srv, doc); #debug("doc (attr): %?", doc); let doc = markdown_index_pass::mk_pass(config).f(srv, doc); #debug("doc (index): %?", doc); (srv, doc) } } fn create_doc(source: str) -> doc::doc { let (_, doc) = create_doc_srv(source); doc } fn write_markdown_str( doc: doc::doc ) -> str { let (writer_factory, po) = markdown_writer::future_writer_factory(); write_markdown(doc, writer_factory); ret tuple::second(comm::recv(po)); } fn write_markdown_str_srv( srv: astsrv::srv, doc: doc::doc ) -> str { let (writer_factory, po) = markdown_writer::future_writer_factory(); let pass = mk_pass(writer_factory); pass.f(srv, doc); ret 
tuple::second(comm::recv(po)); } #[test] fn write_markdown_should_write_crate_header() { astsrv::from_str("") {|srv| let doc = extract::from_srv(srv, "belch"); let doc = attr_pass::mk_pass().f(srv, doc); let markdown = write_markdown_str(doc); assert str::contains(markdown, "# Crate `belch`"); } } #[test] fn write_markdown_should_write_mod_headers() { let markdown = render("mod moo { }"); assert str::contains(markdown, "# Module `moo`"); } #[test] fn should_leave_blank_line_after_header() { let markdown = render("mod morp { }"); assert str::contains(markdown, "Module `morp`\n\n"); } } rustdoc: Write sections in markdown #[doc = "Generate markdown from a document tree"]; import markdown_writer::writer; import markdown_writer::writer_util; import markdown_writer::writer_factory; export mk_pass; export header_kind, header_name, header_text; fn mk_pass(writer_factory: writer_factory) -> pass { let f = fn~(srv: astsrv::srv, doc: doc::doc) -> doc::doc { run(srv, doc, writer_factory) }; { name: "markdown", f: f } } fn run( srv: astsrv::srv, doc: doc::doc, writer_factory: writer_factory ) -> doc::doc { fn mods_last(item1: doc::itemtag, item2: doc::itemtag) -> bool { fn is_mod(item: doc::itemtag) -> bool { alt item { doc::modtag(_) { true } _ { false } } } let lteq = !is_mod(item1) || is_mod(item2); lteq } // Sort the items so mods come last. All mods will be // output at the same header level so sorting mods last // makes the headers come out nested correctly. let sorted_doc = sort_pass::mk_pass( "mods last", mods_last ).f(srv, doc); write_markdown(sorted_doc, writer_factory); ret doc; } #[test] fn should_write_modules_last() { /* Because the markdown pass writes all modules at the same level of indentation (it doesn't 'nest' them), we need to make sure that we write all of the modules contained in each module after all other types of items, or else the header nesting will end up wrong, with modules appearing to contain items that they do not. 
*/ let markdown = test::render( "mod a { }\ fn b() { }\ mod c { }\ fn d() { }" ); let idx_a = option::get(str::find_str(markdown, "# Module `a`")); let idx_b = option::get(str::find_str(markdown, "## Function `b`")); let idx_c = option::get(str::find_str(markdown, "# Module `c`")); let idx_d = option::get(str::find_str(markdown, "## Function `d`")); assert idx_b < idx_d; assert idx_d < idx_a; assert idx_a < idx_c; } type ctxt = { w: writer }; fn write_markdown( doc: doc::doc, writer_factory: writer_factory ) { par::anymap(doc.pages) {|page| let ctxt = { w: writer_factory(page) }; write_page(ctxt, page) }; } fn write_page(ctxt: ctxt, page: doc::page) { alt page { doc::cratepage(doc) { write_crate(ctxt, doc); } doc::itempage(doc) { write_item(ctxt, doc); } } ctxt.w.write_done(); } #[test] fn should_request_new_writer_for_each_page() { // This port will send us a (page, str) pair for every writer // that was created let (writer_factory, po) = markdown_writer::future_writer_factory(); let (srv, doc) = test::create_doc_srv("mod a { }"); // Split the document up into pages let doc = page_pass::mk_pass(config::doc_per_mod).f(srv, doc); write_markdown(doc, writer_factory); // We expect two pages to have been written iter::repeat(2u) {|| comm::recv(po); } } enum hlvl { h1 = 1, h2 = 2, h3 = 3, h4 = 4 } fn write_header(ctxt: ctxt, lvl: hlvl, doc: doc::itemtag) { let text = header_text(doc); write_header_(ctxt, lvl, text); } fn write_header_(ctxt: ctxt, lvl: hlvl, title: str) { let hashes = str::from_chars(vec::init_elt(lvl as uint, '#')); ctxt.w.write_line(#fmt("%s %s", hashes, title)); ctxt.w.write_line(""); } fn write_lead(ctxt: ctxt, title: str) { ctxt.w.write_str(#fmt("__%s__: ", title)) } fn header_kind(doc: doc::itemtag) -> str { alt doc { doc::modtag(_) { if doc.id() == rustc::syntax::ast::crate_node_id { "Crate" } else { "Module" } } doc::nmodtag(_) { "Native module" } doc::fntag(_) { "Function" } doc::consttag(_) { "Const" } doc::enumtag(_) { "Enum" } doc::restag(_) 
{ "Resource" } doc::ifacetag(_) { "Interface" } doc::impltag(doc) { "Implementation" } doc::tytag(_) { "Type" } } } fn header_name(doc: doc::itemtag) -> str { let fullpath = str::connect(doc.path() + [doc.name()], "::"); alt doc { doc::modtag(_) if doc.id() != rustc::syntax::ast::crate_node_id { fullpath } doc::nmodtag(_) { fullpath } doc::impltag(doc) { assert option::is_some(doc.self_ty); let self_ty = option::get(doc.self_ty); alt doc.iface_ty { some(iface_ty) { #fmt("%s of %s for %s", doc.name(), iface_ty, self_ty) } none { #fmt("%s for %s", doc.name(), self_ty) } } } _ { doc.name() } } } fn header_text(doc: doc::itemtag) -> str { header_text_(header_kind(doc), header_name(doc)) } fn header_text_(kind: str, name: str) -> str { #fmt("%s `%s`", kind, name) } fn write_crate( ctxt: ctxt, doc: doc::cratedoc ) { write_header(ctxt, h1, doc::modtag(doc.topmod)); write_top_module(ctxt, doc.topmod); } fn write_top_module( ctxt: ctxt, moddoc: doc::moddoc ) { write_mod_contents(ctxt, moddoc); } fn write_mod( ctxt: ctxt, moddoc: doc::moddoc ) { write_header(ctxt, h1, doc::modtag(moddoc)); write_mod_contents(ctxt, moddoc); } #[test] fn should_write_full_path_to_mod() { let markdown = test::render("mod a { mod b { mod c { } } }"); assert str::contains(markdown, "# Module `a::b::c`"); } fn write_common( ctxt: ctxt, brief: option<str>, desc: option<str>, sections: [doc::section] ) { write_brief(ctxt, brief); write_desc(ctxt, desc); write_sections(ctxt, sections); } fn write_brief( ctxt: ctxt, brief: option<str> ) { alt brief { some(brief) { ctxt.w.write_line(brief); ctxt.w.write_line(""); } none { } } } #[test] fn should_leave_blank_line_after_brief() { let markdown = test::render("#[doc(brief = \"brief\")] fn a() { }"); assert str::contains(markdown, "brief\n\n"); } #[test] fn should_leave_blank_line_between_brief_and_desc() { let markdown = test::render( "#[doc(brief = \"brief\", desc = \"desc\")] fn a() { }" ); assert str::contains(markdown, "brief\n\ndesc"); } fn 
write_desc( ctxt: ctxt, desc: option<str> ) { alt desc { some(desc) { ctxt.w.write_line(desc); ctxt.w.write_line(""); } none { } } } fn write_sections(ctxt: ctxt, sections: [doc::section]) { vec::iter(sections) {|section| write_section(ctxt, section); } } fn write_section(ctxt: ctxt, section: doc::section) { write_header_(ctxt, h4, section.header); ctxt.w.write_line(section.body); ctxt.w.write_line(""); } #[test] fn should_write_sections() { let markdown = test::render( "#[doc = \"\ # Header\n\ Body\"]\ mod a { }"); assert str::contains(markdown, "#### Header\n\nBody\n\n"); } fn write_mod_contents( ctxt: ctxt, doc: doc::moddoc ) { write_common(ctxt, doc.brief(), doc.desc(), doc.sections()); if option::is_some(doc.index) { write_index(ctxt, option::get(doc.index)); } for itemtag in doc.items { write_item(ctxt, itemtag); } } fn write_item(ctxt: ctxt, doc: doc::itemtag) { alt doc { doc::modtag(moddoc) { write_mod(ctxt, moddoc) } doc::nmodtag(nmoddoc) { write_nmod(ctxt, nmoddoc) } doc::fntag(fndoc) { write_fn(ctxt, fndoc) } doc::consttag(constdoc) { write_const(ctxt, constdoc) } doc::enumtag(enumdoc) { write_enum(ctxt, enumdoc) } doc::restag(resdoc) { write_res(ctxt, resdoc) } doc::ifacetag(ifacedoc) { write_iface(ctxt, ifacedoc) } doc::impltag(impldoc) { write_impl(ctxt, impldoc) } doc::tytag(tydoc) { write_type(ctxt, tydoc) } } } #[test] fn should_write_crate_brief_description() { let markdown = test::render("#[doc(brief = \"this is the crate\")];"); assert str::contains(markdown, "this is the crate"); } #[test] fn should_write_crate_description() { let markdown = test::render("#[doc = \"this is the crate\"];"); assert str::contains(markdown, "this is the crate"); } fn write_index(ctxt: ctxt, index: doc::index) { if vec::is_empty(index.entries) { ret; } for entry in index.entries { let header = header_text_(entry.kind, entry.name); let id = entry.link; if option::is_some(entry.brief) { ctxt.w.write_line(#fmt("* [%s](%s) - %s", header, id, option::get(entry.brief))); 
} else { ctxt.w.write_line(#fmt("* [%s](%s)", header, id)); } } ctxt.w.write_line(""); } #[test] fn should_write_index() { let markdown = test::render("mod a { } mod b { }"); assert str::contains( markdown, "\n\n* [Module `a`](#module-a)\n\ * [Module `b`](#module-b)\n\n" ); } #[test] fn should_write_index_brief() { let markdown = test::render("#[doc(brief = \"test\")] mod a { }"); assert str::contains(markdown, "(#module-a) - test\n"); } #[test] fn should_not_write_index_if_no_entries() { let markdown = test::render(""); assert !str::contains(markdown, "\n\n\n"); } fn write_nmod(ctxt: ctxt, doc: doc::nmoddoc) { write_header(ctxt, h1, doc::nmodtag(doc)); write_common(ctxt, doc.brief(), doc.desc(), doc.sections()); for fndoc in doc.fns { write_fn(ctxt, fndoc); } } #[test] fn should_write_native_mods() { let markdown = test::render("#[doc = \"test\"] native mod a { }"); assert str::contains(markdown, "Native module `a`"); assert str::contains(markdown, "test"); } #[test] fn should_write_native_fns() { let markdown = test::render("native mod a { #[doc = \"test\"] fn a(); }"); assert str::contains(markdown, "test"); } fn write_fn( ctxt: ctxt, doc: doc::fndoc ) { write_header(ctxt, h2, doc::fntag(doc)); write_fnlike( ctxt, doc.sig, doc.brief(), doc.desc(), doc.sections(), doc.args, doc.return, doc.failure ); } fn write_fnlike( ctxt: ctxt, sig: option<str>, brief: option<str>, desc: option<str>, sections: [doc::section], args: [doc::argdoc], return: doc::retdoc, failure: option<str> ) { write_sig(ctxt, sig); write_common(ctxt, brief, desc, sections); write_args(ctxt, args); write_return(ctxt, return); write_failure(ctxt, failure); } fn write_sig(ctxt: ctxt, sig: option<str>) { alt sig { some(sig) { ctxt.w.write_line(code_block_indent(sig)); ctxt.w.write_line(""); } none { fail "unimplemented" } } } fn code_block_indent(s: str) -> str { let lines = str::lines_any(s); let indented = par::seqmap(lines, { |line| #fmt(" %s", line) }); str::connect(indented, "\n") } #[test] fn 
write_markdown_should_write_function_header() { let markdown = test::render("fn func() { }"); assert str::contains(markdown, "## Function `func`"); } #[test] fn should_write_the_function_signature() { let markdown = test::render("#[doc = \"f\"] fn a() { }"); assert str::contains(markdown, "\n fn a()\n"); } #[test] fn should_insert_blank_line_after_fn_signature() { let markdown = test::render("#[doc = \"f\"] fn a() { }"); assert str::contains(markdown, "fn a()\n\n"); } #[test] fn should_correctly_indent_fn_signature() { let doc = test::create_doc("fn a() { }"); let doc = { pages: [ doc::cratepage({ topmod: { items: [doc::fntag({ sig: some("line 1\nline 2") with doc.cratemod().fns()[0] })] with doc.cratemod() } with doc.cratedoc() }) ] }; let markdown = test::write_markdown_str(doc); assert str::contains(markdown, " line 1\n line 2"); } #[test] fn should_leave_blank_line_between_fn_header_and_sig() { let markdown = test::render("#[doc(brief = \"brief\")] fn a() { }"); assert str::contains(markdown, "Function `a`\n\n fn a()"); } fn write_args( ctxt: ctxt, args: [doc::argdoc] ) { if vec::is_not_empty(args) { write_lead(ctxt, "Arguments"); ctxt.w.write_line(""); ctxt.w.write_line(""); vec::iter(args) {|arg| write_arg(ctxt, arg) }; ctxt.w.write_line(""); } } fn write_arg(ctxt: ctxt, arg: doc::argdoc) { ctxt.w.write_str(#fmt( "* `%s`", arg.name )); alt arg.desc { some(desc) { ctxt.w.write_str(#fmt(" - %s", desc)); } none { } } ctxt.w.write_line(""); } #[test] fn should_write_argument_list() { let source = "fn a(b: int, c: int) { }"; let markdown = test::render(source); assert str::contains( markdown, "__Arguments__: \n\ \n\ * `b`\n\ * `c`\n\ \n" ); } #[test] fn should_not_write_arguments_if_none() { let source = "fn a() { } fn b() { }"; let markdown = test::render(source); assert !str::contains(markdown, "Arguments"); } #[test] fn should_write_argument_description() { let source = "#[doc(args(a = \"milk\"))] fn f(a: bool) { }"; let markdown = test::render(source); assert 
str::contains(markdown, "`a` - milk"); } fn write_return( ctxt: ctxt, doc: doc::retdoc ) { alt doc.desc { some(d) { write_lead(ctxt, "Return value"); ctxt.w.write_line(d); ctxt.w.write_line(""); } none { } } } #[test] fn should_write_return_type_on_new_line() { let markdown = test::render( "#[doc(return = \"test\")] fn a() -> int { }"); assert str::contains(markdown, "\n__Return value__: test"); } #[test] fn should_write_blank_line_between_return_type_and_next_header() { let markdown = test::render( "#[doc(return = \"test\")] fn a() -> int { } \ fn b() -> int { }" ); assert str::contains(markdown, "__Return value__: test\n\n##"); } #[test] fn should_not_write_return_type_when_there_is_none() { let markdown = test::render("fn a() { }"); assert !str::contains(markdown, "Return value"); } #[test] fn should_write_blank_line_after_return_description() { let markdown = test::render( "#[doc(return = \"blorp\")] fn a() -> int { }" ); assert str::contains(markdown, "blorp\n\n"); } fn write_failure(ctxt: ctxt, str: option<str>) { alt str { some(str) { write_lead(ctxt, "Failure conditions"); ctxt.w.write_line(str); ctxt.w.write_line(""); } none { } } } #[test] fn should_write_failure_conditions() { let markdown = test::render( "#[doc(failure = \"it's the fail\")] fn a () { }"); assert str::contains( markdown, "\n\n__Failure conditions__: it's the fail\n\n"); } fn write_const( ctxt: ctxt, doc: doc::constdoc ) { write_header(ctxt, h2, doc::consttag(doc)); write_sig(ctxt, doc.ty); write_common(ctxt, doc.brief(), doc.desc(), doc.sections()); } #[test] fn should_write_const_header() { let markdown = test::render("const a: bool = true;"); assert str::contains(markdown, "## Const `a`\n\n"); } #[test] fn should_write_const_description() { let markdown = test::render( "#[doc(brief = \"a\", desc = \"b\")]\ const a: bool = true;"); assert str::contains(markdown, "\n\na\n\nb\n\n"); } fn write_enum( ctxt: ctxt, doc: doc::enumdoc ) { write_header(ctxt, h2, doc::enumtag(doc)); 
write_common(ctxt, doc.brief(), doc.desc(), doc.sections()); write_variants(ctxt, doc.variants); } #[test] fn should_write_enum_header() { let markdown = test::render("enum a { b }"); assert str::contains(markdown, "## Enum `a`\n\n"); } #[test] fn should_write_enum_description() { let markdown = test::render( "#[doc(brief = \"a\", desc = \"b\")] enum a { b }"); assert str::contains(markdown, "\n\na\n\nb\n\n"); } fn write_variants( ctxt: ctxt, docs: [doc::variantdoc] ) { if vec::is_empty(docs) { ret; } ctxt.w.write_line("Variants:"); ctxt.w.write_line(""); vec::iter(docs, {|variant| write_variant(ctxt, variant) }); ctxt.w.write_line(""); } fn write_variant(ctxt: ctxt, doc: doc::variantdoc) { assert option::is_some(doc.sig); let sig = option::get(doc.sig); alt doc.desc { some(desc) { ctxt.w.write_line(#fmt("* `%s` - %s", sig, desc)); } none { ctxt.w.write_line(#fmt("* `%s`", sig)); } } } #[test] fn should_write_variant_list() { let markdown = test::render( "enum a { \ #[doc = \"test\"] b, \ #[doc = \"test\"] c }"); assert str::contains( markdown, "\n\nVariants:\n\ \n* `b` - test\ \n* `c` - test\n\n"); } #[test] fn should_write_variant_list_without_descs() { let markdown = test::render("enum a { b, c }"); assert str::contains( markdown, "\n\nVariants:\n\ \n* `b`\ \n* `c`\n\n"); } #[test] fn should_write_variant_list_with_signatures() { let markdown = test::render("enum a { b(int), #[doc = \"a\"] c(int) }"); assert str::contains( markdown, "\n\nVariants:\n\ \n* `b(int)`\ \n* `c(int)` - a\n\n"); } fn write_res(ctxt: ctxt, doc: doc::resdoc) { write_header(ctxt, h2, doc::restag(doc)); write_sig(ctxt, doc.sig); write_common(ctxt, doc.brief(), doc.desc(), doc.sections()); write_args(ctxt, doc.args); } #[test] fn should_write_resource_header() { let markdown = test::render("resource r(a: bool) { }"); assert str::contains(markdown, "## Resource `r`"); } #[test] fn should_write_resource_signature() { let markdown = test::render("resource r(a: bool) { }"); assert 
str::contains(markdown, "\n resource r(a: bool)\n"); } #[test] fn should_write_resource_args() { let markdown = test::render("#[doc(args(a = \"b\"))]\ resource r(a: bool) { }"); assert str::contains(markdown, "__Arguments__: \n\n* `a` - b"); } fn write_iface(ctxt: ctxt, doc: doc::ifacedoc) { write_header(ctxt, h2, doc::ifacetag(doc)); write_common(ctxt, doc.brief(), doc.desc(), doc.sections()); write_methods(ctxt, doc.methods); } fn write_methods(ctxt: ctxt, docs: [doc::methoddoc]) { vec::iter(docs) {|doc| write_method(ctxt, doc) } } fn write_method(ctxt: ctxt, doc: doc::methoddoc) { write_header_(ctxt, h3, header_text_("Method", doc.name)); write_fnlike( ctxt, doc.sig, doc.brief, doc.desc, doc.sections, doc.args, doc.return, doc.failure ); } #[test] fn should_write_iface_header() { let markdown = test::render("iface i { fn a(); }"); assert str::contains(markdown, "## Interface `i`"); } #[test] fn should_write_iface_brief() { let markdown = test::render( "#[doc(brief = \"brief\")] iface i { fn a(); }"); assert str::contains(markdown, "brief"); } #[test] fn should_write_iface_desc() { let markdown = test::render( "#[doc(desc = \"desc\")] iface i { fn a(); }"); assert str::contains(markdown, "desc"); } #[test] fn should_write_iface_method_header() { let markdown = test::render( "iface i { fn a(); }"); assert str::contains(markdown, "### Method `a`"); } #[test] fn should_write_iface_method_signature() { let markdown = test::render( "iface i { fn a(); }"); assert str::contains(markdown, "\n fn a()"); } #[test] fn should_write_iface_method_argument_header() { let markdown = test::render( "iface a { fn a(b: int); }"); assert str::contains(markdown, "\n\n__Arguments__: \n\n"); } #[test] fn should_write_iface_method_arguments() { let markdown = test::render( "iface a { fn a(b: int); }"); assert str::contains(markdown, "* `b`\n"); } #[test] fn should_not_write_iface_method_arguments_if_none() { let markdown = test::render( "iface a { fn a(); }"); assert 
!str::contains(markdown, "Arguments"); } #[test] fn should_write_iface_method_return_info() { let markdown = test::render( "iface a { #[doc(return = \"test\")] fn a() -> int; }"); assert str::contains(markdown, "__Return value__: test"); } #[test] fn should_write_iface_method_failure_conditions() { let markdown = test::render( "iface a { #[doc(failure = \"nuked\")] fn a(); }"); assert str::contains(markdown, "__Failure conditions__: nuked"); } fn write_impl(ctxt: ctxt, doc: doc::impldoc) { write_header(ctxt, h2, doc::impltag(doc)); write_common(ctxt, doc.brief(), doc.desc(), doc.sections()); write_methods(ctxt, doc.methods); } #[test] fn should_write_impl_header() { let markdown = test::render("impl i for int { fn a() { } }"); assert str::contains(markdown, "## Implementation `i for int`"); } #[test] fn should_write_impl_header_with_iface() { let markdown = test::render("impl i of j for int { fn a() { } }"); assert str::contains(markdown, "## Implementation `i of j for int`"); } #[test] fn should_write_impl_brief() { let markdown = test::render( "#[doc(brief = \"brief\")] impl i for int { fn a() { } }"); assert str::contains(markdown, "brief"); } #[test] fn should_write_impl_desc() { let markdown = test::render( "#[doc(desc = \"desc\")] impl i for int { fn a() { } }"); assert str::contains(markdown, "desc"); } #[test] fn should_write_impl_method_header() { let markdown = test::render( "impl i for int { fn a() { } }"); assert str::contains(markdown, "### Method `a`"); } #[test] fn should_write_impl_method_signature() { let markdown = test::render( "impl i for int { fn a() { } }"); assert str::contains(markdown, "\n fn a()"); } #[test] fn should_write_impl_method_argument_header() { let markdown = test::render( "impl a for int { fn a(b: int) { } }"); assert str::contains(markdown, "\n\n__Arguments__: \n\n"); } #[test] fn should_write_impl_method_arguments() { let markdown = test::render( "impl a for int { fn a(b: int) { } }"); assert str::contains(markdown, "* 
`b`\n"); } #[test] fn should_not_write_impl_method_arguments_if_none() { let markdown = test::render( "impl a for int { fn a() { } }"); assert !str::contains(markdown, "Arguments"); } #[test] fn should_write_impl_method_return_info() { let markdown = test::render( "impl a for int { #[doc(return = \"test\")] fn a() -> int { } }"); assert str::contains(markdown, "__Return value__: test"); } #[test] fn should_write_impl_method_failure_conditions() { let markdown = test::render( "impl a for int { #[doc(failure = \"nuked\")] fn a() { } }"); assert str::contains(markdown, "__Failure conditions__: nuked"); } fn write_type( ctxt: ctxt, doc: doc::tydoc ) { write_header(ctxt, h2, doc::tytag(doc)); write_sig(ctxt, doc.sig); write_common(ctxt, doc.brief(), doc.desc(), doc.sections()); } #[test] fn should_write_type_header() { let markdown = test::render("type t = int;"); assert str::contains(markdown, "## Type `t`"); } #[test] fn should_write_type_brief() { let markdown = test::render( "#[doc(brief = \"brief\")] type t = int;"); assert str::contains(markdown, "\n\nbrief\n\n"); } #[test] fn should_write_type_desc() { let markdown = test::render( "#[doc(desc = \"desc\")] type t = int;"); assert str::contains(markdown, "\n\ndesc\n\n"); } #[test] fn should_write_type_signature() { let markdown = test::render("type t = int;"); assert str::contains(markdown, "\n\n type t = int\n\n"); } #[cfg(test)] mod test { fn render(source: str) -> str { let (srv, doc) = create_doc_srv(source); let markdown = write_markdown_str_srv(srv, doc); #debug("markdown: %s", markdown); markdown } fn create_doc_srv(source: str) -> (astsrv::srv, doc::doc) { astsrv::from_str(source) {|srv| let config = { output_style: config::doc_per_crate with config::default_config("whatever") }; let doc = extract::from_srv(srv, ""); #debug("doc (extract): %?", doc); let doc = tystr_pass::mk_pass().f(srv, doc); #debug("doc (tystr): %?", doc); let doc = path_pass::mk_pass().f(srv, doc); #debug("doc (path): %?", doc); let doc 
= attr_pass::mk_pass().f(srv, doc); #debug("doc (attr): %?", doc); let doc = unindent_pass::mk_pass().f(srv, doc); #debug("doc (unindent): %?", doc); let doc = sectionalize_pass::mk_pass().f(srv, doc); #debug("doc (trim): %?", doc); let doc = trim_pass::mk_pass().f(srv, doc); #debug("doc (sectionalize): %?", doc); let doc = markdown_index_pass::mk_pass(config).f(srv, doc); #debug("doc (index): %?", doc); (srv, doc) } } fn create_doc(source: str) -> doc::doc { let (_, doc) = create_doc_srv(source); doc } fn write_markdown_str( doc: doc::doc ) -> str { let (writer_factory, po) = markdown_writer::future_writer_factory(); write_markdown(doc, writer_factory); ret tuple::second(comm::recv(po)); } fn write_markdown_str_srv( srv: astsrv::srv, doc: doc::doc ) -> str { let (writer_factory, po) = markdown_writer::future_writer_factory(); let pass = mk_pass(writer_factory); pass.f(srv, doc); ret tuple::second(comm::recv(po)); } #[test] fn write_markdown_should_write_crate_header() { astsrv::from_str("") {|srv| let doc = extract::from_srv(srv, "belch"); let doc = attr_pass::mk_pass().f(srv, doc); let markdown = write_markdown_str(doc); assert str::contains(markdown, "# Crate `belch`"); } } #[test] fn write_markdown_should_write_mod_headers() { let markdown = render("mod moo { }"); assert str::contains(markdown, "# Module `moo`"); } #[test] fn should_leave_blank_line_after_header() { let markdown = render("mod morp { }"); assert str::contains(markdown, "Module `morp`\n\n"); } }
extern crate structopt; #[macro_use] extern crate structopt_derive; use structopt::StructOpt; use std::collections::HashMap; use std::error::Error; use std::fs::File; use std::io::Read; use std::path::PathBuf; type Literal = i64; type Address = char; #[derive(Debug)] struct ParseError { bad_val: String, underlying: Option<Box<Error>>, } impl ParseError { fn new( bad_val: std::borrow::Cow<str>, underlying: Option<Box<Error>>, ) -> ParseError { ParseError { bad_val: bad_val.into_owned(), underlying: underlying, } } } impl std::fmt::Display for ParseError { fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { write!(f, "{}", self.description()) } } impl Error for ParseError { fn description(&self) -> &str { "Could not parse the value as asked" } fn cause(&self) -> Option<&Error> { match self.underlying { Some(ref e) => Some(e.as_ref()), None => None, } } } #[derive(Debug, Clone, Copy)] enum Value { Literal(Literal), Address(Address), } impl std::str::FromStr for Value { type Err = ParseError; fn from_str(s: &str) -> Result<Self, Self::Err> { i64::from_str_radix(s, 10).map(Value::Literal).or_else(|_| { if s.len() == 1 { Ok(Value::Address(s.chars().next().unwrap())) } else { Err(ParseError::new(s.into(), None)) } }) } } enum Operation { Sound(Value), Set(Address, Value), Add(Address, Value), Mul(Address, Value), Mod(Address, Value), Recover(Value), Jgz(Value, Value), } impl std::str::FromStr for Operation { type Err = ParseError; fn from_str(s: &str) -> Result<Self, Self::Err> { use Operation::*; let err = ParseError::new(s.into(), None); let parts = s.split_whitespace().collect::<Vec<_>>(); let assert_len = |len: usize| { if parts.len() == len { Ok(()) } else { Err(ParseError::new(s.into(), None)) } }; match parts.first() { Some(&"snd") => { assert_len(2)?; Ok(Sound(parts[1].parse()?)) } Some(&"set") => { assert_len(3)?; Ok(Set(parts[1].chars().next().ok_or(err)?, parts[2].parse()?)) } Some(&"add") => { assert_len(3)?; 
Ok(Add(parts[1].chars().next().ok_or(err)?, parts[2].parse()?)) } Some(&"mul") => { assert_len(3)?; Ok(Mul(parts[1].chars().next().ok_or(err)?, parts[2].parse()?)) } Some(&"mod") => { assert_len(3)?; Ok(Mod(parts[1].chars().next().ok_or(err)?, parts[2].parse()?)) } Some(&"rcv") => { assert_len(2)?; Ok(Recover(parts[1].parse()?)) } Some(&"jgz") => { assert_len(3)?; Ok(Jgz(parts[1].parse()?, parts[2].parse()?)) } _ => Err(ParseError::new(s.into(), None)), } } } fn parse(input: &str) -> Result<Vec<Operation>, ParseError> { input.lines().map(|line| line.parse()).collect() } fn part1(instructions: &[Operation]) -> Option<Literal> { let mut computer = HashMap::new(); let mut sounds = Vec::new(); let mut instr = 0i64; let get = |v: Value, cpu: &HashMap<Address, Literal>| match v { Value::Literal(l) => l, Value::Address(a) => *cpu.get(&a).unwrap_or(&0), }; while 0 <= instr && (instr as usize) < instructions.len() { use Operation::*; match instructions[instr as usize] { Sound(v) => sounds.push(get(v, &computer)), Set(addr, v) => { *computer.entry(addr).or_insert(0) = get(v, &computer) } Add(addr, v) => { *computer.entry(addr).or_insert(0) += get(v, &computer) } Mul(addr, v) => { *computer.entry(addr).or_insert(0) *= get(v, &computer) } Mod(addr, v) => { *computer.entry(addr).or_insert(0) %= get(v, &computer) } Recover(v) => { if get(v, &computer) != 0 { return sounds.last().cloned(); } } Jgz(v, jump) => { if get(v, &computer) > 0 { instr += get(jump, &computer); continue; } } } instr += 1; } None } fn main() { let opt = Opt::from_args(); let mut contents = String::new(); if opt.input.to_str() == Some("-") { std::io::stdin() .read_to_string(&mut contents) .expect("could not read stdin"); } else { let mut file = File::open(&opt.input) .expect(&format!("file {} not found", opt.input.display())); file.read_to_string(&mut contents) .expect(&format!("could not read file {}", opt.input.display())); } let instructions = parse(&contents).expect("Could not parse instructions"); 
println!( "Part 1: {}", part1(&instructions).expect("Could not recover a sound!") ); } #[derive(StructOpt, Debug)] #[structopt(name = "day13", about = "Advent of code 2017 day 13")] struct Opt { #[structopt(help = "Input file", parse(from_os_str))] input: PathBuf, } #[cfg(test)] mod tests { use super::*; static INPUT: &str = "set a 1\n\ add a 2\n\ mul a a\n\ mod a 5\n\ snd a\n\ set a 0\n\ rcv a\n\ jgz a -1\n\ set a 1\n\ jgz a -2"; #[test] fn part1_test() { let instructions = parse(INPUT).unwrap(); assert_eq!(part1(&instructions), Some(4)); } } 2017 day 18 part 2 extern crate structopt; #[macro_use] extern crate structopt_derive; use structopt::StructOpt; use std::collections::HashMap; use std::collections::VecDeque; use std::error::Error; use std::fs::File; use std::io::Read; use std::path::PathBuf; type Literal = i64; type Address = char; #[derive(Debug)] struct ParseError { bad_val: String, underlying: Option<Box<Error>>, } impl ParseError { fn new( bad_val: std::borrow::Cow<str>, underlying: Option<Box<Error>>, ) -> ParseError { ParseError { bad_val: bad_val.into_owned(), underlying: underlying, } } } impl std::fmt::Display for ParseError { fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { write!(f, "{}", self.description()) } } impl Error for ParseError { fn description(&self) -> &str { "Could not parse the value as asked" } fn cause(&self) -> Option<&Error> { match self.underlying { Some(ref e) => Some(e.as_ref()), None => None, } } } #[derive(Debug, Clone, Copy)] enum Value { Literal(Literal), Address(Address), } impl std::str::FromStr for Value { type Err = ParseError; fn from_str(s: &str) -> Result<Self, Self::Err> { i64::from_str_radix(s, 10).map(Value::Literal).or_else(|_| { if s.len() == 1 { Ok(Value::Address(s.chars().next().unwrap())) } else { Err(ParseError::new(s.into(), None)) } }) } } #[derive(Debug, Clone, Copy)] enum Operation { Snd(Value), Set(Address, Value), Add(Address, Value), Mul(Address, Value), Mod(Address, 
Value), Rcv(Address), Jgz(Value, Value), } impl std::str::FromStr for Operation { type Err = ParseError; fn from_str(s: &str) -> Result<Self, Self::Err> { use Operation::*; let err = ParseError::new(s.into(), None); let parts = s.split_whitespace().collect::<Vec<_>>(); let assert_len = |len: usize| { if parts.len() == len { Ok(()) } else { Err(ParseError::new(s.into(), None)) } }; match parts.first() { Some(&"snd") => { assert_len(2)?; Ok(Snd(parts[1].parse()?)) } Some(&"set") => { assert_len(3)?; Ok(Set(parts[1].chars().next().ok_or(err)?, parts[2].parse()?)) } Some(&"add") => { assert_len(3)?; Ok(Add(parts[1].chars().next().ok_or(err)?, parts[2].parse()?)) } Some(&"mul") => { assert_len(3)?; Ok(Mul(parts[1].chars().next().ok_or(err)?, parts[2].parse()?)) } Some(&"mod") => { assert_len(3)?; Ok(Mod(parts[1].chars().next().ok_or(err)?, parts[2].parse()?)) } Some(&"rcv") => { assert_len(2)?; Ok(Rcv(parts[1].chars().next().ok_or(err)?)) } Some(&"jgz") => { assert_len(3)?; Ok(Jgz(parts[1].parse()?, parts[2].parse()?)) } _ => Err(ParseError::new(s.into(), None)), } } } fn parse(input: &str) -> Result<Vec<Operation>, ParseError> { input.lines().map(|line| line.parse()).collect() } fn part1(instructions: &[Operation]) -> Option<Literal> { let mut computer = HashMap::new(); let mut snds = Vec::new(); let mut instr = 0i64; let get = |v: Value, cpu: &HashMap<Address, Literal>| match v { Value::Literal(l) => l, Value::Address(a) => *cpu.get(&a).unwrap_or(&0), }; while 0 <= instr && (instr as usize) < instructions.len() { use Operation::*; match instructions[instr as usize] { Snd(v) => snds.push(get(v, &computer)), Set(addr, v) => { *computer.entry(addr).or_insert(0) = get(v, &computer) } Add(addr, v) => { *computer.entry(addr).or_insert(0) += get(v, &computer) } Mul(addr, v) => { *computer.entry(addr).or_insert(0) *= get(v, &computer) } Mod(addr, v) => { *computer.entry(addr).or_insert(0) %= get(v, &computer) } Rcv(v) => { if get(Value::Address(v), &computer) != 0 { return 
snds.last().cloned(); } } Jgz(v, jump) => { if get(v, &computer) > 0 { instr += get(jump, &computer); continue; } } } instr += 1; } None } #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum Status { Running, Waiting, Terminated, } #[derive(Debug)] struct Computer { id: u8, status: Status, instruction_ptr: usize, registers: HashMap<Address, Literal>, queue: VecDeque<Literal>, } impl Computer { fn new(id: u8) -> Computer { Computer { id: id, status: Status::Running, instruction_ptr: 0, registers: std::iter::once(('p', Literal::from(id))).collect(), queue: VecDeque::new(), } } fn lookup(&self, val: Value) -> Literal { match val { Value::Literal(l) => l, Value::Address(a) => *self.registers.get(&a).unwrap_or(&0), } } // returns Some(send) when sending a value fn perform(&mut self, op: Operation) -> Option<Literal> { //println!("{:?}", self); assert_eq!(self.status, Status::Running); use Operation::*; let mut ret = None; let mut incr = true; match op { Snd(v) => { let send = self.lookup(v); ret = Some(send) } Set(addr, v) => { *self.registers.entry(addr).or_insert(0) = self.lookup(v); } Add(addr, v) => { *self.registers.entry(addr).or_insert(0) += self.lookup(v); } Mul(addr, v) => { *self.registers.entry(addr).or_insert(0) *= self.lookup(v); } Mod(addr, v) => { *self.registers.entry(addr).or_insert(0) %= self.lookup(v) } Rcv(addr) => match self.queue.pop_front() { Some(lit) => *self.registers.entry(addr).or_insert(0) = lit, None => { self.status = Status::Waiting; incr = false; } }, Jgz(v, jump) => { if self.lookup(v) > 0 { let new_pos = self.instruction_ptr as Literal + self.lookup(jump); if new_pos < 0 { self.status = Status::Terminated; } else { self.instruction_ptr = new_pos as usize; incr = false; } } } } if incr { self.instruction_ptr += 1; } ret } } fn part2(instructions: &[Operation]) -> usize { let mut send_count = 0; let mut computers = [Computer::new(0), Computer::new(1)]; let mut current = 0; let mut other = if current == 0 { 1 } else { 0 }; loop { let instr 
= instructions[computers[current].instruction_ptr]; if let Some(rcv) = computers[current].perform(instr) { computers[other].queue.push_back(rcv); if current == 1 { send_count += 1; } } if computers[current].instruction_ptr > instructions.len() { computers[current].status = Status::Terminated; } if computers[current].status != Status::Running { std::mem::swap(&mut current, &mut other); match computers[current].status { Status::Running => {} Status::Waiting => if computers[current].queue.is_empty() { break; // deadlock } else { computers[current].status = Status::Running; // resume }, Status::Terminated => break, } } } send_count } fn main() { let opt = Opt::from_args(); let mut contents = String::new(); if opt.input.to_str() == Some("-") { std::io::stdin() .read_to_string(&mut contents) .expect("could not read stdin"); } else { let mut file = File::open(&opt.input) .expect(&format!("file {} not found", opt.input.display())); file.read_to_string(&mut contents) .expect(&format!("could not read file {}", opt.input.display())); } let instructions = parse(&contents).expect("Could not parse instructions"); println!( "Part 1: {}", part1(&instructions).expect("Could not recover a sound!") ); println!("Part 2: {}", part2(&instructions)); } #[derive(StructOpt, Debug)] #[structopt(name = "day13", about = "Advent of code 2017 day 13")] struct Opt { #[structopt(help = "Input file", parse(from_os_str))] input: PathBuf, } #[cfg(test)] mod tests { use super::*; #[test] fn part1_test() { let input = "set a 1\n\ add a 2\n\ mul a a\n\ mod a 5\n\ snd a\n\ set a 0\n\ rcv a\n\ jgz a -1\n\ set a 1\n\ jgz a -2"; let instructions = parse(input).unwrap(); assert_eq!(part1(&instructions), Some(4)); } #[test] fn part2_test() { let input = "snd 1\n\ snd 2\n\ snd p\n\ rcv a\n\ rcv b\n\ rcv c\n\ rcv d"; let instructions = parse(input).unwrap(); assert_eq!(part2(&instructions), 3); } #[test] fn part2_test2() { let input = "set a 1\n\ add a 2\n\ mul a a\n\ mod a 5\n\ snd a\n\ set a 0\n\ rcv a\n\ 
jgz a -1\n\ set a 1\n\ jgz a -2"; let instructions = parse(input).unwrap(); assert_eq!(part2(&instructions), 1); } }
// Third Party #[cfg(feature = "yaml")] use yaml_rust::Yaml; // Internal use App; use ArgMatches; /// The abstract representation of a command line subcommand. /// /// This struct describes all the valid options of the subcommand for the program. Subcommands are /// essentially "sub-[`App`]s" and contain all the same possibilities (such as their own /// [arguments], subcommands, and settings). /// /// # Examples /// /// ```rust /// # use clap::{App, Arg, SubCommand}; /// App::new("myprog") /// .subcommand( /// SubCommand::with_name("config") /// .about("Used for configuration") /// .arg(Arg::with_name("config_file") /// .help("The configuration file to use") /// .index(1))) /// # ; /// ``` /// [`App`]: ./struct.App.html /// [arguments]: ./struct.Arg.html #[derive(Debug, Clone)] pub struct SubCommand<'a> { #[doc(hidden)] pub name: String, #[doc(hidden)] pub matches: ArgMatches<'a>, } impl<'a> SubCommand<'a> { /// Creates a new instance of a subcommand requiring a name. The name will be displayed /// to the user when they print version or help and usage information. /// /// # Examples /// /// ```rust /// # use clap::{App, Arg, SubCommand}; /// App::new("myprog") /// .subcommand( /// SubCommand::with_name("config")) /// # ; /// ``` pub fn with_name<'b>(name: &str) -> App<'a, 'b> { App::new(name) } /// Creates a new instance of a subcommand from a YAML (.yml) document /// /// # Examples /// /// ```ignore /// # #[macro_use] /// # extern crate clap; /// # use clap::Subcommand; /// # fn main() { /// let sc_yaml = load_yaml!("test_subcommand.yml"); /// let sc = SubCommand::from_yaml(sc_yaml); /// # } /// ``` #[cfg(feature = "yaml")] pub fn from_yaml(yaml: &Yaml) -> App { App::from_yaml(yaml) } } depr(SubCommand::with_name): deprecates SubCommand facade to prefer App::new // Third Party #[cfg(feature = "yaml")] use yaml_rust::Yaml; // Internal use App; use ArgMatches; /// The abstract representation of a command line subcommand. 
/// /// This struct describes all the valid options of the subcommand for the program. Subcommands are /// essentially "sub-[`App`]s" and contain all the same possibilities (such as their own /// [arguments], subcommands, and settings). /// /// # Examples /// /// ```rust /// # use clap::{App, Arg, SubCommand}; /// App::new("myprog") /// .subcommand( /// SubCommand::with_name("config") /// .about("Used for configuration") /// .arg(Arg::with_name("config_file") /// .help("The configuration file to use") /// .index(1))) /// # ; /// ``` /// [`App`]: ./struct.App.html /// [arguments]: ./struct.Arg.html #[derive(Debug, Clone)] pub struct SubCommand<'a> { #[doc(hidden)] pub name: String, #[doc(hidden)] pub matches: ArgMatches<'a>, } impl<'a> SubCommand<'a> { // @TODO-v3-beta: remove /// **Deprecated** #[deprecated( since = "2.32.0", note = "Use App::new instead. Will be removed in v3-beta" )] pub fn with_name<'b>(name: &str) -> App<'a, 'b> { App::new(name) } // @TODO-v3-beta: remove /// **Deprecated** #[cfg_attr( feature = "yaml", deprecated( since = "2.32.0", note = "Use App::from instead. Will be removed in v3-beta" ) )] #[cfg(feature = "yaml")] pub fn from_yaml(yaml: &Yaml) -> App { App::from_yaml(yaml) } }
use nom::{ self, bytes::streaming::{tag, take}, character::streaming::digit1, sequence::tuple, IResult, }; use std::str; // list-wildcards = "%" / "*" pub fn is_list_wildcards(c: u8) -> bool { c == b'%' || c == b'*' } // quoted-specials = DQUOTE / "\" pub fn is_quoted_specials(c: u8) -> bool { c == b'"' || c == b'\\' } // resp-specials = "]" pub fn is_resp_specials(c: u8) -> bool { c == b']' } // atom-specials = "(" / ")" / "{" / SP / CTL / list-wildcards / quoted-specials / resp-specials pub fn is_atom_specials(c: u8) -> bool { c == b'(' || c == b')' || c == b'{' || c == b' ' || c < 32 || is_list_wildcards(c) || is_quoted_specials(c) || is_resp_specials(c) } // ATOM-CHAR = <any CHAR except atom-specials> pub fn is_atom_char(c: u8) -> bool { is_char(c) && !is_atom_specials(c) } // nil = "NIL" named!(pub nil, tag_no_case!("NIL")); // ASTRING-CHAR = ATOM-CHAR / resp-specials pub fn is_astring_char(c: u8) -> bool { is_atom_char(c) || is_resp_specials(c) } // QUOTED-CHAR = <any TEXT-CHAR except quoted-specials> / "\" quoted-specials pub fn quoted_data(i: &[u8]) -> IResult<&[u8], &[u8]> { // Ideally this should use nom's `escaped` macro, but it suffers from broken // type inference unless compiled with the verbose-errors feature enabled. 
let mut escape = false; let mut len = 0; for c in i { if *c == b'"' && !escape { break; } len += 1; if *c == b'\\' && !escape { escape = true } else if escape { escape = false; } } Ok((&i[len..], &i[..len])) } // quoted = DQUOTE *QUOTED-CHAR DQUOTE named!(pub quoted<&[u8]>, delimited!( char!('"'), quoted_data, char!('"') )); // quoted bytes as as utf8 named!(pub quoted_utf8<&str>, map_res!(quoted, str::from_utf8)); /// literal = "{" number "}" CRLF *CHAR8 /// ; Number represents the number of CHAR8s pub fn literal(input: &[u8]) -> IResult<&[u8], &[u8]> { let parser = tuple((tag(b"{"), number, tag(b"}"), tag("\r\n"))); let (remaining, (_, count, _, _)) = parser(input)?; let (remaining, data) = take(count)(remaining)?; if !data.iter().all(|byte| is_char8(*byte)) { // FIXME: what ErrorKind should this have? return Err(nom::Err::Error((remaining, nom::error::ErrorKind::Verify))); } Ok((remaining, data)) } /// CHAR8 = %x01-ff ; any OCTET except NUL, %x00 pub fn is_char8(i: u8) -> bool { i != 0 } // string = quoted / literal named!(pub string<&[u8]>, alt!(quoted | literal)); // string bytes as as utf8 named!(pub string_utf8<&str>, map_res!(string, str::from_utf8)); // nstring = string / nil named!(pub nstring<Option<&[u8]>>, alt!( map!(nil, |_| None) | map!(string, |s| Some(s)) )); // nstring bytes as utf8 named!(pub nstring_utf8<Option<&str>>, alt!( map!(nil, |_| None) | map!(string_utf8, |s| Some(s)) )); // number = 1*DIGIT // ; Unsigned 32-bit integer // ; (0 <= n < 4,294,967,296) named!(pub number<u32>, flat_map!(digit1, parse_to!(u32))); // same as `number` but 64-bit named!(pub number_64<u64>, flat_map!(digit1, parse_to!(u64))); // atom = 1*ATOM-CHAR named!(pub atom<&str>, map_res!(take_while1!(is_atom_char), str::from_utf8 )); // astring = 1*ASTRING-CHAR / string named!(pub astring<&[u8]>, alt!( take_while1!(is_astring_char) | string )); // astring bytes as as utf8 named!(pub astring_utf8<&str>, map_res!(astring, str::from_utf8)); // text = 1*TEXT-CHAR named!(pub 
text<&str>, map_res!(take_while!(is_text_char), str::from_utf8 )); // TEXT-CHAR = <any CHAR except CR and LF> pub fn is_text_char(c: u8) -> bool { is_char(c) && c != b'\r' && c != b'\n' } // CHAR = %x01-7F // ; any 7-bit US-ASCII character, // ; excluding NUL // From RFC5234 pub fn is_char(c: u8) -> bool { match c { 0x01..=0x7F => true, _ => false, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_string_literal() { match string(b"{3}\r\nXYZ") { Ok((_, value)) => { assert_eq!(value, b"XYZ"); } rsp => panic!("unexpected response {:?}", rsp), } } #[test] fn test_astring() { match astring(b"text ") { Ok((_, value)) => { assert_eq!(value, b"text"); } rsp => panic!("unexpected response {:?}", rsp), } } } group number use nom::{ self, bytes::streaming::{tag, take}, character::streaming::digit1, sequence::tuple, IResult, }; use std::str; // ----- number ----- // number = 1*DIGIT // ; Unsigned 32-bit integer // ; (0 <= n < 4,294,967,296) named!(pub number<u32>, flat_map!(digit1, parse_to!(u32))); // same as `number` but 64-bit named!(pub number_64<u64>, flat_map!(digit1, parse_to!(u64))); // list-wildcards = "%" / "*" pub fn is_list_wildcards(c: u8) -> bool { c == b'%' || c == b'*' } // quoted-specials = DQUOTE / "\" pub fn is_quoted_specials(c: u8) -> bool { c == b'"' || c == b'\\' } // resp-specials = "]" pub fn is_resp_specials(c: u8) -> bool { c == b']' } // atom-specials = "(" / ")" / "{" / SP / CTL / list-wildcards / quoted-specials / resp-specials pub fn is_atom_specials(c: u8) -> bool { c == b'(' || c == b')' || c == b'{' || c == b' ' || c < 32 || is_list_wildcards(c) || is_quoted_specials(c) || is_resp_specials(c) } // ATOM-CHAR = <any CHAR except atom-specials> pub fn is_atom_char(c: u8) -> bool { is_char(c) && !is_atom_specials(c) } // nil = "NIL" named!(pub nil, tag_no_case!("NIL")); // ASTRING-CHAR = ATOM-CHAR / resp-specials pub fn is_astring_char(c: u8) -> bool { is_atom_char(c) || is_resp_specials(c) } // QUOTED-CHAR = <any TEXT-CHAR except 
quoted-specials> / "\" quoted-specials pub fn quoted_data(i: &[u8]) -> IResult<&[u8], &[u8]> { // Ideally this should use nom's `escaped` macro, but it suffers from broken // type inference unless compiled with the verbose-errors feature enabled. let mut escape = false; let mut len = 0; for c in i { if *c == b'"' && !escape { break; } len += 1; if *c == b'\\' && !escape { escape = true } else if escape { escape = false; } } Ok((&i[len..], &i[..len])) } // quoted = DQUOTE *QUOTED-CHAR DQUOTE named!(pub quoted<&[u8]>, delimited!( char!('"'), quoted_data, char!('"') )); // quoted bytes as as utf8 named!(pub quoted_utf8<&str>, map_res!(quoted, str::from_utf8)); /// literal = "{" number "}" CRLF *CHAR8 /// ; Number represents the number of CHAR8s pub fn literal(input: &[u8]) -> IResult<&[u8], &[u8]> { let parser = tuple((tag(b"{"), number, tag(b"}"), tag("\r\n"))); let (remaining, (_, count, _, _)) = parser(input)?; let (remaining, data) = take(count)(remaining)?; if !data.iter().all(|byte| is_char8(*byte)) { // FIXME: what ErrorKind should this have? 
return Err(nom::Err::Error((remaining, nom::error::ErrorKind::Verify))); } Ok((remaining, data)) } /// CHAR8 = %x01-ff ; any OCTET except NUL, %x00 pub fn is_char8(i: u8) -> bool { i != 0 } // string = quoted / literal named!(pub string<&[u8]>, alt!(quoted | literal)); // string bytes as as utf8 named!(pub string_utf8<&str>, map_res!(string, str::from_utf8)); // nstring = string / nil named!(pub nstring<Option<&[u8]>>, alt!( map!(nil, |_| None) | map!(string, |s| Some(s)) )); // nstring bytes as utf8 named!(pub nstring_utf8<Option<&str>>, alt!( map!(nil, |_| None) | map!(string_utf8, |s| Some(s)) )); // atom = 1*ATOM-CHAR named!(pub atom<&str>, map_res!(take_while1!(is_atom_char), str::from_utf8 )); // astring = 1*ASTRING-CHAR / string named!(pub astring<&[u8]>, alt!( take_while1!(is_astring_char) | string )); // astring bytes as as utf8 named!(pub astring_utf8<&str>, map_res!(astring, str::from_utf8)); // text = 1*TEXT-CHAR named!(pub text<&str>, map_res!(take_while!(is_text_char), str::from_utf8 )); // TEXT-CHAR = <any CHAR except CR and LF> pub fn is_text_char(c: u8) -> bool { is_char(c) && c != b'\r' && c != b'\n' } // CHAR = %x01-7F // ; any 7-bit US-ASCII character, // ; excluding NUL // From RFC5234 pub fn is_char(c: u8) -> bool { match c { 0x01..=0x7F => true, _ => false, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_string_literal() { match string(b"{3}\r\nXYZ") { Ok((_, value)) => { assert_eq!(value, b"XYZ"); } rsp => panic!("unexpected response {:?}", rsp), } } #[test] fn test_astring() { match astring(b"text ") { Ok((_, value)) => { assert_eq!(value, b"text"); } rsp => panic!("unexpected response {:?}", rsp), } } }
// min-version: 1.30.0

// Temporarily disable this test because it fails on 2021.1
// https://github.com/intellij-rust/intellij-rust/issues/6746
// max-version: 1.30.0

// === GDB TESTS ===================================================================================

// gdb-command:run

// gdb-command:print a
// gdbg-check:[...]$1 = enum::EnumA::Var3 = {Var3 = enum::EnumA::Var3 = {a = 5, b = enum::TestEnumB::Var2 = {Var2 = enum::TestEnumB::Var2 = {a = 5, b = "hello", c = enum::EnumC::Var1 = {Var1 = size=1 = {8}}}}}}
// gdb-command:print d
// gdbg-check:[...]$2 = enum::EnumD

// Fixture types: nested enums with struct-like, tuple-like, and empty
// variants so the pretty printer is exercised on each shape.
enum EnumA {
    Var1 { a: u32 },
    Var2(u64),
    Var3 { a: u32, b: TestEnumB },
}

enum TestEnumB {
    Var1(u64),
    Var2 { a: u32, b: String, c: EnumC },
}

enum EnumC {
    Var1(u64),
}

enum EnumD {}

fn main() {
    let a = EnumA::Var3 {
        a: 5,
        b: TestEnumB::Var2 {
            a: 5,
            b: "hello".to_owned(),
            c: EnumC::Var1(8),
        },
    };
    // `EnumD` is uninhabited, so `d` is only declared, never initialized.
    let d: EnumD;
    print!(""); // #break
}
use Module; use socket::Socket; #[derive(Copy, Clone, Debug, PartialEq)] pub enum Gain { LV, HV, } impl ::std::convert::Into<String> for Gain { fn into(self) -> String { let s = match self { Gain::LV => "LV", Gain::HV => "HV", }; String::from(s) } } impl ::std::str::FromStr for Gain { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "LV" => Ok(Gain::LV), "HV" => Ok(Gain::HV), gain => Err(format!("Unknow gain '{}'", gain)), } } } impl ::std::fmt::Display for Gain { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { let display = match self { &Gain::LV => "LV", &Gain::HV => "HV", }; write!(f, "{}", display) } } #[derive(Copy, Clone, Debug, PartialEq)] pub enum Source { IN1, IN2, } impl ::std::convert::Into<String> for Source { fn into(self) -> String { let s = match self { Source::IN1 => "SOUR1", Source::IN2 => "SOUR2", }; String::from(s) } } #[derive(Copy, Clone, Debug, PartialEq)] pub enum Decimation { DEC_1, DEC_8, DEC_64, DEC_1024, DEC_8192, DEC_65536, } impl ::std::convert::Into<String> for Decimation { fn into(self) -> String { let s = match self { Decimation::DEC_1 => "1", Decimation::DEC_8 => "8", Decimation::DEC_64 => "64", Decimation::DEC_1024 => "1024", Decimation::DEC_8192 => "8192", Decimation::DEC_65536 => "65536", }; String::from(s) } } impl ::std::str::FromStr for Decimation { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "1" => Ok(Decimation::DEC_1), "8" => Ok(Decimation::DEC_8), "64" => Ok(Decimation::DEC_64), "1024" => Ok(Decimation::DEC_1024), "8192" => Ok(Decimation::DEC_8192), "65536" => Ok(Decimation::DEC_65536), decimation => Err(format!("Unknow decimation '{}'", decimation)), } } } impl ::std::convert::Into<SamplingRate> for Decimation { fn into(self) -> SamplingRate { match self { Decimation::DEC_1 => SamplingRate::RATE_125MHz, Decimation::DEC_8 => SamplingRate::RATE_15_6MHz, Decimation::DEC_64 => SamplingRate::RATE_1_9MHz, Decimation::DEC_1024 => 
SamplingRate::RATE_103_8kHz, Decimation::DEC_8192 => SamplingRate::RATE_15_2kHz, Decimation::DEC_65536 => SamplingRate::RATE_1_9kHz, } } } #[derive(Copy, Clone, Debug, PartialEq)] pub enum SamplingRate { RATE_125MHz, RATE_15_6MHz, RATE_1_9MHz, RATE_103_8kHz, RATE_15_2kHz, RATE_1_9kHz, } impl SamplingRate { pub fn get_buffer_duration(self) -> ::std::time::Duration { let (s, ns) = match self { SamplingRate::RATE_125MHz => (0, 131_072), SamplingRate::RATE_15_6MHz => (0, 1_049_000), SamplingRate::RATE_1_9MHz => (0, 8_389_000), SamplingRate::RATE_103_8kHz => (0, 134_218_000), SamplingRate::RATE_15_2kHz => (1, 740_000_000), SamplingRate::RATE_1_9kHz => (8, 590_000_000), }; ::std::time::Duration::new(s, ns) } } impl ::std::fmt::Display for SamplingRate { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { let display = match self { &SamplingRate::RATE_125MHz => "125 MHz", &SamplingRate::RATE_15_6MHz => "15.6 MHz", &SamplingRate::RATE_1_9MHz => "1.9 MHz", &SamplingRate::RATE_103_8kHz => "103.8 kHz", &SamplingRate::RATE_15_2kHz => "15.2 kHz", &SamplingRate::RATE_1_9kHz => "1.9 kHz", }; write!(f, "{}", display) } } impl ::std::convert::Into<Decimation> for SamplingRate { fn into(self) -> Decimation { match self { SamplingRate::RATE_125MHz => Decimation::DEC_1, SamplingRate::RATE_15_6MHz => Decimation::DEC_8, SamplingRate::RATE_1_9MHz => Decimation::DEC_64, SamplingRate::RATE_103_8kHz => Decimation::DEC_1024, SamplingRate::RATE_15_2kHz => Decimation::DEC_8192, SamplingRate::RATE_1_9kHz => Decimation::DEC_65536, } } } impl ::std::convert::Into<String> for SamplingRate { fn into(self) -> String { let s = match self { SamplingRate::RATE_125MHz => "125MHz", SamplingRate::RATE_15_6MHz => "15_6MHz", SamplingRate::RATE_1_9MHz => "1_9MHz", SamplingRate::RATE_103_8kHz => "103_8kHz", SamplingRate::RATE_15_2kHz => "15_2kHz", SamplingRate::RATE_1_9kHz => "1_9kHz", }; String::from(s) } } impl ::std::str::FromStr for SamplingRate { type Err = String; fn from_str(s: &str) 
-> Result<Self, Self::Err> { match s { "125000000 Hz" => Ok(SamplingRate::RATE_125MHz), "15600000 Hz" => Ok(SamplingRate::RATE_15_6MHz), "1900000 Hz" => Ok(SamplingRate::RATE_1_9MHz), "103800 Hz" => Ok(SamplingRate::RATE_103_8kHz), "15200 Hz" => Ok(SamplingRate::RATE_15_2kHz), "1900 Hz" => Ok(SamplingRate::RATE_1_9kHz), rate => Err(format!("Unknow sampling rate {}", rate)), } } } #[derive(Clone)] pub struct Acquire { socket: ::std::cell::RefCell<Socket>, started: bool, } impl ::Module for Acquire { fn get_socket<'a>(&'a self) -> ::std::cell::RefMut<'a, ::socket::Socket> { self.socket.borrow_mut() } } impl Acquire { pub fn new(socket: Socket) -> Self { Acquire { socket: ::std::cell::RefCell::new(socket), started: false, } } /** * Starts acquisition. */ pub fn start(&mut self) { self.send("ACQ:START"); self.started = true; } /** * Stops acquisition. */ pub fn stop(&mut self) { self.send("ACQ:STOP"); self.started = false; } pub fn is_started(&self) -> bool { self.started } /** * Stops acquisition and sets all parameters to default values. */ pub fn reset(&self) { self.send("ACQ:RST"); } /** * Set decimation factor. */ pub fn set_decimation(&self, decimation: Decimation) { self.send(format!("ACQ:DEC {}", Into::<String>::into(decimation))); } /** * Get decimation factor. */ pub fn get_decimation(&self) -> Result<Decimation, String> { self.send("ACQ:DEC?"); self.receive() .parse() } /** * Get sampling rate. * * # Panics * * Calling this command makes buffer overflow. * See https://github.com/RedPitaya/RedPitaya/pull/110 */ pub fn get_sampling_rate(&self) -> Result<SamplingRate, String> { self.send("ACQ:SRAT?"); self.receive() .parse() } /** * Enable averaging. */ pub fn enable_average(&self) { self.send("ACQ:AVG ON"); } /** * Disable averaging. */ pub fn disable_average(&self) { self.send("ACQ:AVG OFF"); } /** * Get averaging status. 
*/ pub fn is_average_enabled(&self) -> bool { self.send("ACQ:AVG?"); let message = self.receive(); match message.as_str() { "ON" => true, _ => false, } } /** * Set gain settings to HIGH or LOW. * * This gain is referring to jumper settings on Red Pitaya fast analog inputs. */ pub fn set_gain(&self, source: Source, gain: Gain) { self.send(format!("ACQ:{}:GAIN {}", Into::<String>::into(source), Into::<String>::into(gain))); } /** * Get gain settings to HIGH or LOW. */ pub fn get_gain(&self, source: Source) -> Result<Gain, String> { self.send(format!("ACQ:{}:GAIN?", Into::<String>::into(source))); self.receive() .parse() } } #[cfg(test)] mod test { macro_rules! acquire_assert { ($f:ident, $e:expr) => { let (rx, acquire) = create_acquire(); acquire.$f(); assert_eq!($e, rx.recv().unwrap()); } } #[test] fn test_sampling_rate_get_buffer_duration() { let duration = ::std::time::Duration::new(8, 590_000_000); assert_eq!(duration, ::acquire::SamplingRate::RATE_1_9kHz.get_buffer_duration()); } #[test] fn test_start() { let (rx, mut acquire) = create_acquire(); acquire.start(); assert_eq!("ACQ:START\r\n", rx.recv().unwrap()); } #[test] fn test_stop() { let (rx, mut acquire) = create_acquire(); acquire.stop(); assert_eq!("ACQ:STOP\r\n", rx.recv().unwrap()); } #[test] fn test_is_started() { let (_, mut acquire) = create_acquire(); assert_eq!(acquire.is_started(), false); acquire.start(); assert_eq!(acquire.is_started(), true); acquire.stop(); assert_eq!(acquire.is_started(), false); } #[test] fn test_reset() { acquire_assert!(reset, "ACQ:RST\r\n"); } #[test] fn test_set_decimation() { let (rx, acquire) = create_acquire(); acquire.set_decimation(::acquire::Decimation::DEC_8); assert_eq!("ACQ:DEC 8\r\n", rx.recv().unwrap()); } #[test] fn test_get_decimation() { let (_, acquire) = create_acquire(); assert_eq!(acquire.get_decimation(), Ok(::acquire::Decimation::DEC_1)); } #[test] fn test_get_sampling_rate() { let (_, acquire) = create_acquire(); 
assert_eq!(acquire.get_sampling_rate(), Ok(::acquire::SamplingRate::RATE_125MHz)); } #[test] fn test_enable_average() { acquire_assert!(enable_average, "ACQ:AVG ON\r\n"); } #[test] fn test_disable_average() { acquire_assert!(disable_average, "ACQ:AVG OFF\r\n"); } #[test] fn test_is_average_enabled() { let (_, acquire) = create_acquire(); assert_eq!(acquire.is_average_enabled(), true); } #[test] fn test_set_gain() { let (rx, acquire) = create_acquire(); acquire.set_gain(::acquire::Source::IN1, ::acquire::Gain::LV); assert_eq!("ACQ:SOUR1:GAIN LV\r\n", rx.recv().unwrap()); } #[test] fn test_get_gain() { let (_, acquire) = create_acquire(); assert_eq!(acquire.get_gain(::acquire::Source::IN1), Ok(::acquire::Gain::HV)); } fn create_acquire() -> (::std::sync::mpsc::Receiver<String>, ::acquire::Acquire) { let (addr, rx) = ::test::launch_server(); let socket = ::socket::Socket::new(addr); (rx, ::acquire::Acquire::new(socket)) } } [acquire] Remove internal stat use Module; use socket::Socket; #[derive(Copy, Clone, Debug, PartialEq)] pub enum Gain { LV, HV, } impl ::std::convert::Into<String> for Gain { fn into(self) -> String { let s = match self { Gain::LV => "LV", Gain::HV => "HV", }; String::from(s) } } impl ::std::str::FromStr for Gain { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "LV" => Ok(Gain::LV), "HV" => Ok(Gain::HV), gain => Err(format!("Unknow gain '{}'", gain)), } } } impl ::std::fmt::Display for Gain { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { let display = match self { &Gain::LV => "LV", &Gain::HV => "HV", }; write!(f, "{}", display) } } #[derive(Copy, Clone, Debug, PartialEq)] pub enum Source { IN1, IN2, } impl ::std::convert::Into<String> for Source { fn into(self) -> String { let s = match self { Source::IN1 => "SOUR1", Source::IN2 => "SOUR2", }; String::from(s) } } #[derive(Copy, Clone, Debug, PartialEq)] pub enum Decimation { DEC_1, DEC_8, DEC_64, DEC_1024, DEC_8192, DEC_65536, } impl 
::std::convert::Into<String> for Decimation {
    fn into(self) -> String {
        let s = match self {
            Decimation::DEC_1 => "1",
            Decimation::DEC_8 => "8",
            Decimation::DEC_64 => "64",
            Decimation::DEC_1024 => "1024",
            Decimation::DEC_8192 => "8192",
            Decimation::DEC_65536 => "65536",
        };

        String::from(s)
    }
}

impl ::std::str::FromStr for Decimation {
    type Err = String;

    /// Parses a decimation factor as reported by the board (`ACQ:DEC?`).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "1" => Ok(Decimation::DEC_1),
            "8" => Ok(Decimation::DEC_8),
            "64" => Ok(Decimation::DEC_64),
            "1024" => Ok(Decimation::DEC_1024),
            "8192" => Ok(Decimation::DEC_8192),
            "65536" => Ok(Decimation::DEC_65536),
            decimation => Err(format!("Unknown decimation '{}'", decimation)),
        }
    }
}

/// Every decimation factor corresponds to one fixed effective sampling rate.
impl ::std::convert::Into<SamplingRate> for Decimation {
    fn into(self) -> SamplingRate {
        match self {
            Decimation::DEC_1 => SamplingRate::RATE_125MHz,
            Decimation::DEC_8 => SamplingRate::RATE_15_6MHz,
            Decimation::DEC_64 => SamplingRate::RATE_1_9MHz,
            Decimation::DEC_1024 => SamplingRate::RATE_103_8kHz,
            Decimation::DEC_8192 => SamplingRate::RATE_15_2kHz,
            Decimation::DEC_65536 => SamplingRate::RATE_1_9kHz,
        }
    }
}

/// Effective sampling rates supported by the acquisition subsystem.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum SamplingRate {
    RATE_125MHz,
    RATE_15_6MHz,
    RATE_1_9MHz,
    RATE_103_8kHz,
    RATE_15_2kHz,
    RATE_1_9kHz,
}

impl SamplingRate {
    /// Returns the wall-clock time span covered by one acquisition buffer at
    /// this sampling rate.
    pub fn get_buffer_duration(self) -> ::std::time::Duration {
        // Hard-coded (seconds, nanoseconds) per buffer for each rate.
        let (s, ns) = match self {
            SamplingRate::RATE_125MHz => (0, 131_072),
            SamplingRate::RATE_15_6MHz => (0, 1_049_000),
            SamplingRate::RATE_1_9MHz => (0, 8_389_000),
            SamplingRate::RATE_103_8kHz => (0, 134_218_000),
            SamplingRate::RATE_15_2kHz => (1, 740_000_000),
            SamplingRate::RATE_1_9kHz => (8, 590_000_000),
        };

        ::std::time::Duration::new(s, ns)
    }
}

impl ::std::fmt::Display for SamplingRate {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        let display = match *self {
            SamplingRate::RATE_125MHz => "125 MHz",
            SamplingRate::RATE_15_6MHz => "15.6 MHz",
            SamplingRate::RATE_1_9MHz => "1.9 MHz",
            SamplingRate::RATE_103_8kHz => "103.8 kHz",
            SamplingRate::RATE_15_2kHz => "15.2 kHz",
            SamplingRate::RATE_1_9kHz => "1.9 kHz",
        };

        write!(f, "{}", display)
    }
}

impl ::std::convert::Into<Decimation> for SamplingRate {
    fn into(self) -> Decimation {
        match self {
            SamplingRate::RATE_125MHz => Decimation::DEC_1,
            SamplingRate::RATE_15_6MHz => Decimation::DEC_8,
            SamplingRate::RATE_1_9MHz => Decimation::DEC_64,
            SamplingRate::RATE_103_8kHz => Decimation::DEC_1024,
            SamplingRate::RATE_15_2kHz => Decimation::DEC_8192,
            SamplingRate::RATE_1_9kHz => Decimation::DEC_65536,
        }
    }
}

impl ::std::convert::Into<String> for SamplingRate {
    fn into(self) -> String {
        let s = match self {
            SamplingRate::RATE_125MHz => "125MHz",
            SamplingRate::RATE_15_6MHz => "15_6MHz",
            SamplingRate::RATE_1_9MHz => "1_9MHz",
            SamplingRate::RATE_103_8kHz => "103_8kHz",
            SamplingRate::RATE_15_2kHz => "15_2kHz",
            SamplingRate::RATE_1_9kHz => "1_9kHz",
        };

        String::from(s)
    }
}

impl ::std::str::FromStr for SamplingRate {
    type Err = String;

    /// Parses a sampling rate as reported by the board (`ACQ:SRAT?`).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "125000000 Hz" => Ok(SamplingRate::RATE_125MHz),
            "15600000 Hz" => Ok(SamplingRate::RATE_15_6MHz),
            "1900000 Hz" => Ok(SamplingRate::RATE_1_9MHz),
            "103800 Hz" => Ok(SamplingRate::RATE_103_8kHz),
            "15200 Hz" => Ok(SamplingRate::RATE_15_2kHz),
            "1900 Hz" => Ok(SamplingRate::RATE_1_9kHz),
            rate => Err(format!("Unknown sampling rate {}", rate)),
        }
    }
}

/// SCPI front-end for the acquisition subsystem.
///
/// All commands go through the shared socket; interior mutability is provided
/// by the `RefCell`, so every command method can take `&self`.
#[derive(Clone)]
pub struct Acquire {
    socket: ::std::cell::RefCell<Socket>,
}

impl ::Module for Acquire {
    fn get_socket<'a>(&'a self) -> ::std::cell::RefMut<'a, ::socket::Socket> {
        self.socket.borrow_mut()
    }
}

impl Acquire {
    pub fn new(socket: Socket) -> Self {
        Acquire {
            socket: ::std::cell::RefCell::new(socket),
        }
    }

    /**
     * Starts acquisition.
     */
    pub fn start(&self) {
        self.send("ACQ:START");
    }

    /**
     * Stops acquisition.
     */
    pub fn stop(&self) {
        self.send("ACQ:STOP");
    }

    /**
     * Stops acquisition and sets all parameters to default values.
     */
    pub fn reset(&self) {
        self.send("ACQ:RST");
    }

    /**
     * Set decimation factor.
     */
    pub fn set_decimation(&self, decimation: Decimation) {
        self.send(format!("ACQ:DEC {}", Into::<String>::into(decimation)));
    }

    /**
     * Get decimation factor.
     */
    pub fn get_decimation(&self) -> Result<Decimation, String> {
        self.send("ACQ:DEC?");

        self.receive()
            .parse()
    }

    /**
     * Get sampling rate.
     *
     * # Panics
     *
     * Calling this command makes buffer overflow.
     * See https://github.com/RedPitaya/RedPitaya/pull/110
     */
    pub fn get_sampling_rate(&self) -> Result<SamplingRate, String> {
        self.send("ACQ:SRAT?");

        self.receive()
            .parse()
    }

    /**
     * Enable averaging.
     */
    pub fn enable_average(&self) {
        self.send("ACQ:AVG ON");
    }

    /**
     * Disable averaging.
     */
    pub fn disable_average(&self) {
        self.send("ACQ:AVG OFF");
    }

    /**
     * Get averaging status.
     */
    pub fn is_average_enabled(&self) -> bool {
        self.send("ACQ:AVG?");

        // Anything other than "ON" is treated as disabled.
        self.receive() == "ON"
    }

    /**
     * Set gain settings to HIGH or LOW.
     *
     * This gain is referring to jumper settings on Red Pitaya fast analog inputs.
     */
    pub fn set_gain(&self, source: Source, gain: Gain) {
        self.send(format!("ACQ:{}:GAIN {}", Into::<String>::into(source), Into::<String>::into(gain)));
    }

    /**
     * Get gain settings to HIGH or LOW.
     */
    pub fn get_gain(&self, source: Source) -> Result<Gain, String> {
        self.send(format!("ACQ:{}:GAIN?", Into::<String>::into(source)));

        self.receive()
            .parse()
    }
}

#[cfg(test)]
mod test {
    // Runs the given zero-argument command and checks the exact SCPI message
    // that reaches the (mock) socket.
    macro_rules! acquire_assert {
        ($f:ident, $e:expr) => {
            let (rx, acquire) = create_acquire();

            acquire.$f();
            assert_eq!($e, rx.recv().unwrap());
        }
    }

    #[test]
    fn test_sampling_rate_get_buffer_duration() {
        let duration = ::std::time::Duration::new(8, 590_000_000);

        assert_eq!(duration, ::acquire::SamplingRate::RATE_1_9kHz.get_buffer_duration());
    }

    #[test]
    fn test_start() {
        acquire_assert!(start, "ACQ:START\r\n");
    }

    #[test]
    fn test_stop() {
        acquire_assert!(stop, "ACQ:STOP\r\n");
    }

    #[test]
    fn test_reset() {
        acquire_assert!(reset, "ACQ:RST\r\n");
    }

    #[test]
    fn test_set_decimation() {
        let (rx, acquire) = create_acquire();

        acquire.set_decimation(::acquire::Decimation::DEC_8);
        assert_eq!("ACQ:DEC 8\r\n", rx.recv().unwrap());
    }

    #[test]
    fn test_get_decimation() {
        let (_, acquire) = create_acquire();

        assert_eq!(acquire.get_decimation(), Ok(::acquire::Decimation::DEC_1));
    }

    #[test]
    fn test_get_sampling_rate() {
        let (_, acquire) = create_acquire();

        assert_eq!(acquire.get_sampling_rate(), Ok(::acquire::SamplingRate::RATE_125MHz));
    }

    #[test]
    fn test_enable_average() {
        acquire_assert!(enable_average, "ACQ:AVG ON\r\n");
    }

    #[test]
    fn test_disable_average() {
        acquire_assert!(disable_average, "ACQ:AVG OFF\r\n");
    }

    #[test]
    fn test_is_average_enabled() {
        let (_, acquire) = create_acquire();

        assert_eq!(acquire.is_average_enabled(), true);
    }

    #[test]
    fn test_set_gain() {
        let (rx, acquire) = create_acquire();

        acquire.set_gain(::acquire::Source::IN1, ::acquire::Gain::LV);
        assert_eq!("ACQ:SOUR1:GAIN LV\r\n", rx.recv().unwrap());
    }

    #[test]
    fn test_get_gain() {
        let (_, acquire) = create_acquire();

        assert_eq!(acquire.get_gain(::acquire::Source::IN1), Ok(::acquire::Gain::HV));
    }

    fn create_acquire() -> (::std::sync::mpsc::Receiver<String>, ::acquire::Acquire) {
        let (addr, rx) = ::test::launch_server();
        let socket = ::socket::Socket::new(addr);

        (rx, ::acquire::Acquire::new(socket))
    }
}
// functions to ask the user for data, with crate:spinner use std::io::stdin; use regex::Regex; use ansi_term::Colour::*; /// Ask the user for a Yes/No answer. Optionally provide a default value. If none is provided, this /// keeps loop{}ing pub fn ask_bool(s: &str, default: Option<bool>) -> bool { lazy_static! { static ref R_YES: Regex = Regex::new(r"^[Yy]$").unwrap(); static ref R_NO: Regex = Regex::new(r"^[Nn]$").unwrap(); } loop { ask_question(s, false); if match default { Some(s) => s, _ => true } { println!(" [Yn]: "); } else { println!(" [yN]: "); } let mut s = String::new(); let _ = stdin().read_line(&mut s); if R_YES.is_match(&s[..]) { return true } else if R_NO.is_match(&s[..]) { return false } else { if default.is_some() { return default.unwrap(); } // else again... } } } pub fn ask_uint(s: &str) -> u64 { unimplemented!() } pub fn ask_string(s: &str) -> String { unimplemented!() } pub fn ask_enum<E: From<String>>(s: &str) -> E { unimplemented!() } /// Helper function to print a imag question string. The `question` argument may not contain a /// trailing questionmark. /// /// The `nl` parameter can be used to configure whether a newline character should be printed pub fn ask_question(question: &str, nl: bool) { if nl { println!("[imag]: {}?", Yellow.paint(question)); } else { print!("[imag]: {}?", Yellow.paint(question)); } } Move real implementation to helper, so we can test. Tests were added, and yes I'm doing some `assert!((foo == true)` here, so it is easier to spot what happens here, as a `!` is overlooked sometimes. // functions to ask the user for data, with crate:spinner use std::io::stdin; use std::io::BufRead; use std::io::BufReader; use regex::Regex; use ansi_term::Colour::*; /// Ask the user for a Yes/No answer. Optionally provide a default value. 
If none is provided, this /// keeps loop{}ing pub fn ask_bool(s: &str, default: Option<bool>) -> bool { ask_bool_(s, default, &mut BufReader::new(stdin())) } fn ask_bool_<R: BufRead>(s: &str, default: Option<bool>, input: &mut R) -> bool { lazy_static! { static ref R_YES: Regex = Regex::new(r"^[Yy]$").unwrap(); static ref R_NO: Regex = Regex::new(r"^[Nn]$").unwrap(); } loop { ask_question(s, false); if match default { Some(s) => s, _ => true } { println!(" [Yn]: "); } else { println!(" [yN]: "); } let mut s = String::new(); let _ = input.read_line(&mut s); if R_YES.is_match(&s[..]) { return true } else if R_NO.is_match(&s[..]) { return false } else { if default.is_some() { return default.unwrap(); } // else again... } } } pub fn ask_uint(s: &str) -> u64 { unimplemented!() } pub fn ask_string(s: &str) -> String { unimplemented!() } pub fn ask_enum<E: From<String>>(s: &str) -> E { unimplemented!() } /// Helper function to print a imag question string. The `question` argument may not contain a /// trailing questionmark. 
/// /// The `nl` parameter can be used to configure whether a newline character should be printed pub fn ask_question(question: &str, nl: bool) { if nl { println!("[imag]: {}?", Yellow.paint(question)); } else { print!("[imag]: {}?", Yellow.paint(question)); } } #[cfg(test)] mod test { use std::io::BufReader; use super::ask_bool_; #[test] fn test_ask_bool_nodefault_yes() { let question = "Is this true"; let default = None; let answers = "\n\n\n\n\ny"; assert!(ask_bool_(question, default, &mut BufReader::new(answers.as_bytes()))); } #[test] fn test_ask_bool_nodefault_no() { let question = "Is this true"; let default = None; let answers = "n"; assert!(false == ask_bool_(question, default, &mut BufReader::new(answers.as_bytes()))); } #[test] fn test_ask_bool_default_no() { let question = "Is this true"; let default = Some(false); let answers = "n"; assert!(false == ask_bool_(question, default, &mut BufReader::new(answers.as_bytes()))); } #[test] fn test_ask_bool_default_yes() { let question = "Is this true"; let default = Some(true); let answers = "y"; assert!(true == ask_bool_(question, default, &mut BufReader::new(answers.as_bytes()))); } #[test] fn test_ask_bool_default_yes_answer_no() { let question = "Is this true"; let default = Some(true); let answers = "n"; assert!(false == ask_bool_(question, default, &mut BufReader::new(answers.as_bytes()))); } #[test] fn test_ask_bool_default_no_answer_yes() { let question = "Is this true"; let default = Some(false); let answers = "y"; assert!(true == ask_bool_(question, default, &mut BufReader::new(answers.as_bytes()))); } #[test] fn test_ask_bool_default_no_without_answer() { let question = "Is this true"; let default = Some(false); let answers = "\n"; assert!(false == ask_bool_(question, default, &mut BufReader::new(answers.as_bytes()))); } #[test] fn test_ask_bool_default_yes_without_answer() { let question = "Is this true"; let default = Some(true); let answers = "\n"; assert!(true == ask_bool_(question, default, &mut 
BufReader::new(answers.as_bytes()))); } }
extern crate futures; extern crate indy_sys; use indy::{IndyError, ErrorCode}; use indy::ledger; use self::futures::Future; use self::indy_sys::ledger::{CustomTransactionParser, CustomFree, indy_register_transaction_parser_for_sp}; use utils::{timeout, anoncreds, blob_storage, did, wallet, pool, callback}; use utils::constants::*; use std::sync::{Once, ONCE_INIT}; use std::mem; use std::ffi::CString; pub static mut SCHEMA_ID: &'static str = ""; pub static mut CRED_DEF_ID: &'static str = ""; pub static mut REV_REG_DEF_ID: &'static str = ""; pub const SCHEMA_DATA: &'static str = r#"{"id":"id","name":"gvt","version":"1.0","attr_names":["name", "age", "sex", "height"]}"#; const SUBMIT_RETRY_CNT: usize = 3; pub fn sign_and_submit_request(pool_handle: i32, wallet_handle: i32, submitter_did: &str, request_json: &str) -> Result<String, IndyError> { ledger::sign_and_submit_request(pool_handle, wallet_handle, submitter_did, request_json).wait() } pub fn submit_request_with_retries(pool_handle: i32, request_json: &str, previous_response: &str) -> Result<String, IndyError> { _submit_retry(extract_seq_no_from_reply(previous_response).unwrap(), || { submit_request(pool_handle, request_json) }) } pub fn submit_request(pool_handle: i32, request_json: &str) -> Result<String, IndyError> { ledger::submit_request(pool_handle, request_json).wait() } pub fn submit_action(pool_handle: i32, request_json: &str, nodes: Option<&str>, timeout: Option<i32>) -> Result<String, IndyError> { ledger::submit_action(pool_handle, request_json, nodes, timeout).wait() } pub fn sign_request(wallet_handle: i32, submitter_did: &str, request_json: &str) -> Result<String, IndyError> { ledger::sign_request(wallet_handle, submitter_did, request_json).wait() } pub fn multi_sign_request(wallet_handle: i32, submitter_did: &str, request_json: &str) -> Result<String, IndyError> { ledger::multi_sign_request(wallet_handle, submitter_did, request_json).wait() } pub fn extract_seq_no_from_reply(reply: &str) -> 
Result<u64, &'static str> { let metadata = get_response_metadata(reply).map_err(|_| "Can not get Metadata from Reply")?; ::serde_json::from_str::<::serde_json::Value>(&metadata).map_err(|_| "Metadata isn't valid JSON")? ["seqNo"] .as_u64().ok_or("Missed seqNo in reply") } fn _submit_retry<F>(minimal_timestamp: u64, submit_action: F) -> Result<String, IndyError> where F: Fn() -> Result<String, IndyError> { let mut i = 0; let action_result = loop { let action_result = submit_action()?; let retry = extract_seq_no_from_reply(&action_result) .map(|received_timestamp| received_timestamp < minimal_timestamp) .unwrap_or(true); if retry && i < SUBMIT_RETRY_CNT { ::std::thread::sleep(timeout::short_timeout()); i += 1; } else { break action_result; } }; Ok(action_result) } pub fn build_get_ddo_request(submitter_did: Option<&str>, target_did: &str) -> Result<String, IndyError> { ledger::build_get_ddo_request(submitter_did, target_did).wait() } pub fn build_nym_request(submitter_did: &str, target_did: &str, verkey: Option<&str>, alias: Option<&str>, role: Option<&str>) -> Result<String, IndyError> { ledger::build_nym_request(submitter_did, target_did, verkey, alias, role).wait() } pub fn build_attrib_request(submitter_did: &str, target_did: &str, hash: Option<&str>, raw: Option<&str>, enc: Option<&str>) -> Result<String, IndyError> { ledger::build_attrib_request(submitter_did, target_did, hash, raw, enc).wait() } pub fn build_get_attrib_request(submitter_did: Option<&str>, target_did: &str, raw: Option<&str>, hash: Option<&str>, enc: Option<&str>) -> Result<String, IndyError> { ledger::build_get_attrib_request(submitter_did, target_did, raw, hash, enc).wait() } pub fn build_get_nym_request(submitter_did: Option<&str>, target_did: &str) -> Result<String, IndyError> { ledger::build_get_nym_request(submitter_did, target_did).wait() } pub fn build_schema_request(submitter_did: &str, data: &str) -> Result<String, IndyError> { ledger::build_schema_request(submitter_did, data).wait() 
} pub fn build_get_schema_request(submitter_did: Option<&str>, id: &str) -> Result<String, IndyError> { ledger::build_get_schema_request(submitter_did, id).wait() } pub fn build_cred_def_txn(submitter_did: &str, cred_def_json: &str) -> Result<String, IndyError> { ledger::build_cred_def_request(submitter_did, cred_def_json).wait() } pub fn build_get_cred_def_request(submitter_did: Option<&str>, id: &str) -> Result<String, IndyError> { ledger::build_get_cred_def_request(submitter_did, id).wait() } pub fn build_node_request(submitter_did: &str, target_did: &str, data: &str) -> Result<String, IndyError> { ledger::build_node_request(submitter_did, target_did, data).wait() } pub fn build_get_validator_info_request(submitter_did: &str) -> Result<String, IndyError> { ledger::build_get_validator_info_request(submitter_did).wait() } pub fn build_get_txn_request(submitter_did: Option<&str>, data: i32, ledger_type: Option<&str>) -> Result<String, IndyError> { ledger::build_get_txn_request(submitter_did, ledger_type, data).wait() } pub fn build_pool_config_request(submitter_did: &str, writes: bool, force: bool) -> Result<String, IndyError> { ledger::build_pool_config_request(submitter_did, writes, force).wait() } pub fn build_pool_restart_request(submitter_did: &str, action: &str, datetime: Option<&str>) -> Result<String, IndyError> { ledger::build_pool_restart_request(submitter_did, action, datetime).wait() } pub fn build_pool_upgrade_request(submitter_did: &str, name: &str, version: &str, action: &str, sha256: &str, timeout: Option<u32>, schedule: Option<&str>, justification: Option<&str>, reinstall: bool, force: bool, package: Option<&str>) -> Result<String, IndyError> { ledger::build_pool_upgrade_request(submitter_did, name, version, action, sha256, timeout, schedule, justification, reinstall, force, package).wait() } pub fn build_revoc_reg_def_request(submitter_did: &str, data: &str) -> Result<String, IndyError> { ledger::build_revoc_reg_def_request(submitter_did, 
data).wait() } pub fn build_revoc_reg_entry_request(submitter_did: &str, rev_reg_def_id: &str, rev_reg_type: &str, value: &str) -> Result<String, IndyError> { ledger::build_revoc_reg_entry_request(submitter_did, rev_reg_def_id, rev_reg_type, value).wait() } pub fn build_get_revoc_reg_def_request(submitter_did: Option<&str>, id: &str) -> Result<String, IndyError> { ledger::build_get_revoc_reg_def_request(submitter_did, id).wait() } pub fn build_get_revoc_reg_request(submitter_did: Option<&str>, rev_reg_def_id: &str, timestamp: u64) -> Result<String, IndyError> { ledger::build_get_revoc_reg_request(submitter_did, rev_reg_def_id, timestamp as i64).wait() } pub fn build_get_revoc_reg_delta_request(submitter_did: Option<&str>, rev_reg_def_id: &str, from: Option<u64>, to: u64) -> Result<String, IndyError> { ledger::build_get_revoc_reg_delta_request(submitter_did, rev_reg_def_id, from.map(|f| f as i64).unwrap_or(-1), to as i64).wait() } pub fn parse_get_schema_response(get_schema_response: &str) -> Result<(String, String), IndyError> { ledger::parse_get_schema_response(get_schema_response).wait() } pub fn parse_get_cred_def_response(get_cred_def_response: &str) -> Result<(String, String), IndyError> { ledger::parse_get_cred_def_response(get_cred_def_response).wait() } pub fn parse_get_revoc_reg_def_response(get_revoc_reg_def_response: &str) -> Result<(String, String), IndyError> { ledger::parse_get_revoc_reg_def_response(get_revoc_reg_def_response).wait() } pub fn parse_get_revoc_reg_response(get_revoc_reg_response: &str) -> Result<(String, String, u64), IndyError> { ledger::parse_get_revoc_reg_response(get_revoc_reg_response).wait() } pub fn parse_get_revoc_reg_delta_response(get_revoc_reg_delta_response: &str) -> Result<(String, String, u64), IndyError> { ledger::parse_get_revoc_reg_delta_response(get_revoc_reg_delta_response).wait() } pub fn register_transaction_parser_for_sp(txn_type: &str, parse: CustomTransactionParser, free: CustomFree) -> Result<(), ErrorCode> { 
let (receiver, command_handle, cb) = callback::_closure_to_cb_ec(); let txn_type = CString::new(txn_type).unwrap(); let err = unsafe { indy_register_transaction_parser_for_sp(command_handle, txn_type.as_ptr(), Some(parse), Some(free), cb) }; super::results::result_to_empty(err, receiver) } pub fn get_response_metadata(response: &str) -> Result<String, IndyError> { ledger::get_response_metadata(response).wait() } pub fn build_auth_rule_request(submitter_did: &str, txn_type: &str, action: &str, field: &str, old_value: Option<&str>, new_value: Option<&str>, constraint: &str, ) -> Result<String, IndyError> { ledger::build_auth_rule_request(submitter_did, txn_type, action, field, old_value, new_value, constraint).wait() } pub fn build_get_auth_rule_request(submitter_did: Option<&str>, auth_type: Option<&str>, auth_action: Option<&str>, field: Option<&str>, old_value: Option<&str>, new_value: Option<&str>, ) -> Result<String, IndyError> { ledger::build_get_auth_rule_request(submitter_did, auth_type, auth_action, field, old_value, new_value).wait() } pub fn build_txn_author_agreement_request(submitter_did: &str, text: &str, version: &str) -> Result<String, IndyError> { ledger::build_txn_author_agreement_request(submitter_did, text, version).wait() } pub fn build_get_txn_author_agreement_request(submitter_did: Option<&str>, data: Option<&str>, ) -> Result<String, IndyError> { ledger::build_get_txn_author_agreement_request(submitter_did, data).wait() } pub fn build_acceptance_mechanisms_request(submitter_did: &str, aml: &str, version: &str, aml_context: Option<&str>) -> Result<String, IndyError> { ledger::build_acceptance_mechanisms_request(submitter_did, aml, version, aml_context).wait() } pub fn build_get_acceptance_mechanisms_request(submitter_did: Option<&str>, timestamp: Option<i64>, version: Option<&str>) -> Result<String, IndyError> { ledger::build_get_acceptance_mechanisms_request(submitter_did, timestamp, version).wait() } pub fn 
append_txn_author_agreement_acceptance_to_request(request_json: &str, text: Option<&str>, version: Option<&str>, taa_digest: Option<&str>, acc_mech_type: &str, time_of_acceptance: u64) -> Result<String, IndyError> { ledger::append_txn_author_agreement_acceptance_to_request(request_json, text, version, taa_digest, acc_mech_type, time_of_acceptance).wait() } pub fn post_entities() -> (&'static str, &'static str, &'static str) { lazy_static! { static ref COMMON_ENTITIES_INIT: Once = ONCE_INIT; } unsafe { COMMON_ENTITIES_INIT.call_once(|| { let pool_name = "COMMON_ENTITIES_POOL"; let pool_handle = pool::create_and_open_pool_ledger(pool_name).unwrap(); let (wallet_handle, wallet_config) = wallet::create_and_open_default_wallet(pool_name).unwrap(); let (issuer_did, _) = did::create_store_and_publish_my_did_from_trustee(wallet_handle, pool_handle).unwrap(); let (schema_id, schema_json) = anoncreds::issuer_create_schema(&issuer_did, GVT_SCHEMA_NAME, SCHEMA_VERSION, GVT_SCHEMA_ATTRIBUTES).unwrap(); let schema_request = build_schema_request(&issuer_did, &schema_json).unwrap(); let schema_response = sign_and_submit_request(pool_handle, wallet_handle, &issuer_did, &schema_request).unwrap(); pool::check_response_type(&schema_response, ::utils::types::ResponseType::REPLY); let get_schema_request = build_get_schema_request(Some(&issuer_did), &schema_id).unwrap(); let get_schema_response = submit_request_with_retries(pool_handle, &get_schema_request, &schema_response).unwrap(); let (schema_id, schema_json) = parse_get_schema_response(&get_schema_response).unwrap(); let (cred_def_id, cred_def_json) = anoncreds::issuer_create_credential_definition(wallet_handle, &issuer_did, &schema_json, TAG_1, None, Some(&anoncreds::revocation_cred_def_config())).unwrap(); let cred_def_request = build_cred_def_txn(&issuer_did, &cred_def_json).unwrap(); let cred_def_response = sign_and_submit_request(pool_handle, wallet_handle, &issuer_did, &cred_def_request).unwrap(); 
pool::check_response_type(&cred_def_response, ::utils::types::ResponseType::REPLY); let tails_writer_config = anoncreds::tails_writer_config(); let tails_writer_handle = blob_storage::open_writer("default", &tails_writer_config).unwrap(); let (rev_reg_id, revoc_reg_def_json, rev_reg_entry_json) = anoncreds::issuer_create_and_store_revoc_reg(wallet_handle, &issuer_did, None, TAG_1, &cred_def_id, &anoncreds::issuance_on_demand_rev_reg_config(), tails_writer_handle).unwrap(); let rev_reg_def_request = build_revoc_reg_def_request(&issuer_did, &revoc_reg_def_json).unwrap(); let rev_reg_def_response = sign_and_submit_request(pool_handle, wallet_handle, &issuer_did, &rev_reg_def_request).unwrap(); pool::check_response_type(&rev_reg_def_response, ::utils::types::ResponseType::REPLY); let rev_reg_entry_request = build_revoc_reg_entry_request(&issuer_did, &rev_reg_id, REVOC_REG_TYPE, &rev_reg_entry_json).unwrap(); sign_and_submit_request(pool_handle, wallet_handle, &issuer_did, &rev_reg_entry_request).unwrap(); let res = mem::transmute(&schema_id as &str); mem::forget(schema_id); SCHEMA_ID = res; let res = mem::transmute(&cred_def_id as &str); mem::forget(cred_def_id); CRED_DEF_ID = res; let res = mem::transmute(&rev_reg_id as &str); mem::forget(rev_reg_id); REV_REG_DEF_ID = res; pool::close(pool_handle).unwrap(); pool::delete(pool_name).unwrap(); wallet::close_wallet(wallet_handle).unwrap(); wallet::delete_wallet(&wallet_config, WALLET_CREDENTIALS).unwrap(); }); (SCHEMA_ID, CRED_DEF_ID, REV_REG_DEF_ID) } } Correct initialization of common wallet for anoncreds integration tests. 
Signed-off-by: Sergey Minaev <322af3f2df10918c6ef5280f56be0b711278b1ae@dsr-corporation.com> extern crate futures; extern crate indy_sys; use indy::{IndyError, ErrorCode}; use indy::ledger; use self::futures::Future; use self::indy_sys::ledger::{CustomTransactionParser, CustomFree, indy_register_transaction_parser_for_sp}; use utils::{timeout, anoncreds, blob_storage, did, wallet, pool, callback}; use utils::constants::*; use std::sync::{Once, ONCE_INIT}; use std::mem; use std::ffi::CString; pub static mut SCHEMA_ID: &'static str = ""; pub static mut CRED_DEF_ID: &'static str = ""; pub static mut REV_REG_DEF_ID: &'static str = ""; pub const SCHEMA_DATA: &'static str = r#"{"id":"id","name":"gvt","version":"1.0","attr_names":["name", "age", "sex", "height"]}"#; const SUBMIT_RETRY_CNT: usize = 3; pub fn sign_and_submit_request(pool_handle: i32, wallet_handle: i32, submitter_did: &str, request_json: &str) -> Result<String, IndyError> { ledger::sign_and_submit_request(pool_handle, wallet_handle, submitter_did, request_json).wait() } pub fn submit_request_with_retries(pool_handle: i32, request_json: &str, previous_response: &str) -> Result<String, IndyError> { _submit_retry(extract_seq_no_from_reply(previous_response).unwrap(), || { submit_request(pool_handle, request_json) }) } pub fn submit_request(pool_handle: i32, request_json: &str) -> Result<String, IndyError> { ledger::submit_request(pool_handle, request_json).wait() } pub fn submit_action(pool_handle: i32, request_json: &str, nodes: Option<&str>, timeout: Option<i32>) -> Result<String, IndyError> { ledger::submit_action(pool_handle, request_json, nodes, timeout).wait() } pub fn sign_request(wallet_handle: i32, submitter_did: &str, request_json: &str) -> Result<String, IndyError> { ledger::sign_request(wallet_handle, submitter_did, request_json).wait() } pub fn multi_sign_request(wallet_handle: i32, submitter_did: &str, request_json: &str) -> Result<String, IndyError> { ledger::multi_sign_request(wallet_handle, 
submitter_did, request_json).wait() } pub fn extract_seq_no_from_reply(reply: &str) -> Result<u64, &'static str> { let metadata = get_response_metadata(reply).map_err(|_| "Can not get Metadata from Reply")?; ::serde_json::from_str::<::serde_json::Value>(&metadata).map_err(|_| "Metadata isn't valid JSON")? ["seqNo"] .as_u64().ok_or("Missed seqNo in reply") } fn _submit_retry<F>(minimal_timestamp: u64, submit_action: F) -> Result<String, IndyError> where F: Fn() -> Result<String, IndyError> { let mut i = 0; let action_result = loop { let action_result = submit_action()?; let retry = extract_seq_no_from_reply(&action_result) .map(|received_timestamp| received_timestamp < minimal_timestamp) .unwrap_or(true); if retry && i < SUBMIT_RETRY_CNT { ::std::thread::sleep(timeout::short_timeout()); i += 1; } else { break action_result; } }; Ok(action_result) } pub fn build_get_ddo_request(submitter_did: Option<&str>, target_did: &str) -> Result<String, IndyError> { ledger::build_get_ddo_request(submitter_did, target_did).wait() } pub fn build_nym_request(submitter_did: &str, target_did: &str, verkey: Option<&str>, alias: Option<&str>, role: Option<&str>) -> Result<String, IndyError> { ledger::build_nym_request(submitter_did, target_did, verkey, alias, role).wait() } pub fn build_attrib_request(submitter_did: &str, target_did: &str, hash: Option<&str>, raw: Option<&str>, enc: Option<&str>) -> Result<String, IndyError> { ledger::build_attrib_request(submitter_did, target_did, hash, raw, enc).wait() } pub fn build_get_attrib_request(submitter_did: Option<&str>, target_did: &str, raw: Option<&str>, hash: Option<&str>, enc: Option<&str>) -> Result<String, IndyError> { ledger::build_get_attrib_request(submitter_did, target_did, raw, hash, enc).wait() } pub fn build_get_nym_request(submitter_did: Option<&str>, target_did: &str) -> Result<String, IndyError> { ledger::build_get_nym_request(submitter_did, target_did).wait() } pub fn build_schema_request(submitter_did: &str, data: &str) 
-> Result<String, IndyError> { ledger::build_schema_request(submitter_did, data).wait() } pub fn build_get_schema_request(submitter_did: Option<&str>, id: &str) -> Result<String, IndyError> { ledger::build_get_schema_request(submitter_did, id).wait() } pub fn build_cred_def_txn(submitter_did: &str, cred_def_json: &str) -> Result<String, IndyError> { ledger::build_cred_def_request(submitter_did, cred_def_json).wait() } pub fn build_get_cred_def_request(submitter_did: Option<&str>, id: &str) -> Result<String, IndyError> { ledger::build_get_cred_def_request(submitter_did, id).wait() } pub fn build_node_request(submitter_did: &str, target_did: &str, data: &str) -> Result<String, IndyError> { ledger::build_node_request(submitter_did, target_did, data).wait() } pub fn build_get_validator_info_request(submitter_did: &str) -> Result<String, IndyError> { ledger::build_get_validator_info_request(submitter_did).wait() } pub fn build_get_txn_request(submitter_did: Option<&str>, data: i32, ledger_type: Option<&str>) -> Result<String, IndyError> { ledger::build_get_txn_request(submitter_did, ledger_type, data).wait() } pub fn build_pool_config_request(submitter_did: &str, writes: bool, force: bool) -> Result<String, IndyError> { ledger::build_pool_config_request(submitter_did, writes, force).wait() } pub fn build_pool_restart_request(submitter_did: &str, action: &str, datetime: Option<&str>) -> Result<String, IndyError> { ledger::build_pool_restart_request(submitter_did, action, datetime).wait() } pub fn build_pool_upgrade_request(submitter_did: &str, name: &str, version: &str, action: &str, sha256: &str, timeout: Option<u32>, schedule: Option<&str>, justification: Option<&str>, reinstall: bool, force: bool, package: Option<&str>) -> Result<String, IndyError> { ledger::build_pool_upgrade_request(submitter_did, name, version, action, sha256, timeout, schedule, justification, reinstall, force, package).wait() } pub fn build_revoc_reg_def_request(submitter_did: &str, data: &str) -> 
Result<String, IndyError> { ledger::build_revoc_reg_def_request(submitter_did, data).wait() } pub fn build_revoc_reg_entry_request(submitter_did: &str, rev_reg_def_id: &str, rev_reg_type: &str, value: &str) -> Result<String, IndyError> { ledger::build_revoc_reg_entry_request(submitter_did, rev_reg_def_id, rev_reg_type, value).wait() } pub fn build_get_revoc_reg_def_request(submitter_did: Option<&str>, id: &str) -> Result<String, IndyError> { ledger::build_get_revoc_reg_def_request(submitter_did, id).wait() } pub fn build_get_revoc_reg_request(submitter_did: Option<&str>, rev_reg_def_id: &str, timestamp: u64) -> Result<String, IndyError> { ledger::build_get_revoc_reg_request(submitter_did, rev_reg_def_id, timestamp as i64).wait() } pub fn build_get_revoc_reg_delta_request(submitter_did: Option<&str>, rev_reg_def_id: &str, from: Option<u64>, to: u64) -> Result<String, IndyError> { ledger::build_get_revoc_reg_delta_request(submitter_did, rev_reg_def_id, from.map(|f| f as i64).unwrap_or(-1), to as i64).wait() } pub fn parse_get_schema_response(get_schema_response: &str) -> Result<(String, String), IndyError> { ledger::parse_get_schema_response(get_schema_response).wait() } pub fn parse_get_cred_def_response(get_cred_def_response: &str) -> Result<(String, String), IndyError> { ledger::parse_get_cred_def_response(get_cred_def_response).wait() } pub fn parse_get_revoc_reg_def_response(get_revoc_reg_def_response: &str) -> Result<(String, String), IndyError> { ledger::parse_get_revoc_reg_def_response(get_revoc_reg_def_response).wait() } pub fn parse_get_revoc_reg_response(get_revoc_reg_response: &str) -> Result<(String, String, u64), IndyError> { ledger::parse_get_revoc_reg_response(get_revoc_reg_response).wait() } pub fn parse_get_revoc_reg_delta_response(get_revoc_reg_delta_response: &str) -> Result<(String, String, u64), IndyError> { ledger::parse_get_revoc_reg_delta_response(get_revoc_reg_delta_response).wait() } pub fn register_transaction_parser_for_sp(txn_type: &str, 
parse: CustomTransactionParser, free: CustomFree) -> Result<(), ErrorCode> { let (receiver, command_handle, cb) = callback::_closure_to_cb_ec(); let txn_type = CString::new(txn_type).unwrap(); let err = unsafe { indy_register_transaction_parser_for_sp(command_handle, txn_type.as_ptr(), Some(parse), Some(free), cb) }; super::results::result_to_empty(err, receiver) } pub fn get_response_metadata(response: &str) -> Result<String, IndyError> { ledger::get_response_metadata(response).wait() } pub fn build_auth_rule_request(submitter_did: &str, txn_type: &str, action: &str, field: &str, old_value: Option<&str>, new_value: Option<&str>, constraint: &str, ) -> Result<String, IndyError> { ledger::build_auth_rule_request(submitter_did, txn_type, action, field, old_value, new_value, constraint).wait() } pub fn build_get_auth_rule_request(submitter_did: Option<&str>, auth_type: Option<&str>, auth_action: Option<&str>, field: Option<&str>, old_value: Option<&str>, new_value: Option<&str>, ) -> Result<String, IndyError> { ledger::build_get_auth_rule_request(submitter_did, auth_type, auth_action, field, old_value, new_value).wait() } pub fn build_txn_author_agreement_request(submitter_did: &str, text: &str, version: &str) -> Result<String, IndyError> { ledger::build_txn_author_agreement_request(submitter_did, text, version).wait() } pub fn build_get_txn_author_agreement_request(submitter_did: Option<&str>, data: Option<&str>, ) -> Result<String, IndyError> { ledger::build_get_txn_author_agreement_request(submitter_did, data).wait() } pub fn build_acceptance_mechanisms_request(submitter_did: &str, aml: &str, version: &str, aml_context: Option<&str>) -> Result<String, IndyError> { ledger::build_acceptance_mechanisms_request(submitter_did, aml, version, aml_context).wait() } pub fn build_get_acceptance_mechanisms_request(submitter_did: Option<&str>, timestamp: Option<i64>, version: Option<&str>) -> Result<String, IndyError> { 
ledger::build_get_acceptance_mechanisms_request(submitter_did, timestamp, version).wait() } pub fn append_txn_author_agreement_acceptance_to_request(request_json: &str, text: Option<&str>, version: Option<&str>, taa_digest: Option<&str>, acc_mech_type: &str, time_of_acceptance: u64) -> Result<String, IndyError> { ledger::append_txn_author_agreement_acceptance_to_request(request_json, text, version, taa_digest, acc_mech_type, time_of_acceptance).wait() } pub fn post_entities() -> (&'static str, &'static str, &'static str) { lazy_static! { static ref COMMON_ENTITIES_INIT: Once = ONCE_INIT; } unsafe { COMMON_ENTITIES_INIT.call_once(|| { let pool_and_wallet_name = "COMMON_ENTITIES_POOL"; super::test::cleanup_storage(pool_and_wallet_name); let pool_handle = pool::create_and_open_pool_ledger(pool_and_wallet_name).unwrap(); let (wallet_handle, wallet_config) = wallet::create_and_open_default_wallet(pool_and_wallet_name).unwrap(); let (issuer_did, _) = did::create_store_and_publish_my_did_from_trustee(wallet_handle, pool_handle).unwrap(); let (schema_id, schema_json) = anoncreds::issuer_create_schema(&issuer_did, GVT_SCHEMA_NAME, SCHEMA_VERSION, GVT_SCHEMA_ATTRIBUTES).unwrap(); let schema_request = build_schema_request(&issuer_did, &schema_json).unwrap(); let schema_response = sign_and_submit_request(pool_handle, wallet_handle, &issuer_did, &schema_request).unwrap(); pool::check_response_type(&schema_response, ::utils::types::ResponseType::REPLY); let get_schema_request = build_get_schema_request(Some(&issuer_did), &schema_id).unwrap(); let get_schema_response = submit_request_with_retries(pool_handle, &get_schema_request, &schema_response).unwrap(); let (schema_id, schema_json) = parse_get_schema_response(&get_schema_response).unwrap(); let (cred_def_id, cred_def_json) = anoncreds::issuer_create_credential_definition(wallet_handle, &issuer_did, &schema_json, TAG_1, None, Some(&anoncreds::revocation_cred_def_config())).unwrap(); let cred_def_request = 
build_cred_def_txn(&issuer_did, &cred_def_json).unwrap(); let cred_def_response = sign_and_submit_request(pool_handle, wallet_handle, &issuer_did, &cred_def_request).unwrap(); pool::check_response_type(&cred_def_response, ::utils::types::ResponseType::REPLY); let tails_writer_config = anoncreds::tails_writer_config(); let tails_writer_handle = blob_storage::open_writer("default", &tails_writer_config).unwrap(); let (rev_reg_id, revoc_reg_def_json, rev_reg_entry_json) = anoncreds::issuer_create_and_store_revoc_reg(wallet_handle, &issuer_did, None, TAG_1, &cred_def_id, &anoncreds::issuance_on_demand_rev_reg_config(), tails_writer_handle).unwrap(); let rev_reg_def_request = build_revoc_reg_def_request(&issuer_did, &revoc_reg_def_json).unwrap(); let rev_reg_def_response = sign_and_submit_request(pool_handle, wallet_handle, &issuer_did, &rev_reg_def_request).unwrap(); pool::check_response_type(&rev_reg_def_response, ::utils::types::ResponseType::REPLY); let rev_reg_entry_request = build_revoc_reg_entry_request(&issuer_did, &rev_reg_id, REVOC_REG_TYPE, &rev_reg_entry_json).unwrap(); sign_and_submit_request(pool_handle, wallet_handle, &issuer_did, &rev_reg_entry_request).unwrap(); let res = mem::transmute(&schema_id as &str); mem::forget(schema_id); SCHEMA_ID = res; let res = mem::transmute(&cred_def_id as &str); mem::forget(cred_def_id); CRED_DEF_ID = res; let res = mem::transmute(&rev_reg_id as &str); mem::forget(rev_reg_id); REV_REG_DEF_ID = res; pool::close(pool_handle).unwrap(); pool::delete(pool_and_wallet_name).unwrap(); wallet::close_wallet(wallet_handle).unwrap(); wallet::delete_wallet(&wallet_config, WALLET_CREDENTIALS).unwrap(); }); (SCHEMA_ID, CRED_DEF_ID, REV_REG_DEF_ID) } }
use rustc_serialize::{Encodable, Decodable}; use std; use nix::unistd::gethostname; use super::master::{self, Master, MasterResult}; use super::slave::Slave; use super::error::ServerError; use super::value::Topic; use super::naming::Resolver; use tcpros::{Client, Message, PublisherStream, ServicePair}; use rosxmlrpc::serde::XmlRpcValue; pub struct Ros { master: Master, slave: Slave, hostname: String, resolver: Resolver, name: String, } impl Ros { pub fn new(name: &str) -> Result<Ros, ServerError> { let namespace = std::env::var("ROS_NAMESPACE").unwrap_or(String::from("")); Ros::new_raw(&namespace, name) } pub fn new_raw(namespace: &str, name: &str) -> Result<Ros, ServerError> { let master_uri = std::env::var("ROS_MASTER_URI") .unwrap_or(String::from("http://localhost:11311/")); let mut hostname = [0u8; 50]; gethostname(&mut hostname)?; let hostname = hostname.into_iter().take_while(|&v| *v != 0u8).map(|v| *v).collect::<Vec<_>>(); let hostname = String::from_utf8(hostname)?; let name = format!("{}/{}", namespace, name); let resolver = Resolver::new(&name)?; let slave = Slave::new(&master_uri, &hostname, 0, &name)?; let master = Master::new(&master_uri, &name, &slave.uri()); Ok(Ros { master: master, slave: slave, hostname: hostname, resolver: resolver, name: name, }) } pub fn map(&mut self, source: &str, destination: &str) -> Result<(), ServerError> { self.resolver.map(source, destination).map_err(|v| ServerError::Naming(v)) } pub fn node_uri(&self) -> &str { return self.slave.uri(); } pub fn param<'a, 'b>(&'a self, name: &'b str) -> Option<Parameter<'a>> { self.resolver.translate(name).ok().map(|v| { Parameter { master: &self.master, name: v, } }) } pub fn parameters(&self) -> MasterResult<Vec<String>> { self.master.get_param_names() } pub fn state(&self) -> MasterResult<master::SystemState> { self.master.get_system_state() } pub fn topics(&self) -> MasterResult<Vec<Topic>> { self.master.get_topic_types() } pub fn client<T: ServicePair>(&self, service: &str) -> 
Result<Client<T>, ServerError> { let name = self.resolver.translate(service)?; let uri = self.master.lookup_service(&name)?; Ok(Client::new(&self.name, &uri, &name)) } pub fn service<T, F>(&mut self, service: &str, handler: F) -> Result<(), ServerError> where T: ServicePair, F: Fn(T::Request) -> T::Response + Send + Sync + 'static { let name = self.resolver.translate(service)?; let api = self.slave.add_service::<T, F>(&self.hostname, &name, handler)?; if let Err(err) = self.master.register_service(&name, &api) { self.slave.remove_service(&name); self.master.unregister_service(&name, &api)?; Err(ServerError::from(err)) } else { Ok(()) } } pub fn subscribe<T, F>(&mut self, topic: &str, callback: F) -> Result<(), ServerError> where T: Message, F: Fn(T) -> () + Send + 'static { let name = self.resolver.translate(topic)?; self.slave.add_subscription::<T, F>(&name, callback)?; match self.master.register_subscriber(&name, &T::msg_type()) { Ok(publishers) => { if let Err(err) = self.slave .add_publishers_to_subscription(&name, publishers.into_iter()) { error!("Failed to subscribe to all publishers of topic '{}': {}", name, err); } Ok(()) } Err(err) => { self.slave.remove_subscription(&name); self.master.unregister_subscriber(&name)?; Err(ServerError::from(err)) } } } pub fn publish<T>(&mut self, topic: &str) -> Result<PublisherStream<T>, ServerError> where T: Message { let name = self.resolver.translate(topic)?; let stream = self.slave.add_publication::<T>(&self.hostname, &name)?; match self.master.register_publisher(&name, &T::msg_type()) { Ok(_) => Ok(stream), Err(error) => { error!("Failed to register publisher for topic '{}': {}", name, error); self.slave.remove_publication(&name); self.master.unregister_publisher(&name)?; Err(ServerError::from(error)) } } } } pub struct Parameter<'a> { master: &'a Master, name: String, } impl<'a> Parameter<'a> { pub fn get<T: Decodable>(&self) -> MasterResult<T> { self.master.get_param::<T>(&self.name) } pub fn get_raw(&self) -> 
MasterResult<XmlRpcValue> { self.master.get_param_any(&self.name) } pub fn set<T: Encodable>(&self, value: &T) -> MasterResult<()> { self.master.set_param::<T>(&self.name, value).and(Ok(())) } pub fn delete(&self) -> MasterResult<()> { self.master.delete_param(&self.name).and(Ok(())) } pub fn exists(&self) -> MasterResult<bool> { self.master.has_param(&self.name) } pub fn search(&self) -> MasterResult<String> { self.master.search_param(&self.name) } } Clear trailing namespace backlashes use rustc_serialize::{Encodable, Decodable}; use std; use nix::unistd::gethostname; use super::master::{self, Master, MasterResult}; use super::slave::Slave; use super::error::ServerError; use super::value::Topic; use super::naming::Resolver; use tcpros::{Client, Message, PublisherStream, ServicePair}; use rosxmlrpc::serde::XmlRpcValue; pub struct Ros { master: Master, slave: Slave, hostname: String, resolver: Resolver, name: String, } impl Ros { pub fn new(name: &str) -> Result<Ros, ServerError> { let namespace = std::env::var("ROS_NAMESPACE").unwrap_or(String::from("")); Ros::new_raw(&namespace, name) } pub fn new_raw(namespace: &str, name: &str) -> Result<Ros, ServerError> { let namespace = namespace.trim_right_matches("/"); let master_uri = std::env::var("ROS_MASTER_URI") .unwrap_or(String::from("http://localhost:11311/")); let mut hostname = [0u8; 50]; gethostname(&mut hostname)?; let hostname = hostname.into_iter().take_while(|&v| *v != 0u8).map(|v| *v).collect::<Vec<_>>(); let hostname = String::from_utf8(hostname)?; let name = format!("{}/{}", namespace, name); let resolver = Resolver::new(&name)?; let slave = Slave::new(&master_uri, &hostname, 0, &name)?; let master = Master::new(&master_uri, &name, &slave.uri()); Ok(Ros { master: master, slave: slave, hostname: hostname, resolver: resolver, name: name, }) } pub fn map(&mut self, source: &str, destination: &str) -> Result<(), ServerError> { self.resolver.map(source, destination).map_err(|v| ServerError::Naming(v)) } pub fn 
node_uri(&self) -> &str { return self.slave.uri(); } pub fn param<'a, 'b>(&'a self, name: &'b str) -> Option<Parameter<'a>> { self.resolver.translate(name).ok().map(|v| { Parameter { master: &self.master, name: v, } }) } pub fn parameters(&self) -> MasterResult<Vec<String>> { self.master.get_param_names() } pub fn state(&self) -> MasterResult<master::SystemState> { self.master.get_system_state() } pub fn topics(&self) -> MasterResult<Vec<Topic>> { self.master.get_topic_types() } pub fn client<T: ServicePair>(&self, service: &str) -> Result<Client<T>, ServerError> { let name = self.resolver.translate(service)?; let uri = self.master.lookup_service(&name)?; Ok(Client::new(&self.name, &uri, &name)) } pub fn service<T, F>(&mut self, service: &str, handler: F) -> Result<(), ServerError> where T: ServicePair, F: Fn(T::Request) -> T::Response + Send + Sync + 'static { let name = self.resolver.translate(service)?; let api = self.slave.add_service::<T, F>(&self.hostname, &name, handler)?; if let Err(err) = self.master.register_service(&name, &api) { self.slave.remove_service(&name); self.master.unregister_service(&name, &api)?; Err(ServerError::from(err)) } else { Ok(()) } } pub fn subscribe<T, F>(&mut self, topic: &str, callback: F) -> Result<(), ServerError> where T: Message, F: Fn(T) -> () + Send + 'static { let name = self.resolver.translate(topic)?; self.slave.add_subscription::<T, F>(&name, callback)?; match self.master.register_subscriber(&name, &T::msg_type()) { Ok(publishers) => { if let Err(err) = self.slave .add_publishers_to_subscription(&name, publishers.into_iter()) { error!("Failed to subscribe to all publishers of topic '{}': {}", name, err); } Ok(()) } Err(err) => { self.slave.remove_subscription(&name); self.master.unregister_subscriber(&name)?; Err(ServerError::from(err)) } } } pub fn publish<T>(&mut self, topic: &str) -> Result<PublisherStream<T>, ServerError> where T: Message { let name = self.resolver.translate(topic)?; let stream = 
self.slave.add_publication::<T>(&self.hostname, &name)?; match self.master.register_publisher(&name, &T::msg_type()) { Ok(_) => Ok(stream), Err(error) => { error!("Failed to register publisher for topic '{}': {}", name, error); self.slave.remove_publication(&name); self.master.unregister_publisher(&name)?; Err(ServerError::from(error)) } } } } pub struct Parameter<'a> { master: &'a Master, name: String, } impl<'a> Parameter<'a> { pub fn get<T: Decodable>(&self) -> MasterResult<T> { self.master.get_param::<T>(&self.name) } pub fn get_raw(&self) -> MasterResult<XmlRpcValue> { self.master.get_param_any(&self.name) } pub fn set<T: Encodable>(&self, value: &T) -> MasterResult<()> { self.master.set_param::<T>(&self.name, value).and(Ok(())) } pub fn delete(&self) -> MasterResult<()> { self.master.delete_param(&self.name).and(Ok(())) } pub fn exists(&self) -> MasterResult<bool> { self.master.has_param(&self.name) } pub fn search(&self) -> MasterResult<String> { self.master.search_param(&self.name) } }
mod settings; pub mod parser; mod help; mod validator; mod usage; // Std use std::env; use std::ffi::OsString; use std::fmt; use std::io::{self, BufRead, BufWriter, Write}; use std::path::{Path, PathBuf}; use std::process; use std::fs::File; use std::iter::Peekable; // Third Party #[cfg(feature = "yaml")] use yaml_rust::Yaml; // Internal use app::parser::Parser; use app::help::Help; use args::{Arg, ArgGroup, ArgMatcher, ArgMatches}; use args::settings::ArgSettings; use errors::Result as ClapResult; pub use self::settings::{AppFlags, AppSettings}; use completions::{ComplGen, Shell}; use fmt::ColorWhen; #[doc(hidden)] #[allow(dead_code)] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum Propagation<'a> { To(&'a str), Full, NextLevel, None } /// Used to create a representation of a command line program and all possible command line /// arguments. Application settings are set using the "builder pattern" with the /// [`App::get_matches`] family of methods being the terminal methods that starts the /// runtime-parsing process. These methods then return information about the user supplied /// arguments (or lack there of). /// /// **NOTE:** There aren't any mandatory "options" that one must set. The "options" may /// also appear in any order (so long as one of the [`App::get_matches`] methods is the last method /// called). /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let m = App::new("My Program") /// .author("Me, me@mail.com") /// .version("1.0.2") /// .about("Explains in brief what the program does") /// .arg( /// Arg::with_name("in_file").index(1) /// ) /// .after_help("Longer explanation to appear after the options when \ /// displaying the help information from --help or -h") /// .get_matches(); /// /// // Your program logic starts here... 
/// ``` /// [`App::get_matches`]: ./struct.App.html#method.get_matches #[derive(Default, Debug, Clone)] pub struct App<'a, 'b> where 'a: 'b, { #[doc(hidden)] pub name: String, #[doc(hidden)] pub bin_name: Option<String>, #[doc(hidden)] pub author: Option<&'b str>, #[doc(hidden)] pub version: Option<&'b str>, #[doc(hidden)] pub long_version: Option<&'b str>, #[doc(hidden)] pub about: Option<&'b str>, #[doc(hidden)] pub long_about: Option<&'b str>, #[doc(hidden)] pub more_help: Option<&'b str>, #[doc(hidden)] pub pre_help: Option<&'b str>, #[doc(hidden)] pub aliases: Option<Vec<(&'b str, bool)>>, // (name, visible) #[doc(hidden)] pub usage_str: Option<&'b str>, #[doc(hidden)] pub usage: Option<String>, #[doc(hidden)] pub help_str: Option<&'b str>, #[doc(hidden)] pub disp_ord: usize, #[doc(hidden)] pub term_w: Option<usize>, #[doc(hidden)] pub max_w: Option<usize>, #[doc(hidden)] pub template: Option<&'b str>, #[doc(hidden)] pub settings: AppFlags, #[doc(hidden)] pub g_settings: AppFlags, #[doc(hidden)] pub args: Vec<Arg<'a, 'b>>, #[doc(hidden)] pub subcommands: Vec<App<'a, 'b>>, #[doc(hidden)] pub groups: Vec<ArgGroup<'a>>, #[doc(hidden)] help_short: Option<char>, #[doc(hidden)] version_short: Option<char>, #[doc(hidden)] pub help_message: Option<&'a str>, #[doc(hidden)] pub version_message: Option<&'a str>, #[doc(hidden)] pub help_headings: Vec<Option<&'a str>>, } impl<'a, 'b> App<'a, 'b> { /// Creates a new instance of an application requiring a name. The name may be, but doesn't /// have to be same as the binary. The name will be displayed to the user when they request to /// print version or help and usage information. 
/// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let prog = App::new("My Program") /// # ; /// ``` pub fn new<S: Into<String>>(n: S) -> Self { App { name: n.into(), ..Default::default() } } /// Get the name of the app pub fn get_name(&self) -> &str { &self.name } /// Get the name of the binary pub fn get_bin_name(&self) -> Option<&str> { self.bin_name.as_ref().map(|s| s.as_str()) } /// Sets a string of author(s) that will be displayed to the user when they /// request the help information with `--help` or `-h`. /// /// **Pro-tip:** Use `clap`s convenience macro [`crate_authors!`] to automatically set your /// application's author(s) to the same thing as your crate at compile time. See the [`examples/`] /// directory for more information /// /// See the [`examples/`] /// directory for more information /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .author("Me, me@mymain.com") /// # ; /// ``` /// [`crate_authors!`]: ./macro.crate_authors!.html /// [`examples/`]: https://github.com/kbknapp/clap-rs/tree/master/examples pub fn author<S: Into<&'b str>>(mut self, author: S) -> Self { self.author = Some(author.into()); self } /// Overrides the system-determined binary name. This should only be used when absolutely /// necessary, such as when the binary name for your application is misleading, or perhaps /// *not* how the user should invoke your program. /// /// **Pro-tip:** When building things such as third party `cargo` subcommands, this setting /// **should** be used! /// /// **NOTE:** This command **should not** be used for [`SubCommand`]s. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("My Program") /// .bin_name("my_binary") /// # ; /// ``` /// [`SubCommand`]: ./struct.SubCommand.html pub fn bin_name<S: Into<String>>(mut self, name: S) -> Self { self.bin_name = Some(name.into()); self } /// Sets a string describing what the program does. 
This will be displayed when displaying help /// information with `-h`. /// /// **NOTE:** If only `about` is provided, and not [`App::long_about`] but the user requests /// `--help` clap will still display the contents of `about` appropriately /// /// **NOTE:** Only [`App::about`] is used in completion script generation in order to be /// concise /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .about("Does really amazing things to great people") /// # ; /// ``` /// [`App::long_about`]: ./struct.App.html#method.long_about pub fn about<S: Into<&'b str>>(mut self, about: S) -> Self { self.about = Some(about.into()); self } /// Sets a string describing what the program does. This will be displayed when displaying help /// information. /// /// **NOTE:** If only `long_about` is provided, and not [`App::about`] but the user requests /// `-h` clap will still display the contents of `long_about` appropriately /// /// **NOTE:** Only [`App::about`] is used in completion script generation in order to be /// concise /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .long_about( /// "Does really amazing things to great people. Now let's talk a little /// more in depth about how this subcommand really works. It may take about /// a few lines of text, but that's ok!") /// # ; /// ``` /// [`App::about`]: ./struct.App.html#method.about pub fn long_about<S: Into<&'b str>>(mut self, about: S) -> Self { self.long_about = Some(about.into()); self } /// Sets the program's name. This will be displayed when displaying help information. /// /// **Pro-top:** This function is particularly useful when configuring a program via /// [`App::from_yaml`] in conjunction with the [`crate_name!`] macro to derive the program's /// name from its `Cargo.toml`. 
/// /// # Examples /// ```ignore /// # #[macro_use] /// # extern crate clap; /// # use clap::App; /// # fn main() { /// let yml = load_yaml!("app.yml"); /// let app = App::from_yaml(yml) /// .name(crate_name!()); /// /// // continued logic goes here, such as `app.get_matches()` etc. /// # } /// ``` /// /// [`App::from_yaml`]: ./struct.App.html#method.from_yaml /// [`crate_name!`]: ./macro.crate_name.html pub fn name<S: Into<String>>(mut self, name: S) -> Self { self.name = name.into(); self } /// Adds additional help information to be displayed in addition to auto-generated help. This /// information is displayed **after** the auto-generated help information. This is often used /// to describe how to use the arguments, or caveats to be noted. /// /// # Examples /// /// ```no_run /// # use clap::App; /// App::new("myprog") /// .after_help("Does really amazing things to great people...but be careful with -R") /// # ; /// ``` pub fn after_help<S: Into<&'b str>>(mut self, help: S) -> Self { self.more_help = Some(help.into()); self } /// Adds additional help information to be displayed in addition to auto-generated help. This /// information is displayed **before** the auto-generated help information. This is often used /// for header information. /// /// # Examples /// /// ```no_run /// # use clap::App; /// App::new("myprog") /// .before_help("Some info I'd like to appear before the help info") /// # ; /// ``` pub fn before_help<S: Into<&'b str>>(mut self, help: S) -> Self { self.pre_help = Some(help.into()); self } /// Sets a string of the version number to be displayed when displaying version or help /// information with `-V`. 
/// /// **NOTE:** If only `version` is provided, and not [`App::long_version`] but the user /// requests `--version` clap will still display the contents of `version` appropriately /// /// **Pro-tip:** Use `clap`s convenience macro [`crate_version!`] to automatically set your /// application's version to the same thing as your crate at compile time. See the [`examples/`] /// directory for more information /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .version("v0.1.24") /// # ; /// ``` /// [`crate_version!`]: ./macro.crate_version!.html /// [`examples/`]: https://github.com/kbknapp/clap-rs/tree/master/examples /// [`App::long_version`]: ./struct.App.html#method.long_version pub fn version<S: Into<&'b str>>(mut self, ver: S) -> Self { self.version = Some(ver.into()); self } /// Sets a string of the version number to be displayed when displaying version or help /// information with `--version`. /// /// **NOTE:** If only `long_version` is provided, and not [`App::version`] but the user /// requests `-V` clap will still display the contents of `long_version` appropriately /// /// **Pro-tip:** Use `clap`s convenience macro [`crate_version!`] to automatically set your /// application's version to the same thing as your crate at compile time. See the [`examples/`] /// directory for more information /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .long_version( /// "v0.1.24 /// commit: abcdef89726d /// revision: 123 /// release: 2 /// binary: myprog") /// # ; /// ``` /// [`crate_version!`]: ./macro.crate_version!.html /// [`examples/`]: https://github.com/kbknapp/clap-rs/tree/master/examples /// [`App::version`]: ./struct.App.html#method.version pub fn long_version<S: Into<&'b str>>(mut self, ver: S) -> Self { self.long_version = Some(ver.into()); self } /// Overrides the `clap` generated usage string. 
/// /// This will be displayed to the user when errors are found in argument parsing. /// /// **CAUTION:** Using this setting disables `clap`s "context-aware" usage strings. After this /// setting is set, this will be the only usage string displayed to the user! /// /// **NOTE:** This will not replace the entire help message, *only* the portion /// showing the usage. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .override_usage("myapp [-clDas] <some_file>") /// # ; /// ``` /// [`ArgMatches::usage`]: ./struct.ArgMatches.html#method.usage pub fn override_usage<S: Into<&'b str>>(mut self, usage: S) -> Self { self.usage_str = Some(usage.into()); self } /// Overrides the `clap` generated help message. This should only be used /// when the auto-generated message does not suffice. /// /// This will be displayed to the user when they use `--help` or `-h` /// /// **NOTE:** This replaces the **entire** help message, so nothing will be auto-generated. /// /// **NOTE:** This **only** replaces the help message for the current command, meaning if you /// are using subcommands, those help messages will still be auto-generated unless you /// specify a [`Arg::override_help`] for them as well. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myapp") /// .override_help("myapp v1.0\n\ /// Does awesome things\n\ /// (C) me@mail.com\n\n\ /// /// USAGE: myapp <opts> <comamnd>\n\n\ /// /// Options:\n\ /// -h, --helpe Dispay this message\n\ /// -V, --version Display version info\n\ /// -s <stuff> Do something with stuff\n\ /// -v Be verbose\n\n\ /// /// Commmands:\n\ /// help Prints this message\n\ /// work Do some work") /// # ; /// ``` /// [`Arg::override_help`]: ./struct.Arg.html#method.override_help pub fn override_help<S: Into<&'b str>>(mut self, help: S) -> Self { self.help_str = Some(help.into()); self } /// Sets the help template to be used, overriding the default format. 
/// /// Tags arg given inside curly brackets. /// /// Valid tags are: /// /// * `{bin}` - Binary name. /// * `{version}` - Version number. /// * `{author}` - Author information. /// * `{about}` - General description (from [`App::about`]) /// * `{usage}` - Automatically generated or given usage string. /// * `{all-args}` - Help for all arguments (options, flags, positionals arguments, /// and subcommands) including titles. /// * `{unified}` - Unified help for options and flags. Note, you must *also* set /// [`AppSettings::UnifiedHelpMessage`] to fully merge both options and /// flags, otherwise the ordering is "best effort" /// * `{flags}` - Help for flags. /// * `{options}` - Help for options. /// * `{positionals}` - Help for positionals arguments. /// * `{subcommands}` - Help for subcommands. /// * `{after-help}` - Help from [`App::after_help`] /// * `{before-help}` - Help from [`App::before_help`] /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .version("1.0") /// .help_template("{bin} ({version}) - {usage}") /// # ; /// ``` /// **NOTE:**The template system is, on purpose, very simple. Therefore the tags have to writen /// in the lowercase and without spacing. /// [`App::about`]: ./struct.App.html#method.about /// [`App::after_help`]: ./struct.App.html#method.after_help /// [`App::before_help`]: ./struct.App.html#method.before_help /// [`AppSettings::UnifiedHelpMessage`]: ./enum.AppSettings.html#variant.UnifiedHelpMessage pub fn help_template<S: Into<&'b str>>(mut self, s: S) -> Self { self.template = Some(s.into()); self } /// Enables a single command, or [`SubCommand`], level settings. /// /// See [`AppSettings`] for a full list of possibilities and examples. 
/// /// # Examples /// /// ```no_run /// # use clap::{App, Arg, AppSettings}; /// App::new("myprog") /// .setting(AppSettings::SubcommandRequired) /// .setting(AppSettings::WaitOnError) /// # ; /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`AppSettings`]: ./enum.AppSettings.html pub fn setting(mut self, setting: AppSettings) -> Self { self.settings.set(setting); self } /// Disables a single command, or [`SubCommand`], level setting. /// /// See [`AppSettings`] for a full list of possibilities and examples. /// /// # Examples /// /// ```no_run /// # use clap::{App, AppSettings}; /// App::new("myprog") /// .unset_setting(AppSettings::ColorAuto) /// # ; /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`AppSettings`]: ./enum.AppSettings.html /// [global]: ./struct.App.html#method.global_setting pub fn unset_setting(mut self, setting: AppSettings) -> Self { self.settings.unset(setting); self } /// Enables a single setting that is propagated down through all child subcommands. /// /// See [`AppSettings`] for a full list of possibilities and examples. /// /// **NOTE**: The setting is *only* propagated *down* and not up through parent commands. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg, AppSettings}; /// App::new("myprog") /// .global_setting(AppSettings::SubcommandRequired) /// # ; /// ``` /// [`AppSettings`]: ./enum.AppSettings.html pub fn global_setting(mut self, setting: AppSettings) -> Self { self.settings.set(setting); self.g_settings.set(setting); self } /// Disables a global setting, and stops propagating down to child subcommands. /// /// See [`AppSettings`] for a full list of possibilities and examples. 
/// /// **NOTE:** The setting being unset will be unset from both local and [global] settings /// /// # Examples /// /// ```no_run /// # use clap::{App, AppSettings}; /// App::new("myprog") /// .unset_global_setting(AppSettings::ColorAuto) /// # ; /// ``` /// [`AppSettings`]: ./enum.AppSettings.html /// [global]: ./struct.App.html#method.global_setting pub fn unset_global_setting(mut self, setting: AppSettings) -> Self { self.settings.unset(setting); self.g_settings.unset(setting); self } /// Sets the terminal width at which to wrap help messages. Defaults to `120`. Using `0` will /// ignore terminal widths and use source formatting. /// /// `clap` automatically tries to determine the terminal width on Unix, Linux, OSX and Windows /// if the `wrap_help` cargo "feature" has been used while compiling. If the terminal width /// cannot be determined, `clap` defaults to `120`. /// /// **NOTE:** This setting applies globally and *not* on a per-command basis. /// /// **NOTE:** This setting must be set **before** any subcommands are added! /// /// # Platform Specific /// /// Only Unix, Linux, OSX and Windows support automatic determination of terminal width. /// Even on those platforms, this setting is useful if for any reason the terminal width /// cannot be determined. /// /// # Examples /// /// ```no_run /// # use clap::App; /// App::new("myprog") /// .set_term_width(80) /// # ; /// ``` pub fn set_term_width(mut self, width: usize) -> Self { self.term_w = Some(width); self } /// Sets the max terminal width at which to wrap help messages. Using `0` will ignore terminal /// widths and use source formatting. /// /// `clap` automatically tries to determine the terminal width on Unix, Linux, OSX and Windows /// if the `wrap_help` cargo "feature" has been used while compiling, but one might want to /// limit the size (e.g. when the terminal is running fullscreen). /// /// **NOTE:** This setting applies globally and *not* on a per-command basis. 
/// /// **NOTE:** This setting must be set **before** any subcommands are added! /// /// # Platform Specific /// /// Only Unix, Linux, OSX and Windows support automatic determination of terminal width. /// /// # Examples /// /// ```no_run /// # use clap::App; /// App::new("myprog") /// .max_term_width(100) /// # ; /// ``` pub fn max_term_width(mut self, w: usize) -> Self { self.max_w = Some(w); self } /// Adds an [argument] to the list of valid possibilities. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// // Adding a single "flag" argument with a short and help text, using Arg::with_name() /// .arg( /// Arg::with_name("debug") /// .short("d") /// .help("turns on debugging mode") /// ) /// // Adding a single "option" argument with a short, a long, and help text using the less /// // verbose Arg::from_usage() /// .arg( /// Arg::from_usage("-c --config=[CONFIG] 'Optionally sets a config file to use'") /// ) /// # ; /// ``` /// [argument]: ./struct.Arg.html pub fn arg<A: Into<Arg<'a, 'b>>>(mut self, a: A) -> Self { let help_heading : Option<&'a str> = if let Some(option_str) = self.help_headings.last() { *option_str } else { None }; let arg = a.into().help_heading(help_heading); self.args.push(arg); self } /// Set a custom section heading for future args. Every call to arg will /// have this header (instead of its default header) until a subsequent /// call to help_heading pub fn help_heading(mut self, heading: &'a str) -> Self { self.help_headings.push(Some(heading)); self } /// Stop using custom section headings. 
pub fn stop_custom_headings(mut self) -> Self { self.help_headings.push(None); self } /// Adds multiple [arguments] to the list of valid possibilties /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .args(&[ /// Arg::from_usage("[debug] -d 'turns on debugging info'"), /// Arg::with_name("input").index(1).help("the input file to use") /// ]) /// # ; /// ``` /// [arguments]: ./struct.Arg.html pub fn args<I, T>(mut self, args: I) -> Self where I: IntoIterator<Item = T>, T: Into<Arg<'a, 'b>>, { // @TODO @perf @p4 @v3-beta: maybe extend_from_slice would be possible and perform better? // But that may also not let us do `&["-a 'some'", "-b 'other']` because of not Into<Arg> for arg in args.into_iter() { self.args.push(arg.into()); } self } /// Allows adding a [`SubCommand`] alias, which function as "hidden" subcommands that /// automatically dispatch as if this subcommand was used. This is more efficient, and easier /// than creating multiple hidden subcommands as one only needs to check for the existence of /// this command, and not all variants. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg, SubCommand}; /// let m = App::new("myprog") /// .subcommand(SubCommand::with_name("test") /// .alias("do-stuff")) /// .get_matches_from(vec!["myprog", "do-stuff"]); /// assert_eq!(m.subcommand_name(), Some("test")); /// ``` /// [`SubCommand`]: ./struct.SubCommand.html pub fn alias<S: Into<&'b str>>(mut self, name: S) -> Self { if let Some(ref mut als) = self.aliases { als.push((name.into(), false)); } else { self.aliases = Some(vec![(name.into(), false)]); } self } /// Allows adding [`SubCommand`] aliases, which function as "hidden" subcommands that /// automatically dispatch as if this subcommand was used. This is more efficient, and easier /// than creating multiple hidden subcommands as one only needs to check for the existence of /// this command, and not all variants. 
/// /// # Examples /// /// ```rust /// # use clap::{App, Arg, SubCommand}; /// let m = App::new("myprog") /// .subcommand(SubCommand::with_name("test") /// .aliases(&["do-stuff", "do-tests", "tests"])) /// .arg(Arg::with_name("input") /// .help("the file to add") /// .index(1) /// .required(false)) /// .get_matches_from(vec!["myprog", "do-tests"]); /// assert_eq!(m.subcommand_name(), Some("test")); /// ``` /// [`SubCommand`]: ./struct.SubCommand.html pub fn aliases(mut self, names: &[&'b str]) -> Self { if let Some(ref mut als) = self.aliases { for n in names { als.push((n, false)); } } else { self.aliases = Some(names.iter().map(|n| (*n, false)).collect::<Vec<_>>()); } self } /// Allows adding a [`SubCommand`] alias that functions exactly like those defined with /// [`App::alias`], except that they are visible inside the help message. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg, SubCommand}; /// let m = App::new("myprog") /// .subcommand(SubCommand::with_name("test") /// .visible_alias("do-stuff")) /// .get_matches_from(vec!["myprog", "do-stuff"]); /// assert_eq!(m.subcommand_name(), Some("test")); /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`App::alias`]: ./struct.App.html#method.alias pub fn visible_alias<S: Into<&'b str>>(mut self, name: S) -> Self { if let Some(ref mut als) = self.aliases { als.push((name.into(), true)); } else { self.aliases = Some(vec![(name.into(), true)]); } self } /// Allows adding multiple [`SubCommand`] aliases that functions exactly like those defined /// with [`App::aliases`], except that they are visible inside the help message. 
/// /// # Examples /// /// ```no_run /// # use clap::{App, Arg, SubCommand}; /// let m = App::new("myprog") /// .subcommand(SubCommand::with_name("test") /// .visible_aliases(&["do-stuff", "tests"])) /// .get_matches_from(vec!["myprog", "do-stuff"]); /// assert_eq!(m.subcommand_name(), Some("test")); /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`App::aliases`]: ./struct.App.html#method.aliases pub fn visible_aliases(mut self, names: &[&'b str]) -> Self { if let Some(ref mut als) = self.aliases { for n in names { als.push((n, true)); } } else { self.aliases = Some(names.iter().map(|n| (*n, true)).collect::<Vec<_>>()); } self } /// Adds an [`ArgGroup`] to the application. [`ArgGroup`]s are a family of related arguments. /// By placing them in a logical group, you can build easier requirement and exclusion rules. /// For instance, you can make an entire [`ArgGroup`] required, meaning that one (and *only* /// one) argument from that group must be present at runtime. /// /// You can also do things such as name an [`ArgGroup`] as a conflict to another argument. /// Meaning any of the arguments that belong to that group will cause a failure if present with /// the conflicting argument. /// /// Another added benefit of [`ArgGroup`]s is that you can extract a value from a group instead /// of determining exactly which argument was used. /// /// Finally, using [`ArgGroup`]s to ensure exclusion between arguments is another very common /// use /// /// # Examples /// /// The following example demonstrates using an [`ArgGroup`] to ensure that one, and only one, /// of the arguments from the specified group is present at runtime. 
/// /// ```no_run /// # use clap::{App, ArgGroup}; /// App::new("app") /// .args_from_usage( /// "--set-ver [ver] 'set the version manually' /// --major 'auto increase major' /// --minor 'auto increase minor' /// --patch 'auto increase patch'") /// .group(ArgGroup::with_name("vers") /// .args(&["set-ver", "major", "minor","patch"]) /// .required(true)) /// # ; /// ``` /// [`ArgGroup`]: ./struct.ArgGroup.html pub fn group(mut self, group: ArgGroup<'a>) -> Self { self.groups.push(group); self } /// Adds multiple [`ArgGroup`]s to the [`App`] at once. /// /// # Examples /// /// ```no_run /// # use clap::{App, ArgGroup}; /// App::new("app") /// .args_from_usage( /// "--set-ver [ver] 'set the version manually' /// --major 'auto increase major' /// --minor 'auto increase minor' /// --patch 'auto increase patch' /// -c [FILE] 'a config file' /// -i [IFACE] 'an interface'") /// .groups(&[ /// ArgGroup::with_name("vers") /// .args(&["set-ver", "major", "minor","patch"]) /// .required(true), /// ArgGroup::with_name("input") /// .args(&["c", "i"]) /// ]) /// # ; /// ``` /// [`ArgGroup`]: ./struct.ArgGroup.html /// [`App`]: ./struct.App.html pub fn groups(mut self, groups: &[ArgGroup<'a>]) -> Self { for g in groups { self = self.group(g.into()); } self } /// Adds a [`SubCommand`] to the list of valid possibilities. Subcommands are effectively /// sub-[`App`]s, because they can contain their own arguments, subcommands, version, usage, /// etc. They also function just like [`App`]s, in that they get their own auto generated help, /// version, and usage. 
/// /// # Examples /// /// ```no_run /// # use clap::{App, Arg, SubCommand}; /// App::new("myprog") /// .subcommand(SubCommand::with_name("config") /// .about("Controls configuration features") /// .arg_from_usage("<config> 'Required configuration file to use'")) /// # ; /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`App`]: ./struct.App.html pub fn subcommand(mut self, subcmd: App<'a, 'b>) -> Self { self.subcommands.push(subcmd); self } /// Adds multiple subcommands to the list of valid possibilities by iterating over an /// [`IntoIterator`] of [`SubCommand`]s /// /// # Examples /// /// ```rust /// # use clap::{App, Arg, SubCommand}; /// # App::new("myprog") /// .subcommands( vec![ /// SubCommand::with_name("config").about("Controls configuration functionality") /// .arg(Arg::with_name("config_file").index(1)), /// SubCommand::with_name("debug").about("Controls debug functionality")]) /// # ; /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html pub fn subcommands<I>(mut self, subcmds: I) -> Self where I: IntoIterator<Item = App<'a, 'b>>, { for subcmd in subcmds { self.subcommands.push(subcmd); } self } /// Allows custom ordering of [`SubCommand`]s within the help message. Subcommands with a lower /// value will be displayed first in the help message. This is helpful when one would like to /// emphasise frequently used subcommands, or prioritize those towards the top of the list. /// Duplicate values **are** allowed. Subcommands with duplicate display orders will be /// displayed in alphabetical order. /// /// **NOTE:** The default is 999 for all subcommands. /// /// # Examples /// /// ```rust /// # use clap::{App, SubCommand}; /// let m = App::new("cust-ord") /// .subcommand(SubCommand::with_name("alpha") // typically subcommands are grouped /// // alphabetically by name. 
Subcommands /// // without a display_order have a value of /// // 999 and are displayed alphabetically with /// // all other 999 subcommands /// .about("Some help and text")) /// .subcommand(SubCommand::with_name("beta") /// .display_order(1) // In order to force this subcommand to appear *first* /// // all we have to do is give it a value lower than 999. /// // Any other subcommands with a value of 1 will be displayed /// // alphabetically with this one...then 2 values, then 3, etc. /// .about("I should be first!")) /// .get_matches_from(vec![ /// "cust-ord", "--help" /// ]); /// ``` /// /// The above example displays the following help message /// /// ```text /// cust-ord /// /// USAGE: /// cust-ord [FLAGS] [OPTIONS] /// /// FLAGS: /// -h, --help Prints help information /// -V, --version Prints version information /// /// SUBCOMMANDS: /// beta I should be first! /// alpha Some help and text /// ``` /// [`SubCommand`]: ./struct.SubCommand.html pub fn display_order(mut self, ord: usize) -> Self { self.disp_ord = ord; self } /// Allows one to mutate an [`Arg`] after it's been added to an `App`. 
/// /// # Examples /// /// ```rust /// # use clap::{App, Arg}; /// /// let mut app = App::new("foo") /// .arg(Arg::with_name("bar") /// .short("b")) /// .mut_arg("bar", |a| a.short("B")); /// /// let res = app.try_get_matches_from_mut(vec!["foo", "-b"]); /// /// // Since we changed `bar`'s short to "B" this should err as there /// // is no `-b` anymore, only `-B` /// /// assert!(res.is_err()); /// /// let res = app.try_get_matches_from_mut(vec!["foo", "-B"]); /// assert!(res.is_ok()); /// ``` /// [`Arg`]: ./struct.Arg.html pub fn mut_arg<F>(mut self, arg: &'a str, f: F) -> Self where F: FnOnce(Arg<'a, 'b>) -> Arg<'a, 'b> { let i = self.args.iter().enumerate().filter(|&(i, a)| a.name == arg).map(|(i, _)| i).next(); let a = if let Some(idx) = i { let mut a = self.args.swap_remove(idx); f(a) } else { let mut a = Arg::with_name(arg); f(a) }; self.args.push(a); self } /// Prints the full help message to [`io::stdout()`] using a [`BufWriter`] using the same /// method as if someone ran `-h` to request the help message /// /// **NOTE:** clap has the ability to distinguish between "short" and "long" help messages /// depending on if the user ran [`-h` (short)] or [`--help` (long)] /// /// # Examples /// /// ```rust /// # use clap::App; /// let mut app = App::new("myprog"); /// app.print_help(); /// ``` /// [`io::stdout()`]: https://doc.rust-lang.org/std/io/fn.stdout.html /// [`BufWriter`]: https://doc.rust-lang.org/std/io/struct.BufWriter.html /// [`-h` (short)]: ./struct.Arg.html#method.help /// [`--help` (long)]: ./struct.Arg.html#method.long_help pub fn print_help(&mut self) -> ClapResult<()> { // If there are global arguments, or settings we need to propagate them down to subcommands // before parsing incase we run into a subcommand self._build(Propagation::NextLevel); let out = io::stdout(); let mut buf_w = BufWriter::new(out.lock()); self.write_help(&mut buf_w) } /// Prints the full help message to [`io::stdout()`] using a [`BufWriter`] using the same /// method as 
if someone ran `--help` to request the help message /// /// **NOTE:** clap has the ability to distinguish between "short" and "long" help messages /// depending on if the user ran [`-h` (short)] or [`--help` (long)] /// /// # Examples /// /// ```rust /// # use clap::App; /// let mut app = App::new("myprog"); /// app.print_long_help(); /// ``` /// [`io::stdout()`]: https://doc.rust-lang.org/std/io/fn.stdout.html /// [`BufWriter`]: https://doc.rust-lang.org/std/io/struct.BufWriter.html /// [`-h` (short)]: ./struct.Arg.html#method.help /// [`--help` (long)]: ./struct.Arg.html#method.long_help pub fn print_long_help(&mut self) -> ClapResult<()> { // If there are global arguments, or settings we need to propagate them down to subcommands // before parsing incase we run into a subcommand self._build(Propagation::NextLevel); let out = io::stdout(); let mut buf_w = BufWriter::new(out.lock()); self.write_long_help(&mut buf_w) } /// Writes the full help message to the user to a [`io::Write`] object in the same method as if /// the user ran `-h` /// /// **NOTE:** clap has the ability to distinguish between "short" and "long" help messages /// depending on if the user ran [`-h` (short)] or [`--help` (long)] /// /// # Examples /// /// ```rust /// # use clap::App; /// use std::io; /// let mut app = App::new("myprog"); /// let mut out = io::stdout(); /// app.write_help(&mut out).expect("failed to write to stdout"); /// ``` /// [`io::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html /// [`-h` (short)]: ./struct.Arg.html#method.help /// [`--help` (long)]: ./struct.Arg.html#method.long_help pub fn write_help<W: Write>(&mut self, w: &mut W) -> ClapResult<()> { self._build(Propagation::NextLevel); let p = Parser::new(self); Help::write_parser_help(w, &p, false) } /// Writes the full help message to the user to a [`io::Write`] object in the same method as if /// the user ran `--help` /// /// **NOTE:** clap has the ability to distinguish between "short" and "long" help messages 
/// depending on if the user ran [`-h` (short)] or [`--help` (long)] /// /// # Examples /// /// ```rust /// # use clap::App; /// use std::io; /// let mut app = App::new("myprog"); /// let mut out = io::stdout(); /// app.write_long_help(&mut out).expect("failed to write to stdout"); /// ``` /// [`io::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html /// [`-h` (short)]: ./struct.Arg.html#method.help /// [`--help` (long)]: ./struct.Arg.html#method.long_help pub fn write_long_help<W: Write>(&mut self, w: &mut W) -> ClapResult<()> { self._build(Propagation::NextLevel); let p = Parser::new(self); Help::write_parser_help(w, &p, true) } /// Writes the version message to the user to a [`io::Write`] object as if the user ran `-V`. /// /// **NOTE:** clap has the ability to distinguish between "short" and "long" version messages /// depending on if the user ran [`-V` (short)] or [`--version` (long)] /// /// # Examples /// /// ```rust /// # use clap::App; /// use std::io; /// let mut app = App::new("myprog"); /// let mut out = io::stdout(); /// app.write_version(&mut out).expect("failed to write to stdout"); /// ``` /// [`io::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html /// [`-V` (short)]: ./struct.App.html#method.version /// [`--version` (long)]: ./struct.App.html#method.long_version pub fn write_version<W: Write>(&self, w: &mut W) -> ClapResult<()> { self._write_version(w, false).map_err(From::from) } /// Writes the version message to the user to a [`io::Write`] object /// /// **NOTE:** clap has the ability to distinguish between "short" and "long" version messages /// depending on if the user ran [`-V` (short)] or [`--version` (long)] /// /// # Examples /// /// ```rust /// # use clap::App; /// use std::io; /// let mut app = App::new("myprog"); /// let mut out = io::stdout(); /// app.write_long_version(&mut out).expect("failed to write to stdout"); /// ``` /// [`io::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html /// [`-V` (short)]: 
./struct.App.html#method.version /// [`--version` (long)]: ./struct.App.html#method.long_version pub fn write_long_version<W: Write>(&self, w: &mut W) -> ClapResult<()> { self._write_version(w, true).map_err(From::from) } /// Starts the parsing process, upon a failed parse an error will be displayed to the user and /// the process will exit with the appropriate error code. By default this method gets all user /// provided arguments from [`env::args_os`] in order to allow for invalid UTF-8 code points, /// which are legal on many platforms. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let matches = App::new("myprog") /// // Args and options go here... /// .get_matches(); /// ``` /// [`env::args_os`]: https://doc.rust-lang.org/std/env/fn.args_os.html pub fn get_matches(self) -> ArgMatches<'a> { self.get_matches_from(&mut env::args_os()) } /// Starts the parsing process, just like [`App::get_matches`] but doesn't consume the `App` /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let mut app = App::new("myprog") /// // Args and options go here... /// ; /// let matches = app.get_matches_mut(); /// ``` /// [`env::args_os`]: https://doc.rust-lang.org/std/env/fn.args_os.html /// [`App::get_matches`]: ./struct.App.html#method.get_matches pub fn get_matches_mut(&mut self) -> ArgMatches<'a> { self.try_get_matches_from_mut(&mut env::args_os()).unwrap_or_else(|e| { // Otherwise, write to stderr and exit if e.use_stderr() { wlnerr!("{}", e.message); if self.settings.is_set(AppSettings::WaitOnError) { wlnerr!("\nPress [ENTER] / [RETURN] to continue..."); let mut s = String::new(); let i = io::stdin(); i.lock().read_line(&mut s).unwrap(); } drop(self); drop(e); process::exit(1); } drop(self); e.exit() }) } /// Starts the parsing process. This method will return a [`clap::Result`] type instead of exiting /// the process on failed parse. 
By default this method gets matches from [`env::args_os`] /// /// **NOTE:** This method WILL NOT exit when `--help` or `--version` (or short versions) are /// used. It will return a [`clap::Error`], where the [`kind`] is a /// [`ErrorKind::HelpDisplayed`] or [`ErrorKind::VersionDisplayed`] respectively. You must call /// [`Error::exit`] or perform a [`std::process::exit`]. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let matches = App::new("myprog") /// // Args and options go here... /// .try_get_matches() /// .unwrap_or_else( |e| e.exit() ); /// ``` /// [`env::args_os`]: https://doc.rust-lang.org/std/env/fn.args_os.html /// [`ErrorKind::HelpDisplayed`]: ./enum.ErrorKind.html#variant.HelpDisplayed /// [`ErrorKind::VersionDisplayed`]: ./enum.ErrorKind.html#variant.VersionDisplayed /// [`Error::exit`]: ./struct.Error.html#method.exit /// [`std::process::exit`]: https://doc.rust-lang.org/std/process/fn.exit.html /// [`clap::Result`]: ./type.Result.html /// [`clap::Error`]: ./struct.Error.html /// [`kind`]: ./struct.Error.html pub fn try_get_matches(self) -> ClapResult<ArgMatches<'a>> { // Start the parsing self.try_get_matches_from(&mut env::args_os()) } /// Starts the parsing process. Like [`App::get_matches`] this method does not return a [`clap::Result`] /// and will automatically exit with an error message. This method, however, lets you specify /// what iterator to use when performing matches, such as a [`Vec`] of your making. /// /// **NOTE:** The first argument will be parsed as the binary name unless /// [`AppSettings::NoBinaryName`] is used /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let arg_vec = vec!["my_prog", "some", "args", "to", "parse"]; /// /// let matches = App::new("myprog") /// // Args and options go here... 
/// .get_matches_from(arg_vec); /// ``` /// [`App::get_matches`]: ./struct.App.html#method.get_matches /// [`clap::Result`]: ./type.Result.html /// [`Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html /// [`AppSettings::NoBinaryName`]: ./enum.AppSettings.html#variant.NoBinaryName pub fn get_matches_from<I, T>(mut self, itr: I) -> ArgMatches<'a> where I: IntoIterator<Item = T>, T: Into<OsString> + Clone, { self.try_get_matches_from_mut(itr).unwrap_or_else(|e| { // Otherwise, write to stderr and exit if e.use_stderr() { wlnerr!("{}", e.message); if self.settings.is_set(AppSettings::WaitOnError) { wlnerr!("\nPress [ENTER] / [RETURN] to continue..."); let mut s = String::new(); let i = io::stdin(); i.lock().read_line(&mut s).unwrap(); } drop(self); drop(e); process::exit(1); } drop(self); e.exit() }) } /// Starts the parsing process. A combination of [`App::get_matches_from`], and /// [`App::try_get_matches`] /// /// **NOTE:** This method WILL NOT exit when `--help` or `--version` (or short versions) are /// used. It will return a [`clap::Error`], where the [`kind`] is a [`ErrorKind::HelpDisplayed`] /// or [`ErrorKind::VersionDisplayed`] respectively. You must call [`Error::exit`] or /// perform a [`std::process::exit`] yourself. /// /// **NOTE:** The first argument will be parsed as the binary name unless /// [`AppSettings::NoBinaryName`] is used /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let arg_vec = vec!["my_prog", "some", "args", "to", "parse"]; /// /// let matches = App::new("myprog") /// // Args and options go here... 
/// .try_get_matches_from(arg_vec) /// .unwrap_or_else( |e| { panic!("An error occurs: {}", e) }); /// ``` /// [`App::get_matches_from`]: ./struct.App.html#method.get_matches_from /// [`App::try_get_matches`]: ./struct.App.html#method.get_matches_safe /// [`ErrorKind::HelpDisplayed`]: ./enum.ErrorKind.html#variant.HelpDisplayed /// [`ErrorKind::VersionDisplayed`]: ./enum.ErrorKind.html#variant.VersionDisplayed /// [`Error::exit`]: ./struct.Error.html#method.exit /// [`std::process::exit`]: https://doc.rust-lang.org/std/process/fn.exit.html /// [`clap::Error`]: ./struct.Error.html /// [`Error::exit`]: ./struct.Error.html#method.exit /// [`kind`]: ./struct.Error.html /// [`AppSettings::NoBinaryName`]: ./enum.AppSettings.html#variant.NoBinaryName pub fn try_get_matches_from<I, T>(mut self, itr: I) -> ClapResult<ArgMatches<'a>> where I: IntoIterator<Item = T>, T: Into<OsString> + Clone, { self.try_get_matches_from_mut(itr) } /// Starts the parsing process without consuming the [`App`] struct `self`. This is normally not /// the desired functionality, instead prefer [`App::try_get_matches_from`] which *does* /// consume `self`. /// /// **NOTE:** The first argument will be parsed as the binary name unless /// [`AppSettings::NoBinaryName`] is used /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let arg_vec = vec!["my_prog", "some", "args", "to", "parse"]; /// /// let mut app = App::new("myprog"); /// // Args and options go here... 
    /// let matches = app.try_get_matches_from_mut(arg_vec)
    ///     .unwrap_or_else( |e| { panic!("An error occurs: {}", e) });
    /// ```
    /// [`App`]: ./struct.App.html
    /// [`App::try_get_matches_from`]: ./struct.App.html#method.try_get_matches_from
    /// [`AppSettings::NoBinaryName`]: ./enum.AppSettings.html#variant.NoBinaryName
    pub fn try_get_matches_from_mut<I, T>(&mut self, itr: I) -> ClapResult<ArgMatches<'a>>
    where
        I: IntoIterator<Item = T>,
        T: Into<OsString> + Clone,
    {
        let mut it = itr.into_iter();
        // Get the name of the program (argument 1 of env::args()) and determine the actual file
        // that was used to execute the program. This is because a program called
        // ./target/release/my_prog -a
        // will have two arguments, './target/release/my_prog', '-a' but we don't want to display
        // the full path when displaying help messages and such
        if !self.settings.is_set(AppSettings::NoBinaryName) {
            if let Some(name) = it.next() {
                let bn_os = name.into();
                let p = Path::new(&*bn_os);
                if let Some(f) = p.file_name() {
                    if let Some(s) = f.to_os_string().to_str() {
                        // Only record the bin name once; an explicitly set name wins.
                        if self.bin_name.is_none() {
                            self.bin_name = Some(s.to_owned());
                        }
                    }
                }
            }
        }

        self._do_parse(&mut it.peekable())
    }
}

// Internally used only
#[doc(hidden)]
impl<'a, 'b> App<'a, 'b> {
    // Core parse driver: builds the app (once), runs the Parser, then
    // propagates global-argument values into subcommand matches.
    #[doc(hidden)]
    fn _do_parse<I, T>(&mut self, it: &mut Peekable<I>) -> ClapResult<ArgMatches<'a>>
    where
        I: Iterator<Item = T>,
        T: Into<OsString> + Clone,
    {
        debugln!("App::_do_parse;");
        let mut matcher = ArgMatcher::new();

        // If there are global arguments, or settings we need to propagate them down to subcommands
        // before parsing in case we run into a subcommand
        if !self.settings.is_set(AppSettings::Propagated) {
            self._build(Propagation::NextLevel);
        }

        {
            let mut parser = Parser::new(self);

            // do the real parsing
            if let Err(e) = parser.get_matches_with(&mut matcher, it) {
                return Err(e);
            }
        }

        // Names of all args marked Global; their values get copied into every
        // subcommand's matches below.
        let global_arg_vec: Vec<&str> = (&self)
            .args
            .iter()
            .filter(|a| a.is_set(ArgSettings::Global))
            .map(|ga| ga.name)
            .collect();
        matcher.propagate_globals(&global_arg_vec);

        Ok(matcher.into())
    }

    // One-time (idempotent, guarded by AppSettings::Propagated) build step:
    // merges global settings, creates the implicit help/version args, derives
    // display order, wires args into their groups, and runs debug assertions.
    fn _build(&mut self, prop: Propagation) {
        debugln!("App::_build;");
        // Make sure all the globally set flags apply to us as well
        self.settings = self.settings | self.g_settings;

        // Depending on if DeriveDisplayOrder is set or not, we need to determine when we build
        // the help and version flags, otherwise help message orders get screwed up
        if self.settings.is_set(AppSettings::DeriveDisplayOrder) {
            self._derive_display_order();
            self._create_help_and_version();
            self._propagate(prop);
        } else {
            self._create_help_and_version();
            self._propagate(prop);
            self._derive_display_order();
        }

        // Perform expensive debug assertions
        // (compiled out entirely in release builds)
        debug_assert!({
            for a in &self.args {
                self._arg_debug_asserts(a);
            }
            true
        });

        for a in &mut self.args {
            // Fill in the groups
            if let Some(ref grps) = a.groups {
                for g in grps {
                    let mut found = false;
                    if let Some(ref mut ag) = groups_mut!(self).find(|grp| &grp.name == g) {
                        ag.args.push(a.name);
                        found = true;
                    }
                    if !found {
                        // The arg named a group that doesn't exist yet: create it implicitly.
                        let mut ag = ArgGroup::with_name(g);
                        ag.args.push(a.name);
                        self.groups.push(ag);
                    }
                }
            }

            // Figure out implied settings
            if a.is_set(ArgSettings::Last) {
                // if an arg has `Last` set, we need to imply DontCollapseArgsInUsage so that args
                // in the usage string don't get confused or left out.
                self.settings.set(AppSettings::DontCollapseArgsInUsage);
                self.settings.set(AppSettings::ContainsLast);
            }
            a._build();
        }

        debug_assert!(self._app_debug_asserts());
        self.settings.set(AppSettings::Propagated);
    }

    // Perform some expensive assertions on the Parser itself
    fn _app_debug_asserts(&mut self) -> bool {
        debugln!("App::app_debug_asserts;");
        // * Args listed inside groups should exist
        // * Groups should not have naming conflicts with Args
        let g = groups!(self).find(|g| {
            g.args
                .iter()
                .any(|arg| !(find!(self, arg).is_some() || groups!(self).any(|g| &g.name == arg)))
        });
        assert!(
            g.is_none(),
            "The group '{}' contains an arg that doesn't exist or has a naming conflict with a group.",
            g.unwrap().name
        );
        true
    }

    // Pushes this App's settings, version info, and global args down into every
    // direct subcommand (recursively only when `prop == Propagation::Full`).
    // @TODO @v3-alpha @perf: should only propagate globals to subcmd we find, or for help
    pub fn _propagate(&mut self, prop: Propagation) {
        debugln!("App::_propagate:{}", self.name);
        for sc in &mut self.subcommands {
            // We have to create a new scope in order to tell rustc the borrow of `sc` is
            // done and to recursively call this method
            {
                let vsc = self.settings.is_set(AppSettings::VersionlessSubcommands);
                let gv = self.settings.is_set(AppSettings::GlobalVersion);
                if vsc {
                    sc.set(AppSettings::DisableVersion);
                }
                if gv && sc.version.is_none() && self.version.is_some() {
                    sc.set(AppSettings::GlobalVersion);
                    sc.version = Some(self.version.unwrap());
                }
                sc.settings = sc.settings | self.g_settings;
                sc.g_settings = sc.g_settings | self.g_settings;
                sc.term_w = self.term_w;
                sc.max_w = self.max_w;
            }
            {
                // Clone every Global arg into the subcommand.
                for a in self.args.iter().filter(|a| a.is_set(ArgSettings::Global)) {
                    sc.args.push(a.clone());
                }
            }
            // @TODO @deadcode @perf @v3-alpha: Currently we're not propagating
            if prop == Propagation::Full {
                sc._build(Propagation::Full);
            }
        }
    }

    // Creates the implicit `--help`/`--version` flags and the `help` subcommand,
    // unless the user already defined them (or disabled them via settings).
    pub(crate) fn _create_help_and_version(&mut self) {
        debugln!("App::_create_help_and_version;");
        // name is "hclap_help" because flags are sorted by name
        if !self.contains_long("help") {
            debugln!("App::_create_help_and_version: Building --help");
            if self.help_short.is_none() && !self.contains_short('h') {
                self.help_short = Some('h');
            }
            let mut arg = Arg::with_name("hclap_help")
                .long("help")
                .help(self.help_message.unwrap_or("Prints help information"));
            // we have to set short manually because we're dealing with char's
            arg.short = self.help_short;
            self.args.push(arg);
        } else {
            self.settings.unset(AppSettings::NeedsLongHelp);
        }
        if !self.is_set(AppSettings::DisableVersion) && !self.contains_long("version") {
            debugln!("App::_create_help_and_version: Building --version");
            if self.version_short.is_none() && !self.contains_short('V') {
                self.version_short = Some('V');
            }
            // name is "vclap_version" because flags are sorted by name
            let mut arg = Arg::with_name("vclap_version")
                .long("version")
                .help(self.version_message.unwrap_or("Prints version information"));
            // we have to set short manually because we're dealing with char's
            arg.short = self.version_short;
            self.args.push(arg);
        } else {
            self.settings.unset(AppSettings::NeedsLongVersion);
        }
        if self.has_subcommands() && !self.is_set(AppSettings::DisableHelpSubcommand)
            && !subcommands!(self).any(|s| s.name == "help")
        {
            debugln!("App::_create_help_and_version: Building help");
            self.subcommands.push(
                App::new("help")
                    .about("Prints this message or the help of the given subcommand(s)"),
            );
        } else {
            self.settings.unset(AppSettings::NeedsSubcommandHelp);
        }
    }

    // Assigns sequential display orders to args/subcommands still at the
    // default (999) when DeriveDisplayOrder is set; always recurses.
    pub(crate) fn _derive_display_order(&mut self) {
        debugln!("App::_derive_display_order:{}", self.name);
        if self.settings.is_set(AppSettings::DeriveDisplayOrder) {
            for (i, a) in args_mut!(self).filter(|a| a.has_switch())
                .filter(|a| a.disp_ord == 999)
                .enumerate()
            {
                a.disp_ord = i;
            }
            for (i, sc) in &mut subcommands_mut!(self)
                .enumerate()
                .filter(|&(_, ref sc)| sc.disp_ord == 999)
            {
                sc.disp_ord = i;
            }
        }
        for sc in subcommands_mut!(self) {
            sc._derive_display_order();
        }
    }

    // Perform expensive assertions on the Arg instance
    // (called only from inside debug_assert!, so debug builds only)
    fn _arg_debug_asserts(&self, a: &Arg) -> bool {
        debugln!("App::_arg_debug_asserts:{}", a.name);

        // No naming conflicts
        assert!(
            arg_names!(self).fold(0, |acc, n| if n == a.name { acc + 1 } else { acc }) < 2,
            format!("Non-unique argument name: {} is already in use", a.name)
        );

        // Long conflicts
        if let Some(l) = a.long {
            assert!(
                args!(self).fold(0, |acc, arg| if arg.long == Some(l) { acc + 1 } else { acc }) < 2,
                "Argument long must be unique\n\n\t--{} is already in use",
                l
            );
        }

        // Short conflicts
        if let Some(s) = a.short {
            assert!(
                args!(self).fold(0, |acc, arg| if arg.short == Some(s) { acc + 1 } else { acc }) < 2,
                "Argument short must be unique\n\n\t-{} is already in use",
                s
            );
        }

        if let Some(idx) = a.index {
            // No index conflicts
            assert!(
                positionals!(self).fold(0, |acc, p| if p.index == Some(idx as u64) { acc + 1 } else { acc }) < 2,
                "Argument '{}' has the same index as another positional \
                 argument\n\n\tUse Arg::setting(ArgSettings::MultipleValues) to allow one \
                 positional argument to take multiple values",
                a.name
            );
        }
        if a.is_set(ArgSettings::Last) {
            assert!(a.long.is_none(),
                "Flags or Options may not have last(true) set. {} has both a long and \
                 last(true) set.",
                a.name);
            assert!(a.short.is_none(),
                "Flags or Options may not have last(true) set. {} has both a short and \
                 last(true) set.",
                a.name);
        }
        assert!(
            !(a.is_set(ArgSettings::Required) && a.is_set(ArgSettings::Global)),
            "Global arguments cannot be required.\n\n\t'{}' is marked as \
             global and required",
            a.name
        );

        true
    }

    // Recursively computes each subcommand's full bin_name ("git mv" style)
    // from the parent's bin_name; used for usage strings and completions.
    fn _build_bin_names(&mut self) {
        debugln!("App::_build_bin_names;");
        for sc in subcommands_mut!(self) {
            debug!("Parser::build_bin_names:iter: bin_name set...");
            if sc.bin_name.is_none() {
                sdebugln!("No");
                let bin_name = format!(
                    "{}{}{}",
                    self.bin_name.as_ref().unwrap_or(&self.name.clone()),
                    if self.bin_name.is_some() { " " } else { "" },
                    &*sc.name
                );
                debugln!(
                    "Parser::build_bin_names:iter: Setting bin_name of {} to {}",
                    self.name,
                    bin_name
                );
                sc.bin_name = Some(bin_name);
            } else {
                sdebugln!("yes ({:?})", sc.bin_name);
            }
            debugln!(
                "Parser::build_bin_names:iter: Calling build_bin_names from...{}",
                sc.name
            );
            sc._build_bin_names();
        }
    }

    // Writes "<name> <version>" to `w`; `use_long` picks long_version first,
    // falling back to the other field, then to "".
    pub(crate) fn _write_version<W: Write>(&self, w: &mut W, use_long: bool) -> io::Result<()> {
        debugln!("App::_write_version;");
        let ver = if use_long {
            self.long_version
                .unwrap_or_else(|| self.version.unwrap_or(""))
        } else {
            self.version
                .unwrap_or_else(|| self.long_version.unwrap_or(""))
        };
        if let Some(bn) = self.bin_name.as_ref() {
            if bn.contains(' ') {
                // In case we're dealing with subcommands i.e. git mv is translated to git-mv
                write!(w, "{} {}", bn.replace(" ", "-"), ver)
            } else {
                write!(w, "{} {}", &self.name[..], ver)
            }
        } else {
            write!(w, "{} {}", &self.name[..], ver)
        }
    }
}

// Internal Query Methods
#[doc(hidden)]
impl<'a, 'b> App<'a, 'b> {
    // Should we color the output? None=determined by output location, true=yes, false=no
    #[doc(hidden)]
    pub fn color(&self) -> ColorWhen {
        debugln!("App::color;");
        debug!("App::color: Color setting...");
        if self.is_set(AppSettings::ColorNever) {
            sdebugln!("Never");
            ColorWhen::Never
        } else if self.is_set(AppSettings::ColorAlways) {
            sdebugln!("Always");
            ColorWhen::Always
        } else {
            sdebugln!("Auto");
            ColorWhen::Auto
        }
    }

    // True if any arg (user-defined or generated) already uses long `l`.
    fn contains_long(&self, l: &str) -> bool { longs!(self).any(|al| al == l) }

    // True if any arg already uses short `s`.
    fn contains_short(&self, s: char) -> bool { shorts!(self).any(|arg_s| arg_s == s) }

    // A setting counts as set if it's in either the local or global settings.
    pub fn is_set(&self, s: AppSettings) -> bool {
        self.settings.is_set(s) || self.g_settings.is_set(s)
    }

    pub fn set(&mut self, s: AppSettings) { self.settings.set(s) }

    pub fn set_global(&mut self, s: AppSettings) { self.g_settings.set(s) }

    pub fn unset_global(&mut self, s: AppSettings) { self.g_settings.unset(s) }

    pub fn unset(&mut self, s: AppSettings) { self.settings.unset(s) }

    pub fn has_subcommands(&self) -> bool { !self.subcommands.is_empty() }

    pub fn has_args(&self) -> bool { !self.args.is_empty() }

    pub fn has_opts(&self) -> bool { opts!(self).count() > 0 }

    pub fn has_flags(&self) -> bool { flags!(self).count() > 0 }

    pub fn has_positionals(&self) -> bool { positionals!(self).count() > 0 }

    pub fn has_visible_opts(&self) -> bool { opts!(self).any(|o| !o.is_set(ArgSettings::Hidden)) }

    pub fn has_visible_flags(&self) -> bool { flags!(self).any(|o| !o.is_set(ArgSettings::Hidden)) }

    pub fn has_visible_positionals(&self) -> bool {
        positionals!(self).any(|o| !o.is_set(ArgSettings::Hidden))
    }

    // The auto-generated `help` subcommand is never counted as "visible".
    pub fn has_visible_subcommands(&self) -> bool {
        subcommands!(self)
            .filter(|sc| sc.name != "help")
            .any(|sc| !sc.is_set(AppSettings::Hidden))
    }

    // Whether `--help` should render the long-form help (any long_about /
    // long_help present anywhere in the tree).
    fn use_long_help(&self) -> bool {
        self.long_about.is_some() || self.args.iter().any(|f| f.long_help.is_some())
            || subcommands!(self).any(|s| s.long_about.is_some())
    }
}

// @TODO @v3-beta: remove
// Deprecations
impl<'a, 'b> App<'a, 'b> {
    /// **Deprecated:** Use `App::global_setting( SettingOne | SettingTwo )` instead
    #[deprecated(since="2.33.0", note="Use `App::global_setting( SettingOne | SettingTwo )` instead")]
    pub fn global_settings(mut self, settings: &[AppSettings]) -> Self {
        for s in settings {
            self.settings.set(*s);
            self.g_settings.set(*s)
        }
        self
    }

    /// **Deprecated:** Use `App::setting( SettingOne | SettingTwo )` instead
    #[deprecated(since="2.33.0", note="Use `App::setting( SettingOne | SettingTwo )` instead")]
    pub fn settings(mut self, settings: &[AppSettings]) -> Self {
        for s in settings {
            self.settings.set(*s);
        }
        self
    }

    /// **Deprecated:** Use `App::unset_setting( SettingOne | SettingTwo )` instead
    #[deprecated(since="2.33.0", note="Use `App::unset_setting( SettingOne | SettingTwo )` instead")]
    pub fn unset_settings(mut self, settings: &[AppSettings]) -> Self {
        for s in settings {
            self.settings.unset(*s);
            self.g_settings.unset(*s);
        }
        self
    }

    /// **Deprecated:** Use explicit `App::author()` and `App::version()` calls instead.
    #[deprecated(since="2.14.1", note="Can never work; use explicit App::author() and \
                                       App::version() calls instead. Will be removed in v3.0-beta")]
    pub fn with_defaults<S: Into<String>>(n: S) -> Self {
        App {
            name: n.into(),
            author: Some("Kevin K. <kbknapp@gmail.com>"),
            version: Some("2.19.2"),
            ..Default::default()
        }
    }

    /// **Deprecated:** Use serde instead.
    #[deprecated(since="2.30.0", note="Use serde instead. Will be removed in v3.0-beta")]
    #[cfg(feature = "yaml")]
    pub fn from_yaml(yaml: &'a Yaml) -> App<'a, 'a> { App::from(yaml) }

    /// **Deprecated:** Use `App::mut_arg("help", |a| a.short("H"))` instead.
    #[deprecated(since="2.30.0", note="Use `App::mut_arg(\"help\", |a| a.short(\"H\"))` instead. Will be removed in v3.0-beta")]
    pub fn help_short<S: AsRef<str> + 'b>(mut self, s: S) -> Self {
        let c = s.as_ref()
            .trim_left_matches(|c| c == '-')
            .chars()
            .nth(0)
            .unwrap_or('h');
        self.help_short = Some(c);
        self
    }

    /// **Deprecated:** Use `App::mut_arg("version", |a| a.short("v"))` instead.
    #[deprecated(since="2.30.0", note="Use `App::mut_arg(\"version\", |a| a.short(\"v\"))` instead. Will be removed in v3.0-beta")]
    pub fn version_short<S: AsRef<str>>(mut self, s: S) -> Self {
        let c = s.as_ref()
            .trim_left_matches(|c| c == '-')
            .chars()
            .nth(0)
            .unwrap_or('V');
        self.version_short = Some(c);
        self
    }

    /// **Deprecated:** Use `App::mut_arg("help", |a| a.help("Some message"))` instead.
    #[deprecated(since="2.30.0", note="Use `App::mut_arg(\"help\", |a| a.help(\"Some message\"))` instead. Will be removed in v3.0-beta")]
    pub fn help_message<S: Into<&'a str>>(mut self, s: S) -> Self {
        self.help_message = Some(s.into());
        self
    }

    /// **Deprecated:** Use `App::mut_arg("version", ...)` instead.
    #[deprecated(since="2.30.0", note="Use `App::mut_arg(\"version\", |a| a.short(\"Some message\"))` instead. Will be removed in v3.0-beta")]
    pub fn version_message<S: Into<&'a str>>(mut self, s: S) -> Self {
        self.version_message = Some(s.into());
        self
    }

    /// **Deprecated:** Renamed to `App::override_usage`.
    #[deprecated(since="2.30.0", note="Renamed to `App::override_usage`. Will be removed in v3.0-beta")]
    pub fn usage<S: Into<&'b str>>(mut self, usage: S) -> Self {
        self.usage_str = Some(usage.into());
        self
    }

    /// **Deprecated:** Renamed to `App::override_help`.
    #[deprecated(since="2.30.0", note="Renamed to `App::override_help`. Will be removed in v3.0-beta")]
    pub fn help<S: Into<&'b str>>(mut self, help: S) -> Self {
        self.help_str = Some(help.into());
        self
    }

    /// **Deprecated:** Renamed to `App::help_template`.
    #[deprecated(since="2.30.0", note="Renamed to `App::help_template`. Will be removed in v3.0-beta")]
    pub fn template<S: Into<&'b str>>(mut self, s: S) -> Self {
        self.template = Some(s.into());
        self
    }

    /// **Deprecated:** Use `App::arg(Arg::from(&str))` instead.
    #[deprecated(since="2.30.0", note="Use `App::arg(Arg::from(&str)` instead. Will be removed in v3.0-beta")]
    pub fn arg_from_usage(mut self, usage: &'a str) -> Self {
        self.args.push(Arg::from_usage(usage));
        self
    }

    /// **Deprecated:** Use `App::args(&str)` instead.
    #[deprecated(since="2.30.0", note="Use `App::args(&str)` instead. Will be removed in v3.0-beta")]
    pub fn args_from_usage(mut self, usage: &'a str) -> Self {
        // One usage-string definition per line; blank lines are skipped.
        for line in usage.lines() {
            let l = line.trim();
            if l.is_empty() {
                continue;
            }
            self.args.push(Arg::from_usage(l));
        }
        self
    }

    /// **Deprecated:** Use the `clap_completions` crate instead.
    #[allow(deprecated)]
    #[deprecated(since="2.30.0", note="Use `clap_completions crate and clap_completions::generate` instead. Will be removed in v3.0-beta")]
    pub fn gen_completions<T: Into<OsString>, S: Into<String>>(
        &mut self,
        bin_name: S,
        for_shell: Shell,
        out_dir: T,
    ) {
        use std::error::Error;
        let out_dir = PathBuf::from(out_dir.into());
        let name = &*self.bin_name.as_ref().unwrap().clone();
        let file_name = match for_shell {
            Shell::Bash => format!("{}.bash", name),
            Shell::Fish => format!("{}.fish", name),
            Shell::Zsh => format!("_{}", name),
            Shell::PowerShell => format!("_{}.ps1", name),
        };

        let mut file = match File::create(out_dir.join(file_name)) {
            Err(why) => panic!("couldn't create completion file: {}", why.description()),
            Ok(file) => file,
        };
        self.gen_completions_to(bin_name.into(), for_shell, &mut file)
    }

    /// **Deprecated:** Use the `clap_completions` crate instead.
    #[deprecated(since="2.30.0", note="Use `clap_completions crate and clap_completions::generate_to` instead. Will be removed in v3.0-beta")]
    pub fn gen_completions_to<W: Write, S: Into<String>>(
        &mut self,
        bin_name: S,
        for_shell: Shell,
        buf: &mut W,
    ) {
        self.bin_name = Some(bin_name.into());
        if !self.is_set(AppSettings::Propagated) {
            // Completions need the fully-built app, including subcommand bin names.
            self._build(Propagation::Full);
            self._build_bin_names();
        }

        ComplGen::new(self).generate(for_shell, buf)
    }

    /// **Deprecated:** Renamed to `App::try_get_matches`.
    #[deprecated(since="2.30.0", note="Renamed `App::try_get_matches` to be consistent with Rust naming conventions. Will be removed in v3.0-beta")]
    pub fn get_matches_safe(self) -> ClapResult<ArgMatches<'a>> {
        // Start the parsing
        self.try_get_matches_from(&mut env::args_os())
    }

    /// **Deprecated:** Use
    #[deprecated(since="2.30.0", note="Renamed `App::try_get_matches_from` to be consistent with Rust naming conventions.
Will be removed in v3.0-beta")] pub fn get_matches_from_safe<I, T>(mut self, itr: I) -> ClapResult<ArgMatches<'a>> where I: IntoIterator<Item = T>, T: Into<OsString> + Clone, { self.try_get_matches_from_mut(itr) } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Renamed `App::try_get_matches_from_mut` to be consistent with Rust naming conventions. Will be removed in v3.0-beta")] pub fn get_matches_from_safe_borrow<I, T>(&mut self, itr: I) -> ClapResult<ArgMatches<'a>> where I: IntoIterator<Item = T>, T: Into<OsString> + Clone, { self.try_get_matches_from_mut(itr) } } #[cfg(feature = "yaml")] impl<'a> From<&'a Yaml> for App<'a, 'a> { fn from(mut yaml: &'a Yaml) -> Self { use args::SubCommand; // We WANT this to panic on error...so expect() is good. let mut is_sc = None; let mut a = if let Some(name) = yaml["name"].as_str() { App::new(name) } else { let yaml_hash = yaml.as_hash().unwrap(); let sc_key = yaml_hash.keys().nth(0).unwrap(); is_sc = Some(yaml_hash.get(sc_key).unwrap()); App::new(sc_key.as_str().unwrap()) }; yaml = if let Some(sc) = is_sc { sc } else { yaml }; macro_rules! 
yaml_str {
            // Applies builder method `$i` from the YAML key of the same name;
            // panics when the key exists but is not a string.
            ($a:ident, $y:ident, $i:ident) => {
                if let Some(v) = $y[stringify!($i)].as_str() {
                    $a = $a.$i(v);
                } else if $y[stringify!($i)] != Yaml::BadValue {
                    panic!("Failed to convert YAML value {:?} to a string", $y[stringify!($i)]);
                }
            };
        }

        yaml_str!(a, yaml, version);
        yaml_str!(a, yaml, author);
        yaml_str!(a, yaml, bin_name);
        yaml_str!(a, yaml, about);
        yaml_str!(a, yaml, before_help);
        yaml_str!(a, yaml, after_help);
        yaml_str!(a, yaml, template);
        yaml_str!(a, yaml, usage);
        yaml_str!(a, yaml, help);
        yaml_str!(a, yaml, help_short);
        yaml_str!(a, yaml, version_short);
        yaml_str!(a, yaml, help_message);
        yaml_str!(a, yaml, version_message);
        yaml_str!(a, yaml, alias);
        yaml_str!(a, yaml, visible_alias);

        if let Some(v) = yaml["display_order"].as_i64() {
            a = a.display_order(v as usize);
        } else if yaml["display_order"] != Yaml::BadValue {
            panic!(
                "Failed to convert YAML value {:?} to a u64",
                yaml["display_order"]
            );
        }
        if let Some(v) = yaml["setting"].as_str() {
            a = a.setting(v.parse().expect("unknown AppSetting found in YAML file"));
        } else if yaml["setting"] != Yaml::BadValue {
            panic!(
                "Failed to convert YAML value {:?} to an AppSetting",
                yaml["setting"]
            );
        }
        // "settings" accepts either a sequence of setting names or a single string.
        if let Some(v) = yaml["settings"].as_vec() {
            for ys in v {
                if let Some(s) = ys.as_str() {
                    a = a.setting(s.parse().expect("unknown AppSetting found in YAML file"));
                }
            }
        } else if let Some(v) = yaml["settings"].as_str() {
            a = a.setting(v.parse().expect("unknown AppSetting found in YAML file"));
        } else if yaml["settings"] != Yaml::BadValue {
            panic!(
                "Failed to convert YAML value {:?} to a string",
                yaml["settings"]
            );
        }
        if let Some(v) = yaml["global_setting"].as_str() {
            // FIX: previously applied via `a.setting(...)`, which only set the setting
            // locally — a "global_setting" key must propagate to subcommands.
            a = a.global_setting(v.parse().expect("unknown AppSetting found in YAML file"));
        } else if yaml["global_setting"] != Yaml::BadValue {
            panic!(
                "Failed to convert YAML value {:?} to an AppSetting",
                // FIX: reported the wrong key ("setting") in the error message.
                yaml["global_setting"]
            );
        }
        // "global_settings" accepts either a sequence of setting names or a single string.
        if let Some(v) = yaml["global_settings"].as_vec() {
            for ys in v {
                if let Some(s) = ys.as_str() {
                    a = a.global_setting(s.parse().expect("unknown AppSetting found in YAML file"));
                }
            }
        } else if let Some(v) = yaml["global_settings"].as_str() {
            a = a.global_setting(v.parse().expect("unknown AppSetting found in YAML file"));
        } else if yaml["global_settings"] != Yaml::BadValue {
            panic!(
                "Failed to convert YAML value {:?} to a string",
                yaml["global_settings"]
            );
        }

        macro_rules! vec_or_str {
            // Applies `$as_single` for each entry of the sequence key `$as_vec`,
            // or once when the key holds a plain string.
            ($a:ident, $y:ident, $as_vec:ident, $as_single:ident) => {{
                let maybe_vec = $y[stringify!($as_vec)].as_vec();
                if let Some(vec) = maybe_vec {
                    for ys in vec {
                        if let Some(s) = ys.as_str() {
                            $a = $a.$as_single(s);
                        } else {
                            panic!("Failed to convert YAML value {:?} to a string", ys);
                        }
                    }
                } else {
                    if let Some(s) = $y[stringify!($as_vec)].as_str() {
                        $a = $a.$as_single(s);
                    } else if $y[stringify!($as_vec)] != Yaml::BadValue {
                        panic!("Failed to convert YAML value {:?} to either a vec or string", $y[stringify!($as_vec)]);
                    }
                }
                $a
            }};
        }

        a = vec_or_str!(a, yaml, aliases, alias);
        a = vec_or_str!(a, yaml, visible_aliases, visible_alias);

        if let Some(v) = yaml["args"].as_vec() {
            for arg_yaml in v {
                a = a.arg(Arg::from_yaml(arg_yaml.as_hash().unwrap()));
            }
        }
        if let Some(v) = yaml["subcommands"].as_vec() {
            for sc_yaml in v {
                a = a.subcommand(SubCommand::from_yaml(sc_yaml));
            }
        }
        if let Some(v) = yaml["groups"].as_vec() {
            for ag_yaml in v {
                a = a.group(ArgGroup::from(ag_yaml.as_hash().unwrap()));
            }
        }

        a
    }
}

impl<'n, 'e> fmt::Display for App<'n, 'e> {
    // Display renders only the app's name (used in usage/error text).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.name) }
}
fix unused variable warning
mod settings;
pub mod parser;
mod help;
mod validator;
mod usage;

// Std
use std::env;
use std::ffi::OsString;
use std::fmt;
use std::io::{self, BufRead, BufWriter, Write};
use std::path::{Path, PathBuf};
use std::process;
use std::fs::File;
use std::iter::Peekable;

// Third Party
#[cfg(feature = "yaml")]
use yaml_rust::Yaml;

// Internal
use app::parser::Parser;
use app::help::Help;
use args::{Arg, ArgGroup, ArgMatcher, ArgMatches};
use args::settings::ArgSettings;
use errors::Result as
ClapResult;
pub use self::settings::{AppFlags, AppSettings};
use completions::{ComplGen, Shell};
use fmt::ColorWhen;

// Controls how far a setting/build step propagates through the subcommand tree.
#[doc(hidden)]
#[allow(dead_code)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Propagation<'a> {
    // Propagate only to the subcommand with this name.
    To(&'a str),
    // NOTE(review): presumably propagates recursively to all subcommands — confirm at call sites.
    Full,
    // NOTE(review): presumably propagates one level of subcommands only — confirm at call sites.
    NextLevel,
    // No propagation.
    None
}

/// Used to create a representation of a command line program and all possible command line
/// arguments. Application settings are set using the "builder pattern" with the
/// [`App::get_matches`] family of methods being the terminal methods that starts the
/// runtime-parsing process. These methods then return information about the user supplied
/// arguments (or lack there of).
///
/// **NOTE:** There aren't any mandatory "options" that one must set. The "options" may
/// also appear in any order (so long as one of the [`App::get_matches`] methods is the last method
/// called).
///
/// # Examples
///
/// ```no_run
/// # use clap::{App, Arg};
/// let m = App::new("My Program")
///     .author("Me, me@mail.com")
///     .version("1.0.2")
///     .about("Explains in brief what the program does")
///     .arg(
///         Arg::with_name("in_file").index(1)
///     )
///     .after_help("Longer explanation to appear after the options when \
///                  displaying the help information from --help or -h")
///     .get_matches();
///
/// // Your program logic starts here...
/// ```
/// [`App::get_matches`]: ./struct.App.html#method.get_matches
#[derive(Default, Debug, Clone)]
pub struct App<'a, 'b>
where
    'a: 'b,
{
    // Name shown in help/version output; may differ from the binary name.
    #[doc(hidden)] pub name: String,
    // Binary name as invoked (filled in during parsing unless overridden).
    #[doc(hidden)] pub bin_name: Option<String>,
    #[doc(hidden)] pub author: Option<&'b str>,
    // Short (-V) and long (--version) version strings.
    #[doc(hidden)] pub version: Option<&'b str>,
    #[doc(hidden)] pub long_version: Option<&'b str>,
    // Short (-h) and long (--help) descriptions.
    #[doc(hidden)] pub about: Option<&'b str>,
    #[doc(hidden)] pub long_about: Option<&'b str>,
    // Text appended after / prepended before the generated help.
    #[doc(hidden)] pub more_help: Option<&'b str>,
    #[doc(hidden)] pub pre_help: Option<&'b str>,
    #[doc(hidden)] pub aliases: Option<Vec<(&'b str, bool)>>, // (name, visible)
    // User-supplied usage override vs. the auto-generated usage string.
    #[doc(hidden)] pub usage_str: Option<&'b str>,
    #[doc(hidden)] pub usage: Option<String>,
    // Full help-message override; bypasses auto-generation entirely.
    #[doc(hidden)] pub help_str: Option<&'b str>,
    #[doc(hidden)] pub disp_ord: usize,
    // Fixed and maximum terminal widths for help wrapping.
    #[doc(hidden)] pub term_w: Option<usize>,
    #[doc(hidden)] pub max_w: Option<usize>,
    #[doc(hidden)] pub template: Option<&'b str>,
    // Local settings vs. settings propagated to subcommands.
    #[doc(hidden)] pub settings: AppFlags,
    #[doc(hidden)] pub g_settings: AppFlags,
    #[doc(hidden)] pub args: Vec<Arg<'a, 'b>>,
    #[doc(hidden)] pub subcommands: Vec<App<'a, 'b>>,
    #[doc(hidden)] pub groups: Vec<ArgGroup<'a>>,
    // Overridden short flags for the auto help/version args.
    #[doc(hidden)] help_short: Option<char>,
    #[doc(hidden)] version_short: Option<char>,
    // Overridden help text for the auto help/version args.
    #[doc(hidden)] pub help_message: Option<&'a str>,
    #[doc(hidden)] pub version_message: Option<&'a str>,
    // Stack of custom help-section headings; a `None` entry resets to the default
    // section (see `help_heading`/`stop_custom_headings`/`arg`).
    #[doc(hidden)] pub help_headings: Vec<Option<&'a str>>,
}

impl<'a, 'b> App<'a, 'b> {
    /// Creates a new instance of an application requiring a name. The name may be, but doesn't
    /// have to be same as the binary. The name will be displayed to the user when they request to
    /// print version or help and usage information.
/// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let prog = App::new("My Program") /// # ; /// ``` pub fn new<S: Into<String>>(n: S) -> Self { App { name: n.into(), ..Default::default() } } /// Get the name of the app pub fn get_name(&self) -> &str { &self.name } /// Get the name of the binary pub fn get_bin_name(&self) -> Option<&str> { self.bin_name.as_ref().map(|s| s.as_str()) } /// Sets a string of author(s) that will be displayed to the user when they /// request the help information with `--help` or `-h`. /// /// **Pro-tip:** Use `clap`s convenience macro [`crate_authors!`] to automatically set your /// application's author(s) to the same thing as your crate at compile time. See the [`examples/`] /// directory for more information /// /// See the [`examples/`] /// directory for more information /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .author("Me, me@mymain.com") /// # ; /// ``` /// [`crate_authors!`]: ./macro.crate_authors!.html /// [`examples/`]: https://github.com/kbknapp/clap-rs/tree/master/examples pub fn author<S: Into<&'b str>>(mut self, author: S) -> Self { self.author = Some(author.into()); self } /// Overrides the system-determined binary name. This should only be used when absolutely /// necessary, such as when the binary name for your application is misleading, or perhaps /// *not* how the user should invoke your program. /// /// **Pro-tip:** When building things such as third party `cargo` subcommands, this setting /// **should** be used! /// /// **NOTE:** This command **should not** be used for [`SubCommand`]s. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("My Program") /// .bin_name("my_binary") /// # ; /// ``` /// [`SubCommand`]: ./struct.SubCommand.html pub fn bin_name<S: Into<String>>(mut self, name: S) -> Self { self.bin_name = Some(name.into()); self } /// Sets a string describing what the program does. 
This will be displayed when displaying help /// information with `-h`. /// /// **NOTE:** If only `about` is provided, and not [`App::long_about`] but the user requests /// `--help` clap will still display the contents of `about` appropriately /// /// **NOTE:** Only [`App::about`] is used in completion script generation in order to be /// concise /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .about("Does really amazing things to great people") /// # ; /// ``` /// [`App::long_about`]: ./struct.App.html#method.long_about pub fn about<S: Into<&'b str>>(mut self, about: S) -> Self { self.about = Some(about.into()); self } /// Sets a string describing what the program does. This will be displayed when displaying help /// information. /// /// **NOTE:** If only `long_about` is provided, and not [`App::about`] but the user requests /// `-h` clap will still display the contents of `long_about` appropriately /// /// **NOTE:** Only [`App::about`] is used in completion script generation in order to be /// concise /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .long_about( /// "Does really amazing things to great people. Now let's talk a little /// more in depth about how this subcommand really works. It may take about /// a few lines of text, but that's ok!") /// # ; /// ``` /// [`App::about`]: ./struct.App.html#method.about pub fn long_about<S: Into<&'b str>>(mut self, about: S) -> Self { self.long_about = Some(about.into()); self } /// Sets the program's name. This will be displayed when displaying help information. /// /// **Pro-top:** This function is particularly useful when configuring a program via /// [`App::from_yaml`] in conjunction with the [`crate_name!`] macro to derive the program's /// name from its `Cargo.toml`. 
/// /// # Examples /// ```ignore /// # #[macro_use] /// # extern crate clap; /// # use clap::App; /// # fn main() { /// let yml = load_yaml!("app.yml"); /// let app = App::from_yaml(yml) /// .name(crate_name!()); /// /// // continued logic goes here, such as `app.get_matches()` etc. /// # } /// ``` /// /// [`App::from_yaml`]: ./struct.App.html#method.from_yaml /// [`crate_name!`]: ./macro.crate_name.html pub fn name<S: Into<String>>(mut self, name: S) -> Self { self.name = name.into(); self } /// Adds additional help information to be displayed in addition to auto-generated help. This /// information is displayed **after** the auto-generated help information. This is often used /// to describe how to use the arguments, or caveats to be noted. /// /// # Examples /// /// ```no_run /// # use clap::App; /// App::new("myprog") /// .after_help("Does really amazing things to great people...but be careful with -R") /// # ; /// ``` pub fn after_help<S: Into<&'b str>>(mut self, help: S) -> Self { self.more_help = Some(help.into()); self } /// Adds additional help information to be displayed in addition to auto-generated help. This /// information is displayed **before** the auto-generated help information. This is often used /// for header information. /// /// # Examples /// /// ```no_run /// # use clap::App; /// App::new("myprog") /// .before_help("Some info I'd like to appear before the help info") /// # ; /// ``` pub fn before_help<S: Into<&'b str>>(mut self, help: S) -> Self { self.pre_help = Some(help.into()); self } /// Sets a string of the version number to be displayed when displaying version or help /// information with `-V`. 
/// /// **NOTE:** If only `version` is provided, and not [`App::long_version`] but the user /// requests `--version` clap will still display the contents of `version` appropriately /// /// **Pro-tip:** Use `clap`s convenience macro [`crate_version!`] to automatically set your /// application's version to the same thing as your crate at compile time. See the [`examples/`] /// directory for more information /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .version("v0.1.24") /// # ; /// ``` /// [`crate_version!`]: ./macro.crate_version!.html /// [`examples/`]: https://github.com/kbknapp/clap-rs/tree/master/examples /// [`App::long_version`]: ./struct.App.html#method.long_version pub fn version<S: Into<&'b str>>(mut self, ver: S) -> Self { self.version = Some(ver.into()); self } /// Sets a string of the version number to be displayed when displaying version or help /// information with `--version`. /// /// **NOTE:** If only `long_version` is provided, and not [`App::version`] but the user /// requests `-V` clap will still display the contents of `long_version` appropriately /// /// **Pro-tip:** Use `clap`s convenience macro [`crate_version!`] to automatically set your /// application's version to the same thing as your crate at compile time. See the [`examples/`] /// directory for more information /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .long_version( /// "v0.1.24 /// commit: abcdef89726d /// revision: 123 /// release: 2 /// binary: myprog") /// # ; /// ``` /// [`crate_version!`]: ./macro.crate_version!.html /// [`examples/`]: https://github.com/kbknapp/clap-rs/tree/master/examples /// [`App::version`]: ./struct.App.html#method.version pub fn long_version<S: Into<&'b str>>(mut self, ver: S) -> Self { self.long_version = Some(ver.into()); self } /// Overrides the `clap` generated usage string. 
/// /// This will be displayed to the user when errors are found in argument parsing. /// /// **CAUTION:** Using this setting disables `clap`s "context-aware" usage strings. After this /// setting is set, this will be the only usage string displayed to the user! /// /// **NOTE:** This will not replace the entire help message, *only* the portion /// showing the usage. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .override_usage("myapp [-clDas] <some_file>") /// # ; /// ``` /// [`ArgMatches::usage`]: ./struct.ArgMatches.html#method.usage pub fn override_usage<S: Into<&'b str>>(mut self, usage: S) -> Self { self.usage_str = Some(usage.into()); self } /// Overrides the `clap` generated help message. This should only be used /// when the auto-generated message does not suffice. /// /// This will be displayed to the user when they use `--help` or `-h` /// /// **NOTE:** This replaces the **entire** help message, so nothing will be auto-generated. /// /// **NOTE:** This **only** replaces the help message for the current command, meaning if you /// are using subcommands, those help messages will still be auto-generated unless you /// specify a [`Arg::override_help`] for them as well. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myapp") /// .override_help("myapp v1.0\n\ /// Does awesome things\n\ /// (C) me@mail.com\n\n\ /// /// USAGE: myapp <opts> <comamnd>\n\n\ /// /// Options:\n\ /// -h, --helpe Dispay this message\n\ /// -V, --version Display version info\n\ /// -s <stuff> Do something with stuff\n\ /// -v Be verbose\n\n\ /// /// Commmands:\n\ /// help Prints this message\n\ /// work Do some work") /// # ; /// ``` /// [`Arg::override_help`]: ./struct.Arg.html#method.override_help pub fn override_help<S: Into<&'b str>>(mut self, help: S) -> Self { self.help_str = Some(help.into()); self } /// Sets the help template to be used, overriding the default format. 
/// /// Tags arg given inside curly brackets. /// /// Valid tags are: /// /// * `{bin}` - Binary name. /// * `{version}` - Version number. /// * `{author}` - Author information. /// * `{about}` - General description (from [`App::about`]) /// * `{usage}` - Automatically generated or given usage string. /// * `{all-args}` - Help for all arguments (options, flags, positionals arguments, /// and subcommands) including titles. /// * `{unified}` - Unified help for options and flags. Note, you must *also* set /// [`AppSettings::UnifiedHelpMessage`] to fully merge both options and /// flags, otherwise the ordering is "best effort" /// * `{flags}` - Help for flags. /// * `{options}` - Help for options. /// * `{positionals}` - Help for positionals arguments. /// * `{subcommands}` - Help for subcommands. /// * `{after-help}` - Help from [`App::after_help`] /// * `{before-help}` - Help from [`App::before_help`] /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// .version("1.0") /// .help_template("{bin} ({version}) - {usage}") /// # ; /// ``` /// **NOTE:**The template system is, on purpose, very simple. Therefore the tags have to writen /// in the lowercase and without spacing. /// [`App::about`]: ./struct.App.html#method.about /// [`App::after_help`]: ./struct.App.html#method.after_help /// [`App::before_help`]: ./struct.App.html#method.before_help /// [`AppSettings::UnifiedHelpMessage`]: ./enum.AppSettings.html#variant.UnifiedHelpMessage pub fn help_template<S: Into<&'b str>>(mut self, s: S) -> Self { self.template = Some(s.into()); self } /// Enables a single command, or [`SubCommand`], level settings. /// /// See [`AppSettings`] for a full list of possibilities and examples. 
/// /// # Examples /// /// ```no_run /// # use clap::{App, Arg, AppSettings}; /// App::new("myprog") /// .setting(AppSettings::SubcommandRequired) /// .setting(AppSettings::WaitOnError) /// # ; /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`AppSettings`]: ./enum.AppSettings.html pub fn setting(mut self, setting: AppSettings) -> Self { self.settings.set(setting); self } /// Disables a single command, or [`SubCommand`], level setting. /// /// See [`AppSettings`] for a full list of possibilities and examples. /// /// # Examples /// /// ```no_run /// # use clap::{App, AppSettings}; /// App::new("myprog") /// .unset_setting(AppSettings::ColorAuto) /// # ; /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`AppSettings`]: ./enum.AppSettings.html /// [global]: ./struct.App.html#method.global_setting pub fn unset_setting(mut self, setting: AppSettings) -> Self { self.settings.unset(setting); self } /// Enables a single setting that is propagated down through all child subcommands. /// /// See [`AppSettings`] for a full list of possibilities and examples. /// /// **NOTE**: The setting is *only* propagated *down* and not up through parent commands. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg, AppSettings}; /// App::new("myprog") /// .global_setting(AppSettings::SubcommandRequired) /// # ; /// ``` /// [`AppSettings`]: ./enum.AppSettings.html pub fn global_setting(mut self, setting: AppSettings) -> Self { self.settings.set(setting); self.g_settings.set(setting); self } /// Disables a global setting, and stops propagating down to child subcommands. /// /// See [`AppSettings`] for a full list of possibilities and examples. 
/// /// **NOTE:** The setting being unset will be unset from both local and [global] settings /// /// # Examples /// /// ```no_run /// # use clap::{App, AppSettings}; /// App::new("myprog") /// .unset_global_setting(AppSettings::ColorAuto) /// # ; /// ``` /// [`AppSettings`]: ./enum.AppSettings.html /// [global]: ./struct.App.html#method.global_setting pub fn unset_global_setting(mut self, setting: AppSettings) -> Self { self.settings.unset(setting); self.g_settings.unset(setting); self } /// Sets the terminal width at which to wrap help messages. Defaults to `120`. Using `0` will /// ignore terminal widths and use source formatting. /// /// `clap` automatically tries to determine the terminal width on Unix, Linux, OSX and Windows /// if the `wrap_help` cargo "feature" has been used while compiling. If the terminal width /// cannot be determined, `clap` defaults to `120`. /// /// **NOTE:** This setting applies globally and *not* on a per-command basis. /// /// **NOTE:** This setting must be set **before** any subcommands are added! /// /// # Platform Specific /// /// Only Unix, Linux, OSX and Windows support automatic determination of terminal width. /// Even on those platforms, this setting is useful if for any reason the terminal width /// cannot be determined. /// /// # Examples /// /// ```no_run /// # use clap::App; /// App::new("myprog") /// .set_term_width(80) /// # ; /// ``` pub fn set_term_width(mut self, width: usize) -> Self { self.term_w = Some(width); self } /// Sets the max terminal width at which to wrap help messages. Using `0` will ignore terminal /// widths and use source formatting. /// /// `clap` automatically tries to determine the terminal width on Unix, Linux, OSX and Windows /// if the `wrap_help` cargo "feature" has been used while compiling, but one might want to /// limit the size (e.g. when the terminal is running fullscreen). /// /// **NOTE:** This setting applies globally and *not* on a per-command basis. 
/// /// **NOTE:** This setting must be set **before** any subcommands are added! /// /// # Platform Specific /// /// Only Unix, Linux, OSX and Windows support automatic determination of terminal width. /// /// # Examples /// /// ```no_run /// # use clap::App; /// App::new("myprog") /// .max_term_width(100) /// # ; /// ``` pub fn max_term_width(mut self, w: usize) -> Self { self.max_w = Some(w); self } /// Adds an [argument] to the list of valid possibilities. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// App::new("myprog") /// // Adding a single "flag" argument with a short and help text, using Arg::with_name() /// .arg( /// Arg::with_name("debug") /// .short("d") /// .help("turns on debugging mode") /// ) /// // Adding a single "option" argument with a short, a long, and help text using the less /// // verbose Arg::from_usage() /// .arg( /// Arg::from_usage("-c --config=[CONFIG] 'Optionally sets a config file to use'") /// ) /// # ; /// ``` /// [argument]: ./struct.Arg.html pub fn arg<A: Into<Arg<'a, 'b>>>(mut self, a: A) -> Self { let help_heading : Option<&'a str> = if let Some(option_str) = self.help_headings.last() { *option_str } else { None }; let arg = a.into().help_heading(help_heading); self.args.push(arg); self } /// Set a custom section heading for future args. Every call to arg will /// have this header (instead of its default header) until a subsequent /// call to help_heading pub fn help_heading(mut self, heading: &'a str) -> Self { self.help_headings.push(Some(heading)); self } /// Stop using custom section headings. 
    pub fn stop_custom_headings(mut self) -> Self {
        // A `None` entry on the heading stack makes subsequent args use the default section.
        self.help_headings.push(None);
        self
    }

    /// Adds multiple [arguments] to the list of valid possibilities
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use clap::{App, Arg};
    /// App::new("myprog")
    ///     .args(&[
    ///         Arg::from_usage("[debug] -d 'turns on debugging info'"),
    ///         Arg::with_name("input").index(1).help("the input file to use")
    ///     ])
    /// # ;
    /// ```
    /// [arguments]: ./struct.Arg.html
    pub fn args<I, T>(mut self, args: I) -> Self
    where
        I: IntoIterator<Item = T>,
        T: Into<Arg<'a, 'b>>,
    {
        // @TODO @perf @p4 @v3-beta: maybe extend_from_slice would be possible and perform better?
        // But that may also not let us do `&["-a 'some'", "-b 'other']` because of not Into<Arg>
        for arg in args.into_iter() {
            self.args.push(arg.into());
        }
        self
    }

    /// Allows adding a [`SubCommand`] alias, which function as "hidden" subcommands that
    /// automatically dispatch as if this subcommand was used. This is more efficient, and easier
    /// than creating multiple hidden subcommands as one only needs to check for the existence of
    /// this command, and not all variants.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use clap::{App, Arg, SubCommand};
    /// let m = App::new("myprog")
    ///     .subcommand(SubCommand::with_name("test")
    ///         .alias("do-stuff"))
    ///     .get_matches_from(vec!["myprog", "do-stuff"]);
    /// assert_eq!(m.subcommand_name(), Some("test"));
    /// ```
    /// [`SubCommand`]: ./struct.SubCommand.html
    pub fn alias<S: Into<&'b str>>(mut self, name: S) -> Self {
        // Aliases are stored lazily: the Vec is only allocated on first use.
        if let Some(ref mut als) = self.aliases {
            als.push((name.into(), false));
        } else {
            self.aliases = Some(vec![(name.into(), false)]);
        }
        self
    }

    /// Allows adding [`SubCommand`] aliases, which function as "hidden" subcommands that
    /// automatically dispatch as if this subcommand was used. This is more efficient, and easier
    /// than creating multiple hidden subcommands as one only needs to check for the existence of
    /// this command, and not all variants.
/// /// # Examples /// /// ```rust /// # use clap::{App, Arg, SubCommand}; /// let m = App::new("myprog") /// .subcommand(SubCommand::with_name("test") /// .aliases(&["do-stuff", "do-tests", "tests"])) /// .arg(Arg::with_name("input") /// .help("the file to add") /// .index(1) /// .required(false)) /// .get_matches_from(vec!["myprog", "do-tests"]); /// assert_eq!(m.subcommand_name(), Some("test")); /// ``` /// [`SubCommand`]: ./struct.SubCommand.html pub fn aliases(mut self, names: &[&'b str]) -> Self { if let Some(ref mut als) = self.aliases { for n in names { als.push((n, false)); } } else { self.aliases = Some(names.iter().map(|n| (*n, false)).collect::<Vec<_>>()); } self } /// Allows adding a [`SubCommand`] alias that functions exactly like those defined with /// [`App::alias`], except that they are visible inside the help message. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg, SubCommand}; /// let m = App::new("myprog") /// .subcommand(SubCommand::with_name("test") /// .visible_alias("do-stuff")) /// .get_matches_from(vec!["myprog", "do-stuff"]); /// assert_eq!(m.subcommand_name(), Some("test")); /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`App::alias`]: ./struct.App.html#method.alias pub fn visible_alias<S: Into<&'b str>>(mut self, name: S) -> Self { if let Some(ref mut als) = self.aliases { als.push((name.into(), true)); } else { self.aliases = Some(vec![(name.into(), true)]); } self } /// Allows adding multiple [`SubCommand`] aliases that functions exactly like those defined /// with [`App::aliases`], except that they are visible inside the help message. 
/// /// # Examples /// /// ```no_run /// # use clap::{App, Arg, SubCommand}; /// let m = App::new("myprog") /// .subcommand(SubCommand::with_name("test") /// .visible_aliases(&["do-stuff", "tests"])) /// .get_matches_from(vec!["myprog", "do-stuff"]); /// assert_eq!(m.subcommand_name(), Some("test")); /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`App::aliases`]: ./struct.App.html#method.aliases pub fn visible_aliases(mut self, names: &[&'b str]) -> Self { if let Some(ref mut als) = self.aliases { for n in names { als.push((n, true)); } } else { self.aliases = Some(names.iter().map(|n| (*n, true)).collect::<Vec<_>>()); } self } /// Adds an [`ArgGroup`] to the application. [`ArgGroup`]s are a family of related arguments. /// By placing them in a logical group, you can build easier requirement and exclusion rules. /// For instance, you can make an entire [`ArgGroup`] required, meaning that one (and *only* /// one) argument from that group must be present at runtime. /// /// You can also do things such as name an [`ArgGroup`] as a conflict to another argument. /// Meaning any of the arguments that belong to that group will cause a failure if present with /// the conflicting argument. /// /// Another added benefit of [`ArgGroup`]s is that you can extract a value from a group instead /// of determining exactly which argument was used. /// /// Finally, using [`ArgGroup`]s to ensure exclusion between arguments is another very common /// use /// /// # Examples /// /// The following example demonstrates using an [`ArgGroup`] to ensure that one, and only one, /// of the arguments from the specified group is present at runtime. 
/// /// ```no_run /// # use clap::{App, ArgGroup}; /// App::new("app") /// .args_from_usage( /// "--set-ver [ver] 'set the version manually' /// --major 'auto increase major' /// --minor 'auto increase minor' /// --patch 'auto increase patch'") /// .group(ArgGroup::with_name("vers") /// .args(&["set-ver", "major", "minor","patch"]) /// .required(true)) /// # ; /// ``` /// [`ArgGroup`]: ./struct.ArgGroup.html pub fn group(mut self, group: ArgGroup<'a>) -> Self { self.groups.push(group); self } /// Adds multiple [`ArgGroup`]s to the [`App`] at once. /// /// # Examples /// /// ```no_run /// # use clap::{App, ArgGroup}; /// App::new("app") /// .args_from_usage( /// "--set-ver [ver] 'set the version manually' /// --major 'auto increase major' /// --minor 'auto increase minor' /// --patch 'auto increase patch' /// -c [FILE] 'a config file' /// -i [IFACE] 'an interface'") /// .groups(&[ /// ArgGroup::with_name("vers") /// .args(&["set-ver", "major", "minor","patch"]) /// .required(true), /// ArgGroup::with_name("input") /// .args(&["c", "i"]) /// ]) /// # ; /// ``` /// [`ArgGroup`]: ./struct.ArgGroup.html /// [`App`]: ./struct.App.html pub fn groups(mut self, groups: &[ArgGroup<'a>]) -> Self { for g in groups { self = self.group(g.into()); } self } /// Adds a [`SubCommand`] to the list of valid possibilities. Subcommands are effectively /// sub-[`App`]s, because they can contain their own arguments, subcommands, version, usage, /// etc. They also function just like [`App`]s, in that they get their own auto generated help, /// version, and usage. 
/// /// # Examples /// /// ```no_run /// # use clap::{App, Arg, SubCommand}; /// App::new("myprog") /// .subcommand(SubCommand::with_name("config") /// .about("Controls configuration features") /// .arg_from_usage("<config> 'Required configuration file to use'")) /// # ; /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`App`]: ./struct.App.html pub fn subcommand(mut self, subcmd: App<'a, 'b>) -> Self { self.subcommands.push(subcmd); self } /// Adds multiple subcommands to the list of valid possibilities by iterating over an /// [`IntoIterator`] of [`SubCommand`]s /// /// # Examples /// /// ```rust /// # use clap::{App, Arg, SubCommand}; /// # App::new("myprog") /// .subcommands( vec![ /// SubCommand::with_name("config").about("Controls configuration functionality") /// .arg(Arg::with_name("config_file").index(1)), /// SubCommand::with_name("debug").about("Controls debug functionality")]) /// # ; /// ``` /// [`SubCommand`]: ./struct.SubCommand.html /// [`IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html pub fn subcommands<I>(mut self, subcmds: I) -> Self where I: IntoIterator<Item = App<'a, 'b>>, { for subcmd in subcmds { self.subcommands.push(subcmd); } self } /// Allows custom ordering of [`SubCommand`]s within the help message. Subcommands with a lower /// value will be displayed first in the help message. This is helpful when one would like to /// emphasise frequently used subcommands, or prioritize those towards the top of the list. /// Duplicate values **are** allowed. Subcommands with duplicate display orders will be /// displayed in alphabetical order. /// /// **NOTE:** The default is 999 for all subcommands. /// /// # Examples /// /// ```rust /// # use clap::{App, SubCommand}; /// let m = App::new("cust-ord") /// .subcommand(SubCommand::with_name("alpha") // typically subcommands are grouped /// // alphabetically by name. 
Subcommands /// // without a display_order have a value of /// // 999 and are displayed alphabetically with /// // all other 999 subcommands /// .about("Some help and text")) /// .subcommand(SubCommand::with_name("beta") /// .display_order(1) // In order to force this subcommand to appear *first* /// // all we have to do is give it a value lower than 999. /// // Any other subcommands with a value of 1 will be displayed /// // alphabetically with this one...then 2 values, then 3, etc. /// .about("I should be first!")) /// .get_matches_from(vec![ /// "cust-ord", "--help" /// ]); /// ``` /// /// The above example displays the following help message /// /// ```text /// cust-ord /// /// USAGE: /// cust-ord [FLAGS] [OPTIONS] /// /// FLAGS: /// -h, --help Prints help information /// -V, --version Prints version information /// /// SUBCOMMANDS: /// beta I should be first! /// alpha Some help and text /// ``` /// [`SubCommand`]: ./struct.SubCommand.html pub fn display_order(mut self, ord: usize) -> Self { self.disp_ord = ord; self } /// Allows one to mutate an [`Arg`] after it's been added to an `App`. 
///
/// # Examples
///
/// ```rust
/// # use clap::{App, Arg};
///
/// let mut app = App::new("foo")
///     .arg(Arg::with_name("bar")
///         .short("b"))
///     .mut_arg("bar", |a| a.short("B"));
///
/// let res = app.try_get_matches_from_mut(vec!["foo", "-b"]);
///
/// // Since we changed `bar`'s short to "B" this should err as there
/// // is no `-b` anymore, only `-B`
///
/// assert!(res.is_err());
///
/// let res = app.try_get_matches_from_mut(vec!["foo", "-B"]);
/// assert!(res.is_ok());
/// ```
/// [`Arg`]: ./struct.Arg.html
pub fn mut_arg<F>(mut self, arg: &'a str, f: F) -> Self
where
    F: FnOnce(Arg<'a, 'b>) -> Arg<'a, 'b>,
{
    // Locate an existing arg with this name; `position` replaces the more
    // verbose enumerate/filter_map/next chain the original used.
    let existing = self.args.iter().position(|a| a.name == arg);
    // Pull the arg out of the list (swap_remove is O(1); like the original,
    // the arg is re-appended at the end below) or start a fresh one, then let
    // the caller's closure transform it. The previous version bound these in
    // unused `mut` locals, which triggered compiler warnings.
    let a = match existing {
        Some(idx) => f(self.args.swap_remove(idx)),
        None => f(Arg::with_name(arg)),
    };
    self.args.push(a);
    self
}

/// Prints the full help message to [`io::stdout()`] using a [`BufWriter`] using the same
/// method as if someone ran `-h` to request the help message
///
/// **NOTE:** clap has the ability to distinguish between "short" and "long" help messages
/// depending on if the user ran [`-h` (short)] or [`--help` (long)]
///
/// # Examples
///
/// ```rust
/// # use clap::App;
/// let mut app = App::new("myprog");
/// app.print_help();
/// ```
/// [`io::stdout()`]: https://doc.rust-lang.org/std/io/fn.stdout.html
/// [`BufWriter`]: https://doc.rust-lang.org/std/io/struct.BufWriter.html
/// [`-h` (short)]: ./struct.Arg.html#method.help
/// [`--help` (long)]: ./struct.Arg.html#method.long_help
pub fn print_help(&mut self) -> ClapResult<()> {
    // If there are global arguments, or settings, we need to propagate them
    // down to subcommands before parsing in case we run into a subcommand
    self._build(Propagation::NextLevel);

    let out = io::stdout();
    let mut buf_w = BufWriter::new(out.lock());
    self.write_help(&mut buf_w)
}

/// Prints the full help message to [`io::stdout()`] using a [`BufWriter`] using the
same /// method as if someone ran `--help` to request the help message /// /// **NOTE:** clap has the ability to distinguish between "short" and "long" help messages /// depending on if the user ran [`-h` (short)] or [`--help` (long)] /// /// # Examples /// /// ```rust /// # use clap::App; /// let mut app = App::new("myprog"); /// app.print_long_help(); /// ``` /// [`io::stdout()`]: https://doc.rust-lang.org/std/io/fn.stdout.html /// [`BufWriter`]: https://doc.rust-lang.org/std/io/struct.BufWriter.html /// [`-h` (short)]: ./struct.Arg.html#method.help /// [`--help` (long)]: ./struct.Arg.html#method.long_help pub fn print_long_help(&mut self) -> ClapResult<()> { // If there are global arguments, or settings we need to propagate them down to subcommands // before parsing incase we run into a subcommand self._build(Propagation::NextLevel); let out = io::stdout(); let mut buf_w = BufWriter::new(out.lock()); self.write_long_help(&mut buf_w) } /// Writes the full help message to the user to a [`io::Write`] object in the same method as if /// the user ran `-h` /// /// **NOTE:** clap has the ability to distinguish between "short" and "long" help messages /// depending on if the user ran [`-h` (short)] or [`--help` (long)] /// /// # Examples /// /// ```rust /// # use clap::App; /// use std::io; /// let mut app = App::new("myprog"); /// let mut out = io::stdout(); /// app.write_help(&mut out).expect("failed to write to stdout"); /// ``` /// [`io::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html /// [`-h` (short)]: ./struct.Arg.html#method.help /// [`--help` (long)]: ./struct.Arg.html#method.long_help pub fn write_help<W: Write>(&mut self, w: &mut W) -> ClapResult<()> { self._build(Propagation::NextLevel); let p = Parser::new(self); Help::write_parser_help(w, &p, false) } /// Writes the full help message to the user to a [`io::Write`] object in the same method as if /// the user ran `--help` /// /// **NOTE:** clap has the ability to distinguish between "short" and 
"long" help messages /// depending on if the user ran [`-h` (short)] or [`--help` (long)] /// /// # Examples /// /// ```rust /// # use clap::App; /// use std::io; /// let mut app = App::new("myprog"); /// let mut out = io::stdout(); /// app.write_long_help(&mut out).expect("failed to write to stdout"); /// ``` /// [`io::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html /// [`-h` (short)]: ./struct.Arg.html#method.help /// [`--help` (long)]: ./struct.Arg.html#method.long_help pub fn write_long_help<W: Write>(&mut self, w: &mut W) -> ClapResult<()> { self._build(Propagation::NextLevel); let p = Parser::new(self); Help::write_parser_help(w, &p, true) } /// Writes the version message to the user to a [`io::Write`] object as if the user ran `-V`. /// /// **NOTE:** clap has the ability to distinguish between "short" and "long" version messages /// depending on if the user ran [`-V` (short)] or [`--version` (long)] /// /// # Examples /// /// ```rust /// # use clap::App; /// use std::io; /// let mut app = App::new("myprog"); /// let mut out = io::stdout(); /// app.write_version(&mut out).expect("failed to write to stdout"); /// ``` /// [`io::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html /// [`-V` (short)]: ./struct.App.html#method.version /// [`--version` (long)]: ./struct.App.html#method.long_version pub fn write_version<W: Write>(&self, w: &mut W) -> ClapResult<()> { self._write_version(w, false).map_err(From::from) } /// Writes the version message to the user to a [`io::Write`] object /// /// **NOTE:** clap has the ability to distinguish between "short" and "long" version messages /// depending on if the user ran [`-V` (short)] or [`--version` (long)] /// /// # Examples /// /// ```rust /// # use clap::App; /// use std::io; /// let mut app = App::new("myprog"); /// let mut out = io::stdout(); /// app.write_long_version(&mut out).expect("failed to write to stdout"); /// ``` /// [`io::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html /// 
[`-V` (short)]: ./struct.App.html#method.version /// [`--version` (long)]: ./struct.App.html#method.long_version pub fn write_long_version<W: Write>(&self, w: &mut W) -> ClapResult<()> { self._write_version(w, true).map_err(From::from) } /// Starts the parsing process, upon a failed parse an error will be displayed to the user and /// the process will exit with the appropriate error code. By default this method gets all user /// provided arguments from [`env::args_os`] in order to allow for invalid UTF-8 code points, /// which are legal on many platforms. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let matches = App::new("myprog") /// // Args and options go here... /// .get_matches(); /// ``` /// [`env::args_os`]: https://doc.rust-lang.org/std/env/fn.args_os.html pub fn get_matches(self) -> ArgMatches<'a> { self.get_matches_from(&mut env::args_os()) } /// Starts the parsing process, just like [`App::get_matches`] but doesn't consume the `App` /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let mut app = App::new("myprog") /// // Args and options go here... /// ; /// let matches = app.get_matches_mut(); /// ``` /// [`env::args_os`]: https://doc.rust-lang.org/std/env/fn.args_os.html /// [`App::get_matches`]: ./struct.App.html#method.get_matches pub fn get_matches_mut(&mut self) -> ArgMatches<'a> { self.try_get_matches_from_mut(&mut env::args_os()).unwrap_or_else(|e| { // Otherwise, write to stderr and exit if e.use_stderr() { wlnerr!("{}", e.message); if self.settings.is_set(AppSettings::WaitOnError) { wlnerr!("\nPress [ENTER] / [RETURN] to continue..."); let mut s = String::new(); let i = io::stdin(); i.lock().read_line(&mut s).unwrap(); } drop(self); drop(e); process::exit(1); } drop(self); e.exit() }) } /// Starts the parsing process. This method will return a [`clap::Result`] type instead of exiting /// the process on failed parse. 
By default this method gets matches from [`env::args_os`] /// /// **NOTE:** This method WILL NOT exit when `--help` or `--version` (or short versions) are /// used. It will return a [`clap::Error`], where the [`kind`] is a /// [`ErrorKind::HelpDisplayed`] or [`ErrorKind::VersionDisplayed`] respectively. You must call /// [`Error::exit`] or perform a [`std::process::exit`]. /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let matches = App::new("myprog") /// // Args and options go here... /// .try_get_matches() /// .unwrap_or_else( |e| e.exit() ); /// ``` /// [`env::args_os`]: https://doc.rust-lang.org/std/env/fn.args_os.html /// [`ErrorKind::HelpDisplayed`]: ./enum.ErrorKind.html#variant.HelpDisplayed /// [`ErrorKind::VersionDisplayed`]: ./enum.ErrorKind.html#variant.VersionDisplayed /// [`Error::exit`]: ./struct.Error.html#method.exit /// [`std::process::exit`]: https://doc.rust-lang.org/std/process/fn.exit.html /// [`clap::Result`]: ./type.Result.html /// [`clap::Error`]: ./struct.Error.html /// [`kind`]: ./struct.Error.html pub fn try_get_matches(self) -> ClapResult<ArgMatches<'a>> { // Start the parsing self.try_get_matches_from(&mut env::args_os()) } /// Starts the parsing process. Like [`App::get_matches`] this method does not return a [`clap::Result`] /// and will automatically exit with an error message. This method, however, lets you specify /// what iterator to use when performing matches, such as a [`Vec`] of your making. /// /// **NOTE:** The first argument will be parsed as the binary name unless /// [`AppSettings::NoBinaryName`] is used /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let arg_vec = vec!["my_prog", "some", "args", "to", "parse"]; /// /// let matches = App::new("myprog") /// // Args and options go here... 
/// .get_matches_from(arg_vec); /// ``` /// [`App::get_matches`]: ./struct.App.html#method.get_matches /// [`clap::Result`]: ./type.Result.html /// [`Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html /// [`AppSettings::NoBinaryName`]: ./enum.AppSettings.html#variant.NoBinaryName pub fn get_matches_from<I, T>(mut self, itr: I) -> ArgMatches<'a> where I: IntoIterator<Item = T>, T: Into<OsString> + Clone, { self.try_get_matches_from_mut(itr).unwrap_or_else(|e| { // Otherwise, write to stderr and exit if e.use_stderr() { wlnerr!("{}", e.message); if self.settings.is_set(AppSettings::WaitOnError) { wlnerr!("\nPress [ENTER] / [RETURN] to continue..."); let mut s = String::new(); let i = io::stdin(); i.lock().read_line(&mut s).unwrap(); } drop(self); drop(e); process::exit(1); } drop(self); e.exit() }) } /// Starts the parsing process. A combination of [`App::get_matches_from`], and /// [`App::try_get_matches`] /// /// **NOTE:** This method WILL NOT exit when `--help` or `--version` (or short versions) are /// used. It will return a [`clap::Error`], where the [`kind`] is a [`ErrorKind::HelpDisplayed`] /// or [`ErrorKind::VersionDisplayed`] respectively. You must call [`Error::exit`] or /// perform a [`std::process::exit`] yourself. /// /// **NOTE:** The first argument will be parsed as the binary name unless /// [`AppSettings::NoBinaryName`] is used /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let arg_vec = vec!["my_prog", "some", "args", "to", "parse"]; /// /// let matches = App::new("myprog") /// // Args and options go here... 
/// .try_get_matches_from(arg_vec) /// .unwrap_or_else( |e| { panic!("An error occurs: {}", e) }); /// ``` /// [`App::get_matches_from`]: ./struct.App.html#method.get_matches_from /// [`App::try_get_matches`]: ./struct.App.html#method.get_matches_safe /// [`ErrorKind::HelpDisplayed`]: ./enum.ErrorKind.html#variant.HelpDisplayed /// [`ErrorKind::VersionDisplayed`]: ./enum.ErrorKind.html#variant.VersionDisplayed /// [`Error::exit`]: ./struct.Error.html#method.exit /// [`std::process::exit`]: https://doc.rust-lang.org/std/process/fn.exit.html /// [`clap::Error`]: ./struct.Error.html /// [`Error::exit`]: ./struct.Error.html#method.exit /// [`kind`]: ./struct.Error.html /// [`AppSettings::NoBinaryName`]: ./enum.AppSettings.html#variant.NoBinaryName pub fn try_get_matches_from<I, T>(mut self, itr: I) -> ClapResult<ArgMatches<'a>> where I: IntoIterator<Item = T>, T: Into<OsString> + Clone, { self.try_get_matches_from_mut(itr) } /// Starts the parsing process without consuming the [`App`] struct `self`. This is normally not /// the desired functionality, instead prefer [`App::try_get_matches_from`] which *does* /// consume `self`. /// /// **NOTE:** The first argument will be parsed as the binary name unless /// [`AppSettings::NoBinaryName`] is used /// /// # Examples /// /// ```no_run /// # use clap::{App, Arg}; /// let arg_vec = vec!["my_prog", "some", "args", "to", "parse"]; /// /// let mut app = App::new("myprog"); /// // Args and options go here... 
/// let matches = app.try_get_matches_from_mut(arg_vec)
///     .unwrap_or_else( |e| { panic!("An error occurs: {}", e) });
/// ```
/// [`App`]: ./struct.App.html
/// [`App::try_get_matches_from`]: ./struct.App.html#method.try_get_matches_from
/// [`AppSettings::NoBinaryName`]: ./enum.AppSettings.html#variant.NoBinaryName
pub fn try_get_matches_from_mut<I, T>(&mut self, itr: I) -> ClapResult<ArgMatches<'a>>
where
    I: IntoIterator<Item = T>,
    T: Into<OsString> + Clone,
{
    let mut it = itr.into_iter();
    // Get the name of the program from argument 1 of env::args() and determine
    // the actual file that was used to execute it. A program invoked as
    // `./target/release/my_prog -a` arrives as two arguments,
    // './target/release/my_prog' and '-a'; only the file-name portion is kept
    // so the full path doesn't show up in help/usage messages and such.
    if !self.settings.is_set(AppSettings::NoBinaryName) {
        if let Some(name) = it.next() {
            let bn_os = name.into();
            let p = Path::new(&*bn_os);
            if let Some(f) = p.file_name() {
                // Skipped silently when the name isn't valid UTF-8.
                if let Some(s) = f.to_os_string().to_str() {
                    // An already-set bin_name (e.g. set explicitly by the user) wins.
                    if self.bin_name.is_none() {
                        self.bin_name = Some(s.to_owned());
                    }
                }
            }
        }
    }

    self._do_parse(&mut it.peekable())
}
}

// Internally used only
#[doc(hidden)]
impl<'a, 'b> App<'a, 'b> {
    // Core parse driver: ensures the app is built (globals/settings propagated),
    // runs the Parser over `it`, then collects global arg names so they can be
    // propagated into the matcher.
    #[doc(hidden)]
    fn _do_parse<I, T>(&mut self, it: &mut Peekable<I>) -> ClapResult<ArgMatches<'a>>
    where
        I: Iterator<Item = T>,
        T: Into<OsString> + Clone,
    {
        debugln!("App::_do_parse;");
        let mut matcher = ArgMatcher::new();

        // If there are global arguments, or settings, we need to propagate them
        // down to subcommands before parsing in case we run into a subcommand.
        if !self.settings.is_set(AppSettings::Propagated) {
            self._build(Propagation::NextLevel);
        }

        {
            let mut parser = Parser::new(self);

            // do the real parsing
            if let Err(e) = parser.get_matches_with(&mut matcher, it) {
                return Err(e);
            }
        }

        // Names of every arg marked Global; used right after this to propagate
        // their values across subcommand matchers.
        let global_arg_vec: Vec<&str> = (&self)
            .args
            .iter()
            .filter(|a| a.is_set(ArgSettings::Global))
            .map(|ga| ga.name)
            .collect();
matcher.propagate_globals(&global_arg_vec); Ok(matcher.into()) } fn _build(&mut self, prop: Propagation) { debugln!("App::_build;"); // Make sure all the globally set flags apply to us as well self.settings = self.settings | self.g_settings; // Depending on if DeriveDisplayOrder is set or not, we need to determine when we build // the help and version flags, otherwise help message orders get screwed up if self.settings.is_set(AppSettings::DeriveDisplayOrder) { self._derive_display_order(); self._create_help_and_version(); self._propagate(prop); } else { self._create_help_and_version(); self._propagate(prop); self._derive_display_order(); } // Perform expensive debug assertions debug_assert!({ for a in &self.args { self._arg_debug_asserts(a); } true }); for a in &mut self.args { // Fill in the groups if let Some(ref grps) = a.groups { for g in grps { let mut found = false; if let Some(ref mut ag) = groups_mut!(self).find(|grp| &grp.name == g) { ag.args.push(a.name); found = true; } if !found { let mut ag = ArgGroup::with_name(g); ag.args.push(a.name); self.groups.push(ag); } } } // Figure out implied settings if a.is_set(ArgSettings::Last) { // if an arg has `Last` set, we need to imply DontCollapseArgsInUsage so that args // in the usage string don't get confused or left out. 
self.settings.set(AppSettings::DontCollapseArgsInUsage); self.settings.set(AppSettings::ContainsLast); } a._build(); } debug_assert!(self._app_debug_asserts()); self.settings.set(AppSettings::Propagated); } // Perform some expensive assertions on the Parser itself fn _app_debug_asserts(&mut self) -> bool { debugln!("App::app_debug_asserts;"); // * Args listed inside groups should exist // * Groups should not have naming conflicts with Args let g = groups!(self).find(|g| { g.args .iter() .any(|arg| !(find!(self, arg).is_some() || groups!(self).any(|g| &g.name == arg))) }); assert!( g.is_none(), "The group '{}' contains an arg that doesn't exist or has a naming conflict with a group.", g.unwrap().name ); true } // @TODO @v3-alpha @perf: should only propagate globals to subcmd we find, or for help pub fn _propagate(&mut self, prop: Propagation) { debugln!("App::_propagate:{}", self.name); for sc in &mut self.subcommands { // We have to create a new scope in order to tell rustc the borrow of `sc` is // done and to recursively call this method { let vsc = self.settings.is_set(AppSettings::VersionlessSubcommands); let gv = self.settings.is_set(AppSettings::GlobalVersion); if vsc { sc.set(AppSettings::DisableVersion); } if gv && sc.version.is_none() && self.version.is_some() { sc.set(AppSettings::GlobalVersion); sc.version = Some(self.version.unwrap()); } sc.settings = sc.settings | self.g_settings; sc.g_settings = sc.g_settings | self.g_settings; sc.term_w = self.term_w; sc.max_w = self.max_w; } { for a in self.args.iter().filter(|a| a.is_set(ArgSettings::Global)) { sc.args.push(a.clone()); } } // @TODO @deadcode @perf @v3-alpha: Currently we're not propagating if prop == Propagation::Full { sc._build(Propagation::Full); } } } pub(crate) fn _create_help_and_version(&mut self) { debugln!("App::_create_help_and_version;"); // name is "hclap_help" because flags are sorted by name if !self.contains_long("help") { debugln!("App::_create_help_and_version: Building --help"); if 
self.help_short.is_none() && !self.contains_short('h') { self.help_short = Some('h'); } let mut arg = Arg::with_name("hclap_help") .long("help") .help(self.help_message.unwrap_or("Prints help information")); // we have to set short manually because we're dealing with char's arg.short = self.help_short; self.args.push(arg); } else { self.settings.unset(AppSettings::NeedsLongHelp); } if !self.is_set(AppSettings::DisableVersion) && !self.contains_long("version") { debugln!("App::_create_help_and_version: Building --version"); if self.version_short.is_none() && !self.contains_short('V') { self.version_short = Some('V'); } // name is "vclap_version" because flags are sorted by name let mut arg = Arg::with_name("vclap_version") .long("version") .help(self.version_message.unwrap_or("Prints version information")); // we have to set short manually because we're dealing with char's arg.short = self.version_short; self.args.push(arg); } else { self.settings.unset(AppSettings::NeedsLongVersion); } if self.has_subcommands() && !self.is_set(AppSettings::DisableHelpSubcommand) && !subcommands!(self).any(|s| s.name == "help") { debugln!("App::_create_help_and_version: Building help"); self.subcommands.push( App::new("help") .about("Prints this message or the help of the given subcommand(s)"), ); } else { self.settings.unset(AppSettings::NeedsSubcommandHelp); } } pub(crate) fn _derive_display_order(&mut self) { debugln!("App::_derive_display_order:{}", self.name); if self.settings.is_set(AppSettings::DeriveDisplayOrder) { for (i, a) in args_mut!(self).filter(|a| a.has_switch()) .filter(|a| a.disp_ord == 999) .enumerate() { a.disp_ord = i; } for (i, sc) in &mut subcommands_mut!(self) .enumerate() .filter(|&(_, ref sc)| sc.disp_ord == 999) { sc.disp_ord = i; } } for sc in subcommands_mut!(self) { sc._derive_display_order(); } } // Perform expensive assertions on the Arg instance fn _arg_debug_asserts(&self, a: &Arg) -> bool { debugln!("App::_arg_debug_asserts:{}", a.name); // No 
naming conflicts assert!( arg_names!(self).fold(0, |acc, n| if n == a.name { acc + 1 } else { acc }) < 2, format!("Non-unique argument name: {} is already in use", a.name) ); // Long conflicts if let Some(l) = a.long { assert!( args!(self).fold(0, |acc, arg| if arg.long == Some(l) { acc + 1 } else { acc }) < 2, "Argument long must be unique\n\n\t--{} is already in use", l ); } // Short conflicts if let Some(s) = a.short { assert!( args!(self).fold(0, |acc, arg| if arg.short == Some(s) { acc + 1 } else { acc }) < 2, "Argument short must be unique\n\n\t-{} is already in use", s ); } if let Some(idx) = a.index { // No index conflicts assert!( positionals!(self).fold(0, |acc, p| if p.index == Some(idx as u64){acc+1}else{acc}) < 2, "Argument '{}' has the same index as another positional \ argument\n\n\tUse Arg::setting(ArgSettings::MultipleValues) to allow one \ positional argument to take multiple values", a.name ); } if a.is_set(ArgSettings::Last) { assert!(a.long.is_none(), "Flags or Options may not have last(true) set. {} has both a long and \ last(true) set.", a.name); assert!(a.short.is_none(), "Flags or Options may not have last(true) set. 
{} has both a short and \ last(true) set.", a.name); } assert!( !(a.is_set(ArgSettings::Required) && a.is_set(ArgSettings::Global)), "Global arguments cannot be required.\n\n\t'{}' is marked as \ global and required", a.name ); true } fn _build_bin_names(&mut self) { debugln!("App::_build_bin_names;"); for sc in subcommands_mut!(self) { debug!("Parser::build_bin_names:iter: bin_name set..."); if sc.bin_name.is_none() { sdebugln!("No"); let bin_name = format!( "{}{}{}", self.bin_name.as_ref().unwrap_or(&self.name.clone()), if self.bin_name.is_some() { " " } else { "" }, &*sc.name ); debugln!( "Parser::build_bin_names:iter: Setting bin_name of {} to {}", self.name, bin_name ); sc.bin_name = Some(bin_name); } else { sdebugln!("yes ({:?})", sc.bin_name); } debugln!( "Parser::build_bin_names:iter: Calling build_bin_names from...{}", sc.name ); sc._build_bin_names(); } } pub(crate) fn _write_version<W: Write>(&self, w: &mut W, use_long: bool) -> io::Result<()> { debugln!("App::_write_version;"); let ver = if use_long { self.long_version .unwrap_or_else(|| self.version.unwrap_or("")) } else { self.version .unwrap_or_else(|| self.long_version.unwrap_or("")) }; if let Some(bn) = self.bin_name.as_ref() { if bn.contains(' ') { // Incase we're dealing with subcommands i.e. git mv is translated to git-mv write!(w, "{} {}", bn.replace(" ", "-"), ver) } else { write!(w, "{} {}", &self.name[..], ver) } } else { write!(w, "{} {}", &self.name[..], ver) } } } // Internal Query Methods #[doc(hidden)] impl<'a, 'b> App<'a, 'b> { // Should we color the output? 
None=determined by output location, true=yes, false=no #[doc(hidden)] pub fn color(&self) -> ColorWhen { debugln!("App::color;"); debug!("App::color: Color setting..."); if self.is_set(AppSettings::ColorNever) { sdebugln!("Never"); ColorWhen::Never } else if self.is_set(AppSettings::ColorAlways) { sdebugln!("Always"); ColorWhen::Always } else { sdebugln!("Auto"); ColorWhen::Auto } } fn contains_long(&self, l: &str) -> bool { longs!(self).any(|al| al == l) } fn contains_short(&self, s: char) -> bool { shorts!(self).any(|arg_s| arg_s == s) } pub fn is_set(&self, s: AppSettings) -> bool { self.settings.is_set(s) || self.g_settings.is_set(s) } pub fn set(&mut self, s: AppSettings) { self.settings.set(s) } pub fn set_global(&mut self, s: AppSettings) { self.g_settings.set(s) } pub fn unset_global(&mut self, s: AppSettings) { self.g_settings.unset(s) } pub fn unset(&mut self, s: AppSettings) { self.settings.unset(s) } pub fn has_subcommands(&self) -> bool { !self.subcommands.is_empty() } pub fn has_args(&self) -> bool { !self.args.is_empty() } pub fn has_opts(&self) -> bool { opts!(self).count() > 0 } pub fn has_flags(&self) -> bool { flags!(self).count() > 0 } pub fn has_positionals(&self) -> bool { positionals!(self).count() > 0 } pub fn has_visible_opts(&self) -> bool { opts!(self).any(|o| !o.is_set(ArgSettings::Hidden)) } pub fn has_visible_flags(&self) -> bool { flags!(self).any(|o| !o.is_set(ArgSettings::Hidden)) } pub fn has_visible_positionals(&self) -> bool { positionals!(self).any(|o| !o.is_set(ArgSettings::Hidden)) } pub fn has_visible_subcommands(&self) -> bool { subcommands!(self) .filter(|sc| sc.name != "help") .any(|sc| !sc.is_set(AppSettings::Hidden)) } fn use_long_help(&self) -> bool { self.long_about.is_some() || self.args.iter().any(|f| f.long_help.is_some()) || subcommands!(self).any(|s| s.long_about.is_some()) } } // @TODO @v3-beta: remove // Deprecations impl<'a, 'b> App<'a, 'b> { /// **Deprecated:** Use `App::global_setting( SettingOne | SettingTwo 
)` instead #[deprecated(since="2.33.0", note="Use `App::global_setting( SettingOne | SettingTwo )` instead")] pub fn global_settings(mut self, settings: &[AppSettings]) -> Self { for s in settings { self.settings.set(*s); self.g_settings.set(*s) } self } /// **Deprecated:** Use `App::setting( SettingOne | SettingTwo )` instead #[deprecated(since="2.33.0", note="Use `App::setting( SettingOne | SettingTwo )` instead")] pub fn settings(mut self, settings: &[AppSettings]) -> Self { for s in settings { self.settings.set(*s); } self } /// **Deprecated:** Use `App::unset_setting( SettingOne | SettingTwo )` instead #[deprecated(since="2.33.0", note="Use `App::unset_setting( SettingOne | SettingTwo )` instead")] pub fn unset_settings(mut self, settings: &[AppSettings]) -> Self { for s in settings { self.settings.unset(*s); self.g_settings.unset(*s); } self } /// **Deprecated:** Use explicit `App::author()` and `App::version()` calls instead. #[deprecated(since="2.14.1", note="Can never work; use explicit App::author() and \ App::version() calls instead. Will be removed in v3.0-beta")] pub fn with_defaults<S: Into<String>>(n: S) -> Self { App { name: n.into(), author: Some("Kevin K. <kbknapp@gmail.com>"), version: Some("2.19.2"), ..Default::default() } } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Use serde instead. Will be removed in v3.0-beta")] #[cfg(feature = "yaml")] pub fn from_yaml(yaml: &'a Yaml) -> App<'a, 'a> { App::from(yaml) } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Use `App::mut_arg(\"help\", |a| a.short(\"H\"))` instead. Will be removed in v3.0-beta")] pub fn help_short<S: AsRef<str> + 'b>(mut self, s: S) -> Self { let c = s.as_ref() .trim_left_matches(|c| c == '-') .chars() .nth(0) .unwrap_or('h'); self.help_short = Some(c); self } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Use `App::mut_arg(\"version\", |a| a.short(\"v\"))` instead. 
Will be removed in v3.0-beta")] pub fn version_short<S: AsRef<str>>(mut self, s: S) -> Self { let c = s.as_ref() .trim_left_matches(|c| c == '-') .chars() .nth(0) .unwrap_or('V'); self.version_short = Some(c); self } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Use `App::mut_arg(\"help\", |a| a.help(\"Some message\"))` instead. Will be removed in v3.0-beta")] pub fn help_message<S: Into<&'a str>>(mut self, s: S) -> Self { self.help_message = Some(s.into()); self } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Use `App::mut_arg(\"version\", |a| a.short(\"Some message\"))` instead. Will be removed in v3.0-beta")] pub fn version_message<S: Into<&'a str>>(mut self, s: S) -> Self { self.version_message = Some(s.into()); self } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Renamed to `App::override_usage`. Will be removed in v3.0-beta")] pub fn usage<S: Into<&'b str>>(mut self, usage: S) -> Self { self.usage_str = Some(usage.into()); self } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Renamed to `App::override_help`. Will be removed in v3.0-beta")] pub fn help<S: Into<&'b str>>(mut self, help: S) -> Self { self.help_str = Some(help.into()); self } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Renamed to `App::help_template`. Will be removed in v3.0-beta")] pub fn template<S: Into<&'b str>>(mut self, s: S) -> Self { self.template = Some(s.into()); self } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Use `App::arg(Arg::from(&str)` instead. Will be removed in v3.0-beta")] pub fn arg_from_usage(mut self, usage: &'a str) -> Self { self.args.push(Arg::from_usage(usage)); self } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Use `App::args(&str)` instead. 
Will be removed in v3.0-beta")] pub fn args_from_usage(mut self, usage: &'a str) -> Self { for line in usage.lines() { let l = line.trim(); if l.is_empty() { continue; } self.args.push(Arg::from_usage(l)); } self } /// **Deprecated:** Use #[allow(deprecated)] #[deprecated(since="2.30.0", note="Use `clap_completions crate and clap_completions::generate` instead. Will be removed in v3.0-beta")] pub fn gen_completions<T: Into<OsString>, S: Into<String>>( &mut self, bin_name: S, for_shell: Shell, out_dir: T, ) { use std::error::Error; let out_dir = PathBuf::from(out_dir.into()); let name = &*self.bin_name.as_ref().unwrap().clone(); let file_name = match for_shell { Shell::Bash => format!("{}.bash", name), Shell::Fish => format!("{}.fish", name), Shell::Zsh => format!("_{}", name), Shell::PowerShell => format!("_{}.ps1", name), }; let mut file = match File::create(out_dir.join(file_name)) { Err(why) => panic!("couldn't create completion file: {}", why.description()), Ok(file) => file, }; self.gen_completions_to(bin_name.into(), for_shell, &mut file) } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Use `clap_completions crate and clap_completions::generate_to` instead. Will be removed in v3.0-beta")] pub fn gen_completions_to<W: Write, S: Into<String>>( &mut self, bin_name: S, for_shell: Shell, buf: &mut W, ) { self.bin_name = Some(bin_name.into()); if !self.is_set(AppSettings::Propagated) { self._build(Propagation::Full); self._build_bin_names(); } ComplGen::new(self).generate(for_shell, buf) } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Renamed `App::try_get_matches` to be consistent with Rust naming conventions. Will be removed in v3.0-beta")] pub fn get_matches_safe(self) -> ClapResult<ArgMatches<'a>> { // Start the parsing self.try_get_matches_from(&mut env::args_os()) } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Renamed `App::try_get_matches_from` to be consistent with Rust naming conventions. 
Will be removed in v3.0-beta")] pub fn get_matches_from_safe<I, T>(mut self, itr: I) -> ClapResult<ArgMatches<'a>> where I: IntoIterator<Item = T>, T: Into<OsString> + Clone, { self.try_get_matches_from_mut(itr) } /// **Deprecated:** Use #[deprecated(since="2.30.0", note="Renamed `App::try_get_matches_from_mut` to be consistent with Rust naming conventions. Will be removed in v3.0-beta")] pub fn get_matches_from_safe_borrow<I, T>(&mut self, itr: I) -> ClapResult<ArgMatches<'a>> where I: IntoIterator<Item = T>, T: Into<OsString> + Clone, { self.try_get_matches_from_mut(itr) } } #[cfg(feature = "yaml")] impl<'a> From<&'a Yaml> for App<'a, 'a> { fn from(mut yaml: &'a Yaml) -> Self { use args::SubCommand; // We WANT this to panic on error...so expect() is good. let mut is_sc = None; let mut a = if let Some(name) = yaml["name"].as_str() { App::new(name) } else { let yaml_hash = yaml.as_hash().unwrap(); let sc_key = yaml_hash.keys().nth(0).unwrap(); is_sc = Some(yaml_hash.get(sc_key).unwrap()); App::new(sc_key.as_str().unwrap()) }; yaml = if let Some(sc) = is_sc { sc } else { yaml }; macro_rules! 
yaml_str { ($a:ident, $y:ident, $i:ident) => { if let Some(v) = $y[stringify!($i)].as_str() { $a = $a.$i(v); } else if $y[stringify!($i)] != Yaml::BadValue { panic!("Failed to convert YAML value {:?} to a string", $y[stringify!($i)]); } }; } yaml_str!(a, yaml, version); yaml_str!(a, yaml, author); yaml_str!(a, yaml, bin_name); yaml_str!(a, yaml, about); yaml_str!(a, yaml, before_help); yaml_str!(a, yaml, after_help); yaml_str!(a, yaml, template); yaml_str!(a, yaml, usage); yaml_str!(a, yaml, help); yaml_str!(a, yaml, help_short); yaml_str!(a, yaml, version_short); yaml_str!(a, yaml, help_message); yaml_str!(a, yaml, version_message); yaml_str!(a, yaml, alias); yaml_str!(a, yaml, visible_alias); if let Some(v) = yaml["display_order"].as_i64() { a = a.display_order(v as usize); } else if yaml["display_order"] != Yaml::BadValue { panic!( "Failed to convert YAML value {:?} to a u64", yaml["display_order"] ); } if let Some(v) = yaml["setting"].as_str() { a = a.setting(v.parse().expect("unknown AppSetting found in YAML file")); } else if yaml["setting"] != Yaml::BadValue { panic!( "Failed to convert YAML value {:?} to an AppSetting", yaml["setting"] ); } if let Some(v) = yaml["settings"].as_vec() { for ys in v { if let Some(s) = ys.as_str() { a = a.setting(s.parse().expect("unknown AppSetting found in YAML file")); } } } else if let Some(v) = yaml["settings"].as_str() { a = a.setting(v.parse().expect("unknown AppSetting found in YAML file")); } else if yaml["settings"] != Yaml::BadValue { panic!( "Failed to convert YAML value {:?} to a string", yaml["settings"] ); } if let Some(v) = yaml["global_setting"].as_str() { a = a.setting(v.parse().expect("unknown AppSetting found in YAML file")); } else if yaml["global_setting"] != Yaml::BadValue { panic!( "Failed to convert YAML value {:?} to an AppSetting", yaml["setting"] ); } if let Some(v) = yaml["global_settings"].as_vec() { for ys in v { if let Some(s) = ys.as_str() { a = a.global_setting(s.parse().expect("unknown 
AppSetting found in YAML file")); } } } else if let Some(v) = yaml["global_settings"].as_str() { a = a.global_setting(v.parse().expect("unknown AppSetting found in YAML file")); } else if yaml["global_settings"] != Yaml::BadValue { panic!( "Failed to convert YAML value {:?} to a string", yaml["global_settings"] ); } macro_rules! vec_or_str { ($a:ident, $y:ident, $as_vec:ident, $as_single:ident) => {{ let maybe_vec = $y[stringify!($as_vec)].as_vec(); if let Some(vec) = maybe_vec { for ys in vec { if let Some(s) = ys.as_str() { $a = $a.$as_single(s); } else { panic!("Failed to convert YAML value {:?} to a string", ys); } } } else { if let Some(s) = $y[stringify!($as_vec)].as_str() { $a = $a.$as_single(s); } else if $y[stringify!($as_vec)] != Yaml::BadValue { panic!("Failed to convert YAML value {:?} to either a vec or string", $y[stringify!($as_vec)]); } } $a } }; } a = vec_or_str!(a, yaml, aliases, alias); a = vec_or_str!(a, yaml, visible_aliases, visible_alias); if let Some(v) = yaml["args"].as_vec() { for arg_yaml in v { a = a.arg(Arg::from_yaml(arg_yaml.as_hash().unwrap())); } } if let Some(v) = yaml["subcommands"].as_vec() { for sc_yaml in v { a = a.subcommand(SubCommand::from_yaml(sc_yaml)); } } if let Some(v) = yaml["groups"].as_vec() { for ag_yaml in v { a = a.group(ArgGroup::from(ag_yaml.as_hash().unwrap())); } } a } } impl<'n, 'e> fmt::Display for App<'n, 'e> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.name) } }
#[macro_use] extern crate serde_derive; extern crate serde; extern crate toml; extern crate clap; use clap::{App, Arg, ArgMatches}; use std::path::Path; use std::fs::{create_dir, File}; use std::io::prelude::*; #[derive(Serialize, Deserialize)] struct Config { data_dir: Option<String>, } const CONFIG_PATH: &'static str = "/etc/blochs/"; const CONFIG_FILE_NAME: &'static str = "server.toml"; const DEFAULT_DATA_DIR: &'static str = "/var/lib/blochs/"; fn main() { let options = get_provided_options(); let config_path = Path::new(CONFIG_PATH); ensure_dir_exists(&config_path); let config_file_path = config_path.join(CONFIG_FILE_NAME); let mut read_config_file = match File::open(config_file_path.as_path()) { Ok(file) => file, Err(err) => panic!("Could not open config file {:?}: {}", config_file_path.display(), err), }; let mut actual_content = String::new(); match read_config_file.read_to_string(&mut actual_content) { Ok(_) => {}, Err(err) => panic!("Could not open config file {:?}: {}", config_file_path.display(), err), }; let mut config_values: Config = toml::from_str(&actual_content).unwrap(); let config_or_default_data_dir = config_values.data_dir.unwrap_or(DEFAULT_DATA_DIR.to_string()); let new_data_dir = options.value_of("data.dir").unwrap_or(&config_or_default_data_dir); config_values.data_dir = Some(new_data_dir.to_string()); let new_config_content = toml::to_string(&config_values).unwrap(); let mut write_config_file = match File::create(config_file_path.as_path()) { Ok(file) => file, Err(err) => panic!("Could not open config file {:?}: {}", config_file_path.display(), err), }; match write_config_file.write_all(new_config_content.as_bytes()) { Ok(_) => println!("New config saved at {}:\n\n{}", config_file_path.display(), new_config_content), Err(why) => panic!("Could not write config file {:?}: {}", config_file_path.display(), why), }; } fn ensure_dir_exists(path: &Path) { if !path.exists() { match create_dir(path) { Err(err) => panic!("Could not create directory 
under {}: {}", path.display(), err), Ok(_) => {} }; } } fn get_provided_options<'a>() -> ArgMatches<'a> { return App::new(env!("CARGO_PKG_NAME")) .version(env!("CARGO_PKG_VERSION")) .author(env!("CARGO_PKG_AUTHORS")) .about(env!("CARGO_PKG_DESCRIPTION")) .arg(Arg::with_name("data.dir") .long("data-dir") .value_name("DIR") .help(&format!("Sets where database data will be stored (default {})", DEFAULT_DATA_DIR)) ).get_matches(); } extract file reading to function #[macro_use] extern crate serde_derive; extern crate serde; extern crate toml; extern crate clap; use clap::{App, Arg, ArgMatches}; use std::path::Path; use std::fs::{create_dir, File}; use std::io::prelude::*; #[derive(Serialize, Deserialize)] struct Config { data_dir: Option<String>, } const CONFIG_PATH: &'static str = "/etc/blochs/"; const CONFIG_FILE_NAME: &'static str = "server.toml"; const DEFAULT_DATA_DIR: &'static str = "/var/lib/blochs/"; fn main() { let options = get_provided_options(); let config_path = Path::new(CONFIG_PATH); ensure_dir_exists(&config_path); let config_file_path = config_path.join(CONFIG_FILE_NAME); let actual_content = get_file_content(&config_file_path); let mut config_values: Config = toml::from_str(&actual_content).unwrap(); let config_or_default_data_dir = config_values.data_dir.unwrap_or(DEFAULT_DATA_DIR.to_string()); let new_data_dir = options.value_of("data.dir").unwrap_or(&config_or_default_data_dir); config_values.data_dir = Some(new_data_dir.to_string()); let new_config_content = toml::to_string(&config_values).unwrap(); let mut write_config_file = match File::create(config_file_path.as_path()) { Ok(file) => file, Err(err) => panic!("Could not open config file {:?}: {}", config_file_path.display(), err), }; match write_config_file.write_all(new_config_content.as_bytes()) { Ok(_) => println!("New config saved at {}:\n\n{}", config_file_path.display(), new_config_content), Err(why) => panic!("Could not write config file {:?}: {}", config_file_path.display(), why), }; } 
fn get_file_content(path: &Path) -> String { let mut content = String::new(); if path.exists() { let mut file = match File::open(&path) { Ok(file) => file, Err(err) => panic!("Could not open file {:?}: {}", path.display(), err), }; match file.read_to_string(&mut content) { Ok(_) => {}, Err(err) => panic!("Could not read file {:?}: {}", path.display(), err), }; } return content; } fn ensure_dir_exists(path: &Path) { if !path.exists() { match create_dir(path) { Err(err) => panic!("Could not create directory under {}: {}", path.display(), err), Ok(_) => {} }; } } fn get_provided_options<'a>() -> ArgMatches<'a> { return App::new(env!("CARGO_PKG_NAME")) .version(env!("CARGO_PKG_VERSION")) .author(env!("CARGO_PKG_AUTHORS")) .about(env!("CARGO_PKG_DESCRIPTION")) .arg(Arg::with_name("data.dir") .long("data-dir") .value_name("DIR") .help(&format!("Sets where database data will be stored (default {})", DEFAULT_DATA_DIR)) ).get_matches(); }
use gc_error::GcError; use types::binding::Binding; use types::js_var::{JsPtrEnum, JsVar}; pub trait Backend { fn alloc(&mut self, var: JsVar, ptr: Option<JsPtrEnum>) -> Result<Binding, GcError>; fn load(&mut self, bnd: &Binding) -> Result<(JsVar, Option<JsPtrEnum>), GcError>; fn store(&mut self, var: JsVar, ptr: Option<JsPtrEnum>) -> Result<(), GcError>; } add `get_allocator` method to Backend use std::cell::RefCell; use std::rc::Rc; use gc_error::GcError; use types::allocator::Allocator; use types::binding::Binding; use types::js_var::{JsPtrEnum, JsVar}; pub trait Backend { fn alloc(&mut self, var: JsVar, ptr: Option<JsPtrEnum>) -> Result<Binding, GcError>; fn load(&mut self, bnd: &Binding) -> Result<(JsVar, Option<JsPtrEnum>), GcError>; fn store(&mut self, var: JsVar, ptr: Option<JsPtrEnum>) -> Result<(), GcError>; fn get_allocator(&self) -> Rc<RefCell<Allocator<Error=GcError>>>; }
// Copyright 2014 Christopher Schröder, Johannes Köster. // Licensed under the MIT license (http://opensource.org/licenses/MIT) // This file may not be copied, modified, or distributed // except according to those terms. pub mod record; pub mod header; use std::ffi; use std::path; use std::ffi::AsOsStr; use std::os::unix::prelude::OsStrExt; use std::str; use htslib; /// A trait for a BAM reader with a read method. pub trait BAMRead { /// Read next BAM record into given record. /// Use this method in combination with a single allocated record to avoid the reallocations /// occurring with the iterator. /// /// # Arguments /// /// * `record` - the record to be filled fn read(&self, record: &mut record::Record) -> Result<(), ReadError>; } /// A BAM reader. pub struct BAMReader { f: *mut htslib::Struct_BGZF, header: *mut htslib::bam_hdr_t, } impl BAMReader { /// Create a new BAMReader. /// /// # Arguments /// /// * `path` - the path. Use "-" for stdin. pub fn new<P: path::AsPath>(path: &P) -> Self { let f = bgzf_open(path, b"r"); let header = unsafe { htslib::bam_hdr_read(f) }; BAMReader { f : f, header : header } } /// Iterator over the records of the BAM file. pub fn records(self) -> Records { Records { bam: self } } } impl BAMRead for BAMReader { fn read(&self, record: &mut record::Record) -> Result<(), ReadError> { match unsafe { htslib::bam_read1(self.f, &mut record.inner) } { -1 => Err(ReadError::EOF), -2 => Err(ReadError::Truncated), -4 => Err(ReadError::Invalid), _ => Ok(()) } } } impl Drop for BAMReader { fn drop(&mut self) { unsafe { htslib::bam_hdr_destroy(self.header); htslib::bgzf_close(self.f); } } } /// A BAM writer. pub struct BAMWriter { f: *mut htslib::Struct_BGZF, header: *mut htslib::bam_hdr_t, } impl BAMWriter { /// Create a new BAM file. /// /// # Arguments /// /// * `path` - the path. Use "-" for stdin. 
/// * `header` - header definition to use pub fn new<P: path::AsPath>(path: &P, header: &header::Header) -> Self { let f = bgzf_open(path, b"w"); let header_record = unsafe { let header_string = header.to_bytes(); println!("{}", str::from_utf8(&header_string).unwrap()); htslib::sam_hdr_parse( header_string.len() as i32, ffi::CString::new(header_string).unwrap().as_ptr() ) }; unsafe { htslib::bam_hdr_write(f, header_record); } BAMWriter { f: f, header: header_record } } /// Create a new BAM file from template. /// /// # Arguments /// /// * `path` - the path. Use "-" for stdin. /// * `template` - the template BAM. Use "-" for stdin. pub fn with_template<P: path::AsPath, T: path::AsPath>(path: &P, template: &T) -> Self { let t = bgzf_open(template, b"r"); let header = unsafe { htslib::bam_hdr_read(t) }; unsafe { htslib::bgzf_close(t); } let f = bgzf_open(path, b"w"); unsafe { htslib::bam_hdr_write(f, header); } BAMWriter { f: f, header: header } } /// Write record to BAM. /// /// # Arguments /// /// * `record` - the record to write pub fn write(&mut self, record: &record::Record) -> Result<(), ()> { if unsafe { htslib::bam_write1(self.f, &record.inner) } == -1 { Err(()) } else { Ok(()) } } } impl Drop for BAMWriter { fn drop(&mut self) { unsafe { htslib::bam_hdr_destroy(self.header); htslib::bgzf_close(self.f); } } } /// Iterator over the records of a BAM. pub struct Records { bam: BAMReader } impl Iterator for Records { type Item = Result<record::Record, ReadError>; fn next(&mut self) -> Option<Result<record::Record, ReadError>> { let mut record = record::Record::new(); match self.bam.read(&mut record) { Err(ReadError::EOF) => None, Ok(()) => Some(Ok(record)), Err(err) => Some(Err(err)) } } } pub enum ReadError { Truncated, Invalid, EOF, } /// Wrapper for opening a BAM file. 
fn bgzf_open<P: path::AsPath>(path: &P, mode: &[u8]) -> *mut htslib::Struct_BGZF { unsafe { htslib::bgzf_open( path.as_path().as_os_str().to_cstring().unwrap().as_ptr(), ffi::CString::new(mode).unwrap().as_ptr() ) } } #[cfg(test)] mod tests { extern crate tempdir; use super::*; use super::record::*; use super::header::*; use std::str; #[test] fn test_read() { let names = [b"I", b"II.14978392", b"III", b"IV", b"V", b"VI"]; let flags = [16u16, 16u16, 16u16, 16u16, 16u16, 2048u16]; let seqs = [ b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA", b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA", b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA", b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA", b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA", b"ACTAAGCCTAAGCCTAAGCCTAAGCCAATTATCGATTTCTGAAAAAATTATCGAATTTTCTAGAAATTTTGCAAATTTTTTCATAAAATTATCGATTTTA", ]; let cigars = [ [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)], [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)], [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)], [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)], [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)], [Cigar::Match(27), Cigar::Del(100000), Cigar::Match(73)], ]; let bam = BAMReader::new(&"test.bam"); for (i, record) in bam.records().enumerate() { let rec = record.ok().expect("Expected valid record"); println!("{}", str::from_utf8(rec.qname()).ok().unwrap()); assert_eq!(rec.qname(), names[i]); assert_eq!(rec.flags(), flags[i]); assert_eq!(rec.seq().as_bytes(), seqs[i]); assert_eq!(rec.cigar(), cigars[i]); } } #[test] fn test_set_record() { let qname = b"I"; let cigar = [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)]; let seq = 
b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGC"; let qual = b"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ"; let mut rec = record::Record::new(); rec.set_reverse(); rec.set(qname, &cigar, seq, qual); rec.push_aux(b"NM", &Aux::Integer(15)); assert_eq!(rec.qname(), qname); assert_eq!(rec.cigar(), cigar); assert_eq!(rec.seq().as_bytes(), seq); assert_eq!(rec.qual(), qual); assert!(rec.is_reverse()); assert_eq!(rec.aux(b"NM").unwrap(), Aux::Integer(15)); } #[test] fn test_write() { let qname = b"I"; let cigar = [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)]; let seq = b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGC"; let qual = b"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ"; let tmp = tempdir::TempDir::new("rust-htslib").ok().expect("Cannot create temp dir"); let bampath = tmp.path().join("test.bam"); { let mut bam = BAMWriter::new( &bampath, Header::new().push_record( HeaderRecord::new(b"SQ").push_tag(b"SN", &"chr1") .push_tag(b"LN", &15072423) ) ); let mut rec = record::Record::new(); rec.set(qname, &cigar, seq, qual); rec.push_aux(b"NM", &Aux::Integer(15)); bam.write(&mut rec).ok().expect("Failed to write record."); } { let bam = BAMReader::new(&bampath); let mut rec = record::Record::new(); bam.read(&mut rec).ok().expect("Failed to read record."); assert_eq!(rec.qname(), qname); assert_eq!(rec.cigar(), cigar); assert_eq!(rec.seq().as_bytes(), seq); assert_eq!(rec.qual(), qual); assert_eq!(rec.aux(b"NM").unwrap(), Aux::Integer(15)); } tmp.close().ok().expect("Failed to delete temp dir"); } } Formatting. // Copyright 2014 Christopher Schröder, Johannes Köster. // Licensed under the MIT license (http://opensource.org/licenses/MIT) // This file may not be copied, modified, or distributed // except according to those terms. pub mod record; pub mod header; use std::ffi; use std::path; use std::ffi::AsOsStr; use std::os::unix::prelude::OsStrExt; use std::str; use htslib; /// A trait for a BAM reader with a read method. 
pub trait BAMRead { /// Read next BAM record into given record. /// Use this method in combination with a single allocated record to avoid the reallocations /// occurring with the iterator. /// /// # Arguments /// /// * `record` - the record to be filled fn read(&self, record: &mut record::Record) -> Result<(), ReadError>; } /// A BAM reader. pub struct BAMReader { f: *mut htslib::Struct_BGZF, header: *mut htslib::bam_hdr_t, } impl BAMReader { /// Create a new BAMReader. /// /// # Arguments /// /// * `path` - the path. Use "-" for stdin. pub fn new<P: path::AsPath>(path: &P) -> Self { let f = bgzf_open(path, b"r"); let header = unsafe { htslib::bam_hdr_read(f) }; BAMReader { f : f, header : header } } /// Iterator over the records of the BAM file. pub fn records(self) -> Records { Records { bam: self } } } impl BAMRead for BAMReader { fn read(&self, record: &mut record::Record) -> Result<(), ReadError> { match unsafe { htslib::bam_read1(self.f, &mut record.inner) } { -1 => Err(ReadError::EOF), -2 => Err(ReadError::Truncated), -4 => Err(ReadError::Invalid), _ => Ok(()) } } } impl Drop for BAMReader { fn drop(&mut self) { unsafe { htslib::bam_hdr_destroy(self.header); htslib::bgzf_close(self.f); } } } /// A BAM writer. pub struct BAMWriter { f: *mut htslib::Struct_BGZF, header: *mut htslib::bam_hdr_t, } impl BAMWriter { /// Create a new BAM file. /// /// # Arguments /// /// * `path` - the path. Use "-" for stdin. /// * `header` - header definition to use pub fn new<P: path::AsPath>(path: &P, header: &header::Header) -> Self { let f = bgzf_open(path, b"w"); let header_record = unsafe { let header_string = header.to_bytes(); println!("{}", str::from_utf8(&header_string).unwrap()); htslib::sam_hdr_parse( header_string.len() as i32, ffi::CString::new(header_string).unwrap().as_ptr() ) }; unsafe { htslib::bam_hdr_write(f, header_record); } BAMWriter { f: f, header: header_record } } /// Create a new BAM file from template. /// /// # Arguments /// /// * `path` - the path. 
Use "-" for stdin. /// * `template` - the template BAM. Use "-" for stdin. pub fn with_template<P: path::AsPath, T: path::AsPath>(path: &P, template: &T) -> Self { let t = bgzf_open(template, b"r"); let header = unsafe { htslib::bam_hdr_read(t) }; unsafe { htslib::bgzf_close(t); } let f = bgzf_open(path, b"w"); unsafe { htslib::bam_hdr_write(f, header); } BAMWriter { f: f, header: header } } /// Write record to BAM. /// /// # Arguments /// /// * `record` - the record to write pub fn write(&mut self, record: &record::Record) -> Result<(), ()> { if unsafe { htslib::bam_write1(self.f, &record.inner) } == -1 { Err(()) } else { Ok(()) } } } impl Drop for BAMWriter { fn drop(&mut self) { unsafe { htslib::bam_hdr_destroy(self.header); htslib::bgzf_close(self.f); } } } /// Iterator over the records of a BAM. pub struct Records { bam: BAMReader } impl Iterator for Records { type Item = Result<record::Record, ReadError>; fn next(&mut self) -> Option<Result<record::Record, ReadError>> { let mut record = record::Record::new(); match self.bam.read(&mut record) { Err(ReadError::EOF) => None, Ok(()) => Some(Ok(record)), Err(err) => Some(Err(err)) } } } pub enum ReadError { Truncated, Invalid, EOF, } /// Wrapper for opening a BAM file. 
fn bgzf_open<P: path::AsPath>(path: &P, mode: &[u8]) -> *mut htslib::Struct_BGZF { unsafe { htslib::bgzf_open( path.as_path().as_os_str().to_cstring().unwrap().as_ptr(), ffi::CString::new(mode).unwrap().as_ptr() ) } } #[cfg(test)] mod tests { extern crate tempdir; use super::*; use super::record::*; use super::header::*; use std::str; #[test] fn test_read() { let names = [b"I", b"II.14978392", b"III", b"IV", b"V", b"VI"]; let flags = [16u16, 16u16, 16u16, 16u16, 16u16, 2048u16]; let seqs = [ b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA", b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA", b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA", b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA", b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA", b"ACTAAGCCTAAGCCTAAGCCTAAGCCAATTATCGATTTCTGAAAAAATTATCGAATTTTCTAGAAATTTTGCAAATTTTTTCATAAAATTATCGATTTTA", ]; let cigars = [ [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)], [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)], [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)], [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)], [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)], [Cigar::Match(27), Cigar::Del(100000), Cigar::Match(73)], ]; let bam = BAMReader::new(&"test.bam"); for (i, record) in bam.records().enumerate() { let rec = record.ok().expect("Expected valid record"); println!("{}", str::from_utf8(rec.qname()).ok().unwrap()); assert_eq!(rec.qname(), names[i]); assert_eq!(rec.flags(), flags[i]); assert_eq!(rec.seq().as_bytes(), seqs[i]); assert_eq!(rec.cigar(), cigars[i]); } } #[test] fn test_set_record() { let qname = b"I"; let cigar = [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)]; let seq = 
b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGC"; let qual = b"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ"; let mut rec = record::Record::new(); rec.set_reverse(); rec.set(qname, &cigar, seq, qual); rec.push_aux(b"NM", &Aux::Integer(15)); assert_eq!(rec.qname(), qname); assert_eq!(rec.cigar(), cigar); assert_eq!(rec.seq().as_bytes(), seq); assert_eq!(rec.qual(), qual); assert!(rec.is_reverse()); assert_eq!(rec.aux(b"NM").unwrap(), Aux::Integer(15)); } #[test] fn test_write() { let qname = b"I"; let cigar = [Cigar::Match(27), Cigar::Del(1), Cigar::Match(73)]; let seq = b"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGC"; let qual = b"JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ"; let tmp = tempdir::TempDir::new("rust-htslib").ok().expect("Cannot create temp dir"); let bampath = tmp.path().join("test.bam"); { let mut bam = BAMWriter::new( &bampath, Header::new().push_record( HeaderRecord::new(b"SQ").push_tag(b"SN", &"chr1") .push_tag(b"LN", &15072423) ) ); let mut rec = record::Record::new(); rec.set(qname, &cigar, seq, qual); rec.push_aux(b"NM", &Aux::Integer(15)); bam.write(&mut rec).ok().expect("Failed to write record."); } { let bam = BAMReader::new(&bampath); let mut rec = record::Record::new(); bam.read(&mut rec).ok().expect("Failed to read record."); assert_eq!(rec.qname(), qname); assert_eq!(rec.cigar(), cigar); assert_eq!(rec.seq().as_bytes(), seq); assert_eq!(rec.qual(), qual); assert_eq!(rec.aux(b"NM").unwrap(), Aux::Integer(15)); } tmp.close().ok().expect("Failed to delete temp dir"); } }
//! Randomization of big integers // Some of the tests of non-RNG-based functionality are randomized using the // RNG-based functionality, so the RNG-based functionality needs to be enabled // for tests. use rand::Rng; use BigInt; use BigUint; use Sign::*; use big_digit::BigDigit; use traits::Zero; use integer::Integer; pub trait RandBigInt { /// Generate a random `BigUint` of the given bit size. fn gen_biguint(&mut self, bit_size: usize) -> BigUint; /// Generate a random BigInt of the given bit size. fn gen_bigint(&mut self, bit_size: usize) -> BigInt; /// Generate a random `BigUint` less than the given bound. Fails /// when the bound is zero. fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint; /// Generate a random `BigUint` within the given range. The lower /// bound is inclusive; the upper bound is exclusive. Fails when /// the upper bound is not greater than the lower bound. fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint; /// Generate a random `BigInt` within the given range. The lower /// bound is inclusive; the upper bound is exclusive. Fails when /// the upper bound is not greater than the lower bound. fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt; } #[cfg(any(feature = "rand", test))] impl<R: Rng> RandBigInt for R { fn gen_biguint(&mut self, bit_size: usize) -> BigUint { use super::big_digit::BITS; let (digits, rem) = bit_size.div_rem(&BITS); let mut data = Vec::with_capacity(digits + 1); for _ in 0..digits { data.push(self.gen()); } if rem > 0 { let final_digit: BigDigit = self.gen(); data.push(final_digit >> (BITS - rem)); } BigUint::new(data) } fn gen_bigint(&mut self, bit_size: usize) -> BigInt { // Generate a random BigUint... let biguint = self.gen_biguint(bit_size); // ...and then randomly assign it a Sign... let sign = if biguint.is_zero() { // ...except that if the BigUint is zero, we need to try // again with probability 0.5. 
This is because otherwise, // the probability of generating a zero BigInt would be // double that of any other number. if self.gen() { return self.gen_bigint(bit_size); } else { NoSign } } else if self.gen() { Plus } else { Minus }; BigInt::from_biguint(sign, biguint) } fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint { assert!(!bound.is_zero()); let bits = bound.bits(); loop { let n = self.gen_biguint(bits); if n < *bound { return n; } } } fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint { assert!(*lbound < *ubound); return lbound + self.gen_biguint_below(&(ubound - lbound)); } fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt { assert!(*lbound < *ubound); let delta = (ubound - lbound).to_biguint().unwrap(); return lbound + BigInt::from(self.gen_biguint_below(&delta)) } } loop gen_bigint retries //! Randomization of big integers // Some of the tests of non-RNG-based functionality are randomized using the // RNG-based functionality, so the RNG-based functionality needs to be enabled // for tests. use rand::Rng; use BigInt; use BigUint; use Sign::*; use big_digit::BigDigit; use traits::Zero; use integer::Integer; pub trait RandBigInt { /// Generate a random `BigUint` of the given bit size. fn gen_biguint(&mut self, bit_size: usize) -> BigUint; /// Generate a random BigInt of the given bit size. fn gen_bigint(&mut self, bit_size: usize) -> BigInt; /// Generate a random `BigUint` less than the given bound. Fails /// when the bound is zero. fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint; /// Generate a random `BigUint` within the given range. The lower /// bound is inclusive; the upper bound is exclusive. Fails when /// the upper bound is not greater than the lower bound. fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint; /// Generate a random `BigInt` within the given range. The lower /// bound is inclusive; the upper bound is exclusive. 
Fails when /// the upper bound is not greater than the lower bound. fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt; } #[cfg(any(feature = "rand", test))] impl<R: Rng> RandBigInt for R { fn gen_biguint(&mut self, bit_size: usize) -> BigUint { use super::big_digit::BITS; let (digits, rem) = bit_size.div_rem(&BITS); let mut data = Vec::with_capacity(digits + 1); for _ in 0..digits { data.push(self.gen()); } if rem > 0 { let final_digit: BigDigit = self.gen(); data.push(final_digit >> (BITS - rem)); } BigUint::new(data) } fn gen_bigint(&mut self, bit_size: usize) -> BigInt { loop { // Generate a random BigUint... let biguint = self.gen_biguint(bit_size); // ...and then randomly assign it a Sign... let sign = if biguint.is_zero() { // ...except that if the BigUint is zero, we need to try // again with probability 0.5. This is because otherwise, // the probability of generating a zero BigInt would be // double that of any other number. if self.gen() { continue; } else { NoSign } } else if self.gen() { Plus } else { Minus }; return BigInt::from_biguint(sign, biguint); } } fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint { assert!(!bound.is_zero()); let bits = bound.bits(); loop { let n = self.gen_biguint(bits); if n < *bound { return n; } } } fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint { assert!(*lbound < *ubound); return lbound + self.gen_biguint_below(&(ubound - lbound)); } fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt { assert!(*lbound < *ubound); let delta = (ubound - lbound).to_biguint().unwrap(); return lbound + BigInt::from(self.gen_biguint_below(&delta)) } }
/* NOTE(review): corpus sample — Cargo's `cargo doc` command file BEFORE commit
   #5081: `Options` has no `flag_exclude`, USAGE lists no `--exclude`, and
   `execute` computes the package spec inline (`Packages::All` when `--all` or a
   virtual workspace with no `-p`, else `Packages::Packages(..)`). The `USAGE`
   string literal spans the physical line break below, so the text must stay
   byte-identical. The trailing stray word "Auto" is the first token of the next
   sample's commit title. The `debug!` message reads "cmd=cargo-check" — presumably
   copy-pasted from the check command; left untouched (runtime string). */
use std::env; use cargo::core::Workspace; use cargo::ops::{self, MessageFormat, Packages}; use cargo::util::{CliResult, Config}; use cargo::util::important_paths::{find_root_manifest_for_wd}; #[derive(Deserialize)] pub struct Options { flag_target: Option<String>, flag_features: Vec<String>, flag_all_features: bool, flag_jobs: Option<u32>, flag_manifest_path: Option<String>, flag_no_default_features: bool, flag_no_deps: bool, flag_open: bool, flag_release: bool, flag_verbose: u32, flag_quiet: Option<bool>, flag_color: Option<String>, flag_message_format: MessageFormat, flag_package: Vec<String>, flag_lib: bool, flag_bin: Vec<String>, flag_bins: bool, flag_frozen: bool, flag_locked: bool, flag_all: bool, #[serde(rename = "flag_Z")] flag_z: Vec<String>, } pub const USAGE: &'static str = " Build a package's documentation Usage: cargo doc [options] Options: -h, --help Print this message --open Opens the docs in a browser after the operation -p SPEC, --package SPEC ... Package to document --all Document all packages in the workspace --no-deps Don't build documentation for dependencies -j N, --jobs N Number of parallel jobs, defaults to # of CPUs --lib Document only this package's library --bin NAME Document only the specified binary --bins Document all binaries --release Build artifacts in release mode, with optimizations --features FEATURES Space-separated list of features to also build --all-features Build all available features --no-default-features Do not build the `default` feature --target TRIPLE Build for the target triple --manifest-path PATH Path to the manifest to document -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) -q, --quiet No output printed to stdout --color WHEN Coloring: auto, always, never --message-format FMT Error format: human, json [default: human] --frozen Require Cargo.lock and cache are up to date --locked Require Cargo.lock is up to date -Z FLAG ...
Unstable (nightly-only) flags to Cargo By default the documentation for the local package and all dependencies is built. The output is all placed in `target/doc` in rustdoc's usual format. All packages in the workspace are documented if the `--all` flag is supplied. The `--all` flag is automatically assumed for a virtual manifest. Note that `--exclude` has to be specified in conjunction with the `--all` flag. If the --package argument is given, then SPEC is a package id specification which indicates which package should be documented. If it is not given, then the current package is documented. For more information on SPEC and its format, see the `cargo help pkgid` command. "; pub fn execute(options: Options, config: &mut Config) -> CliResult { debug!("executing; cmd=cargo-check; args={:?}", env::args().collect::<Vec<_>>()); config.configure(options.flag_verbose, options.flag_quiet, &options.flag_color, options.flag_frozen, options.flag_locked, &options.flag_z)?; let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; let ws = Workspace::new(&root, config)?; let spec = if options.flag_all || (ws.is_virtual() && options.flag_package.is_empty()) { Packages::All } else { Packages::Packages(&options.flag_package) }; let empty = Vec::new(); let doc_opts = ops::DocOptions { open_result: options.flag_open, compile_opts: ops::CompileOptions { config: config, jobs: options.flag_jobs, target: options.flag_target.as_ref().map(|t| &t[..]), features: &options.flag_features, all_features: options.flag_all_features, no_default_features: options.flag_no_default_features, spec: spec, filter: ops::CompileFilter::new(options.flag_lib, &options.flag_bin, options.flag_bins, &empty, false, &empty, false, &empty, false, false), message_format: options.flag_message_format, release: options.flag_release, mode: ops::CompileMode::Doc { deps: !options.flag_no_deps, }, target_rustc_args: None, target_rustdoc_args: None, }, }; ops::doc(&ws, &doc_opts)?; Ok(()) } Auto
/* NOTE(review): continuation — "Auto" at the end of the previous line plus
   "merge of #5081 …" below form the commit title. This is the AFTER version of
   the same `cargo doc` file: `flag_exclude: Vec<String>` added to `Options`,
   `--exclude SPEC ...` added to USAGE, and the inline spec computation replaced
   by `Packages::from_flags(all, &exclude, &package)?` (fallible, hence the added
   `?`). `execute`'s signature and the rest of its body are unchanged. Kept
   byte-identical; the USAGE string literal again spans a physical line break,
   as does the `CompileFilter::new` argument list further down. */
merge of #5081 - matklad:document-not-all-the-things, r=alexcrichton Support --exclude option for `cargo doc` I think this should have been implemented when the feature was added for other commands. Probably just an oversight. cc https://github.com/rust-lang/cargo/pull/4031 r? @alexcrichton use std::env; use cargo::core::Workspace; use cargo::ops::{self, MessageFormat, Packages}; use cargo::util::{CliResult, Config}; use cargo::util::important_paths::{find_root_manifest_for_wd}; #[derive(Deserialize)] pub struct Options { flag_target: Option<String>, flag_features: Vec<String>, flag_all_features: bool, flag_jobs: Option<u32>, flag_manifest_path: Option<String>, flag_no_default_features: bool, flag_no_deps: bool, flag_open: bool, flag_release: bool, flag_verbose: u32, flag_quiet: Option<bool>, flag_color: Option<String>, flag_message_format: MessageFormat, flag_package: Vec<String>, flag_lib: bool, flag_bin: Vec<String>, flag_bins: bool, flag_frozen: bool, flag_locked: bool, flag_all: bool, flag_exclude: Vec<String>, #[serde(rename = "flag_Z")] flag_z: Vec<String>, } pub const USAGE: &'static str = " Build a package's documentation Usage: cargo doc [options] Options: -h, --help Print this message --open Opens the docs in a browser after the operation -p SPEC, --package SPEC ... Package to document --all Document all packages in the workspace --exclude SPEC ... Exclude packages from the build --no-deps Don't build documentation for dependencies -j N, --jobs N Number of parallel jobs, defaults to # of CPUs --lib Document only this package's library --bin NAME Document only the specified binary --bins Document all binaries --release Build artifacts in release mode, with optimizations --features FEATURES Space-separated list of features to also build --all-features Build all available features --no-default-features Do not build the `default` feature --target TRIPLE Build for the target triple --manifest-path PATH Path to the manifest to document -v, --verbose ...
Use verbose output (-vv very verbose/build.rs output) -q, --quiet No output printed to stdout --color WHEN Coloring: auto, always, never --message-format FMT Error format: human, json [default: human] --frozen Require Cargo.lock and cache are up to date --locked Require Cargo.lock is up to date -Z FLAG ... Unstable (nightly-only) flags to Cargo By default the documentation for the local package and all dependencies is built. The output is all placed in `target/doc` in rustdoc's usual format. All packages in the workspace are documented if the `--all` flag is supplied. The `--all` flag is automatically assumed for a virtual manifest. Note that `--exclude` has to be specified in conjunction with the `--all` flag. If the --package argument is given, then SPEC is a package id specification which indicates which package should be documented. If it is not given, then the current package is documented. For more information on SPEC and its format, see the `cargo help pkgid` command. "; pub fn execute(options: Options, config: &mut Config) -> CliResult { debug!("executing; cmd=cargo-check; args={:?}", env::args().collect::<Vec<_>>()); config.configure(options.flag_verbose, options.flag_quiet, &options.flag_color, options.flag_frozen, options.flag_locked, &options.flag_z)?; let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; let ws = Workspace::new(&root, config)?; let spec = Packages::from_flags(options.flag_all, &options.flag_exclude, &options.flag_package)?; let empty = Vec::new(); let doc_opts = ops::DocOptions { open_result: options.flag_open, compile_opts: ops::CompileOptions { config: config, jobs: options.flag_jobs, target: options.flag_target.as_ref().map(|t| &t[..]), features: &options.flag_features, all_features: options.flag_all_features, no_default_features: options.flag_no_default_features, spec: spec, filter: ops::CompileFilter::new(options.flag_lib, &options.flag_bin, options.flag_bins, &empty, false, &empty, false, &empty, false,
false), message_format: options.flag_message_format, release: options.flag_release, mode: ops::CompileMode::Doc { deps: !options.flag_no_deps, }, target_rustc_args: None, target_rustdoc_args: None, }, }; ops::doc(&ws, &doc_opts)?; Ok(()) }
// NOTE(review): end of the post-change doc.rs sample; `execute`'s external
// interface is identical to the pre-change version above.