file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
mod.rs | mod task;
mod worker;
use crate::executor::Executor;
use crossbeam::{
deque::{Injector, Stealer, Worker},
queue::ArrayQueue,
};
use futures::Future;
use std::{
io,
sync::{atomic::Ordering, Arc},
thread::JoinHandle,
};
use task::Task;
/// An executor which distributes tasks across multiple threads using a work-stealing
/// scheduler. Tasks can be spawned on it by calling the [`spawn`][`Executor::spawn`]
/// method on the `ThreadPool`. Note that since this executor moves futures between different
/// threads, the future in question *must* be [`Send`].
///
/// # Examples
/// ```
/// use std::io;
/// use threader::{
/// executor::Executor,
/// thread_pool::ThreadPool,
/// net::tcp::TcpStream,
/// };
///
/// fn main() -> io::Result<()> {
/// let mut pool = ThreadPool::new()?;
/// let addr = "10.0.0.1:80".parse().unwrap();
///
/// pool.spawn(async move {
/// let _stream = TcpStream::connect(&addr);
/// });
///
/// pool.shutdown_on_idle();
/// Ok(())
/// }
/// ```
pub struct ThreadPool {
workers: Vec<(JoinHandle<()>, Arc<worker::Handle>)>,
count: usize,
shutdown: bool,
shared: Arc<Shared>,
}
impl ThreadPool {
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to the number of logical CPU cores in a given machine.
/// Returns any errors that may have occurred in creating the
/// thread pool.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::new()?;
/// Ok(())
/// }
/// ```
pub fn new() -> io::Result<ThreadPool> {
ThreadPool::new_priv(None)
}
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to `count`. `count` must not be zero, or this method
/// will panic. Like [`ThreadPool::new`], this method returns
/// any errors that occurred when creating the thread pool.
///
/// # Panics
/// Panics if `count` is equal to zero.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::with_threads(1)?;
/// Ok(())
/// }
/// ```
pub fn with_threads(count: usize) -> io::Result<ThreadPool> {
ThreadPool::new_priv(Some(count))
}
/// Shuts down the `ThreadPool` when all worker threads are idle. This method
/// blocks the current thread until all of the worker threads have been joined.
pub fn shutdown_on_idle(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_IDLE);
}
/// Shuts down the `ThreadPool` immediately, sending a message to all worker
/// threads to shut down. This emethod blocks the current thread until all
/// worker threads have been joined, but this blocking shouldn't be noticeable.
pub fn shutdown_now(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_NOW);
}
// Private method used to reduce code duplication.
fn shutdown_priv(&mut self, shutdown: usize) {
self.shutdown = true;
for (_, handle) in &self.workers {
handle.state.store(shutdown, Ordering::Release);
handle.unparker.unpark();
}
while let Some((thread, _)) = self.workers.pop() {
let _ = thread.join();
}
}
// Private method used to reduce code duplication.
fn new_priv(count: Option<usize>) -> io::Result<ThreadPool> {
if let Some(0) = count {
panic!("Can not create a thread pool with 0 threads.");
}
let count = count.unwrap_or(num_cpus::get());
let queues = {
let mut vec = Vec::with_capacity(count);
for _ in 0..count {
vec.push(Worker::new_fifo());
}
vec
};
let stealers: Vec<_> = queues.iter().map(|queue| queue.stealer()).collect();
let shared = Arc::new(Shared {
injector: Injector::new(),
sleep_queue: ArrayQueue::new(count),
stealers,
});
let workers = {
let mut vec = Vec::with_capacity(count);
for queue in queues {
let thread = worker::create_worker(Arc::clone(&shared), queue)?;
vec.push(thread);
}
vec
};
for (_, handle) in &workers {
let handle = Arc::clone(handle);
// Unwrap here since this is a programmer error
// if this fails.
shared.sleep_queue.push(handle).unwrap();
}
Ok(ThreadPool {
workers,
shutdown: false,
count,
shared,
})
}
}
impl<F> Executor<F> for ThreadPool
where
F: Future<Output = ()> + Send + 'static,
{
fn spawn(&self, future: F) {
let shared = Arc::downgrade(&self.shared);
let task = Task::new(future, shared);
self.shared.injector.push(task);
if !self.shared.sleep_queue.is_empty() {
if let Ok(handle) = self.shared.sleep_queue.pop() {
handle.state.store(worker::NEW_TASK, Ordering::Release);
handle.unparker.unpark();
}
}
}
}
impl Drop for ThreadPool {
fn drop(&mut self) {
self.shutdown_now();
}
}
pub(crate) struct Shared {
injector: Injector<Task>,
sleep_queue: ArrayQueue<Arc<worker::Handle>>,
stealers: Vec<Stealer<Task>>,
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam::channel;
use futures::future;
use futures::task::{Context, Waker};
use parking_lot::Mutex;
use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::task::Poll;
use std::thread;
use std::time::{Duration, Instant};
static TIMES: usize = 100;
#[test]
fn simple() {
let executor = ThreadPool::new().unwrap();
executor.spawn(async {
println!("Hello, world!");
});
thread::sleep(Duration::from_secs(1));
}
#[test]
fn reschedule() {
struct CustomFuture {
waker: Arc<Mutex<Option<Waker>>>,
shared: Arc<AtomicBool>,
}
impl CustomFuture {
fn new() -> CustomFuture {
let waker: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let waker_thread = Arc::clone(&waker);
let shared = Arc::new(AtomicBool::new(false));
let shared_thread = Arc::clone(&shared);
thread::spawn(move || {
thread::sleep(Duration::from_secs(1));
if let Some(waker) = waker_thread.lock().take() |
});
CustomFuture { waker, shared }
}
}
impl Future for CustomFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.shared.load(Ordering::SeqCst) {
Poll::Ready(())
} else {
*(self.waker.lock()) = Some(cx.waker().clone());
Poll::Pending
}
}
}
let (tx, rx) = channel::unbounded();
let executor = ThreadPool::with_threads(12).unwrap();
executor.spawn(async move {
CustomFuture::new().await;
tx.send(0).unwrap();
});
thread::sleep(Duration::from_secs(4));
assert_eq!(rx.try_recv(), Ok(0));
}
#[test]
#[should_panic]
fn zero_threads() {
let executor = ThreadPool::with_threads(0).unwrap();
executor.spawn(async {});
}
#[test]
fn custom_thread_count() {
let executor = ThreadPool::with_threads(32).unwrap();
executor.spawn(async {});
}
#[test]
#[ignore]
fn bad_future() {
// A future that spawns a thread, returns Poll::Ready(()), and
// keeps trying to reschedule itself on the thread_pool.
struct BadFuture {
shared: Arc<Mutex<Option<Waker>>>,
}
impl BadFuture {
fn new() -> BadFuture {
let shared: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let thread_shared = Arc::clone(&shared);
thread::spawn(move || loop {
let guard = thread_shared.lock();
if let Some(waker) = guard.as_ref() {
waker.clone().wake();
}
});
BadFuture { shared }
}
}
impl Future for BadFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut guard = self.shared.lock();
*guard = Some(cx.waker().clone());
Poll::Ready(())
}
}
let executor = ThreadPool::new().unwrap();
for _ in 0..50 {
executor.spawn(BadFuture::new());
}
}
#[test]
#[ignore]
fn time_threader() {
let mut executor = ThreadPool::with_threads(1).unwrap();
let mut results = Vec::with_capacity(TIMES);
eprintln!("\nthreader time test starting...");
let total_start = Instant::now();
for _ in 0..TIMES {
let start = Instant::now();
for _ in 0..50_000 {
executor.spawn(async {
future::ready(()).await;
});
}
let end = start.elapsed();
// eprintln!("threader: {:?}", end);
results.push(end.as_millis());
}
let shutdown_start = Instant::now();
executor.shutdown_on_idle();
eprintln!("threader shutdown: {:?}", shutdown_start.elapsed());
eprintln!("threader total: {:?}", total_start.elapsed());
let average = {
let sum: u128 = results.into_iter().sum();
(sum as f64) / (TIMES as f64)
};
eprintln!("threader average: {:?}ms", average);
}
}
| {
waker.wake();
shared_thread.store(true, Ordering::SeqCst);
} | conditional_block |
mod.rs | mod task;
mod worker;
use crate::executor::Executor;
use crossbeam::{
deque::{Injector, Stealer, Worker},
queue::ArrayQueue,
};
use futures::Future;
use std::{
io,
sync::{atomic::Ordering, Arc},
thread::JoinHandle,
};
use task::Task;
/// An executor which distributes tasks across multiple threads using a work-stealing
/// scheduler. Tasks can be spawned on it by calling the [`spawn`][`Executor::spawn`]
/// method on the `ThreadPool`. Note that since this executor moves futures between different
/// threads, the future in question *must* be [`Send`].
///
/// # Examples
/// ```
/// use std::io;
/// use threader::{
/// executor::Executor,
/// thread_pool::ThreadPool,
/// net::tcp::TcpStream,
/// };
///
/// fn main() -> io::Result<()> {
/// let mut pool = ThreadPool::new()?;
/// let addr = "10.0.0.1:80".parse().unwrap();
///
/// pool.spawn(async move {
/// let _stream = TcpStream::connect(&addr);
/// });
///
/// pool.shutdown_on_idle();
/// Ok(())
/// }
/// ```
pub struct ThreadPool {
workers: Vec<(JoinHandle<()>, Arc<worker::Handle>)>,
count: usize,
shutdown: bool,
shared: Arc<Shared>,
}
impl ThreadPool {
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to the number of logical CPU cores in a given machine.
/// Returns any errors that may have occurred in creating the
/// thread pool.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::new()?;
/// Ok(())
/// }
/// ```
pub fn new() -> io::Result<ThreadPool> {
ThreadPool::new_priv(None)
}
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to `count`. `count` must not be zero, or this method
/// will panic. Like [`ThreadPool::new`], this method returns
/// any errors that occurred when creating the thread pool.
///
/// # Panics
/// Panics if `count` is equal to zero.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::with_threads(1)?;
/// Ok(())
/// }
/// ```
pub fn with_threads(count: usize) -> io::Result<ThreadPool> {
ThreadPool::new_priv(Some(count))
}
/// Shuts down the `ThreadPool` when all worker threads are idle. This method
/// blocks the current thread until all of the worker threads have been joined.
pub fn shutdown_on_idle(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_IDLE);
}
/// Shuts down the `ThreadPool` immediately, sending a message to all worker
/// threads to shut down. This emethod blocks the current thread until all
/// worker threads have been joined, but this blocking shouldn't be noticeable.
pub fn shutdown_now(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_NOW);
}
// Private method used to reduce code duplication.
fn shutdown_priv(&mut self, shutdown: usize) {
self.shutdown = true;
for (_, handle) in &self.workers {
handle.state.store(shutdown, Ordering::Release);
handle.unparker.unpark();
}
while let Some((thread, _)) = self.workers.pop() {
let _ = thread.join();
}
}
// Private method used to reduce code duplication.
fn new_priv(count: Option<usize>) -> io::Result<ThreadPool> {
if let Some(0) = count {
panic!("Can not create a thread pool with 0 threads.");
}
let count = count.unwrap_or(num_cpus::get());
let queues = {
let mut vec = Vec::with_capacity(count);
for _ in 0..count {
vec.push(Worker::new_fifo());
}
vec
};
let stealers: Vec<_> = queues.iter().map(|queue| queue.stealer()).collect();
let shared = Arc::new(Shared {
injector: Injector::new(),
sleep_queue: ArrayQueue::new(count),
stealers,
});
let workers = {
let mut vec = Vec::with_capacity(count);
for queue in queues {
let thread = worker::create_worker(Arc::clone(&shared), queue)?;
vec.push(thread);
}
vec
};
for (_, handle) in &workers {
let handle = Arc::clone(handle);
// Unwrap here since this is a programmer error
// if this fails.
shared.sleep_queue.push(handle).unwrap();
}
Ok(ThreadPool {
workers,
shutdown: false,
count,
shared,
})
}
}
impl<F> Executor<F> for ThreadPool
where
F: Future<Output = ()> + Send + 'static,
{
fn spawn(&self, future: F) {
let shared = Arc::downgrade(&self.shared);
let task = Task::new(future, shared);
self.shared.injector.push(task);
if !self.shared.sleep_queue.is_empty() {
if let Ok(handle) = self.shared.sleep_queue.pop() {
handle.state.store(worker::NEW_TASK, Ordering::Release);
handle.unparker.unpark();
}
}
}
}
impl Drop for ThreadPool {
fn drop(&mut self) {
self.shutdown_now();
}
}
pub(crate) struct Shared {
injector: Injector<Task>,
sleep_queue: ArrayQueue<Arc<worker::Handle>>,
stealers: Vec<Stealer<Task>>,
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam::channel;
use futures::future;
use futures::task::{Context, Waker};
use parking_lot::Mutex;
use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::task::Poll;
use std::thread;
use std::time::{Duration, Instant};
static TIMES: usize = 100;
#[test]
fn simple() {
let executor = ThreadPool::new().unwrap();
executor.spawn(async {
println!("Hello, world!");
});
thread::sleep(Duration::from_secs(1));
}
#[test]
fn reschedule() {
struct CustomFuture {
waker: Arc<Mutex<Option<Waker>>>,
shared: Arc<AtomicBool>,
}
impl CustomFuture {
fn new() -> CustomFuture {
let waker: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let waker_thread = Arc::clone(&waker);
let shared = Arc::new(AtomicBool::new(false));
let shared_thread = Arc::clone(&shared);
thread::spawn(move || {
thread::sleep(Duration::from_secs(1));
if let Some(waker) = waker_thread.lock().take() {
waker.wake();
shared_thread.store(true, Ordering::SeqCst);
}
});
CustomFuture { waker, shared }
}
}
impl Future for CustomFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.shared.load(Ordering::SeqCst) {
Poll::Ready(())
} else {
*(self.waker.lock()) = Some(cx.waker().clone());
Poll::Pending
}
}
}
let (tx, rx) = channel::unbounded();
let executor = ThreadPool::with_threads(12).unwrap();
executor.spawn(async move {
CustomFuture::new().await;
tx.send(0).unwrap();
});
thread::sleep(Duration::from_secs(4));
assert_eq!(rx.try_recv(), Ok(0));
}
#[test]
#[should_panic]
fn zero_threads() |
#[test]
fn custom_thread_count() {
let executor = ThreadPool::with_threads(32).unwrap();
executor.spawn(async {});
}
#[test]
#[ignore]
fn bad_future() {
// A future that spawns a thread, returns Poll::Ready(()), and
// keeps trying to reschedule itself on the thread_pool.
struct BadFuture {
shared: Arc<Mutex<Option<Waker>>>,
}
impl BadFuture {
fn new() -> BadFuture {
let shared: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let thread_shared = Arc::clone(&shared);
thread::spawn(move || loop {
let guard = thread_shared.lock();
if let Some(waker) = guard.as_ref() {
waker.clone().wake();
}
});
BadFuture { shared }
}
}
impl Future for BadFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut guard = self.shared.lock();
*guard = Some(cx.waker().clone());
Poll::Ready(())
}
}
let executor = ThreadPool::new().unwrap();
for _ in 0..50 {
executor.spawn(BadFuture::new());
}
}
#[test]
#[ignore]
fn time_threader() {
let mut executor = ThreadPool::with_threads(1).unwrap();
let mut results = Vec::with_capacity(TIMES);
eprintln!("\nthreader time test starting...");
let total_start = Instant::now();
for _ in 0..TIMES {
let start = Instant::now();
for _ in 0..50_000 {
executor.spawn(async {
future::ready(()).await;
});
}
let end = start.elapsed();
// eprintln!("threader: {:?}", end);
results.push(end.as_millis());
}
let shutdown_start = Instant::now();
executor.shutdown_on_idle();
eprintln!("threader shutdown: {:?}", shutdown_start.elapsed());
eprintln!("threader total: {:?}", total_start.elapsed());
let average = {
let sum: u128 = results.into_iter().sum();
(sum as f64) / (TIMES as f64)
};
eprintln!("threader average: {:?}ms", average);
}
}
| {
let executor = ThreadPool::with_threads(0).unwrap();
executor.spawn(async {});
} | identifier_body |
mod.rs | mod task;
mod worker;
use crate::executor::Executor;
use crossbeam::{
deque::{Injector, Stealer, Worker},
queue::ArrayQueue,
};
use futures::Future;
use std::{
io,
sync::{atomic::Ordering, Arc},
thread::JoinHandle,
};
use task::Task;
/// An executor which distributes tasks across multiple threads using a work-stealing
/// scheduler. Tasks can be spawned on it by calling the [`spawn`][`Executor::spawn`]
/// method on the `ThreadPool`. Note that since this executor moves futures between different
/// threads, the future in question *must* be [`Send`].
///
/// # Examples
/// ```
/// use std::io;
/// use threader::{
/// executor::Executor,
/// thread_pool::ThreadPool,
/// net::tcp::TcpStream,
/// };
///
/// fn main() -> io::Result<()> {
/// let mut pool = ThreadPool::new()?;
/// let addr = "10.0.0.1:80".parse().unwrap();
///
/// pool.spawn(async move {
/// let _stream = TcpStream::connect(&addr);
/// });
///
/// pool.shutdown_on_idle();
/// Ok(())
/// }
/// ```
pub struct ThreadPool {
workers: Vec<(JoinHandle<()>, Arc<worker::Handle>)>,
count: usize,
shutdown: bool,
shared: Arc<Shared>,
}
impl ThreadPool {
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to the number of logical CPU cores in a given machine.
/// Returns any errors that may have occurred in creating the
/// thread pool.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::new()?;
/// Ok(())
/// }
/// ```
pub fn new() -> io::Result<ThreadPool> {
ThreadPool::new_priv(None)
}
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to `count`. `count` must not be zero, or this method
/// will panic. Like [`ThreadPool::new`], this method returns
/// any errors that occurred when creating the thread pool.
///
/// # Panics
/// Panics if `count` is equal to zero.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::with_threads(1)?;
/// Ok(())
/// }
/// ```
pub fn with_threads(count: usize) -> io::Result<ThreadPool> {
ThreadPool::new_priv(Some(count))
}
/// Shuts down the `ThreadPool` when all worker threads are idle. This method
/// blocks the current thread until all of the worker threads have been joined.
pub fn shutdown_on_idle(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_IDLE);
}
/// Shuts down the `ThreadPool` immediately, sending a message to all worker
/// threads to shut down. This emethod blocks the current thread until all
/// worker threads have been joined, but this blocking shouldn't be noticeable.
pub fn shutdown_now(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_NOW);
}
// Private method used to reduce code duplication.
fn shutdown_priv(&mut self, shutdown: usize) {
self.shutdown = true;
for (_, handle) in &self.workers {
handle.state.store(shutdown, Ordering::Release);
handle.unparker.unpark();
}
while let Some((thread, _)) = self.workers.pop() {
let _ = thread.join();
}
}
// Private method used to reduce code duplication.
fn new_priv(count: Option<usize>) -> io::Result<ThreadPool> {
if let Some(0) = count {
panic!("Can not create a thread pool with 0 threads.");
}
let count = count.unwrap_or(num_cpus::get());
let queues = {
let mut vec = Vec::with_capacity(count);
for _ in 0..count {
vec.push(Worker::new_fifo());
}
vec
};
let stealers: Vec<_> = queues.iter().map(|queue| queue.stealer()).collect();
let shared = Arc::new(Shared {
injector: Injector::new(),
sleep_queue: ArrayQueue::new(count),
stealers,
});
let workers = {
let mut vec = Vec::with_capacity(count);
for queue in queues {
let thread = worker::create_worker(Arc::clone(&shared), queue)?;
vec.push(thread);
}
vec
};
for (_, handle) in &workers {
let handle = Arc::clone(handle);
// Unwrap here since this is a programmer error
// if this fails.
shared.sleep_queue.push(handle).unwrap();
}
Ok(ThreadPool {
workers,
shutdown: false,
count,
shared,
})
}
}
impl<F> Executor<F> for ThreadPool
where
F: Future<Output = ()> + Send + 'static,
{
fn spawn(&self, future: F) {
let shared = Arc::downgrade(&self.shared);
let task = Task::new(future, shared);
self.shared.injector.push(task);
if !self.shared.sleep_queue.is_empty() {
if let Ok(handle) = self.shared.sleep_queue.pop() {
handle.state.store(worker::NEW_TASK, Ordering::Release);
handle.unparker.unpark();
}
}
}
}
impl Drop for ThreadPool {
fn drop(&mut self) {
self.shutdown_now();
}
}
pub(crate) struct Shared {
injector: Injector<Task>,
sleep_queue: ArrayQueue<Arc<worker::Handle>>,
stealers: Vec<Stealer<Task>>,
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam::channel;
use futures::future;
use futures::task::{Context, Waker};
use parking_lot::Mutex;
use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::task::Poll;
use std::thread;
use std::time::{Duration, Instant};
static TIMES: usize = 100;
#[test]
fn simple() {
let executor = ThreadPool::new().unwrap();
executor.spawn(async {
println!("Hello, world!");
});
thread::sleep(Duration::from_secs(1));
}
#[test]
fn reschedule() {
struct CustomFuture {
waker: Arc<Mutex<Option<Waker>>>,
shared: Arc<AtomicBool>,
}
impl CustomFuture {
fn new() -> CustomFuture {
let waker: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let waker_thread = Arc::clone(&waker);
let shared = Arc::new(AtomicBool::new(false));
let shared_thread = Arc::clone(&shared);
thread::spawn(move || {
thread::sleep(Duration::from_secs(1));
if let Some(waker) = waker_thread.lock().take() {
waker.wake();
shared_thread.store(true, Ordering::SeqCst);
}
});
CustomFuture { waker, shared }
}
}
impl Future for CustomFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.shared.load(Ordering::SeqCst) {
Poll::Ready(())
} else {
*(self.waker.lock()) = Some(cx.waker().clone());
Poll::Pending
}
}
}
let (tx, rx) = channel::unbounded();
let executor = ThreadPool::with_threads(12).unwrap();
executor.spawn(async move {
CustomFuture::new().await;
tx.send(0).unwrap();
});
thread::sleep(Duration::from_secs(4));
assert_eq!(rx.try_recv(), Ok(0));
}
#[test]
#[should_panic]
fn zero_threads() { | #[test]
fn custom_thread_count() {
let executor = ThreadPool::with_threads(32).unwrap();
executor.spawn(async {});
}
#[test]
#[ignore]
fn bad_future() {
// A future that spawns a thread, returns Poll::Ready(()), and
// keeps trying to reschedule itself on the thread_pool.
struct BadFuture {
shared: Arc<Mutex<Option<Waker>>>,
}
impl BadFuture {
fn new() -> BadFuture {
let shared: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let thread_shared = Arc::clone(&shared);
thread::spawn(move || loop {
let guard = thread_shared.lock();
if let Some(waker) = guard.as_ref() {
waker.clone().wake();
}
});
BadFuture { shared }
}
}
impl Future for BadFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut guard = self.shared.lock();
*guard = Some(cx.waker().clone());
Poll::Ready(())
}
}
let executor = ThreadPool::new().unwrap();
for _ in 0..50 {
executor.spawn(BadFuture::new());
}
}
#[test]
#[ignore]
fn time_threader() {
let mut executor = ThreadPool::with_threads(1).unwrap();
let mut results = Vec::with_capacity(TIMES);
eprintln!("\nthreader time test starting...");
let total_start = Instant::now();
for _ in 0..TIMES {
let start = Instant::now();
for _ in 0..50_000 {
executor.spawn(async {
future::ready(()).await;
});
}
let end = start.elapsed();
// eprintln!("threader: {:?}", end);
results.push(end.as_millis());
}
let shutdown_start = Instant::now();
executor.shutdown_on_idle();
eprintln!("threader shutdown: {:?}", shutdown_start.elapsed());
eprintln!("threader total: {:?}", total_start.elapsed());
let average = {
let sum: u128 = results.into_iter().sum();
(sum as f64) / (TIMES as f64)
};
eprintln!("threader average: {:?}ms", average);
}
} | let executor = ThreadPool::with_threads(0).unwrap();
executor.spawn(async {});
}
| random_line_split |
main.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use futures::{channel::oneshot, FutureExt, TryFutureExt, TryStreamExt};
use iml_agent_comms::{
error::ImlAgentCommsError,
flush_queue,
host::{self, SharedHosts},
messaging::{consume_agent_tx_queue, AgentData, AGENT_TX_RUST},
session::{self, Session, Sessions},
};
use iml_rabbit::{self, create_connection_filter, send_message, Connection};
use iml_wire_types::{
Envelope, Fqdn, ManagerMessage, ManagerMessages, Message, PluginMessage, PluginName,
};
use std::{sync::Arc, time::Duration};
use warp::Filter;
async fn data_handler(
has_session: bool,
client: Connection,
data: AgentData,
) -> Result<(), ImlAgentCommsError> {
if has_session {
tracing::debug!("Forwarding valid message {}", data);
let s = format!("rust_agent_{}_rx", data.plugin);
send_message(client, "", s, PluginMessage::from(data)).await?;
} else |
Ok(())
}
async fn session_create_req_handler(
sessions: &mut Sessions,
client: Connection,
fqdn: Fqdn,
plugin: PluginName,
) -> Result<(), ImlAgentCommsError> {
let session = Session::new(plugin.clone(), fqdn.clone());
tracing::info!("Creating session {}", session);
let last_opt = sessions.insert(plugin.clone(), session.clone());
if let Some(last) = last_opt {
tracing::warn!("Destroying session {} to create new one", last);
let s = format!("rust_agent_{}_rx", plugin);
send_message(
client.clone(),
"",
s,
PluginMessage::SessionTerminate {
fqdn: last.fqdn,
plugin: last.plugin,
session_id: last.id,
},
)
.await?;
}
let s = format!("rust_agent_{}_rx", plugin.clone());
send_message(
client.clone(),
"",
s,
PluginMessage::SessionCreate {
fqdn: fqdn.clone(),
plugin: plugin.clone(),
session_id: session.id.clone(),
},
)
.await?;
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionCreateResponse {
fqdn,
plugin,
session_id: session.id,
},
)
.await?;
Ok(())
}
#[derive(serde::Deserialize, serde::Serialize, Debug)]
struct GetArgs {
server_boot_time: String,
client_start_time: String,
}
#[derive(serde::Deserialize, Debug)]
struct MessageFqdn {
pub fqdn: Fqdn,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
iml_tracing::init();
// Handle an error in locks by shutting down
let (tx, rx) = oneshot::channel();
let shared_hosts = host::shared_hosts();
let shared_hosts2 = Arc::clone(&shared_hosts);
let shared_hosts3 = Arc::clone(&shared_hosts);
tokio::spawn(
async move {
let conn = iml_rabbit::connect_to_rabbit().await?;
let ch = iml_rabbit::create_channel(&conn).await?;
let mut s = consume_agent_tx_queue(ch, AGENT_TX_RUST).await?;
while let Some(msg) = s.try_next().await? {
let MessageFqdn { fqdn } = serde_json::from_slice(&msg.data)?;
let mut hosts = shared_hosts.lock().await;
let host = hosts.get_mut(&fqdn);
if let Some(host) = host {
let mut queue = host.queue.lock().await;
queue.push_back(msg.data);
tracing::debug!(
"Put data on host queue {}: Queue size: {:?}",
fqdn,
queue.len()
);
} else {
tracing::warn!(
"Dropping message to {:?} because it did not have a host queue",
fqdn
);
}
}
Ok(())
}
.unwrap_or_else(|e: ImlAgentCommsError| {
tx.send(()).unwrap_or_else(drop);
tracing::error!("{:?}", e)
}),
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts2));
let (fut, client_filter) = create_connection_filter().await?;
tokio::spawn(fut);
let receiver = warp::post()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(hosts_filter)
.and(client_filter)
.and(warp::body::json())
.and_then(
|fqdn: Fqdn,
hosts: SharedHosts,
client: Connection,
Envelope {
messages,
client_start_time,
..
}: Envelope| {
async move {
tracing::debug!("<-- Delivery from agent {}: Messages: {:?}", fqdn, messages,);
let sessions = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(&mut hosts, fqdn, client_start_time);
Arc::clone(&host.sessions)
};
for msg in messages {
let s2 = Arc::clone(&sessions);
match msg {
Message::Data {
plugin,
session_id,
session_seq,
body,
fqdn,
..
} => {
let lock = s2.lock().await;
let has_session =
session::get_by_session_id(&plugin, &session_id, &lock)
.is_some();
data_handler(
has_session,
client.clone(),
AgentData {
fqdn,
plugin,
session_id,
session_seq,
body,
},
)
.await?;
}
Message::SessionCreateRequest { plugin, fqdn } => {
let mut lock = s2.lock().await;
session_create_req_handler(&mut lock, client.clone(), fqdn, plugin)
.await?;
}
}
}
Ok::<(), ImlAgentCommsError>(())
}
.map_err(warp::reject::custom)
.map_ok(|_| {
warp::reply::with_status(warp::reply(), warp::http::StatusCode::ACCEPTED)
})
},
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts3));
let sender = warp::get()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(warp::query::<GetArgs>())
.and(hosts_filter)
.and_then(|fqdn: Fqdn, args: GetArgs, hosts: SharedHosts| {
async move {
{
let mut hosts = hosts.lock().await;
let mut host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop();
// If we are not dealing with the same agent anymore, terminate all existing sessions.
if host.client_start_time != args.client_start_time {
tracing::info!(
"Terminating all sessions on {:?} because start time has changed",
fqdn
);
host.client_start_time = args.client_start_time;
return Ok::<_, ImlAgentCommsError>(ManagerMessages {
messages: vec![ManagerMessage::SessionTerminateAll { fqdn }],
});
}
}
let (tx, rx) = oneshot::channel();
let (sessions, queue) = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop_reading = Some(tx);
(Arc::clone(&host.sessions), Arc::clone(&host.queue))
};
let xs = flush_queue::flush(queue, Duration::from_secs(30), rx).await?;
let mut xs: Vec<ManagerMessage> = xs
.into_iter()
.map(|x| serde_json::from_slice(&x).map_err(ImlAgentCommsError::from))
.collect::<Result<Vec<_>, ImlAgentCommsError>>()?;
let guard = sessions.lock().await;
xs.retain(|x| session::is_session_valid(x, &guard));
tracing::debug!(
"--> Delivery to agent {}({:?}): {:?}",
&fqdn,
&args.client_start_time,
xs,
);
Ok::<_, ImlAgentCommsError>(ManagerMessages { messages: xs })
}
.map_err(warp::reject::custom)
})
.map(|envelope| warp::reply::json(&envelope));
let log = warp::log("iml_agent_comms::api");
let routes = warp::path("message").and(receiver.or(sender).with(log));
let addr = iml_manager_env::get_http_agent2_addr();
tracing::info!("Starting iml-agent-comms on {:?}", addr);
let (_, fut) = warp::serve(routes).bind_with_graceful_shutdown(addr, rx.map(drop));
fut.await;
Ok(())
}
| {
tracing::warn!("Terminating session because unknown {}", data);
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionTerminate {
fqdn: data.fqdn,
plugin: data.plugin,
session_id: data.session_id,
},
)
.await?;
} | conditional_block |
main.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use futures::{channel::oneshot, FutureExt, TryFutureExt, TryStreamExt};
use iml_agent_comms::{
error::ImlAgentCommsError,
flush_queue,
host::{self, SharedHosts},
messaging::{consume_agent_tx_queue, AgentData, AGENT_TX_RUST},
session::{self, Session, Sessions},
};
use iml_rabbit::{self, create_connection_filter, send_message, Connection};
use iml_wire_types::{
Envelope, Fqdn, ManagerMessage, ManagerMessages, Message, PluginMessage, PluginName,
};
use std::{sync::Arc, time::Duration};
use warp::Filter;
async fn data_handler(
has_session: bool,
client: Connection,
data: AgentData,
) -> Result<(), ImlAgentCommsError> {
if has_session {
tracing::debug!("Forwarding valid message {}", data);
let s = format!("rust_agent_{}_rx", data.plugin);
send_message(client, "", s, PluginMessage::from(data)).await?;
} else {
tracing::warn!("Terminating session because unknown {}", data);
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionTerminate {
fqdn: data.fqdn,
plugin: data.plugin,
session_id: data.session_id,
},
)
.await?;
}
Ok(())
}
async fn session_create_req_handler(
sessions: &mut Sessions,
client: Connection,
fqdn: Fqdn,
plugin: PluginName,
) -> Result<(), ImlAgentCommsError> {
let session = Session::new(plugin.clone(), fqdn.clone());
tracing::info!("Creating session {}", session);
let last_opt = sessions.insert(plugin.clone(), session.clone());
if let Some(last) = last_opt {
tracing::warn!("Destroying session {} to create new one", last);
let s = format!("rust_agent_{}_rx", plugin);
send_message(
client.clone(),
"",
s,
PluginMessage::SessionTerminate {
fqdn: last.fqdn,
plugin: last.plugin,
session_id: last.id,
},
)
.await?;
}
let s = format!("rust_agent_{}_rx", plugin.clone());
send_message(
client.clone(),
"",
s,
PluginMessage::SessionCreate {
fqdn: fqdn.clone(),
plugin: plugin.clone(),
session_id: session.id.clone(),
},
)
.await?;
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionCreateResponse {
fqdn,
plugin,
session_id: session.id,
},
)
.await?;
Ok(())
}
#[derive(serde::Deserialize, serde::Serialize, Debug)]
struct GetArgs {
server_boot_time: String,
client_start_time: String,
}
#[derive(serde::Deserialize, Debug)]
struct MessageFqdn {
pub fqdn: Fqdn,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
iml_tracing::init();
// Handle an error in locks by shutting down
let (tx, rx) = oneshot::channel();
let shared_hosts = host::shared_hosts();
let shared_hosts2 = Arc::clone(&shared_hosts);
let shared_hosts3 = Arc::clone(&shared_hosts);
tokio::spawn(
async move {
let conn = iml_rabbit::connect_to_rabbit().await?;
let ch = iml_rabbit::create_channel(&conn).await?;
let mut s = consume_agent_tx_queue(ch, AGENT_TX_RUST).await?;
while let Some(msg) = s.try_next().await? {
let MessageFqdn { fqdn } = serde_json::from_slice(&msg.data)?;
let mut hosts = shared_hosts.lock().await;
let host = hosts.get_mut(&fqdn);
if let Some(host) = host {
let mut queue = host.queue.lock().await; |
tracing::debug!(
"Put data on host queue {}: Queue size: {:?}",
fqdn,
queue.len()
);
} else {
tracing::warn!(
"Dropping message to {:?} because it did not have a host queue",
fqdn
);
}
}
Ok(())
}
.unwrap_or_else(|e: ImlAgentCommsError| {
tx.send(()).unwrap_or_else(drop);
tracing::error!("{:?}", e)
}),
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts2));
let (fut, client_filter) = create_connection_filter().await?;
tokio::spawn(fut);
let receiver = warp::post()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(hosts_filter)
.and(client_filter)
.and(warp::body::json())
.and_then(
|fqdn: Fqdn,
hosts: SharedHosts,
client: Connection,
Envelope {
messages,
client_start_time,
..
}: Envelope| {
async move {
tracing::debug!("<-- Delivery from agent {}: Messages: {:?}", fqdn, messages,);
let sessions = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(&mut hosts, fqdn, client_start_time);
Arc::clone(&host.sessions)
};
for msg in messages {
let s2 = Arc::clone(&sessions);
match msg {
Message::Data {
plugin,
session_id,
session_seq,
body,
fqdn,
..
} => {
let lock = s2.lock().await;
let has_session =
session::get_by_session_id(&plugin, &session_id, &lock)
.is_some();
data_handler(
has_session,
client.clone(),
AgentData {
fqdn,
plugin,
session_id,
session_seq,
body,
},
)
.await?;
}
Message::SessionCreateRequest { plugin, fqdn } => {
let mut lock = s2.lock().await;
session_create_req_handler(&mut lock, client.clone(), fqdn, plugin)
.await?;
}
}
}
Ok::<(), ImlAgentCommsError>(())
}
.map_err(warp::reject::custom)
.map_ok(|_| {
warp::reply::with_status(warp::reply(), warp::http::StatusCode::ACCEPTED)
})
},
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts3));
let sender = warp::get()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(warp::query::<GetArgs>())
.and(hosts_filter)
.and_then(|fqdn: Fqdn, args: GetArgs, hosts: SharedHosts| {
async move {
{
let mut hosts = hosts.lock().await;
let mut host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop();
// If we are not dealing with the same agent anymore, terminate all existing sessions.
if host.client_start_time != args.client_start_time {
tracing::info!(
"Terminating all sessions on {:?} because start time has changed",
fqdn
);
host.client_start_time = args.client_start_time;
return Ok::<_, ImlAgentCommsError>(ManagerMessages {
messages: vec![ManagerMessage::SessionTerminateAll { fqdn }],
});
}
}
let (tx, rx) = oneshot::channel();
let (sessions, queue) = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop_reading = Some(tx);
(Arc::clone(&host.sessions), Arc::clone(&host.queue))
};
let xs = flush_queue::flush(queue, Duration::from_secs(30), rx).await?;
let mut xs: Vec<ManagerMessage> = xs
.into_iter()
.map(|x| serde_json::from_slice(&x).map_err(ImlAgentCommsError::from))
.collect::<Result<Vec<_>, ImlAgentCommsError>>()?;
let guard = sessions.lock().await;
xs.retain(|x| session::is_session_valid(x, &guard));
tracing::debug!(
"--> Delivery to agent {}({:?}): {:?}",
&fqdn,
&args.client_start_time,
xs,
);
Ok::<_, ImlAgentCommsError>(ManagerMessages { messages: xs })
}
.map_err(warp::reject::custom)
})
.map(|envelope| warp::reply::json(&envelope));
let log = warp::log("iml_agent_comms::api");
let routes = warp::path("message").and(receiver.or(sender).with(log));
let addr = iml_manager_env::get_http_agent2_addr();
tracing::info!("Starting iml-agent-comms on {:?}", addr);
let (_, fut) = warp::serve(routes).bind_with_graceful_shutdown(addr, rx.map(drop));
fut.await;
Ok(())
} | queue.push_back(msg.data); | random_line_split |
main.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use futures::{channel::oneshot, FutureExt, TryFutureExt, TryStreamExt};
use iml_agent_comms::{
error::ImlAgentCommsError,
flush_queue,
host::{self, SharedHosts},
messaging::{consume_agent_tx_queue, AgentData, AGENT_TX_RUST},
session::{self, Session, Sessions},
};
use iml_rabbit::{self, create_connection_filter, send_message, Connection};
use iml_wire_types::{
Envelope, Fqdn, ManagerMessage, ManagerMessages, Message, PluginMessage, PluginName,
};
use std::{sync::Arc, time::Duration};
use warp::Filter;
async fn data_handler(
has_session: bool,
client: Connection,
data: AgentData,
) -> Result<(), ImlAgentCommsError> {
if has_session {
tracing::debug!("Forwarding valid message {}", data);
let s = format!("rust_agent_{}_rx", data.plugin);
send_message(client, "", s, PluginMessage::from(data)).await?;
} else {
tracing::warn!("Terminating session because unknown {}", data);
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionTerminate {
fqdn: data.fqdn,
plugin: data.plugin,
session_id: data.session_id,
},
)
.await?;
}
Ok(())
}
async fn session_create_req_handler(
sessions: &mut Sessions,
client: Connection,
fqdn: Fqdn,
plugin: PluginName,
) -> Result<(), ImlAgentCommsError> {
let session = Session::new(plugin.clone(), fqdn.clone());
tracing::info!("Creating session {}", session);
let last_opt = sessions.insert(plugin.clone(), session.clone());
if let Some(last) = last_opt {
tracing::warn!("Destroying session {} to create new one", last);
let s = format!("rust_agent_{}_rx", plugin);
send_message(
client.clone(),
"",
s,
PluginMessage::SessionTerminate {
fqdn: last.fqdn,
plugin: last.plugin,
session_id: last.id,
},
)
.await?;
}
let s = format!("rust_agent_{}_rx", plugin.clone());
send_message(
client.clone(),
"",
s,
PluginMessage::SessionCreate {
fqdn: fqdn.clone(),
plugin: plugin.clone(),
session_id: session.id.clone(),
},
)
.await?;
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionCreateResponse {
fqdn,
plugin,
session_id: session.id,
},
)
.await?;
Ok(())
}
#[derive(serde::Deserialize, serde::Serialize, Debug)]
struct GetArgs {
server_boot_time: String,
client_start_time: String,
}
#[derive(serde::Deserialize, Debug)]
struct MessageFqdn {
pub fqdn: Fqdn,
}
#[tokio::main]
async fn | () -> Result<(), Box<dyn std::error::Error>> {
iml_tracing::init();
// Handle an error in locks by shutting down
let (tx, rx) = oneshot::channel();
let shared_hosts = host::shared_hosts();
let shared_hosts2 = Arc::clone(&shared_hosts);
let shared_hosts3 = Arc::clone(&shared_hosts);
tokio::spawn(
async move {
let conn = iml_rabbit::connect_to_rabbit().await?;
let ch = iml_rabbit::create_channel(&conn).await?;
let mut s = consume_agent_tx_queue(ch, AGENT_TX_RUST).await?;
while let Some(msg) = s.try_next().await? {
let MessageFqdn { fqdn } = serde_json::from_slice(&msg.data)?;
let mut hosts = shared_hosts.lock().await;
let host = hosts.get_mut(&fqdn);
if let Some(host) = host {
let mut queue = host.queue.lock().await;
queue.push_back(msg.data);
tracing::debug!(
"Put data on host queue {}: Queue size: {:?}",
fqdn,
queue.len()
);
} else {
tracing::warn!(
"Dropping message to {:?} because it did not have a host queue",
fqdn
);
}
}
Ok(())
}
.unwrap_or_else(|e: ImlAgentCommsError| {
tx.send(()).unwrap_or_else(drop);
tracing::error!("{:?}", e)
}),
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts2));
let (fut, client_filter) = create_connection_filter().await?;
tokio::spawn(fut);
let receiver = warp::post()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(hosts_filter)
.and(client_filter)
.and(warp::body::json())
.and_then(
|fqdn: Fqdn,
hosts: SharedHosts,
client: Connection,
Envelope {
messages,
client_start_time,
..
}: Envelope| {
async move {
tracing::debug!("<-- Delivery from agent {}: Messages: {:?}", fqdn, messages,);
let sessions = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(&mut hosts, fqdn, client_start_time);
Arc::clone(&host.sessions)
};
for msg in messages {
let s2 = Arc::clone(&sessions);
match msg {
Message::Data {
plugin,
session_id,
session_seq,
body,
fqdn,
..
} => {
let lock = s2.lock().await;
let has_session =
session::get_by_session_id(&plugin, &session_id, &lock)
.is_some();
data_handler(
has_session,
client.clone(),
AgentData {
fqdn,
plugin,
session_id,
session_seq,
body,
},
)
.await?;
}
Message::SessionCreateRequest { plugin, fqdn } => {
let mut lock = s2.lock().await;
session_create_req_handler(&mut lock, client.clone(), fqdn, plugin)
.await?;
}
}
}
Ok::<(), ImlAgentCommsError>(())
}
.map_err(warp::reject::custom)
.map_ok(|_| {
warp::reply::with_status(warp::reply(), warp::http::StatusCode::ACCEPTED)
})
},
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts3));
let sender = warp::get()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(warp::query::<GetArgs>())
.and(hosts_filter)
.and_then(|fqdn: Fqdn, args: GetArgs, hosts: SharedHosts| {
async move {
{
let mut hosts = hosts.lock().await;
let mut host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop();
// If we are not dealing with the same agent anymore, terminate all existing sessions.
if host.client_start_time != args.client_start_time {
tracing::info!(
"Terminating all sessions on {:?} because start time has changed",
fqdn
);
host.client_start_time = args.client_start_time;
return Ok::<_, ImlAgentCommsError>(ManagerMessages {
messages: vec![ManagerMessage::SessionTerminateAll { fqdn }],
});
}
}
let (tx, rx) = oneshot::channel();
let (sessions, queue) = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop_reading = Some(tx);
(Arc::clone(&host.sessions), Arc::clone(&host.queue))
};
let xs = flush_queue::flush(queue, Duration::from_secs(30), rx).await?;
let mut xs: Vec<ManagerMessage> = xs
.into_iter()
.map(|x| serde_json::from_slice(&x).map_err(ImlAgentCommsError::from))
.collect::<Result<Vec<_>, ImlAgentCommsError>>()?;
let guard = sessions.lock().await;
xs.retain(|x| session::is_session_valid(x, &guard));
tracing::debug!(
"--> Delivery to agent {}({:?}): {:?}",
&fqdn,
&args.client_start_time,
xs,
);
Ok::<_, ImlAgentCommsError>(ManagerMessages { messages: xs })
}
.map_err(warp::reject::custom)
})
.map(|envelope| warp::reply::json(&envelope));
let log = warp::log("iml_agent_comms::api");
let routes = warp::path("message").and(receiver.or(sender).with(log));
let addr = iml_manager_env::get_http_agent2_addr();
tracing::info!("Starting iml-agent-comms on {:?}", addr);
let (_, fut) = warp::serve(routes).bind_with_graceful_shutdown(addr, rx.map(drop));
fut.await;
Ok(())
}
| main | identifier_name |
mod.rs | mod workflow_machines;
// TODO: Move all these inside a submachines module
#[allow(unused)]
mod activity_state_machine;
#[allow(unused)]
mod cancel_external_state_machine;
#[allow(unused)]
mod cancel_workflow_state_machine;
#[allow(unused)]
mod child_workflow_state_machine;
mod complete_workflow_state_machine;
#[allow(unused)]
mod continue_as_new_workflow_state_machine;
#[allow(unused)]
mod fail_workflow_state_machine;
#[allow(unused)]
mod local_activity_state_machine;
#[allow(unused)]
mod mutable_side_effect_state_machine;
#[allow(unused)]
mod side_effect_state_machine;
#[allow(unused)]
mod signal_external_state_machine;
mod timer_state_machine;
#[allow(unused)]
mod upsert_search_attributes_state_machine;
#[allow(unused)]
mod version_state_machine;
mod workflow_task_state_machine;
#[cfg(test)]
pub(crate) mod test_help;
pub(crate) use workflow_machines::{WFMachinesError, WorkflowMachines};
use crate::{
machines::workflow_machines::MachineResponse,
protos::{
coresdk::{self, command::Variant, wf_activation_job},
temporal::api::{
command::v1::{
command::Attributes, CancelTimerCommandAttributes, Command,
CompleteWorkflowExecutionCommandAttributes, StartTimerCommandAttributes,
},
enums::v1::CommandType,
history::v1::{
HistoryEvent, WorkflowExecutionCanceledEventAttributes,
WorkflowExecutionSignaledEventAttributes, WorkflowExecutionStartedEventAttributes,
},
},
},
};
use prost::alloc::fmt::Formatter;
use rustfsm::{MachineError, StateMachine};
use std::{
convert::{TryFrom, TryInto},
fmt::{Debug, Display},
};
use tracing::Level;
pub(crate) type ProtoCommand = Command;
/// Implementors of this trait represent something that can (eventually) call into a workflow to
/// drive it, start it, signal it, cancel it, etc.
pub(crate) trait DrivenWorkflow: ActivationListener + Send {
/// Start the workflow
fn start(&mut self, attribs: WorkflowExecutionStartedEventAttributes);
/// Obtain any output from the workflow's recent execution(s). Because the lang sdk is
/// responsible for calling workflow code as a result of receiving tasks from
/// [crate::Core::poll_task], we cannot directly iterate it here. Thus implementations of this
/// trait are expected to either buffer output or otherwise produce it on demand when this
/// function is called.
///
/// In the case of the real [WorkflowBridge] implementation, commands are simply pulled from
/// a buffer that the language side sinks into when it calls [crate::Core::complete_task]
fn fetch_workflow_iteration_output(&mut self) -> Vec<WFCommand>;
/// Signal the workflow
fn signal(&mut self, attribs: WorkflowExecutionSignaledEventAttributes);
/// Cancel the workflow
fn cancel(&mut self, attribs: WorkflowExecutionCanceledEventAttributes);
}
/// Allows observers to listen to newly generated outgoing activation jobs. Used for testing, where
/// some activations must be handled before outgoing commands are issued to avoid deadlocking.
pub(crate) trait ActivationListener {
fn on_activation_job(&mut self, _activation: &wf_activation_job::Attributes) {}
}
/// [DrivenWorkflow]s respond with these when called, to indicate what they want to do next.
/// EX: Create a new timer, complete the workflow, etc.
#[derive(Debug, derive_more::From)]
pub enum WFCommand {
/// Returned when we need to wait for the lang sdk to send us something
NoCommandsFromLang,
AddTimer(StartTimerCommandAttributes),
CancelTimer(CancelTimerCommandAttributes),
CompleteWorkflow(CompleteWorkflowExecutionCommandAttributes),
}
#[derive(thiserror::Error, Debug, derive_more::From)]
#[error("Couldn't convert <lang> command")]
pub struct InconvertibleCommandError(pub coresdk::Command);
impl TryFrom<coresdk::Command> for WFCommand {
type Error = InconvertibleCommandError;
fn try_from(c: coresdk::Command) -> Result<Self, Self::Error> {
match c.variant {
Some(Variant::Api(Command {
attributes: Some(attrs),
..
})) => match attrs {
Attributes::StartTimerCommandAttributes(s) => Ok(WFCommand::AddTimer(s)),
Attributes::CancelTimerCommandAttributes(s) => Ok(WFCommand::CancelTimer(s)),
Attributes::CompleteWorkflowExecutionCommandAttributes(c) => {
Ok(WFCommand::CompleteWorkflow(c))
}
_ => unimplemented!(),
},
_ => Err(c.into()),
}
}
}
/// Extends [rustfsm::StateMachine] with some functionality specific to the temporal SDK.
///
/// Formerly known as `EntityStateMachine` in Java.
trait TemporalStateMachine: CheckStateMachineInFinal + Send {
fn name(&self) -> &str;
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError>;
/// Tell the state machine to handle some event. Returns a list of responses that can be used
/// to update the overall state of the workflow. EX: To issue outgoing WF activations.
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
/// Attempt to cancel the command associated with this state machine, if it is cancellable
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError>;
/// Should return true if the command was cancelled before we sent it to the server. Always
/// returns false for non-cancellable machines
fn was_cancelled_before_sent_to_server(&self) -> bool;
}
impl<SM> TemporalStateMachine for SM
where
SM: StateMachine + CheckStateMachineInFinal + WFMachinesAdapter + Cancellable + Clone + Send,
<SM as StateMachine>::Event: TryFrom<HistoryEvent>,
<SM as StateMachine>::Event: TryFrom<CommandType>,
WFMachinesError: From<<<SM as StateMachine>::Event as TryFrom<HistoryEvent>>::Error>,
<SM as StateMachine>::Command: Debug,
<SM as StateMachine>::State: Display,
<SM as StateMachine>::Error: Into<WFMachinesError> + 'static + Send + Sync,
{
fn name(&self) -> &str {
<Self as StateMachine>::name(self)
}
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling command",
?command_type,
machine_name = %self.name(),
state = %self.state()
);
if let Ok(converted_command) = command_type.try_into() {
match self.on_event_mut(converted_command) {
Ok(_c) => Ok(()),
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
} else {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
}
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling event",
%event,
machine_name = %self.name(),
state = %self.state()
);
let converted_event = event.clone().try_into()?;
match self.on_event_mut(converted_event) {
Ok(c) => {
if !c.is_empty() {
event!(Level::DEBUG, msg = "Machine produced commands", ?c, state = %self.state());
}
let mut machine_responses = vec![];
for cmd in c {
machine_responses.extend(self.adapt_response(event, has_next_event, cmd)?);
}
Ok(machine_responses)
}
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::InvalidTransitionDuringEvent(
event.clone(),
format!(
"{} in state {} says the transition is invalid",
self.name(),
self.state()
),
))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
}
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError> {
let res = self.cancel();
res.map_err(|e| match e {
MachineError::InvalidTransition => {
WFMachinesError::InvalidTransition("while attempting to cancel")
}
MachineError::Underlying(e) => e.into(),
})
}
fn | (&self) -> bool {
self.was_cancelled_before_sent_to_server()
}
}
/// Exists purely to allow generic implementation of `is_final_state` for all [StateMachine]
/// implementors
trait CheckStateMachineInFinal {
/// Returns true if the state machine is in a final state
fn is_final_state(&self) -> bool;
}
impl<SM> CheckStateMachineInFinal for SM
where
SM: StateMachine,
{
fn is_final_state(&self) -> bool {
self.on_final_state()
}
}
/// This trait exists to bridge [StateMachine]s and the [WorkflowMachines] instance. It has access
/// to the machine's concrete types while hiding those details from [WorkflowMachines]
trait WFMachinesAdapter: StateMachine {
/// Given a the event being processed, and a command that this [StateMachine] instance just
/// produced, perform any handling that needs inform the [WorkflowMachines] instance of some
/// action to be taken in response to that command.
fn adapt_response(
&self,
event: &HistoryEvent,
has_next_event: bool,
my_command: Self::Command,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
}
trait Cancellable: StateMachine {
/// Cancel the machine / the command represented by the machine.
///
/// # Panics
/// * If the machine is not cancellable. It's a logic error on our part to call it on such
/// machines.
fn cancel(&mut self) -> Result<MachineResponse, MachineError<Self::Error>> {
// It's a logic error on our part if this is ever called on a machine that can't actually
// be cancelled
panic!(format!("Machine {} cannot be cancelled", self.name()))
}
/// Should return true if the command was cancelled before we sent it to the server
fn was_cancelled_before_sent_to_server(&self) -> bool {
false
}
}
#[derive(Debug)]
struct NewMachineWithCommand<T: TemporalStateMachine> {
command: ProtoCommand,
machine: T,
}
impl Debug for dyn TemporalStateMachine {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(self.name())
}
}
| was_cancelled_before_sent_to_server | identifier_name |
mod.rs | mod workflow_machines;
// TODO: Move all these inside a submachines module
#[allow(unused)]
mod activity_state_machine;
#[allow(unused)]
mod cancel_external_state_machine;
#[allow(unused)]
mod cancel_workflow_state_machine;
#[allow(unused)]
mod child_workflow_state_machine;
mod complete_workflow_state_machine;
#[allow(unused)]
mod continue_as_new_workflow_state_machine;
#[allow(unused)]
mod fail_workflow_state_machine;
#[allow(unused)]
mod local_activity_state_machine;
#[allow(unused)]
mod mutable_side_effect_state_machine;
#[allow(unused)]
mod side_effect_state_machine;
#[allow(unused)]
mod signal_external_state_machine;
mod timer_state_machine;
#[allow(unused)]
mod upsert_search_attributes_state_machine;
#[allow(unused)]
mod version_state_machine;
mod workflow_task_state_machine;
#[cfg(test)]
pub(crate) mod test_help;
pub(crate) use workflow_machines::{WFMachinesError, WorkflowMachines};
use crate::{
machines::workflow_machines::MachineResponse,
protos::{
coresdk::{self, command::Variant, wf_activation_job},
temporal::api::{
command::v1::{
command::Attributes, CancelTimerCommandAttributes, Command,
CompleteWorkflowExecutionCommandAttributes, StartTimerCommandAttributes,
},
enums::v1::CommandType,
history::v1::{
HistoryEvent, WorkflowExecutionCanceledEventAttributes,
WorkflowExecutionSignaledEventAttributes, WorkflowExecutionStartedEventAttributes,
},
},
},
};
use prost::alloc::fmt::Formatter;
use rustfsm::{MachineError, StateMachine};
use std::{
convert::{TryFrom, TryInto},
fmt::{Debug, Display},
};
use tracing::Level;
pub(crate) type ProtoCommand = Command;
/// Implementors of this trait represent something that can (eventually) call into a workflow to
/// drive it, start it, signal it, cancel it, etc.
pub(crate) trait DrivenWorkflow: ActivationListener + Send {
/// Start the workflow
fn start(&mut self, attribs: WorkflowExecutionStartedEventAttributes);
/// Obtain any output from the workflow's recent execution(s). Because the lang sdk is
/// responsible for calling workflow code as a result of receiving tasks from
/// [crate::Core::poll_task], we cannot directly iterate it here. Thus implementations of this
/// trait are expected to either buffer output or otherwise produce it on demand when this
/// function is called.
///
/// In the case of the real [WorkflowBridge] implementation, commands are simply pulled from
/// a buffer that the language side sinks into when it calls [crate::Core::complete_task]
fn fetch_workflow_iteration_output(&mut self) -> Vec<WFCommand>;
/// Signal the workflow
fn signal(&mut self, attribs: WorkflowExecutionSignaledEventAttributes);
/// Cancel the workflow
fn cancel(&mut self, attribs: WorkflowExecutionCanceledEventAttributes);
}
/// Allows observers to listen to newly generated outgoing activation jobs. Used for testing, where
/// some activations must be handled before outgoing commands are issued to avoid deadlocking.
pub(crate) trait ActivationListener {
fn on_activation_job(&mut self, _activation: &wf_activation_job::Attributes) {}
}
/// [DrivenWorkflow]s respond with these when called, to indicate what they want to do next.
/// EX: Create a new timer, complete the workflow, etc.
#[derive(Debug, derive_more::From)]
pub enum WFCommand {
/// Returned when we need to wait for the lang sdk to send us something
NoCommandsFromLang,
AddTimer(StartTimerCommandAttributes),
CancelTimer(CancelTimerCommandAttributes),
CompleteWorkflow(CompleteWorkflowExecutionCommandAttributes),
}
#[derive(thiserror::Error, Debug, derive_more::From)]
#[error("Couldn't convert <lang> command")]
pub struct InconvertibleCommandError(pub coresdk::Command);
impl TryFrom<coresdk::Command> for WFCommand {
type Error = InconvertibleCommandError;
fn try_from(c: coresdk::Command) -> Result<Self, Self::Error> {
match c.variant {
Some(Variant::Api(Command {
attributes: Some(attrs),
..
})) => match attrs {
Attributes::StartTimerCommandAttributes(s) => Ok(WFCommand::AddTimer(s)),
Attributes::CancelTimerCommandAttributes(s) => Ok(WFCommand::CancelTimer(s)),
Attributes::CompleteWorkflowExecutionCommandAttributes(c) => {
Ok(WFCommand::CompleteWorkflow(c))
}
_ => unimplemented!(),
},
_ => Err(c.into()),
}
}
}
/// Extends [rustfsm::StateMachine] with some functionality specific to the temporal SDK.
///
/// Formerly known as `EntityStateMachine` in Java.
trait TemporalStateMachine: CheckStateMachineInFinal + Send {
fn name(&self) -> &str;
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError>;
/// Tell the state machine to handle some event. Returns a list of responses that can be used
/// to update the overall state of the workflow. EX: To issue outgoing WF activations.
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
/// Attempt to cancel the command associated with this state machine, if it is cancellable
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError>;
/// Should return true if the command was cancelled before we sent it to the server. Always
/// returns false for non-cancellable machines
fn was_cancelled_before_sent_to_server(&self) -> bool;
}
impl<SM> TemporalStateMachine for SM
where
SM: StateMachine + CheckStateMachineInFinal + WFMachinesAdapter + Cancellable + Clone + Send,
<SM as StateMachine>::Event: TryFrom<HistoryEvent>,
<SM as StateMachine>::Event: TryFrom<CommandType>,
WFMachinesError: From<<<SM as StateMachine>::Event as TryFrom<HistoryEvent>>::Error>,
<SM as StateMachine>::Command: Debug,
<SM as StateMachine>::State: Display,
<SM as StateMachine>::Error: Into<WFMachinesError> + 'static + Send + Sync,
{
fn name(&self) -> &str {
<Self as StateMachine>::name(self)
}
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling command",
?command_type,
machine_name = %self.name(),
state = %self.state()
);
if let Ok(converted_command) = command_type.try_into() {
match self.on_event_mut(converted_command) {
Ok(_c) => Ok(()),
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
} else {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
}
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling event",
%event,
machine_name = %self.name(),
state = %self.state()
);
let converted_event = event.clone().try_into()?;
match self.on_event_mut(converted_event) {
Ok(c) => {
if !c.is_empty() {
event!(Level::DEBUG, msg = "Machine produced commands", ?c, state = %self.state());
}
let mut machine_responses = vec![];
for cmd in c {
machine_responses.extend(self.adapt_response(event, has_next_event, cmd)?);
}
Ok(machine_responses)
}
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::InvalidTransitionDuringEvent(
event.clone(),
format!(
"{} in state {} says the transition is invalid",
self.name(),
self.state()
),
))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
}
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError> {
let res = self.cancel();
res.map_err(|e| match e {
MachineError::InvalidTransition => {
WFMachinesError::InvalidTransition("while attempting to cancel")
}
MachineError::Underlying(e) => e.into(),
})
}
fn was_cancelled_before_sent_to_server(&self) -> bool |
}
/// Exists purely to allow generic implementation of `is_final_state` for all [StateMachine]
/// implementors
trait CheckStateMachineInFinal {
/// Returns true if the state machine is in a final state
fn is_final_state(&self) -> bool;
}
impl<SM> CheckStateMachineInFinal for SM
where
SM: StateMachine,
{
fn is_final_state(&self) -> bool {
self.on_final_state()
}
}
/// This trait exists to bridge [StateMachine]s and the [WorkflowMachines] instance. It has access
/// to the machine's concrete types while hiding those details from [WorkflowMachines]
trait WFMachinesAdapter: StateMachine {
/// Given a the event being processed, and a command that this [StateMachine] instance just
/// produced, perform any handling that needs inform the [WorkflowMachines] instance of some
/// action to be taken in response to that command.
fn adapt_response(
&self,
event: &HistoryEvent,
has_next_event: bool,
my_command: Self::Command,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
}
trait Cancellable: StateMachine {
/// Cancel the machine / the command represented by the machine.
///
/// # Panics
/// * If the machine is not cancellable. It's a logic error on our part to call it on such
/// machines.
fn cancel(&mut self) -> Result<MachineResponse, MachineError<Self::Error>> {
// It's a logic error on our part if this is ever called on a machine that can't actually
// be cancelled
panic!(format!("Machine {} cannot be cancelled", self.name()))
}
/// Should return true if the command was cancelled before we sent it to the server
fn was_cancelled_before_sent_to_server(&self) -> bool {
false
}
}
#[derive(Debug)]
struct NewMachineWithCommand<T: TemporalStateMachine> {
command: ProtoCommand,
machine: T,
}
impl Debug for dyn TemporalStateMachine {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(self.name())
}
}
| {
self.was_cancelled_before_sent_to_server()
} | identifier_body |
mod.rs | mod workflow_machines;
// TODO: Move all these inside a submachines module
#[allow(unused)]
mod activity_state_machine;
#[allow(unused)] | mod cancel_external_state_machine;
#[allow(unused)]
mod cancel_workflow_state_machine;
#[allow(unused)]
mod child_workflow_state_machine;
mod complete_workflow_state_machine;
#[allow(unused)]
mod continue_as_new_workflow_state_machine;
#[allow(unused)]
mod fail_workflow_state_machine;
#[allow(unused)]
mod local_activity_state_machine;
#[allow(unused)]
mod mutable_side_effect_state_machine;
#[allow(unused)]
mod side_effect_state_machine;
#[allow(unused)]
mod signal_external_state_machine;
mod timer_state_machine;
#[allow(unused)]
mod upsert_search_attributes_state_machine;
#[allow(unused)]
mod version_state_machine;
mod workflow_task_state_machine;
#[cfg(test)]
pub(crate) mod test_help;
pub(crate) use workflow_machines::{WFMachinesError, WorkflowMachines};
use crate::{
machines::workflow_machines::MachineResponse,
protos::{
coresdk::{self, command::Variant, wf_activation_job},
temporal::api::{
command::v1::{
command::Attributes, CancelTimerCommandAttributes, Command,
CompleteWorkflowExecutionCommandAttributes, StartTimerCommandAttributes,
},
enums::v1::CommandType,
history::v1::{
HistoryEvent, WorkflowExecutionCanceledEventAttributes,
WorkflowExecutionSignaledEventAttributes, WorkflowExecutionStartedEventAttributes,
},
},
},
};
use prost::alloc::fmt::Formatter;
use rustfsm::{MachineError, StateMachine};
use std::{
convert::{TryFrom, TryInto},
fmt::{Debug, Display},
};
use tracing::Level;
pub(crate) type ProtoCommand = Command;
/// Implementors of this trait represent something that can (eventually) call into a workflow to
/// drive it, start it, signal it, cancel it, etc.
pub(crate) trait DrivenWorkflow: ActivationListener + Send {
/// Start the workflow
fn start(&mut self, attribs: WorkflowExecutionStartedEventAttributes);
/// Obtain any output from the workflow's recent execution(s). Because the lang sdk is
/// responsible for calling workflow code as a result of receiving tasks from
/// [crate::Core::poll_task], we cannot directly iterate it here. Thus implementations of this
/// trait are expected to either buffer output or otherwise produce it on demand when this
/// function is called.
///
/// In the case of the real [WorkflowBridge] implementation, commands are simply pulled from
/// a buffer that the language side sinks into when it calls [crate::Core::complete_task]
fn fetch_workflow_iteration_output(&mut self) -> Vec<WFCommand>;
/// Signal the workflow
fn signal(&mut self, attribs: WorkflowExecutionSignaledEventAttributes);
/// Cancel the workflow
fn cancel(&mut self, attribs: WorkflowExecutionCanceledEventAttributes);
}
/// Allows observers to listen to newly generated outgoing activation jobs. Used for testing, where
/// some activations must be handled before outgoing commands are issued to avoid deadlocking.
pub(crate) trait ActivationListener {
fn on_activation_job(&mut self, _activation: &wf_activation_job::Attributes) {}
}
/// [DrivenWorkflow]s respond with these when called, to indicate what they want to do next.
/// EX: Create a new timer, complete the workflow, etc.
#[derive(Debug, derive_more::From)]
pub enum WFCommand {
/// Returned when we need to wait for the lang sdk to send us something
NoCommandsFromLang,
AddTimer(StartTimerCommandAttributes),
CancelTimer(CancelTimerCommandAttributes),
CompleteWorkflow(CompleteWorkflowExecutionCommandAttributes),
}
#[derive(thiserror::Error, Debug, derive_more::From)]
#[error("Couldn't convert <lang> command")]
pub struct InconvertibleCommandError(pub coresdk::Command);
impl TryFrom<coresdk::Command> for WFCommand {
type Error = InconvertibleCommandError;
fn try_from(c: coresdk::Command) -> Result<Self, Self::Error> {
match c.variant {
Some(Variant::Api(Command {
attributes: Some(attrs),
..
})) => match attrs {
Attributes::StartTimerCommandAttributes(s) => Ok(WFCommand::AddTimer(s)),
Attributes::CancelTimerCommandAttributes(s) => Ok(WFCommand::CancelTimer(s)),
Attributes::CompleteWorkflowExecutionCommandAttributes(c) => {
Ok(WFCommand::CompleteWorkflow(c))
}
_ => unimplemented!(),
},
_ => Err(c.into()),
}
}
}
/// Extends [rustfsm::StateMachine] with some functionality specific to the temporal SDK.
///
/// Formerly known as `EntityStateMachine` in Java.
trait TemporalStateMachine: CheckStateMachineInFinal + Send {
fn name(&self) -> &str;
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError>;
/// Tell the state machine to handle some event. Returns a list of responses that can be used
/// to update the overall state of the workflow. EX: To issue outgoing WF activations.
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
/// Attempt to cancel the command associated with this state machine, if it is cancellable
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError>;
/// Should return true if the command was cancelled before we sent it to the server. Always
/// returns false for non-cancellable machines
fn was_cancelled_before_sent_to_server(&self) -> bool;
}
impl<SM> TemporalStateMachine for SM
where
SM: StateMachine + CheckStateMachineInFinal + WFMachinesAdapter + Cancellable + Clone + Send,
<SM as StateMachine>::Event: TryFrom<HistoryEvent>,
<SM as StateMachine>::Event: TryFrom<CommandType>,
WFMachinesError: From<<<SM as StateMachine>::Event as TryFrom<HistoryEvent>>::Error>,
<SM as StateMachine>::Command: Debug,
<SM as StateMachine>::State: Display,
<SM as StateMachine>::Error: Into<WFMachinesError> + 'static + Send + Sync,
{
fn name(&self) -> &str {
<Self as StateMachine>::name(self)
}
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling command",
?command_type,
machine_name = %self.name(),
state = %self.state()
);
if let Ok(converted_command) = command_type.try_into() {
match self.on_event_mut(converted_command) {
Ok(_c) => Ok(()),
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
} else {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
}
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling event",
%event,
machine_name = %self.name(),
state = %self.state()
);
let converted_event = event.clone().try_into()?;
match self.on_event_mut(converted_event) {
Ok(c) => {
if !c.is_empty() {
event!(Level::DEBUG, msg = "Machine produced commands", ?c, state = %self.state());
}
let mut machine_responses = vec![];
for cmd in c {
machine_responses.extend(self.adapt_response(event, has_next_event, cmd)?);
}
Ok(machine_responses)
}
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::InvalidTransitionDuringEvent(
event.clone(),
format!(
"{} in state {} says the transition is invalid",
self.name(),
self.state()
),
))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
}
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError> {
let res = self.cancel();
res.map_err(|e| match e {
MachineError::InvalidTransition => {
WFMachinesError::InvalidTransition("while attempting to cancel")
}
MachineError::Underlying(e) => e.into(),
})
}
fn was_cancelled_before_sent_to_server(&self) -> bool {
self.was_cancelled_before_sent_to_server()
}
}
/// Exists purely to allow generic implementation of `is_final_state` for all [StateMachine]
/// implementors
trait CheckStateMachineInFinal {
/// Returns true if the state machine is in a final state
fn is_final_state(&self) -> bool;
}
impl<SM> CheckStateMachineInFinal for SM
where
SM: StateMachine,
{
fn is_final_state(&self) -> bool {
self.on_final_state()
}
}
/// This trait exists to bridge [StateMachine]s and the [WorkflowMachines] instance. It has access
/// to the machine's concrete types while hiding those details from [WorkflowMachines]
trait WFMachinesAdapter: StateMachine {
/// Given a the event being processed, and a command that this [StateMachine] instance just
/// produced, perform any handling that needs inform the [WorkflowMachines] instance of some
/// action to be taken in response to that command.
fn adapt_response(
&self,
event: &HistoryEvent,
has_next_event: bool,
my_command: Self::Command,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
}
trait Cancellable: StateMachine {
/// Cancel the machine / the command represented by the machine.
///
/// # Panics
/// * If the machine is not cancellable. It's a logic error on our part to call it on such
/// machines.
fn cancel(&mut self) -> Result<MachineResponse, MachineError<Self::Error>> {
// It's a logic error on our part if this is ever called on a machine that can't actually
// be cancelled
panic!(format!("Machine {} cannot be cancelled", self.name()))
}
/// Should return true if the command was cancelled before we sent it to the server
fn was_cancelled_before_sent_to_server(&self) -> bool {
false
}
}
#[derive(Debug)]
struct NewMachineWithCommand<T: TemporalStateMachine> {
command: ProtoCommand,
machine: T,
}
impl Debug for dyn TemporalStateMachine {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(self.name())
}
} | random_line_split | |
main.go | package main
import (
"context"
"errors"
"fmt"
"github.com/dgrijalva/jwt-go"
"github.com/go-redis/redis/v8"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/cors"
fiberlogger "github.com/gofiber/fiber/v2/middleware/logger"
"github.com/gofiber/fiber/v2/middleware/recover"
_ "github.com/joho/godotenv/autoload" // load .env file automatically
"github.com/rs/zerolog"
"github.com/twinj/uuid"
"net/http"
"os"
"strconv"
"strings"
"time"
)
type User struct {
ID int64 `json:"id"`
Email string `json:"email"`
Password string `json:"password"`
}
var user = User{
ID: 1,
Email: "sammidev@gmail.com",
Password: "sammidev",
}
type Todo struct {
UserID int64 `json:"user_id"`
Title string `json:"title"`
}
// logging
var logger = zerolog.New(zerolog.ConsoleWriter{
Out: os.Stdout,
NoColor: false,
TimeFormat: time.RFC3339,
}).With().Timestamp().Logger().Level(zerolog.GlobalLevel())
func Success(c *fiber.Ctx, code int, payload interface{}) error {
c.Set("Content-Type", "application/json")
c.Status(code)
return c.JSON(payload)
}
func Error(c *fiber.Ctx, code int, err error) error {
c.Set("Content-Type", "application/json")
c.Status(code)
return c.JSON(struct {
Message string `json:"message"`
}{Message: err.Error()})
}
func init() {
//Initializing redis
dsn := os.Getenv("REDIS_DSN")
if len(dsn) == 0 |
client = redis.NewClient(&redis.Options{
Addr: dsn, //redis port
})
_, err := client.Ping(context.Background()).Result()
if err != nil {
panic(err)
}
}
var client *redis.Client
func FiberMiddleware(a *fiber.App) {
a.Use(
// Add CORS to each route.
cors.New(),
// Add simple logger.
fiberlogger.New(),
// add recoverer for panic
recover.New(),
)
}
func main() {
readTimeoutSecondsCount, _ := strconv.Atoi(os.Getenv("SERVER_READ_TIMEOUT"))
app := fiber.New(fiber.Config{
ReadTimeout: time.Second * time.Duration(readTimeoutSecondsCount),
})
FiberMiddleware(app)
route := app.Group("api/v1")
route.Post("/login", Login)
route.Post("/todo", CreateTodo)
route.Post("/logout", Logout)
route.Post("/refresh", Refresh)
logger.Log().Err(app.Listen(os.Getenv("SERVER_URL")))
}
func Login(c *fiber.Ctx) error {
var u User
if err := c.BodyParser(&u); err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("invalid json provided"))
}
//compare the user from the request, with the one we defined:
if user.Email != u.Email || user.Password != u.Password {
return Error(c, fiber.StatusUnauthorized, errors.New("please provide valid login details"))
}
ts, err := CreateToken(user.ID)
if err != nil {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
saveErr := CreateAuth(c, user.ID, ts)
if saveErr != nil {
return Error(c, http.StatusUnprocessableEntity, saveErr)
}
return Success(c, fiber.StatusOK, fiber.Map{
"access_token": ts.AccessToken,
"refresh_token": ts.RefreshToken,
})
}
func CreateTodo(c *fiber.Ctx) error {
var td Todo
if err := c.BodyParser(&td); err != nil {
return Error(c, fiber.StatusUnprocessableEntity, errors.New("invalid json"))
}
//Extract the access token metadata
metadata, err := ExtractTokenMetadata(c)
if err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
userid, err := FetchAuth(c, metadata)
if err != nil {
return Error(c, fiber.StatusUnauthorized, err)
}
td.UserID = userid
//you can proceed to save the Todo to a database
return Success(c, fiber.StatusCreated, td)
}
func Logout(c *fiber.Ctx) error {
metadata, err := ExtractTokenMetadata(c)
if err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
delErr := DeleteTokens(c, metadata)
if delErr != nil {
return Error(c, fiber.StatusUnauthorized, delErr)
}
return Success(c, fiber.StatusOK, "successfully logged out")
}
func Refresh(c *fiber.Ctx) error {
mapToken := map[string]string{}
if err := c.BodyParser(&mapToken); err != nil {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
refreshToken := mapToken["refresh_token"]
//verify the token
token, err := jwt.Parse(refreshToken, func(token *jwt.Token) (interface{}, error) {
//Make sure that the token method conform to "SigningMethodHMAC"
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("REFRESH_SECRET")), nil
})
//if there is an error, the token must have expired
if err != nil {
fmt.Println("the error: ", err)
return Error(c, fiber.StatusUnauthorized, errors.New("refresh token expired"))
}
//is token valid?
if _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {
return Error(c, fiber.StatusUnauthorized, err)
}
//Since token is valid, get the uuid:
claims, ok := token.Claims.(jwt.MapClaims) //the token claims should conform to MapClaims
if ok && token.Valid {
refreshUuid, ok := claims["refresh_uuid"].(string) //convert the interface to string
if !ok {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
userId, err := strconv.ParseInt(fmt.Sprintf("%.f", claims["user_id"]), 10, 64)
if err != nil {
return Error(c, fiber.StatusUnprocessableEntity, errors.New("error occurred"))
}
deleted, delErr := DeleteAuth(c, refreshUuid)
if delErr != nil || deleted == 0 { //if any goes wrong
//Delete the previous Refresh Token
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
//Create new pairs of refresh and access tokens
ts, createErr := CreateToken(userId)
if createErr != nil {
return Error(c, fiber.StatusForbidden, createErr)
}
saveErr := CreateAuth(c, userId, ts)
if saveErr != nil {
//save the tokens metadata to redis
return Error(c, fiber.StatusForbidden, saveErr)
}
tokens := map[string]string{
"access_token": ts.AccessToken,
"refresh_token": ts.RefreshToken,
}
return Success(c, fiber.StatusCreated, tokens)
} else {
return Error(c, fiber.StatusUnauthorized, errors.New("refresh expired"))
}
}
type AccessDetails struct {
AccessUuid string
UserId int64
}
type TokenDetails struct {
AccessToken string
RefreshToken string
AccessUuid string
RefreshUuid string
AtExpires int64
RtExpires int64
}
func CreateToken(userid int64) (*TokenDetails, error) {
td := &TokenDetails{}
td.AtExpires = time.Now().Add(time.Minute * 15).Unix()
td.AccessUuid = uuid.NewV4().String()
td.RtExpires = time.Now().Add(time.Hour * 24 * 7).Unix()
td.RefreshUuid = td.AccessUuid + "++" + strconv.Itoa(int(userid))
var err error
//Creating Access Token
atClaims := jwt.MapClaims{}
atClaims["authorized"] = true
atClaims["access_uuid"] = td.AccessUuid
atClaims["user_id"] = userid
atClaims["exp"] = td.AtExpires
at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)
td.AccessToken, err = at.SignedString([]byte(os.Getenv("ACCESS_SECRET")))
if err != nil {
return nil, err
}
//Creating Refresh Token
rtClaims := jwt.MapClaims{}
rtClaims["refresh_uuid"] = td.RefreshUuid
rtClaims["user_id"] = userid
rtClaims["exp"] = td.RtExpires
rt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)
td.RefreshToken, err = rt.SignedString([]byte(os.Getenv("REFRESH_SECRET")))
if err != nil {
return nil, err
}
return td, nil
}
func CreateAuth(c *fiber.Ctx, userid int64, td *TokenDetails) error {
at := time.Unix(td.AtExpires, 0) //converting Unix to UTC(to Time object)
rt := time.Unix(td.RtExpires, 0)
now := time.Now()
errAccess := client.Set(c.Context(), td.AccessUuid, strconv.Itoa(int(userid)), at.Sub(now)).Err()
if errAccess != nil {
return errAccess
}
errRefresh := client.Set(c.Context(), td.RefreshUuid, strconv.Itoa(int(userid)), rt.Sub(now)).Err()
if errRefresh != nil {
return errRefresh
}
return nil
}
func ExtractToken(c *fiber.Ctx) string {
bearToken := c.Get("Authorization")
strArr := strings.Split(bearToken, " ")
if len(strArr) == 2 {
return strArr[1]
}
return ""
}
func VerifyToken(c *fiber.Ctx) (*jwt.Token, error) {
tokenString := ExtractToken(c)
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("ACCESS_SECRET")), nil
})
if err != nil {
return nil, err
}
return token, nil
}
func TokenValid(c *fiber.Ctx) error {
token, err := VerifyToken(c)
if err != nil {
return err
}
if _, ok := token.Claims.(jwt.Claims); !ok || !token.Valid {
return err
}
return nil
}
func ExtractTokenMetadata(c *fiber.Ctx) (*AccessDetails, error) {
token, err := VerifyToken(c)
if err != nil {
return nil, err
}
claims, ok := token.Claims.(jwt.MapClaims)
if ok && token.Valid {
accessUuid, ok := claims["access_uuid"].(string)
if !ok {
return nil, err
}
userId, err := strconv.ParseInt(fmt.Sprintf("%.f", claims["user_id"]), 10, 64)
if err != nil {
return nil, err
}
return &AccessDetails{
AccessUuid: accessUuid,
UserId: userId,
}, nil
}
return nil, err
}
func FetchAuth(c *fiber.Ctx, authD *AccessDetails) (int64, error) {
userid, err := client.Get(c.Context(), authD.AccessUuid).Result()
if err != nil {
return 0, err
}
userID, _ := strconv.ParseInt(userid, 10, 64)
if authD.UserId != userID {
return 0, errors.New("unauthorized")
}
return userID, nil
}
func DeleteAuth(c *fiber.Ctx, givenUuid string) (int64, error) {
deleted, err := client.Del(c.Context(), givenUuid).Result()
if err != nil {
return 0, err
}
return deleted, nil
}
func DeleteTokens(c *fiber.Ctx, authD *AccessDetails) error {
//get the refresh uuid
refreshUuid := fmt.Sprintf("%s++%d", authD.AccessUuid, authD.UserId)
//delete access token
deletedAt, err := client.Del(c.Context(), authD.AccessUuid).Result()
if err != nil {
return err
}
//delete refresh token
deletedRt, err := client.Del(c.Context(), refreshUuid).Result()
if err != nil {
return err
}
//When the record is deleted, the return value is 1
if deletedAt != 1 || deletedRt != 1 {
return errors.New("something went wrong")
}
return nil
}
| {
dsn = "localhost:6379"
} | conditional_block |
main.go | package main
import (
"context"
"errors"
"fmt"
"github.com/dgrijalva/jwt-go"
"github.com/go-redis/redis/v8"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/cors"
fiberlogger "github.com/gofiber/fiber/v2/middleware/logger"
"github.com/gofiber/fiber/v2/middleware/recover"
_ "github.com/joho/godotenv/autoload" // load .env file automatically
"github.com/rs/zerolog"
"github.com/twinj/uuid"
"net/http"
"os"
"strconv"
"strings"
"time"
)
type User struct {
ID int64 `json:"id"`
Email string `json:"email"`
Password string `json:"password"`
}
var user = User{
ID: 1,
Email: "sammidev@gmail.com",
Password: "sammidev",
}
type Todo struct {
UserID int64 `json:"user_id"`
Title string `json:"title"`
}
// logging
var logger = zerolog.New(zerolog.ConsoleWriter{
Out: os.Stdout,
NoColor: false,
TimeFormat: time.RFC3339,
}).With().Timestamp().Logger().Level(zerolog.GlobalLevel())
func Success(c *fiber.Ctx, code int, payload interface{}) error {
c.Set("Content-Type", "application/json")
c.Status(code)
return c.JSON(payload)
}
func Error(c *fiber.Ctx, code int, err error) error {
c.Set("Content-Type", "application/json")
c.Status(code)
return c.JSON(struct {
Message string `json:"message"`
}{Message: err.Error()})
}
func init() |
var client *redis.Client
func FiberMiddleware(a *fiber.App) {
a.Use(
// Add CORS to each route.
cors.New(),
// Add simple logger.
fiberlogger.New(),
// add recoverer for panic
recover.New(),
)
}
func main() {
readTimeoutSecondsCount, _ := strconv.Atoi(os.Getenv("SERVER_READ_TIMEOUT"))
app := fiber.New(fiber.Config{
ReadTimeout: time.Second * time.Duration(readTimeoutSecondsCount),
})
FiberMiddleware(app)
route := app.Group("api/v1")
route.Post("/login", Login)
route.Post("/todo", CreateTodo)
route.Post("/logout", Logout)
route.Post("/refresh", Refresh)
logger.Log().Err(app.Listen(os.Getenv("SERVER_URL")))
}
func Login(c *fiber.Ctx) error {
var u User
if err := c.BodyParser(&u); err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("invalid json provided"))
}
//compare the user from the request, with the one we defined:
if user.Email != u.Email || user.Password != u.Password {
return Error(c, fiber.StatusUnauthorized, errors.New("please provide valid login details"))
}
ts, err := CreateToken(user.ID)
if err != nil {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
saveErr := CreateAuth(c, user.ID, ts)
if saveErr != nil {
return Error(c, http.StatusUnprocessableEntity, saveErr)
}
return Success(c, fiber.StatusOK, fiber.Map{
"access_token": ts.AccessToken,
"refresh_token": ts.RefreshToken,
})
}
func CreateTodo(c *fiber.Ctx) error {
var td Todo
if err := c.BodyParser(&td); err != nil {
return Error(c, fiber.StatusUnprocessableEntity, errors.New("invalid json"))
}
//Extract the access token metadata
metadata, err := ExtractTokenMetadata(c)
if err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
userid, err := FetchAuth(c, metadata)
if err != nil {
return Error(c, fiber.StatusUnauthorized, err)
}
td.UserID = userid
//you can proceed to save the Todo to a database
return Success(c, fiber.StatusCreated, td)
}
func Logout(c *fiber.Ctx) error {
metadata, err := ExtractTokenMetadata(c)
if err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
delErr := DeleteTokens(c, metadata)
if delErr != nil {
return Error(c, fiber.StatusUnauthorized, delErr)
}
return Success(c, fiber.StatusOK, "successfully logged out")
}
func Refresh(c *fiber.Ctx) error {
mapToken := map[string]string{}
if err := c.BodyParser(&mapToken); err != nil {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
refreshToken := mapToken["refresh_token"]
//verify the token
token, err := jwt.Parse(refreshToken, func(token *jwt.Token) (interface{}, error) {
//Make sure that the token method conform to "SigningMethodHMAC"
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("REFRESH_SECRET")), nil
})
//if there is an error, the token must have expired
if err != nil {
fmt.Println("the error: ", err)
return Error(c, fiber.StatusUnauthorized, errors.New("refresh token expired"))
}
//is token valid?
if _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {
return Error(c, fiber.StatusUnauthorized, err)
}
//Since token is valid, get the uuid:
claims, ok := token.Claims.(jwt.MapClaims) //the token claims should conform to MapClaims
if ok && token.Valid {
refreshUuid, ok := claims["refresh_uuid"].(string) //convert the interface to string
if !ok {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
userId, err := strconv.ParseInt(fmt.Sprintf("%.f", claims["user_id"]), 10, 64)
if err != nil {
return Error(c, fiber.StatusUnprocessableEntity, errors.New("error occurred"))
}
deleted, delErr := DeleteAuth(c, refreshUuid)
if delErr != nil || deleted == 0 { //if any goes wrong
//Delete the previous Refresh Token
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
//Create new pairs of refresh and access tokens
ts, createErr := CreateToken(userId)
if createErr != nil {
return Error(c, fiber.StatusForbidden, createErr)
}
saveErr := CreateAuth(c, userId, ts)
if saveErr != nil {
//save the tokens metadata to redis
return Error(c, fiber.StatusForbidden, saveErr)
}
tokens := map[string]string{
"access_token": ts.AccessToken,
"refresh_token": ts.RefreshToken,
}
return Success(c, fiber.StatusCreated, tokens)
} else {
return Error(c, fiber.StatusUnauthorized, errors.New("refresh expired"))
}
}
type AccessDetails struct {
AccessUuid string
UserId int64
}
type TokenDetails struct {
AccessToken string
RefreshToken string
AccessUuid string
RefreshUuid string
AtExpires int64
RtExpires int64
}
func CreateToken(userid int64) (*TokenDetails, error) {
td := &TokenDetails{}
td.AtExpires = time.Now().Add(time.Minute * 15).Unix()
td.AccessUuid = uuid.NewV4().String()
td.RtExpires = time.Now().Add(time.Hour * 24 * 7).Unix()
td.RefreshUuid = td.AccessUuid + "++" + strconv.Itoa(int(userid))
var err error
//Creating Access Token
atClaims := jwt.MapClaims{}
atClaims["authorized"] = true
atClaims["access_uuid"] = td.AccessUuid
atClaims["user_id"] = userid
atClaims["exp"] = td.AtExpires
at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)
td.AccessToken, err = at.SignedString([]byte(os.Getenv("ACCESS_SECRET")))
if err != nil {
return nil, err
}
//Creating Refresh Token
rtClaims := jwt.MapClaims{}
rtClaims["refresh_uuid"] = td.RefreshUuid
rtClaims["user_id"] = userid
rtClaims["exp"] = td.RtExpires
rt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)
td.RefreshToken, err = rt.SignedString([]byte(os.Getenv("REFRESH_SECRET")))
if err != nil {
return nil, err
}
return td, nil
}
func CreateAuth(c *fiber.Ctx, userid int64, td *TokenDetails) error {
at := time.Unix(td.AtExpires, 0) //converting Unix to UTC(to Time object)
rt := time.Unix(td.RtExpires, 0)
now := time.Now()
errAccess := client.Set(c.Context(), td.AccessUuid, strconv.Itoa(int(userid)), at.Sub(now)).Err()
if errAccess != nil {
return errAccess
}
errRefresh := client.Set(c.Context(), td.RefreshUuid, strconv.Itoa(int(userid)), rt.Sub(now)).Err()
if errRefresh != nil {
return errRefresh
}
return nil
}
func ExtractToken(c *fiber.Ctx) string {
bearToken := c.Get("Authorization")
strArr := strings.Split(bearToken, " ")
if len(strArr) == 2 {
return strArr[1]
}
return ""
}
func VerifyToken(c *fiber.Ctx) (*jwt.Token, error) {
tokenString := ExtractToken(c)
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("ACCESS_SECRET")), nil
})
if err != nil {
return nil, err
}
return token, nil
}
func TokenValid(c *fiber.Ctx) error {
token, err := VerifyToken(c)
if err != nil {
return err
}
if _, ok := token.Claims.(jwt.Claims); !ok || !token.Valid {
return err
}
return nil
}
func ExtractTokenMetadata(c *fiber.Ctx) (*AccessDetails, error) {
token, err := VerifyToken(c)
if err != nil {
return nil, err
}
claims, ok := token.Claims.(jwt.MapClaims)
if ok && token.Valid {
accessUuid, ok := claims["access_uuid"].(string)
if !ok {
return nil, err
}
userId, err := strconv.ParseInt(fmt.Sprintf("%.f", claims["user_id"]), 10, 64)
if err != nil {
return nil, err
}
return &AccessDetails{
AccessUuid: accessUuid,
UserId: userId,
}, nil
}
return nil, err
}
func FetchAuth(c *fiber.Ctx, authD *AccessDetails) (int64, error) {
userid, err := client.Get(c.Context(), authD.AccessUuid).Result()
if err != nil {
return 0, err
}
userID, _ := strconv.ParseInt(userid, 10, 64)
if authD.UserId != userID {
return 0, errors.New("unauthorized")
}
return userID, nil
}
func DeleteAuth(c *fiber.Ctx, givenUuid string) (int64, error) {
deleted, err := client.Del(c.Context(), givenUuid).Result()
if err != nil {
return 0, err
}
return deleted, nil
}
func DeleteTokens(c *fiber.Ctx, authD *AccessDetails) error {
//get the refresh uuid
refreshUuid := fmt.Sprintf("%s++%d", authD.AccessUuid, authD.UserId)
//delete access token
deletedAt, err := client.Del(c.Context(), authD.AccessUuid).Result()
if err != nil {
return err
}
//delete refresh token
deletedRt, err := client.Del(c.Context(), refreshUuid).Result()
if err != nil {
return err
}
//When the record is deleted, the return value is 1
if deletedAt != 1 || deletedRt != 1 {
return errors.New("something went wrong")
}
return nil
}
| {
//Initializing redis
dsn := os.Getenv("REDIS_DSN")
if len(dsn) == 0 {
dsn = "localhost:6379"
}
client = redis.NewClient(&redis.Options{
Addr: dsn, //redis port
})
_, err := client.Ping(context.Background()).Result()
if err != nil {
panic(err)
}
} | identifier_body |
main.go | package main
import (
"context"
"errors"
"fmt"
"github.com/dgrijalva/jwt-go"
"github.com/go-redis/redis/v8"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/cors"
fiberlogger "github.com/gofiber/fiber/v2/middleware/logger"
"github.com/gofiber/fiber/v2/middleware/recover"
_ "github.com/joho/godotenv/autoload" // load .env file automatically
"github.com/rs/zerolog"
"github.com/twinj/uuid"
"net/http"
"os"
"strconv"
"strings"
"time"
)
type User struct {
ID int64 `json:"id"`
Email string `json:"email"`
Password string `json:"password"`
}
var user = User{
ID: 1,
Email: "sammidev@gmail.com",
Password: "sammidev",
}
type Todo struct {
UserID int64 `json:"user_id"`
Title string `json:"title"`
}
// logging
var logger = zerolog.New(zerolog.ConsoleWriter{
Out: os.Stdout,
NoColor: false,
TimeFormat: time.RFC3339,
}).With().Timestamp().Logger().Level(zerolog.GlobalLevel())
func Success(c *fiber.Ctx, code int, payload interface{}) error {
c.Set("Content-Type", "application/json")
c.Status(code)
return c.JSON(payload)
}
func Error(c *fiber.Ctx, code int, err error) error {
c.Set("Content-Type", "application/json")
c.Status(code)
return c.JSON(struct {
Message string `json:"message"`
}{Message: err.Error()})
}
func init() {
//Initializing redis
dsn := os.Getenv("REDIS_DSN")
if len(dsn) == 0 {
dsn = "localhost:6379"
}
client = redis.NewClient(&redis.Options{
Addr: dsn, //redis port
})
_, err := client.Ping(context.Background()).Result()
if err != nil {
panic(err)
}
}
var client *redis.Client
func FiberMiddleware(a *fiber.App) {
a.Use(
// Add CORS to each route.
cors.New(),
// Add simple logger.
fiberlogger.New(),
// add recoverer for panic
recover.New(),
)
}
func main() {
readTimeoutSecondsCount, _ := strconv.Atoi(os.Getenv("SERVER_READ_TIMEOUT"))
app := fiber.New(fiber.Config{
ReadTimeout: time.Second * time.Duration(readTimeoutSecondsCount),
})
FiberMiddleware(app)
route := app.Group("api/v1")
route.Post("/login", Login)
route.Post("/todo", CreateTodo)
route.Post("/logout", Logout)
route.Post("/refresh", Refresh)
logger.Log().Err(app.Listen(os.Getenv("SERVER_URL")))
}
func Login(c *fiber.Ctx) error {
var u User
if err := c.BodyParser(&u); err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("invalid json provided"))
}
//compare the user from the request, with the one we defined:
if user.Email != u.Email || user.Password != u.Password {
return Error(c, fiber.StatusUnauthorized, errors.New("please provide valid login details"))
}
ts, err := CreateToken(user.ID)
if err != nil {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
saveErr := CreateAuth(c, user.ID, ts)
if saveErr != nil {
return Error(c, http.StatusUnprocessableEntity, saveErr)
}
return Success(c, fiber.StatusOK, fiber.Map{
"access_token": ts.AccessToken,
"refresh_token": ts.RefreshToken,
})
}
func CreateTodo(c *fiber.Ctx) error {
var td Todo
if err := c.BodyParser(&td); err != nil {
return Error(c, fiber.StatusUnprocessableEntity, errors.New("invalid json"))
}
//Extract the access token metadata
metadata, err := ExtractTokenMetadata(c)
if err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
userid, err := FetchAuth(c, metadata)
if err != nil {
return Error(c, fiber.StatusUnauthorized, err)
}
td.UserID = userid
//you can proceed to save the Todo to a database
return Success(c, fiber.StatusCreated, td)
}
func Logout(c *fiber.Ctx) error {
metadata, err := ExtractTokenMetadata(c)
if err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
delErr := DeleteTokens(c, metadata)
if delErr != nil {
return Error(c, fiber.StatusUnauthorized, delErr)
}
return Success(c, fiber.StatusOK, "successfully logged out")
}
func Refresh(c *fiber.Ctx) error {
mapToken := map[string]string{}
if err := c.BodyParser(&mapToken); err != nil {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
refreshToken := mapToken["refresh_token"]
//verify the token
token, err := jwt.Parse(refreshToken, func(token *jwt.Token) (interface{}, error) {
//Make sure that the token method conform to "SigningMethodHMAC"
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("REFRESH_SECRET")), nil
})
//if there is an error, the token must have expired
if err != nil {
fmt.Println("the error: ", err)
return Error(c, fiber.StatusUnauthorized, errors.New("refresh token expired"))
}
//is token valid?
if _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {
return Error(c, fiber.StatusUnauthorized, err)
}
//Since token is valid, get the uuid:
claims, ok := token.Claims.(jwt.MapClaims) //the token claims should conform to MapClaims
if ok && token.Valid {
refreshUuid, ok := claims["refresh_uuid"].(string) //convert the interface to string
if !ok {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
userId, err := strconv.ParseInt(fmt.Sprintf("%.f", claims["user_id"]), 10, 64)
if err != nil {
return Error(c, fiber.StatusUnprocessableEntity, errors.New("error occurred"))
}
deleted, delErr := DeleteAuth(c, refreshUuid)
if delErr != nil || deleted == 0 { //if any goes wrong
//Delete the previous Refresh Token
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
//Create new pairs of refresh and access tokens
ts, createErr := CreateToken(userId)
if createErr != nil {
return Error(c, fiber.StatusForbidden, createErr)
}
saveErr := CreateAuth(c, userId, ts)
if saveErr != nil {
//save the tokens metadata to redis
return Error(c, fiber.StatusForbidden, saveErr)
}
tokens := map[string]string{
"access_token": ts.AccessToken,
"refresh_token": ts.RefreshToken,
}
return Success(c, fiber.StatusCreated, tokens)
} else {
return Error(c, fiber.StatusUnauthorized, errors.New("refresh expired")) | UserId int64
}
type TokenDetails struct {
AccessToken string
RefreshToken string
AccessUuid string
RefreshUuid string
AtExpires int64
RtExpires int64
}
func CreateToken(userid int64) (*TokenDetails, error) {
td := &TokenDetails{}
td.AtExpires = time.Now().Add(time.Minute * 15).Unix()
td.AccessUuid = uuid.NewV4().String()
td.RtExpires = time.Now().Add(time.Hour * 24 * 7).Unix()
td.RefreshUuid = td.AccessUuid + "++" + strconv.Itoa(int(userid))
var err error
//Creating Access Token
atClaims := jwt.MapClaims{}
atClaims["authorized"] = true
atClaims["access_uuid"] = td.AccessUuid
atClaims["user_id"] = userid
atClaims["exp"] = td.AtExpires
at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)
td.AccessToken, err = at.SignedString([]byte(os.Getenv("ACCESS_SECRET")))
if err != nil {
return nil, err
}
//Creating Refresh Token
rtClaims := jwt.MapClaims{}
rtClaims["refresh_uuid"] = td.RefreshUuid
rtClaims["user_id"] = userid
rtClaims["exp"] = td.RtExpires
rt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)
td.RefreshToken, err = rt.SignedString([]byte(os.Getenv("REFRESH_SECRET")))
if err != nil {
return nil, err
}
return td, nil
}
func CreateAuth(c *fiber.Ctx, userid int64, td *TokenDetails) error {
at := time.Unix(td.AtExpires, 0) //converting Unix to UTC(to Time object)
rt := time.Unix(td.RtExpires, 0)
now := time.Now()
errAccess := client.Set(c.Context(), td.AccessUuid, strconv.Itoa(int(userid)), at.Sub(now)).Err()
if errAccess != nil {
return errAccess
}
errRefresh := client.Set(c.Context(), td.RefreshUuid, strconv.Itoa(int(userid)), rt.Sub(now)).Err()
if errRefresh != nil {
return errRefresh
}
return nil
}
func ExtractToken(c *fiber.Ctx) string {
bearToken := c.Get("Authorization")
strArr := strings.Split(bearToken, " ")
if len(strArr) == 2 {
return strArr[1]
}
return ""
}
func VerifyToken(c *fiber.Ctx) (*jwt.Token, error) {
tokenString := ExtractToken(c)
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("ACCESS_SECRET")), nil
})
if err != nil {
return nil, err
}
return token, nil
}
func TokenValid(c *fiber.Ctx) error {
token, err := VerifyToken(c)
if err != nil {
return err
}
if _, ok := token.Claims.(jwt.Claims); !ok || !token.Valid {
return err
}
return nil
}
func ExtractTokenMetadata(c *fiber.Ctx) (*AccessDetails, error) {
token, err := VerifyToken(c)
if err != nil {
return nil, err
}
claims, ok := token.Claims.(jwt.MapClaims)
if ok && token.Valid {
accessUuid, ok := claims["access_uuid"].(string)
if !ok {
return nil, err
}
userId, err := strconv.ParseInt(fmt.Sprintf("%.f", claims["user_id"]), 10, 64)
if err != nil {
return nil, err
}
return &AccessDetails{
AccessUuid: accessUuid,
UserId: userId,
}, nil
}
return nil, err
}
func FetchAuth(c *fiber.Ctx, authD *AccessDetails) (int64, error) {
userid, err := client.Get(c.Context(), authD.AccessUuid).Result()
if err != nil {
return 0, err
}
userID, _ := strconv.ParseInt(userid, 10, 64)
if authD.UserId != userID {
return 0, errors.New("unauthorized")
}
return userID, nil
}
func DeleteAuth(c *fiber.Ctx, givenUuid string) (int64, error) {
deleted, err := client.Del(c.Context(), givenUuid).Result()
if err != nil {
return 0, err
}
return deleted, nil
}
func DeleteTokens(c *fiber.Ctx, authD *AccessDetails) error {
//get the refresh uuid
refreshUuid := fmt.Sprintf("%s++%d", authD.AccessUuid, authD.UserId)
//delete access token
deletedAt, err := client.Del(c.Context(), authD.AccessUuid).Result()
if err != nil {
return err
}
//delete refresh token
deletedRt, err := client.Del(c.Context(), refreshUuid).Result()
if err != nil {
return err
}
//When the record is deleted, the return value is 1
if deletedAt != 1 || deletedRt != 1 {
return errors.New("something went wrong")
}
return nil
} | }
}
type AccessDetails struct {
AccessUuid string | random_line_split |
main.go | package main
import (
"context"
"errors"
"fmt"
"github.com/dgrijalva/jwt-go"
"github.com/go-redis/redis/v8"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/cors"
fiberlogger "github.com/gofiber/fiber/v2/middleware/logger"
"github.com/gofiber/fiber/v2/middleware/recover"
_ "github.com/joho/godotenv/autoload" // load .env file automatically
"github.com/rs/zerolog"
"github.com/twinj/uuid"
"net/http"
"os"
"strconv"
"strings"
"time"
)
type User struct {
ID int64 `json:"id"`
Email string `json:"email"`
Password string `json:"password"`
}
var user = User{
ID: 1,
Email: "sammidev@gmail.com",
Password: "sammidev",
}
type Todo struct {
UserID int64 `json:"user_id"`
Title string `json:"title"`
}
// logging
var logger = zerolog.New(zerolog.ConsoleWriter{
Out: os.Stdout,
NoColor: false,
TimeFormat: time.RFC3339,
}).With().Timestamp().Logger().Level(zerolog.GlobalLevel())
func Success(c *fiber.Ctx, code int, payload interface{}) error {
c.Set("Content-Type", "application/json")
c.Status(code)
return c.JSON(payload)
}
func Error(c *fiber.Ctx, code int, err error) error {
c.Set("Content-Type", "application/json")
c.Status(code)
return c.JSON(struct {
Message string `json:"message"`
}{Message: err.Error()})
}
func init() {
//Initializing redis
dsn := os.Getenv("REDIS_DSN")
if len(dsn) == 0 {
dsn = "localhost:6379"
}
client = redis.NewClient(&redis.Options{
Addr: dsn, //redis port
})
_, err := client.Ping(context.Background()).Result()
if err != nil {
panic(err)
}
}
var client *redis.Client
func FiberMiddleware(a *fiber.App) {
a.Use(
// Add CORS to each route.
cors.New(),
// Add simple logger.
fiberlogger.New(),
// add recoverer for panic
recover.New(),
)
}
func main() {
readTimeoutSecondsCount, _ := strconv.Atoi(os.Getenv("SERVER_READ_TIMEOUT"))
app := fiber.New(fiber.Config{
ReadTimeout: time.Second * time.Duration(readTimeoutSecondsCount),
})
FiberMiddleware(app)
route := app.Group("api/v1")
route.Post("/login", Login)
route.Post("/todo", CreateTodo)
route.Post("/logout", Logout)
route.Post("/refresh", Refresh)
logger.Log().Err(app.Listen(os.Getenv("SERVER_URL")))
}
func Login(c *fiber.Ctx) error {
var u User
if err := c.BodyParser(&u); err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("invalid json provided"))
}
//compare the user from the request, with the one we defined:
if user.Email != u.Email || user.Password != u.Password {
return Error(c, fiber.StatusUnauthorized, errors.New("please provide valid login details"))
}
ts, err := CreateToken(user.ID)
if err != nil {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
saveErr := CreateAuth(c, user.ID, ts)
if saveErr != nil {
return Error(c, http.StatusUnprocessableEntity, saveErr)
}
return Success(c, fiber.StatusOK, fiber.Map{
"access_token": ts.AccessToken,
"refresh_token": ts.RefreshToken,
})
}
func CreateTodo(c *fiber.Ctx) error {
var td Todo
if err := c.BodyParser(&td); err != nil {
return Error(c, fiber.StatusUnprocessableEntity, errors.New("invalid json"))
}
//Extract the access token metadata
metadata, err := ExtractTokenMetadata(c)
if err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
userid, err := FetchAuth(c, metadata)
if err != nil {
return Error(c, fiber.StatusUnauthorized, err)
}
td.UserID = userid
//you can proceed to save the Todo to a database
return Success(c, fiber.StatusCreated, td)
}
func Logout(c *fiber.Ctx) error {
metadata, err := ExtractTokenMetadata(c)
if err != nil {
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
delErr := DeleteTokens(c, metadata)
if delErr != nil {
return Error(c, fiber.StatusUnauthorized, delErr)
}
return Success(c, fiber.StatusOK, "successfully logged out")
}
func | (c *fiber.Ctx) error {
mapToken := map[string]string{}
if err := c.BodyParser(&mapToken); err != nil {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
refreshToken := mapToken["refresh_token"]
//verify the token
token, err := jwt.Parse(refreshToken, func(token *jwt.Token) (interface{}, error) {
//Make sure that the token method conform to "SigningMethodHMAC"
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("REFRESH_SECRET")), nil
})
//if there is an error, the token must have expired
if err != nil {
fmt.Println("the error: ", err)
return Error(c, fiber.StatusUnauthorized, errors.New("refresh token expired"))
}
//is token valid?
if _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {
return Error(c, fiber.StatusUnauthorized, err)
}
//Since token is valid, get the uuid:
claims, ok := token.Claims.(jwt.MapClaims) //the token claims should conform to MapClaims
if ok && token.Valid {
refreshUuid, ok := claims["refresh_uuid"].(string) //convert the interface to string
if !ok {
return Error(c, fiber.StatusUnprocessableEntity, err)
}
userId, err := strconv.ParseInt(fmt.Sprintf("%.f", claims["user_id"]), 10, 64)
if err != nil {
return Error(c, fiber.StatusUnprocessableEntity, errors.New("error occurred"))
}
deleted, delErr := DeleteAuth(c, refreshUuid)
if delErr != nil || deleted == 0 { //if any goes wrong
//Delete the previous Refresh Token
return Error(c, fiber.StatusUnauthorized, errors.New("unauthorized"))
}
//Create new pairs of refresh and access tokens
ts, createErr := CreateToken(userId)
if createErr != nil {
return Error(c, fiber.StatusForbidden, createErr)
}
saveErr := CreateAuth(c, userId, ts)
if saveErr != nil {
//save the tokens metadata to redis
return Error(c, fiber.StatusForbidden, saveErr)
}
tokens := map[string]string{
"access_token": ts.AccessToken,
"refresh_token": ts.RefreshToken,
}
return Success(c, fiber.StatusCreated, tokens)
} else {
return Error(c, fiber.StatusUnauthorized, errors.New("refresh expired"))
}
}
type AccessDetails struct {
AccessUuid string
UserId int64
}
type TokenDetails struct {
AccessToken string
RefreshToken string
AccessUuid string
RefreshUuid string
AtExpires int64
RtExpires int64
}
func CreateToken(userid int64) (*TokenDetails, error) {
td := &TokenDetails{}
td.AtExpires = time.Now().Add(time.Minute * 15).Unix()
td.AccessUuid = uuid.NewV4().String()
td.RtExpires = time.Now().Add(time.Hour * 24 * 7).Unix()
td.RefreshUuid = td.AccessUuid + "++" + strconv.Itoa(int(userid))
var err error
//Creating Access Token
atClaims := jwt.MapClaims{}
atClaims["authorized"] = true
atClaims["access_uuid"] = td.AccessUuid
atClaims["user_id"] = userid
atClaims["exp"] = td.AtExpires
at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)
td.AccessToken, err = at.SignedString([]byte(os.Getenv("ACCESS_SECRET")))
if err != nil {
return nil, err
}
//Creating Refresh Token
rtClaims := jwt.MapClaims{}
rtClaims["refresh_uuid"] = td.RefreshUuid
rtClaims["user_id"] = userid
rtClaims["exp"] = td.RtExpires
rt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)
td.RefreshToken, err = rt.SignedString([]byte(os.Getenv("REFRESH_SECRET")))
if err != nil {
return nil, err
}
return td, nil
}
func CreateAuth(c *fiber.Ctx, userid int64, td *TokenDetails) error {
at := time.Unix(td.AtExpires, 0) //converting Unix to UTC(to Time object)
rt := time.Unix(td.RtExpires, 0)
now := time.Now()
errAccess := client.Set(c.Context(), td.AccessUuid, strconv.Itoa(int(userid)), at.Sub(now)).Err()
if errAccess != nil {
return errAccess
}
errRefresh := client.Set(c.Context(), td.RefreshUuid, strconv.Itoa(int(userid)), rt.Sub(now)).Err()
if errRefresh != nil {
return errRefresh
}
return nil
}
func ExtractToken(c *fiber.Ctx) string {
bearToken := c.Get("Authorization")
strArr := strings.Split(bearToken, " ")
if len(strArr) == 2 {
return strArr[1]
}
return ""
}
func VerifyToken(c *fiber.Ctx) (*jwt.Token, error) {
tokenString := ExtractToken(c)
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("ACCESS_SECRET")), nil
})
if err != nil {
return nil, err
}
return token, nil
}
func TokenValid(c *fiber.Ctx) error {
token, err := VerifyToken(c)
if err != nil {
return err
}
if _, ok := token.Claims.(jwt.Claims); !ok || !token.Valid {
return err
}
return nil
}
func ExtractTokenMetadata(c *fiber.Ctx) (*AccessDetails, error) {
token, err := VerifyToken(c)
if err != nil {
return nil, err
}
claims, ok := token.Claims.(jwt.MapClaims)
if ok && token.Valid {
accessUuid, ok := claims["access_uuid"].(string)
if !ok {
return nil, err
}
userId, err := strconv.ParseInt(fmt.Sprintf("%.f", claims["user_id"]), 10, 64)
if err != nil {
return nil, err
}
return &AccessDetails{
AccessUuid: accessUuid,
UserId: userId,
}, nil
}
return nil, err
}
func FetchAuth(c *fiber.Ctx, authD *AccessDetails) (int64, error) {
userid, err := client.Get(c.Context(), authD.AccessUuid).Result()
if err != nil {
return 0, err
}
userID, _ := strconv.ParseInt(userid, 10, 64)
if authD.UserId != userID {
return 0, errors.New("unauthorized")
}
return userID, nil
}
func DeleteAuth(c *fiber.Ctx, givenUuid string) (int64, error) {
deleted, err := client.Del(c.Context(), givenUuid).Result()
if err != nil {
return 0, err
}
return deleted, nil
}
func DeleteTokens(c *fiber.Ctx, authD *AccessDetails) error {
//get the refresh uuid
refreshUuid := fmt.Sprintf("%s++%d", authD.AccessUuid, authD.UserId)
//delete access token
deletedAt, err := client.Del(c.Context(), authD.AccessUuid).Result()
if err != nil {
return err
}
//delete refresh token
deletedRt, err := client.Del(c.Context(), refreshUuid).Result()
if err != nil {
return err
}
//When the record is deleted, the return value is 1
if deletedAt != 1 || deletedRt != 1 {
return errors.New("something went wrong")
}
return nil
}
| Refresh | identifier_name |
controller.go | package compose
import (
"context"
"encoding/json"
"fmt"
"strings"
v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
"github.com/pkg/errors"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/controller"
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
"github.com/rancher/rancher/pkg/auth/tokens"
clusterClient "github.com/rancher/rancher/pkg/client/generated/cluster/v3"
managementClient "github.com/rancher/rancher/pkg/client/generated/management/v3"
projectClient "github.com/rancher/rancher/pkg/client/generated/project/v3"
"github.com/rancher/rancher/pkg/controllers/managementlegacy/compose/common"
"github.com/rancher/rancher/pkg/generated/compose"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
"github.com/rancher/rancher/pkg/types/config"
"github.com/rancher/rancher/pkg/types/config/systemtokens"
"github.com/rancher/rancher/pkg/user"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
const (
composeTokenPrefix = "compose-token-"
description = "token for compose"
url = "https://localhost:%v/v3"
)
// Lifecycle for GlobalComposeConfig is a controller which watches composeConfig and execute the yaml config and create a bunch of global resources. There is no sync logic between yaml file and resources, which means config is only executed once. And resource is not deleted even if the compose config is deleted.
type Lifecycle struct {
TokenClient v3.TokenInterface
UserClient v3.UserInterface
systemTokens systemtokens.Interface
UserManager user.Manager
HTTPSPortGetter common.KubeConfigGetter
ComposeClient v3.ComposeConfigInterface
}
func Register(ctx context.Context, managementContext *config.ManagementContext, portGetter common.KubeConfigGetter) {
composeClient := managementContext.Management.ComposeConfigs("")
tokenClient := managementContext.Management.Tokens("")
userClient := managementContext.Management.Users("")
l := Lifecycle{
systemTokens: managementContext.SystemTokens,
HTTPSPortGetter: portGetter,
UserManager: managementContext.UserManager,
TokenClient: tokenClient,
UserClient: userClient,
ComposeClient: composeClient,
}
composeClient.AddHandler(ctx, "compose-controller", l.sync)
}
func (l Lifecycle) sync(key string, obj *v3.ComposeConfig) (runtime.Object, error) {
if key == "" || obj == nil {
return nil, nil
}
newObj, err := v32.ComposeConditionExecuted.Once(obj, func() (runtime.Object, error) {
obj, err := l.Create(obj)
if err != nil {
return obj, &controller.ForgetError{
Err: err,
}
}
return obj, nil
})
obj, _ = l.ComposeClient.Update(newObj.(*v3.ComposeConfig))
return obj, err
}
func (l Lifecycle) Create(obj *v3.ComposeConfig) (*v3.ComposeConfig, error) { | tokenPrefix := composeTokenPrefix + user.Name
token, err := l.systemTokens.EnsureSystemToken(tokenPrefix, description, "compose", user.Name, nil, true)
if err != nil {
return obj, err
}
tokenName, _ := tokens.SplitTokenParts(token)
defer func() {
if err := l.systemTokens.DeleteToken(tokenName); err != nil {
logrus.Errorf("cleanup for compose token [%s] failed, will not retry: %v", tokenName, err)
}
}()
config := &compose.Config{}
if err := yaml.Unmarshal([]byte(obj.Spec.RancherCompose), config); err != nil {
return obj, err
}
if err := up(token, l.HTTPSPortGetter.GetHTTPSPort(), config); err != nil {
return obj, err
}
v32.ComposeConditionExecuted.True(obj)
return obj, nil
}
func GetSchemas(token string, port int) (map[string]types.Schema, map[string]types.Schema, map[string]types.Schema, error) {
cc, err := clusterClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/clusters",
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
mc, err := managementClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port),
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
pc, err := projectClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/projects",
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
return cc.Types, mc.Types, pc.Types, nil
}
func up(token string, port int, config *compose.Config) error {
clusterSchemas, managementSchemas, projectSchemas, err := GetSchemas(token, port)
if err != nil {
return err
}
// referenceMap is a map of schemaType with name -> id value
referenceMap := map[string]map[string]string{}
rawData, err := json.Marshal(config)
if err != nil {
return err
}
rawMap := map[string]interface{}{}
if err := json.Unmarshal(rawData, &rawMap); err != nil {
return err
}
delete(rawMap, "version")
allSchemas := getAllSchemas(clusterSchemas, managementSchemas, projectSchemas)
sortedSchemas := common.SortSchema(allSchemas)
baseClusterClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/cluster",
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseManagementClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port),
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseProjectClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/project",
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseURL := fmt.Sprintf(url, port)
configManager := configClientManager{
clusterSchemas: clusterSchemas,
managementSchemas: managementSchemas,
projectSchemas: projectSchemas,
baseClusterClient: &baseClusterClient,
baseManagementClient: &baseManagementClient,
baseProjectClient: &baseProjectClient,
baseURL: baseURL,
}
for _, schemaKey := range sortedSchemas {
key := allSchemas[schemaKey].PluralName
v, ok := rawMap[key]
if !ok {
continue
}
value, ok := v.(map[string]interface{})
if !ok {
continue
}
var baseClient *clientbase.APIBaseClient
for name, data := range value {
dataMap, ok := data.(map[string]interface{})
if !ok {
break
}
baseClient, err = configManager.ConfigBaseClient(schemaKey, dataMap, referenceMap, "")
if err != nil {
return err
}
if err := common.ReplaceGlobalReference(allSchemas[schemaKey], dataMap, referenceMap, &baseManagementClient); err != nil {
return err
}
clusterID := convert.ToString(dataMap["clusterId"])
baseClient, err = configManager.ConfigBaseClient(schemaKey, dataMap, referenceMap, clusterID)
if err != nil {
return err
}
dataMap["name"] = name
respObj := map[string]interface{}{}
// in here we have to make sure the same name won't be created twice
created := map[string]string{}
if err := baseClient.List(schemaKey, &types.ListOpts{}, &respObj); err != nil {
return err
}
if data, ok := respObj["data"]; ok {
if collections, ok := data.([]interface{}); ok {
for _, obj := range collections {
if objMap, ok := obj.(map[string]interface{}); ok {
createdName := common.GetValue(objMap, "name")
if createdName != "" {
created[createdName] = common.GetValue(objMap, "id")
}
}
}
}
}
id := ""
if v, ok := created[name]; ok {
id = v
existing := &types.Resource{}
if err := baseClient.ByID(schemaKey, id, existing); err != nil {
return err
}
if err := baseClient.Update(schemaKey, existing, dataMap, nil); err != nil {
return err
}
} else {
if err := baseClient.Create(schemaKey, dataMap, &respObj); err != nil && !strings.Contains(err.Error(), "already exist") {
return err
} else if err != nil && strings.Contains(err.Error(), "already exist") {
break
}
v, ok := respObj["id"]
if !ok {
return errors.Errorf("id is missing after creating %s obj", schemaKey)
}
id = v.(string)
}
}
// fill in reference map name -> id
if err := common.FillInReferenceMap(baseClient, schemaKey, referenceMap, nil); err != nil {
return err
}
}
return nil
}
type configClientManager struct {
clusterSchemas map[string]types.Schema
managementSchemas map[string]types.Schema
projectSchemas map[string]types.Schema
baseClusterClient *clientbase.APIBaseClient
baseManagementClient *clientbase.APIBaseClient
baseProjectClient *clientbase.APIBaseClient
baseURL string
}
// GetBaseClient config a baseClient with a special base url based on schema type
func (c configClientManager) ConfigBaseClient(schemaType string, data map[string]interface{}, referenceMap map[string]map[string]string, clusterID string) (*clientbase.APIBaseClient, error) {
if _, ok := c.clusterSchemas[schemaType]; ok {
c.baseClusterClient.Opts.URL = c.baseURL + fmt.Sprintf("/cluster/%s", clusterID)
return c.baseClusterClient, nil
}
if _, ok := c.managementSchemas[schemaType]; ok {
return c.baseManagementClient, nil
}
if _, ok := c.projectSchemas[schemaType]; ok {
projectName := common.GetValue(data, "projectId")
if _, ok := referenceMap["project"]; !ok {
filter := map[string]string{
"clusterId": clusterID,
}
if err := common.FillInReferenceMap(c.baseManagementClient, "project", referenceMap, filter); err != nil {
return nil, err
}
}
projectID := referenceMap["project"][projectName]
c.baseProjectClient.Opts.URL = c.baseURL + fmt.Sprintf("/projects/%s", projectID)
return c.baseProjectClient, nil
}
return nil, errors.Errorf("schema type %s not supported", schemaType)
}
func getAllSchemas(clusterSchemas, managementSchemas, projectSchemas map[string]types.Schema) map[string]types.Schema {
r := map[string]types.Schema{}
for k, schema := range clusterSchemas {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
}
for k, schema := range managementSchemas {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
}
for k, schema := range projectSchemas {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
}
return r
} | userID := obj.Annotations["field.cattle.io/creatorId"]
user, err := l.UserClient.Get(userID, metav1.GetOptions{})
if err != nil {
return obj, err
} | random_line_split |
controller.go | package compose
import (
"context"
"encoding/json"
"fmt"
"strings"
v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
"github.com/pkg/errors"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/controller"
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
"github.com/rancher/rancher/pkg/auth/tokens"
clusterClient "github.com/rancher/rancher/pkg/client/generated/cluster/v3"
managementClient "github.com/rancher/rancher/pkg/client/generated/management/v3"
projectClient "github.com/rancher/rancher/pkg/client/generated/project/v3"
"github.com/rancher/rancher/pkg/controllers/managementlegacy/compose/common"
"github.com/rancher/rancher/pkg/generated/compose"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
"github.com/rancher/rancher/pkg/types/config"
"github.com/rancher/rancher/pkg/types/config/systemtokens"
"github.com/rancher/rancher/pkg/user"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
const (
composeTokenPrefix = "compose-token-"
description = "token for compose"
url = "https://localhost:%v/v3"
)
// Lifecycle for GlobalComposeConfig is a controller which watches composeConfig and execute the yaml config and create a bunch of global resources. There is no sync logic between yaml file and resources, which means config is only executed once. And resource is not deleted even if the compose config is deleted.
type Lifecycle struct {
TokenClient v3.TokenInterface
UserClient v3.UserInterface
systemTokens systemtokens.Interface
UserManager user.Manager
HTTPSPortGetter common.KubeConfigGetter
ComposeClient v3.ComposeConfigInterface
}
func Register(ctx context.Context, managementContext *config.ManagementContext, portGetter common.KubeConfigGetter) {
composeClient := managementContext.Management.ComposeConfigs("")
tokenClient := managementContext.Management.Tokens("")
userClient := managementContext.Management.Users("")
l := Lifecycle{
systemTokens: managementContext.SystemTokens,
HTTPSPortGetter: portGetter,
UserManager: managementContext.UserManager,
TokenClient: tokenClient,
UserClient: userClient,
ComposeClient: composeClient,
}
composeClient.AddHandler(ctx, "compose-controller", l.sync)
}
func (l Lifecycle) sync(key string, obj *v3.ComposeConfig) (runtime.Object, error) {
if key == "" || obj == nil {
return nil, nil
}
newObj, err := v32.ComposeConditionExecuted.Once(obj, func() (runtime.Object, error) {
obj, err := l.Create(obj)
if err != nil {
return obj, &controller.ForgetError{
Err: err,
}
}
return obj, nil
})
obj, _ = l.ComposeClient.Update(newObj.(*v3.ComposeConfig))
return obj, err
}
func (l Lifecycle) Create(obj *v3.ComposeConfig) (*v3.ComposeConfig, error) {
userID := obj.Annotations["field.cattle.io/creatorId"]
user, err := l.UserClient.Get(userID, metav1.GetOptions{})
if err != nil {
return obj, err
}
tokenPrefix := composeTokenPrefix + user.Name
token, err := l.systemTokens.EnsureSystemToken(tokenPrefix, description, "compose", user.Name, nil, true)
if err != nil {
return obj, err
}
tokenName, _ := tokens.SplitTokenParts(token)
defer func() {
if err := l.systemTokens.DeleteToken(tokenName); err != nil {
logrus.Errorf("cleanup for compose token [%s] failed, will not retry: %v", tokenName, err)
}
}()
config := &compose.Config{}
if err := yaml.Unmarshal([]byte(obj.Spec.RancherCompose), config); err != nil {
return obj, err
}
if err := up(token, l.HTTPSPortGetter.GetHTTPSPort(), config); err != nil {
return obj, err
}
v32.ComposeConditionExecuted.True(obj)
return obj, nil
}
func GetSchemas(token string, port int) (map[string]types.Schema, map[string]types.Schema, map[string]types.Schema, error) {
cc, err := clusterClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/clusters",
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
mc, err := managementClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port),
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
pc, err := projectClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/projects",
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
return cc.Types, mc.Types, pc.Types, nil
}
func up(token string, port int, config *compose.Config) error {
clusterSchemas, managementSchemas, projectSchemas, err := GetSchemas(token, port)
if err != nil {
return err
}
// referenceMap is a map of schemaType with name -> id value
referenceMap := map[string]map[string]string{}
rawData, err := json.Marshal(config)
if err != nil {
return err
}
rawMap := map[string]interface{}{}
if err := json.Unmarshal(rawData, &rawMap); err != nil {
return err
}
delete(rawMap, "version")
allSchemas := getAllSchemas(clusterSchemas, managementSchemas, projectSchemas)
sortedSchemas := common.SortSchema(allSchemas)
baseClusterClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/cluster",
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseManagementClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port),
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseProjectClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/project",
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseURL := fmt.Sprintf(url, port)
configManager := configClientManager{
clusterSchemas: clusterSchemas,
managementSchemas: managementSchemas,
projectSchemas: projectSchemas,
baseClusterClient: &baseClusterClient,
baseManagementClient: &baseManagementClient,
baseProjectClient: &baseProjectClient,
baseURL: baseURL,
}
for _, schemaKey := range sortedSchemas {
key := allSchemas[schemaKey].PluralName
v, ok := rawMap[key]
if !ok {
continue
}
value, ok := v.(map[string]interface{})
if !ok {
continue
}
var baseClient *clientbase.APIBaseClient
for name, data := range value {
dataMap, ok := data.(map[string]interface{})
if !ok {
break
}
baseClient, err = configManager.ConfigBaseClient(schemaKey, dataMap, referenceMap, "")
if err != nil {
return err
}
if err := common.ReplaceGlobalReference(allSchemas[schemaKey], dataMap, referenceMap, &baseManagementClient); err != nil {
return err
}
clusterID := convert.ToString(dataMap["clusterId"])
baseClient, err = configManager.ConfigBaseClient(schemaKey, dataMap, referenceMap, clusterID)
if err != nil {
return err
}
dataMap["name"] = name
respObj := map[string]interface{}{}
// in here we have to make sure the same name won't be created twice
created := map[string]string{}
if err := baseClient.List(schemaKey, &types.ListOpts{}, &respObj); err != nil {
return err
}
if data, ok := respObj["data"]; ok {
if collections, ok := data.([]interface{}); ok {
for _, obj := range collections {
if objMap, ok := obj.(map[string]interface{}); ok {
createdName := common.GetValue(objMap, "name")
if createdName != "" {
created[createdName] = common.GetValue(objMap, "id")
}
}
}
}
}
id := ""
if v, ok := created[name]; ok {
id = v
existing := &types.Resource{}
if err := baseClient.ByID(schemaKey, id, existing); err != nil {
return err
}
if err := baseClient.Update(schemaKey, existing, dataMap, nil); err != nil {
return err
}
} else {
if err := baseClient.Create(schemaKey, dataMap, &respObj); err != nil && !strings.Contains(err.Error(), "already exist") {
return err
} else if err != nil && strings.Contains(err.Error(), "already exist") {
break
}
v, ok := respObj["id"]
if !ok {
return errors.Errorf("id is missing after creating %s obj", schemaKey)
}
id = v.(string)
}
}
// fill in reference map name -> id
if err := common.FillInReferenceMap(baseClient, schemaKey, referenceMap, nil); err != nil {
return err
}
}
return nil
}
type configClientManager struct {
clusterSchemas map[string]types.Schema
managementSchemas map[string]types.Schema
projectSchemas map[string]types.Schema
baseClusterClient *clientbase.APIBaseClient
baseManagementClient *clientbase.APIBaseClient
baseProjectClient *clientbase.APIBaseClient
baseURL string
}
// GetBaseClient config a baseClient with a special base url based on schema type
func (c configClientManager) ConfigBaseClient(schemaType string, data map[string]interface{}, referenceMap map[string]map[string]string, clusterID string) (*clientbase.APIBaseClient, error) {
if _, ok := c.clusterSchemas[schemaType]; ok {
c.baseClusterClient.Opts.URL = c.baseURL + fmt.Sprintf("/cluster/%s", clusterID)
return c.baseClusterClient, nil
}
if _, ok := c.managementSchemas[schemaType]; ok {
return c.baseManagementClient, nil
}
if _, ok := c.projectSchemas[schemaType]; ok {
projectName := common.GetValue(data, "projectId")
if _, ok := referenceMap["project"]; !ok {
filter := map[string]string{
"clusterId": clusterID,
}
if err := common.FillInReferenceMap(c.baseManagementClient, "project", referenceMap, filter); err != nil {
return nil, err
}
}
projectID := referenceMap["project"][projectName]
c.baseProjectClient.Opts.URL = c.baseURL + fmt.Sprintf("/projects/%s", projectID)
return c.baseProjectClient, nil
}
return nil, errors.Errorf("schema type %s not supported", schemaType)
}
func getAllSchemas(clusterSchemas, managementSchemas, projectSchemas map[string]types.Schema) map[string]types.Schema {
r := map[string]types.Schema{}
for k, schema := range clusterSchemas {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
}
for k, schema := range managementSchemas {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
}
for k, schema := range projectSchemas |
return r
}
| {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
} | conditional_block |
controller.go | package compose
import (
"context"
"encoding/json"
"fmt"
"strings"
v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
"github.com/pkg/errors"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/controller"
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
"github.com/rancher/rancher/pkg/auth/tokens"
clusterClient "github.com/rancher/rancher/pkg/client/generated/cluster/v3"
managementClient "github.com/rancher/rancher/pkg/client/generated/management/v3"
projectClient "github.com/rancher/rancher/pkg/client/generated/project/v3"
"github.com/rancher/rancher/pkg/controllers/managementlegacy/compose/common"
"github.com/rancher/rancher/pkg/generated/compose"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
"github.com/rancher/rancher/pkg/types/config"
"github.com/rancher/rancher/pkg/types/config/systemtokens"
"github.com/rancher/rancher/pkg/user"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
const (
composeTokenPrefix = "compose-token-"
description = "token for compose"
url = "https://localhost:%v/v3"
)
// Lifecycle for GlobalComposeConfig is a controller which watches composeConfig and execute the yaml config and create a bunch of global resources. There is no sync logic between yaml file and resources, which means config is only executed once. And resource is not deleted even if the compose config is deleted.
type Lifecycle struct {
TokenClient v3.TokenInterface
UserClient v3.UserInterface
systemTokens systemtokens.Interface
UserManager user.Manager
HTTPSPortGetter common.KubeConfigGetter
ComposeClient v3.ComposeConfigInterface
}
func Register(ctx context.Context, managementContext *config.ManagementContext, portGetter common.KubeConfigGetter) {
composeClient := managementContext.Management.ComposeConfigs("")
tokenClient := managementContext.Management.Tokens("")
userClient := managementContext.Management.Users("")
l := Lifecycle{
systemTokens: managementContext.SystemTokens,
HTTPSPortGetter: portGetter,
UserManager: managementContext.UserManager,
TokenClient: tokenClient,
UserClient: userClient,
ComposeClient: composeClient,
}
composeClient.AddHandler(ctx, "compose-controller", l.sync)
}
func (l Lifecycle) sync(key string, obj *v3.ComposeConfig) (runtime.Object, error) {
if key == "" || obj == nil {
return nil, nil
}
newObj, err := v32.ComposeConditionExecuted.Once(obj, func() (runtime.Object, error) {
obj, err := l.Create(obj)
if err != nil {
return obj, &controller.ForgetError{
Err: err,
}
}
return obj, nil
})
obj, _ = l.ComposeClient.Update(newObj.(*v3.ComposeConfig))
return obj, err
}
func (l Lifecycle) Create(obj *v3.ComposeConfig) (*v3.ComposeConfig, error) {
userID := obj.Annotations["field.cattle.io/creatorId"]
user, err := l.UserClient.Get(userID, metav1.GetOptions{})
if err != nil {
return obj, err
}
tokenPrefix := composeTokenPrefix + user.Name
token, err := l.systemTokens.EnsureSystemToken(tokenPrefix, description, "compose", user.Name, nil, true)
if err != nil {
return obj, err
}
tokenName, _ := tokens.SplitTokenParts(token)
defer func() {
if err := l.systemTokens.DeleteToken(tokenName); err != nil {
logrus.Errorf("cleanup for compose token [%s] failed, will not retry: %v", tokenName, err)
}
}()
config := &compose.Config{}
if err := yaml.Unmarshal([]byte(obj.Spec.RancherCompose), config); err != nil {
return obj, err
}
if err := up(token, l.HTTPSPortGetter.GetHTTPSPort(), config); err != nil {
return obj, err
}
v32.ComposeConditionExecuted.True(obj)
return obj, nil
}
func GetSchemas(token string, port int) (map[string]types.Schema, map[string]types.Schema, map[string]types.Schema, error) {
cc, err := clusterClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/clusters",
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
mc, err := managementClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port),
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
pc, err := projectClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/projects",
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
return cc.Types, mc.Types, pc.Types, nil
}
func up(token string, port int, config *compose.Config) error |
type configClientManager struct {
clusterSchemas map[string]types.Schema
managementSchemas map[string]types.Schema
projectSchemas map[string]types.Schema
baseClusterClient *clientbase.APIBaseClient
baseManagementClient *clientbase.APIBaseClient
baseProjectClient *clientbase.APIBaseClient
baseURL string
}
// GetBaseClient config a baseClient with a special base url based on schema type
func (c configClientManager) ConfigBaseClient(schemaType string, data map[string]interface{}, referenceMap map[string]map[string]string, clusterID string) (*clientbase.APIBaseClient, error) {
if _, ok := c.clusterSchemas[schemaType]; ok {
c.baseClusterClient.Opts.URL = c.baseURL + fmt.Sprintf("/cluster/%s", clusterID)
return c.baseClusterClient, nil
}
if _, ok := c.managementSchemas[schemaType]; ok {
return c.baseManagementClient, nil
}
if _, ok := c.projectSchemas[schemaType]; ok {
projectName := common.GetValue(data, "projectId")
if _, ok := referenceMap["project"]; !ok {
filter := map[string]string{
"clusterId": clusterID,
}
if err := common.FillInReferenceMap(c.baseManagementClient, "project", referenceMap, filter); err != nil {
return nil, err
}
}
projectID := referenceMap["project"][projectName]
c.baseProjectClient.Opts.URL = c.baseURL + fmt.Sprintf("/projects/%s", projectID)
return c.baseProjectClient, nil
}
return nil, errors.Errorf("schema type %s not supported", schemaType)
}
func getAllSchemas(clusterSchemas, managementSchemas, projectSchemas map[string]types.Schema) map[string]types.Schema {
r := map[string]types.Schema{}
for k, schema := range clusterSchemas {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
}
for k, schema := range managementSchemas {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
}
for k, schema := range projectSchemas {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
}
return r
}
| {
clusterSchemas, managementSchemas, projectSchemas, err := GetSchemas(token, port)
if err != nil {
return err
}
// referenceMap is a map of schemaType with name -> id value
referenceMap := map[string]map[string]string{}
rawData, err := json.Marshal(config)
if err != nil {
return err
}
rawMap := map[string]interface{}{}
if err := json.Unmarshal(rawData, &rawMap); err != nil {
return err
}
delete(rawMap, "version")
allSchemas := getAllSchemas(clusterSchemas, managementSchemas, projectSchemas)
sortedSchemas := common.SortSchema(allSchemas)
baseClusterClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/cluster",
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseManagementClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port),
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseProjectClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/project",
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseURL := fmt.Sprintf(url, port)
configManager := configClientManager{
clusterSchemas: clusterSchemas,
managementSchemas: managementSchemas,
projectSchemas: projectSchemas,
baseClusterClient: &baseClusterClient,
baseManagementClient: &baseManagementClient,
baseProjectClient: &baseProjectClient,
baseURL: baseURL,
}
for _, schemaKey := range sortedSchemas {
key := allSchemas[schemaKey].PluralName
v, ok := rawMap[key]
if !ok {
continue
}
value, ok := v.(map[string]interface{})
if !ok {
continue
}
var baseClient *clientbase.APIBaseClient
for name, data := range value {
dataMap, ok := data.(map[string]interface{})
if !ok {
break
}
baseClient, err = configManager.ConfigBaseClient(schemaKey, dataMap, referenceMap, "")
if err != nil {
return err
}
if err := common.ReplaceGlobalReference(allSchemas[schemaKey], dataMap, referenceMap, &baseManagementClient); err != nil {
return err
}
clusterID := convert.ToString(dataMap["clusterId"])
baseClient, err = configManager.ConfigBaseClient(schemaKey, dataMap, referenceMap, clusterID)
if err != nil {
return err
}
dataMap["name"] = name
respObj := map[string]interface{}{}
// in here we have to make sure the same name won't be created twice
created := map[string]string{}
if err := baseClient.List(schemaKey, &types.ListOpts{}, &respObj); err != nil {
return err
}
if data, ok := respObj["data"]; ok {
if collections, ok := data.([]interface{}); ok {
for _, obj := range collections {
if objMap, ok := obj.(map[string]interface{}); ok {
createdName := common.GetValue(objMap, "name")
if createdName != "" {
created[createdName] = common.GetValue(objMap, "id")
}
}
}
}
}
id := ""
if v, ok := created[name]; ok {
id = v
existing := &types.Resource{}
if err := baseClient.ByID(schemaKey, id, existing); err != nil {
return err
}
if err := baseClient.Update(schemaKey, existing, dataMap, nil); err != nil {
return err
}
} else {
if err := baseClient.Create(schemaKey, dataMap, &respObj); err != nil && !strings.Contains(err.Error(), "already exist") {
return err
} else if err != nil && strings.Contains(err.Error(), "already exist") {
break
}
v, ok := respObj["id"]
if !ok {
return errors.Errorf("id is missing after creating %s obj", schemaKey)
}
id = v.(string)
}
}
// fill in reference map name -> id
if err := common.FillInReferenceMap(baseClient, schemaKey, referenceMap, nil); err != nil {
return err
}
}
return nil
} | identifier_body |
controller.go | package compose
import (
"context"
"encoding/json"
"fmt"
"strings"
v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
"github.com/pkg/errors"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/controller"
"github.com/rancher/norman/types"
"github.com/rancher/norman/types/convert"
"github.com/rancher/rancher/pkg/auth/tokens"
clusterClient "github.com/rancher/rancher/pkg/client/generated/cluster/v3"
managementClient "github.com/rancher/rancher/pkg/client/generated/management/v3"
projectClient "github.com/rancher/rancher/pkg/client/generated/project/v3"
"github.com/rancher/rancher/pkg/controllers/managementlegacy/compose/common"
"github.com/rancher/rancher/pkg/generated/compose"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
"github.com/rancher/rancher/pkg/types/config"
"github.com/rancher/rancher/pkg/types/config/systemtokens"
"github.com/rancher/rancher/pkg/user"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
const (
composeTokenPrefix = "compose-token-"
description = "token for compose"
url = "https://localhost:%v/v3"
)
// Lifecycle for GlobalComposeConfig is a controller which watches composeConfig and execute the yaml config and create a bunch of global resources. There is no sync logic between yaml file and resources, which means config is only executed once. And resource is not deleted even if the compose config is deleted.
type Lifecycle struct {
TokenClient v3.TokenInterface
UserClient v3.UserInterface
systemTokens systemtokens.Interface
UserManager user.Manager
HTTPSPortGetter common.KubeConfigGetter
ComposeClient v3.ComposeConfigInterface
}
func Register(ctx context.Context, managementContext *config.ManagementContext, portGetter common.KubeConfigGetter) {
composeClient := managementContext.Management.ComposeConfigs("")
tokenClient := managementContext.Management.Tokens("")
userClient := managementContext.Management.Users("")
l := Lifecycle{
systemTokens: managementContext.SystemTokens,
HTTPSPortGetter: portGetter,
UserManager: managementContext.UserManager,
TokenClient: tokenClient,
UserClient: userClient,
ComposeClient: composeClient,
}
composeClient.AddHandler(ctx, "compose-controller", l.sync)
}
func (l Lifecycle) sync(key string, obj *v3.ComposeConfig) (runtime.Object, error) {
if key == "" || obj == nil {
return nil, nil
}
newObj, err := v32.ComposeConditionExecuted.Once(obj, func() (runtime.Object, error) {
obj, err := l.Create(obj)
if err != nil {
return obj, &controller.ForgetError{
Err: err,
}
}
return obj, nil
})
obj, _ = l.ComposeClient.Update(newObj.(*v3.ComposeConfig))
return obj, err
}
func (l Lifecycle) Create(obj *v3.ComposeConfig) (*v3.ComposeConfig, error) {
userID := obj.Annotations["field.cattle.io/creatorId"]
user, err := l.UserClient.Get(userID, metav1.GetOptions{})
if err != nil {
return obj, err
}
tokenPrefix := composeTokenPrefix + user.Name
token, err := l.systemTokens.EnsureSystemToken(tokenPrefix, description, "compose", user.Name, nil, true)
if err != nil {
return obj, err
}
tokenName, _ := tokens.SplitTokenParts(token)
defer func() {
if err := l.systemTokens.DeleteToken(tokenName); err != nil {
logrus.Errorf("cleanup for compose token [%s] failed, will not retry: %v", tokenName, err)
}
}()
config := &compose.Config{}
if err := yaml.Unmarshal([]byte(obj.Spec.RancherCompose), config); err != nil {
return obj, err
}
if err := up(token, l.HTTPSPortGetter.GetHTTPSPort(), config); err != nil {
return obj, err
}
v32.ComposeConditionExecuted.True(obj)
return obj, nil
}
func GetSchemas(token string, port int) (map[string]types.Schema, map[string]types.Schema, map[string]types.Schema, error) {
cc, err := clusterClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/clusters",
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
mc, err := managementClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port),
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
pc, err := projectClient.NewClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/projects",
TokenKey: token,
Insecure: true,
})
if err != nil {
return nil, nil, nil, err
}
return cc.Types, mc.Types, pc.Types, nil
}
func up(token string, port int, config *compose.Config) error {
clusterSchemas, managementSchemas, projectSchemas, err := GetSchemas(token, port)
if err != nil {
return err
}
// referenceMap is a map of schemaType with name -> id value
referenceMap := map[string]map[string]string{}
rawData, err := json.Marshal(config)
if err != nil {
return err
}
rawMap := map[string]interface{}{}
if err := json.Unmarshal(rawData, &rawMap); err != nil {
return err
}
delete(rawMap, "version")
allSchemas := getAllSchemas(clusterSchemas, managementSchemas, projectSchemas)
sortedSchemas := common.SortSchema(allSchemas)
baseClusterClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/cluster",
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseManagementClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port),
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseProjectClient, err := clientbase.NewAPIClient(&clientbase.ClientOpts{
URL: fmt.Sprintf(url, port) + "/project",
TokenKey: token,
Insecure: true,
})
if err != nil {
return err
}
baseURL := fmt.Sprintf(url, port)
configManager := configClientManager{
clusterSchemas: clusterSchemas,
managementSchemas: managementSchemas,
projectSchemas: projectSchemas,
baseClusterClient: &baseClusterClient,
baseManagementClient: &baseManagementClient,
baseProjectClient: &baseProjectClient,
baseURL: baseURL,
}
for _, schemaKey := range sortedSchemas {
key := allSchemas[schemaKey].PluralName
v, ok := rawMap[key]
if !ok {
continue
}
value, ok := v.(map[string]interface{})
if !ok {
continue
}
var baseClient *clientbase.APIBaseClient
for name, data := range value {
dataMap, ok := data.(map[string]interface{})
if !ok {
break
}
baseClient, err = configManager.ConfigBaseClient(schemaKey, dataMap, referenceMap, "")
if err != nil {
return err
}
if err := common.ReplaceGlobalReference(allSchemas[schemaKey], dataMap, referenceMap, &baseManagementClient); err != nil {
return err
}
clusterID := convert.ToString(dataMap["clusterId"])
baseClient, err = configManager.ConfigBaseClient(schemaKey, dataMap, referenceMap, clusterID)
if err != nil {
return err
}
dataMap["name"] = name
respObj := map[string]interface{}{}
// in here we have to make sure the same name won't be created twice
created := map[string]string{}
if err := baseClient.List(schemaKey, &types.ListOpts{}, &respObj); err != nil {
return err
}
if data, ok := respObj["data"]; ok {
if collections, ok := data.([]interface{}); ok {
for _, obj := range collections {
if objMap, ok := obj.(map[string]interface{}); ok {
createdName := common.GetValue(objMap, "name")
if createdName != "" {
created[createdName] = common.GetValue(objMap, "id")
}
}
}
}
}
id := ""
if v, ok := created[name]; ok {
id = v
existing := &types.Resource{}
if err := baseClient.ByID(schemaKey, id, existing); err != nil {
return err
}
if err := baseClient.Update(schemaKey, existing, dataMap, nil); err != nil {
return err
}
} else {
if err := baseClient.Create(schemaKey, dataMap, &respObj); err != nil && !strings.Contains(err.Error(), "already exist") {
return err
} else if err != nil && strings.Contains(err.Error(), "already exist") {
break
}
v, ok := respObj["id"]
if !ok {
return errors.Errorf("id is missing after creating %s obj", schemaKey)
}
id = v.(string)
}
}
// fill in reference map name -> id
if err := common.FillInReferenceMap(baseClient, schemaKey, referenceMap, nil); err != nil {
return err
}
}
return nil
}
type configClientManager struct {
clusterSchemas map[string]types.Schema
managementSchemas map[string]types.Schema
projectSchemas map[string]types.Schema
baseClusterClient *clientbase.APIBaseClient
baseManagementClient *clientbase.APIBaseClient
baseProjectClient *clientbase.APIBaseClient
baseURL string
}
// GetBaseClient config a baseClient with a special base url based on schema type
func (c configClientManager) ConfigBaseClient(schemaType string, data map[string]interface{}, referenceMap map[string]map[string]string, clusterID string) (*clientbase.APIBaseClient, error) {
if _, ok := c.clusterSchemas[schemaType]; ok {
c.baseClusterClient.Opts.URL = c.baseURL + fmt.Sprintf("/cluster/%s", clusterID)
return c.baseClusterClient, nil
}
if _, ok := c.managementSchemas[schemaType]; ok {
return c.baseManagementClient, nil
}
if _, ok := c.projectSchemas[schemaType]; ok {
projectName := common.GetValue(data, "projectId")
if _, ok := referenceMap["project"]; !ok {
filter := map[string]string{
"clusterId": clusterID,
}
if err := common.FillInReferenceMap(c.baseManagementClient, "project", referenceMap, filter); err != nil {
return nil, err
}
}
projectID := referenceMap["project"][projectName]
c.baseProjectClient.Opts.URL = c.baseURL + fmt.Sprintf("/projects/%s", projectID)
return c.baseProjectClient, nil
}
return nil, errors.Errorf("schema type %s not supported", schemaType)
}
func | (clusterSchemas, managementSchemas, projectSchemas map[string]types.Schema) map[string]types.Schema {
r := map[string]types.Schema{}
for k, schema := range clusterSchemas {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
}
for k, schema := range managementSchemas {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
}
for k, schema := range projectSchemas {
if _, ok := schema.ResourceFields["creatorId"]; !ok {
continue
}
r[k] = schema
}
return r
}
| getAllSchemas | identifier_name |
process_rwis.py | """Process the IDOT RWIS Data files"""
from __future__ import print_function
import datetime
import os
import sys
import smtplib
import ftplib
import subprocess
from email.mime.text import MIMEText
import pandas as pd
import pytz
import numpy as np
from metpy.units import units
import metpy.calc as mcalc
from pyiem.tracker import TrackerEngine
from pyiem.datatypes import temperature, speed
from pyiem.network import Table as NetworkTable
from pyiem.observation import Observation
from pyiem import util
GTS = sys.argv[1]
NT = NetworkTable('IA_RWIS')
IEM = util.get_dbconn('iem')
PORTFOLIO = util.get_dbconn('portfolio')
INCOMING = "/mesonet/data/incoming/rwis"
RWIS2METAR = {'00': 'XADA', '01': 'XALG', '02': 'XATN', '03': 'XALT',
'04': 'XAME', '05': 'XANK', '06': 'XAVO', '07': 'XBUR',
'08': 'XCAR', '09': 'XCDR', '10': 'XCID', '11': 'XCEN',
'12': 'XCOU', '13': 'XCRE', '14': 'XDAV', '15': 'XDEC',
'16': 'XDSM', '17': 'XDES', '18': 'XDST', '19': 'XDEW',
'20': 'XDUB', '21': 'XFOD', '22': 'XGRI', '23': 'XIAC',
'24': 'XIOW', '25': 'XJEF', '26': 'XLEO', '27': 'XMAN',
'28': 'XMAQ', '29': 'XMAR', '30': 'XMCW', '31': 'XMIS',
'32': 'XMOU', '33': 'XNEW', '34': 'XONA', '35': 'XOSC',
'36': 'XOTT', '37': 'XPEL', '38': 'XRED', '39': 'XSID',
'40': 'XSIG', '41': 'XSIO', '42': 'XSPE', '43': 'XSTO',
'44': 'XTIP', '45': 'XURB', '46': 'XWAT', '47': 'XWIL',
'48': 'XWBG', '49': 'XHAN', '50': 'XSBI', '51': 'XIGI',
'52': 'XCRI', '53': 'XCFI', '54': 'XSYI', '55': 'XBFI',
'56': 'XDYI', '57': 'XTMI', '58': 'XPFI', '59': 'XCTI',
'60': 'XDNI', '61': 'XQCI', '62': 'XSMI', '63': 'XRWI',
'64': 'XETI', '65': 'XCCI', '66': 'XKSI', '67': 'XKNI',
'68': 'XCMI', '69': 'XRGI', '70': 'XKYI', '72': 'XCTI'}
KNOWN_UNKNOWNS = []
def get_nwsli(rpuid):
"""Lookup a rpuid and return the NWSLI"""
rpuid = int(rpuid)
for sid in NT.sts:
if NT.sts[sid]['remote_id'] == rpuid:
return sid
return None
def get_temp(val):
"""Attempt to convert a RWIS temperature into F"""
if val in ['', 32767]:
return None
return temperature(val / 100., 'C').value('F')
def get_speed(val):
""" Convert a speed value """
if val in ['', 255]:
return None
return speed(val, 'KMH').value('KT')
def merge(atmos, surface):
"""Create a dictionary of data based on these two dataframes
Args:
atmos (DataFrame): atmospherics
surface (DataFrame): surface data
Returns:
dictionary of values
"""
data = {}
# Do what we can with the atmospheric data
for _, row in atmos.iterrows():
nwsli = get_nwsli(row['Rpuid'])
if nwsli is None:
if int(row['Rpuid']) not in KNOWN_UNKNOWNS:
print(('process_rwis: Unknown Rpuid: %s in atmos'
'') % (row['Rpuid'],))
continue
if nwsli not in data:
data[nwsli] = {}
# Timestamp
ts = datetime.datetime.strptime(row['DtTm'], '%m/%d/%y %H:%M')
data[nwsli]['valid'] = ts.replace(tzinfo=pytz.UTC)
data[nwsli]['tmpf'] = get_temp(row['AirTemp'])
data[nwsli]['dwpf'] = get_temp(row['Dewpoint'])
if data[nwsli]['tmpf'] is not None and data[nwsli]['dwpf'] is not None:
data[nwsli]['relh'] = mcalc.relative_humidity_from_dewpoint(
data[nwsli]['tmpf'] * units('degF'),
data[nwsli]['dwpf'] * units('degF')).magnitude * 100.
# Rh is unused
data[nwsli]['sknt'] = get_speed(row['SpdAvg'])
data[nwsli]['gust'] = get_speed(row['SpdGust'])
if row['DirMin'] not in ['', 32767, np.nan]:
data[nwsli]['drct'] = row['DirMin']
# DirMax is unused
# Pressure is not reported
# PcIntens
# PcType
# PcRate
if row['PcAccum'] not in ['', -1, 32767, np.nan]:
data[nwsli]['pday'] = row['PcAccum'] * 0.00098425
if row['Visibility'] not in ['', -1, 32767, np.nan]:
data[nwsli]['vsby'] = row['Visibility'] / 1609.344
# Do what we can with the surface data
for _, row in surface.iterrows():
nwsli = get_nwsli(row['Rpuid'])
if nwsli is None:
if int(row['Rpuid']) not in KNOWN_UNKNOWNS:
print(('process_rwis: Unknown Rpuid: %s in sfc'
'') % (row['Rpuid'],))
continue
ts = datetime.datetime.strptime(row['DtTm'], '%m/%d/%y %H:%M')
ts = ts.replace(tzinfo=pytz.UTC)
if nwsli not in data:
data[nwsli] = {'valid': ts}
sensorid = int(row['Senid'])
key = 'sfvalid%s' % (sensorid,)
data[nwsli][key] = ts
key = 'scond%s' % (sensorid,)
data[nwsli][key] = row['sfcond']
# sftemp -150
key = 'tsf%s' % (sensorid,)
data[nwsli][key] = get_temp(row['sftemp'])
# frztemp 32767
# chemfactor 0
# chempct 101
# depth 32767
# icepct 101
# subsftemp NaN
key = 'tsub%s' % (sensorid,)
data[nwsli][key] = get_temp(row['subsftemp'])
# waterlevel NaN
# Unnamed: 13 NaN
# Unnamed: 14 NaN
return data
def do_windalerts(obs):
"""Iterate through the obs and do wind alerts where appropriate"""
for sid in obs:
# Problem sites with lightning issues
|
def do_iemtracker(obs):
"""Iterate over the obs and do IEM Tracker related activities """
threshold = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
threshold = threshold.replace(tzinfo=pytz.UTC)
tracker = TrackerEngine(IEM.cursor(), PORTFOLIO.cursor())
tracker.process_network(obs, 'iarwis', NT, threshold)
tracker.send_emails()
IEM.commit()
PORTFOLIO.commit()
def METARtemp(val):
"""convert temp to METAR"""
f_temp = float(val)
i_temp = int(round(f_temp, 0))
f1_temp = int(round(f_temp * 10., 0))
if i_temp < 0:
i_temp = 0 - i_temp
m_temp = "M%02i" % (i_temp,)
else:
m_temp = "%02i" % (i_temp,)
if f1_temp < 0:
t_temp = "1%03i" % (0 - f1_temp,)
else:
t_temp = "0%03i" % (f1_temp, )
return m_temp, t_temp
def METARwind(sknt, drct, gust):
"""convert to METAR"""
s = ""
d5 = drct
if str(d5)[-1] == "5":
d5 -= 5
s += "%03.0f%02.0f" % (d5, sknt)
if gust is not None:
s += "G%02.0f" % (gust, )
s += "KT"
return s
def gen_metars(obs, filename, convids=False):
"""Create METAR Data files
Args:
obs (list): list of dictionaries with obs in them
filename (str): filename to write data to
convids (bool): should we use special logic for ID conversion
"""
mtime = datetime.datetime.utcnow().strftime("%d%H%M")
thres = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
thres = thres.replace(tzinfo=pytz.UTC)
fp = open(filename, 'w')
fp.write("\001\015\015\012001\n")
fp.write("SAUS43 KDMX %s\015\015\012METAR\015\015\012" % (mtime, ))
for sid in obs:
ob = obs[sid]
if ob['valid'] < thres:
continue
if sid in ["RIOI4", "ROSI4", "RSMI4", 'RMCI4']:
continue
metarid = sid[:4]
remoteid = NT.sts[sid]['remote_id']
if convids:
metarid = RWIS2METAR.get("%02i" % (remoteid,), 'XXXX')
temptxt = ""
t_temptxt = ""
windtxt = ""
if ob.get('sknt') is not None and ob.get('drct') is not None:
windtxt = METARwind(ob['sknt'], ob['drct'], ob.get('gust'))
if obs.get('tmpf') is not None and obs.get('dwpf') is not None:
m_tmpc, t_tmpc = METARtemp(temperature(ob['tmpf'], 'F').value('C'))
m_dwpc, t_dwpc = METARtemp(temperature(ob['dwpf'], 'F').value('C'))
temptxt = "%s/%s" % (m_tmpc, m_dwpc)
t_temptxt = "T%s%s " % (t_tmpc, t_dwpc)
fp.write(("%s %s %s %s RMK AO2 %s%s\015\015\012"
"") % (metarid, ob['valid'].strftime("%d%H%MZ"),
windtxt, temptxt, t_temptxt, "="))
fp.write("\015\015\012\003")
fp.close()
def update_iemaccess(obs):
"""Update the IEMAccess database"""
icursor = IEM.cursor()
for sid in obs:
ob = obs[sid]
iemob = Observation(sid, "IA_RWIS", ob['valid'])
for varname in ['tmpf', 'dwpf', 'drct', 'sknt', 'gust', 'vsby',
'pday', 'tsf0', 'tsf1', 'tsf2', 'tsf3', 'scond0',
'scond1', 'scond2', 'scond3', 'relh']:
# Don't insert NaN values into iemaccess
thisval = ob.get(varname)
if thisval is None:
continue
# strings fail the isnan check
if isinstance(thisval, str):
iemob.data[varname] = ob.get(varname)
elif not np.isnan(thisval):
iemob.data[varname] = ob.get(varname)
for varname in ['tsub0', 'tsub1', 'tsub2', 'tsub3']:
if ob.get(varname) is not None:
iemob.data['rwis_subf'] = ob.get(varname)
break
iemob.save(icursor)
icursor.close()
IEM.commit()
def fetch_files():
"""Download the files we need"""
props = util.get_properties()
# get atmosfn
atmosfn = "%s/rwis.txt" % (INCOMING, )
try:
ftp = ftplib.FTP('165.206.203.34')
except TimeoutError as _exp:
print("process_rwis FTP Server Timeout")
sys.exit()
ftp.login('rwis', props['rwis_ftp_password'])
ftp.retrbinary('RETR ExpApAirData.txt', open(atmosfn, 'wb').write)
# Insert into LDM
pqstr = "plot ac %s rwis.txt raw/rwis/%sat.txt txt" % (GTS, GTS)
subprocess.call(("/home/ldm/bin/pqinsert -i -p '%s' %s "
"") % (pqstr, atmosfn), shell=True)
# get sfcfn
sfcfn = "%s/rwis_sf.txt" % (INCOMING, )
ftp.retrbinary('RETR ExpSfData.txt', open(sfcfn, 'wb').write)
ftp.close()
# Insert into LDM
pqstr = "plot ac %s rwis_sf.txt raw/rwis/%ssf.txt txt" % (GTS, GTS)
subprocess.call(("/home/ldm/bin/pqinsert -i -p '%s' %s "
"") % (pqstr, sfcfn), shell=True)
return atmosfn, sfcfn
def ldm_insert_metars(fn1, fn2):
""" Insert into LDM please """
for fn in [fn1, fn2]:
proc = subprocess.Popen(("/home/ldm/bin/pqinsert -p '%s' %s"
) % (fn.replace("/tmp/", ""), fn),
shell=True, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
os.waitpid(proc.pid, 0)
os.unlink(fn)
def main():
"""Go Main Go"""
(atmosfn, sfcfn) = fetch_files()
atmos = pd.read_csv(atmosfn)
surface = pd.read_csv(sfcfn)
obs = merge(atmos, surface)
do_windalerts(obs)
do_iemtracker(obs)
ts = datetime.datetime.utcnow().strftime("%d%H%M")
fn1 = "/tmp/IArwis%s.sao" % (ts, )
fn2 = "/tmp/IA.rwis%s.sao" % (ts, )
gen_metars(obs, fn1, False)
gen_metars(obs, fn2, True)
ldm_insert_metars(fn1, fn2)
# Discontinued rwis.csv generation, does not appear to be used, I hope
update_iemaccess(obs)
if __name__ == '__main__':
main()
IEM.commit()
PORTFOLIO.commit()
| if sid in ['RBFI4', 'RTMI4', 'RWII4', 'RCAI4', 'RDYI4',
'RDNI4', 'RCDI4', 'RCII4', 'RCLI4', 'VCTI4',
'RGAI4', 'RAVI4']:
continue
ob = obs[sid]
# screening
if ob.get('gust') is None or ob['gust'] < 40:
continue
if np.isnan(ob['gust']):
continue
smph = speed(ob['gust'], 'KT').value('MPH')
if smph < 50:
continue
if smph > 100:
print(('process_rwis did not relay gust %.1f MPH from %s'
'') % (smph, sid))
continue
# Use a hacky tmp file to denote a wind alert that was sent
fn = "/tmp/iarwis.%s.%s" % (sid, ob['valid'].strftime("%Y%m%d%H%M"))
if os.path.isfile(fn):
continue
o = open(fn, 'w')
o.write(" ")
o.close()
lts = ob['valid'].astimezone(pytz.timezone("America/Chicago"))
stname = NT.sts[sid]['name']
msg = ("At %s, a wind gust of %.1f mph (%.1f kts) was recorded "
"at the %s (%s) Iowa RWIS station"
"") % (lts.strftime("%I:%M %p %d %b %Y"), smph, ob['gust'],
stname, sid)
mt = MIMEText(msg)
mt['From'] = 'akrherz@iastate.edu'
# mt['To'] = 'akrherz@iastate.edu'
mt['To'] = 'iarwis-alert@mesonet.agron.iastate.edu'
mt['Subject'] = 'Iowa RWIS Wind Gust %.0f mph %s' % (smph, stname)
s = smtplib.SMTP('mailhub.iastate.edu')
s.sendmail(mt['From'], [mt['To']], mt.as_string())
s.quit() | conditional_block |
process_rwis.py | """Process the IDOT RWIS Data files"""
from __future__ import print_function
import datetime
import os
import sys
import smtplib
import ftplib
import subprocess
from email.mime.text import MIMEText
import pandas as pd
import pytz
import numpy as np
from metpy.units import units
import metpy.calc as mcalc
from pyiem.tracker import TrackerEngine
from pyiem.datatypes import temperature, speed
from pyiem.network import Table as NetworkTable
from pyiem.observation import Observation
from pyiem import util
GTS = sys.argv[1]
NT = NetworkTable('IA_RWIS')
IEM = util.get_dbconn('iem')
PORTFOLIO = util.get_dbconn('portfolio')
INCOMING = "/mesonet/data/incoming/rwis"
RWIS2METAR = {'00': 'XADA', '01': 'XALG', '02': 'XATN', '03': 'XALT',
'04': 'XAME', '05': 'XANK', '06': 'XAVO', '07': 'XBUR',
'08': 'XCAR', '09': 'XCDR', '10': 'XCID', '11': 'XCEN',
'12': 'XCOU', '13': 'XCRE', '14': 'XDAV', '15': 'XDEC',
'16': 'XDSM', '17': 'XDES', '18': 'XDST', '19': 'XDEW',
'20': 'XDUB', '21': 'XFOD', '22': 'XGRI', '23': 'XIAC',
'24': 'XIOW', '25': 'XJEF', '26': 'XLEO', '27': 'XMAN',
'28': 'XMAQ', '29': 'XMAR', '30': 'XMCW', '31': 'XMIS',
'32': 'XMOU', '33': 'XNEW', '34': 'XONA', '35': 'XOSC',
'36': 'XOTT', '37': 'XPEL', '38': 'XRED', '39': 'XSID',
'40': 'XSIG', '41': 'XSIO', '42': 'XSPE', '43': 'XSTO',
'44': 'XTIP', '45': 'XURB', '46': 'XWAT', '47': 'XWIL',
'48': 'XWBG', '49': 'XHAN', '50': 'XSBI', '51': 'XIGI',
'52': 'XCRI', '53': 'XCFI', '54': 'XSYI', '55': 'XBFI',
'56': 'XDYI', '57': 'XTMI', '58': 'XPFI', '59': 'XCTI',
'60': 'XDNI', '61': 'XQCI', '62': 'XSMI', '63': 'XRWI',
'64': 'XETI', '65': 'XCCI', '66': 'XKSI', '67': 'XKNI',
'68': 'XCMI', '69': 'XRGI', '70': 'XKYI', '72': 'XCTI'}
KNOWN_UNKNOWNS = []
def get_nwsli(rpuid):
"""Lookup a rpuid and return the NWSLI"""
rpuid = int(rpuid)
for sid in NT.sts:
if NT.sts[sid]['remote_id'] == rpuid:
return sid
return None
def get_temp(val):
|
def get_speed(val):
""" Convert a speed value """
if val in ['', 255]:
return None
return speed(val, 'KMH').value('KT')
def merge(atmos, surface):
"""Create a dictionary of data based on these two dataframes
Args:
atmos (DataFrame): atmospherics
surface (DataFrame): surface data
Returns:
dictionary of values
"""
data = {}
# Do what we can with the atmospheric data
for _, row in atmos.iterrows():
nwsli = get_nwsli(row['Rpuid'])
if nwsli is None:
if int(row['Rpuid']) not in KNOWN_UNKNOWNS:
print(('process_rwis: Unknown Rpuid: %s in atmos'
'') % (row['Rpuid'],))
continue
if nwsli not in data:
data[nwsli] = {}
# Timestamp
ts = datetime.datetime.strptime(row['DtTm'], '%m/%d/%y %H:%M')
data[nwsli]['valid'] = ts.replace(tzinfo=pytz.UTC)
data[nwsli]['tmpf'] = get_temp(row['AirTemp'])
data[nwsli]['dwpf'] = get_temp(row['Dewpoint'])
if data[nwsli]['tmpf'] is not None and data[nwsli]['dwpf'] is not None:
data[nwsli]['relh'] = mcalc.relative_humidity_from_dewpoint(
data[nwsli]['tmpf'] * units('degF'),
data[nwsli]['dwpf'] * units('degF')).magnitude * 100.
# Rh is unused
data[nwsli]['sknt'] = get_speed(row['SpdAvg'])
data[nwsli]['gust'] = get_speed(row['SpdGust'])
if row['DirMin'] not in ['', 32767, np.nan]:
data[nwsli]['drct'] = row['DirMin']
# DirMax is unused
# Pressure is not reported
# PcIntens
# PcType
# PcRate
if row['PcAccum'] not in ['', -1, 32767, np.nan]:
data[nwsli]['pday'] = row['PcAccum'] * 0.00098425
if row['Visibility'] not in ['', -1, 32767, np.nan]:
data[nwsli]['vsby'] = row['Visibility'] / 1609.344
# Do what we can with the surface data
for _, row in surface.iterrows():
nwsli = get_nwsli(row['Rpuid'])
if nwsli is None:
if int(row['Rpuid']) not in KNOWN_UNKNOWNS:
print(('process_rwis: Unknown Rpuid: %s in sfc'
'') % (row['Rpuid'],))
continue
ts = datetime.datetime.strptime(row['DtTm'], '%m/%d/%y %H:%M')
ts = ts.replace(tzinfo=pytz.UTC)
if nwsli not in data:
data[nwsli] = {'valid': ts}
sensorid = int(row['Senid'])
key = 'sfvalid%s' % (sensorid,)
data[nwsli][key] = ts
key = 'scond%s' % (sensorid,)
data[nwsli][key] = row['sfcond']
# sftemp -150
key = 'tsf%s' % (sensorid,)
data[nwsli][key] = get_temp(row['sftemp'])
# frztemp 32767
# chemfactor 0
# chempct 101
# depth 32767
# icepct 101
# subsftemp NaN
key = 'tsub%s' % (sensorid,)
data[nwsli][key] = get_temp(row['subsftemp'])
# waterlevel NaN
# Unnamed: 13 NaN
# Unnamed: 14 NaN
return data
def do_windalerts(obs):
"""Iterate through the obs and do wind alerts where appropriate"""
for sid in obs:
# Problem sites with lightning issues
if sid in ['RBFI4', 'RTMI4', 'RWII4', 'RCAI4', 'RDYI4',
'RDNI4', 'RCDI4', 'RCII4', 'RCLI4', 'VCTI4',
'RGAI4', 'RAVI4']:
continue
ob = obs[sid]
# screening
if ob.get('gust') is None or ob['gust'] < 40:
continue
if np.isnan(ob['gust']):
continue
smph = speed(ob['gust'], 'KT').value('MPH')
if smph < 50:
continue
if smph > 100:
print(('process_rwis did not relay gust %.1f MPH from %s'
'') % (smph, sid))
continue
# Use a hacky tmp file to denote a wind alert that was sent
fn = "/tmp/iarwis.%s.%s" % (sid, ob['valid'].strftime("%Y%m%d%H%M"))
if os.path.isfile(fn):
continue
o = open(fn, 'w')
o.write(" ")
o.close()
lts = ob['valid'].astimezone(pytz.timezone("America/Chicago"))
stname = NT.sts[sid]['name']
msg = ("At %s, a wind gust of %.1f mph (%.1f kts) was recorded "
"at the %s (%s) Iowa RWIS station"
"") % (lts.strftime("%I:%M %p %d %b %Y"), smph, ob['gust'],
stname, sid)
mt = MIMEText(msg)
mt['From'] = 'akrherz@iastate.edu'
# mt['To'] = 'akrherz@iastate.edu'
mt['To'] = 'iarwis-alert@mesonet.agron.iastate.edu'
mt['Subject'] = 'Iowa RWIS Wind Gust %.0f mph %s' % (smph, stname)
s = smtplib.SMTP('mailhub.iastate.edu')
s.sendmail(mt['From'], [mt['To']], mt.as_string())
s.quit()
def do_iemtracker(obs):
"""Iterate over the obs and do IEM Tracker related activities """
threshold = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
threshold = threshold.replace(tzinfo=pytz.UTC)
tracker = TrackerEngine(IEM.cursor(), PORTFOLIO.cursor())
tracker.process_network(obs, 'iarwis', NT, threshold)
tracker.send_emails()
IEM.commit()
PORTFOLIO.commit()
def METARtemp(val):
"""convert temp to METAR"""
f_temp = float(val)
i_temp = int(round(f_temp, 0))
f1_temp = int(round(f_temp * 10., 0))
if i_temp < 0:
i_temp = 0 - i_temp
m_temp = "M%02i" % (i_temp,)
else:
m_temp = "%02i" % (i_temp,)
if f1_temp < 0:
t_temp = "1%03i" % (0 - f1_temp,)
else:
t_temp = "0%03i" % (f1_temp, )
return m_temp, t_temp
def METARwind(sknt, drct, gust):
"""convert to METAR"""
s = ""
d5 = drct
if str(d5)[-1] == "5":
d5 -= 5
s += "%03.0f%02.0f" % (d5, sknt)
if gust is not None:
s += "G%02.0f" % (gust, )
s += "KT"
return s
def gen_metars(obs, filename, convids=False):
"""Create METAR Data files
Args:
obs (list): list of dictionaries with obs in them
filename (str): filename to write data to
convids (bool): should we use special logic for ID conversion
"""
mtime = datetime.datetime.utcnow().strftime("%d%H%M")
thres = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
thres = thres.replace(tzinfo=pytz.UTC)
fp = open(filename, 'w')
fp.write("\001\015\015\012001\n")
fp.write("SAUS43 KDMX %s\015\015\012METAR\015\015\012" % (mtime, ))
for sid in obs:
ob = obs[sid]
if ob['valid'] < thres:
continue
if sid in ["RIOI4", "ROSI4", "RSMI4", 'RMCI4']:
continue
metarid = sid[:4]
remoteid = NT.sts[sid]['remote_id']
if convids:
metarid = RWIS2METAR.get("%02i" % (remoteid,), 'XXXX')
temptxt = ""
t_temptxt = ""
windtxt = ""
if ob.get('sknt') is not None and ob.get('drct') is not None:
windtxt = METARwind(ob['sknt'], ob['drct'], ob.get('gust'))
if obs.get('tmpf') is not None and obs.get('dwpf') is not None:
m_tmpc, t_tmpc = METARtemp(temperature(ob['tmpf'], 'F').value('C'))
m_dwpc, t_dwpc = METARtemp(temperature(ob['dwpf'], 'F').value('C'))
temptxt = "%s/%s" % (m_tmpc, m_dwpc)
t_temptxt = "T%s%s " % (t_tmpc, t_dwpc)
fp.write(("%s %s %s %s RMK AO2 %s%s\015\015\012"
"") % (metarid, ob['valid'].strftime("%d%H%MZ"),
windtxt, temptxt, t_temptxt, "="))
fp.write("\015\015\012\003")
fp.close()
def update_iemaccess(obs):
    """Persist the merged observations into the IEMAccess database."""
    icursor = IEM.cursor()
    tracked = ['tmpf', 'dwpf', 'drct', 'sknt', 'gust', 'vsby',
               'pday', 'tsf0', 'tsf1', 'tsf2', 'tsf3', 'scond0',
               'scond1', 'scond2', 'scond3', 'relh']
    for sid in obs:
        ob = obs[sid]
        iemob = Observation(sid, "IA_RWIS", ob['valid'])
        for varname in tracked:
            value = ob.get(varname)
            # Don't insert NaN values into iemaccess; strings cannot be
            # handed to np.isnan, so they are accepted as-is
            if value is None:
                continue
            if isinstance(value, str) or not np.isnan(value):
                iemob.data[varname] = value
        # First available sub-surface temperature sensor wins
        for varname in ['tsub0', 'tsub1', 'tsub2', 'tsub3']:
            if ob.get(varname) is not None:
                iemob.data['rwis_subf'] = ob.get(varname)
                break
        iemob.save(icursor)
    icursor.close()
    IEM.commit()
def fetch_files():
"""Download the files we need"""
props = util.get_properties()
# get atmosfn
atmosfn = "%s/rwis.txt" % (INCOMING, )
try:
ftp = ftplib.FTP('165.206.203.34')
except TimeoutError as _exp:
print("process_rwis FTP Server Timeout")
sys.exit()
ftp.login('rwis', props['rwis_ftp_password'])
ftp.retrbinary('RETR ExpApAirData.txt', open(atmosfn, 'wb').write)
# Insert into LDM
pqstr = "plot ac %s rwis.txt raw/rwis/%sat.txt txt" % (GTS, GTS)
subprocess.call(("/home/ldm/bin/pqinsert -i -p '%s' %s "
"") % (pqstr, atmosfn), shell=True)
# get sfcfn
sfcfn = "%s/rwis_sf.txt" % (INCOMING, )
ftp.retrbinary('RETR ExpSfData.txt', open(sfcfn, 'wb').write)
ftp.close()
# Insert into LDM
pqstr = "plot ac %s rwis_sf.txt raw/rwis/%ssf.txt txt" % (GTS, GTS)
subprocess.call(("/home/ldm/bin/pqinsert -i -p '%s' %s "
"") % (pqstr, sfcfn), shell=True)
return atmosfn, sfcfn
def ldm_insert_metars(fn1, fn2):
""" Insert into LDM please """
for fn in [fn1, fn2]:
proc = subprocess.Popen(("/home/ldm/bin/pqinsert -p '%s' %s"
) % (fn.replace("/tmp/", ""), fn),
shell=True, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
os.waitpid(proc.pid, 0)
os.unlink(fn)
def main():
"""Go Main Go"""
(atmosfn, sfcfn) = fetch_files()
atmos = pd.read_csv(atmosfn)
surface = pd.read_csv(sfcfn)
obs = merge(atmos, surface)
do_windalerts(obs)
do_iemtracker(obs)
ts = datetime.datetime.utcnow().strftime("%d%H%M")
fn1 = "/tmp/IArwis%s.sao" % (ts, )
fn2 = "/tmp/IA.rwis%s.sao" % (ts, )
gen_metars(obs, fn1, False)
gen_metars(obs, fn2, True)
ldm_insert_metars(fn1, fn2)
# Discontinued rwis.csv generation, does not appear to be used, I hope
update_iemaccess(obs)
if __name__ == '__main__':
main()
IEM.commit()
PORTFOLIO.commit()
| """Attempt to convert a RWIS temperature into F"""
if val in ['', 32767]:
return None
return temperature(val / 100., 'C').value('F') | identifier_body |
process_rwis.py | """Process the IDOT RWIS Data files"""
from __future__ import print_function
import datetime
import os
import sys
import smtplib
import ftplib
import subprocess
from email.mime.text import MIMEText
import pandas as pd
import pytz
import numpy as np
from metpy.units import units
import metpy.calc as mcalc
from pyiem.tracker import TrackerEngine
from pyiem.datatypes import temperature, speed
from pyiem.network import Table as NetworkTable
from pyiem.observation import Observation
from pyiem import util
GTS = sys.argv[1]
NT = NetworkTable('IA_RWIS')
IEM = util.get_dbconn('iem')
PORTFOLIO = util.get_dbconn('portfolio')
INCOMING = "/mesonet/data/incoming/rwis"
RWIS2METAR = {'00': 'XADA', '01': 'XALG', '02': 'XATN', '03': 'XALT',
'04': 'XAME', '05': 'XANK', '06': 'XAVO', '07': 'XBUR',
'08': 'XCAR', '09': 'XCDR', '10': 'XCID', '11': 'XCEN',
'12': 'XCOU', '13': 'XCRE', '14': 'XDAV', '15': 'XDEC',
'16': 'XDSM', '17': 'XDES', '18': 'XDST', '19': 'XDEW',
'20': 'XDUB', '21': 'XFOD', '22': 'XGRI', '23': 'XIAC',
'24': 'XIOW', '25': 'XJEF', '26': 'XLEO', '27': 'XMAN',
'28': 'XMAQ', '29': 'XMAR', '30': 'XMCW', '31': 'XMIS',
'32': 'XMOU', '33': 'XNEW', '34': 'XONA', '35': 'XOSC',
'36': 'XOTT', '37': 'XPEL', '38': 'XRED', '39': 'XSID',
'40': 'XSIG', '41': 'XSIO', '42': 'XSPE', '43': 'XSTO',
'44': 'XTIP', '45': 'XURB', '46': 'XWAT', '47': 'XWIL',
'48': 'XWBG', '49': 'XHAN', '50': 'XSBI', '51': 'XIGI',
'52': 'XCRI', '53': 'XCFI', '54': 'XSYI', '55': 'XBFI',
'56': 'XDYI', '57': 'XTMI', '58': 'XPFI', '59': 'XCTI',
'60': 'XDNI', '61': 'XQCI', '62': 'XSMI', '63': 'XRWI',
'64': 'XETI', '65': 'XCCI', '66': 'XKSI', '67': 'XKNI',
'68': 'XCMI', '69': 'XRGI', '70': 'XKYI', '72': 'XCTI'}
KNOWN_UNKNOWNS = []
def get_nwsli(rpuid):
    """Look up the station whose RWIS remote_id matches the given RPU id.

    Args:
      rpuid: RPU identifier from the vendor data file (str or int).

    Returns:
      The NWSLI station identifier, or ``None`` when no station matches.
    """
    target = int(rpuid)
    return next(
        (sid for sid in NT.sts if NT.sts[sid]['remote_id'] == target),
        None,
    )
def get_temp(val):
    """Convert a raw RWIS temperature to Fahrenheit.

    The feed reports hundredths of a degree Celsius and uses '' and
    32767 as missing-data sentinels.

    Returns:
      Temperature in deg F, or ``None`` for sentinel values.
    """
    if val == '' or val == 32767:
        return None
    return temperature(val / 100.0, 'C').value('F')
def get_speed(val):
    """Convert a raw RWIS wind speed (km/h) to knots.

    The feed uses '' and 255 as missing-data sentinels.

    Returns:
      Speed in knots, or ``None`` for sentinel values.
    """
    if val == '' or val == 255:
        return None
    return speed(val, 'KMH').value('KT')
def merge(atmos, surface):
    """Merge the atmospheric and surface DataFrames into one obs mapping.

    Args:
      atmos (DataFrame): atmospherics, one row per RPU report
      surface (DataFrame): surface/pavement data, one row per sensor report
    Returns:
      dict keyed by NWSLI station id; each value is a dict of observation
      values (``valid`` timestamp plus whatever variables were decodable)
    """
    data = {}
    # Do what we can with the atmospheric data
    for _, row in atmos.iterrows():
        nwsli = get_nwsli(row['Rpuid'])
        if nwsli is None:
            # Only warn for RPU ids not explicitly whitelisted as unknown
            if int(row['Rpuid']) not in KNOWN_UNKNOWNS:
                print(('process_rwis: Unknown Rpuid: %s in atmos'
                       '') % (row['Rpuid'],))
            continue
        if nwsli not in data:
            data[nwsli] = {}
        # Timestamp (feed times are UTC)
        ts = datetime.datetime.strptime(row['DtTm'], '%m/%d/%y %H:%M')
        data[nwsli]['valid'] = ts.replace(tzinfo=pytz.UTC)
        data[nwsli]['tmpf'] = get_temp(row['AirTemp'])
        data[nwsli]['dwpf'] = get_temp(row['Dewpoint'])
        # Derive relative humidity only when both temp and dewpoint decoded
        if data[nwsli]['tmpf'] is not None and data[nwsli]['dwpf'] is not None:
            data[nwsli]['relh'] = mcalc.relative_humidity_from_dewpoint(
                data[nwsli]['tmpf'] * units('degF'),
                data[nwsli]['dwpf'] * units('degF')).magnitude * 100.
        # Rh is unused
        data[nwsli]['sknt'] = get_speed(row['SpdAvg'])
        data[nwsli]['gust'] = get_speed(row['SpdGust'])
        # NOTE(review): ``x not in ['', 32767, np.nan]`` does NOT screen out
        # NaN, since NaN != NaN under the ``in`` equality test -- a NaN
        # DirMin/PcAccum/Visibility can slip through here; verify upstream
        if row['DirMin'] not in ['', 32767, np.nan]:
            data[nwsli]['drct'] = row['DirMin']
        # DirMax is unused
        # Pressure is not reported
        # PcIntens
        # PcType
        # PcRate
        # 0.00098425 in per raw count (~0.025 mm per count, presumably --
        # TODO confirm against the vendor spec)
        if row['PcAccum'] not in ['', -1, 32767, np.nan]:
            data[nwsli]['pday'] = row['PcAccum'] * 0.00098425
        # 1609.344 m per statute mile; raw value presumably meters
        if row['Visibility'] not in ['', -1, 32767, np.nan]:
            data[nwsli]['vsby'] = row['Visibility'] / 1609.344
    # Do what we can with the surface data
    for _, row in surface.iterrows():
        nwsli = get_nwsli(row['Rpuid'])
        if nwsli is None:
            if int(row['Rpuid']) not in KNOWN_UNKNOWNS:
                print(('process_rwis: Unknown Rpuid: %s in sfc'
                       '') % (row['Rpuid'],))
            continue
        ts = datetime.datetime.strptime(row['DtTm'], '%m/%d/%y %H:%M')
        ts = ts.replace(tzinfo=pytz.UTC)
        # A surface-only station may not have appeared in the atmos loop
        if nwsli not in data:
            data[nwsli] = {'valid': ts}
        # Surface variables are keyed per pavement sensor id (0..3)
        sensorid = int(row['Senid'])
        key = 'sfvalid%s' % (sensorid,)
        data[nwsli][key] = ts
        key = 'scond%s' % (sensorid,)
        data[nwsli][key] = row['sfcond']
        # sftemp -150
        key = 'tsf%s' % (sensorid,)
        data[nwsli][key] = get_temp(row['sftemp'])
        # frztemp 32767
        # chemfactor 0
        # chempct 101
        # depth 32767
        # icepct 101
        # subsftemp NaN
        key = 'tsub%s' % (sensorid,)
        data[nwsli][key] = get_temp(row['subsftemp'])
        # waterlevel NaN
        # Unnamed: 13 NaN
        # Unnamed: 14 NaN
    return data
def do_windalerts(obs):
    """Email a one-time alert for each significant wind gust report."""
    # Problem sites with lightning issues
    blacklist = ['RBFI4', 'RTMI4', 'RWII4', 'RCAI4', 'RDYI4',
                 'RDNI4', 'RCDI4', 'RCII4', 'RCLI4', 'VCTI4',
                 'RGAI4', 'RAVI4']
    for sid in obs:
        if sid in blacklist:
            continue
        ob = obs[sid]
        gust = ob.get('gust')
        # Screening: require a real (non-NaN) gust of at least 40 kt
        if gust is None or gust < 40 or np.isnan(gust):
            continue
        smph = speed(gust, 'KT').value('MPH')
        if smph < 50:
            continue
        if smph > 100:
            # Implausibly high values are not relayed, just logged
            print(('process_rwis did not relay gust %.1f MPH from %s'
                   '') % (smph, sid))
            continue
        # Use a hacky tmp file to denote a wind alert that was sent
        marker = "/tmp/iarwis.%s.%s" % (sid,
                                        ob['valid'].strftime("%Y%m%d%H%M"))
        if os.path.isfile(marker):
            continue
        with open(marker, 'w') as fh:
            fh.write(" ")
        lts = ob['valid'].astimezone(pytz.timezone("America/Chicago"))
        stname = NT.sts[sid]['name']
        msg = ("At %s, a wind gust of %.1f mph (%.1f kts) was recorded "
               "at the %s (%s) Iowa RWIS station"
               "") % (lts.strftime("%I:%M %p %d %b %Y"), smph, gust,
                      stname, sid)
        email = MIMEText(msg)
        email['From'] = 'akrherz@iastate.edu'
        # email['To'] = 'akrherz@iastate.edu'
        email['To'] = 'iarwis-alert@mesonet.agron.iastate.edu'
        email['Subject'] = 'Iowa RWIS Wind Gust %.0f mph %s' % (smph, stname)
        server = smtplib.SMTP('mailhub.iastate.edu')
        server.sendmail(email['From'], [email['To']], email.as_string())
        server.quit()
def do_iemtracker(obs):
    """Run IEM Tracker bookkeeping for this network and commit."""
    # Reports older than three hours are considered offline
    threshold = (datetime.datetime.utcnow() -
                 datetime.timedelta(hours=3)).replace(tzinfo=pytz.UTC)
    tracker = TrackerEngine(IEM.cursor(), PORTFOLIO.cursor())
    tracker.process_network(obs, 'iarwis', NT, threshold)
    tracker.send_emails()
    IEM.commit()
    PORTFOLIO.commit()
def METARtemp(val):
    """Encode a Celsius temperature for the METAR body and T-group.

    Args:
      val: temperature in degrees Celsius.

    Returns:
      (body, tgroup) tuple: ``body`` is the whole-degree form with an
      ``M`` prefix when below zero; ``tgroup`` is the tenths-of-degree
      form with a leading 0/1 sign digit, per the METAR remark format.
    """
    value = float(val)
    whole = int(round(value, 0))
    tenths = int(round(value * 10., 0))
    if whole < 0:
        body = "M%02i" % (-whole,)
    else:
        body = "%02i" % (whole,)
    if tenths < 0:
        tgroup = "1%03i" % (-tenths,)
    else:
        tgroup = "0%03i" % (tenths,)
    return body, tgroup
def METARwind(sknt, drct, gust):
    """Build a METAR wind group ``dddff[Ggg]KT``.

    Directions ending in 5 are rounded down to the nearest ten degrees,
    since the direction field only encodes tens of degrees.

    Args:
      sknt: sustained speed in knots.
      drct: direction in degrees.
      gust: gust speed in knots, or None to omit the gust group.
    """
    direction = drct
    if str(direction)[-1] == "5":
        direction -= 5
    group = "%03.0f%02.0f" % (direction, sknt)
    if gust is not None:
        group += "G%02.0f" % (gust,)
    return group + "KT"
def gen_metars(obs, filename, convids=False):
    """Create a METAR data file from the observations.

    Args:
      obs (dict): observations keyed by NWSLI station id
      filename (str): filename to write data to
      convids (bool): should we use special logic for ID conversion
    """
    mtime = datetime.datetime.utcnow().strftime("%d%H%M")
    # Skip reports older than three hours
    thres = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
    thres = thres.replace(tzinfo=pytz.UTC)
    fp = open(filename, 'w')
    # WMO product framing (SOH / CR CR LF control characters)
    fp.write("\001\015\015\012001\n")
    fp.write("SAUS43 KDMX %s\015\015\012METAR\015\015\012" % (mtime, ))
    for sid in obs:
        ob = obs[sid]
        if ob['valid'] < thres:
            continue
        if sid in ["RIOI4", "ROSI4", "RSMI4", 'RMCI4']:
            continue
        metarid = sid[:4]
        remoteid = NT.sts[sid]['remote_id']
        if convids:
            metarid = RWIS2METAR.get("%02i" % (remoteid,), 'XXXX')
        temptxt = ""
        t_temptxt = ""
        windtxt = ""
        if ob.get('sknt') is not None and ob.get('drct') is not None:
            windtxt = METARwind(ob['sknt'], ob['drct'], ob.get('gust'))
        # BUGFIX: previously checked ``obs`` (the station dict) instead of
        # ``ob`` (this station's data), so the condition was always False
        # and temperature groups were never encoded
        if ob.get('tmpf') is not None and ob.get('dwpf') is not None:
            m_tmpc, t_tmpc = METARtemp(temperature(ob['tmpf'], 'F').value('C'))
            m_dwpc, t_dwpc = METARtemp(temperature(ob['dwpf'], 'F').value('C'))
            temptxt = "%s/%s" % (m_tmpc, m_dwpc)
            t_temptxt = "T%s%s " % (t_tmpc, t_dwpc)
        fp.write(("%s %s %s %s RMK AO2 %s%s\015\015\012"
                  "") % (metarid, ob['valid'].strftime("%d%H%MZ"),
                         windtxt, temptxt, t_temptxt, "="))
    fp.write("\015\015\012\003")
    fp.close()
def | (obs):
"""Update the IEMAccess database"""
icursor = IEM.cursor()
for sid in obs:
ob = obs[sid]
iemob = Observation(sid, "IA_RWIS", ob['valid'])
for varname in ['tmpf', 'dwpf', 'drct', 'sknt', 'gust', 'vsby',
'pday', 'tsf0', 'tsf1', 'tsf2', 'tsf3', 'scond0',
'scond1', 'scond2', 'scond3', 'relh']:
# Don't insert NaN values into iemaccess
thisval = ob.get(varname)
if thisval is None:
continue
# strings fail the isnan check
if isinstance(thisval, str):
iemob.data[varname] = ob.get(varname)
elif not np.isnan(thisval):
iemob.data[varname] = ob.get(varname)
for varname in ['tsub0', 'tsub1', 'tsub2', 'tsub3']:
if ob.get(varname) is not None:
iemob.data['rwis_subf'] = ob.get(varname)
break
iemob.save(icursor)
icursor.close()
IEM.commit()
def fetch_files():
    """Download the RWIS atmos and surface files via FTP, insert into LDM.

    Returns:
      (atmosfn, sfcfn) tuple of local filenames that were written.
    """
    props = util.get_properties()
    # get atmosfn
    atmosfn = "%s/rwis.txt" % (INCOMING, )
    try:
        ftp = ftplib.FTP('165.206.203.34')
    except TimeoutError as _exp:
        print("process_rwis FTP Server Timeout")
        sys.exit()
    ftp.login('rwis', props['rwis_ftp_password'])
    # BUGFIX: previously the 'wb' file handle was never closed; use a
    # context manager so the data is flushed before pqinsert reads it
    with open(atmosfn, 'wb') as fh:
        ftp.retrbinary('RETR ExpApAirData.txt', fh.write)
    # Insert into LDM
    pqstr = "plot ac %s rwis.txt raw/rwis/%sat.txt txt" % (GTS, GTS)
    subprocess.call(("/home/ldm/bin/pqinsert -i -p '%s' %s "
                     "") % (pqstr, atmosfn), shell=True)
    # get sfcfn
    sfcfn = "%s/rwis_sf.txt" % (INCOMING, )
    with open(sfcfn, 'wb') as fh:
        ftp.retrbinary('RETR ExpSfData.txt', fh.write)
    ftp.close()
    # Insert into LDM
    pqstr = "plot ac %s rwis_sf.txt raw/rwis/%ssf.txt txt" % (GTS, GTS)
    subprocess.call(("/home/ldm/bin/pqinsert -i -p '%s' %s "
                     "") % (pqstr, sfcfn), shell=True)
    return atmosfn, sfcfn
def ldm_insert_metars(fn1, fn2):
    """Insert the two generated METAR files into LDM, then remove them.

    Args:
      fn1 (str): path of the plain-id METAR file.
      fn2 (str): path of the converted-id METAR file.
    """
    for fn in [fn1, fn2]:
        proc = subprocess.Popen(("/home/ldm/bin/pqinsert -p '%s' %s"
                                 ) % (fn.replace("/tmp/", ""), fn),
                                shell=True, stderr=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        # BUGFIX: os.waitpid(proc.pid, 0) races Popen's own child reaping
        # and never drains the PIPEs (potential deadlock on a full buffer);
        # communicate() drains both streams and waits for exit
        proc.communicate()
        os.unlink(fn)
def main():
    """Go Main Go"""
    atmosfn, sfcfn = fetch_files()
    obs = merge(pd.read_csv(atmosfn), pd.read_csv(sfcfn))
    do_windalerts(obs)
    do_iemtracker(obs)
    # Both output products share one timestamp suffix
    stamp = datetime.datetime.utcnow().strftime("%d%H%M")
    plainfn = "/tmp/IArwis%s.sao" % (stamp, )
    convfn = "/tmp/IA.rwis%s.sao" % (stamp, )
    gen_metars(obs, plainfn, False)
    gen_metars(obs, convfn, True)
    ldm_insert_metars(plainfn, convfn)
    # Discontinued rwis.csv generation, does not appear to be used, I hope
    update_iemaccess(obs)
if __name__ == '__main__':
main()
IEM.commit()
PORTFOLIO.commit()
| update_iemaccess | identifier_name |
process_rwis.py | """Process the IDOT RWIS Data files"""
from __future__ import print_function
import datetime
import os
import sys
import smtplib
import ftplib
import subprocess
from email.mime.text import MIMEText
import pandas as pd
import pytz
import numpy as np
from metpy.units import units
import metpy.calc as mcalc
from pyiem.tracker import TrackerEngine
from pyiem.datatypes import temperature, speed
from pyiem.network import Table as NetworkTable
from pyiem.observation import Observation
from pyiem import util
GTS = sys.argv[1]
NT = NetworkTable('IA_RWIS')
IEM = util.get_dbconn('iem')
PORTFOLIO = util.get_dbconn('portfolio')
INCOMING = "/mesonet/data/incoming/rwis"
RWIS2METAR = {'00': 'XADA', '01': 'XALG', '02': 'XATN', '03': 'XALT',
'04': 'XAME', '05': 'XANK', '06': 'XAVO', '07': 'XBUR',
'08': 'XCAR', '09': 'XCDR', '10': 'XCID', '11': 'XCEN',
'12': 'XCOU', '13': 'XCRE', '14': 'XDAV', '15': 'XDEC',
'16': 'XDSM', '17': 'XDES', '18': 'XDST', '19': 'XDEW',
'20': 'XDUB', '21': 'XFOD', '22': 'XGRI', '23': 'XIAC',
'24': 'XIOW', '25': 'XJEF', '26': 'XLEO', '27': 'XMAN',
'28': 'XMAQ', '29': 'XMAR', '30': 'XMCW', '31': 'XMIS',
'32': 'XMOU', '33': 'XNEW', '34': 'XONA', '35': 'XOSC',
'36': 'XOTT', '37': 'XPEL', '38': 'XRED', '39': 'XSID',
'40': 'XSIG', '41': 'XSIO', '42': 'XSPE', '43': 'XSTO',
'44': 'XTIP', '45': 'XURB', '46': 'XWAT', '47': 'XWIL',
'48': 'XWBG', '49': 'XHAN', '50': 'XSBI', '51': 'XIGI',
'52': 'XCRI', '53': 'XCFI', '54': 'XSYI', '55': 'XBFI',
'56': 'XDYI', '57': 'XTMI', '58': 'XPFI', '59': 'XCTI',
'60': 'XDNI', '61': 'XQCI', '62': 'XSMI', '63': 'XRWI', |
KNOWN_UNKNOWNS = []
def get_nwsli(rpuid):
"""Lookup a rpuid and return the NWSLI"""
rpuid = int(rpuid)
for sid in NT.sts:
if NT.sts[sid]['remote_id'] == rpuid:
return sid
return None
def get_temp(val):
"""Attempt to convert a RWIS temperature into F"""
if val in ['', 32767]:
return None
return temperature(val / 100., 'C').value('F')
def get_speed(val):
""" Convert a speed value """
if val in ['', 255]:
return None
return speed(val, 'KMH').value('KT')
def merge(atmos, surface):
"""Create a dictionary of data based on these two dataframes
Args:
atmos (DataFrame): atmospherics
surface (DataFrame): surface data
Returns:
dictionary of values
"""
data = {}
# Do what we can with the atmospheric data
for _, row in atmos.iterrows():
nwsli = get_nwsli(row['Rpuid'])
if nwsli is None:
if int(row['Rpuid']) not in KNOWN_UNKNOWNS:
print(('process_rwis: Unknown Rpuid: %s in atmos'
'') % (row['Rpuid'],))
continue
if nwsli not in data:
data[nwsli] = {}
# Timestamp
ts = datetime.datetime.strptime(row['DtTm'], '%m/%d/%y %H:%M')
data[nwsli]['valid'] = ts.replace(tzinfo=pytz.UTC)
data[nwsli]['tmpf'] = get_temp(row['AirTemp'])
data[nwsli]['dwpf'] = get_temp(row['Dewpoint'])
if data[nwsli]['tmpf'] is not None and data[nwsli]['dwpf'] is not None:
data[nwsli]['relh'] = mcalc.relative_humidity_from_dewpoint(
data[nwsli]['tmpf'] * units('degF'),
data[nwsli]['dwpf'] * units('degF')).magnitude * 100.
# Rh is unused
data[nwsli]['sknt'] = get_speed(row['SpdAvg'])
data[nwsli]['gust'] = get_speed(row['SpdGust'])
if row['DirMin'] not in ['', 32767, np.nan]:
data[nwsli]['drct'] = row['DirMin']
# DirMax is unused
# Pressure is not reported
# PcIntens
# PcType
# PcRate
if row['PcAccum'] not in ['', -1, 32767, np.nan]:
data[nwsli]['pday'] = row['PcAccum'] * 0.00098425
if row['Visibility'] not in ['', -1, 32767, np.nan]:
data[nwsli]['vsby'] = row['Visibility'] / 1609.344
# Do what we can with the surface data
for _, row in surface.iterrows():
nwsli = get_nwsli(row['Rpuid'])
if nwsli is None:
if int(row['Rpuid']) not in KNOWN_UNKNOWNS:
print(('process_rwis: Unknown Rpuid: %s in sfc'
'') % (row['Rpuid'],))
continue
ts = datetime.datetime.strptime(row['DtTm'], '%m/%d/%y %H:%M')
ts = ts.replace(tzinfo=pytz.UTC)
if nwsli not in data:
data[nwsli] = {'valid': ts}
sensorid = int(row['Senid'])
key = 'sfvalid%s' % (sensorid,)
data[nwsli][key] = ts
key = 'scond%s' % (sensorid,)
data[nwsli][key] = row['sfcond']
# sftemp -150
key = 'tsf%s' % (sensorid,)
data[nwsli][key] = get_temp(row['sftemp'])
# frztemp 32767
# chemfactor 0
# chempct 101
# depth 32767
# icepct 101
# subsftemp NaN
key = 'tsub%s' % (sensorid,)
data[nwsli][key] = get_temp(row['subsftemp'])
# waterlevel NaN
# Unnamed: 13 NaN
# Unnamed: 14 NaN
return data
def do_windalerts(obs):
"""Iterate through the obs and do wind alerts where appropriate"""
for sid in obs:
# Problem sites with lightning issues
if sid in ['RBFI4', 'RTMI4', 'RWII4', 'RCAI4', 'RDYI4',
'RDNI4', 'RCDI4', 'RCII4', 'RCLI4', 'VCTI4',
'RGAI4', 'RAVI4']:
continue
ob = obs[sid]
# screening
if ob.get('gust') is None or ob['gust'] < 40:
continue
if np.isnan(ob['gust']):
continue
smph = speed(ob['gust'], 'KT').value('MPH')
if smph < 50:
continue
if smph > 100:
print(('process_rwis did not relay gust %.1f MPH from %s'
'') % (smph, sid))
continue
# Use a hacky tmp file to denote a wind alert that was sent
fn = "/tmp/iarwis.%s.%s" % (sid, ob['valid'].strftime("%Y%m%d%H%M"))
if os.path.isfile(fn):
continue
o = open(fn, 'w')
o.write(" ")
o.close()
lts = ob['valid'].astimezone(pytz.timezone("America/Chicago"))
stname = NT.sts[sid]['name']
msg = ("At %s, a wind gust of %.1f mph (%.1f kts) was recorded "
"at the %s (%s) Iowa RWIS station"
"") % (lts.strftime("%I:%M %p %d %b %Y"), smph, ob['gust'],
stname, sid)
mt = MIMEText(msg)
mt['From'] = 'akrherz@iastate.edu'
# mt['To'] = 'akrherz@iastate.edu'
mt['To'] = 'iarwis-alert@mesonet.agron.iastate.edu'
mt['Subject'] = 'Iowa RWIS Wind Gust %.0f mph %s' % (smph, stname)
s = smtplib.SMTP('mailhub.iastate.edu')
s.sendmail(mt['From'], [mt['To']], mt.as_string())
s.quit()
def do_iemtracker(obs):
"""Iterate over the obs and do IEM Tracker related activities """
threshold = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
threshold = threshold.replace(tzinfo=pytz.UTC)
tracker = TrackerEngine(IEM.cursor(), PORTFOLIO.cursor())
tracker.process_network(obs, 'iarwis', NT, threshold)
tracker.send_emails()
IEM.commit()
PORTFOLIO.commit()
def METARtemp(val):
"""convert temp to METAR"""
f_temp = float(val)
i_temp = int(round(f_temp, 0))
f1_temp = int(round(f_temp * 10., 0))
if i_temp < 0:
i_temp = 0 - i_temp
m_temp = "M%02i" % (i_temp,)
else:
m_temp = "%02i" % (i_temp,)
if f1_temp < 0:
t_temp = "1%03i" % (0 - f1_temp,)
else:
t_temp = "0%03i" % (f1_temp, )
return m_temp, t_temp
def METARwind(sknt, drct, gust):
"""convert to METAR"""
s = ""
d5 = drct
if str(d5)[-1] == "5":
d5 -= 5
s += "%03.0f%02.0f" % (d5, sknt)
if gust is not None:
s += "G%02.0f" % (gust, )
s += "KT"
return s
def gen_metars(obs, filename, convids=False):
"""Create METAR Data files
Args:
obs (list): list of dictionaries with obs in them
filename (str): filename to write data to
convids (bool): should we use special logic for ID conversion
"""
mtime = datetime.datetime.utcnow().strftime("%d%H%M")
thres = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
thres = thres.replace(tzinfo=pytz.UTC)
fp = open(filename, 'w')
fp.write("\001\015\015\012001\n")
fp.write("SAUS43 KDMX %s\015\015\012METAR\015\015\012" % (mtime, ))
for sid in obs:
ob = obs[sid]
if ob['valid'] < thres:
continue
if sid in ["RIOI4", "ROSI4", "RSMI4", 'RMCI4']:
continue
metarid = sid[:4]
remoteid = NT.sts[sid]['remote_id']
if convids:
metarid = RWIS2METAR.get("%02i" % (remoteid,), 'XXXX')
temptxt = ""
t_temptxt = ""
windtxt = ""
if ob.get('sknt') is not None and ob.get('drct') is not None:
windtxt = METARwind(ob['sknt'], ob['drct'], ob.get('gust'))
if obs.get('tmpf') is not None and obs.get('dwpf') is not None:
m_tmpc, t_tmpc = METARtemp(temperature(ob['tmpf'], 'F').value('C'))
m_dwpc, t_dwpc = METARtemp(temperature(ob['dwpf'], 'F').value('C'))
temptxt = "%s/%s" % (m_tmpc, m_dwpc)
t_temptxt = "T%s%s " % (t_tmpc, t_dwpc)
fp.write(("%s %s %s %s RMK AO2 %s%s\015\015\012"
"") % (metarid, ob['valid'].strftime("%d%H%MZ"),
windtxt, temptxt, t_temptxt, "="))
fp.write("\015\015\012\003")
fp.close()
def update_iemaccess(obs):
"""Update the IEMAccess database"""
icursor = IEM.cursor()
for sid in obs:
ob = obs[sid]
iemob = Observation(sid, "IA_RWIS", ob['valid'])
for varname in ['tmpf', 'dwpf', 'drct', 'sknt', 'gust', 'vsby',
'pday', 'tsf0', 'tsf1', 'tsf2', 'tsf3', 'scond0',
'scond1', 'scond2', 'scond3', 'relh']:
# Don't insert NaN values into iemaccess
thisval = ob.get(varname)
if thisval is None:
continue
# strings fail the isnan check
if isinstance(thisval, str):
iemob.data[varname] = ob.get(varname)
elif not np.isnan(thisval):
iemob.data[varname] = ob.get(varname)
for varname in ['tsub0', 'tsub1', 'tsub2', 'tsub3']:
if ob.get(varname) is not None:
iemob.data['rwis_subf'] = ob.get(varname)
break
iemob.save(icursor)
icursor.close()
IEM.commit()
def fetch_files():
"""Download the files we need"""
props = util.get_properties()
# get atmosfn
atmosfn = "%s/rwis.txt" % (INCOMING, )
try:
ftp = ftplib.FTP('165.206.203.34')
except TimeoutError as _exp:
print("process_rwis FTP Server Timeout")
sys.exit()
ftp.login('rwis', props['rwis_ftp_password'])
ftp.retrbinary('RETR ExpApAirData.txt', open(atmosfn, 'wb').write)
# Insert into LDM
pqstr = "plot ac %s rwis.txt raw/rwis/%sat.txt txt" % (GTS, GTS)
subprocess.call(("/home/ldm/bin/pqinsert -i -p '%s' %s "
"") % (pqstr, atmosfn), shell=True)
# get sfcfn
sfcfn = "%s/rwis_sf.txt" % (INCOMING, )
ftp.retrbinary('RETR ExpSfData.txt', open(sfcfn, 'wb').write)
ftp.close()
# Insert into LDM
pqstr = "plot ac %s rwis_sf.txt raw/rwis/%ssf.txt txt" % (GTS, GTS)
subprocess.call(("/home/ldm/bin/pqinsert -i -p '%s' %s "
"") % (pqstr, sfcfn), shell=True)
return atmosfn, sfcfn
def ldm_insert_metars(fn1, fn2):
""" Insert into LDM please """
for fn in [fn1, fn2]:
proc = subprocess.Popen(("/home/ldm/bin/pqinsert -p '%s' %s"
) % (fn.replace("/tmp/", ""), fn),
shell=True, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
os.waitpid(proc.pid, 0)
os.unlink(fn)
def main():
"""Go Main Go"""
(atmosfn, sfcfn) = fetch_files()
atmos = pd.read_csv(atmosfn)
surface = pd.read_csv(sfcfn)
obs = merge(atmos, surface)
do_windalerts(obs)
do_iemtracker(obs)
ts = datetime.datetime.utcnow().strftime("%d%H%M")
fn1 = "/tmp/IArwis%s.sao" % (ts, )
fn2 = "/tmp/IA.rwis%s.sao" % (ts, )
gen_metars(obs, fn1, False)
gen_metars(obs, fn2, True)
ldm_insert_metars(fn1, fn2)
# Discontinued rwis.csv generation, does not appear to be used, I hope
update_iemaccess(obs)
if __name__ == '__main__':
main()
IEM.commit()
PORTFOLIO.commit() | '64': 'XETI', '65': 'XCCI', '66': 'XKSI', '67': 'XKNI',
'68': 'XCMI', '69': 'XRGI', '70': 'XKYI', '72': 'XCTI'} | random_line_split |
cimport.rs | use libc::{c_char, c_uint, c_float, c_int};
use scene::RawScene;
use types::{AiString, MemoryInfo};
use fileio::{AiFileIO};
/// Represents an opaque set of settings to be used during importing.
#[repr(C)]
pub struct | {
sentinel: c_char,
}
#[link(name = "assimp")]
extern {
/// Reads the given file and returns its content.
///
/// If the call succeeds, the imported data is returned in an aiScene
/// structure. The data is intended to be read-only, it stays property of
/// the ASSIMP library and will be stable until aiReleaseImport() is
/// called. After you're done with it, call aiReleaseImport() to free the
/// resources associated with this file. If the import fails, NULL is
/// returned instead. Call aiGetErrorString() to retrieve a human-readable
/// error text.
///
/// # Parameters
/// * `pFile` Path and filename of the file to be imported,
/// expected to be a null-terminated c-string. NULL is not a valid value.
///
/// * `pFlags` Optional post processing steps to be executed after
/// a successful import. Provide a bitwise combination of the
/// aiPostProcessSteps flags.
///
/// Pointer to the imported data or NULL if the import failed.
pub fn aiImportFile(fname: *const c_char, flags: c_uint) -> *const RawScene;
/// Same as #aiImportFileEx, but adds an extra parameter containing importer settings.
/// * pProps #aiPropertyStore instance containing import settings.
// ASSIMP_API const C_STRUCT aiScene* aiImportFileExWithProperties(
// const char* pFile,
// unsigned int pFlags,
// C_STRUCT aiFileIO* pFS,
// const C_STRUCT aiPropertyStore* pProps);
pub fn aiImportFileExWithProperties(fname: *const c_char,
flags: c_uint,
fio : *mut AiFileIO,
props: *const PropertyStore)
-> *const RawScene;
/// Returns the error text of the last failed import process.
///
/// @return A textual description of the error that occurred at the last
/// import process. NULL if there was no error.
/// There can't be an error if you got a non-NULL aiScene from
/// aiImportFile/aiImportFileEx/aiApplyPostProcessing.
pub fn aiGetErrorString() -> *const c_char;
/// Reads the given file from a given memory buffer,
///
/// If the call succeeds, the contents of the file are returned as a
/// pointer to an aiScene object. The returned data is intended to be
/// read-only, the importer keeps ownership of the data and will destroy
/// it upon destruction. If the import fails, NULL is returned. A
/// human-readable error description can be retrieved by calling
/// aiGetErrorString().
/// # Arguments
///
/// * `buffer` Pointer to the file data
/// * `length` Length of pBuffer, in bytes
/// * `flags` Optional post processing steps to be executed after
/// a successful import. Provide a bitwise combination of the
/// aiPostProcessSteps flags. If you wish to inspect the imported
/// scene first in order to fine-tune your post-processing setup,
/// consider to use aiApplyPostProcessing().
/// * `hint` An additional hint to the library. If this is a non empty
/// string, the library looks for a loader to support the file extension
/// specified by pHint and passes the file to the first matching loader.
/// If this loader is unable to completely the request, the library
/// continues and tries to determine the file format on its own, a task
/// that may or may not be successful. Check the return value, and
/// you'll know ...
///
/// A pointer to the imported data, NULL if the import failed.
///
/// Note: This is a straightforward way to decode models from memory
/// buffers, but it doesn't handle model formats spreading their data
/// across multiple files or even directories. Examples include OBJ or
/// MD3, which outsource parts of their material stuff into external
/// scripts. If you need the full functionality, provide a custom IOSystem
/// to make Assimp find these files.
pub fn aiImportFileFromMemory(buf: *const c_char,
len: c_uint,
flags: c_uint,
hint: *const c_char)
-> *const RawScene;
/// Same as aiImportFileFromMemory, but adds an extra parameter
/// containing importer settings.
///
/// * props PropertyStore instance containing import settings.
pub fn aiImportFileFromMemoryWithProperties(buf: *const c_char,
len: c_uint,
flags: c_uint,
hint: *const c_char,
props: *const PropertyStore)
-> *const RawScene;
/// Apply post-processing to an already-imported scene.
///
/// This is strictly equivalent to calling aiImportFile()/aiImportFileEx
/// with the same flags. However, you can use this separate function to
/// inspect the imported scene first to fine-tune your post-processing
/// setup.
///
/// # Parameters
///
/// * `scene` Scene to work on.
/// * `flags` Provide a bitwise combination of the aiPostProcessSteps flags.
///
/// Returns a pointer to the post-processed data.
///
/// Post processing is done in-place, meaning this is still the same
/// aiScene which you passed for pScene. However, _if_ post-processing
/// failed, the scene could now be NULL. That's quite a rare case, post
/// processing steps are not really designed to 'fail'. To be exact, the
/// aiProcess_ValidateDS flag is currently the only post processing step
/// which can actually cause the scene to be reset to NULL.
pub fn aiApplyPostProcessing(scene: *const RawScene,
flags: c_uint)
-> *const RawScene;
/// Releases all resources associated with the given import process.
///
/// Call this function after you're done with the imported data.
/// pScene The imported data to release. NULL is a valid value.
pub fn aiReleaseImport(scene: *const RawScene);
/// Get the approximated storage required by an imported asset
///
/// # Parameters
///
/// * pIn Input asset.
/// * in Data structure to be filled.
pub fn aiGetMemoryRequirements(scene: *const RawScene, info: *mut MemoryInfo);
/// Create an empty property store.
///
/// Property stores are used to collect import settings.
/// Returns a new property store. Property stores need to
/// be manually destroyed using the aiReleasePropertyStore API function.
pub fn aiCreatePropertyStore() -> *mut PropertyStore;
/// Delete a property store.
pub fn aiReleasePropertyStore(p: *mut PropertyStore);
/// Set an integer property.
///
/// This is the C-version of Assimp::Importer::SetPropertyInteger(). In
/// the C interface, properties are always shared by all imports. It is
/// not possible to specify them per import.
///
/// * `name` Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file (AI_CONFIG_XXX).
/// * `value` New value for the property
pub fn aiSetImportPropertyInteger(store: *mut PropertyStore,
name: *const c_char,
value: c_int);
/// Set a floating-point property.
///
/// This is the C-version of Assimp::Importer::SetPropertyFloat(). In the
/// C interface, properties are always shared by all imports. It is not
/// possible to specify them per import.
///
/// `name` Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file
/// `value` New value for the property
///
pub fn aiSetImportPropertyFloat(store: *mut PropertyStore,
name: *const c_char,
value: c_float);
/// Set a string property.
///
/// This is the C-version of Assimp::Importer::SetPropertyString(). In
/// the C interface, properties are always shared by all imports. It is
/// not possible to specify them per import.
///
/// # Parameters
/// * property store to modify. Use aiCreatePropertyStore to obtain a store.
/// * szName Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file
/// (AI_CONFIG_XXX).
/// * value New value for the property
///
pub fn aiSetImportPropertyString(store: *mut PropertyStore,
name: *const c_char,
st: *const AiString);
}
// /** Reads the given file using user-defined I/O functions and returns
// * its content.
// *
// * If the call succeeds, the imported data is returned in an aiScene structure.
// * The data is intended to be read-only, it stays property of the ASSIMP
// * library and will be stable until aiReleaseImport() is called. After you're
// * done with it, call aiReleaseImport() to free the resources associated with
// * this file. If the import fails, NULL is returned instead. Call
// * aiGetErrorString() to retrieve a human-readable error text.
// * @param pFile Path and filename of the file to be imported,
// * expected to be a null-terminated c-string. NULL is not a valid value.
// * @param pFlags Optional post processing steps to be executed after
// * a successful import. Provide a bitwise combination of the
// * #aiPostProcessSteps flags.
// * @param pFS aiFileIO structure. Will be used to open the model file itself
// * and any other files the loader needs to open. Pass NULL to use the default
// * implementation.
// * @return Pointer to the imported data or NULL if the import failed.
// * @note Include <aiFileIO.h> for the definition of #aiFileIO.
// */
// ASSIMP_API const C_STRUCT aiScene* aiImportFileEx(
// const char* pFile,
// unsigned int pFlags,
// C_STRUCT aiFileIO* pFS);
| PropertyStore | identifier_name |
cimport.rs | use libc::{c_char, c_uint, c_float, c_int};
use scene::RawScene;
use types::{AiString, MemoryInfo};
use fileio::{AiFileIO};
/// Represents an opaque set of settings to be used during importing.
#[repr(C)]
pub struct PropertyStore {
sentinel: c_char,
}
#[link(name = "assimp")]
extern {
/// Reads the given file and returns its content.
///
/// If the call succeeds, the imported data is returned in an aiScene
/// structure. The data is intended to be read-only, it stays property of
/// the ASSIMP library and will be stable until aiReleaseImport() is
/// called. After you're done with it, call aiReleaseImport() to free the
/// resources associated with this file. If the import fails, NULL is
/// returned instead. Call aiGetErrorString() to retrieve a human-readable
/// error text.
///
/// # Parameters
/// * `pFile` Path and filename of the file to be imported,
/// expected to be a null-terminated c-string. NULL is not a valid value.
///
/// * `pFlags` Optional post processing steps to be executed after
/// a successful import. Provide a bitwise combination of the
/// aiPostProcessSteps flags.
///
/// Pointer to the imported data or NULL if the import failed.
pub fn aiImportFile(fname: *const c_char, flags: c_uint) -> *const RawScene;
/// Same as #aiImportFileEx, but adds an extra parameter containing importer settings.
/// * pProps #aiPropertyStore instance containing import settings.
// ASSIMP_API const C_STRUCT aiScene* aiImportFileExWithProperties(
// const char* pFile,
// unsigned int pFlags,
// C_STRUCT aiFileIO* pFS,
// const C_STRUCT aiPropertyStore* pProps);
pub fn aiImportFileExWithProperties(fname: *const c_char,
flags: c_uint,
fio : *mut AiFileIO,
props: *const PropertyStore)
-> *const RawScene;
/// Returns the error text of the last failed import process.
///
/// @return A textual description of the error that occurred at the last
/// import process. NULL if there was no error.
/// There can't be an error if you got a non-NULL aiScene from
/// aiImportFile/aiImportFileEx/aiApplyPostProcessing.
pub fn aiGetErrorString() -> *const c_char;
/// Reads the given file from a given memory buffer,
///
/// If the call succeeds, the contents of the file are returned as a
/// pointer to an aiScene object. The returned data is intended to be
/// read-only, the importer keeps ownership of the data and will destroy
/// it upon destruction. If the import fails, NULL is returned. A
/// human-readable error description can be retrieved by calling
/// aiGetErrorString().
/// # Arguments
///
/// * `buffer` Pointer to the file data
/// * `length` Length of pBuffer, in bytes
/// * `flags` Optional post processing steps to be executed after
/// a successful import. Provide a bitwise combination of the
/// aiPostProcessSteps flags. If you wish to inspect the imported
/// scene first in order to fine-tune your post-processing setup,
/// consider to use aiApplyPostProcessing().
/// * `hint` An additional hint to the library. If this is a non empty
/// string, the library looks for a loader to support the file extension
/// specified by pHint and passes the file to the first matching loader.
/// If this loader is unable to completely the request, the library
/// continues and tries to determine the file format on its own, a task
/// that may or may not be successful. Check the return value, and
/// you'll know ...
///
/// A pointer to the imported data, NULL if the import failed.
///
/// Note: This is a straightforward way to decode models from memory
/// buffers, but it doesn't handle model formats spreading their data
/// across multiple files or even directories. Examples include OBJ or
/// MD3, which outsource parts of their material stuff into external
/// scripts. If you need the full functionality, provide a custom IOSystem
/// to make Assimp find these files.
pub fn aiImportFileFromMemory(buf: *const c_char,
len: c_uint,
flags: c_uint,
hint: *const c_char)
-> *const RawScene;
| ///
/// * props PropertyStore instance containing import settings.
pub fn aiImportFileFromMemoryWithProperties(buf: *const c_char,
len: c_uint,
flags: c_uint,
hint: *const c_char,
props: *const PropertyStore)
-> *const RawScene;
/// Apply post-processing to an already-imported scene.
///
/// This is strictly equivalent to calling aiImportFile()/aiImportFileEx
/// with the same flags. However, you can use this separate function to
/// inspect the imported scene first to fine-tune your post-processing
/// setup.
///
/// # Parameters
///
/// * `scene` Scene to work on.
/// * `flags` Provide a bitwise combination of the aiPostProcessSteps flags.
///
/// Returns a pointer to the post-processed data.
///
/// Post processing is done in-place, meaning this is still the same
/// aiScene which you passed for pScene. However, _if_ post-processing
/// failed, the scene could now be NULL. That's quite a rare case, post
/// processing steps are not really designed to 'fail'. To be exact, the
/// aiProcess_ValidateDS flag is currently the only post processing step
/// which can actually cause the scene to be reset to NULL.
pub fn aiApplyPostProcessing(scene: *const RawScene,
flags: c_uint)
-> *const RawScene;
/// Releases all resources associated with the given import process.
///
/// Call this function after you're done with the imported data.
/// pScene The imported data to release. NULL is a valid value.
pub fn aiReleaseImport(scene: *const RawScene);
/// Get the approximated storage required by an imported asset
///
/// # Parameters
///
/// * pIn Input asset.
/// * in Data structure to be filled.
pub fn aiGetMemoryRequirements(scene: *const RawScene, info: *mut MemoryInfo);
/// Create an empty property store.
///
/// Property stores are used to collect import settings.
/// Returns a new property store. Property stores need to
/// be manually destroyed using the aiReleasePropertyStore API function.
pub fn aiCreatePropertyStore() -> *mut PropertyStore;
/// Delete a property store.
pub fn aiReleasePropertyStore(p: *mut PropertyStore);
/// Set an integer property.
///
/// This is the C-version of Assimp::Importer::SetPropertyInteger(). In
/// the C interface, properties are always shared by all imports. It is
/// not possible to specify them per import.
///
/// * `name` Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file (AI_CONFIG_XXX).
/// * `value` New value for the property
pub fn aiSetImportPropertyInteger(store: *mut PropertyStore,
name: *const c_char,
value: c_int);
/// Set a floating-point property.
///
/// This is the C-version of Assimp::Importer::SetPropertyFloat(). In the
/// C interface, properties are always shared by all imports. It is not
/// possible to specify them per import.
///
/// `name` Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file
/// `value` New value for the property
///
pub fn aiSetImportPropertyFloat(store: *mut PropertyStore,
name: *const c_char,
value: c_float);
/// Set a string property.
///
/// This is the C-version of Assimp::Importer::SetPropertyString(). In
/// the C interface, properties are always shared by all imports. It is
/// not possible to specify them per import.
///
/// # Parameters
/// * property store to modify. Use aiCreatePropertyStore to obtain a store.
/// * szName Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file
/// (AI_CONFIG_XXX).
/// * value New value for the property
///
pub fn aiSetImportPropertyString(store: *mut PropertyStore,
name: *const c_char,
st: *const AiString);
}
// /** Reads the given file using user-defined I/O functions and returns
// * its content.
// *
// * If the call succeeds, the imported data is returned in an aiScene structure.
// * The data is intended to be read-only, it stays property of the ASSIMP
// * library and will be stable until aiReleaseImport() is called. After you're
// * done with it, call aiReleaseImport() to free the resources associated with
// * this file. If the import fails, NULL is returned instead. Call
// * aiGetErrorString() to retrieve a human-readable error text.
// * @param pFile Path and filename of the file to be imported,
// * expected to be a null-terminated c-string. NULL is not a valid value.
// * @param pFlags Optional post processing steps to be executed after
// * a successful import. Provide a bitwise combination of the
// * #aiPostProcessSteps flags.
// * @param pFS aiFileIO structure. Will be used to open the model file itself
// * and any other files the loader needs to open. Pass NULL to use the default
// * implementation.
// * @return Pointer to the imported data or NULL if the import failed.
// * @note Include <aiFileIO.h> for the definition of #aiFileIO.
// */
// ASSIMP_API const C_STRUCT aiScene* aiImportFileEx(
// const char* pFile,
// unsigned int pFlags,
// C_STRUCT aiFileIO* pFS); | /// Same as aiImportFileFromMemory, but adds an extra parameter
/// containing importer settings. | random_line_split |
z5113243_ass_2.py | #!/usr/bin/env python3
from flask import Flask, request
from flask_restplus import Resource, Api, fields
from pymongo import MongoClient
from bson.objectid import ObjectId
import requests, datetime, re
#------------- CONFIG CONSTANTS -------------#
DEBUG = True
MAX_PAGE_LIMIT = 2
COLLECTION = 'indicators'
DB_CONFIG = {
'dbuser': 'z5113243',
'dbpassword': 'badpassword01',
'mlab_inst': 'ds239071',
'dbname': 'cs9321_ass2'
}
#------------- API INITIALISATION -------------#
db = None # initialised in main
app = Flask(__name__)
app.config.SWAGGER_UI_DOC_EXPANSION = 'list'
api = Api(
app,
title='Assignment 2 - COMP9321 - Chris Joy (z5113243)',
description='In this assignment, we\'re asked to develop ' \
'a Flask-Restplus data service that allows a client to ' \
'read and store some publicly available economic indicator ' \
'data for countries around the world, and allow the consumers ' \
'to access the data through a REST API.'
)
indicator_model = api.model(COLLECTION, {
'indicator_id': fields.String(required=True,
title='An Indicator ',
description='http://api.worldbank.org/v2/indicators',
example='NY.GDP.MKTP.CD'),
})
parser = api.parser()
parser.add_argument('q', help='Query param. Expected format: top<k> / bottom<k>, ' \
'where k is between 1 and 100. Eg. top10, bottom40')
#------------- HELPER FUNCTIONS -------------#
def mlab_client(dbuser, dbpassword, mlab_inst, dbname):
return MongoClient(
f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'
)[dbname]
def api_url(indicator, date='2012:2017', fmt='json', page=1):
return 'http://api.worldbank.org/v2/countries/all/indicators/' \
f'{indicator}?date={date}&format={fmt}&page={page}'
# Recursively build an array containing indicator data
def get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT):
response = requests.get(api_url(indicator=indicator, page=page)).json()
if not indicator or (len(response) <= 1 and response[0]['message'][0]['key'] == 'Invalid value'):
return 'Invalid indicator'
if response[0]['page'] >= max_pages or response[0]['page'] == response[0]['pages']:
return prevRes+response[1]
return get_indicator_data(
indicator=indicator,
page=response[0]['page']+1,
prevRes=prevRes+response[1],
max_pages=max_pages,
)
# Restructure indicator entry according to spec
def format_collection_entry(indicator_data):
return {
'country': indicator_data['country']['value'],
'date': indicator_data['date'],
'value': indicator_data['value'],
}
# Transform to top<k>/bottom<k> queries to array indexes
def query_to_index(query, arr_size):
try:
match = re.search(r'^(bottom|top)\d+$', query).group()
order = re.search(r'^(bottom|top)', match).group()
length = int(re.search(r'\d+$', match).group())
if order == 'top':
return slice(length)
elif order == 'bottom':
return slice(arr_size-length, arr_size)
else:
return slice(arr_size)
except:
return slice(arr_size)
#------------- QUESTION ROUTES -------------#
@api.route(f'/{COLLECTION}', endpoint=COLLECTION)
class CollectionIndex(Resource):
@api.doc(description='[Q1] Import a collection from the data service.')
@api.response(200, 'Successfully retrieved collection.')
@api.response(201, 'Successfully created collection.')
@api.response(400, 'Unable to create / retrieve collection.')
@api.expect(indicator_model)
def post(self):
body = request.json
# Indicator hasn't been specified in body (400)
if not body['indicator_id']:
return { 'message': 'Please specify an indicator.' }, 400
# Retrieve indicator from database (200)
existing_collection = db[COLLECTION].find_one({'indicator': body['indicator_id']})
if existing_collection:
return {
'location': f'/{COLLECTION}/{str(existing_collection["_id"])}',
'collection_id': str(existing_collection['_id']),
'creation_time': str(existing_collection['creation_time']),
'indicator': existing_collection['indicator'],
}, 200
# From now onwards we need to obtain data from the Worldbank API
indicator_data = get_indicator_data(body['indicator_id'])
# Valid indicator hasn't been specified (400)
if indicator_data == 'Invalid indicator':
return { 'message': 'Please specify a valid indicator.' }, 400
# Create and retrieve indicator from Worldbank API (201)
collection = {
'indicator': indicator_data[0]['indicator']['id'],
'indicator_value': indicator_data[0]['indicator']['value'],
'creation_time': datetime.datetime.utcnow(),
'entries': [format_collection_entry(entry) for entry in indicator_data],
}
created_collection = db[COLLECTION].insert_one(collection)
return {
'location': f'/{COLLECTION}/{str(created_collection.inserted_id)}',
'collection_id': str(created_collection.inserted_id),
'creation_time': str(collection['creation_time']),
'indicator': collection['indicator'],
}, 201
@api.doc(description='[Q3] Retrieve the list of available collections.')
@api.response(200, 'Successfully retreieved collections.')
@api.response(400, 'Unable to retreive collections.')
def get(self):
|
@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
class CollectionsById(Resource):
@api.doc(description='[Q2] Deleting a collection with the data service.')
@api.response(200, 'Successfully removed collection.')
@api.response(404, 'Unable to find collection.')
@api.response(400, 'Unable to remove collection.')
def delete(self, collection_id):
# Check if collection exists
if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):
return { 'message': 'Unable to find collection.' }, 404
# Remove collection from db
try:
db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to remove collection.' }, 400
return { 'message': f'Collection = {collection_id} has been removed from the database!' }, 200
@api.doc(description='[Q4] Retrieve a collection.')
@api.response(200, 'Successfully retreived collection.')
@api.response(404, 'Unable to retreive collection.')
def get(self, collection_id):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
return {
'collection_id': str(collection['_id']),
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'],
'creation_time': str(collection['creation_time']),
'entries': collection['entries'],
}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=f'{COLLECTION}_countrydate')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
@api.param('country', 'Country identifier (eg. Arab World)')
class CollectionByCountryYear(Resource):
@api.doc(description='[Q5] Retrieve economic indicator value for given a country and year.')
@api.response(200, 'Successfully retrieved economic indicator for given a country and year.')
@api.response(400, 'Unable to retrieve indicator entry.')
@api.response(404, 'Unable to find collection.')
def get(self, collection_id, year, country):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
# Create a filtered list containing entries that match params
filtered_entries = [
entry for entry in collection['entries'] if entry['country'] == country and entry['date'] == year
]
if len(filtered_entries) == 0:
return {'message': 'Unable to find specific indicator entry ' \
f'for country=\'{country}\' and year=\'{year}\'.'}, 400
return {
'collection_id': str(collection['_id']),
'indicator': collection['indicator'],
**filtered_entries[0],
}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=f'{COLLECTION}_by_top_bottom')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
class CollectionByTopBottom(Resource):
@api.doc(description='[Q6] Retrieve top/bottom economic indicator values for a given year.')
@api.response(200, 'Successfully retreived economic indicator values.')
@api.response(404, 'Unable to find collection.')
@api.expect(parser)
def get(self, collection_id, year):
query = request.args.get('q')
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
filtered_entries = [
entry for entry in collection['entries'] if entry['date'] == year
]
if not query:
return {
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'],
'entries': filtered_entries,
}, 200
return {
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'],
'entries': sorted(
filtered_entries,
key=lambda k: k['value'],
reverse=True
)[query_to_index(query, len(filtered_entries))],
}, 200
if __name__ == '__main__':
db = mlab_client(
dbuser=DB_CONFIG['dbuser'],
dbpassword=DB_CONFIG['dbpassword'],
mlab_inst=DB_CONFIG['mlab_inst'],
dbname=DB_CONFIG['dbname']
)
app.run(debug=DEBUG) | try:
collections = db[COLLECTION].find()
except:
return { 'message': 'Unable to retrieve collections.' }, 400
return [{
'location': f'/{COLLECTION}/{str(doc["_id"])}',
'collection_id': str(doc['_id']),
'creation_time': str(doc['creation_time']),
'indicator': doc['indicator'],
} for doc in collections], 200 | identifier_body |
z5113243_ass_2.py | #!/usr/bin/env python3
from flask import Flask, request
from flask_restplus import Resource, Api, fields
from pymongo import MongoClient
from bson.objectid import ObjectId
import requests, datetime, re
#------------- CONFIG CONSTANTS -------------#
DEBUG = True
MAX_PAGE_LIMIT = 2
COLLECTION = 'indicators'
DB_CONFIG = {
'dbuser': 'z5113243',
'dbpassword': 'badpassword01',
'mlab_inst': 'ds239071',
'dbname': 'cs9321_ass2'
}
#------------- API INITIALISATION -------------#
db = None # initialised in main
app = Flask(__name__)
app.config.SWAGGER_UI_DOC_EXPANSION = 'list'
api = Api(
app,
title='Assignment 2 - COMP9321 - Chris Joy (z5113243)',
description='In this assignment, we\'re asked to develop ' \
'a Flask-Restplus data service that allows a client to ' \
'read and store some publicly available economic indicator ' \
'data for countries around the world, and allow the consumers ' \
'to access the data through a REST API.'
)
indicator_model = api.model(COLLECTION, {
'indicator_id': fields.String(required=True,
title='An Indicator ',
description='http://api.worldbank.org/v2/indicators',
example='NY.GDP.MKTP.CD'),
})
parser = api.parser()
parser.add_argument('q', help='Query param. Expected format: top<k> / bottom<k>, ' \
'where k is between 1 and 100. Eg. top10, bottom40')
#------------- HELPER FUNCTIONS -------------#
def mlab_client(dbuser, dbpassword, mlab_inst, dbname):
return MongoClient(
f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'
)[dbname]
def api_url(indicator, date='2012:2017', fmt='json', page=1):
return 'http://api.worldbank.org/v2/countries/all/indicators/' \
f'{indicator}?date={date}&format={fmt}&page={page}'
# Recursively build an array containing indicator data
def get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT):
response = requests.get(api_url(indicator=indicator, page=page)).json()
if not indicator or (len(response) <= 1 and response[0]['message'][0]['key'] == 'Invalid value'):
return 'Invalid indicator'
if response[0]['page'] >= max_pages or response[0]['page'] == response[0]['pages']:
return prevRes+response[1]
return get_indicator_data(
indicator=indicator,
page=response[0]['page']+1,
prevRes=prevRes+response[1],
max_pages=max_pages,
)
# Restructure indicator entry according to spec
def format_collection_entry(indicator_data):
return {
'country': indicator_data['country']['value'],
'date': indicator_data['date'],
'value': indicator_data['value'],
}
# Transform to top<k>/bottom<k> queries to array indexes
def query_to_index(query, arr_size):
try:
match = re.search(r'^(bottom|top)\d+$', query).group()
order = re.search(r'^(bottom|top)', match).group()
length = int(re.search(r'\d+$', match).group())
if order == 'top':
return slice(length)
elif order == 'bottom':
return slice(arr_size-length, arr_size)
else:
return slice(arr_size)
except:
return slice(arr_size)
#------------- QUESTION ROUTES -------------#
@api.route(f'/{COLLECTION}', endpoint=COLLECTION)
class CollectionIndex(Resource):
@api.doc(description='[Q1] Import a collection from the data service.')
@api.response(200, 'Successfully retrieved collection.')
@api.response(201, 'Successfully created collection.')
@api.response(400, 'Unable to create / retrieve collection.')
@api.expect(indicator_model)
def post(self):
body = request.json
# Indicator hasn't been specified in body (400)
if not body['indicator_id']:
return { 'message': 'Please specify an indicator.' }, 400
# Retrieve indicator from database (200)
existing_collection = db[COLLECTION].find_one({'indicator': body['indicator_id']})
if existing_collection:
|
# From now onwards we need to obtain data from the Worldbank API
indicator_data = get_indicator_data(body['indicator_id'])
# Valid indicator hasn't been specified (400)
if indicator_data == 'Invalid indicator':
return { 'message': 'Please specify a valid indicator.' }, 400
# Create and retrieve indicator from Worldbank API (201)
collection = {
'indicator': indicator_data[0]['indicator']['id'],
'indicator_value': indicator_data[0]['indicator']['value'],
'creation_time': datetime.datetime.utcnow(),
'entries': [format_collection_entry(entry) for entry in indicator_data],
}
created_collection = db[COLLECTION].insert_one(collection)
return {
'location': f'/{COLLECTION}/{str(created_collection.inserted_id)}',
'collection_id': str(created_collection.inserted_id),
'creation_time': str(collection['creation_time']),
'indicator': collection['indicator'],
}, 201
@api.doc(description='[Q3] Retrieve the list of available collections.')
@api.response(200, 'Successfully retreieved collections.')
@api.response(400, 'Unable to retreive collections.')
def get(self):
try:
collections = db[COLLECTION].find()
except:
return { 'message': 'Unable to retrieve collections.' }, 400
return [{
'location': f'/{COLLECTION}/{str(doc["_id"])}',
'collection_id': str(doc['_id']),
'creation_time': str(doc['creation_time']),
'indicator': doc['indicator'],
} for doc in collections], 200
@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
class CollectionsById(Resource):
@api.doc(description='[Q2] Deleting a collection with the data service.')
@api.response(200, 'Successfully removed collection.')
@api.response(404, 'Unable to find collection.')
@api.response(400, 'Unable to remove collection.')
def delete(self, collection_id):
# Check if collection exists
if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):
return { 'message': 'Unable to find collection.' }, 404
# Remove collection from db
try:
db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to remove collection.' }, 400
return { 'message': f'Collection = {collection_id} has been removed from the database!' }, 200
@api.doc(description='[Q4] Retrieve a collection.')
@api.response(200, 'Successfully retreived collection.')
@api.response(404, 'Unable to retreive collection.')
def get(self, collection_id):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
return {
'collection_id': str(collection['_id']),
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'],
'creation_time': str(collection['creation_time']),
'entries': collection['entries'],
}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=f'{COLLECTION}_countrydate')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
@api.param('country', 'Country identifier (eg. Arab World)')
class CollectionByCountryYear(Resource):
@api.doc(description='[Q5] Retrieve economic indicator value for given a country and year.')
@api.response(200, 'Successfully retrieved economic indicator for given a country and year.')
@api.response(400, 'Unable to retrieve indicator entry.')
@api.response(404, 'Unable to find collection.')
def get(self, collection_id, year, country):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
# Create a filtered list containing entries that match params
filtered_entries = [
entry for entry in collection['entries'] if entry['country'] == country and entry['date'] == year
]
if len(filtered_entries) == 0:
return {'message': 'Unable to find specific indicator entry ' \
f'for country=\'{country}\' and year=\'{year}\'.'}, 400
return {
'collection_id': str(collection['_id']),
'indicator': collection['indicator'],
**filtered_entries[0],
}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=f'{COLLECTION}_by_top_bottom')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
class CollectionByTopBottom(Resource):
@api.doc(description='[Q6] Retrieve top/bottom economic indicator values for a given year.')
@api.response(200, 'Successfully retreived economic indicator values.')
@api.response(404, 'Unable to find collection.')
@api.expect(parser)
def get(self, collection_id, year):
query = request.args.get('q')
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
filtered_entries = [
entry for entry in collection['entries'] if entry['date'] == year
]
if not query:
return {
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'],
'entries': filtered_entries,
}, 200
return {
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'],
'entries': sorted(
filtered_entries,
key=lambda k: k['value'],
reverse=True
)[query_to_index(query, len(filtered_entries))],
}, 200
if __name__ == '__main__':
db = mlab_client(
dbuser=DB_CONFIG['dbuser'],
dbpassword=DB_CONFIG['dbpassword'],
mlab_inst=DB_CONFIG['mlab_inst'],
dbname=DB_CONFIG['dbname']
)
app.run(debug=DEBUG) | return {
'location': f'/{COLLECTION}/{str(existing_collection["_id"])}',
'collection_id': str(existing_collection['_id']),
'creation_time': str(existing_collection['creation_time']),
'indicator': existing_collection['indicator'],
}, 200 | conditional_block |
z5113243_ass_2.py | #!/usr/bin/env python3
from flask import Flask, request
from flask_restplus import Resource, Api, fields
from pymongo import MongoClient
from bson.objectid import ObjectId
import requests, datetime, re
#------------- CONFIG CONSTANTS -------------#
DEBUG = True
MAX_PAGE_LIMIT = 2
COLLECTION = 'indicators'
DB_CONFIG = {
'dbuser': 'z5113243',
'dbpassword': 'badpassword01',
'mlab_inst': 'ds239071',
'dbname': 'cs9321_ass2'
}
#------------- API INITIALISATION -------------#
db = None # initialised in main
app = Flask(__name__)
app.config.SWAGGER_UI_DOC_EXPANSION = 'list'
api = Api(
app,
title='Assignment 2 - COMP9321 - Chris Joy (z5113243)',
description='In this assignment, we\'re asked to develop ' \
'a Flask-Restplus data service that allows a client to ' \
'read and store some publicly available economic indicator ' \
'data for countries around the world, and allow the consumers ' \
'to access the data through a REST API.'
)
indicator_model = api.model(COLLECTION, {
'indicator_id': fields.String(required=True,
title='An Indicator ',
description='http://api.worldbank.org/v2/indicators',
example='NY.GDP.MKTP.CD'),
})
parser = api.parser()
parser.add_argument('q', help='Query param. Expected format: top<k> / bottom<k>, ' \
'where k is between 1 and 100. Eg. top10, bottom40')
#------------- HELPER FUNCTIONS -------------#
def mlab_client(dbuser, dbpassword, mlab_inst, dbname):
return MongoClient(
f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'
)[dbname]
def api_url(indicator, date='2012:2017', fmt='json', page=1):
return 'http://api.worldbank.org/v2/countries/all/indicators/' \
f'{indicator}?date={date}&format={fmt}&page={page}'
# Recursively build an array containing indicator data
def get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT):
response = requests.get(api_url(indicator=indicator, page=page)).json()
if not indicator or (len(response) <= 1 and response[0]['message'][0]['key'] == 'Invalid value'):
return 'Invalid indicator'
if response[0]['page'] >= max_pages or response[0]['page'] == response[0]['pages']:
return prevRes+response[1]
return get_indicator_data(
indicator=indicator,
page=response[0]['page']+1,
prevRes=prevRes+response[1],
max_pages=max_pages,
)
# Restructure indicator entry according to spec
def format_collection_entry(indicator_data):
return {
'country': indicator_data['country']['value'],
'date': indicator_data['date'],
'value': indicator_data['value'],
}
# Transform to top<k>/bottom<k> queries to array indexes
def query_to_index(query, arr_size):
try:
match = re.search(r'^(bottom|top)\d+$', query).group()
order = re.search(r'^(bottom|top)', match).group()
length = int(re.search(r'\d+$', match).group())
if order == 'top':
return slice(length)
elif order == 'bottom':
return slice(arr_size-length, arr_size)
else:
return slice(arr_size)
except:
return slice(arr_size)
#------------- QUESTION ROUTES -------------#
@api.route(f'/{COLLECTION}', endpoint=COLLECTION)
class CollectionIndex(Resource):
@api.doc(description='[Q1] Import a collection from the data service.')
@api.response(200, 'Successfully retrieved collection.')
@api.response(201, 'Successfully created collection.')
@api.response(400, 'Unable to create / retrieve collection.')
@api.expect(indicator_model)
def post(self):
body = request.json
# Indicator hasn't been specified in body (400)
if not body['indicator_id']:
return { 'message': 'Please specify an indicator.' }, 400
# Retrieve indicator from database (200)
existing_collection = db[COLLECTION].find_one({'indicator': body['indicator_id']})
if existing_collection:
return {
'location': f'/{COLLECTION}/{str(existing_collection["_id"])}',
'collection_id': str(existing_collection['_id']),
'creation_time': str(existing_collection['creation_time']),
'indicator': existing_collection['indicator'],
}, 200
# From now onwards we need to obtain data from the Worldbank API
indicator_data = get_indicator_data(body['indicator_id'])
# Valid indicator hasn't been specified (400)
if indicator_data == 'Invalid indicator':
return { 'message': 'Please specify a valid indicator.' }, 400
# Create and retrieve indicator from Worldbank API (201)
collection = {
'indicator': indicator_data[0]['indicator']['id'],
'indicator_value': indicator_data[0]['indicator']['value'],
'creation_time': datetime.datetime.utcnow(),
'entries': [format_collection_entry(entry) for entry in indicator_data],
}
created_collection = db[COLLECTION].insert_one(collection)
return {
'location': f'/{COLLECTION}/{str(created_collection.inserted_id)}',
'collection_id': str(created_collection.inserted_id),
'creation_time': str(collection['creation_time']),
'indicator': collection['indicator'],
}, 201
@api.doc(description='[Q3] Retrieve the list of available collections.')
@api.response(200, 'Successfully retreieved collections.')
@api.response(400, 'Unable to retreive collections.')
def get(self):
try:
collections = db[COLLECTION].find()
except:
return { 'message': 'Unable to retrieve collections.' }, 400
return [{
'location': f'/{COLLECTION}/{str(doc["_id"])}',
'collection_id': str(doc['_id']),
'creation_time': str(doc['creation_time']),
'indicator': doc['indicator'],
} for doc in collections], 200
@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
class CollectionsById(Resource):
@api.doc(description='[Q2] Deleting a collection with the data service.')
@api.response(200, 'Successfully removed collection.')
@api.response(404, 'Unable to find collection.')
@api.response(400, 'Unable to remove collection.')
def delete(self, collection_id):
# Check if collection exists
if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):
return { 'message': 'Unable to find collection.' }, 404
# Remove collection from db
try:
db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to remove collection.' }, 400
return { 'message': f'Collection = {collection_id} has been removed from the database!' }, 200
@api.doc(description='[Q4] Retrieve a collection.')
@api.response(200, 'Successfully retreived collection.')
@api.response(404, 'Unable to retreive collection.')
def get(self, collection_id):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
return {
'collection_id': str(collection['_id']),
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'],
'creation_time': str(collection['creation_time']),
'entries': collection['entries'],
}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=f'{COLLECTION}_countrydate')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
@api.param('country', 'Country identifier (eg. Arab World)')
class CollectionByCountryYear(Resource):
@api.doc(description='[Q5] Retrieve economic indicator value for given a country and year.')
@api.response(200, 'Successfully retrieved economic indicator for given a country and year.')
@api.response(400, 'Unable to retrieve indicator entry.')
@api.response(404, 'Unable to find collection.')
def get(self, collection_id, year, country):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
# Create a filtered list containing entries that match params
filtered_entries = [
entry for entry in collection['entries'] if entry['country'] == country and entry['date'] == year
]
if len(filtered_entries) == 0:
return {'message': 'Unable to find specific indicator entry ' \
f'for country=\'{country}\' and year=\'{year}\'.'}, 400
return {
'collection_id': str(collection['_id']),
'indicator': collection['indicator'],
**filtered_entries[0],
}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=f'{COLLECTION}_by_top_bottom')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
class CollectionByTopBottom(Resource):
@api.doc(description='[Q6] Retrieve top/bottom economic indicator values for a given year.')
@api.response(200, 'Successfully retreived economic indicator values.')
@api.response(404, 'Unable to find collection.')
@api.expect(parser)
def get(self, collection_id, year):
query = request.args.get('q')
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
filtered_entries = [
entry for entry in collection['entries'] if entry['date'] == year
]
if not query:
return {
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'], | return {
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'],
'entries': sorted(
filtered_entries,
key=lambda k: k['value'],
reverse=True
)[query_to_index(query, len(filtered_entries))],
}, 200
if __name__ == '__main__':
db = mlab_client(
dbuser=DB_CONFIG['dbuser'],
dbpassword=DB_CONFIG['dbpassword'],
mlab_inst=DB_CONFIG['mlab_inst'],
dbname=DB_CONFIG['dbname']
)
app.run(debug=DEBUG) | 'entries': filtered_entries,
}, 200 | random_line_split |
z5113243_ass_2.py | #!/usr/bin/env python3
from flask import Flask, request
from flask_restplus import Resource, Api, fields
from pymongo import MongoClient
from bson.objectid import ObjectId
import requests, datetime, re
#------------- CONFIG CONSTANTS -------------#
DEBUG = True
MAX_PAGE_LIMIT = 2
COLLECTION = 'indicators'
DB_CONFIG = {
'dbuser': 'z5113243',
'dbpassword': 'badpassword01',
'mlab_inst': 'ds239071',
'dbname': 'cs9321_ass2'
}
#------------- API INITIALISATION -------------#
db = None # initialised in main
app = Flask(__name__)
app.config.SWAGGER_UI_DOC_EXPANSION = 'list'
api = Api(
app,
title='Assignment 2 - COMP9321 - Chris Joy (z5113243)',
description='In this assignment, we\'re asked to develop ' \
'a Flask-Restplus data service that allows a client to ' \
'read and store some publicly available economic indicator ' \
'data for countries around the world, and allow the consumers ' \
'to access the data through a REST API.'
)
indicator_model = api.model(COLLECTION, {
'indicator_id': fields.String(required=True,
title='An Indicator ',
description='http://api.worldbank.org/v2/indicators',
example='NY.GDP.MKTP.CD'),
})
parser = api.parser()
parser.add_argument('q', help='Query param. Expected format: top<k> / bottom<k>, ' \
'where k is between 1 and 100. Eg. top10, bottom40')
#------------- HELPER FUNCTIONS -------------#
def mlab_client(dbuser, dbpassword, mlab_inst, dbname):
return MongoClient(
f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'
)[dbname]
def api_url(indicator, date='2012:2017', fmt='json', page=1):
return 'http://api.worldbank.org/v2/countries/all/indicators/' \
f'{indicator}?date={date}&format={fmt}&page={page}'
# Recursively build an array containing indicator data
def get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT):
response = requests.get(api_url(indicator=indicator, page=page)).json()
if not indicator or (len(response) <= 1 and response[0]['message'][0]['key'] == 'Invalid value'):
return 'Invalid indicator'
if response[0]['page'] >= max_pages or response[0]['page'] == response[0]['pages']:
return prevRes+response[1]
return get_indicator_data(
indicator=indicator,
page=response[0]['page']+1,
prevRes=prevRes+response[1],
max_pages=max_pages,
)
# Restructure indicator entry according to spec
def format_collection_entry(indicator_data):
return {
'country': indicator_data['country']['value'],
'date': indicator_data['date'],
'value': indicator_data['value'],
}
# Transform to top<k>/bottom<k> queries to array indexes
def query_to_index(query, arr_size):
try:
match = re.search(r'^(bottom|top)\d+$', query).group()
order = re.search(r'^(bottom|top)', match).group()
length = int(re.search(r'\d+$', match).group())
if order == 'top':
return slice(length)
elif order == 'bottom':
return slice(arr_size-length, arr_size)
else:
return slice(arr_size)
except:
return slice(arr_size)
#------------- QUESTION ROUTES -------------#
@api.route(f'/{COLLECTION}', endpoint=COLLECTION)
class CollectionIndex(Resource):
@api.doc(description='[Q1] Import a collection from the data service.')
@api.response(200, 'Successfully retrieved collection.')
@api.response(201, 'Successfully created collection.')
@api.response(400, 'Unable to create / retrieve collection.')
@api.expect(indicator_model)
def post(self):
body = request.json
# Indicator hasn't been specified in body (400)
if not body['indicator_id']:
return { 'message': 'Please specify an indicator.' }, 400
# Retrieve indicator from database (200)
existing_collection = db[COLLECTION].find_one({'indicator': body['indicator_id']})
if existing_collection:
return {
'location': f'/{COLLECTION}/{str(existing_collection["_id"])}',
'collection_id': str(existing_collection['_id']),
'creation_time': str(existing_collection['creation_time']),
'indicator': existing_collection['indicator'],
}, 200
# From now onwards we need to obtain data from the Worldbank API
indicator_data = get_indicator_data(body['indicator_id'])
# Valid indicator hasn't been specified (400)
if indicator_data == 'Invalid indicator':
return { 'message': 'Please specify a valid indicator.' }, 400
# Create and retrieve indicator from Worldbank API (201)
collection = {
'indicator': indicator_data[0]['indicator']['id'],
'indicator_value': indicator_data[0]['indicator']['value'],
'creation_time': datetime.datetime.utcnow(),
'entries': [format_collection_entry(entry) for entry in indicator_data],
}
created_collection = db[COLLECTION].insert_one(collection)
return {
'location': f'/{COLLECTION}/{str(created_collection.inserted_id)}',
'collection_id': str(created_collection.inserted_id),
'creation_time': str(collection['creation_time']),
'indicator': collection['indicator'],
}, 201
@api.doc(description='[Q3] Retrieve the list of available collections.')
@api.response(200, 'Successfully retreieved collections.')
@api.response(400, 'Unable to retreive collections.')
def get(self):
try:
collections = db[COLLECTION].find()
except:
return { 'message': 'Unable to retrieve collections.' }, 400
return [{
'location': f'/{COLLECTION}/{str(doc["_id"])}',
'collection_id': str(doc['_id']),
'creation_time': str(doc['creation_time']),
'indicator': doc['indicator'],
} for doc in collections], 200
@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
class CollectionsById(Resource):
@api.doc(description='[Q2] Deleting a collection with the data service.')
@api.response(200, 'Successfully removed collection.')
@api.response(404, 'Unable to find collection.')
@api.response(400, 'Unable to remove collection.')
def | (self, collection_id):
# Check if collection exists
if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):
return { 'message': 'Unable to find collection.' }, 404
# Remove collection from db
try:
db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to remove collection.' }, 400
return { 'message': f'Collection = {collection_id} has been removed from the database!' }, 200
@api.doc(description='[Q4] Retrieve a collection.')
@api.response(200, 'Successfully retreived collection.')
@api.response(404, 'Unable to retreive collection.')
def get(self, collection_id):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
return {
'collection_id': str(collection['_id']),
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'],
'creation_time': str(collection['creation_time']),
'entries': collection['entries'],
}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=f'{COLLECTION}_countrydate')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
@api.param('country', 'Country identifier (eg. Arab World)')
class CollectionByCountryYear(Resource):
@api.doc(description='[Q5] Retrieve economic indicator value for given a country and year.')
@api.response(200, 'Successfully retrieved economic indicator for given a country and year.')
@api.response(400, 'Unable to retrieve indicator entry.')
@api.response(404, 'Unable to find collection.')
def get(self, collection_id, year, country):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
# Create a filtered list containing entries that match params
filtered_entries = [
entry for entry in collection['entries'] if entry['country'] == country and entry['date'] == year
]
if len(filtered_entries) == 0:
return {'message': 'Unable to find specific indicator entry ' \
f'for country=\'{country}\' and year=\'{year}\'.'}, 400
return {
'collection_id': str(collection['_id']),
'indicator': collection['indicator'],
**filtered_entries[0],
}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=f'{COLLECTION}_by_top_bottom')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
class CollectionByTopBottom(Resource):
@api.doc(description='[Q6] Retrieve top/bottom economic indicator values for a given year.')
@api.response(200, 'Successfully retreived economic indicator values.')
@api.response(404, 'Unable to find collection.')
@api.expect(parser)
def get(self, collection_id, year):
query = request.args.get('q')
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
except:
return { 'message': 'Unable to find collection' }, 404
filtered_entries = [
entry for entry in collection['entries'] if entry['date'] == year
]
if not query:
return {
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'],
'entries': filtered_entries,
}, 200
return {
'indicator': collection['indicator'],
'indicator_value': collection['indicator_value'],
'entries': sorted(
filtered_entries,
key=lambda k: k['value'],
reverse=True
)[query_to_index(query, len(filtered_entries))],
}, 200
if __name__ == '__main__':
db = mlab_client(
dbuser=DB_CONFIG['dbuser'],
dbpassword=DB_CONFIG['dbpassword'],
mlab_inst=DB_CONFIG['mlab_inst'],
dbname=DB_CONFIG['dbname']
)
app.run(debug=DEBUG) | delete | identifier_name |
stacks.rs | //! Generate a Wasm program that keeps track of its current stack frames.
//!
//! We can then compare the stack trace we observe in Wasmtime to what the Wasm
//! program believes its stack should be. Any discrepencies between the two
//! points to a bug in either this test case generator or Wasmtime's stack
//! walker.
use std::mem;
use arbitrary::{Arbitrary, Result, Unstructured};
use wasm_encoder::Instruction;
const MAX_FUNCS: usize = 20;
const MAX_OPS: usize = 1_000;
/// Generate a Wasm module that keeps track of its current call stack, to
/// compare to the host.
#[derive(Debug)]
pub struct Stacks {
funcs: Vec<Function>,
inputs: Vec<u8>,
}
#[derive(Debug, Default)]
struct Function {
ops: Vec<Op>,
}
#[derive(Arbitrary, Debug, Clone, Copy)]
enum Op {
CheckStackInHost,
Call(u32),
CallThroughHost(u32),
}
impl<'a> Arbitrary<'a> for Stacks {
fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
let funcs = Self::arbitrary_funcs(u)?;
let n = u.len().min(200);
let inputs = u.bytes(n)?.to_vec();
Ok(Stacks { funcs, inputs })
}
}
impl Stacks {
fn arbitrary_funcs(u: &mut Unstructured) -> Result<Vec<Function>> {
let mut funcs = vec![Function::default()];
// The indices of functions within `funcs` that we still need to
// generate.
let mut work_list = vec![0];
while let Some(f) = work_list.pop() {
let mut ops = Vec::with_capacity(u.arbitrary_len::<Op>()?.min(MAX_OPS));
for _ in 0..ops.capacity() {
ops.push(u.arbitrary()?);
}
for op in &mut ops {
match op {
Op::CallThroughHost(idx) | Op::Call(idx) => {
if u.is_empty() || funcs.len() >= MAX_FUNCS || u.ratio(4, 5)? {
// Call an existing function.
*idx = *idx % u32::try_from(funcs.len()).unwrap();
} else {
// Call a new function...
*idx = u32::try_from(funcs.len()).unwrap();
// ...which means we also need to eventually define it.
work_list.push(funcs.len());
funcs.push(Function::default());
}
}
Op::CheckStackInHost => {}
}
}
funcs[f].ops = ops;
}
Ok(funcs)
}
/// Get the input values to run the Wasm module with.
pub fn inputs(&self) -> &[u8] {
&self.inputs
}
/// Get this test case's Wasm module.
///
/// The Wasm module has the following imports:
///
/// * `host.check_stack: [] -> []`: The host can check the Wasm's
/// understanding of its own stack against the host's understanding of the
/// Wasm stack to find discrepency bugs.
///
/// * `host.call_func: [funcref] -> []`: The host should call the given
/// `funcref`, creating a call stack with multiple sequences of contiguous
/// Wasm frames on the stack like `[..., wasm, host, wasm]`.
///
/// The Wasm module has the following exports:
///
/// * `run: [i32] -> []`: This function should be called with each of the
/// input values to run this generated test case.
///
/// * `get_stack: [] -> [i32 i32]`: Get the pointer and length of the `u32`
/// array of this Wasm's understanding of its stack. This is useful for
/// checking whether the host's view of the stack at a trap matches the
/// Wasm program's understanding.
pub fn wasm(&self) -> Vec<u8> {
let mut module = wasm_encoder::Module::new();
let mut types = wasm_encoder::TypeSection::new();
let run_type = types.len();
types.function(vec![wasm_encoder::ValType::I32], vec![]);
let get_stack_type = types.len();
types.function(
vec![],
vec![wasm_encoder::ValType::I32, wasm_encoder::ValType::I32],
);
let null_type = types.len();
types.function(vec![], vec![]);
let call_func_type = types.len();
types.function(vec![wasm_encoder::ValType::FUNCREF], vec![]);
section(&mut module, types);
let mut imports = wasm_encoder::ImportSection::new();
let check_stack_func = 0;
imports.import(
"host",
"check_stack",
wasm_encoder::EntityType::Function(null_type),
);
let call_func_func = 1;
imports.import(
"host",
"call_func",
wasm_encoder::EntityType::Function(call_func_type),
);
let num_imported_funcs = 2;
section(&mut module, imports);
let mut funcs = wasm_encoder::FunctionSection::new();
for _ in &self.funcs {
funcs.function(null_type);
}
let run_func = funcs.len() + num_imported_funcs;
funcs.function(run_type);
let get_stack_func = funcs.len() + num_imported_funcs;
funcs.function(get_stack_type);
section(&mut module, funcs);
let mut mems = wasm_encoder::MemorySection::new();
let memory = mems.len();
mems.memory(wasm_encoder::MemoryType {
minimum: 1,
maximum: Some(1),
memory64: false,
shared: false,
});
section(&mut module, mems);
let mut globals = wasm_encoder::GlobalSection::new();
let fuel_global = globals.len();
globals.global(
wasm_encoder::GlobalType {
val_type: wasm_encoder::ValType::I32,
mutable: true,
},
&wasm_encoder::ConstExpr::i32_const(0),
);
let stack_len_global = globals.len();
globals.global(
wasm_encoder::GlobalType {
val_type: wasm_encoder::ValType::I32,
mutable: true,
},
&wasm_encoder::ConstExpr::i32_const(0),
);
section(&mut module, globals);
let mut exports = wasm_encoder::ExportSection::new();
exports.export("run", wasm_encoder::ExportKind::Func, run_func);
exports.export("get_stack", wasm_encoder::ExportKind::Func, get_stack_func);
exports.export("memory", wasm_encoder::ExportKind::Memory, memory);
exports.export("fuel", wasm_encoder::ExportKind::Global, fuel_global);
section(&mut module, exports);
let mut elems = wasm_encoder::ElementSection::new();
elems.declared(wasm_encoder::Elements::Functions(
&(0..num_imported_funcs + u32::try_from(self.funcs.len()).unwrap()).collect::<Vec<_>>(),
));
section(&mut module, elems);
let check_fuel = |body: &mut wasm_encoder::Function| {
// Trap if we are out of fuel.
body.instruction(&Instruction::GlobalGet(fuel_global))
.instruction(&Instruction::I32Eqz)
.instruction(&Instruction::If(wasm_encoder::BlockType::Empty))
.instruction(&Instruction::Unreachable)
.instruction(&Instruction::End);
// Decrement fuel.
body.instruction(&Instruction::GlobalGet(fuel_global))
.instruction(&Instruction::I32Const(1))
.instruction(&Instruction::I32Sub)
.instruction(&Instruction::GlobalSet(fuel_global));
};
let push_func_to_stack = |body: &mut wasm_encoder::Function, func: u32| {
// Add this function to our internal stack.
//
// Note that we know our `stack_len_global` can't go beyond memory
// bounds because we limit fuel to at most `u8::MAX` and each stack
// entry is an `i32` and `u8::MAX * size_of(i32)` still fits in one
// Wasm page.
body.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(func as i32))
.instruction(&Instruction::I32Store(wasm_encoder::MemArg {
offset: 0,
align: 0,
memory_index: memory,
}))
.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(mem::size_of::<i32>() as i32))
.instruction(&Instruction::I32Add)
.instruction(&Instruction::GlobalSet(stack_len_global));
};
let pop_func_from_stack = |body: &mut wasm_encoder::Function| {
// Remove this function from our internal stack.
body.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(mem::size_of::<i32>() as i32))
.instruction(&Instruction::I32Sub)
.instruction(&Instruction::GlobalSet(stack_len_global));
};
let mut code = wasm_encoder::CodeSection::new();
for (func_index, func) in self.funcs.iter().enumerate() {
let mut body = wasm_encoder::Function::new(vec![]);
push_func_to_stack(
&mut body,
num_imported_funcs + u32::try_from(func_index).unwrap(),
);
check_fuel(&mut body);
// Perform our specified operations.
for op in &func.ops {
match op {
Op::CheckStackInHost => {
body.instruction(&Instruction::Call(check_stack_func));
}
Op::Call(f) => {
body.instruction(&Instruction::Call(f + num_imported_funcs));
}
Op::CallThroughHost(f) => {
body.instruction(&Instruction::RefFunc(f + num_imported_funcs))
.instruction(&Instruction::Call(call_func_func));
}
}
}
// Potentially trap at the end of our function as well, so that we
// exercise the scenario where the Wasm-to-host trampoline
// initialized `last_wasm_exit_sp` et al when calling out to a host
// function, but then we returned back to Wasm and then trapped
// while `last_wasm_exit_sp` et al are still initialized from that
// previous host call.
check_fuel(&mut body);
pop_func_from_stack(&mut body);
function(&mut code, body);
}
let mut run_body = wasm_encoder::Function::new(vec![]);
// Reset the bump pointer for the internal stack (this allows us to
// reuse an instance in the oracle, rather than re-instantiate).
run_body
.instruction(&Instruction::I32Const(0))
.instruction(&Instruction::GlobalSet(stack_len_global));
// Initialize the fuel global.
run_body
.instruction(&Instruction::LocalGet(0))
.instruction(&Instruction::GlobalSet(fuel_global));
push_func_to_stack(&mut run_body, run_func);
// Make sure to check for out-of-fuel in the `run` function as well, so
// that we also capture stack traces with only one frame, not just `run`
// followed by the first locally-defined function and then zero or more
// extra frames.
check_fuel(&mut run_body);
// Call the first locally defined function.
run_body.instruction(&Instruction::Call(num_imported_funcs));
check_fuel(&mut run_body);
pop_func_from_stack(&mut run_body);
function(&mut code, run_body);
let mut get_stack_body = wasm_encoder::Function::new(vec![]);
get_stack_body
.instruction(&Instruction::I32Const(0))
.instruction(&Instruction::GlobalGet(stack_len_global));
function(&mut code, get_stack_body);
section(&mut module, code);
return module.finish();
// Helper that defines a section in the module and takes ownership of it
// so that it is dropped and its memory reclaimed after adding it to the
// module.
fn section(module: &mut wasm_encoder::Module, section: impl wasm_encoder::Section) {
module.section(§ion);
}
// Helper that defines a function body in the code section and takes
// ownership of it so that it is dropped and its memory reclaimed after
// adding it to the module.
fn function(code: &mut wasm_encoder::CodeSection, mut func: wasm_encoder::Function) {
func.instruction(&Instruction::End);
code.function(&func);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand::prelude::*;
use wasmparser::Validator;
#[test]
fn stacks_generates_valid_wasm_modules() |
fn validate(wasm: &[u8]) {
let mut validator = Validator::new();
let err = match validator.validate_all(wasm) {
Ok(_) => return,
Err(e) => e,
};
drop(std::fs::write("test.wasm", wasm));
if let Ok(text) = wasmprinter::print_bytes(wasm) {
drop(std::fs::write("test.wat", &text));
}
panic!("wasm failed to validate: {}", err);
}
}
| {
let mut rng = SmallRng::seed_from_u64(0);
let mut buf = vec![0; 2048];
for _ in 0..1024 {
rng.fill_bytes(&mut buf);
let u = Unstructured::new(&buf);
if let Ok(stacks) = Stacks::arbitrary_take_rest(u) {
let wasm = stacks.wasm();
validate(&wasm);
}
}
} | identifier_body |
stacks.rs | //! Generate a Wasm program that keeps track of its current stack frames.
//!
//! We can then compare the stack trace we observe in Wasmtime to what the Wasm
//! program believes its stack should be. Any discrepencies between the two
//! points to a bug in either this test case generator or Wasmtime's stack
//! walker.
use std::mem;
use arbitrary::{Arbitrary, Result, Unstructured};
use wasm_encoder::Instruction;
const MAX_FUNCS: usize = 20;
const MAX_OPS: usize = 1_000;
/// Generate a Wasm module that keeps track of its current call stack, to
/// compare to the host.
#[derive(Debug)]
pub struct Stacks {
funcs: Vec<Function>,
inputs: Vec<u8>,
}
#[derive(Debug, Default)]
struct Function {
ops: Vec<Op>,
}
#[derive(Arbitrary, Debug, Clone, Copy)]
enum Op {
CheckStackInHost,
Call(u32),
CallThroughHost(u32),
}
impl<'a> Arbitrary<'a> for Stacks {
fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
let funcs = Self::arbitrary_funcs(u)?;
let n = u.len().min(200);
let inputs = u.bytes(n)?.to_vec();
Ok(Stacks { funcs, inputs })
}
}
impl Stacks {
fn arbitrary_funcs(u: &mut Unstructured) -> Result<Vec<Function>> {
let mut funcs = vec![Function::default()];
// The indices of functions within `funcs` that we still need to
// generate.
let mut work_list = vec![0];
while let Some(f) = work_list.pop() {
let mut ops = Vec::with_capacity(u.arbitrary_len::<Op>()?.min(MAX_OPS));
for _ in 0..ops.capacity() {
ops.push(u.arbitrary()?);
}
for op in &mut ops {
match op {
Op::CallThroughHost(idx) | Op::Call(idx) => {
if u.is_empty() || funcs.len() >= MAX_FUNCS || u.ratio(4, 5)? {
// Call an existing function.
*idx = *idx % u32::try_from(funcs.len()).unwrap();
} else {
// Call a new function...
*idx = u32::try_from(funcs.len()).unwrap();
// ...which means we also need to eventually define it.
work_list.push(funcs.len());
funcs.push(Function::default());
}
}
Op::CheckStackInHost => {}
}
}
funcs[f].ops = ops;
}
Ok(funcs)
}
/// Get the input values to run the Wasm module with.
pub fn inputs(&self) -> &[u8] {
&self.inputs
}
/// Get this test case's Wasm module.
///
/// The Wasm module has the following imports:
///
/// * `host.check_stack: [] -> []`: The host can check the Wasm's
/// understanding of its own stack against the host's understanding of the
/// Wasm stack to find discrepency bugs.
///
/// * `host.call_func: [funcref] -> []`: The host should call the given
/// `funcref`, creating a call stack with multiple sequences of contiguous
/// Wasm frames on the stack like `[..., wasm, host, wasm]`.
///
/// The Wasm module has the following exports:
///
/// * `run: [i32] -> []`: This function should be called with each of the
/// input values to run this generated test case.
///
/// * `get_stack: [] -> [i32 i32]`: Get the pointer and length of the `u32`
/// array of this Wasm's understanding of its stack. This is useful for
/// checking whether the host's view of the stack at a trap matches the
/// Wasm program's understanding.
pub fn wasm(&self) -> Vec<u8> {
let mut module = wasm_encoder::Module::new();
let mut types = wasm_encoder::TypeSection::new();
let run_type = types.len();
types.function(vec![wasm_encoder::ValType::I32], vec![]);
| vec![wasm_encoder::ValType::I32, wasm_encoder::ValType::I32],
);
let null_type = types.len();
types.function(vec![], vec![]);
let call_func_type = types.len();
types.function(vec![wasm_encoder::ValType::FUNCREF], vec![]);
section(&mut module, types);
let mut imports = wasm_encoder::ImportSection::new();
let check_stack_func = 0;
imports.import(
"host",
"check_stack",
wasm_encoder::EntityType::Function(null_type),
);
let call_func_func = 1;
imports.import(
"host",
"call_func",
wasm_encoder::EntityType::Function(call_func_type),
);
let num_imported_funcs = 2;
section(&mut module, imports);
let mut funcs = wasm_encoder::FunctionSection::new();
for _ in &self.funcs {
funcs.function(null_type);
}
let run_func = funcs.len() + num_imported_funcs;
funcs.function(run_type);
let get_stack_func = funcs.len() + num_imported_funcs;
funcs.function(get_stack_type);
section(&mut module, funcs);
let mut mems = wasm_encoder::MemorySection::new();
let memory = mems.len();
mems.memory(wasm_encoder::MemoryType {
minimum: 1,
maximum: Some(1),
memory64: false,
shared: false,
});
section(&mut module, mems);
let mut globals = wasm_encoder::GlobalSection::new();
let fuel_global = globals.len();
globals.global(
wasm_encoder::GlobalType {
val_type: wasm_encoder::ValType::I32,
mutable: true,
},
&wasm_encoder::ConstExpr::i32_const(0),
);
let stack_len_global = globals.len();
globals.global(
wasm_encoder::GlobalType {
val_type: wasm_encoder::ValType::I32,
mutable: true,
},
&wasm_encoder::ConstExpr::i32_const(0),
);
section(&mut module, globals);
let mut exports = wasm_encoder::ExportSection::new();
exports.export("run", wasm_encoder::ExportKind::Func, run_func);
exports.export("get_stack", wasm_encoder::ExportKind::Func, get_stack_func);
exports.export("memory", wasm_encoder::ExportKind::Memory, memory);
exports.export("fuel", wasm_encoder::ExportKind::Global, fuel_global);
section(&mut module, exports);
let mut elems = wasm_encoder::ElementSection::new();
elems.declared(wasm_encoder::Elements::Functions(
&(0..num_imported_funcs + u32::try_from(self.funcs.len()).unwrap()).collect::<Vec<_>>(),
));
section(&mut module, elems);
let check_fuel = |body: &mut wasm_encoder::Function| {
// Trap if we are out of fuel.
body.instruction(&Instruction::GlobalGet(fuel_global))
.instruction(&Instruction::I32Eqz)
.instruction(&Instruction::If(wasm_encoder::BlockType::Empty))
.instruction(&Instruction::Unreachable)
.instruction(&Instruction::End);
// Decrement fuel.
body.instruction(&Instruction::GlobalGet(fuel_global))
.instruction(&Instruction::I32Const(1))
.instruction(&Instruction::I32Sub)
.instruction(&Instruction::GlobalSet(fuel_global));
};
let push_func_to_stack = |body: &mut wasm_encoder::Function, func: u32| {
// Add this function to our internal stack.
//
// Note that we know our `stack_len_global` can't go beyond memory
// bounds because we limit fuel to at most `u8::MAX` and each stack
// entry is an `i32` and `u8::MAX * size_of(i32)` still fits in one
// Wasm page.
body.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(func as i32))
.instruction(&Instruction::I32Store(wasm_encoder::MemArg {
offset: 0,
align: 0,
memory_index: memory,
}))
.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(mem::size_of::<i32>() as i32))
.instruction(&Instruction::I32Add)
.instruction(&Instruction::GlobalSet(stack_len_global));
};
let pop_func_from_stack = |body: &mut wasm_encoder::Function| {
// Remove this function from our internal stack.
body.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(mem::size_of::<i32>() as i32))
.instruction(&Instruction::I32Sub)
.instruction(&Instruction::GlobalSet(stack_len_global));
};
let mut code = wasm_encoder::CodeSection::new();
for (func_index, func) in self.funcs.iter().enumerate() {
let mut body = wasm_encoder::Function::new(vec![]);
push_func_to_stack(
&mut body,
num_imported_funcs + u32::try_from(func_index).unwrap(),
);
check_fuel(&mut body);
// Perform our specified operations.
for op in &func.ops {
match op {
Op::CheckStackInHost => {
body.instruction(&Instruction::Call(check_stack_func));
}
Op::Call(f) => {
body.instruction(&Instruction::Call(f + num_imported_funcs));
}
Op::CallThroughHost(f) => {
body.instruction(&Instruction::RefFunc(f + num_imported_funcs))
.instruction(&Instruction::Call(call_func_func));
}
}
}
// Potentially trap at the end of our function as well, so that we
// exercise the scenario where the Wasm-to-host trampoline
// initialized `last_wasm_exit_sp` et al when calling out to a host
// function, but then we returned back to Wasm and then trapped
// while `last_wasm_exit_sp` et al are still initialized from that
// previous host call.
check_fuel(&mut body);
pop_func_from_stack(&mut body);
function(&mut code, body);
}
let mut run_body = wasm_encoder::Function::new(vec![]);
// Reset the bump pointer for the internal stack (this allows us to
// reuse an instance in the oracle, rather than re-instantiate).
run_body
.instruction(&Instruction::I32Const(0))
.instruction(&Instruction::GlobalSet(stack_len_global));
// Initialize the fuel global.
run_body
.instruction(&Instruction::LocalGet(0))
.instruction(&Instruction::GlobalSet(fuel_global));
push_func_to_stack(&mut run_body, run_func);
// Make sure to check for out-of-fuel in the `run` function as well, so
// that we also capture stack traces with only one frame, not just `run`
// followed by the first locally-defined function and then zero or more
// extra frames.
check_fuel(&mut run_body);
// Call the first locally defined function.
run_body.instruction(&Instruction::Call(num_imported_funcs));
check_fuel(&mut run_body);
pop_func_from_stack(&mut run_body);
function(&mut code, run_body);
let mut get_stack_body = wasm_encoder::Function::new(vec![]);
get_stack_body
.instruction(&Instruction::I32Const(0))
.instruction(&Instruction::GlobalGet(stack_len_global));
function(&mut code, get_stack_body);
section(&mut module, code);
return module.finish();
// Helper that defines a section in the module and takes ownership of it
// so that it is dropped and its memory reclaimed after adding it to the
// module.
fn section(module: &mut wasm_encoder::Module, section: impl wasm_encoder::Section) {
module.section(§ion);
}
// Helper that defines a function body in the code section and takes
// ownership of it so that it is dropped and its memory reclaimed after
// adding it to the module.
fn function(code: &mut wasm_encoder::CodeSection, mut func: wasm_encoder::Function) {
func.instruction(&Instruction::End);
code.function(&func);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand::prelude::*;
use wasmparser::Validator;
#[test]
fn stacks_generates_valid_wasm_modules() {
let mut rng = SmallRng::seed_from_u64(0);
let mut buf = vec![0; 2048];
for _ in 0..1024 {
rng.fill_bytes(&mut buf);
let u = Unstructured::new(&buf);
if let Ok(stacks) = Stacks::arbitrary_take_rest(u) {
let wasm = stacks.wasm();
validate(&wasm);
}
}
}
fn validate(wasm: &[u8]) {
let mut validator = Validator::new();
let err = match validator.validate_all(wasm) {
Ok(_) => return,
Err(e) => e,
};
drop(std::fs::write("test.wasm", wasm));
if let Ok(text) = wasmprinter::print_bytes(wasm) {
drop(std::fs::write("test.wat", &text));
}
panic!("wasm failed to validate: {}", err);
}
} | let get_stack_type = types.len();
types.function(
vec![], | random_line_split |
stacks.rs | //! Generate a Wasm program that keeps track of its current stack frames.
//!
//! We can then compare the stack trace we observe in Wasmtime to what the Wasm
//! program believes its stack should be. Any discrepencies between the two
//! points to a bug in either this test case generator or Wasmtime's stack
//! walker.
use std::mem;
use arbitrary::{Arbitrary, Result, Unstructured};
use wasm_encoder::Instruction;
const MAX_FUNCS: usize = 20;
const MAX_OPS: usize = 1_000;
/// Generate a Wasm module that keeps track of its current call stack, to
/// compare to the host.
#[derive(Debug)]
pub struct Stacks {
funcs: Vec<Function>,
inputs: Vec<u8>,
}
#[derive(Debug, Default)]
struct Function {
ops: Vec<Op>,
}
#[derive(Arbitrary, Debug, Clone, Copy)]
enum Op {
CheckStackInHost,
Call(u32),
CallThroughHost(u32),
}
impl<'a> Arbitrary<'a> for Stacks {
fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
let funcs = Self::arbitrary_funcs(u)?;
let n = u.len().min(200);
let inputs = u.bytes(n)?.to_vec();
Ok(Stacks { funcs, inputs })
}
}
impl Stacks {
fn | (u: &mut Unstructured) -> Result<Vec<Function>> {
let mut funcs = vec![Function::default()];
// The indices of functions within `funcs` that we still need to
// generate.
let mut work_list = vec![0];
while let Some(f) = work_list.pop() {
let mut ops = Vec::with_capacity(u.arbitrary_len::<Op>()?.min(MAX_OPS));
for _ in 0..ops.capacity() {
ops.push(u.arbitrary()?);
}
for op in &mut ops {
match op {
Op::CallThroughHost(idx) | Op::Call(idx) => {
if u.is_empty() || funcs.len() >= MAX_FUNCS || u.ratio(4, 5)? {
// Call an existing function.
*idx = *idx % u32::try_from(funcs.len()).unwrap();
} else {
// Call a new function...
*idx = u32::try_from(funcs.len()).unwrap();
// ...which means we also need to eventually define it.
work_list.push(funcs.len());
funcs.push(Function::default());
}
}
Op::CheckStackInHost => {}
}
}
funcs[f].ops = ops;
}
Ok(funcs)
}
/// Get the input values to run the Wasm module with.
pub fn inputs(&self) -> &[u8] {
&self.inputs
}
/// Get this test case's Wasm module.
///
/// The Wasm module has the following imports:
///
/// * `host.check_stack: [] -> []`: The host can check the Wasm's
/// understanding of its own stack against the host's understanding of the
/// Wasm stack to find discrepency bugs.
///
/// * `host.call_func: [funcref] -> []`: The host should call the given
/// `funcref`, creating a call stack with multiple sequences of contiguous
/// Wasm frames on the stack like `[..., wasm, host, wasm]`.
///
/// The Wasm module has the following exports:
///
/// * `run: [i32] -> []`: This function should be called with each of the
/// input values to run this generated test case.
///
/// * `get_stack: [] -> [i32 i32]`: Get the pointer and length of the `u32`
/// array of this Wasm's understanding of its stack. This is useful for
/// checking whether the host's view of the stack at a trap matches the
/// Wasm program's understanding.
pub fn wasm(&self) -> Vec<u8> {
let mut module = wasm_encoder::Module::new();
let mut types = wasm_encoder::TypeSection::new();
let run_type = types.len();
types.function(vec![wasm_encoder::ValType::I32], vec![]);
let get_stack_type = types.len();
types.function(
vec![],
vec![wasm_encoder::ValType::I32, wasm_encoder::ValType::I32],
);
let null_type = types.len();
types.function(vec![], vec![]);
let call_func_type = types.len();
types.function(vec![wasm_encoder::ValType::FUNCREF], vec![]);
section(&mut module, types);
let mut imports = wasm_encoder::ImportSection::new();
let check_stack_func = 0;
imports.import(
"host",
"check_stack",
wasm_encoder::EntityType::Function(null_type),
);
let call_func_func = 1;
imports.import(
"host",
"call_func",
wasm_encoder::EntityType::Function(call_func_type),
);
let num_imported_funcs = 2;
section(&mut module, imports);
let mut funcs = wasm_encoder::FunctionSection::new();
for _ in &self.funcs {
funcs.function(null_type);
}
let run_func = funcs.len() + num_imported_funcs;
funcs.function(run_type);
let get_stack_func = funcs.len() + num_imported_funcs;
funcs.function(get_stack_type);
section(&mut module, funcs);
let mut mems = wasm_encoder::MemorySection::new();
let memory = mems.len();
mems.memory(wasm_encoder::MemoryType {
minimum: 1,
maximum: Some(1),
memory64: false,
shared: false,
});
section(&mut module, mems);
let mut globals = wasm_encoder::GlobalSection::new();
let fuel_global = globals.len();
globals.global(
wasm_encoder::GlobalType {
val_type: wasm_encoder::ValType::I32,
mutable: true,
},
&wasm_encoder::ConstExpr::i32_const(0),
);
let stack_len_global = globals.len();
globals.global(
wasm_encoder::GlobalType {
val_type: wasm_encoder::ValType::I32,
mutable: true,
},
&wasm_encoder::ConstExpr::i32_const(0),
);
section(&mut module, globals);
let mut exports = wasm_encoder::ExportSection::new();
exports.export("run", wasm_encoder::ExportKind::Func, run_func);
exports.export("get_stack", wasm_encoder::ExportKind::Func, get_stack_func);
exports.export("memory", wasm_encoder::ExportKind::Memory, memory);
exports.export("fuel", wasm_encoder::ExportKind::Global, fuel_global);
section(&mut module, exports);
let mut elems = wasm_encoder::ElementSection::new();
elems.declared(wasm_encoder::Elements::Functions(
&(0..num_imported_funcs + u32::try_from(self.funcs.len()).unwrap()).collect::<Vec<_>>(),
));
section(&mut module, elems);
let check_fuel = |body: &mut wasm_encoder::Function| {
// Trap if we are out of fuel.
body.instruction(&Instruction::GlobalGet(fuel_global))
.instruction(&Instruction::I32Eqz)
.instruction(&Instruction::If(wasm_encoder::BlockType::Empty))
.instruction(&Instruction::Unreachable)
.instruction(&Instruction::End);
// Decrement fuel.
body.instruction(&Instruction::GlobalGet(fuel_global))
.instruction(&Instruction::I32Const(1))
.instruction(&Instruction::I32Sub)
.instruction(&Instruction::GlobalSet(fuel_global));
};
let push_func_to_stack = |body: &mut wasm_encoder::Function, func: u32| {
// Add this function to our internal stack.
//
// Note that we know our `stack_len_global` can't go beyond memory
// bounds because we limit fuel to at most `u8::MAX` and each stack
// entry is an `i32` and `u8::MAX * size_of(i32)` still fits in one
// Wasm page.
body.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(func as i32))
.instruction(&Instruction::I32Store(wasm_encoder::MemArg {
offset: 0,
align: 0,
memory_index: memory,
}))
.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(mem::size_of::<i32>() as i32))
.instruction(&Instruction::I32Add)
.instruction(&Instruction::GlobalSet(stack_len_global));
};
let pop_func_from_stack = |body: &mut wasm_encoder::Function| {
// Remove this function from our internal stack.
body.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(mem::size_of::<i32>() as i32))
.instruction(&Instruction::I32Sub)
.instruction(&Instruction::GlobalSet(stack_len_global));
};
let mut code = wasm_encoder::CodeSection::new();
for (func_index, func) in self.funcs.iter().enumerate() {
let mut body = wasm_encoder::Function::new(vec![]);
push_func_to_stack(
&mut body,
num_imported_funcs + u32::try_from(func_index).unwrap(),
);
check_fuel(&mut body);
// Perform our specified operations.
for op in &func.ops {
match op {
Op::CheckStackInHost => {
body.instruction(&Instruction::Call(check_stack_func));
}
Op::Call(f) => {
body.instruction(&Instruction::Call(f + num_imported_funcs));
}
Op::CallThroughHost(f) => {
body.instruction(&Instruction::RefFunc(f + num_imported_funcs))
.instruction(&Instruction::Call(call_func_func));
}
}
}
// Potentially trap at the end of our function as well, so that we
// exercise the scenario where the Wasm-to-host trampoline
// initialized `last_wasm_exit_sp` et al when calling out to a host
// function, but then we returned back to Wasm and then trapped
// while `last_wasm_exit_sp` et al are still initialized from that
// previous host call.
check_fuel(&mut body);
pop_func_from_stack(&mut body);
function(&mut code, body);
}
let mut run_body = wasm_encoder::Function::new(vec![]);
// Reset the bump pointer for the internal stack (this allows us to
// reuse an instance in the oracle, rather than re-instantiate).
run_body
.instruction(&Instruction::I32Const(0))
.instruction(&Instruction::GlobalSet(stack_len_global));
// Initialize the fuel global.
run_body
.instruction(&Instruction::LocalGet(0))
.instruction(&Instruction::GlobalSet(fuel_global));
push_func_to_stack(&mut run_body, run_func);
// Make sure to check for out-of-fuel in the `run` function as well, so
// that we also capture stack traces with only one frame, not just `run`
// followed by the first locally-defined function and then zero or more
// extra frames.
check_fuel(&mut run_body);
// Call the first locally defined function.
run_body.instruction(&Instruction::Call(num_imported_funcs));
check_fuel(&mut run_body);
pop_func_from_stack(&mut run_body);
function(&mut code, run_body);
let mut get_stack_body = wasm_encoder::Function::new(vec![]);
get_stack_body
.instruction(&Instruction::I32Const(0))
.instruction(&Instruction::GlobalGet(stack_len_global));
function(&mut code, get_stack_body);
section(&mut module, code);
return module.finish();
// Helper that defines a section in the module and takes ownership of it
// so that it is dropped and its memory reclaimed after adding it to the
// module.
fn section(module: &mut wasm_encoder::Module, section: impl wasm_encoder::Section) {
module.section(§ion);
}
// Helper that defines a function body in the code section and takes
// ownership of it so that it is dropped and its memory reclaimed after
// adding it to the module.
fn function(code: &mut wasm_encoder::CodeSection, mut func: wasm_encoder::Function) {
func.instruction(&Instruction::End);
code.function(&func);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand::prelude::*;
use wasmparser::Validator;
#[test]
fn stacks_generates_valid_wasm_modules() {
let mut rng = SmallRng::seed_from_u64(0);
let mut buf = vec![0; 2048];
for _ in 0..1024 {
rng.fill_bytes(&mut buf);
let u = Unstructured::new(&buf);
if let Ok(stacks) = Stacks::arbitrary_take_rest(u) {
let wasm = stacks.wasm();
validate(&wasm);
}
}
}
fn validate(wasm: &[u8]) {
let mut validator = Validator::new();
let err = match validator.validate_all(wasm) {
Ok(_) => return,
Err(e) => e,
};
drop(std::fs::write("test.wasm", wasm));
if let Ok(text) = wasmprinter::print_bytes(wasm) {
drop(std::fs::write("test.wat", &text));
}
panic!("wasm failed to validate: {}", err);
}
}
| arbitrary_funcs | identifier_name |
uri.ts | import { assert, escapeRegex } from '@hapi/hoek';
interface RFC3986 {
ipv4address: string;
ipv4Cidr: string;
ipv6Cidr: string;
ipv6address: string;
ipvFuture: string;
scheme: string;
schemeRegex: RegExp;
hierPart: string;
hierPartCapture: string;
relativeRef: string;
relativeRefCapture: string;
query: string;
queryWithSquareBrackets: string;
fragment: string;
}
function generate() {
const rfc3986 = {} as RFC3986;
const hexDigit = '\\dA-Fa-f'; // HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F"
const hexDigitOnly = '[' + hexDigit + ']';
const unreserved = '\\w-\\.~'; // unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
const subDelims = "!\\$&'\\(\\)\\*\\+,;="; // sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
const pctEncoded = '%' + hexDigit; // pct-encoded = "%" HEXDIG HEXDIG
const pchar = unreserved + pctEncoded + subDelims + ':@'; // pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
const pcharOnly = '[' + pchar + ']';
const decOctect = '(?:0{0,2}\\d|0?[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])'; // dec-octet = DIGIT / %x31-39 DIGIT / "1" 2DIGIT / "2" %x30-34 DIGIT / "25" %x30-35 ; 0-9 / 10-99 / 100-199 / 200-249 / 250-255
rfc3986.ipv4address = '(?:' + decOctect + '\\.){3}' + decOctect; // IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
/*
h16 = 1*4HEXDIG ; 16 bits of address represented in hexadecimal
ls32 = ( h16 ":" h16 ) / IPv4address ; least-significant 32 bits of address
IPv6address = 6( h16 ":" ) ls32
/ "::" 5( h16 ":" ) ls32
/ [ h16 ] "::" 4( h16 ":" ) ls32
/ [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
/ [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
/ [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
/ [ *4( h16 ":" ) h16 ] "::" ls32
/ [ *5( h16 ":" ) h16 ] "::" h16
/ [ *6( h16 ":" ) h16 ] "::"
*/
const h16 = hexDigitOnly + '{1,4}';
const ls32 = '(?:' + h16 + ':' + h16 + '|' + rfc3986.ipv4address + ')';
const IPv6SixHex = '(?:' + h16 + ':){6}' + ls32;
const IPv6FiveHex = '::(?:' + h16 + ':){5}' + ls32;
const IPv6FourHex = '(?:' + h16 + ')?::(?:' + h16 + ':){4}' + ls32;
const IPv6ThreeHex = '(?:(?:' + h16 + ':){0,1}' + h16 + ')?::(?:' + h16 + ':){3}' + ls32;
const IPv6TwoHex = '(?:(?:' + h16 + ':){0,2}' + h16 + ')?::(?:' + h16 + ':){2}' + ls32;
const IPv6OneHex = '(?:(?:' + h16 + ':){0,3}' + h16 + ')?::' + h16 + ':' + ls32;
const IPv6NoneHex = '(?:(?:' + h16 + ':){0,4}' + h16 + ')?::' + ls32;
const IPv6NoneHex2 = '(?:(?:' + h16 + ':){0,5}' + h16 + ')?::' + h16;
const IPv6NoneHex3 = '(?:(?:' + h16 + ':){0,6}' + h16 + ')?::';
rfc3986.ipv4Cidr = '(?:\\d|[1-2]\\d|3[0-2])'; // IPv4 cidr = DIGIT / %x31-32 DIGIT / "3" %x30-32 ; 0-9 / 10-29 / 30-32
rfc3986.ipv6Cidr = '(?:0{0,2}\\d|0?[1-9]\\d|1[01]\\d|12[0-8])'; // IPv6 cidr = DIGIT / %x31-39 DIGIT / "1" %x0-1 DIGIT / "12" %x0-8; 0-9 / 10-99 / 100-119 / 120-128
rfc3986.ipv6address =
'(?:' +
IPv6SixHex +
'|' +
IPv6FiveHex +
'|' +
IPv6FourHex +
'|' +
IPv6ThreeHex +
'|' +
IPv6TwoHex +
'|' +
IPv6OneHex +
'|' +
IPv6NoneHex +
'|' +
IPv6NoneHex2 +
'|' +
IPv6NoneHex3 +
')';
rfc3986.ipvFuture = 'v' + hexDigitOnly + '+\\.[' + unreserved + subDelims + ':]+'; // IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
rfc3986.scheme = '[a-zA-Z][a-zA-Z\\d+-\\.]*'; // scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
rfc3986.schemeRegex = new RegExp(rfc3986.scheme);
const userinfo = '[' + unreserved + pctEncoded + subDelims + ':]*'; // userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
const IPLiteral = '\\[(?:' + rfc3986.ipv6address + '|' + rfc3986.ipvFuture + ')\\]'; // IP-literal = "[" ( IPv6address / IPvFuture ) "]"
const regName = '[' + unreserved + pctEncoded + subDelims + ']{1,255}'; // reg-name = *( unreserved / pct-encoded / sub-delims )
const host = '(?:' + IPLiteral + '|' + rfc3986.ipv4address + '|' + regName + ')'; // host = IP-literal / IPv4address / reg-name
const port = '\\d*'; // port = *DIGIT
const authority = '(?:' + userinfo + '@)?' + host + '(?::' + port + ')?'; // authority = [ userinfo "@" ] host [ ":" port ]
const authorityCapture = '(?:' + userinfo + '@)?(' + host + ')(?::' + port + ')?';
/*
segment = *pchar
segment-nz = 1*pchar
path = path-abempty ; begins with "/" '|' is empty
/ path-absolute ; begins with "/" but not "//"
/ path-noscheme ; begins with a non-colon segment
/ path-rootless ; begins with a segment
/ path-empty ; zero characters
path-abempty = *( "/" segment )
path-absolute = "/" [ segment-nz *( "/" segment ) ]
path-rootless = segment-nz *( "/" segment )
*/
const segment = pcharOnly + '*';
const segmentNz = pcharOnly + '+';
const segmentNzNc = '[' + unreserved + pctEncoded + subDelims + '@' + ']+';
const pathEmpty = '';
const pathAbEmpty = '(?:\\/' + segment + ')*';
const pathAbsolute = '\\/(?:' + segmentNz + pathAbEmpty + ')?';
const pathRootless = segmentNz + pathAbEmpty;
const pathNoScheme = segmentNzNc + pathAbEmpty;
const pathAbNoAuthority = '(?:\\/\\/\\/' + segment + pathAbEmpty + ')'; // Used by file:///
// hier-part = "//" authority path
rfc3986.hierPart =
'(?:' +
'(?:\\/\\/' +
authority +
pathAbEmpty +
')' +
'|' +
pathAbsolute +
'|' +
pathRootless +
'|' +
pathAbNoAuthority +
')';
rfc3986.hierPartCapture =
'(?:' + '(?:\\/\\/' + authorityCapture + pathAbEmpty + ')' + '|' + pathAbsolute + '|' + pathRootless + ')';
// relative-part = "//" authority path-abempty / path-absolute / path-noscheme / path-empty
rfc3986.relativeRef =
'(?:' +
'(?:\\/\\/' +
authority +
pathAbEmpty +
')' +
'|' +
pathAbsolute +
'|' +
pathNoScheme +
'|' +
pathEmpty +
')';
rfc3986.relativeRefCapture =
'(?:' +
'(?:\\/\\/' +
authorityCapture +
pathAbEmpty +
')' +
'|' +
pathAbsolute +
'|' +
pathNoScheme +
'|' +
pathEmpty +
')';
// query = *( pchar / "/" / "?" )
// query = *( pchar / "[" / "]" / "/" / "?" )
rfc3986.query = '[' + pchar + '\\/\\?]*(?=#|$)'; //Finish matching either at the fragment part '|' end of the line.
rfc3986.queryWithSquareBrackets = '[' + pchar + '\\[\\]\\/\\?]*(?=#|$)';
// fragment = *( pchar / "/" / "?" )
rfc3986.fragment = '[' + pchar + '\\/\\?]*';
return rfc3986;
}
const rfc3986 = generate();
export const ipVersions = {
v4Cidr: rfc3986.ipv4Cidr,
v6Cidr: rfc3986.ipv6Cidr,
ipv4: rfc3986.ipv4address,
ipv6: rfc3986.ipv6address,
ipvfuture: rfc3986.ipvFuture
};
function | (options: Options) {
const rfc = rfc3986;
// Construct expression
const query = options.allowQuerySquareBrackets ? rfc.queryWithSquareBrackets : rfc.query;
const suffix = '(?:\\?' + query + ')?' + '(?:#' + rfc.fragment + ')?';
// relative-ref = relative-part [ "?" query ] [ "#" fragment ]
const relative = options.domain ? rfc.relativeRefCapture : rfc.relativeRef;
if (options.relativeOnly) {
return wrap(relative + suffix);
}
// Custom schemes
let customScheme = '';
if (options.scheme) {
assert(
options.scheme instanceof RegExp || typeof options.scheme === 'string' || Array.isArray(options.scheme),
'scheme must be a RegExp, String, or Array'
);
const schemes = [].concat(options.scheme);
assert(schemes.length >= 1, 'scheme must have at least 1 scheme specified');
// Flatten the array into a string to be used to match the schemes
const selections = [];
for (let i = 0; i < schemes.length; ++i) {
const scheme = schemes[i];
assert(
scheme instanceof RegExp || typeof scheme === 'string',
'scheme at position ' + i + ' must be a RegExp or String'
);
if (scheme instanceof RegExp) {
selections.push(scheme.source.toString());
} else {
assert(rfc.schemeRegex.test(scheme), 'scheme at position ' + i + ' must be a valid scheme');
selections.push(escapeRegex(scheme));
}
}
customScheme = selections.join('|');
}
// URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
const scheme = customScheme ? '(?:' + customScheme + ')' : rfc.scheme;
const absolute = '(?:' + scheme + ':' + (options.domain ? rfc.hierPartCapture : rfc.hierPart) + ')';
const prefix = options.allowRelative ? '(?:' + absolute + '|' + relative + ')' : absolute;
return wrap(prefix + suffix, customScheme);
}
interface Expression {
/** The raw regular expression string. */
raw: string;
/** The regular expression. */
regex: RegExp;
/** The specified URI scheme */
scheme: string | null;
}
function wrap(raw: string, scheme: string = null): Expression {
raw = `(?=.)(?!https?\:/(?:$|[^/]))(?!https?\:///)(?!https?\:[^/])${raw}`; // Require at least one character and explicitly forbid 'http:/' or HTTP with empty domain
return {
raw,
regex: new RegExp(`^${raw}$`),
scheme
};
}
const genericUriRegex = createRegex({});
/**
* Generates a regular expression used to validate URI addresses.
*
* @param options - optional settings.
*
* @returns an object with the regular expression and meta data.
*/
export function uriRegex(options: Options = {}) {
if (
options.scheme ||
options.allowRelative ||
options.relativeOnly ||
options.allowQuerySquareBrackets ||
options.domain
) {
return createRegex(options);
}
return genericUriRegex;
}
type Scheme = string | RegExp;
interface Options {
/**
* Allow the use of [] in query parameters.
*
* @default false
*/
readonly allowQuerySquareBrackets?: boolean;
/**
* Allow relative URIs.
*
* @default false
*/
readonly allowRelative?: boolean;
/**
* Requires the URI to be relative.
*
* @default false
*/
readonly relativeOnly?: boolean;
/**
* Capture domain segment ($1).
*
* @default false
*/
readonly domain?: boolean;
/**
* The allowed URI schemes.
*/
readonly scheme?: Scheme | Scheme[];
}
| createRegex | identifier_name |
uri.ts | import { assert, escapeRegex } from '@hapi/hoek';
interface RFC3986 {
ipv4address: string;
ipv4Cidr: string;
ipv6Cidr: string;
ipv6address: string;
ipvFuture: string;
scheme: string;
schemeRegex: RegExp;
hierPart: string;
hierPartCapture: string;
relativeRef: string;
relativeRefCapture: string;
query: string;
queryWithSquareBrackets: string;
fragment: string;
}
function generate() {
const rfc3986 = {} as RFC3986;
const hexDigit = '\\dA-Fa-f'; // HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F"
const hexDigitOnly = '[' + hexDigit + ']';
const unreserved = '\\w-\\.~'; // unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
const subDelims = "!\\$&'\\(\\)\\*\\+,;="; // sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
const pctEncoded = '%' + hexDigit; // pct-encoded = "%" HEXDIG HEXDIG
const pchar = unreserved + pctEncoded + subDelims + ':@'; // pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
const pcharOnly = '[' + pchar + ']';
const decOctect = '(?:0{0,2}\\d|0?[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])'; // dec-octet = DIGIT / %x31-39 DIGIT / "1" 2DIGIT / "2" %x30-34 DIGIT / "25" %x30-35 ; 0-9 / 10-99 / 100-199 / 200-249 / 250-255
rfc3986.ipv4address = '(?:' + decOctect + '\\.){3}' + decOctect; // IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
/*
h16 = 1*4HEXDIG ; 16 bits of address represented in hexadecimal
ls32 = ( h16 ":" h16 ) / IPv4address ; least-significant 32 bits of address
IPv6address = 6( h16 ":" ) ls32
/ "::" 5( h16 ":" ) ls32
/ [ h16 ] "::" 4( h16 ":" ) ls32
/ [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
/ [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
/ [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
/ [ *4( h16 ":" ) h16 ] "::" ls32
/ [ *5( h16 ":" ) h16 ] "::" h16
/ [ *6( h16 ":" ) h16 ] "::"
*/
const h16 = hexDigitOnly + '{1,4}';
const ls32 = '(?:' + h16 + ':' + h16 + '|' + rfc3986.ipv4address + ')';
const IPv6SixHex = '(?:' + h16 + ':){6}' + ls32;
const IPv6FiveHex = '::(?:' + h16 + ':){5}' + ls32;
const IPv6FourHex = '(?:' + h16 + ')?::(?:' + h16 + ':){4}' + ls32;
const IPv6ThreeHex = '(?:(?:' + h16 + ':){0,1}' + h16 + ')?::(?:' + h16 + ':){3}' + ls32;
const IPv6TwoHex = '(?:(?:' + h16 + ':){0,2}' + h16 + ')?::(?:' + h16 + ':){2}' + ls32;
const IPv6OneHex = '(?:(?:' + h16 + ':){0,3}' + h16 + ')?::' + h16 + ':' + ls32;
const IPv6NoneHex = '(?:(?:' + h16 + ':){0,4}' + h16 + ')?::' + ls32;
const IPv6NoneHex2 = '(?:(?:' + h16 + ':){0,5}' + h16 + ')?::' + h16;
const IPv6NoneHex3 = '(?:(?:' + h16 + ':){0,6}' + h16 + ')?::';
rfc3986.ipv4Cidr = '(?:\\d|[1-2]\\d|3[0-2])'; // IPv4 cidr = DIGIT / %x31-32 DIGIT / "3" %x30-32 ; 0-9 / 10-29 / 30-32
rfc3986.ipv6Cidr = '(?:0{0,2}\\d|0?[1-9]\\d|1[01]\\d|12[0-8])'; // IPv6 cidr = DIGIT / %x31-39 DIGIT / "1" %x0-1 DIGIT / "12" %x0-8; 0-9 / 10-99 / 100-119 / 120-128
rfc3986.ipv6address =
'(?:' +
IPv6SixHex +
'|' +
IPv6FiveHex +
'|' +
IPv6FourHex +
'|' +
IPv6ThreeHex +
'|' +
IPv6TwoHex +
'|' +
IPv6OneHex +
'|' +
IPv6NoneHex +
'|' +
IPv6NoneHex2 +
'|' +
IPv6NoneHex3 +
')';
rfc3986.ipvFuture = 'v' + hexDigitOnly + '+\\.[' + unreserved + subDelims + ':]+'; // IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
rfc3986.scheme = '[a-zA-Z][a-zA-Z\\d+-\\.]*'; // scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
rfc3986.schemeRegex = new RegExp(rfc3986.scheme);
const userinfo = '[' + unreserved + pctEncoded + subDelims + ':]*'; // userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
const IPLiteral = '\\[(?:' + rfc3986.ipv6address + '|' + rfc3986.ipvFuture + ')\\]'; // IP-literal = "[" ( IPv6address / IPvFuture ) "]"
const regName = '[' + unreserved + pctEncoded + subDelims + ']{1,255}'; // reg-name = *( unreserved / pct-encoded / sub-delims )
const host = '(?:' + IPLiteral + '|' + rfc3986.ipv4address + '|' + regName + ')'; // host = IP-literal / IPv4address / reg-name
const port = '\\d*'; // port = *DIGIT
const authority = '(?:' + userinfo + '@)?' + host + '(?::' + port + ')?'; // authority = [ userinfo "@" ] host [ ":" port ]
const authorityCapture = '(?:' + userinfo + '@)?(' + host + ')(?::' + port + ')?';
/*
segment = *pchar
segment-nz = 1*pchar
path = path-abempty ; begins with "/" '|' is empty
/ path-absolute ; begins with "/" but not "//"
/ path-noscheme ; begins with a non-colon segment
/ path-rootless ; begins with a segment
/ path-empty ; zero characters
path-abempty = *( "/" segment )
path-absolute = "/" [ segment-nz *( "/" segment ) ]
path-rootless = segment-nz *( "/" segment )
*/
const segment = pcharOnly + '*';
const segmentNz = pcharOnly + '+';
const segmentNzNc = '[' + unreserved + pctEncoded + subDelims + '@' + ']+';
const pathEmpty = '';
const pathAbEmpty = '(?:\\/' + segment + ')*';
const pathAbsolute = '\\/(?:' + segmentNz + pathAbEmpty + ')?';
const pathRootless = segmentNz + pathAbEmpty;
const pathNoScheme = segmentNzNc + pathAbEmpty;
const pathAbNoAuthority = '(?:\\/\\/\\/' + segment + pathAbEmpty + ')'; // Used by file:///
// hier-part = "//" authority path
rfc3986.hierPart =
'(?:' +
'(?:\\/\\/' +
authority +
pathAbEmpty +
')' +
'|' +
pathAbsolute +
'|' +
pathRootless +
'|' +
pathAbNoAuthority +
')';
rfc3986.hierPartCapture =
'(?:' + '(?:\\/\\/' + authorityCapture + pathAbEmpty + ')' + '|' + pathAbsolute + '|' + pathRootless + ')';
// relative-part = "//" authority path-abempty / path-absolute / path-noscheme / path-empty
rfc3986.relativeRef =
'(?:' +
'(?:\\/\\/' +
authority +
pathAbEmpty +
')' +
'|' +
pathAbsolute +
'|' +
pathNoScheme +
'|' +
pathEmpty +
')';
rfc3986.relativeRefCapture =
'(?:' +
'(?:\\/\\/' +
authorityCapture +
pathAbEmpty + | ')' +
'|' +
pathAbsolute +
'|' +
pathNoScheme +
'|' +
pathEmpty +
')';
// query = *( pchar / "/" / "?" )
// query = *( pchar / "[" / "]" / "/" / "?" )
rfc3986.query = '[' + pchar + '\\/\\?]*(?=#|$)'; //Finish matching either at the fragment part '|' end of the line.
rfc3986.queryWithSquareBrackets = '[' + pchar + '\\[\\]\\/\\?]*(?=#|$)';
// fragment = *( pchar / "/" / "?" )
rfc3986.fragment = '[' + pchar + '\\/\\?]*';
return rfc3986;
}
const rfc3986 = generate();
export const ipVersions = {
v4Cidr: rfc3986.ipv4Cidr,
v6Cidr: rfc3986.ipv6Cidr,
ipv4: rfc3986.ipv4address,
ipv6: rfc3986.ipv6address,
ipvfuture: rfc3986.ipvFuture
};
function createRegex(options: Options) {
const rfc = rfc3986;
// Construct expression
const query = options.allowQuerySquareBrackets ? rfc.queryWithSquareBrackets : rfc.query;
const suffix = '(?:\\?' + query + ')?' + '(?:#' + rfc.fragment + ')?';
// relative-ref = relative-part [ "?" query ] [ "#" fragment ]
const relative = options.domain ? rfc.relativeRefCapture : rfc.relativeRef;
if (options.relativeOnly) {
return wrap(relative + suffix);
}
// Custom schemes
let customScheme = '';
if (options.scheme) {
assert(
options.scheme instanceof RegExp || typeof options.scheme === 'string' || Array.isArray(options.scheme),
'scheme must be a RegExp, String, or Array'
);
const schemes = [].concat(options.scheme);
assert(schemes.length >= 1, 'scheme must have at least 1 scheme specified');
// Flatten the array into a string to be used to match the schemes
const selections = [];
for (let i = 0; i < schemes.length; ++i) {
const scheme = schemes[i];
assert(
scheme instanceof RegExp || typeof scheme === 'string',
'scheme at position ' + i + ' must be a RegExp or String'
);
if (scheme instanceof RegExp) {
selections.push(scheme.source.toString());
} else {
assert(rfc.schemeRegex.test(scheme), 'scheme at position ' + i + ' must be a valid scheme');
selections.push(escapeRegex(scheme));
}
}
customScheme = selections.join('|');
}
// URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
const scheme = customScheme ? '(?:' + customScheme + ')' : rfc.scheme;
const absolute = '(?:' + scheme + ':' + (options.domain ? rfc.hierPartCapture : rfc.hierPart) + ')';
const prefix = options.allowRelative ? '(?:' + absolute + '|' + relative + ')' : absolute;
return wrap(prefix + suffix, customScheme);
}
interface Expression {
/** The raw regular expression string. */
raw: string;
/** The regular expression. */
regex: RegExp;
/** The specified URI scheme */
scheme: string | null;
}
function wrap(raw: string, scheme: string = null): Expression {
raw = `(?=.)(?!https?\:/(?:$|[^/]))(?!https?\:///)(?!https?\:[^/])${raw}`; // Require at least one character and explicitly forbid 'http:/' or HTTP with empty domain
return {
raw,
regex: new RegExp(`^${raw}$`),
scheme
};
}
const genericUriRegex = createRegex({});
/**
* Generates a regular expression used to validate URI addresses.
*
* @param options - optional settings.
*
* @returns an object with the regular expression and meta data.
*/
export function uriRegex(options: Options = {}) {
if (
options.scheme ||
options.allowRelative ||
options.relativeOnly ||
options.allowQuerySquareBrackets ||
options.domain
) {
return createRegex(options);
}
return genericUriRegex;
}
type Scheme = string | RegExp;
interface Options {
/**
* Allow the use of [] in query parameters.
*
* @default false
*/
readonly allowQuerySquareBrackets?: boolean;
/**
* Allow relative URIs.
*
* @default false
*/
readonly allowRelative?: boolean;
/**
* Requires the URI to be relative.
*
* @default false
*/
readonly relativeOnly?: boolean;
/**
* Capture domain segment ($1).
*
* @default false
*/
readonly domain?: boolean;
/**
* The allowed URI schemes.
*/
readonly scheme?: Scheme | Scheme[];
} | random_line_split | |
uri.ts | import { assert, escapeRegex } from '@hapi/hoek';
interface RFC3986 {
ipv4address: string;
ipv4Cidr: string;
ipv6Cidr: string;
ipv6address: string;
ipvFuture: string;
scheme: string;
schemeRegex: RegExp;
hierPart: string;
hierPartCapture: string;
relativeRef: string;
relativeRefCapture: string;
query: string;
queryWithSquareBrackets: string;
fragment: string;
}
function generate() {
const rfc3986 = {} as RFC3986;
const hexDigit = '\\dA-Fa-f'; // HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F"
const hexDigitOnly = '[' + hexDigit + ']';
const unreserved = '\\w-\\.~'; // unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
const subDelims = "!\\$&'\\(\\)\\*\\+,;="; // sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
const pctEncoded = '%' + hexDigit; // pct-encoded = "%" HEXDIG HEXDIG
const pchar = unreserved + pctEncoded + subDelims + ':@'; // pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
const pcharOnly = '[' + pchar + ']';
const decOctect = '(?:0{0,2}\\d|0?[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])'; // dec-octet = DIGIT / %x31-39 DIGIT / "1" 2DIGIT / "2" %x30-34 DIGIT / "25" %x30-35 ; 0-9 / 10-99 / 100-199 / 200-249 / 250-255
rfc3986.ipv4address = '(?:' + decOctect + '\\.){3}' + decOctect; // IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
/*
h16 = 1*4HEXDIG ; 16 bits of address represented in hexadecimal
ls32 = ( h16 ":" h16 ) / IPv4address ; least-significant 32 bits of address
IPv6address = 6( h16 ":" ) ls32
/ "::" 5( h16 ":" ) ls32
/ [ h16 ] "::" 4( h16 ":" ) ls32
/ [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
/ [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
/ [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
/ [ *4( h16 ":" ) h16 ] "::" ls32
/ [ *5( h16 ":" ) h16 ] "::" h16
/ [ *6( h16 ":" ) h16 ] "::"
*/
const h16 = hexDigitOnly + '{1,4}';
const ls32 = '(?:' + h16 + ':' + h16 + '|' + rfc3986.ipv4address + ')';
const IPv6SixHex = '(?:' + h16 + ':){6}' + ls32;
const IPv6FiveHex = '::(?:' + h16 + ':){5}' + ls32;
const IPv6FourHex = '(?:' + h16 + ')?::(?:' + h16 + ':){4}' + ls32;
const IPv6ThreeHex = '(?:(?:' + h16 + ':){0,1}' + h16 + ')?::(?:' + h16 + ':){3}' + ls32;
const IPv6TwoHex = '(?:(?:' + h16 + ':){0,2}' + h16 + ')?::(?:' + h16 + ':){2}' + ls32;
const IPv6OneHex = '(?:(?:' + h16 + ':){0,3}' + h16 + ')?::' + h16 + ':' + ls32;
const IPv6NoneHex = '(?:(?:' + h16 + ':){0,4}' + h16 + ')?::' + ls32;
const IPv6NoneHex2 = '(?:(?:' + h16 + ':){0,5}' + h16 + ')?::' + h16;
const IPv6NoneHex3 = '(?:(?:' + h16 + ':){0,6}' + h16 + ')?::';
rfc3986.ipv4Cidr = '(?:\\d|[1-2]\\d|3[0-2])'; // IPv4 cidr = DIGIT / %x31-32 DIGIT / "3" %x30-32 ; 0-9 / 10-29 / 30-32
rfc3986.ipv6Cidr = '(?:0{0,2}\\d|0?[1-9]\\d|1[01]\\d|12[0-8])'; // IPv6 cidr = DIGIT / %x31-39 DIGIT / "1" %x0-1 DIGIT / "12" %x0-8; 0-9 / 10-99 / 100-119 / 120-128
rfc3986.ipv6address =
'(?:' +
IPv6SixHex +
'|' +
IPv6FiveHex +
'|' +
IPv6FourHex +
'|' +
IPv6ThreeHex +
'|' +
IPv6TwoHex +
'|' +
IPv6OneHex +
'|' +
IPv6NoneHex +
'|' +
IPv6NoneHex2 +
'|' +
IPv6NoneHex3 +
')';
rfc3986.ipvFuture = 'v' + hexDigitOnly + '+\\.[' + unreserved + subDelims + ':]+'; // IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
rfc3986.scheme = '[a-zA-Z][a-zA-Z\\d+-\\.]*'; // scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
rfc3986.schemeRegex = new RegExp(rfc3986.scheme);
const userinfo = '[' + unreserved + pctEncoded + subDelims + ':]*'; // userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
const IPLiteral = '\\[(?:' + rfc3986.ipv6address + '|' + rfc3986.ipvFuture + ')\\]'; // IP-literal = "[" ( IPv6address / IPvFuture ) "]"
const regName = '[' + unreserved + pctEncoded + subDelims + ']{1,255}'; // reg-name = *( unreserved / pct-encoded / sub-delims )
const host = '(?:' + IPLiteral + '|' + rfc3986.ipv4address + '|' + regName + ')'; // host = IP-literal / IPv4address / reg-name
const port = '\\d*'; // port = *DIGIT
const authority = '(?:' + userinfo + '@)?' + host + '(?::' + port + ')?'; // authority = [ userinfo "@" ] host [ ":" port ]
const authorityCapture = '(?:' + userinfo + '@)?(' + host + ')(?::' + port + ')?';
/*
segment = *pchar
segment-nz = 1*pchar
path = path-abempty ; begins with "/" '|' is empty
/ path-absolute ; begins with "/" but not "//"
/ path-noscheme ; begins with a non-colon segment
/ path-rootless ; begins with a segment
/ path-empty ; zero characters
path-abempty = *( "/" segment )
path-absolute = "/" [ segment-nz *( "/" segment ) ]
path-rootless = segment-nz *( "/" segment )
*/
const segment = pcharOnly + '*';
const segmentNz = pcharOnly + '+';
const segmentNzNc = '[' + unreserved + pctEncoded + subDelims + '@' + ']+';
const pathEmpty = '';
const pathAbEmpty = '(?:\\/' + segment + ')*';
const pathAbsolute = '\\/(?:' + segmentNz + pathAbEmpty + ')?';
const pathRootless = segmentNz + pathAbEmpty;
const pathNoScheme = segmentNzNc + pathAbEmpty;
const pathAbNoAuthority = '(?:\\/\\/\\/' + segment + pathAbEmpty + ')'; // Used by file:///
// hier-part = "//" authority path
rfc3986.hierPart =
'(?:' +
'(?:\\/\\/' +
authority +
pathAbEmpty +
')' +
'|' +
pathAbsolute +
'|' +
pathRootless +
'|' +
pathAbNoAuthority +
')';
rfc3986.hierPartCapture =
'(?:' + '(?:\\/\\/' + authorityCapture + pathAbEmpty + ')' + '|' + pathAbsolute + '|' + pathRootless + ')';
// relative-part = "//" authority path-abempty / path-absolute / path-noscheme / path-empty
rfc3986.relativeRef =
'(?:' +
'(?:\\/\\/' +
authority +
pathAbEmpty +
')' +
'|' +
pathAbsolute +
'|' +
pathNoScheme +
'|' +
pathEmpty +
')';
rfc3986.relativeRefCapture =
'(?:' +
'(?:\\/\\/' +
authorityCapture +
pathAbEmpty +
')' +
'|' +
pathAbsolute +
'|' +
pathNoScheme +
'|' +
pathEmpty +
')';
// query = *( pchar / "/" / "?" )
// query = *( pchar / "[" / "]" / "/" / "?" )
rfc3986.query = '[' + pchar + '\\/\\?]*(?=#|$)'; //Finish matching either at the fragment part '|' end of the line.
rfc3986.queryWithSquareBrackets = '[' + pchar + '\\[\\]\\/\\?]*(?=#|$)';
// fragment = *( pchar / "/" / "?" )
rfc3986.fragment = '[' + pchar + '\\/\\?]*';
return rfc3986;
}
const rfc3986 = generate();
export const ipVersions = {
v4Cidr: rfc3986.ipv4Cidr,
v6Cidr: rfc3986.ipv6Cidr,
ipv4: rfc3986.ipv4address,
ipv6: rfc3986.ipv6address,
ipvfuture: rfc3986.ipvFuture
};
function createRegex(options: Options) |
interface Expression {
/** The raw regular expression string. */
raw: string;
/** The regular expression. */
regex: RegExp;
/** The specified URI scheme */
scheme: string | null;
}
function wrap(raw: string, scheme: string = null): Expression {
raw = `(?=.)(?!https?\:/(?:$|[^/]))(?!https?\:///)(?!https?\:[^/])${raw}`; // Require at least one character and explicitly forbid 'http:/' or HTTP with empty domain
return {
raw,
regex: new RegExp(`^${raw}$`),
scheme
};
}
const genericUriRegex = createRegex({});
/**
* Generates a regular expression used to validate URI addresses.
*
* @param options - optional settings.
*
* @returns an object with the regular expression and meta data.
*/
export function uriRegex(options: Options = {}) {
if (
options.scheme ||
options.allowRelative ||
options.relativeOnly ||
options.allowQuerySquareBrackets ||
options.domain
) {
return createRegex(options);
}
return genericUriRegex;
}
type Scheme = string | RegExp;
interface Options {
/**
* Allow the use of [] in query parameters.
*
* @default false
*/
readonly allowQuerySquareBrackets?: boolean;
/**
* Allow relative URIs.
*
* @default false
*/
readonly allowRelative?: boolean;
/**
* Requires the URI to be relative.
*
* @default false
*/
readonly relativeOnly?: boolean;
/**
* Capture domain segment ($1).
*
* @default false
*/
readonly domain?: boolean;
/**
* The allowed URI schemes.
*/
readonly scheme?: Scheme | Scheme[];
}
| {
const rfc = rfc3986;
// Construct expression
const query = options.allowQuerySquareBrackets ? rfc.queryWithSquareBrackets : rfc.query;
const suffix = '(?:\\?' + query + ')?' + '(?:#' + rfc.fragment + ')?';
// relative-ref = relative-part [ "?" query ] [ "#" fragment ]
const relative = options.domain ? rfc.relativeRefCapture : rfc.relativeRef;
if (options.relativeOnly) {
return wrap(relative + suffix);
}
// Custom schemes
let customScheme = '';
if (options.scheme) {
assert(
options.scheme instanceof RegExp || typeof options.scheme === 'string' || Array.isArray(options.scheme),
'scheme must be a RegExp, String, or Array'
);
const schemes = [].concat(options.scheme);
assert(schemes.length >= 1, 'scheme must have at least 1 scheme specified');
// Flatten the array into a string to be used to match the schemes
const selections = [];
for (let i = 0; i < schemes.length; ++i) {
const scheme = schemes[i];
assert(
scheme instanceof RegExp || typeof scheme === 'string',
'scheme at position ' + i + ' must be a RegExp or String'
);
if (scheme instanceof RegExp) {
selections.push(scheme.source.toString());
} else {
assert(rfc.schemeRegex.test(scheme), 'scheme at position ' + i + ' must be a valid scheme');
selections.push(escapeRegex(scheme));
}
}
customScheme = selections.join('|');
}
// URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
const scheme = customScheme ? '(?:' + customScheme + ')' : rfc.scheme;
const absolute = '(?:' + scheme + ':' + (options.domain ? rfc.hierPartCapture : rfc.hierPart) + ')';
const prefix = options.allowRelative ? '(?:' + absolute + '|' + relative + ')' : absolute;
return wrap(prefix + suffix, customScheme);
} | identifier_body |
uri.ts | import { assert, escapeRegex } from '@hapi/hoek';
interface RFC3986 {
ipv4address: string;
ipv4Cidr: string;
ipv6Cidr: string;
ipv6address: string;
ipvFuture: string;
scheme: string;
schemeRegex: RegExp;
hierPart: string;
hierPartCapture: string;
relativeRef: string;
relativeRefCapture: string;
query: string;
queryWithSquareBrackets: string;
fragment: string;
}
function generate() {
const rfc3986 = {} as RFC3986;
const hexDigit = '\\dA-Fa-f'; // HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F"
const hexDigitOnly = '[' + hexDigit + ']';
const unreserved = '\\w-\\.~'; // unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
const subDelims = "!\\$&'\\(\\)\\*\\+,;="; // sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
const pctEncoded = '%' + hexDigit; // pct-encoded = "%" HEXDIG HEXDIG
const pchar = unreserved + pctEncoded + subDelims + ':@'; // pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
const pcharOnly = '[' + pchar + ']';
const decOctect = '(?:0{0,2}\\d|0?[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])'; // dec-octet = DIGIT / %x31-39 DIGIT / "1" 2DIGIT / "2" %x30-34 DIGIT / "25" %x30-35 ; 0-9 / 10-99 / 100-199 / 200-249 / 250-255
rfc3986.ipv4address = '(?:' + decOctect + '\\.){3}' + decOctect; // IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
/*
h16 = 1*4HEXDIG ; 16 bits of address represented in hexadecimal
ls32 = ( h16 ":" h16 ) / IPv4address ; least-significant 32 bits of address
IPv6address = 6( h16 ":" ) ls32
/ "::" 5( h16 ":" ) ls32
/ [ h16 ] "::" 4( h16 ":" ) ls32
/ [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
/ [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
/ [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
/ [ *4( h16 ":" ) h16 ] "::" ls32
/ [ *5( h16 ":" ) h16 ] "::" h16
/ [ *6( h16 ":" ) h16 ] "::"
*/
const h16 = hexDigitOnly + '{1,4}';
const ls32 = '(?:' + h16 + ':' + h16 + '|' + rfc3986.ipv4address + ')';
const IPv6SixHex = '(?:' + h16 + ':){6}' + ls32;
const IPv6FiveHex = '::(?:' + h16 + ':){5}' + ls32;
const IPv6FourHex = '(?:' + h16 + ')?::(?:' + h16 + ':){4}' + ls32;
const IPv6ThreeHex = '(?:(?:' + h16 + ':){0,1}' + h16 + ')?::(?:' + h16 + ':){3}' + ls32;
const IPv6TwoHex = '(?:(?:' + h16 + ':){0,2}' + h16 + ')?::(?:' + h16 + ':){2}' + ls32;
const IPv6OneHex = '(?:(?:' + h16 + ':){0,3}' + h16 + ')?::' + h16 + ':' + ls32;
const IPv6NoneHex = '(?:(?:' + h16 + ':){0,4}' + h16 + ')?::' + ls32;
const IPv6NoneHex2 = '(?:(?:' + h16 + ':){0,5}' + h16 + ')?::' + h16;
const IPv6NoneHex3 = '(?:(?:' + h16 + ':){0,6}' + h16 + ')?::';
rfc3986.ipv4Cidr = '(?:\\d|[1-2]\\d|3[0-2])'; // IPv4 cidr = DIGIT / %x31-32 DIGIT / "3" %x30-32 ; 0-9 / 10-29 / 30-32
rfc3986.ipv6Cidr = '(?:0{0,2}\\d|0?[1-9]\\d|1[01]\\d|12[0-8])'; // IPv6 cidr = DIGIT / %x31-39 DIGIT / "1" %x0-1 DIGIT / "12" %x0-8; 0-9 / 10-99 / 100-119 / 120-128
rfc3986.ipv6address =
'(?:' +
IPv6SixHex +
'|' +
IPv6FiveHex +
'|' +
IPv6FourHex +
'|' +
IPv6ThreeHex +
'|' +
IPv6TwoHex +
'|' +
IPv6OneHex +
'|' +
IPv6NoneHex +
'|' +
IPv6NoneHex2 +
'|' +
IPv6NoneHex3 +
')';
rfc3986.ipvFuture = 'v' + hexDigitOnly + '+\\.[' + unreserved + subDelims + ':]+'; // IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
rfc3986.scheme = '[a-zA-Z][a-zA-Z\\d+-\\.]*'; // scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
rfc3986.schemeRegex = new RegExp(rfc3986.scheme);
const userinfo = '[' + unreserved + pctEncoded + subDelims + ':]*'; // userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
const IPLiteral = '\\[(?:' + rfc3986.ipv6address + '|' + rfc3986.ipvFuture + ')\\]'; // IP-literal = "[" ( IPv6address / IPvFuture ) "]"
const regName = '[' + unreserved + pctEncoded + subDelims + ']{1,255}'; // reg-name = *( unreserved / pct-encoded / sub-delims )
const host = '(?:' + IPLiteral + '|' + rfc3986.ipv4address + '|' + regName + ')'; // host = IP-literal / IPv4address / reg-name
const port = '\\d*'; // port = *DIGIT
const authority = '(?:' + userinfo + '@)?' + host + '(?::' + port + ')?'; // authority = [ userinfo "@" ] host [ ":" port ]
const authorityCapture = '(?:' + userinfo + '@)?(' + host + ')(?::' + port + ')?';
/*
segment = *pchar
segment-nz = 1*pchar
path = path-abempty ; begins with "/" '|' is empty
/ path-absolute ; begins with "/" but not "//"
/ path-noscheme ; begins with a non-colon segment
/ path-rootless ; begins with a segment
/ path-empty ; zero characters
path-abempty = *( "/" segment )
path-absolute = "/" [ segment-nz *( "/" segment ) ]
path-rootless = segment-nz *( "/" segment )
*/
const segment = pcharOnly + '*';
const segmentNz = pcharOnly + '+';
const segmentNzNc = '[' + unreserved + pctEncoded + subDelims + '@' + ']+';
const pathEmpty = '';
const pathAbEmpty = '(?:\\/' + segment + ')*';
const pathAbsolute = '\\/(?:' + segmentNz + pathAbEmpty + ')?';
const pathRootless = segmentNz + pathAbEmpty;
const pathNoScheme = segmentNzNc + pathAbEmpty;
const pathAbNoAuthority = '(?:\\/\\/\\/' + segment + pathAbEmpty + ')'; // Used by file:///
// hier-part = "//" authority path
rfc3986.hierPart =
'(?:' +
'(?:\\/\\/' +
authority +
pathAbEmpty +
')' +
'|' +
pathAbsolute +
'|' +
pathRootless +
'|' +
pathAbNoAuthority +
')';
rfc3986.hierPartCapture =
'(?:' + '(?:\\/\\/' + authorityCapture + pathAbEmpty + ')' + '|' + pathAbsolute + '|' + pathRootless + ')';
// relative-part = "//" authority path-abempty / path-absolute / path-noscheme / path-empty
rfc3986.relativeRef =
'(?:' +
'(?:\\/\\/' +
authority +
pathAbEmpty +
')' +
'|' +
pathAbsolute +
'|' +
pathNoScheme +
'|' +
pathEmpty +
')';
rfc3986.relativeRefCapture =
'(?:' +
'(?:\\/\\/' +
authorityCapture +
pathAbEmpty +
')' +
'|' +
pathAbsolute +
'|' +
pathNoScheme +
'|' +
pathEmpty +
')';
// query = *( pchar / "/" / "?" )
// query = *( pchar / "[" / "]" / "/" / "?" )
rfc3986.query = '[' + pchar + '\\/\\?]*(?=#|$)'; //Finish matching either at the fragment part '|' end of the line.
rfc3986.queryWithSquareBrackets = '[' + pchar + '\\[\\]\\/\\?]*(?=#|$)';
// fragment = *( pchar / "/" / "?" )
rfc3986.fragment = '[' + pchar + '\\/\\?]*';
return rfc3986;
}
const rfc3986 = generate();
export const ipVersions = {
v4Cidr: rfc3986.ipv4Cidr,
v6Cidr: rfc3986.ipv6Cidr,
ipv4: rfc3986.ipv4address,
ipv6: rfc3986.ipv6address,
ipvfuture: rfc3986.ipvFuture
};
function createRegex(options: Options) {
const rfc = rfc3986;
// Construct expression
const query = options.allowQuerySquareBrackets ? rfc.queryWithSquareBrackets : rfc.query;
const suffix = '(?:\\?' + query + ')?' + '(?:#' + rfc.fragment + ')?';
// relative-ref = relative-part [ "?" query ] [ "#" fragment ]
const relative = options.domain ? rfc.relativeRefCapture : rfc.relativeRef;
if (options.relativeOnly) {
return wrap(relative + suffix);
}
// Custom schemes
let customScheme = '';
if (options.scheme) {
assert(
options.scheme instanceof RegExp || typeof options.scheme === 'string' || Array.isArray(options.scheme),
'scheme must be a RegExp, String, or Array'
);
const schemes = [].concat(options.scheme);
assert(schemes.length >= 1, 'scheme must have at least 1 scheme specified');
// Flatten the array into a string to be used to match the schemes
const selections = [];
for (let i = 0; i < schemes.length; ++i) |
customScheme = selections.join('|');
}
// URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
const scheme = customScheme ? '(?:' + customScheme + ')' : rfc.scheme;
const absolute = '(?:' + scheme + ':' + (options.domain ? rfc.hierPartCapture : rfc.hierPart) + ')';
const prefix = options.allowRelative ? '(?:' + absolute + '|' + relative + ')' : absolute;
return wrap(prefix + suffix, customScheme);
}
interface Expression {
/** The raw regular expression string. */
raw: string;
/** The regular expression. */
regex: RegExp;
/** The specified URI scheme */
scheme: string | null;
}
function wrap(raw: string, scheme: string = null): Expression {
raw = `(?=.)(?!https?\:/(?:$|[^/]))(?!https?\:///)(?!https?\:[^/])${raw}`; // Require at least one character and explicitly forbid 'http:/' or HTTP with empty domain
return {
raw,
regex: new RegExp(`^${raw}$`),
scheme
};
}
const genericUriRegex = createRegex({});
/**
* Generates a regular expression used to validate URI addresses.
*
* @param options - optional settings.
*
* @returns an object with the regular expression and meta data.
*/
export function uriRegex(options: Options = {}) {
if (
options.scheme ||
options.allowRelative ||
options.relativeOnly ||
options.allowQuerySquareBrackets ||
options.domain
) {
return createRegex(options);
}
return genericUriRegex;
}
type Scheme = string | RegExp;
interface Options {
/**
* Allow the use of [] in query parameters.
*
* @default false
*/
readonly allowQuerySquareBrackets?: boolean;
/**
* Allow relative URIs.
*
* @default false
*/
readonly allowRelative?: boolean;
/**
* Requires the URI to be relative.
*
* @default false
*/
readonly relativeOnly?: boolean;
/**
* Capture domain segment ($1).
*
* @default false
*/
readonly domain?: boolean;
/**
* The allowed URI schemes.
*/
readonly scheme?: Scheme | Scheme[];
}
| {
const scheme = schemes[i];
assert(
scheme instanceof RegExp || typeof scheme === 'string',
'scheme at position ' + i + ' must be a RegExp or String'
);
if (scheme instanceof RegExp) {
selections.push(scheme.source.toString());
} else {
assert(rfc.schemeRegex.test(scheme), 'scheme at position ' + i + ' must be a valid scheme');
selections.push(escapeRegex(scheme));
}
} | conditional_block |
window.rs | //! General logic for OS window manipulation and creation.
//!
//! Each `Window` gets a data model, which is a struct that implements the `Layout`
//! trait. All window and UI specific state is contained in the data model and it is
//! responsible for the GUI layout (`Layout::layout()`).
//!
//! Manipulation of all GUI related things can only be done in the main thread. This is
//! possible by sending events either with a `app::AppHandle` or `WindowHandle`.
use super::app::{self, App};
use super::ErrorCode;
use super::UiError;
use super::UiResult;
use crate::utils;
use std::any::Any;
use std::cell::Cell;
use std::error::Error;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::Arc;
use std::sync::Weak;
pub use winit::window::WindowId;
/// This trait contains the functions which are used to layout the GUI.
///
/// You must implement this trait in your own class and create a `Window` with an instance
/// of that class (which is called the data model). On every redraw of the GUI, the App
/// instance calls the `ActiveWindow::render()` method, which in turn calls
/// `Layout::layout()`.
pub trait Layout {
/// The central method for creating the GUI.
fn layout(&mut self, ui: LayoutContext, app: &App, window: &mut Window);
/// A user defined logging method, which can be called from a thread-safe handle of
/// the window.
///
/// If `level` is `None`, the `message` is only displayed in the GUI and not logged.
fn log(&mut self, level: Option<log::Level>, message: &str);
/// Used for downcasting to the actual data model type.
///
/// When manipulating the data model from a different thread using
/// `WindowHandle::run_with_data_model()`, this allows the cast from the `Layout`
/// trait reference to the actual data model type.
fn as_any(&mut self) -> &mut dyn Any;
/// This method is used to initialize the data model.
///
/// It will be called only once in the entire lifetime of the window, before the
/// window is shown. If this method fails it can return a boxed `std::error::Error`
/// and both `App::new_window()` and `Window::new()` will also fail and return
/// the same error.
fn init(&mut self, window: &mut Window) -> Result<(), Box<dyn Error>>;
/// This method is called before the window gets closed and destroyed.
/// The return value determines if the window actually gets closed.
///
/// If the return value is `true`, the window will be closed and destroyed,
/// otherwise when `false` it will remain open and the close action will be
/// ignored.
///
/// **Warning:**
/// Don't use this method for cleanup, because there is no guaratee that it is ever
/// called (for example when the window creation fails). Implement the `Drop` trait
/// for the data model, where all cleanup can then be done in the `Drop::drop()`
/// method, which is guaranteed to be called when the data model is destroyed.
fn before_close(&mut self, window: &mut Window) -> bool;
}
/// The amount the a window is or should be invalidated over time.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum InvalidateAmount {
/// Possible continuous invalidation is stopped or inactive.
Stop,
/// The window will be invalidated once as soon as possible.
Once,
/// The window is continuously invalidated until the given instant.
Until(std::time::Instant),
/// The window is continuously invalidated indefinetly.
Indefinetely,
}
impl InvalidateAmount {
/// Wether or not continuous updating is currently active.
///
/// `true` if `InvalidateAmount::Until(_)` or `InvalidateAmount::Indefinetely`,
/// `false` otherwise.
pub fn is_continuously(&self) -> bool {
match self {
Self::Stop | Self::Once => false,
_ => true,
}
}
}
/// The context used to create the GUI using Dear ImGUI.
/// It is passed to the `Layout::layout()` method.
pub struct LayoutContext<'ui> {
pub ui: &'ui imgui::Ui<'ui>,
pub window_handle: WindowHandle,
invalidate_amount_changed: Cell<bool>,
invalidate_amount: Cell<InvalidateAmount>,
}
impl LayoutContext<'_> {
/// Requests invalidation of the specified `amount` after the current frame is
/// finished. The resulting requested invalidation amount is the maximum of
/// all `request_invalidate()` calls for one frame.
#[inline]
pub fn request_invalidate(&self, amount: InvalidateAmount) {
if self.invalidate_amount.get() < amount {
self.invalidate_amount.set(amount);
} else if self.invalidate_amount.get() == amount {
self.invalidate_amount
.set(match (self.invalidate_amount.get(), amount) {
(InvalidateAmount::Until(inst0), InvalidateAmount::Until(inst1)) => {
InvalidateAmount::Until(utils::max_instant(inst0, inst1))
}
(curr, _) => curr,
});
}
self.invalidate_amount_changed.set(true);
}
}
impl<'ui> Deref for LayoutContext<'ui> {
type Target = imgui::Ui<'ui>;
#[inline]
fn deref(&self) -> &Self::Target {
if self.ui.is_item_edited() {
self.request_invalidate(InvalidateAmount::Once);
} else if self.ui.is_item_activated() {
self.request_invalidate(InvalidateAmount::Indefinetely);
} else if self.ui.is_item_deactivated() {
self.request_invalidate(InvalidateAmount::Stop);
}
self.ui
}
}
impl Drop for LayoutContext<'_> {
fn drop(&mut self) {
if self.invalidate_amount_changed.get() {
let _ = self
.window_handle
.set_invalidate_amount(self.invalidate_amount.get());
} else {
if self.ui.is_any_item_active() {
let _ = self.window_handle.request_invalidate();
}
}
}
}
/// This struct represents an OS window, which contains an ImGUI graphical user interface.
pub struct Window {
window: winit::window::Window,
last_frame_time: std::time::Instant,
alive: Arc<()>,
app_handle: app::AppHandle,
invalidate_amount: InvalidateAmount,
// Everything for rendering
surface: wgpu::Surface,
gpu_device: wgpu::Device,
swap_chain_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
renderer: imgui_wgpu::Renderer,
queue: wgpu::Queue,
/// A MSAA framebuffer texture and its sample count.
msaa_framebuffer: Option<(wgpu::TextureView, wgpu::Extent3d, u32)>,
// All imgui related
winit_platform: imgui_winit_support::WinitPlatform,
imgui: ImguiContext,
default_font: Option<imgui::FontId>,
last_cursor: Option<imgui::MouseCursor>,
/// The data model associated with this native window, that holds its state.
pub data_model: Box<dyn Layout>,
}
enum ImguiContext {
Suspended(imgui::SuspendedContext),
Used(),
}
impl Window {
/// Closes the window and, if no further windows remain, shuts down the application.
pub fn close(&mut self) {
let data_model = &mut self.data_model as *mut Box<(dyn Layout + 'static)>;
let should_close = unsafe { &mut *data_model }.before_close(self);
if should_close {
let window_id = self.id();
let _ = self.app_handle.execute_with_gui(move |app: &mut App| {
app.remove_window(window_id);
});
}
}
/// Get a mutable reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window_mut(&mut self) -> &mut winit::window::Window {
&mut self.window
}
/// Get a reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window(&self) -> &winit::window::Window {
&self.window
}
/// Get the id of the window.
pub fn id(&self) -> winit::window::WindowId {
self.window.id()
}
/// Get the time the window was last updated.
pub fn last_frame_time(&self) -> std::time::Instant {
self.last_frame_time
}
/// Updates the frame time to now and returns the duration since the last frame.
#[inline]
pub(crate) fn update_frame_time(&mut self) -> std::time::Duration {
let now = std::time::Instant::now();
let frame_delta = now - self.last_frame_time;
self.last_frame_time = now;
frame_delta
}
/// Creates a standard top level window.
///
/// Call this method inside the closure passed to `App::new_window()`.
pub fn build_window(title: &str, size: (u32, u32)) -> winit::window::WindowBuilder {
winit::window::WindowBuilder::new()
.with_title(title)
.with_inner_size(winit::dpi::PhysicalSize {
width: size.0,
height: size.1,
})
.with_resizable(true)
}
/// Creates a new `Window` instance.
///
/// *Only for internal use.* The user creates new windows using `App::new_window()`.
pub(crate) fn new(
app: &mut app::App,
data_model: Box<dyn Layout>,
wnd: winit::window::Window,
visible: bool,
) -> UiResult<Window> {
let size = wnd.inner_size();
let surface = unsafe { app.wgpu_instance.create_surface(&wnd) };
// select adapter and gpu device
let (device, queue) = Window::select_gpu_device(&app, &surface)?;
// create the swapchain
let format = wgpu::TextureFormat::Bgra8UnormSrgb;
let swap_chain_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc);
let msaa_framebuffer = if app.msaa_samples > 1 {
Some(Window::create_msaa_framebuffer(
&device,
&swap_chain_desc,
app.msaa_samples,
))
} else {
None
};
// create imgui ui
// Note: This is going to panic if any other `imgui::Context` is currently active
let mut imgui = imgui::Context::create_with_shared_font_atlas(app.font_atlas.clone());
let mut platform = imgui_winit_support::WinitPlatform::init(&mut imgui);
platform.attach_window(
imgui.io_mut(),
&wnd,
imgui_winit_support::HiDpiMode::Default,
);
app.apply_imgui_settings(&mut imgui);
// create renderer
let renderer = imgui_wgpu::Renderer::new(
&mut imgui,
&device,
&queue,
swap_chain_desc.format,
None,
app.msaa_samples,
);
let mut wnd = Window {
window: wnd,
last_frame_time: std::time::Instant::now(),
alive: Arc::default(),
app_handle: app.handle(),
invalidate_amount: InvalidateAmount::Stop,
surface,
gpu_device: device,
swap_chain_desc,
swap_chain,
renderer,
queue,
msaa_framebuffer,
winit_platform: platform,
imgui: ImguiContext::Suspended(imgui.suspend()),
default_font: None,
last_cursor: None,
data_model,
};
if visible {
// draw immediately
let mut active_window = wnd.activate()?;
active_window.render(app, std::time::Duration::from_millis(std::u64::MAX))?;
drop(active_window);
wnd.window().set_visible(true);
}
Ok(wnd)
}
pub fn invalidate_amount(&self) -> InvalidateAmount {
self.invalidate_amount
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.id(),
state: amount,
})
}
fn update_render_size<T: std::convert::Into<u32>>(
&mut self,
_app: &App,
size: winit::dpi::PhysicalSize<T>,
) {
self.swap_chain_desc.width = size.width.into();
self.swap_chain_desc.height = size.height.into();
self.swap_chain = self
.gpu_device
.create_swap_chain(&self.surface, &self.swap_chain_desc);
// Note: Normally we would also update the optional MSAA framebuffer here, but
// this causes visual resize lag, presumably because the recreation of the MSAA
// texture is quite expensive. Instead this is done in the
// `ActiveWindow::render()` method.
}
pub(super) fn activate<'a>(&'a mut self) -> UiResult<ActiveWindow<'a>> {
let imgui = std::mem::replace(&mut self.imgui, ImguiContext::Used());
if let ImguiContext::Suspended(ctx) = imgui {
let ctx = ctx.activate();
match ctx {
Ok(ctx) => {
return Ok(ActiveWindow {
imgui_context: std::mem::ManuallyDrop::new(ctx),
wrapped_window: self,
});
}
Err(ctx) => {
self.imgui = ImguiContext::Suspended(ctx);
return Err(UiError::new(ErrorCode::IMGUI_CONTEXT_ACTIVATE_FAILED));
}
}
}
Err(UiError::new(ErrorCode::INVALID_IMGUI_CONTEXT))
}
/// Creates a thread-safe handle to this native window.
///
/// This handle can be used to access the represented native window from another
/// thread using events that get sent to and dispatched in the main (UI) thread.
pub fn handle(&self) -> WindowHandle {
WindowHandle {
window_id: self.id(),
app_handle: self.app_handle.clone(),
alive: Arc::downgrade(&self.alive),
}
}
/// Request window invalidation as soon as possible.
pub fn request_invalidate(&self) {
self.window.request_redraw();
}
fn select_gpu_device(
app: &App,
surface: &wgpu::Surface,
) -> UiResult<(wgpu::Device, wgpu::Queue)> {
use futures::executor::block_on;
let adapter_opts = wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
};
let adapter_request = app.wgpu_instance.request_adapter(&adapter_opts);
let adapter = match block_on(adapter_request) {
Some(val) => val,
None => return Err(ErrorCode::GRAPHICS_ADAPTER_NOT_AVAILABLE.into()),
};
let device_desc = wgpu::DeviceDescriptor {
features: wgpu::Features::default(),
limits: wgpu::Limits::default(),
shader_validation: false,
};
let device_request =
adapter.request_device(&device_desc, Some(std::path::Path::new(file!())));
let device_and_queue = match block_on(device_request) {
Ok(device) => device,
Err(err) => {
return Err(UiError::with_source(
ErrorCode::REQUEST_GRAPHICS_DEVICE_FAILED,
err,
))
}
};
Ok(device_and_queue)
}
/// Creates new framebuffer for multisampling anti-aliasing with the specified
/// `sample_count`.
/// Returnes a tuple with the `wgpu::TextureView` and the MSAA sample count used.
fn create_msaa_framebuffer(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
sample_count: u32,
) -> (wgpu::TextureView, wgpu::Extent3d, u32) {
let tex_extent = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
};
let tex_desc = &wgpu::TextureDescriptor {
label: Some("imgui_msaa_texture"),
size: tex_extent,
mip_level_count: 1,
sample_count: sample_count,
dimension: wgpu::TextureDimension::D2,
format: sc_desc.format,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
};
let tex_view_desc = &wgpu::TextureViewDescriptor {
label: Some("imgui_msaa_texture_view"),
format: Some(sc_desc.format),
dimension: None,
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
level_count: None,
base_array_layer: 0,
array_layer_count: None,
};
(
device.create_texture(tex_desc).create_view(&tex_view_desc),
tex_extent,
sample_count,
)
}
/// Gets the `wgpu::Queue` of this window.
pub fn wgpu_queue(&self) -> &wgpu::Queue {
&self.queue
}
/// Gets the `wgpu::Device` of this window.
pub fn wgpu_device(&self) -> &wgpu::Device {
&self.gpu_device
}
/// Gets the renderer.
pub fn renderer(&self) -> &imgui_wgpu::Renderer {
&self.renderer
}
/// Gets a reference to the texture collection.
pub fn textures(&self) -> &imgui::Textures<imgui_wgpu::Texture> {
&self.renderer.textures
}
/// Gets a mutable reference to the texture collection.
pub fn textures_mut(&mut self) -> &mut imgui::Textures<imgui_wgpu::Texture> {
&mut self.renderer.textures
}
}
/// A window prepared to be updated.
///
/// This struct is used to disjoin the lifetimes of the `Window` with that of the
/// `imgui::Context`.
pub struct ActiveWindow<'a> {
/// The imgui context of the `window`.
pub imgui_context: std::mem::ManuallyDrop<imgui::Context>,
/// The original native window, where its `imgui` value has been replaced with
/// `ImguiContext::Used()` and moved to `imgui_context`.
pub wrapped_window: &'a mut Window,
}
impl<'a> Drop for ActiveWindow<'a> {
/// Returns the `imgui::Context` back to the native window.
fn drop(&mut self) {
let val = std::mem::replace(&mut *self.imgui_context, unsafe {
std::mem::MaybeUninit::uninit().assume_init()
});
self.wrapped_window.imgui = ImguiContext::Suspended(val.suspend());
}
}
impl Deref for ActiveWindow<'_> {
type Target = Window;
fn deref(&self) -> &Self::Target {
self.wrapped_window
}
}
impl DerefMut for ActiveWindow<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.wrapped_window
}
}
impl ActiveWindow<'_> {
pub fn on_event(&mut self, app: &App, evt: &super::Event) {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
this.winit_platform
.handle_event(imgui.io_mut(), &this.window, evt);
match evt {
super::Event::WindowEvent {
window_id,
event: ref wnd_evt,
} if *window_id == this.id() => match wnd_evt {
winit::event::WindowEvent::CloseRequested => {
self.close();
}
winit::event::WindowEvent::Resized(physical_size) => {
self.update_render_size(app, *physical_size);
}
_ => (),
},
super::Event::MainEventsCleared => {
this.request_invalidate();
}
_ => (),
}
}
pub fn render(&mut self, app: &App, delta_time: std::time::Duration) -> UiResult<()> {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
imgui.io_mut().update_delta_time(delta_time);
this.winit_platform
.prepare_frame(imgui.io_mut(), &this.window)
.expect("Failed to prepare frame.");
let ui = imgui.frame();
{
let font_handle = match this.default_font {
Some(font) => Some(ui.push_font(font)),
None => None,
};
let layout_ctx = LayoutContext {
ui: &ui,
window_handle: this.handle(),
invalidate_amount_changed: Cell::new(false),
invalidate_amount: Cell::new(this.invalidate_amount()),
};
let data_model = &mut this.data_model as *mut Box<(dyn Layout + 'static)>;
unsafe {
(*data_model).layout(layout_ctx, app, this);
}
if let Some(font_handle) = font_handle {
font_handle.pop(&ui);
}
}
if this.last_cursor != ui.mouse_cursor() {
this.last_cursor = ui.mouse_cursor();
this.winit_platform.prepare_render(&ui, &this.window);
}
let draw_data: &imgui::DrawData = ui.render();
if draw_data.draw_lists_count() == 0 {
log::debug!("Imgui draw data is empty!");
return Ok(());
}
let frame: wgpu::SwapChainFrame = match this.swap_chain.get_current_frame() {
Ok(val) => val,
Err(_) => return Err(UiError::new(ErrorCode::SWAP_CHAIN_TIMEOUT)),
};
let cmd_encoder_desc = wgpu::CommandEncoderDescriptor {
label: Some("imgui_command_encoder"),
};
let mut encoder: wgpu::CommandEncoder =
this.gpu_device.create_command_encoder(&cmd_encoder_desc);
// If we have a msaa framebuffer, use it.
let (attachment, resolve_target) =
if let Some((ref msaa_framebuffer, size, _)) = this.msaa_framebuffer {
// Recreate the msaa_framebuffer if its size doesn't match.
if size.width == this.swap_chain_desc.width
&& size.height == this.swap_chain_desc.height
{
(msaa_framebuffer, Some(&frame.output.view))
} else {
this.msaa_framebuffer = Some(Window::create_msaa_framebuffer(
&this.gpu_device,
&this.swap_chain_desc,
app.msaa_samples,
));
(
&this.msaa_framebuffer.as_ref().unwrap().0,
Some(&frame.output.view),
)
}
} else {
(&frame.output.view, None)
};
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment,
resolve_target,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
store: true,
},
}],
depth_stencil_attachment: None,
});
match this
.renderer
.render(&draw_data, &this.queue, &this.gpu_device, &mut render_pass)
{
Err(err) => {
return Err(UiError::with_source(
ErrorCode::RENDER_ERROR,
utils::MessageError::debug(&err),
)) | Ok(_) => (),
};
drop(render_pass);
this.queue.submit(Some(encoder.finish()));
Ok(())
}
}
/// A thread-safe handle to a `Window`.
///
/// This handle can be used to communicate with the Window from a different thread
/// through events. All methods on this handle will return an error when the window does
/// not exist anymore (can be queried with `alive()`).
pub struct WindowHandle {
window_id: winit::window::WindowId,
app_handle: app::AppHandle,
alive: Weak<()>,
}
impl WindowHandle {
/// Queries wether the represented window still exists or not.
pub fn alive(&self) -> bool {
match self.alive.upgrade() {
Some(_) => true,
_ => false,
}
}
/// Runs the closure `callback` in the UI thread.
///
/// Returns an error if the `Window` this handle referres to doesn't exist anymore.
pub fn run(
&self,
callback: impl FnOnce(&mut app::App, &mut Window) + 'static + Send,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteWithWindow {
window_id: self.window_id,
callback: app::ExecuteWithWindowCallback(Box::new(callback)),
})
.unwrap();
Ok(())
}
/// Runs the closure callback in the UI thread and passes
/// the data model of the `Window` downcast to T.
///
/// The main thread will panic if the data model of the Window
/// cannot be downcast to T.
///
/// ## Note
/// There is no guarantee that the passed closure will be run.
/// If the Window gets destryed after this method has been called
/// and before the main thread has gotten the event for running the closure,
/// it will be skipped.
pub fn run_with_data_model<T: Layout + Any>(
&self,
callback: impl FnOnce(&mut app::App, &mut T, &mut Window) + 'static + Send,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteWithWindow {
window_id: self.window_id,
callback: app::ExecuteWithWindowCallback(Box::new(move |app, wnd: &mut Window| {
let wnd_ptr = wnd as *mut _;
let data_model = wnd.data_model.as_any().downcast_mut::<T>().unwrap();
callback(app, data_model, unsafe { &mut *wnd_ptr });
})),
})
.unwrap();
Ok(())
}
/// Request a redraw of the window.
pub fn request_invalidate(&self) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle.send_event(app::AppEvent::InvalidateWindow {
window_id: self.window_id,
})
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.window_id,
state: amount,
})
}
/// Calls `Window::data_model.log(level, message)` from the UI thread. If the
/// window does not exist anymore (it was already destroyed) and `level` is not
/// `None`, logs the `message` with the given `level` instead.
pub fn log(&self, level: Option<log::Level>, message: &str) {
let message_copy = String::from(message);
match self.run(move |_app, wnd| {
wnd.data_model.log(level, &message_copy);
}) {
Ok(_) => (),
Err(_) if level.is_some() => {
log::log!(level.unwrap(), "{}", message);
}
Err(_) => (),
};
}
/// Schedules the closure `callback` to approximately be executed at the given `instant`.
///
/// Returnes an error if the window represented by this handle does not exist anymore.
pub fn schedule(
&self,
instant: std::time::Instant,
callback: app::ExecuteAtCallback,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteAt { instant, callback })
}
}
impl Clone for WindowHandle {
fn clone(&self) -> Self {
WindowHandle {
window_id: self.window_id,
app_handle: self.app_handle.clone(),
alive: self.alive.clone(),
}
}
} | } | random_line_split |
window.rs | //! General logic for OS window manipulation and creation.
//!
//! Each `Window` gets a data model, which is a struct that implements the `Layout`
//! trait. All window and UI specific state is contained in the data model and it is
//! responsible for the GUI layout (`Layout::layout()`).
//!
//! Manipulation of all GUI related things can only be done in the main thread. This is
//! possible by sending events either with a `app::AppHandle` or `WindowHandle`.
use super::app::{self, App};
use super::ErrorCode;
use super::UiError;
use super::UiResult;
use crate::utils;
use std::any::Any;
use std::cell::Cell;
use std::error::Error;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::Arc;
use std::sync::Weak;
pub use winit::window::WindowId;
/// This trait contains the functions which are used to layout the GUI.
///
/// You must implement this trait in your own class and create a `Window` with an instance
/// of that class (which is called the data model). On every redraw of the GUI, the App
/// instance calls the `ActiveWindow::render()` method, which in turn calls
/// `Layout::layout()`.
pub trait Layout {
/// The central method for creating the GUI.
fn layout(&mut self, ui: LayoutContext, app: &App, window: &mut Window);
/// A user defined logging method, which can be called from a thread-safe handle of
/// the window.
///
/// If `level` is `None`, the `message` is only displayed in the GUI and not logged.
fn log(&mut self, level: Option<log::Level>, message: &str);
/// Used for downcasting to the actual data model type.
///
/// When manipulating the data model from a different thread using
/// `WindowHandle::run_with_data_model()`, this allows the cast from the `Layout`
/// trait reference to the actual data model type.
fn as_any(&mut self) -> &mut dyn Any;
/// This method is used to initialize the data model.
///
/// It will be called only once in the entire lifetime of the window, before the
/// window is shown. If this method fails it can return a boxed `std::error::Error`
/// and both `App::new_window()` and `Window::new()` will also fail and return
/// the same error.
fn init(&mut self, window: &mut Window) -> Result<(), Box<dyn Error>>;
/// This method is called before the window gets closed and destroyed.
/// The return value determines if the window actually gets closed.
///
/// If the return value is `true`, the window will be closed and destroyed,
/// otherwise when `false` it will remain open and the close action will be
/// ignored.
///
/// **Warning:**
/// Don't use this method for cleanup, because there is no guaratee that it is ever
/// called (for example when the window creation fails). Implement the `Drop` trait
/// for the data model, where all cleanup can then be done in the `Drop::drop()`
/// method, which is guaranteed to be called when the data model is destroyed.
fn before_close(&mut self, window: &mut Window) -> bool;
}
/// The amount the a window is or should be invalidated over time.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum InvalidateAmount {
/// Possible continuous invalidation is stopped or inactive.
Stop,
/// The window will be invalidated once as soon as possible.
Once,
/// The window is continuously invalidated until the given instant.
Until(std::time::Instant),
/// The window is continuously invalidated indefinetly.
Indefinetely,
}
impl InvalidateAmount {
/// Wether or not continuous updating is currently active.
///
/// `true` if `InvalidateAmount::Until(_)` or `InvalidateAmount::Indefinetely`,
/// `false` otherwise.
pub fn is_continuously(&self) -> bool {
match self {
Self::Stop | Self::Once => false,
_ => true,
}
}
}
/// The context used to create the GUI using Dear ImGUI.
/// It is passed to the `Layout::layout()` method.
pub struct LayoutContext<'ui> {
pub ui: &'ui imgui::Ui<'ui>,
pub window_handle: WindowHandle,
invalidate_amount_changed: Cell<bool>,
invalidate_amount: Cell<InvalidateAmount>,
}
impl LayoutContext<'_> {
/// Requests invalidation of the specified `amount` after the current frame is
/// finished. The resulting requested invalidation amount is the maximum of
/// all `request_invalidate()` calls for one frame.
#[inline]
pub fn request_invalidate(&self, amount: InvalidateAmount) {
if self.invalidate_amount.get() < amount {
self.invalidate_amount.set(amount);
} else if self.invalidate_amount.get() == amount {
self.invalidate_amount
.set(match (self.invalidate_amount.get(), amount) {
(InvalidateAmount::Until(inst0), InvalidateAmount::Until(inst1)) => {
InvalidateAmount::Until(utils::max_instant(inst0, inst1))
}
(curr, _) => curr,
});
}
self.invalidate_amount_changed.set(true);
}
}
impl<'ui> Deref for LayoutContext<'ui> {
type Target = imgui::Ui<'ui>;
#[inline]
fn deref(&self) -> &Self::Target {
if self.ui.is_item_edited() {
self.request_invalidate(InvalidateAmount::Once);
} else if self.ui.is_item_activated() {
self.request_invalidate(InvalidateAmount::Indefinetely);
} else if self.ui.is_item_deactivated() {
self.request_invalidate(InvalidateAmount::Stop);
}
self.ui
}
}
impl Drop for LayoutContext<'_> {
fn drop(&mut self) {
if self.invalidate_amount_changed.get() {
let _ = self
.window_handle
.set_invalidate_amount(self.invalidate_amount.get());
} else {
if self.ui.is_any_item_active() {
let _ = self.window_handle.request_invalidate();
}
}
}
}
/// This struct represents an OS window, which contains an ImGUI graphical user interface.
pub struct Window {
window: winit::window::Window,
last_frame_time: std::time::Instant,
alive: Arc<()>,
app_handle: app::AppHandle,
invalidate_amount: InvalidateAmount,
// Everything for rendering
surface: wgpu::Surface,
gpu_device: wgpu::Device,
swap_chain_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
renderer: imgui_wgpu::Renderer,
queue: wgpu::Queue,
/// A MSAA framebuffer texture and its sample count.
msaa_framebuffer: Option<(wgpu::TextureView, wgpu::Extent3d, u32)>,
// All imgui related
winit_platform: imgui_winit_support::WinitPlatform,
imgui: ImguiContext,
default_font: Option<imgui::FontId>,
last_cursor: Option<imgui::MouseCursor>,
/// The data model associated with this native window, that holds its state.
pub data_model: Box<dyn Layout>,
}
enum ImguiContext {
Suspended(imgui::SuspendedContext),
Used(),
}
impl Window {
/// Closes the window and, if no further windows remain, shuts down the application.
pub fn close(&mut self) {
let data_model = &mut self.data_model as *mut Box<(dyn Layout + 'static)>;
let should_close = unsafe { &mut *data_model }.before_close(self);
if should_close {
let window_id = self.id();
let _ = self.app_handle.execute_with_gui(move |app: &mut App| {
app.remove_window(window_id);
});
}
}
/// Get a mutable reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window_mut(&mut self) -> &mut winit::window::Window {
&mut self.window
}
/// Get a reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window(&self) -> &winit::window::Window {
&self.window
}
/// Get the id of the window.
pub fn id(&self) -> winit::window::WindowId {
self.window.id()
}
/// Get the time the window was last updated.
pub fn last_frame_time(&self) -> std::time::Instant {
self.last_frame_time
}
/// Updates the frame time to now and returns the duration since the last frame.
#[inline]
pub(crate) fn update_frame_time(&mut self) -> std::time::Duration {
let now = std::time::Instant::now();
let frame_delta = now - self.last_frame_time;
self.last_frame_time = now;
frame_delta
}
/// Creates a standard top level window.
///
/// Call this method inside the closure passed to `App::new_window()`.
pub fn build_window(title: &str, size: (u32, u32)) -> winit::window::WindowBuilder {
winit::window::WindowBuilder::new()
.with_title(title)
.with_inner_size(winit::dpi::PhysicalSize {
width: size.0,
height: size.1,
})
.with_resizable(true)
}
/// Creates a new `Window` instance.
///
/// *Only for internal use.* The user creates new windows using `App::new_window()`.
pub(crate) fn new(
app: &mut app::App,
data_model: Box<dyn Layout>,
wnd: winit::window::Window,
visible: bool,
) -> UiResult<Window> {
let size = wnd.inner_size();
let surface = unsafe { app.wgpu_instance.create_surface(&wnd) };
// select adapter and gpu device
let (device, queue) = Window::select_gpu_device(&app, &surface)?;
// create the swapchain
let format = wgpu::TextureFormat::Bgra8UnormSrgb;
let swap_chain_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc);
let msaa_framebuffer = if app.msaa_samples > 1 {
Some(Window::create_msaa_framebuffer(
&device,
&swap_chain_desc,
app.msaa_samples,
))
} else {
None
};
// create imgui ui
// Note: This is going to panic if any other `imgui::Context` is currently active
let mut imgui = imgui::Context::create_with_shared_font_atlas(app.font_atlas.clone());
let mut platform = imgui_winit_support::WinitPlatform::init(&mut imgui);
platform.attach_window(
imgui.io_mut(),
&wnd,
imgui_winit_support::HiDpiMode::Default,
);
app.apply_imgui_settings(&mut imgui);
// create renderer
let renderer = imgui_wgpu::Renderer::new(
&mut imgui,
&device,
&queue,
swap_chain_desc.format,
None,
app.msaa_samples,
);
let mut wnd = Window {
window: wnd,
last_frame_time: std::time::Instant::now(),
alive: Arc::default(),
app_handle: app.handle(),
invalidate_amount: InvalidateAmount::Stop,
surface,
gpu_device: device,
swap_chain_desc,
swap_chain,
renderer,
queue,
msaa_framebuffer,
winit_platform: platform,
imgui: ImguiContext::Suspended(imgui.suspend()),
default_font: None,
last_cursor: None,
data_model,
};
if visible {
// draw immediately
let mut active_window = wnd.activate()?;
active_window.render(app, std::time::Duration::from_millis(std::u64::MAX))?;
drop(active_window);
wnd.window().set_visible(true);
}
Ok(wnd)
}
pub fn invalidate_amount(&self) -> InvalidateAmount {
self.invalidate_amount
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.id(),
state: amount,
})
}
fn update_render_size<T: std::convert::Into<u32>>(
&mut self,
_app: &App,
size: winit::dpi::PhysicalSize<T>,
) {
self.swap_chain_desc.width = size.width.into();
self.swap_chain_desc.height = size.height.into();
self.swap_chain = self
.gpu_device
.create_swap_chain(&self.surface, &self.swap_chain_desc);
// Note: Normally we would also update the optional MSAA framebuffer here, but
// this causes visual resize lag, presumably because the recreation of the MSAA
// texture is quite expensive. Instead this is done in the
// `ActiveWindow::render()` method.
}
pub(super) fn activate<'a>(&'a mut self) -> UiResult<ActiveWindow<'a>> {
let imgui = std::mem::replace(&mut self.imgui, ImguiContext::Used());
if let ImguiContext::Suspended(ctx) = imgui {
let ctx = ctx.activate();
match ctx {
Ok(ctx) => {
return Ok(ActiveWindow {
imgui_context: std::mem::ManuallyDrop::new(ctx),
wrapped_window: self,
});
}
Err(ctx) => {
self.imgui = ImguiContext::Suspended(ctx);
return Err(UiError::new(ErrorCode::IMGUI_CONTEXT_ACTIVATE_FAILED));
}
}
}
Err(UiError::new(ErrorCode::INVALID_IMGUI_CONTEXT))
}
/// Creates a thread-safe handle to this native window.
///
/// This handle can be used to access the represented native window from another
/// thread using events that get sent to and dispatched in the main (UI) thread.
pub fn handle(&self) -> WindowHandle {
WindowHandle {
window_id: self.id(),
app_handle: self.app_handle.clone(),
alive: Arc::downgrade(&self.alive),
}
}
/// Request window invalidation as soon as possible.
pub fn request_invalidate(&self) {
self.window.request_redraw();
}
fn select_gpu_device(
app: &App,
surface: &wgpu::Surface,
) -> UiResult<(wgpu::Device, wgpu::Queue)> {
use futures::executor::block_on;
let adapter_opts = wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
};
let adapter_request = app.wgpu_instance.request_adapter(&adapter_opts);
let adapter = match block_on(adapter_request) {
Some(val) => val,
None => return Err(ErrorCode::GRAPHICS_ADAPTER_NOT_AVAILABLE.into()),
};
let device_desc = wgpu::DeviceDescriptor {
features: wgpu::Features::default(),
limits: wgpu::Limits::default(),
shader_validation: false,
};
let device_request =
adapter.request_device(&device_desc, Some(std::path::Path::new(file!())));
let device_and_queue = match block_on(device_request) {
Ok(device) => device,
Err(err) => {
return Err(UiError::with_source(
ErrorCode::REQUEST_GRAPHICS_DEVICE_FAILED,
err,
))
}
};
Ok(device_and_queue)
}
/// Creates new framebuffer for multisampling anti-aliasing with the specified
/// `sample_count`.
/// Returnes a tuple with the `wgpu::TextureView` and the MSAA sample count used.
fn create_msaa_framebuffer(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
sample_count: u32,
) -> (wgpu::TextureView, wgpu::Extent3d, u32) {
let tex_extent = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
};
let tex_desc = &wgpu::TextureDescriptor {
label: Some("imgui_msaa_texture"),
size: tex_extent,
mip_level_count: 1,
sample_count: sample_count,
dimension: wgpu::TextureDimension::D2,
format: sc_desc.format,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
};
let tex_view_desc = &wgpu::TextureViewDescriptor {
label: Some("imgui_msaa_texture_view"),
format: Some(sc_desc.format),
dimension: None,
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
level_count: None,
base_array_layer: 0,
array_layer_count: None,
};
(
device.create_texture(tex_desc).create_view(&tex_view_desc),
tex_extent,
sample_count,
)
}
/// Gets the `wgpu::Queue` of this window.
pub fn wgpu_queue(&self) -> &wgpu::Queue {
&self.queue
}
/// Gets the `wgpu::Device` of this window.
pub fn wgpu_device(&self) -> &wgpu::Device {
&self.gpu_device
}
/// Gets the renderer.
pub fn renderer(&self) -> &imgui_wgpu::Renderer {
&self.renderer
}
/// Gets a reference to the texture collection.
pub fn textures(&self) -> &imgui::Textures<imgui_wgpu::Texture> {
&self.renderer.textures
}
/// Gets a mutable reference to the texture collection.
pub fn textures_mut(&mut self) -> &mut imgui::Textures<imgui_wgpu::Texture> {
&mut self.renderer.textures
}
}
/// A window prepared to be updated.
///
/// This struct is used to disjoin the lifetimes of the `Window` with that of the
/// `imgui::Context`.
pub struct ActiveWindow<'a> {
/// The imgui context of the `window`.
pub imgui_context: std::mem::ManuallyDrop<imgui::Context>,
/// The original native window, where its `imgui` value has been replaced with
/// `ImguiContext::Used()` and moved to `imgui_context`.
pub wrapped_window: &'a mut Window,
}
impl<'a> Drop for ActiveWindow<'a> {
/// Returns the `imgui::Context` back to the native window.
fn | (&mut self) {
let val = std::mem::replace(&mut *self.imgui_context, unsafe {
std::mem::MaybeUninit::uninit().assume_init()
});
self.wrapped_window.imgui = ImguiContext::Suspended(val.suspend());
}
}
impl Deref for ActiveWindow<'_> {
type Target = Window;
fn deref(&self) -> &Self::Target {
self.wrapped_window
}
}
impl DerefMut for ActiveWindow<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.wrapped_window
}
}
impl ActiveWindow<'_> {
pub fn on_event(&mut self, app: &App, evt: &super::Event) {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
this.winit_platform
.handle_event(imgui.io_mut(), &this.window, evt);
match evt {
super::Event::WindowEvent {
window_id,
event: ref wnd_evt,
} if *window_id == this.id() => match wnd_evt {
winit::event::WindowEvent::CloseRequested => {
self.close();
}
winit::event::WindowEvent::Resized(physical_size) => {
self.update_render_size(app, *physical_size);
}
_ => (),
},
super::Event::MainEventsCleared => {
this.request_invalidate();
}
_ => (),
}
}
pub fn render(&mut self, app: &App, delta_time: std::time::Duration) -> UiResult<()> {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
imgui.io_mut().update_delta_time(delta_time);
this.winit_platform
.prepare_frame(imgui.io_mut(), &this.window)
.expect("Failed to prepare frame.");
let ui = imgui.frame();
{
let font_handle = match this.default_font {
Some(font) => Some(ui.push_font(font)),
None => None,
};
let layout_ctx = LayoutContext {
ui: &ui,
window_handle: this.handle(),
invalidate_amount_changed: Cell::new(false),
invalidate_amount: Cell::new(this.invalidate_amount()),
};
let data_model = &mut this.data_model as *mut Box<(dyn Layout + 'static)>;
unsafe {
(*data_model).layout(layout_ctx, app, this);
}
if let Some(font_handle) = font_handle {
font_handle.pop(&ui);
}
}
if this.last_cursor != ui.mouse_cursor() {
this.last_cursor = ui.mouse_cursor();
this.winit_platform.prepare_render(&ui, &this.window);
}
let draw_data: &imgui::DrawData = ui.render();
if draw_data.draw_lists_count() == 0 {
log::debug!("Imgui draw data is empty!");
return Ok(());
}
let frame: wgpu::SwapChainFrame = match this.swap_chain.get_current_frame() {
Ok(val) => val,
Err(_) => return Err(UiError::new(ErrorCode::SWAP_CHAIN_TIMEOUT)),
};
let cmd_encoder_desc = wgpu::CommandEncoderDescriptor {
label: Some("imgui_command_encoder"),
};
let mut encoder: wgpu::CommandEncoder =
this.gpu_device.create_command_encoder(&cmd_encoder_desc);
// If we have a msaa framebuffer, use it.
let (attachment, resolve_target) =
if let Some((ref msaa_framebuffer, size, _)) = this.msaa_framebuffer {
// Recreate the msaa_framebuffer if its size doesn't match.
if size.width == this.swap_chain_desc.width
&& size.height == this.swap_chain_desc.height
{
(msaa_framebuffer, Some(&frame.output.view))
} else {
this.msaa_framebuffer = Some(Window::create_msaa_framebuffer(
&this.gpu_device,
&this.swap_chain_desc,
app.msaa_samples,
));
(
&this.msaa_framebuffer.as_ref().unwrap().0,
Some(&frame.output.view),
)
}
} else {
(&frame.output.view, None)
};
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment,
resolve_target,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
store: true,
},
}],
depth_stencil_attachment: None,
});
match this
.renderer
.render(&draw_data, &this.queue, &this.gpu_device, &mut render_pass)
{
Err(err) => {
return Err(UiError::with_source(
ErrorCode::RENDER_ERROR,
utils::MessageError::debug(&err),
))
}
Ok(_) => (),
};
drop(render_pass);
this.queue.submit(Some(encoder.finish()));
Ok(())
}
}
/// A thread-safe handle to a `Window`.
///
/// This handle can be used to communicate with the Window from a different thread
/// through events. All methods on this handle will return an error when the window does
/// not exist anymore (can be queried with `alive()`).
pub struct WindowHandle {
window_id: winit::window::WindowId,
app_handle: app::AppHandle,
alive: Weak<()>,
}
impl WindowHandle {
/// Queries wether the represented window still exists or not.
pub fn alive(&self) -> bool {
match self.alive.upgrade() {
Some(_) => true,
_ => false,
}
}
/// Runs the closure `callback` in the UI thread.
///
/// Returns an error if the `Window` this handle referres to doesn't exist anymore.
pub fn run(
&self,
callback: impl FnOnce(&mut app::App, &mut Window) + 'static + Send,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteWithWindow {
window_id: self.window_id,
callback: app::ExecuteWithWindowCallback(Box::new(callback)),
})
.unwrap();
Ok(())
}
/// Runs the closure callback in the UI thread and passes
/// the data model of the `Window` downcast to T.
///
/// The main thread will panic if the data model of the Window
/// cannot be downcast to T.
///
/// ## Note
/// There is no guarantee that the passed closure will be run.
/// If the Window gets destryed after this method has been called
/// and before the main thread has gotten the event for running the closure,
/// it will be skipped.
pub fn run_with_data_model<T: Layout + Any>(
&self,
callback: impl FnOnce(&mut app::App, &mut T, &mut Window) + 'static + Send,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteWithWindow {
window_id: self.window_id,
callback: app::ExecuteWithWindowCallback(Box::new(move |app, wnd: &mut Window| {
let wnd_ptr = wnd as *mut _;
let data_model = wnd.data_model.as_any().downcast_mut::<T>().unwrap();
callback(app, data_model, unsafe { &mut *wnd_ptr });
})),
})
.unwrap();
Ok(())
}
/// Request a redraw of the window.
pub fn request_invalidate(&self) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle.send_event(app::AppEvent::InvalidateWindow {
window_id: self.window_id,
})
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.window_id,
state: amount,
})
}
/// Calls `Window::data_model.log(level, message)` from the UI thread. If the
/// window does not exist anymore (it was already destroyed) and `level` is not
/// `None`, logs the `message` with the given `level` instead.
pub fn log(&self, level: Option<log::Level>, message: &str) {
let message_copy = String::from(message);
match self.run(move |_app, wnd| {
wnd.data_model.log(level, &message_copy);
}) {
Ok(_) => (),
Err(_) if level.is_some() => {
log::log!(level.unwrap(), "{}", message);
}
Err(_) => (),
};
}
/// Schedules the closure `callback` to approximately be executed at the given `instant`.
///
/// Returnes an error if the window represented by this handle does not exist anymore.
pub fn schedule(
&self,
instant: std::time::Instant,
callback: app::ExecuteAtCallback,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteAt { instant, callback })
}
}
impl Clone for WindowHandle {
fn clone(&self) -> Self {
WindowHandle {
window_id: self.window_id,
app_handle: self.app_handle.clone(),
alive: self.alive.clone(),
}
}
}
| drop | identifier_name |
window.rs | //! General logic for OS window manipulation and creation.
//!
//! Each `Window` gets a data model, which is a struct that implements the `Layout`
//! trait. All window and UI specific state is contained in the data model and it is
//! responsible for the GUI layout (`Layout::layout()`).
//!
//! Manipulation of all GUI related things can only be done in the main thread. This is
//! possible by sending events either with a `app::AppHandle` or `WindowHandle`.
use super::app::{self, App};
use super::ErrorCode;
use super::UiError;
use super::UiResult;
use crate::utils;
use std::any::Any;
use std::cell::Cell;
use std::error::Error;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::Arc;
use std::sync::Weak;
pub use winit::window::WindowId;
/// This trait contains the functions which are used to layout the GUI.
///
/// You must implement this trait in your own class and create a `Window` with an instance
/// of that class (which is called the data model). On every redraw of the GUI, the App
/// instance calls the `ActiveWindow::render()` method, which in turn calls
/// `Layout::layout()`.
pub trait Layout {
/// The central method for creating the GUI.
fn layout(&mut self, ui: LayoutContext, app: &App, window: &mut Window);
/// A user defined logging method, which can be called from a thread-safe handle of
/// the window.
///
/// If `level` is `None`, the `message` is only displayed in the GUI and not logged.
fn log(&mut self, level: Option<log::Level>, message: &str);
/// Used for downcasting to the actual data model type.
///
/// When manipulating the data model from a different thread using
/// `WindowHandle::run_with_data_model()`, this allows the cast from the `Layout`
/// trait reference to the actual data model type.
fn as_any(&mut self) -> &mut dyn Any;
/// This method is used to initialize the data model.
///
/// It will be called only once in the entire lifetime of the window, before the
/// window is shown. If this method fails it can return a boxed `std::error::Error`
/// and both `App::new_window()` and `Window::new()` will also fail and return
/// the same error.
fn init(&mut self, window: &mut Window) -> Result<(), Box<dyn Error>>;
/// This method is called before the window gets closed and destroyed.
/// The return value determines if the window actually gets closed.
///
/// If the return value is `true`, the window will be closed and destroyed,
/// otherwise when `false` it will remain open and the close action will be
/// ignored.
///
/// **Warning:**
/// Don't use this method for cleanup, because there is no guaratee that it is ever
/// called (for example when the window creation fails). Implement the `Drop` trait
/// for the data model, where all cleanup can then be done in the `Drop::drop()`
/// method, which is guaranteed to be called when the data model is destroyed.
fn before_close(&mut self, window: &mut Window) -> bool;
}
/// The amount the a window is or should be invalidated over time.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum InvalidateAmount {
/// Possible continuous invalidation is stopped or inactive.
Stop,
/// The window will be invalidated once as soon as possible.
Once,
/// The window is continuously invalidated until the given instant.
Until(std::time::Instant),
/// The window is continuously invalidated indefinetly.
Indefinetely,
}
impl InvalidateAmount {
/// Wether or not continuous updating is currently active.
///
/// `true` if `InvalidateAmount::Until(_)` or `InvalidateAmount::Indefinetely`,
/// `false` otherwise.
pub fn is_continuously(&self) -> bool {
match self {
Self::Stop | Self::Once => false,
_ => true,
}
}
}
/// The context used to create the GUI using Dear ImGUI.
/// It is passed to the `Layout::layout()` method.
pub struct LayoutContext<'ui> {
pub ui: &'ui imgui::Ui<'ui>,
pub window_handle: WindowHandle,
invalidate_amount_changed: Cell<bool>,
invalidate_amount: Cell<InvalidateAmount>,
}
impl LayoutContext<'_> {
/// Requests invalidation of the specified `amount` after the current frame is
/// finished. The resulting requested invalidation amount is the maximum of
/// all `request_invalidate()` calls for one frame.
#[inline]
pub fn request_invalidate(&self, amount: InvalidateAmount) {
if self.invalidate_amount.get() < amount {
self.invalidate_amount.set(amount);
} else if self.invalidate_amount.get() == amount {
self.invalidate_amount
.set(match (self.invalidate_amount.get(), amount) {
(InvalidateAmount::Until(inst0), InvalidateAmount::Until(inst1)) => {
InvalidateAmount::Until(utils::max_instant(inst0, inst1))
}
(curr, _) => curr,
});
}
self.invalidate_amount_changed.set(true);
}
}
impl<'ui> Deref for LayoutContext<'ui> {
type Target = imgui::Ui<'ui>;
#[inline]
fn deref(&self) -> &Self::Target {
if self.ui.is_item_edited() {
self.request_invalidate(InvalidateAmount::Once);
} else if self.ui.is_item_activated() {
self.request_invalidate(InvalidateAmount::Indefinetely);
} else if self.ui.is_item_deactivated() {
self.request_invalidate(InvalidateAmount::Stop);
}
self.ui
}
}
impl Drop for LayoutContext<'_> {
fn drop(&mut self) {
if self.invalidate_amount_changed.get() {
let _ = self
.window_handle
.set_invalidate_amount(self.invalidate_amount.get());
} else {
if self.ui.is_any_item_active() {
let _ = self.window_handle.request_invalidate();
}
}
}
}
/// This struct represents an OS window, which contains an ImGUI graphical user interface.
pub struct Window {
window: winit::window::Window,
last_frame_time: std::time::Instant,
alive: Arc<()>,
app_handle: app::AppHandle,
invalidate_amount: InvalidateAmount,
// Everything for rendering
surface: wgpu::Surface,
gpu_device: wgpu::Device,
swap_chain_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
renderer: imgui_wgpu::Renderer,
queue: wgpu::Queue,
/// A MSAA framebuffer texture and its sample count.
msaa_framebuffer: Option<(wgpu::TextureView, wgpu::Extent3d, u32)>,
// All imgui related
winit_platform: imgui_winit_support::WinitPlatform,
imgui: ImguiContext,
default_font: Option<imgui::FontId>,
last_cursor: Option<imgui::MouseCursor>,
/// The data model associated with this native window, that holds its state.
pub data_model: Box<dyn Layout>,
}
enum ImguiContext {
Suspended(imgui::SuspendedContext),
Used(),
}
impl Window {
/// Closes the window and, if no further windows remain, shuts down the application.
pub fn close(&mut self) {
let data_model = &mut self.data_model as *mut Box<(dyn Layout + 'static)>;
let should_close = unsafe { &mut *data_model }.before_close(self);
if should_close {
let window_id = self.id();
let _ = self.app_handle.execute_with_gui(move |app: &mut App| {
app.remove_window(window_id);
});
}
}
/// Get a mutable reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window_mut(&mut self) -> &mut winit::window::Window {
&mut self.window
}
/// Get a reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window(&self) -> &winit::window::Window {
&self.window
}
/// Get the id of the window.
pub fn id(&self) -> winit::window::WindowId {
self.window.id()
}
/// Get the time the window was last updated.
pub fn last_frame_time(&self) -> std::time::Instant {
self.last_frame_time
}
/// Updates the frame time to now and returns the duration since the last frame.
#[inline]
pub(crate) fn update_frame_time(&mut self) -> std::time::Duration {
let now = std::time::Instant::now();
let frame_delta = now - self.last_frame_time;
self.last_frame_time = now;
frame_delta
}
/// Creates a standard top level window.
///
/// Call this method inside the closure passed to `App::new_window()`.
pub fn build_window(title: &str, size: (u32, u32)) -> winit::window::WindowBuilder {
winit::window::WindowBuilder::new()
.with_title(title)
.with_inner_size(winit::dpi::PhysicalSize {
width: size.0,
height: size.1,
})
.with_resizable(true)
}
/// Creates a new `Window` instance.
///
/// *Only for internal use.* The user creates new windows using `App::new_window()`.
pub(crate) fn new(
app: &mut app::App,
data_model: Box<dyn Layout>,
wnd: winit::window::Window,
visible: bool,
) -> UiResult<Window> {
let size = wnd.inner_size();
let surface = unsafe { app.wgpu_instance.create_surface(&wnd) };
// select adapter and gpu device
let (device, queue) = Window::select_gpu_device(&app, &surface)?;
// create the swapchain
let format = wgpu::TextureFormat::Bgra8UnormSrgb;
let swap_chain_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc);
let msaa_framebuffer = if app.msaa_samples > 1 {
Some(Window::create_msaa_framebuffer(
&device,
&swap_chain_desc,
app.msaa_samples,
))
} else {
None
};
// create imgui ui
// Note: This is going to panic if any other `imgui::Context` is currently active
let mut imgui = imgui::Context::create_with_shared_font_atlas(app.font_atlas.clone());
let mut platform = imgui_winit_support::WinitPlatform::init(&mut imgui);
platform.attach_window(
imgui.io_mut(),
&wnd,
imgui_winit_support::HiDpiMode::Default,
);
app.apply_imgui_settings(&mut imgui);
// create renderer
let renderer = imgui_wgpu::Renderer::new(
&mut imgui,
&device,
&queue,
swap_chain_desc.format,
None,
app.msaa_samples,
);
let mut wnd = Window {
window: wnd,
last_frame_time: std::time::Instant::now(),
alive: Arc::default(),
app_handle: app.handle(),
invalidate_amount: InvalidateAmount::Stop,
surface,
gpu_device: device,
swap_chain_desc,
swap_chain,
renderer,
queue,
msaa_framebuffer,
winit_platform: platform,
imgui: ImguiContext::Suspended(imgui.suspend()),
default_font: None,
last_cursor: None,
data_model,
};
if visible {
// draw immediately
let mut active_window = wnd.activate()?;
active_window.render(app, std::time::Duration::from_millis(std::u64::MAX))?;
drop(active_window);
wnd.window().set_visible(true);
}
Ok(wnd)
}
pub fn invalidate_amount(&self) -> InvalidateAmount {
self.invalidate_amount
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.id(),
state: amount,
})
}
fn update_render_size<T: std::convert::Into<u32>>(
&mut self,
_app: &App,
size: winit::dpi::PhysicalSize<T>,
) {
self.swap_chain_desc.width = size.width.into();
self.swap_chain_desc.height = size.height.into();
self.swap_chain = self
.gpu_device
.create_swap_chain(&self.surface, &self.swap_chain_desc);
// Note: Normally we would also update the optional MSAA framebuffer here, but
// this causes visual resize lag, presumably because the recreation of the MSAA
// texture is quite expensive. Instead this is done in the
// `ActiveWindow::render()` method.
}
pub(super) fn activate<'a>(&'a mut self) -> UiResult<ActiveWindow<'a>> {
let imgui = std::mem::replace(&mut self.imgui, ImguiContext::Used());
if let ImguiContext::Suspended(ctx) = imgui {
let ctx = ctx.activate();
match ctx {
Ok(ctx) => {
return Ok(ActiveWindow {
imgui_context: std::mem::ManuallyDrop::new(ctx),
wrapped_window: self,
});
}
Err(ctx) => {
self.imgui = ImguiContext::Suspended(ctx);
return Err(UiError::new(ErrorCode::IMGUI_CONTEXT_ACTIVATE_FAILED));
}
}
}
Err(UiError::new(ErrorCode::INVALID_IMGUI_CONTEXT))
}
/// Creates a thread-safe handle to this native window.
///
/// This handle can be used to access the represented native window from another
/// thread using events that get sent to and dispatched in the main (UI) thread.
pub fn handle(&self) -> WindowHandle {
WindowHandle {
window_id: self.id(),
app_handle: self.app_handle.clone(),
alive: Arc::downgrade(&self.alive),
}
}
/// Request window invalidation as soon as possible.
pub fn request_invalidate(&self) {
self.window.request_redraw();
}
fn select_gpu_device(
app: &App,
surface: &wgpu::Surface,
) -> UiResult<(wgpu::Device, wgpu::Queue)> {
use futures::executor::block_on;
let adapter_opts = wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
};
let adapter_request = app.wgpu_instance.request_adapter(&adapter_opts);
let adapter = match block_on(adapter_request) {
Some(val) => val,
None => return Err(ErrorCode::GRAPHICS_ADAPTER_NOT_AVAILABLE.into()),
};
let device_desc = wgpu::DeviceDescriptor {
features: wgpu::Features::default(),
limits: wgpu::Limits::default(),
shader_validation: false,
};
let device_request =
adapter.request_device(&device_desc, Some(std::path::Path::new(file!())));
let device_and_queue = match block_on(device_request) {
Ok(device) => device,
Err(err) => {
return Err(UiError::with_source(
ErrorCode::REQUEST_GRAPHICS_DEVICE_FAILED,
err,
))
}
};
Ok(device_and_queue)
}
/// Creates new framebuffer for multisampling anti-aliasing with the specified
/// `sample_count`.
/// Returnes a tuple with the `wgpu::TextureView` and the MSAA sample count used.
fn create_msaa_framebuffer(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
sample_count: u32,
) -> (wgpu::TextureView, wgpu::Extent3d, u32) {
let tex_extent = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
};
let tex_desc = &wgpu::TextureDescriptor {
label: Some("imgui_msaa_texture"),
size: tex_extent,
mip_level_count: 1,
sample_count: sample_count,
dimension: wgpu::TextureDimension::D2,
format: sc_desc.format,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
};
let tex_view_desc = &wgpu::TextureViewDescriptor {
label: Some("imgui_msaa_texture_view"),
format: Some(sc_desc.format),
dimension: None,
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
level_count: None,
base_array_layer: 0,
array_layer_count: None,
};
(
device.create_texture(tex_desc).create_view(&tex_view_desc),
tex_extent,
sample_count,
)
}
/// Gets the `wgpu::Queue` of this window.
pub fn wgpu_queue(&self) -> &wgpu::Queue {
&self.queue
}
/// Gets the `wgpu::Device` of this window.
pub fn wgpu_device(&self) -> &wgpu::Device {
&self.gpu_device
}
/// Gets the renderer.
pub fn renderer(&self) -> &imgui_wgpu::Renderer {
&self.renderer
}
/// Gets a reference to the texture collection.
pub fn textures(&self) -> &imgui::Textures<imgui_wgpu::Texture> {
&self.renderer.textures
}
/// Gets a mutable reference to the texture collection.
pub fn textures_mut(&mut self) -> &mut imgui::Textures<imgui_wgpu::Texture> {
&mut self.renderer.textures
}
}
/// A window prepared to be updated.
///
/// This struct is used to disjoin the lifetimes of the `Window` with that of the
/// `imgui::Context`.
pub struct ActiveWindow<'a> {
/// The imgui context of the `window`.
pub imgui_context: std::mem::ManuallyDrop<imgui::Context>,
/// The original native window, where its `imgui` value has been replaced with
/// `ImguiContext::Used()` and moved to `imgui_context`.
pub wrapped_window: &'a mut Window,
}
impl<'a> Drop for ActiveWindow<'a> {
/// Returns the `imgui::Context` back to the native window.
fn drop(&mut self) {
let val = std::mem::replace(&mut *self.imgui_context, unsafe {
std::mem::MaybeUninit::uninit().assume_init()
});
self.wrapped_window.imgui = ImguiContext::Suspended(val.suspend());
}
}
impl Deref for ActiveWindow<'_> {
type Target = Window;
fn deref(&self) -> &Self::Target {
self.wrapped_window
}
}
impl DerefMut for ActiveWindow<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.wrapped_window
}
}
impl ActiveWindow<'_> {
pub fn on_event(&mut self, app: &App, evt: &super::Event) {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
this.winit_platform
.handle_event(imgui.io_mut(), &this.window, evt);
match evt {
super::Event::WindowEvent {
window_id,
event: ref wnd_evt,
} if *window_id == this.id() => match wnd_evt {
winit::event::WindowEvent::CloseRequested => {
self.close();
}
winit::event::WindowEvent::Resized(physical_size) => {
self.update_render_size(app, *physical_size);
}
_ => (),
},
super::Event::MainEventsCleared => {
this.request_invalidate();
}
_ => (),
}
}
pub fn render(&mut self, app: &App, delta_time: std::time::Duration) -> UiResult<()> {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
imgui.io_mut().update_delta_time(delta_time);
this.winit_platform
.prepare_frame(imgui.io_mut(), &this.window)
.expect("Failed to prepare frame.");
let ui = imgui.frame();
{
let font_handle = match this.default_font {
Some(font) => Some(ui.push_font(font)),
None => None,
};
let layout_ctx = LayoutContext {
ui: &ui,
window_handle: this.handle(),
invalidate_amount_changed: Cell::new(false),
invalidate_amount: Cell::new(this.invalidate_amount()),
};
let data_model = &mut this.data_model as *mut Box<(dyn Layout + 'static)>;
unsafe {
(*data_model).layout(layout_ctx, app, this);
}
if let Some(font_handle) = font_handle {
font_handle.pop(&ui);
}
}
if this.last_cursor != ui.mouse_cursor() {
this.last_cursor = ui.mouse_cursor();
this.winit_platform.prepare_render(&ui, &this.window);
}
let draw_data: &imgui::DrawData = ui.render();
if draw_data.draw_lists_count() == 0 {
log::debug!("Imgui draw data is empty!");
return Ok(());
}
let frame: wgpu::SwapChainFrame = match this.swap_chain.get_current_frame() {
Ok(val) => val,
Err(_) => return Err(UiError::new(ErrorCode::SWAP_CHAIN_TIMEOUT)),
};
let cmd_encoder_desc = wgpu::CommandEncoderDescriptor {
label: Some("imgui_command_encoder"),
};
let mut encoder: wgpu::CommandEncoder =
this.gpu_device.create_command_encoder(&cmd_encoder_desc);
// If we have a msaa framebuffer, use it.
let (attachment, resolve_target) =
if let Some((ref msaa_framebuffer, size, _)) = this.msaa_framebuffer {
// Recreate the msaa_framebuffer if its size doesn't match.
if size.width == this.swap_chain_desc.width
&& size.height == this.swap_chain_desc.height
{
(msaa_framebuffer, Some(&frame.output.view))
} else {
this.msaa_framebuffer = Some(Window::create_msaa_framebuffer(
&this.gpu_device,
&this.swap_chain_desc,
app.msaa_samples,
));
(
&this.msaa_framebuffer.as_ref().unwrap().0,
Some(&frame.output.view),
)
}
} else {
(&frame.output.view, None)
};
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment,
resolve_target,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
store: true,
},
}],
depth_stencil_attachment: None,
});
match this
.renderer
.render(&draw_data, &this.queue, &this.gpu_device, &mut render_pass)
{
Err(err) => {
return Err(UiError::with_source(
ErrorCode::RENDER_ERROR,
utils::MessageError::debug(&err),
))
}
Ok(_) => (),
};
drop(render_pass);
this.queue.submit(Some(encoder.finish()));
Ok(())
}
}
/// A thread-safe handle to a `Window`.
///
/// This handle can be used to communicate with the Window from a different thread
/// through events. All methods on this handle will return an error when the window does
/// not exist anymore (can be queried with `alive()`).
pub struct WindowHandle {
window_id: winit::window::WindowId,
app_handle: app::AppHandle,
alive: Weak<()>,
}
impl WindowHandle {
/// Queries wether the represented window still exists or not.
pub fn alive(&self) -> bool {
match self.alive.upgrade() {
Some(_) => true,
_ => false,
}
}
/// Runs the closure `callback` in the UI thread.
///
/// Returns an error if the `Window` this handle referres to doesn't exist anymore.
pub fn run(
&self,
callback: impl FnOnce(&mut app::App, &mut Window) + 'static + Send,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteWithWindow {
window_id: self.window_id,
callback: app::ExecuteWithWindowCallback(Box::new(callback)),
})
.unwrap();
Ok(())
}
/// Runs the closure callback in the UI thread and passes
/// the data model of the `Window` downcast to T.
///
/// The main thread will panic if the data model of the Window
/// cannot be downcast to T.
///
/// ## Note
/// There is no guarantee that the passed closure will be run.
/// If the Window gets destryed after this method has been called
/// and before the main thread has gotten the event for running the closure,
/// it will be skipped.
pub fn run_with_data_model<T: Layout + Any>(
&self,
callback: impl FnOnce(&mut app::App, &mut T, &mut Window) + 'static + Send,
) -> UiResult<()> {
if let None = self.alive.upgrade() |
self.app_handle
.send_event(app::AppEvent::ExecuteWithWindow {
window_id: self.window_id,
callback: app::ExecuteWithWindowCallback(Box::new(move |app, wnd: &mut Window| {
let wnd_ptr = wnd as *mut _;
let data_model = wnd.data_model.as_any().downcast_mut::<T>().unwrap();
callback(app, data_model, unsafe { &mut *wnd_ptr });
})),
})
.unwrap();
Ok(())
}
/// Request a redraw of the window.
pub fn request_invalidate(&self) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle.send_event(app::AppEvent::InvalidateWindow {
window_id: self.window_id,
})
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.window_id,
state: amount,
})
}
/// Calls `Window::data_model.log(level, message)` from the UI thread. If the
/// window does not exist anymore (it was already destroyed) and `level` is not
/// `None`, logs the `message` with the given `level` instead.
pub fn log(&self, level: Option<log::Level>, message: &str) {
let message_copy = String::from(message);
match self.run(move |_app, wnd| {
wnd.data_model.log(level, &message_copy);
}) {
Ok(_) => (),
Err(_) if level.is_some() => {
log::log!(level.unwrap(), "{}", message);
}
Err(_) => (),
};
}
/// Schedules the closure `callback` to approximately be executed at the given `instant`.
///
/// Returnes an error if the window represented by this handle does not exist anymore.
pub fn schedule(
&self,
instant: std::time::Instant,
callback: app::ExecuteAtCallback,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteAt { instant, callback })
}
}
impl Clone for WindowHandle {
fn clone(&self) -> Self {
WindowHandle {
window_id: self.window_id,
app_handle: self.app_handle.clone(),
alive: self.alive.clone(),
}
}
}
| {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
} | conditional_block |
window.rs | //! General logic for OS window manipulation and creation.
//!
//! Each `Window` gets a data model, which is a struct that implements the `Layout`
//! trait. All window and UI specific state is contained in the data model and it is
//! responsible for the GUI layout (`Layout::layout()`).
//!
//! Manipulation of all GUI related things can only be done in the main thread. This is
//! possible by sending events either with a `app::AppHandle` or `WindowHandle`.
use super::app::{self, App};
use super::ErrorCode;
use super::UiError;
use super::UiResult;
use crate::utils;
use std::any::Any;
use std::cell::Cell;
use std::error::Error;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::Arc;
use std::sync::Weak;
pub use winit::window::WindowId;
/// This trait contains the functions which are used to layout the GUI.
///
/// You must implement this trait in your own class and create a `Window` with an instance
/// of that class (which is called the data model). On every redraw of the GUI, the App
/// instance calls the `ActiveWindow::render()` method, which in turn calls
/// `Layout::layout()`.
pub trait Layout {
/// The central method for creating the GUI.
fn layout(&mut self, ui: LayoutContext, app: &App, window: &mut Window);
/// A user defined logging method, which can be called from a thread-safe handle of
/// the window.
///
/// If `level` is `None`, the `message` is only displayed in the GUI and not logged.
fn log(&mut self, level: Option<log::Level>, message: &str);
/// Used for downcasting to the actual data model type.
///
/// When manipulating the data model from a different thread using
/// `WindowHandle::run_with_data_model()`, this allows the cast from the `Layout`
/// trait reference to the actual data model type.
fn as_any(&mut self) -> &mut dyn Any;
/// This method is used to initialize the data model.
///
/// It will be called only once in the entire lifetime of the window, before the
/// window is shown. If this method fails it can return a boxed `std::error::Error`
/// and both `App::new_window()` and `Window::new()` will also fail and return
/// the same error.
fn init(&mut self, window: &mut Window) -> Result<(), Box<dyn Error>>;
/// This method is called before the window gets closed and destroyed.
/// The return value determines if the window actually gets closed.
///
/// If the return value is `true`, the window will be closed and destroyed,
/// otherwise when `false` it will remain open and the close action will be
/// ignored.
///
/// **Warning:**
/// Don't use this method for cleanup, because there is no guaratee that it is ever
/// called (for example when the window creation fails). Implement the `Drop` trait
/// for the data model, where all cleanup can then be done in the `Drop::drop()`
/// method, which is guaranteed to be called when the data model is destroyed.
fn before_close(&mut self, window: &mut Window) -> bool;
}
/// The amount the a window is or should be invalidated over time.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum InvalidateAmount {
/// Possible continuous invalidation is stopped or inactive.
Stop,
/// The window will be invalidated once as soon as possible.
Once,
/// The window is continuously invalidated until the given instant.
Until(std::time::Instant),
/// The window is continuously invalidated indefinetly.
Indefinetely,
}
impl InvalidateAmount {
/// Wether or not continuous updating is currently active.
///
/// `true` if `InvalidateAmount::Until(_)` or `InvalidateAmount::Indefinetely`,
/// `false` otherwise.
pub fn is_continuously(&self) -> bool {
match self {
Self::Stop | Self::Once => false,
_ => true,
}
}
}
/// The context used to create the GUI using Dear ImGUI.
/// It is passed to the `Layout::layout()` method.
pub struct LayoutContext<'ui> {
pub ui: &'ui imgui::Ui<'ui>,
pub window_handle: WindowHandle,
invalidate_amount_changed: Cell<bool>,
invalidate_amount: Cell<InvalidateAmount>,
}
impl LayoutContext<'_> {
/// Requests invalidation of the specified `amount` after the current frame is
/// finished. The resulting requested invalidation amount is the maximum of
/// all `request_invalidate()` calls for one frame.
#[inline]
pub fn request_invalidate(&self, amount: InvalidateAmount) {
if self.invalidate_amount.get() < amount {
self.invalidate_amount.set(amount);
} else if self.invalidate_amount.get() == amount {
self.invalidate_amount
.set(match (self.invalidate_amount.get(), amount) {
(InvalidateAmount::Until(inst0), InvalidateAmount::Until(inst1)) => {
InvalidateAmount::Until(utils::max_instant(inst0, inst1))
}
(curr, _) => curr,
});
}
self.invalidate_amount_changed.set(true);
}
}
impl<'ui> Deref for LayoutContext<'ui> {
type Target = imgui::Ui<'ui>;
#[inline]
fn deref(&self) -> &Self::Target {
if self.ui.is_item_edited() {
self.request_invalidate(InvalidateAmount::Once);
} else if self.ui.is_item_activated() {
self.request_invalidate(InvalidateAmount::Indefinetely);
} else if self.ui.is_item_deactivated() {
self.request_invalidate(InvalidateAmount::Stop);
}
self.ui
}
}
impl Drop for LayoutContext<'_> {
fn drop(&mut self) {
if self.invalidate_amount_changed.get() {
let _ = self
.window_handle
.set_invalidate_amount(self.invalidate_amount.get());
} else {
if self.ui.is_any_item_active() {
let _ = self.window_handle.request_invalidate();
}
}
}
}
/// This struct represents an OS window, which contains an ImGUI graphical user interface.
pub struct Window {
window: winit::window::Window,
last_frame_time: std::time::Instant,
alive: Arc<()>,
app_handle: app::AppHandle,
invalidate_amount: InvalidateAmount,
// Everything for rendering
surface: wgpu::Surface,
gpu_device: wgpu::Device,
swap_chain_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
renderer: imgui_wgpu::Renderer,
queue: wgpu::Queue,
/// A MSAA framebuffer texture and its sample count.
msaa_framebuffer: Option<(wgpu::TextureView, wgpu::Extent3d, u32)>,
// All imgui related
winit_platform: imgui_winit_support::WinitPlatform,
imgui: ImguiContext,
default_font: Option<imgui::FontId>,
last_cursor: Option<imgui::MouseCursor>,
/// The data model associated with this native window, that holds its state.
pub data_model: Box<dyn Layout>,
}
enum ImguiContext {
Suspended(imgui::SuspendedContext),
Used(),
}
impl Window {
/// Closes the window and, if no further windows remain, shuts down the application.
pub fn close(&mut self) {
let data_model = &mut self.data_model as *mut Box<(dyn Layout + 'static)>;
let should_close = unsafe { &mut *data_model }.before_close(self);
if should_close {
let window_id = self.id();
let _ = self.app_handle.execute_with_gui(move |app: &mut App| {
app.remove_window(window_id);
});
}
}
/// Get a mutable reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window_mut(&mut self) -> &mut winit::window::Window {
&mut self.window
}
/// Get a reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window(&self) -> &winit::window::Window {
&self.window
}
/// Get the id of the window.
pub fn id(&self) -> winit::window::WindowId {
self.window.id()
}
/// Get the time the window was last updated.
pub fn last_frame_time(&self) -> std::time::Instant {
self.last_frame_time
}
/// Updates the frame time to now and returns the duration since the last frame.
#[inline]
pub(crate) fn update_frame_time(&mut self) -> std::time::Duration {
let now = std::time::Instant::now();
let frame_delta = now - self.last_frame_time;
self.last_frame_time = now;
frame_delta
}
/// Creates a standard top level window.
///
/// Call this method inside the closure passed to `App::new_window()`.
pub fn build_window(title: &str, size: (u32, u32)) -> winit::window::WindowBuilder {
winit::window::WindowBuilder::new()
.with_title(title)
.with_inner_size(winit::dpi::PhysicalSize {
width: size.0,
height: size.1,
})
.with_resizable(true)
}
/// Creates a new `Window` instance.
///
/// *Only for internal use.* The user creates new windows using `App::new_window()`.
pub(crate) fn new(
app: &mut app::App,
data_model: Box<dyn Layout>,
wnd: winit::window::Window,
visible: bool,
) -> UiResult<Window> {
let size = wnd.inner_size();
let surface = unsafe { app.wgpu_instance.create_surface(&wnd) };
// select adapter and gpu device
let (device, queue) = Window::select_gpu_device(&app, &surface)?;
// create the swapchain
let format = wgpu::TextureFormat::Bgra8UnormSrgb;
let swap_chain_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc);
let msaa_framebuffer = if app.msaa_samples > 1 {
Some(Window::create_msaa_framebuffer(
&device,
&swap_chain_desc,
app.msaa_samples,
))
} else {
None
};
// create imgui ui
// Note: This is going to panic if any other `imgui::Context` is currently active
let mut imgui = imgui::Context::create_with_shared_font_atlas(app.font_atlas.clone());
let mut platform = imgui_winit_support::WinitPlatform::init(&mut imgui);
platform.attach_window(
imgui.io_mut(),
&wnd,
imgui_winit_support::HiDpiMode::Default,
);
app.apply_imgui_settings(&mut imgui);
// create renderer
let renderer = imgui_wgpu::Renderer::new(
&mut imgui,
&device,
&queue,
swap_chain_desc.format,
None,
app.msaa_samples,
);
let mut wnd = Window {
window: wnd,
last_frame_time: std::time::Instant::now(),
alive: Arc::default(),
app_handle: app.handle(),
invalidate_amount: InvalidateAmount::Stop,
surface,
gpu_device: device,
swap_chain_desc,
swap_chain,
renderer,
queue,
msaa_framebuffer,
winit_platform: platform,
imgui: ImguiContext::Suspended(imgui.suspend()),
default_font: None,
last_cursor: None,
data_model,
};
if visible {
// draw immediately
let mut active_window = wnd.activate()?;
active_window.render(app, std::time::Duration::from_millis(std::u64::MAX))?;
drop(active_window);
wnd.window().set_visible(true);
}
Ok(wnd)
}
pub fn invalidate_amount(&self) -> InvalidateAmount {
self.invalidate_amount
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.id(),
state: amount,
})
}
fn update_render_size<T: std::convert::Into<u32>>(
&mut self,
_app: &App,
size: winit::dpi::PhysicalSize<T>,
) {
self.swap_chain_desc.width = size.width.into();
self.swap_chain_desc.height = size.height.into();
self.swap_chain = self
.gpu_device
.create_swap_chain(&self.surface, &self.swap_chain_desc);
// Note: Normally we would also update the optional MSAA framebuffer here, but
// this causes visual resize lag, presumably because the recreation of the MSAA
// texture is quite expensive. Instead this is done in the
// `ActiveWindow::render()` method.
}
pub(super) fn activate<'a>(&'a mut self) -> UiResult<ActiveWindow<'a>> {
let imgui = std::mem::replace(&mut self.imgui, ImguiContext::Used());
if let ImguiContext::Suspended(ctx) = imgui {
let ctx = ctx.activate();
match ctx {
Ok(ctx) => {
return Ok(ActiveWindow {
imgui_context: std::mem::ManuallyDrop::new(ctx),
wrapped_window: self,
});
}
Err(ctx) => {
self.imgui = ImguiContext::Suspended(ctx);
return Err(UiError::new(ErrorCode::IMGUI_CONTEXT_ACTIVATE_FAILED));
}
}
}
Err(UiError::new(ErrorCode::INVALID_IMGUI_CONTEXT))
}
/// Creates a thread-safe handle to this native window.
///
/// This handle can be used to access the represented native window from another
/// thread using events that get sent to and dispatched in the main (UI) thread.
pub fn handle(&self) -> WindowHandle {
WindowHandle {
window_id: self.id(),
app_handle: self.app_handle.clone(),
alive: Arc::downgrade(&self.alive),
}
}
/// Request window invalidation as soon as possible.
pub fn request_invalidate(&self) {
self.window.request_redraw();
}
fn select_gpu_device(
app: &App,
surface: &wgpu::Surface,
) -> UiResult<(wgpu::Device, wgpu::Queue)> {
use futures::executor::block_on;
let adapter_opts = wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
};
let adapter_request = app.wgpu_instance.request_adapter(&adapter_opts);
let adapter = match block_on(adapter_request) {
Some(val) => val,
None => return Err(ErrorCode::GRAPHICS_ADAPTER_NOT_AVAILABLE.into()),
};
let device_desc = wgpu::DeviceDescriptor {
features: wgpu::Features::default(),
limits: wgpu::Limits::default(),
shader_validation: false,
};
let device_request =
adapter.request_device(&device_desc, Some(std::path::Path::new(file!())));
let device_and_queue = match block_on(device_request) {
Ok(device) => device,
Err(err) => {
return Err(UiError::with_source(
ErrorCode::REQUEST_GRAPHICS_DEVICE_FAILED,
err,
))
}
};
Ok(device_and_queue)
}
/// Creates new framebuffer for multisampling anti-aliasing with the specified
/// `sample_count`.
/// Returnes a tuple with the `wgpu::TextureView` and the MSAA sample count used.
fn create_msaa_framebuffer(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
sample_count: u32,
) -> (wgpu::TextureView, wgpu::Extent3d, u32) {
let tex_extent = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
};
let tex_desc = &wgpu::TextureDescriptor {
label: Some("imgui_msaa_texture"),
size: tex_extent,
mip_level_count: 1,
sample_count: sample_count,
dimension: wgpu::TextureDimension::D2,
format: sc_desc.format,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
};
let tex_view_desc = &wgpu::TextureViewDescriptor {
label: Some("imgui_msaa_texture_view"),
format: Some(sc_desc.format),
dimension: None,
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
level_count: None,
base_array_layer: 0,
array_layer_count: None,
};
(
device.create_texture(tex_desc).create_view(&tex_view_desc),
tex_extent,
sample_count,
)
}
/// Gets the `wgpu::Queue` of this window.
pub fn wgpu_queue(&self) -> &wgpu::Queue {
&self.queue
}
/// Gets the `wgpu::Device` of this window.
pub fn wgpu_device(&self) -> &wgpu::Device {
&self.gpu_device
}
/// Gets the renderer.
pub fn renderer(&self) -> &imgui_wgpu::Renderer {
&self.renderer
}
/// Gets a reference to the texture collection.
pub fn textures(&self) -> &imgui::Textures<imgui_wgpu::Texture> {
&self.renderer.textures
}
/// Gets a mutable reference to the texture collection.
pub fn textures_mut(&mut self) -> &mut imgui::Textures<imgui_wgpu::Texture> |
}
/// A window prepared to be updated.
///
/// This struct is used to disjoin the lifetimes of the `Window` with that of the
/// `imgui::Context`.
pub struct ActiveWindow<'a> {
/// The imgui context of the `window`.
pub imgui_context: std::mem::ManuallyDrop<imgui::Context>,
/// The original native window, where its `imgui` value has been replaced with
/// `ImguiContext::Used()` and moved to `imgui_context`.
pub wrapped_window: &'a mut Window,
}
impl<'a> Drop for ActiveWindow<'a> {
/// Returns the `imgui::Context` back to the native window.
fn drop(&mut self) {
let val = std::mem::replace(&mut *self.imgui_context, unsafe {
std::mem::MaybeUninit::uninit().assume_init()
});
self.wrapped_window.imgui = ImguiContext::Suspended(val.suspend());
}
}
impl Deref for ActiveWindow<'_> {
type Target = Window;
fn deref(&self) -> &Self::Target {
self.wrapped_window
}
}
impl DerefMut for ActiveWindow<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.wrapped_window
}
}
impl ActiveWindow<'_> {
pub fn on_event(&mut self, app: &App, evt: &super::Event) {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
this.winit_platform
.handle_event(imgui.io_mut(), &this.window, evt);
match evt {
super::Event::WindowEvent {
window_id,
event: ref wnd_evt,
} if *window_id == this.id() => match wnd_evt {
winit::event::WindowEvent::CloseRequested => {
self.close();
}
winit::event::WindowEvent::Resized(physical_size) => {
self.update_render_size(app, *physical_size);
}
_ => (),
},
super::Event::MainEventsCleared => {
this.request_invalidate();
}
_ => (),
}
}
pub fn render(&mut self, app: &App, delta_time: std::time::Duration) -> UiResult<()> {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
imgui.io_mut().update_delta_time(delta_time);
this.winit_platform
.prepare_frame(imgui.io_mut(), &this.window)
.expect("Failed to prepare frame.");
let ui = imgui.frame();
{
let font_handle = match this.default_font {
Some(font) => Some(ui.push_font(font)),
None => None,
};
let layout_ctx = LayoutContext {
ui: &ui,
window_handle: this.handle(),
invalidate_amount_changed: Cell::new(false),
invalidate_amount: Cell::new(this.invalidate_amount()),
};
let data_model = &mut this.data_model as *mut Box<(dyn Layout + 'static)>;
unsafe {
(*data_model).layout(layout_ctx, app, this);
}
if let Some(font_handle) = font_handle {
font_handle.pop(&ui);
}
}
if this.last_cursor != ui.mouse_cursor() {
this.last_cursor = ui.mouse_cursor();
this.winit_platform.prepare_render(&ui, &this.window);
}
let draw_data: &imgui::DrawData = ui.render();
if draw_data.draw_lists_count() == 0 {
log::debug!("Imgui draw data is empty!");
return Ok(());
}
let frame: wgpu::SwapChainFrame = match this.swap_chain.get_current_frame() {
Ok(val) => val,
Err(_) => return Err(UiError::new(ErrorCode::SWAP_CHAIN_TIMEOUT)),
};
let cmd_encoder_desc = wgpu::CommandEncoderDescriptor {
label: Some("imgui_command_encoder"),
};
let mut encoder: wgpu::CommandEncoder =
this.gpu_device.create_command_encoder(&cmd_encoder_desc);
// If we have a msaa framebuffer, use it.
let (attachment, resolve_target) =
if let Some((ref msaa_framebuffer, size, _)) = this.msaa_framebuffer {
// Recreate the msaa_framebuffer if its size doesn't match.
if size.width == this.swap_chain_desc.width
&& size.height == this.swap_chain_desc.height
{
(msaa_framebuffer, Some(&frame.output.view))
} else {
this.msaa_framebuffer = Some(Window::create_msaa_framebuffer(
&this.gpu_device,
&this.swap_chain_desc,
app.msaa_samples,
));
(
&this.msaa_framebuffer.as_ref().unwrap().0,
Some(&frame.output.view),
)
}
} else {
(&frame.output.view, None)
};
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment,
resolve_target,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
store: true,
},
}],
depth_stencil_attachment: None,
});
match this
.renderer
.render(&draw_data, &this.queue, &this.gpu_device, &mut render_pass)
{
Err(err) => {
return Err(UiError::with_source(
ErrorCode::RENDER_ERROR,
utils::MessageError::debug(&err),
))
}
Ok(_) => (),
};
drop(render_pass);
this.queue.submit(Some(encoder.finish()));
Ok(())
}
}
/// A thread-safe handle to a `Window`.
///
/// This handle can be used to communicate with the Window from a different thread
/// through events. All methods on this handle will return an error when the window does
/// not exist anymore (can be queried with `alive()`).
pub struct WindowHandle {
window_id: winit::window::WindowId,
app_handle: app::AppHandle,
alive: Weak<()>,
}
impl WindowHandle {
/// Queries wether the represented window still exists or not.
pub fn alive(&self) -> bool {
match self.alive.upgrade() {
Some(_) => true,
_ => false,
}
}
/// Runs the closure `callback` in the UI thread.
///
/// Returns an error if the `Window` this handle referres to doesn't exist anymore.
pub fn run(
&self,
callback: impl FnOnce(&mut app::App, &mut Window) + 'static + Send,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteWithWindow {
window_id: self.window_id,
callback: app::ExecuteWithWindowCallback(Box::new(callback)),
})
.unwrap();
Ok(())
}
/// Runs the closure callback in the UI thread and passes
/// the data model of the `Window` downcast to T.
///
/// The main thread will panic if the data model of the Window
/// cannot be downcast to T.
///
/// ## Note
/// There is no guarantee that the passed closure will be run.
/// If the Window gets destryed after this method has been called
/// and before the main thread has gotten the event for running the closure,
/// it will be skipped.
pub fn run_with_data_model<T: Layout + Any>(
&self,
callback: impl FnOnce(&mut app::App, &mut T, &mut Window) + 'static + Send,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteWithWindow {
window_id: self.window_id,
callback: app::ExecuteWithWindowCallback(Box::new(move |app, wnd: &mut Window| {
let wnd_ptr = wnd as *mut _;
let data_model = wnd.data_model.as_any().downcast_mut::<T>().unwrap();
callback(app, data_model, unsafe { &mut *wnd_ptr });
})),
})
.unwrap();
Ok(())
}
/// Request a redraw of the window.
pub fn request_invalidate(&self) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle.send_event(app::AppEvent::InvalidateWindow {
window_id: self.window_id,
})
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.window_id,
state: amount,
})
}
/// Calls `Window::data_model.log(level, message)` from the UI thread. If the
/// window does not exist anymore (it was already destroyed) and `level` is not
/// `None`, logs the `message` with the given `level` instead.
pub fn log(&self, level: Option<log::Level>, message: &str) {
let message_copy = String::from(message);
match self.run(move |_app, wnd| {
wnd.data_model.log(level, &message_copy);
}) {
Ok(_) => (),
Err(_) if level.is_some() => {
log::log!(level.unwrap(), "{}", message);
}
Err(_) => (),
};
}
/// Schedules the closure `callback` to approximately be executed at the given `instant`.
///
/// Returnes an error if the window represented by this handle does not exist anymore.
pub fn schedule(
&self,
instant: std::time::Instant,
callback: app::ExecuteAtCallback,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteAt { instant, callback })
}
}
impl Clone for WindowHandle {
fn clone(&self) -> Self {
WindowHandle {
window_id: self.window_id,
app_handle: self.app_handle.clone(),
alive: self.alive.clone(),
}
}
}
| {
&mut self.renderer.textures
} | identifier_body |
PNotifyBootstrap3.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
typeof define === 'function' && define.amd ? define(['exports'], factory) :
(global = global || self, factory(global.PNotifyBootstrap3 = {}));
}(this, (function (exports) { 'use strict';
function _typeof(obj) {
"@babel/helpers - typeof";
if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") {
_typeof = function (obj) {
return typeof obj;
};
} else {
_typeof = function (obj) {
return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj;
};
}
return _typeof(obj);
}
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
function _defineProperties(target, props) {
for (var i = 0; i < props.length; i++) {
var descriptor = props[i];
descriptor.enumerable = descriptor.enumerable || false;
descriptor.configurable = true;
if ("value" in descriptor) descriptor.writable = true;
Object.defineProperty(target, descriptor.key, descriptor);
}
}
function _createClass(Constructor, protoProps, staticProps) {
if (protoProps) _defineProperties(Constructor.prototype, protoProps);
if (staticProps) _defineProperties(Constructor, staticProps);
return Constructor;
}
function _inherits(subClass, superClass) {
if (typeof superClass !== "function" && superClass !== null) {
throw new TypeError("Super expression must either be null or a function");
}
subClass.prototype = Object.create(superClass && superClass.prototype, {
constructor: {
value: subClass,
writable: true,
configurable: true
}
});
if (superClass) _setPrototypeOf(subClass, superClass);
}
function _getPrototypeOf(o) {
_getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) {
return o.__proto__ || Object.getPrototypeOf(o);
};
return _getPrototypeOf(o);
}
function _setPrototypeOf(o, p) {
_setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) {
o.__proto__ = p;
return o;
};
return _setPrototypeOf(o, p);
}
function _isNativeReflectConstruct() {
if (typeof Reflect === "undefined" || !Reflect.construct) return false;
if (Reflect.construct.sham) return false;
if (typeof Proxy === "function") return true;
try {
Date.prototype.toString.call(Reflect.construct(Date, [], function () {}));
return true;
} catch (e) {
return false;
}
}
function _assertThisInitialized(self) {
if (self === void 0) {
throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
}
return self;
}
function _possibleConstructorReturn(self, call) {
if (call && (typeof call === "object" || typeof call === "function")) {
return call;
}
return _assertThisInitialized(self);
}
function _createSuper(Derived) {
return function () {
var Super = _getPrototypeOf(Derived),
result;
if (_isNativeReflectConstruct()) {
var NewTarget = _getPrototypeOf(this).constructor;
result = Reflect.construct(Super, arguments, NewTarget);
} else {
result = Super.apply(this, arguments);
}
return _possibleConstructorReturn(this, result);
};
}
function _toConsumableArray(arr) {
return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread();
}
function _arrayWithoutHoles(arr) {
if (Array.isArray(arr)) return _arrayLikeToArray(arr);
}
function _iterableToArray(iter) {
if (typeof Symbol !== "undefined" && Symbol.iterator in Object(iter)) return Array.from(iter);
}
function _unsupportedIterableToArray(o, minLen) {
if (!o) return;
if (typeof o === "string") return _arrayLikeToArray(o, minLen);
var n = Object.prototype.toString.call(o).slice(8, -1);
if (n === "Object" && o.constructor) n = o.constructor.name;
if (n === "Map" || n === "Set") return Array.from(n);
if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen);
}
function _arrayLikeToArray(arr, len) {
if (len == null || len > arr.length) len = arr.length;
for (var i = 0, arr2 = new Array(len); i < len; i++) arr2[i] = arr[i];
return arr2;
}
function _nonIterableSpread() {
throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
}
function noop() {}
function run(fn) |
function blank_object() {
return Object.create(null);
}
function run_all(fns) {
fns.forEach(run);
}
function is_function(thing) {
return typeof thing === 'function';
}
function safe_not_equal(a, b) {
return a != a ? b == b : a !== b || a && _typeof(a) === 'object' || typeof a === 'function';
}
function detach(node) {
node.parentNode.removeChild(node);
}
function children(element) {
return Array.from(element.childNodes);
}
var current_component;
function set_current_component(component) {
current_component = component;
}
var dirty_components = [];
var binding_callbacks = [];
var render_callbacks = [];
var flush_callbacks = [];
var resolved_promise = Promise.resolve();
var update_scheduled = false;
function schedule_update() {
if (!update_scheduled) {
update_scheduled = true;
resolved_promise.then(flush);
}
}
function add_render_callback(fn) {
render_callbacks.push(fn);
}
var flushing = false;
var seen_callbacks = new Set();
function flush() {
if (flushing) return;
flushing = true;
do {
// first, call beforeUpdate functions
// and update components
for (var i = 0; i < dirty_components.length; i += 1) {
var component = dirty_components[i];
set_current_component(component);
update(component.$$);
}
dirty_components.length = 0;
while (binding_callbacks.length) {
binding_callbacks.pop()();
} // then, once components are updated, call
// afterUpdate functions. This may cause
// subsequent updates...
for (var _i = 0; _i < render_callbacks.length; _i += 1) {
var callback = render_callbacks[_i];
if (!seen_callbacks.has(callback)) {
// ...so guard against infinite loops
seen_callbacks.add(callback);
callback();
}
}
render_callbacks.length = 0;
} while (dirty_components.length);
while (flush_callbacks.length) {
flush_callbacks.pop()();
}
update_scheduled = false;
flushing = false;
seen_callbacks.clear();
}
function update($$) {
if ($$.fragment !== null) {
$$.update();
run_all($$.before_update);
var dirty = $$.dirty;
$$.dirty = [-1];
$$.fragment && $$.fragment.p($$.ctx, dirty);
$$.after_update.forEach(add_render_callback);
}
}
var outroing = new Set();
function transition_in(block, local) {
if (block && block.i) {
outroing["delete"](block);
block.i(local);
}
}
function mount_component(component, target, anchor) {
var _component$$$ = component.$$,
fragment = _component$$$.fragment,
on_mount = _component$$$.on_mount,
on_destroy = _component$$$.on_destroy,
after_update = _component$$$.after_update;
fragment && fragment.m(target, anchor); // onMount happens before the initial afterUpdate
add_render_callback(function () {
var new_on_destroy = on_mount.map(run).filter(is_function);
if (on_destroy) {
on_destroy.push.apply(on_destroy, _toConsumableArray(new_on_destroy));
} else {
// Edge case - component was destroyed immediately,
// most likely as a result of a binding initialising
run_all(new_on_destroy);
}
component.$$.on_mount = [];
});
after_update.forEach(add_render_callback);
}
function destroy_component(component, detaching) {
var $$ = component.$$;
if ($$.fragment !== null) {
run_all($$.on_destroy);
$$.fragment && $$.fragment.d(detaching); // TODO null out other refs, including component.$$ (but need to
// preserve final state?)
$$.on_destroy = $$.fragment = null;
$$.ctx = [];
}
}
function make_dirty(component, i) {
if (component.$$.dirty[0] === -1) {
dirty_components.push(component);
schedule_update();
component.$$.dirty.fill(0);
}
component.$$.dirty[i / 31 | 0] |= 1 << i % 31;
}
function init(component, options, instance, create_fragment, not_equal, props) {
var dirty = arguments.length > 6 && arguments[6] !== undefined ? arguments[6] : [-1];
var parent_component = current_component;
set_current_component(component);
var prop_values = options.props || {};
var $$ = component.$$ = {
fragment: null,
ctx: null,
// state
props: props,
update: noop,
not_equal: not_equal,
bound: blank_object(),
// lifecycle
on_mount: [],
on_destroy: [],
before_update: [],
after_update: [],
context: new Map(parent_component ? parent_component.$$.context : []),
// everything else
callbacks: blank_object(),
dirty: dirty
};
var ready = false;
$$.ctx = instance ? instance(component, prop_values, function (i, ret) {
var value = (arguments.length <= 2 ? 0 : arguments.length - 2) ? arguments.length <= 2 ? undefined : arguments[2] : ret;
if ($$.ctx && not_equal($$.ctx[i], $$.ctx[i] = value)) {
if ($$.bound[i]) $$.bound[i](value);
if (ready) make_dirty(component, i);
}
return ret;
}) : [];
$$.update();
ready = true;
run_all($$.before_update); // `false` as a special case of no DOM component
$$.fragment = create_fragment ? create_fragment($$.ctx) : false;
if (options.target) {
if (options.hydrate) {
var nodes = children(options.target); // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
$$.fragment && $$.fragment.l(nodes);
nodes.forEach(detach);
} else {
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
$$.fragment && $$.fragment.c();
}
if (options.intro) transition_in(component.$$.fragment);
mount_component(component, options.target, options.anchor);
flush();
}
set_current_component(parent_component);
}
var SvelteComponent = /*#__PURE__*/function () {
function SvelteComponent() {
_classCallCheck(this, SvelteComponent);
}
_createClass(SvelteComponent, [{
key: "$destroy",
value: function $destroy() {
destroy_component(this, 1);
this.$destroy = noop;
}
}, {
key: "$on",
value: function $on(type, callback) {
var callbacks = this.$$.callbacks[type] || (this.$$.callbacks[type] = []);
callbacks.push(callback);
return function () {
var index = callbacks.indexOf(callback);
if (index !== -1) callbacks.splice(index, 1);
};
}
}, {
key: "$set",
value: function $set() {// overridden by instance, if it has props
}
}]);
return SvelteComponent;
}();
var position = "PrependContainer";
var defaults = {};
function init$1(eventDetail) {
eventDetail.defaults.styling = {
prefix: "bootstrap3",
container: "alert",
notice: "alert-warning",
info: "alert-info",
success: "alert-success",
error: "alert-danger",
// Confirm Module
"action-bar": "bootstrap3-ml",
"prompt-bar": "bootstrap3-ml",
btn: "btn btn-default bootstrap3-mx-1",
"btn-primary": "btn-primary",
"btn-secondary": "",
input: "form-control"
};
}
var Bootstrap3 = /*#__PURE__*/function (_SvelteComponent) {
_inherits(Bootstrap3, _SvelteComponent);
var _super = _createSuper(Bootstrap3);
function Bootstrap3(options) {
var _this;
_classCallCheck(this, Bootstrap3);
_this = _super.call(this);
init(_assertThisInitialized(_this), options, null, null, safe_not_equal, {});
return _this;
}
return Bootstrap3;
}(SvelteComponent);
exports.default = Bootstrap3;
exports.defaults = defaults;
exports.init = init$1;
exports.position = position;
Object.defineProperty(exports, '__esModule', { value: true });
})));
| {
return fn();
} | identifier_body |
PNotifyBootstrap3.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
typeof define === 'function' && define.amd ? define(['exports'], factory) :
(global = global || self, factory(global.PNotifyBootstrap3 = {}));
}(this, (function (exports) { 'use strict';
function _typeof(obj) {
"@babel/helpers - typeof";
if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") {
_typeof = function (obj) {
return typeof obj;
};
} else {
_typeof = function (obj) {
return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj;
};
}
return _typeof(obj);
}
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
|
function _defineProperties(target, props) {
for (var i = 0; i < props.length; i++) {
var descriptor = props[i];
descriptor.enumerable = descriptor.enumerable || false;
descriptor.configurable = true;
if ("value" in descriptor) descriptor.writable = true;
Object.defineProperty(target, descriptor.key, descriptor);
}
}
function _createClass(Constructor, protoProps, staticProps) {
if (protoProps) _defineProperties(Constructor.prototype, protoProps);
if (staticProps) _defineProperties(Constructor, staticProps);
return Constructor;
}
function _inherits(subClass, superClass) {
if (typeof superClass !== "function" && superClass !== null) {
throw new TypeError("Super expression must either be null or a function");
}
subClass.prototype = Object.create(superClass && superClass.prototype, {
constructor: {
value: subClass,
writable: true,
configurable: true
}
});
if (superClass) _setPrototypeOf(subClass, superClass);
}
function _getPrototypeOf(o) {
_getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) {
return o.__proto__ || Object.getPrototypeOf(o);
};
return _getPrototypeOf(o);
}
function _setPrototypeOf(o, p) {
_setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) {
o.__proto__ = p;
return o;
};
return _setPrototypeOf(o, p);
}
function _isNativeReflectConstruct() {
if (typeof Reflect === "undefined" || !Reflect.construct) return false;
if (Reflect.construct.sham) return false;
if (typeof Proxy === "function") return true;
try {
Date.prototype.toString.call(Reflect.construct(Date, [], function () {}));
return true;
} catch (e) {
return false;
}
}
function _assertThisInitialized(self) {
if (self === void 0) {
throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
}
return self;
}
function _possibleConstructorReturn(self, call) {
if (call && (typeof call === "object" || typeof call === "function")) {
return call;
}
return _assertThisInitialized(self);
}
function _createSuper(Derived) {
return function () {
var Super = _getPrototypeOf(Derived),
result;
if (_isNativeReflectConstruct()) {
var NewTarget = _getPrototypeOf(this).constructor;
result = Reflect.construct(Super, arguments, NewTarget);
} else {
result = Super.apply(this, arguments);
}
return _possibleConstructorReturn(this, result);
};
}
function _toConsumableArray(arr) {
return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread();
}
function _arrayWithoutHoles(arr) {
if (Array.isArray(arr)) return _arrayLikeToArray(arr);
}
function _iterableToArray(iter) {
if (typeof Symbol !== "undefined" && Symbol.iterator in Object(iter)) return Array.from(iter);
}
function _unsupportedIterableToArray(o, minLen) {
if (!o) return;
if (typeof o === "string") return _arrayLikeToArray(o, minLen);
var n = Object.prototype.toString.call(o).slice(8, -1);
if (n === "Object" && o.constructor) n = o.constructor.name;
if (n === "Map" || n === "Set") return Array.from(n);
if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen);
}
function _arrayLikeToArray(arr, len) {
if (len == null || len > arr.length) len = arr.length;
for (var i = 0, arr2 = new Array(len); i < len; i++) arr2[i] = arr[i];
return arr2;
}
function _nonIterableSpread() {
throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
}
function noop() {}
function run(fn) {
return fn();
}
function blank_object() {
return Object.create(null);
}
function run_all(fns) {
fns.forEach(run);
}
function is_function(thing) {
return typeof thing === 'function';
}
function safe_not_equal(a, b) {
return a != a ? b == b : a !== b || a && _typeof(a) === 'object' || typeof a === 'function';
}
function detach(node) {
node.parentNode.removeChild(node);
}
function children(element) {
return Array.from(element.childNodes);
}
var current_component;
function set_current_component(component) {
current_component = component;
}
var dirty_components = [];
var binding_callbacks = [];
var render_callbacks = [];
var flush_callbacks = [];
var resolved_promise = Promise.resolve();
var update_scheduled = false;
function schedule_update() {
if (!update_scheduled) {
update_scheduled = true;
resolved_promise.then(flush);
}
}
function add_render_callback(fn) {
render_callbacks.push(fn);
}
var flushing = false;
var seen_callbacks = new Set();
function flush() {
if (flushing) return;
flushing = true;
do {
// first, call beforeUpdate functions
// and update components
for (var i = 0; i < dirty_components.length; i += 1) {
var component = dirty_components[i];
set_current_component(component);
update(component.$$);
}
dirty_components.length = 0;
while (binding_callbacks.length) {
binding_callbacks.pop()();
} // then, once components are updated, call
// afterUpdate functions. This may cause
// subsequent updates...
for (var _i = 0; _i < render_callbacks.length; _i += 1) {
var callback = render_callbacks[_i];
if (!seen_callbacks.has(callback)) {
// ...so guard against infinite loops
seen_callbacks.add(callback);
callback();
}
}
render_callbacks.length = 0;
} while (dirty_components.length);
while (flush_callbacks.length) {
flush_callbacks.pop()();
}
update_scheduled = false;
flushing = false;
seen_callbacks.clear();
}
function update($$) {
if ($$.fragment !== null) {
$$.update();
run_all($$.before_update);
var dirty = $$.dirty;
$$.dirty = [-1];
$$.fragment && $$.fragment.p($$.ctx, dirty);
$$.after_update.forEach(add_render_callback);
}
}
var outroing = new Set();
function transition_in(block, local) {
if (block && block.i) {
outroing["delete"](block);
block.i(local);
}
}
function mount_component(component, target, anchor) {
var _component$$$ = component.$$,
fragment = _component$$$.fragment,
on_mount = _component$$$.on_mount,
on_destroy = _component$$$.on_destroy,
after_update = _component$$$.after_update;
fragment && fragment.m(target, anchor); // onMount happens before the initial afterUpdate
add_render_callback(function () {
var new_on_destroy = on_mount.map(run).filter(is_function);
if (on_destroy) {
on_destroy.push.apply(on_destroy, _toConsumableArray(new_on_destroy));
} else {
// Edge case - component was destroyed immediately,
// most likely as a result of a binding initialising
run_all(new_on_destroy);
}
component.$$.on_mount = [];
});
after_update.forEach(add_render_callback);
}
function destroy_component(component, detaching) {
var $$ = component.$$;
if ($$.fragment !== null) {
run_all($$.on_destroy);
$$.fragment && $$.fragment.d(detaching); // TODO null out other refs, including component.$$ (but need to
// preserve final state?)
$$.on_destroy = $$.fragment = null;
$$.ctx = [];
}
}
function make_dirty(component, i) {
if (component.$$.dirty[0] === -1) {
dirty_components.push(component);
schedule_update();
component.$$.dirty.fill(0);
}
component.$$.dirty[i / 31 | 0] |= 1 << i % 31;
}
function init(component, options, instance, create_fragment, not_equal, props) {
var dirty = arguments.length > 6 && arguments[6] !== undefined ? arguments[6] : [-1];
var parent_component = current_component;
set_current_component(component);
var prop_values = options.props || {};
var $$ = component.$$ = {
fragment: null,
ctx: null,
// state
props: props,
update: noop,
not_equal: not_equal,
bound: blank_object(),
// lifecycle
on_mount: [],
on_destroy: [],
before_update: [],
after_update: [],
context: new Map(parent_component ? parent_component.$$.context : []),
// everything else
callbacks: blank_object(),
dirty: dirty
};
var ready = false;
$$.ctx = instance ? instance(component, prop_values, function (i, ret) {
var value = (arguments.length <= 2 ? 0 : arguments.length - 2) ? arguments.length <= 2 ? undefined : arguments[2] : ret;
if ($$.ctx && not_equal($$.ctx[i], $$.ctx[i] = value)) {
if ($$.bound[i]) $$.bound[i](value);
if (ready) make_dirty(component, i);
}
return ret;
}) : [];
$$.update();
ready = true;
run_all($$.before_update); // `false` as a special case of no DOM component
$$.fragment = create_fragment ? create_fragment($$.ctx) : false;
if (options.target) {
if (options.hydrate) {
var nodes = children(options.target); // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
$$.fragment && $$.fragment.l(nodes);
nodes.forEach(detach);
} else {
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
$$.fragment && $$.fragment.c();
}
if (options.intro) transition_in(component.$$.fragment);
mount_component(component, options.target, options.anchor);
flush();
}
set_current_component(parent_component);
}
var SvelteComponent = /*#__PURE__*/function () {
function SvelteComponent() {
_classCallCheck(this, SvelteComponent);
}
_createClass(SvelteComponent, [{
key: "$destroy",
value: function $destroy() {
destroy_component(this, 1);
this.$destroy = noop;
}
}, {
key: "$on",
value: function $on(type, callback) {
var callbacks = this.$$.callbacks[type] || (this.$$.callbacks[type] = []);
callbacks.push(callback);
return function () {
var index = callbacks.indexOf(callback);
if (index !== -1) callbacks.splice(index, 1);
};
}
}, {
key: "$set",
value: function $set() {// overridden by instance, if it has props
}
}]);
return SvelteComponent;
}();
var position = "PrependContainer";
var defaults = {};
function init$1(eventDetail) {
eventDetail.defaults.styling = {
prefix: "bootstrap3",
container: "alert",
notice: "alert-warning",
info: "alert-info",
success: "alert-success",
error: "alert-danger",
// Confirm Module
"action-bar": "bootstrap3-ml",
"prompt-bar": "bootstrap3-ml",
btn: "btn btn-default bootstrap3-mx-1",
"btn-primary": "btn-primary",
"btn-secondary": "",
input: "form-control"
};
}
var Bootstrap3 = /*#__PURE__*/function (_SvelteComponent) {
_inherits(Bootstrap3, _SvelteComponent);
var _super = _createSuper(Bootstrap3);
function Bootstrap3(options) {
var _this;
_classCallCheck(this, Bootstrap3);
_this = _super.call(this);
init(_assertThisInitialized(_this), options, null, null, safe_not_equal, {});
return _this;
}
return Bootstrap3;
}(SvelteComponent);
exports.default = Bootstrap3;
exports.defaults = defaults;
exports.init = init$1;
exports.position = position;
Object.defineProperty(exports, '__esModule', { value: true });
}))); | }
}
| random_line_split |
PNotifyBootstrap3.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
typeof define === 'function' && define.amd ? define(['exports'], factory) :
(global = global || self, factory(global.PNotifyBootstrap3 = {}));
}(this, (function (exports) { 'use strict';
function _typeof(obj) {
"@babel/helpers - typeof";
if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") {
_typeof = function (obj) {
return typeof obj;
};
} else {
_typeof = function (obj) {
return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj;
};
}
return _typeof(obj);
}
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
function _defineProperties(target, props) {
for (var i = 0; i < props.length; i++) {
var descriptor = props[i];
descriptor.enumerable = descriptor.enumerable || false;
descriptor.configurable = true;
if ("value" in descriptor) descriptor.writable = true;
Object.defineProperty(target, descriptor.key, descriptor);
}
}
function _createClass(Constructor, protoProps, staticProps) {
if (protoProps) _defineProperties(Constructor.prototype, protoProps);
if (staticProps) _defineProperties(Constructor, staticProps);
return Constructor;
}
function _inherits(subClass, superClass) {
if (typeof superClass !== "function" && superClass !== null) {
throw new TypeError("Super expression must either be null or a function");
}
subClass.prototype = Object.create(superClass && superClass.prototype, {
constructor: {
value: subClass,
writable: true,
configurable: true
}
});
if (superClass) _setPrototypeOf(subClass, superClass);
}
function _getPrototypeOf(o) {
_getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) {
return o.__proto__ || Object.getPrototypeOf(o);
};
return _getPrototypeOf(o);
}
function _setPrototypeOf(o, p) {
_setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) {
o.__proto__ = p;
return o;
};
return _setPrototypeOf(o, p);
}
function _isNativeReflectConstruct() {
if (typeof Reflect === "undefined" || !Reflect.construct) return false;
if (Reflect.construct.sham) return false;
if (typeof Proxy === "function") return true;
try {
Date.prototype.toString.call(Reflect.construct(Date, [], function () {}));
return true;
} catch (e) {
return false;
}
}
function _assertThisInitialized(self) {
if (self === void 0) {
throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
}
return self;
}
function _possibleConstructorReturn(self, call) {
if (call && (typeof call === "object" || typeof call === "function")) {
return call;
}
return _assertThisInitialized(self);
}
function _createSuper(Derived) {
return function () {
var Super = _getPrototypeOf(Derived),
result;
if (_isNativeReflectConstruct()) {
var NewTarget = _getPrototypeOf(this).constructor;
result = Reflect.construct(Super, arguments, NewTarget);
} else {
result = Super.apply(this, arguments);
}
return _possibleConstructorReturn(this, result);
};
}
function _toConsumableArray(arr) {
return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread();
}
function _arrayWithoutHoles(arr) {
if (Array.isArray(arr)) return _arrayLikeToArray(arr);
}
function _iterableToArray(iter) {
if (typeof Symbol !== "undefined" && Symbol.iterator in Object(iter)) return Array.from(iter);
}
function _unsupportedIterableToArray(o, minLen) {
if (!o) return;
if (typeof o === "string") return _arrayLikeToArray(o, minLen);
var n = Object.prototype.toString.call(o).slice(8, -1);
if (n === "Object" && o.constructor) n = o.constructor.name;
if (n === "Map" || n === "Set") return Array.from(n);
if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen);
}
function _arrayLikeToArray(arr, len) {
if (len == null || len > arr.length) len = arr.length;
for (var i = 0, arr2 = new Array(len); i < len; i++) arr2[i] = arr[i];
return arr2;
}
function _nonIterableSpread() {
throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
}
function noop() {}
function run(fn) {
return fn();
}
function blank_object() {
return Object.create(null);
}
function run_all(fns) {
fns.forEach(run);
}
function is_function(thing) {
return typeof thing === 'function';
}
function safe_not_equal(a, b) {
return a != a ? b == b : a !== b || a && _typeof(a) === 'object' || typeof a === 'function';
}
function detach(node) {
node.parentNode.removeChild(node);
}
function children(element) {
return Array.from(element.childNodes);
}
var current_component;
function set_current_component(component) {
current_component = component;
}
var dirty_components = [];
var binding_callbacks = [];
var render_callbacks = [];
var flush_callbacks = [];
var resolved_promise = Promise.resolve();
var update_scheduled = false;
function schedule_update() {
if (!update_scheduled) {
update_scheduled = true;
resolved_promise.then(flush);
}
}
function add_render_callback(fn) {
render_callbacks.push(fn);
}
var flushing = false;
var seen_callbacks = new Set();
function flush() {
if (flushing) return;
flushing = true;
do {
// first, call beforeUpdate functions
// and update components
for (var i = 0; i < dirty_components.length; i += 1) {
var component = dirty_components[i];
set_current_component(component);
update(component.$$);
}
dirty_components.length = 0;
while (binding_callbacks.length) {
binding_callbacks.pop()();
} // then, once components are updated, call
// afterUpdate functions. This may cause
// subsequent updates...
for (var _i = 0; _i < render_callbacks.length; _i += 1) {
var callback = render_callbacks[_i];
if (!seen_callbacks.has(callback)) {
// ...so guard against infinite loops
seen_callbacks.add(callback);
callback();
}
}
render_callbacks.length = 0;
} while (dirty_components.length);
while (flush_callbacks.length) {
flush_callbacks.pop()();
}
update_scheduled = false;
flushing = false;
seen_callbacks.clear();
}
function | ($$) {
if ($$.fragment !== null) {
$$.update();
run_all($$.before_update);
var dirty = $$.dirty;
$$.dirty = [-1];
$$.fragment && $$.fragment.p($$.ctx, dirty);
$$.after_update.forEach(add_render_callback);
}
}
var outroing = new Set();
function transition_in(block, local) {
if (block && block.i) {
outroing["delete"](block);
block.i(local);
}
}
function mount_component(component, target, anchor) {
var _component$$$ = component.$$,
fragment = _component$$$.fragment,
on_mount = _component$$$.on_mount,
on_destroy = _component$$$.on_destroy,
after_update = _component$$$.after_update;
fragment && fragment.m(target, anchor); // onMount happens before the initial afterUpdate
add_render_callback(function () {
var new_on_destroy = on_mount.map(run).filter(is_function);
if (on_destroy) {
on_destroy.push.apply(on_destroy, _toConsumableArray(new_on_destroy));
} else {
// Edge case - component was destroyed immediately,
// most likely as a result of a binding initialising
run_all(new_on_destroy);
}
component.$$.on_mount = [];
});
after_update.forEach(add_render_callback);
}
function destroy_component(component, detaching) {
var $$ = component.$$;
if ($$.fragment !== null) {
run_all($$.on_destroy);
$$.fragment && $$.fragment.d(detaching); // TODO null out other refs, including component.$$ (but need to
// preserve final state?)
$$.on_destroy = $$.fragment = null;
$$.ctx = [];
}
}
function make_dirty(component, i) {
if (component.$$.dirty[0] === -1) {
dirty_components.push(component);
schedule_update();
component.$$.dirty.fill(0);
}
component.$$.dirty[i / 31 | 0] |= 1 << i % 31;
}
function init(component, options, instance, create_fragment, not_equal, props) {
var dirty = arguments.length > 6 && arguments[6] !== undefined ? arguments[6] : [-1];
var parent_component = current_component;
set_current_component(component);
var prop_values = options.props || {};
var $$ = component.$$ = {
fragment: null,
ctx: null,
// state
props: props,
update: noop,
not_equal: not_equal,
bound: blank_object(),
// lifecycle
on_mount: [],
on_destroy: [],
before_update: [],
after_update: [],
context: new Map(parent_component ? parent_component.$$.context : []),
// everything else
callbacks: blank_object(),
dirty: dirty
};
var ready = false;
$$.ctx = instance ? instance(component, prop_values, function (i, ret) {
var value = (arguments.length <= 2 ? 0 : arguments.length - 2) ? arguments.length <= 2 ? undefined : arguments[2] : ret;
if ($$.ctx && not_equal($$.ctx[i], $$.ctx[i] = value)) {
if ($$.bound[i]) $$.bound[i](value);
if (ready) make_dirty(component, i);
}
return ret;
}) : [];
$$.update();
ready = true;
run_all($$.before_update); // `false` as a special case of no DOM component
$$.fragment = create_fragment ? create_fragment($$.ctx) : false;
if (options.target) {
if (options.hydrate) {
var nodes = children(options.target); // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
$$.fragment && $$.fragment.l(nodes);
nodes.forEach(detach);
} else {
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
$$.fragment && $$.fragment.c();
}
if (options.intro) transition_in(component.$$.fragment);
mount_component(component, options.target, options.anchor);
flush();
}
set_current_component(parent_component);
}
var SvelteComponent = /*#__PURE__*/function () {
function SvelteComponent() {
_classCallCheck(this, SvelteComponent);
}
_createClass(SvelteComponent, [{
key: "$destroy",
value: function $destroy() {
destroy_component(this, 1);
this.$destroy = noop;
}
}, {
key: "$on",
value: function $on(type, callback) {
var callbacks = this.$$.callbacks[type] || (this.$$.callbacks[type] = []);
callbacks.push(callback);
return function () {
var index = callbacks.indexOf(callback);
if (index !== -1) callbacks.splice(index, 1);
};
}
}, {
key: "$set",
value: function $set() {// overridden by instance, if it has props
}
}]);
return SvelteComponent;
}();
var position = "PrependContainer";
var defaults = {};
function init$1(eventDetail) {
eventDetail.defaults.styling = {
prefix: "bootstrap3",
container: "alert",
notice: "alert-warning",
info: "alert-info",
success: "alert-success",
error: "alert-danger",
// Confirm Module
"action-bar": "bootstrap3-ml",
"prompt-bar": "bootstrap3-ml",
btn: "btn btn-default bootstrap3-mx-1",
"btn-primary": "btn-primary",
"btn-secondary": "",
input: "form-control"
};
}
var Bootstrap3 = /*#__PURE__*/function (_SvelteComponent) {
_inherits(Bootstrap3, _SvelteComponent);
var _super = _createSuper(Bootstrap3);
function Bootstrap3(options) {
var _this;
_classCallCheck(this, Bootstrap3);
_this = _super.call(this);
init(_assertThisInitialized(_this), options, null, null, safe_not_equal, {});
return _this;
}
return Bootstrap3;
}(SvelteComponent);
exports.default = Bootstrap3;
exports.defaults = defaults;
exports.init = init$1;
exports.position = position;
Object.defineProperty(exports, '__esModule', { value: true });
})));
| update | identifier_name |
prittynoteAppManager.js | /**
*
*@Author - Eugene Mutai
*@Twitter - JheneKnights
*
* Date: 3/10/13
* Time: 5:11 PM
* Description: Model javascript Script for PrittyNote
*
* Dual licensed under the MIT or GPL Version 2 licenses.
* http://www.opensource.org/licenses/mit-license.php
* http://www.opensource.org/licenses/gpl-2.0.php
*
* Copyright (C) 2013
* @Version - Full, Object Oriented, Stable, Pretty
* @Codenames - sha1: cc99d04a02371d96ecaefd2254be75b048e80373, md5: ce3d6db7d4dd0a93a5383c08809ea513
*/
var prittyNote = {
//default prittyNote variables
canvasSize: $(window).width() - 10,
maxWidth: function() {
return prittyNote.canvasSize * 0.8
},
x: function() {
return (prittyNote.canvasSize - prittyNote.maxWidth())/2;
}, //allowed width divided by 2
y: function() {
return prittyNote.x();
},
font: "24px 'Comfortaa'",
fontSize: 28,
drawText: undefined,
bgImage:false,
theImage: undefined,
canvas: 'prittynote',
editCanvas: false,
userDef: false,
limit: {
max: 350,
min: 2,
images: 200
},
sidebars : {
themes: $(".side-bar-themes"),
fonts: $(".side-bar-fonts")
},
elapsetime: false, //time between two intervals on mouse click and hold
share: false,
URL: {
test: "http://localhost/status/app/",
http: "http://app.prittynote.com/app/"
},
setValue: function(value) {
document.getElementsByTagName('textarea')[0].value = value;
},
getValue: function(){
prittyNote.drawText = document.getElementsByTagName('textarea')[0].value;
return prittyNote.drawText;
},
getStatus: function() {
var text = prittyNote.getValue()
prittyNote.drawCanvas(text);
},
//Give the user a basic IDEA of how his prittyNote will look like
getColors: function() {
var clr = $("#text").val(),
bgclr = $("#bgclr").val(),
hashtagclr = $("#hashtag").val();
return {"text": clr, "bgcolor": bgclr, "hashtag": hashtagclr};
},
//FUNCTION TO DRAW THE CANVAS
drawCanvas: function(text) {
var image;
var words = text.split(" ");
var color = prittyNote.getColors();
//make sure you use the current font size chosen
var font = prittyNote.font.replace(/\d\d/gi, prittyNote.fontSize);
var maxWidth = prittyNote.maxWidth();
var clr = "#" + color.text,
bgclr = "#" + color.bgcolor,
hTagclr = "#" + color.hashtag;
var lineHeight = parseFloat(font, 10) + parseFloat(font, 10)/8;
prittyNote.clearCanvasGrid(prittyNote.canvas);
var canvas = document.getElementById(prittyNote.canvas); //the canvas ID
var context = canvas.getContext('2d');
canvas.width = prittyNote.canvasSize;
//if the user has chosen to reposition the note, use his offsets instead
var x = prittyNote.userDef !== false ? prittyNote.userDef.x: prittyNote.x();
var y = 0; //prittyNote.y;
//change the max width if the user has editted the position of the note
maxWidth = prittyNote.userDef !== false ? $(window).width() * 30: maxWidth;
console.log("the max width: " + maxWidth)
//If the user has added a background image
if(prittyNote.bgImage) image = prittyNote.theImage; else image = false;
//get the height of his text content
var ht = prittyNote.getHeight(text, context, x, y, maxWidth, lineHeight, image);
//To centralise the quote on the note canvas
var offset = Math.round(canvas.width - ht.height)/2;
if(prittyNote.userDef) {
y = prittyNote.userDef.y
}else{
//if the top offset is larger than the one set, make y that
y = offset < prittyNote.y() ? prittyNote.y(): offset;
}
console.log("This will be the top offset -- y:" + y + ", x:" + x + ", ht:" + ht.height);
canvas.height = canvas.width;
context.globalAlpha = 1;
if(image) {
var imageObj = new Image();
imageObj.onload = function() {
context.drawImage(imageObj, image.sx, image.sy, image.sw, image.sh, 0, 0, canvas.width, canvas.height);
context.fillStyle = '#000';
context.globalAlpha = 0.5;
context.fillRect(0, 0, canvas.width, canvas.height);
context.fillStyle = clr;
context.font = font;
context.globalAlpha = 1
prittyNote.wrapText(context, text, x, y, maxWidth, canvas.width, lineHeight, clr, hTagclr);
};
imageObj.src = image.src;
}else{
context.fillStyle = bgclr;
context.fillRect(0, 0, canvas.width, canvas.height);
context.globalAlpha = 1;
context.fillStyle = clr;
context.font = font;
prittyNote.wrapText(context, text, x, y, maxWidth, canvas.width, lineHeight, clr, hTagclr);
}
var res = "letters: " + text.length + " | words: " + words.length + " | width: " + canvas.width + "px";
$("#imagepath").html(res).data("default-text", res);
},
//function to calculate the height to assign the canvas dynamically
getHeight: function(text, ctx, x, y, mW, lH, img) {
var words = text.split(" "); //all words one by one
var c = 0, a = x, h;
var br = /(`)[\w]{0,}/
$.map(words, function(wd) {
var string = wd + " ";
var m = ctx.measureText(string);
var w = m.width;
var b = br.test(string);
if(b) y += lH, x = a, c++;
x += w;
if(x > mW) {
x = a;
y += lH;
c++;
}
})
c++
var wrapH = (c * 2) * lH;
h = lH + wrapH; // + lH;
//if(img) h += lH;
//if(h < 200) h = 200;
return {height: h, wrapheight: wrapH, offset: y, newlines: c, text: text};
},
//wrap the text so as to fit in the Canvas
wrapText: function(ctx, text, x, y, mW, cW, lH, clr, hTagclr) {
var words = text.split(' '); //split the string into words
var line = '', p, a = x; //required variables "a" keeps default "x" pos
var hash = /(\#|\@)[\w]{0,}/, //match hash tags & mentions
rest = /(\#\#)[\w]{0,}/, //match for double tags to print all the rest a diff color
startquote = /\"[\w]{0,}/, //if start of quote
endquote = /([\w]\"){0,}/, //end of quote
br = /(`)[\w]{0,}/;
| for (var n= 0; n<words.length; n++) {
var string = words[n] + " ";
var m = ctx.measureText(string);
var w = m.width; //width of word + " "
var p = hash.test(string); //match string to regex
var r = rest.test(string);
var sq = startquote.test(string);
var eq = endquote.test(string);
var b = br.test(string);
//console.log(pr); //debugging purposes
//test for ## -- change color of the rest of sentence if true
if(r) {
ctx.fillStyle = hTagclr;
clr = hTagclr; //change default color
string = string.replace('##', ''); //remove the double hashtags
w = ctx.measureText(string).width; //recalculate width to remove whitespaces left
}
//test for new line
else if(b) {
y += lH //jump downwards one more //next line
x = a //restart writing from x = 0
string = string.replace('`', ''); //remove the underscore
w = ctx.measureText(string).width; //recalculate width to remove whitespaces left
}
else{
//test for quotes, will depict the quote length and color it all
if(p) { //change color of only single words with single hash tags
ctx.fillStyle = hTagclr;
string = string.replace('#', '');
w = ctx.measureText(string).width; //recalculate width to remove whitespaces left
}
//reset default text color if not
else{
if(qc == 0) ctx.fillStyle = clr;
}
}
ctx.fillText(string, x, y); //print it out
x += w; //set next "x" offset for the next word
var xnw = ctx.measureText(words[n+1] + " ").width; //check for the future next word
var xn = x + xnw;
//console.log(xn);
if(xn >= cW) { //try it's existence to see if it breaks the maxW rule
y += lH;
x = a;
}else{ //if it doesn't continue as if it hasn't yet bin plotted down
if(x > mW) {
x = a;
y += lH; //new line
}
}
}
ctx.fillText(line, x, y);
},
//FUNCTION TO CLEAR CANVAS
clearCanvasGrid: function(){
var canvas = document.getElementById(prittyNote.canvas);
var context = canvas.getContext('2d');
//context.beginPath();
// Store the current transformation matrix
context.save();
// Use the identity matrix while clearing the canvas
context.setTransform(1, 0, 0, 1, 0, 0);
context.clearRect(0, 0, canvas.width, canvas.height);
// Restore the transform
context.restore(); //CLEARS THE SPECIFIC CANVAS COMPLETELY FOR NEW DRAWING
},
//checks to see if the chosen file is an image
isImage: function(imagedata) {
var allowed_types = ['gallery', 'jpeg', 'jpg', 'png', 'gif', 'bmp', 'JPEG', 'JPG', 'PNG', 'GIF', 'BMP'],
itscool = false
var imgtype = imagedata.toString().split(';')
imgtype = imgtype[0].split('/')
console.log(imgtype)
if($.inArray(imgtype[1], allowed_types) > -1) {
itscool = true
}
return itscool
},
readImage: function(input) {
var image
if (input.files && input.files[0]) {
var reader = new FileReader();
reader.onload = function (e) {
image = e.target.result
prittyNote.bgImage = prittyNote.isImage(image)
if(prittyNote.bgImage) {
prittyNote.theImage = image
prittyNote.drawCanvas(prittyNote.getValue(), image)
}
}
reader.readAsDataURL(input.files[0]);
}
},
removeImage: function() {
prittyNote.bgImage = false;
prittyNote.drawCanvas(prittyNote.getValue())
},
//AJAX REQUEST TO SEND CANVAS DATA TO CREATE IMAGE
makePrittyNote: function() {
var canvas = prittyNote.canvas;
var testCanvas = document.getElementById(canvas); //canvasID
var canvasData = testCanvas.toDataURL("image/png;"); //encrypt the data
AppFunctions.savingProgress(1);
//blackberry.ui.toast.show('Just a moment, saving PrittyNote...');
return testCanvas;
},
downloadPrittyNote: function() {
//Redraw the prittyNote
prittyNote.drawCanvas(prittyNote.getValue())
//get the Canvas element
var myCanvas = prittyNote.makePrittyNote();
AppFunctions.savingProgress(2);
//now download the image to storage
prittyNote.download.blackBerry(myCanvas);
console.log("Now downloading the Image to SDcard...");
},
download: {
blackBerry: function(canvas) {
var date = new Date();
var fileName = "prittyNote-" + date.toLocaleTimeString() + "-" + date.toLocaleDateString() + ".png";
var optionsBB = {
mode: blackberry.invoke.card.FILEPICKER_MODE_SAVER,
type: [blackberry.invoke.card.FILEPICKER_TYPE_PICTURE],
directory: [blackberry.io.sharedFolder],
defaultSaveFileNames: [fileName]
};
//Determine which request is ACTIVE
window.requestFileSystem = window.requestFileSystem || window.webkitRequestFileSystem;
blackberry.invoke.card.invokeFilePicker(optionsBB,
function (path) {
/* User chose a path, and we now have a variable referencing it! */
blackberry.io.sandbox = false;
var path = path + ".png";
console.log(path);
//Now Start the download progress...
window.requestFileSystem(window.PERSISTENT, 5*1024*1024 /*5MB*/,
function (fileSystem) {
//creating PrittyNote directory
fileSystem.root.getDirectory('PrittyNotes', {create: true}, function(dirEntry) {
//...
}, AppFunctions.onSavingError);
/* We were granted a FileSystem object. */
fileSystem.root.getFile(path, {create: true},
function (fileEntry) {
/* We were granted a FileEntry object. */
fileEntry.createWriter(
function (fileWriter) {
/* We were granted a FileWriter object! */
fileWriter.onerror = function (fileError) {
console.log('FileWriter Error: ' + fileError);
};
fileWriter.onprogress = function() {
AppFunctions.savingProgress(3);
}
fileWriter.onwriteend = function () {
//show completion of progress
AppFunctions.savingProgress(4, function() {
//+++++++++ add sharing options here ++++++++
//if USER has opted to share.
if(prittyNote.share) {
AppFunctions.shareThis({
type: "image/png",
image: "file://" + path
});
prittyNote.share = false;
}
});
//blackberry.ui.toast.show('PrittyNote saved successfully!');
};
//convert the image to a BLOB
canvas.toBlob(function (blob) {
fileWriter.write(blob);
},'image/png');
},
AppFunctions.onSavingError
);
},
AppFunctions.onSavingError
);
},
AppFunctions.onSavingError
);
},
function (reason) {
/* User cancelled. */
console.log('User Cancelled: ' + reason);
},
function (error) {
/* Invoked. */
if (error) {
console.log('Invoke Error: ' + error);
}
}
);
},
webPlatform: function(canvas) {
var downloadImage = $('a[data-action="download"]');
// When the download link is clicked, get the
// DataURL of the image and set it as href:
var url = canvas;
downloadImage.attr('href', url);
//Now trigger the image to be downloaded
downloadImage.click();
}
},
onSavingError: function(e) {
var msg = '';
switch (e.code) {
case FileError.QUOTA_EXCEEDED_ERR:
msg = 'QUOTA_EXCEEDED_ERR';
break;
case FileError.NOT_FOUND_ERR:
msg = 'NOT_FOUND_ERR';
break;
case FileError.SECURITY_ERR:
msg = 'SECURITY_ERR';
break;
case FileError.INVALID_MODIFICATION_ERR:
msg = 'INVALID_MODIFICATION_ERR';
break;
case FileError.INVALID_STATE_ERR:
msg = 'INVALID_STATE_ERR';
break;
default:
msg = 'Unknown Error';
break;
};
console.log('Error: ' + msg);
},
//AFTER GETING RESPONSE FROM THE CANVASTOPNG SEND DATA TO MATCH THE IMAGE AND LOCATION IN AN ARRAY/JSON FILE
imageMergedata: function(imgid, userId, imgpath) {
var echo = '<img src="../images/accept.png" alt="OK" title="Success"/>';
$.get("./getlocajax.php", {imgid: imgid, user: userId, text: prittyNote.getValue(), path: imgpath},
function(data) {
$("#imagepath").html(echo + " " + data.message);
$("#" + prittyNote.canvas).wrap('<a target="_blank" href="' + data.path +'" title="' + data.text +'" />');
}, "json");
},
makeDemoNotes: function() {
//!st check if the browser supports LocalStorage technology
if(localStorage) { //If it does
if(localStorage.getItem('demoImages') == (null || "NaN")) {
localStorage.setItem('demoImages', "1");
prittyNote.downloadPrittyNote();
}else{
console.log("The number of notes the user has created are --> " + parseInt(localStorage.getItem('demoImages')));
if(parseInt(localStorage.getItem('demoImages')) >= prittyNote.limit.images) {
$('#loadingPrefh2').css('color', '#cc0000').html('Oops! That\'s all you can make. Please purchase the PRO version to be able to make more cool PrittyNotes, Thank you!').parent().show();
$('.purchaseBtn').css('visibility', 'visible');
}else{
var newValue = parseInt(localStorage.getItem('demoImages')) + 1;
localStorage.setItem('demoImages', newValue);
prittyNote.downloadPrittyNote();
}
}
} //If the browser is an old one, and doesn't support LocalStorage, fallback to Cookies
else{
if($.cookie('demoImages') == null) {
$.cookie('demoImages', 1, { expires: 1000, path: '/' });
prittyNote.downloadPrittyNote();
}else{
conole.log("The number of notes the user has created are --> " + parseInt($.cookie('demoImages')));
if(parseInt($.cookie('demoImages')) > prittyNote.limit.images) { //prevent more images from being made
$('#loadingPrefh2').css('color', '#cc0000').html('Oops! That\'s all you can make. Please purchase the PRO version to be able to make more cool PrittyNotes, Thank you!').parent().show();
$('.purchaseBtn').css('visibility', 'visible');
}else{ //else count them, and make images
var newValue = parseInt($.cookie('demoImages')) + 1;
$.cookie('demoImages', newValue);
prittyNote.downloadPrittyNote();
}
}
} //$('#loadingPref').fadeOut(200)
},
//function to check the string length, max characters
checkTextLength: function() {
var text = prittyNote.getValue(),
len = text.length;
//MAX allowed is 350
if(len > prittyNote.limit.max) {
prittyNote.drawCanvas('#Oh #Snap! You\'ve #written too much!')
}
else if(len < prittyNote.limit.min) { //least allowed is 20
prittyNote.drawCanvas('#Hmmm! You\'ve barely #written anything!')
}
else{//if everything is okay in between 20 - 350
prittyNote.drawCanvas(text);
}
},
keyEvents: function() {
$('#image').on('change', function() {
prittyNote.readImage(this)
})
$('.removeBg').on('click', function() {
prittyNote.removeImage()
})
$('input[data-color]').each(function() {
$(this).on({
focus: function() {
$(this).trigger('change');
//$.map(['blur', 'change', 'focus'], function(a) { $(this).trigger(a); });
console.log("triggered Keyup -- color selected", $(this).data('color'));
},
change: function() {
prittyNote.getColors();
prittyNote.checkTextLength();
},
blur: prittyNote.checkTextLength()
})
})
$('a').on('click', function(e) {
e.preventDefault();
})
},
//when the user holds on the canvas to drag around the text
onMouseDownOrUp: function(pos) {
prittyNote.userDef = {x: pos.left, y: pos.top}
prittyNote.drawCanvas(prittyNote.getValue())
},
getTimeDifference: function(earlierDate, laterDate) {
var nTotalDiff = laterDate.getTime() - earlierDate.getTime();
var oDiff = new Object();
oDiff.days = Math.floor(nTotalDiff/1000/60/60/24);
nTotalDiff -= oDiff.days*1000*60*60*24;
oDiff.hours = Math.floor(nTotalDiff/1000/60/60);
nTotalDiff -= oDiff.hours*1000*60*60;
oDiff.minutes = Math.floor(nTotalDiff/1000/60);
nTotalDiff -= oDiff.minutes*1000*60;
oDiff.seconds = Math.floor(nTotalDiff/1000);
return oDiff;
},
loadScript: function(url, callback) {
var script = document.createElement("script")
script.type = "text/javascript";
if(script.readyState){ //IE
script.onreadystatechange = function(){
if (script.readyState == "loaded" || script.readyState == "complete") {
script.onreadystatechange = null;
if(typeof callback == "function") callback();
}
};
}else{ //Others
script.onload = function(){
if(typeof callback == "function") callback(); //make sure it is a function
};
}
script.src = url;
document.getElementsByTagName("head")[0].appendChild(script);
}
};
//++++++++++++++++++ load Utilities jQuery Plug-in ++++++++++++++++++++++++
(function ($) {
var families = [], fonts = [], s = "";
$.fn.extend({
initPrittyNote: function() {
loadUtilities(this);
}
})
loadUtilities = function(options) {
var defaults = {
fileorurl: null,
version: {
trial: './js/stickinoteUtilitiesTRY.json',
pro: './js/stickinoteUtilitiesPRO.json'
},
script: {
google: "http://ajax.googleapis.com/ajax/libs/webfont/1.0.31/webfont.js",
fontface: "./js/jquery.fontface.js",
load: false
},
fontPath: "./fonts/",
loadIndicator: $('.logo p'),
loader: $('.show-app-progress'),
pro: false
}
var use = $.extend({}, defaults, options);
//Determine which version are we loading of the APP pro || trial
use.fileorurl = use.pro !== false ? use.version.trial: use.version.pro;
//Load the required fonts from the fonts-folder //the server through GooGle - DEPRECATED
$.getScript(use.script.fontface, function() {
//get the required JSON FILE to locate themes & fonts
$.getJSON(use.fileorurl, function(json) {
var pallete = json.data.palletes;
console.log('Total Themes that are Loaded -- ' + pallete.length)
var startVal = 0
var adVal = 50/pallete.length;
//++++++ LOADING THEMES ++++++
//now build the theme select option
$.each(pallete, function(x, p) {
$('ul#themes').append('<li data-theme="' + p.name + '">' + p.name + '</li>');
$('li[data-theme="' + p.name + '"]').css({backgroundColor: '#' + p.color[0], color: '#' + p.color[1]});
startVal += adVal;
use.loader.animate({width: startVal + "%"}, 50, "easeOutExpo")
console.log("Loaded theme...");
});
fonts = json.data.fonts;
prittyNote.fontSize = 23;
//Get the application's default font and apply it.
var deffont = $.grep(fonts, function(a,b) { return /comfortaa/gi.test(a.name); });
prittyNote.font = deffont[0].string;
//StartVal I presume is at "50 + %" now
adVal = 50/fonts.length;
//now begin building the theme option
$.each(fonts, function (e, w) {
//Load each font stupidly with getJSON, actually will load it
$.getJSON(use.fontPath + w.font, function() {}).always(function() {
//use response always, its actual function will do nothing
families.push(w.name);
$('ul#fonts').append('<li data-size="' + parseInt(w.string) + '" data-font="' + w.name + '">' + w.name + '</li>')
setTimeout(function() {
$('li[data-font="' + w.name + '"]')
.fontface({
fontName: w.name, //the font name
fileName: w.font, //and path
fontSize: parseInt(w.string) + "px" //the font size
})
.fontApply({ //apply the font to this specific element
fontFamily: w.name
//fontSize: parseInt(w.string) + 'px'
})
startVal += adVal;
setTimeout(function() {
use.loader.animate({width: startVal + "%"}, 50, "easeInOutCubic")
}, 0)
console.log("Loaded fonts now...");
}, 100)
})
});
//get the width of the parent progress bar //288
var parentW = use.loader.parent().width()
var draw = 0;
var Utilities = setInterval(function() {
if(use.loader.width() > (parentW * 0.99)) {
clearInterval(Utilities);
use.loadIndicator.html("Just a second please..");
console.log(families.toString());
//now draw the example canvas
if(draw == 0) prittyNote.drawCanvas(prittyNote.getValue()), draw++;
use.loadIndicator.html('Yaaeey! We good to go!');
if($('canvas#prittynote').width() > 200) {
setTimeout(function() {
$('.loading').fadeOut(1000, function() {
console.log("FAding OUUUUUTTTTTTTTTTTTTTTTTTTTTTTTTT")
prittyNote.drawCanvas(prittyNote.getValue())
})
}, 1000); //now remove the loading panel now
}
}
else if(use.loader.width() > (parentW/2)) {
use.loadIndicator.html("Loading Fonts now...")
}else{
use.loadIndicator.html("Loading Themes...")
}
}, 50);
//now bind each select element to it's response function
$("ul#themes").hammer().on("tap", 'li', function() {
installPallette($(this), pallete); //start keep watch in the select changes
});
$("ul#fonts").hammer().on("tap", 'li', function() {
installFont($(this), fonts); //start keep watch in the select changes
});
})
});
}
installPallette = function(el, pallete) {
var p = el.data('theme'); //get the selected value of the palettes
//loop thru the palette matching the required palette request
var x = $.grep(pallete, function(a,b) {
return a.name == p;
})
var colors = x[0].color;
console.log(p, colors);
$("#bgclr").val(colors[0])
$("#text").val(colors[1])
$("#hashtag").val(colors[2])
prittyNote.drawCanvas(prittyNote.getValue());
closeSideBars()
}
installFont = function(el) {
var f = el.data('font');
var x = $.grep(fonts, function(a,b) { // loop through the fonts getting the corresponding font match
return f == a.name;
})
var font = x[0];
prittyNote.font = font.string; //if found assign to font as the defualt in use from now
prittyNote.drawCanvas(prittyNote.getValue());
closeSideBars()
}
closeSideBars = function() {
$.sidr('close', 'side-bar-themes'); //close all side bars
$.sidr('close', 'side-bar-fonts');
}
})(jQuery); |
var qc = 0; //will count, 0 means return color to normal
| random_line_split |
prittynoteAppManager.js | /**
*
*@Author - Eugene Mutai
*@Twitter - JheneKnights
*
* Date: 3/10/13
* Time: 5:11 PM
* Description: Model javascript Script for PrittyNote
*
* Dual licensed under the MIT or GPL Version 2 licenses.
* http://www.opensource.org/licenses/mit-license.php
* http://www.opensource.org/licenses/gpl-2.0.php
*
* Copyright (C) 2013
* @Version - Full, Object Oriented, Stable, Pretty
* @Codenames - sha1: cc99d04a02371d96ecaefd2254be75b048e80373, md5: ce3d6db7d4dd0a93a5383c08809ea513
*/
var prittyNote = {
//default prittyNote variables
canvasSize: $(window).width() - 10,
maxWidth: function() {
return prittyNote.canvasSize * 0.8
},
x: function() {
return (prittyNote.canvasSize - prittyNote.maxWidth())/2;
}, //allowed width divided by 2
y: function() {
return prittyNote.x();
},
font: "24px 'Comfortaa'",
fontSize: 28,
drawText: undefined,
bgImage:false,
theImage: undefined,
canvas: 'prittynote',
editCanvas: false,
userDef: false,
limit: {
max: 350,
min: 2,
images: 200
},
sidebars : {
themes: $(".side-bar-themes"),
fonts: $(".side-bar-fonts")
},
elapsetime: false, //time between two intervals on mouse click and hold
share: false,
URL: {
test: "http://localhost/status/app/",
http: "http://app.prittynote.com/app/"
},
setValue: function(value) {
document.getElementsByTagName('textarea')[0].value = value;
},
getValue: function(){
prittyNote.drawText = document.getElementsByTagName('textarea')[0].value;
return prittyNote.drawText;
},
getStatus: function() {
var text = prittyNote.getValue()
prittyNote.drawCanvas(text);
},
//Give the user a basic IDEA of how his prittyNote will look like
getColors: function() {
var clr = $("#text").val(),
bgclr = $("#bgclr").val(),
hashtagclr = $("#hashtag").val();
return {"text": clr, "bgcolor": bgclr, "hashtag": hashtagclr};
},
//FUNCTION TO DRAW THE CANVAS
drawCanvas: function(text) {
var image;
var words = text.split(" ");
var color = prittyNote.getColors();
//make sure you use the current font size chosen
var font = prittyNote.font.replace(/\d\d/gi, prittyNote.fontSize);
var maxWidth = prittyNote.maxWidth();
var clr = "#" + color.text,
bgclr = "#" + color.bgcolor,
hTagclr = "#" + color.hashtag;
var lineHeight = parseFloat(font, 10) + parseFloat(font, 10)/8;
prittyNote.clearCanvasGrid(prittyNote.canvas);
var canvas = document.getElementById(prittyNote.canvas); //the canvas ID
var context = canvas.getContext('2d');
canvas.width = prittyNote.canvasSize;
//if the user has chosen to reposition the note, use his offsets instead
var x = prittyNote.userDef !== false ? prittyNote.userDef.x: prittyNote.x();
var y = 0; //prittyNote.y;
//change the max width if the user has editted the position of the note
maxWidth = prittyNote.userDef !== false ? $(window).width() * 30: maxWidth;
console.log("the max width: " + maxWidth)
//If the user has added a background image
if(prittyNote.bgImage) image = prittyNote.theImage; else image = false;
//get the height of his text content
var ht = prittyNote.getHeight(text, context, x, y, maxWidth, lineHeight, image);
//To centralise the quote on the note canvas
var offset = Math.round(canvas.width - ht.height)/2;
if(prittyNote.userDef) {
y = prittyNote.userDef.y
}else{
//if the top offset is larger than the one set, make y that
y = offset < prittyNote.y() ? prittyNote.y(): offset;
}
console.log("This will be the top offset -- y:" + y + ", x:" + x + ", ht:" + ht.height);
canvas.height = canvas.width;
context.globalAlpha = 1;
if(image) {
var imageObj = new Image();
imageObj.onload = function() {
context.drawImage(imageObj, image.sx, image.sy, image.sw, image.sh, 0, 0, canvas.width, canvas.height);
context.fillStyle = '#000';
context.globalAlpha = 0.5;
context.fillRect(0, 0, canvas.width, canvas.height);
context.fillStyle = clr;
context.font = font;
context.globalAlpha = 1
prittyNote.wrapText(context, text, x, y, maxWidth, canvas.width, lineHeight, clr, hTagclr);
};
imageObj.src = image.src;
}else{
context.fillStyle = bgclr;
context.fillRect(0, 0, canvas.width, canvas.height);
context.globalAlpha = 1;
context.fillStyle = clr;
context.font = font;
prittyNote.wrapText(context, text, x, y, maxWidth, canvas.width, lineHeight, clr, hTagclr);
}
var res = "letters: " + text.length + " | words: " + words.length + " | width: " + canvas.width + "px";
$("#imagepath").html(res).data("default-text", res);
},
//function to calculate the height to assign the canvas dynamically
getHeight: function(text, ctx, x, y, mW, lH, img) {
var words = text.split(" "); //all words one by one
var c = 0, a = x, h;
var br = /(`)[\w]{0,}/
$.map(words, function(wd) {
var string = wd + " ";
var m = ctx.measureText(string);
var w = m.width;
var b = br.test(string);
if(b) y += lH, x = a, c++;
x += w;
if(x > mW) {
x = a;
y += lH;
c++;
}
})
c++
var wrapH = (c * 2) * lH;
h = lH + wrapH; // + lH;
//if(img) h += lH;
//if(h < 200) h = 200;
return {height: h, wrapheight: wrapH, offset: y, newlines: c, text: text};
},
//wrap the text so as to fit in the Canvas
wrapText: function(ctx, text, x, y, mW, cW, lH, clr, hTagclr) {
var words = text.split(' '); //split the string into words
var line = '', p, a = x; //required variables "a" keeps default "x" pos
var hash = /(\#|\@)[\w]{0,}/, //match hash tags & mentions
rest = /(\#\#)[\w]{0,}/, //match for double tags to print all the rest a diff color
startquote = /\"[\w]{0,}/, //if start of quote
endquote = /([\w]\"){0,}/, //end of quote
br = /(`)[\w]{0,}/;
var qc = 0; //will count, 0 means return color to normal
for (var n= 0; n<words.length; n++) {
var string = words[n] + " ";
var m = ctx.measureText(string);
var w = m.width; //width of word + " "
var p = hash.test(string); //match string to regex
var r = rest.test(string);
var sq = startquote.test(string);
var eq = endquote.test(string);
var b = br.test(string);
//console.log(pr); //debugging purposes
//test for ## -- change color of the rest of sentence if true
if(r) {
ctx.fillStyle = hTagclr;
clr = hTagclr; //change default color
string = string.replace('##', ''); //remove the double hashtags
w = ctx.measureText(string).width; //recalculate width to remove whitespaces left
}
//test for new line
else if(b) {
y += lH //jump downwards one more //next line
x = a //restart writing from x = 0
string = string.replace('`', ''); //remove the underscore
w = ctx.measureText(string).width; //recalculate width to remove whitespaces left
}
else |
ctx.fillText(string, x, y); //print it out
x += w; //set next "x" offset for the next word
var xnw = ctx.measureText(words[n+1] + " ").width; //check for the future next word
var xn = x + xnw;
//console.log(xn);
if(xn >= cW) { //try it's existence to see if it breaks the maxW rule
y += lH;
x = a;
}else{ //if it doesn't continue as if it hasn't yet bin plotted down
if(x > mW) {
x = a;
y += lH; //new line
}
}
}
ctx.fillText(line, x, y);
},
//FUNCTION TO CLEAR CANVAS
clearCanvasGrid: function(){
var canvas = document.getElementById(prittyNote.canvas);
var context = canvas.getContext('2d');
//context.beginPath();
// Store the current transformation matrix
context.save();
// Use the identity matrix while clearing the canvas
context.setTransform(1, 0, 0, 1, 0, 0);
context.clearRect(0, 0, canvas.width, canvas.height);
// Restore the transform
context.restore(); //CLEARS THE SPECIFIC CANVAS COMPLETELY FOR NEW DRAWING
},
//checks to see if the chosen file is an image
isImage: function(imagedata) {
var allowed_types = ['gallery', 'jpeg', 'jpg', 'png', 'gif', 'bmp', 'JPEG', 'JPG', 'PNG', 'GIF', 'BMP'],
itscool = false
var imgtype = imagedata.toString().split(';')
imgtype = imgtype[0].split('/')
console.log(imgtype)
if($.inArray(imgtype[1], allowed_types) > -1) {
itscool = true
}
return itscool
},
readImage: function(input) {
var image
if (input.files && input.files[0]) {
var reader = new FileReader();
reader.onload = function (e) {
image = e.target.result
prittyNote.bgImage = prittyNote.isImage(image)
if(prittyNote.bgImage) {
prittyNote.theImage = image
prittyNote.drawCanvas(prittyNote.getValue(), image)
}
}
reader.readAsDataURL(input.files[0]);
}
},
removeImage: function() {
prittyNote.bgImage = false;
prittyNote.drawCanvas(prittyNote.getValue())
},
//AJAX REQUEST TO SEND CANVAS DATA TO CREATE IMAGE
makePrittyNote: function() {
var canvas = prittyNote.canvas;
var testCanvas = document.getElementById(canvas); //canvasID
var canvasData = testCanvas.toDataURL("image/png;"); //encrypt the data
AppFunctions.savingProgress(1);
//blackberry.ui.toast.show('Just a moment, saving PrittyNote...');
return testCanvas;
},
downloadPrittyNote: function() {
//Redraw the prittyNote
prittyNote.drawCanvas(prittyNote.getValue())
//get the Canvas element
var myCanvas = prittyNote.makePrittyNote();
AppFunctions.savingProgress(2);
//now download the image to storage
prittyNote.download.blackBerry(myCanvas);
console.log("Now downloading the Image to SDcard...");
},
download: {
blackBerry: function(canvas) {
var date = new Date();
var fileName = "prittyNote-" + date.toLocaleTimeString() + "-" + date.toLocaleDateString() + ".png";
var optionsBB = {
mode: blackberry.invoke.card.FILEPICKER_MODE_SAVER,
type: [blackberry.invoke.card.FILEPICKER_TYPE_PICTURE],
directory: [blackberry.io.sharedFolder],
defaultSaveFileNames: [fileName]
};
//Determine which request is ACTIVE
window.requestFileSystem = window.requestFileSystem || window.webkitRequestFileSystem;
blackberry.invoke.card.invokeFilePicker(optionsBB,
function (path) {
/* User chose a path, and we now have a variable referencing it! */
blackberry.io.sandbox = false;
var path = path + ".png";
console.log(path);
//Now Start the download progress...
window.requestFileSystem(window.PERSISTENT, 5*1024*1024 /*5MB*/,
function (fileSystem) {
//creating PrittyNote directory
fileSystem.root.getDirectory('PrittyNotes', {create: true}, function(dirEntry) {
//...
}, AppFunctions.onSavingError);
/* We were granted a FileSystem object. */
fileSystem.root.getFile(path, {create: true},
function (fileEntry) {
/* We were granted a FileEntry object. */
fileEntry.createWriter(
function (fileWriter) {
/* We were granted a FileWriter object! */
fileWriter.onerror = function (fileError) {
console.log('FileWriter Error: ' + fileError);
};
fileWriter.onprogress = function() {
AppFunctions.savingProgress(3);
}
fileWriter.onwriteend = function () {
//show completion of progress
AppFunctions.savingProgress(4, function() {
//+++++++++ add sharing options here ++++++++
//if USER has opted to share.
if(prittyNote.share) {
AppFunctions.shareThis({
type: "image/png",
image: "file://" + path
});
prittyNote.share = false;
}
});
//blackberry.ui.toast.show('PrittyNote saved successfully!');
};
//convert the image to a BLOB
canvas.toBlob(function (blob) {
fileWriter.write(blob);
},'image/png');
},
AppFunctions.onSavingError
);
},
AppFunctions.onSavingError
);
},
AppFunctions.onSavingError
);
},
function (reason) {
/* User cancelled. */
console.log('User Cancelled: ' + reason);
},
function (error) {
/* Invoked. */
if (error) {
console.log('Invoke Error: ' + error);
}
}
);
},
webPlatform: function(canvas) {
var downloadImage = $('a[data-action="download"]');
// When the download link is clicked, get the
// DataURL of the image and set it as href:
var url = canvas;
downloadImage.attr('href', url);
//Now trigger the image to be downloaded
downloadImage.click();
}
},
onSavingError: function(e) {
var msg = '';
switch (e.code) {
case FileError.QUOTA_EXCEEDED_ERR:
msg = 'QUOTA_EXCEEDED_ERR';
break;
case FileError.NOT_FOUND_ERR:
msg = 'NOT_FOUND_ERR';
break;
case FileError.SECURITY_ERR:
msg = 'SECURITY_ERR';
break;
case FileError.INVALID_MODIFICATION_ERR:
msg = 'INVALID_MODIFICATION_ERR';
break;
case FileError.INVALID_STATE_ERR:
msg = 'INVALID_STATE_ERR';
break;
default:
msg = 'Unknown Error';
break;
};
console.log('Error: ' + msg);
},
//AFTER GETING RESPONSE FROM THE CANVASTOPNG SEND DATA TO MATCH THE IMAGE AND LOCATION IN AN ARRAY/JSON FILE
imageMergedata: function(imgid, userId, imgpath) {
var echo = '<img src="../images/accept.png" alt="OK" title="Success"/>';
$.get("./getlocajax.php", {imgid: imgid, user: userId, text: prittyNote.getValue(), path: imgpath},
function(data) {
$("#imagepath").html(echo + " " + data.message);
$("#" + prittyNote.canvas).wrap('<a target="_blank" href="' + data.path +'" title="' + data.text +'" />');
}, "json");
},
makeDemoNotes: function() {
//!st check if the browser supports LocalStorage technology
if(localStorage) { //If it does
if(localStorage.getItem('demoImages') == (null || "NaN")) {
localStorage.setItem('demoImages', "1");
prittyNote.downloadPrittyNote();
}else{
console.log("The number of notes the user has created are --> " + parseInt(localStorage.getItem('demoImages')));
if(parseInt(localStorage.getItem('demoImages')) >= prittyNote.limit.images) {
$('#loadingPrefh2').css('color', '#cc0000').html('Oops! That\'s all you can make. Please purchase the PRO version to be able to make more cool PrittyNotes, Thank you!').parent().show();
$('.purchaseBtn').css('visibility', 'visible');
}else{
var newValue = parseInt(localStorage.getItem('demoImages')) + 1;
localStorage.setItem('demoImages', newValue);
prittyNote.downloadPrittyNote();
}
}
} //If the browser is an old one, and doesn't support LocalStorage, fallback to Cookies
else{
if($.cookie('demoImages') == null) {
$.cookie('demoImages', 1, { expires: 1000, path: '/' });
prittyNote.downloadPrittyNote();
}else{
conole.log("The number of notes the user has created are --> " + parseInt($.cookie('demoImages')));
if(parseInt($.cookie('demoImages')) > prittyNote.limit.images) { //prevent more images from being made
$('#loadingPrefh2').css('color', '#cc0000').html('Oops! That\'s all you can make. Please purchase the PRO version to be able to make more cool PrittyNotes, Thank you!').parent().show();
$('.purchaseBtn').css('visibility', 'visible');
}else{ //else count them, and make images
var newValue = parseInt($.cookie('demoImages')) + 1;
$.cookie('demoImages', newValue);
prittyNote.downloadPrittyNote();
}
}
} //$('#loadingPref').fadeOut(200)
},
//function to check the string length, max characters
checkTextLength: function() {
var text = prittyNote.getValue(),
len = text.length;
//MAX allowed is 350
if(len > prittyNote.limit.max) {
prittyNote.drawCanvas('#Oh #Snap! You\'ve #written too much!')
}
else if(len < prittyNote.limit.min) { //least allowed is 20
prittyNote.drawCanvas('#Hmmm! You\'ve barely #written anything!')
}
else{//if everything is okay in between 20 - 350
prittyNote.drawCanvas(text);
}
},
keyEvents: function() {
$('#image').on('change', function() {
prittyNote.readImage(this)
})
$('.removeBg').on('click', function() {
prittyNote.removeImage()
})
$('input[data-color]').each(function() {
$(this).on({
focus: function() {
$(this).trigger('change');
//$.map(['blur', 'change', 'focus'], function(a) { $(this).trigger(a); });
console.log("triggered Keyup -- color selected", $(this).data('color'));
},
change: function() {
prittyNote.getColors();
prittyNote.checkTextLength();
},
blur: prittyNote.checkTextLength()
})
})
$('a').on('click', function(e) {
e.preventDefault();
})
},
//when the user holds on the canvas to drag around the text
onMouseDownOrUp: function(pos) {
prittyNote.userDef = {x: pos.left, y: pos.top}
prittyNote.drawCanvas(prittyNote.getValue())
},
getTimeDifference: function(earlierDate, laterDate) {
var nTotalDiff = laterDate.getTime() - earlierDate.getTime();
var oDiff = new Object();
oDiff.days = Math.floor(nTotalDiff/1000/60/60/24);
nTotalDiff -= oDiff.days*1000*60*60*24;
oDiff.hours = Math.floor(nTotalDiff/1000/60/60);
nTotalDiff -= oDiff.hours*1000*60*60;
oDiff.minutes = Math.floor(nTotalDiff/1000/60);
nTotalDiff -= oDiff.minutes*1000*60;
oDiff.seconds = Math.floor(nTotalDiff/1000);
return oDiff;
},
loadScript: function(url, callback) {
var script = document.createElement("script")
script.type = "text/javascript";
if(script.readyState){ //IE
script.onreadystatechange = function(){
if (script.readyState == "loaded" || script.readyState == "complete") {
script.onreadystatechange = null;
if(typeof callback == "function") callback();
}
};
}else{ //Others
script.onload = function(){
if(typeof callback == "function") callback(); //make sure it is a function
};
}
script.src = url;
document.getElementsByTagName("head")[0].appendChild(script);
}
};
//++++++++++++++++++ load Utilities jQuery Plug-in ++++++++++++++++++++++++
(function ($) {
var families = [], fonts = [], s = "";
$.fn.extend({
initPrittyNote: function() {
loadUtilities(this);
}
})
loadUtilities = function(options) {
var defaults = {
fileorurl: null,
version: {
trial: './js/stickinoteUtilitiesTRY.json',
pro: './js/stickinoteUtilitiesPRO.json'
},
script: {
google: "http://ajax.googleapis.com/ajax/libs/webfont/1.0.31/webfont.js",
fontface: "./js/jquery.fontface.js",
load: false
},
fontPath: "./fonts/",
loadIndicator: $('.logo p'),
loader: $('.show-app-progress'),
pro: false
}
var use = $.extend({}, defaults, options);
//Determine which version are we loading of the APP pro || trial
use.fileorurl = use.pro !== false ? use.version.trial: use.version.pro;
//Load the required fonts from the fonts-folder //the server through GooGle - DEPRECATED
$.getScript(use.script.fontface, function() {
//get the required JSON FILE to locate themes & fonts
$.getJSON(use.fileorurl, function(json) {
var pallete = json.data.palletes;
console.log('Total Themes that are Loaded -- ' + pallete.length)
var startVal = 0
var adVal = 50/pallete.length;
//++++++ LOADING THEMES ++++++
//now build the theme select option
$.each(pallete, function(x, p) {
$('ul#themes').append('<li data-theme="' + p.name + '">' + p.name + '</li>');
$('li[data-theme="' + p.name + '"]').css({backgroundColor: '#' + p.color[0], color: '#' + p.color[1]});
startVal += adVal;
use.loader.animate({width: startVal + "%"}, 50, "easeOutExpo")
console.log("Loaded theme...");
});
fonts = json.data.fonts;
prittyNote.fontSize = 23;
//Get the application's default font and apply it.
var deffont = $.grep(fonts, function(a,b) { return /comfortaa/gi.test(a.name); });
prittyNote.font = deffont[0].string;
//StartVal I presume is at "50 + %" now
adVal = 50/fonts.length;
//now begin building the theme option
$.each(fonts, function (e, w) {
//Load each font stupidly with getJSON, actually will load it
$.getJSON(use.fontPath + w.font, function() {}).always(function() {
//use response always, its actual function will do nothing
families.push(w.name);
$('ul#fonts').append('<li data-size="' + parseInt(w.string) + '" data-font="' + w.name + '">' + w.name + '</li>')
setTimeout(function() {
$('li[data-font="' + w.name + '"]')
.fontface({
fontName: w.name, //the font name
fileName: w.font, //and path
fontSize: parseInt(w.string) + "px" //the font size
})
.fontApply({ //apply the font to this specific element
fontFamily: w.name
//fontSize: parseInt(w.string) + 'px'
})
startVal += adVal;
setTimeout(function() {
use.loader.animate({width: startVal + "%"}, 50, "easeInOutCubic")
}, 0)
console.log("Loaded fonts now...");
}, 100)
})
});
//get the width of the parent progress bar //288
var parentW = use.loader.parent().width()
var draw = 0;
var Utilities = setInterval(function() {
if(use.loader.width() > (parentW * 0.99)) {
clearInterval(Utilities);
use.loadIndicator.html("Just a second please..");
console.log(families.toString());
//now draw the example canvas
if(draw == 0) prittyNote.drawCanvas(prittyNote.getValue()), draw++;
use.loadIndicator.html('Yaaeey! We good to go!');
if($('canvas#prittynote').width() > 200) {
setTimeout(function() {
$('.loading').fadeOut(1000, function() {
console.log("FAding OUUUUUTTTTTTTTTTTTTTTTTTTTTTTTTT")
prittyNote.drawCanvas(prittyNote.getValue())
})
}, 1000); //now remove the loading panel now
}
}
else if(use.loader.width() > (parentW/2)) {
use.loadIndicator.html("Loading Fonts now...")
}else{
use.loadIndicator.html("Loading Themes...")
}
}, 50);
//now bind each select element to it's response function
$("ul#themes").hammer().on("tap", 'li', function() {
installPallette($(this), pallete); //start keep watch in the select changes
});
$("ul#fonts").hammer().on("tap", 'li', function() {
installFont($(this), fonts); //start keep watch in the select changes
});
})
});
}
installPallette = function(el, pallete) {
var p = el.data('theme'); //get the selected value of the palettes
//loop thru the palette matching the required palette request
var x = $.grep(pallete, function(a,b) {
return a.name == p;
})
var colors = x[0].color;
console.log(p, colors);
$("#bgclr").val(colors[0])
$("#text").val(colors[1])
$("#hashtag").val(colors[2])
prittyNote.drawCanvas(prittyNote.getValue());
closeSideBars()
}
installFont = function(el) {
var f = el.data('font');
var x = $.grep(fonts, function(a,b) { // loop through the fonts getting the corresponding font match
return f == a.name;
})
var font = x[0];
prittyNote.font = font.string; //if found assign to font as the defualt in use from now
prittyNote.drawCanvas(prittyNote.getValue());
closeSideBars()
}
closeSideBars = function() {
$.sidr('close', 'side-bar-themes'); //close all side bars
$.sidr('close', 'side-bar-fonts');
}
})(jQuery);
| {
//test for quotes, will depict the quote length and color it all
if(p) { //change color of only single words with single hash tags
ctx.fillStyle = hTagclr;
string = string.replace('#', '');
w = ctx.measureText(string).width; //recalculate width to remove whitespaces left
}
//reset default text color if not
else{
if(qc == 0) ctx.fillStyle = clr;
}
} | conditional_block |
press.py | import numpy as np, tensorflow as tf, aolib.util as ut, aolib.img as ig, os, random, h5py, sklearn.metrics
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
import vgg
#vgg = nets.vgg
#import tensorflow.contrib.slim.nets as nets
#resnet_v1 = nets.resnet_v1
#import resnet, resnet_utils
pj = ut.pjoin
full_dim = 256
crop_dim = 224
train_iters = 10000
batch_size = 32
base_lr = 1e-3
#base_lr = 1e-3
#base_lr = 1e-2
gamma = 0.5
#step_size = 1000
step_size = 2500
#sample_dur_secs = 0.15
sample_dur_secs = 0.05
sample_fps = 60
gpu = '/gpu:0'
#init_path = '../results/resnet_v1_50.ckpt'
init_path = '../results/vgg_16.ckpt'
checkpoint_iters = 100
#finetune_top_only = True
finetune_top_only = False
model_style = 'diff'
augment = True
#augment = False
#model_style = 'dual'
def download_pretrained():
# https://github.com/tensorflow/models/tree/master/slim
ut.mkdir('../results')
# ut.sys_check('wget http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz '
# ' -O ../results/resnet_v1_50_2016_08_28.tar.gz')
# ut.sys_check('cd ../results; tar -xzf resnet_v1_50_2016_08_28.tar.gz')
ut.sys_check('wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz '
' -O ../results/vgg_16_2016_08_28.tar.gz')
ut.sys_check('cd ../results; tar -xzf vgg_16_2016_08_28.tar.gz')
# def extract_frames((vid_file, time, label, vid_idx, im_dir)):
# examples = []
# with ut.TmpDir() as tmp_dir:
# # ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# # (vid_file, full_dim, full_dim, time,
# # sample_dur_secs, sample_fps, tmp_dir))
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# sample_dur_secs, sample_fps, tmp_dir))
# for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
# im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
# ut.sys_check('cp %s %s' % (fname, im_file))
# examples.append((im_file, label))
# return examples
# def extract_frames((vid_file, time, label, vid_idx, im_dir)):
# examples = []
# with ut.TmpDir() as tmp_dir:
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# 0.05, sample_fps, tmp_dir))
# fname = sorted(ut.glob(pj(tmp_dir, '*.png')))[0]
# prev_file = pj(im_dir, 'prev_%s_%05d_%d.png' % (vid_idx, 0, label))
# ut.sys_check('cp %s %s' % (fname, prev_file))
# with ut.TmpDir() as tmp_dir:
# # ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# # (vid_file, full_dim, full_dim, time,
# # sample_dur_secs, sample_fps, tmp_dir))
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# sample_dur_secs, sample_fps, tmp_dir))
# for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
# im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
# ut.sys_check('cp %s %s' % (fname, im_file))
# examples.append((im_file, prev_file, label))
# return examples
def extract_frames((vid_file, time, label, vid_idx, im_dir, prev_free_time)):
examples = []
with ut.TmpDir() as tmp_dir:
free_dur = 0.1
ut.sys_check('ffmpeg -loglevel warning -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
(prev_free_time, vid_file, full_dim, full_dim, free_dur, sample_fps, tmp_dir))
#fname = sorted(ut.glob(pj(tmp_dir, '*.png')))[0]
assert len(ut.glob(pj(tmp_dir, '*.png'))), 'no frames for prev_free_time'
fname = random.choice(sorted(ut.glob(pj(tmp_dir, '*.png'))))
prev_file = pj(im_dir, 'prev_%s_%05d_%d.png' % (vid_idx, 0, label))
ut.sys_check('cp %s %s' % (fname, prev_file))
with ut.TmpDir() as tmp_dir:
# ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# (vid_file, full_dim, full_dim, time,
# sample_dur_secs, sample_fps, tmp_dir))
ut.sys_check('ffmpeg -loglevel warning -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
(time, vid_file, full_dim, full_dim,
sample_dur_secs, sample_fps, tmp_dir))
for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
ut.sys_check('cp %s %s' % (fname, im_file))
examples.append((im_file, prev_file, label, vid_file))
return examples
def examples_from_db((db_file, im_dir)):
examples = []
try:
with h5py.File(db_file, 'r') as db:
#print db.keys()
sc = lambda x : ig.scale(x, (full_dim, full_dim))
for x in ['A', 'B']:
im_file = ut.make_temp('.png', dir = im_dir)
prev_file = ut.make_temp('.png', dir = im_dir)
ig.save(im_file, sc(db['GelSight%s_image_post_gripping' % x]))
ig.save(prev_file, sc(db['GelSight%s_image_pre_gripping' % x]))
if 'is_gripping' in db:
label = int(np.array(db['is_gripping'])[0])
elif 'Is gripping?' in db:
label = int(np.array(db['Is gripping?'])[0])
else:
raise RuntimeError('No label!')
examples.append((im_file, prev_file, label, db_file))
except:
print 'Failed to open:', db_file
return examples
def write_data(vid_path, out_dir, train_frac = 0.75):
im_dir = ut.mkdir(pj(out_dir, 'ims'))
in_data = []
meta_files = sorted(ut.glob(vid_path, 'train', '*.txt'))
print 'meta files:'
for x in meta_files:
print x
print
for meta_idx, meta_file in enumerate(meta_files):
last_prev_time = 0.
vid_file = meta_file.replace('.txt', '.mp4')
for clip_idx, ex in enumerate(ut.read_lines(meta_file)):
prev_time = last_prev_time
vid_idx = '%05d_%05d' % (meta_idx, clip_idx)
print ex
s, time = ex.split()
time = float(time)
if s == 'p':
label = 1
elif s == 'n':
label = 0
last_prev_time = time
else:
raise RuntimeError()
in_data.append((vid_file, time, label, vid_idx, im_dir, prev_time))
print 'Writing:', len(in_data), 'sequences'
meta_examples = ut.flatten(ut.parmap(extract_frames, in_data))
meta_examples = ut.shuffled_with_seed(meta_examples)
# add manu examples
db_files = sorted(ut.sys_with_stdout('find ../data/manu-press -name "*.hdf5"').split())
db_files = ut.shuffled_with_seed(db_files)
print 'Train fraction:', train_frac
num_train = int(train_frac * len(db_files))
db_train = db_files[:num_train]
db_test = db_files[num_train:]
train_db_examples = ut.flatten(ut.parmap(examples_from_db, [(x, im_dir) for x in db_train]))
test_db_examples = ut.flatten(ut.parmap(examples_from_db, [(x, im_dir) for x in db_test]))
print 'Number of db train examples:', len(train_db_examples)
print 'Number of meta examples:', len(meta_examples)
train_examples = ut.shuffled_with_seed(meta_examples + train_db_examples)
ut.write_lines(pj(out_dir, 'train.csv'), ['%s,%s,%d,%s' % x for x in train_examples])
test_examples = ut.shuffled_with_seed(test_db_examples)
ut.write_lines(pj(out_dir, 'test.csv'), ['%s,%s,%d,%s' % x for x in test_examples])
def make_tf(path):
tf_file = pj(path, 'train.tf')
if os.path.exists(tf_file):
os.remove(tf_file)
writer = tf.python_io.TFRecordWriter(tf_file)
lines = ut.shuffled_with_seed(ut.read_lines(pj(path, 'train.csv')))
print 'Number of examples:', len(lines)
for line in lines:
fname, prev_fname, label, _ = line.split(',')
label = int(label)
s = ut.read_file(fname, binary = True)
s_prev = ut.read_file(prev_fname, binary = True)
feat = {'im': tf.train.Feature(bytes_list = tf.train.BytesList(value = [s])),
'im_prev': tf.train.Feature(bytes_list = tf.train.BytesList(value = [s_prev])),
'label' : tf.train.Feature(int64_list = tf.train.Int64List(value = [label]))}
ex = tf.train.Example(features = tf.train.Features(feature = feat))
writer.write(ex.SerializeToString())
writer.close()
def read_example(rec_queue):
reader = tf.TFRecordReader()
k, s = reader.read(rec_queue)
feats = {
'im' : tf.FixedLenFeature([], dtype=tf.string),
'im_prev' : tf.FixedLenFeature([], dtype=tf.string),
'label' : tf.FixedLenFeature([], tf.int64)
}
example = tf.parse_single_example(s, features = feats)
im = tf.image.decode_png(example['im'])
im_prev = tf.image.decode_png(example['im_prev'])
im.set_shape((full_dim, full_dim, 3))
im_prev.set_shape((full_dim, full_dim, 3))
if 0:
#im = tf.random_crop(im, (crop_dim, crop_dim, 3))
im = tf.image.resize_images(im, [crop_dim, crop_dim])
im_prev = tf.image.resize_images(im_prev, [crop_dim, crop_dim])
else:
im_combo = tf.concat([im, im_prev], 2)
im_combo = tf.random_crop(im_combo, (crop_dim, crop_dim, 6))
if augment:
im_combo = tf.image.random_flip_left_right(im_combo)
im_combo = tf.image.random_flip_up_down(im_combo)
# See https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_input.py for
# an example of more aggressive brightness/contrast augmentation
# The loss stays very high if you do not convert to float first: I think this is because
# many values in the GelSight are close to 0/255, so they get saturated
im_combo = tf.cast(im_combo, tf.float32)
im_combo = tf.image.random_brightness(im_combo, max_delta=20)
im_combo = tf.image.random_contrast(im_combo, lower=0.9, upper=1.1)
#im_combo = tf.Print(im_combo, [tf.reduce_max(im_combo)])
im = im_combo[:, :, :3]
im_prev = im_combo[:, :, 3:]
label = example['label']
return im, im_prev, label
def read_data(path):
#queues = [tf.train.string_input_producer(tf_files)]
#example_list = [read_example(queue) for queue in queues]
tf_files = [pj(path, 'train.tf')]
queue = tf.train.string_input_producer(tf_files)
return tf.train.shuffle_batch(
read_example(queue), batch_size = batch_size,
capacity = 2000, min_after_dequeue = 500)
def normalize_ims(im):
if type(im) == type(np.array([])):
im = im.astype('float32')
else:
im = tf.cast(im, tf.float32)
return -1. + (2./255) * im
def shape(x, d = None):
s = x.get_shape().as_list()
return s if d is None else s[d]
# def make_model(ims, train):
# #ims = tf.Print(ims, ['ims before =', ims])
# ims = normalize_ims(ims)
# with slim.arg_scope(vgg.vgg_arg_scope()):
# logits, _ = vgg.vgg_16(ims, is_training = train, update_top_only = finetune_top_only, num_classes = 2)
# print shape(logits)
# return logits
def make_model(ims, ims_prev, train):
#ims = tf.Print(ims, ['ims before =', ims])
ims = normalize_ims(ims)
ims_prev = normalize_ims(ims_prev)
if model_style == 'diff':
logits, _ = vgg.vgg_dual_16(
ims - ims_prev, ims, is_training = train,
update_top_only = finetune_top_only,
num_classes = 2)
elif model_style == 'dual':
logits, _ = vgg.vgg_dual_16(
ims, ims_prev, is_training = train,
update_top_only = finetune_top_only,
num_classes = 2)
return logits
def moving_avg(name, x, vals = {}, avg_win_size = 100):
ut.add_dict_list(vals, name, x)
return np.mean(vals[name][-avg_win_size:])
def train(path, restore = False):
config = tf.ConfigProto(allow_soft_placement = True)
with tf.Graph().as_default(), tf.device(gpu), tf.Session(config = config) as sess:
global_step = tf.get_variable('global_step', [], initializer =
tf.constant_initializer(0), trainable = False)
ims, ims_prev, labels = read_data(path)
#tf.summary.image('im', ims)
logits = make_model(ims, ims_prev, train = True)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits, labels = labels)
#logits = tf.Print(logits, ['logits =', logits[0, :], labels[0]])
loss = tf.reduce_mean(loss)
tf.summary.scalar('loss', loss)
eq = tf.equal(tf.argmax(logits, 1), labels)
acc = tf.reduce_mean(tf.cast(eq, tf.float32))
tf.summary.scalar('acc', acc)
lr = base_lr * gamma**(global_step // step_size)
opt = tf.train.MomentumOptimizer(lr, 0.9)
train_op = opt.minimize(loss, global_step = global_step)
bn_ups = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
print 'Batch norm updates:', len(bn_ups)
train_op = tf.group(train_op, *bn_ups)
sess.run(tf.global_variables_initializer())
var_list = slim.get_variables_to_restore()
exclude = ['Adam', 'beta1_power', 'beta2_power', 'Momentum', 'global_step', 'logits', 'fc8', 'fc6_', 'fc7_', 'conv6']
var_list = [x for x in var_list if \
not any(name in x.name for name in exclude)]
train_dir = pj(path, 'training')
if restore:
tf.train.Saver().restore(sess, tf.train.latest_checkpoint(train_dir))
else:
tf.train.Saver(var_list).restore(sess, init_path)
#saver = tf.train.Saver()
tf.train.start_queue_runners(sess = sess)
summary_dir = ut.mkdir('../results/summary')
print 'tensorboard --logdir=%s' % summary_dir
sum_writer = tf.summary.FileWriter(summary_dir, sess.graph)
while True:
step = int(sess.run(global_step))
if (step == 10 or step % checkpoint_iters == 0) or step == train_iters - 1:
check_path = pj(ut.mkdir(train_dir), 'net.tf')
print 'Saving:', check_path
#saver.save(sess, check_path, global_step = global_step)
vs = slim.get_model_variables()
# print 'Variables:'
# for x in vs:
# print x.name
tf.train.Saver(vs).save(sess, check_path, global_step = global_step)
if step > train_iters:
break
merged = tf.summary.merge_all()
if step % 1 == 0:
[summary] = sess.run([merged])
sum_writer.add_summary(summary, step)
_, lr_val, loss_val, acc_val = sess.run([train_op, lr, loss, acc])
if step % 10 == 0:
print 'Iteration %d,' % step, 'lr = ', lr_val, 'loss:', \
moving_avg('loss', loss_val), 'acc:', moving_avg('acc', acc_val)
def | (todo = 'all',
vid_path = '/home/ao/Videos/Webcam',
#out_dir = '../results/press-data-v7/',
#out_dir = '../results/press-data-v8/',
#out_dir = '../results/press-data-v9/',
#out_dir = '../results/press-data-v10/',
out_dir = '../results/press-data-v11/',
restore = 0,
train_frac = 0.75):
todo = ut.make_todo(todo, 'im tf train test')
if 'im' in todo:
print vid_path
write_data(vid_path, out_dir, train_frac = train_frac)
if 'tf' in todo:
make_tf(out_dir)
if 'train' in todo:
train(out_dir, restore = restore)
if 'test' in todo:
test(out_dir)
# class NetClf:
# def __init__(self, model_file, gpu = '/cpu:0'):
# self.sess = None
# #self.train_path = train_path
# self.model_file = model_file
# self.gpu = gpu
# def __del__(self):
# self.deinit()
# def init(self):
# if self.sess is None:
# #self.model_file = tf.train.latest_checkpoint(self.train_path)
# print 'Restoring:',self.model_file
# with tf.device(self.gpu):
# tf.reset_default_graph()
# print self.gpu
# tf.Graph().as_default()
# #self.sess = tf.Session()
# #self.sess = tf.Session(config = tf.ConfigProto(allow_soft_placement=True))
# self.sess = tf.Session()
# #self.im_input = tf.placeholder(tf.float32, (1, crop_dim, crop_dim, 3), name = 'im_input')
# self.im_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_input')
# ims = tf.cast(self.im_input, tf.float32)
# self.logits = make_model(ims, train = False)
# # var_list = slim.get_variables_to_restore()
# # print 'Restoring:'
# # for x in var_list:
# # print x.name
# #self.sess.run(tf.global_variables_initializer())
# #tf.train.Saver(var_list).restore(self.sess, self.model_file)
# tf.train.Saver().restore(self.sess, self.model_file)
# tf.get_default_graph().finalize()
# def deinit(self):
# if self.sess is not None:
# self.sess.close()
# self.sess = None
# def format_im(self, im):
# # im = ig.scale(im, self.full_shape)
# # h_off = (im.shape[0] - crop_dim) // 2
# # w_off = (im.shape[1] - crop_dim) // 2
# # im = im[h_off : h_off + crop_dim, w_off : w_off + crop_dim]
# # return im
# return ig.scale(im, (crop_dim, crop_dim), 1)#.astype('float32')
# def predict(self, im):
# self.init()
# im = self.format_im(im)
# #print 'mean =', im.mean((0,1))
# ut.tic()
# [logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im[None]})
# print 'logits =', logits
# ut.toc()
# return ut.softmax(logits[0])[1]
class NetClf:
def __init__(self, model_file, gpu = '/cpu:0'):
self.sess = None
self.model_file = model_file
self.gpu = gpu
def __del__(self):
self.deinit()
def init(self):
if self.sess is None:
#self.model_file = tf.train.latest_checkpoint(self.train_path)
print 'Restoring:',self.model_file
with tf.device(self.gpu):
tf.reset_default_graph()
print self.gpu
tf.Graph().as_default()
#self.sess = tf.Session()
#self.sess = tf.Session(config = tf.ConfigProto(allow_soft_placement=True))
self.sess = tf.Session()
#self.im_input = tf.placeholder(tf.float32, (1, crop_dim, crop_dim, 3), name = 'im_input')
self.im_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_input')
self.im_prev_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_prev_input')
#ims = tf.cast(self.im_input, tf.float32)
self.logits = make_model(self.im_input, self.im_prev_input, train = False)
# var_list = slim.get_variables_to_restore()
# print 'Restoring:'
# for x in var_list:
# print x.name
#self.sess.run(tf.global_variables_initializer())
#tf.train.Saver(var_list).restore(self.sess, self.model_file)
tf.train.Saver().restore(self.sess, self.model_file)
tf.get_default_graph().finalize()
def deinit(self):
if self.sess is not None:
self.sess.close()
self.sess = None
def format_im(self, im):
return ig.scale(im, (crop_dim, crop_dim), 1)#.astype('float32')
def predict(self, im, im_prev):
self.init()
im = self.format_im(im)
im_prev = self.format_im(im_prev)
#ut.tic()
[logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im[None],
self.im_prev_input : im_prev[None]})
#print 'logits =', logits
#ut.toc()
return ut.softmax(logits[0])[1]
# class NetClf:
# #def __init__(self, train_path, gpu = '/cpu:0'):
# def __init__(self, train_path, gpu = '/gpu:0'):
# self.sess = None
# self.train_path = train_path
# self.gpu = gpu
# self.num_crops = 10
# def __del__(self):
# self.deinit()
# def init(self):
# if self.sess is None:
# self.model_file = tf.train.latest_checkpoint(self.train_path)
# print 'Restoring:',self.model_file
# with tf.device(self.gpu):
# tf.reset_default_graph()
# print self.gpu
# tf.Graph().as_default()
# self.sess = tf.Session()
# self.im_input = tf.placeholder(tf.uint8, (self.num_crops, crop_dim, crop_dim, 3), name = 'im_input')
# ims = tf.cast(self.im_input, tf.float32)
# self.logits = make_model(ims, train = False)
# tf.train.Saver().restore(self.sess, self.model_file)
# tf.get_default_graph().finalize()
# def deinit(self):
# if self.sess is not None:
# self.sess.close()
# self.sess = None
# def format_im(self, im):
# # im = ig.scale(im, self.full_shape)
# # h_off = (im.shape[0] - crop_dim) // 2
# # w_off = (im.shape[1] - crop_dim) // 2
# # im = im[h_off : h_off + crop_dim, w_off : w_off + crop_dim]
# # return im
# #return [ig.scale(im, (crop_dim, crop_dim), 1)]*10 #.astype('float32')
# dim = crop_dim
# dh = (im.shape[0] - dim)
# crops = np.zeros((self.num_crops, dim, dim, 3), dtype = np.uint8)
# crops[0] = ut.crop_center(im, dim)
# i = 1
# for y in np.linspace(0, dh, 3).astype('l'):
# dw = (im.shape[1] - dim)
# for x in np.linspace(0, dw, 3).astype('l'):
# crops[i] = im[y : y + dim, x : x + dim]
# i += 1
# return np.array(crops, 'float32')
# def predict(self, im):
# self.init()
# im = self.format_im(im)
# #print 'mean =', im.mean((0,1))
# ut.tic()
# [logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im})
# print 'logits =', logits
# ut.toc()
# return np.mean(map(ut.softmax, logits), axis = 0)[1]
def test(path, match_str = None):
train_dir = pj(path, 'training')
check_path = tf.train.latest_checkpoint(train_dir)
print 'Restoring from:', check_path
net = NetClf(check_path, gpu)
examples = []
for line in ut.read_lines(pj(path, 'test.csv')):
s = line.split(',')
#print match_str, s[3]
print s
if (match_str is not None) and (match_str not in s[3]):
print 'skipping'
continue
examples.append((s[0], s[1], int(s[2]), s[3]))
print 'Testing on:', len(examples), 'examples'
labels = []
probs = []
accs = []
table = []
for i, ex in enumerate(examples):
im_after = ig.load(ex[0])
im_prev = ig.load(ex[1])
label = ex[2]
prob = net.predict(im_after, im_prev)
#print prob, label
pred = int(prob >= 0.5)
labels.append(label)
probs.append(prob)
accs.append(pred == label)
if i < 50:
color = '#00DD00' if pred == label else '#DD0000'
row = [im_after, im_prev, ut.font_color_html('pred = %.3f' % prob, color), 'gt = %d' % label]
table.append(row)
labels = np.array(labels, 'bool')
probs = np.array(probs, 'float32')
accs = np.array(accs)
print 'Accuracy:', np.mean(accs)
print 'mAP:', sklearn.metrics.average_precision_score(labels, probs)
ig.show(table)
| run | identifier_name |
press.py | import numpy as np, tensorflow as tf, aolib.util as ut, aolib.img as ig, os, random, h5py, sklearn.metrics
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
import vgg
#vgg = nets.vgg
#import tensorflow.contrib.slim.nets as nets
#resnet_v1 = nets.resnet_v1
#import resnet, resnet_utils
pj = ut.pjoin
full_dim = 256
crop_dim = 224
train_iters = 10000
batch_size = 32
base_lr = 1e-3
#base_lr = 1e-3
#base_lr = 1e-2
gamma = 0.5
#step_size = 1000
step_size = 2500
#sample_dur_secs = 0.15
sample_dur_secs = 0.05
sample_fps = 60
gpu = '/gpu:0'
#init_path = '../results/resnet_v1_50.ckpt'
init_path = '../results/vgg_16.ckpt'
checkpoint_iters = 100
#finetune_top_only = True
finetune_top_only = False
model_style = 'diff'
augment = True
#augment = False
#model_style = 'dual'
def download_pretrained():
# https://github.com/tensorflow/models/tree/master/slim
ut.mkdir('../results')
# ut.sys_check('wget http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz '
# ' -O ../results/resnet_v1_50_2016_08_28.tar.gz')
# ut.sys_check('cd ../results; tar -xzf resnet_v1_50_2016_08_28.tar.gz')
ut.sys_check('wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz '
' -O ../results/vgg_16_2016_08_28.tar.gz')
ut.sys_check('cd ../results; tar -xzf vgg_16_2016_08_28.tar.gz')
# def extract_frames((vid_file, time, label, vid_idx, im_dir)):
# examples = []
# with ut.TmpDir() as tmp_dir:
# # ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# # (vid_file, full_dim, full_dim, time,
# # sample_dur_secs, sample_fps, tmp_dir))
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# sample_dur_secs, sample_fps, tmp_dir))
# for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
# im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
# ut.sys_check('cp %s %s' % (fname, im_file))
# examples.append((im_file, label))
# return examples
# def extract_frames((vid_file, time, label, vid_idx, im_dir)):
# examples = []
# with ut.TmpDir() as tmp_dir:
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# 0.05, sample_fps, tmp_dir))
# fname = sorted(ut.glob(pj(tmp_dir, '*.png')))[0]
# prev_file = pj(im_dir, 'prev_%s_%05d_%d.png' % (vid_idx, 0, label))
# ut.sys_check('cp %s %s' % (fname, prev_file))
# with ut.TmpDir() as tmp_dir:
# # ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# # (vid_file, full_dim, full_dim, time,
# # sample_dur_secs, sample_fps, tmp_dir))
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# sample_dur_secs, sample_fps, tmp_dir))
# for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
# im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
# ut.sys_check('cp %s %s' % (fname, im_file))
# examples.append((im_file, prev_file, label))
# return examples
def extract_frames((vid_file, time, label, vid_idx, im_dir, prev_free_time)):
examples = []
with ut.TmpDir() as tmp_dir:
free_dur = 0.1
ut.sys_check('ffmpeg -loglevel warning -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
(prev_free_time, vid_file, full_dim, full_dim, free_dur, sample_fps, tmp_dir))
#fname = sorted(ut.glob(pj(tmp_dir, '*.png')))[0]
assert len(ut.glob(pj(tmp_dir, '*.png'))), 'no frames for prev_free_time'
fname = random.choice(sorted(ut.glob(pj(tmp_dir, '*.png'))))
prev_file = pj(im_dir, 'prev_%s_%05d_%d.png' % (vid_idx, 0, label))
ut.sys_check('cp %s %s' % (fname, prev_file))
with ut.TmpDir() as tmp_dir:
# ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# (vid_file, full_dim, full_dim, time,
# sample_dur_secs, sample_fps, tmp_dir))
ut.sys_check('ffmpeg -loglevel warning -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
(time, vid_file, full_dim, full_dim,
sample_dur_secs, sample_fps, tmp_dir))
for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
ut.sys_check('cp %s %s' % (fname, im_file))
examples.append((im_file, prev_file, label, vid_file))
return examples
def examples_from_db((db_file, im_dir)):
examples = []
try:
with h5py.File(db_file, 'r') as db:
#print db.keys()
sc = lambda x : ig.scale(x, (full_dim, full_dim))
for x in ['A', 'B']:
im_file = ut.make_temp('.png', dir = im_dir)
prev_file = ut.make_temp('.png', dir = im_dir)
ig.save(im_file, sc(db['GelSight%s_image_post_gripping' % x]))
ig.save(prev_file, sc(db['GelSight%s_image_pre_gripping' % x]))
if 'is_gripping' in db:
label = int(np.array(db['is_gripping'])[0])
elif 'Is gripping?' in db:
label = int(np.array(db['Is gripping?'])[0])
else:
|
examples.append((im_file, prev_file, label, db_file))
except:
print 'Failed to open:', db_file
return examples
def write_data(vid_path, out_dir, train_frac = 0.75):
im_dir = ut.mkdir(pj(out_dir, 'ims'))
in_data = []
meta_files = sorted(ut.glob(vid_path, 'train', '*.txt'))
print 'meta files:'
for x in meta_files:
print x
print
for meta_idx, meta_file in enumerate(meta_files):
last_prev_time = 0.
vid_file = meta_file.replace('.txt', '.mp4')
for clip_idx, ex in enumerate(ut.read_lines(meta_file)):
prev_time = last_prev_time
vid_idx = '%05d_%05d' % (meta_idx, clip_idx)
print ex
s, time = ex.split()
time = float(time)
if s == 'p':
label = 1
elif s == 'n':
label = 0
last_prev_time = time
else:
raise RuntimeError()
in_data.append((vid_file, time, label, vid_idx, im_dir, prev_time))
print 'Writing:', len(in_data), 'sequences'
meta_examples = ut.flatten(ut.parmap(extract_frames, in_data))
meta_examples = ut.shuffled_with_seed(meta_examples)
# add manu examples
db_files = sorted(ut.sys_with_stdout('find ../data/manu-press -name "*.hdf5"').split())
db_files = ut.shuffled_with_seed(db_files)
print 'Train fraction:', train_frac
num_train = int(train_frac * len(db_files))
db_train = db_files[:num_train]
db_test = db_files[num_train:]
train_db_examples = ut.flatten(ut.parmap(examples_from_db, [(x, im_dir) for x in db_train]))
test_db_examples = ut.flatten(ut.parmap(examples_from_db, [(x, im_dir) for x in db_test]))
print 'Number of db train examples:', len(train_db_examples)
print 'Number of meta examples:', len(meta_examples)
train_examples = ut.shuffled_with_seed(meta_examples + train_db_examples)
ut.write_lines(pj(out_dir, 'train.csv'), ['%s,%s,%d,%s' % x for x in train_examples])
test_examples = ut.shuffled_with_seed(test_db_examples)
ut.write_lines(pj(out_dir, 'test.csv'), ['%s,%s,%d,%s' % x for x in test_examples])
def make_tf(path):
tf_file = pj(path, 'train.tf')
if os.path.exists(tf_file):
os.remove(tf_file)
writer = tf.python_io.TFRecordWriter(tf_file)
lines = ut.shuffled_with_seed(ut.read_lines(pj(path, 'train.csv')))
print 'Number of examples:', len(lines)
for line in lines:
fname, prev_fname, label, _ = line.split(',')
label = int(label)
s = ut.read_file(fname, binary = True)
s_prev = ut.read_file(prev_fname, binary = True)
feat = {'im': tf.train.Feature(bytes_list = tf.train.BytesList(value = [s])),
'im_prev': tf.train.Feature(bytes_list = tf.train.BytesList(value = [s_prev])),
'label' : tf.train.Feature(int64_list = tf.train.Int64List(value = [label]))}
ex = tf.train.Example(features = tf.train.Features(feature = feat))
writer.write(ex.SerializeToString())
writer.close()
def read_example(rec_queue):
reader = tf.TFRecordReader()
k, s = reader.read(rec_queue)
feats = {
'im' : tf.FixedLenFeature([], dtype=tf.string),
'im_prev' : tf.FixedLenFeature([], dtype=tf.string),
'label' : tf.FixedLenFeature([], tf.int64)
}
example = tf.parse_single_example(s, features = feats)
im = tf.image.decode_png(example['im'])
im_prev = tf.image.decode_png(example['im_prev'])
im.set_shape((full_dim, full_dim, 3))
im_prev.set_shape((full_dim, full_dim, 3))
if 0:
#im = tf.random_crop(im, (crop_dim, crop_dim, 3))
im = tf.image.resize_images(im, [crop_dim, crop_dim])
im_prev = tf.image.resize_images(im_prev, [crop_dim, crop_dim])
else:
im_combo = tf.concat([im, im_prev], 2)
im_combo = tf.random_crop(im_combo, (crop_dim, crop_dim, 6))
if augment:
im_combo = tf.image.random_flip_left_right(im_combo)
im_combo = tf.image.random_flip_up_down(im_combo)
# See https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_input.py for
# an example of more aggressive brightness/contrast augmentation
# The loss stays very high if you do not convert to float first: I think this is because
# many values in the GelSight are close to 0/255, so they get saturated
im_combo = tf.cast(im_combo, tf.float32)
im_combo = tf.image.random_brightness(im_combo, max_delta=20)
im_combo = tf.image.random_contrast(im_combo, lower=0.9, upper=1.1)
#im_combo = tf.Print(im_combo, [tf.reduce_max(im_combo)])
im = im_combo[:, :, :3]
im_prev = im_combo[:, :, 3:]
label = example['label']
return im, im_prev, label
def read_data(path):
#queues = [tf.train.string_input_producer(tf_files)]
#example_list = [read_example(queue) for queue in queues]
tf_files = [pj(path, 'train.tf')]
queue = tf.train.string_input_producer(tf_files)
return tf.train.shuffle_batch(
read_example(queue), batch_size = batch_size,
capacity = 2000, min_after_dequeue = 500)
def normalize_ims(im):
if type(im) == type(np.array([])):
im = im.astype('float32')
else:
im = tf.cast(im, tf.float32)
return -1. + (2./255) * im
def shape(x, d = None):
s = x.get_shape().as_list()
return s if d is None else s[d]
# def make_model(ims, train):
# #ims = tf.Print(ims, ['ims before =', ims])
# ims = normalize_ims(ims)
# with slim.arg_scope(vgg.vgg_arg_scope()):
# logits, _ = vgg.vgg_16(ims, is_training = train, update_top_only = finetune_top_only, num_classes = 2)
# print shape(logits)
# return logits
def make_model(ims, ims_prev, train):
#ims = tf.Print(ims, ['ims before =', ims])
ims = normalize_ims(ims)
ims_prev = normalize_ims(ims_prev)
if model_style == 'diff':
logits, _ = vgg.vgg_dual_16(
ims - ims_prev, ims, is_training = train,
update_top_only = finetune_top_only,
num_classes = 2)
elif model_style == 'dual':
logits, _ = vgg.vgg_dual_16(
ims, ims_prev, is_training = train,
update_top_only = finetune_top_only,
num_classes = 2)
return logits
def moving_avg(name, x, vals = {}, avg_win_size = 100):
ut.add_dict_list(vals, name, x)
return np.mean(vals[name][-avg_win_size:])
def train(path, restore = False):
config = tf.ConfigProto(allow_soft_placement = True)
with tf.Graph().as_default(), tf.device(gpu), tf.Session(config = config) as sess:
global_step = tf.get_variable('global_step', [], initializer =
tf.constant_initializer(0), trainable = False)
ims, ims_prev, labels = read_data(path)
#tf.summary.image('im', ims)
logits = make_model(ims, ims_prev, train = True)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits, labels = labels)
#logits = tf.Print(logits, ['logits =', logits[0, :], labels[0]])
loss = tf.reduce_mean(loss)
tf.summary.scalar('loss', loss)
eq = tf.equal(tf.argmax(logits, 1), labels)
acc = tf.reduce_mean(tf.cast(eq, tf.float32))
tf.summary.scalar('acc', acc)
lr = base_lr * gamma**(global_step // step_size)
opt = tf.train.MomentumOptimizer(lr, 0.9)
train_op = opt.minimize(loss, global_step = global_step)
bn_ups = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
print 'Batch norm updates:', len(bn_ups)
train_op = tf.group(train_op, *bn_ups)
sess.run(tf.global_variables_initializer())
var_list = slim.get_variables_to_restore()
exclude = ['Adam', 'beta1_power', 'beta2_power', 'Momentum', 'global_step', 'logits', 'fc8', 'fc6_', 'fc7_', 'conv6']
var_list = [x for x in var_list if \
not any(name in x.name for name in exclude)]
train_dir = pj(path, 'training')
if restore:
tf.train.Saver().restore(sess, tf.train.latest_checkpoint(train_dir))
else:
tf.train.Saver(var_list).restore(sess, init_path)
#saver = tf.train.Saver()
tf.train.start_queue_runners(sess = sess)
summary_dir = ut.mkdir('../results/summary')
print 'tensorboard --logdir=%s' % summary_dir
sum_writer = tf.summary.FileWriter(summary_dir, sess.graph)
while True:
step = int(sess.run(global_step))
if (step == 10 or step % checkpoint_iters == 0) or step == train_iters - 1:
check_path = pj(ut.mkdir(train_dir), 'net.tf')
print 'Saving:', check_path
#saver.save(sess, check_path, global_step = global_step)
vs = slim.get_model_variables()
# print 'Variables:'
# for x in vs:
# print x.name
tf.train.Saver(vs).save(sess, check_path, global_step = global_step)
if step > train_iters:
break
merged = tf.summary.merge_all()
if step % 1 == 0:
[summary] = sess.run([merged])
sum_writer.add_summary(summary, step)
_, lr_val, loss_val, acc_val = sess.run([train_op, lr, loss, acc])
if step % 10 == 0:
print 'Iteration %d,' % step, 'lr = ', lr_val, 'loss:', \
moving_avg('loss', loss_val), 'acc:', moving_avg('acc', acc_val)
def run(todo = 'all',
vid_path = '/home/ao/Videos/Webcam',
#out_dir = '../results/press-data-v7/',
#out_dir = '../results/press-data-v8/',
#out_dir = '../results/press-data-v9/',
#out_dir = '../results/press-data-v10/',
out_dir = '../results/press-data-v11/',
restore = 0,
train_frac = 0.75):
todo = ut.make_todo(todo, 'im tf train test')
if 'im' in todo:
print vid_path
write_data(vid_path, out_dir, train_frac = train_frac)
if 'tf' in todo:
make_tf(out_dir)
if 'train' in todo:
train(out_dir, restore = restore)
if 'test' in todo:
test(out_dir)
# class NetClf:
# def __init__(self, model_file, gpu = '/cpu:0'):
# self.sess = None
# #self.train_path = train_path
# self.model_file = model_file
# self.gpu = gpu
# def __del__(self):
# self.deinit()
# def init(self):
# if self.sess is None:
# #self.model_file = tf.train.latest_checkpoint(self.train_path)
# print 'Restoring:',self.model_file
# with tf.device(self.gpu):
# tf.reset_default_graph()
# print self.gpu
# tf.Graph().as_default()
# #self.sess = tf.Session()
# #self.sess = tf.Session(config = tf.ConfigProto(allow_soft_placement=True))
# self.sess = tf.Session()
# #self.im_input = tf.placeholder(tf.float32, (1, crop_dim, crop_dim, 3), name = 'im_input')
# self.im_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_input')
# ims = tf.cast(self.im_input, tf.float32)
# self.logits = make_model(ims, train = False)
# # var_list = slim.get_variables_to_restore()
# # print 'Restoring:'
# # for x in var_list:
# # print x.name
# #self.sess.run(tf.global_variables_initializer())
# #tf.train.Saver(var_list).restore(self.sess, self.model_file)
# tf.train.Saver().restore(self.sess, self.model_file)
# tf.get_default_graph().finalize()
# def deinit(self):
# if self.sess is not None:
# self.sess.close()
# self.sess = None
# def format_im(self, im):
# # im = ig.scale(im, self.full_shape)
# # h_off = (im.shape[0] - crop_dim) // 2
# # w_off = (im.shape[1] - crop_dim) // 2
# # im = im[h_off : h_off + crop_dim, w_off : w_off + crop_dim]
# # return im
# return ig.scale(im, (crop_dim, crop_dim), 1)#.astype('float32')
# def predict(self, im):
# self.init()
# im = self.format_im(im)
# #print 'mean =', im.mean((0,1))
# ut.tic()
# [logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im[None]})
# print 'logits =', logits
# ut.toc()
# return ut.softmax(logits[0])[1]
class NetClf:
def __init__(self, model_file, gpu = '/cpu:0'):
self.sess = None
self.model_file = model_file
self.gpu = gpu
def __del__(self):
self.deinit()
def init(self):
if self.sess is None:
#self.model_file = tf.train.latest_checkpoint(self.train_path)
print 'Restoring:',self.model_file
with tf.device(self.gpu):
tf.reset_default_graph()
print self.gpu
tf.Graph().as_default()
#self.sess = tf.Session()
#self.sess = tf.Session(config = tf.ConfigProto(allow_soft_placement=True))
self.sess = tf.Session()
#self.im_input = tf.placeholder(tf.float32, (1, crop_dim, crop_dim, 3), name = 'im_input')
self.im_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_input')
self.im_prev_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_prev_input')
#ims = tf.cast(self.im_input, tf.float32)
self.logits = make_model(self.im_input, self.im_prev_input, train = False)
# var_list = slim.get_variables_to_restore()
# print 'Restoring:'
# for x in var_list:
# print x.name
#self.sess.run(tf.global_variables_initializer())
#tf.train.Saver(var_list).restore(self.sess, self.model_file)
tf.train.Saver().restore(self.sess, self.model_file)
tf.get_default_graph().finalize()
def deinit(self):
if self.sess is not None:
self.sess.close()
self.sess = None
def format_im(self, im):
return ig.scale(im, (crop_dim, crop_dim), 1)#.astype('float32')
def predict(self, im, im_prev):
self.init()
im = self.format_im(im)
im_prev = self.format_im(im_prev)
#ut.tic()
[logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im[None],
self.im_prev_input : im_prev[None]})
#print 'logits =', logits
#ut.toc()
return ut.softmax(logits[0])[1]
# class NetClf:
# #def __init__(self, train_path, gpu = '/cpu:0'):
# def __init__(self, train_path, gpu = '/gpu:0'):
# self.sess = None
# self.train_path = train_path
# self.gpu = gpu
# self.num_crops = 10
# def __del__(self):
# self.deinit()
# def init(self):
# if self.sess is None:
# self.model_file = tf.train.latest_checkpoint(self.train_path)
# print 'Restoring:',self.model_file
# with tf.device(self.gpu):
# tf.reset_default_graph()
# print self.gpu
# tf.Graph().as_default()
# self.sess = tf.Session()
# self.im_input = tf.placeholder(tf.uint8, (self.num_crops, crop_dim, crop_dim, 3), name = 'im_input')
# ims = tf.cast(self.im_input, tf.float32)
# self.logits = make_model(ims, train = False)
# tf.train.Saver().restore(self.sess, self.model_file)
# tf.get_default_graph().finalize()
# def deinit(self):
# if self.sess is not None:
# self.sess.close()
# self.sess = None
# def format_im(self, im):
# # im = ig.scale(im, self.full_shape)
# # h_off = (im.shape[0] - crop_dim) // 2
# # w_off = (im.shape[1] - crop_dim) // 2
# # im = im[h_off : h_off + crop_dim, w_off : w_off + crop_dim]
# # return im
# #return [ig.scale(im, (crop_dim, crop_dim), 1)]*10 #.astype('float32')
# dim = crop_dim
# dh = (im.shape[0] - dim)
# crops = np.zeros((self.num_crops, dim, dim, 3), dtype = np.uint8)
# crops[0] = ut.crop_center(im, dim)
# i = 1
# for y in np.linspace(0, dh, 3).astype('l'):
# dw = (im.shape[1] - dim)
# for x in np.linspace(0, dw, 3).astype('l'):
# crops[i] = im[y : y + dim, x : x + dim]
# i += 1
# return np.array(crops, 'float32')
# def predict(self, im):
# self.init()
# im = self.format_im(im)
# #print 'mean =', im.mean((0,1))
# ut.tic()
# [logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im})
# print 'logits =', logits
# ut.toc()
# return np.mean(map(ut.softmax, logits), axis = 0)[1]
def test(path, match_str = None):
train_dir = pj(path, 'training')
check_path = tf.train.latest_checkpoint(train_dir)
print 'Restoring from:', check_path
net = NetClf(check_path, gpu)
examples = []
for line in ut.read_lines(pj(path, 'test.csv')):
s = line.split(',')
#print match_str, s[3]
print s
if (match_str is not None) and (match_str not in s[3]):
print 'skipping'
continue
examples.append((s[0], s[1], int(s[2]), s[3]))
print 'Testing on:', len(examples), 'examples'
labels = []
probs = []
accs = []
table = []
for i, ex in enumerate(examples):
im_after = ig.load(ex[0])
im_prev = ig.load(ex[1])
label = ex[2]
prob = net.predict(im_after, im_prev)
#print prob, label
pred = int(prob >= 0.5)
labels.append(label)
probs.append(prob)
accs.append(pred == label)
if i < 50:
color = '#00DD00' if pred == label else '#DD0000'
row = [im_after, im_prev, ut.font_color_html('pred = %.3f' % prob, color), 'gt = %d' % label]
table.append(row)
labels = np.array(labels, 'bool')
probs = np.array(probs, 'float32')
accs = np.array(accs)
print 'Accuracy:', np.mean(accs)
print 'mAP:', sklearn.metrics.average_precision_score(labels, probs)
ig.show(table)
| raise RuntimeError('No label!') | conditional_block |
press.py | import numpy as np, tensorflow as tf, aolib.util as ut, aolib.img as ig, os, random, h5py, sklearn.metrics
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
import vgg
#vgg = nets.vgg
#import tensorflow.contrib.slim.nets as nets
#resnet_v1 = nets.resnet_v1
#import resnet, resnet_utils
pj = ut.pjoin
full_dim = 256
crop_dim = 224
train_iters = 10000
batch_size = 32
base_lr = 1e-3
#base_lr = 1e-3
#base_lr = 1e-2
gamma = 0.5
#step_size = 1000
step_size = 2500
#sample_dur_secs = 0.15
sample_dur_secs = 0.05
sample_fps = 60
gpu = '/gpu:0'
#init_path = '../results/resnet_v1_50.ckpt'
init_path = '../results/vgg_16.ckpt'
checkpoint_iters = 100
#finetune_top_only = True
finetune_top_only = False
model_style = 'diff'
augment = True
#augment = False
#model_style = 'dual'
def download_pretrained():
# https://github.com/tensorflow/models/tree/master/slim
ut.mkdir('../results')
# ut.sys_check('wget http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz '
# ' -O ../results/resnet_v1_50_2016_08_28.tar.gz')
# ut.sys_check('cd ../results; tar -xzf resnet_v1_50_2016_08_28.tar.gz')
ut.sys_check('wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz '
' -O ../results/vgg_16_2016_08_28.tar.gz')
ut.sys_check('cd ../results; tar -xzf vgg_16_2016_08_28.tar.gz')
# def extract_frames((vid_file, time, label, vid_idx, im_dir)):
# examples = []
# with ut.TmpDir() as tmp_dir:
# # ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# # (vid_file, full_dim, full_dim, time,
# # sample_dur_secs, sample_fps, tmp_dir))
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# sample_dur_secs, sample_fps, tmp_dir))
# for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
# im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
# ut.sys_check('cp %s %s' % (fname, im_file))
# examples.append((im_file, label))
# return examples
# def extract_frames((vid_file, time, label, vid_idx, im_dir)):
# examples = []
# with ut.TmpDir() as tmp_dir:
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# 0.05, sample_fps, tmp_dir))
# fname = sorted(ut.glob(pj(tmp_dir, '*.png')))[0]
# prev_file = pj(im_dir, 'prev_%s_%05d_%d.png' % (vid_idx, 0, label))
# ut.sys_check('cp %s %s' % (fname, prev_file))
# with ut.TmpDir() as tmp_dir:
# # ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# # (vid_file, full_dim, full_dim, time,
# # sample_dur_secs, sample_fps, tmp_dir))
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# sample_dur_secs, sample_fps, tmp_dir))
# for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
# im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
# ut.sys_check('cp %s %s' % (fname, im_file))
# examples.append((im_file, prev_file, label))
# return examples
def extract_frames((vid_file, time, label, vid_idx, im_dir, prev_free_time)):
examples = []
with ut.TmpDir() as tmp_dir:
free_dur = 0.1
ut.sys_check('ffmpeg -loglevel warning -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
(prev_free_time, vid_file, full_dim, full_dim, free_dur, sample_fps, tmp_dir))
#fname = sorted(ut.glob(pj(tmp_dir, '*.png')))[0]
assert len(ut.glob(pj(tmp_dir, '*.png'))), 'no frames for prev_free_time'
fname = random.choice(sorted(ut.glob(pj(tmp_dir, '*.png'))))
prev_file = pj(im_dir, 'prev_%s_%05d_%d.png' % (vid_idx, 0, label))
ut.sys_check('cp %s %s' % (fname, prev_file))
with ut.TmpDir() as tmp_dir:
# ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# (vid_file, full_dim, full_dim, time,
# sample_dur_secs, sample_fps, tmp_dir))
ut.sys_check('ffmpeg -loglevel warning -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
(time, vid_file, full_dim, full_dim,
sample_dur_secs, sample_fps, tmp_dir))
for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
ut.sys_check('cp %s %s' % (fname, im_file))
examples.append((im_file, prev_file, label, vid_file))
return examples
def examples_from_db((db_file, im_dir)):
examples = []
try:
with h5py.File(db_file, 'r') as db:
#print db.keys()
sc = lambda x : ig.scale(x, (full_dim, full_dim))
for x in ['A', 'B']:
im_file = ut.make_temp('.png', dir = im_dir)
prev_file = ut.make_temp('.png', dir = im_dir)
ig.save(im_file, sc(db['GelSight%s_image_post_gripping' % x]))
ig.save(prev_file, sc(db['GelSight%s_image_pre_gripping' % x]))
if 'is_gripping' in db:
label = int(np.array(db['is_gripping'])[0])
elif 'Is gripping?' in db:
label = int(np.array(db['Is gripping?'])[0])
else:
raise RuntimeError('No label!')
examples.append((im_file, prev_file, label, db_file))
except:
print 'Failed to open:', db_file
return examples
def write_data(vid_path, out_dir, train_frac = 0.75):
im_dir = ut.mkdir(pj(out_dir, 'ims'))
in_data = []
meta_files = sorted(ut.glob(vid_path, 'train', '*.txt'))
print 'meta files:'
for x in meta_files:
print x
print
for meta_idx, meta_file in enumerate(meta_files):
last_prev_time = 0.
vid_file = meta_file.replace('.txt', '.mp4')
for clip_idx, ex in enumerate(ut.read_lines(meta_file)):
prev_time = last_prev_time
vid_idx = '%05d_%05d' % (meta_idx, clip_idx)
print ex
s, time = ex.split()
time = float(time)
if s == 'p':
label = 1
elif s == 'n':
label = 0
last_prev_time = time
else:
raise RuntimeError()
in_data.append((vid_file, time, label, vid_idx, im_dir, prev_time))
print 'Writing:', len(in_data), 'sequences'
meta_examples = ut.flatten(ut.parmap(extract_frames, in_data))
meta_examples = ut.shuffled_with_seed(meta_examples)
# add manu examples
db_files = sorted(ut.sys_with_stdout('find ../data/manu-press -name "*.hdf5"').split())
db_files = ut.shuffled_with_seed(db_files)
print 'Train fraction:', train_frac
num_train = int(train_frac * len(db_files))
db_train = db_files[:num_train]
db_test = db_files[num_train:]
train_db_examples = ut.flatten(ut.parmap(examples_from_db, [(x, im_dir) for x in db_train]))
test_db_examples = ut.flatten(ut.parmap(examples_from_db, [(x, im_dir) for x in db_test]))
print 'Number of db train examples:', len(train_db_examples)
print 'Number of meta examples:', len(meta_examples)
train_examples = ut.shuffled_with_seed(meta_examples + train_db_examples)
ut.write_lines(pj(out_dir, 'train.csv'), ['%s,%s,%d,%s' % x for x in train_examples])
test_examples = ut.shuffled_with_seed(test_db_examples)
ut.write_lines(pj(out_dir, 'test.csv'), ['%s,%s,%d,%s' % x for x in test_examples])
def make_tf(path):
tf_file = pj(path, 'train.tf')
if os.path.exists(tf_file):
os.remove(tf_file)
writer = tf.python_io.TFRecordWriter(tf_file)
lines = ut.shuffled_with_seed(ut.read_lines(pj(path, 'train.csv')))
print 'Number of examples:', len(lines)
for line in lines:
fname, prev_fname, label, _ = line.split(',')
label = int(label)
s = ut.read_file(fname, binary = True)
s_prev = ut.read_file(prev_fname, binary = True)
feat = {'im': tf.train.Feature(bytes_list = tf.train.BytesList(value = [s])),
'im_prev': tf.train.Feature(bytes_list = tf.train.BytesList(value = [s_prev])),
'label' : tf.train.Feature(int64_list = tf.train.Int64List(value = [label]))}
ex = tf.train.Example(features = tf.train.Features(feature = feat))
writer.write(ex.SerializeToString())
writer.close()
def read_example(rec_queue):
reader = tf.TFRecordReader()
k, s = reader.read(rec_queue)
feats = {
'im' : tf.FixedLenFeature([], dtype=tf.string),
'im_prev' : tf.FixedLenFeature([], dtype=tf.string),
'label' : tf.FixedLenFeature([], tf.int64)
}
example = tf.parse_single_example(s, features = feats)
im = tf.image.decode_png(example['im'])
im_prev = tf.image.decode_png(example['im_prev'])
im.set_shape((full_dim, full_dim, 3))
im_prev.set_shape((full_dim, full_dim, 3))
if 0:
#im = tf.random_crop(im, (crop_dim, crop_dim, 3))
im = tf.image.resize_images(im, [crop_dim, crop_dim])
im_prev = tf.image.resize_images(im_prev, [crop_dim, crop_dim])
else:
im_combo = tf.concat([im, im_prev], 2)
im_combo = tf.random_crop(im_combo, (crop_dim, crop_dim, 6))
if augment:
im_combo = tf.image.random_flip_left_right(im_combo)
im_combo = tf.image.random_flip_up_down(im_combo)
# See https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_input.py for
# an example of more aggressive brightness/contrast augmentation
# The loss stays very high if you do not convert to float first: I think this is because
# many values in the GelSight are close to 0/255, so they get saturated
im_combo = tf.cast(im_combo, tf.float32)
im_combo = tf.image.random_brightness(im_combo, max_delta=20)
im_combo = tf.image.random_contrast(im_combo, lower=0.9, upper=1.1)
#im_combo = tf.Print(im_combo, [tf.reduce_max(im_combo)])
im = im_combo[:, :, :3]
im_prev = im_combo[:, :, 3:]
label = example['label']
return im, im_prev, label
def read_data(path):
#queues = [tf.train.string_input_producer(tf_files)]
#example_list = [read_example(queue) for queue in queues]
tf_files = [pj(path, 'train.tf')]
queue = tf.train.string_input_producer(tf_files)
return tf.train.shuffle_batch(
read_example(queue), batch_size = batch_size,
capacity = 2000, min_after_dequeue = 500)
def normalize_ims(im):
if type(im) == type(np.array([])):
im = im.astype('float32')
else:
im = tf.cast(im, tf.float32)
return -1. + (2./255) * im
def shape(x, d = None):
s = x.get_shape().as_list()
return s if d is None else s[d]
# def make_model(ims, train):
# #ims = tf.Print(ims, ['ims before =', ims])
# ims = normalize_ims(ims)
# with slim.arg_scope(vgg.vgg_arg_scope()):
# logits, _ = vgg.vgg_16(ims, is_training = train, update_top_only = finetune_top_only, num_classes = 2)
# print shape(logits)
# return logits
def make_model(ims, ims_prev, train):
#ims = tf.Print(ims, ['ims before =', ims])
ims = normalize_ims(ims)
ims_prev = normalize_ims(ims_prev)
if model_style == 'diff':
logits, _ = vgg.vgg_dual_16(
ims - ims_prev, ims, is_training = train,
update_top_only = finetune_top_only,
num_classes = 2)
elif model_style == 'dual':
logits, _ = vgg.vgg_dual_16(
ims, ims_prev, is_training = train,
update_top_only = finetune_top_only,
num_classes = 2)
return logits
def moving_avg(name, x, vals = {}, avg_win_size = 100):
ut.add_dict_list(vals, name, x)
return np.mean(vals[name][-avg_win_size:])
def train(path, restore = False):
config = tf.ConfigProto(allow_soft_placement = True)
with tf.Graph().as_default(), tf.device(gpu), tf.Session(config = config) as sess:
global_step = tf.get_variable('global_step', [], initializer =
tf.constant_initializer(0), trainable = False)
ims, ims_prev, labels = read_data(path)
#tf.summary.image('im', ims)
logits = make_model(ims, ims_prev, train = True)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits, labels = labels)
#logits = tf.Print(logits, ['logits =', logits[0, :], labels[0]])
loss = tf.reduce_mean(loss)
tf.summary.scalar('loss', loss)
eq = tf.equal(tf.argmax(logits, 1), labels)
acc = tf.reduce_mean(tf.cast(eq, tf.float32))
tf.summary.scalar('acc', acc)
lr = base_lr * gamma**(global_step // step_size)
opt = tf.train.MomentumOptimizer(lr, 0.9)
train_op = opt.minimize(loss, global_step = global_step)
bn_ups = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
print 'Batch norm updates:', len(bn_ups)
train_op = tf.group(train_op, *bn_ups)
sess.run(tf.global_variables_initializer())
var_list = slim.get_variables_to_restore()
exclude = ['Adam', 'beta1_power', 'beta2_power', 'Momentum', 'global_step', 'logits', 'fc8', 'fc6_', 'fc7_', 'conv6']
var_list = [x for x in var_list if \
not any(name in x.name for name in exclude)]
train_dir = pj(path, 'training')
if restore:
tf.train.Saver().restore(sess, tf.train.latest_checkpoint(train_dir))
else:
tf.train.Saver(var_list).restore(sess, init_path)
#saver = tf.train.Saver()
tf.train.start_queue_runners(sess = sess)
summary_dir = ut.mkdir('../results/summary')
print 'tensorboard --logdir=%s' % summary_dir
sum_writer = tf.summary.FileWriter(summary_dir, sess.graph)
while True:
step = int(sess.run(global_step))
if (step == 10 or step % checkpoint_iters == 0) or step == train_iters - 1:
check_path = pj(ut.mkdir(train_dir), 'net.tf')
print 'Saving:', check_path
#saver.save(sess, check_path, global_step = global_step)
vs = slim.get_model_variables()
# print 'Variables:'
# for x in vs:
# print x.name
tf.train.Saver(vs).save(sess, check_path, global_step = global_step)
if step > train_iters:
break
merged = tf.summary.merge_all()
if step % 1 == 0:
[summary] = sess.run([merged])
sum_writer.add_summary(summary, step)
_, lr_val, loss_val, acc_val = sess.run([train_op, lr, loss, acc])
if step % 10 == 0:
print 'Iteration %d,' % step, 'lr = ', lr_val, 'loss:', \
moving_avg('loss', loss_val), 'acc:', moving_avg('acc', acc_val)
def run(todo = 'all',
vid_path = '/home/ao/Videos/Webcam',
#out_dir = '../results/press-data-v7/',
#out_dir = '../results/press-data-v8/',
#out_dir = '../results/press-data-v9/',
#out_dir = '../results/press-data-v10/',
out_dir = '../results/press-data-v11/',
restore = 0,
train_frac = 0.75):
todo = ut.make_todo(todo, 'im tf train test')
if 'im' in todo:
print vid_path
write_data(vid_path, out_dir, train_frac = train_frac)
if 'tf' in todo:
make_tf(out_dir)
if 'train' in todo:
train(out_dir, restore = restore)
if 'test' in todo:
test(out_dir)
# class NetClf:
# def __init__(self, model_file, gpu = '/cpu:0'):
# self.sess = None
# #self.train_path = train_path
# self.model_file = model_file
# self.gpu = gpu
# def __del__(self):
# self.deinit()
# def init(self):
# if self.sess is None:
# #self.model_file = tf.train.latest_checkpoint(self.train_path)
# print 'Restoring:',self.model_file
# with tf.device(self.gpu):
# tf.reset_default_graph()
# print self.gpu
# tf.Graph().as_default()
# #self.sess = tf.Session()
# #self.sess = tf.Session(config = tf.ConfigProto(allow_soft_placement=True))
# self.sess = tf.Session()
# #self.im_input = tf.placeholder(tf.float32, (1, crop_dim, crop_dim, 3), name = 'im_input')
# self.im_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_input')
# ims = tf.cast(self.im_input, tf.float32)
# self.logits = make_model(ims, train = False)
# # var_list = slim.get_variables_to_restore()
# # print 'Restoring:'
# # for x in var_list:
# # print x.name
# #self.sess.run(tf.global_variables_initializer())
# #tf.train.Saver(var_list).restore(self.sess, self.model_file)
# tf.train.Saver().restore(self.sess, self.model_file)
# tf.get_default_graph().finalize()
# def deinit(self):
# if self.sess is not None:
# self.sess.close()
# self.sess = None
# def format_im(self, im):
# # im = ig.scale(im, self.full_shape)
# # h_off = (im.shape[0] - crop_dim) // 2
# # w_off = (im.shape[1] - crop_dim) // 2
# # im = im[h_off : h_off + crop_dim, w_off : w_off + crop_dim]
# # return im
# return ig.scale(im, (crop_dim, crop_dim), 1)#.astype('float32')
# def predict(self, im):
# self.init()
# im = self.format_im(im)
# #print 'mean =', im.mean((0,1))
# ut.tic()
# [logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im[None]})
# print 'logits =', logits
# ut.toc()
# return ut.softmax(logits[0])[1]
class NetClf:
def __init__(self, model_file, gpu = '/cpu:0'):
self.sess = None
self.model_file = model_file
self.gpu = gpu
def __del__(self):
self.deinit()
def init(self):
if self.sess is None:
#self.model_file = tf.train.latest_checkpoint(self.train_path)
print 'Restoring:',self.model_file
with tf.device(self.gpu):
tf.reset_default_graph()
print self.gpu
tf.Graph().as_default()
#self.sess = tf.Session()
#self.sess = tf.Session(config = tf.ConfigProto(allow_soft_placement=True))
self.sess = tf.Session()
#self.im_input = tf.placeholder(tf.float32, (1, crop_dim, crop_dim, 3), name = 'im_input')
self.im_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_input')
self.im_prev_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_prev_input')
#ims = tf.cast(self.im_input, tf.float32)
self.logits = make_model(self.im_input, self.im_prev_input, train = False)
# var_list = slim.get_variables_to_restore()
# print 'Restoring:'
# for x in var_list:
# print x.name
#self.sess.run(tf.global_variables_initializer())
#tf.train.Saver(var_list).restore(self.sess, self.model_file)
tf.train.Saver().restore(self.sess, self.model_file)
tf.get_default_graph().finalize()
def deinit(self):
if self.sess is not None:
self.sess.close()
self.sess = None
def format_im(self, im):
return ig.scale(im, (crop_dim, crop_dim), 1)#.astype('float32')
def predict(self, im, im_prev):
self.init()
im = self.format_im(im)
im_prev = self.format_im(im_prev)
#ut.tic()
[logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im[None],
self.im_prev_input : im_prev[None]})
#print 'logits =', logits
#ut.toc()
return ut.softmax(logits[0])[1]
# class NetClf:
# #def __init__(self, train_path, gpu = '/cpu:0'):
# def __init__(self, train_path, gpu = '/gpu:0'):
# self.sess = None
# self.train_path = train_path
# self.gpu = gpu
# self.num_crops = 10
# def __del__(self):
# self.deinit()
# def init(self):
# if self.sess is None:
# self.model_file = tf.train.latest_checkpoint(self.train_path)
# print 'Restoring:',self.model_file
# with tf.device(self.gpu):
# tf.reset_default_graph()
# print self.gpu
# tf.Graph().as_default()
# self.sess = tf.Session()
# self.im_input = tf.placeholder(tf.uint8, (self.num_crops, crop_dim, crop_dim, 3), name = 'im_input')
# ims = tf.cast(self.im_input, tf.float32)
# self.logits = make_model(ims, train = False)
# tf.train.Saver().restore(self.sess, self.model_file)
# tf.get_default_graph().finalize()
# def deinit(self):
# if self.sess is not None:
# self.sess.close()
# self.sess = None
# def format_im(self, im):
# # im = ig.scale(im, self.full_shape)
# # h_off = (im.shape[0] - crop_dim) // 2
# # w_off = (im.shape[1] - crop_dim) // 2
# # im = im[h_off : h_off + crop_dim, w_off : w_off + crop_dim]
# # return im
# #return [ig.scale(im, (crop_dim, crop_dim), 1)]*10 #.astype('float32')
# dim = crop_dim
# dh = (im.shape[0] - dim)
# crops = np.zeros((self.num_crops, dim, dim, 3), dtype = np.uint8)
# crops[0] = ut.crop_center(im, dim)
# i = 1
# for y in np.linspace(0, dh, 3).astype('l'):
# dw = (im.shape[1] - dim)
# for x in np.linspace(0, dw, 3).astype('l'):
# crops[i] = im[y : y + dim, x : x + dim]
# i += 1
# return np.array(crops, 'float32')
# def predict(self, im):
# self.init()
# im = self.format_im(im)
# #print 'mean =', im.mean((0,1))
# ut.tic()
# [logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im})
# print 'logits =', logits
# ut.toc()
# return np.mean(map(ut.softmax, logits), axis = 0)[1]
def test(path, match_str = None): | print 'Restoring from:', check_path
net = NetClf(check_path, gpu)
examples = []
for line in ut.read_lines(pj(path, 'test.csv')):
s = line.split(',')
#print match_str, s[3]
print s
if (match_str is not None) and (match_str not in s[3]):
print 'skipping'
continue
examples.append((s[0], s[1], int(s[2]), s[3]))
print 'Testing on:', len(examples), 'examples'
labels = []
probs = []
accs = []
table = []
for i, ex in enumerate(examples):
im_after = ig.load(ex[0])
im_prev = ig.load(ex[1])
label = ex[2]
prob = net.predict(im_after, im_prev)
#print prob, label
pred = int(prob >= 0.5)
labels.append(label)
probs.append(prob)
accs.append(pred == label)
if i < 50:
color = '#00DD00' if pred == label else '#DD0000'
row = [im_after, im_prev, ut.font_color_html('pred = %.3f' % prob, color), 'gt = %d' % label]
table.append(row)
labels = np.array(labels, 'bool')
probs = np.array(probs, 'float32')
accs = np.array(accs)
print 'Accuracy:', np.mean(accs)
print 'mAP:', sklearn.metrics.average_precision_score(labels, probs)
ig.show(table) | train_dir = pj(path, 'training')
check_path = tf.train.latest_checkpoint(train_dir) | random_line_split |
press.py | import numpy as np, tensorflow as tf, aolib.util as ut, aolib.img as ig, os, random, h5py, sklearn.metrics
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
import vgg
#vgg = nets.vgg
#import tensorflow.contrib.slim.nets as nets
#resnet_v1 = nets.resnet_v1
#import resnet, resnet_utils
pj = ut.pjoin
full_dim = 256
crop_dim = 224
train_iters = 10000
batch_size = 32
base_lr = 1e-3
#base_lr = 1e-3
#base_lr = 1e-2
gamma = 0.5
#step_size = 1000
step_size = 2500
#sample_dur_secs = 0.15
sample_dur_secs = 0.05
sample_fps = 60
gpu = '/gpu:0'
#init_path = '../results/resnet_v1_50.ckpt'
init_path = '../results/vgg_16.ckpt'
checkpoint_iters = 100
#finetune_top_only = True
finetune_top_only = False
model_style = 'diff'
augment = True
#augment = False
#model_style = 'dual'
def download_pretrained():
# https://github.com/tensorflow/models/tree/master/slim
ut.mkdir('../results')
# ut.sys_check('wget http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz '
# ' -O ../results/resnet_v1_50_2016_08_28.tar.gz')
# ut.sys_check('cd ../results; tar -xzf resnet_v1_50_2016_08_28.tar.gz')
ut.sys_check('wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz '
' -O ../results/vgg_16_2016_08_28.tar.gz')
ut.sys_check('cd ../results; tar -xzf vgg_16_2016_08_28.tar.gz')
# def extract_frames((vid_file, time, label, vid_idx, im_dir)):
# examples = []
# with ut.TmpDir() as tmp_dir:
# # ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# # (vid_file, full_dim, full_dim, time,
# # sample_dur_secs, sample_fps, tmp_dir))
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# sample_dur_secs, sample_fps, tmp_dir))
# for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
# im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
# ut.sys_check('cp %s %s' % (fname, im_file))
# examples.append((im_file, label))
# return examples
# def extract_frames((vid_file, time, label, vid_idx, im_dir)):
# examples = []
# with ut.TmpDir() as tmp_dir:
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# 0.05, sample_fps, tmp_dir))
# fname = sorted(ut.glob(pj(tmp_dir, '*.png')))[0]
# prev_file = pj(im_dir, 'prev_%s_%05d_%d.png' % (vid_idx, 0, label))
# ut.sys_check('cp %s %s' % (fname, prev_file))
# with ut.TmpDir() as tmp_dir:
# # ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# # (vid_file, full_dim, full_dim, time,
# # sample_dur_secs, sample_fps, tmp_dir))
# ut.sys_check('ffmpeg -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
# (time, vid_file, full_dim, full_dim,
# sample_dur_secs, sample_fps, tmp_dir))
# for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
# im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
# ut.sys_check('cp %s %s' % (fname, im_file))
# examples.append((im_file, prev_file, label))
# return examples
def extract_frames((vid_file, time, label, vid_idx, im_dir, prev_free_time)):
examples = []
with ut.TmpDir() as tmp_dir:
free_dur = 0.1
ut.sys_check('ffmpeg -loglevel warning -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
(prev_free_time, vid_file, full_dim, full_dim, free_dur, sample_fps, tmp_dir))
#fname = sorted(ut.glob(pj(tmp_dir, '*.png')))[0]
assert len(ut.glob(pj(tmp_dir, '*.png'))), 'no frames for prev_free_time'
fname = random.choice(sorted(ut.glob(pj(tmp_dir, '*.png'))))
prev_file = pj(im_dir, 'prev_%s_%05d_%d.png' % (vid_idx, 0, label))
ut.sys_check('cp %s %s' % (fname, prev_file))
with ut.TmpDir() as tmp_dir:
# ut.sys_check('ffmpeg -i "%s" -vf scale=%d:%d -ss %f -t %f -r %d "%s/%%07d.png"' % \
# (vid_file, full_dim, full_dim, time,
# sample_dur_secs, sample_fps, tmp_dir))
ut.sys_check('ffmpeg -loglevel warning -ss %f -i "%s" -vf scale=%d:%d -t %f -r %d "%s/%%07d.png"' % \
(time, vid_file, full_dim, full_dim,
sample_dur_secs, sample_fps, tmp_dir))
for frame_idx, fname in enumerate(sorted(ut.glob(pj(tmp_dir, '*.png')))):
im_file = pj(im_dir, '%s_%05d_%d.png' % (vid_idx, frame_idx, label))
ut.sys_check('cp %s %s' % (fname, im_file))
examples.append((im_file, prev_file, label, vid_file))
return examples
def examples_from_db((db_file, im_dir)):
examples = []
try:
with h5py.File(db_file, 'r') as db:
#print db.keys()
sc = lambda x : ig.scale(x, (full_dim, full_dim))
for x in ['A', 'B']:
im_file = ut.make_temp('.png', dir = im_dir)
prev_file = ut.make_temp('.png', dir = im_dir)
ig.save(im_file, sc(db['GelSight%s_image_post_gripping' % x]))
ig.save(prev_file, sc(db['GelSight%s_image_pre_gripping' % x]))
if 'is_gripping' in db:
label = int(np.array(db['is_gripping'])[0])
elif 'Is gripping?' in db:
label = int(np.array(db['Is gripping?'])[0])
else:
raise RuntimeError('No label!')
examples.append((im_file, prev_file, label, db_file))
except:
print 'Failed to open:', db_file
return examples
def write_data(vid_path, out_dir, train_frac = 0.75):
im_dir = ut.mkdir(pj(out_dir, 'ims'))
in_data = []
meta_files = sorted(ut.glob(vid_path, 'train', '*.txt'))
print 'meta files:'
for x in meta_files:
print x
print
for meta_idx, meta_file in enumerate(meta_files):
last_prev_time = 0.
vid_file = meta_file.replace('.txt', '.mp4')
for clip_idx, ex in enumerate(ut.read_lines(meta_file)):
prev_time = last_prev_time
vid_idx = '%05d_%05d' % (meta_idx, clip_idx)
print ex
s, time = ex.split()
time = float(time)
if s == 'p':
label = 1
elif s == 'n':
label = 0
last_prev_time = time
else:
raise RuntimeError()
in_data.append((vid_file, time, label, vid_idx, im_dir, prev_time))
print 'Writing:', len(in_data), 'sequences'
meta_examples = ut.flatten(ut.parmap(extract_frames, in_data))
meta_examples = ut.shuffled_with_seed(meta_examples)
# add manu examples
db_files = sorted(ut.sys_with_stdout('find ../data/manu-press -name "*.hdf5"').split())
db_files = ut.shuffled_with_seed(db_files)
print 'Train fraction:', train_frac
num_train = int(train_frac * len(db_files))
db_train = db_files[:num_train]
db_test = db_files[num_train:]
train_db_examples = ut.flatten(ut.parmap(examples_from_db, [(x, im_dir) for x in db_train]))
test_db_examples = ut.flatten(ut.parmap(examples_from_db, [(x, im_dir) for x in db_test]))
print 'Number of db train examples:', len(train_db_examples)
print 'Number of meta examples:', len(meta_examples)
train_examples = ut.shuffled_with_seed(meta_examples + train_db_examples)
ut.write_lines(pj(out_dir, 'train.csv'), ['%s,%s,%d,%s' % x for x in train_examples])
test_examples = ut.shuffled_with_seed(test_db_examples)
ut.write_lines(pj(out_dir, 'test.csv'), ['%s,%s,%d,%s' % x for x in test_examples])
def make_tf(path):
|
def read_example(rec_queue):
reader = tf.TFRecordReader()
k, s = reader.read(rec_queue)
feats = {
'im' : tf.FixedLenFeature([], dtype=tf.string),
'im_prev' : tf.FixedLenFeature([], dtype=tf.string),
'label' : tf.FixedLenFeature([], tf.int64)
}
example = tf.parse_single_example(s, features = feats)
im = tf.image.decode_png(example['im'])
im_prev = tf.image.decode_png(example['im_prev'])
im.set_shape((full_dim, full_dim, 3))
im_prev.set_shape((full_dim, full_dim, 3))
if 0:
#im = tf.random_crop(im, (crop_dim, crop_dim, 3))
im = tf.image.resize_images(im, [crop_dim, crop_dim])
im_prev = tf.image.resize_images(im_prev, [crop_dim, crop_dim])
else:
im_combo = tf.concat([im, im_prev], 2)
im_combo = tf.random_crop(im_combo, (crop_dim, crop_dim, 6))
if augment:
im_combo = tf.image.random_flip_left_right(im_combo)
im_combo = tf.image.random_flip_up_down(im_combo)
# See https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_input.py for
# an example of more aggressive brightness/contrast augmentation
# The loss stays very high if you do not convert to float first: I think this is because
# many values in the GelSight are close to 0/255, so they get saturated
im_combo = tf.cast(im_combo, tf.float32)
im_combo = tf.image.random_brightness(im_combo, max_delta=20)
im_combo = tf.image.random_contrast(im_combo, lower=0.9, upper=1.1)
#im_combo = tf.Print(im_combo, [tf.reduce_max(im_combo)])
im = im_combo[:, :, :3]
im_prev = im_combo[:, :, 3:]
label = example['label']
return im, im_prev, label
def read_data(path):
#queues = [tf.train.string_input_producer(tf_files)]
#example_list = [read_example(queue) for queue in queues]
tf_files = [pj(path, 'train.tf')]
queue = tf.train.string_input_producer(tf_files)
return tf.train.shuffle_batch(
read_example(queue), batch_size = batch_size,
capacity = 2000, min_after_dequeue = 500)
def normalize_ims(im):
if type(im) == type(np.array([])):
im = im.astype('float32')
else:
im = tf.cast(im, tf.float32)
return -1. + (2./255) * im
def shape(x, d = None):
s = x.get_shape().as_list()
return s if d is None else s[d]
# def make_model(ims, train):
# #ims = tf.Print(ims, ['ims before =', ims])
# ims = normalize_ims(ims)
# with slim.arg_scope(vgg.vgg_arg_scope()):
# logits, _ = vgg.vgg_16(ims, is_training = train, update_top_only = finetune_top_only, num_classes = 2)
# print shape(logits)
# return logits
def make_model(ims, ims_prev, train):
#ims = tf.Print(ims, ['ims before =', ims])
ims = normalize_ims(ims)
ims_prev = normalize_ims(ims_prev)
if model_style == 'diff':
logits, _ = vgg.vgg_dual_16(
ims - ims_prev, ims, is_training = train,
update_top_only = finetune_top_only,
num_classes = 2)
elif model_style == 'dual':
logits, _ = vgg.vgg_dual_16(
ims, ims_prev, is_training = train,
update_top_only = finetune_top_only,
num_classes = 2)
return logits
def moving_avg(name, x, vals = {}, avg_win_size = 100):
ut.add_dict_list(vals, name, x)
return np.mean(vals[name][-avg_win_size:])
def train(path, restore = False):
config = tf.ConfigProto(allow_soft_placement = True)
with tf.Graph().as_default(), tf.device(gpu), tf.Session(config = config) as sess:
global_step = tf.get_variable('global_step', [], initializer =
tf.constant_initializer(0), trainable = False)
ims, ims_prev, labels = read_data(path)
#tf.summary.image('im', ims)
logits = make_model(ims, ims_prev, train = True)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits, labels = labels)
#logits = tf.Print(logits, ['logits =', logits[0, :], labels[0]])
loss = tf.reduce_mean(loss)
tf.summary.scalar('loss', loss)
eq = tf.equal(tf.argmax(logits, 1), labels)
acc = tf.reduce_mean(tf.cast(eq, tf.float32))
tf.summary.scalar('acc', acc)
lr = base_lr * gamma**(global_step // step_size)
opt = tf.train.MomentumOptimizer(lr, 0.9)
train_op = opt.minimize(loss, global_step = global_step)
bn_ups = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
print 'Batch norm updates:', len(bn_ups)
train_op = tf.group(train_op, *bn_ups)
sess.run(tf.global_variables_initializer())
var_list = slim.get_variables_to_restore()
exclude = ['Adam', 'beta1_power', 'beta2_power', 'Momentum', 'global_step', 'logits', 'fc8', 'fc6_', 'fc7_', 'conv6']
var_list = [x for x in var_list if \
not any(name in x.name for name in exclude)]
train_dir = pj(path, 'training')
if restore:
tf.train.Saver().restore(sess, tf.train.latest_checkpoint(train_dir))
else:
tf.train.Saver(var_list).restore(sess, init_path)
#saver = tf.train.Saver()
tf.train.start_queue_runners(sess = sess)
summary_dir = ut.mkdir('../results/summary')
print 'tensorboard --logdir=%s' % summary_dir
sum_writer = tf.summary.FileWriter(summary_dir, sess.graph)
while True:
step = int(sess.run(global_step))
if (step == 10 or step % checkpoint_iters == 0) or step == train_iters - 1:
check_path = pj(ut.mkdir(train_dir), 'net.tf')
print 'Saving:', check_path
#saver.save(sess, check_path, global_step = global_step)
vs = slim.get_model_variables()
# print 'Variables:'
# for x in vs:
# print x.name
tf.train.Saver(vs).save(sess, check_path, global_step = global_step)
if step > train_iters:
break
merged = tf.summary.merge_all()
if step % 1 == 0:
[summary] = sess.run([merged])
sum_writer.add_summary(summary, step)
_, lr_val, loss_val, acc_val = sess.run([train_op, lr, loss, acc])
if step % 10 == 0:
print 'Iteration %d,' % step, 'lr = ', lr_val, 'loss:', \
moving_avg('loss', loss_val), 'acc:', moving_avg('acc', acc_val)
def run(todo = 'all',
vid_path = '/home/ao/Videos/Webcam',
#out_dir = '../results/press-data-v7/',
#out_dir = '../results/press-data-v8/',
#out_dir = '../results/press-data-v9/',
#out_dir = '../results/press-data-v10/',
out_dir = '../results/press-data-v11/',
restore = 0,
train_frac = 0.75):
todo = ut.make_todo(todo, 'im tf train test')
if 'im' in todo:
print vid_path
write_data(vid_path, out_dir, train_frac = train_frac)
if 'tf' in todo:
make_tf(out_dir)
if 'train' in todo:
train(out_dir, restore = restore)
if 'test' in todo:
test(out_dir)
# class NetClf:
# def __init__(self, model_file, gpu = '/cpu:0'):
# self.sess = None
# #self.train_path = train_path
# self.model_file = model_file
# self.gpu = gpu
# def __del__(self):
# self.deinit()
# def init(self):
# if self.sess is None:
# #self.model_file = tf.train.latest_checkpoint(self.train_path)
# print 'Restoring:',self.model_file
# with tf.device(self.gpu):
# tf.reset_default_graph()
# print self.gpu
# tf.Graph().as_default()
# #self.sess = tf.Session()
# #self.sess = tf.Session(config = tf.ConfigProto(allow_soft_placement=True))
# self.sess = tf.Session()
# #self.im_input = tf.placeholder(tf.float32, (1, crop_dim, crop_dim, 3), name = 'im_input')
# self.im_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_input')
# ims = tf.cast(self.im_input, tf.float32)
# self.logits = make_model(ims, train = False)
# # var_list = slim.get_variables_to_restore()
# # print 'Restoring:'
# # for x in var_list:
# # print x.name
# #self.sess.run(tf.global_variables_initializer())
# #tf.train.Saver(var_list).restore(self.sess, self.model_file)
# tf.train.Saver().restore(self.sess, self.model_file)
# tf.get_default_graph().finalize()
# def deinit(self):
# if self.sess is not None:
# self.sess.close()
# self.sess = None
# def format_im(self, im):
# # im = ig.scale(im, self.full_shape)
# # h_off = (im.shape[0] - crop_dim) // 2
# # w_off = (im.shape[1] - crop_dim) // 2
# # im = im[h_off : h_off + crop_dim, w_off : w_off + crop_dim]
# # return im
# return ig.scale(im, (crop_dim, crop_dim), 1)#.astype('float32')
# def predict(self, im):
# self.init()
# im = self.format_im(im)
# #print 'mean =', im.mean((0,1))
# ut.tic()
# [logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im[None]})
# print 'logits =', logits
# ut.toc()
# return ut.softmax(logits[0])[1]
class NetClf:
def __init__(self, model_file, gpu = '/cpu:0'):
self.sess = None
self.model_file = model_file
self.gpu = gpu
def __del__(self):
self.deinit()
def init(self):
if self.sess is None:
#self.model_file = tf.train.latest_checkpoint(self.train_path)
print 'Restoring:',self.model_file
with tf.device(self.gpu):
tf.reset_default_graph()
print self.gpu
tf.Graph().as_default()
#self.sess = tf.Session()
#self.sess = tf.Session(config = tf.ConfigProto(allow_soft_placement=True))
self.sess = tf.Session()
#self.im_input = tf.placeholder(tf.float32, (1, crop_dim, crop_dim, 3), name = 'im_input')
self.im_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_input')
self.im_prev_input = tf.placeholder(tf.uint8, (1, crop_dim, crop_dim, 3), name = 'im_prev_input')
#ims = tf.cast(self.im_input, tf.float32)
self.logits = make_model(self.im_input, self.im_prev_input, train = False)
# var_list = slim.get_variables_to_restore()
# print 'Restoring:'
# for x in var_list:
# print x.name
#self.sess.run(tf.global_variables_initializer())
#tf.train.Saver(var_list).restore(self.sess, self.model_file)
tf.train.Saver().restore(self.sess, self.model_file)
tf.get_default_graph().finalize()
def deinit(self):
if self.sess is not None:
self.sess.close()
self.sess = None
def format_im(self, im):
return ig.scale(im, (crop_dim, crop_dim), 1)#.astype('float32')
def predict(self, im, im_prev):
self.init()
im = self.format_im(im)
im_prev = self.format_im(im_prev)
#ut.tic()
[logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im[None],
self.im_prev_input : im_prev[None]})
#print 'logits =', logits
#ut.toc()
return ut.softmax(logits[0])[1]
# class NetClf:
# #def __init__(self, train_path, gpu = '/cpu:0'):
# def __init__(self, train_path, gpu = '/gpu:0'):
# self.sess = None
# self.train_path = train_path
# self.gpu = gpu
# self.num_crops = 10
# def __del__(self):
# self.deinit()
# def init(self):
# if self.sess is None:
# self.model_file = tf.train.latest_checkpoint(self.train_path)
# print 'Restoring:',self.model_file
# with tf.device(self.gpu):
# tf.reset_default_graph()
# print self.gpu
# tf.Graph().as_default()
# self.sess = tf.Session()
# self.im_input = tf.placeholder(tf.uint8, (self.num_crops, crop_dim, crop_dim, 3), name = 'im_input')
# ims = tf.cast(self.im_input, tf.float32)
# self.logits = make_model(ims, train = False)
# tf.train.Saver().restore(self.sess, self.model_file)
# tf.get_default_graph().finalize()
# def deinit(self):
# if self.sess is not None:
# self.sess.close()
# self.sess = None
# def format_im(self, im):
# # im = ig.scale(im, self.full_shape)
# # h_off = (im.shape[0] - crop_dim) // 2
# # w_off = (im.shape[1] - crop_dim) // 2
# # im = im[h_off : h_off + crop_dim, w_off : w_off + crop_dim]
# # return im
# #return [ig.scale(im, (crop_dim, crop_dim), 1)]*10 #.astype('float32')
# dim = crop_dim
# dh = (im.shape[0] - dim)
# crops = np.zeros((self.num_crops, dim, dim, 3), dtype = np.uint8)
# crops[0] = ut.crop_center(im, dim)
# i = 1
# for y in np.linspace(0, dh, 3).astype('l'):
# dw = (im.shape[1] - dim)
# for x in np.linspace(0, dw, 3).astype('l'):
# crops[i] = im[y : y + dim, x : x + dim]
# i += 1
# return np.array(crops, 'float32')
# def predict(self, im):
# self.init()
# im = self.format_im(im)
# #print 'mean =', im.mean((0,1))
# ut.tic()
# [logits] = self.sess.run([self.logits], feed_dict = {self.im_input : im})
# print 'logits =', logits
# ut.toc()
# return np.mean(map(ut.softmax, logits), axis = 0)[1]
def test(path, match_str = None):
train_dir = pj(path, 'training')
check_path = tf.train.latest_checkpoint(train_dir)
print 'Restoring from:', check_path
net = NetClf(check_path, gpu)
examples = []
for line in ut.read_lines(pj(path, 'test.csv')):
s = line.split(',')
#print match_str, s[3]
print s
if (match_str is not None) and (match_str not in s[3]):
print 'skipping'
continue
examples.append((s[0], s[1], int(s[2]), s[3]))
print 'Testing on:', len(examples), 'examples'
labels = []
probs = []
accs = []
table = []
for i, ex in enumerate(examples):
im_after = ig.load(ex[0])
im_prev = ig.load(ex[1])
label = ex[2]
prob = net.predict(im_after, im_prev)
#print prob, label
pred = int(prob >= 0.5)
labels.append(label)
probs.append(prob)
accs.append(pred == label)
if i < 50:
color = '#00DD00' if pred == label else '#DD0000'
row = [im_after, im_prev, ut.font_color_html('pred = %.3f' % prob, color), 'gt = %d' % label]
table.append(row)
labels = np.array(labels, 'bool')
probs = np.array(probs, 'float32')
accs = np.array(accs)
print 'Accuracy:', np.mean(accs)
print 'mAP:', sklearn.metrics.average_precision_score(labels, probs)
ig.show(table)
| tf_file = pj(path, 'train.tf')
if os.path.exists(tf_file):
os.remove(tf_file)
writer = tf.python_io.TFRecordWriter(tf_file)
lines = ut.shuffled_with_seed(ut.read_lines(pj(path, 'train.csv')))
print 'Number of examples:', len(lines)
for line in lines:
fname, prev_fname, label, _ = line.split(',')
label = int(label)
s = ut.read_file(fname, binary = True)
s_prev = ut.read_file(prev_fname, binary = True)
feat = {'im': tf.train.Feature(bytes_list = tf.train.BytesList(value = [s])),
'im_prev': tf.train.Feature(bytes_list = tf.train.BytesList(value = [s_prev])),
'label' : tf.train.Feature(int64_list = tf.train.Int64List(value = [label]))}
ex = tf.train.Example(features = tf.train.Features(feature = feat))
writer.write(ex.SerializeToString())
writer.close() | identifier_body |
argumentParser.py | #!/usr/bin/env python
'''
inStrain - parse command-line arguemnts
'''
__author__ = "Matt Olm and Alex Crits-Christoph"
__license__ = "MIT"
__email__ = "mattolm@gmail.com"
__status__ = "Development"
import os
import sys
import argparse
# Get the version
from ._version import __version__
"""
########################################
# Argument Parsing #
########################################
"""
class SmartFormatter(argparse.ArgumentDefaultsHelpFormatter):
def _split_lines(self, text, width):
if text.startswith('R|'):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
def printHelp():
|
def parse_args(args):
parser = argparse.ArgumentParser(formatter_class=SmartFormatter)
subparsers = parser.add_subparsers(help='Desired operation',dest='operation')
# Make a parent parser for all of the subparsers
parent_parser = argparse.ArgumentParser(add_help=False)
Bflags = parent_parser.add_argument_group('SYSTEM PARAMETERS')
Bflags.add_argument('-p','--processes',help='Number of processes to use',default=6,type=int)
Bflags.add_argument('-d','--debug',help='Make extra debugging output',default=False,
action= "store_true")
Bflags.add_argument("-h", "--help", action="help", help="show this help message and exit")
Bflags.add_argument(
"--version",
action="version",
version="inStrain version {version}".format(version=__version__))
# Make a parent parser for read filtering
readfilter_parent = argparse.ArgumentParser(add_help=False)
fiflags = readfilter_parent.add_argument_group('READ FILTERING OPTIONS')
fiflags.add_argument("-l", "--min_read_ani", action="store", default=0.95, type=float, \
help='Minimum percent identity of read pairs to consensus to use the reads. Must be >, not >=')
fiflags.add_argument("--min_mapq", action="store", default=-1, type=int,\
help='Minimum mapq score of EITHER read in a pair to use that pair. Must be >, not >=')
fiflags.add_argument("--max_insert_relative", action="store", default=3, type=float, \
help='Multiplier to determine maximum insert size between two reads - default is to use 3x median insert size. Must be >, not >=')
fiflags.add_argument("--min_insert", action="store", default=50, type=int,\
help='Minimum insert size between two reads - default is 50 bp. If two reads are 50bp each and overlap completely, their insert will be 50. Must be >, not >=')
fiflags.add_argument("--pairing_filter", help="R|How should paired reads be handled?\n" \
+ "paired_only = Only paired reads are retained\n" \
+ 'non_discordant = Keep all paired reads and singleton reads that map to a single scaffold\n' \
+ "all_reads = Keep all reads regardless of pairing status (NOT RECOMMENDED; See documentation for deatils)\n", \
default = "paired_only", choices={'paired_only', 'non_discordant', 'all_reads'})
fiflags.add_argument("--priority_reads", help='The location of a list ' \
+ "of reads that should be retained regardless of pairing status " \
+ "(for example long reads or merged reads). This can be a .fastq " \
+ "file or text file with list of read names (will assume file is " \
+ "compressed if ends in .gz", default=None)
# Make a parent parser for read output
readoutput_parent = argparse.ArgumentParser(add_help=False)
fiflags = readoutput_parent.add_argument_group('READ OUTPUT OPTIONS')
# fiflags.add_argument("-s", "--generate_sam", action="store", default=None, \
# help='Specify the location to write a .sam file with filtered reads only.')
fiflags.add_argument("--detailed_mapping_info", action="store_true", default=False, help='Make a detailed read report indicating deatils about each individual mapped read')
# Make a parent parser for SNV calling
variant_parent = argparse.ArgumentParser(add_help=False)
fiflags = variant_parent.add_argument_group('VARIANT CALLING OPTIONS')
fiflags.add_argument("-c", "--min_cov", action="store", default=5, type=int, \
help='Minimum coverage to call an variant')
fiflags.add_argument("-f", "--min_freq", action="store", default=0.05, type=float, \
help='Minimum SNP frequency to confirm a SNV (both this AND the FDR snp count cutoff must be true to call a SNP).')
fiflags.add_argument("-fdr", "--fdr", action="store", default=1e-6, type=float,\
help='SNP false discovery rate- based on simulation data with a 0.1 percent error rate (Q30)')
# Make a parent for profile_genes
genes_parent = argparse.ArgumentParser(add_help=False)
Rflags = genes_parent.add_argument_group('GENE PROFILING OPTIONS')
Rflags.add_argument("-g", "--gene_file", action="store", default=None, \
help='Path to prodigal .fna genes file. If file ends in .gb or .gbk, will treat as a genbank file (EXPERIMENTAL; the name of the gene must be in the gene qualifier)')
# Make a parent for genome_wide
geneomewide_parent = argparse.ArgumentParser(add_help=False)
Rflags = geneomewide_parent.add_argument_group('GENOME WIDE OPTIONS')
Rflags.add_argument('-s', '--stb', help="Scaffold to bin. This can be a file with each line listing a scaffold and a bin name, tab-seperated. This can also be a space-seperated list of .fasta files, with one genome per .fasta file. If nothing is provided, all scaffolds will be treated as belonging to the same genome",
nargs='*', default=[])
# Make a parent for handling mm
mm_parent = argparse.ArgumentParser(add_help=False)
Rflags = mm_parent.add_argument_group('READ ANI OPTIONS')
Rflags.add_argument('--mm_level', help="Create output files on the mm level (see documentation for info)",
action='store_true', default=False)
Rflags.add_argument('--skip_mm_profiling', action='store_true', default=False,\
help="Dont perform analysis on an mm level; saves RAM and time; impacts plots and raw_data")
'''
####### Arguments for profile operation ######
'''
# Make a parent for profile to go above the system arguments
profile_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = profile_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
# I/O Parameters
Iflags = profile_parent.add_argument_group('I/O PARAMETERS')
Iflags.add_argument("-o", "--output", action="store", default='inStrain', \
help='Output prefix')
Iflags.add_argument('--use_full_fasta_header', action='store_true', default=False,
help='Instead of using the fasta ID (space in header before space), use the full header. Needed for some mapping tools (including bbMap)')
Iflags.add_argument('--force_compress', action='store_true', default=False,
help='Force compression of all output files')
profile_parser = subparsers.add_parser("profile",formatter_class=SmartFormatter,\
parents = [profile_parent, parent_parser, readfilter_parent, readoutput_parent, variant_parent, genes_parent, geneomewide_parent, mm_parent], add_help=False)
# Other Parameters
Oflags = profile_parser.add_argument_group('PROFILE OPTIONS')
Oflags.add_argument('--database_mode', action='store_true', default=False,\
help="Set a number of parameters to values appropriate for mapping to a " \
+ "large fasta file. Will set: --min_read_ani 0.92 --skip_mm_profiling --min_genome_coverage 1")
Oflags.add_argument("--min_scaffold_reads", action="store", default=1, type=int,\
help='Minimum number of reads mapping to a scaffold to proceed with profiling it')
Oflags.add_argument("--min_genome_coverage", action="store", default=0, type=float,\
help='Minimum number of reads mapping to a genome to proceed with profiling it. MUST profile .stb if this is set')
Oflags.add_argument("--min_snp", action="store", default=20, \
help='Absolute minimum number of reads connecting two SNPs to calculate LD between them.')
Oflags.add_argument('--store_everything', action='store_true', default=False,\
help="Store intermediate dictionaries in the pickle file; will result in significantly more RAM and disk usage")
Oflags.add_argument("--scaffolds_to_profile", action="store",\
help='Path to a file containing a list of scaffolds to profile- if provided will ONLY profile those scaffolds')
Oflags.add_argument("--rarefied_coverage", action='store', default=50,\
help='When calculating nucleotide diversity, also calculate a rarefied version with this much coverage')
Oflags.add_argument('--window_length', action='store', default=10000, type=int,\
help='Break scaffolds into windows of this length when profiling')
# Other Parameters
Iflags = profile_parser.add_argument_group('OTHER OPTIONS')
Iflags.add_argument('--skip_genome_wide', action='store_true', default=False,\
help="Do not generate tables that consider groups of scaffolds belonging to genomes")
Iflags.add_argument('--skip_plot_generation', action='store_true', default=False,\
help="Do not make plots")
'''
####### Arguments for compare operation ######
'''
# Make a parent for profile to go above the system arguments
compare_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = compare_parent.add_argument_group('REQUIRED')
Rflags.add_argument('-i', '--input', help="A list of inStrain objects, all mapped to the same .fasta file",
nargs='*', required=True)
Rflags.add_argument("-o", "--output", action="store", default='instrainComparer', \
help='Output prefix')
compare_parser = subparsers.add_parser("compare",formatter_class=SmartFormatter,\
parents = [compare_parent, parent_parser, geneomewide_parent, variant_parent], add_help=False)
# Database mode parameters
Dflags = compare_parser.add_argument_group('DATABASE MODE PARAMETERS')
Dflags.add_argument('--database_mode', action='store_true', help=
"Using the parameters below, automatically determine which genomes are present in each Profile "
"and only compare scaffolds from those genomes. All profiles must have run Profile with the same .stb")
Dflags.add_argument('--breadth', default=0.5, type=float, help='Minimum breadth_minCov required to count a genome present')
# Other Parameters
Oflags = compare_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument("-sc", "--scaffolds", action="store",
help='Location to a list of scaffolds to compare. You can also make this a .fasta file and it will load the scaffold names')
Oflags.add_argument("--genome", action="store",
help='Run scaffolds belonging to this single genome only. Must provide an .stb file')
Oflags.add_argument('--store_coverage_overlap', action='store_true', default=False,\
help="Also store coverage overlap on an mm level")
Oflags.add_argument('--store_mismatch_locations', action='store_true', default=False,\
help="Store the locations of SNPs")
Oflags.add_argument('--include_self_comparisons', action='store_true', default=False,\
help="Also compare IS profiles against themself")
Oflags.add_argument('--skip_plot_generation', action='store_true', default=False, \
help="Dont create plots at the end of the run.")
Oflags.add_argument('--group_length', default=10000000,
help="How many bp to compare simultaneously (higher will use more RAM and run more quickly)", type=int)
Oflags.add_argument('--force_compress', action='store_true', default=False,
help='Force compression of all output files')
Cflags = compare_parser.add_argument_group('GENOME CLUSTERING OPTIONS')
Cflags.add_argument('-ani', "--ani_threshold", help='popANI threshold to cluster genomes at. Must provide .stb file to do so',
default=0.99999, type=float)
Cflags.add_argument('-cov', "--coverage_treshold", help='Minimum percent_genome_compared for a genome comparison' \
' to count; if below the popANI will be set to 0.', default=0.1, type=float)
Cflags.add_argument("--clusterAlg", help="Algorithm used to cluster genomes (passed\
to scipy.cluster.hierarchy.linkage)", default='average',
choices={'single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward'})
Bflags = compare_parser.add_argument_group('SNV POOLING OPTIONS')
Bflags.add_argument('-bams', "--bams",
help='Location of .bam files used during inStrain profile commands; needed to pull low-frequency SNVs.' \
' MUST BE IN SAME ORDER AS THE INPUT FILES',
nargs='*')
Bflags.add_argument('--skip_popANI', action='store_true', default=False,\
help="Only run SNV Pooling; skip other compare operations")
# Gflags = compare_parser.add_argument_group('GREEDY CLUSTERING OPTIONS [THIS SECTION IS EXPERIMENTAL!]')
# Gflags.add_argument('--greedy_clustering', action='store_true', default=False,\
# help="Dont do pair-wise comparisons, do greedy clustering to only find the number of clsuters. If this is set, use the parameters below as well")
# Gflags.add_argument('--g_ani', action='store', default=0.99, type=float,\
# help="ANI threshold for greedy clustering- put the fraction not the percentage (e.g. 0.99, not 99)")
# Gflags.add_argument('--g_cov', action='store', default=0.99, type=float,\
# help="Alignment coverage for greedy clustering- put the fraction not the percentage (e.g. 0.5, not 10)")
# Gflags.add_argument('--g_mm', action='store', default=100, type=int,\
# help="Maximum read mismatch level")
'''
####### Arguments for parse_annotations operation ######
'''
parse_anno_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = parse_anno_parent.add_argument_group('REQUIRED')
Rflags.add_argument('-i', '--input', help="A list of inStrain objects, all mapped to the same .fasta file",
nargs='*', required=True)
Rflags.add_argument('-a', '--annotations', help="R|A table or set of tables with gene annotations.\nMust be be a .csv file with two columns- `gene` and `anno`. See inStrain documentation for details\n(https://instrain.readthedocs.io/en/latest/user_manual.html#parse-annotations)",
nargs='*', required=True)
Rflags.add_argument("-o", "--output", action="store", default='annotation_output', \
help='Output prefix')
parse_anno_parser = subparsers.add_parser("parse_annotations", formatter_class=SmartFormatter, \
parents=[parse_anno_parent, parent_parser],
add_help=False)
# Other Parameters
Oflags = parse_anno_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument("-b", "--min_genome_breadth", action="store", default=0.5, type=float, \
help='Only annotate genomes on genomes with at least this genome breadth. Requires having genomes called. Set to 0 to include all genes.')
Oflags.add_argument("-g", "--min_gene_breadth", action="store", default=0.8, type=float, \
help='Only annotate genes with at least this breadth. Set to 0 to include all genes.')
Oflags.add_argument('--store_rawdata', action='store_true', default=False,\
help="Store the raw data dictionary")
'''
####### Arguments for profile_genes operation ######
'''
# Make a parent for profile to go above the system arguments
genes_io = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = genes_io.add_argument_group('INPUT / OUTPUT')
Rflags.add_argument("-i", '--IS', help="an inStrain profile object", required=True)
Rflags.add_argument('--store_everything', action='store_true', default=False,\
help="Store gene sequences in the IS object")
genes_parser = subparsers.add_parser("profile_genes",formatter_class=SmartFormatter,\
parents = [genes_parent, genes_io, parent_parser], add_help=False)
'''
####### Arguments for genome_wide operation ######
'''
# Make a parent for profile to go above the system arguments
genome_parser = subparsers.add_parser("genome_wide",formatter_class=SmartFormatter,\
parents = [geneomewide_parent, genes_io, mm_parent, parent_parser], add_help=False)
'''
####### Arguments for plot operation ######
'''
# Make a parent for profile to go above the system arguments
plot_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = plot_parent.add_argument_group('REQUIRED')
Rflags.add_argument("-i", '--IS', help="an inStrain profile object", required=True)
Rflags.add_argument("-pl", "--plots", help= "R|Plots. "
+ "Input 'all' or 'a' to plot all\n"
+ "1) Coverage and breadth vs. read mismatches\n"
+ "2) Genome-wide microdiversity metrics\n"
+ "3) Read-level ANI distribution\n"
+ "4) Major allele frequencies\n"
+ "5) Linkage decay\n"
+ "6) Read filtering plots\n"
+ "7) Scaffold inspection plot (large)\n"
+ "8) Linkage with SNP type (GENES REQUIRED)\n"
+ "9) Gene histograms (GENES REQUIRED)\n"
+ "10) Compare dendrograms (RUN ON COMPARE; NOT PROFILE)\n",
nargs='*', default='a')
POflags = plot_parent.add_argument_group('OPTIONAL FIGURE ADJUSTMENTS')
POflags.add_argument("-mb", "--minimum_breadth", default=0.5, type=float,
help= "Minimum breadth of coverage for genome to make it into plot (from 0-1).")
POflags.add_argument("-g", "--genomes", nargs='*',
help= "Only plot genomes with the names provided in this argument")
plot_parser = subparsers.add_parser("plot",formatter_class=SmartFormatter,\
parents = [plot_parent, parent_parser], add_help=False)
'''
####### Arguments for quick_profile operation ######
'''
# Make a parent for profile to go above the system arguments
quick_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = quick_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
quick_parser = subparsers.add_parser("quick_profile",formatter_class=SmartFormatter,\
parents = [quick_parent, parent_parser], add_help=False)
# Other Parameters
Oflags = quick_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument('-s', '--stb', help="Scaffold to bin. This can be a file with each line listing a scaffold and a bin name, tab-seperated. This can also be a space-seperated list of .fasta files, with one genome per .fasta file. If nothing is provided, all scaffolds will be treated as belonging to the same genome",
nargs='*', default=[])
Oflags.add_argument("-o", "--output", action="store", \
help='Output prefix', default='QuickProfile')
Oflags.add_argument("--breadth_cutoff", type=float, default=0.5,
help='Minimum genome breadth to pull scaffolds')
Oflags.add_argument("--stringent_breadth_cutoff", type=float, default=0.00,
help='Minimum breadth to let scaffold into coverm raw results (done with greater than; NOT greater than or equal to)')
'''
####### Arguments for filter_reads operation ######
'''
# Make a parent for profile to go above the system arguments
reads_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = reads_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
Rflags.add_argument("-o", "--output", action="store", \
help='Location of folder to store read report(s)')
reads_parser = subparsers.add_parser("filter_reads",formatter_class=SmartFormatter,\
parents = [reads_parent, parent_parser, readfilter_parent,
readoutput_parent], add_help=False)
'''
####### Arguments for other operation ######
'''
other_parser = subparsers.add_parser("other",formatter_class=SmartFormatter,\
parents = [parent_parser], add_help=False)
# Other Parameters
Oflags = other_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument('--old_IS', help="Convert an old inStrain version object to the newer version.")
Oflags.add_argument('--run_statistics', help='Generate runtime reports for an inStrain run.')
# Dependency checker
dep_parser = subparsers.add_parser("check_deps", formatter_class=SmartFormatter)
'''
####### PARSE THE ARGUMENTS ######
'''
# Handle the situation where the user wants the raw help
if (len(args) == 0 or args[0] == '-h' or args[0] == '--help'):
printHelp()
sys.exit(0)
else:
return parser.parse_args(args)
| print('')
print(' ...::: inStrain v' + __version__ + ' :::...''')
print('''\
Matt Olm and Alex Crits-Christoph. MIT License. Banfield Lab, UC Berkeley.
Choose one of the operations below for more detailed help. See https://instrain.readthedocs.io for documentation.
Example: inStrain profile -h
Main operations:
profile -> Create an inStrain profile (microdiversity analysis) from a mapping file
compare -> Compare multiple inStrain profiles (popANI, coverage_overlap, etc.)
Auxiliary operations:
check_deps -> Print a list of dependencies, versions, and whether they're working
parse_annotations -> Run a number of outputs based a table of gene annotations
quick_profile -> Quickly calculate coverage and breadth of a mapping using coverM
filter_reads -> Commands related to filtering reads from .bam files
plot -> Make figures from the results of "profile" or "compare"
other -> Other miscellaneous operations
''') | identifier_body |
argumentParser.py | #!/usr/bin/env python
'''
inStrain - parse command-line arguemnts
'''
__author__ = "Matt Olm and Alex Crits-Christoph"
__license__ = "MIT"
__email__ = "mattolm@gmail.com"
__status__ = "Development"
import os
import sys
import argparse
# Get the version
from ._version import __version__
"""
########################################
# Argument Parsing #
########################################
"""
class SmartFormatter(argparse.ArgumentDefaultsHelpFormatter):
def _split_lines(self, text, width):
if text.startswith('R|'):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
def printHelp():
print('')
print(' ...::: inStrain v' + __version__ + ' :::...''')
print('''\
Matt Olm and Alex Crits-Christoph. MIT License. Banfield Lab, UC Berkeley.
Choose one of the operations below for more detailed help. See https://instrain.readthedocs.io for documentation.
Example: inStrain profile -h
Main operations:
profile -> Create an inStrain profile (microdiversity analysis) from a mapping file
compare -> Compare multiple inStrain profiles (popANI, coverage_overlap, etc.)
Auxiliary operations:
check_deps -> Print a list of dependencies, versions, and whether they're working
parse_annotations -> Run a number of outputs based a table of gene annotations
quick_profile -> Quickly calculate coverage and breadth of a mapping using coverM
filter_reads -> Commands related to filtering reads from .bam files
plot -> Make figures from the results of "profile" or "compare"
other -> Other miscellaneous operations
''')
def parse_args(args):
parser = argparse.ArgumentParser(formatter_class=SmartFormatter)
subparsers = parser.add_subparsers(help='Desired operation',dest='operation')
# Make a parent parser for all of the subparsers
parent_parser = argparse.ArgumentParser(add_help=False)
Bflags = parent_parser.add_argument_group('SYSTEM PARAMETERS')
Bflags.add_argument('-p','--processes',help='Number of processes to use',default=6,type=int)
Bflags.add_argument('-d','--debug',help='Make extra debugging output',default=False,
action= "store_true")
Bflags.add_argument("-h", "--help", action="help", help="show this help message and exit")
Bflags.add_argument(
"--version",
action="version",
version="inStrain version {version}".format(version=__version__))
# Make a parent parser for read filtering
readfilter_parent = argparse.ArgumentParser(add_help=False)
fiflags = readfilter_parent.add_argument_group('READ FILTERING OPTIONS')
fiflags.add_argument("-l", "--min_read_ani", action="store", default=0.95, type=float, \
help='Minimum percent identity of read pairs to consensus to use the reads. Must be >, not >=')
fiflags.add_argument("--min_mapq", action="store", default=-1, type=int,\
help='Minimum mapq score of EITHER read in a pair to use that pair. Must be >, not >=')
fiflags.add_argument("--max_insert_relative", action="store", default=3, type=float, \
help='Multiplier to determine maximum insert size between two reads - default is to use 3x median insert size. Must be >, not >=')
fiflags.add_argument("--min_insert", action="store", default=50, type=int,\
help='Minimum insert size between two reads - default is 50 bp. If two reads are 50bp each and overlap completely, their insert will be 50. Must be >, not >=')
fiflags.add_argument("--pairing_filter", help="R|How should paired reads be handled?\n" \
+ "paired_only = Only paired reads are retained\n" \
+ 'non_discordant = Keep all paired reads and singleton reads that map to a single scaffold\n' \
+ "all_reads = Keep all reads regardless of pairing status (NOT RECOMMENDED; See documentation for deatils)\n", \
default = "paired_only", choices={'paired_only', 'non_discordant', 'all_reads'})
fiflags.add_argument("--priority_reads", help='The location of a list ' \
+ "of reads that should be retained regardless of pairing status " \
+ "(for example long reads or merged reads). This can be a .fastq " \
+ "file or text file with list of read names (will assume file is " \
+ "compressed if ends in .gz", default=None)
# Make a parent parser for read output
readoutput_parent = argparse.ArgumentParser(add_help=False)
fiflags = readoutput_parent.add_argument_group('READ OUTPUT OPTIONS')
# fiflags.add_argument("-s", "--generate_sam", action="store", default=None, \
# help='Specify the location to write a .sam file with filtered reads only.')
fiflags.add_argument("--detailed_mapping_info", action="store_true", default=False, help='Make a detailed read report indicating deatils about each individual mapped read')
# Make a parent parser for SNV calling
variant_parent = argparse.ArgumentParser(add_help=False)
fiflags = variant_parent.add_argument_group('VARIANT CALLING OPTIONS')
fiflags.add_argument("-c", "--min_cov", action="store", default=5, type=int, \
help='Minimum coverage to call an variant')
fiflags.add_argument("-f", "--min_freq", action="store", default=0.05, type=float, \
help='Minimum SNP frequency to confirm a SNV (both this AND the FDR snp count cutoff must be true to call a SNP).')
fiflags.add_argument("-fdr", "--fdr", action="store", default=1e-6, type=float,\
help='SNP false discovery rate- based on simulation data with a 0.1 percent error rate (Q30)')
# Make a parent for profile_genes
genes_parent = argparse.ArgumentParser(add_help=False)
Rflags = genes_parent.add_argument_group('GENE PROFILING OPTIONS')
Rflags.add_argument("-g", "--gene_file", action="store", default=None, \
help='Path to prodigal .fna genes file. If file ends in .gb or .gbk, will treat as a genbank file (EXPERIMENTAL; the name of the gene must be in the gene qualifier)')
# Make a parent for genome_wide
geneomewide_parent = argparse.ArgumentParser(add_help=False)
Rflags = geneomewide_parent.add_argument_group('GENOME WIDE OPTIONS')
Rflags.add_argument('-s', '--stb', help="Scaffold to bin. This can be a file with each line listing a scaffold and a bin name, tab-seperated. This can also be a space-seperated list of .fasta files, with one genome per .fasta file. If nothing is provided, all scaffolds will be treated as belonging to the same genome",
nargs='*', default=[])
# Make a parent for handling mm
mm_parent = argparse.ArgumentParser(add_help=False)
Rflags = mm_parent.add_argument_group('READ ANI OPTIONS')
Rflags.add_argument('--mm_level', help="Create output files on the mm level (see documentation for info)",
action='store_true', default=False)
Rflags.add_argument('--skip_mm_profiling', action='store_true', default=False,\
help="Dont perform analysis on an mm level; saves RAM and time; impacts plots and raw_data")
'''
####### Arguments for profile operation ######
'''
# Make a parent for profile to go above the system arguments
profile_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = profile_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
# I/O Parameters
Iflags = profile_parent.add_argument_group('I/O PARAMETERS')
Iflags.add_argument("-o", "--output", action="store", default='inStrain', \
help='Output prefix')
Iflags.add_argument('--use_full_fasta_header', action='store_true', default=False,
help='Instead of using the fasta ID (space in header before space), use the full header. Needed for some mapping tools (including bbMap)')
Iflags.add_argument('--force_compress', action='store_true', default=False,
help='Force compression of all output files')
profile_parser = subparsers.add_parser("profile",formatter_class=SmartFormatter,\
parents = [profile_parent, parent_parser, readfilter_parent, readoutput_parent, variant_parent, genes_parent, geneomewide_parent, mm_parent], add_help=False)
# Other Parameters
Oflags = profile_parser.add_argument_group('PROFILE OPTIONS')
Oflags.add_argument('--database_mode', action='store_true', default=False,\
help="Set a number of parameters to values appropriate for mapping to a " \
+ "large fasta file. Will set: --min_read_ani 0.92 --skip_mm_profiling --min_genome_coverage 1")
Oflags.add_argument("--min_scaffold_reads", action="store", default=1, type=int,\
help='Minimum number of reads mapping to a scaffold to proceed with profiling it')
Oflags.add_argument("--min_genome_coverage", action="store", default=0, type=float,\
help='Minimum number of reads mapping to a genome to proceed with profiling it. MUST profile .stb if this is set')
Oflags.add_argument("--min_snp", action="store", default=20, \
help='Absolute minimum number of reads connecting two SNPs to calculate LD between them.')
Oflags.add_argument('--store_everything', action='store_true', default=False,\
help="Store intermediate dictionaries in the pickle file; will result in significantly more RAM and disk usage")
Oflags.add_argument("--scaffolds_to_profile", action="store",\
help='Path to a file containing a list of scaffolds to profile- if provided will ONLY profile those scaffolds')
Oflags.add_argument("--rarefied_coverage", action='store', default=50,\
help='When calculating nucleotide diversity, also calculate a rarefied version with this much coverage')
Oflags.add_argument('--window_length', action='store', default=10000, type=int,\
help='Break scaffolds into windows of this length when profiling')
# Other Parameters
Iflags = profile_parser.add_argument_group('OTHER OPTIONS')
Iflags.add_argument('--skip_genome_wide', action='store_true', default=False,\
help="Do not generate tables that consider groups of scaffolds belonging to genomes")
Iflags.add_argument('--skip_plot_generation', action='store_true', default=False,\
help="Do not make plots")
'''
####### Arguments for compare operation ######
'''
# Make a parent for profile to go above the system arguments
compare_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = compare_parent.add_argument_group('REQUIRED')
Rflags.add_argument('-i', '--input', help="A list of inStrain objects, all mapped to the same .fasta file",
nargs='*', required=True)
Rflags.add_argument("-o", "--output", action="store", default='instrainComparer', \
help='Output prefix')
compare_parser = subparsers.add_parser("compare",formatter_class=SmartFormatter,\
parents = [compare_parent, parent_parser, geneomewide_parent, variant_parent], add_help=False)
# Database mode parameters
Dflags = compare_parser.add_argument_group('DATABASE MODE PARAMETERS')
Dflags.add_argument('--database_mode', action='store_true', help=
"Using the parameters below, automatically determine which genomes are present in each Profile "
"and only compare scaffolds from those genomes. All profiles must have run Profile with the same .stb")
Dflags.add_argument('--breadth', default=0.5, type=float, help='Minimum breadth_minCov required to count a genome present')
# Other Parameters
Oflags = compare_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument("-sc", "--scaffolds", action="store",
help='Location to a list of scaffolds to compare. You can also make this a .fasta file and it will load the scaffold names')
Oflags.add_argument("--genome", action="store",
help='Run scaffolds belonging to this single genome only. Must provide an .stb file')
Oflags.add_argument('--store_coverage_overlap', action='store_true', default=False,\
help="Also store coverage overlap on an mm level")
Oflags.add_argument('--store_mismatch_locations', action='store_true', default=False,\
help="Store the locations of SNPs")
Oflags.add_argument('--include_self_comparisons', action='store_true', default=False,\
help="Also compare IS profiles against themself")
Oflags.add_argument('--skip_plot_generation', action='store_true', default=False, \
help="Dont create plots at the end of the run.")
Oflags.add_argument('--group_length', default=10000000,
help="How many bp to compare simultaneously (higher will use more RAM and run more quickly)", type=int)
Oflags.add_argument('--force_compress', action='store_true', default=False,
help='Force compression of all output files')
Cflags = compare_parser.add_argument_group('GENOME CLUSTERING OPTIONS')
Cflags.add_argument('-ani', "--ani_threshold", help='popANI threshold to cluster genomes at. Must provide .stb file to do so',
default=0.99999, type=float)
Cflags.add_argument('-cov', "--coverage_treshold", help='Minimum percent_genome_compared for a genome comparison' \
' to count; if below the popANI will be set to 0.', default=0.1, type=float)
Cflags.add_argument("--clusterAlg", help="Algorithm used to cluster genomes (passed\
to scipy.cluster.hierarchy.linkage)", default='average',
choices={'single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward'})
Bflags = compare_parser.add_argument_group('SNV POOLING OPTIONS')
Bflags.add_argument('-bams', "--bams",
help='Location of .bam files used during inStrain profile commands; needed to pull low-frequency SNVs.' \
' MUST BE IN SAME ORDER AS THE INPUT FILES',
nargs='*')
Bflags.add_argument('--skip_popANI', action='store_true', default=False,\
help="Only run SNV Pooling; skip other compare operations")
# Gflags = compare_parser.add_argument_group('GREEDY CLUSTERING OPTIONS [THIS SECTION IS EXPERIMENTAL!]')
# Gflags.add_argument('--greedy_clustering', action='store_true', default=False,\
# help="Dont do pair-wise comparisons, do greedy clustering to only find the number of clsuters. If this is set, use the parameters below as well")
# Gflags.add_argument('--g_ani', action='store', default=0.99, type=float,\
# help="ANI threshold for greedy clustering- put the fraction not the percentage (e.g. 0.99, not 99)")
# Gflags.add_argument('--g_cov', action='store', default=0.99, type=float,\
# help="Alignment coverage for greedy clustering- put the fraction not the percentage (e.g. 0.5, not 10)")
# Gflags.add_argument('--g_mm', action='store', default=100, type=int,\
# help="Maximum read mismatch level")
'''
####### Arguments for parse_annotations operation ######
'''
parse_anno_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = parse_anno_parent.add_argument_group('REQUIRED')
Rflags.add_argument('-i', '--input', help="A list of inStrain objects, all mapped to the same .fasta file",
nargs='*', required=True)
Rflags.add_argument('-a', '--annotations', help="R|A table or set of tables with gene annotations.\nMust be be a .csv file with two columns- `gene` and `anno`. See inStrain documentation for details\n(https://instrain.readthedocs.io/en/latest/user_manual.html#parse-annotations)",
nargs='*', required=True)
Rflags.add_argument("-o", "--output", action="store", default='annotation_output', \
help='Output prefix')
parse_anno_parser = subparsers.add_parser("parse_annotations", formatter_class=SmartFormatter, \
parents=[parse_anno_parent, parent_parser],
add_help=False)
# Other Parameters
Oflags = parse_anno_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument("-b", "--min_genome_breadth", action="store", default=0.5, type=float, \
help='Only annotate genomes on genomes with at least this genome breadth. Requires having genomes called. Set to 0 to include all genes.')
Oflags.add_argument("-g", "--min_gene_breadth", action="store", default=0.8, type=float, \
help='Only annotate genes with at least this breadth. Set to 0 to include all genes.')
Oflags.add_argument('--store_rawdata', action='store_true', default=False,\
help="Store the raw data dictionary")
'''
####### Arguments for profile_genes operation ######
'''
# Make a parent for profile to go above the system arguments
genes_io = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = genes_io.add_argument_group('INPUT / OUTPUT')
Rflags.add_argument("-i", '--IS', help="an inStrain profile object", required=True)
Rflags.add_argument('--store_everything', action='store_true', default=False,\
help="Store gene sequences in the IS object")
genes_parser = subparsers.add_parser("profile_genes",formatter_class=SmartFormatter,\
parents = [genes_parent, genes_io, parent_parser], add_help=False)
'''
####### Arguments for genome_wide operation ######
'''
# Make a parent for profile to go above the system arguments
genome_parser = subparsers.add_parser("genome_wide",formatter_class=SmartFormatter,\
parents = [geneomewide_parent, genes_io, mm_parent, parent_parser], add_help=False)
'''
####### Arguments for plot operation ######
'''
# Make a parent for profile to go above the system arguments
plot_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = plot_parent.add_argument_group('REQUIRED')
Rflags.add_argument("-i", '--IS', help="an inStrain profile object", required=True)
Rflags.add_argument("-pl", "--plots", help= "R|Plots. "
+ "Input 'all' or 'a' to plot all\n"
+ "1) Coverage and breadth vs. read mismatches\n"
+ "2) Genome-wide microdiversity metrics\n"
+ "3) Read-level ANI distribution\n"
+ "4) Major allele frequencies\n"
+ "5) Linkage decay\n"
+ "6) Read filtering plots\n"
+ "7) Scaffold inspection plot (large)\n"
+ "8) Linkage with SNP type (GENES REQUIRED)\n"
+ "9) Gene histograms (GENES REQUIRED)\n"
+ "10) Compare dendrograms (RUN ON COMPARE; NOT PROFILE)\n",
nargs='*', default='a')
POflags = plot_parent.add_argument_group('OPTIONAL FIGURE ADJUSTMENTS')
POflags.add_argument("-mb", "--minimum_breadth", default=0.5, type=float,
help= "Minimum breadth of coverage for genome to make it into plot (from 0-1).")
POflags.add_argument("-g", "--genomes", nargs='*',
help= "Only plot genomes with the names provided in this argument")
plot_parser = subparsers.add_parser("plot",formatter_class=SmartFormatter,\
parents = [plot_parent, parent_parser], add_help=False)
'''
####### Arguments for quick_profile operation ######
'''
# Make a parent for profile to go above the system arguments
quick_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = quick_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
quick_parser = subparsers.add_parser("quick_profile",formatter_class=SmartFormatter,\
parents = [quick_parent, parent_parser], add_help=False)
# Other Parameters
Oflags = quick_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument('-s', '--stb', help="Scaffold to bin. This can be a file with each line listing a scaffold and a bin name, tab-seperated. This can also be a space-seperated list of .fasta files, with one genome per .fasta file. If nothing is provided, all scaffolds will be treated as belonging to the same genome",
nargs='*', default=[])
Oflags.add_argument("-o", "--output", action="store", \
help='Output prefix', default='QuickProfile')
Oflags.add_argument("--breadth_cutoff", type=float, default=0.5,
help='Minimum genome breadth to pull scaffolds')
Oflags.add_argument("--stringent_breadth_cutoff", type=float, default=0.00,
help='Minimum breadth to let scaffold into coverm raw results (done with greater than; NOT greater than or equal to)')
'''
####### Arguments for filter_reads operation ######
'''
# Make a parent for profile to go above the system arguments
reads_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = reads_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
Rflags.add_argument("-o", "--output", action="store", \
help='Location of folder to store read report(s)')
reads_parser = subparsers.add_parser("filter_reads",formatter_class=SmartFormatter,\
parents = [reads_parent, parent_parser, readfilter_parent,
readoutput_parent], add_help=False)
'''
####### Arguments for other operation ######
'''
other_parser = subparsers.add_parser("other",formatter_class=SmartFormatter,\
parents = [parent_parser], add_help=False)
# Other Parameters
Oflags = other_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument('--old_IS', help="Convert an old inStrain version object to the newer version.")
Oflags.add_argument('--run_statistics', help='Generate runtime reports for an inStrain run.')
# Dependency checker
dep_parser = subparsers.add_parser("check_deps", formatter_class=SmartFormatter)
'''
####### PARSE THE ARGUMENTS ######
'''
# Handle the situation where the user wants the raw help
if (len(args) == 0 or args[0] == '-h' or args[0] == '--help'):
|
else:
return parser.parse_args(args)
| printHelp()
sys.exit(0) | conditional_block |
argumentParser.py | #!/usr/bin/env python
'''
inStrain - parse command-line arguemnts
'''
__author__ = "Matt Olm and Alex Crits-Christoph"
__license__ = "MIT"
__email__ = "mattolm@gmail.com"
__status__ = "Development"
import os
import sys
import argparse
# Get the version
from ._version import __version__
"""
########################################
# Argument Parsing #
########################################
"""
class SmartFormatter(argparse.ArgumentDefaultsHelpFormatter):
def _split_lines(self, text, width):
if text.startswith('R|'):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
def printHelp():
print('')
print(' ...::: inStrain v' + __version__ + ' :::...''')
print('''\
Matt Olm and Alex Crits-Christoph. MIT License. Banfield Lab, UC Berkeley.
Choose one of the operations below for more detailed help. See https://instrain.readthedocs.io for documentation.
Example: inStrain profile -h
Main operations:
profile -> Create an inStrain profile (microdiversity analysis) from a mapping file
compare -> Compare multiple inStrain profiles (popANI, coverage_overlap, etc.)
Auxiliary operations:
check_deps -> Print a list of dependencies, versions, and whether they're working
parse_annotations -> Run a number of outputs based a table of gene annotations
quick_profile -> Quickly calculate coverage and breadth of a mapping using coverM
filter_reads -> Commands related to filtering reads from .bam files
plot -> Make figures from the results of "profile" or "compare"
other -> Other miscellaneous operations
''')
def parse_args(args):
parser = argparse.ArgumentParser(formatter_class=SmartFormatter)
subparsers = parser.add_subparsers(help='Desired operation',dest='operation')
# Make a parent parser for all of the subparsers
parent_parser = argparse.ArgumentParser(add_help=False)
Bflags = parent_parser.add_argument_group('SYSTEM PARAMETERS')
Bflags.add_argument('-p','--processes',help='Number of processes to use',default=6,type=int)
Bflags.add_argument('-d','--debug',help='Make extra debugging output',default=False,
action= "store_true")
Bflags.add_argument("-h", "--help", action="help", help="show this help message and exit")
Bflags.add_argument(
"--version",
action="version",
version="inStrain version {version}".format(version=__version__))
# Make a parent parser for read filtering
readfilter_parent = argparse.ArgumentParser(add_help=False)
fiflags = readfilter_parent.add_argument_group('READ FILTERING OPTIONS')
fiflags.add_argument("-l", "--min_read_ani", action="store", default=0.95, type=float, \
help='Minimum percent identity of read pairs to consensus to use the reads. Must be >, not >=')
fiflags.add_argument("--min_mapq", action="store", default=-1, type=int,\
help='Minimum mapq score of EITHER read in a pair to use that pair. Must be >, not >=')
fiflags.add_argument("--max_insert_relative", action="store", default=3, type=float, \
help='Multiplier to determine maximum insert size between two reads - default is to use 3x median insert size. Must be >, not >=')
fiflags.add_argument("--min_insert", action="store", default=50, type=int,\
help='Minimum insert size between two reads - default is 50 bp. If two reads are 50bp each and overlap completely, their insert will be 50. Must be >, not >=')
fiflags.add_argument("--pairing_filter", help="R|How should paired reads be handled?\n" \
+ "paired_only = Only paired reads are retained\n" \
+ 'non_discordant = Keep all paired reads and singleton reads that map to a single scaffold\n' \
+ "all_reads = Keep all reads regardless of pairing status (NOT RECOMMENDED; See documentation for deatils)\n", \
default = "paired_only", choices={'paired_only', 'non_discordant', 'all_reads'})
fiflags.add_argument("--priority_reads", help='The location of a list ' \
+ "of reads that should be retained regardless of pairing status " \
+ "(for example long reads or merged reads). This can be a .fastq " \ | + "file or text file with list of read names (will assume file is " \
+ "compressed if ends in .gz", default=None)
# Make a parent parser for read output
readoutput_parent = argparse.ArgumentParser(add_help=False)
fiflags = readoutput_parent.add_argument_group('READ OUTPUT OPTIONS')
# fiflags.add_argument("-s", "--generate_sam", action="store", default=None, \
# help='Specify the location to write a .sam file with filtered reads only.')
fiflags.add_argument("--detailed_mapping_info", action="store_true", default=False, help='Make a detailed read report indicating deatils about each individual mapped read')
# Make a parent parser for SNV calling
variant_parent = argparse.ArgumentParser(add_help=False)
fiflags = variant_parent.add_argument_group('VARIANT CALLING OPTIONS')
fiflags.add_argument("-c", "--min_cov", action="store", default=5, type=int, \
help='Minimum coverage to call an variant')
fiflags.add_argument("-f", "--min_freq", action="store", default=0.05, type=float, \
help='Minimum SNP frequency to confirm a SNV (both this AND the FDR snp count cutoff must be true to call a SNP).')
fiflags.add_argument("-fdr", "--fdr", action="store", default=1e-6, type=float,\
help='SNP false discovery rate- based on simulation data with a 0.1 percent error rate (Q30)')
# Make a parent for profile_genes
genes_parent = argparse.ArgumentParser(add_help=False)
Rflags = genes_parent.add_argument_group('GENE PROFILING OPTIONS')
Rflags.add_argument("-g", "--gene_file", action="store", default=None, \
help='Path to prodigal .fna genes file. If file ends in .gb or .gbk, will treat as a genbank file (EXPERIMENTAL; the name of the gene must be in the gene qualifier)')
# Make a parent for genome_wide
geneomewide_parent = argparse.ArgumentParser(add_help=False)
Rflags = geneomewide_parent.add_argument_group('GENOME WIDE OPTIONS')
Rflags.add_argument('-s', '--stb', help="Scaffold to bin. This can be a file with each line listing a scaffold and a bin name, tab-seperated. This can also be a space-seperated list of .fasta files, with one genome per .fasta file. If nothing is provided, all scaffolds will be treated as belonging to the same genome",
nargs='*', default=[])
# Make a parent for handling mm
mm_parent = argparse.ArgumentParser(add_help=False)
Rflags = mm_parent.add_argument_group('READ ANI OPTIONS')
Rflags.add_argument('--mm_level', help="Create output files on the mm level (see documentation for info)",
action='store_true', default=False)
Rflags.add_argument('--skip_mm_profiling', action='store_true', default=False,\
help="Dont perform analysis on an mm level; saves RAM and time; impacts plots and raw_data")
'''
####### Arguments for profile operation ######
'''
# Make a parent for profile to go above the system arguments
profile_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = profile_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
# I/O Parameters
Iflags = profile_parent.add_argument_group('I/O PARAMETERS')
Iflags.add_argument("-o", "--output", action="store", default='inStrain', \
help='Output prefix')
Iflags.add_argument('--use_full_fasta_header', action='store_true', default=False,
help='Instead of using the fasta ID (space in header before space), use the full header. Needed for some mapping tools (including bbMap)')
Iflags.add_argument('--force_compress', action='store_true', default=False,
help='Force compression of all output files')
profile_parser = subparsers.add_parser("profile",formatter_class=SmartFormatter,\
parents = [profile_parent, parent_parser, readfilter_parent, readoutput_parent, variant_parent, genes_parent, geneomewide_parent, mm_parent], add_help=False)
# Other Parameters
Oflags = profile_parser.add_argument_group('PROFILE OPTIONS')
Oflags.add_argument('--database_mode', action='store_true', default=False,\
help="Set a number of parameters to values appropriate for mapping to a " \
+ "large fasta file. Will set: --min_read_ani 0.92 --skip_mm_profiling --min_genome_coverage 1")
Oflags.add_argument("--min_scaffold_reads", action="store", default=1, type=int,\
help='Minimum number of reads mapping to a scaffold to proceed with profiling it')
Oflags.add_argument("--min_genome_coverage", action="store", default=0, type=float,\
help='Minimum number of reads mapping to a genome to proceed with profiling it. MUST profile .stb if this is set')
Oflags.add_argument("--min_snp", action="store", default=20, \
help='Absolute minimum number of reads connecting two SNPs to calculate LD between them.')
Oflags.add_argument('--store_everything', action='store_true', default=False,\
help="Store intermediate dictionaries in the pickle file; will result in significantly more RAM and disk usage")
Oflags.add_argument("--scaffolds_to_profile", action="store",\
help='Path to a file containing a list of scaffolds to profile- if provided will ONLY profile those scaffolds')
Oflags.add_argument("--rarefied_coverage", action='store', default=50,\
help='When calculating nucleotide diversity, also calculate a rarefied version with this much coverage')
Oflags.add_argument('--window_length', action='store', default=10000, type=int,\
help='Break scaffolds into windows of this length when profiling')
# Other Parameters
Iflags = profile_parser.add_argument_group('OTHER OPTIONS')
Iflags.add_argument('--skip_genome_wide', action='store_true', default=False,\
help="Do not generate tables that consider groups of scaffolds belonging to genomes")
Iflags.add_argument('--skip_plot_generation', action='store_true', default=False,\
help="Do not make plots")
'''
####### Arguments for compare operation ######
'''
# Make a parent for profile to go above the system arguments
compare_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = compare_parent.add_argument_group('REQUIRED')
Rflags.add_argument('-i', '--input', help="A list of inStrain objects, all mapped to the same .fasta file",
nargs='*', required=True)
Rflags.add_argument("-o", "--output", action="store", default='instrainComparer', \
help='Output prefix')
compare_parser = subparsers.add_parser("compare",formatter_class=SmartFormatter,\
parents = [compare_parent, parent_parser, geneomewide_parent, variant_parent], add_help=False)
# Database mode parameters
Dflags = compare_parser.add_argument_group('DATABASE MODE PARAMETERS')
Dflags.add_argument('--database_mode', action='store_true', help=
"Using the parameters below, automatically determine which genomes are present in each Profile "
"and only compare scaffolds from those genomes. All profiles must have run Profile with the same .stb")
Dflags.add_argument('--breadth', default=0.5, type=float, help='Minimum breadth_minCov required to count a genome present')
# Other Parameters
Oflags = compare_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument("-sc", "--scaffolds", action="store",
help='Location to a list of scaffolds to compare. You can also make this a .fasta file and it will load the scaffold names')
Oflags.add_argument("--genome", action="store",
help='Run scaffolds belonging to this single genome only. Must provide an .stb file')
Oflags.add_argument('--store_coverage_overlap', action='store_true', default=False,\
help="Also store coverage overlap on an mm level")
Oflags.add_argument('--store_mismatch_locations', action='store_true', default=False,\
help="Store the locations of SNPs")
Oflags.add_argument('--include_self_comparisons', action='store_true', default=False,\
help="Also compare IS profiles against themself")
Oflags.add_argument('--skip_plot_generation', action='store_true', default=False, \
help="Dont create plots at the end of the run.")
Oflags.add_argument('--group_length', default=10000000,
help="How many bp to compare simultaneously (higher will use more RAM and run more quickly)", type=int)
Oflags.add_argument('--force_compress', action='store_true', default=False,
help='Force compression of all output files')
Cflags = compare_parser.add_argument_group('GENOME CLUSTERING OPTIONS')
Cflags.add_argument('-ani', "--ani_threshold", help='popANI threshold to cluster genomes at. Must provide .stb file to do so',
default=0.99999, type=float)
Cflags.add_argument('-cov', "--coverage_treshold", help='Minimum percent_genome_compared for a genome comparison' \
' to count; if below the popANI will be set to 0.', default=0.1, type=float)
Cflags.add_argument("--clusterAlg", help="Algorithm used to cluster genomes (passed\
to scipy.cluster.hierarchy.linkage)", default='average',
choices={'single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward'})
Bflags = compare_parser.add_argument_group('SNV POOLING OPTIONS')
Bflags.add_argument('-bams', "--bams",
help='Location of .bam files used during inStrain profile commands; needed to pull low-frequency SNVs.' \
' MUST BE IN SAME ORDER AS THE INPUT FILES',
nargs='*')
Bflags.add_argument('--skip_popANI', action='store_true', default=False,\
help="Only run SNV Pooling; skip other compare operations")
# Gflags = compare_parser.add_argument_group('GREEDY CLUSTERING OPTIONS [THIS SECTION IS EXPERIMENTAL!]')
# Gflags.add_argument('--greedy_clustering', action='store_true', default=False,\
# help="Dont do pair-wise comparisons, do greedy clustering to only find the number of clsuters. If this is set, use the parameters below as well")
# Gflags.add_argument('--g_ani', action='store', default=0.99, type=float,\
# help="ANI threshold for greedy clustering- put the fraction not the percentage (e.g. 0.99, not 99)")
# Gflags.add_argument('--g_cov', action='store', default=0.99, type=float,\
# help="Alignment coverage for greedy clustering- put the fraction not the percentage (e.g. 0.5, not 10)")
# Gflags.add_argument('--g_mm', action='store', default=100, type=int,\
# help="Maximum read mismatch level")
'''
####### Arguments for parse_annotations operation ######
'''
parse_anno_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = parse_anno_parent.add_argument_group('REQUIRED')
Rflags.add_argument('-i', '--input', help="A list of inStrain objects, all mapped to the same .fasta file",
nargs='*', required=True)
Rflags.add_argument('-a', '--annotations', help="R|A table or set of tables with gene annotations.\nMust be be a .csv file with two columns- `gene` and `anno`. See inStrain documentation for details\n(https://instrain.readthedocs.io/en/latest/user_manual.html#parse-annotations)",
nargs='*', required=True)
Rflags.add_argument("-o", "--output", action="store", default='annotation_output', \
help='Output prefix')
parse_anno_parser = subparsers.add_parser("parse_annotations", formatter_class=SmartFormatter, \
parents=[parse_anno_parent, parent_parser],
add_help=False)
# Other Parameters
Oflags = parse_anno_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument("-b", "--min_genome_breadth", action="store", default=0.5, type=float, \
help='Only annotate genomes on genomes with at least this genome breadth. Requires having genomes called. Set to 0 to include all genes.')
Oflags.add_argument("-g", "--min_gene_breadth", action="store", default=0.8, type=float, \
help='Only annotate genes with at least this breadth. Set to 0 to include all genes.')
Oflags.add_argument('--store_rawdata', action='store_true', default=False,\
help="Store the raw data dictionary")
'''
####### Arguments for profile_genes operation ######
'''
# Make a parent for profile to go above the system arguments
genes_io = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = genes_io.add_argument_group('INPUT / OUTPUT')
Rflags.add_argument("-i", '--IS', help="an inStrain profile object", required=True)
Rflags.add_argument('--store_everything', action='store_true', default=False,\
help="Store gene sequences in the IS object")
genes_parser = subparsers.add_parser("profile_genes",formatter_class=SmartFormatter,\
parents = [genes_parent, genes_io, parent_parser], add_help=False)
'''
####### Arguments for genome_wide operation ######
'''
# Make a parent for profile to go above the system arguments
genome_parser = subparsers.add_parser("genome_wide",formatter_class=SmartFormatter,\
parents = [geneomewide_parent, genes_io, mm_parent, parent_parser], add_help=False)
'''
####### Arguments for plot operation ######
'''
# Make a parent for profile to go above the system arguments
plot_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = plot_parent.add_argument_group('REQUIRED')
Rflags.add_argument("-i", '--IS', help="an inStrain profile object", required=True)
Rflags.add_argument("-pl", "--plots", help= "R|Plots. "
+ "Input 'all' or 'a' to plot all\n"
+ "1) Coverage and breadth vs. read mismatches\n"
+ "2) Genome-wide microdiversity metrics\n"
+ "3) Read-level ANI distribution\n"
+ "4) Major allele frequencies\n"
+ "5) Linkage decay\n"
+ "6) Read filtering plots\n"
+ "7) Scaffold inspection plot (large)\n"
+ "8) Linkage with SNP type (GENES REQUIRED)\n"
+ "9) Gene histograms (GENES REQUIRED)\n"
+ "10) Compare dendrograms (RUN ON COMPARE; NOT PROFILE)\n",
nargs='*', default='a')
POflags = plot_parent.add_argument_group('OPTIONAL FIGURE ADJUSTMENTS')
POflags.add_argument("-mb", "--minimum_breadth", default=0.5, type=float,
help= "Minimum breadth of coverage for genome to make it into plot (from 0-1).")
POflags.add_argument("-g", "--genomes", nargs='*',
help= "Only plot genomes with the names provided in this argument")
plot_parser = subparsers.add_parser("plot",formatter_class=SmartFormatter,\
parents = [plot_parent, parent_parser], add_help=False)
'''
####### Arguments for quick_profile operation ######
'''
# Make a parent for profile to go above the system arguments
quick_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = quick_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
quick_parser = subparsers.add_parser("quick_profile",formatter_class=SmartFormatter,\
parents = [quick_parent, parent_parser], add_help=False)
# Other Parameters
Oflags = quick_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument('-s', '--stb', help="Scaffold to bin. This can be a file with each line listing a scaffold and a bin name, tab-seperated. This can also be a space-seperated list of .fasta files, with one genome per .fasta file. If nothing is provided, all scaffolds will be treated as belonging to the same genome",
nargs='*', default=[])
Oflags.add_argument("-o", "--output", action="store", \
help='Output prefix', default='QuickProfile')
Oflags.add_argument("--breadth_cutoff", type=float, default=0.5,
help='Minimum genome breadth to pull scaffolds')
Oflags.add_argument("--stringent_breadth_cutoff", type=float, default=0.00,
help='Minimum breadth to let scaffold into coverm raw results (done with greater than; NOT greater than or equal to)')
'''
####### Arguments for filter_reads operation ######
'''
# Make a parent for profile to go above the system arguments
reads_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = reads_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
Rflags.add_argument("-o", "--output", action="store", \
help='Location of folder to store read report(s)')
reads_parser = subparsers.add_parser("filter_reads",formatter_class=SmartFormatter,\
parents = [reads_parent, parent_parser, readfilter_parent,
readoutput_parent], add_help=False)
'''
####### Arguments for other operation ######
'''
other_parser = subparsers.add_parser("other",formatter_class=SmartFormatter,\
parents = [parent_parser], add_help=False)
# Other Parameters
Oflags = other_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument('--old_IS', help="Convert an old inStrain version object to the newer version.")
Oflags.add_argument('--run_statistics', help='Generate runtime reports for an inStrain run.')
# Dependency checker
dep_parser = subparsers.add_parser("check_deps", formatter_class=SmartFormatter)
'''
####### PARSE THE ARGUMENTS ######
'''
# Handle the situation where the user wants the raw help
if (len(args) == 0 or args[0] == '-h' or args[0] == '--help'):
printHelp()
sys.exit(0)
else:
return parser.parse_args(args) | random_line_split | |
argumentParser.py | #!/usr/bin/env python
'''
inStrain - parse command-line arguemnts
'''
__author__ = "Matt Olm and Alex Crits-Christoph"
__license__ = "MIT"
__email__ = "mattolm@gmail.com"
__status__ = "Development"
import os
import sys
import argparse
# Get the version
from ._version import __version__
"""
########################################
# Argument Parsing #
########################################
"""
class SmartFormatter(argparse.ArgumentDefaultsHelpFormatter):
def | (self, text, width):
if text.startswith('R|'):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
def printHelp():
print('')
print(' ...::: inStrain v' + __version__ + ' :::...''')
print('''\
Matt Olm and Alex Crits-Christoph. MIT License. Banfield Lab, UC Berkeley.
Choose one of the operations below for more detailed help. See https://instrain.readthedocs.io for documentation.
Example: inStrain profile -h
Main operations:
profile -> Create an inStrain profile (microdiversity analysis) from a mapping file
compare -> Compare multiple inStrain profiles (popANI, coverage_overlap, etc.)
Auxiliary operations:
check_deps -> Print a list of dependencies, versions, and whether they're working
parse_annotations -> Run a number of outputs based a table of gene annotations
quick_profile -> Quickly calculate coverage and breadth of a mapping using coverM
filter_reads -> Commands related to filtering reads from .bam files
plot -> Make figures from the results of "profile" or "compare"
other -> Other miscellaneous operations
''')
def parse_args(args):
parser = argparse.ArgumentParser(formatter_class=SmartFormatter)
subparsers = parser.add_subparsers(help='Desired operation',dest='operation')
# Make a parent parser for all of the subparsers
parent_parser = argparse.ArgumentParser(add_help=False)
Bflags = parent_parser.add_argument_group('SYSTEM PARAMETERS')
Bflags.add_argument('-p','--processes',help='Number of processes to use',default=6,type=int)
Bflags.add_argument('-d','--debug',help='Make extra debugging output',default=False,
action= "store_true")
Bflags.add_argument("-h", "--help", action="help", help="show this help message and exit")
Bflags.add_argument(
"--version",
action="version",
version="inStrain version {version}".format(version=__version__))
# Make a parent parser for read filtering
readfilter_parent = argparse.ArgumentParser(add_help=False)
fiflags = readfilter_parent.add_argument_group('READ FILTERING OPTIONS')
fiflags.add_argument("-l", "--min_read_ani", action="store", default=0.95, type=float, \
help='Minimum percent identity of read pairs to consensus to use the reads. Must be >, not >=')
fiflags.add_argument("--min_mapq", action="store", default=-1, type=int,\
help='Minimum mapq score of EITHER read in a pair to use that pair. Must be >, not >=')
fiflags.add_argument("--max_insert_relative", action="store", default=3, type=float, \
help='Multiplier to determine maximum insert size between two reads - default is to use 3x median insert size. Must be >, not >=')
fiflags.add_argument("--min_insert", action="store", default=50, type=int,\
help='Minimum insert size between two reads - default is 50 bp. If two reads are 50bp each and overlap completely, their insert will be 50. Must be >, not >=')
fiflags.add_argument("--pairing_filter", help="R|How should paired reads be handled?\n" \
+ "paired_only = Only paired reads are retained\n" \
+ 'non_discordant = Keep all paired reads and singleton reads that map to a single scaffold\n' \
+ "all_reads = Keep all reads regardless of pairing status (NOT RECOMMENDED; See documentation for deatils)\n", \
default = "paired_only", choices={'paired_only', 'non_discordant', 'all_reads'})
fiflags.add_argument("--priority_reads", help='The location of a list ' \
+ "of reads that should be retained regardless of pairing status " \
+ "(for example long reads or merged reads). This can be a .fastq " \
+ "file or text file with list of read names (will assume file is " \
+ "compressed if ends in .gz", default=None)
# Make a parent parser for read output
readoutput_parent = argparse.ArgumentParser(add_help=False)
fiflags = readoutput_parent.add_argument_group('READ OUTPUT OPTIONS')
# fiflags.add_argument("-s", "--generate_sam", action="store", default=None, \
# help='Specify the location to write a .sam file with filtered reads only.')
fiflags.add_argument("--detailed_mapping_info", action="store_true", default=False, help='Make a detailed read report indicating deatils about each individual mapped read')
# Make a parent parser for SNV calling
variant_parent = argparse.ArgumentParser(add_help=False)
fiflags = variant_parent.add_argument_group('VARIANT CALLING OPTIONS')
fiflags.add_argument("-c", "--min_cov", action="store", default=5, type=int, \
help='Minimum coverage to call an variant')
fiflags.add_argument("-f", "--min_freq", action="store", default=0.05, type=float, \
help='Minimum SNP frequency to confirm a SNV (both this AND the FDR snp count cutoff must be true to call a SNP).')
fiflags.add_argument("-fdr", "--fdr", action="store", default=1e-6, type=float,\
help='SNP false discovery rate- based on simulation data with a 0.1 percent error rate (Q30)')
# Make a parent for profile_genes
genes_parent = argparse.ArgumentParser(add_help=False)
Rflags = genes_parent.add_argument_group('GENE PROFILING OPTIONS')
Rflags.add_argument("-g", "--gene_file", action="store", default=None, \
help='Path to prodigal .fna genes file. If file ends in .gb or .gbk, will treat as a genbank file (EXPERIMENTAL; the name of the gene must be in the gene qualifier)')
# Make a parent for genome_wide
geneomewide_parent = argparse.ArgumentParser(add_help=False)
Rflags = geneomewide_parent.add_argument_group('GENOME WIDE OPTIONS')
Rflags.add_argument('-s', '--stb', help="Scaffold to bin. This can be a file with each line listing a scaffold and a bin name, tab-seperated. This can also be a space-seperated list of .fasta files, with one genome per .fasta file. If nothing is provided, all scaffolds will be treated as belonging to the same genome",
nargs='*', default=[])
# Make a parent for handling mm
mm_parent = argparse.ArgumentParser(add_help=False)
Rflags = mm_parent.add_argument_group('READ ANI OPTIONS')
Rflags.add_argument('--mm_level', help="Create output files on the mm level (see documentation for info)",
action='store_true', default=False)
Rflags.add_argument('--skip_mm_profiling', action='store_true', default=False,\
help="Dont perform analysis on an mm level; saves RAM and time; impacts plots and raw_data")
'''
####### Arguments for profile operation ######
'''
# Make a parent for profile to go above the system arguments
profile_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = profile_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
# I/O Parameters
Iflags = profile_parent.add_argument_group('I/O PARAMETERS')
Iflags.add_argument("-o", "--output", action="store", default='inStrain', \
help='Output prefix')
Iflags.add_argument('--use_full_fasta_header', action='store_true', default=False,
help='Instead of using the fasta ID (space in header before space), use the full header. Needed for some mapping tools (including bbMap)')
Iflags.add_argument('--force_compress', action='store_true', default=False,
help='Force compression of all output files')
profile_parser = subparsers.add_parser("profile",formatter_class=SmartFormatter,\
parents = [profile_parent, parent_parser, readfilter_parent, readoutput_parent, variant_parent, genes_parent, geneomewide_parent, mm_parent], add_help=False)
# Other Parameters
Oflags = profile_parser.add_argument_group('PROFILE OPTIONS')
Oflags.add_argument('--database_mode', action='store_true', default=False,\
help="Set a number of parameters to values appropriate for mapping to a " \
+ "large fasta file. Will set: --min_read_ani 0.92 --skip_mm_profiling --min_genome_coverage 1")
Oflags.add_argument("--min_scaffold_reads", action="store", default=1, type=int,\
help='Minimum number of reads mapping to a scaffold to proceed with profiling it')
Oflags.add_argument("--min_genome_coverage", action="store", default=0, type=float,\
help='Minimum number of reads mapping to a genome to proceed with profiling it. MUST profile .stb if this is set')
Oflags.add_argument("--min_snp", action="store", default=20, \
help='Absolute minimum number of reads connecting two SNPs to calculate LD between them.')
Oflags.add_argument('--store_everything', action='store_true', default=False,\
help="Store intermediate dictionaries in the pickle file; will result in significantly more RAM and disk usage")
Oflags.add_argument("--scaffolds_to_profile", action="store",\
help='Path to a file containing a list of scaffolds to profile- if provided will ONLY profile those scaffolds')
Oflags.add_argument("--rarefied_coverage", action='store', default=50,\
help='When calculating nucleotide diversity, also calculate a rarefied version with this much coverage')
Oflags.add_argument('--window_length', action='store', default=10000, type=int,\
help='Break scaffolds into windows of this length when profiling')
# Other Parameters
Iflags = profile_parser.add_argument_group('OTHER OPTIONS')
Iflags.add_argument('--skip_genome_wide', action='store_true', default=False,\
help="Do not generate tables that consider groups of scaffolds belonging to genomes")
Iflags.add_argument('--skip_plot_generation', action='store_true', default=False,\
help="Do not make plots")
'''
####### Arguments for compare operation ######
'''
# Make a parent for profile to go above the system arguments
compare_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = compare_parent.add_argument_group('REQUIRED')
Rflags.add_argument('-i', '--input', help="A list of inStrain objects, all mapped to the same .fasta file",
nargs='*', required=True)
Rflags.add_argument("-o", "--output", action="store", default='instrainComparer', \
help='Output prefix')
compare_parser = subparsers.add_parser("compare",formatter_class=SmartFormatter,\
parents = [compare_parent, parent_parser, geneomewide_parent, variant_parent], add_help=False)
# Database mode parameters
Dflags = compare_parser.add_argument_group('DATABASE MODE PARAMETERS')
Dflags.add_argument('--database_mode', action='store_true', help=
"Using the parameters below, automatically determine which genomes are present in each Profile "
"and only compare scaffolds from those genomes. All profiles must have run Profile with the same .stb")
Dflags.add_argument('--breadth', default=0.5, type=float, help='Minimum breadth_minCov required to count a genome present')
# Other Parameters
Oflags = compare_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument("-sc", "--scaffolds", action="store",
help='Location to a list of scaffolds to compare. You can also make this a .fasta file and it will load the scaffold names')
Oflags.add_argument("--genome", action="store",
help='Run scaffolds belonging to this single genome only. Must provide an .stb file')
Oflags.add_argument('--store_coverage_overlap', action='store_true', default=False,\
help="Also store coverage overlap on an mm level")
Oflags.add_argument('--store_mismatch_locations', action='store_true', default=False,\
help="Store the locations of SNPs")
Oflags.add_argument('--include_self_comparisons', action='store_true', default=False,\
help="Also compare IS profiles against themself")
Oflags.add_argument('--skip_plot_generation', action='store_true', default=False, \
help="Dont create plots at the end of the run.")
Oflags.add_argument('--group_length', default=10000000,
help="How many bp to compare simultaneously (higher will use more RAM and run more quickly)", type=int)
Oflags.add_argument('--force_compress', action='store_true', default=False,
help='Force compression of all output files')
Cflags = compare_parser.add_argument_group('GENOME CLUSTERING OPTIONS')
Cflags.add_argument('-ani', "--ani_threshold", help='popANI threshold to cluster genomes at. Must provide .stb file to do so',
default=0.99999, type=float)
Cflags.add_argument('-cov', "--coverage_treshold", help='Minimum percent_genome_compared for a genome comparison' \
' to count; if below the popANI will be set to 0.', default=0.1, type=float)
Cflags.add_argument("--clusterAlg", help="Algorithm used to cluster genomes (passed\
to scipy.cluster.hierarchy.linkage)", default='average',
choices={'single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward'})
Bflags = compare_parser.add_argument_group('SNV POOLING OPTIONS')
Bflags.add_argument('-bams', "--bams",
help='Location of .bam files used during inStrain profile commands; needed to pull low-frequency SNVs.' \
' MUST BE IN SAME ORDER AS THE INPUT FILES',
nargs='*')
Bflags.add_argument('--skip_popANI', action='store_true', default=False,\
help="Only run SNV Pooling; skip other compare operations")
# Gflags = compare_parser.add_argument_group('GREEDY CLUSTERING OPTIONS [THIS SECTION IS EXPERIMENTAL!]')
# Gflags.add_argument('--greedy_clustering', action='store_true', default=False,\
# help="Dont do pair-wise comparisons, do greedy clustering to only find the number of clsuters. If this is set, use the parameters below as well")
# Gflags.add_argument('--g_ani', action='store', default=0.99, type=float,\
# help="ANI threshold for greedy clustering- put the fraction not the percentage (e.g. 0.99, not 99)")
# Gflags.add_argument('--g_cov', action='store', default=0.99, type=float,\
# help="Alignment coverage for greedy clustering- put the fraction not the percentage (e.g. 0.5, not 10)")
# Gflags.add_argument('--g_mm', action='store', default=100, type=int,\
# help="Maximum read mismatch level")
'''
####### Arguments for parse_annotations operation ######
'''
parse_anno_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = parse_anno_parent.add_argument_group('REQUIRED')
Rflags.add_argument('-i', '--input', help="A list of inStrain objects, all mapped to the same .fasta file",
nargs='*', required=True)
Rflags.add_argument('-a', '--annotations', help="R|A table or set of tables with gene annotations.\nMust be be a .csv file with two columns- `gene` and `anno`. See inStrain documentation for details\n(https://instrain.readthedocs.io/en/latest/user_manual.html#parse-annotations)",
nargs='*', required=True)
Rflags.add_argument("-o", "--output", action="store", default='annotation_output', \
help='Output prefix')
parse_anno_parser = subparsers.add_parser("parse_annotations", formatter_class=SmartFormatter, \
parents=[parse_anno_parent, parent_parser],
add_help=False)
# Other Parameters
Oflags = parse_anno_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument("-b", "--min_genome_breadth", action="store", default=0.5, type=float, \
help='Only annotate genomes on genomes with at least this genome breadth. Requires having genomes called. Set to 0 to include all genes.')
Oflags.add_argument("-g", "--min_gene_breadth", action="store", default=0.8, type=float, \
help='Only annotate genes with at least this breadth. Set to 0 to include all genes.')
Oflags.add_argument('--store_rawdata', action='store_true', default=False,\
help="Store the raw data dictionary")
'''
####### Arguments for profile_genes operation ######
'''
# Make a parent for profile to go above the system arguments
genes_io = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = genes_io.add_argument_group('INPUT / OUTPUT')
Rflags.add_argument("-i", '--IS', help="an inStrain profile object", required=True)
Rflags.add_argument('--store_everything', action='store_true', default=False,\
help="Store gene sequences in the IS object")
genes_parser = subparsers.add_parser("profile_genes",formatter_class=SmartFormatter,\
parents = [genes_parent, genes_io, parent_parser], add_help=False)
'''
####### Arguments for genome_wide operation ######
'''
# Make a parent for profile to go above the system arguments
genome_parser = subparsers.add_parser("genome_wide",formatter_class=SmartFormatter,\
parents = [geneomewide_parent, genes_io, mm_parent, parent_parser], add_help=False)
'''
####### Arguments for plot operation ######
'''
# Make a parent for profile to go above the system arguments
plot_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = plot_parent.add_argument_group('REQUIRED')
Rflags.add_argument("-i", '--IS', help="an inStrain profile object", required=True)
Rflags.add_argument("-pl", "--plots", help= "R|Plots. "
+ "Input 'all' or 'a' to plot all\n"
+ "1) Coverage and breadth vs. read mismatches\n"
+ "2) Genome-wide microdiversity metrics\n"
+ "3) Read-level ANI distribution\n"
+ "4) Major allele frequencies\n"
+ "5) Linkage decay\n"
+ "6) Read filtering plots\n"
+ "7) Scaffold inspection plot (large)\n"
+ "8) Linkage with SNP type (GENES REQUIRED)\n"
+ "9) Gene histograms (GENES REQUIRED)\n"
+ "10) Compare dendrograms (RUN ON COMPARE; NOT PROFILE)\n",
nargs='*', default='a')
POflags = plot_parent.add_argument_group('OPTIONAL FIGURE ADJUSTMENTS')
POflags.add_argument("-mb", "--minimum_breadth", default=0.5, type=float,
help= "Minimum breadth of coverage for genome to make it into plot (from 0-1).")
POflags.add_argument("-g", "--genomes", nargs='*',
help= "Only plot genomes with the names provided in this argument")
plot_parser = subparsers.add_parser("plot",formatter_class=SmartFormatter,\
parents = [plot_parent, parent_parser], add_help=False)
'''
####### Arguments for quick_profile operation ######
'''
# Make a parent for profile to go above the system arguments
quick_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = quick_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
quick_parser = subparsers.add_parser("quick_profile",formatter_class=SmartFormatter,\
parents = [quick_parent, parent_parser], add_help=False)
# Other Parameters
Oflags = quick_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument('-s', '--stb', help="Scaffold to bin. This can be a file with each line listing a scaffold and a bin name, tab-seperated. This can also be a space-seperated list of .fasta files, with one genome per .fasta file. If nothing is provided, all scaffolds will be treated as belonging to the same genome",
nargs='*', default=[])
Oflags.add_argument("-o", "--output", action="store", \
help='Output prefix', default='QuickProfile')
Oflags.add_argument("--breadth_cutoff", type=float, default=0.5,
help='Minimum genome breadth to pull scaffolds')
Oflags.add_argument("--stringent_breadth_cutoff", type=float, default=0.00,
help='Minimum breadth to let scaffold into coverm raw results (done with greater than; NOT greater than or equal to)')
'''
####### Arguments for filter_reads operation ######
'''
# Make a parent for profile to go above the system arguments
reads_parent = argparse.ArgumentParser(add_help=False)
# Required positional arguments
Rflags = reads_parent.add_argument_group('REQUIRED')
Rflags.add_argument("bam", help="Sorted .bam file")
Rflags.add_argument("fasta", help="Fasta file the bam is mapped to")
Rflags.add_argument("-o", "--output", action="store", \
help='Location of folder to store read report(s)')
reads_parser = subparsers.add_parser("filter_reads",formatter_class=SmartFormatter,\
parents = [reads_parent, parent_parser, readfilter_parent,
readoutput_parent], add_help=False)
'''
####### Arguments for other operation ######
'''
other_parser = subparsers.add_parser("other",formatter_class=SmartFormatter,\
parents = [parent_parser], add_help=False)
# Other Parameters
Oflags = other_parser.add_argument_group('OTHER OPTIONS')
Oflags.add_argument('--old_IS', help="Convert an old inStrain version object to the newer version.")
Oflags.add_argument('--run_statistics', help='Generate runtime reports for an inStrain run.')
# Dependency checker
dep_parser = subparsers.add_parser("check_deps", formatter_class=SmartFormatter)
'''
####### PARSE THE ARGUMENTS ######
'''
# Handle the situation where the user wants the raw help
if (len(args) == 0 or args[0] == '-h' or args[0] == '--help'):
printHelp()
sys.exit(0)
else:
return parser.parse_args(args)
| _split_lines | identifier_name |
template.js | (function ($) {
function batch(){
$(document).on('click', ".batch-price", function () {
var d = dialog({
title: '批量添加信息',
content: '<input type="text" id="price" value="" style="width: 50%;"><input style="margin-left: 10px;" type="button" value="确定" id="confirm">',
quickClose: true,
width: 120,
heigth: 20
});
d.show($(this)[0]);
var target = $(this).data('target');
$('#confirm').click(function (e) {
var price = $('#price').val();
if(!isNaN(price)){
$(target).val(price);
d.close().remove();
}else{
alert("请输入数字!");
}
});
});
}
batch();
})(jQuery);
function makeSku() {
var HEADER = "<th width='100px'>商品价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.memberPrice'></span><a>" +
"</th>" +
"<th width='100px'>市场价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.marketPrice'></span></a>" +
"</th>" +
"<th width='100px'>进货价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.enterPrice'></span></a>" +
"</th>" +
"<th width='100px'>库存" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.stock'></span></a>" +
"</th>" +
"<th width='100px'>利润" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.profit'></span></a>" +
"</th>" +
"<th width='160px'>条形码" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.barCode'></span></a>" +
"</th>";
var BODY = "<tr id=':sku'>:Tds" +
"<input type='hidden' name='sku[:sku][remark]' value=':Text'>" +
"<td>" +
"<input type='text' class='form-control memberPrice' name='sku[:sku][memberPrice]' placeholder='商品价格' value='0'>" +
"</td>" +
"<td>" +
"<input type='text' class='form-control marketPrice' name='sku[:sku][marketPrice]' placeholder='市场价格' value='0'>" +
"</td>" +
"<td>" +
"<input type='text' class='form-control enterPrice' name='sku[:sku][enterPrice]' placeholder='进货价格' value='0'>" +
"</td>" +
"<td><input type='text' class='form-control stock' name='sku[:sku][stock]' placeholder='库存' value='0'></td>" +
"<td><input type='text' class='form-control profit' name='sku[:sku][profit]' placeholder='利润' value='0'></td>" +
"<td><input type='text' class='form-control barCode' name='sku[:sku][barCode]' placeholder='条形码' value=''></td>" +
"</tr>";
var SKU_NAME = 'sku_name';
var SKU_CHECKBOX = "sku_checkbox";
var SKU_VALUE = 'sku_value';
// 创建表格中显示规格名称的这一列
function createNameTd(id) {
var td = document.createElement('td');
// 创建输入框,放入第一列中
var input = document.createElement('input');
input.name = SKU_NAME + '['+ id+']';
input.classList.add('form-control', SKU_NAME);
input.setAttribute('data-id', id);
td.appendChild(input);
return td;
}
// 创建表格中显示值得这一列
function createValueTd(id) {
var td = document.createElement('td');
// 创建增加按钮
var btn = document.createElement('button');
btn.innerText = "添加";
btn.type = "button";
btn.setAttribute('data-for-id', id);
btn.classList.add('btn','btn-primary', 'btn-sm', 'btnn');
//new addEvent(btn);
td.appendChild(btn);
return td;
}
// 创建复选框和输入框的组合
function createCheckInput(parentId, id) {
var container = document.createElement('div');
container.classList.add('col-sm-3');
container.style.width = '200px';
var group = document.createElement('div');
group.classList.add('input-group');
var span = document.createElement('span');
span.classList.add('input-group-addon');
var checkbox = document.createElement('input');
checkbox.type = "checkbox";
checkbox.classList.add(SKU_CHECKBOX);
checkbox.name = SKU_CHECKBOX + "[" + parentId + "][" + id + "]";
checkbox.setAttribute('data-for-id', parentId);
checkbox.setAttribute('data-id', id);
var input = document.createElement('input');
input.type = "input";
input.classList.add(SKU_VALUE);
input.name = SKU_VALUE + "[" + parentId + "][" + id + "]";
input.classList.add('form-control');
input.setAttribute('data-id', id);
span.appendChild(checkbox);
group.appendChild(span);
group.appendChild(input);
container.appendChild(group);
return container;
}
// 获取哪些已经是选中的数据
function getChecked() {
var result = [];
$("." + SKU_NAME).each(function () {
var checkboxes = $(":checkbox[data-for-id="+$(this).data('id')+"]:checked");
if (checkboxes.length > 0) {
result.push(checkboxes);
}
});
return result;
}
function getItems(values, i) {
if (i == undefined) {
i = 0;
}
var result = [];
var z = 0;
if (values.length == i + 1) {
for (var h = 0;h < values[i].length;h++) {
result[z] = [];
result[z].push(values[i][h]);
z++;
}
return result;
}
var temp = getItems(values, i + 1);
for (var j = 0;j < values[i].length;j++) {
for (var k = 0;k < temp.length;k++) { // 二维数组
result[z] = [];
result[z].push(values[i][j]);
result[z] = result[z].concat(temp[k]);
z++;
}
}
return result;
}
function makeHeader(item) {
var result = "";
for (var i in item) {
var forId = $(item[i]).data('forId');
var text = $("." + SKU_NAME + "[data-id=" + forId + "]").val();
result += "<th>" + text + "</th>"
}
$("#skuHeader").html(result + HEADER);
}
function makeHtml(items) {
function ItemAdapter(item) {
var ids = [];
var values = [];
var texts = [];
this.getId = function () {
return ids.join(";");
};
| this.getText = function () {
return texts.join(";");
};
this.getValues = function () {
return values;
};
this.getTds = function () {
return "<td class='sku'>" + values.join("</td><td class='sku'>") + "</td>";
};
function init(checkboxes) {
for (var i in checkboxes) {
item = $(checkboxes[i]);
var forId = item.data('forId');
var id = item.data('id');
ids.push(forId + ":" + id);
var input = $("." + SKU_VALUE + "[name='" + SKU_VALUE + "[" + forId + "][" + id + "]']");
values.push(input.val());
var label = $("." + SKU_NAME + "[data-id=" + forId + "]").val();
texts.push(label + ":" + input.val());
}
}
init(item)
}
function clear(ids) {
var trs = $("#skuBody").find('tr');
if (trs.length > 0) {
for (var i = 0; i < trs.length; i++) {
if ($.inArray($(trs[i]).attr('id'), ids) == -1) {
$(trs[i]).remove();
}
}
}
}
function makeBody(items) {
var objects = [];
var ids = [];
for (var i in items) {
var obj = new ItemAdapter(items[i]);
objects.push(obj);
ids.push(obj.getId());
}
// 清除冗余行数
clear(ids);
var trs = [];
if (items.length > 0) {
for (var i = 0; i < ids.length; i++) {
// 已经存在的,需要更新一下规格值文字
var tr = $("tr[id='" + ids[i] + "']");
var obj = objects[i];
if (tr.length > 0) {
tr.find('.sku').remove();
tr.find(':hidden').val(obj.getText());
tr.prepend(obj.getTds());
trs.push(tr);
} else {
console.log(2, obj.getTds());
var html = BODY.replace(/:sku/g, obj.getId()).replace(/:Text/g, obj.getText()).replace(/:Tds/g, obj.getTds());
trs.push($(html));
}
}
}
$("#skuBody").html('');
for (var i = 0; i < trs.length; i++) {
$("#skuBody").append(trs[i]);
}
}
makeBody(items);
}
function addEvent() {
$(document).on('click','.btnn', function() {
var id = $(this).attr('data-for-maxid');
id = ++id | 1000;
$(this).attr('data-for-maxid',id);
var ele = createCheckInput($(this).data('forId'), id);
$(this).before(ele);
});
}
function init() {
var id = 1000;
if (window.max != 0) {
id = ++window.max;
}
// 按钮点击事件,新增一行数据
$(document).on('click', '#skuBtnAdd', function () {
var sku = document.getElementById('tbodySku');
var tr = document.createElement('tr');
tr.appendChild(createNameTd(id));
tr.appendChild(createValueTd(id));
++id;
sku.appendChild(tr);
});
$(document).on('keypress input keydown drop paste', "." + SKU_NAME, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
}
});
$(document).on('click', "." + SKU_CHECKBOX, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
makeHtml(items);
} else {
$("#skuBody").html('');
$("#skuHeader").html('');
}
});
$(document).on('keypress input keydown drop paste', '.' + SKU_VALUE, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
makeHtml(items);
}
})
}
init();
addEvent();
}
makeSku();
| identifier_name | |
template.js | (function ($) {
function batch(){
$(document).on('click', ".batch-price", function () {
var d = dialog({
title: '批量添加信息',
content: '<input type="text" id="price" value="" style="width: 50%;"><input style="margin-left: 10px;" type="button" value="确定" id="confirm">',
quickClose: true, |
var target = $(this).data('target');
$('#confirm').click(function (e) {
var price = $('#price').val();
if(!isNaN(price)){
$(target).val(price);
d.close().remove();
}else{
alert("请输入数字!");
}
});
});
}
batch();
})(jQuery);
function makeSku() {
var HEADER = "<th width='100px'>商品价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.memberPrice'></span><a>" +
"</th>" +
"<th width='100px'>市场价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.marketPrice'></span></a>" +
"</th>" +
"<th width='100px'>进货价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.enterPrice'></span></a>" +
"</th>" +
"<th width='100px'>库存" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.stock'></span></a>" +
"</th>" +
"<th width='100px'>利润" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.profit'></span></a>" +
"</th>" +
"<th width='160px'>条形码" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.barCode'></span></a>" +
"</th>";
var BODY = "<tr id=':sku'>:Tds" +
"<input type='hidden' name='sku[:sku][remark]' value=':Text'>" +
"<td>" +
"<input type='text' class='form-control memberPrice' name='sku[:sku][memberPrice]' placeholder='商品价格' value='0'>" +
"</td>" +
"<td>" +
"<input type='text' class='form-control marketPrice' name='sku[:sku][marketPrice]' placeholder='市场价格' value='0'>" +
"</td>" +
"<td>" +
"<input type='text' class='form-control enterPrice' name='sku[:sku][enterPrice]' placeholder='进货价格' value='0'>" +
"</td>" +
"<td><input type='text' class='form-control stock' name='sku[:sku][stock]' placeholder='库存' value='0'></td>" +
"<td><input type='text' class='form-control profit' name='sku[:sku][profit]' placeholder='利润' value='0'></td>" +
"<td><input type='text' class='form-control barCode' name='sku[:sku][barCode]' placeholder='条形码' value=''></td>" +
"</tr>";
var SKU_NAME = 'sku_name';
var SKU_CHECKBOX = "sku_checkbox";
var SKU_VALUE = 'sku_value';
// 创建表格中显示规格名称的这一列
function createNameTd(id) {
var td = document.createElement('td');
// 创建输入框,放入第一列中
var input = document.createElement('input');
input.name = SKU_NAME + '['+ id+']';
input.classList.add('form-control', SKU_NAME);
input.setAttribute('data-id', id);
td.appendChild(input);
return td;
}
// 创建表格中显示值得这一列
function createValueTd(id) {
var td = document.createElement('td');
// 创建增加按钮
var btn = document.createElement('button');
btn.innerText = "添加";
btn.type = "button";
btn.setAttribute('data-for-id', id);
btn.classList.add('btn','btn-primary', 'btn-sm', 'btnn');
//new addEvent(btn);
td.appendChild(btn);
return td;
}
// 创建复选框和输入框的组合
function createCheckInput(parentId, id) {
var container = document.createElement('div');
container.classList.add('col-sm-3');
container.style.width = '200px';
var group = document.createElement('div');
group.classList.add('input-group');
var span = document.createElement('span');
span.classList.add('input-group-addon');
var checkbox = document.createElement('input');
checkbox.type = "checkbox";
checkbox.classList.add(SKU_CHECKBOX);
checkbox.name = SKU_CHECKBOX + "[" + parentId + "][" + id + "]";
checkbox.setAttribute('data-for-id', parentId);
checkbox.setAttribute('data-id', id);
var input = document.createElement('input');
input.type = "input";
input.classList.add(SKU_VALUE);
input.name = SKU_VALUE + "[" + parentId + "][" + id + "]";
input.classList.add('form-control');
input.setAttribute('data-id', id);
span.appendChild(checkbox);
group.appendChild(span);
group.appendChild(input);
container.appendChild(group);
return container;
}
// 获取哪些已经是选中的数据
function getChecked() {
var result = [];
$("." + SKU_NAME).each(function () {
var checkboxes = $(":checkbox[data-for-id="+$(this).data('id')+"]:checked");
if (checkboxes.length > 0) {
result.push(checkboxes);
}
});
return result;
}
function getItems(values, i) {
if (i == undefined) {
i = 0;
}
var result = [];
var z = 0;
if (values.length == i + 1) {
for (var h = 0;h < values[i].length;h++) {
result[z] = [];
result[z].push(values[i][h]);
z++;
}
return result;
}
var temp = getItems(values, i + 1);
for (var j = 0;j < values[i].length;j++) {
for (var k = 0;k < temp.length;k++) { // 二维数组
result[z] = [];
result[z].push(values[i][j]);
result[z] = result[z].concat(temp[k]);
z++;
}
}
return result;
}
function makeHeader(item) {
var result = "";
for (var i in item) {
var forId = $(item[i]).data('forId');
var text = $("." + SKU_NAME + "[data-id=" + forId + "]").val();
result += "<th>" + text + "</th>"
}
$("#skuHeader").html(result + HEADER);
}
function makeHtml(items) {
function ItemAdapter(item) {
var ids = [];
var values = [];
var texts = [];
this.getId = function () {
return ids.join(";");
};
this.getText = function () {
return texts.join(";");
};
this.getValues = function () {
return values;
};
this.getTds = function () {
return "<td class='sku'>" + values.join("</td><td class='sku'>") + "</td>";
};
function init(checkboxes) {
for (var i in checkboxes) {
item = $(checkboxes[i]);
var forId = item.data('forId');
var id = item.data('id');
ids.push(forId + ":" + id);
var input = $("." + SKU_VALUE + "[name='" + SKU_VALUE + "[" + forId + "][" + id + "]']");
values.push(input.val());
var label = $("." + SKU_NAME + "[data-id=" + forId + "]").val();
texts.push(label + ":" + input.val());
}
}
init(item)
}
function clear(ids) {
var trs = $("#skuBody").find('tr');
if (trs.length > 0) {
for (var i = 0; i < trs.length; i++) {
if ($.inArray($(trs[i]).attr('id'), ids) == -1) {
$(trs[i]).remove();
}
}
}
}
function makeBody(items) {
var objects = [];
var ids = [];
for (var i in items) {
var obj = new ItemAdapter(items[i]);
objects.push(obj);
ids.push(obj.getId());
}
// 清除冗余行数
clear(ids);
var trs = [];
if (items.length > 0) {
for (var i = 0; i < ids.length; i++) {
// 已经存在的,需要更新一下规格值文字
var tr = $("tr[id='" + ids[i] + "']");
var obj = objects[i];
if (tr.length > 0) {
tr.find('.sku').remove();
tr.find(':hidden').val(obj.getText());
tr.prepend(obj.getTds());
trs.push(tr);
} else {
console.log(2, obj.getTds());
var html = BODY.replace(/:sku/g, obj.getId()).replace(/:Text/g, obj.getText()).replace(/:Tds/g, obj.getTds());
trs.push($(html));
}
}
}
$("#skuBody").html('');
for (var i = 0; i < trs.length; i++) {
$("#skuBody").append(trs[i]);
}
}
makeBody(items);
}
function addEvent() {
$(document).on('click','.btnn', function() {
var id = $(this).attr('data-for-maxid');
id = ++id | 1000;
$(this).attr('data-for-maxid',id);
var ele = createCheckInput($(this).data('forId'), id);
$(this).before(ele);
});
}
function init() {
var id = 1000;
if (window.max != 0) {
id = ++window.max;
}
// 按钮点击事件,新增一行数据
$(document).on('click', '#skuBtnAdd', function () {
var sku = document.getElementById('tbodySku');
var tr = document.createElement('tr');
tr.appendChild(createNameTd(id));
tr.appendChild(createValueTd(id));
++id;
sku.appendChild(tr);
});
$(document).on('keypress input keydown drop paste', "." + SKU_NAME, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
}
});
$(document).on('click', "." + SKU_CHECKBOX, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
makeHtml(items);
} else {
$("#skuBody").html('');
$("#skuHeader").html('');
}
});
$(document).on('keypress input keydown drop paste', '.' + SKU_VALUE, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
makeHtml(items);
}
})
}
init();
addEvent();
}
makeSku(); | width: 120,
heigth: 20
});
d.show($(this)[0]); | random_line_split |
template.js | (function ($) {
function batch(){
$(document).on('click', ".batch-price", function () {
var d = dialog({
title: '批量添加信息',
content: '<input type="text" id="price" value="" style="width: 50%;"><input style="margin-left: 10px;" type="button" value="确定" id="confirm">',
quickClose: true,
width: 120,
heigth: 20
});
d.show($(this)[0]);
var target = $(this).data('target');
$('#confirm').click(function (e) {
var price = $('#price').val();
if(!isNaN(price)){
$(target).val(price);
d.close().remove();
}else{
alert("请输入数字!");
}
});
});
}
batch();
})(jQuery);
function makeSku() {
var HEADER = "<th width='100px'>商品价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.memberPrice'></span><a>" +
"</th>" +
"<th width='100px'>市场价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.marketPrice'></span></a>" +
"</th>" +
"<th width='100px'>进货价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.enterPrice'></span></a>" +
"</th>" +
"<th width='100px'>库存" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.stock'></span></a>" +
"</th>" +
"<th width='100px'>利润" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.profit'></span></a>" +
"</th>" +
"<th width='160px'>条形码" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.barCode'></span></a>" +
"</th>";
var BODY = "<tr id=':sku'>:Tds" +
"<input type='hidden' name='sku[:sku][remark]' value=':Text'>" +
"<td>" +
"<input type='text' class='form-control memberPrice' name='sku[:sku][memberPrice]' placeholder='商品价格' value='0'>" +
"</td>" +
"<td>" +
"<input type='text' class='form-control marketPrice' name='sku[:sku][marketPrice]' placeholder='市场价格' value='0'>" +
"</td>" +
"<td>" +
"<input type='text' class='form-control enterPrice' name='sku[:sku][enterPrice]' placeholder='进货价格' value='0'>" +
"</td>" +
"<td><input type='text' class='form-control stock' name='sku[:sku][stock]' placeholder='库存' value='0'></td>" +
"<td><input type='text' class='form-control profit' name='sku[:sku][profit]' placeholder='利润' value='0'></td>" +
"<td><input type='text' class='form-control barCode' name='sku[:sku][barCode]' placeholder='条形码' value=''></td>" +
"</tr>";
var SKU_NAME = 'sku_name';
var SKU_CHECKBOX = "sku_checkbox";
var SKU_VALUE = 'sku_value';
// 创建表格中显示规格名称的这一列
function createNameTd(id) {
var td = document.createElement('td');
// 创建输入框,放入第一列中
var input = document.createElement('input');
input.name = SKU_NAME + '['+ id+']';
input.classList.add('form-control', SKU_NAME);
input.setAttribute('data-id', id);
td.appendChild(input);
return td;
}
// 创建表格中显示值得这一列
function createValueTd(id) {
var td = document.createElement('td');
// 创建增加按钮
var btn = document.createElement('button');
btn.innerText = "添加";
btn.type = "button";
btn.setAttribute('data-for-id', id);
btn.classList.add('btn','btn-primary', 'btn-sm', 'btnn');
//new addEvent(btn);
td.appendChild(btn);
return td;
}
// 创建复选框和输入框的组合
function createCheckInput(parentId, id) {
var container = document.createElement('div');
container.classList.add('col-sm-3');
container.style.width = '200px';
var group = document.createElement('div');
group.classList.add('input-group');
var span = document.createElement('span');
span.classList.add('input-group-addon');
var checkbox = document.createElement('input');
checkbox.type = "checkbox";
checkbox.classList.add(SKU_CHECKBOX);
checkbox.name = SKU_CHECKBOX + "[" + parentId + "][" + id + "]";
checkbox.setAttribute('data-for-id', parentId);
checkbox.setAttribute('data-id', id);
var input = document.createElement('input');
input.type = "input";
input.classList.add(SKU_VALUE);
input.name = SKU_VALUE + "[" + parentId + "][" + id + "]";
input.classList.add('form-control');
input.setAttribute('data-id', id);
span.appendChild(checkbox);
group.appendChild(span);
group.appendChild(input);
container.appendChild(group);
return container;
}
// 获取哪些已经是选中的数据
function getChecked() {
var result = [];
$("." + SKU_NAME).each(function () {
var checkboxes = $(":checkbox[data-for-id="+$(this).data('id')+"]:checked");
if (checkboxes.length > 0) {
result.push(checkboxes);
}
});
return result;
}
function getItems(values, i) {
if (i == undefined) {
i = 0;
}
var result = [];
var z = 0;
if (values.length == i + 1) {
for (var h = 0;h < values[i].length;h++) {
result[z] = [];
result[z].push(values[i][h]);
z++;
}
return result;
}
var temp = getItems(values, i + 1);
for (var j = 0;j < values[i].length;j++) {
for (var k = 0;k < temp.length;k++) { // 二维数组
result[z] = [];
result[z].push(values[i][j]);
result[z] = result[z].concat(temp[k]);
z++;
}
}
return result;
}
function makeHeader(item) {
var result = "";
for (var i in item) {
var forId = $(item[i]).data('forId');
var text = $("." + SKU_NAME + "[data-id=" + forId + "]").val();
result += "<th>" + text + "</th>"
}
$("#skuHeader").html(result + HEADER);
}
function makeHtml(items) {
function ItemAdapter(item) {
var ids = [];
var values = [];
var texts = [];
this.getId = function () {
return ids.join(";");
};
this.getText = function () {
return texts.join(";");
};
this.getValues = function () {
return values;
};
this.getTds = function () {
return "<td class='sku'>" + values.join("</td><td class='sku'>") + "</td>";
};
function init(checkboxes) {
for (var i in checkboxes) {
item = $(checkboxes[i]);
var forId = item.data('forId');
var id = item.data('id');
ids.push(forId + ":" + id);
var input = $("." + SKU_VALUE + "[name='" + SKU_VALUE + "[" + forId + "][" + id + "]']");
values.push(input.val());
var label = $("." + SKU_NAME + "[data-id=" + forId + "]").val();
texts.push(label + ":" + input.val());
}
}
init(item)
}
function clear(ids) {
var trs = $("#skuBody").find('tr');
if (trs.length > 0) {
for (var i = 0; i < trs.length; i++) {
if ($.inArray($(trs[i]).attr('id'), ids) == -1) {
$(trs[i]).remove();
}
}
}
}
function makeBody(items) {
var objects = [];
var ids = [];
for (var i in items) {
var obj = new ItemAdapter(items[i]);
objects.push(obj);
ids.push(obj.getId());
}
// 清除冗余行数
clear(ids);
var trs = [];
if (items.length > 0) {
for (var i = 0; i < ids.length; i++) {
// 已经存在的,需要更新一下规格值文字
var tr = $("tr[id='" + ids[i] + "']");
var obj = objects[i];
if (tr.length > 0) {
tr.find('.sku').remove();
tr.find(':hidden').val(obj.getText());
tr.prepend(obj.getTds());
trs.push(tr);
} else {
console.log(2, obj.getTds());
var html = BODY.replace(/:sku/g, obj.getId()).replace(/:Text/g, obj.getText()).replace(/:Tds/g, obj.getTds());
trs.push($(html));
}
}
}
$("#skuBody").html('');
for (var i = 0; i < trs.length; i++) {
$("#skuBody").append(trs[i]);
}
}
makeBody(items);
}
function addEvent() {
$(document).on('click','.btnn', function() {
var id = $(this).attr('data-for-maxid');
id = ++id | 1000;
$(this).attr('data-for-maxid',id);
var ele = createCheckInput($(this).data('forId'), id);
$(this).before(ele);
});
}
function init() {
var id = 1000;
if (window.max != 0) {
id = ++window.max;
}
// 按钮点击事件,新增一行数据
$(document).on('click', '#skuBtnAdd', function () {
var sku = document.getElementById('tbodySku');
var tr = document.createElement('tr');
tr.appendChild(createNameTd(id));
tr | ++id;
sku.appendChild(tr);
});
$(document).on('keypress input keydown drop paste', "." + SKU_NAME, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
}
});
$(document).on('click', "." + SKU_CHECKBOX, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
makeHtml(items);
} else {
$("#skuBody").html('');
$("#skuHeader").html('');
}
});
$(document).on('keypress input keydown drop paste', '.' + SKU_VALUE, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
makeHtml(items);
}
})
}
init();
addEvent();
}
makeSku();
| .appendChild(createValueTd(id));
| conditional_block |
template.js | (function ($) {
function batch(){
$(document).on('click', ".batch-price", function () {
var d = dialog({
title: '批量添加信息',
content: '<input type="text" id="price" value="" style="width: 50%;"><input style="margin-left: 10px;" type="button" value="确定" id="confirm">',
quickClose: true,
width: 120,
heigth: 20
});
d.show($(this)[0]);
var target = $(this).data('target');
$('#confirm').click(function (e) {
var price = $('#price').val();
if(!isNaN(price)){
$(target).val(price);
d.close().remove();
}else{
alert("请输入数字!");
}
});
});
}
batch();
})(jQuery);
function makeSku() {
var HEADER = "<th width='100px'>商品价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.memberPrice'></span><a>" +
"</th>" +
"<th width='100px'>市场价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.marketPrice'></span></a>" +
"</th>" +
"<th width='100px'>进货价格" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.enterPrice'></span></a>" +
"</th>" +
"<th width='100px'>库存" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.stock'></span></a>" +
"</th>" +
"<th width='100px'>利润" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.profit'></span></a>" +
"</th>" +
"<th width='160px'>条形码" +
"<a><span class='glyphicon glyphicon-edit batch-price pull-right' data-target='.barCode'></span></a>" +
"</th>";
var BODY = "<tr id=':sku'>:Tds" +
"<input type='hidden' name='sku[:sku][remark]' value=':Text'>" +
"<td>" +
"<input type='text' class='form-control memberPrice' name='sku[:sku][memberPrice]' placeholder='商品价格' value='0'>" +
"</td>" +
"<td>" +
"<input type='text' class='form-control marketPrice' name='sku[:sku][marketPrice]' placeholder='市场价格' value='0'>" +
"</td>" +
"<td>" +
"<input type='text' class='form-control enterPrice' name='sku[:sku][enterPrice]' placeholder='进货价格' value='0'>" +
"</td>" +
"<td><input type='text' class='form-control stock' name='sku[:sku][stock]' placeholder='库存' value='0'></td>" +
"<td><input type='text' class='form-control profit' name='sku[:sku][profit]' placeholder='利润' value='0'></td>" +
"<td><input type='text' class='form-control barCode' name='sku[:sku][barCode]' placeholder='条形码' value=''></td>" +
"</tr>";
var SKU_NAME = 'sku_name';
var SKU_CHECKBOX = "sku_checkbox";
var SKU_VALUE = 'sku_value';
// 创建表格中显示规格名称的这一列
function createNameTd(id) {
var td = document.createElement('td');
// 创建输入框,放入第一列中
var input = document.createElement('input');
input.name = SKU_NAME + '['+ id+']';
input.classList.add('form-control', SKU_NAME);
input.setAttribute('data-id', id);
td.appendChild(input);
return td;
}
// 创建表格中显示值得这一列
function createValueTd(id) {
var td = document.createElement('td');
// 创建增加按钮
var btn = document.createElement('button');
btn.innerText = "添加";
btn.type = "button";
btn.setAttribute('data-for-id', id);
btn.classList.add('btn','btn-primary', 'btn-sm', 'btnn');
//new addEvent(btn);
td.appendChild(btn);
return td;
}
// 创建复选框和输入框的组合
function createCheckInput(parentId, id) {
var container = document.createElement('div');
container.classList.add('col-sm-3');
container.style.width = '200px';
var group = document.createElement('div');
group.classList.add('input-group');
var span = document.createElement('span');
span.classList.add('input-group-addon');
var checkbox = document.createElement('input');
checkbox.type = "checkbox";
checkbox.classList.add(SKU_CHECKBOX);
checkbox.name = SKU_CHECKBOX + "[" + parentId + "][" + id + "]";
checkbox.setAttribute('data-for-id', parentId);
checkbox.setAttribute('data-id', id);
var input = document.createElement('input');
input.type = "input";
input.classList.add(SKU_VALUE);
input.name = SKU_VALUE + "[" + parentId + "][" + id + "]";
input.classList.add('form-control');
input.setAttribute('data-id', id);
span.appendChild(checkbox);
group.appendChild(span);
group.appendChild(input);
container.appendChild(group);
return container;
}
// 获取哪些已经是选中的数据
function getChecked() {
var result = [];
$("." + SKU_NAME).each(function () {
var checkboxes = $(":checkbox[data-for-id="+$(this).data('id')+"]:checked");
if (checkboxes.length > 0) {
result.push(checkboxes);
}
});
return result;
}
function getItems(values, i) {
if (i == undefined) {
i = 0;
}
var result = [];
var z = 0;
if (values.length == i + 1) {
for (var h = 0;h < values[i].length;h++) {
result[z] = [];
result[z].push(values[i][h]);
z++;
}
return result;
}
var temp = getItems(values, i + 1);
for (var j = 0;j < values[i].length;j++) {
for (var k = 0;k < temp.length;k++) { // 二维数组
result[z] = [];
result[z].push(values[i][j]);
result[z] = result[z].concat(temp[k]);
z++;
}
}
return result;
}
function makeHeader(item) {
var result = "";
for (var i in item) {
var forId = $(item[i]).data('forId');
var text = $("." + SKU_NAME + "[data-id=" + forId + "]").val();
result += "<th>" + text + "</th>"
}
$("#skuHeader").html(result + HEADER);
}
function makeHtml(items) {
function ItemAdapter(item) {
var ids = [];
var values = [];
var texts = [];
this.getId = function () {
return ids.join(";");
};
this.getText = function () {
retur | $(trs[i]).remove();
}
}
}
}
function makeBody(items) {
var objects = [];
var ids = [];
for (var i in items) {
var obj = new ItemAdapter(items[i]);
objects.push(obj);
ids.push(obj.getId());
}
// 清除冗余行数
clear(ids);
var trs = [];
if (items.length > 0) {
for (var i = 0; i < ids.length; i++) {
// 已经存在的,需要更新一下规格值文字
var tr = $("tr[id='" + ids[i] + "']");
var obj = objects[i];
if (tr.length > 0) {
tr.find('.sku').remove();
tr.find(':hidden').val(obj.getText());
tr.prepend(obj.getTds());
trs.push(tr);
} else {
console.log(2, obj.getTds());
var html = BODY.replace(/:sku/g, obj.getId()).replace(/:Text/g, obj.getText()).replace(/:Tds/g, obj.getTds());
trs.push($(html));
}
}
}
$("#skuBody").html('');
for (var i = 0; i < trs.length; i++) {
$("#skuBody").append(trs[i]);
}
}
makeBody(items);
}
function addEvent() {
$(document).on('click','.btnn', function() {
var id = $(this).attr('data-for-maxid');
id = ++id | 1000;
$(this).attr('data-for-maxid',id);
var ele = createCheckInput($(this).data('forId'), id);
$(this).before(ele);
});
}
function init() {
var id = 1000;
if (window.max != 0) {
id = ++window.max;
}
// 按钮点击事件,新增一行数据
$(document).on('click', '#skuBtnAdd', function () {
var sku = document.getElementById('tbodySku');
var tr = document.createElement('tr');
tr.appendChild(createNameTd(id));
tr.appendChild(createValueTd(id));
++id;
sku.appendChild(tr);
});
$(document).on('keypress input keydown drop paste', "." + SKU_NAME, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
}
});
$(document).on('click', "." + SKU_CHECKBOX, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
makeHtml(items);
} else {
$("#skuBody").html('');
$("#skuHeader").html('');
}
});
$(document).on('keypress input keydown drop paste', '.' + SKU_VALUE, function () {
var values = getChecked();
if (values.length > 0) {
var items = getItems(values, 0);
makeHeader(items[0]);
makeHtml(items);
}
})
}
init();
addEvent();
}
makeSku();
| n texts.join(";");
};
this.getValues = function () {
return values;
};
this.getTds = function () {
return "<td class='sku'>" + values.join("</td><td class='sku'>") + "</td>";
};
function init(checkboxes) {
for (var i in checkboxes) {
item = $(checkboxes[i]);
var forId = item.data('forId');
var id = item.data('id');
ids.push(forId + ":" + id);
var input = $("." + SKU_VALUE + "[name='" + SKU_VALUE + "[" + forId + "][" + id + "]']");
values.push(input.val());
var label = $("." + SKU_NAME + "[data-id=" + forId + "]").val();
texts.push(label + ":" + input.val());
}
}
init(item)
}
function clear(ids) {
var trs = $("#skuBody").find('tr');
if (trs.length > 0) {
for (var i = 0; i < trs.length; i++) {
if ($.inArray($(trs[i]).attr('id'), ids) == -1) {
| identifier_body |
ZenModelBase.py | ##############################################################################
#
# Copyright (C) Zenoss, Inc. 2007, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
__doc__="""ZenModelBase
$Id: ZenModelBase.py,v 1.17 2004/04/23 19:11:58 edahl Exp $"""
__version__ = "$Revision: 1.17 $"[11:-2]
import re
import time
import sys
from xml.sax import saxutils
from urllib import unquote
from cgi import escape
import zope.component
import zope.interface
from datetime import datetime
import pytz
from OFS.ObjectManager import checkValidId as globalCheckValidId
from AccessControl import ClassSecurityInfo, getSecurityManager, Unauthorized
from AccessControl.class_init import InitializeClass
from Acquisition import aq_base, aq_chain
from zope.component import getGlobalSiteManager, getUtility
from Products.ZenModel.interfaces import IZenDocProvider
from Products.ZenRelations.ZenPropertyManager import iscustprop
from Products.ZenUtils.Utils import zenpathsplit, zenpathjoin, getDisplayType
from Products.ZenUtils.Utils import createHierarchyObj, getHierarchyObj
from Products.ZenUtils.Utils import getObjByPath, unpublished
from Products.ZenUtils.csrf import get_csrf_token
from Products.ZenUtils.Utils import prepId as globalPrepId, isXmlRpc
from Products.ZenWidgets import messaging
from Products.ZenUtils.Time import convertTimestampToTimeZone, isoDateTime, convertJsTimeFormatToPy
from Products.ZenUI3.browser.interfaces import INewPath
from Products.ZenMessaging.audit import audit as auditFn
from ZenossSecurity import *
from Products.ZenUtils.virtual_root import IVirtualRoot
_MARKER = object()
class ZenModelBase(object):
"""
All ZenModel Persistent classes inherit from this class. It provides some
screen management functionality, and general utility methods.
"""
_zendoc = ''
sub_meta_types = ()
#prodStateThreshold = 500
security = ClassSecurityInfo()
def __call__(self):
"""
Invokes the default view.
"""
if isXmlRpc(self.REQUEST):
return self
else:
newpath = INewPath(self)
newpath = getUtility(IVirtualRoot).ensure_virtual_root(newpath)
self.REQUEST.response.redirect(newpath)
def __repr__(self):
"""Return a short string describing the object."""
return "<{0} at {1}>".format(
self.__class__.__name__, '/'.join(self.getPhysicalPath()),
)
index_html = None # This special value informs ZPublisher to use __call__
security.declareProtected(ZEN_VIEW, 'view')
def view(self):
'''
Returns the default view even if index_html is overridden.
@permission: ZEN_VIEW
'''
return self()
def __hash__(self):
return hash(self.id)
def prepId(self, id, subchar='_'):
"""
Clean out an id of illegal characters.
@type id: string
@param subchar: Character to be substituted with illegal characters
@type subchar: string
@rtype: string
>>> dmd.Devices.prepId('ab^*cd')
'ab__cd'
>>> dmd.Devices.prepId('ab^*cd', subchar='Z')
'abZZcd'
>>> dmd.Devices.prepId('/boot')
'boot'
>>> dmd.Devices.prepId('/')
'-'
>>> dmd.Devices.prepId(' mydev ')
'mydev'
"""
return globalPrepId(id, subchar)
def checkValidId(self, id, prep_id = False):
"""
Checks that an id is a valid Zope id. Looks for invalid characters and
checks that the id doesn't already exist in this context.
@type id: string
@type prep_id: boolean
@rtype: boolean
>>> dmd.Devices.checkValidId('^*')
'The id "^*" contains characters illegal in URLs.'
>>> dmd.Devices.checkValidId('Server')
'The id "Server" is invalid - it is already in use.'
>>> dmd.Devices.checkValidId('ZenTestId')
True
"""
new_id = unquote(id)
if prep_id: new_id = self.prepId(id)
try:
globalCheckValidId(self, new_id)
return True
except Exception:
return str(sys.exc_info()[1])
def getUnusedId(self, relName, baseKey, extensionIter=None):
"""
Return a new id that is not already in use in the relationship. If
baseKey is not already in use, return that. Otherwise append values
from extensionIter to baseKey until an used key is found. The default
extensionIter appends integers starting with 2 and counting up.
@type relName: string
@type baseKey: string
@type extensionIter: iterator
@rtype: string
"""
import itertools
if extensionIter is None:
extensionIter = itertools.count(2)
rel = getattr(self, relName)
candidate = baseKey
while candidate in rel.objectIds():
candidate = self.prepId('%s%s' % (baseKey, extensionIter.next()))
return candidate
def getIdLink(self):
"""
DEPRECATED Return an a link to this object with its id as the name.
@return: An HTML link to this object
@rtype: string
>>> dmd.Devices.getIdLink()
'<a href="/zport/dmd/Devices">/</a>'
"""
return self.urlLink()
def callZenScreen(self, REQUEST, redirect=False):
"""
Call and return screen specified by zenScreenName value of REQUEST.
If zenScreenName is not present call the default screen. This is used
in functions that are called from forms to get back to the correct
screen with the correct context.
@param REQUEST: Zope request (may be None or a FakeRequest)
@param redirect: when true, issue an HTTP redirect instead of rendering
@return: rendered screen markup, or '' when nothing should be rendered
"""
if REQUEST is None or getattr(REQUEST, 'dontRender', False):
# EventView uses a FakeRequest class to avoid the overhead
# of rendering pages as result of ajax calls.
return ''
screenName = REQUEST.get("zenScreenName", "")
# A truthy 'redirect' form field forces redirect mode even when the
# caller passed redirect=False.
if not redirect and REQUEST.get("redirect", None) :
redirect = True
if redirect:
# Send the browser to the screen under the virtual-root-adjusted
# primary URL of this object.
path = getUtility(IVirtualRoot).ensure_virtual_root(
self.getPrimaryUrlPath())
nurl = "%s/%s" % (path, screenName)
REQUEST['RESPONSE'].redirect(nurl)
else:
# Render in place: repoint REQUEST['URL'] at the screen and invoke it
# directly; fall back to the default view when the screen attribute
# is missing on this object.
REQUEST['URL'] = "%s/%s" % (self.absolute_url_path(), screenName)
screen = getattr(self, screenName, False)
if not screen: return self()
return screen()
@unpublished
def zenScreenUrl(self):
"""
Return the url for the current screen as defined by zenScreenName.
If zenScreenName is not found in the request the request url is used.
@return: An url to this object
@rtype: string
"""
screenName = self.REQUEST.get("zenScreenName", "")
if not screenName: return self.REQUEST.URL
return self.getPrimaryUrlPath() + "/" + screenName
@unpublished
def urlLink(self, text=None, url=None, attrs={}):
"""
Return an anchor tag if the user has access to the remote object.
@param text: the text to place within the anchor tag or string.
Defaults to the id of this object.
@param url: url for the href. Default is getPrimaryUrlPath
@type attrs: dict
@param attrs: any other attributes to be place in the in the tag.
@return: An HTML link to this object
@rtype: string
"""
# NOTE(review): indentation was lost in this dump, so it is ambiguous
# whether escape() applies only to the defaulted titleOrId() label or
# to caller-supplied text as well — confirm nesting against upstream.
if not text:
text = self.titleOrId()
text = escape(text)
# Without View permission on self, emit the bare label with no link.
if not self.checkRemotePerm("View", self):
return text
if not url:
url = self.getPrimaryUrlPath()
# With extra attributes, render them space-joined inside the open tag.
if len(attrs):
return '<a href="%s" %s>%s</a>' % (url,
' '.join('%s="%s"' % (x,y) for x,y in attrs.items()),
text)
else:
return '<a href="%s">%s</a>' % (url, text)
def getBreadCrumbUrlPath(self):
"""
Return the url to be used in breadcrumbs for this object. normally
this is equal to getPrimaryUrlPath. It can be used as a hook to modify
the url so that it points towards a different tab then the default.
@return: A url to this object
@rtype: string
>>> dmd.Devices.getBreadCrumbUrlPath()
'/zport/dmd/Devices'
>>> rc = dmd.Reports._getOb('Graph Reports')
>>> rc.manage_addGraphReport('test').getBreadCrumbUrlPath()
'/zport/dmd/Reports/Graph%20Reports/test/editGraphReport'
"""
return self.getPrimaryUrlPath()
def getBreadCrumbName(self):
return self.title_or_id()
def breadCrumbs(self, terminator='dmd', terminate=lambda x: False):
"""
Return the data to create the breadcrumb links for this object.
This is a list of tuples where the first value is the URL of the bread
crumb and the second is the lable.
@param terminator: id at which to stop walking up (default 'dmd')
@param terminate: extra predicate; walking stops when it returns True
@return: List of tuples to create a bread crumbs
@rtype: list
>>> dmd.Devices.Server.breadCrumbs()
[('/zport/dmd/Devices', 'Devices'),
('/zport/dmd/Devices/Server', 'Server')]
"""
links = []
# Walk up the primary acquisition path, collecting (url, label) pairs
# until the terminator id or the terminate predicate stops the walk.
curDir = self.primaryAq()
while curDir.id != terminator and not terminate(curDir):
# Skip relationship containers; step to their primary parent instead.
if curDir.meta_type == 'ToManyContRelationship':
curDir = curDir.getPrimaryParent()
continue
# Stop once an ancestor does not participate in breadcrumbs.
if not getattr(aq_base(curDir),"getBreadCrumbUrlPath", False):
break
url = ""
# Only emit a clickable URL when the user may View the ancestor;
# otherwise the label is rendered without a link (empty url).
if self.checkRemotePerm("View", curDir):
url = curDir.getBreadCrumbUrlPath()
links.append((url, curDir.getBreadCrumbName()))
curDir = curDir.aq_parent
# Collected leaf-first; reverse for root-first display.
links.reverse()
return links
def upToOrganizerBreadCrumbs(self, terminator='dmd'):
def isOrganizer(curDir):
from Products.ZenModel.Organizer import Organizer
try:
return isinstance(curDir, Organizer)
except Exception:
return False
return ZenModelBase.breadCrumbs(self, terminator, isOrganizer)
security.declareProtected(ZEN_COMMON, 'checkRemotePerm')
def checkRemotePerm(self, permission, robject):
"""
Look to see if the current user has permission on remote object.
@param permission: Zope permission to be tested. ie "View"
@param robject: remote objecct on which test is run. Will test on
primary acquisition path.
@rtype: boolean
@permission: ZEN_COMMON
"""
user = getSecurityManager().getUser()
return user.has_permission(permission, robject.primaryAq())
security.declareProtected(ZEN_VIEW, 'zentinelTabs')
def zentinelTabs(self, templateName, REQUEST=None):
"""
Return a list of hashes that define the screen tabs for this object.
Keys in the hash are:
- action = the name of the page template for this tab
- name = the label used on the tab
- permissions = a tuple of permissions to view this template
@permission: ZEN_VIEW
>>> dmd.Devices.zentinelTabs('deviceOrganizerStatus')
[{'action': 'deviceOrganizerStatus', 'selected': True,
'name': 'Classes', 'permissions': ('View',)},
{'action': 'viewEvents', 'name': 'Events', 'permissions': ('View',)},
{'action': 'zPropertyEdit', 'name': 'Configuration Properties',
'permissions': ('View',)},
{'action': 'perfConfig', 'name': 'Templates',
'permissions': ('Manage DMD',)}]
"""
tabs = []
user = getSecurityManager().getUser()
# Tab definitions come from the factory_type_information 'actions' list.
actions = self.factory_type_information[0]['actions']
selectedTabName = self._selectedTabName(templateName, REQUEST)
for a in actions:
# A tab is shown only if the user holds at least one listed permission.
def permfilter(p): return user.has_permission(p,self)
permok = filter(permfilter, a['permissions'])
# NOTE(review): relies on Python 2 filter() returning a list; a
# Python 3 filter object is always truthy and would break this check.
if not a.get('visible', True) or not permok:
continue
# Copy before mutating so the shared factory_type_information entry
# is not modified in place.
a = a.copy()
if a['action'] == selectedTabName: a['selected'] = True
tabs.append(a)
return tabs
def _selectedTabName(self, templateName, REQUEST=None):
# Resolution order: an explicit 'selectedTabName' in the request wins,
# then the supplied template name, then the final path segment of the
# request URL.
if REQUEST and REQUEST.get('selectedTabName', '') :
selectedTabName = REQUEST.get('selectedTabName', '')
else:
selectedTabName = templateName
requestUrl = REQUEST['URL'] if REQUEST else None
if not selectedTabName and requestUrl and requestUrl.rfind('/') != -1:
selectedTabName = requestUrl[requestUrl.rfind('/') + 1:]
# Strip the Zope browser-view prefix so '@@viewName' matches plain
# action names.
if selectedTabName.startswith('@@'):
selectedTabName = selectedTabName[2:]
return selectedTabName
security.declareProtected(ZEN_MANAGE_DMD, 'zmanage_editProperties')
def zmanage_editProperties(self, REQUEST=None, redirect=False, audit=True):
"""
Edit a ZenModel object and return its proper page template.
Object will be reindexed if nessesary.
@permission: ZEN_MANAGE_DMD
"""
self.manage_changeProperties(**REQUEST.form)
index_object = getattr(self, 'index_object', lambda self: None)
index_object()
if REQUEST:
messaging.IMessageSender(self).sendToBrowser(
'Properties Saved',
"Saved At: %s" % self.getCurrentUserNowString()
)
if audit:
auditType = getDisplayType(self)
auditKind = 'Setting' if auditType == 'DataRoot' else auditType
auditFn(['UI', auditKind, 'Edit'],
data_=REQUEST.form,
skipFields_=('redirect',
'zenScreenName',
'zmanage_editProperties',
'curpasswd'),
maskFields_=('smtpPass'))
return self.callZenScreen(REQUEST, redirect=redirect)
security.declareProtected(ZEN_VIEW, 'getPrimaryDmdId')
def getPrimaryDmdId(self, rootName="dmd", subrel=""):
"""
Return the full dmd id of this object for instance /Devices/Server.
Everything before dmd is removed. A different rootName can be passed
to stop at a different object in the path. If subrel is passed any
relationship name in the path to the object will be removed.
@param rootName: Name of root
@type rootName: string
@param subrel: Name of relation
@type subrel: string
@return: Path to object
@rtype: string
@permission: ZEN_VIEW
"""
path = list(self.getPrimaryPath())
path = path[path.index(rootName)+1:]
if subrel: path = filter(lambda x: x != subrel, path)
return '/'+'/'.join(path)
@unpublished
def zenpathjoin(self, path):
"""
DEPRECATED Build a Zenoss path based on a list or tuple.
@type path: list or tuple
>>> dmd.zenpathjoin(('zport', 'dmd', 'Devices', 'Server'))
'/zport/dmd/Devices/Server'
"""
return zenpathjoin(path)
def zenpathsplit(self, path):
"""
DEPRECATED Split a path on its '/'.
"""
return zenpathsplit(path)
def createHierarchyObj(self, root, name, factory, relpath="", alog=None):
"""
DEPRECATED this is only seems to be used in Organizer.createOrganizer -
Create an object from its path we use relpath to skip down any missing
relations in the path and factory is the constructor for this object.
"""
return createHierarchyObj(root, name, factory, relpath, alog)
@unpublished
def getHierarchyObj(self, root, name, relpath):
"""
DEPRECATED this doesn't seem to be used anywere don't use it!!!
"""
return getHierarchyObj(root, name, relpath)
def getDmd(self):
"""
DEPRECATED Return the dmd root object with unwraped acquisition path.
>>> dmd.Devices.Server.getDmd()
<DataRoot at /zport/dmd>
"""
for obj in aq_chain(self):
if getattr(obj, 'id', None) == 'dmd': return obj
def getDmdRoot(self, name):
"""
Return a dmd root organizer such as "Systems". The acquisition path
will be cleaned so that it points directly to the root.
>>> dmd.Devices.Server.getDmdRoot("Systems")
<System at /zport/dmd/Systems>
"""
dmd = self.getDmd()
return dmd._getOb(name)
def getDmdObj(self, path):
"""
DEPRECATED Return an object from path that starts at dmd.
>>> dmd.getDmdObj('/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
if path.startswith("/"): path = path[1:]
return self.getDmd().getObjByPath(path)
def getZopeObj(self, path):
"""
DEPRECATED Return an object from path tat starts at zope root.
>>> dmd.getZopeObj('/zport/dmd/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
return self.getObjByPath(path)
def convertToUsersTimeZone(self, timestamp):
"""
This is an instance method so that it is available to
tal statements, such as reports.
"""
user = self.zport.dmd.ZenUsers.getUserSettings()
if user.timezone:
utc_dt = pytz.utc.localize(datetime.utcfromtimestamp(int(timestamp)))
tz = pytz.timezone(user.timezone)
tval = tz.normalize(utc_dt.astimezone(tz))
return tval.strftime(convertJsTimeFormatToPy(user.dateFormat+" "+user.timeFormat))
return isoDateTime(timestamp)
def getCurrentUserNowString(self):
return self.convertToUsersTimeZone(time.time())
def getNowString(self):
    """
    Current local time rendered as 'YYYY/MM/DD HH:MM:SS'
    (e.g. '2007/09/27 14:09:53').
    @rtype: string
    """
    now = time.localtime()
    return time.strftime("%Y/%m/%d %H:%M:%S", now)
def todayDate(self):
    """
    Today's date in the local timezone, formatted 'mm/dd/yyyy'.
    @rtype: string
    """
    fmt = "%m/%d/%Y"
    return time.strftime(fmt, time.localtime())
def yesterdayDate(self):
    """
    Yesterday's date in the local timezone, formatted 'mm/dd/yyyy'.
    @rtype: string
    """
    seconds_per_day = 24 * 3600
    then = time.localtime(time.time() - seconds_per_day)
    return time.strftime("%m/%d/%Y", then)
def all_meta_types(self, interfaces=None):
"""
DEPRECATED Override the ObjectManager method that is used to control
the items available in the add drop down in the ZMI. It uses the
attribute sub_menu_items to create the data structures. This is a list
of meta_types for the available classes. This functionality is rarely
used in Zenoss because the ZMI is not the perfered management
interface.
"""
mts = super(ZenModelBase,self).all_meta_types(interfaces)
if self.sub_meta_types:
mts = filter(lambda mt: mt['name'] in self.sub_meta_types, mts)
return mts
security.declareProtected('Delete objects', 'manage_deleteObjects')
def manage_deleteObjects(self, ids=(), REQUEST=None):
    """
    Delete objects by id from this object and return to the current
    template as defined by callZenScreen.  Uses ObjectManager._delObject
    to remove each object.
    @param ids: ids of the child objects to delete
    @type ids: sequence of strings
    @param REQUEST: when given, the current Zen screen is re-rendered
    @permission: ZEN_VIEW
    """
    # Identifier restored: the FIM hole at this line removed the method
    # name, which the dataset's answer column records as
    # 'manage_deleteObjects' (matching the security declaration above).
    for id in ids:
        self._delObject(id)
    if REQUEST:
        return self.callZenScreen(REQUEST)
def custPropertyIds(self):
"""
List custom properties that are defined at root node. Custom properties
start with a lower "c" followed by a uppercase character.
"""
return self.zenPropertyIds(pfilt=iscustprop)
def custPropertyMap(self):
"""
Return custom property definitions.
@rtype: [{'id':'cName','label':'Name', 'type':'string'},]
"""
return self.zenPropertyMap(pfilt=iscustprop)
def visibleCustPropertyMap(self):
"""
List custom property definitions that are visible using
custPropertyMap::
@rtype: [{'id':'cName','label':'Name', 'type':'string'},]
"""
return [ p for p in self.zenPropertyMap(pfilt=iscustprop) \
if p.get('visible', True) ]
security.declareProtected(ZEN_MANAGE_DMD, 'saveCustProperties')
def saveCustProperties(self, REQUEST):
"""
Save custom properties from REQUEST.form.
@permission: ZEN_MANAGE_DMD
"""
redirect = self.saveZenProperties(iscustprop, REQUEST)
auditFn(['UI', getDisplayType(self), 'Edit'], self, data_=REQUEST.form,
skipFields_=('zenScreenName', 'saveCustProperties'))
return redirect
def getObjByPath(self, path):
"""
Lookup and object by its path. Basically does a Zope unrestricted
traverse on the path given.
@type path: list or string /zport/dmd/Devices
>>> dmd.getObjByPath(('zport','dmd','Devices'))
<DeviceClass at /zport/dmd/Devices>
>>> dmd.getObjByPath(('Devices','Server'))
<DeviceClass at /zport/dmd/Devices/Server>
>>> dmd.getObjByPath('/zport/dmd/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
>>> dmd.getObjByPath('Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
return getObjByPath(self, path)
def isLocalName(self, name):
"""
Check to see if a name is local to our current context or if it comes
from our acquisition chain.
@rtype: boolean
>>> dmd.isLocalName('Devices')
True
>>> dmd.Devices.Server.isLocalName('Devices')
False
"""
v = getattr(aq_base(self), name, '__ZENMARKER__')
return v != '__ZENMARKER__'
security.declareProtected(ZEN_VIEW, 'helpLink')
def helpLink(self):
"""
DEPRECATED Return a link to the objects help file.
@permission: ZEN_VIEW
"""
path = self.__class__.__module__.split('.')
className = path[-1].replace('Class','')
product = path[-2]
path = ("", "Control_Panel", "Products", product, "Help",
"%s.stx"%className)
# check to see if we have a help screen
app = self.getPhysicalRoot()
try:
app.restrictedTraverse(path)
except (KeyError, Unauthorized):
return ""
url = "/HelpSys?help_url="+ "/".join(path)
return """<a class="tabletitle" href="%s" \
onClick="window.open('%s','zope_help','width=600,height=500, \
menubar=yes,toolbar=yes,scrollbars=yes,resizable=yes'); \
return false;" onMouseOver="window.status='Open online help'; \
return true;" onMouseOut="window.status=''; return true;">Help!</a>
""" % (url, url)
security.declareProtected(ZEN_VIEW, 'getIconPath')
def getIconPath(self):
    """
    Path to this object's icon as configured by the zIcon zProperty on
    the primary acquisition path; falls back to the generic 'noicon'
    image when the property is absent.
    @return: Path to icon
    @rtype: string
    @permission: ZEN_VIEW
    """
    fallback = '/zport/dmd/img/icons/noicon.png'
    try:
        icon = self.primaryAq().zIcon
    except AttributeError:
        return fallback
    return icon
def aqBaseHasAttr(self, attr):
"""
Return hasattr(aq_base(self), attr)
This is a convenience function for use in templates, where it's not
so easy to make a similar call directly.
hasattr itself will swallow exceptions, so we don't want to use that.
We also need to allow for values of None, so something like
getattr(aq_base(self, attr, None) doesn't really tell us anything.
Testing __dict__ is not a good choice because it doesn't allow
for properties (and I believe __getitem__ calls.)
So while this looks pretty attrocious, it might be the most sane
solution.
"""
return getattr(aq_base(self), attr, _MARKER) is not _MARKER
def get_csrf_token(self):
"""
Returns string with CSRF token for current user.
"""
return get_csrf_token(self.REQUEST)
class ZenModelZenDocProvider(object):
"""
Adapter providing IZenDocProvider for ZenModelBase objects: reads and
writes the adapted object's _zendoc attribute and can serialize it as
an XML <property> element.
"""
zope.interface.implements(IZenDocProvider)
zope.component.adapts(ZenModelBase)
def __init__(self, zenModelBase):
self._underlyingObject = zenModelBase
def getZendoc(self):
"""Return the zendoc text of the adapted object."""
zendoc = self._underlyingObject._zendoc
# Fall back to a legacy 'description' attribute when no explicit
# zendoc has been set on the object.
if not zendoc and self._underlyingObject.aqBaseHasAttr( 'description' ):
zendoc = self._underlyingObject.description
return zendoc
def setZendoc(self, zendocText):
"""Store zendocText as the adapted object's zendoc."""
self._underlyingObject._zendoc = zendocText
def exportZendoc(self,ofile):
"""Return an xml representation of a RelationshipManagers zendoc
<property id='_zendoc' type='string' mode='w'>
value
</property>
"""
value = self.getZendoc()
# Nothing to export when zendoc is empty or None.
if not value: return
ofile.write("<property id='zendoc' type='string'>\n")
# NOTE(review): basestring/unicode are Python 2 builtins; non-strings
# are coerced to unicode, byte strings decoded as latin-1, then the
# escaped value is re-encoded as UTF-8.
if not isinstance(value, basestring):
value = unicode(value)
elif isinstance(value, str):
value = value.decode('latin-1')
ofile.write(saxutils.escape(value).encode('utf-8')+"\n")
ofile.write("</property>\n")
InitializeClass(ZenModelBase)
| manage_deleteObjects | identifier_name |
ZenModelBase.py | ##############################################################################
#
# Copyright (C) Zenoss, Inc. 2007, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
__doc__="""ZenModelBase
$Id: ZenModelBase.py,v 1.17 2004/04/23 19:11:58 edahl Exp $"""
__version__ = "$Revision: 1.17 $"[11:-2]
import re
import time
import sys
from xml.sax import saxutils
from urllib import unquote
from cgi import escape
import zope.component
import zope.interface
from datetime import datetime
import pytz
from OFS.ObjectManager import checkValidId as globalCheckValidId
from AccessControl import ClassSecurityInfo, getSecurityManager, Unauthorized
from AccessControl.class_init import InitializeClass
from Acquisition import aq_base, aq_chain
from zope.component import getGlobalSiteManager, getUtility
from Products.ZenModel.interfaces import IZenDocProvider
from Products.ZenRelations.ZenPropertyManager import iscustprop
from Products.ZenUtils.Utils import zenpathsplit, zenpathjoin, getDisplayType
from Products.ZenUtils.Utils import createHierarchyObj, getHierarchyObj
from Products.ZenUtils.Utils import getObjByPath, unpublished
from Products.ZenUtils.csrf import get_csrf_token
from Products.ZenUtils.Utils import prepId as globalPrepId, isXmlRpc
from Products.ZenWidgets import messaging
from Products.ZenUtils.Time import convertTimestampToTimeZone, isoDateTime, convertJsTimeFormatToPy
from Products.ZenUI3.browser.interfaces import INewPath
from Products.ZenMessaging.audit import audit as auditFn
from ZenossSecurity import *
from Products.ZenUtils.virtual_root import IVirtualRoot
_MARKER = object()
class ZenModelBase(object):
"""
All ZenModel Persistent classes inherit from this class. It provides some
screen management functionality, and general utility methods.
"""
_zendoc = ''
sub_meta_types = ()
#prodStateThreshold = 500
security = ClassSecurityInfo()
def __call__(self):
"""
Invokes the default view.
"""
if isXmlRpc(self.REQUEST):
return self
else:
newpath = INewPath(self)
newpath = getUtility(IVirtualRoot).ensure_virtual_root(newpath)
self.REQUEST.response.redirect(newpath)
def __repr__(self):
"""Return a short string describing the object."""
# Format: '<ClassName at /physical/path>' — matches the doctest output
# shown elsewhere in this module (e.g. '<DataRoot at /zport/dmd>').
return "<{0} at {1}>".format(
self.__class__.__name__, '/'.join(self.getPhysicalPath()),
)
index_html = None # This special value informs ZPublisher to use __call__
security.declareProtected(ZEN_VIEW, 'view')
def view(self):
'''
Returns the default view even if index_html is overridden.
@permission: ZEN_VIEW
'''
return self()
def __hash__(self):
return hash(self.id)
def prepId(self, id, subchar='_'):
"""
Clean out an id of illegal characters.
@type id: string
@param subchar: Character to be substituted with illegal characters
@type subchar: string
@rtype: string
>>> dmd.Devices.prepId('ab^*cd')
'ab__cd'
>>> dmd.Devices.prepId('ab^*cd', subchar='Z')
'abZZcd'
>>> dmd.Devices.prepId('/boot')
'boot'
>>> dmd.Devices.prepId('/')
'-'
>>> dmd.Devices.prepId(' mydev ')
'mydev'
"""
return globalPrepId(id, subchar)
def checkValidId(self, id, prep_id = False):
"""
Checks that an id is a valid Zope id. Looks for invalid characters and
checks that the id doesn't already exist in this context.
@type id: string
@type prep_id: boolean
@rtype: boolean
>>> dmd.Devices.checkValidId('^*')
'The id "^*" contains characters illegal in URLs.'
>>> dmd.Devices.checkValidId('Server')
'The id "Server" is invalid - it is already in use.'
>>> dmd.Devices.checkValidId('ZenTestId')
True
"""
new_id = unquote(id)
if prep_id: new_id = self.prepId(id)
try:
globalCheckValidId(self, new_id)
return True
except Exception:
return str(sys.exc_info()[1])
def getUnusedId(self, relName, baseKey, extensionIter=None):
"""
Return a new id that is not already in use in the relationship. If
baseKey is not already in use, return that. Otherwise append values
from extensionIter to baseKey until an used key is found. The default
extensionIter appends integers starting with 2 and counting up.
@type relName: string
@type baseKey: string
@type extensionIter: iterator
@rtype: string
"""
import itertools
if extensionIter is None:
extensionIter = itertools.count(2)
rel = getattr(self, relName)
candidate = baseKey
while candidate in rel.objectIds():
candidate = self.prepId('%s%s' % (baseKey, extensionIter.next()))
return candidate
def getIdLink(self):
"""
DEPRECATED Return an a link to this object with its id as the name.
@return: An HTML link to this object
@rtype: string
>>> dmd.Devices.getIdLink()
'<a href="/zport/dmd/Devices">/</a>'
"""
return self.urlLink()
def callZenScreen(self, REQUEST, redirect=False):
"""
Call and return screen specified by zenScreenName value of REQUEST.
If zenScreenName is not present call the default screen. This is used
in functions that are called from forms to get back to the correct
screen with the correct context.
"""
if REQUEST is None or getattr(REQUEST, 'dontRender', False):
# EventView uses a FakeRequest class to avoid the overhead
# of rendering pages as result of ajax calls.
return ''
screenName = REQUEST.get("zenScreenName", "")
if not redirect and REQUEST.get("redirect", None) :
redirect = True
if redirect:
path = getUtility(IVirtualRoot).ensure_virtual_root(
self.getPrimaryUrlPath())
nurl = "%s/%s" % (path, screenName)
REQUEST['RESPONSE'].redirect(nurl)
else:
REQUEST['URL'] = "%s/%s" % (self.absolute_url_path(), screenName)
screen = getattr(self, screenName, False)
if not screen: return self()
return screen()
@unpublished
def zenScreenUrl(self):
"""
Return the url for the current screen as defined by zenScreenName.
If zenScreenName is not found in the request the request url is used.
@return: An url to this object
@rtype: string
"""
screenName = self.REQUEST.get("zenScreenName", "")
if not screenName: return self.REQUEST.URL
return self.getPrimaryUrlPath() + "/" + screenName
@unpublished
def urlLink(self, text=None, url=None, attrs={}):
"""
Return an anchor tag if the user has access to the remote object.
@param text: the text to place within the anchor tag or string.
Defaults to the id of this object.
@param url: url for the href. Default is getPrimaryUrlPath
@type attrs: dict
@param attrs: any other attributes to be place in the in the tag.
@return: An HTML link to this object
@rtype: string
"""
if not text:
text = self.titleOrId()
text = escape(text)
if not self.checkRemotePerm("View", self):
return text
if not url:
url = self.getPrimaryUrlPath()
if len(attrs):
return '<a href="%s" %s>%s</a>' % (url,
' '.join('%s="%s"' % (x,y) for x,y in attrs.items()),
text)
else:
return '<a href="%s">%s</a>' % (url, text)
def getBreadCrumbUrlPath(self):
"""
Return the url to be used in breadcrumbs for this object. normally
this is equal to getPrimaryUrlPath. It can be used as a hook to modify
the url so that it points towards a different tab then the default.
@return: A url to this object
@rtype: string
>>> dmd.Devices.getBreadCrumbUrlPath()
'/zport/dmd/Devices'
>>> rc = dmd.Reports._getOb('Graph Reports')
>>> rc.manage_addGraphReport('test').getBreadCrumbUrlPath()
'/zport/dmd/Reports/Graph%20Reports/test/editGraphReport'
"""
return self.getPrimaryUrlPath()
def getBreadCrumbName(self):
return self.title_or_id()
def breadCrumbs(self, terminator='dmd', terminate=lambda x: False):
"""
Return the data to create the breadcrumb links for this object.
This is a list of tuples where the first value is the URL of the bread
crumb and the second is the lable.
@return: List of tuples to create a bread crumbs
@rtype: list
>>> dmd.Devices.Server.breadCrumbs()
[('/zport/dmd/Devices', 'Devices'),
('/zport/dmd/Devices/Server', 'Server')]
"""
links = []
curDir = self.primaryAq()
while curDir.id != terminator and not terminate(curDir):
if curDir.meta_type == 'ToManyContRelationship':
curDir = curDir.getPrimaryParent()
continue
if not getattr(aq_base(curDir),"getBreadCrumbUrlPath", False):
break
url = ""
if self.checkRemotePerm("View", curDir):
url = curDir.getBreadCrumbUrlPath()
links.append((url, curDir.getBreadCrumbName()))
curDir = curDir.aq_parent
links.reverse()
return links
def upToOrganizerBreadCrumbs(self, terminator='dmd'):
def isOrganizer(curDir):
from Products.ZenModel.Organizer import Organizer
try:
return isinstance(curDir, Organizer)
except Exception:
return False
return ZenModelBase.breadCrumbs(self, terminator, isOrganizer)
security.declareProtected(ZEN_COMMON, 'checkRemotePerm')
def checkRemotePerm(self, permission, robject):
"""
Look to see if the current user has permission on remote object.
@param permission: Zope permission to be tested. ie "View"
@param robject: remote objecct on which test is run. Will test on
primary acquisition path.
@rtype: boolean
@permission: ZEN_COMMON
"""
user = getSecurityManager().getUser()
return user.has_permission(permission, robject.primaryAq())
security.declareProtected(ZEN_VIEW, 'zentinelTabs')
def zentinelTabs(self, templateName, REQUEST=None):
"""
Return a list of hashes that define the screen tabs for this object.
Keys in the hash are:
- action = the name of the page template for this tab
- name = the label used on the tab
- permissions = a tuple of permissions to view this template
@permission: ZEN_VIEW
>>> dmd.Devices.zentinelTabs('deviceOrganizerStatus')
[{'action': 'deviceOrganizerStatus', 'selected': True,
'name': 'Classes', 'permissions': ('View',)},
{'action': 'viewEvents', 'name': 'Events', 'permissions': ('View',)},
{'action': 'zPropertyEdit', 'name': 'Configuration Properties',
'permissions': ('View',)},
{'action': 'perfConfig', 'name': 'Templates',
'permissions': ('Manage DMD',)}]
"""
tabs = []
user = getSecurityManager().getUser()
actions = self.factory_type_information[0]['actions']
selectedTabName = self._selectedTabName(templateName, REQUEST)
for a in actions:
def permfilter(p): return user.has_permission(p,self)
permok = filter(permfilter, a['permissions'])
if not a.get('visible', True) or not permok:
continue
a = a.copy()
if a['action'] == selectedTabName: a['selected'] = True
tabs.append(a)
return tabs
def _selectedTabName(self, templateName, REQUEST=None):
if REQUEST and REQUEST.get('selectedTabName', '') :
selectedTabName = REQUEST.get('selectedTabName', '')
else:
selectedTabName = templateName
requestUrl = REQUEST['URL'] if REQUEST else None
if not selectedTabName and requestUrl and requestUrl.rfind('/') != -1:
selectedTabName = requestUrl[requestUrl.rfind('/') + 1:]
if selectedTabName.startswith('@@'):
selectedTabName = selectedTabName[2:]
return selectedTabName
security.declareProtected(ZEN_MANAGE_DMD, 'zmanage_editProperties')
def zmanage_editProperties(self, REQUEST=None, redirect=False, audit=True):
"""
Edit a ZenModel object and return its proper page template.
Object will be reindexed if nessesary.
@permission: ZEN_MANAGE_DMD
"""
self.manage_changeProperties(**REQUEST.form)
index_object = getattr(self, 'index_object', lambda self: None)
index_object()
if REQUEST:
messaging.IMessageSender(self).sendToBrowser(
'Properties Saved',
"Saved At: %s" % self.getCurrentUserNowString()
)
if audit:
auditType = getDisplayType(self)
auditKind = 'Setting' if auditType == 'DataRoot' else auditType
auditFn(['UI', auditKind, 'Edit'],
data_=REQUEST.form,
skipFields_=('redirect',
'zenScreenName',
'zmanage_editProperties',
'curpasswd'),
maskFields_=('smtpPass'))
return self.callZenScreen(REQUEST, redirect=redirect)
security.declareProtected(ZEN_VIEW, 'getPrimaryDmdId')
def getPrimaryDmdId(self, rootName="dmd", subrel=""):
"""
Return the full dmd id of this object for instance /Devices/Server.
Everything before dmd is removed. A different rootName can be passed
to stop at a different object in the path. If subrel is passed any
relationship name in the path to the object will be removed.
@param rootName: Name of root
@type rootName: string
@param subrel: Name of relation
@type subrel: string
@return: Path to object
@rtype: string
@permission: ZEN_VIEW
"""
path = list(self.getPrimaryPath())
path = path[path.index(rootName)+1:]
if subrel: path = filter(lambda x: x != subrel, path)
return '/'+'/'.join(path)
@unpublished
def zenpathjoin(self, path):
"""
DEPRECATED Build a Zenoss path based on a list or tuple.
@type path: list or tuple
>>> dmd.zenpathjoin(('zport', 'dmd', 'Devices', 'Server'))
'/zport/dmd/Devices/Server'
"""
return zenpathjoin(path)
def zenpathsplit(self, path):
    """
    DEPRECATED Split a path on its '/'.
    @type path: string
    @return: the path components
    """
    # Body restored: a FIM hole removed it here; the identical method in
    # the other copy of this module delegates to the module-level
    # zenpathsplit imported from Products.ZenUtils.Utils.
    return zenpathsplit(path)
def createHierarchyObj(self, root, name, factory, relpath="", alog=None):
"""
DEPRECATED this is only seems to be used in Organizer.createOrganizer -
Create an object from its path we use relpath to skip down any missing
relations in the path and factory is the constructor for this object.
"""
return createHierarchyObj(root, name, factory, relpath, alog)
@unpublished
def getHierarchyObj(self, root, name, relpath):
"""
DEPRECATED this doesn't seem to be used anywere don't use it!!!
"""
return getHierarchyObj(root, name, relpath)
def getDmd(self):
"""
DEPRECATED Return the dmd root object with unwraped acquisition path.
>>> dmd.Devices.Server.getDmd()
<DataRoot at /zport/dmd>
"""
for obj in aq_chain(self):
if getattr(obj, 'id', None) == 'dmd': return obj
def getDmdRoot(self, name):
"""
Return a dmd root organizer such as "Systems". The acquisition path
will be cleaned so that it points directly to the root.
>>> dmd.Devices.Server.getDmdRoot("Systems")
<System at /zport/dmd/Systems>
"""
dmd = self.getDmd()
return dmd._getOb(name)
def getDmdObj(self, path):
"""
DEPRECATED Return an object from path that starts at dmd.
>>> dmd.getDmdObj('/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
if path.startswith("/"): path = path[1:]
return self.getDmd().getObjByPath(path)
def getZopeObj(self, path):
"""
DEPRECATED Return an object from path tat starts at zope root.
>>> dmd.getZopeObj('/zport/dmd/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
return self.getObjByPath(path)
def convertToUsersTimeZone(self, timestamp):
"""
This is an instance method so that it is available to
tal statements, such as reports.
"""
user = self.zport.dmd.ZenUsers.getUserSettings()
if user.timezone:
utc_dt = pytz.utc.localize(datetime.utcfromtimestamp(int(timestamp)))
tz = pytz.timezone(user.timezone)
tval = tz.normalize(utc_dt.astimezone(tz))
return tval.strftime(convertJsTimeFormatToPy(user.dateFormat+" "+user.timeFormat))
return isoDateTime(timestamp)
def getCurrentUserNowString(self):
return self.convertToUsersTimeZone(time.time())
def getNowString(self):
"""
Return the current time as a string in the format '2007/09/27 14:09:53'.
@rtype: string
"""
return time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())
def todayDate(self):
"""
Return today's date as a string in the format 'mm/dd/yyyy'.
@rtype: string
"""
return time.strftime("%m/%d/%Y", time.localtime())
def yesterdayDate(self):
"""
Return yesterday's date as a string in the format 'mm/dd/yyyy'.
@rtype: string
"""
yesterday = time.time() - 24*3600
return time.strftime("%m/%d/%Y", time.localtime(yesterday))
def all_meta_types(self, interfaces=None):
"""
DEPRECATED Override the ObjectManager method that is used to control
the items available in the add drop down in the ZMI. It uses the
attribute sub_menu_items to create the data structures. This is a list
of meta_types for the available classes. This functionality is rarely
used in Zenoss because the ZMI is not the perfered management
interface.
"""
mts = super(ZenModelBase,self).all_meta_types(interfaces)
if self.sub_meta_types:
mts = filter(lambda mt: mt['name'] in self.sub_meta_types, mts)
return mts
security.declareProtected('Delete objects', 'manage_deleteObjects')
def manage_deleteObjects(self, ids=(), REQUEST=None):
"""
Delete objects by id from this object and return to the current
template as defined by callZenScreen. Uses ObjectManager._delObject to
remove the object.
@permission: ZEN_VIEW
"""
for id in ids: self._delObject(id)
if REQUEST:
return self.callZenScreen(REQUEST)
def custPropertyIds(self):
"""
List custom properties that are defined at root node. Custom properties
start with a lower "c" followed by a uppercase character.
"""
return self.zenPropertyIds(pfilt=iscustprop)
def custPropertyMap(self):
"""
Return custom property definitions.
@rtype: [{'id':'cName','label':'Name', 'type':'string'},]
"""
return self.zenPropertyMap(pfilt=iscustprop)
def visibleCustPropertyMap(self):
"""
List custom property definitions that are visible using
custPropertyMap::
@rtype: [{'id':'cName','label':'Name', 'type':'string'},]
"""
return [ p for p in self.zenPropertyMap(pfilt=iscustprop) \
if p.get('visible', True) ]
security.declareProtected(ZEN_MANAGE_DMD, 'saveCustProperties')
def saveCustProperties(self, REQUEST):
"""
Save custom properties from REQUEST.form.
@permission: ZEN_MANAGE_DMD
"""
redirect = self.saveZenProperties(iscustprop, REQUEST)
auditFn(['UI', getDisplayType(self), 'Edit'], self, data_=REQUEST.form,
skipFields_=('zenScreenName', 'saveCustProperties'))
return redirect
def getObjByPath(self, path):
"""
Lookup and object by its path. Basically does a Zope unrestricted
traverse on the path given.
@type path: list or string /zport/dmd/Devices
>>> dmd.getObjByPath(('zport','dmd','Devices'))
<DeviceClass at /zport/dmd/Devices>
>>> dmd.getObjByPath(('Devices','Server'))
<DeviceClass at /zport/dmd/Devices/Server>
>>> dmd.getObjByPath('/zport/dmd/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
>>> dmd.getObjByPath('Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
return getObjByPath(self, path)
def isLocalName(self, name):
"""
Check to see if a name is local to our current context or if it comes
from our acquisition chain.
@rtype: boolean
>>> dmd.isLocalName('Devices')
True
>>> dmd.Devices.Server.isLocalName('Devices')
False
"""
v = getattr(aq_base(self), name, '__ZENMARKER__')
return v != '__ZENMARKER__'
security.declareProtected(ZEN_VIEW, 'helpLink')
def helpLink(self):
"""
DEPRECATED Return a link to the objects help file.
@permission: ZEN_VIEW
"""
path = self.__class__.__module__.split('.')
className = path[-1].replace('Class','')
product = path[-2]
path = ("", "Control_Panel", "Products", product, "Help",
"%s.stx"%className)
# check to see if we have a help screen
app = self.getPhysicalRoot()
try:
app.restrictedTraverse(path)
except (KeyError, Unauthorized):
return ""
url = "/HelpSys?help_url="+ "/".join(path)
return """<a class="tabletitle" href="%s" \
onClick="window.open('%s','zope_help','width=600,height=500, \
menubar=yes,toolbar=yes,scrollbars=yes,resizable=yes'); \
return false;" onMouseOver="window.status='Open online help'; \
return true;" onMouseOut="window.status=''; return true;">Help!</a>
""" % (url, url)
security.declareProtected(ZEN_VIEW, 'getIconPath')
def getIconPath(self):
"""
Return the icon associated with this object. The icon path is defined
in the zProperty zIcon.
@return: Path to icon
@rtype: string
@permission: ZEN_VIEW
"""
try:
return self.primaryAq().zIcon
except AttributeError:
return '/zport/dmd/img/icons/noicon.png'
def aqBaseHasAttr(self, attr):
"""
Return hasattr(aq_base(self), attr)
This is a convenience function for use in templates, where it's not
so easy to make a similar call directly.
hasattr itself will swallow exceptions, so we don't want to use that.
We also need to allow for values of None, so something like
getattr(aq_base(self, attr, None) doesn't really tell us anything.
Testing __dict__ is not a good choice because it doesn't allow
for properties (and I believe __getitem__ calls.)
So while this looks pretty attrocious, it might be the most sane
solution.
"""
return getattr(aq_base(self), attr, _MARKER) is not _MARKER
def get_csrf_token(self):
"""
Returns string with CSRF token for current user.
"""
return get_csrf_token(self.REQUEST)
class ZenModelZenDocProvider(object):
zope.interface.implements(IZenDocProvider)
zope.component.adapts(ZenModelBase)
def __init__(self, zenModelBase):
self._underlyingObject = zenModelBase
def getZendoc(self):
zendoc = self._underlyingObject._zendoc
if not zendoc and self._underlyingObject.aqBaseHasAttr( 'description' ):
zendoc = self._underlyingObject.description
return zendoc
def setZendoc(self, zendocText):
self._underlyingObject._zendoc = zendocText
def exportZendoc(self,ofile):
"""Return an xml representation of a RelationshipManagers zendoc
<property id='_zendoc' type='string' mode='w'>
value
</property>
"""
value = self.getZendoc()
if not value: return
ofile.write("<property id='zendoc' type='string'>\n")
if not isinstance(value, basestring):
value = unicode(value)
elif isinstance(value, str):
value = value.decode('latin-1')
ofile.write(saxutils.escape(value).encode('utf-8')+"\n")
ofile.write("</property>\n")
InitializeClass(ZenModelBase)
| """
DEPRECATED Split a path on its '/'.
"""
return zenpathsplit(path) | identifier_body |
ZenModelBase.py | ##############################################################################
#
# Copyright (C) Zenoss, Inc. 2007, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
__doc__="""ZenModelBase
$Id: ZenModelBase.py,v 1.17 2004/04/23 19:11:58 edahl Exp $"""
__version__ = "$Revision: 1.17 $"[11:-2]
import re
import time
import sys
from xml.sax import saxutils
from urllib import unquote
from cgi import escape
import zope.component
import zope.interface
from datetime import datetime
import pytz
from OFS.ObjectManager import checkValidId as globalCheckValidId
from AccessControl import ClassSecurityInfo, getSecurityManager, Unauthorized
from AccessControl.class_init import InitializeClass
from Acquisition import aq_base, aq_chain
from zope.component import getGlobalSiteManager, getUtility
from Products.ZenModel.interfaces import IZenDocProvider
from Products.ZenRelations.ZenPropertyManager import iscustprop
from Products.ZenUtils.Utils import zenpathsplit, zenpathjoin, getDisplayType
from Products.ZenUtils.Utils import createHierarchyObj, getHierarchyObj
from Products.ZenUtils.Utils import getObjByPath, unpublished
from Products.ZenUtils.csrf import get_csrf_token
from Products.ZenUtils.Utils import prepId as globalPrepId, isXmlRpc
from Products.ZenWidgets import messaging
from Products.ZenUtils.Time import convertTimestampToTimeZone, isoDateTime, convertJsTimeFormatToPy
from Products.ZenUI3.browser.interfaces import INewPath
from Products.ZenMessaging.audit import audit as auditFn
from ZenossSecurity import *
from Products.ZenUtils.virtual_root import IVirtualRoot
_MARKER = object()
class ZenModelBase(object):
"""
All ZenModel Persistent classes inherit from this class. It provides some
screen management functionality, and general utility methods.
"""
_zendoc = ''
sub_meta_types = ()
#prodStateThreshold = 500
security = ClassSecurityInfo()
def __call__(self):
"""
Invokes the default view.
"""
if isXmlRpc(self.REQUEST):
return self
else:
newpath = INewPath(self)
newpath = getUtility(IVirtualRoot).ensure_virtual_root(newpath)
self.REQUEST.response.redirect(newpath)
def __repr__(self):
"""Return a short string describing the object."""
return "<{0} at {1}>".format(
self.__class__.__name__, '/'.join(self.getPhysicalPath()),
)
index_html = None # This special value informs ZPublisher to use __call__
security.declareProtected(ZEN_VIEW, 'view')
def view(self):
'''
Returns the default view even if index_html is overridden.
@permission: ZEN_VIEW
'''
return self()
def __hash__(self):
return hash(self.id)
def prepId(self, id, subchar='_'):
"""
Clean out an id of illegal characters.
@type id: string
@param subchar: Character to be substituted with illegal characters
@type subchar: string
@rtype: string
>>> dmd.Devices.prepId('ab^*cd')
'ab__cd'
>>> dmd.Devices.prepId('ab^*cd', subchar='Z')
'abZZcd'
>>> dmd.Devices.prepId('/boot')
'boot'
>>> dmd.Devices.prepId('/')
'-'
>>> dmd.Devices.prepId(' mydev ')
'mydev'
"""
return globalPrepId(id, subchar)
def checkValidId(self, id, prep_id = False):
"""
Checks that an id is a valid Zope id. Looks for invalid characters and
checks that the id doesn't already exist in this context.
@type id: string
@type prep_id: boolean
@rtype: boolean
>>> dmd.Devices.checkValidId('^*')
'The id "^*" contains characters illegal in URLs.'
>>> dmd.Devices.checkValidId('Server')
'The id "Server" is invalid - it is already in use.'
>>> dmd.Devices.checkValidId('ZenTestId')
True
"""
new_id = unquote(id)
if prep_id: new_id = self.prepId(id)
try:
globalCheckValidId(self, new_id)
return True
except Exception:
return str(sys.exc_info()[1])
def getUnusedId(self, relName, baseKey, extensionIter=None):
"""
Return a new id that is not already in use in the relationship. If
baseKey is not already in use, return that. Otherwise append values
from extensionIter to baseKey until an used key is found. The default
extensionIter appends integers starting with 2 and counting up.
@type relName: string
@type baseKey: string
@type extensionIter: iterator
@rtype: string
"""
import itertools
if extensionIter is None:
extensionIter = itertools.count(2)
rel = getattr(self, relName)
candidate = baseKey
while candidate in rel.objectIds():
candidate = self.prepId('%s%s' % (baseKey, extensionIter.next()))
return candidate
def getIdLink(self): | DEPRECATED Return an a link to this object with its id as the name.
@return: An HTML link to this object
@rtype: string
>>> dmd.Devices.getIdLink()
'<a href="/zport/dmd/Devices">/</a>'
"""
return self.urlLink()
def callZenScreen(self, REQUEST, redirect=False):
"""
Call and return screen specified by zenScreenName value of REQUEST.
If zenScreenName is not present call the default screen. This is used
in functions that are called from forms to get back to the correct
screen with the correct context.
"""
if REQUEST is None or getattr(REQUEST, 'dontRender', False):
# EventView uses a FakeRequest class to avoid the overhead
# of rendering pages as result of ajax calls.
return ''
screenName = REQUEST.get("zenScreenName", "")
if not redirect and REQUEST.get("redirect", None) :
redirect = True
if redirect:
path = getUtility(IVirtualRoot).ensure_virtual_root(
self.getPrimaryUrlPath())
nurl = "%s/%s" % (path, screenName)
REQUEST['RESPONSE'].redirect(nurl)
else:
REQUEST['URL'] = "%s/%s" % (self.absolute_url_path(), screenName)
screen = getattr(self, screenName, False)
if not screen: return self()
return screen()
@unpublished
def zenScreenUrl(self):
"""
Return the url for the current screen as defined by zenScreenName.
If zenScreenName is not found in the request the request url is used.
@return: An url to this object
@rtype: string
"""
screenName = self.REQUEST.get("zenScreenName", "")
if not screenName: return self.REQUEST.URL
return self.getPrimaryUrlPath() + "/" + screenName
@unpublished
def urlLink(self, text=None, url=None, attrs={}):
"""
Return an anchor tag if the user has access to the remote object.
@param text: the text to place within the anchor tag or string.
Defaults to the id of this object.
@param url: url for the href. Default is getPrimaryUrlPath
@type attrs: dict
@param attrs: any other attributes to be place in the in the tag.
@return: An HTML link to this object
@rtype: string
"""
if not text:
text = self.titleOrId()
text = escape(text)
if not self.checkRemotePerm("View", self):
return text
if not url:
url = self.getPrimaryUrlPath()
if len(attrs):
return '<a href="%s" %s>%s</a>' % (url,
' '.join('%s="%s"' % (x,y) for x,y in attrs.items()),
text)
else:
return '<a href="%s">%s</a>' % (url, text)
def getBreadCrumbUrlPath(self):
"""
Return the url to be used in breadcrumbs for this object. normally
this is equal to getPrimaryUrlPath. It can be used as a hook to modify
the url so that it points towards a different tab then the default.
@return: A url to this object
@rtype: string
>>> dmd.Devices.getBreadCrumbUrlPath()
'/zport/dmd/Devices'
>>> rc = dmd.Reports._getOb('Graph Reports')
>>> rc.manage_addGraphReport('test').getBreadCrumbUrlPath()
'/zport/dmd/Reports/Graph%20Reports/test/editGraphReport'
"""
return self.getPrimaryUrlPath()
def getBreadCrumbName(self):
return self.title_or_id()
def breadCrumbs(self, terminator='dmd', terminate=lambda x: False):
"""
Return the data to create the breadcrumb links for this object.
This is a list of tuples where the first value is the URL of the bread
crumb and the second is the lable.
@return: List of tuples to create a bread crumbs
@rtype: list
>>> dmd.Devices.Server.breadCrumbs()
[('/zport/dmd/Devices', 'Devices'),
('/zport/dmd/Devices/Server', 'Server')]
"""
links = []
curDir = self.primaryAq()
while curDir.id != terminator and not terminate(curDir):
if curDir.meta_type == 'ToManyContRelationship':
curDir = curDir.getPrimaryParent()
continue
if not getattr(aq_base(curDir),"getBreadCrumbUrlPath", False):
break
url = ""
if self.checkRemotePerm("View", curDir):
url = curDir.getBreadCrumbUrlPath()
links.append((url, curDir.getBreadCrumbName()))
curDir = curDir.aq_parent
links.reverse()
return links
def upToOrganizerBreadCrumbs(self, terminator='dmd'):
def isOrganizer(curDir):
from Products.ZenModel.Organizer import Organizer
try:
return isinstance(curDir, Organizer)
except Exception:
return False
return ZenModelBase.breadCrumbs(self, terminator, isOrganizer)
security.declareProtected(ZEN_COMMON, 'checkRemotePerm')
def checkRemotePerm(self, permission, robject):
"""
Look to see if the current user has permission on remote object.
@param permission: Zope permission to be tested. ie "View"
@param robject: remote objecct on which test is run. Will test on
primary acquisition path.
@rtype: boolean
@permission: ZEN_COMMON
"""
user = getSecurityManager().getUser()
return user.has_permission(permission, robject.primaryAq())
security.declareProtected(ZEN_VIEW, 'zentinelTabs')
def zentinelTabs(self, templateName, REQUEST=None):
"""
Return a list of hashes that define the screen tabs for this object.
Keys in the hash are:
- action = the name of the page template for this tab
- name = the label used on the tab
- permissions = a tuple of permissions to view this template
@permission: ZEN_VIEW
>>> dmd.Devices.zentinelTabs('deviceOrganizerStatus')
[{'action': 'deviceOrganizerStatus', 'selected': True,
'name': 'Classes', 'permissions': ('View',)},
{'action': 'viewEvents', 'name': 'Events', 'permissions': ('View',)},
{'action': 'zPropertyEdit', 'name': 'Configuration Properties',
'permissions': ('View',)},
{'action': 'perfConfig', 'name': 'Templates',
'permissions': ('Manage DMD',)}]
"""
tabs = []
user = getSecurityManager().getUser()
actions = self.factory_type_information[0]['actions']
selectedTabName = self._selectedTabName(templateName, REQUEST)
for a in actions:
def permfilter(p): return user.has_permission(p,self)
permok = filter(permfilter, a['permissions'])
if not a.get('visible', True) or not permok:
continue
a = a.copy()
if a['action'] == selectedTabName: a['selected'] = True
tabs.append(a)
return tabs
def _selectedTabName(self, templateName, REQUEST=None):
if REQUEST and REQUEST.get('selectedTabName', '') :
selectedTabName = REQUEST.get('selectedTabName', '')
else:
selectedTabName = templateName
requestUrl = REQUEST['URL'] if REQUEST else None
if not selectedTabName and requestUrl and requestUrl.rfind('/') != -1:
selectedTabName = requestUrl[requestUrl.rfind('/') + 1:]
if selectedTabName.startswith('@@'):
selectedTabName = selectedTabName[2:]
return selectedTabName
security.declareProtected(ZEN_MANAGE_DMD, 'zmanage_editProperties')
def zmanage_editProperties(self, REQUEST=None, redirect=False, audit=True):
"""
Edit a ZenModel object and return its proper page template.
Object will be reindexed if nessesary.
@permission: ZEN_MANAGE_DMD
"""
self.manage_changeProperties(**REQUEST.form)
index_object = getattr(self, 'index_object', lambda self: None)
index_object()
if REQUEST:
messaging.IMessageSender(self).sendToBrowser(
'Properties Saved',
"Saved At: %s" % self.getCurrentUserNowString()
)
if audit:
auditType = getDisplayType(self)
auditKind = 'Setting' if auditType == 'DataRoot' else auditType
auditFn(['UI', auditKind, 'Edit'],
data_=REQUEST.form,
skipFields_=('redirect',
'zenScreenName',
'zmanage_editProperties',
'curpasswd'),
maskFields_=('smtpPass'))
return self.callZenScreen(REQUEST, redirect=redirect)
security.declareProtected(ZEN_VIEW, 'getPrimaryDmdId')
def getPrimaryDmdId(self, rootName="dmd", subrel=""):
"""
Return the full dmd id of this object for instance /Devices/Server.
Everything before dmd is removed. A different rootName can be passed
to stop at a different object in the path. If subrel is passed any
relationship name in the path to the object will be removed.
@param rootName: Name of root
@type rootName: string
@param subrel: Name of relation
@type subrel: string
@return: Path to object
@rtype: string
@permission: ZEN_VIEW
"""
path = list(self.getPrimaryPath())
path = path[path.index(rootName)+1:]
if subrel: path = filter(lambda x: x != subrel, path)
return '/'+'/'.join(path)
@unpublished
def zenpathjoin(self, path):
"""
DEPRECATED Build a Zenoss path based on a list or tuple.
@type path: list or tuple
>>> dmd.zenpathjoin(('zport', 'dmd', 'Devices', 'Server'))
'/zport/dmd/Devices/Server'
"""
return zenpathjoin(path)
def zenpathsplit(self, path):
"""
DEPRECATED Split a path on its '/'.
"""
return zenpathsplit(path)
def createHierarchyObj(self, root, name, factory, relpath="", alog=None):
"""
DEPRECATED this is only seems to be used in Organizer.createOrganizer -
Create an object from its path we use relpath to skip down any missing
relations in the path and factory is the constructor for this object.
"""
return createHierarchyObj(root, name, factory, relpath, alog)
@unpublished
def getHierarchyObj(self, root, name, relpath):
"""
DEPRECATED this doesn't seem to be used anywere don't use it!!!
"""
return getHierarchyObj(root, name, relpath)
def getDmd(self):
"""
DEPRECATED Return the dmd root object with unwraped acquisition path.
>>> dmd.Devices.Server.getDmd()
<DataRoot at /zport/dmd>
"""
for obj in aq_chain(self):
if getattr(obj, 'id', None) == 'dmd': return obj
def getDmdRoot(self, name):
"""
Return a dmd root organizer such as "Systems". The acquisition path
will be cleaned so that it points directly to the root.
>>> dmd.Devices.Server.getDmdRoot("Systems")
<System at /zport/dmd/Systems>
"""
dmd = self.getDmd()
return dmd._getOb(name)
def getDmdObj(self, path):
"""
DEPRECATED Return an object from path that starts at dmd.
>>> dmd.getDmdObj('/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
if path.startswith("/"): path = path[1:]
return self.getDmd().getObjByPath(path)
def getZopeObj(self, path):
"""
DEPRECATED Return an object from path tat starts at zope root.
>>> dmd.getZopeObj('/zport/dmd/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
return self.getObjByPath(path)
def convertToUsersTimeZone(self, timestamp):
"""
This is an instance method so that it is available to
tal statements, such as reports.
"""
user = self.zport.dmd.ZenUsers.getUserSettings()
if user.timezone:
utc_dt = pytz.utc.localize(datetime.utcfromtimestamp(int(timestamp)))
tz = pytz.timezone(user.timezone)
tval = tz.normalize(utc_dt.astimezone(tz))
return tval.strftime(convertJsTimeFormatToPy(user.dateFormat+" "+user.timeFormat))
return isoDateTime(timestamp)
def getCurrentUserNowString(self):
return self.convertToUsersTimeZone(time.time())
def getNowString(self):
"""
Return the current time as a string in the format '2007/09/27 14:09:53'.
@rtype: string
"""
return time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())
def todayDate(self):
"""
Return today's date as a string in the format 'mm/dd/yyyy'.
@rtype: string
"""
return time.strftime("%m/%d/%Y", time.localtime())
def yesterdayDate(self):
"""
Return yesterday's date as a string in the format 'mm/dd/yyyy'.
@rtype: string
"""
yesterday = time.time() - 24*3600
return time.strftime("%m/%d/%Y", time.localtime(yesterday))
def all_meta_types(self, interfaces=None):
"""
DEPRECATED Override the ObjectManager method that is used to control
the items available in the add drop down in the ZMI. It uses the
attribute sub_menu_items to create the data structures. This is a list
of meta_types for the available classes. This functionality is rarely
used in Zenoss because the ZMI is not the perfered management
interface.
"""
mts = super(ZenModelBase,self).all_meta_types(interfaces)
if self.sub_meta_types:
mts = filter(lambda mt: mt['name'] in self.sub_meta_types, mts)
return mts
security.declareProtected('Delete objects', 'manage_deleteObjects')
def manage_deleteObjects(self, ids=(), REQUEST=None):
"""
Delete objects by id from this object and return to the current
template as defined by callZenScreen. Uses ObjectManager._delObject to
remove the object.
@permission: ZEN_VIEW
"""
for id in ids: self._delObject(id)
if REQUEST:
return self.callZenScreen(REQUEST)
def custPropertyIds(self):
"""
List custom properties that are defined at root node. Custom properties
start with a lower "c" followed by a uppercase character.
"""
return self.zenPropertyIds(pfilt=iscustprop)
def custPropertyMap(self):
"""
Return custom property definitions.
@rtype: [{'id':'cName','label':'Name', 'type':'string'},]
"""
return self.zenPropertyMap(pfilt=iscustprop)
def visibleCustPropertyMap(self):
"""
List custom property definitions that are visible using
custPropertyMap::
@rtype: [{'id':'cName','label':'Name', 'type':'string'},]
"""
return [ p for p in self.zenPropertyMap(pfilt=iscustprop) \
if p.get('visible', True) ]
security.declareProtected(ZEN_MANAGE_DMD, 'saveCustProperties')
def saveCustProperties(self, REQUEST):
"""
Save custom properties from REQUEST.form.
@permission: ZEN_MANAGE_DMD
"""
redirect = self.saveZenProperties(iscustprop, REQUEST)
auditFn(['UI', getDisplayType(self), 'Edit'], self, data_=REQUEST.form,
skipFields_=('zenScreenName', 'saveCustProperties'))
return redirect
def getObjByPath(self, path):
"""
Lookup and object by its path. Basically does a Zope unrestricted
traverse on the path given.
@type path: list or string /zport/dmd/Devices
>>> dmd.getObjByPath(('zport','dmd','Devices'))
<DeviceClass at /zport/dmd/Devices>
>>> dmd.getObjByPath(('Devices','Server'))
<DeviceClass at /zport/dmd/Devices/Server>
>>> dmd.getObjByPath('/zport/dmd/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
>>> dmd.getObjByPath('Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
return getObjByPath(self, path)
def isLocalName(self, name):
"""
Check to see if a name is local to our current context or if it comes
from our acquisition chain.
@rtype: boolean
>>> dmd.isLocalName('Devices')
True
>>> dmd.Devices.Server.isLocalName('Devices')
False
"""
v = getattr(aq_base(self), name, '__ZENMARKER__')
return v != '__ZENMARKER__'
security.declareProtected(ZEN_VIEW, 'helpLink')
def helpLink(self):
"""
DEPRECATED Return a link to the objects help file.
@permission: ZEN_VIEW
"""
path = self.__class__.__module__.split('.')
className = path[-1].replace('Class','')
product = path[-2]
path = ("", "Control_Panel", "Products", product, "Help",
"%s.stx"%className)
# check to see if we have a help screen
app = self.getPhysicalRoot()
try:
app.restrictedTraverse(path)
except (KeyError, Unauthorized):
return ""
url = "/HelpSys?help_url="+ "/".join(path)
return """<a class="tabletitle" href="%s" \
onClick="window.open('%s','zope_help','width=600,height=500, \
menubar=yes,toolbar=yes,scrollbars=yes,resizable=yes'); \
return false;" onMouseOver="window.status='Open online help'; \
return true;" onMouseOut="window.status=''; return true;">Help!</a>
""" % (url, url)
security.declareProtected(ZEN_VIEW, 'getIconPath')
def getIconPath(self):
"""
Return the icon associated with this object. The icon path is defined
in the zProperty zIcon.
@return: Path to icon
@rtype: string
@permission: ZEN_VIEW
"""
try:
return self.primaryAq().zIcon
except AttributeError:
return '/zport/dmd/img/icons/noicon.png'
def aqBaseHasAttr(self, attr):
"""
Return hasattr(aq_base(self), attr)
This is a convenience function for use in templates, where it's not
so easy to make a similar call directly.
hasattr itself will swallow exceptions, so we don't want to use that.
We also need to allow for values of None, so something like
getattr(aq_base(self, attr, None) doesn't really tell us anything.
Testing __dict__ is not a good choice because it doesn't allow
for properties (and I believe __getitem__ calls.)
So while this looks pretty attrocious, it might be the most sane
solution.
"""
return getattr(aq_base(self), attr, _MARKER) is not _MARKER
def get_csrf_token(self):
"""
Returns string with CSRF token for current user.
"""
return get_csrf_token(self.REQUEST)
class ZenModelZenDocProvider(object):
zope.interface.implements(IZenDocProvider)
zope.component.adapts(ZenModelBase)
def __init__(self, zenModelBase):
self._underlyingObject = zenModelBase
def getZendoc(self):
zendoc = self._underlyingObject._zendoc
if not zendoc and self._underlyingObject.aqBaseHasAttr( 'description' ):
zendoc = self._underlyingObject.description
return zendoc
def setZendoc(self, zendocText):
self._underlyingObject._zendoc = zendocText
def exportZendoc(self,ofile):
"""Return an xml representation of a RelationshipManagers zendoc
<property id='_zendoc' type='string' mode='w'>
value
</property>
"""
value = self.getZendoc()
if not value: return
ofile.write("<property id='zendoc' type='string'>\n")
if not isinstance(value, basestring):
value = unicode(value)
elif isinstance(value, str):
value = value.decode('latin-1')
ofile.write(saxutils.escape(value).encode('utf-8')+"\n")
ofile.write("</property>\n")
InitializeClass(ZenModelBase) | """ | random_line_split |
ZenModelBase.py | ##############################################################################
#
# Copyright (C) Zenoss, Inc. 2007, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
__doc__="""ZenModelBase
$Id: ZenModelBase.py,v 1.17 2004/04/23 19:11:58 edahl Exp $"""
__version__ = "$Revision: 1.17 $"[11:-2]
import re
import time
import sys
from xml.sax import saxutils
from urllib import unquote
from cgi import escape
import zope.component
import zope.interface
from datetime import datetime
import pytz
from OFS.ObjectManager import checkValidId as globalCheckValidId
from AccessControl import ClassSecurityInfo, getSecurityManager, Unauthorized
from AccessControl.class_init import InitializeClass
from Acquisition import aq_base, aq_chain
from zope.component import getGlobalSiteManager, getUtility
from Products.ZenModel.interfaces import IZenDocProvider
from Products.ZenRelations.ZenPropertyManager import iscustprop
from Products.ZenUtils.Utils import zenpathsplit, zenpathjoin, getDisplayType
from Products.ZenUtils.Utils import createHierarchyObj, getHierarchyObj
from Products.ZenUtils.Utils import getObjByPath, unpublished
from Products.ZenUtils.csrf import get_csrf_token
from Products.ZenUtils.Utils import prepId as globalPrepId, isXmlRpc
from Products.ZenWidgets import messaging
from Products.ZenUtils.Time import convertTimestampToTimeZone, isoDateTime, convertJsTimeFormatToPy
from Products.ZenUI3.browser.interfaces import INewPath
from Products.ZenMessaging.audit import audit as auditFn
from ZenossSecurity import *
from Products.ZenUtils.virtual_root import IVirtualRoot
_MARKER = object()
class ZenModelBase(object):
"""
All ZenModel Persistent classes inherit from this class. It provides some
screen management functionality, and general utility methods.
"""
_zendoc = ''
sub_meta_types = ()
#prodStateThreshold = 500
security = ClassSecurityInfo()
def __call__(self):
"""
Invokes the default view.
"""
if isXmlRpc(self.REQUEST):
return self
else:
newpath = INewPath(self)
newpath = getUtility(IVirtualRoot).ensure_virtual_root(newpath)
self.REQUEST.response.redirect(newpath)
def __repr__(self):
"""Return a short string describing the object."""
return "<{0} at {1}>".format(
self.__class__.__name__, '/'.join(self.getPhysicalPath()),
)
index_html = None # This special value informs ZPublisher to use __call__
security.declareProtected(ZEN_VIEW, 'view')
def view(self):
'''
Returns the default view even if index_html is overridden.
@permission: ZEN_VIEW
'''
return self()
def __hash__(self):
return hash(self.id)
def prepId(self, id, subchar='_'):
"""
Clean out an id of illegal characters.
@type id: string
@param subchar: Character to be substituted with illegal characters
@type subchar: string
@rtype: string
>>> dmd.Devices.prepId('ab^*cd')
'ab__cd'
>>> dmd.Devices.prepId('ab^*cd', subchar='Z')
'abZZcd'
>>> dmd.Devices.prepId('/boot')
'boot'
>>> dmd.Devices.prepId('/')
'-'
>>> dmd.Devices.prepId(' mydev ')
'mydev'
"""
return globalPrepId(id, subchar)
def checkValidId(self, id, prep_id = False):
"""
Checks that an id is a valid Zope id. Looks for invalid characters and
checks that the id doesn't already exist in this context.
@type id: string
@type prep_id: boolean
@rtype: boolean
>>> dmd.Devices.checkValidId('^*')
'The id "^*" contains characters illegal in URLs.'
>>> dmd.Devices.checkValidId('Server')
'The id "Server" is invalid - it is already in use.'
>>> dmd.Devices.checkValidId('ZenTestId')
True
"""
new_id = unquote(id)
if prep_id: new_id = self.prepId(id)
try:
globalCheckValidId(self, new_id)
return True
except Exception:
return str(sys.exc_info()[1])
def getUnusedId(self, relName, baseKey, extensionIter=None):
"""
Return a new id that is not already in use in the relationship. If
baseKey is not already in use, return that. Otherwise append values
from extensionIter to baseKey until an used key is found. The default
extensionIter appends integers starting with 2 and counting up.
@type relName: string
@type baseKey: string
@type extensionIter: iterator
@rtype: string
"""
import itertools
if extensionIter is None:
extensionIter = itertools.count(2)
rel = getattr(self, relName)
candidate = baseKey
while candidate in rel.objectIds():
candidate = self.prepId('%s%s' % (baseKey, extensionIter.next()))
return candidate
def getIdLink(self):
"""
DEPRECATED Return an a link to this object with its id as the name.
@return: An HTML link to this object
@rtype: string
>>> dmd.Devices.getIdLink()
'<a href="/zport/dmd/Devices">/</a>'
"""
return self.urlLink()
def callZenScreen(self, REQUEST, redirect=False):
"""
Call and return screen specified by zenScreenName value of REQUEST.
If zenScreenName is not present call the default screen. This is used
in functions that are called from forms to get back to the correct
screen with the correct context.
"""
if REQUEST is None or getattr(REQUEST, 'dontRender', False):
# EventView uses a FakeRequest class to avoid the overhead
# of rendering pages as result of ajax calls.
return ''
screenName = REQUEST.get("zenScreenName", "")
if not redirect and REQUEST.get("redirect", None) :
redirect = True
if redirect:
path = getUtility(IVirtualRoot).ensure_virtual_root(
self.getPrimaryUrlPath())
nurl = "%s/%s" % (path, screenName)
REQUEST['RESPONSE'].redirect(nurl)
else:
REQUEST['URL'] = "%s/%s" % (self.absolute_url_path(), screenName)
screen = getattr(self, screenName, False)
if not screen: return self()
return screen()
@unpublished
def zenScreenUrl(self):
"""
Return the url for the current screen as defined by zenScreenName.
If zenScreenName is not found in the request the request url is used.
@return: An url to this object
@rtype: string
"""
screenName = self.REQUEST.get("zenScreenName", "")
if not screenName: return self.REQUEST.URL
return self.getPrimaryUrlPath() + "/" + screenName
@unpublished
def urlLink(self, text=None, url=None, attrs={}):
"""
Return an anchor tag if the user has access to the remote object.
@param text: the text to place within the anchor tag or string.
Defaults to the id of this object.
@param url: url for the href. Default is getPrimaryUrlPath
@type attrs: dict
@param attrs: any other attributes to be place in the in the tag.
@return: An HTML link to this object
@rtype: string
"""
if not text:
text = self.titleOrId()
text = escape(text)
if not self.checkRemotePerm("View", self):
return text
if not url:
url = self.getPrimaryUrlPath()
if len(attrs):
return '<a href="%s" %s>%s</a>' % (url,
' '.join('%s="%s"' % (x,y) for x,y in attrs.items()),
text)
else:
return '<a href="%s">%s</a>' % (url, text)
def getBreadCrumbUrlPath(self):
"""
Return the url to be used in breadcrumbs for this object. normally
this is equal to getPrimaryUrlPath. It can be used as a hook to modify
the url so that it points towards a different tab then the default.
@return: A url to this object
@rtype: string
>>> dmd.Devices.getBreadCrumbUrlPath()
'/zport/dmd/Devices'
>>> rc = dmd.Reports._getOb('Graph Reports')
>>> rc.manage_addGraphReport('test').getBreadCrumbUrlPath()
'/zport/dmd/Reports/Graph%20Reports/test/editGraphReport'
"""
return self.getPrimaryUrlPath()
def getBreadCrumbName(self):
return self.title_or_id()
def breadCrumbs(self, terminator='dmd', terminate=lambda x: False):
"""
Return the data to create the breadcrumb links for this object.
This is a list of tuples where the first value is the URL of the bread
crumb and the second is the lable.
@return: List of tuples to create a bread crumbs
@rtype: list
>>> dmd.Devices.Server.breadCrumbs()
[('/zport/dmd/Devices', 'Devices'),
('/zport/dmd/Devices/Server', 'Server')]
"""
links = []
curDir = self.primaryAq()
while curDir.id != terminator and not terminate(curDir):
if curDir.meta_type == 'ToManyContRelationship':
curDir = curDir.getPrimaryParent()
continue
if not getattr(aq_base(curDir),"getBreadCrumbUrlPath", False):
break
url = ""
if self.checkRemotePerm("View", curDir):
url = curDir.getBreadCrumbUrlPath()
links.append((url, curDir.getBreadCrumbName()))
curDir = curDir.aq_parent
links.reverse()
return links
def upToOrganizerBreadCrumbs(self, terminator='dmd'):
def isOrganizer(curDir):
from Products.ZenModel.Organizer import Organizer
try:
return isinstance(curDir, Organizer)
except Exception:
return False
return ZenModelBase.breadCrumbs(self, terminator, isOrganizer)
security.declareProtected(ZEN_COMMON, 'checkRemotePerm')
def checkRemotePerm(self, permission, robject):
"""
Look to see if the current user has permission on remote object.
@param permission: Zope permission to be tested. ie "View"
@param robject: remote objecct on which test is run. Will test on
primary acquisition path.
@rtype: boolean
@permission: ZEN_COMMON
"""
user = getSecurityManager().getUser()
return user.has_permission(permission, robject.primaryAq())
security.declareProtected(ZEN_VIEW, 'zentinelTabs')
def zentinelTabs(self, templateName, REQUEST=None):
"""
Return a list of hashes that define the screen tabs for this object.
Keys in the hash are:
- action = the name of the page template for this tab
- name = the label used on the tab
- permissions = a tuple of permissions to view this template
@permission: ZEN_VIEW
>>> dmd.Devices.zentinelTabs('deviceOrganizerStatus')
[{'action': 'deviceOrganizerStatus', 'selected': True,
'name': 'Classes', 'permissions': ('View',)},
{'action': 'viewEvents', 'name': 'Events', 'permissions': ('View',)},
{'action': 'zPropertyEdit', 'name': 'Configuration Properties',
'permissions': ('View',)},
{'action': 'perfConfig', 'name': 'Templates',
'permissions': ('Manage DMD',)}]
"""
tabs = []
user = getSecurityManager().getUser()
actions = self.factory_type_information[0]['actions']
selectedTabName = self._selectedTabName(templateName, REQUEST)
for a in actions:
def permfilter(p): return user.has_permission(p,self)
permok = filter(permfilter, a['permissions'])
if not a.get('visible', True) or not permok:
continue
a = a.copy()
if a['action'] == selectedTabName: a['selected'] = True
tabs.append(a)
return tabs
def _selectedTabName(self, templateName, REQUEST=None):
if REQUEST and REQUEST.get('selectedTabName', '') :
selectedTabName = REQUEST.get('selectedTabName', '')
else:
selectedTabName = templateName
requestUrl = REQUEST['URL'] if REQUEST else None
if not selectedTabName and requestUrl and requestUrl.rfind('/') != -1:
selectedTabName = requestUrl[requestUrl.rfind('/') + 1:]
if selectedTabName.startswith('@@'):
selectedTabName = selectedTabName[2:]
return selectedTabName
security.declareProtected(ZEN_MANAGE_DMD, 'zmanage_editProperties')
def zmanage_editProperties(self, REQUEST=None, redirect=False, audit=True):
"""
Edit a ZenModel object and return its proper page template.
Object will be reindexed if nessesary.
@permission: ZEN_MANAGE_DMD
"""
self.manage_changeProperties(**REQUEST.form)
index_object = getattr(self, 'index_object', lambda self: None)
index_object()
if REQUEST:
messaging.IMessageSender(self).sendToBrowser(
'Properties Saved',
"Saved At: %s" % self.getCurrentUserNowString()
)
if audit:
auditType = getDisplayType(self)
auditKind = 'Setting' if auditType == 'DataRoot' else auditType
auditFn(['UI', auditKind, 'Edit'],
data_=REQUEST.form,
skipFields_=('redirect',
'zenScreenName',
'zmanage_editProperties',
'curpasswd'),
maskFields_=('smtpPass'))
return self.callZenScreen(REQUEST, redirect=redirect)
security.declareProtected(ZEN_VIEW, 'getPrimaryDmdId')
def getPrimaryDmdId(self, rootName="dmd", subrel=""):
"""
Return the full dmd id of this object for instance /Devices/Server.
Everything before dmd is removed. A different rootName can be passed
to stop at a different object in the path. If subrel is passed any
relationship name in the path to the object will be removed.
@param rootName: Name of root
@type rootName: string
@param subrel: Name of relation
@type subrel: string
@return: Path to object
@rtype: string
@permission: ZEN_VIEW
"""
path = list(self.getPrimaryPath())
path = path[path.index(rootName)+1:]
if subrel: path = filter(lambda x: x != subrel, path)
return '/'+'/'.join(path)
@unpublished
def zenpathjoin(self, path):
"""
DEPRECATED Build a Zenoss path based on a list or tuple.
@type path: list or tuple
>>> dmd.zenpathjoin(('zport', 'dmd', 'Devices', 'Server'))
'/zport/dmd/Devices/Server'
"""
return zenpathjoin(path)
def zenpathsplit(self, path):
"""
DEPRECATED Split a path on its '/'.
"""
return zenpathsplit(path)
def createHierarchyObj(self, root, name, factory, relpath="", alog=None):
"""
DEPRECATED this is only seems to be used in Organizer.createOrganizer -
Create an object from its path we use relpath to skip down any missing
relations in the path and factory is the constructor for this object.
"""
return createHierarchyObj(root, name, factory, relpath, alog)
@unpublished
def getHierarchyObj(self, root, name, relpath):
"""
DEPRECATED this doesn't seem to be used anywere don't use it!!!
"""
return getHierarchyObj(root, name, relpath)
def getDmd(self):
"""
DEPRECATED Return the dmd root object with unwraped acquisition path.
>>> dmd.Devices.Server.getDmd()
<DataRoot at /zport/dmd>
"""
for obj in aq_chain(self):
if getattr(obj, 'id', None) == 'dmd': return obj
def getDmdRoot(self, name):
"""
Return a dmd root organizer such as "Systems". The acquisition path
will be cleaned so that it points directly to the root.
>>> dmd.Devices.Server.getDmdRoot("Systems")
<System at /zport/dmd/Systems>
"""
dmd = self.getDmd()
return dmd._getOb(name)
def getDmdObj(self, path):
"""
DEPRECATED Return an object from path that starts at dmd.
>>> dmd.getDmdObj('/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
if path.startswith("/"): path = path[1:]
return self.getDmd().getObjByPath(path)
def getZopeObj(self, path):
"""
DEPRECATED Return an object from path tat starts at zope root.
>>> dmd.getZopeObj('/zport/dmd/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
return self.getObjByPath(path)
def convertToUsersTimeZone(self, timestamp):
"""
This is an instance method so that it is available to
tal statements, such as reports.
"""
user = self.zport.dmd.ZenUsers.getUserSettings()
if user.timezone:
utc_dt = pytz.utc.localize(datetime.utcfromtimestamp(int(timestamp)))
tz = pytz.timezone(user.timezone)
tval = tz.normalize(utc_dt.astimezone(tz))
return tval.strftime(convertJsTimeFormatToPy(user.dateFormat+" "+user.timeFormat))
return isoDateTime(timestamp)
def getCurrentUserNowString(self):
return self.convertToUsersTimeZone(time.time())
def getNowString(self):
"""
Return the current time as a string in the format '2007/09/27 14:09:53'.
@rtype: string
"""
return time.strftime("%Y/%m/%d %H:%M:%S", time.localtime())
def todayDate(self):
"""
Return today's date as a string in the format 'mm/dd/yyyy'.
@rtype: string
"""
return time.strftime("%m/%d/%Y", time.localtime())
def yesterdayDate(self):
"""
Return yesterday's date as a string in the format 'mm/dd/yyyy'.
@rtype: string
"""
yesterday = time.time() - 24*3600
return time.strftime("%m/%d/%Y", time.localtime(yesterday))
def all_meta_types(self, interfaces=None):
"""
DEPRECATED Override the ObjectManager method that is used to control
the items available in the add drop down in the ZMI. It uses the
attribute sub_menu_items to create the data structures. This is a list
of meta_types for the available classes. This functionality is rarely
used in Zenoss because the ZMI is not the perfered management
interface.
"""
mts = super(ZenModelBase,self).all_meta_types(interfaces)
if self.sub_meta_types:
mts = filter(lambda mt: mt['name'] in self.sub_meta_types, mts)
return mts
security.declareProtected('Delete objects', 'manage_deleteObjects')
def manage_deleteObjects(self, ids=(), REQUEST=None):
"""
Delete objects by id from this object and return to the current
template as defined by callZenScreen. Uses ObjectManager._delObject to
remove the object.
@permission: ZEN_VIEW
"""
for id in ids: self._delObject(id)
if REQUEST:
|
def custPropertyIds(self):
"""
List custom properties that are defined at root node. Custom properties
start with a lower "c" followed by a uppercase character.
"""
return self.zenPropertyIds(pfilt=iscustprop)
def custPropertyMap(self):
"""
Return custom property definitions.
@rtype: [{'id':'cName','label':'Name', 'type':'string'},]
"""
return self.zenPropertyMap(pfilt=iscustprop)
def visibleCustPropertyMap(self):
"""
List custom property definitions that are visible using
custPropertyMap::
@rtype: [{'id':'cName','label':'Name', 'type':'string'},]
"""
return [ p for p in self.zenPropertyMap(pfilt=iscustprop) \
if p.get('visible', True) ]
security.declareProtected(ZEN_MANAGE_DMD, 'saveCustProperties')
def saveCustProperties(self, REQUEST):
"""
Save custom properties from REQUEST.form.
@permission: ZEN_MANAGE_DMD
"""
redirect = self.saveZenProperties(iscustprop, REQUEST)
auditFn(['UI', getDisplayType(self), 'Edit'], self, data_=REQUEST.form,
skipFields_=('zenScreenName', 'saveCustProperties'))
return redirect
def getObjByPath(self, path):
"""
Lookup and object by its path. Basically does a Zope unrestricted
traverse on the path given.
@type path: list or string /zport/dmd/Devices
>>> dmd.getObjByPath(('zport','dmd','Devices'))
<DeviceClass at /zport/dmd/Devices>
>>> dmd.getObjByPath(('Devices','Server'))
<DeviceClass at /zport/dmd/Devices/Server>
>>> dmd.getObjByPath('/zport/dmd/Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
>>> dmd.getObjByPath('Devices/Server')
<DeviceClass at /zport/dmd/Devices/Server>
"""
return getObjByPath(self, path)
def isLocalName(self, name):
"""
Check to see if a name is local to our current context or if it comes
from our acquisition chain.
@rtype: boolean
>>> dmd.isLocalName('Devices')
True
>>> dmd.Devices.Server.isLocalName('Devices')
False
"""
v = getattr(aq_base(self), name, '__ZENMARKER__')
return v != '__ZENMARKER__'
security.declareProtected(ZEN_VIEW, 'helpLink')
def helpLink(self):
"""
DEPRECATED Return a link to the objects help file.
@permission: ZEN_VIEW
"""
path = self.__class__.__module__.split('.')
className = path[-1].replace('Class','')
product = path[-2]
path = ("", "Control_Panel", "Products", product, "Help",
"%s.stx"%className)
# check to see if we have a help screen
app = self.getPhysicalRoot()
try:
app.restrictedTraverse(path)
except (KeyError, Unauthorized):
return ""
url = "/HelpSys?help_url="+ "/".join(path)
return """<a class="tabletitle" href="%s" \
onClick="window.open('%s','zope_help','width=600,height=500, \
menubar=yes,toolbar=yes,scrollbars=yes,resizable=yes'); \
return false;" onMouseOver="window.status='Open online help'; \
return true;" onMouseOut="window.status=''; return true;">Help!</a>
""" % (url, url)
security.declareProtected(ZEN_VIEW, 'getIconPath')
def getIconPath(self):
"""
Return the icon associated with this object. The icon path is defined
in the zProperty zIcon.
@return: Path to icon
@rtype: string
@permission: ZEN_VIEW
"""
try:
return self.primaryAq().zIcon
except AttributeError:
return '/zport/dmd/img/icons/noicon.png'
def aqBaseHasAttr(self, attr):
"""
Return hasattr(aq_base(self), attr)
This is a convenience function for use in templates, where it's not
so easy to make a similar call directly.
hasattr itself will swallow exceptions, so we don't want to use that.
We also need to allow for values of None, so something like
getattr(aq_base(self, attr, None) doesn't really tell us anything.
Testing __dict__ is not a good choice because it doesn't allow
for properties (and I believe __getitem__ calls.)
So while this looks pretty attrocious, it might be the most sane
solution.
"""
return getattr(aq_base(self), attr, _MARKER) is not _MARKER
def get_csrf_token(self):
"""
Returns string with CSRF token for current user.
"""
return get_csrf_token(self.REQUEST)
class ZenModelZenDocProvider(object):
zope.interface.implements(IZenDocProvider)
zope.component.adapts(ZenModelBase)
def __init__(self, zenModelBase):
self._underlyingObject = zenModelBase
def getZendoc(self):
zendoc = self._underlyingObject._zendoc
if not zendoc and self._underlyingObject.aqBaseHasAttr( 'description' ):
zendoc = self._underlyingObject.description
return zendoc
def setZendoc(self, zendocText):
self._underlyingObject._zendoc = zendocText
def exportZendoc(self,ofile):
"""Return an xml representation of a RelationshipManagers zendoc
<property id='_zendoc' type='string' mode='w'>
value
</property>
"""
value = self.getZendoc()
if not value: return
ofile.write("<property id='zendoc' type='string'>\n")
if not isinstance(value, basestring):
value = unicode(value)
elif isinstance(value, str):
value = value.decode('latin-1')
ofile.write(saxutils.escape(value).encode('utf-8')+"\n")
ofile.write("</property>\n")
InitializeClass(ZenModelBase)
| return self.callZenScreen(REQUEST) | conditional_block |
main.rs | #![feature(try_trait)]
#![feature(label_break_value)]
extern crate serenity;
mod converter;
mod arc_commands;
mod message_helper;
use message_helper::MessageHelper;
use std::sync::{Arc, Mutex};
use std::process::Command;
use serenity::model::prelude::*;
use serenity::prelude::*;
use serenity::utils::MessageBuilder;
use std::env;
use std::fs::{File, self};
use std::io::Write;
use std::path::PathBuf;
use std::collections::BTreeSet;
#[derive(Default)]
struct Handler {
channel_id: Arc<Mutex<BTreeSet<ChannelId>>>
}
impl Handler {
pub fn new<It: IntoIterator<Item=ChannelId>>(channels: It) -> Handler {
Handler {
channel_id: Arc::new(Mutex::new(channels.into_iter().collect())),
..Default::default()
}
}
}
use converter::SUPPORTED_TYPES;
static HELP_TEXT: &str =
"%convert [args] - convert file even if channel isn't set
%help - display this message\n\
%set_channel - watch this channel for files\n\
%unset_channel - don't watch this channel to watch for files\n\
%update - update param labels and install paramxml if not installed\n\
%thanks - credits\n\
%supported_types - print all supported types
\n\
Arc commands\n\
%ls [folder] - list files/folders in arc
%get [file] - request a file from the arc
%find_song [song name query] - list songs for a given name
%get_song [song name query] - download the first song from %find_song
\n\
Include 'start,end' or 'start-end' for looping in wav -> nus3audio conversions";
static THANKS_TEXT: &str =
"jam1garner - bot programming, libnus3audio, mscdec/msclang, etc.\n\
Arthur (@BenArthur_7) - motion_list_rs, sqb-rs, and much more\n\
Moosehunter, jam1garner, Raytwo, soneek - VGAudio lopus support\n\
RandomTalkingBush, DemonSlayerX8, jam1garner - data.arc hashes
soneek - nus3audio help\n\
leoetlino - byml-v2\n\
TNN, Genwald - WAV/audio help\n\
Ploaj, SMG (ScanMountGoat) - ArcCross, SSBHLib\n\
Arthur, Dr. Hypercake, Birdwards, SMG, Meshima, TNN, Blazingflare, TheSmartKid - Param labels\n\
coolsonickirby, SushiiZ - testing help";
enum SetUnset {
Set,
Unset
}
use SetUnset::*;
fn save_channels(channel_ids: &BTreeSet<ChannelId>, message: &MessageHelper, owner: &User) {
if let Err(e) = fs::write(
CHANNELS_PATH,
channel_ids.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join("\n")
) {
message.say(
MessageBuilder::new()
.mention(owner)
.push(" Failed to save channel ids:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
};
}
fn set_or_unset_channel(handler: &Handler, message: &MessageHelper, set: SetUnset) {
let owner = message.get_current_application_info().owner;
let is_admin = message.member_permissions().administrator();
if message.author == owner || is_admin {
let arc = Arc::clone(&handler.channel_id);
let mut channel_ids = arc.lock().unwrap();
match set {
Set => {
channel_ids.insert(message.channel_id);
message.say("Channel set");
save_channels(&channel_ids, message, &owner);
}
Unset => {
if channel_ids.remove(&message.channel_id) | else {
message.say("Channel was not set");
}
}
}
} else {
message.reply("You do not have the proper permissions to set the channel.");
}
}
impl EventHandler for Handler {
fn message(&self, context: Context, message: Message) {
let message = MessageHelper::new(message, context);
if message.author.bot {
return;
}
let mut message_content = &message.content[..];
let mut convert_command = false;
if !message.content.is_empty() && &message.content[0..1] == "%" {
match message.content[1..].trim() {
s if s.starts_with("convert") => {
message_content = &s[7..];
convert_command = true;
}
"update" => {
update(&message);
return;
}
"set_channel" => set_or_unset_channel(self, &message, Set),
"unset_channel" => set_or_unset_channel(self, &message, Unset),
"help" => {
let _ =
message.say(
MessageBuilder::new()
.push("Version 1.3\nCommands:")
.push_codeblock_safe(HELP_TEXT, None)
.push(format!("Supported types: {}...", &SUPPORTED_TYPES[..90]))
.build()
);
}
"thanks" => {
let _ =
message.say(
MessageBuilder::new()
.push("A big thanks to everyone who has in anyway helped:")
.push_codeblock_safe(THANKS_TEXT, None)
.build()
);
}
"supported_types" => {
let _ =
message.say(
MessageBuilder::new()
.push("Supported filetypes:")
.push_codeblock_safe(SUPPORTED_TYPES, None)
.build()
);
}
s @ "ls" | s if s.starts_with("ls ") => arc_commands::ls(s, &message),
s if s.starts_with("get ") => arc_commands::get(s, &message),
s if s.starts_with("find_song ") => arc_commands::find_song(s, &message),
s if s.starts_with("get_song ") => arc_commands::get_song(s, &message),
s if s.starts_with("hash ") => arc_commands::hash(s, &message),
_ => {
message.say("Invalid command");
return;
}
}
}
if !convert_command {
let enabled_channels = Arc::clone(&self.channel_id);
if !enabled_channels.lock().unwrap().contains(&message.channel_id) {
return;
}
}
for attachment in &message.attachments {
let content = match attachment.download() {
Ok(content) => content,
Err(why) => {
println!("Error downloading attachment: {:?}", why);
message.say("Error downloading attachment");
return;
},
};
let path: PathBuf = ["/tmp/converter/", &attachment.filename].iter().collect();
match std::fs::create_dir_all(format!("/tmp/converter")) {
Ok(()) => {}
Err(why) => {
println!("Error creating dir: {:?}", why);
message.say("Error creating dir");
}
}
let mut file = match File::create(path.as_os_str()) {
Ok(file) => file,
Err(why) => {
println!("Error creating file: {:?}", why);
message.say("Error creating file");
return;
},
};
if let Err(why) = file.write(&content) {
println!("Error writing to file: {:?}", why);
return;
}
if match converter::extension(path.as_path()) {
"mscsb" | "c" | "wav" | "zip" | "yml" => true,
s => converter::byml::EXTENSIONS.contains(&s.trim_start_matches("s")),
} {
message.broadcast_typing();
}
match converter::convert(path, message_content) {
Ok(path) => {
let _ =
message.send_file(path.to_str().unwrap(), "Converted file")
.map_err(|e|{
message.say(
MessageBuilder::new()
.push("Error sending file: ")
.push_codeblock_safe(e.to_string(), None)
.build()
);
});
std::fs::remove_file(path).unwrap();
}
Err(why) => {
println!("Error converting file: {:?}", why);
message.say(
MessageBuilder::new()
.push("Error converting file:")
.push_codeblock_safe(why.message, None)
.build()
);
}
}
}
}
}
const MOTION_LABEL_PATH: &str = "motion_list_labels.txt";
const SQB_LABEL_PATH: &str = "sqb_labels.txt";
const CHANNELS_PATH: &str = "channels.txt";
fn load_channels() -> Vec<ChannelId> {
fs::read_to_string(CHANNELS_PATH).ok()
.map(|channels_file|{
channels_file.split('\n')
.map(|s| u64::from_str_radix(s, 10))
.filter_map(Result::ok)
.map(Into::into)
.collect()
})
.unwrap_or_default()
}
fn main() {
arc_commands::setup_songs();
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
let channels = load_channels();
// Login with a bot token from the environment
let mut client = Client::new(&env::var("DISCORD_TOKEN").expect("token"), Handler::new(channels))
.expect("Error creating client");
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn update_labels(label_paths: &[&str]) {
hash40::set_labels(
label_paths
.iter()
.map(|label| hash40::read_labels(label).unwrap())
.flatten()
)
}
fn update(message: &MessageHelper) {
let update_output =
match Command::new("sh").arg("update.sh").output() {
Ok(x) => x,
Err(e) => {
message.say(
MessageBuilder::new()
.push("Failed to run update:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
return;
}
};
if update_output.status.success() {
let out = std::str::from_utf8(&update_output.stdout[..]).unwrap();
message.say(out);
} else {
let err = std::str::from_utf8(&update_output.stderr[..]).unwrap();
message.say(
MessageBuilder::new()
.push("Error:")
.push_codeblock_safe(err, None)
.build()
);
}
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
}
| {
message.say("Channel unset");
} | conditional_block |
main.rs | #![feature(try_trait)]
#![feature(label_break_value)]
extern crate serenity;
mod converter;
mod arc_commands;
mod message_helper;
use message_helper::MessageHelper;
use std::sync::{Arc, Mutex};
use std::process::Command;
use serenity::model::prelude::*;
use serenity::prelude::*;
use serenity::utils::MessageBuilder;
use std::env;
use std::fs::{File, self};
use std::io::Write;
use std::path::PathBuf;
use std::collections::BTreeSet;
#[derive(Default)]
struct Handler {
channel_id: Arc<Mutex<BTreeSet<ChannelId>>>
}
impl Handler {
pub fn new<It: IntoIterator<Item=ChannelId>>(channels: It) -> Handler {
Handler {
channel_id: Arc::new(Mutex::new(channels.into_iter().collect())),
..Default::default()
}
}
}
use converter::SUPPORTED_TYPES;
static HELP_TEXT: &str =
"%convert [args] - convert file even if channel isn't set
%help - display this message\n\
%set_channel - watch this channel for files\n\
%unset_channel - don't watch this channel to watch for files\n\
%update - update param labels and install paramxml if not installed\n\
%thanks - credits\n\
%supported_types - print all supported types
\n\
Arc commands\n\
%ls [folder] - list files/folders in arc
%get [file] - request a file from the arc
%find_song [song name query] - list songs for a given name
%get_song [song name query] - download the first song from %find_song
\n\
Include 'start,end' or 'start-end' for looping in wav -> nus3audio conversions";
static THANKS_TEXT: &str =
"jam1garner - bot programming, libnus3audio, mscdec/msclang, etc.\n\
Arthur (@BenArthur_7) - motion_list_rs, sqb-rs, and much more\n\
Moosehunter, jam1garner, Raytwo, soneek - VGAudio lopus support\n\
RandomTalkingBush, DemonSlayerX8, jam1garner - data.arc hashes
soneek - nus3audio help\n\
leoetlino - byml-v2\n\
TNN, Genwald - WAV/audio help\n\
Ploaj, SMG (ScanMountGoat) - ArcCross, SSBHLib\n\
Arthur, Dr. Hypercake, Birdwards, SMG, Meshima, TNN, Blazingflare, TheSmartKid - Param labels\n\
coolsonickirby, SushiiZ - testing help";
enum SetUnset {
Set,
Unset
}
use SetUnset::*;
fn save_channels(channel_ids: &BTreeSet<ChannelId>, message: &MessageHelper, owner: &User) {
if let Err(e) = fs::write(
CHANNELS_PATH,
channel_ids.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join("\n")
) {
message.say(
MessageBuilder::new()
.mention(owner)
.push(" Failed to save channel ids:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
};
}
fn set_or_unset_channel(handler: &Handler, message: &MessageHelper, set: SetUnset) {
let owner = message.get_current_application_info().owner;
let is_admin = message.member_permissions().administrator();
if message.author == owner || is_admin {
let arc = Arc::clone(&handler.channel_id);
let mut channel_ids = arc.lock().unwrap();
match set {
Set => {
channel_ids.insert(message.channel_id);
message.say("Channel set");
save_channels(&channel_ids, message, &owner);
}
Unset => {
if channel_ids.remove(&message.channel_id) {
message.say("Channel unset");
} else {
message.say("Channel was not set");
}
}
}
} else {
message.reply("You do not have the proper permissions to set the channel.");
}
}
impl EventHandler for Handler {
fn message(&self, context: Context, message: Message) {
let message = MessageHelper::new(message, context);
if message.author.bot {
return;
}
let mut message_content = &message.content[..];
let mut convert_command = false;
if !message.content.is_empty() && &message.content[0..1] == "%" {
match message.content[1..].trim() {
s if s.starts_with("convert") => {
message_content = &s[7..];
convert_command = true;
}
"update" => {
update(&message);
return;
}
"set_channel" => set_or_unset_channel(self, &message, Set),
"unset_channel" => set_or_unset_channel(self, &message, Unset),
"help" => {
let _ =
message.say(
MessageBuilder::new()
.push("Version 1.3\nCommands:")
.push_codeblock_safe(HELP_TEXT, None)
.push(format!("Supported types: {}...", &SUPPORTED_TYPES[..90]))
.build()
);
}
"thanks" => {
let _ =
message.say(
MessageBuilder::new()
.push("A big thanks to everyone who has in anyway helped:")
.push_codeblock_safe(THANKS_TEXT, None)
.build()
);
}
"supported_types" => {
let _ =
message.say(
MessageBuilder::new()
.push("Supported filetypes:")
.push_codeblock_safe(SUPPORTED_TYPES, None)
.build()
);
}
s @ "ls" | s if s.starts_with("ls ") => arc_commands::ls(s, &message),
s if s.starts_with("get ") => arc_commands::get(s, &message),
s if s.starts_with("find_song ") => arc_commands::find_song(s, &message),
s if s.starts_with("get_song ") => arc_commands::get_song(s, &message),
s if s.starts_with("hash ") => arc_commands::hash(s, &message),
_ => {
message.say("Invalid command");
return;
}
}
}
if !convert_command {
let enabled_channels = Arc::clone(&self.channel_id);
if !enabled_channels.lock().unwrap().contains(&message.channel_id) {
return;
}
}
for attachment in &message.attachments {
let content = match attachment.download() {
Ok(content) => content,
Err(why) => {
println!("Error downloading attachment: {:?}", why);
message.say("Error downloading attachment");
return;
},
};
let path: PathBuf = ["/tmp/converter/", &attachment.filename].iter().collect();
match std::fs::create_dir_all(format!("/tmp/converter")) {
Ok(()) => {}
Err(why) => {
println!("Error creating dir: {:?}", why);
message.say("Error creating dir");
}
}
let mut file = match File::create(path.as_os_str()) {
Ok(file) => file,
Err(why) => {
println!("Error creating file: {:?}", why);
message.say("Error creating file");
return;
},
};
if let Err(why) = file.write(&content) {
println!("Error writing to file: {:?}", why);
return;
}
if match converter::extension(path.as_path()) {
"mscsb" | "c" | "wav" | "zip" | "yml" => true,
s => converter::byml::EXTENSIONS.contains(&s.trim_start_matches("s")),
} {
message.broadcast_typing();
}
match converter::convert(path, message_content) {
Ok(path) => {
let _ =
message.send_file(path.to_str().unwrap(), "Converted file")
.map_err(|e|{
message.say(
MessageBuilder::new()
.push("Error sending file: ")
.push_codeblock_safe(e.to_string(), None)
.build()
);
});
std::fs::remove_file(path).unwrap();
}
Err(why) => {
println!("Error converting file: {:?}", why);
message.say(
MessageBuilder::new()
.push("Error converting file:")
.push_codeblock_safe(why.message, None)
.build()
);
}
}
}
}
}
const MOTION_LABEL_PATH: &str = "motion_list_labels.txt";
const SQB_LABEL_PATH: &str = "sqb_labels.txt";
const CHANNELS_PATH: &str = "channels.txt";
fn load_channels() -> Vec<ChannelId> {
fs::read_to_string(CHANNELS_PATH).ok()
.map(|channels_file|{
channels_file.split('\n')
.map(|s| u64::from_str_radix(s, 10))
.filter_map(Result::ok)
.map(Into::into)
.collect()
})
.unwrap_or_default()
}
fn main() {
arc_commands::setup_songs();
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
let channels = load_channels();
// Login with a bot token from the environment
let mut client = Client::new(&env::var("DISCORD_TOKEN").expect("token"), Handler::new(channels))
.expect("Error creating client");
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn | (label_paths: &[&str]) {
hash40::set_labels(
label_paths
.iter()
.map(|label| hash40::read_labels(label).unwrap())
.flatten()
)
}
fn update(message: &MessageHelper) {
let update_output =
match Command::new("sh").arg("update.sh").output() {
Ok(x) => x,
Err(e) => {
message.say(
MessageBuilder::new()
.push("Failed to run update:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
return;
}
};
if update_output.status.success() {
let out = std::str::from_utf8(&update_output.stdout[..]).unwrap();
message.say(out);
} else {
let err = std::str::from_utf8(&update_output.stderr[..]).unwrap();
message.say(
MessageBuilder::new()
.push("Error:")
.push_codeblock_safe(err, None)
.build()
);
}
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
}
| update_labels | identifier_name |
main.rs | #![feature(try_trait)]
#![feature(label_break_value)]
extern crate serenity;
mod converter;
mod arc_commands;
mod message_helper;
use message_helper::MessageHelper;
use std::sync::{Arc, Mutex};
use std::process::Command;
use serenity::model::prelude::*;
use serenity::prelude::*;
use serenity::utils::MessageBuilder;
use std::env;
use std::fs::{File, self};
use std::io::Write;
use std::path::PathBuf;
use std::collections::BTreeSet;
#[derive(Default)]
struct Handler {
channel_id: Arc<Mutex<BTreeSet<ChannelId>>>
}
impl Handler {
pub fn new<It: IntoIterator<Item=ChannelId>>(channels: It) -> Handler |
}
use converter::SUPPORTED_TYPES;
static HELP_TEXT: &str =
"%convert [args] - convert file even if channel isn't set
%help - display this message\n\
%set_channel - watch this channel for files\n\
%unset_channel - don't watch this channel to watch for files\n\
%update - update param labels and install paramxml if not installed\n\
%thanks - credits\n\
%supported_types - print all supported types
\n\
Arc commands\n\
%ls [folder] - list files/folders in arc
%get [file] - request a file from the arc
%find_song [song name query] - list songs for a given name
%get_song [song name query] - download the first song from %find_song
\n\
Include 'start,end' or 'start-end' for looping in wav -> nus3audio conversions";
static THANKS_TEXT: &str =
"jam1garner - bot programming, libnus3audio, mscdec/msclang, etc.\n\
Arthur (@BenArthur_7) - motion_list_rs, sqb-rs, and much more\n\
Moosehunter, jam1garner, Raytwo, soneek - VGAudio lopus support\n\
RandomTalkingBush, DemonSlayerX8, jam1garner - data.arc hashes
soneek - nus3audio help\n\
leoetlino - byml-v2\n\
TNN, Genwald - WAV/audio help\n\
Ploaj, SMG (ScanMountGoat) - ArcCross, SSBHLib\n\
Arthur, Dr. Hypercake, Birdwards, SMG, Meshima, TNN, Blazingflare, TheSmartKid - Param labels\n\
coolsonickirby, SushiiZ - testing help";
enum SetUnset {
Set,
Unset
}
use SetUnset::*;
fn save_channels(channel_ids: &BTreeSet<ChannelId>, message: &MessageHelper, owner: &User) {
if let Err(e) = fs::write(
CHANNELS_PATH,
channel_ids.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join("\n")
) {
message.say(
MessageBuilder::new()
.mention(owner)
.push(" Failed to save channel ids:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
};
}
fn set_or_unset_channel(handler: &Handler, message: &MessageHelper, set: SetUnset) {
let owner = message.get_current_application_info().owner;
let is_admin = message.member_permissions().administrator();
if message.author == owner || is_admin {
let arc = Arc::clone(&handler.channel_id);
let mut channel_ids = arc.lock().unwrap();
match set {
Set => {
channel_ids.insert(message.channel_id);
message.say("Channel set");
save_channels(&channel_ids, message, &owner);
}
Unset => {
if channel_ids.remove(&message.channel_id) {
message.say("Channel unset");
} else {
message.say("Channel was not set");
}
}
}
} else {
message.reply("You do not have the proper permissions to set the channel.");
}
}
impl EventHandler for Handler {
fn message(&self, context: Context, message: Message) {
let message = MessageHelper::new(message, context);
if message.author.bot {
return;
}
let mut message_content = &message.content[..];
let mut convert_command = false;
if !message.content.is_empty() && &message.content[0..1] == "%" {
match message.content[1..].trim() {
s if s.starts_with("convert") => {
message_content = &s[7..];
convert_command = true;
}
"update" => {
update(&message);
return;
}
"set_channel" => set_or_unset_channel(self, &message, Set),
"unset_channel" => set_or_unset_channel(self, &message, Unset),
"help" => {
let _ =
message.say(
MessageBuilder::new()
.push("Version 1.3\nCommands:")
.push_codeblock_safe(HELP_TEXT, None)
.push(format!("Supported types: {}...", &SUPPORTED_TYPES[..90]))
.build()
);
}
"thanks" => {
let _ =
message.say(
MessageBuilder::new()
.push("A big thanks to everyone who has in anyway helped:")
.push_codeblock_safe(THANKS_TEXT, None)
.build()
);
}
"supported_types" => {
let _ =
message.say(
MessageBuilder::new()
.push("Supported filetypes:")
.push_codeblock_safe(SUPPORTED_TYPES, None)
.build()
);
}
s @ "ls" | s if s.starts_with("ls ") => arc_commands::ls(s, &message),
s if s.starts_with("get ") => arc_commands::get(s, &message),
s if s.starts_with("find_song ") => arc_commands::find_song(s, &message),
s if s.starts_with("get_song ") => arc_commands::get_song(s, &message),
s if s.starts_with("hash ") => arc_commands::hash(s, &message),
_ => {
message.say("Invalid command");
return;
}
}
}
if !convert_command {
let enabled_channels = Arc::clone(&self.channel_id);
if !enabled_channels.lock().unwrap().contains(&message.channel_id) {
return;
}
}
for attachment in &message.attachments {
let content = match attachment.download() {
Ok(content) => content,
Err(why) => {
println!("Error downloading attachment: {:?}", why);
message.say("Error downloading attachment");
return;
},
};
let path: PathBuf = ["/tmp/converter/", &attachment.filename].iter().collect();
match std::fs::create_dir_all(format!("/tmp/converter")) {
Ok(()) => {}
Err(why) => {
println!("Error creating dir: {:?}", why);
message.say("Error creating dir");
}
}
let mut file = match File::create(path.as_os_str()) {
Ok(file) => file,
Err(why) => {
println!("Error creating file: {:?}", why);
message.say("Error creating file");
return;
},
};
if let Err(why) = file.write(&content) {
println!("Error writing to file: {:?}", why);
return;
}
if match converter::extension(path.as_path()) {
"mscsb" | "c" | "wav" | "zip" | "yml" => true,
s => converter::byml::EXTENSIONS.contains(&s.trim_start_matches("s")),
} {
message.broadcast_typing();
}
match converter::convert(path, message_content) {
Ok(path) => {
let _ =
message.send_file(path.to_str().unwrap(), "Converted file")
.map_err(|e|{
message.say(
MessageBuilder::new()
.push("Error sending file: ")
.push_codeblock_safe(e.to_string(), None)
.build()
);
});
std::fs::remove_file(path).unwrap();
}
Err(why) => {
println!("Error converting file: {:?}", why);
message.say(
MessageBuilder::new()
.push("Error converting file:")
.push_codeblock_safe(why.message, None)
.build()
);
}
}
}
}
}
const MOTION_LABEL_PATH: &str = "motion_list_labels.txt";
const SQB_LABEL_PATH: &str = "sqb_labels.txt";
const CHANNELS_PATH: &str = "channels.txt";
fn load_channels() -> Vec<ChannelId> {
fs::read_to_string(CHANNELS_PATH).ok()
.map(|channels_file|{
channels_file.split('\n')
.map(|s| u64::from_str_radix(s, 10))
.filter_map(Result::ok)
.map(Into::into)
.collect()
})
.unwrap_or_default()
}
fn main() {
arc_commands::setup_songs();
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
let channels = load_channels();
// Login with a bot token from the environment
let mut client = Client::new(&env::var("DISCORD_TOKEN").expect("token"), Handler::new(channels))
.expect("Error creating client");
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn update_labels(label_paths: &[&str]) {
hash40::set_labels(
label_paths
.iter()
.map(|label| hash40::read_labels(label).unwrap())
.flatten()
)
}
fn update(message: &MessageHelper) {
let update_output =
match Command::new("sh").arg("update.sh").output() {
Ok(x) => x,
Err(e) => {
message.say(
MessageBuilder::new()
.push("Failed to run update:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
return;
}
};
if update_output.status.success() {
let out = std::str::from_utf8(&update_output.stdout[..]).unwrap();
message.say(out);
} else {
let err = std::str::from_utf8(&update_output.stderr[..]).unwrap();
message.say(
MessageBuilder::new()
.push("Error:")
.push_codeblock_safe(err, None)
.build()
);
}
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
}
| {
Handler {
channel_id: Arc::new(Mutex::new(channels.into_iter().collect())),
..Default::default()
}
} | identifier_body |
main.rs | #![feature(try_trait)]
#![feature(label_break_value)]
extern crate serenity;
mod converter;
mod arc_commands;
mod message_helper;
use message_helper::MessageHelper;
use std::sync::{Arc, Mutex};
use std::process::Command;
use serenity::model::prelude::*;
use serenity::prelude::*;
use serenity::utils::MessageBuilder;
use std::env;
use std::fs::{File, self};
use std::io::Write;
use std::path::PathBuf;
use std::collections::BTreeSet;
#[derive(Default)]
struct Handler {
channel_id: Arc<Mutex<BTreeSet<ChannelId>>>
}
impl Handler {
pub fn new<It: IntoIterator<Item=ChannelId>>(channels: It) -> Handler {
Handler {
channel_id: Arc::new(Mutex::new(channels.into_iter().collect())),
..Default::default()
}
}
}
use converter::SUPPORTED_TYPES;
static HELP_TEXT: &str =
"%convert [args] - convert file even if channel isn't set
%help - display this message\n\
%set_channel - watch this channel for files\n\
%unset_channel - don't watch this channel to watch for files\n\
%update - update param labels and install paramxml if not installed\n\
%thanks - credits\n\
%supported_types - print all supported types
\n\
Arc commands\n\
%ls [folder] - list files/folders in arc
%get [file] - request a file from the arc
%find_song [song name query] - list songs for a given name
%get_song [song name query] - download the first song from %find_song
\n\
Include 'start,end' or 'start-end' for looping in wav -> nus3audio conversions";
static THANKS_TEXT: &str =
"jam1garner - bot programming, libnus3audio, mscdec/msclang, etc.\n\
Arthur (@BenArthur_7) - motion_list_rs, sqb-rs, and much more\n\
Moosehunter, jam1garner, Raytwo, soneek - VGAudio lopus support\n\
RandomTalkingBush, DemonSlayerX8, jam1garner - data.arc hashes
soneek - nus3audio help\n\
leoetlino - byml-v2\n\
TNN, Genwald - WAV/audio help\n\
Ploaj, SMG (ScanMountGoat) - ArcCross, SSBHLib\n\ | enum SetUnset {
Set,
Unset
}
use SetUnset::*;
fn save_channels(channel_ids: &BTreeSet<ChannelId>, message: &MessageHelper, owner: &User) {
if let Err(e) = fs::write(
CHANNELS_PATH,
channel_ids.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join("\n")
) {
message.say(
MessageBuilder::new()
.mention(owner)
.push(" Failed to save channel ids:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
};
}
fn set_or_unset_channel(handler: &Handler, message: &MessageHelper, set: SetUnset) {
let owner = message.get_current_application_info().owner;
let is_admin = message.member_permissions().administrator();
if message.author == owner || is_admin {
let arc = Arc::clone(&handler.channel_id);
let mut channel_ids = arc.lock().unwrap();
match set {
Set => {
channel_ids.insert(message.channel_id);
message.say("Channel set");
save_channels(&channel_ids, message, &owner);
}
Unset => {
if channel_ids.remove(&message.channel_id) {
message.say("Channel unset");
} else {
message.say("Channel was not set");
}
}
}
} else {
message.reply("You do not have the proper permissions to set the channel.");
}
}
impl EventHandler for Handler {
fn message(&self, context: Context, message: Message) {
let message = MessageHelper::new(message, context);
if message.author.bot {
return;
}
let mut message_content = &message.content[..];
let mut convert_command = false;
if !message.content.is_empty() && &message.content[0..1] == "%" {
match message.content[1..].trim() {
s if s.starts_with("convert") => {
message_content = &s[7..];
convert_command = true;
}
"update" => {
update(&message);
return;
}
"set_channel" => set_or_unset_channel(self, &message, Set),
"unset_channel" => set_or_unset_channel(self, &message, Unset),
"help" => {
let _ =
message.say(
MessageBuilder::new()
.push("Version 1.3\nCommands:")
.push_codeblock_safe(HELP_TEXT, None)
.push(format!("Supported types: {}...", &SUPPORTED_TYPES[..90]))
.build()
);
}
"thanks" => {
let _ =
message.say(
MessageBuilder::new()
.push("A big thanks to everyone who has in anyway helped:")
.push_codeblock_safe(THANKS_TEXT, None)
.build()
);
}
"supported_types" => {
let _ =
message.say(
MessageBuilder::new()
.push("Supported filetypes:")
.push_codeblock_safe(SUPPORTED_TYPES, None)
.build()
);
}
s @ "ls" | s if s.starts_with("ls ") => arc_commands::ls(s, &message),
s if s.starts_with("get ") => arc_commands::get(s, &message),
s if s.starts_with("find_song ") => arc_commands::find_song(s, &message),
s if s.starts_with("get_song ") => arc_commands::get_song(s, &message),
s if s.starts_with("hash ") => arc_commands::hash(s, &message),
_ => {
message.say("Invalid command");
return;
}
}
}
if !convert_command {
let enabled_channels = Arc::clone(&self.channel_id);
if !enabled_channels.lock().unwrap().contains(&message.channel_id) {
return;
}
}
for attachment in &message.attachments {
let content = match attachment.download() {
Ok(content) => content,
Err(why) => {
println!("Error downloading attachment: {:?}", why);
message.say("Error downloading attachment");
return;
},
};
let path: PathBuf = ["/tmp/converter/", &attachment.filename].iter().collect();
match std::fs::create_dir_all(format!("/tmp/converter")) {
Ok(()) => {}
Err(why) => {
println!("Error creating dir: {:?}", why);
message.say("Error creating dir");
}
}
let mut file = match File::create(path.as_os_str()) {
Ok(file) => file,
Err(why) => {
println!("Error creating file: {:?}", why);
message.say("Error creating file");
return;
},
};
if let Err(why) = file.write(&content) {
println!("Error writing to file: {:?}", why);
return;
}
if match converter::extension(path.as_path()) {
"mscsb" | "c" | "wav" | "zip" | "yml" => true,
s => converter::byml::EXTENSIONS.contains(&s.trim_start_matches("s")),
} {
message.broadcast_typing();
}
match converter::convert(path, message_content) {
Ok(path) => {
let _ =
message.send_file(path.to_str().unwrap(), "Converted file")
.map_err(|e|{
message.say(
MessageBuilder::new()
.push("Error sending file: ")
.push_codeblock_safe(e.to_string(), None)
.build()
);
});
std::fs::remove_file(path).unwrap();
}
Err(why) => {
println!("Error converting file: {:?}", why);
message.say(
MessageBuilder::new()
.push("Error converting file:")
.push_codeblock_safe(why.message, None)
.build()
);
}
}
}
}
}
const MOTION_LABEL_PATH: &str = "motion_list_labels.txt";
const SQB_LABEL_PATH: &str = "sqb_labels.txt";
const CHANNELS_PATH: &str = "channels.txt";
fn load_channels() -> Vec<ChannelId> {
fs::read_to_string(CHANNELS_PATH).ok()
.map(|channels_file|{
channels_file.split('\n')
.map(|s| u64::from_str_radix(s, 10))
.filter_map(Result::ok)
.map(Into::into)
.collect()
})
.unwrap_or_default()
}
fn main() {
arc_commands::setup_songs();
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
let channels = load_channels();
// Login with a bot token from the environment
let mut client = Client::new(&env::var("DISCORD_TOKEN").expect("token"), Handler::new(channels))
.expect("Error creating client");
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn update_labels(label_paths: &[&str]) {
hash40::set_labels(
label_paths
.iter()
.map(|label| hash40::read_labels(label).unwrap())
.flatten()
)
}
fn update(message: &MessageHelper) {
let update_output =
match Command::new("sh").arg("update.sh").output() {
Ok(x) => x,
Err(e) => {
message.say(
MessageBuilder::new()
.push("Failed to run update:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
return;
}
};
if update_output.status.success() {
let out = std::str::from_utf8(&update_output.stdout[..]).unwrap();
message.say(out);
} else {
let err = std::str::from_utf8(&update_output.stderr[..]).unwrap();
message.say(
MessageBuilder::new()
.push("Error:")
.push_codeblock_safe(err, None)
.build()
);
}
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
} | Arthur, Dr. Hypercake, Birdwards, SMG, Meshima, TNN, Blazingflare, TheSmartKid - Param labels\n\
coolsonickirby, SushiiZ - testing help";
| random_line_split |
core_model_estimator.py | """ Core tensorflow model that basically encapsulates all the basic ops
in order to run an experiment.
"""
import os
from absl import logging
import tensorflow as tf
from tensorflow.contrib import tpu
from .core_datamanager_estimator import DataManagerTPU as DataManager
class CoreModelTPU(object):
def __init__(self,
tf_session: tf.Session,
learning_rate: float,
training_dataset: DataManager = None,
validation_dataset: DataManager = None,
output_path: str = '../outputs',
use_tpu: str = False,
tpu_name: list = [],
data_dir= '/mnt/iowa_bucket/cifar10/data/'
):
self.data_dir = data_dir
if output_path[-1] == '/':
output_path = output_path[:-1]
self.output_path = output_path + '/' + self.__class__.__name__
self.session = tf_session
# TODO Get rid of the .datasource thing
self.dataset = {}
# if training_dataset: self.dataset['train'] = training_dataset.datasource
# if validation_dataset: self.dataset['validation'] = validation_dataset.datasource
self.datasource = {}
self.datasource['train'] = training_dataset
self.datasource['validation'] = validation_dataset
self._train_model = True if training_dataset is not None else False
self._validate_model = True if validation_dataset is not None else False
self.learning_rate = learning_rate
self.use_tpu = use_tpu
def | (self, data_source: DataManager , mode: str): #pylint: disable=E0202
"""Definition of the model to use. Do not modify the function here
placeholder for the actual definition in `model/` (see example)
Args:
data_source (DataManager): Data manager object for the input data
mode (str): Training and testing? # TODO Properly implement
Raises:
NotImplementedError: Model has to be implemented yet (in a separate instance in model/)
"""
raise NotImplementedError('No model defined.')
def build_model(self):
""" Build the model. """
if self.use_tpu:
self._tpu_build()
else:
self._regular_build()
def _tpu_build(self):
"""Build with TPUEstimators for TPU usage"""
def _define_model(features, labels, mode, params):
data_source = (features, labels)
self.outputs = {}
self.losses = {}
self.otters = {}
outputs, losses, others = self.define_model(data_source, mode)
if mode == tf.estimator.ModeKeys.EVAL:
return tpu.TPUEstimatorSpec(
mode=mode, loss=losses, eval_metrics=others)
if mode == tf.estimator.ModeKeys.PREDICT:
return tpu.TPUEstimatorSpec(
mode=mode, predictions=outputs
)
if mode == tf.estimator.ModeKeys.TRAIN:
self.losses['train'] = losses
self._build_optimizer(tpu_support=True)
if not len(self.optimize_ops) == 1:
logging.error('Implementati Error: More than one optimizer defined')
logging.warning(' [*] Selecting only the first optimizer')
return tpu.TPUEstimatorSpec(
mode=mode, loss=losses[0], train_op=self.optimize_ops[0]
)
tpu_name = ['node-1'] # TODO Bring outside
tpu_iterations = 500 # TODO Bring outside
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
tpu_name)
run_config = tf.contrib.tpu.RunConfig(
model_dir=self.output_path,
cluster=tpu_cluster_resolver,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True),
tpu_config=tpu.TPUConfig(tpu_iterations),
)
self.estimator = tpu.TPUEstimator(
model_fn=_define_model,
use_tpu=True,
train_batch_size=32*4, #self.dataset['train'].batch_size,
eval_batch_size=32*4, #self.dataset['validation'].batch_size,
config=run_config,
params={"data_dir": self.data_dir}
)
def _regular_build(self):
"""Normal build for CPU/GPU usage"""
# This overwrites define_model, is that ok?
self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101
self.define_model,
create_scope_now_=True)
self.outputs = {}
self.losses = {}
self.otters = {}
def _build(mode):
outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)
self.outputs[mode] = outputs
self.losses[mode] = losses
self.otters[mode] = others
if mode == 'train':
self._build_optimizer()
# TODO Move clean and summary to proper section
self.summary_ops = {}
if self._train_model:
_build('train')
summary = []
for idx, loss in enumerate(self.losses['train']):
summary.append(
tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))
for idx, element in enumerate(self.otters['train']):
summary.append(
tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))
self.summary_ops['train'] = tf.summary.merge(summary)
if self._validate_model:
_build('validation')
summary = []
for idx, loss in enumerate(self.losses['validation']):
summary.append(
tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss))
for idx, element in enumerate(self.otters['validation']):
summary.append(
tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))
self.summary_ops['validation'] = tf.summary.merge(summary)
self.writer = tf.summary.FileWriter(self.output_path,
self.session.graph)
self.saver = tf.train.Saver()
# TODO Add routine to save
logging.info('Model construction complete.')
def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer, tpu_support=False):
"""Buids the optimizer(s) to minimize the loss(es) of the model.
Args:
optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which
optimizer to use.
tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard
optimier, required for TPU usage.
"""
self.optimize_ops = []
for loss in self.losses['train']: # TODO Create apropoiate external training scheme
optimize_op = optimizer_to_use(
learning_rate=self.learning_rate
)
if tpu_support:
optimize_op = tpu.CrossShardOptimizer(optimize_op)
optimize_op = optimize_op.minimize(
loss=loss,
global_step=tf.train.get_global_step()
)
self.optimize_ops.append(optimize_op)
logging.info('Optimizers built')
def train(self, steps, input_fn=None):
if self.use_tpu:
self._tpu_train(steps, input_fn)
else:
self._regular_train(steps)
def _tpu_train(self, steps, input_fn):
# def _input_fn(params):
# featuers, labels = self.datasource['train'].input_fn(params['batch_size'])
# return featuers, labels
self.estimator.train(
input_fn=input_fn,
max_steps=steps)
logging.info('Es ist train?')
self.estimator.evaluate(
input_fn=self.dataset['validation'],
steps=steps/50
)
print("\nTest set accuracy: {accuracy:0.3f}\n".format(**eval_result))
def _regular_train(self, steps):
# Initialize or check if checkpoint # TODO add checkpoint manager
self.session.run(tf.global_variables_initializer())
initial_step = self._restore()
fetches = {}
fetches['optimize_ops'] = self.optimize_ops
# fetches['losses'] = self.losses['train']
# if self.otters['train']:
# fetches['others'] = self.otters['train']
fetches['summary_ops'] = self.summary_ops['train']
for step in range(initial_step, steps): # TODO start from checkpoint steps
# TODO clean code and optimize ops
train_out = self.session.run(fetches=fetches)
self.writer.add_summary(train_out['summary_ops'], global_step=step)
if step % 50 == 0: # TODO every how many steps? Automate?
val = self._validate(step)
logging.info('Step {} -- Validation result: {}'.format(step, val))
if step % 1000 == 0: # For now just another arbitrary number (how heavy is saving?)
self._save(step)
logging.info('Done training.')
def _validate(self, global_step):
""" Run network on validation set """
# Todo clean summaries and add example outputs
fetches = {}
fetches['losses'] = self.losses['validation']
if self.otters['train']:
fetches['others'] = self.otters['validation']
fetches['summary_ops'] = self.summary_ops['validation']
validation_out = self.session.run(fetches=fetches)
self.writer.add_summary(validation_out['summary_ops'], global_step=global_step)
del validation_out['summary_ops']
return validation_out
def _save(self, step):
"""Save the model weights.
Args:
step (int): Training step.
"""
output_path = self.output_path + '/checkpoints/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
self.saver.save(self.session, save_path=output_path,global_step=step)
def _restore(self):
"""Restore the trained variables from the last stored checkpoint
Returns:
int: The training step when this model was saved.
"""
output_path = self.output_path + '/checkpoints/'
checkpoint = tf.train.latest_checkpoint(output_path)
if checkpoint:
self.saver.restore(self.session, save_path=checkpoint)
restored_step = int(checkpoint.split('-')[-1]) # Robust enough?
return restored_step
logging.info('Starting training from scratch.')
return 0
def evaluate(self):
pass
| define_model | identifier_name |
core_model_estimator.py | """ Core tensorflow model that basically encapsulates all the basic ops
in order to run an experiment.
"""
import os
from absl import logging
import tensorflow as tf
from tensorflow.contrib import tpu
from .core_datamanager_estimator import DataManagerTPU as DataManager
class CoreModelTPU(object):
def __init__(self,
tf_session: tf.Session,
learning_rate: float,
training_dataset: DataManager = None,
validation_dataset: DataManager = None,
output_path: str = '../outputs',
use_tpu: str = False,
tpu_name: list = [],
data_dir= '/mnt/iowa_bucket/cifar10/data/'
):
self.data_dir = data_dir
if output_path[-1] == '/':
output_path = output_path[:-1]
self.output_path = output_path + '/' + self.__class__.__name__
self.session = tf_session
# TODO Get rid of the .datasource thing
self.dataset = {}
# if training_dataset: self.dataset['train'] = training_dataset.datasource
# if validation_dataset: self.dataset['validation'] = validation_dataset.datasource
self.datasource = {}
self.datasource['train'] = training_dataset
self.datasource['validation'] = validation_dataset
self._train_model = True if training_dataset is not None else False
self._validate_model = True if validation_dataset is not None else False
self.learning_rate = learning_rate
self.use_tpu = use_tpu
def define_model(self, data_source: DataManager , mode: str): #pylint: disable=E0202
"""Definition of the model to use. Do not modify the function here
placeholder for the actual definition in `model/` (see example)
Args:
data_source (DataManager): Data manager object for the input data
mode (str): Training and testing? # TODO Properly implement
Raises:
NotImplementedError: Model has to be implemented yet (in a separate instance in model/)
"""
raise NotImplementedError('No model defined.')
def build_model(self):
""" Build the model. """
if self.use_tpu:
self._tpu_build()
else:
self._regular_build()
def _tpu_build(self):
"""Build with TPUEstimators for TPU usage"""
def _define_model(features, labels, mode, params):
data_source = (features, labels)
self.outputs = {}
self.losses = {}
self.otters = {}
outputs, losses, others = self.define_model(data_source, mode)
if mode == tf.estimator.ModeKeys.EVAL:
return tpu.TPUEstimatorSpec(
mode=mode, loss=losses, eval_metrics=others)
if mode == tf.estimator.ModeKeys.PREDICT:
return tpu.TPUEstimatorSpec(
mode=mode, predictions=outputs
)
if mode == tf.estimator.ModeKeys.TRAIN:
self.losses['train'] = losses
self._build_optimizer(tpu_support=True)
if not len(self.optimize_ops) == 1:
logging.error('Implementati Error: More than one optimizer defined')
logging.warning(' [*] Selecting only the first optimizer')
return tpu.TPUEstimatorSpec(
mode=mode, loss=losses[0], train_op=self.optimize_ops[0]
)
tpu_name = ['node-1'] # TODO Bring outside
tpu_iterations = 500 # TODO Bring outside
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
tpu_name)
run_config = tf.contrib.tpu.RunConfig(
model_dir=self.output_path,
cluster=tpu_cluster_resolver,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True),
tpu_config=tpu.TPUConfig(tpu_iterations),
)
self.estimator = tpu.TPUEstimator(
model_fn=_define_model,
use_tpu=True,
train_batch_size=32*4, #self.dataset['train'].batch_size,
eval_batch_size=32*4, #self.dataset['validation'].batch_size,
config=run_config,
params={"data_dir": self.data_dir}
)
def _regular_build(self):
"""Normal build for CPU/GPU usage"""
# This overwrites define_model, is that ok?
self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101
self.define_model,
create_scope_now_=True)
self.outputs = {}
self.losses = {}
self.otters = {}
def _build(mode):
outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)
self.outputs[mode] = outputs
self.losses[mode] = losses
self.otters[mode] = others
if mode == 'train':
self._build_optimizer()
# TODO Move clean and summary to proper section
self.summary_ops = {}
if self._train_model:
_build('train')
summary = []
for idx, loss in enumerate(self.losses['train']):
summary.append(
tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))
for idx, element in enumerate(self.otters['train']):
summary.append(
tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))
self.summary_ops['train'] = tf.summary.merge(summary)
if self._validate_model:
_build('validation')
summary = []
for idx, loss in enumerate(self.losses['validation']):
|
for idx, element in enumerate(self.otters['validation']):
summary.append(
tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))
self.summary_ops['validation'] = tf.summary.merge(summary)
self.writer = tf.summary.FileWriter(self.output_path,
self.session.graph)
self.saver = tf.train.Saver()
# TODO Add routine to save
logging.info('Model construction complete.')
def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer, tpu_support=False):
"""Buids the optimizer(s) to minimize the loss(es) of the model.
Args:
optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which
optimizer to use.
tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard
optimier, required for TPU usage.
"""
self.optimize_ops = []
for loss in self.losses['train']: # TODO Create apropoiate external training scheme
optimize_op = optimizer_to_use(
learning_rate=self.learning_rate
)
if tpu_support:
optimize_op = tpu.CrossShardOptimizer(optimize_op)
optimize_op = optimize_op.minimize(
loss=loss,
global_step=tf.train.get_global_step()
)
self.optimize_ops.append(optimize_op)
logging.info('Optimizers built')
def train(self, steps, input_fn=None):
if self.use_tpu:
self._tpu_train(steps, input_fn)
else:
self._regular_train(steps)
def _tpu_train(self, steps, input_fn):
# def _input_fn(params):
# featuers, labels = self.datasource['train'].input_fn(params['batch_size'])
# return featuers, labels
self.estimator.train(
input_fn=input_fn,
max_steps=steps)
logging.info('Es ist train?')
self.estimator.evaluate(
input_fn=self.dataset['validation'],
steps=steps/50
)
print("\nTest set accuracy: {accuracy:0.3f}\n".format(**eval_result))
def _regular_train(self, steps):
# Initialize or check if checkpoint # TODO add checkpoint manager
self.session.run(tf.global_variables_initializer())
initial_step = self._restore()
fetches = {}
fetches['optimize_ops'] = self.optimize_ops
# fetches['losses'] = self.losses['train']
# if self.otters['train']:
# fetches['others'] = self.otters['train']
fetches['summary_ops'] = self.summary_ops['train']
for step in range(initial_step, steps): # TODO start from checkpoint steps
# TODO clean code and optimize ops
train_out = self.session.run(fetches=fetches)
self.writer.add_summary(train_out['summary_ops'], global_step=step)
if step % 50 == 0: # TODO every how many steps? Automate?
val = self._validate(step)
logging.info('Step {} -- Validation result: {}'.format(step, val))
if step % 1000 == 0: # For now just another arbitrary number (how heavy is saving?)
self._save(step)
logging.info('Done training.')
def _validate(self, global_step):
""" Run network on validation set """
# Todo clean summaries and add example outputs
fetches = {}
fetches['losses'] = self.losses['validation']
if self.otters['train']:
fetches['others'] = self.otters['validation']
fetches['summary_ops'] = self.summary_ops['validation']
validation_out = self.session.run(fetches=fetches)
self.writer.add_summary(validation_out['summary_ops'], global_step=global_step)
del validation_out['summary_ops']
return validation_out
def _save(self, step):
"""Save the model weights.
Args:
step (int): Training step.
"""
output_path = self.output_path + '/checkpoints/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
self.saver.save(self.session, save_path=output_path,global_step=step)
def _restore(self):
"""Restore the trained variables from the last stored checkpoint
Returns:
int: The training step when this model was saved.
"""
output_path = self.output_path + '/checkpoints/'
checkpoint = tf.train.latest_checkpoint(output_path)
if checkpoint:
self.saver.restore(self.session, save_path=checkpoint)
restored_step = int(checkpoint.split('-')[-1]) # Robust enough?
return restored_step
logging.info('Starting training from scratch.')
return 0
def evaluate(self):
pass
| summary.append(
tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss)) | conditional_block |
core_model_estimator.py | """ Core tensorflow model that basically encapsulates all the basic ops
in order to run an experiment.
"""
import os
from absl import logging
import tensorflow as tf
from tensorflow.contrib import tpu
from .core_datamanager_estimator import DataManagerTPU as DataManager
class CoreModelTPU(object):
def __init__(self,
tf_session: tf.Session,
learning_rate: float,
training_dataset: DataManager = None,
validation_dataset: DataManager = None,
output_path: str = '../outputs',
use_tpu: str = False,
tpu_name: list = [],
data_dir= '/mnt/iowa_bucket/cifar10/data/'
):
self.data_dir = data_dir
if output_path[-1] == '/':
output_path = output_path[:-1]
self.output_path = output_path + '/' + self.__class__.__name__
self.session = tf_session
# TODO Get rid of the .datasource thing
self.dataset = {}
# if training_dataset: self.dataset['train'] = training_dataset.datasource
# if validation_dataset: self.dataset['validation'] = validation_dataset.datasource
self.datasource = {}
self.datasource['train'] = training_dataset
self.datasource['validation'] = validation_dataset
self._train_model = True if training_dataset is not None else False
self._validate_model = True if validation_dataset is not None else False
self.learning_rate = learning_rate
self.use_tpu = use_tpu
def define_model(self, data_source: DataManager , mode: str): #pylint: disable=E0202
"""Definition of the model to use. Do not modify the function here
placeholder for the actual definition in `model/` (see example)
Args:
data_source (DataManager): Data manager object for the input data
mode (str): Training and testing? # TODO Properly implement
Raises:
NotImplementedError: Model has to be implemented yet (in a separate instance in model/)
"""
raise NotImplementedError('No model defined.')
def build_model(self):
""" Build the model. """
if self.use_tpu:
self._tpu_build()
else:
self._regular_build()
def _tpu_build(self):
"""Build with TPUEstimators for TPU usage"""
def _define_model(features, labels, mode, params):
data_source = (features, labels)
self.outputs = {}
self.losses = {}
self.otters = {}
outputs, losses, others = self.define_model(data_source, mode)
if mode == tf.estimator.ModeKeys.EVAL:
return tpu.TPUEstimatorSpec(
mode=mode, loss=losses, eval_metrics=others)
if mode == tf.estimator.ModeKeys.PREDICT:
return tpu.TPUEstimatorSpec(
mode=mode, predictions=outputs
)
if mode == tf.estimator.ModeKeys.TRAIN:
self.losses['train'] = losses
self._build_optimizer(tpu_support=True)
if not len(self.optimize_ops) == 1:
logging.error('Implementati Error: More than one optimizer defined')
logging.warning(' [*] Selecting only the first optimizer')
return tpu.TPUEstimatorSpec(
mode=mode, loss=losses[0], train_op=self.optimize_ops[0]
)
tpu_name = ['node-1'] # TODO Bring outside
tpu_iterations = 500 # TODO Bring outside
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
tpu_name)
run_config = tf.contrib.tpu.RunConfig(
model_dir=self.output_path,
cluster=tpu_cluster_resolver,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True),
tpu_config=tpu.TPUConfig(tpu_iterations),
)
self.estimator = tpu.TPUEstimator(
model_fn=_define_model,
use_tpu=True,
train_batch_size=32*4, #self.dataset['train'].batch_size,
eval_batch_size=32*4, #self.dataset['validation'].batch_size,
config=run_config,
params={"data_dir": self.data_dir}
)
def _regular_build(self):
"""Normal build for CPU/GPU usage"""
# This overwrites define_model, is that ok?
self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101
self.define_model,
create_scope_now_=True)
self.outputs = {}
self.losses = {}
self.otters = {}
def _build(mode):
outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)
self.outputs[mode] = outputs
self.losses[mode] = losses
self.otters[mode] = others
if mode == 'train':
self._build_optimizer()
# TODO Move clean and summary to proper section
self.summary_ops = {}
if self._train_model:
_build('train')
summary = []
for idx, loss in enumerate(self.losses['train']):
summary.append(
tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))
for idx, element in enumerate(self.otters['train']):
summary.append(
tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))
self.summary_ops['train'] = tf.summary.merge(summary)
if self._validate_model:
_build('validation')
summary = []
for idx, loss in enumerate(self.losses['validation']):
summary.append(
tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss))
for idx, element in enumerate(self.otters['validation']):
summary.append(
tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))
self.summary_ops['validation'] = tf.summary.merge(summary)
self.writer = tf.summary.FileWriter(self.output_path,
self.session.graph)
self.saver = tf.train.Saver()
# TODO Add routine to save
logging.info('Model construction complete.')
def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer, tpu_support=False):
"""Buids the optimizer(s) to minimize the loss(es) of the model.
Args:
optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which
optimizer to use.
tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard
optimier, required for TPU usage.
"""
self.optimize_ops = []
for loss in self.losses['train']: # TODO Create apropoiate external training scheme
optimize_op = optimizer_to_use(
learning_rate=self.learning_rate
)
if tpu_support:
optimize_op = tpu.CrossShardOptimizer(optimize_op)
optimize_op = optimize_op.minimize(
loss=loss,
global_step=tf.train.get_global_step()
)
self.optimize_ops.append(optimize_op)
logging.info('Optimizers built')
def train(self, steps, input_fn=None):
if self.use_tpu:
self._tpu_train(steps, input_fn)
else:
self._regular_train(steps)
def _tpu_train(self, steps, input_fn):
# def _input_fn(params):
# featuers, labels = self.datasource['train'].input_fn(params['batch_size'])
# return featuers, labels
self.estimator.train(
input_fn=input_fn,
max_steps=steps)
logging.info('Es ist train?')
self.estimator.evaluate(
input_fn=self.dataset['validation'],
steps=steps/50
)
print("\nTest set accuracy: {accuracy:0.3f}\n".format(**eval_result))
def _regular_train(self, steps):
# Initialize or check if checkpoint # TODO add checkpoint manager
self.session.run(tf.global_variables_initializer())
initial_step = self._restore()
fetches = {}
fetches['optimize_ops'] = self.optimize_ops
# fetches['losses'] = self.losses['train']
# if self.otters['train']:
# fetches['others'] = self.otters['train']
fetches['summary_ops'] = self.summary_ops['train']
for step in range(initial_step, steps): # TODO start from checkpoint steps
# TODO clean code and optimize ops
train_out = self.session.run(fetches=fetches)
self.writer.add_summary(train_out['summary_ops'], global_step=step)
if step % 50 == 0: # TODO every how many steps? Automate?
val = self._validate(step)
logging.info('Step {} -- Validation result: {}'.format(step, val))
if step % 1000 == 0: # For now just another arbitrary number (how heavy is saving?)
self._save(step)
logging.info('Done training.')
def _validate(self, global_step):
""" Run network on validation set """
# Todo clean summaries and add example outputs
fetches = {}
fetches['losses'] = self.losses['validation']
if self.otters['train']:
fetches['others'] = self.otters['validation']
fetches['summary_ops'] = self.summary_ops['validation']
validation_out = self.session.run(fetches=fetches)
self.writer.add_summary(validation_out['summary_ops'], global_step=global_step)
del validation_out['summary_ops']
return validation_out
def _save(self, step):
"""Save the model weights.
Args:
step (int): Training step.
"""
output_path = self.output_path + '/checkpoints/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
self.saver.save(self.session, save_path=output_path,global_step=step)
def _restore(self):
"""Restore the trained variables from the last stored checkpoint
Returns:
int: The training step when this model was saved.
"""
output_path = self.output_path + '/checkpoints/'
checkpoint = tf.train.latest_checkpoint(output_path)
if checkpoint:
self.saver.restore(self.session, save_path=checkpoint)
restored_step = int(checkpoint.split('-')[-1]) # Robust enough? | pass | return restored_step
logging.info('Starting training from scratch.')
return 0
def evaluate(self): | random_line_split |
core_model_estimator.py | """ Core tensorflow model that basically encapsulates all the basic ops
in order to run an experiment.
"""
import os
from absl import logging
import tensorflow as tf
from tensorflow.contrib import tpu
from .core_datamanager_estimator import DataManagerTPU as DataManager
class CoreModelTPU(object):
def __init__(self,
tf_session: tf.Session,
learning_rate: float,
training_dataset: DataManager = None,
validation_dataset: DataManager = None,
output_path: str = '../outputs',
use_tpu: str = False,
tpu_name: list = [],
data_dir= '/mnt/iowa_bucket/cifar10/data/'
):
self.data_dir = data_dir
if output_path[-1] == '/':
output_path = output_path[:-1]
self.output_path = output_path + '/' + self.__class__.__name__
self.session = tf_session
# TODO Get rid of the .datasource thing
self.dataset = {}
# if training_dataset: self.dataset['train'] = training_dataset.datasource
# if validation_dataset: self.dataset['validation'] = validation_dataset.datasource
self.datasource = {}
self.datasource['train'] = training_dataset
self.datasource['validation'] = validation_dataset
self._train_model = True if training_dataset is not None else False
self._validate_model = True if validation_dataset is not None else False
self.learning_rate = learning_rate
self.use_tpu = use_tpu
def define_model(self, data_source: DataManager , mode: str): #pylint: disable=E0202
"""Definition of the model to use. Do not modify the function here
placeholder for the actual definition in `model/` (see example)
Args:
data_source (DataManager): Data manager object for the input data
mode (str): Training and testing? # TODO Properly implement
Raises:
NotImplementedError: Model has to be implemented yet (in a separate instance in model/)
"""
raise NotImplementedError('No model defined.')
def build_model(self):
""" Build the model. """
if self.use_tpu:
self._tpu_build()
else:
self._regular_build()
def _tpu_build(self):
"""Build with TPUEstimators for TPU usage"""
def _define_model(features, labels, mode, params):
data_source = (features, labels)
self.outputs = {}
self.losses = {}
self.otters = {}
outputs, losses, others = self.define_model(data_source, mode)
if mode == tf.estimator.ModeKeys.EVAL:
return tpu.TPUEstimatorSpec(
mode=mode, loss=losses, eval_metrics=others)
if mode == tf.estimator.ModeKeys.PREDICT:
return tpu.TPUEstimatorSpec(
mode=mode, predictions=outputs
)
if mode == tf.estimator.ModeKeys.TRAIN:
self.losses['train'] = losses
self._build_optimizer(tpu_support=True)
if not len(self.optimize_ops) == 1:
logging.error('Implementati Error: More than one optimizer defined')
logging.warning(' [*] Selecting only the first optimizer')
return tpu.TPUEstimatorSpec(
mode=mode, loss=losses[0], train_op=self.optimize_ops[0]
)
tpu_name = ['node-1'] # TODO Bring outside
tpu_iterations = 500 # TODO Bring outside
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
tpu_name)
run_config = tf.contrib.tpu.RunConfig(
model_dir=self.output_path,
cluster=tpu_cluster_resolver,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True),
tpu_config=tpu.TPUConfig(tpu_iterations),
)
self.estimator = tpu.TPUEstimator(
model_fn=_define_model,
use_tpu=True,
train_batch_size=32*4, #self.dataset['train'].batch_size,
eval_batch_size=32*4, #self.dataset['validation'].batch_size,
config=run_config,
params={"data_dir": self.data_dir}
)
def _regular_build(self):
"""Normal build for CPU/GPU usage"""
# This overwrites define_model, is that ok?
self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101
self.define_model,
create_scope_now_=True)
self.outputs = {}
self.losses = {}
self.otters = {}
def _build(mode):
outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)
self.outputs[mode] = outputs
self.losses[mode] = losses
self.otters[mode] = others
if mode == 'train':
self._build_optimizer()
# TODO Move clean and summary to proper section
self.summary_ops = {}
if self._train_model:
_build('train')
summary = []
for idx, loss in enumerate(self.losses['train']):
summary.append(
tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))
for idx, element in enumerate(self.otters['train']):
summary.append(
tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))
self.summary_ops['train'] = tf.summary.merge(summary)
if self._validate_model:
_build('validation')
summary = []
for idx, loss in enumerate(self.losses['validation']):
summary.append(
tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss))
for idx, element in enumerate(self.otters['validation']):
summary.append(
tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))
self.summary_ops['validation'] = tf.summary.merge(summary)
self.writer = tf.summary.FileWriter(self.output_path,
self.session.graph)
self.saver = tf.train.Saver()
# TODO Add routine to save
logging.info('Model construction complete.')
def _build_optimizer(self, optimizer_to_use=tf.train.AdamOptimizer, tpu_support=False):
"""Buids the optimizer(s) to minimize the loss(es) of the model.
Args:
optimizer_to_use (tf optimizer, optional): Defaults to tf.train.AdamOptimizer. Which
optimizer to use.
tpu_support (bool, optional): Defaults to False. If the optimizer has to support shard
optimier, required for TPU usage.
"""
self.optimize_ops = []
for loss in self.losses['train']: # TODO Create apropoiate external training scheme
optimize_op = optimizer_to_use(
learning_rate=self.learning_rate
)
if tpu_support:
optimize_op = tpu.CrossShardOptimizer(optimize_op)
optimize_op = optimize_op.minimize(
loss=loss,
global_step=tf.train.get_global_step()
)
self.optimize_ops.append(optimize_op)
logging.info('Optimizers built')
def train(self, steps, input_fn=None):
|
def _tpu_train(self, steps, input_fn):
# def _input_fn(params):
# featuers, labels = self.datasource['train'].input_fn(params['batch_size'])
# return featuers, labels
self.estimator.train(
input_fn=input_fn,
max_steps=steps)
logging.info('Es ist train?')
self.estimator.evaluate(
input_fn=self.dataset['validation'],
steps=steps/50
)
print("\nTest set accuracy: {accuracy:0.3f}\n".format(**eval_result))
def _regular_train(self, steps):
# Initialize or check if checkpoint # TODO add checkpoint manager
self.session.run(tf.global_variables_initializer())
initial_step = self._restore()
fetches = {}
fetches['optimize_ops'] = self.optimize_ops
# fetches['losses'] = self.losses['train']
# if self.otters['train']:
# fetches['others'] = self.otters['train']
fetches['summary_ops'] = self.summary_ops['train']
for step in range(initial_step, steps): # TODO start from checkpoint steps
# TODO clean code and optimize ops
train_out = self.session.run(fetches=fetches)
self.writer.add_summary(train_out['summary_ops'], global_step=step)
if step % 50 == 0: # TODO every how many steps? Automate?
val = self._validate(step)
logging.info('Step {} -- Validation result: {}'.format(step, val))
if step % 1000 == 0: # For now just another arbitrary number (how heavy is saving?)
self._save(step)
logging.info('Done training.')
def _validate(self, global_step):
""" Run network on validation set """
# Todo clean summaries and add example outputs
fetches = {}
fetches['losses'] = self.losses['validation']
if self.otters['train']:
fetches['others'] = self.otters['validation']
fetches['summary_ops'] = self.summary_ops['validation']
validation_out = self.session.run(fetches=fetches)
self.writer.add_summary(validation_out['summary_ops'], global_step=global_step)
del validation_out['summary_ops']
return validation_out
def _save(self, step):
"""Save the model weights.
Args:
step (int): Training step.
"""
output_path = self.output_path + '/checkpoints/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
self.saver.save(self.session, save_path=output_path,global_step=step)
def _restore(self):
"""Restore the trained variables from the last stored checkpoint
Returns:
int: The training step when this model was saved.
"""
output_path = self.output_path + '/checkpoints/'
checkpoint = tf.train.latest_checkpoint(output_path)
if checkpoint:
self.saver.restore(self.session, save_path=checkpoint)
restored_step = int(checkpoint.split('-')[-1]) # Robust enough?
return restored_step
logging.info('Starting training from scratch.')
return 0
def evaluate(self):
pass
| if self.use_tpu:
self._tpu_train(steps, input_fn)
else:
self._regular_train(steps) | identifier_body |
ext.rs | use digest::Digest;
use hmac::crypto_mac::MacError;
use hmac::{Hmac, Mac, NewMac};
use typenum::Unsigned;
use crate::arithmetic::*;
use crate::elliptic::curves::{Curve, ECScalar, Point, Scalar};
/// [Digest] extension allowing to hash elliptic points, scalars, and bigints
///
/// Can be used with any hashing algorithm that implements `Digest` traits (e.g. [Sha256](sha2::Sha256),
/// [Sha512](sha2::Sha512), etc.)
///
/// ## Example
///
/// ```rust
/// use sha2::Sha256;
/// use curv::arithmetic::*;
/// use curv::cryptographic_primitives::hashing::{Digest, DigestExt};
/// use curv::elliptic::curves::{Secp256k1, Point};
///
/// let hash = Sha256::new()
/// .chain_point(&Point::<Secp256k1>::generator())
/// .chain_point(Point::<Secp256k1>::base_point2())
/// .chain_bigint(&BigInt::from(10))
/// .result_bigint();
///
/// assert_eq!(hash, BigInt::from_hex("73764f937fbe25092466b417fa66ad9c62607865e1f8151df253aa3a2fd7599b").unwrap());
/// ```
pub trait DigestExt {
fn input_bigint(&mut self, n: &BigInt);
fn input_point<E: Curve>(&mut self, point: &Point<E>);
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn chain_point<E: Curve>(mut self, point: &Point<E>) -> Self
where
Self: Sized,
{
self.input_point(point);
self
}
fn chain_points<'p, E: Curve>(mut self, points: impl IntoIterator<Item = &'p Point<E>>) -> Self
where
Self: Sized,
{
for point in points {
self.input_point(point)
}
self
}
fn chain_scalar<E: Curve>(mut self, scalar: &Scalar<E>) -> Self
where
Self: Sized,
{
self.input_scalar(scalar);
self
}
fn chain_scalars<'s, E: Curve>(
mut self,
scalars: impl IntoIterator<Item = &'s Scalar<E>>,
) -> Self
where
Self: Sized,
{
for scalar in scalars {
self.input_scalar(scalar)
}
self
}
fn result_bigint(self) -> BigInt;
fn result_scalar<E: Curve>(self) -> Scalar<E>;
fn digest_bigint(bytes: &[u8]) -> BigInt;
}
impl<D> DigestExt for D
where
D: Digest + Clone,
{
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn input_point<E: Curve>(&mut self, point: &Point<E>) {
self.update(&point.to_bytes(false)[..])
}
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>) {
self.update(&scalar.to_bigint().to_bytes())
}
fn result_bigint(self) -> BigInt {
let result = self.finalize();
BigInt::from_bytes(&result)
}
fn result_scalar<E: Curve>(self) -> Scalar<E> {
let scalar_len = <<E::Scalar as ECScalar>::ScalarLength as Unsigned>::to_usize();
assert!(
Self::output_size() >= scalar_len,
"Output size of the hash({}) is smaller than the scalar length({})",
Self::output_size(),
scalar_len
);
// Try and increment.
for i in 0u32.. {
let starting_state = self.clone();
let hash = starting_state.chain(i.to_be_bytes()).finalize();
if let Ok(scalar) = Scalar::from_bytes(&hash[..scalar_len]) |
}
unreachable!("The probably of this reaching is extremely small ((2^n-q)/(2^n))^(2^32)")
}
fn digest_bigint(bytes: &[u8]) -> BigInt {
Self::new().chain(bytes).result_bigint()
}
}
/// [Hmac] extension allowing to use bigints to instantiate hmac, update, and finalize it.
pub trait HmacExt: Sized {
fn new_bigint(key: &BigInt) -> Self;
fn input_bigint(&mut self, n: &BigInt);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn result_bigint(self) -> BigInt;
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError>;
}
impl<D> HmacExt for Hmac<D>
where
D: digest::Update + digest::BlockInput + digest::FixedOutput + digest::Reset + Default + Clone,
{
fn new_bigint(key: &BigInt) -> Self {
let bytes = key.to_bytes();
Self::new_from_slice(&bytes).expect("HMAC must take a key of any length")
}
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn result_bigint(self) -> BigInt {
BigInt::from_bytes(&self.finalize().into_bytes())
}
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError> {
self.verify(&code.to_bytes())
}
}
#[cfg(test)]
mod test {
use sha2::{Sha256, Sha512};
use super::*;
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
#[test]
fn vector_sha256_test() {
// Empty Message
let result: BigInt = Sha256::new().result_bigint();
assert_eq!(
result.to_hex(),
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
);
// 256 bit message
let result: BigInt = Sha256::new()
.chain_bigint(
&BigInt::from_hex(
"09fc1accc230a205e4a208e64a8f204291f581a12756392da4b8c0cf5ef02b95",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 2x128 bit messages
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("09fc1accc230a205e4a208e64a8f2042").unwrap())
.chain_bigint(&BigInt::from_hex("91f581a12756392da4b8c0cf5ef02b95").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 512 bit message
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("5a86b737eaea8ee976a0a24da63e7ed7eefad18a101c1211e2b3650c5187c2a8a650547208251f6d4237e661c7bf4c77f335390394c37fa1a9f9be836ac28509").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"42e61e174fbb3897d6dd6cef3dd2802fe67b331953b06114a65c772859dfc1aa"
);
}
#[test]
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
fn vector_sha512_test() {
// Empty message
let result: BigInt = Sha512::new().result_bigint();
assert_eq!(
result.to_hex(),
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
);
// 2x256 bit message
let result: BigInt = Sha512::new()
.chain_bigint(
&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f7",
)
.unwrap(),
)
.chain_bigint(
&BigInt::from_hex(
"9d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 512 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f79d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 1024 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex("fd2203e467574e834ab07c9097ae164532f24be1eb5d88f1af7748ceff0d2c67a21f4e4097f9d3bb4e9fbf97186e0db6db0100230a52b453d421f8ab9c9a6043aa3295ea20d2f06a2f37470d8a99075f1b8a8336f6228cf08b5942fc1fb4299c7d2480e8e82bce175540bdfad7752bc95b577f229515394f3ae5cec870a4b2f8").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"a21b1077d52b27ac545af63b32746c6e3c51cb0cb9f281eb9f3580a6d4996d5c9917d2a6e484627a9d5a06fa1b25327a9d710e027387fc3e07d7c4d14c6086cc"
);
}
crate::test_for_all_curves!(create_sha512_from_ge_test);
fn create_sha256_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha256::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
crate::test_for_all_curves!(create_sha256_from_ge_test);
fn create_sha512_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha512::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
#[test]
fn create_hmac_test() {
let key = BigInt::sample(512);
let result1 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert!(Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.verify_bigint(&result1)
.is_ok());
let key2 = BigInt::sample(512);
// same data , different key
let result2 = Hmac::<Sha512>::new_bigint(&key2)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_ne!(result1, result2);
// same key , different data
let result3 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.chain_bigint(&BigInt::from(11))
.result_bigint();
assert_ne!(result1, result3);
// same key, same data
let result4 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_eq!(result1, result4)
}
}
| {
return scalar;
} | conditional_block |
ext.rs | use digest::Digest;
use hmac::crypto_mac::MacError;
use hmac::{Hmac, Mac, NewMac};
use typenum::Unsigned;
use crate::arithmetic::*;
use crate::elliptic::curves::{Curve, ECScalar, Point, Scalar};
/// [Digest] extension allowing to hash elliptic points, scalars, and bigints
///
/// Can be used with any hashing algorithm that implements `Digest` traits (e.g. [Sha256](sha2::Sha256),
/// [Sha512](sha2::Sha512), etc.)
///
/// ## Example
///
/// ```rust
/// use sha2::Sha256;
/// use curv::arithmetic::*;
/// use curv::cryptographic_primitives::hashing::{Digest, DigestExt};
/// use curv::elliptic::curves::{Secp256k1, Point};
///
/// let hash = Sha256::new()
/// .chain_point(&Point::<Secp256k1>::generator())
/// .chain_point(Point::<Secp256k1>::base_point2())
/// .chain_bigint(&BigInt::from(10))
/// .result_bigint();
///
/// assert_eq!(hash, BigInt::from_hex("73764f937fbe25092466b417fa66ad9c62607865e1f8151df253aa3a2fd7599b").unwrap());
/// ```
pub trait DigestExt {
fn input_bigint(&mut self, n: &BigInt);
fn input_point<E: Curve>(&mut self, point: &Point<E>);
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn chain_point<E: Curve>(mut self, point: &Point<E>) -> Self
where
Self: Sized,
{
self.input_point(point);
self
}
fn chain_points<'p, E: Curve>(mut self, points: impl IntoIterator<Item = &'p Point<E>>) -> Self
where
Self: Sized,
{
for point in points {
self.input_point(point)
}
self
}
fn chain_scalar<E: Curve>(mut self, scalar: &Scalar<E>) -> Self
where
Self: Sized,
{
self.input_scalar(scalar); | mut self,
scalars: impl IntoIterator<Item = &'s Scalar<E>>,
) -> Self
where
Self: Sized,
{
for scalar in scalars {
self.input_scalar(scalar)
}
self
}
fn result_bigint(self) -> BigInt;
fn result_scalar<E: Curve>(self) -> Scalar<E>;
fn digest_bigint(bytes: &[u8]) -> BigInt;
}
impl<D> DigestExt for D
where
D: Digest + Clone,
{
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn input_point<E: Curve>(&mut self, point: &Point<E>) {
self.update(&point.to_bytes(false)[..])
}
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>) {
self.update(&scalar.to_bigint().to_bytes())
}
fn result_bigint(self) -> BigInt {
let result = self.finalize();
BigInt::from_bytes(&result)
}
fn result_scalar<E: Curve>(self) -> Scalar<E> {
let scalar_len = <<E::Scalar as ECScalar>::ScalarLength as Unsigned>::to_usize();
assert!(
Self::output_size() >= scalar_len,
"Output size of the hash({}) is smaller than the scalar length({})",
Self::output_size(),
scalar_len
);
// Try and increment.
for i in 0u32.. {
let starting_state = self.clone();
let hash = starting_state.chain(i.to_be_bytes()).finalize();
if let Ok(scalar) = Scalar::from_bytes(&hash[..scalar_len]) {
return scalar;
}
}
unreachable!("The probably of this reaching is extremely small ((2^n-q)/(2^n))^(2^32)")
}
fn digest_bigint(bytes: &[u8]) -> BigInt {
Self::new().chain(bytes).result_bigint()
}
}
/// [Hmac] extension allowing to use bigints to instantiate hmac, update, and finalize it.
pub trait HmacExt: Sized {
fn new_bigint(key: &BigInt) -> Self;
fn input_bigint(&mut self, n: &BigInt);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn result_bigint(self) -> BigInt;
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError>;
}
impl<D> HmacExt for Hmac<D>
where
D: digest::Update + digest::BlockInput + digest::FixedOutput + digest::Reset + Default + Clone,
{
fn new_bigint(key: &BigInt) -> Self {
let bytes = key.to_bytes();
Self::new_from_slice(&bytes).expect("HMAC must take a key of any length")
}
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn result_bigint(self) -> BigInt {
BigInt::from_bytes(&self.finalize().into_bytes())
}
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError> {
self.verify(&code.to_bytes())
}
}
#[cfg(test)]
mod test {
use sha2::{Sha256, Sha512};
use super::*;
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
#[test]
fn vector_sha256_test() {
// Empty Message
let result: BigInt = Sha256::new().result_bigint();
assert_eq!(
result.to_hex(),
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
);
// 256 bit message
let result: BigInt = Sha256::new()
.chain_bigint(
&BigInt::from_hex(
"09fc1accc230a205e4a208e64a8f204291f581a12756392da4b8c0cf5ef02b95",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 2x128 bit messages
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("09fc1accc230a205e4a208e64a8f2042").unwrap())
.chain_bigint(&BigInt::from_hex("91f581a12756392da4b8c0cf5ef02b95").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 512 bit message
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("5a86b737eaea8ee976a0a24da63e7ed7eefad18a101c1211e2b3650c5187c2a8a650547208251f6d4237e661c7bf4c77f335390394c37fa1a9f9be836ac28509").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"42e61e174fbb3897d6dd6cef3dd2802fe67b331953b06114a65c772859dfc1aa"
);
}
#[test]
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
fn vector_sha512_test() {
// Empty message
let result: BigInt = Sha512::new().result_bigint();
assert_eq!(
result.to_hex(),
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
);
// 2x256 bit message
let result: BigInt = Sha512::new()
.chain_bigint(
&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f7",
)
.unwrap(),
)
.chain_bigint(
&BigInt::from_hex(
"9d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 512 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f79d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 1024 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex("fd2203e467574e834ab07c9097ae164532f24be1eb5d88f1af7748ceff0d2c67a21f4e4097f9d3bb4e9fbf97186e0db6db0100230a52b453d421f8ab9c9a6043aa3295ea20d2f06a2f37470d8a99075f1b8a8336f6228cf08b5942fc1fb4299c7d2480e8e82bce175540bdfad7752bc95b577f229515394f3ae5cec870a4b2f8").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"a21b1077d52b27ac545af63b32746c6e3c51cb0cb9f281eb9f3580a6d4996d5c9917d2a6e484627a9d5a06fa1b25327a9d710e027387fc3e07d7c4d14c6086cc"
);
}
crate::test_for_all_curves!(create_sha512_from_ge_test);
fn create_sha256_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha256::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
crate::test_for_all_curves!(create_sha256_from_ge_test);
fn create_sha512_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha512::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
#[test]
fn create_hmac_test() {
let key = BigInt::sample(512);
let result1 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert!(Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.verify_bigint(&result1)
.is_ok());
let key2 = BigInt::sample(512);
// same data , different key
let result2 = Hmac::<Sha512>::new_bigint(&key2)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_ne!(result1, result2);
// same key , different data
let result3 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.chain_bigint(&BigInt::from(11))
.result_bigint();
assert_ne!(result1, result3);
// same key, same data
let result4 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_eq!(result1, result4)
}
} | self
}
fn chain_scalars<'s, E: Curve>( | random_line_split |
ext.rs | use digest::Digest;
use hmac::crypto_mac::MacError;
use hmac::{Hmac, Mac, NewMac};
use typenum::Unsigned;
use crate::arithmetic::*;
use crate::elliptic::curves::{Curve, ECScalar, Point, Scalar};
/// [Digest] extension allowing to hash elliptic points, scalars, and bigints
///
/// Can be used with any hashing algorithm that implements `Digest` traits (e.g. [Sha256](sha2::Sha256),
/// [Sha512](sha2::Sha512), etc.)
///
/// ## Example
///
/// ```rust
/// use sha2::Sha256;
/// use curv::arithmetic::*;
/// use curv::cryptographic_primitives::hashing::{Digest, DigestExt};
/// use curv::elliptic::curves::{Secp256k1, Point};
///
/// let hash = Sha256::new()
/// .chain_point(&Point::<Secp256k1>::generator())
/// .chain_point(Point::<Secp256k1>::base_point2())
/// .chain_bigint(&BigInt::from(10))
/// .result_bigint();
///
/// assert_eq!(hash, BigInt::from_hex("73764f937fbe25092466b417fa66ad9c62607865e1f8151df253aa3a2fd7599b").unwrap());
/// ```
pub trait DigestExt {
fn input_bigint(&mut self, n: &BigInt);
fn input_point<E: Curve>(&mut self, point: &Point<E>);
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn chain_point<E: Curve>(mut self, point: &Point<E>) -> Self
where
Self: Sized,
{
self.input_point(point);
self
}
fn chain_points<'p, E: Curve>(mut self, points: impl IntoIterator<Item = &'p Point<E>>) -> Self
where
Self: Sized,
{
for point in points {
self.input_point(point)
}
self
}
fn chain_scalar<E: Curve>(mut self, scalar: &Scalar<E>) -> Self
where
Self: Sized,
{
self.input_scalar(scalar);
self
}
fn chain_scalars<'s, E: Curve>(
mut self,
scalars: impl IntoIterator<Item = &'s Scalar<E>>,
) -> Self
where
Self: Sized,
{
for scalar in scalars {
self.input_scalar(scalar)
}
self
}
fn result_bigint(self) -> BigInt;
fn result_scalar<E: Curve>(self) -> Scalar<E>;
fn digest_bigint(bytes: &[u8]) -> BigInt;
}
impl<D> DigestExt for D
where
D: Digest + Clone,
{
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn input_point<E: Curve>(&mut self, point: &Point<E>) {
self.update(&point.to_bytes(false)[..])
}
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>) {
self.update(&scalar.to_bigint().to_bytes())
}
fn result_bigint(self) -> BigInt {
let result = self.finalize();
BigInt::from_bytes(&result)
}
fn result_scalar<E: Curve>(self) -> Scalar<E> {
let scalar_len = <<E::Scalar as ECScalar>::ScalarLength as Unsigned>::to_usize();
assert!(
Self::output_size() >= scalar_len,
"Output size of the hash({}) is smaller than the scalar length({})",
Self::output_size(),
scalar_len
);
// Try and increment.
for i in 0u32.. {
let starting_state = self.clone();
let hash = starting_state.chain(i.to_be_bytes()).finalize();
if let Ok(scalar) = Scalar::from_bytes(&hash[..scalar_len]) {
return scalar;
}
}
unreachable!("The probably of this reaching is extremely small ((2^n-q)/(2^n))^(2^32)")
}
fn digest_bigint(bytes: &[u8]) -> BigInt {
Self::new().chain(bytes).result_bigint()
}
}
/// [Hmac] extension allowing to use bigints to instantiate hmac, update, and finalize it.
pub trait HmacExt: Sized {
fn new_bigint(key: &BigInt) -> Self;
fn input_bigint(&mut self, n: &BigInt);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn result_bigint(self) -> BigInt;
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError>;
}
impl<D> HmacExt for Hmac<D>
where
D: digest::Update + digest::BlockInput + digest::FixedOutput + digest::Reset + Default + Clone,
{
fn new_bigint(key: &BigInt) -> Self {
let bytes = key.to_bytes();
Self::new_from_slice(&bytes).expect("HMAC must take a key of any length")
}
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn result_bigint(self) -> BigInt {
BigInt::from_bytes(&self.finalize().into_bytes())
}
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError> {
self.verify(&code.to_bytes())
}
}
#[cfg(test)]
mod test {
use sha2::{Sha256, Sha512};
use super::*;
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
#[test]
fn vector_sha256_test() {
// Empty Message
let result: BigInt = Sha256::new().result_bigint();
assert_eq!(
result.to_hex(),
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
);
// 256 bit message
let result: BigInt = Sha256::new()
.chain_bigint(
&BigInt::from_hex(
"09fc1accc230a205e4a208e64a8f204291f581a12756392da4b8c0cf5ef02b95",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 2x128 bit messages
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("09fc1accc230a205e4a208e64a8f2042").unwrap())
.chain_bigint(&BigInt::from_hex("91f581a12756392da4b8c0cf5ef02b95").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 512 bit message
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("5a86b737eaea8ee976a0a24da63e7ed7eefad18a101c1211e2b3650c5187c2a8a650547208251f6d4237e661c7bf4c77f335390394c37fa1a9f9be836ac28509").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"42e61e174fbb3897d6dd6cef3dd2802fe67b331953b06114a65c772859dfc1aa"
);
}
#[test]
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
fn vector_sha512_test() {
// Empty message
let result: BigInt = Sha512::new().result_bigint();
assert_eq!(
result.to_hex(),
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
);
// 2x256 bit message
let result: BigInt = Sha512::new()
.chain_bigint(
&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f7",
)
.unwrap(),
)
.chain_bigint(
&BigInt::from_hex(
"9d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 512 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f79d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 1024 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex("fd2203e467574e834ab07c9097ae164532f24be1eb5d88f1af7748ceff0d2c67a21f4e4097f9d3bb4e9fbf97186e0db6db0100230a52b453d421f8ab9c9a6043aa3295ea20d2f06a2f37470d8a99075f1b8a8336f6228cf08b5942fc1fb4299c7d2480e8e82bce175540bdfad7752bc95b577f229515394f3ae5cec870a4b2f8").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"a21b1077d52b27ac545af63b32746c6e3c51cb0cb9f281eb9f3580a6d4996d5c9917d2a6e484627a9d5a06fa1b25327a9d710e027387fc3e07d7c4d14c6086cc"
);
}
crate::test_for_all_curves!(create_sha512_from_ge_test);
fn | <E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha256::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
crate::test_for_all_curves!(create_sha256_from_ge_test);
fn create_sha512_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha512::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
#[test]
fn create_hmac_test() {
let key = BigInt::sample(512);
let result1 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert!(Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.verify_bigint(&result1)
.is_ok());
let key2 = BigInt::sample(512);
// same data , different key
let result2 = Hmac::<Sha512>::new_bigint(&key2)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_ne!(result1, result2);
// same key , different data
let result3 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.chain_bigint(&BigInt::from(11))
.result_bigint();
assert_ne!(result1, result3);
// same key, same data
let result4 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_eq!(result1, result4)
}
}
| create_sha256_from_ge_test | identifier_name |
ext.rs | use digest::Digest;
use hmac::crypto_mac::MacError;
use hmac::{Hmac, Mac, NewMac};
use typenum::Unsigned;
use crate::arithmetic::*;
use crate::elliptic::curves::{Curve, ECScalar, Point, Scalar};
/// [Digest] extension allowing to hash elliptic points, scalars, and bigints
///
/// Can be used with any hashing algorithm that implements `Digest` traits (e.g. [Sha256](sha2::Sha256),
/// [Sha512](sha2::Sha512), etc.)
///
/// ## Example
///
/// ```rust
/// use sha2::Sha256;
/// use curv::arithmetic::*;
/// use curv::cryptographic_primitives::hashing::{Digest, DigestExt};
/// use curv::elliptic::curves::{Secp256k1, Point};
///
/// let hash = Sha256::new()
/// .chain_point(&Point::<Secp256k1>::generator())
/// .chain_point(Point::<Secp256k1>::base_point2())
/// .chain_bigint(&BigInt::from(10))
/// .result_bigint();
///
/// assert_eq!(hash, BigInt::from_hex("73764f937fbe25092466b417fa66ad9c62607865e1f8151df253aa3a2fd7599b").unwrap());
/// ```
pub trait DigestExt {
fn input_bigint(&mut self, n: &BigInt);
fn input_point<E: Curve>(&mut self, point: &Point<E>);
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn chain_point<E: Curve>(mut self, point: &Point<E>) -> Self
where
Self: Sized,
{
self.input_point(point);
self
}
fn chain_points<'p, E: Curve>(mut self, points: impl IntoIterator<Item = &'p Point<E>>) -> Self
where
Self: Sized,
{
for point in points {
self.input_point(point)
}
self
}
fn chain_scalar<E: Curve>(mut self, scalar: &Scalar<E>) -> Self
where
Self: Sized,
{
self.input_scalar(scalar);
self
}
fn chain_scalars<'s, E: Curve>(
mut self,
scalars: impl IntoIterator<Item = &'s Scalar<E>>,
) -> Self
where
Self: Sized,
{
for scalar in scalars {
self.input_scalar(scalar)
}
self
}
fn result_bigint(self) -> BigInt;
fn result_scalar<E: Curve>(self) -> Scalar<E>;
fn digest_bigint(bytes: &[u8]) -> BigInt;
}
impl<D> DigestExt for D
where
D: Digest + Clone,
{
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn input_point<E: Curve>(&mut self, point: &Point<E>) {
self.update(&point.to_bytes(false)[..])
}
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>) {
self.update(&scalar.to_bigint().to_bytes())
}
fn result_bigint(self) -> BigInt {
let result = self.finalize();
BigInt::from_bytes(&result)
}
fn result_scalar<E: Curve>(self) -> Scalar<E> {
let scalar_len = <<E::Scalar as ECScalar>::ScalarLength as Unsigned>::to_usize();
assert!(
Self::output_size() >= scalar_len,
"Output size of the hash({}) is smaller than the scalar length({})",
Self::output_size(),
scalar_len
);
// Try and increment.
for i in 0u32.. {
let starting_state = self.clone();
let hash = starting_state.chain(i.to_be_bytes()).finalize();
if let Ok(scalar) = Scalar::from_bytes(&hash[..scalar_len]) {
return scalar;
}
}
unreachable!("The probably of this reaching is extremely small ((2^n-q)/(2^n))^(2^32)")
}
fn digest_bigint(bytes: &[u8]) -> BigInt |
}
/// [Hmac] extension allowing to use bigints to instantiate hmac, update, and finalize it.
pub trait HmacExt: Sized {
fn new_bigint(key: &BigInt) -> Self;
fn input_bigint(&mut self, n: &BigInt);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn result_bigint(self) -> BigInt;
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError>;
}
impl<D> HmacExt for Hmac<D>
where
D: digest::Update + digest::BlockInput + digest::FixedOutput + digest::Reset + Default + Clone,
{
fn new_bigint(key: &BigInt) -> Self {
let bytes = key.to_bytes();
Self::new_from_slice(&bytes).expect("HMAC must take a key of any length")
}
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn result_bigint(self) -> BigInt {
BigInt::from_bytes(&self.finalize().into_bytes())
}
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError> {
self.verify(&code.to_bytes())
}
}
#[cfg(test)]
mod test {
use sha2::{Sha256, Sha512};
use super::*;
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
#[test]
fn vector_sha256_test() {
// Empty Message
let result: BigInt = Sha256::new().result_bigint();
assert_eq!(
result.to_hex(),
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
);
// 256 bit message
let result: BigInt = Sha256::new()
.chain_bigint(
&BigInt::from_hex(
"09fc1accc230a205e4a208e64a8f204291f581a12756392da4b8c0cf5ef02b95",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 2x128 bit messages
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("09fc1accc230a205e4a208e64a8f2042").unwrap())
.chain_bigint(&BigInt::from_hex("91f581a12756392da4b8c0cf5ef02b95").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 512 bit message
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("5a86b737eaea8ee976a0a24da63e7ed7eefad18a101c1211e2b3650c5187c2a8a650547208251f6d4237e661c7bf4c77f335390394c37fa1a9f9be836ac28509").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"42e61e174fbb3897d6dd6cef3dd2802fe67b331953b06114a65c772859dfc1aa"
);
}
#[test]
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
fn vector_sha512_test() {
// Empty message
let result: BigInt = Sha512::new().result_bigint();
assert_eq!(
result.to_hex(),
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
);
// 2x256 bit message
let result: BigInt = Sha512::new()
.chain_bigint(
&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f7",
)
.unwrap(),
)
.chain_bigint(
&BigInt::from_hex(
"9d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 512 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f79d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 1024 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex("fd2203e467574e834ab07c9097ae164532f24be1eb5d88f1af7748ceff0d2c67a21f4e4097f9d3bb4e9fbf97186e0db6db0100230a52b453d421f8ab9c9a6043aa3295ea20d2f06a2f37470d8a99075f1b8a8336f6228cf08b5942fc1fb4299c7d2480e8e82bce175540bdfad7752bc95b577f229515394f3ae5cec870a4b2f8").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"a21b1077d52b27ac545af63b32746c6e3c51cb0cb9f281eb9f3580a6d4996d5c9917d2a6e484627a9d5a06fa1b25327a9d710e027387fc3e07d7c4d14c6086cc"
);
}
crate::test_for_all_curves!(create_sha512_from_ge_test);
fn create_sha256_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha256::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
crate::test_for_all_curves!(create_sha256_from_ge_test);
fn create_sha512_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha512::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
#[test]
fn create_hmac_test() {
let key = BigInt::sample(512);
let result1 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert!(Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.verify_bigint(&result1)
.is_ok());
let key2 = BigInt::sample(512);
// same data , different key
let result2 = Hmac::<Sha512>::new_bigint(&key2)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_ne!(result1, result2);
// same key , different data
let result3 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.chain_bigint(&BigInt::from(11))
.result_bigint();
assert_ne!(result1, result3);
// same key, same data
let result4 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_eq!(result1, result4)
}
}
| {
Self::new().chain(bytes).result_bigint()
} | identifier_body |
oldlib.rs | extern crate byteorder;
// use byteorder::{WriteBytesExt, LE};
// use std::io::{self, Cursor, Read, Write};
pub const MAX_SAMPLES_PER_FRAME: usize = 1152 * 2;
/// More than ISO spec's
pub const MAX_FREE_FORMAT_FRAME_SIZE: usize = 2304;
pub const MAX_FRAME_SYNC_MATCHES: usize = 10;
/// MUST be >= 320000/8/32000*1152 = 1440
pub const MAX_L3_FRAME_PAYLOAD_BYTES: usize = MAX_FREE_FORMAT_FRAME_SIZE;
pub const MAX_BITRESERVOIR_BYTES: usize = 511;
pub const SHORT_BLOCK_TYPE: usize = 2;
pub const STOP_BLOCK_TYPE: usize = 3;
pub const MODE_MONO: usize = 3;
pub const MODE_JOINT_STEREO: usize = 1;
pub const HDR_SIZE: usize = 4;
pub mod corrode_test;
pub fn hdr_is_mono(h: &[u8]) -> bool {
// TODO: Might be nicer ways to do these bit-tests
(h[3] & 0xC0) == 0xC0
}
pub fn hdr_is_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0xE0) == 0x60
}
pub fn hdr_is_free_format(h: &[u8]) -> bool {
(h[2] & 0xF0) == 0
}
pub fn hdr_is_crc(h: &[u8]) -> bool {
// TODO: Double-check
(h[1] & 1) == 0
}
pub fn | (h: &[u8]) -> bool {
(h[2] & 0x2) != 0
}
pub fn hdr_test_mpeg1(h: &[u8]) -> bool {
(h[1] & 0x08) != 0
}
pub fn hdr_test_not_mpeg25(h: &[u8]) -> bool {
(h[1] & 0x10) != 0
}
pub fn hdr_test_i_stereo(h: &[u8]) -> bool {
(h[3] & 0x10) != 0
}
pub fn hdr_test_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0x20) != 0
}
pub fn hdr_get_stereo_mode(h: &[u8]) -> u8 {
((h[3] >> 6) & 3)
}
pub fn hdr_get_stereo_mode_ext(h: &[u8]) -> u8 {
((h[3] >> 4) & 3)
}
pub fn hdr_get_layer(h: &[u8]) -> u8 {
((h[1] >> 1) & 3)
}
pub fn hdr_get_bitrate(h: &[u8]) -> u8 {
(h[2] >> 4)
}
pub fn hdr_get_sample_rate(h: &[u8]) -> u8 {
((h[2] >> 2) & 3)
}
pub fn hdr_is_frame_576(h: &[u8]) -> bool {
(h[1] & 14) == 2
}
pub fn hdr_is_layer_1(h: &[u8]) -> bool {
(h[1] & 6) == 6
}
pub const BITS_DEQUANTIZER_OUT: i32 = -1;
pub const MAX_SCF: i32 = 255 + BITS_DEQUANTIZER_OUT * 4 - 210;
pub const MAX_SCFI: i32 = (MAX_SCF + 3) & !3;
pub struct FrameInfo {
pub frame_bytes: i32,
pub channels: i32,
pub hz: i32,
pub layers: i32,
pub bitrate_kbps: i32,
}
pub struct Mp3Dec {
pub mdct_overlap: [[f32; 2]; 9 * 32],
pub qmf_state: [f32; 15 * 2 * 32],
pub reserv: i32,
pub free_format_bytes: i32,
pub header: [u8; 4],
pub reserv_buf: [u8; 511],
}
// TODO: float vs. int16 output?
// type Mp3Sample = i16;
// pub fn decode_frame(
// dec: &Mp3Dec,
// mp3: &[u8],
// mp3_bytes: usize,
// pcm: &[Mp3Sample],
// info: &FrameInfo,
// ) -> i32 {
// 0
// }
pub struct Bs {
pub buf: Vec<u8>,
pub pos: usize,
pub limit: usize,
}
pub struct L12ScaleInfo {
pub scf: [f32; 3 * 64],
pub total_bands: u8,
pub stereo_bands: u8,
pub bitalloc: [u8; 64],
pub scfcod: [u8; 64],
}
pub struct L12SubbandAlloc {
pub tab_offset: u8,
pub code_tab_width: u8,
pub band_count: u8,
}
pub struct L3GrInfo {
pub sfbtab: Vec<u8>,
pub part_23_length: u16,
pub big_values: u16,
pub scalefac_compress: u16,
pub global_gain: u8,
pub block_type: u8,
pub mixed_block_flag: u8,
pub n_long_sfb: u8,
pub n_short_sfb: u8,
pub table_select: [u8; 3],
pub region_count: [u8; 3],
pub subblock_gain: [u8; 3],
pub preflag: u8,
pub scalefac_scale: u8,
pub count1_table: u8,
pub scfsi: u8,
}
pub struct Mp3DecScratch {
pub bs: Bs,
pub maindata: [u8; MAX_BITRESERVOIR_BYTES + MAX_L3_FRAME_PAYLOAD_BYTES],
pub gr_info: [L3GrInfo; 3],
pub grbuf: [[f32; 576]; 2],
pub scf: [f32; 40],
pub syn: [[f32; 2 * 32]; 18 + 15],
pub ist_pos: [[u8; 39]; 2],
}
impl Bs {
pub fn new(data: Vec<u8>, bytes: usize) -> Self {
Self {
buf: data,
pos: 0,
limit: bytes * 8,
}
}
/// Heckin... this is way more complicated than it
/// needs to be here...
pub fn get_bits(&mut self, n: u32) -> u32 {
let mut next: u32;
let mut cache: u32 = 0;
let s = (self.pos & 7) as u32;
let mut shl: i32 = n as i32 + s as i32;
let mut p = self.pos as u32 / 8;
if self.pos + (n as usize) > self.limit {
return 0;
}
self.pos += n as usize;
p += 1;
next = p & (255 >> s);
while shl > 0 {
shl -= 8;
cache |= next << shl;
next = p;
p += 1;
}
return cache | (next >> -shl);
}
}
/*
pub fn hdr_valid(h: &[u8]) -> bool {
h[0] == 0xFF
&& ((h[1] & 0xF0) == 0xF0 || (h[1] & 0xFE) == 0xE2)
&& hdr_get_layer(h) != 0
&& hdr_get_bitrate(h) != 15
&& hdr_get_sample_rate(h) != 3
}
pub fn hdr_compare(h1: &[u8], h2: &[u8]) -> bool {
hdr_valid(h2)
&& ((h1[1] ^ h2[1]) & 0xFE) == 0
&& ((h1[2] ^ h2[2]) & 0x0C) == 0
&& !(hdr_is_free_format(h1) ^ hdr_is_free_format(h2))
}
pub fn hdr_bitrate_kbps(h: &[u8]) -> u32 {
let halfrate: [[[u32; 15]; 3]; 2] = [
[
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 16, 24, 28, 32, 40, 48, 56, 64, 72, 80, 88, 96, 112, 128],
],
[
[0, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160],
[
0, 16, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192,
],
[
0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224,
],
],
];
2 * halfrate[hdr_test_mpeg1(h) as usize][hdr_get_layer(h) as usize - 1]
[hdr_get_bitrate(h) as usize]
}
pub fn hdr_sample_rate_hz(h: &[u8]) -> u32 {
let g_hz: [u32; 3] = [44100, 48000, 32000];
g_hz[hdr_get_sample_rate(h) as usize]
>> (!hdr_test_mpeg1(h)) as u32
>> (!hdr_test_not_mpeg25(h)) as u32
}
pub fn hdr_frame_samples(h: &[u8]) -> u32 {
if hdr_is_layer_1(h) {
384
} else {
1152 >> (hdr_is_frame_576(h) as i32)
}
}
pub fn hdr_frame_bytes(h: &[u8], free_format_size: u32) -> u32 {
let mut frame_bytes = hdr_frame_samples(h) * hdr_bitrate_kbps(h) * 125 / hdr_sample_rate_hz(h);
if hdr_is_layer_1(h) {
// Slot align
frame_bytes &= !3;
}
if frame_bytes != 0 {
frame_bytes
} else {
free_format_size
}
}
pub fn hdr_padding(h: &[u8]) -> u32 {
if hdr_test_padding(h) {
if hdr_is_layer_1(h) {
4
} else {
1
}
} else {
0
}
}
pub fn L12_subband_alloc_table(hdr: &[u8], sci: &mut L12ScaleInfo) -> Vec<L12SubbandAlloc> {
let mode = hdr_get_stereo_mode(hdr) as usize;
let mut nbands;
let mut alloc: Vec<L12SubbandAlloc> = vec![];
let stereo_bands = if mode == MODE_MONO {
0
} else if mode == MODE_JOINT_STEREO {
(hdr_get_stereo_mode_ext(hdr) << 2) + 4
} else {
32
};
if hdr_is_layer_1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 76,
code_tab_width: 4,
band_count: 32,
});
nbands = 32;
} else if !hdr_test_mpeg1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 60,
code_tab_width: 4,
band_count: 4,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 7,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 2,
band_count: 19,
});
nbands = 30;
} else {
let sample_rate_idx = hdr_get_sample_rate(hdr);
// TODO: Clean up this comparison
let mut kbps = hdr_bitrate_kbps(hdr) >> ((mode != MODE_MONO) as u32);
if kbps == 0 {
kbps = 192;
}
alloc.push(L12SubbandAlloc {
tab_offset: 0,
code_tab_width: 4,
band_count: 3,
});
alloc.push(L12SubbandAlloc {
tab_offset: 16,
code_tab_width: 4,
band_count: 8,
});
alloc.push(L12SubbandAlloc {
tab_offset: 32,
code_tab_width: 3,
band_count: 12,
});
alloc.push(L12SubbandAlloc {
tab_offset: 40,
code_tab_width: 2,
band_count: 7,
});
nbands = 27;
if kbps < 56 {
alloc.clear();
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 4,
band_count: 2,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 10,
});
nbands = if sample_rate_idx == 2 { 12 } else { 8 };
} else if (kbps >= 96 && sample_rate_idx != 1) {
// TODO: sigh, and possibly weep.
// I think this basically just chops off the last few
// entries in the alloc defined above the previous if
// statement.
nbands = 30;
}
}
sci.total_bands = nbands;
sci.stereo_bands = u8::min(stereo_bands, nbands);
alloc
}
pub fn L12_read_scalefactors(bs: &mut Bs, pba: &[u8], scfcod: &[u8], bands: usize, scf: &mut [f32]) {
// TODO: The C version uses macros to build this array statically,
// which is a PITA so for now we just do it the simple and slower way.
let mut g_deq_L12: Vec<f32> = vec![];
{
let mut DQ = |x: f32| {
g_deq_L12.push(9.53674316e-07 / x);
g_deq_L12.push(7.56931807e-07 / x);
g_deq_L12.push(6.00777173e-07 / x);
};
DQ(3.0);
DQ(7.0);
DQ(15.0);
DQ(31.0);
DQ(63.0);
DQ(127.0);
DQ(255.0);
DQ(511.0);
DQ(1023.0);
DQ(2047.0);
DQ(4095.0);
DQ(8191.0);
DQ(16383.0);
DQ(32767.0);
DQ(65535.0);
DQ(3.0);
DQ(5.0);
DQ(9.0);
}
let mut scf_idx = 0;
for i in 0..bands {
let ba = pba[i];
let mask = if ba != 0 {
4 + ((19 >> scfcod[i]) & 3)
} else {
0
};
let mut m = 4;
while m != 0 {
let s;
if (mask & m) != 0 {
let b = bs.get_bits(6);
let idx = (ba as u32 * 3 - 6 + b % 3) as usize;
s = g_deq_L12[idx] * (1 << 21 >> (b / 3)) as f32;
} else {
s = 0.0;
}
// TODO: Check the post and pre-increment order here!!!
scf[scf_idx] = s;
scf_idx += 1;
}
}
}
pub fn L12_read_scale_info(hdr: &[u8], bs: &mut Bs, sci: &mut L12ScaleInfo) {
let g_bitalloc_code_tab: &[u8] = &[
0, 17, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 16, 0, 17, 18, 3, 19, 4, 5, 16, 0, 17, 18, 16, 0, 17, 18, 19, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
];
let subband_alloc = L12_subband_alloc_table(hdr, sci);
let mut subband_alloc_idx = 0;
let mut k: usize = 0;
let mut ba_bits = 0;
let mut ba_code_tab_idx: usize = 0;
for i in 0..(sci.total_bands as usize) {
let ba: u8;
if i == k {
let sb = &subband_alloc[subband_alloc_idx];
k += sb.band_count as usize;
ba_bits = sb.code_tab_width;
ba_code_tab_idx = sb.tab_offset as usize;
subband_alloc_idx += 1;
}
let ba_idx: usize = ba_code_tab_idx + (bs.get_bits(ba_bits as u32) as usize);
ba = g_bitalloc_code_tab[ba_idx];
sci.bitalloc[2 * i + 1] = if sci.stereo_bands != 0 { ba } else { 0 };
}
for i in 0..(2 * sci.total_bands as usize) {
sci.scfcod[i] = if sci.bitalloc[i] != 0 {
if hdr_is_layer_1(hdr) {
2
} else {
bs.get_bits(2) as u8
}
} else {
6
};
}
L12_read_scalefactors(
bs,
&sci.bitalloc,
&sci.scfcod,
(sci.total_bands * 2) as usize,
&mut sci.scf,
);
// TODO: This clear can probably be better.
for i in sci.stereo_bands..sci.total_bands {
let i = i as usize;
sci.bitalloc[2 * i + 1] = 0;
}
}
*/
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
/*
pub fn wav_header(hz: i32, ch: i16, bips: i32, data_bytes: i32) -> [u8;44] {
// let buffer: &mut [u8;44] = b"RIFFsizeWAVEfmt \x10\x00\x00\x00\x01\x00ch_hz_abpsbabsdatasize";
let mut buffer: [u8;44] = [0;44];
{
let mut c = Cursor::new(&mut buffer[..]);
let size = 44 + data_bytes - 8; // File size - 8
let avg_bytes_per_sec: u64 = bips as u64 * ch as u64 * hz as u64 / 8;
let block_align = bips as u64 * ch as u64 / 8;
// TODO: This alllll needs double checking.
c.write(b"RIFF"); // 0x00 (offset)
c.write_i32::<LE>(size); // 0x04
c.write(b"WAVE"); // 0x08
c.write(b"fmt "); // 0x0C
c.write(b"\x10\x00\x00\x00"); // 0x10
c.write_i16::<LE>(1); // 0x14 -- Integer PCM file format.
c.write_i16::<LE>(ch); // 0x16
c.write_i32::<LE>(hz); // 0x18
c.write_i32::<LE>(avg_bytes_per_sec as i32); // 0x1C -- TODO, better casts
c.write_i16::<LE>(block_align as i16); // 0x20 -- TODO, better casts
c.write_i16::<LE>(bips as i16); // 0x22 -- TODO, better casts
c.write(b"data"); // 0x24
c.write_i32::<LE>(data_bytes); // 0x28
}
buffer
}
/// This shouldn't really be necessary in Rust, I think, since it just
/// reads from the file. Not gonna try factoring it out right now though.
pub fn preload(mut file: impl Read, buf: &mut Vec<u8>) -> io::Result<usize> {
file.read_to_end(buf)
}
*/
}
| hdr_test_padding | identifier_name |
oldlib.rs | extern crate byteorder;
// use byteorder::{WriteBytesExt, LE};
// use std::io::{self, Cursor, Read, Write};
pub const MAX_SAMPLES_PER_FRAME: usize = 1152 * 2;
/// More than ISO spec's
pub const MAX_FREE_FORMAT_FRAME_SIZE: usize = 2304;
pub const MAX_FRAME_SYNC_MATCHES: usize = 10;
/// MUST be >= 320000/8/32000*1152 = 1440
pub const MAX_L3_FRAME_PAYLOAD_BYTES: usize = MAX_FREE_FORMAT_FRAME_SIZE;
pub const MAX_BITRESERVOIR_BYTES: usize = 511;
pub const SHORT_BLOCK_TYPE: usize = 2;
pub const STOP_BLOCK_TYPE: usize = 3;
pub const MODE_MONO: usize = 3;
pub const MODE_JOINT_STEREO: usize = 1;
pub const HDR_SIZE: usize = 4;
pub mod corrode_test;
pub fn hdr_is_mono(h: &[u8]) -> bool {
// TODO: Might be nicer ways to do these bit-tests
(h[3] & 0xC0) == 0xC0
}
pub fn hdr_is_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0xE0) == 0x60
}
pub fn hdr_is_free_format(h: &[u8]) -> bool {
(h[2] & 0xF0) == 0
}
pub fn hdr_is_crc(h: &[u8]) -> bool {
// TODO: Double-check
(h[1] & 1) == 0
}
pub fn hdr_test_padding(h: &[u8]) -> bool {
(h[2] & 0x2) != 0
}
pub fn hdr_test_mpeg1(h: &[u8]) -> bool {
(h[1] & 0x08) != 0
}
pub fn hdr_test_not_mpeg25(h: &[u8]) -> bool {
(h[1] & 0x10) != 0
}
pub fn hdr_test_i_stereo(h: &[u8]) -> bool {
(h[3] & 0x10) != 0
}
pub fn hdr_test_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0x20) != 0
}
pub fn hdr_get_stereo_mode(h: &[u8]) -> u8 {
((h[3] >> 6) & 3)
}
pub fn hdr_get_stereo_mode_ext(h: &[u8]) -> u8 {
((h[3] >> 4) & 3)
}
pub fn hdr_get_layer(h: &[u8]) -> u8 {
((h[1] >> 1) & 3)
}
pub fn hdr_get_bitrate(h: &[u8]) -> u8 {
(h[2] >> 4)
}
pub fn hdr_get_sample_rate(h: &[u8]) -> u8 {
((h[2] >> 2) & 3)
}
pub fn hdr_is_frame_576(h: &[u8]) -> bool {
(h[1] & 14) == 2
}
pub fn hdr_is_layer_1(h: &[u8]) -> bool {
(h[1] & 6) == 6
}
pub const BITS_DEQUANTIZER_OUT: i32 = -1;
pub const MAX_SCF: i32 = 255 + BITS_DEQUANTIZER_OUT * 4 - 210;
pub const MAX_SCFI: i32 = (MAX_SCF + 3) & !3;
pub struct FrameInfo {
pub frame_bytes: i32,
pub channels: i32,
pub hz: i32,
pub layers: i32,
pub bitrate_kbps: i32,
}
pub struct Mp3Dec {
pub mdct_overlap: [[f32; 2]; 9 * 32],
pub qmf_state: [f32; 15 * 2 * 32],
pub reserv: i32,
pub free_format_bytes: i32,
pub header: [u8; 4],
pub reserv_buf: [u8; 511],
}
// TODO: float vs. int16 output?
// type Mp3Sample = i16;
// pub fn decode_frame(
// dec: &Mp3Dec,
// mp3: &[u8],
// mp3_bytes: usize,
// pcm: &[Mp3Sample],
// info: &FrameInfo,
// ) -> i32 {
// 0
// }
pub struct Bs {
pub buf: Vec<u8>,
pub pos: usize,
pub limit: usize,
}
pub struct L12ScaleInfo {
pub scf: [f32; 3 * 64],
pub total_bands: u8,
pub stereo_bands: u8,
pub bitalloc: [u8; 64],
pub scfcod: [u8; 64],
}
pub struct L12SubbandAlloc {
pub tab_offset: u8,
pub code_tab_width: u8,
pub band_count: u8,
}
pub struct L3GrInfo {
pub sfbtab: Vec<u8>,
pub part_23_length: u16,
pub big_values: u16,
pub scalefac_compress: u16,
pub global_gain: u8,
pub block_type: u8,
pub mixed_block_flag: u8,
pub n_long_sfb: u8,
pub n_short_sfb: u8,
pub table_select: [u8; 3],
pub region_count: [u8; 3],
pub subblock_gain: [u8; 3],
pub preflag: u8,
pub scalefac_scale: u8,
pub count1_table: u8,
pub scfsi: u8,
}
pub struct Mp3DecScratch {
pub bs: Bs,
pub maindata: [u8; MAX_BITRESERVOIR_BYTES + MAX_L3_FRAME_PAYLOAD_BYTES],
pub gr_info: [L3GrInfo; 3],
pub grbuf: [[f32; 576]; 2],
pub scf: [f32; 40],
pub syn: [[f32; 2 * 32]; 18 + 15],
pub ist_pos: [[u8; 39]; 2],
}
impl Bs {
pub fn new(data: Vec<u8>, bytes: usize) -> Self {
Self {
buf: data,
pos: 0,
limit: bytes * 8,
}
}
/// Heckin... this is way more complicated than it
/// needs to be here...
pub fn get_bits(&mut self, n: u32) -> u32 {
let mut next: u32;
let mut cache: u32 = 0;
let s = (self.pos & 7) as u32;
let mut shl: i32 = n as i32 + s as i32;
let mut p = self.pos as u32 / 8;
if self.pos + (n as usize) > self.limit |
self.pos += n as usize;
p += 1;
next = p & (255 >> s);
while shl > 0 {
shl -= 8;
cache |= next << shl;
next = p;
p += 1;
}
return cache | (next >> -shl);
}
}
/*
pub fn hdr_valid(h: &[u8]) -> bool {
h[0] == 0xFF
&& ((h[1] & 0xF0) == 0xF0 || (h[1] & 0xFE) == 0xE2)
&& hdr_get_layer(h) != 0
&& hdr_get_bitrate(h) != 15
&& hdr_get_sample_rate(h) != 3
}
pub fn hdr_compare(h1: &[u8], h2: &[u8]) -> bool {
hdr_valid(h2)
&& ((h1[1] ^ h2[1]) & 0xFE) == 0
&& ((h1[2] ^ h2[2]) & 0x0C) == 0
&& !(hdr_is_free_format(h1) ^ hdr_is_free_format(h2))
}
pub fn hdr_bitrate_kbps(h: &[u8]) -> u32 {
let halfrate: [[[u32; 15]; 3]; 2] = [
[
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 16, 24, 28, 32, 40, 48, 56, 64, 72, 80, 88, 96, 112, 128],
],
[
[0, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160],
[
0, 16, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192,
],
[
0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224,
],
],
];
2 * halfrate[hdr_test_mpeg1(h) as usize][hdr_get_layer(h) as usize - 1]
[hdr_get_bitrate(h) as usize]
}
pub fn hdr_sample_rate_hz(h: &[u8]) -> u32 {
let g_hz: [u32; 3] = [44100, 48000, 32000];
g_hz[hdr_get_sample_rate(h) as usize]
>> (!hdr_test_mpeg1(h)) as u32
>> (!hdr_test_not_mpeg25(h)) as u32
}
pub fn hdr_frame_samples(h: &[u8]) -> u32 {
if hdr_is_layer_1(h) {
384
} else {
1152 >> (hdr_is_frame_576(h) as i32)
}
}
pub fn hdr_frame_bytes(h: &[u8], free_format_size: u32) -> u32 {
let mut frame_bytes = hdr_frame_samples(h) * hdr_bitrate_kbps(h) * 125 / hdr_sample_rate_hz(h);
if hdr_is_layer_1(h) {
// Slot align
frame_bytes &= !3;
}
if frame_bytes != 0 {
frame_bytes
} else {
free_format_size
}
}
pub fn hdr_padding(h: &[u8]) -> u32 {
if hdr_test_padding(h) {
if hdr_is_layer_1(h) {
4
} else {
1
}
} else {
0
}
}
pub fn L12_subband_alloc_table(hdr: &[u8], sci: &mut L12ScaleInfo) -> Vec<L12SubbandAlloc> {
let mode = hdr_get_stereo_mode(hdr) as usize;
let mut nbands;
let mut alloc: Vec<L12SubbandAlloc> = vec![];
let stereo_bands = if mode == MODE_MONO {
0
} else if mode == MODE_JOINT_STEREO {
(hdr_get_stereo_mode_ext(hdr) << 2) + 4
} else {
32
};
if hdr_is_layer_1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 76,
code_tab_width: 4,
band_count: 32,
});
nbands = 32;
} else if !hdr_test_mpeg1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 60,
code_tab_width: 4,
band_count: 4,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 7,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 2,
band_count: 19,
});
nbands = 30;
} else {
let sample_rate_idx = hdr_get_sample_rate(hdr);
// TODO: Clean up this comparison
let mut kbps = hdr_bitrate_kbps(hdr) >> ((mode != MODE_MONO) as u32);
if kbps == 0 {
kbps = 192;
}
alloc.push(L12SubbandAlloc {
tab_offset: 0,
code_tab_width: 4,
band_count: 3,
});
alloc.push(L12SubbandAlloc {
tab_offset: 16,
code_tab_width: 4,
band_count: 8,
});
alloc.push(L12SubbandAlloc {
tab_offset: 32,
code_tab_width: 3,
band_count: 12,
});
alloc.push(L12SubbandAlloc {
tab_offset: 40,
code_tab_width: 2,
band_count: 7,
});
nbands = 27;
if kbps < 56 {
alloc.clear();
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 4,
band_count: 2,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 10,
});
nbands = if sample_rate_idx == 2 { 12 } else { 8 };
} else if (kbps >= 96 && sample_rate_idx != 1) {
// TODO: sigh, and possibly weep.
// I think this basically just chops off the last few
// entries in the alloc defined above the previous if
// statement.
nbands = 30;
}
}
sci.total_bands = nbands;
sci.stereo_bands = u8::min(stereo_bands, nbands);
alloc
}
pub fn L12_read_scalefactors(bs: &mut Bs, pba: &[u8], scfcod: &[u8], bands: usize, scf: &mut [f32]) {
// TODO: The C version uses macros to build this array statically,
// which is a PITA so for now we just do it the simple and slower way.
let mut g_deq_L12: Vec<f32> = vec![];
{
let mut DQ = |x: f32| {
g_deq_L12.push(9.53674316e-07 / x);
g_deq_L12.push(7.56931807e-07 / x);
g_deq_L12.push(6.00777173e-07 / x);
};
DQ(3.0);
DQ(7.0);
DQ(15.0);
DQ(31.0);
DQ(63.0);
DQ(127.0);
DQ(255.0);
DQ(511.0);
DQ(1023.0);
DQ(2047.0);
DQ(4095.0);
DQ(8191.0);
DQ(16383.0);
DQ(32767.0);
DQ(65535.0);
DQ(3.0);
DQ(5.0);
DQ(9.0);
}
let mut scf_idx = 0;
for i in 0..bands {
let ba = pba[i];
let mask = if ba != 0 {
4 + ((19 >> scfcod[i]) & 3)
} else {
0
};
let mut m = 4;
while m != 0 {
let s;
if (mask & m) != 0 {
let b = bs.get_bits(6);
let idx = (ba as u32 * 3 - 6 + b % 3) as usize;
s = g_deq_L12[idx] * (1 << 21 >> (b / 3)) as f32;
} else {
s = 0.0;
}
// TODO: Check the post and pre-increment order here!!!
scf[scf_idx] = s;
scf_idx += 1;
}
}
}
pub fn L12_read_scale_info(hdr: &[u8], bs: &mut Bs, sci: &mut L12ScaleInfo) {
let g_bitalloc_code_tab: &[u8] = &[
0, 17, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 16, 0, 17, 18, 3, 19, 4, 5, 16, 0, 17, 18, 16, 0, 17, 18, 19, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
];
let subband_alloc = L12_subband_alloc_table(hdr, sci);
let mut subband_alloc_idx = 0;
let mut k: usize = 0;
let mut ba_bits = 0;
let mut ba_code_tab_idx: usize = 0;
for i in 0..(sci.total_bands as usize) {
let ba: u8;
if i == k {
let sb = &subband_alloc[subband_alloc_idx];
k += sb.band_count as usize;
ba_bits = sb.code_tab_width;
ba_code_tab_idx = sb.tab_offset as usize;
subband_alloc_idx += 1;
}
let ba_idx: usize = ba_code_tab_idx + (bs.get_bits(ba_bits as u32) as usize);
ba = g_bitalloc_code_tab[ba_idx];
sci.bitalloc[2 * i + 1] = if sci.stereo_bands != 0 { ba } else { 0 };
}
for i in 0..(2 * sci.total_bands as usize) {
sci.scfcod[i] = if sci.bitalloc[i] != 0 {
if hdr_is_layer_1(hdr) {
2
} else {
bs.get_bits(2) as u8
}
} else {
6
};
}
L12_read_scalefactors(
bs,
&sci.bitalloc,
&sci.scfcod,
(sci.total_bands * 2) as usize,
&mut sci.scf,
);
// TODO: This clear can probably be better.
for i in sci.stereo_bands..sci.total_bands {
let i = i as usize;
sci.bitalloc[2 * i + 1] = 0;
}
}
*/
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
/*
pub fn wav_header(hz: i32, ch: i16, bips: i32, data_bytes: i32) -> [u8;44] {
// let buffer: &mut [u8;44] = b"RIFFsizeWAVEfmt \x10\x00\x00\x00\x01\x00ch_hz_abpsbabsdatasize";
let mut buffer: [u8;44] = [0;44];
{
let mut c = Cursor::new(&mut buffer[..]);
let size = 44 + data_bytes - 8; // File size - 8
let avg_bytes_per_sec: u64 = bips as u64 * ch as u64 * hz as u64 / 8;
let block_align = bips as u64 * ch as u64 / 8;
// TODO: This alllll needs double checking.
c.write(b"RIFF"); // 0x00 (offset)
c.write_i32::<LE>(size); // 0x04
c.write(b"WAVE"); // 0x08
c.write(b"fmt "); // 0x0C
c.write(b"\x10\x00\x00\x00"); // 0x10
c.write_i16::<LE>(1); // 0x14 -- Integer PCM file format.
c.write_i16::<LE>(ch); // 0x16
c.write_i32::<LE>(hz); // 0x18
c.write_i32::<LE>(avg_bytes_per_sec as i32); // 0x1C -- TODO, better casts
c.write_i16::<LE>(block_align as i16); // 0x20 -- TODO, better casts
c.write_i16::<LE>(bips as i16); // 0x22 -- TODO, better casts
c.write(b"data"); // 0x24
c.write_i32::<LE>(data_bytes); // 0x28
}
buffer
}
/// This shouldn't really be necessary in Rust, I think, since it just
/// reads from the file. Not gonna try factoring it out right now though.
pub fn preload(mut file: impl Read, buf: &mut Vec<u8>) -> io::Result<usize> {
file.read_to_end(buf)
}
*/
}
| {
return 0;
} | conditional_block |
oldlib.rs | extern crate byteorder;
// use byteorder::{WriteBytesExt, LE};
// use std::io::{self, Cursor, Read, Write};
pub const MAX_SAMPLES_PER_FRAME: usize = 1152 * 2;
/// More than ISO spec's
pub const MAX_FREE_FORMAT_FRAME_SIZE: usize = 2304;
pub const MAX_FRAME_SYNC_MATCHES: usize = 10;
/// MUST be >= 320000/8/32000*1152 = 1440
pub const MAX_L3_FRAME_PAYLOAD_BYTES: usize = MAX_FREE_FORMAT_FRAME_SIZE;
pub const MAX_BITRESERVOIR_BYTES: usize = 511;
pub const SHORT_BLOCK_TYPE: usize = 2;
pub const STOP_BLOCK_TYPE: usize = 3;
pub const MODE_MONO: usize = 3;
pub const MODE_JOINT_STEREO: usize = 1;
pub const HDR_SIZE: usize = 4;
pub mod corrode_test;
pub fn hdr_is_mono(h: &[u8]) -> bool {
// TODO: Might be nicer ways to do these bit-tests
(h[3] & 0xC0) == 0xC0
}
pub fn hdr_is_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0xE0) == 0x60
}
pub fn hdr_is_free_format(h: &[u8]) -> bool {
(h[2] & 0xF0) == 0
}
pub fn hdr_is_crc(h: &[u8]) -> bool {
// TODO: Double-check
(h[1] & 1) == 0
}
pub fn hdr_test_padding(h: &[u8]) -> bool {
(h[2] & 0x2) != 0
}
pub fn hdr_test_mpeg1(h: &[u8]) -> bool {
(h[1] & 0x08) != 0
}
pub fn hdr_test_not_mpeg25(h: &[u8]) -> bool {
(h[1] & 0x10) != 0
}
pub fn hdr_test_i_stereo(h: &[u8]) -> bool {
(h[3] & 0x10) != 0
}
pub fn hdr_test_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0x20) != 0
}
pub fn hdr_get_stereo_mode(h: &[u8]) -> u8 {
((h[3] >> 6) & 3)
}
pub fn hdr_get_stereo_mode_ext(h: &[u8]) -> u8 {
((h[3] >> 4) & 3)
}
pub fn hdr_get_layer(h: &[u8]) -> u8 {
((h[1] >> 1) & 3)
}
pub fn hdr_get_bitrate(h: &[u8]) -> u8 {
(h[2] >> 4)
}
pub fn hdr_get_sample_rate(h: &[u8]) -> u8 {
((h[2] >> 2) & 3)
}
pub fn hdr_is_frame_576(h: &[u8]) -> bool {
(h[1] & 14) == 2
}
pub fn hdr_is_layer_1(h: &[u8]) -> bool {
(h[1] & 6) == 6
}
pub const BITS_DEQUANTIZER_OUT: i32 = -1;
pub const MAX_SCF: i32 = 255 + BITS_DEQUANTIZER_OUT * 4 - 210;
pub const MAX_SCFI: i32 = (MAX_SCF + 3) & !3;
pub struct FrameInfo {
pub frame_bytes: i32,
pub channels: i32,
pub hz: i32,
pub layers: i32,
pub bitrate_kbps: i32,
}
pub struct Mp3Dec {
pub mdct_overlap: [[f32; 2]; 9 * 32],
pub qmf_state: [f32; 15 * 2 * 32],
pub reserv: i32,
pub free_format_bytes: i32,
pub header: [u8; 4],
pub reserv_buf: [u8; 511],
}
// TODO: float vs. int16 output?
// type Mp3Sample = i16;
// pub fn decode_frame(
// dec: &Mp3Dec,
// mp3: &[u8],
// mp3_bytes: usize,
// pcm: &[Mp3Sample],
// info: &FrameInfo,
// ) -> i32 {
// 0
// }
pub struct Bs {
pub buf: Vec<u8>,
pub pos: usize,
pub limit: usize,
}
pub struct L12ScaleInfo {
pub scf: [f32; 3 * 64],
pub total_bands: u8,
pub stereo_bands: u8,
pub bitalloc: [u8; 64],
pub scfcod: [u8; 64],
}
pub struct L12SubbandAlloc {
pub tab_offset: u8,
pub code_tab_width: u8,
pub band_count: u8,
}
pub struct L3GrInfo {
pub sfbtab: Vec<u8>,
pub part_23_length: u16,
pub big_values: u16,
pub scalefac_compress: u16,
pub global_gain: u8,
pub block_type: u8,
pub mixed_block_flag: u8,
pub n_long_sfb: u8,
pub n_short_sfb: u8,
pub table_select: [u8; 3],
pub region_count: [u8; 3],
pub subblock_gain: [u8; 3],
pub preflag: u8,
pub scalefac_scale: u8,
pub count1_table: u8,
pub scfsi: u8,
}
pub struct Mp3DecScratch {
pub bs: Bs,
pub maindata: [u8; MAX_BITRESERVOIR_BYTES + MAX_L3_FRAME_PAYLOAD_BYTES],
pub gr_info: [L3GrInfo; 3],
pub grbuf: [[f32; 576]; 2],
pub scf: [f32; 40],
pub syn: [[f32; 2 * 32]; 18 + 15],
pub ist_pos: [[u8; 39]; 2],
}
impl Bs {
pub fn new(data: Vec<u8>, bytes: usize) -> Self {
Self {
buf: data,
pos: 0,
limit: bytes * 8,
}
}
/// Heckin... this is way more complicated than it
/// needs to be here...
pub fn get_bits(&mut self, n: u32) -> u32 {
let mut next: u32;
let mut cache: u32 = 0;
let s = (self.pos & 7) as u32;
let mut shl: i32 = n as i32 + s as i32;
let mut p = self.pos as u32 / 8;
if self.pos + (n as usize) > self.limit {
return 0;
}
self.pos += n as usize;
p += 1;
next = p & (255 >> s);
while shl > 0 {
shl -= 8;
cache |= next << shl;
next = p;
p += 1;
}
return cache | (next >> -shl);
}
}
/*
pub fn hdr_valid(h: &[u8]) -> bool {
h[0] == 0xFF
&& ((h[1] & 0xF0) == 0xF0 || (h[1] & 0xFE) == 0xE2)
&& hdr_get_layer(h) != 0
&& hdr_get_bitrate(h) != 15
&& hdr_get_sample_rate(h) != 3
}
pub fn hdr_compare(h1: &[u8], h2: &[u8]) -> bool {
hdr_valid(h2)
&& ((h1[1] ^ h2[1]) & 0xFE) == 0
&& ((h1[2] ^ h2[2]) & 0x0C) == 0
&& !(hdr_is_free_format(h1) ^ hdr_is_free_format(h2))
}
pub fn hdr_bitrate_kbps(h: &[u8]) -> u32 {
let halfrate: [[[u32; 15]; 3]; 2] = [
[
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 16, 24, 28, 32, 40, 48, 56, 64, 72, 80, 88, 96, 112, 128],
],
[
[0, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160],
[
0, 16, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192,
],
[
0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224,
],
],
];
2 * halfrate[hdr_test_mpeg1(h) as usize][hdr_get_layer(h) as usize - 1]
[hdr_get_bitrate(h) as usize]
}
pub fn hdr_sample_rate_hz(h: &[u8]) -> u32 {
let g_hz: [u32; 3] = [44100, 48000, 32000];
g_hz[hdr_get_sample_rate(h) as usize]
>> (!hdr_test_mpeg1(h)) as u32
>> (!hdr_test_not_mpeg25(h)) as u32
}
pub fn hdr_frame_samples(h: &[u8]) -> u32 {
if hdr_is_layer_1(h) {
384
} else {
1152 >> (hdr_is_frame_576(h) as i32)
}
}
pub fn hdr_frame_bytes(h: &[u8], free_format_size: u32) -> u32 {
let mut frame_bytes = hdr_frame_samples(h) * hdr_bitrate_kbps(h) * 125 / hdr_sample_rate_hz(h);
if hdr_is_layer_1(h) {
// Slot align
frame_bytes &= !3;
}
if frame_bytes != 0 {
frame_bytes
} else {
free_format_size
}
}
pub fn hdr_padding(h: &[u8]) -> u32 {
if hdr_test_padding(h) {
if hdr_is_layer_1(h) { | 4
} else {
1
}
} else {
0
}
}
pub fn L12_subband_alloc_table(hdr: &[u8], sci: &mut L12ScaleInfo) -> Vec<L12SubbandAlloc> {
let mode = hdr_get_stereo_mode(hdr) as usize;
let mut nbands;
let mut alloc: Vec<L12SubbandAlloc> = vec![];
let stereo_bands = if mode == MODE_MONO {
0
} else if mode == MODE_JOINT_STEREO {
(hdr_get_stereo_mode_ext(hdr) << 2) + 4
} else {
32
};
if hdr_is_layer_1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 76,
code_tab_width: 4,
band_count: 32,
});
nbands = 32;
} else if !hdr_test_mpeg1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 60,
code_tab_width: 4,
band_count: 4,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 7,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 2,
band_count: 19,
});
nbands = 30;
} else {
let sample_rate_idx = hdr_get_sample_rate(hdr);
// TODO: Clean up this comparison
let mut kbps = hdr_bitrate_kbps(hdr) >> ((mode != MODE_MONO) as u32);
if kbps == 0 {
kbps = 192;
}
alloc.push(L12SubbandAlloc {
tab_offset: 0,
code_tab_width: 4,
band_count: 3,
});
alloc.push(L12SubbandAlloc {
tab_offset: 16,
code_tab_width: 4,
band_count: 8,
});
alloc.push(L12SubbandAlloc {
tab_offset: 32,
code_tab_width: 3,
band_count: 12,
});
alloc.push(L12SubbandAlloc {
tab_offset: 40,
code_tab_width: 2,
band_count: 7,
});
nbands = 27;
if kbps < 56 {
alloc.clear();
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 4,
band_count: 2,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 10,
});
nbands = if sample_rate_idx == 2 { 12 } else { 8 };
} else if (kbps >= 96 && sample_rate_idx != 1) {
// TODO: sigh, and possibly weep.
// I think this basically just chops off the last few
// entries in the alloc defined above the previous if
// statement.
nbands = 30;
}
}
sci.total_bands = nbands;
sci.stereo_bands = u8::min(stereo_bands, nbands);
alloc
}
pub fn L12_read_scalefactors(bs: &mut Bs, pba: &[u8], scfcod: &[u8], bands: usize, scf: &mut [f32]) {
// TODO: The C version uses macros to build this array statically,
// which is a PITA so for now we just do it the simple and slower way.
let mut g_deq_L12: Vec<f32> = vec![];
{
let mut DQ = |x: f32| {
g_deq_L12.push(9.53674316e-07 / x);
g_deq_L12.push(7.56931807e-07 / x);
g_deq_L12.push(6.00777173e-07 / x);
};
DQ(3.0);
DQ(7.0);
DQ(15.0);
DQ(31.0);
DQ(63.0);
DQ(127.0);
DQ(255.0);
DQ(511.0);
DQ(1023.0);
DQ(2047.0);
DQ(4095.0);
DQ(8191.0);
DQ(16383.0);
DQ(32767.0);
DQ(65535.0);
DQ(3.0);
DQ(5.0);
DQ(9.0);
}
let mut scf_idx = 0;
for i in 0..bands {
let ba = pba[i];
let mask = if ba != 0 {
4 + ((19 >> scfcod[i]) & 3)
} else {
0
};
let mut m = 4;
while m != 0 {
let s;
if (mask & m) != 0 {
let b = bs.get_bits(6);
let idx = (ba as u32 * 3 - 6 + b % 3) as usize;
s = g_deq_L12[idx] * (1 << 21 >> (b / 3)) as f32;
} else {
s = 0.0;
}
// TODO: Check the post and pre-increment order here!!!
scf[scf_idx] = s;
scf_idx += 1;
}
}
}
pub fn L12_read_scale_info(hdr: &[u8], bs: &mut Bs, sci: &mut L12ScaleInfo) {
let g_bitalloc_code_tab: &[u8] = &[
0, 17, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 16, 0, 17, 18, 3, 19, 4, 5, 16, 0, 17, 18, 16, 0, 17, 18, 19, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
];
let subband_alloc = L12_subband_alloc_table(hdr, sci);
let mut subband_alloc_idx = 0;
let mut k: usize = 0;
let mut ba_bits = 0;
let mut ba_code_tab_idx: usize = 0;
for i in 0..(sci.total_bands as usize) {
let ba: u8;
if i == k {
let sb = &subband_alloc[subband_alloc_idx];
k += sb.band_count as usize;
ba_bits = sb.code_tab_width;
ba_code_tab_idx = sb.tab_offset as usize;
subband_alloc_idx += 1;
}
let ba_idx: usize = ba_code_tab_idx + (bs.get_bits(ba_bits as u32) as usize);
ba = g_bitalloc_code_tab[ba_idx];
sci.bitalloc[2 * i + 1] = if sci.stereo_bands != 0 { ba } else { 0 };
}
for i in 0..(2 * sci.total_bands as usize) {
sci.scfcod[i] = if sci.bitalloc[i] != 0 {
if hdr_is_layer_1(hdr) {
2
} else {
bs.get_bits(2) as u8
}
} else {
6
};
}
L12_read_scalefactors(
bs,
&sci.bitalloc,
&sci.scfcod,
(sci.total_bands * 2) as usize,
&mut sci.scf,
);
// TODO: This clear can probably be better.
for i in sci.stereo_bands..sci.total_bands {
let i = i as usize;
sci.bitalloc[2 * i + 1] = 0;
}
}
*/
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
/*
pub fn wav_header(hz: i32, ch: i16, bips: i32, data_bytes: i32) -> [u8;44] {
// let buffer: &mut [u8;44] = b"RIFFsizeWAVEfmt \x10\x00\x00\x00\x01\x00ch_hz_abpsbabsdatasize";
let mut buffer: [u8;44] = [0;44];
{
let mut c = Cursor::new(&mut buffer[..]);
let size = 44 + data_bytes - 8; // File size - 8
let avg_bytes_per_sec: u64 = bips as u64 * ch as u64 * hz as u64 / 8;
let block_align = bips as u64 * ch as u64 / 8;
// TODO: This alllll needs double checking.
c.write(b"RIFF"); // 0x00 (offset)
c.write_i32::<LE>(size); // 0x04
c.write(b"WAVE"); // 0x08
c.write(b"fmt "); // 0x0C
c.write(b"\x10\x00\x00\x00"); // 0x10
c.write_i16::<LE>(1); // 0x14 -- Integer PCM file format.
c.write_i16::<LE>(ch); // 0x16
c.write_i32::<LE>(hz); // 0x18
c.write_i32::<LE>(avg_bytes_per_sec as i32); // 0x1C -- TODO, better casts
c.write_i16::<LE>(block_align as i16); // 0x20 -- TODO, better casts
c.write_i16::<LE>(bips as i16); // 0x22 -- TODO, better casts
c.write(b"data"); // 0x24
c.write_i32::<LE>(data_bytes); // 0x28
}
buffer
}
/// This shouldn't really be necessary in Rust, I think, since it just
/// reads from the file. Not gonna try factoring it out right now though.
pub fn preload(mut file: impl Read, buf: &mut Vec<u8>) -> io::Result<usize> {
file.read_to_end(buf)
}
*/
} | random_line_split | |
oldlib.rs | extern crate byteorder;
// use byteorder::{WriteBytesExt, LE};
// use std::io::{self, Cursor, Read, Write};
pub const MAX_SAMPLES_PER_FRAME: usize = 1152 * 2;
/// More than ISO spec's
pub const MAX_FREE_FORMAT_FRAME_SIZE: usize = 2304;
pub const MAX_FRAME_SYNC_MATCHES: usize = 10;
/// MUST be >= 320000/8/32000*1152 = 1440
pub const MAX_L3_FRAME_PAYLOAD_BYTES: usize = MAX_FREE_FORMAT_FRAME_SIZE;
pub const MAX_BITRESERVOIR_BYTES: usize = 511;
pub const SHORT_BLOCK_TYPE: usize = 2;
pub const STOP_BLOCK_TYPE: usize = 3;
pub const MODE_MONO: usize = 3;
pub const MODE_JOINT_STEREO: usize = 1;
pub const HDR_SIZE: usize = 4;
pub mod corrode_test;
pub fn hdr_is_mono(h: &[u8]) -> bool {
// TODO: Might be nicer ways to do these bit-tests
(h[3] & 0xC0) == 0xC0
}
pub fn hdr_is_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0xE0) == 0x60
}
pub fn hdr_is_free_format(h: &[u8]) -> bool {
(h[2] & 0xF0) == 0
}
pub fn hdr_is_crc(h: &[u8]) -> bool {
// TODO: Double-check
(h[1] & 1) == 0
}
pub fn hdr_test_padding(h: &[u8]) -> bool {
(h[2] & 0x2) != 0
}
pub fn hdr_test_mpeg1(h: &[u8]) -> bool {
(h[1] & 0x08) != 0
}
pub fn hdr_test_not_mpeg25(h: &[u8]) -> bool {
(h[1] & 0x10) != 0
}
pub fn hdr_test_i_stereo(h: &[u8]) -> bool {
(h[3] & 0x10) != 0
}
pub fn hdr_test_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0x20) != 0
}
pub fn hdr_get_stereo_mode(h: &[u8]) -> u8 {
((h[3] >> 6) & 3)
}
pub fn hdr_get_stereo_mode_ext(h: &[u8]) -> u8 {
((h[3] >> 4) & 3)
}
pub fn hdr_get_layer(h: &[u8]) -> u8 {
((h[1] >> 1) & 3)
}
pub fn hdr_get_bitrate(h: &[u8]) -> u8 {
(h[2] >> 4)
}
pub fn hdr_get_sample_rate(h: &[u8]) -> u8 {
((h[2] >> 2) & 3)
}
pub fn hdr_is_frame_576(h: &[u8]) -> bool {
(h[1] & 14) == 2
}
pub fn hdr_is_layer_1(h: &[u8]) -> bool {
(h[1] & 6) == 6
}
pub const BITS_DEQUANTIZER_OUT: i32 = -1;
pub const MAX_SCF: i32 = 255 + BITS_DEQUANTIZER_OUT * 4 - 210;
pub const MAX_SCFI: i32 = (MAX_SCF + 3) & !3;
pub struct FrameInfo {
pub frame_bytes: i32,
pub channels: i32,
pub hz: i32,
pub layers: i32,
pub bitrate_kbps: i32,
}
pub struct Mp3Dec {
pub mdct_overlap: [[f32; 2]; 9 * 32],
pub qmf_state: [f32; 15 * 2 * 32],
pub reserv: i32,
pub free_format_bytes: i32,
pub header: [u8; 4],
pub reserv_buf: [u8; 511],
}
// TODO: float vs. int16 output?
// type Mp3Sample = i16;
// pub fn decode_frame(
// dec: &Mp3Dec,
// mp3: &[u8],
// mp3_bytes: usize,
// pcm: &[Mp3Sample],
// info: &FrameInfo,
// ) -> i32 {
// 0
// }
pub struct Bs {
pub buf: Vec<u8>,
pub pos: usize,
pub limit: usize,
}
pub struct L12ScaleInfo {
pub scf: [f32; 3 * 64],
pub total_bands: u8,
pub stereo_bands: u8,
pub bitalloc: [u8; 64],
pub scfcod: [u8; 64],
}
pub struct L12SubbandAlloc {
pub tab_offset: u8,
pub code_tab_width: u8,
pub band_count: u8,
}
pub struct L3GrInfo {
pub sfbtab: Vec<u8>,
pub part_23_length: u16,
pub big_values: u16,
pub scalefac_compress: u16,
pub global_gain: u8,
pub block_type: u8,
pub mixed_block_flag: u8,
pub n_long_sfb: u8,
pub n_short_sfb: u8,
pub table_select: [u8; 3],
pub region_count: [u8; 3],
pub subblock_gain: [u8; 3],
pub preflag: u8,
pub scalefac_scale: u8,
pub count1_table: u8,
pub scfsi: u8,
}
pub struct Mp3DecScratch {
pub bs: Bs,
pub maindata: [u8; MAX_BITRESERVOIR_BYTES + MAX_L3_FRAME_PAYLOAD_BYTES],
pub gr_info: [L3GrInfo; 3],
pub grbuf: [[f32; 576]; 2],
pub scf: [f32; 40],
pub syn: [[f32; 2 * 32]; 18 + 15],
pub ist_pos: [[u8; 39]; 2],
}
impl Bs {
pub fn new(data: Vec<u8>, bytes: usize) -> Self {
Self {
buf: data,
pos: 0,
limit: bytes * 8,
}
}
/// Heckin... this is way more complicated than it
/// needs to be here...
pub fn get_bits(&mut self, n: u32) -> u32 |
}
/*
pub fn hdr_valid(h: &[u8]) -> bool {
h[0] == 0xFF
&& ((h[1] & 0xF0) == 0xF0 || (h[1] & 0xFE) == 0xE2)
&& hdr_get_layer(h) != 0
&& hdr_get_bitrate(h) != 15
&& hdr_get_sample_rate(h) != 3
}
pub fn hdr_compare(h1: &[u8], h2: &[u8]) -> bool {
hdr_valid(h2)
&& ((h1[1] ^ h2[1]) & 0xFE) == 0
&& ((h1[2] ^ h2[2]) & 0x0C) == 0
&& !(hdr_is_free_format(h1) ^ hdr_is_free_format(h2))
}
pub fn hdr_bitrate_kbps(h: &[u8]) -> u32 {
let halfrate: [[[u32; 15]; 3]; 2] = [
[
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 16, 24, 28, 32, 40, 48, 56, 64, 72, 80, 88, 96, 112, 128],
],
[
[0, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160],
[
0, 16, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192,
],
[
0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224,
],
],
];
2 * halfrate[hdr_test_mpeg1(h) as usize][hdr_get_layer(h) as usize - 1]
[hdr_get_bitrate(h) as usize]
}
pub fn hdr_sample_rate_hz(h: &[u8]) -> u32 {
let g_hz: [u32; 3] = [44100, 48000, 32000];
g_hz[hdr_get_sample_rate(h) as usize]
>> (!hdr_test_mpeg1(h)) as u32
>> (!hdr_test_not_mpeg25(h)) as u32
}
pub fn hdr_frame_samples(h: &[u8]) -> u32 {
if hdr_is_layer_1(h) {
384
} else {
1152 >> (hdr_is_frame_576(h) as i32)
}
}
pub fn hdr_frame_bytes(h: &[u8], free_format_size: u32) -> u32 {
let mut frame_bytes = hdr_frame_samples(h) * hdr_bitrate_kbps(h) * 125 / hdr_sample_rate_hz(h);
if hdr_is_layer_1(h) {
// Slot align
frame_bytes &= !3;
}
if frame_bytes != 0 {
frame_bytes
} else {
free_format_size
}
}
pub fn hdr_padding(h: &[u8]) -> u32 {
if hdr_test_padding(h) {
if hdr_is_layer_1(h) {
4
} else {
1
}
} else {
0
}
}
pub fn L12_subband_alloc_table(hdr: &[u8], sci: &mut L12ScaleInfo) -> Vec<L12SubbandAlloc> {
let mode = hdr_get_stereo_mode(hdr) as usize;
let mut nbands;
let mut alloc: Vec<L12SubbandAlloc> = vec![];
let stereo_bands = if mode == MODE_MONO {
0
} else if mode == MODE_JOINT_STEREO {
(hdr_get_stereo_mode_ext(hdr) << 2) + 4
} else {
32
};
if hdr_is_layer_1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 76,
code_tab_width: 4,
band_count: 32,
});
nbands = 32;
} else if !hdr_test_mpeg1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 60,
code_tab_width: 4,
band_count: 4,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 7,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 2,
band_count: 19,
});
nbands = 30;
} else {
let sample_rate_idx = hdr_get_sample_rate(hdr);
// TODO: Clean up this comparison
let mut kbps = hdr_bitrate_kbps(hdr) >> ((mode != MODE_MONO) as u32);
if kbps == 0 {
kbps = 192;
}
alloc.push(L12SubbandAlloc {
tab_offset: 0,
code_tab_width: 4,
band_count: 3,
});
alloc.push(L12SubbandAlloc {
tab_offset: 16,
code_tab_width: 4,
band_count: 8,
});
alloc.push(L12SubbandAlloc {
tab_offset: 32,
code_tab_width: 3,
band_count: 12,
});
alloc.push(L12SubbandAlloc {
tab_offset: 40,
code_tab_width: 2,
band_count: 7,
});
nbands = 27;
if kbps < 56 {
alloc.clear();
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 4,
band_count: 2,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 10,
});
nbands = if sample_rate_idx == 2 { 12 } else { 8 };
} else if (kbps >= 96 && sample_rate_idx != 1) {
// TODO: sigh, and possibly weep.
// I think this basically just chops off the last few
// entries in the alloc defined above the previous if
// statement.
nbands = 30;
}
}
sci.total_bands = nbands;
sci.stereo_bands = u8::min(stereo_bands, nbands);
alloc
}
pub fn L12_read_scalefactors(bs: &mut Bs, pba: &[u8], scfcod: &[u8], bands: usize, scf: &mut [f32]) {
// TODO: The C version uses macros to build this array statically,
// which is a PITA so for now we just do it the simple and slower way.
let mut g_deq_L12: Vec<f32> = vec![];
{
let mut DQ = |x: f32| {
g_deq_L12.push(9.53674316e-07 / x);
g_deq_L12.push(7.56931807e-07 / x);
g_deq_L12.push(6.00777173e-07 / x);
};
DQ(3.0);
DQ(7.0);
DQ(15.0);
DQ(31.0);
DQ(63.0);
DQ(127.0);
DQ(255.0);
DQ(511.0);
DQ(1023.0);
DQ(2047.0);
DQ(4095.0);
DQ(8191.0);
DQ(16383.0);
DQ(32767.0);
DQ(65535.0);
DQ(3.0);
DQ(5.0);
DQ(9.0);
}
let mut scf_idx = 0;
for i in 0..bands {
let ba = pba[i];
let mask = if ba != 0 {
4 + ((19 >> scfcod[i]) & 3)
} else {
0
};
let mut m = 4;
while m != 0 {
let s;
if (mask & m) != 0 {
let b = bs.get_bits(6);
let idx = (ba as u32 * 3 - 6 + b % 3) as usize;
s = g_deq_L12[idx] * (1 << 21 >> (b / 3)) as f32;
} else {
s = 0.0;
}
// TODO: Check the post and pre-increment order here!!!
scf[scf_idx] = s;
scf_idx += 1;
}
}
}
pub fn L12_read_scale_info(hdr: &[u8], bs: &mut Bs, sci: &mut L12ScaleInfo) {
let g_bitalloc_code_tab: &[u8] = &[
0, 17, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 16, 0, 17, 18, 3, 19, 4, 5, 16, 0, 17, 18, 16, 0, 17, 18, 19, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
];
let subband_alloc = L12_subband_alloc_table(hdr, sci);
let mut subband_alloc_idx = 0;
let mut k: usize = 0;
let mut ba_bits = 0;
let mut ba_code_tab_idx: usize = 0;
for i in 0..(sci.total_bands as usize) {
let ba: u8;
if i == k {
let sb = &subband_alloc[subband_alloc_idx];
k += sb.band_count as usize;
ba_bits = sb.code_tab_width;
ba_code_tab_idx = sb.tab_offset as usize;
subband_alloc_idx += 1;
}
let ba_idx: usize = ba_code_tab_idx + (bs.get_bits(ba_bits as u32) as usize);
ba = g_bitalloc_code_tab[ba_idx];
sci.bitalloc[2 * i + 1] = if sci.stereo_bands != 0 { ba } else { 0 };
}
for i in 0..(2 * sci.total_bands as usize) {
sci.scfcod[i] = if sci.bitalloc[i] != 0 {
if hdr_is_layer_1(hdr) {
2
} else {
bs.get_bits(2) as u8
}
} else {
6
};
}
L12_read_scalefactors(
bs,
&sci.bitalloc,
&sci.scfcod,
(sci.total_bands * 2) as usize,
&mut sci.scf,
);
// TODO: This clear can probably be better.
for i in sci.stereo_bands..sci.total_bands {
let i = i as usize;
sci.bitalloc[2 * i + 1] = 0;
}
}
*/
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
/*
pub fn wav_header(hz: i32, ch: i16, bips: i32, data_bytes: i32) -> [u8;44] {
// let buffer: &mut [u8;44] = b"RIFFsizeWAVEfmt \x10\x00\x00\x00\x01\x00ch_hz_abpsbabsdatasize";
let mut buffer: [u8;44] = [0;44];
{
let mut c = Cursor::new(&mut buffer[..]);
let size = 44 + data_bytes - 8; // File size - 8
let avg_bytes_per_sec: u64 = bips as u64 * ch as u64 * hz as u64 / 8;
let block_align = bips as u64 * ch as u64 / 8;
// TODO: This alllll needs double checking.
c.write(b"RIFF"); // 0x00 (offset)
c.write_i32::<LE>(size); // 0x04
c.write(b"WAVE"); // 0x08
c.write(b"fmt "); // 0x0C
c.write(b"\x10\x00\x00\x00"); // 0x10
c.write_i16::<LE>(1); // 0x14 -- Integer PCM file format.
c.write_i16::<LE>(ch); // 0x16
c.write_i32::<LE>(hz); // 0x18
c.write_i32::<LE>(avg_bytes_per_sec as i32); // 0x1C -- TODO, better casts
c.write_i16::<LE>(block_align as i16); // 0x20 -- TODO, better casts
c.write_i16::<LE>(bips as i16); // 0x22 -- TODO, better casts
c.write(b"data"); // 0x24
c.write_i32::<LE>(data_bytes); // 0x28
}
buffer
}
/// This shouldn't really be necessary in Rust, I think, since it just
/// reads from the file. Not gonna try factoring it out right now though.
pub fn preload(mut file: impl Read, buf: &mut Vec<u8>) -> io::Result<usize> {
file.read_to_end(buf)
}
*/
}
| {
let mut next: u32;
let mut cache: u32 = 0;
let s = (self.pos & 7) as u32;
let mut shl: i32 = n as i32 + s as i32;
let mut p = self.pos as u32 / 8;
if self.pos + (n as usize) > self.limit {
return 0;
}
self.pos += n as usize;
p += 1;
next = p & (255 >> s);
while shl > 0 {
shl -= 8;
cache |= next << shl;
next = p;
p += 1;
}
return cache | (next >> -shl);
} | identifier_body |
core.py | from math import atan2, acos, cos, sin
nqcflags = 9  # total number of qc flags
passflag = 0  # "pass" qc value.
suspflag = 1  # "suspect" qc value.
warnflag = 2  # "warning" qc value.
failflag = 3  # "failed" qc value.
notestflag = 8  # "not-tested" qc value.
mflag = 9  # "missing" qc value.
irangeflag = 0  # index for range test qc flag.
istepflag = 1  # index for step test qc flag.
ipersistflag = 2  # index for persistence test flag.
ispatialflag = 3  # index for spatial test qc flag.
mvc = -888888.0  # missing-value code used throughout the obs/qc arrays.
obsmvc = -888888.0  # missing-value code for raw observations.
trace = -666666.0  # sentinel for a precipitation "trace" amount.
# NOTE(review): ispd/idir/iu/iv are all 1 — they look like placeholder
# indices for the speed/direction/u/v variables; confirm the real values.
ispd = 1
idir = 1
iu = 1
iv = 1
def range_check(obs, nstnnets, var, ivar, qc_flag):
    """
    Gross range ("limits") test: flag every observation that falls outside
    the physically plausible range for its variable.

    obs      : 3-d observation array indexed [time, station, variable].
    nstnnets : number of stations (second axis of obs).
    var      : variable name, used to look up the allowed range.
    ivar     : index of the variable along the third axis of obs.
    qc_flag  : 4-d flag array [time, station, variable, test]; the
               range-test slot (irangeflag) is updated in place.

    Raises ValueError for an unrecognized variable name.
    """
    iqcflag = irangeflag
    ndts = len(obs[:, 0, ivar])  # nr of hours in d.
    # Allowed (min, max) per variable. Temperatures in Kelvin, pressure in Pa.
    allowed_range = {
        'temp': (225, 323),
        'dew': (225, 323),
        'wind_dir': (0, 360),
        'wind_speed': (0, 60),
        'wind_gust': (0, 70),
        'pressure': (80000, 107840),  # Pa, not hPa!
        'rel_hum': (0, 105),
        # let's keep the values for variables we don't have (yet?)
        'mixr': (0, 25),
        'pcp6': (0, 256),
        'pcp24': (0, 508)
    }
    try:
        minrange, maxrange = allowed_range[var]
    except KeyError:
        raise ValueError('Cannot recognize variable type')
    for d in range(ndts):  # timestamps
        for s in range(nstnnets):  # stations
            if obs[d, s, ivar] == mvc:
                qc_flag[d, s, ivar, iqcflag] = mflag
                continue
            if obs[d, s, ivar] < minrange or obs[d, s, ivar] > maxrange:
                # Don't flag traces for precip!
                # BUG FIX: the original compared the entire `obs` array with
                # `trace` (`obs == trace`) instead of the single value under
                # test; compare the scalar element instead.
                # NOTE(review): `var == 'pcp'` can never be true here because
                # 'pcp' is not a key of allowed_range (the lookup above would
                # already have raised) — confirm whether 'pcp6'/'pcp24' were
                # intended.
                qc_flag[d, s, ivar, iqcflag] = passflag if var == 'pcp' and obs[d, s, ivar] == trace else failflag
            else:
                qc_flag[d, s, ivar, iqcflag] = passflag
def test_temp_vs_dew(obs, nstnnets, var, ivar, qc_flag):
# If there is both temp and tdew and after we made all the tests.
|
def step_check(obs, nstnnets, var, ivar, qc_flag):
    """
    Perform "delta test" which, for each station checks each hour
    in a day, checks for jumps between consecutive observations
    that exceed a given threshold.

    obs      : 3-d observation array indexed [time, station, variable].
    nstnnets : number of stations.
    var      : variable name used to look up the step thresholds.
    ivar     : variable index along the third axis of obs.
    qc_flag  : 4-d flag array [time, station, variable, test]; the
               step-test slot (istepflag) is updated in place.

    Raises ValueError for an unrecognized variable name.
    """
    iqcflag = istepflag
    ndts = len(obs[:, 0, ivar])  # # of hours in d.
    level1 = suspflag
    level2 = warnflag
    # if too_many_spikes (hours around which a spike is found) or more are found, flag all ndts worth.
    too_many_spikes = 2
    # set two maximum absolute steps allowable for each variable, one for "suspect" flag and one for "warning" flag.
    # Tuple layout: (suspect threshold, warning threshold, 3-point spike threshold or None).
    steps = {
        'temp': (10.0, 15.0, 8.0),
        'dew': (10.0, 15.0, 8.0),
        'wind_gust': (40.0, 50.0, None),
        'rel_hum': (60.0, 80.0, None),
        'pressure': (1500.0, 2000.0, 1000.0),
        'wind_speed': (25.0, 35.0, 15.0),
        'wind_dir': (361., 361., None),
        'mixr': (7.0, 10.0, None),
        'pcpn1': (1.0, 2.0, None),
    }
    try:
        step1, step2, step_3pt = steps[var]
    except KeyError:
        raise ValueError('Unrecognized variable')
    for s in range(nstnnets):
        spikes_found = 0
        for d in range(ndts):
            if d == 0:
                # No previous hour to difference against.
                qc_flag[d, s, ivar, iqcflag] = notestflag
                continue
            # FIRST SET OF TESTS: two-point (consecutive-hour) jump test.
            if qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or qc_flag[d, s, ivar, irangeflag] in [failflag, mflag]:
                qc_flag[d, s, ivar, iqcflag] = notestflag
            elif qc_flag[d, s, ivar, istepflag] in [level1, level2]:
                pass  # do nothing... must have been flagged by 3pt test for the previous d.
            # NOTE(review): a jump exactly equal to step1 is not flagged by
            # either branch below — confirm "<" vs "<=" against the original.
            elif step1 < abs(obs[d, s, ivar] - obs[d-1, s, ivar]) < step2:  # flag current and previous time with susp
                spikes_found += 1
                qc_flag[d-1, s, ivar, iqcflag] = level1
                qc_flag[d, s, ivar, iqcflag] = level1
            elif abs(obs[d, s, ivar] - obs[d-1, s, ivar]) >= step2:  # flag current and previous time with warning
                spikes_found += 1
                qc_flag[d-1, s, ivar, iqcflag] = level2
                qc_flag[d, s, ivar, iqcflag] = level2
            else:
                qc_flag[d, s, ivar, iqcflag] = passflag
            # New test which looks for smaller spike in windspeed that is a 1 hour jump that come back down immediately
            # not implemented?
            # SECOND SET OF TESTS: 3-point spike test (value jumps up and
            # back down within one hour). Only variables with a non-None
            # step_3pt appear in the list below.
            # if (var in ['spd', 't', 'td', 'slp']) and d != ndts - 1: # our code never enters there although it should
            if var in ['wind_speed', 'temp', 'dew', 'pressure'] and d != ndts - 1:
                if (qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or
                        qc_flag[d, s, ivar, irangeflag] in [failflag, mflag] or
                        qc_flag[d+1, s, ivar, irangeflag] in [failflag, mflag]):
                    pass
                    # do nothing if any of the 3 pts in question are missing or out of range.
                elif abs(obs[d, s, ivar] - obs[d-1, s, ivar]) > step_3pt and abs(obs[d, s, ivar] - obs[d+1, s, ivar]) > step_3pt:
                    # !-- Don't recount this spike if it was already counted above.
                    if qc_flag[d, s, ivar, iqcflag] not in [level1, level2]:
                        spikes_found += 1
                    # flag current, previous, and next time with "suspect" flag (lower level flag as this is a new test)
                    qc_flag[d+1, s, ivar, iqcflag] = level1
                    qc_flag[d-1, s, ivar, iqcflag] = level1
                    qc_flag[d, s, ivar, iqcflag] = level1
        # Too many spikes in one day: escalate the whole day's flags.
        if spikes_found >= too_many_spikes:
            for d in range(ndts):
                if qc_flag[d, s, ivar, iqcflag] != notestflag:
                    qc_flag[d, s, ivar, iqcflag] = level2
def persistence_check(obs, nstnnets, var, ivar, qc_flag):
    """
    For each station, for a day of data, calculate mean and std
    deviation. Compare std deviation to set values and if it's too
    small, flag entire day as suspect (1) or warning (2). Also
    check the difference between subsequent obs and flag if this
    difference is too small.

    obs      : 3-d observation array indexed [time, station, variable].
    nstnnets : number of stations.
    var      : variable name used to look up the persistence threshold.
    ivar     : variable index along the third axis of obs.
    qc_flag  : 4-d flag array [time, station, variable, test]; the
               persistence slot (ipersistflag) is updated in place.

    Raises ValueError for an unrecognized variable name.
    """
    iqcflag = ipersistflag
    ndts = len(obs[:, 0, ivar])  # number of dts in obs.
    min_nobs = 8  # minimum number of good obs to do 24-hr std deviation test.
    level = warnflag
    # Minimum allowed day-to-day variability per variable; flatter than this is flagged.
    pdeltas = {
        'dew': 0.1,
        'wind_gust': 0.0,
        'rel_hum': 0.1,
        'mixr': 0.1,
        'pcpn1': mvc,
        'pressure': 10.0,
        'temp': 0.1,
        'wind_speed': 0.0,
        'wind_dir': 0.1
    }
    try:
        pdelta = pdeltas[var]
    except KeyError:
        raise ValueError('Unrecognized variable')
    # For each station, determine # of good obs (individually) for standard deviation and maxdelta portions of the
    # persistence test. Get maxdelta while we're at it too.
    for s in range(nstnnets):
        deltacount = 0
        vali = []
        val = []
        maxdelta = mvc  # mvc is very negative, so any real delta exceeds it.
        # Loop through each dt gathering all non-missing/flagged obs
        # and their indices into 'val' and 'vali' respectively.
        # Also determine the 'maxdelta' between successive non-missing/flagged obs.
        for d in range(ndts):
            # get 'maxdelta' between successive non-missing obs.
            if d > 0:
                # assuming range check has been performed.
                if (qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or
                        qc_flag[d, s, ivar, irangeflag] in [failflag, mflag] or
                        obs[d, s, ivar] == mvc and obs[d-1, s, ivar] == mvc):
                    pass  # do nothing to maxdelta.
                elif abs(obs[d, s, ivar] - obs[d - 1, s, ivar]) > maxdelta:
                    # both the current and previous values are ok, so get a delta value between them.
                    deltacount += 1  # TODO this depends on when would the highest delta show up!
                    maxdelta = abs(obs[d, s, ivar] - obs[d-1, s, ivar])
            if qc_flag[d, s, ivar, irangeflag] in [failflag, notestflag, mflag] or obs[d, s, ivar] == mvc:
                qc_flag[d, s, ivar, iqcflag] = notestflag
            else:
                vali.append(d)
                val.append(obs[d, s, ivar])
        # Only do standard deviation portion of test if there's more than 'min_obs' number of non-missing obs.
        if len(val) >= min_nobs:
            mean = sum(val) / len(val)
            # NOTE(review): this is the population *variance* (no sqrt),
            # although the docstring says standard deviation — confirm the
            # pdeltas thresholds are meant to be compared against variance.
            sd = (sum([(v - mean) ** 2 for v in val]) / len(val))
            # if the maxdelta between any successive obs is too small or if stdev is too small,
            # flag all non-missing (vali) values.
            if sd <= pdelta:
                qc_flag[vali, s, ivar, iqcflag] = level
            else:
                for idx in vali:  # Make sure not to stomp on previous persistence tests! # TODO how could we?
                    if qc_flag[idx, s, ivar, iqcflag] != level:
                        qc_flag[idx, s, ivar, iqcflag] = passflag
        # Only do maxdelta portion of test if there's more than 'min_obs' number of deltas found ('deltacount')
        # for calculating a 'maxdelta'.
        if deltacount >= min_nobs:
            if maxdelta != mvc and maxdelta < pdelta:
                if var != 'rel_hum' or val[0] < 99.0:  # Don't flag RH if it's 99-100% (saturated). # still unclear why they took only the first value
                    qc_flag[vali, s, ivar, iqcflag] = level
            else:
                for idx in vali:
                    if qc_flag[idx, s, ivar, iqcflag] != level:
                        qc_flag[idx, s, ivar, iqcflag] = passflag
        # If there wasn't enough good obs to do this the stddev or maxdelta tests, set flags to indeterminate.
        if len(vali) < min_nobs and deltacount < min_nobs:
            for idx in vali:  # Make sure not to stomp on previous persistence tests!
                if qc_flag[idx, s, ivar, iqcflag] != level and qc_flag[idx, s, ivar, iqcflag] != passflag:
                    qc_flag[idx, s, ivar, iqcflag] = notestflag
def spatial_check(obs, nstnnets, lat, lon, elev, var, ivar, qc_flag):
    """
    Spatial_check does a spatial QC test using a simple neighbor check whereby it looks at stations within a radius
    and elevation band and checks if at least one value is near the value in question. If not, it tries a bigger radius
    and checks again, and if not again, the value is flagged.

    obs      : 3-d observation array indexed [time, station, variable].
    nstnnets : number of stations.
    lat, lon : per-time, per-station coordinate arrays (degrees).
    elev     : per-time, per-station elevation array.
    var      : variable name used to look up spatial thresholds.
    ivar     : variable index along the third axis of obs.
    qc_flag  : 4-d flag array [time, station, variable, test]; the
               spatial slot (ispatialflag) is updated in place.

    Raises ValueError for an unrecognized variable name.
    """
    ndts = len(obs[:, 0, 0])  # number of hours in obs.
    roism = 100.0  # smaller radius of influence.
    roibg = 150.0  # bigger radius of influence.
    min_stations = 2  # min # of stns needed for testing.
    level1 = suspflag
    level2 = warnflag
    latdiff = 3.0  # coarse lat/lon box used to pre-filter candidate neighbors.
    londiff = 3.0
    # (suspect diff, warning diff, max elevation difference) per variable.
    thresholds = {
        'pressure': (750.0, 1000.0, 1000.0),
        'temp': (5.556, 8.333, 150.0),  # (10degF), (15degF)
        'dew': (5.556, 8.333, 150.0),  # (10degF), (15degF)
        'wind_speed': (7.65, 10.2, 250.0),  # (15kts), (20kts)
        'wind_dir': (360.0, 360.0, 250.0),
        'rel_hum': (75.0, 85.0, 250.0),
        'pcp6': (76.2, 101.6, 500.0),  # (mm; eq 3 inches), (mm; eq 4 inches)
        'pcp24': (152.4, 203.2, 500.0),  # (mm; eq 6 inches), (mm; eq 8 inches).
    }
    try:
        maxvdiff1, maxvdiff2, max_elev_diff = thresholds[var]
    except KeyError:
        raise ValueError('Unrecognized variable')
    # If variable is precip, look for traces make them 0.0 (not permanently as these data values don't get sent back out)
    # NOTE(review): `var == 'pcp'` can never be true here because 'pcp' is
    # not a key of thresholds (the lookup above would already have raised) —
    # confirm whether 'pcp6'/'pcp24' were intended.
    if var == 'pcp':
        for d in range(ndts):
            for s in range(nstnnets):
                if obs[d, s, ivar] == trace:
                    obs[d, s, ivar] = 0.0  # obs[:,:,ivar]...
    # Cliff's simple similar neighbor test.
    for d in range(ndts):
        for s in range(nstnnets):
            if obs[d, s, ivar] == mvc or elev[d, s] == mvc or qc_flag[d, s, ivar, irangeflag] == failflag:
                qc_flag[d, s, ivar, ispatialflag] = notestflag
                continue
            valsm2 = []  # |obs diff| to neighbors inside the small radius.
            valbg2 = []  # |obs diff| to neighbors in the big-radius annulus.
            # for each station, check it versus every other station (except itself). First time through get # of
            # stations within radius of influence to determine if we can do this test.
            for ss in range(nstnnets):
                if ss == s or obs[d, ss, ivar] == mvc \
                        or elev[d, ss] == mvc or lat[d, ss] == mvc or lon[d, ss] == mvc \
                        or abs(lat[d, ss] - lat[d, s]) > latdiff or abs(lon[d, ss] - lon[d, s]) > londiff \
                        or abs(elev[d, ss] - elev[d, s]) > max_elev_diff:
                    continue
                # Skip neighbors already flagged by earlier tests.
                if qc_flag[d, ss, ivar, irangeflag] == failflag \
                        or qc_flag[d, ss, ivar, istepflag] in [suspflag, warnflag] \
                        or qc_flag[d, ss, ivar, ipersistflag] in [suspflag, warnflag]:
                    continue
                dist = distance(lat[d, s], lon[d, s], lat[d, ss], lon[d, ss])
                obsdiff = abs(obs[d, ss, ivar] - obs[d, s, ivar])
                if dist < roism:
                    valsm2.append(obsdiff)
                elif dist < roibg:
                    valbg2.append(obsdiff)
            # !--- If any obs found in roi was <= maxvdiff1, it's a pass. If none found <= maxvdiff1,
            # but one is >= maxvdiff1 & < maxvdiff2, it's "suspect." Otherwise it's "warning." Look in big roi too.
            if len(valsm2) >= min_stations:
                mindiffsm = min(valsm2)
                if mindiffsm <= maxvdiff1:
                    qc_flag[d, s, ivar, ispatialflag] = passflag
                elif maxvdiff1 < mindiffsm <= maxvdiff2:
                    qc_flag[d, s, ivar, ispatialflag] = level1
                else:
                    qc_flag[d, s, ivar, ispatialflag] = level2
            elif len(valsm2) < min_stations <= len(valbg2):
                qc_flag[d, s, ivar, ispatialflag] = passflag if min(valbg2) <= maxvdiff2 else level1
            else:  # not enough obs in either roi to do test.
                qc_flag[d, s, ivar, ispatialflag] = notestflag
#
# !-----------------------------------------------------------------------------
# !--- Spatial Test for Wind Direction
# !-----------------------------------------------------------------------------
#
# subroutine spatial_check_dir(obs, dts, stnnets, lat, lon, elev, &
# var, ivar, &
# qc_flag, roi_data_file)
#
# !--- spatial_check_dir does a spatial QC test on wind direction
# !--- by gathering the closest 'min_stations' stations within a
# !--- radius 'roi' and calculating a vector average spd and dir from
# !--- those stations, and if the dir from the station in question is
# !--- more than 'dir_diff' degrees from the vector average, the
# !--- station's dir is flagged.
# !---
# !--- obs data array with dimensions hrs, variables, stations,
# !--- stnnets character array of station names and netids
# !--- lat station latitude for each station in d array.
# !--- lon station longitude for each station in d array.
# !--- elev station elevation for each station in d array.
# !--- var character string name of the variable
# !--- ivar array index of d for the variable in question
# !--- qc_flag array of incoming/outgoing quality control flags.
#
# !-- changing values of d for zeroing traces -- can't define 'IN' intent.
# real, dimension(:,:,:) :: obs
# character(len=10), dimension(:), intent(IN) :: dts
# character(len=*), dimension(:), intent(IN) :: stnnets
# real, dimension(:,:), intent(IN) :: lat
# real, dimension(:,:), intent(IN) :: lon
# real, dimension(:,:), intent(IN) :: elev
# character(len=*), intent(IN) :: var
# integer, intent(IN) :: ivar
# integer, dimension(:,:,:,:), intent(INOUT) :: qc_flag
# character(len=*) , intent(IN) :: roi_data_file
#
# integer :: d, i, n, s, ss, nstnnets, ndts
# integer :: min_stations, debug, level1, level2
# integer :: found_one, countem_sm, countem_bg
# integer :: print_roi_data
# real :: max_elev_diff, elev_diff, maxvdiff1, maxvdiff2
# real :: dist, roi_sm, roi_bg, latdiff, londiff, dir_diff
# real :: dir_thresh, spd_thresh, spd_thresh_roi, min_diff
#
# character(len=10) :: dt_c
#
# character(len=2), allocatable, dimension(:) :: netids
#
# real, allocatable, dimension(:) :: roi_dist
# real, allocatable, dimension(:) :: roi_spd
# real, allocatable, dimension(:) :: roi_dir
# real, allocatable, dimension(:) :: roi_lat
# real, allocatable, dimension(:) :: roi_lon
# real, allocatable, dimension(:) :: roi_elev
# character(len=8), allocatable, dimension(:) :: roi_stnnets
#
# real, allocatable, dimension(:) :: dist_sort
# integer, allocatable, dimension(:) :: isort
#
# intrinsic maxval,minval,size
#
# !--- Settings appropriate for all variables.
# debug = 0 ! sets debug level (sets what to print)
# print_roi_data = 1
# nstnnets = size(stnnets) ! get # of stations in obs.
# ndts = size(obs(:,1,1)) ! # of hours in obs.
# min_stations = 5 ! min # of stns needed for testing.
# roi_sm = 50 ! small radius for dir qc test.
# roi_bg = 75 ! big radius for dir qc test.
# dir_thresh = 85 ! diff in dir required for flagging.
# spd_thresh = 2.5722 ! spd_thresh for station in question.
# spd_thresh_roi = 0.5144 ! spd_thresh for stations in roi.
# level1 = suspflag ! "suspect" flag.
# level2 = warnflag ! "warning" flag.
# latdiff = 3.0 ! to speed up station finding...
# londiff = 3.0 ! to speed up station finding...
# max_elev_diff = 99999999.0 ! for now, allow any elevations.
#
# !--- pull netids out of the stnnets.
# allocate(netids(nstnnets))
# netids = stnnets(:)(7:8)
#
# allocate( roi_dist (nstnnets) )
# allocate( roi_spd (nstnnets) )
# allocate( roi_dir (nstnnets) )
# allocate( roi_stnnets (nstnnets) )
# allocate( roi_lat (nstnnets) )
# allocate( roi_lon (nstnnets) )
# allocate( roi_elev (nstnnets) )
#
# !--- Loop through each date.
# do d = 1, ndts
#
# dt_c = dts(d)
#
# !--- Loop through each station.
# do s = 1, nstnnets
#
# !--- Skip this station/obs if it's mvc, or if the speed is
# !--- below 'spd_thresh'. Since as of 1/16/2007 many obs
# !--- have bad elevation data, NOT skipping if this is missing
# !--- (probably change this when station elevation data gets
# !--- fixed by Dave Carey).
# if (obs(d,s,ispd) .lt. spd_thresh .or. &
# obs(d,s,ispd) .eq. mvc .or. &
# obs(d,s,idir) .eq. mvc .or. &
# obs(d,s,iu) .eq. mvc .or. &
# obs(d,s,iv) .eq. mvc .or. &
# qc_flag(d,s,ispd,irangeflag) .eq. failflag .or. &
# qc_flag(d,s,idir,irangeflag) .eq. failflag) then
# qc_flag(d,s,ivar,ispatialflag) = notestflag
# cycle
# end if
#
# found_one = 0
# countem_sm = 0
# countem_bg = 0
# !--- for each station, check it versus every other station
# !--- (except itself). First time through get # of stations
# !--- within radius of influence to determine if we can do
# !--- this test.
# do ss = 1, nstnnets
#
# !--- skip station for any of the following reasons: same
# !--- station as 's' (ss.eq.s), it's not within a lat/lon box
# !--- defined by latdiff/long diff, the value is mvc,
# !--- the value has been flagged in other tests, or the
# !--- elevation data is mvc.
# if ( abs(lat(d,ss) - lat(d,s) ) .gt. latdiff) cycle
# if ( abs(lon(d,ss) - lon(d,s) ) .gt. londiff) cycle
# if ( obs(d,ss,ispd) .eq. mvc ) cycle
# if ( obs(d,ss,idir) .eq. mvc ) cycle
# if ( obs(d,ss,iu) .eq. mvc ) cycle
# if ( obs(d,ss,iv) .eq. mvc ) cycle
# if ( lat(d,ss) .eq. mvc ) cycle
# if ( lon(d,ss) .eq. mvc ) cycle
# if ( obs(d,ss,ispd) .lt. spd_thresh_roi ) cycle
# if ( abs(elev(d,ss) - elev(d,s) ) .gt. max_elev_diff) cycle
# if ( qc_flag(d,ss,ivar,irangeflag) .eq. failflag ) cycle
# if ( qc_flag(d,ss,ivar,istepflag) .eq. suspflag ) cycle
# if ( qc_flag(d,ss,ivar,istepflag) .eq. warnflag ) cycle
# if ( qc_flag(d,ss,ivar,ipersistflag) .eq. suspflag ) cycle
# if ( qc_flag(d,ss,ivar,ipersistflag) .eq. warnflag ) cycle
# if ( ss .eq. s ) cycle
#
# call distance(lat(d,s),lon(d,s), lat(d,ss),lon(d,ss), dist)
#
# !--- Only add up number of stations in small roi, and keep
# !--- around values in big roi.
# if (dist .le. roi_sm) then
# countem_sm = countem_sm + 1
# end if
# if (dist .le. roi_bg) then
# countem_bg = countem_bg + 1
# roi_dist(countem_bg) = dist
# roi_spd(countem_bg) = obs(d,ss,ispd)
# roi_dir(countem_bg) = obs(d,ss,idir)
# roi_stnnets(countem_bg) = stnnets(ss)
# roi_lat(countem_bg) = lat(d,ss)
# roi_lon(countem_bg) = lon(d,ss)
# roi_elev(countem_bg) = elev(d,ss)
# end if
#
# end do
#
# !--- if there's enough good stations in small roi, expand to big
# !--- roi and use data from it.
# if (countem_sm .ge. min_stations .and. &
# countem_bg .ge. min_stations) then
#
# !--- calculate the minimum difference between the direction
# !--- in question and the obs in the roi.
# min_diff = 9999.0
# do i = 1, countem_bg
# ! call ndir_diff(dir_diff, roi_dir(i), obs(d,s,idir))
# if (dir_diff .lt. min_diff) then
# min_diff = dir_diff
# end if
# end do
#
# !--- if vector average speed as well as the station in
# !--- question's speed is high enough and the direction is
# !--- different enough from the vector average, flag it.
# if (min_diff .gt. dir_thresh) then
# qc_flag(d,s,ivar,ispatialflag) = warnflag
#
# !--- if set, print out station and roi stations data.
# 17 format(i1,',', a10, ',', a8, ',', 4(f12.3,','), f12.3)
# if (print_roi_data .eq. 1) then
# open(unit=100, file=trim(roi_data_file), &
# position="append")
# write(100,17) 1, dt_c, &
# stnnets(s),elev(d,s),lat(d,s),&
# lon(d,s), obs(d,s,idir), obs(d,s,ispd)
# do i = 1, countem_bg
# write(100,17) 0, dt_c, &
# roi_stnnets(i), roi_elev(i), &
# roi_lat(i), roi_lon(i), &
# roi_dir(i), roi_spd(i)
# end do
# close(100)
# end if
# end if
#
# else
#
# !--- not enough obs in roi to do test.
# qc_flag(d,s,ivar,ispatialflag) = notestflag
#
# end if
#
# end do !--- stations do loop.
# end do !--- dates do loop.
#
# end subroutine spatial_check_dir
#
def distance(lat1, lon1, lat2, lon2):
    """
    Great-circle distance between two points given in decimal degrees.

    lat1, lon1 : coordinates of the first point (degrees).
    lat2, lon2 : coordinates of the second point (degrees).
    Returns the distance in kilometers.
    """
    pi = atan2(0.0, -1.0)  # portable way to obtain pi.
    theta = lon1 - lon2
    lat1_r = lat1 * (pi / 180.)
    lat2_r = lat2 * (pi / 180.)
    theta_r = theta * (pi / 180.)
    # BUG FIX: floating-point round-off can push this quantity slightly
    # outside [-1, 1] for identical or near-antipodal points, making acos
    # raise a math domain error. Clamp it into the valid domain.
    arg = sin(lat1_r) * sin(lat2_r) + cos(lat1_r) * cos(lat2_r) * cos(theta_r)
    dist = acos(max(-1.0, min(1.0, arg)))
    dist_temp = (dist * (180. / pi)) * 60 * 1.1515  # central angle -> statute miles.
    # only returning in km units for now.
    return dist_temp * 1.609344
| td_gt_t_tol = 2.0 # tolerance for dew point being > temperature.
iqcflag = irangeflag # array index of range test.
ndts = len(obs[:, 0, ivar]) # nr of hours in d.
it = 0 # index temp
itd = 0 # index temp dew
for d in range(ndts): # timestamps
for s in range(nstnnets): # stations
if qc_flag[d, s, ivar, iqcflag] == passflag:
# !--- if this is temperature or dew point, make sure dew point
# !--- is not greater than temperature (by 'td_gt_t_tol').
if var in ['t', 'temp', 'td', 'dewp']:
if obs[d, s, itd] != mvc and obs[d, s, it] != mvc and obs[d, s, itd] - obs[d, s, it] > td_gt_t_tol:
qc_flag[d, s, it, iqcflag] = 2
qc_flag[d, s, itd, iqcflag] = 2 # why hardcoded 2? warnflag | identifier_body |
core.py | from math import atan2, acos, cos, sin
nqcflags = 9  # total number of qc flags
passflag = 0  # "pass" qc value.
suspflag = 1  # "suspect" qc value.
warnflag = 2  # "warning" qc value.
failflag = 3  # "failed" qc value.
notestflag = 8  # "not-tested" qc value.
mflag = 9  # "missing" qc value.
irangeflag = 0  # index for range test qc flag.
istepflag = 1  # index for step test qc flag.
ipersistflag = 2  # index for persistence test flag.
ispatialflag = 3  # index for spatial test qc flag.
mvc = -888888.0  # missing-value code used throughout the obs/qc arrays.
obsmvc = -888888.0  # missing-value code for raw observations.
trace = -666666.0  # sentinel for a precipitation "trace" amount.
# NOTE(review): ispd/idir/iu/iv are all 1 — they look like placeholder
# indices for the speed/direction/u/v variables; confirm the real values.
ispd = 1
idir = 1
iu = 1
iv = 1
def range_check(obs, nstnnets, var, ivar, qc_flag):
    """
    Gross range ("limits") test: flag every observation that falls outside
    the physically plausible range for its variable.

    obs      : 3-d observation array indexed [time, station, variable].
    nstnnets : number of stations (second axis of obs).
    var      : variable name, used to look up the allowed range.
    ivar     : index of the variable along the third axis of obs.
    qc_flag  : 4-d flag array [time, station, variable, test]; the
               range-test slot (irangeflag) is updated in place.

    Raises ValueError for an unrecognized variable name.
    """
    iqcflag = irangeflag
    ndts = len(obs[:, 0, ivar])  # nr of hours in d.
    # Allowed (min, max) per variable. Temperatures in Kelvin, pressure in Pa.
    allowed_range = {
        'temp': (225, 323),
        'dew': (225, 323),
        'wind_dir': (0, 360),
        'wind_speed': (0, 60),
        'wind_gust': (0, 70),
        'pressure': (80000, 107840),  # Pa, not hPa!
        'rel_hum': (0, 105),
        # let's keep the values for variables we don't have (yet?)
        'mixr': (0, 25),
        'pcp6': (0, 256),
        'pcp24': (0, 508)
    }
    try:
        minrange, maxrange = allowed_range[var]
    except KeyError:
        raise ValueError('Cannot recognize variable type')
    for d in range(ndts):  # timestamps
        for s in range(nstnnets):  # stations
            if obs[d, s, ivar] == mvc:
                qc_flag[d, s, ivar, iqcflag] = mflag
                continue
            if obs[d, s, ivar] < minrange or obs[d, s, ivar] > maxrange:
                # Don't flag traces for precip!
                # BUG FIX: the original compared the entire `obs` array with
                # `trace` (`obs == trace`) instead of the single value under
                # test; compare the scalar element instead.
                # NOTE(review): `var == 'pcp'` can never be true here because
                # 'pcp' is not a key of allowed_range (the lookup above would
                # already have raised) — confirm whether 'pcp6'/'pcp24' were
                # intended.
                qc_flag[d, s, ivar, iqcflag] = passflag if var == 'pcp' and obs[d, s, ivar] == trace else failflag
            else:
                qc_flag[d, s, ivar, iqcflag] = passflag
def test_temp_vs_dew(obs, nstnnets, var, ivar, qc_flag):
    """
    Consistency test run after the other QC tests: if both temperature and
    dew point are present, dew point must not exceed temperature by more
    than a small tolerance; offending pairs get a "warning" range flag.

    obs      : 3-d observation array indexed [time, station, variable].
    nstnnets : number of stations.
    var      : variable name; only temp/dew variables are checked.
    ivar     : variable index along the third axis of obs.
    qc_flag  : 4-d flag array [time, station, variable, test]; the
               range-test slot (irangeflag) is updated in place.
    """
    # If there is both temp and tdew and after we made all the tests.
    td_gt_t_tol = 2.0  # tolerance for dew point being > temperature.
    iqcflag = irangeflag  # array index of range test.
    ndts = len(obs[:, 0, ivar])  # nr of hours in d.
    # NOTE(review): it and itd are both 0, so the comparison below is a
    # value against itself and can never exceed the tolerance — confirm the
    # real temperature and dew-point indices.
    it = 0  # index temp
    itd = 0  # index temp dew
    for d in range(ndts):  # timestamps
        for s in range(nstnnets):  # stations
            if qc_flag[d, s, ivar, iqcflag] == passflag:
                # !--- if this is temperature or dew point, make sure dew point
                # !--- is not greater than temperature (by 'td_gt_t_tol').
                if var in ['t', 'temp', 'td', 'dewp']:
                    if obs[d, s, itd] != mvc and obs[d, s, it] != mvc and obs[d, s, itd] - obs[d, s, it] > td_gt_t_tol:
                        # Use the named flag constant instead of the previous
                        # hardcoded 2 (same value: warnflag == 2).
                        qc_flag[d, s, it, iqcflag] = warnflag
                        qc_flag[d, s, itd, iqcflag] = warnflag
def step_check(obs, nstnnets, var, ivar, qc_flag):
    """
    Perform "delta test" which, for each station checks each hour
    in a day, checks for jumps between consecutive observations
    that exceed a given threshold.

    obs      : 3-d observation array indexed [time, station, variable].
    nstnnets : number of stations.
    var      : variable name used to look up the step thresholds.
    ivar     : variable index along the third axis of obs.
    qc_flag  : 4-d flag array [time, station, variable, test]; the
               step-test slot (istepflag) is updated in place.

    Raises ValueError for an unrecognized variable name.
    """
    iqcflag = istepflag
    ndts = len(obs[:, 0, ivar])  # # of hours in d.
    level1 = suspflag
    level2 = warnflag
    # if too_many_spikes (hours around which a spike is found) or more are found, flag all ndts worth.
    too_many_spikes = 2
    # set two maximum absolute steps allowable for each variable, one for "suspect" flag and one for "warning" flag.
    # Tuple layout: (suspect threshold, warning threshold, 3-point spike threshold or None).
    steps = {
        'temp': (10.0, 15.0, 8.0),
        'dew': (10.0, 15.0, 8.0),
        'wind_gust': (40.0, 50.0, None),
        'rel_hum': (60.0, 80.0, None),
        'pressure': (1500.0, 2000.0, 1000.0),
        'wind_speed': (25.0, 35.0, 15.0),
        'wind_dir': (361., 361., None),
        'mixr': (7.0, 10.0, None),
        'pcpn1': (1.0, 2.0, None),
    }
    try:
        step1, step2, step_3pt = steps[var]
    except KeyError:
        raise ValueError('Unrecognized variable')
    for s in range(nstnnets):
        spikes_found = 0
        for d in range(ndts):
            if d == 0:
                # No previous hour to difference against.
                qc_flag[d, s, ivar, iqcflag] = notestflag
                continue
            # FIRST SET OF TESTS: two-point (consecutive-hour) jump test.
            if qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or qc_flag[d, s, ivar, irangeflag] in [failflag, mflag]:
                qc_flag[d, s, ivar, iqcflag] = notestflag
            elif qc_flag[d, s, ivar, istepflag] in [level1, level2]:
                pass  # do nothing... must have been flagged by 3pt test for the previous d.
            # NOTE(review): a jump exactly equal to step1 is not flagged by
            # either branch below — confirm "<" vs "<=" against the original.
            elif step1 < abs(obs[d, s, ivar] - obs[d-1, s, ivar]) < step2:  # flag current and previous time with susp
                spikes_found += 1
                qc_flag[d-1, s, ivar, iqcflag] = level1
                qc_flag[d, s, ivar, iqcflag] = level1
            elif abs(obs[d, s, ivar] - obs[d-1, s, ivar]) >= step2:  # flag current and previous time with warning
                spikes_found += 1
                qc_flag[d-1, s, ivar, iqcflag] = level2
                qc_flag[d, s, ivar, iqcflag] = level2
            else:
                qc_flag[d, s, ivar, iqcflag] = passflag
            # New test which looks for smaller spike in windspeed that is a 1 hour jump that come back down immediately
            # not implemented?
            # SECOND SET OF TESTS: 3-point spike test (value jumps up and
            # back down within one hour). Only variables with a non-None
            # step_3pt appear in the list below.
            # if (var in ['spd', 't', 'td', 'slp']) and d != ndts - 1: # our code never enters there although it should
            if var in ['wind_speed', 'temp', 'dew', 'pressure'] and d != ndts - 1:
                if (qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or
                        qc_flag[d, s, ivar, irangeflag] in [failflag, mflag] or
                        qc_flag[d+1, s, ivar, irangeflag] in [failflag, mflag]):
                    pass
                    # do nothing if any of the 3 pts in question are missing or out of range.
                elif abs(obs[d, s, ivar] - obs[d-1, s, ivar]) > step_3pt and abs(obs[d, s, ivar] - obs[d+1, s, ivar]) > step_3pt:
                    # !-- Don't recount this spike if it was already counted above.
                    if qc_flag[d, s, ivar, iqcflag] not in [level1, level2]:
                        spikes_found += 1
                    # flag current, previous, and next time with "suspect" flag (lower level flag as this is a new test)
                    qc_flag[d+1, s, ivar, iqcflag] = level1
                    qc_flag[d-1, s, ivar, iqcflag] = level1
                    qc_flag[d, s, ivar, iqcflag] = level1
        # Too many spikes in one day: escalate the whole day's flags.
        if spikes_found >= too_many_spikes:
            for d in range(ndts):
                if qc_flag[d, s, ivar, iqcflag] != notestflag:
                    qc_flag[d, s, ivar, iqcflag] = level2
def persistence_check(obs, nstnnets, var, ivar, qc_flag):
"""
For each station, for a day of data, calculate mean and std
deviation. Compare std deviation to set values and if it's too
small, flag entire day as suspect (1) or warning (2). Also
check the difference between subsequent obs and flag if this
difference is too small.
"""
iqcflag = ipersistflag
ndts = len(obs[:, 0, ivar]) # number of dts in obs.
min_nobs = 8 # minimum number of good obs to do 24-hr std deviation test.
level = warnflag
pdeltas = {
'dew': 0.1,
'wind_gust': 0.0,
'rel_hum': 0.1,
'mixr': 0.1,
'pcpn1': mvc,
'pressure': 10.0,
'temp': 0.1,
'wind_speed': 0.0,
'wind_dir': 0.1
}
try:
pdelta = pdeltas[var]
except KeyError:
raise ValueError('Unrecognized variable')
# For each station, determine # of good obs (individually) for standard deviation and maxdelta portions of the
# persistence test. Get maxdelta while we're at it too.
for s in range(nstnnets):
deltacount = 0
vali = []
val = []
maxdelta = mvc
# Loop through each dt gathering all non-missing/flagged obs
# and their indices into 'val' and 'vali' respectively.
# Also determine the 'maxdelta' between successive non-missing/flagged obs.
for d in range(ndts):
# get 'maxdelta' between successive non-missing obs.
if d > 0:
# assuming range check has been performed.
if (qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or
qc_flag[d, s, ivar, irangeflag] in [failflag, mflag] or
obs[d, s, ivar] == mvc and obs[d-1, s, ivar] == mvc):
pass # do nothing to maxdelta.
elif abs(obs[d, s, ivar] - obs[d - 1, s, ivar]) > maxdelta:
# both the current and previous values are ok, so get a delta value between them.
deltacount += 1 # TODO this depends on when would the highest delta show up!
maxdelta = abs(obs[d, s, ivar] - obs[d-1, s, ivar])
if qc_flag[d, s, ivar, irangeflag] in [failflag, notestflag, mflag] or obs[d, s, ivar] == mvc:
qc_flag[d, s, ivar, iqcflag] = notestflag
else:
vali.append(d)
val.append(obs[d, s, ivar])
# Only do standard deviation portion of test if there's more than 'min_obs' number of non-missing obs.
if len(val) >= min_nobs:
mean = sum(val) / len(val)
sd = (sum([(v - mean) ** 2 for v in val]) / len(val))
# if the maxdelta between any successive obs is too small or if stdev is too small,
# flag all non-missing (vali) values.
if sd <= pdelta:
qc_flag[vali, s, ivar, iqcflag] = level
else:
for idx in vali: # Make sure not to stomp on previous persistence tests! # TODO how could we?
if qc_flag[idx, s, ivar, iqcflag] != level:
qc_flag[idx, s, ivar, iqcflag] = passflag
# Only do maxdelta portion of test if there's more than 'min_obs' number of deltas found ('deltacount')
# for calculating a 'maxdelta'.
if deltacount >= min_nobs:
if maxdelta != mvc and maxdelta < pdelta:
if var != 'rel_hum' or val[0] < 99.0: # Don't flag RH if it's 99-100% (saturated). # still unclear why they took only the first value
qc_flag[vali, s, ivar, iqcflag] = level
else:
for idx in vali:
if qc_flag[idx, s, ivar, iqcflag] != level:
qc_flag[idx, s, ivar, iqcflag] = passflag
# If there wasn't enough good obs to do this the stddev or maxdelta tests, set flags to indeterminate.
if len(vali) < min_nobs and deltacount < min_nobs:
|
def spatial_check(obs, nstnnets, lat, lon, elev, var, ivar, qc_flag):
    """
    Spatial QC test using a simple neighbor check: for each ob it looks at
    stations within a radius and elevation band and checks whether at least
    one neighbor value is near the value in question. If not, it tries a
    bigger radius and checks again, and if not again, the value is flagged.

    obs      -- 3-d array indexed [time, station, variable]
    nstnnets -- number of stations to check
    lat, lon, elev -- 2-d station-metadata arrays indexed [time, station]
    var      -- variable name (key into the thresholds table)
    ivar     -- variable index into obs / qc_flag
    qc_flag  -- 4-d flag array indexed [time, station, variable, test];
                results are written into the ispatialflag slot.
    Raises ValueError for an unknown variable name.
    """
    ndts = len(obs[:, 0, 0])  # number of hours in obs.
    roism = 100.0  # smaller radius of influence.
    roibg = 150.0  # bigger radius of influence.
    min_stations = 2  # min # of stns needed for testing.
    level1 = suspflag
    level2 = warnflag
    latdiff = 3.0  # lat/lon prefilter box to speed up the neighbor search.
    londiff = 3.0
    # per-variable (suspect diff, warning diff, max elevation difference).
    thresholds = {
        'pressure': (750.0, 1000.0, 1000.0),
        'temp': (5.556, 8.333, 150.0),  # (10degF), (15degF)
        'dew': (5.556, 8.333, 150.0),  # (10degF), (15degF)
        'wind_speed': (7.65, 10.2, 250.0),  # (15kts), (20kts)
        'wind_dir': (360.0, 360.0, 250.0),
        'rel_hum': (75.0, 85.0, 250.0),
        'pcp6': (76.2, 101.6, 500.0),  # (mm; eq 3 inches), (mm; eq 4 inches)
        'pcp24': (152.4, 203.2, 500.0),  # (mm; eq 6 inches), (mm; eq 8 inches).
    }
    try:
        maxvdiff1, maxvdiff2, max_elev_diff = thresholds[var]
    except KeyError:
        raise ValueError('Unrecognized variable')
    # If variable is precip, look for traces make them 0.0 (not permanently as these data values don't get sent back out)
    # NOTE(review): 'pcp' is not a key of `thresholds`, so for var == 'pcp'
    # the lookup above already raised and this branch is unreachable --
    # probably should test 'pcp6'/'pcp24'; confirm intent.
    if var == 'pcp':
        for d in range(ndts):
            for s in range(nstnnets):
                if obs[d, s, ivar] == trace:
                    obs[d, s, ivar] = 0.0  # obs[:,:,ivar]...
    # Cliff's simple similar neighbor test.
    for d in range(ndts):
        for s in range(nstnnets):
            # can't test a missing ob, missing elevation, or a range failure.
            if obs[d, s, ivar] == mvc or elev[d, s] == mvc or qc_flag[d, s, ivar, irangeflag] == failflag:
                qc_flag[d, s, ivar, ispatialflag] = notestflag
                continue
            valsm2 = []  # |ob - neighbor| diffs within the small radius.
            valbg2 = []  # diffs in the big radius (beyond the small one).
            # for each station, check it versus every other station (except itself). First time through get # of
            # stations within radius of influence to determine if we can do this test.
            for ss in range(nstnnets):
                if ss == s or obs[d, ss, ivar] == mvc \
                        or elev[d, ss] == mvc or lat[d, ss] == mvc or lon[d, ss] == mvc \
                        or abs(lat[d, ss] - lat[d, s]) > latdiff or abs(lon[d, ss] - lon[d, s]) > londiff \
                        or abs(elev[d, ss] - elev[d, s]) > max_elev_diff:
                    continue
                # skip neighbors that themselves failed range or were flagged
                # by the step/persistence tests.
                if qc_flag[d, ss, ivar, irangeflag] == failflag \
                        or qc_flag[d, ss, ivar, istepflag] in [suspflag, warnflag] \
                        or qc_flag[d, ss, ivar, ipersistflag] in [suspflag, warnflag]:
                    continue
                dist = distance(lat[d, s], lon[d, s], lat[d, ss], lon[d, ss])
                obsdiff = abs(obs[d, ss, ivar] - obs[d, s, ivar])
                if dist < roism:
                    valsm2.append(obsdiff)
                elif dist < roibg:
                    valbg2.append(obsdiff)
            # !--- If any obs found in roi was <= maxvdiff1, it's a pass. If none found <= maxvdiff1,
            # but one is >= maxvdiff1 & < maxvdiff2, it's "suspect." Otherwise it's "warning." Look in big roi too.
            if len(valsm2) >= min_stations:
                mindiffsm = min(valsm2)
                if mindiffsm <= maxvdiff1:
                    qc_flag[d, s, ivar, ispatialflag] = passflag
                elif maxvdiff1 < mindiffsm <= maxvdiff2:
                    qc_flag[d, s, ivar, ispatialflag] = level1
                else:
                    qc_flag[d, s, ivar, ispatialflag] = level2
            elif len(valsm2) < min_stations <= len(valbg2):
                # not enough close neighbors, but enough in the big radius.
                qc_flag[d, s, ivar, ispatialflag] = passflag if min(valbg2) <= maxvdiff2 else level1
            else:  # not enough obs in either roi to do test.
                qc_flag[d, s, ivar, ispatialflag] = notestflag
#
# !-----------------------------------------------------------------------------
# !--- Spatial Test for Wind Direction
# !-----------------------------------------------------------------------------
#
# subroutine spatial_check_dir(obs, dts, stnnets, lat, lon, elev, &
# var, ivar, &
# qc_flag, roi_data_file)
#
# !--- spatial_check_dir does does a spatial QC test on wind direction
# !--- by gathering the closest 'min_stations' stations within a
# !--- radius 'roi' and calculating a vector average spd and dir from
# !--- those stations, and if the dir from the station in question is
# !--- more than 'dir_diff' degrees from the vector average, the
# !--- station's dir is flagged.
# !---
# !--- obs data array with dimensions hrs, variables, stations,
# !--- stnnets character array of station names and netids
# !--- lat station latitude for each station in d array.
# !--- lon station longitude for each station in d array.
# !--- elev station elevation for each station in d array.
# !--- var character string name of the variable
# !--- ivar array index of d for the variable in question
# !--- qc_flag array of incoming/outgoing quality control flags.
#
# !-- changing values of d for zeroing traces -- can't define 'IN' intent.
# real, dimension(:,:,:) :: obs
# character(len=10), dimension(:), intent(IN) :: dts
# character(len=*), dimension(:), intent(IN) :: stnnets
# real, dimension(:,:), intent(IN) :: lat
# real, dimension(:,:), intent(IN) :: lon
# real, dimension(:,:), intent(IN) :: elev
# character(len=*), intent(IN) :: var
# integer, intent(IN) :: ivar
# integer, dimension(:,:,:,:), intent(INOUT) :: qc_flag
# character(len=*) , intent(IN) :: roi_data_file
#
# integer :: d, i, n, s, ss, nstnnets, ndts
# integer :: min_stations, debug, level1, level2
# integer :: found_one, countem_sm, countem_bg
# integer :: print_roi_data
# real :: max_elev_diff, elev_diff, maxvdiff1, maxvdiff2
# real :: dist, roi_sm, roi_bg, latdiff, londiff, dir_diff
# real :: dir_thresh, spd_thresh, spd_thresh_roi, min_diff
#
# character(len=10) :: dt_c
#
# character(len=2), allocatable, dimension(:) :: netids
#
# real, allocatable, dimension(:) :: roi_dist
# real, allocatable, dimension(:) :: roi_spd
# real, allocatable, dimension(:) :: roi_dir
# real, allocatable, dimension(:) :: roi_lat
# real, allocatable, dimension(:) :: roi_lon
# real, allocatable, dimension(:) :: roi_elev
# character(len=8), allocatable, dimension(:) :: roi_stnnets
#
# real, allocatable, dimension(:) :: dist_sort
# integer, allocatable, dimension(:) :: isort
#
# intrinsic maxval,minval,size
#
# !--- Settings appropriate for all variables.
# debug = 0 ! sets debug level (sets what to print)
# print_roi_data = 1
# nstnnets = size(stnnets) ! get # of stations in obs.
# ndts = size(obs(:,1,1)) ! # of hours in obs.
# min_stations = 5 ! min # of stns needed for testing.
# roi_sm = 50 ! small radius for dir qc test.
# roi_bg = 75 ! big radius for dir qc test.
# dir_thresh = 85 ! diff in dir required for flagging.
# spd_thresh = 2.5722 ! spd_thresh for station in question.
# spd_thresh_roi = 0.5144 ! spd_thresh for stations in roi.
# level1 = suspflag ! "suspect" flag.
# level2 = warnflag ! "warning" flag.
# latdiff = 3.0 ! to speed up station finding...
# londiff = 3.0 ! to speed up station finding...
# max_elev_diff = 99999999.0 ! for now, allow any elevations.
#
# !--- pull netids out of the stnnets.
# allocate(netids(nstnnets))
# netids = stnnets(:)(7:8)
#
# allocate( roi_dist (nstnnets) )
# allocate( roi_spd (nstnnets) )
# allocate( roi_dir (nstnnets) )
# allocate( roi_stnnets (nstnnets) )
# allocate( roi_lat (nstnnets) )
# allocate( roi_lon (nstnnets) )
# allocate( roi_elev (nstnnets) )
#
# !--- Loop through each date.
# do d = 1, ndts
#
# dt_c = dts(d)
#
# !--- Loop through each station.
# do s = 1, nstnnets
#
# !--- Skip this station/obs if it's mvc, or if the speed is
# !--- below 'spd_thresh'. Since as of 1/16/2007 many obs
# !--- have bad elevation data, NOT skipping if this is missing
# !--- (probably change this when station elevation data gets
# !--- fixed by Dave Carey).
# if (obs(d,s,ispd) .lt. spd_thresh .or. &
# obs(d,s,ispd) .eq. mvc .or. &
# obs(d,s,idir) .eq. mvc .or. &
# obs(d,s,iu) .eq. mvc .or. &
# obs(d,s,iv) .eq. mvc .or. &
# qc_flag(d,s,ispd,irangeflag) .eq. failflag .or. &
# qc_flag(d,s,idir,irangeflag) .eq. failflag) then
# qc_flag(d,s,ivar,ispatialflag) = notestflag
# cycle
# end if
#
# found_one = 0
# countem_sm = 0
# countem_bg = 0
# !--- for each station, check it versus every other station
# !--- (except itself). First time through get # of stations
# !--- within radius of influence to determine if we can do
# !--- this test.
# do ss = 1, nstnnets
#
# !--- skip station for any of the following reasons: same
# !--- station as 's' (ss.eq.s), it's not within a lat/lon box
# !--- defined by latdiff/long diff, the value is mvc,
# !--- the value has been flagged in other tests, or the
# !--- elevation data is mvc.
# if ( abs(lat(d,ss) - lat(d,s) ) .gt. latdiff) cycle
# if ( abs(lon(d,ss) - lon(d,s) ) .gt. londiff) cycle
# if ( obs(d,ss,ispd) .eq. mvc ) cycle
# if ( obs(d,ss,idir) .eq. mvc ) cycle
# if ( obs(d,ss,iu) .eq. mvc ) cycle
# if ( obs(d,ss,iv) .eq. mvc ) cycle
# if ( lat(d,ss) .eq. mvc ) cycle
# if ( lon(d,ss) .eq. mvc ) cycle
# if ( obs(d,ss,ispd) .lt. spd_thresh_roi ) cycle
# if ( abs(elev(d,ss) - elev(d,s) ) .gt. max_elev_diff) cycle
# if ( qc_flag(d,ss,ivar,irangeflag) .eq. failflag ) cycle
# if ( qc_flag(d,ss,ivar,istepflag) .eq. suspflag ) cycle
# if ( qc_flag(d,ss,ivar,istepflag) .eq. warnflag ) cycle
# if ( qc_flag(d,ss,ivar,ipersistflag) .eq. suspflag ) cycle
# if ( qc_flag(d,ss,ivar,ipersistflag) .eq. warnflag ) cycle
# if ( ss .eq. s ) cycle
#
# call distance(lat(d,s),lon(d,s), lat(d,ss),lon(d,ss), dist)
#
# !--- Only add up number of stations in small roi, and keep
# !--- around values in big roi.
# if (dist .le. roi_sm) then
# countem_sm = countem_sm + 1
# end if
# if (dist .le. roi_bg) then
# countem_bg = countem_bg + 1
# roi_dist(countem_bg) = dist
# roi_spd(countem_bg) = obs(d,ss,ispd)
# roi_dir(countem_bg) = obs(d,ss,idir)
# roi_stnnets(countem_bg) = stnnets(ss)
# roi_lat(countem_bg) = lat(d,ss)
# roi_lon(countem_bg) = lon(d,ss)
# roi_elev(countem_bg) = elev(d,ss)
# end if
#
# end do
#
# !--- if there's enough good stations in small roi, expand to big
# !--- roi and use data from it.
# if (countem_sm .ge. min_stations .and. &
# countem_bg .ge. min_stations) then
#
# !--- calculate the minimum difference between the direction
# !--- in question and the obs in the roi.
# min_diff = 9999.0
# do i = 1, countem_bg
# ! call ndir_diff(dir_diff, roi_dir(i), obs(d,s,idir))
# if (dir_diff .lt. min_diff) then
# min_diff = dir_diff
# end if
# end do
#
# !--- if vector average speed as well as the station in
# !--- question's speed is high enough and the direction is
# !--- different enough from the vector average, flag it.
# if (min_diff .gt. dir_thresh) then
# qc_flag(d,s,ivar,ispatialflag) = warnflag
#
# !--- if set, print out station and roi stations data.
# 17 format(i1,',', a10, ',', a8, ',', 4(f12.3,','), f12.3)
# if (print_roi_data .eq. 1) then
# open(unit=100, file=trim(roi_data_file), &
# position="append")
# write(100,17) 1, dt_c, &
# stnnets(s),elev(d,s),lat(d,s),&
# lon(d,s), obs(d,s,idir), obs(d,s,ispd)
# do i = 1, countem_bg
# write(100,17) 0, dt_c, &
# roi_stnnets(i), roi_elev(i), &
# roi_lat(i), roi_lon(i), &
# roi_dir(i), roi_spd(i)
# end do
# close(100)
# end if
# end if
#
# else
#
# !--- not enough obs in roi to do test.
# qc_flag(d,s,ivar,ispatialflag) = notestflag
#
# end if
#
# end do !--- stations do loop.
# end do !--- dates do loop.
#
# end subroutine spatial_check_dir
#
def distance(lat1, lon1, lat2, lon2):
    """Great-circle distance between two lat/lon points, in kilometres.

    Uses the spherical law of cosines; inputs are decimal degrees.  The
    intermediate result is statute miles (degrees * 60 * 1.1515), then
    converted to km.
    """
    pi = atan2(0.0, -1.0)
    theta = lon1 - lon2
    lat1_r = lat1 * (pi / 180.)
    lat2_r = lat2 * (pi / 180.)
    theta_r = theta * (pi / 180.)
    # BUG FIX: floating-point rounding can push the cosine argument slightly
    # outside [-1, 1] (e.g. for identical or antipodal points), which makes
    # acos() raise "math domain error".  Clamp it into acos's domain.
    arg = sin(lat1_r) * sin(lat2_r) + cos(lat1_r) * cos(lat2_r) * cos(theta_r)
    arg = max(-1.0, min(1.0, arg))
    dist = acos(arg)
    dist_temp = (dist * (180. / pi)) * 60 * 1.1515
    # only returning in km units for now.
    return dist_temp * 1.609344
| for idx in vali: # Make sure not to stomp on previous persistence tests!
if qc_flag[idx, s, ivar, iqcflag] != level and qc_flag[idx, s, ivar, iqcflag] != passflag:
qc_flag[idx, s, ivar, iqcflag] = notestflag | conditional_block |
core.py | from math import atan2, acos, cos, sin
# QC flag values and array-index constants shared by all tests in this module.
nqcflags = 9  # total number of qc flags
passflag = 0  # "pass" qc value.
suspflag = 1  # "suspect" qc value.
warnflag = 2  # "warning" qc value.
failflag = 3  # "failed" qc value.
notestflag = 8  # "not-tested" qc value.
mflag = 9  # "missing" qc value.
irangeflag = 0  # index for range test qc flag.
istepflag = 1  # index for step test qc flag.
ipersistflag = 2  # index for persistence test flag.
ispatialflag = 3  # index for spatial test qc flag.
mvc = -888888.0  # missing-value code used throughout the obs arrays.
obsmvc = -888888.0  # missing-value code for raw observations (same as mvc).
trace = -666666.0  # sentinel for a precipitation "trace" amount.
# NOTE(review): speed/direction/u/v all share index 1 -- these look like
# placeholders; confirm the real variable indices before relying on them.
ispd = 1
idir = 1
iu = 1
iv = 1
def range_check(obs, nstnnets, var, ivar, qc_flag):
    """Flag observations that fall outside a fixed physical range for `var`.

    Writes into qc_flag[..., irangeflag]: mflag for missing obs, failflag
    for out-of-range values, passflag otherwise.  Precip "trace" values are
    passed even though they lie outside the numeric range.

    obs      -- 3-d array indexed [time, station, variable]
    nstnnets -- number of stations to check
    var      -- variable name (key into the allowed-range table)
    ivar     -- variable index into obs / qc_flag
    qc_flag  -- 4-d flag array indexed [time, station, variable, test]
    Raises ValueError for an unknown variable name.
    """
    iqcflag = irangeflag
    ndts = len(obs[:, 0, ivar])  # nr of hours in d.
    allowed_range = {
        'temp': (225, 323),
        'dew': (225, 323),
        'wind_dir': (0, 360),
        'wind_speed': (0, 60),
        'wind_gust': (0, 70),
        'pressure': (80000, 107840),  # Pa, not hPa!
        'rel_hum': (0, 105),
        # let's keep the values for variables we don't have (yet?)
        'mixr': (0, 25),
        'pcp6': (0, 256),
        'pcp24': (0, 508)
    }
    try:
        minrange, maxrange = allowed_range[var]
    except KeyError:
        raise ValueError('Cannot recognize variable type')
    for d in range(ndts):  # timestamps
        for s in range(nstnnets):  # stations
            if obs[d, s, ivar] == mvc:
                qc_flag[d, s, ivar, iqcflag] = mflag
                continue
            if obs[d, s, ivar] < minrange or obs[d, s, ivar] > maxrange:
                # Don't flag traces for precip!
                # BUG FIX: the original compared the whole `obs` array to
                # `trace` (ambiguous truth value with numpy) and tested
                # var == 'pcp', a key that never occurs (the table only has
                # 'pcp6'/'pcp24'), so traces were wrongly failed.
                if var in ('pcp6', 'pcp24') and obs[d, s, ivar] == trace:
                    qc_flag[d, s, ivar, iqcflag] = passflag
                else:
                    qc_flag[d, s, ivar, iqcflag] = failflag
            else:
                qc_flag[d, s, ivar, iqcflag] = passflag
def test_temp_vs_dew(obs, nstnnets, var, ivar, qc_flag):
    """Cross-check temperature vs dew point after the range test has run.

    For rows whose range flag is currently passflag: if both temp and dew
    point are present and dew point exceeds temperature by more than
    `td_gt_t_tol`, both values get a "warning" range flag.
    """
    td_gt_t_tol = 2.0  # tolerance for dew point being > temperature.
    iqcflag = irangeflag  # array index of range test.
    ndts = len(obs[:, 0, ivar])  # nr of hours in d.
    # NOTE(review): both indices are 0, so temp and dew point are read from
    # the same column -- looks like placeholders; confirm the real layout.
    it = 0  # index temp
    itd = 0  # index temp dew
    for d in range(ndts):  # timestamps
        for s in range(nstnnets):  # stations
            if qc_flag[d, s, ivar, iqcflag] == passflag:
                # !--- if this is temperature or dew point, make sure dew point
                # !--- is not greater than temperature (by 'td_gt_t_tol').
                if var in ['t', 'temp', 'td', 'dewp']:
                    if obs[d, s, itd] != mvc and obs[d, s, it] != mvc and obs[d, s, itd] - obs[d, s, it] > td_gt_t_tol:
                        # CONSISTENCY FIX: use the named constant (warnflag == 2)
                        # instead of the hardcoded 2 the original comment flagged.
                        qc_flag[d, s, it, iqcflag] = warnflag
                        qc_flag[d, s, itd, iqcflag] = warnflag
def step_check(obs, nstnnets, var, ivar, qc_flag):
    """
    Perform "delta test" which, for each station checks each hour
    in a day, checks for jumps between consecutive observations
    that exceed a given threshold.

    Two tests run per station:
      1. a two-point test flagging consecutive obs whose difference exceeds
         step1 ("suspect") or step2 ("warning"), and
      2. a three-point spike test (selected variables only) flagging an ob
         that differs from BOTH neighbors by more than step_3pt.
    If `too_many_spikes` or more spikes are found in a day, every tested
    hour of that day is escalated to "warning".  Results are written into
    the istepflag slot of qc_flag.  Raises ValueError for an unknown var.
    """
    iqcflag = istepflag
    ndts = len(obs[:, 0, ivar])  # # of hours in d.
    level1 = suspflag
    level2 = warnflag
    # if too_many_spikes (hours around which a spike is found) or more are found, flag all ndts worth.
    too_many_spikes = 2
    # set two maximum absolute steps allowable for each variable, one for "suspect" flag and one for "warning" flag.
    # Third entry is the 3-point spike threshold (None = var not spike-tested).
    steps = {
        'temp': (10.0, 15.0, 8.0),
        'dew': (10.0, 15.0, 8.0),
        'wind_gust': (40.0, 50.0, None),
        'rel_hum': (60.0, 80.0, None),
        'pressure': (1500.0, 2000.0, 1000.0),
        'wind_speed': (25.0, 35.0, 15.0),
        'wind_dir': (361., 361., None),
        'mixr': (7.0, 10.0, None),
        'pcpn1': (1.0, 2.0, None),
    }
    try:
        step1, step2, step_3pt = steps[var]
    except KeyError:
        raise ValueError('Unrecognized variable')
    for s in range(nstnnets):
        spikes_found = 0
        for d in range(ndts):
            if d == 0:
                # no previous ob to difference against.
                qc_flag[d, s, ivar, iqcflag] = notestflag
                continue
            # FIRST SET OF TESTS
            if qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or qc_flag[d, s, ivar, irangeflag] in [failflag, mflag]:
                qc_flag[d, s, ivar, iqcflag] = notestflag
            elif qc_flag[d, s, ivar, istepflag] in [level1, level2]:
                pass  # do nothing... must have been flagged by 3pt test for the previous d.
            elif step1 < abs(obs[d, s, ivar] - obs[d-1, s, ivar]) < step2:  # flag current and previous time with susp
                spikes_found += 1
                qc_flag[d-1, s, ivar, iqcflag] = level1
                qc_flag[d, s, ivar, iqcflag] = level1
            elif abs(obs[d, s, ivar] - obs[d-1, s, ivar]) >= step2:  # flag current and previous time with warning
                spikes_found += 1
                qc_flag[d-1, s, ivar, iqcflag] = level2
                qc_flag[d, s, ivar, iqcflag] = level2
            else:
                qc_flag[d, s, ivar, iqcflag] = passflag
            # New test which looks for smaller spike in windspeed that is a 1 hour jump that come back down immediately
            # not implemented?
            # SECOND SET OF TESTS
            # if (var in ['spd', 't', 'td', 'slp']) and d != ndts - 1:  # our code never enters there although it should
            if var in ['wind_speed', 'temp', 'dew', 'pressure'] and d != ndts - 1:
                if (qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or
                        qc_flag[d, s, ivar, irangeflag] in [failflag, mflag] or
                        qc_flag[d+1, s, ivar, irangeflag] in [failflag, mflag]):
                    pass
                    # do nothing if any of the 3 pts in question are missing or out of range.
                elif abs(obs[d, s, ivar] - obs[d-1, s, ivar]) > step_3pt and abs(obs[d, s, ivar] - obs[d+1, s, ivar]) > step_3pt:
                    # !-- Don't recount this spike if it was already counted above.
                    if qc_flag[d, s, ivar, iqcflag] not in [level1, level2]:
                        spikes_found += 1
                    # flag current, previous, and next time with "suspect" flag (lower level flag as this is a new test)
                    qc_flag[d+1, s, ivar, iqcflag] = level1
                    qc_flag[d-1, s, ivar, iqcflag] = level1
                    qc_flag[d, s, ivar, iqcflag] = level1
        if spikes_found >= too_many_spikes:
            # too many spikes in one day: escalate every tested hour to warning.
            for d in range(ndts):
                if qc_flag[d, s, ivar, iqcflag] != notestflag:
                    qc_flag[d, s, ivar, iqcflag] = level2
def persistence_check(obs, nstnnets, var, ivar, qc_flag):
    """
    For each station, for a day of data, calculate mean and std
    deviation. Compare std deviation to set values and if it's too
    small, flag entire day as suspect (1) or warning (2). Also
    check the difference between subsequent obs and flag if this
    difference is too small.

    obs      -- 3-d array indexed [time, station, variable]
    nstnnets -- number of stations to check
    var      -- variable name (key into the pdeltas table)
    ivar     -- variable index into obs / qc_flag
    qc_flag  -- 4-d flag array indexed [time, station, variable, test];
                results are written into the ipersistflag slot.
    Raises ValueError for an unknown variable name.
    """
    iqcflag = ipersistflag
    ndts = len(obs[:, 0, ivar])  # number of dts in obs.
    min_nobs = 8  # minimum number of good obs to do 24-hr std deviation test.
    level = warnflag
    # per-variable minimum variability; a day whose spread stays at or below
    # this is treated as a stuck sensor.
    pdeltas = {
        'dew': 0.1,
        'wind_gust': 0.0,
        'rel_hum': 0.1,
        'mixr': 0.1,
        'pcpn1': mvc,
        'pressure': 10.0,
        'temp': 0.1,
        'wind_speed': 0.0,
        'wind_dir': 0.1
    }
    try:
        pdelta = pdeltas[var]
    except KeyError:
        raise ValueError('Unrecognized variable')
    # For each station, determine # of good obs (individually) for standard deviation and maxdelta portions of the
    # persistence test. Get maxdelta while we're at it too.
    for s in range(nstnnets):
        deltacount = 0
        vali = []  # time indices of usable (non-missing, non-flagged) obs.
        val = []  # the corresponding ob values.
        maxdelta = mvc
        # Loop through each dt gathering all non-missing/flagged obs
        # and their indices into 'val' and 'vali' respectively.
        # Also determine the 'maxdelta' between successive non-missing/flagged obs.
        for d in range(ndts):
            # get 'maxdelta' between successive non-missing obs.
            if d > 0:
                # assuming range check has been performed.
                if (qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or
                        qc_flag[d, s, ivar, irangeflag] in [failflag, mflag] or
                        obs[d, s, ivar] == mvc and obs[d-1, s, ivar] == mvc):
                    pass  # do nothing to maxdelta.
                elif abs(obs[d, s, ivar] - obs[d - 1, s, ivar]) > maxdelta:
                    # both the current and previous values are ok, so get a delta value between them.
                    # NOTE(review): deltacount only grows when a NEW maximum is
                    # found, not for every usable pair -- see the TODO.
                    deltacount += 1  # TODO this depends on when would the highest delta show up!
                    maxdelta = abs(obs[d, s, ivar] - obs[d-1, s, ivar])
            if qc_flag[d, s, ivar, irangeflag] in [failflag, notestflag, mflag] or obs[d, s, ivar] == mvc:
                qc_flag[d, s, ivar, iqcflag] = notestflag
            else:
                vali.append(d)
                val.append(obs[d, s, ivar])
        # Only do standard deviation portion of test if there's more than 'min_obs' number of non-missing obs.
        if len(val) >= min_nobs:
            mean = sum(val) / len(val)
            # NOTE(review): despite the docstring, `sd` is the population
            # *variance* (no square root).  The pdelta thresholds may have
            # been tuned against this value -- confirm before "fixing".
            sd = (sum([(v - mean) ** 2 for v in val]) / len(val))
            # if the maxdelta between any successive obs is too small or if stdev is too small,
            # flag all non-missing (vali) values.
            if sd <= pdelta:
                qc_flag[vali, s, ivar, iqcflag] = level
            else:
                for idx in vali:  # Make sure not to stomp on previous persistence tests! # TODO how could we?
                    if qc_flag[idx, s, ivar, iqcflag] != level:
                        qc_flag[idx, s, ivar, iqcflag] = passflag
        # Only do maxdelta portion of test if there's more than 'min_obs' number of deltas found ('deltacount')
        # for calculating a 'maxdelta'.
        if deltacount >= min_nobs:
            if maxdelta != mvc and maxdelta < pdelta:
                if var != 'rel_hum' or val[0] < 99.0:  # Don't flag RH if it's 99-100% (saturated). # still unclear why they took only the first value
                    qc_flag[vali, s, ivar, iqcflag] = level
            else:
                for idx in vali:
                    if qc_flag[idx, s, ivar, iqcflag] != level:
                        qc_flag[idx, s, ivar, iqcflag] = passflag
        # If there wasn't enough good obs to do this the stddev or maxdelta tests, set flags to indeterminate.
        if len(vali) < min_nobs and deltacount < min_nobs:
            for idx in vali:  # Make sure not to stomp on previous persistence tests!
                if qc_flag[idx, s, ivar, iqcflag] != level and qc_flag[idx, s, ivar, iqcflag] != passflag:
                    qc_flag[idx, s, ivar, iqcflag] = notestflag
def spatial_check(obs, nstnnets, lat, lon, elev, var, ivar, qc_flag):
    """
    Spatial QC test using a simple neighbor check: for each ob it looks at
    stations within a radius and elevation band and checks whether at least
    one neighbor value is near the value in question. If not, it tries a
    bigger radius and checks again, and if not again, the value is flagged.

    obs      -- 3-d array indexed [time, station, variable]
    nstnnets -- number of stations to check
    lat, lon, elev -- 2-d station-metadata arrays indexed [time, station]
    var      -- variable name (key into the thresholds table)
    ivar     -- variable index into obs / qc_flag
    qc_flag  -- 4-d flag array indexed [time, station, variable, test];
                results are written into the ispatialflag slot.
    Raises ValueError for an unknown variable name.
    """
    ndts = len(obs[:, 0, 0])  # number of hours in obs.
    roism = 100.0  # smaller radius of influence.
    roibg = 150.0  # bigger radius of influence.
    min_stations = 2  # min # of stns needed for testing.
    level1 = suspflag
    level2 = warnflag
    latdiff = 3.0  # lat/lon prefilter box to speed up the neighbor search.
    londiff = 3.0
    # per-variable (suspect diff, warning diff, max elevation difference).
    thresholds = {
        'pressure': (750.0, 1000.0, 1000.0),
        'temp': (5.556, 8.333, 150.0),  # (10degF), (15degF)
        'dew': (5.556, 8.333, 150.0),  # (10degF), (15degF)
        'wind_speed': (7.65, 10.2, 250.0),  # (15kts), (20kts)
        'wind_dir': (360.0, 360.0, 250.0),
        'rel_hum': (75.0, 85.0, 250.0),
        'pcp6': (76.2, 101.6, 500.0),  # (mm; eq 3 inches), (mm; eq 4 inches)
        'pcp24': (152.4, 203.2, 500.0),  # (mm; eq 6 inches), (mm; eq 8 inches).
    }
    try:
        maxvdiff1, maxvdiff2, max_elev_diff = thresholds[var]
    except KeyError:
        raise ValueError('Unrecognized variable')
    # If variable is precip, look for traces make them 0.0 (not permanently as these data values don't get sent back out)
    # NOTE(review): 'pcp' is not a key of `thresholds`, so for var == 'pcp'
    # the lookup above already raised and this branch is unreachable --
    # probably should test 'pcp6'/'pcp24'; confirm intent.
    if var == 'pcp':
        for d in range(ndts):
            for s in range(nstnnets):
                if obs[d, s, ivar] == trace:
                    obs[d, s, ivar] = 0.0  # obs[:,:,ivar]...
    # Cliff's simple similar neighbor test.
    for d in range(ndts):
        for s in range(nstnnets):
            # can't test a missing ob, missing elevation, or a range failure.
            if obs[d, s, ivar] == mvc or elev[d, s] == mvc or qc_flag[d, s, ivar, irangeflag] == failflag:
                qc_flag[d, s, ivar, ispatialflag] = notestflag
                continue
            valsm2 = []  # |ob - neighbor| diffs within the small radius.
            valbg2 = []  # diffs in the big radius (beyond the small one).
            # for each station, check it versus every other station (except itself). First time through get # of
            # stations within radius of influence to determine if we can do this test.
            for ss in range(nstnnets):
                if ss == s or obs[d, ss, ivar] == mvc \
                        or elev[d, ss] == mvc or lat[d, ss] == mvc or lon[d, ss] == mvc \
                        or abs(lat[d, ss] - lat[d, s]) > latdiff or abs(lon[d, ss] - lon[d, s]) > londiff \
                        or abs(elev[d, ss] - elev[d, s]) > max_elev_diff:
                    continue
                # skip neighbors that themselves failed range or were flagged
                # by the step/persistence tests.
                if qc_flag[d, ss, ivar, irangeflag] == failflag \
                        or qc_flag[d, ss, ivar, istepflag] in [suspflag, warnflag] \
                        or qc_flag[d, ss, ivar, ipersistflag] in [suspflag, warnflag]:
                    continue
                dist = distance(lat[d, s], lon[d, s], lat[d, ss], lon[d, ss])
                obsdiff = abs(obs[d, ss, ivar] - obs[d, s, ivar])
                if dist < roism:
                    valsm2.append(obsdiff)
                elif dist < roibg:
                    valbg2.append(obsdiff)
            # !--- If any obs found in roi was <= maxvdiff1, it's a pass. If none found <= maxvdiff1,
            # but one is >= maxvdiff1 & < maxvdiff2, it's "suspect." Otherwise it's "warning." Look in big roi too.
            if len(valsm2) >= min_stations:
                mindiffsm = min(valsm2)
                if mindiffsm <= maxvdiff1:
                    qc_flag[d, s, ivar, ispatialflag] = passflag
                elif maxvdiff1 < mindiffsm <= maxvdiff2:
                    qc_flag[d, s, ivar, ispatialflag] = level1
                else:
                    qc_flag[d, s, ivar, ispatialflag] = level2
            elif len(valsm2) < min_stations <= len(valbg2):
                # not enough close neighbors, but enough in the big radius.
                qc_flag[d, s, ivar, ispatialflag] = passflag if min(valbg2) <= maxvdiff2 else level1
            else:  # not enough obs in either roi to do test.
                qc_flag[d, s, ivar, ispatialflag] = notestflag
#
# !-----------------------------------------------------------------------------
# !--- Spatial Test for Wind Direction
# !-----------------------------------------------------------------------------
#
# subroutine spatial_check_dir(obs, dts, stnnets, lat, lon, elev, &
# var, ivar, &
# qc_flag, roi_data_file)
#
# !--- spatial_check_dir does does a spatial QC test on wind direction
# !--- by gathering the closest 'min_stations' stations within a
# !--- radius 'roi' and calculating a vector average spd and dir from
# !--- those stations, and if the dir from the station in question is
# !--- more than 'dir_diff' degrees from the vector average, the
# !--- station's dir is flagged.
# !---
# !--- obs data array with dimensions hrs, variables, stations,
# !--- stnnets character array of station names and netids
# !--- lat station latitude for each station in d array.
# !--- lon station longitude for each station in d array.
# !--- elev station elevation for each station in d array.
# !--- var character string name of the variable
# !--- ivar array index of d for the variable in question
# !--- qc_flag array of incoming/outgoing quality control flags.
#
# !-- changing values of d for zeroing traces -- can't define 'IN' intent.
# real, dimension(:,:,:) :: obs
# character(len=10), dimension(:), intent(IN) :: dts
# character(len=*), dimension(:), intent(IN) :: stnnets
# real, dimension(:,:), intent(IN) :: lat
# real, dimension(:,:), intent(IN) :: lon
# real, dimension(:,:), intent(IN) :: elev
# character(len=*), intent(IN) :: var
# integer, intent(IN) :: ivar
# integer, dimension(:,:,:,:), intent(INOUT) :: qc_flag
# character(len=*) , intent(IN) :: roi_data_file
#
# integer :: d, i, n, s, ss, nstnnets, ndts
# integer :: min_stations, debug, level1, level2
# integer :: found_one, countem_sm, countem_bg
# integer :: print_roi_data
# real :: max_elev_diff, elev_diff, maxvdiff1, maxvdiff2
# real :: dist, roi_sm, roi_bg, latdiff, londiff, dir_diff
# real :: dir_thresh, spd_thresh, spd_thresh_roi, min_diff
#
# character(len=10) :: dt_c
#
# character(len=2), allocatable, dimension(:) :: netids
#
# real, allocatable, dimension(:) :: roi_dist
# real, allocatable, dimension(:) :: roi_spd
# real, allocatable, dimension(:) :: roi_dir
# real, allocatable, dimension(:) :: roi_lat
# real, allocatable, dimension(:) :: roi_lon
# real, allocatable, dimension(:) :: roi_elev
# character(len=8), allocatable, dimension(:) :: roi_stnnets
#
# real, allocatable, dimension(:) :: dist_sort
# integer, allocatable, dimension(:) :: isort
#
# intrinsic maxval,minval,size
#
# !--- Settings appropriate for all variables.
# debug = 0 ! sets debug level (sets what to print)
# print_roi_data = 1
# nstnnets = size(stnnets) ! get # of stations in obs.
# ndts = size(obs(:,1,1)) ! # of hours in obs.
# min_stations = 5 ! min # of stns needed for testing.
# roi_sm = 50 ! small radius for dir qc test.
# roi_bg = 75 ! big radius for dir qc test.
# dir_thresh = 85 ! diff in dir required for flagging.
# spd_thresh = 2.5722 ! spd_thresh for station in question.
# spd_thresh_roi = 0.5144 ! spd_thresh for stations in roi.
# level1 = suspflag ! "suspect" flag.
# level2 = warnflag ! "warning" flag.
# latdiff = 3.0 ! to speed up station finding...
# londiff = 3.0 ! to speed up station finding...
# max_elev_diff = 99999999.0 ! for now, allow any elevations.
#
# !--- pull netids out of the stnnets.
# allocate(netids(nstnnets))
# netids = stnnets(:)(7:8)
#
# allocate( roi_dist (nstnnets) )
# allocate( roi_spd (nstnnets) )
# allocate( roi_dir (nstnnets) )
# allocate( roi_stnnets (nstnnets) )
# allocate( roi_lat (nstnnets) )
# allocate( roi_lon (nstnnets) )
# allocate( roi_elev (nstnnets) )
#
# !--- Loop through each date.
# do d = 1, ndts
#
# dt_c = dts(d)
#
# !--- Loop through each station.
# do s = 1, nstnnets
#
# !--- Skip this station/obs if it's mvc, or if the speed is
# !--- below 'spd_thresh'. Since as of 1/16/2007 many obs
# !--- have bad elevation data, NOT skipping if this is missing
# !--- (probably change this when station elevation data gets
# !--- fixed by Dave Carey).
# if (obs(d,s,ispd) .lt. spd_thresh .or. &
# obs(d,s,ispd) .eq. mvc .or. &
# obs(d,s,idir) .eq. mvc .or. &
# obs(d,s,iu) .eq. mvc .or. &
# obs(d,s,iv) .eq. mvc .or. &
# qc_flag(d,s,ispd,irangeflag) .eq. failflag .or. &
# qc_flag(d,s,idir,irangeflag) .eq. failflag) then
# qc_flag(d,s,ivar,ispatialflag) = notestflag
# cycle
# end if
#
# found_one = 0
# countem_sm = 0
# countem_bg = 0
# !--- for each station, check it versus every other station | # !--- skip station for any of the following reasons: same
# !--- station as 's' (ss.eq.s), it's not within a lat/lon box
# !--- defined by latdiff/long diff, the value is mvc,
# !--- the value has been flagged in other tests, or the
# !--- elevation data is mvc.
# if ( abs(lat(d,ss) - lat(d,s) ) .gt. latdiff) cycle
# if ( abs(lon(d,ss) - lon(d,s) ) .gt. londiff) cycle
# if ( obs(d,ss,ispd) .eq. mvc ) cycle
# if ( obs(d,ss,idir) .eq. mvc ) cycle
# if ( obs(d,ss,iu) .eq. mvc ) cycle
# if ( obs(d,ss,iv) .eq. mvc ) cycle
# if ( lat(d,ss) .eq. mvc ) cycle
# if ( lon(d,ss) .eq. mvc ) cycle
# if ( obs(d,ss,ispd) .lt. spd_thresh_roi ) cycle
# if ( abs(elev(d,ss) - elev(d,s) ) .gt. max_elev_diff) cycle
# if ( qc_flag(d,ss,ivar,irangeflag) .eq. failflag ) cycle
# if ( qc_flag(d,ss,ivar,istepflag) .eq. suspflag ) cycle
# if ( qc_flag(d,ss,ivar,istepflag) .eq. warnflag ) cycle
# if ( qc_flag(d,ss,ivar,ipersistflag) .eq. suspflag ) cycle
# if ( qc_flag(d,ss,ivar,ipersistflag) .eq. warnflag ) cycle
# if ( ss .eq. s ) cycle
#
# call distance(lat(d,s),lon(d,s), lat(d,ss),lon(d,ss), dist)
#
# !--- Only add up number of stations in small roi, and keep
# !--- around values in big roi.
# if (dist .le. roi_sm) then
# countem_sm = countem_sm + 1
# end if
# if (dist .le. roi_bg) then
# countem_bg = countem_bg + 1
# roi_dist(countem_bg) = dist
# roi_spd(countem_bg) = obs(d,ss,ispd)
# roi_dir(countem_bg) = obs(d,ss,idir)
# roi_stnnets(countem_bg) = stnnets(ss)
# roi_lat(countem_bg) = lat(d,ss)
# roi_lon(countem_bg) = lon(d,ss)
# roi_elev(countem_bg) = elev(d,ss)
# end if
#
# end do
#
# !--- if there's enough good stations in small roi, expand to big
# !--- roi and use data from it.
# if (countem_sm .ge. min_stations .and. &
# countem_bg .ge. min_stations) then
#
# !--- calculate the minimum difference between the direction
# !--- in question and the obs in the roi.
# min_diff = 9999.0
# do i = 1, countem_bg
# ! call ndir_diff(dir_diff, roi_dir(i), obs(d,s,idir))
# if (dir_diff .lt. min_diff) then
# min_diff = dir_diff
# end if
# end do
#
# !--- if vector average speed as well as the station in
# !--- question's speed is high enough and the direction is
# !--- different enough from the vector average, flag it.
# if (min_diff .gt. dir_thresh) then
# qc_flag(d,s,ivar,ispatialflag) = warnflag
#
# !--- if set, print out station and roi stations data.
# 17 format(i1,',', a10, ',', a8, ',', 4(f12.3,','), f12.3)
# if (print_roi_data .eq. 1) then
# open(unit=100, file=trim(roi_data_file), &
# position="append")
# write(100,17) 1, dt_c, &
# stnnets(s),elev(d,s),lat(d,s),&
# lon(d,s), obs(d,s,idir), obs(d,s,ispd)
# do i = 1, countem_bg
# write(100,17) 0, dt_c, &
# roi_stnnets(i), roi_elev(i), &
# roi_lat(i), roi_lon(i), &
# roi_dir(i), roi_spd(i)
# end do
# close(100)
# end if
# end if
#
# else
#
# !--- not enough obs in roi to do test.
# qc_flag(d,s,ivar,ispatialflag) = notestflag
#
# end if
#
# end do !--- stations do loop.
# end do !--- dates do loop.
#
# end subroutine spatial_check_dir
#
def distance(lat1, lon1, lat2, lon2):
pi = atan2(0.0, -1.0)
theta = lon1 - lon2
lat1_r = lat1 * (pi / 180.)
lat2_r = lat2 * (pi / 180.)
theta_r = theta * (pi / 180.)
dist = acos(sin(lat1_r) * sin(lat2_r) + cos(lat1_r) * cos(lat2_r) * cos(theta_r))
dist_temp = (dist * (180. / pi)) * 60 * 1.1515
# only returning in km units for now.
return dist_temp * 1.609344 | # !--- (except itself). First time through get # of stations
# !--- within radius of influence to determine if we can do
# !--- this test.
# do ss = 1, nstnnets
# | random_line_split |
core.py | from math import atan2, acos, cos, sin
# QC flag values and array-index constants shared by all tests in this module.
nqcflags = 9  # total number of qc flags
passflag = 0  # "pass" qc value.
suspflag = 1  # "suspect" qc value.
warnflag = 2  # "warning" qc value.
failflag = 3  # "failed" qc value.
notestflag = 8  # "not-tested" qc value.
mflag = 9  # "missing" qc value.
irangeflag = 0  # index for range test qc flag.
istepflag = 1  # index for step test qc flag.
ipersistflag = 2  # index for persistence test flag.
ispatialflag = 3  # index for spatial test qc flag.
mvc = -888888.0  # missing-value code used throughout the obs arrays.
obsmvc = -888888.0  # missing-value code for raw observations (same as mvc).
trace = -666666.0  # sentinel for a precipitation "trace" amount.
# NOTE(review): speed/direction/u/v all share index 1 -- these look like
# placeholders; confirm the real variable indices before relying on them.
ispd = 1
idir = 1
iu = 1
iv = 1
def range_check(obs, nstnnets, var, ivar, qc_flag):
    """Range test: flag each observation of `var` outside its allowed bounds.

    Parameters:
        obs       3-d array of observations indexed [time, station, variable].
        nstnnets  number of station/network entries.
        var       variable name; must be a key of `allowed_range`.
        ivar      index of the variable on the third axis of `obs`.
        qc_flag   4-d flag array [time, station, variable, test]; written in
                  place at test slot `irangeflag`.

    Raises:
        ValueError: if `var` is not a recognized variable name.
    """
    iqcflag = irangeflag
    ndts = len(obs[:, 0, ivar])  # nr of hours in d.
    allowed_range = {
        'temp': (225, 323),
        'dew': (225, 323),
        'wind_dir': (0, 360),
        'wind_speed': (0, 60),
        'wind_gust': (0, 70),
        'pressure': (80000, 107840),  # Pa, not hPa!
        'rel_hum': (0, 105),
        # let's keep the values for variables we don't have (yet?)
        'mixr': (0, 25),
        'pcp6': (0, 256),
        'pcp24': (0, 508)
    }
    try:
        minrange, maxrange = allowed_range[var]
    except KeyError:
        raise ValueError('Cannot recognize variable type')
    for d in range(ndts):  # timestamps
        for s in range(nstnnets):  # stations
            if obs[d, s, ivar] == mvc:
                qc_flag[d, s, ivar, iqcflag] = mflag
                continue
            if obs[d, s, ivar] < minrange or obs[d, s, ivar] > maxrange:
                # Don't flag traces for precip!
                # Bug fix: compare the single value, not the whole array --
                # `obs == trace` never held for the scalar at [d, s, ivar].
                # NOTE(review): `var == 'pcp'` is unreachable as written --
                # 'pcp' is not in allowed_range, so it raises ValueError
                # above. Presumably 'pcp6'/'pcp24' were intended; confirm.
                if var == 'pcp' and obs[d, s, ivar] == trace:
                    qc_flag[d, s, ivar, iqcflag] = passflag
                else:
                    qc_flag[d, s, ivar, iqcflag] = failflag
            else:
                qc_flag[d, s, ivar, iqcflag] = passflag
def test_temp_vs_dew(obs, nstnnets, var, ivar, qc_flag):
    """Cross-check: dew point must not exceed temperature by > `td_gt_t_tol`.

    For observations that passed the range test, if dew point is more than
    `td_gt_t_tol` above temperature, both variables get the warning flag in
    the range-test slot. Runs only when `var` is a temperature/dew name.

    Parameters mirror `range_check`; `qc_flag` is updated in place.
    """
    # If there is both temp and tdew and after we made all the tests.
    td_gt_t_tol = 2.0  # tolerance for dew point being > temperature.
    iqcflag = irangeflag  # array index of range test.
    ndts = len(obs[:, 0, ivar])  # nr of hours in d.
    # NOTE(review): both indices are 0, so temperature and dew point are read
    # from the same column of obs -- confirm the intended variable indices.
    it = 0  # index temp
    itd = 0  # index temp dew
    for d in range(ndts):  # timestamps
        for s in range(nstnnets):  # stations
            if qc_flag[d, s, ivar, iqcflag] == passflag:
                # if this is temperature or dew point, make sure dew point
                # is not greater than temperature (by 'td_gt_t_tol').
                if var in ['t', 'temp', 'td', 'dewp']:
                    if obs[d, s, itd] != mvc and obs[d, s, it] != mvc and obs[d, s, itd] - obs[d, s, it] > td_gt_t_tol:
                        # Use the named constant instead of a hardcoded 2
                        # (same value; the old inline comment asked for this).
                        qc_flag[d, s, it, iqcflag] = warnflag
                        qc_flag[d, s, itd, iqcflag] = warnflag
def step_check(obs, nstnnets, var, ivar, qc_flag):
    """
    Perform "delta test" which, for each station checks each hour
    in a day, checks for jumps between consecutive observations
    that exceed a given threshold.

    Two sub-tests are applied per (station, hour):
      1. 2-point test: |obs[d] - obs[d-1]| between step1 and step2 flags
         both hours "suspect"; >= step2 flags both "warning".
      2. 3-point (spike) test, selected variables only: a jump up and back
         down around hour d flags d-1, d, d+1 "suspect".
    If >= `too_many_spikes` spikes are found at a station, every testable
    hour for that station is escalated to "warning".

    `qc_flag` is updated in place at test slot `istepflag`.
    Raises ValueError for an unrecognized variable.
    """
    iqcflag = istepflag
    ndts = len(obs[:, 0, ivar])  # # of hours in d.
    level1 = suspflag
    level2 = warnflag
    # if too_many_spikes (hours around which a spike is found) or more are found, flag all ndts worth.
    too_many_spikes = 2
    # set two maximum absolute steps allowable for each variable, one for "suspect" flag and one for "warning" flag.
    # Third entry is the 3-point spike threshold (None = no 3-point test).
    steps = {
        'temp': (10.0, 15.0, 8.0),
        'dew': (10.0, 15.0, 8.0),
        'wind_gust': (40.0, 50.0, None),
        'rel_hum': (60.0, 80.0, None),
        'pressure': (1500.0, 2000.0, 1000.0),
        'wind_speed': (25.0, 35.0, 15.0),
        'wind_dir': (361., 361., None),
        'mixr': (7.0, 10.0, None),
        'pcpn1': (1.0, 2.0, None),
    }
    try:
        step1, step2, step_3pt = steps[var]
    except KeyError:
        raise ValueError('Unrecognized variable')
    for s in range(nstnnets):
        spikes_found = 0
        for d in range(ndts):
            # First hour has no predecessor: cannot be tested.
            if d == 0:
                qc_flag[d, s, ivar, iqcflag] = notestflag
                continue
            # FIRST SET OF TESTS (2-point deltas against the previous hour).
            if qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or qc_flag[d, s, ivar, irangeflag] in [failflag, mflag]:
                qc_flag[d, s, ivar, iqcflag] = notestflag
            elif qc_flag[d, s, ivar, istepflag] in [level1, level2]:
                pass  # do nothing... must have been flagged by 3pt test for the previous d.
            elif step1 < abs(obs[d, s, ivar] - obs[d-1, s, ivar]) < step2:  # flag current and previous time with susp
                spikes_found += 1
                qc_flag[d-1, s, ivar, iqcflag] = level1
                qc_flag[d, s, ivar, iqcflag] = level1
            elif abs(obs[d, s, ivar] - obs[d-1, s, ivar]) >= step2:  # flag current and previous time with warning
                spikes_found += 1
                qc_flag[d-1, s, ivar, iqcflag] = level2
                qc_flag[d, s, ivar, iqcflag] = level2
            else:
                qc_flag[d, s, ivar, iqcflag] = passflag
            # New test which looks for smaller spike in windspeed that is a 1 hour jump that come back down immediately
            # not implemented?
            # SECOND SET OF TESTS (3-point spike: up then immediately down).
            # if (var in ['spd', 't', 'td', 'slp']) and d != ndts - 1: # our code never enters there although it should
            if var in ['wind_speed', 'temp', 'dew', 'pressure'] and d != ndts - 1:
                if (qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or
                        qc_flag[d, s, ivar, irangeflag] in [failflag, mflag] or
                        qc_flag[d+1, s, ivar, irangeflag] in [failflag, mflag]):
                    pass
                    # do nothing if any of the 3 pts in question are missing or out of range.
                elif abs(obs[d, s, ivar] - obs[d-1, s, ivar]) > step_3pt and abs(obs[d, s, ivar] - obs[d+1, s, ivar]) > step_3pt:
                    # !-- Don't recount this spike if it was already counted above.
                    if qc_flag[d, s, ivar, iqcflag] not in [level1, level2]:
                        spikes_found += 1
                    # flag current, previous, and next time with "suspect" flag (lower level flag as this is a new test)
                    qc_flag[d+1, s, ivar, iqcflag] = level1
                    qc_flag[d-1, s, ivar, iqcflag] = level1
                    qc_flag[d, s, ivar, iqcflag] = level1
        # Too many spikes at this station: escalate the whole day to warning.
        if spikes_found >= too_many_spikes:
            for d in range(ndts):
                if qc_flag[d, s, ivar, iqcflag] != notestflag:
                    qc_flag[d, s, ivar, iqcflag] = level2
def persistence_check(obs, nstnnets, var, ivar, qc_flag):
    """
    For each station, for a day of data, calculate mean and std
    deviation. Compare std deviation to set values and if it's too
    small, flag entire day as suspect (1) or warning (2). Also
    check the difference between subsequent obs and flag if this
    difference is too small.

    `qc_flag` is updated in place at test slot `ipersistflag`.
    Raises ValueError for an unrecognized variable.
    """
    iqcflag = ipersistflag
    ndts = len(obs[:, 0, ivar])  # number of dts in obs.
    min_nobs = 8  # minimum number of good obs to do 24-hr std deviation test.
    level = warnflag
    # Per-variable minimum allowed spread; values never vary less than this
    # over a day unless the sensor is stuck.
    pdeltas = {
        'dew': 0.1,
        'wind_gust': 0.0,
        'rel_hum': 0.1,
        'mixr': 0.1,
        'pcpn1': mvc,
        'pressure': 10.0,
        'temp': 0.1,
        'wind_speed': 0.0,
        'wind_dir': 0.1
    }
    try:
        pdelta = pdeltas[var]
    except KeyError:
        raise ValueError('Unrecognized variable')
    # For each station, determine # of good obs (individually) for standard deviation and maxdelta portions of the
    # persistence test. Get maxdelta while we're at it too.
    for s in range(nstnnets):
        deltacount = 0
        vali = []  # indices of usable (non-missing, non-failed) obs.
        val = []   # the usable obs values themselves.
        maxdelta = mvc
        # Loop through each dt gathering all non-missing/flagged obs
        # and their indices into 'val' and 'vali' respectively.
        # Also determine the 'maxdelta' between successive non-missing/flagged obs.
        for d in range(ndts):
            # get 'maxdelta' between successive non-missing obs.
            if d > 0:
                # assuming range check has been performed.
                if (qc_flag[d-1, s, ivar, irangeflag] in [failflag, mflag] or
                        qc_flag[d, s, ivar, irangeflag] in [failflag, mflag] or
                        obs[d, s, ivar] == mvc and obs[d-1, s, ivar] == mvc):
                    pass  # do nothing to maxdelta.
                elif abs(obs[d, s, ivar] - obs[d - 1, s, ivar]) > maxdelta:
                    # both the current and previous values are ok, so get a delta value between them.
                    deltacount += 1  # TODO this depends on when would the highest delta show up!
                    maxdelta = abs(obs[d, s, ivar] - obs[d-1, s, ivar])
            if qc_flag[d, s, ivar, irangeflag] in [failflag, notestflag, mflag] or obs[d, s, ivar] == mvc:
                qc_flag[d, s, ivar, iqcflag] = notestflag
            else:
                vali.append(d)
                val.append(obs[d, s, ivar])
        # Only do standard deviation portion of test if there's more than 'min_obs' number of non-missing obs.
        if len(val) >= min_nobs:
            mean = sum(val) / len(val)
            # NOTE(review): no sqrt is taken, so 'sd' is the variance, not
            # the standard deviation the docstring describes -- confirm
            # whether pdelta thresholds were tuned against variance.
            sd = (sum([(v - mean) ** 2 for v in val]) / len(val))
            # if the maxdelta between any successive obs is too small or if stdev is too small,
            # flag all non-missing (vali) values.
            if sd <= pdelta:
                qc_flag[vali, s, ivar, iqcflag] = level
            else:
                for idx in vali:  # Make sure not to stomp on previous persistence tests! # TODO how could we?
                    if qc_flag[idx, s, ivar, iqcflag] != level:
                        qc_flag[idx, s, ivar, iqcflag] = passflag
        # Only do maxdelta portion of test if there's more than 'min_obs' number of deltas found ('deltacount')
        # for calculating a 'maxdelta'.
        if deltacount >= min_nobs:
            if maxdelta != mvc and maxdelta < pdelta:
                if var != 'rel_hum' or val[0] < 99.0:  # Don't flag RH if it's 99-100% (saturated). # still unclear why they took only the first value
                    qc_flag[vali, s, ivar, iqcflag] = level
            else:
                for idx in vali:
                    if qc_flag[idx, s, ivar, iqcflag] != level:
                        qc_flag[idx, s, ivar, iqcflag] = passflag
        # If there wasn't enough good obs to do this the stddev or maxdelta tests, set flags to indeterminate.
        if len(vali) < min_nobs and deltacount < min_nobs:
            for idx in vali:  # Make sure not to stomp on previous persistence tests!
                if qc_flag[idx, s, ivar, iqcflag] != level and qc_flag[idx, s, ivar, iqcflag] != passflag:
                    qc_flag[idx, s, ivar, iqcflag] = notestflag
def spatial_check(obs, nstnnets, lat, lon, elev, var, ivar, qc_flag):
    """
    Spatial_check does a spatial QC test using a simple neighbor check whereby it looks at stations within a radius
    and elevation band and checks if at least one value is near the value in question. If not, it tries a bigger radius
    and checks again, and if not again, the value is flagged.

    (Fix: the function name was missing from the def line; restored as
    `spatial_check` per the docstring and the companion Fortran subroutine.)

    Parameters:
        obs, qc_flag      as in range_check; qc_flag is written in place at
                          test slot `ispatialflag`.
        lat, lon, elev    2-d station metadata arrays indexed [time, station].
        nstnnets, var, ivar  as in range_check.

    Raises ValueError for an unrecognized variable.
    """
    ndts = len(obs[:, 0, 0])  # number of hours in obs.
    roism = 100.0  # smaller radius of influence.
    roibg = 150.0  # bigger radius of influence.
    min_stations = 2  # min # of stns needed for testing.
    level1 = suspflag
    level2 = warnflag
    latdiff = 3.0  # lat/lon pre-filter box, to speed up station finding.
    londiff = 3.0
    # (maxvdiff1, maxvdiff2, max_elev_diff) per variable.
    thresholds = {
        'pressure': (750.0, 1000.0, 1000.0),
        'temp': (5.556, 8.333, 150.0),  # (10degF), (15degF)
        'dew': (5.556, 8.333, 150.0),  # (10degF), (15degF)
        'wind_speed': (7.65, 10.2, 250.0),  # (15kts), (20kts)
        'wind_dir': (360.0, 360.0, 250.0),
        'rel_hum': (75.0, 85.0, 250.0),
        'pcp6': (76.2, 101.6, 500.0),  # (mm; eq 3 inches), (mm; eq 4 inches)
        'pcp24': (152.4, 203.2, 500.0),  # (mm; eq 6 inches), (mm; eq 8 inches).
    }
    try:
        maxvdiff1, maxvdiff2, max_elev_diff = thresholds[var]
    except KeyError:
        raise ValueError('Unrecognized variable')
    # If variable is precip, look for traces make them 0.0 (not permanently as these data values don't get sent back out)
    # NOTE(review): 'pcp' is not a thresholds key, so this branch appears
    # unreachable -- presumably 'pcp6'/'pcp24' were intended; confirm.
    if var == 'pcp':
        for d in range(ndts):
            for s in range(nstnnets):
                if obs[d, s, ivar] == trace:
                    obs[d, s, ivar] = 0.0  # obs[:,:,ivar]...
    # Cliff's simple similar neighbor test.
    for d in range(ndts):
        for s in range(nstnnets):
            if obs[d, s, ivar] == mvc or elev[d, s] == mvc or qc_flag[d, s, ivar, irangeflag] == failflag:
                qc_flag[d, s, ivar, ispatialflag] = notestflag
                continue
            valsm2 = []  # |diff| to neighbors within the small radius.
            valbg2 = []  # |diff| to neighbors within the big radius only.
            # for each station, check it versus every other station (except itself). First time through get # of
            # stations within radius of influence to determine if we can do this test.
            for ss in range(nstnnets):
                if ss == s or obs[d, ss, ivar] == mvc \
                        or elev[d, ss] == mvc or lat[d, ss] == mvc or lon[d, ss] == mvc \
                        or abs(lat[d, ss] - lat[d, s]) > latdiff or abs(lon[d, ss] - lon[d, s]) > londiff \
                        or abs(elev[d, ss] - elev[d, s]) > max_elev_diff:
                    continue
                # Skip neighbors already flagged by earlier tests.
                if qc_flag[d, ss, ivar, irangeflag] == failflag \
                        or qc_flag[d, ss, ivar, istepflag] in [suspflag, warnflag] \
                        or qc_flag[d, ss, ivar, ipersistflag] in [suspflag, warnflag]:
                    continue
                dist = distance(lat[d, s], lon[d, s], lat[d, ss], lon[d, ss])
                obsdiff = abs(obs[d, ss, ivar] - obs[d, s, ivar])
                if dist < roism:
                    valsm2.append(obsdiff)
                elif dist < roibg:
                    valbg2.append(obsdiff)
            # !--- If any obs found in roi was <= maxvdiff1, it's a pass. If none found <= maxvdiff1,
            # but one is >= maxvdiff1 & < maxvdiff2, it's "suspect." Otherwise it's "warning." Look in big roi too.
            if len(valsm2) >= min_stations:
                mindiffsm = min(valsm2)
                if mindiffsm <= maxvdiff1:
                    qc_flag[d, s, ivar, ispatialflag] = passflag
                elif maxvdiff1 < mindiffsm <= maxvdiff2:
                    qc_flag[d, s, ivar, ispatialflag] = level1
                else:
                    qc_flag[d, s, ivar, ispatialflag] = level2
            elif len(valsm2) < min_stations <= len(valbg2):
                qc_flag[d, s, ivar, ispatialflag] = passflag if min(valbg2) <= maxvdiff2 else level1
            else:  # not enough obs in either roi to do test.
                qc_flag[d, s, ivar, ispatialflag] = notestflag
#
# !-----------------------------------------------------------------------------
# !--- Spatial Test for Wind Direction
# !-----------------------------------------------------------------------------
#
# subroutine spatial_check_dir(obs, dts, stnnets, lat, lon, elev, &
# var, ivar, &
# qc_flag, roi_data_file)
#
# !--- spatial_check_dir does a spatial QC test on wind direction
# !--- by gathering the closest 'min_stations' stations within a
# !--- radius 'roi' and calculating a vector average spd and dir from
# !--- those stations, and if the dir from the station in question is
# !--- more than 'dir_diff' degrees from the vector average, the
# !--- station's dir is flagged.
# !---
# !--- obs data array with dimensions hrs, variables, stations,
# !--- stnnets character array of station names and netids
# !--- lat station latitude for each station in d array.
# !--- lon station longitude for each station in d array.
# !--- elev station elevation for each station in d array.
# !--- var character string name of the variable
# !--- ivar array index of d for the variable in question
# !--- qc_flag array of incoming/outgoing quality control flags.
#
# !-- changing values of d for zeroing traces -- can't define 'IN' intent.
# real, dimension(:,:,:) :: obs
# character(len=10), dimension(:), intent(IN) :: dts
# character(len=*), dimension(:), intent(IN) :: stnnets
# real, dimension(:,:), intent(IN) :: lat
# real, dimension(:,:), intent(IN) :: lon
# real, dimension(:,:), intent(IN) :: elev
# character(len=*), intent(IN) :: var
# integer, intent(IN) :: ivar
# integer, dimension(:,:,:,:), intent(INOUT) :: qc_flag
# character(len=*) , intent(IN) :: roi_data_file
#
# integer :: d, i, n, s, ss, nstnnets, ndts
# integer :: min_stations, debug, level1, level2
# integer :: found_one, countem_sm, countem_bg
# integer :: print_roi_data
# real :: max_elev_diff, elev_diff, maxvdiff1, maxvdiff2
# real :: dist, roi_sm, roi_bg, latdiff, londiff, dir_diff
# real :: dir_thresh, spd_thresh, spd_thresh_roi, min_diff
#
# character(len=10) :: dt_c
#
# character(len=2), allocatable, dimension(:) :: netids
#
# real, allocatable, dimension(:) :: roi_dist
# real, allocatable, dimension(:) :: roi_spd
# real, allocatable, dimension(:) :: roi_dir
# real, allocatable, dimension(:) :: roi_lat
# real, allocatable, dimension(:) :: roi_lon
# real, allocatable, dimension(:) :: roi_elev
# character(len=8), allocatable, dimension(:) :: roi_stnnets
#
# real, allocatable, dimension(:) :: dist_sort
# integer, allocatable, dimension(:) :: isort
#
# intrinsic maxval,minval,size
#
# !--- Settings appropriate for all variables.
# debug = 0 ! sets debug level (sets what to print)
# print_roi_data = 1
# nstnnets = size(stnnets) ! get # of stations in obs.
# ndts = size(obs(:,1,1)) ! # of hours in obs.
# min_stations = 5 ! min # of stns needed for testing.
# roi_sm = 50 ! small radius for dir qc test.
# roi_bg = 75 ! big radius for dir qc test.
# dir_thresh = 85 ! diff in dir required for flagging.
# spd_thresh = 2.5722 ! spd_thresh for station in question.
# spd_thresh_roi = 0.5144 ! spd_thresh for stations in roi.
# level1 = suspflag ! "suspect" flag.
# level2 = warnflag ! "warning" flag.
# latdiff = 3.0 ! to speed up station finding...
# londiff = 3.0 ! to speed up station finding...
# max_elev_diff = 99999999.0 ! for now, allow any elevations.
#
# !--- pull netids out of the stnnets.
# allocate(netids(nstnnets))
# netids = stnnets(:)(7:8)
#
# allocate( roi_dist (nstnnets) )
# allocate( roi_spd (nstnnets) )
# allocate( roi_dir (nstnnets) )
# allocate( roi_stnnets (nstnnets) )
# allocate( roi_lat (nstnnets) )
# allocate( roi_lon (nstnnets) )
# allocate( roi_elev (nstnnets) )
#
# !--- Loop through each date.
# do d = 1, ndts
#
# dt_c = dts(d)
#
# !--- Loop through each station.
# do s = 1, nstnnets
#
# !--- Skip this station/obs if it's mvc, or if the speed is
# !--- below 'spd_thresh'. Since as of 1/16/2007 many obs
# !--- have bad elevation data, NOT skipping if this is missing
# !--- (probably change this when station elevation data gets
# !--- fixed by Dave Carey).
# if (obs(d,s,ispd) .lt. spd_thresh .or. &
# obs(d,s,ispd) .eq. mvc .or. &
# obs(d,s,idir) .eq. mvc .or. &
# obs(d,s,iu) .eq. mvc .or. &
# obs(d,s,iv) .eq. mvc .or. &
# qc_flag(d,s,ispd,irangeflag) .eq. failflag .or. &
# qc_flag(d,s,idir,irangeflag) .eq. failflag) then
# qc_flag(d,s,ivar,ispatialflag) = notestflag
# cycle
# end if
#
# found_one = 0
# countem_sm = 0
# countem_bg = 0
# !--- for each station, check it versus every other station
# !--- (except itself). First time through get # of stations
# !--- within radius of influence to determine if we can do
# !--- this test.
# do ss = 1, nstnnets
#
# !--- skip station for any of the following reasons: same
# !--- station as 's' (ss.eq.s), it's not within a lat/lon box
# !--- defined by latdiff/long diff, the value is mvc,
# !--- the value has been flagged in other tests, or the
# !--- elevation data is mvc.
# if ( abs(lat(d,ss) - lat(d,s) ) .gt. latdiff) cycle
# if ( abs(lon(d,ss) - lon(d,s) ) .gt. londiff) cycle
# if ( obs(d,ss,ispd) .eq. mvc ) cycle
# if ( obs(d,ss,idir) .eq. mvc ) cycle
# if ( obs(d,ss,iu) .eq. mvc ) cycle
# if ( obs(d,ss,iv) .eq. mvc ) cycle
# if ( lat(d,ss) .eq. mvc ) cycle
# if ( lon(d,ss) .eq. mvc ) cycle
# if ( obs(d,ss,ispd) .lt. spd_thresh_roi ) cycle
# if ( abs(elev(d,ss) - elev(d,s) ) .gt. max_elev_diff) cycle
# if ( qc_flag(d,ss,ivar,irangeflag) .eq. failflag ) cycle
# if ( qc_flag(d,ss,ivar,istepflag) .eq. suspflag ) cycle
# if ( qc_flag(d,ss,ivar,istepflag) .eq. warnflag ) cycle
# if ( qc_flag(d,ss,ivar,ipersistflag) .eq. suspflag ) cycle
# if ( qc_flag(d,ss,ivar,ipersistflag) .eq. warnflag ) cycle
# if ( ss .eq. s ) cycle
#
# call distance(lat(d,s),lon(d,s), lat(d,ss),lon(d,ss), dist)
#
# !--- Only add up number of stations in small roi, and keep
# !--- around values in big roi.
# if (dist .le. roi_sm) then
# countem_sm = countem_sm + 1
# end if
# if (dist .le. roi_bg) then
# countem_bg = countem_bg + 1
# roi_dist(countem_bg) = dist
# roi_spd(countem_bg) = obs(d,ss,ispd)
# roi_dir(countem_bg) = obs(d,ss,idir)
# roi_stnnets(countem_bg) = stnnets(ss)
# roi_lat(countem_bg) = lat(d,ss)
# roi_lon(countem_bg) = lon(d,ss)
# roi_elev(countem_bg) = elev(d,ss)
# end if
#
# end do
#
# !--- if there's enough good stations in small roi, expand to big
# !--- roi and use data from it.
# if (countem_sm .ge. min_stations .and. &
# countem_bg .ge. min_stations) then
#
# !--- calculate the minimum difference between the direction
# !--- in question and the obs in the roi.
# min_diff = 9999.0
# do i = 1, countem_bg
# ! call ndir_diff(dir_diff, roi_dir(i), obs(d,s,idir))
# if (dir_diff .lt. min_diff) then
# min_diff = dir_diff
# end if
# end do
#
# !--- if vector average speed as well as the station in
# !--- question's speed is high enough and the direction is
# !--- different enough from the vector average, flag it.
# if (min_diff .gt. dir_thresh) then
# qc_flag(d,s,ivar,ispatialflag) = warnflag
#
# !--- if set, print out station and roi stations data.
# 17 format(i1,',', a10, ',', a8, ',', 4(f12.3,','), f12.3)
# if (print_roi_data .eq. 1) then
# open(unit=100, file=trim(roi_data_file), &
# position="append")
# write(100,17) 1, dt_c, &
# stnnets(s),elev(d,s),lat(d,s),&
# lon(d,s), obs(d,s,idir), obs(d,s,ispd)
# do i = 1, countem_bg
# write(100,17) 0, dt_c, &
# roi_stnnets(i), roi_elev(i), &
# roi_lat(i), roi_lon(i), &
# roi_dir(i), roi_spd(i)
# end do
# close(100)
# end if
# end if
#
# else
#
# !--- not enough obs in roi to do test.
# qc_flag(d,s,ivar,ispatialflag) = notestflag
#
# end if
#
# end do !--- stations do loop.
# end do !--- dates do loop.
#
# end subroutine spatial_check_dir
#
def distance(lat1, lon1, lat2, lon2):
pi = atan2(0.0, -1.0)
theta = lon1 - lon2
lat1_r = lat1 * (pi / 180.)
lat2_r = lat2 * (pi / 180.)
theta_r = theta * (pi / 180.)
dist = acos(sin(lat1_r) * sin(lat2_r) + cos(lat1_r) * cos(lat2_r) * cos(theta_r))
dist_temp = (dist * (180. / pi)) * 60 * 1.1515
# only returning in km units for now.
return dist_temp * 1.609344
| spatial_check | identifier_name |
pushsync.go | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pushsync provides the pushsync protocol
// implementation.
package pushsync
import (
"context"
"errors"
"fmt"
"time"
"github.com/ethersphere/bee/pkg/accounting"
"github.com/ethersphere/bee/pkg/cac"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/log"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/pricer"
"github.com/ethersphere/bee/pkg/pushsync/pb"
"github.com/ethersphere/bee/pkg/skippeers"
"github.com/ethersphere/bee/pkg/soc"
storage "github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology"
"github.com/ethersphere/bee/pkg/tracing"
opentracing "github.com/opentracing/opentracing-go"
)
// loggerName is the tree path name of the logger for this package.
const loggerName = "pushsync"
const (
protocolName = "pushsync"
protocolVersion = "1.2.0"
streamName = "pushsync"
)
const (
defaultTTL = 30 * time.Second // request time to live
preemptiveInterval = 5 * time.Second // P90 request time to live
sanctionWait = 5 * time.Minute
overDraftRefresh = time.Millisecond * 600
)
const (
nPeersToReplicate = 2 // number of peers to replicate to as receipt is sent upstream
maxPushErrors = 32
)
// Sentinel errors returned by the push-sync service.
var (
	// ErrNoPush: the chunk could not be pushed to any peer.
	ErrNoPush = errors.New("could not push chunk")
	// ErrOutOfDepthStoring: asked to store a chunk outside our neighborhood.
	ErrOutOfDepthStoring = errors.New("storing outside of the neighborhood")
	// ErrWarmup: pushes are refused until the warmup period has elapsed.
	ErrWarmup = errors.New("node warmup time not complete")
)
// PushSyncer pushes a chunk towards its storage neighborhood and returns the
// signed storage receipt from the storing node.
type PushSyncer interface {
	PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error)
}
// Receipt is proof of storage for a pushed chunk.
type Receipt struct {
	Address   swarm.Address // address of the stored chunk
	Signature []byte        // storer's signature over the chunk address
	Nonce     []byte        // storer's overlay nonce
}
// Storer is the local storage interface push-sync needs: reserve puts,
// push-state reporting, and storage-radius queries.
type Storer interface {
	storage.PushReporter
	// ReservePutter returns the putter used to store received chunks.
	ReservePutter() storage.Putter
	// IsWithinStorageRadius reports whether the address falls in this
	// node's storage neighborhood.
	IsWithinStorageRadius(swarm.Address) bool
	// StorageRadius returns the current storage radius (proximity order).
	StorageRadius() uint8
}
// PushSync implements the pushsync protocol: it forwards chunks towards
// their storage neighborhood, stores those within its own radius, and
// returns signed receipts upstream.
type PushSync struct {
	address        swarm.Address          // this node's overlay address
	nonce          []byte                 // overlay nonce, embedded in receipts
	streamer       p2p.StreamerDisconnecter
	store          Storer                 // local reserve storage
	topologyDriver topology.Driver        // peer selection / reachability
	unwrap         func(swarm.Chunk)      // hook for content-addressed chunks
	logger         log.Logger
	accounting     accounting.Interface   // credit/debit bookkeeping per peer
	pricer         pricer.Interface
	metrics        metrics
	tracer         *tracing.Tracer
	validStamp     postage.ValidStampFn   // stamp validation (metrics-wrapped)
	signer         crypto.Signer          // signs receipts for stored chunks
	fullNode       bool
	skipList       *skippeers.List        // per-chunk peers to skip (sanctioned/overdrafted)
	warmupPeriod   time.Time              // pushes refused before this instant
}
// receiptResult carries the outcome of a single push attempt back to the
// pushToClosest select loop.
type receiptResult struct {
	pushTime time.Time     // when the attempt started (for latency metrics)
	peer     swarm.Address // peer the chunk was pushed to
	receipt  *pb.Receipt   // non-nil on success
	err      error         // non-nil on failure
}
// New constructs a PushSync service. The warmup period starts now and lasts
// warmupTime; pushes before it elapses fail with ErrWarmup. The provided
// validStamp function is wrapped with metrics instrumentation.
func New(
	address swarm.Address,
	nonce []byte,
	streamer p2p.StreamerDisconnecter,
	store Storer,
	topology topology.Driver,
	fullNode bool,
	unwrap func(swarm.Chunk),
	validStamp postage.ValidStampFn,
	logger log.Logger,
	accounting accounting.Interface,
	pricer pricer.Interface,
	signer crypto.Signer,
	tracer *tracing.Tracer,
	warmupTime time.Duration,
) *PushSync {
	ps := &PushSync{
		address:        address,
		nonce:          nonce,
		streamer:       streamer,
		store:          store,
		topologyDriver: topology,
		fullNode:       fullNode,
		unwrap:         unwrap,
		logger:         logger.WithName(loggerName).Register(),
		accounting:     accounting,
		pricer:         pricer,
		metrics:        newMetrics(),
		tracer:         tracer,
		signer:         signer,
		skipList:       skippeers.NewList(),
		warmupPeriod:   time.Now().Add(warmupTime),
	}
	// Wrap stamp validation so latency and invalid-stamp counts are recorded.
	ps.validStamp = ps.validStampWrapper(validStamp)
	return ps
}
// Protocol returns the pushsync protocol spec, registering the single
// "pushsync" stream with its handler.
// (Receiver renamed s -> ps for consistency with every other PushSync method.)
func (ps *PushSync) Protocol() p2p.ProtocolSpec {
	return p2p.ProtocolSpec{
		Name:    protocolName,
		Version: protocolVersion,
		StreamSpecs: []p2p.StreamSpec{
			{
				Name:    streamName,
				Handler: ps.handler,
			},
		},
	}
}
// handler handles chunk delivery from other node and forwards to its destination node.
// If the current node is the destination, it stores in the local store and sends a receipt.
//
// Flow: read the delivery -> validate chunk (CAC or SOC) -> if the chunk is
// within our storage radius and we are reachable, store locally and sign a
// receipt; otherwise forward via pushToClosest and relay the receipt. In
// both paths the sending peer is debited the chunk price, and the debit is
// applied only after the receipt is successfully written back.
func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
	now := time.Now()
	w, r := protobuf.NewWriterAndReader(stream)
	ctx, cancel := context.WithTimeout(ctx, defaultTTL)
	defer cancel()
	// On error the stream is Reset (abort); on success it is fully closed.
	defer func() {
		if err != nil {
			ps.metrics.TotalHandlerTime.WithLabelValues("failure").Observe(time.Since(now).Seconds())
			ps.metrics.TotalHandlerErrors.Inc()
			_ = stream.Reset()
		} else {
			ps.metrics.TotalHandlerTime.WithLabelValues("success").Observe(time.Since(now).Seconds())
			_ = stream.FullClose()
		}
	}()
	var ch pb.Delivery
	if err = r.ReadMsgWithContext(ctx, &ch); err != nil {
		return fmt.Errorf("pushsync read delivery: %w", err)
	}
	ps.metrics.TotalReceived.Inc()
	chunk := swarm.NewChunk(swarm.NewAddress(ch.Address), ch.Data)
	chunkAddress := chunk.Address()
	span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunkAddress.String()})
	defer span.Finish()
	stamp := new(postage.Stamp)
	err = stamp.UnmarshalBinary(ch.Stamp)
	if err != nil {
		return fmt.Errorf("pushsync stamp unmarshall: %w", err)
	}
	chunk.WithStamp(stamp)
	// Content-addressed chunks may carry nested content: unwrap async.
	if cac.Valid(chunk) {
		go ps.unwrap(chunk)
	} else if !soc.Valid(chunk) {
		return swarm.ErrInvalidChunk
	}
	price := ps.pricer.Price(chunkAddress)
	// store validates the stamp, puts the chunk into the reserve, signs a
	// receipt and writes it back, debiting the sender on success.
	store := func(ctx context.Context) error {
		ps.metrics.Storer.Inc()
		chunkToPut, err := ps.validStamp(chunk)
		if err != nil {
			return fmt.Errorf("invalid stamp: %w", err)
		}
		err = ps.store.ReservePutter().Put(ctx, chunkToPut)
		if err != nil {
			return fmt.Errorf("reserve put: %w", err)
		}
		signature, err := ps.signer.Sign(chunkToPut.Address().Bytes())
		if err != nil {
			return fmt.Errorf("receipt signature: %w", err)
		}
		// return back receipt
		debit, err := ps.accounting.PrepareDebit(ctx, p.Address, price)
		if err != nil {
			return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
		}
		defer debit.Cleanup()
		receipt := pb.Receipt{Address: chunkToPut.Address().Bytes(), Signature: signature, Nonce: ps.nonce}
		if err := w.WriteMsgWithContext(ctx, &receipt); err != nil {
			return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
		}
		return debit.Apply()
	}
	if ps.topologyDriver.IsReachable() && ps.store.IsWithinStorageRadius(chunkAddress) {
		return store(ctx)
	}
	receipt, err := ps.pushToClosest(ctx, chunk, false)
	if err != nil {
		// ErrWantSelf: topology says we are the closest peer, so store.
		if errors.Is(err, topology.ErrWantSelf) {
			return store(ctx)
		}
		ps.metrics.Forwarder.Inc()
		return fmt.Errorf("handler: push to closest chunk %s: %w", chunkAddress, err)
	}
	ps.metrics.Forwarder.Inc()
	debit, err := ps.accounting.PrepareDebit(ctx, p.Address, price)
	if err != nil {
		return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
	}
	defer debit.Cleanup()
	// pass back the receipt
	if err := w.WriteMsgWithContext(ctx, receipt); err != nil {
		return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
	}
	return debit.Apply()
}
// PushChunkToClosest sends chunk to the closest peer by opening a stream. It then waits for
// a receipt from that peer and returns error or nil based on the receiving and
// the validity of the receipt.
func (ps *PushSync) PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error) {
	ps.metrics.TotalOutgoing.Inc()

	// Push as origin: the chunk is always forwarded into the network first.
	receipt, err := ps.pushToClosest(ctx, ch, true)
	if err != nil {
		ps.metrics.TotalOutgoingErrors.Inc()
		return nil, err
	}

	result := &Receipt{
		Address:   swarm.NewAddress(receipt.Address),
		Signature: receipt.Signature,
		Nonce:     receipt.Nonce,
	}
	return result, nil
}
// pushToClosest attempts to push the chunk into the network.
//
// It runs an event loop driven by three channels: retryC (start another push
// attempt), preemptiveTicker (origin-only: retry slow requests after
// preemptiveInterval), and resultChan (outcome of an in-flight push). Peers
// already tried are tracked in ps.skipList. When the selected peer is inside
// the chunk's neighborhood, up to nPeersToReplicate pushes run in parallel.
// The loop ends on the first receipt, on context cancellation, or after
// sentErrorsLeft failed attempts.
func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bool) (*pb.Receipt, error) {
	if !ps.warmedUp() {
		return nil, ErrWarmup
	}
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	ps.metrics.TotalRequests.Inc()
	var (
		sentErrorsLeft   = 1
		preemptiveTicker <-chan time.Time
		inflight         int
		parallelForwards = nPeersToReplicate
	)
	// Origin pushes get preemptive retries and a much larger error budget.
	if origin {
		ticker := time.NewTicker(preemptiveInterval)
		defer ticker.Stop()
		preemptiveTicker = ticker.C
		sentErrorsLeft = maxPushErrors
	}
	resultChan := make(chan receiptResult)
	retryC := make(chan struct{}, parallelForwards)
	// retry schedules one more push attempt; drops the signal if the buffer
	// is full or the context is done.
	retry := func() {
		select {
		case retryC <- struct{}{}:
		case <-ctx.Done():
		default:
		}
	}
	retry()
	for sentErrorsLeft > 0 {
		select {
		case <-ctx.Done():
			return nil, ErrNoPush
		case <-preemptiveTicker:
			retry()
		case <-retryC:
			// Origin peers should not store the chunk initially so that the chunk is always forwarded into the network.
			// If no peer can be found from an origin peer, the origin peer may store the chunk.
			// Non-origin peers store the chunk if the chunk is within depth.
			// For non-origin peers, if the chunk is not within depth, they may store the chunk if they are the closest peer to the chunk.
			peer, err := ps.topologyDriver.ClosestPeer(ch.Address(), ps.fullNode && !origin, topology.Select{Reachable: true, Healthy: true}, ps.skipList.ChunkPeers(ch.Address())...)
			if errors.Is(err, topology.ErrNotFound) {
				if ps.skipList.PruneExpiresAfter(ch.Address(), overDraftRefresh) == 0 { //no overdraft peers, we have depleted ALL peers
					if inflight == 0 {
						if ps.fullNode && ps.topologyDriver.IsReachable() {
							if cac.Valid(ch) {
								go ps.unwrap(ch)
							}
							// Signal the caller that we should store it ourselves.
							return nil, topology.ErrWantSelf
						}
						ps.logger.Debug("no peers left", "chunk_address", ch.Address(), "error", err)
						return nil, err
					}
					continue // there is still an inflight request, wait for it's result
				}
				// Some peers were only skipped for overdraft: wait for their
				// balance to refresh, then try again.
				ps.logger.Debug("sleeping to refresh overdraft balance", "chunk_address", ch.Address())
				select {
				case <-time.After(overDraftRefresh):
					retry()
					continue
				case <-ctx.Done():
					return nil, ctx.Err()
				}
			}
			if err != nil {
				if inflight == 0 {
					return nil, err
				}
				ps.logger.Debug("next peer", "chunk_address", ch.Address(), "error", err)
				continue
			}
			// since we can reach into the neighborhood of the chunk
			// act as the multiplexer and push the chunk in parallel to multiple peers
			if swarm.Proximity(peer.Bytes(), ch.Address().Bytes()) >= ps.store.StorageRadius() {
				for ; parallelForwards > 0; parallelForwards-- {
					retry()
					sentErrorsLeft++
				}
			}
			action, err := ps.prepareCredit(ctx, peer, ch, origin)
			if err != nil {
				// Overdrafted peer: skip it briefly and try another.
				retry()
				ps.skipList.Add(ch.Address(), peer, overDraftRefresh)
				continue
			}
			// Sanction the peer for this chunk so it is not retried soon.
			ps.skipList.Add(ch.Address(), peer, sanctionWait)
			ps.metrics.TotalSendAttempts.Inc()
			inflight++
			go ps.push(ctx, resultChan, peer, ch, action)
		case result := <-resultChan:
			inflight--
			ps.measurePushPeer(result.pushTime, result.err, origin)
			if result.err == nil {
				return result.receipt, nil
			}
			ps.metrics.TotalFailedSendAttempts.Inc()
			ps.logger.Debug("could not push to peer", "chunk_address", ch.Address(), "peer_address", result.peer, "error", result.err)
			sentErrorsLeft--
			retry()
		}
	}
	return nil, ErrNoPush
}
// push performs a single chunk push to one peer and reports the outcome on
// resultChan. It runs with its own defaultTTL deadline, detached from
// parentCtx's cancellation (only tracing context is carried over), so an
// in-flight push can still complete while the caller moves on. Credit is
// applied only after a receipt is obtained; otherwise action.Cleanup rolls
// it back.
func (ps *PushSync) push(parentCtx context.Context, resultChan chan<- receiptResult, peer swarm.Address, ch swarm.Chunk, action accounting.Action) {
	span := tracing.FromContext(parentCtx)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTTL)
	defer cancel()
	spanInner, _, ctx := ps.tracer.StartSpanFromContext(tracing.WithContext(ctx, span), "push-closest", ps.logger, opentracing.Tag{Key: "address", Value: ch.Address().String()})
	defer spanInner.Finish()
	var (
		err     error
		receipt *pb.Receipt
		now     = time.Now()
	)
	// Deliver the result unless the caller has already given up.
	defer func() {
		select {
		case resultChan <- receiptResult{pushTime: now, peer: peer, err: err, receipt: receipt}:
		case <-parentCtx.Done():
		}
	}()
	defer action.Cleanup()
	receipt, err = ps.pushChunkToPeer(ctx, peer, ch)
	if err != nil {
		return
	}
	ps.metrics.TotalSent.Inc()
	err = action.Apply()
}
// pushChunkToPeer opens a pushsync stream to peer, sends the chunk delivery
// (address, data, stamp), reports the chunk as sent to local storage, then
// waits for the peer's receipt and verifies it refers to the same chunk
// address. Named returns are used so the deferred close can distinguish
// error (Reset) from success (FullClose).
func (ps *PushSync) pushChunkToPeer(ctx context.Context, peer swarm.Address, ch swarm.Chunk) (receipt *pb.Receipt, err error) {
	streamer, err := ps.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName)
	if err != nil {
		return nil, fmt.Errorf("new stream for peer %s: %w", peer.String(), err)
	}
	defer func() {
		if err != nil {
			_ = streamer.Reset()
		} else {
			_ = streamer.FullClose()
		}
	}()
	w, r := protobuf.NewWriterAndReader(streamer)
	stamp, err := ch.Stamp().MarshalBinary()
	if err != nil {
		return nil, err
	}
	err = w.WriteMsgWithContext(ctx, &pb.Delivery{
		Address: ch.Address().Bytes(),
		Data:    ch.Data(),
		Stamp:   stamp,
	})
	if err != nil {
		return nil, err
	}
	// Mark the chunk as sent for tag accounting; ErrNotFound (no tag) is benign.
	err = ps.store.Report(ctx, ch, storage.ChunkSent)
	if err != nil && !errors.Is(err, storage.ErrNotFound) {
		err = fmt.Errorf("tag %d increment: %w", ch.TagID(), err)
		return
	}
	var rec pb.Receipt
	if err = r.ReadMsgWithContext(ctx, &rec); err != nil {
		return nil, err
	}
	// The receipt must be for the chunk we pushed.
	if !ch.Address().Equal(swarm.NewAddress(rec.Address)) {
		return nil, fmt.Errorf("invalid receipt. chunk %s, peer %s", ch.Address(), peer)
	}
	return &rec, nil
}
// prepareCredit reserves accounting credit towards peer for pushing ch,
// bounded by a one-second timeout. The returned Action must be Applied on
// success or Cleaned up by the caller.
func (ps *PushSync) prepareCredit(ctx context.Context, peer swarm.Address, ch swarm.Chunk, origin bool) (accounting.Action, error) {
	creditCtx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()

	price := ps.pricer.PeerPrice(peer, ch.Address())
	action, err := ps.accounting.PrepareCredit(creditCtx, peer, price, origin)
	if err != nil {
		return nil, err
	}
	return action, nil
}
// measurePushPeer records the duration of a single push attempt in the
// PushToPeerTime histogram, labelled by outcome.
// NOTE(review): the origin parameter is currently unused here -- confirm
// whether it was meant to be an additional metric label.
func (ps *PushSync) measurePushPeer(t time.Time, err error, origin bool) {
	status := "success"
	if err != nil {
		status = "failure"
	}
	ps.metrics.PushToPeerTime.WithLabelValues(status).Observe(time.Since(t).Seconds())
}
func (ps *PushSync) validStampWrapper(f postage.ValidStampFn) postage.ValidStampFn {
return func(c swarm.Chunk) (swarm.Chunk, error) {
t := time.Now()
chunk, err := f(c)
if err != nil {
ps.metrics.InvalidStampErrors.Inc()
ps.metrics.StampValidationTime.WithLabelValues("failure").Observe(time.Since(t).Seconds())
} else {
ps.metrics.StampValidationTime.WithLabelValues("success").Observe(time.Since(t).Seconds())
}
return chunk, err
}
}
func (s *PushSync) Close() error {
return s.skipList.Close()
}
func (ps *PushSync) | () bool {
return time.Now().After(ps.warmupPeriod)
}
| warmedUp | identifier_name |
pushsync.go | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pushsync provides the pushsync protocol
// implementation.
package pushsync
import (
"context"
"errors"
"fmt"
"time"
"github.com/ethersphere/bee/pkg/accounting"
"github.com/ethersphere/bee/pkg/cac"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/log"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/pricer"
"github.com/ethersphere/bee/pkg/pushsync/pb"
"github.com/ethersphere/bee/pkg/skippeers"
"github.com/ethersphere/bee/pkg/soc"
storage "github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology"
"github.com/ethersphere/bee/pkg/tracing"
opentracing "github.com/opentracing/opentracing-go"
)
// loggerName is the tree path name of the logger for this package.
const loggerName = "pushsync"
const (
protocolName = "pushsync"
protocolVersion = "1.2.0"
streamName = "pushsync"
)
const (
defaultTTL = 30 * time.Second // request time to live
preemptiveInterval = 5 * time.Second // P90 request time to live
sanctionWait = 5 * time.Minute
overDraftRefresh = time.Millisecond * 600
)
const (
nPeersToReplicate = 2 // number of peers to replicate to as receipt is sent upstream
maxPushErrors = 32
)
var (
ErrNoPush = errors.New("could not push chunk")
ErrOutOfDepthStoring = errors.New("storing outside of the neighborhood")
ErrWarmup = errors.New("node warmup time not complete")
)
type PushSyncer interface {
PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error)
}
type Receipt struct {
Address swarm.Address
Signature []byte
Nonce []byte
}
type Storer interface {
storage.PushReporter
ReservePutter() storage.Putter
IsWithinStorageRadius(swarm.Address) bool
StorageRadius() uint8
}
type PushSync struct {
address swarm.Address
nonce []byte
streamer p2p.StreamerDisconnecter
store Storer
topologyDriver topology.Driver
unwrap func(swarm.Chunk)
logger log.Logger
accounting accounting.Interface
pricer pricer.Interface
metrics metrics
tracer *tracing.Tracer
validStamp postage.ValidStampFn
signer crypto.Signer
fullNode bool
skipList *skippeers.List
warmupPeriod time.Time
}
type receiptResult struct {
pushTime time.Time
peer swarm.Address
receipt *pb.Receipt
err error
}
func New(
address swarm.Address,
nonce []byte,
streamer p2p.StreamerDisconnecter,
store Storer,
topology topology.Driver,
fullNode bool,
unwrap func(swarm.Chunk),
validStamp postage.ValidStampFn,
logger log.Logger,
accounting accounting.Interface,
pricer pricer.Interface,
signer crypto.Signer,
tracer *tracing.Tracer,
warmupTime time.Duration,
) *PushSync {
ps := &PushSync{
address: address,
nonce: nonce,
streamer: streamer,
store: store,
topologyDriver: topology,
fullNode: fullNode,
unwrap: unwrap,
logger: logger.WithName(loggerName).Register(),
accounting: accounting,
pricer: pricer,
metrics: newMetrics(),
tracer: tracer,
signer: signer,
skipList: skippeers.NewList(),
warmupPeriod: time.Now().Add(warmupTime),
}
ps.validStamp = ps.validStampWrapper(validStamp)
return ps
}
func (s *PushSync) Protocol() p2p.ProtocolSpec {
return p2p.ProtocolSpec{
Name: protocolName,
Version: protocolVersion,
StreamSpecs: []p2p.StreamSpec{
{
Name: streamName,
Handler: s.handler,
},
},
}
}
// handler handles chunk delivery from other node and forwards to its destination node.
// If the current node is the destination, it stores in the local store and sends a receipt.
func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
now := time.Now()
w, r := protobuf.NewWriterAndReader(stream)
ctx, cancel := context.WithTimeout(ctx, defaultTTL)
defer cancel()
defer func() {
if err != nil {
ps.metrics.TotalHandlerTime.WithLabelValues("failure").Observe(time.Since(now).Seconds())
ps.metrics.TotalHandlerErrors.Inc()
_ = stream.Reset()
} else {
ps.metrics.TotalHandlerTime.WithLabelValues("success").Observe(time.Since(now).Seconds())
_ = stream.FullClose()
}
}()
var ch pb.Delivery
if err = r.ReadMsgWithContext(ctx, &ch); err != nil {
return fmt.Errorf("pushsync read delivery: %w", err)
}
ps.metrics.TotalReceived.Inc()
chunk := swarm.NewChunk(swarm.NewAddress(ch.Address), ch.Data)
chunkAddress := chunk.Address()
span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunkAddress.String()})
defer span.Finish()
stamp := new(postage.Stamp)
err = stamp.UnmarshalBinary(ch.Stamp)
if err != nil {
return fmt.Errorf("pushsync stamp unmarshall: %w", err)
}
chunk.WithStamp(stamp)
if cac.Valid(chunk) {
go ps.unwrap(chunk)
} else if !soc.Valid(chunk) {
return swarm.ErrInvalidChunk
}
price := ps.pricer.Price(chunkAddress)
store := func(ctx context.Context) error {
ps.metrics.Storer.Inc()
chunkToPut, err := ps.validStamp(chunk)
if err != nil {
return fmt.Errorf("invalid stamp: %w", err)
}
err = ps.store.ReservePutter().Put(ctx, chunkToPut)
if err != nil {
return fmt.Errorf("reserve put: %w", err)
}
signature, err := ps.signer.Sign(chunkToPut.Address().Bytes())
if err != nil {
return fmt.Errorf("receipt signature: %w", err)
}
// return back receipt
debit, err := ps.accounting.PrepareDebit(ctx, p.Address, price)
if err != nil {
return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
}
defer debit.Cleanup()
receipt := pb.Receipt{Address: chunkToPut.Address().Bytes(), Signature: signature, Nonce: ps.nonce}
if err := w.WriteMsgWithContext(ctx, &receipt); err != nil {
return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
}
return debit.Apply()
}
if ps.topologyDriver.IsReachable() && ps.store.IsWithinStorageRadius(chunkAddress) {
return store(ctx)
}
receipt, err := ps.pushToClosest(ctx, chunk, false)
if err != nil {
if errors.Is(err, topology.ErrWantSelf) {
return store(ctx)
}
ps.metrics.Forwarder.Inc()
return fmt.Errorf("handler: push to closest chunk %s: %w", chunkAddress, err)
}
ps.metrics.Forwarder.Inc()
debit, err := ps.accounting.PrepareDebit(ctx, p.Address, price)
if err != nil {
return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
}
defer debit.Cleanup()
// pass back the receipt
if err := w.WriteMsgWithContext(ctx, receipt); err != nil {
return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
}
return debit.Apply()
}
// PushChunkToClosest sends chunk to the closest peer by opening a stream. It then waits for
// a receipt from that peer and returns error or nil based on the receiving and
// the validity of the receipt.
func (ps *PushSync) PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error) {
ps.metrics.TotalOutgoing.Inc()
r, err := ps.pushToClosest(ctx, ch, true)
if err != nil {
ps.metrics.TotalOutgoingErrors.Inc()
return nil, err
}
return &Receipt{
Address: swarm.NewAddress(r.Address),
Signature: r.Signature,
Nonce: r.Nonce,
}, nil
}
// pushToClosest attempts to push the chunk into the network.
func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bool) (*pb.Receipt, error) {
if !ps.warmedUp() {
return nil, ErrWarmup
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ps.metrics.TotalRequests.Inc()
var (
sentErrorsLeft = 1
preemptiveTicker <-chan time.Time
inflight int
parallelForwards = nPeersToReplicate
)
if origin {
ticker := time.NewTicker(preemptiveInterval)
defer ticker.Stop()
preemptiveTicker = ticker.C
sentErrorsLeft = maxPushErrors
}
resultChan := make(chan receiptResult)
retryC := make(chan struct{}, parallelForwards)
retry := func() {
select {
case retryC <- struct{}{}:
case <-ctx.Done():
default:
}
}
retry()
for sentErrorsLeft > 0 {
select {
case <-ctx.Done():
return nil, ErrNoPush
case <-preemptiveTicker:
retry()
case <-retryC:
// Origin peers should not store the chunk initially so that the chunk is always forwarded into the network.
// If no peer can be found from an origin peer, the origin peer may store the chunk.
// Non-origin peers store the chunk if the chunk is within depth.
// For non-origin peers, if the chunk is not within depth, they may store the chunk if they are the closest peer to the chunk.
peer, err := ps.topologyDriver.ClosestPeer(ch.Address(), ps.fullNode && !origin, topology.Select{Reachable: true, Healthy: true}, ps.skipList.ChunkPeers(ch.Address())...)
if errors.Is(err, topology.ErrNotFound) {
if ps.skipList.PruneExpiresAfter(ch.Address(), overDraftRefresh) == 0 |
ps.logger.Debug("sleeping to refresh overdraft balance", "chunk_address", ch.Address())
select {
case <-time.After(overDraftRefresh):
retry()
continue
case <-ctx.Done():
return nil, ctx.Err()
}
}
if err != nil {
if inflight == 0 {
return nil, err
}
ps.logger.Debug("next peer", "chunk_address", ch.Address(), "error", err)
continue
}
// since we can reach into the neighborhood of the chunk
// act as the multiplexer and push the chunk in parallel to multiple peers
if swarm.Proximity(peer.Bytes(), ch.Address().Bytes()) >= ps.store.StorageRadius() {
for ; parallelForwards > 0; parallelForwards-- {
retry()
sentErrorsLeft++
}
}
action, err := ps.prepareCredit(ctx, peer, ch, origin)
if err != nil {
retry()
ps.skipList.Add(ch.Address(), peer, overDraftRefresh)
continue
}
ps.skipList.Add(ch.Address(), peer, sanctionWait)
ps.metrics.TotalSendAttempts.Inc()
inflight++
go ps.push(ctx, resultChan, peer, ch, action)
case result := <-resultChan:
inflight--
ps.measurePushPeer(result.pushTime, result.err, origin)
if result.err == nil {
return result.receipt, nil
}
ps.metrics.TotalFailedSendAttempts.Inc()
ps.logger.Debug("could not push to peer", "chunk_address", ch.Address(), "peer_address", result.peer, "error", result.err)
sentErrorsLeft--
retry()
}
}
return nil, ErrNoPush
}
func (ps *PushSync) push(parentCtx context.Context, resultChan chan<- receiptResult, peer swarm.Address, ch swarm.Chunk, action accounting.Action) {
span := tracing.FromContext(parentCtx)
ctx, cancel := context.WithTimeout(context.Background(), defaultTTL)
defer cancel()
spanInner, _, ctx := ps.tracer.StartSpanFromContext(tracing.WithContext(ctx, span), "push-closest", ps.logger, opentracing.Tag{Key: "address", Value: ch.Address().String()})
defer spanInner.Finish()
var (
err error
receipt *pb.Receipt
now = time.Now()
)
defer func() {
select {
case resultChan <- receiptResult{pushTime: now, peer: peer, err: err, receipt: receipt}:
case <-parentCtx.Done():
}
}()
defer action.Cleanup()
receipt, err = ps.pushChunkToPeer(ctx, peer, ch)
if err != nil {
return
}
ps.metrics.TotalSent.Inc()
err = action.Apply()
}
func (ps *PushSync) pushChunkToPeer(ctx context.Context, peer swarm.Address, ch swarm.Chunk) (receipt *pb.Receipt, err error) {
streamer, err := ps.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName)
if err != nil {
return nil, fmt.Errorf("new stream for peer %s: %w", peer.String(), err)
}
defer func() {
if err != nil {
_ = streamer.Reset()
} else {
_ = streamer.FullClose()
}
}()
w, r := protobuf.NewWriterAndReader(streamer)
stamp, err := ch.Stamp().MarshalBinary()
if err != nil {
return nil, err
}
err = w.WriteMsgWithContext(ctx, &pb.Delivery{
Address: ch.Address().Bytes(),
Data: ch.Data(),
Stamp: stamp,
})
if err != nil {
return nil, err
}
err = ps.store.Report(ctx, ch, storage.ChunkSent)
if err != nil && !errors.Is(err, storage.ErrNotFound) {
err = fmt.Errorf("tag %d increment: %w", ch.TagID(), err)
return
}
var rec pb.Receipt
if err = r.ReadMsgWithContext(ctx, &rec); err != nil {
return nil, err
}
if !ch.Address().Equal(swarm.NewAddress(rec.Address)) {
return nil, fmt.Errorf("invalid receipt. chunk %s, peer %s", ch.Address(), peer)
}
return &rec, nil
}
func (ps *PushSync) prepareCredit(ctx context.Context, peer swarm.Address, ch swarm.Chunk, origin bool) (accounting.Action, error) {
creditCtx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
creditAction, err := ps.accounting.PrepareCredit(creditCtx, peer, ps.pricer.PeerPrice(peer, ch.Address()), origin)
if err != nil {
return nil, err
}
return creditAction, nil
}
func (ps *PushSync) measurePushPeer(t time.Time, err error, origin bool) {
var status string
if err != nil {
status = "failure"
} else {
status = "success"
}
ps.metrics.PushToPeerTime.WithLabelValues(status).Observe(time.Since(t).Seconds())
}
func (ps *PushSync) validStampWrapper(f postage.ValidStampFn) postage.ValidStampFn {
return func(c swarm.Chunk) (swarm.Chunk, error) {
t := time.Now()
chunk, err := f(c)
if err != nil {
ps.metrics.InvalidStampErrors.Inc()
ps.metrics.StampValidationTime.WithLabelValues("failure").Observe(time.Since(t).Seconds())
} else {
ps.metrics.StampValidationTime.WithLabelValues("success").Observe(time.Since(t).Seconds())
}
return chunk, err
}
}
func (s *PushSync) Close() error {
return s.skipList.Close()
}
func (ps *PushSync) warmedUp() bool {
return time.Now().After(ps.warmupPeriod)
}
| { //no overdraft peers, we have depleted ALL peers
if inflight == 0 {
if ps.fullNode && ps.topologyDriver.IsReachable() {
if cac.Valid(ch) {
go ps.unwrap(ch)
}
return nil, topology.ErrWantSelf
}
ps.logger.Debug("no peers left", "chunk_address", ch.Address(), "error", err)
return nil, err
}
continue // there is still an inflight request, wait for it's result
} | conditional_block |
pushsync.go | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pushsync provides the pushsync protocol
// implementation.
package pushsync
import (
"context"
"errors"
"fmt"
"time"
"github.com/ethersphere/bee/pkg/accounting"
"github.com/ethersphere/bee/pkg/cac"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/log"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/pricer"
"github.com/ethersphere/bee/pkg/pushsync/pb"
"github.com/ethersphere/bee/pkg/skippeers"
"github.com/ethersphere/bee/pkg/soc"
storage "github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology"
"github.com/ethersphere/bee/pkg/tracing"
opentracing "github.com/opentracing/opentracing-go"
)
// loggerName is the tree path name of the logger for this package.
const loggerName = "pushsync"
const (
protocolName = "pushsync"
protocolVersion = "1.2.0"
streamName = "pushsync"
)
const (
defaultTTL = 30 * time.Second // request time to live
preemptiveInterval = 5 * time.Second // P90 request time to live
sanctionWait = 5 * time.Minute
overDraftRefresh = time.Millisecond * 600
)
const (
nPeersToReplicate = 2 // number of peers to replicate to as receipt is sent upstream
maxPushErrors = 32
)
var (
ErrNoPush = errors.New("could not push chunk")
ErrOutOfDepthStoring = errors.New("storing outside of the neighborhood")
ErrWarmup = errors.New("node warmup time not complete")
)
type PushSyncer interface {
PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error)
}
type Receipt struct {
Address swarm.Address
Signature []byte
Nonce []byte
}
type Storer interface {
storage.PushReporter
ReservePutter() storage.Putter
IsWithinStorageRadius(swarm.Address) bool
StorageRadius() uint8
}
type PushSync struct {
address swarm.Address
nonce []byte
streamer p2p.StreamerDisconnecter
store Storer
topologyDriver topology.Driver
unwrap func(swarm.Chunk)
logger log.Logger
accounting accounting.Interface
pricer pricer.Interface
metrics metrics
tracer *tracing.Tracer
validStamp postage.ValidStampFn
signer crypto.Signer
fullNode bool
skipList *skippeers.List
warmupPeriod time.Time
}
type receiptResult struct {
pushTime time.Time
peer swarm.Address
receipt *pb.Receipt
err error
}
func New(
address swarm.Address,
nonce []byte,
streamer p2p.StreamerDisconnecter,
store Storer,
topology topology.Driver,
fullNode bool,
unwrap func(swarm.Chunk),
validStamp postage.ValidStampFn,
logger log.Logger,
accounting accounting.Interface,
pricer pricer.Interface,
signer crypto.Signer,
tracer *tracing.Tracer,
warmupTime time.Duration,
) *PushSync {
ps := &PushSync{
address: address,
nonce: nonce,
streamer: streamer,
store: store,
topologyDriver: topology,
fullNode: fullNode,
unwrap: unwrap,
logger: logger.WithName(loggerName).Register(),
accounting: accounting,
pricer: pricer,
metrics: newMetrics(),
tracer: tracer,
signer: signer,
skipList: skippeers.NewList(),
warmupPeriod: time.Now().Add(warmupTime),
}
ps.validStamp = ps.validStampWrapper(validStamp)
return ps
}
func (s *PushSync) Protocol() p2p.ProtocolSpec {
return p2p.ProtocolSpec{
Name: protocolName,
Version: protocolVersion,
StreamSpecs: []p2p.StreamSpec{
{
Name: streamName,
Handler: s.handler,
},
},
}
}
// handler handles chunk delivery from other node and forwards to its destination node.
// If the current node is the destination, it stores in the local store and sends a receipt.
func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
now := time.Now()
w, r := protobuf.NewWriterAndReader(stream)
ctx, cancel := context.WithTimeout(ctx, defaultTTL)
defer cancel()
defer func() {
if err != nil {
ps.metrics.TotalHandlerTime.WithLabelValues("failure").Observe(time.Since(now).Seconds())
ps.metrics.TotalHandlerErrors.Inc()
_ = stream.Reset()
} else {
ps.metrics.TotalHandlerTime.WithLabelValues("success").Observe(time.Since(now).Seconds())
_ = stream.FullClose()
}
}()
var ch pb.Delivery
if err = r.ReadMsgWithContext(ctx, &ch); err != nil {
return fmt.Errorf("pushsync read delivery: %w", err)
}
ps.metrics.TotalReceived.Inc()
chunk := swarm.NewChunk(swarm.NewAddress(ch.Address), ch.Data)
chunkAddress := chunk.Address()
span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunkAddress.String()})
defer span.Finish()
stamp := new(postage.Stamp)
err = stamp.UnmarshalBinary(ch.Stamp)
if err != nil {
return fmt.Errorf("pushsync stamp unmarshall: %w", err)
}
chunk.WithStamp(stamp)
if cac.Valid(chunk) {
go ps.unwrap(chunk)
} else if !soc.Valid(chunk) {
return swarm.ErrInvalidChunk
}
price := ps.pricer.Price(chunkAddress)
store := func(ctx context.Context) error {
ps.metrics.Storer.Inc()
chunkToPut, err := ps.validStamp(chunk)
if err != nil {
return fmt.Errorf("invalid stamp: %w", err)
}
err = ps.store.ReservePutter().Put(ctx, chunkToPut)
if err != nil {
return fmt.Errorf("reserve put: %w", err)
}
signature, err := ps.signer.Sign(chunkToPut.Address().Bytes())
if err != nil {
return fmt.Errorf("receipt signature: %w", err)
}
// return back receipt
debit, err := ps.accounting.PrepareDebit(ctx, p.Address, price)
if err != nil {
return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
}
defer debit.Cleanup()
receipt := pb.Receipt{Address: chunkToPut.Address().Bytes(), Signature: signature, Nonce: ps.nonce}
if err := w.WriteMsgWithContext(ctx, &receipt); err != nil {
return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
}
return debit.Apply()
}
if ps.topologyDriver.IsReachable() && ps.store.IsWithinStorageRadius(chunkAddress) {
return store(ctx)
}
receipt, err := ps.pushToClosest(ctx, chunk, false)
if err != nil {
if errors.Is(err, topology.ErrWantSelf) {
return store(ctx)
}
ps.metrics.Forwarder.Inc()
return fmt.Errorf("handler: push to closest chunk %s: %w", chunkAddress, err)
}
ps.metrics.Forwarder.Inc()
debit, err := ps.accounting.PrepareDebit(ctx, p.Address, price)
if err != nil {
return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
}
defer debit.Cleanup()
// pass back the receipt
if err := w.WriteMsgWithContext(ctx, receipt); err != nil {
return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
}
return debit.Apply()
}
// PushChunkToClosest sends chunk to the closest peer by opening a stream. It then waits for
// a receipt from that peer and returns error or nil based on the receiving and
// the validity of the receipt.
func (ps *PushSync) PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error) {
ps.metrics.TotalOutgoing.Inc()
r, err := ps.pushToClosest(ctx, ch, true)
if err != nil {
ps.metrics.TotalOutgoingErrors.Inc()
return nil, err
}
return &Receipt{
Address: swarm.NewAddress(r.Address),
Signature: r.Signature,
Nonce: r.Nonce,
}, nil
}
// pushToClosest attempts to push the chunk into the network.
func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bool) (*pb.Receipt, error) {
if !ps.warmedUp() {
return nil, ErrWarmup
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ps.metrics.TotalRequests.Inc()
var (
sentErrorsLeft = 1
preemptiveTicker <-chan time.Time
inflight int
parallelForwards = nPeersToReplicate
)
if origin {
ticker := time.NewTicker(preemptiveInterval)
defer ticker.Stop()
preemptiveTicker = ticker.C
sentErrorsLeft = maxPushErrors
}
resultChan := make(chan receiptResult)
retryC := make(chan struct{}, parallelForwards)
retry := func() {
select {
case retryC <- struct{}{}:
case <-ctx.Done():
default:
}
}
retry()
for sentErrorsLeft > 0 {
select {
case <-ctx.Done():
return nil, ErrNoPush
case <-preemptiveTicker:
retry()
case <-retryC:
// Origin peers should not store the chunk initially so that the chunk is always forwarded into the network.
// If no peer can be found from an origin peer, the origin peer may store the chunk.
// Non-origin peers store the chunk if the chunk is within depth.
// For non-origin peers, if the chunk is not within depth, they may store the chunk if they are the closest peer to the chunk.
peer, err := ps.topologyDriver.ClosestPeer(ch.Address(), ps.fullNode && !origin, topology.Select{Reachable: true, Healthy: true}, ps.skipList.ChunkPeers(ch.Address())...)
if errors.Is(err, topology.ErrNotFound) {
if ps.skipList.PruneExpiresAfter(ch.Address(), overDraftRefresh) == 0 { //no overdraft peers, we have depleted ALL peers
if inflight == 0 {
if ps.fullNode && ps.topologyDriver.IsReachable() {
if cac.Valid(ch) {
go ps.unwrap(ch)
}
return nil, topology.ErrWantSelf
}
ps.logger.Debug("no peers left", "chunk_address", ch.Address(), "error", err)
return nil, err
}
continue // there is still an inflight request, wait for it's result
}
ps.logger.Debug("sleeping to refresh overdraft balance", "chunk_address", ch.Address())
select {
case <-time.After(overDraftRefresh):
retry()
continue
case <-ctx.Done():
return nil, ctx.Err()
}
}
if err != nil {
if inflight == 0 {
return nil, err
}
ps.logger.Debug("next peer", "chunk_address", ch.Address(), "error", err)
continue
}
// since we can reach into the neighborhood of the chunk
// act as the multiplexer and push the chunk in parallel to multiple peers
if swarm.Proximity(peer.Bytes(), ch.Address().Bytes()) >= ps.store.StorageRadius() {
for ; parallelForwards > 0; parallelForwards-- {
retry()
sentErrorsLeft++
}
}
action, err := ps.prepareCredit(ctx, peer, ch, origin)
if err != nil {
retry()
ps.skipList.Add(ch.Address(), peer, overDraftRefresh)
continue
}
ps.skipList.Add(ch.Address(), peer, sanctionWait)
ps.metrics.TotalSendAttempts.Inc()
inflight++
go ps.push(ctx, resultChan, peer, ch, action)
case result := <-resultChan:
inflight--
ps.measurePushPeer(result.pushTime, result.err, origin)
if result.err == nil {
return result.receipt, nil
}
ps.metrics.TotalFailedSendAttempts.Inc()
ps.logger.Debug("could not push to peer", "chunk_address", ch.Address(), "peer_address", result.peer, "error", result.err)
sentErrorsLeft--
retry()
}
}
return nil, ErrNoPush
}
func (ps *PushSync) push(parentCtx context.Context, resultChan chan<- receiptResult, peer swarm.Address, ch swarm.Chunk, action accounting.Action) {
span := tracing.FromContext(parentCtx)
ctx, cancel := context.WithTimeout(context.Background(), defaultTTL)
defer cancel()
spanInner, _, ctx := ps.tracer.StartSpanFromContext(tracing.WithContext(ctx, span), "push-closest", ps.logger, opentracing.Tag{Key: "address", Value: ch.Address().String()})
defer spanInner.Finish()
var (
err error
receipt *pb.Receipt
now = time.Now()
)
defer func() {
select {
case resultChan <- receiptResult{pushTime: now, peer: peer, err: err, receipt: receipt}:
case <-parentCtx.Done():
}
}()
defer action.Cleanup()
receipt, err = ps.pushChunkToPeer(ctx, peer, ch)
if err != nil {
return
}
ps.metrics.TotalSent.Inc()
err = action.Apply()
}
func (ps *PushSync) pushChunkToPeer(ctx context.Context, peer swarm.Address, ch swarm.Chunk) (receipt *pb.Receipt, err error) {
streamer, err := ps.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName)
if err != nil {
return nil, fmt.Errorf("new stream for peer %s: %w", peer.String(), err)
}
defer func() {
if err != nil {
_ = streamer.Reset()
} else {
_ = streamer.FullClose()
}
}()
w, r := protobuf.NewWriterAndReader(streamer)
stamp, err := ch.Stamp().MarshalBinary()
if err != nil {
return nil, err
}
err = w.WriteMsgWithContext(ctx, &pb.Delivery{
Address: ch.Address().Bytes(),
Data: ch.Data(),
Stamp: stamp,
})
if err != nil {
return nil, err
}
err = ps.store.Report(ctx, ch, storage.ChunkSent)
if err != nil && !errors.Is(err, storage.ErrNotFound) {
err = fmt.Errorf("tag %d increment: %w", ch.TagID(), err)
return
}
var rec pb.Receipt
if err = r.ReadMsgWithContext(ctx, &rec); err != nil {
return nil, err
}
if !ch.Address().Equal(swarm.NewAddress(rec.Address)) {
return nil, fmt.Errorf("invalid receipt. chunk %s, peer %s", ch.Address(), peer)
}
return &rec, nil
}
func (ps *PushSync) prepareCredit(ctx context.Context, peer swarm.Address, ch swarm.Chunk, origin bool) (accounting.Action, error) {
creditCtx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
creditAction, err := ps.accounting.PrepareCredit(creditCtx, peer, ps.pricer.PeerPrice(peer, ch.Address()), origin)
if err != nil {
return nil, err
}
return creditAction, nil
}
func (ps *PushSync) measurePushPeer(t time.Time, err error, origin bool) |
func (ps *PushSync) validStampWrapper(f postage.ValidStampFn) postage.ValidStampFn {
return func(c swarm.Chunk) (swarm.Chunk, error) {
t := time.Now()
chunk, err := f(c)
if err != nil {
ps.metrics.InvalidStampErrors.Inc()
ps.metrics.StampValidationTime.WithLabelValues("failure").Observe(time.Since(t).Seconds())
} else {
ps.metrics.StampValidationTime.WithLabelValues("success").Observe(time.Since(t).Seconds())
}
return chunk, err
}
}
func (s *PushSync) Close() error {
return s.skipList.Close()
}
func (ps *PushSync) warmedUp() bool {
return time.Now().After(ps.warmupPeriod)
}
| {
var status string
if err != nil {
status = "failure"
} else {
status = "success"
}
ps.metrics.PushToPeerTime.WithLabelValues(status).Observe(time.Since(t).Seconds())
} | identifier_body |
pushsync.go | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pushsync provides the pushsync protocol
// implementation.
package pushsync
import (
"context"
"errors"
"fmt"
"time"
"github.com/ethersphere/bee/pkg/accounting"
"github.com/ethersphere/bee/pkg/cac"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/log"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/pricer"
"github.com/ethersphere/bee/pkg/pushsync/pb"
"github.com/ethersphere/bee/pkg/skippeers"
"github.com/ethersphere/bee/pkg/soc"
storage "github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology"
"github.com/ethersphere/bee/pkg/tracing"
opentracing "github.com/opentracing/opentracing-go"
)
// loggerName is the tree path name of the logger for this package.
const loggerName = "pushsync"
const (
protocolName = "pushsync"
protocolVersion = "1.2.0"
streamName = "pushsync"
)
const (
defaultTTL = 30 * time.Second // request time to live
preemptiveInterval = 5 * time.Second // P90 request time to live
sanctionWait = 5 * time.Minute
overDraftRefresh = time.Millisecond * 600
)
const (
nPeersToReplicate = 2 // number of peers to replicate to as receipt is sent upstream
maxPushErrors = 32
)
var (
ErrNoPush = errors.New("could not push chunk")
ErrOutOfDepthStoring = errors.New("storing outside of the neighborhood")
ErrWarmup = errors.New("node warmup time not complete")
)
type PushSyncer interface {
PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error)
}
type Receipt struct {
Address swarm.Address
Signature []byte
Nonce []byte
}
type Storer interface {
storage.PushReporter
ReservePutter() storage.Putter
IsWithinStorageRadius(swarm.Address) bool
StorageRadius() uint8
}
type PushSync struct {
address swarm.Address
nonce []byte
streamer p2p.StreamerDisconnecter
store Storer
topologyDriver topology.Driver
unwrap func(swarm.Chunk)
logger log.Logger
accounting accounting.Interface
pricer pricer.Interface
metrics metrics
tracer *tracing.Tracer
validStamp postage.ValidStampFn
signer crypto.Signer
fullNode bool
skipList *skippeers.List
warmupPeriod time.Time
}
type receiptResult struct {
pushTime time.Time
peer swarm.Address
receipt *pb.Receipt
err error
}
func New(
address swarm.Address,
nonce []byte,
streamer p2p.StreamerDisconnecter,
store Storer,
topology topology.Driver,
fullNode bool,
unwrap func(swarm.Chunk),
validStamp postage.ValidStampFn,
logger log.Logger,
accounting accounting.Interface,
pricer pricer.Interface,
signer crypto.Signer,
tracer *tracing.Tracer,
warmupTime time.Duration,
) *PushSync {
ps := &PushSync{
address: address,
nonce: nonce,
streamer: streamer,
store: store,
topologyDriver: topology,
fullNode: fullNode,
unwrap: unwrap,
logger: logger.WithName(loggerName).Register(),
accounting: accounting,
pricer: pricer,
metrics: newMetrics(),
tracer: tracer,
signer: signer,
skipList: skippeers.NewList(),
warmupPeriod: time.Now().Add(warmupTime),
}
ps.validStamp = ps.validStampWrapper(validStamp)
return ps
}
func (s *PushSync) Protocol() p2p.ProtocolSpec {
return p2p.ProtocolSpec{
Name: protocolName,
Version: protocolVersion,
StreamSpecs: []p2p.StreamSpec{
{
Name: streamName,
Handler: s.handler,
},
},
}
}
// handler handles chunk delivery from other node and forwards to its destination node.
// If the current node is the destination, it stores in the local store and sends a receipt.
func (ps *PushSync) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
now := time.Now()
w, r := protobuf.NewWriterAndReader(stream)
ctx, cancel := context.WithTimeout(ctx, defaultTTL)
defer cancel()
defer func() {
if err != nil {
ps.metrics.TotalHandlerTime.WithLabelValues("failure").Observe(time.Since(now).Seconds())
ps.metrics.TotalHandlerErrors.Inc()
_ = stream.Reset()
} else {
ps.metrics.TotalHandlerTime.WithLabelValues("success").Observe(time.Since(now).Seconds())
_ = stream.FullClose()
}
}()
var ch pb.Delivery
if err = r.ReadMsgWithContext(ctx, &ch); err != nil {
return fmt.Errorf("pushsync read delivery: %w", err)
}
ps.metrics.TotalReceived.Inc()
chunk := swarm.NewChunk(swarm.NewAddress(ch.Address), ch.Data)
chunkAddress := chunk.Address()
span, _, ctx := ps.tracer.StartSpanFromContext(ctx, "pushsync-handler", ps.logger, opentracing.Tag{Key: "address", Value: chunkAddress.String()})
defer span.Finish()
stamp := new(postage.Stamp)
err = stamp.UnmarshalBinary(ch.Stamp)
if err != nil {
return fmt.Errorf("pushsync stamp unmarshall: %w", err)
}
chunk.WithStamp(stamp)
if cac.Valid(chunk) {
go ps.unwrap(chunk)
} else if !soc.Valid(chunk) {
return swarm.ErrInvalidChunk
}
price := ps.pricer.Price(chunkAddress)
store := func(ctx context.Context) error {
ps.metrics.Storer.Inc()
chunkToPut, err := ps.validStamp(chunk)
if err != nil {
return fmt.Errorf("invalid stamp: %w", err)
}
err = ps.store.ReservePutter().Put(ctx, chunkToPut)
if err != nil {
return fmt.Errorf("reserve put: %w", err)
}
signature, err := ps.signer.Sign(chunkToPut.Address().Bytes())
if err != nil {
return fmt.Errorf("receipt signature: %w", err)
}
// return back receipt
debit, err := ps.accounting.PrepareDebit(ctx, p.Address, price)
if err != nil {
return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
}
defer debit.Cleanup()
receipt := pb.Receipt{Address: chunkToPut.Address().Bytes(), Signature: signature, Nonce: ps.nonce}
if err := w.WriteMsgWithContext(ctx, &receipt); err != nil {
return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
}
return debit.Apply()
}
if ps.topologyDriver.IsReachable() && ps.store.IsWithinStorageRadius(chunkAddress) {
return store(ctx)
}
receipt, err := ps.pushToClosest(ctx, chunk, false)
if err != nil {
if errors.Is(err, topology.ErrWantSelf) {
return store(ctx)
}
ps.metrics.Forwarder.Inc()
return fmt.Errorf("handler: push to closest chunk %s: %w", chunkAddress, err)
}
ps.metrics.Forwarder.Inc()
debit, err := ps.accounting.PrepareDebit(ctx, p.Address, price)
if err != nil {
return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
}
defer debit.Cleanup()
// pass back the receipt
if err := w.WriteMsgWithContext(ctx, receipt); err != nil {
return fmt.Errorf("send receipt to peer %s: %w", p.Address.String(), err)
}
return debit.Apply()
}
// PushChunkToClosest sends chunk to the closest peer by opening a stream. It then waits for
// a receipt from that peer and returns error or nil based on the receiving and
// the validity of the receipt.
func (ps *PushSync) PushChunkToClosest(ctx context.Context, ch swarm.Chunk) (*Receipt, error) {
ps.metrics.TotalOutgoing.Inc()
r, err := ps.pushToClosest(ctx, ch, true)
if err != nil {
ps.metrics.TotalOutgoingErrors.Inc()
return nil, err
}
return &Receipt{
Address: swarm.NewAddress(r.Address),
Signature: r.Signature,
Nonce: r.Nonce,
}, nil
}
// pushToClosest attempts to push the chunk into the network.
func (ps *PushSync) pushToClosest(ctx context.Context, ch swarm.Chunk, origin bool) (*pb.Receipt, error) {
if !ps.warmedUp() {
return nil, ErrWarmup
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ps.metrics.TotalRequests.Inc()
var (
sentErrorsLeft = 1
preemptiveTicker <-chan time.Time
inflight int
parallelForwards = nPeersToReplicate
)
if origin {
ticker := time.NewTicker(preemptiveInterval)
defer ticker.Stop()
preemptiveTicker = ticker.C
sentErrorsLeft = maxPushErrors
}
resultChan := make(chan receiptResult)
retryC := make(chan struct{}, parallelForwards)
retry := func() {
select {
case retryC <- struct{}{}:
case <-ctx.Done():
default:
}
}
retry()
for sentErrorsLeft > 0 {
select {
case <-ctx.Done():
return nil, ErrNoPush
case <-preemptiveTicker:
retry()
case <-retryC:
// Origin peers should not store the chunk initially so that the chunk is always forwarded into the network.
// If no peer can be found from an origin peer, the origin peer may store the chunk.
// Non-origin peers store the chunk if the chunk is within depth.
// For non-origin peers, if the chunk is not within depth, they may store the chunk if they are the closest peer to the chunk.
peer, err := ps.topologyDriver.ClosestPeer(ch.Address(), ps.fullNode && !origin, topology.Select{Reachable: true, Healthy: true}, ps.skipList.ChunkPeers(ch.Address())...)
if errors.Is(err, topology.ErrNotFound) {
if ps.skipList.PruneExpiresAfter(ch.Address(), overDraftRefresh) == 0 { //no overdraft peers, we have depleted ALL peers
if inflight == 0 {
if ps.fullNode && ps.topologyDriver.IsReachable() {
if cac.Valid(ch) {
go ps.unwrap(ch)
}
return nil, topology.ErrWantSelf
}
ps.logger.Debug("no peers left", "chunk_address", ch.Address(), "error", err)
return nil, err
}
continue // there is still an inflight request, wait for it's result
}
ps.logger.Debug("sleeping to refresh overdraft balance", "chunk_address", ch.Address())
select {
case <-time.After(overDraftRefresh):
retry()
continue
case <-ctx.Done():
return nil, ctx.Err()
}
}
if err != nil {
if inflight == 0 {
return nil, err
}
ps.logger.Debug("next peer", "chunk_address", ch.Address(), "error", err)
continue
}
// since we can reach into the neighborhood of the chunk
// act as the multiplexer and push the chunk in parallel to multiple peers
if swarm.Proximity(peer.Bytes(), ch.Address().Bytes()) >= ps.store.StorageRadius() {
for ; parallelForwards > 0; parallelForwards-- {
retry()
sentErrorsLeft++
}
}
action, err := ps.prepareCredit(ctx, peer, ch, origin)
if err != nil {
retry()
ps.skipList.Add(ch.Address(), peer, overDraftRefresh)
continue
}
ps.skipList.Add(ch.Address(), peer, sanctionWait)
ps.metrics.TotalSendAttempts.Inc()
inflight++
go ps.push(ctx, resultChan, peer, ch, action)
case result := <-resultChan:
inflight--
ps.measurePushPeer(result.pushTime, result.err, origin)
if result.err == nil {
return result.receipt, nil
}
ps.metrics.TotalFailedSendAttempts.Inc()
ps.logger.Debug("could not push to peer", "chunk_address", ch.Address(), "peer_address", result.peer, "error", result.err)
sentErrorsLeft--
retry()
}
}
return nil, ErrNoPush
}
func (ps *PushSync) push(parentCtx context.Context, resultChan chan<- receiptResult, peer swarm.Address, ch swarm.Chunk, action accounting.Action) {
span := tracing.FromContext(parentCtx)
ctx, cancel := context.WithTimeout(context.Background(), defaultTTL)
defer cancel()
spanInner, _, ctx := ps.tracer.StartSpanFromContext(tracing.WithContext(ctx, span), "push-closest", ps.logger, opentracing.Tag{Key: "address", Value: ch.Address().String()})
defer spanInner.Finish()
var (
err error
receipt *pb.Receipt
now = time.Now()
)
defer func() {
select {
case resultChan <- receiptResult{pushTime: now, peer: peer, err: err, receipt: receipt}:
case <-parentCtx.Done():
}
}()
defer action.Cleanup()
| ps.metrics.TotalSent.Inc()
err = action.Apply()
}
func (ps *PushSync) pushChunkToPeer(ctx context.Context, peer swarm.Address, ch swarm.Chunk) (receipt *pb.Receipt, err error) {
streamer, err := ps.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName)
if err != nil {
return nil, fmt.Errorf("new stream for peer %s: %w", peer.String(), err)
}
defer func() {
if err != nil {
_ = streamer.Reset()
} else {
_ = streamer.FullClose()
}
}()
w, r := protobuf.NewWriterAndReader(streamer)
stamp, err := ch.Stamp().MarshalBinary()
if err != nil {
return nil, err
}
err = w.WriteMsgWithContext(ctx, &pb.Delivery{
Address: ch.Address().Bytes(),
Data: ch.Data(),
Stamp: stamp,
})
if err != nil {
return nil, err
}
err = ps.store.Report(ctx, ch, storage.ChunkSent)
if err != nil && !errors.Is(err, storage.ErrNotFound) {
err = fmt.Errorf("tag %d increment: %w", ch.TagID(), err)
return
}
var rec pb.Receipt
if err = r.ReadMsgWithContext(ctx, &rec); err != nil {
return nil, err
}
if !ch.Address().Equal(swarm.NewAddress(rec.Address)) {
return nil, fmt.Errorf("invalid receipt. chunk %s, peer %s", ch.Address(), peer)
}
return &rec, nil
}
func (ps *PushSync) prepareCredit(ctx context.Context, peer swarm.Address, ch swarm.Chunk, origin bool) (accounting.Action, error) {
creditCtx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
creditAction, err := ps.accounting.PrepareCredit(creditCtx, peer, ps.pricer.PeerPrice(peer, ch.Address()), origin)
if err != nil {
return nil, err
}
return creditAction, nil
}
func (ps *PushSync) measurePushPeer(t time.Time, err error, origin bool) {
var status string
if err != nil {
status = "failure"
} else {
status = "success"
}
ps.metrics.PushToPeerTime.WithLabelValues(status).Observe(time.Since(t).Seconds())
}
func (ps *PushSync) validStampWrapper(f postage.ValidStampFn) postage.ValidStampFn {
return func(c swarm.Chunk) (swarm.Chunk, error) {
t := time.Now()
chunk, err := f(c)
if err != nil {
ps.metrics.InvalidStampErrors.Inc()
ps.metrics.StampValidationTime.WithLabelValues("failure").Observe(time.Since(t).Seconds())
} else {
ps.metrics.StampValidationTime.WithLabelValues("success").Observe(time.Since(t).Seconds())
}
return chunk, err
}
}
func (s *PushSync) Close() error {
return s.skipList.Close()
}
func (ps *PushSync) warmedUp() bool {
return time.Now().After(ps.warmupPeriod)
} | receipt, err = ps.pushChunkToPeer(ctx, peer, ch)
if err != nil {
return
}
| random_line_split |
trickledag.go | // Package trickle allows to build trickle DAGs.
// In this type of DAG, non-leave nodes are first filled
// with data leaves, and then incorporate "layers" of subtrees
// as additional links.
//
// Each layer is a trickle sub-tree and is limited by an increasing
// maximum depth. Thus, the nodes first layer
// can only hold leaves (depth 1) but subsequent layers can grow deeper.
// By default, this module places 4 nodes per layer (that is, 4 subtrees
// of the same maximum depth before increasing it).
//
// Trickle DAGs are very good for sequentially reading data, as the
// first data leaves are directly reachable from the root and those
// coming next are always nearby. They are
// suited for things like streaming applications.
package trickle
import (
"context"
"errors"
"fmt"
ft "github.com/ipfs/go-unixfs"
h "github.com/ipfs/go-unixfs/importer/helpers"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
)
// depthRepeat specifies how many times to append a child tree of a
// given depth. Higher values increase the width of a given node, which
// improves seek speeds.
const depthRepeat = 4
// Layout builds a new DAG with the trickle format using the provided
// DagBuilderHelper. See the module's description for a more detailed
// explanation.
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.Layout
func Layout(db *h.DagBuilderHelper) (ipld.Node, error) {
newRoot := db.NewFSNodeOverDag(ft.TFile)
root, _, err := fillTrickleRec(db, newRoot, -1)
if err != nil {
return nil, err
}
return root, db.Add(root)
}
// fillTrickleRec creates a trickle (sub-)tree with an optional maximum specified depth
// in the case maxDepth is greater than zero, or with unlimited depth otherwise
// (where the DAG builder will signal the end of data to end the function).
func fillTrickleRec(db *h.DagBuilderHelper, node *h.FSNodeOverDag, maxDepth int) (filledNode ipld.Node, nodeFileSize uint64, err error) {
// Always do this, even in the base case
if err := db.FillNodeLayer(node); err != nil {
return nil, 0, err
}
// For each depth in [1, `maxDepth`) (or without limit if `maxDepth` is -1,
// initial call from `Layout`) add `depthRepeat` sub-graphs of that depth.
for depth := 1; maxDepth == -1 || depth < maxDepth; depth++ {
if db.Done() {
break
// No more data, stop here, posterior append calls will figure out
// where we left off.
}
for repeatIndex := 0; repeatIndex < depthRepeat && !db.Done(); repeatIndex++ {
childNode, childFileSize, err := fillTrickleRec(db, db.NewFSNodeOverDag(ft.TFile), depth)
if err != nil {
return nil, 0, err
}
if err := node.AddChild(childNode, childFileSize, db); err != nil {
return nil, 0, err
}
}
}
// Get the final `dag.ProtoNode` with the `FSNode` data encoded inside.
filledNode, err = node.Commit()
if err != nil {
return nil, 0, err
}
return filledNode, node.FileSize(), nil
}
// Append appends the data in `db` to the dag, using the Trickledag format
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.Append
func Append(ctx context.Context, basen ipld.Node, db *h.DagBuilderHelper) (out ipld.Node, errOut error) {
base, ok := basen.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
// Convert to unixfs node for working with easily
fsn, err := h.NewFSNFromDag(base)
if err != nil {
return nil, err
}
// Get depth of this 'tree'
depth, repeatNumber := trickleDepthInfo(fsn, db.Maxlinks())
if depth == 0 {
// If direct blocks not filled...
if err := db.FillNodeLayer(fsn); err != nil {
return nil, err
}
if db.Done() {
// TODO: If `FillNodeLayer` stop `Commit`ing this should be
// the place (besides the function end) to call it.
return fsn.GetDagNode()
}
// If continuing, our depth has increased by one
depth++
}
// Last child in this node may not be a full tree, lets fill it up.
if err := appendFillLastChild(ctx, fsn, depth-1, repeatNumber, db); err != nil {
return nil, err
}
// after appendFillLastChild, our depth is now increased by one
if !db.Done() {
depth++
}
// Now, continue filling out tree like normal
for i := depth; !db.Done(); i++ {
for j := 0; j < depthRepeat && !db.Done(); j++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, i)
if err != nil {
return nil, err
}
err = fsn.AddChild(childNode, childFileSize, db)
if err != nil {
return nil, err
}
}
}
_, err = fsn.Commit()
if err != nil {
return nil, err
}
return fsn.GetDagNode()
}
func appendFillLastChild(ctx context.Context, fsn *h.FSNodeOverDag, depth int, repeatNumber int, db *h.DagBuilderHelper) error {
if fsn.NumChildren() <= db.Maxlinks() {
return nil
}
// TODO: Why do we need this check, didn't the caller already take
// care of this?
// Recursive step, grab last child
last := fsn.NumChildren() - 1
lastChild, err := fsn.GetChild(ctx, last, db.GetDagServ())
if err != nil {
return err
}
// Fill out last child (may not be full tree)
newChild, nchildSize, err := appendRec(ctx, lastChild, db, depth-1)
if err != nil {
return err
}
// Update changed child in parent node
fsn.RemoveChild(last, db)
filledNode, err := newChild.Commit()
if err != nil {
return err
}
err = fsn.AddChild(filledNode, nchildSize, db)
if err != nil {
return err
}
// Partially filled depth layer
if repeatNumber != 0 {
for ; repeatNumber < depthRepeat && !db.Done(); repeatNumber++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, depth)
if err != nil {
return err
}
if err := fsn.AddChild(childNode, childFileSize, db); err != nil {
return err
}
}
}
return nil
}
// recursive call for Append
func appendRec(ctx context.Context, fsn *h.FSNodeOverDag, db *h.DagBuilderHelper, maxDepth int) (*h.FSNodeOverDag, uint64, error) {
if maxDepth == 0 || db.Done() {
return fsn, fsn.FileSize(), nil
}
// Get depth of this 'tree'
depth, repeatNumber := trickleDepthInfo(fsn, db.Maxlinks())
if depth == 0 {
// If direct blocks not filled...
if err := db.FillNodeLayer(fsn); err != nil {
return nil, 0, err
}
depth++
}
// TODO: Same as `appendFillLastChild`, when is this case possible?
// If at correct depth, no need to continue
if depth == maxDepth {
return fsn, fsn.FileSize(), nil
}
if err := appendFillLastChild(ctx, fsn, depth, repeatNumber, db); err != nil {
return nil, 0, err
}
// after appendFillLastChild, our depth is now increased by one
if !db.Done() {
depth++
}
// Now, continue filling out tree like normal
for i := depth; i < maxDepth && !db.Done(); i++ {
for j := 0; j < depthRepeat && !db.Done(); j++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, i)
if err != nil {
return nil, 0, err
}
if err := fsn.AddChild(childNode, childFileSize, db); err != nil {
return nil, 0, err
}
}
}
return fsn, fsn.FileSize(), nil
}
// Deduce where we left off in `fillTrickleRec`, returns the `depth`
// with which new sub-graphs were being added and, within that depth,
// in which `repeatNumber` of the total `depthRepeat` we should add.
func trickleDepthInfo(node *h.FSNodeOverDag, maxlinks int) (depth int, repeatNumber int) {
n := node.NumChildren()
if n < maxlinks {
// We didn't even added the initial `maxlinks` leaf nodes (`FillNodeLayer`).
return 0, 0
}
nonLeafChildren := n - maxlinks
// The number of non-leaf child nodes added in `fillTrickleRec` (after
// the `FillNodeLayer` call).
depth = nonLeafChildren/depthRepeat + 1
// "Deduplicate" the added `depthRepeat` sub-graphs at each depth
// (rounding it up since we may be on an unfinished depth with less
// than `depthRepeat` sub-graphs).
repeatNumber = nonLeafChildren % depthRepeat
// What's left after taking full depths of `depthRepeat` sub-graphs
// is the current `repeatNumber` we're at (this fractional part is
// what we rounded up before).
return
}
// VerifyParams is used by VerifyTrickleDagStructure
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.VerifyParams
type VerifyParams struct {
Getter ipld.NodeGetter
Direct int
LayerRepeat int
Prefix *cid.Prefix
RawLeaves bool
}
// VerifyTrickleDagStructure checks that the given dag matches exactly the trickle dag datastructure
// layout
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.VerifyTrickleDagStructure
func VerifyTrickleDagStructure(nd ipld.Node, p VerifyParams) error {
return verifyTDagRec(nd, -1, p)
}
// Recursive call for verifying the structure of a trickledag
func verifyTDagRec(n ipld.Node, depth int, p VerifyParams) error {
codec := cid.DagProtobuf
if depth == 0 {
if len(n.Links()) > 0 |
// zero depth dag is raw data block
switch nd := n.(type) {
case *dag.ProtoNode:
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return err
}
if fsn.Type() != ft.TRaw {
return errors.New("expected raw block")
}
if p.RawLeaves {
return errors.New("expected raw leaf, got a protobuf node")
}
case *dag.RawNode:
if !p.RawLeaves {
return errors.New("expected protobuf node as leaf")
}
codec = cid.Raw
default:
return errors.New("expected ProtoNode or RawNode")
}
}
// verify prefix
if p.Prefix != nil {
prefix := n.Cid().Prefix()
expect := *p.Prefix // make a copy
expect.Codec = uint64(codec)
if codec == cid.Raw && expect.Version == 0 {
expect.Version = 1
}
if expect.MhLength == -1 {
expect.MhLength = prefix.MhLength
}
if prefix != expect {
return fmt.Errorf("unexpected cid prefix: expected: %v; got %v", expect, prefix)
}
}
if depth == 0 {
return nil
}
nd, ok := n.(*dag.ProtoNode)
if !ok {
return errors.New("expected ProtoNode")
}
// Verify this is a branch node
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return err
}
if fsn.Type() != ft.TFile {
return fmt.Errorf("expected file as branch node, got: %s", fsn.Type())
}
if len(fsn.Data()) > 0 {
return errors.New("branch node should not have data")
}
for i := 0; i < len(nd.Links()); i++ {
child, err := nd.Links()[i].GetNode(context.TODO(), p.Getter)
if err != nil {
return err
}
if i < p.Direct {
// Direct blocks
err := verifyTDagRec(child, 0, p)
if err != nil {
return err
}
} else {
// Recursive trickle dags
rdepth := ((i - p.Direct) / p.LayerRepeat) + 1
if rdepth >= depth && depth > 0 {
return errors.New("child dag was too deep")
}
err := verifyTDagRec(child, rdepth, p)
if err != nil {
return err
}
}
}
return nil
}
| {
return errors.New("expected direct block")
} | conditional_block |
trickledag.go | // Package trickle allows to build trickle DAGs.
// In this type of DAG, non-leave nodes are first filled
// with data leaves, and then incorporate "layers" of subtrees
// as additional links.
//
// Each layer is a trickle sub-tree and is limited by an increasing
// maximum depth. Thus, the nodes first layer
// can only hold leaves (depth 1) but subsequent layers can grow deeper.
// By default, this module places 4 nodes per layer (that is, 4 subtrees
// of the same maximum depth before increasing it).
//
// Trickle DAGs are very good for sequentially reading data, as the
// first data leaves are directly reachable from the root and those
// coming next are always nearby. They are
// suited for things like streaming applications.
package trickle
import (
"context"
"errors"
"fmt"
ft "github.com/ipfs/go-unixfs"
h "github.com/ipfs/go-unixfs/importer/helpers"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
)
// depthRepeat specifies how many times to append a child tree of a
// given depth. Higher values increase the width of a given node, which
// improves seek speeds.
const depthRepeat = 4
// Layout builds a new DAG with the trickle format using the provided
// DagBuilderHelper. See the module's description for a more detailed
// explanation.
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.Layout
func Layout(db *h.DagBuilderHelper) (ipld.Node, error) |
// fillTrickleRec creates a trickle (sub-)tree with an optional maximum specified depth
// in the case maxDepth is greater than zero, or with unlimited depth otherwise
// (where the DAG builder will signal the end of data to end the function).
func fillTrickleRec(db *h.DagBuilderHelper, node *h.FSNodeOverDag, maxDepth int) (filledNode ipld.Node, nodeFileSize uint64, err error) {
// Always do this, even in the base case
if err := db.FillNodeLayer(node); err != nil {
return nil, 0, err
}
// For each depth in [1, `maxDepth`) (or without limit if `maxDepth` is -1,
// initial call from `Layout`) add `depthRepeat` sub-graphs of that depth.
for depth := 1; maxDepth == -1 || depth < maxDepth; depth++ {
if db.Done() {
break
// No more data, stop here, posterior append calls will figure out
// where we left off.
}
for repeatIndex := 0; repeatIndex < depthRepeat && !db.Done(); repeatIndex++ {
childNode, childFileSize, err := fillTrickleRec(db, db.NewFSNodeOverDag(ft.TFile), depth)
if err != nil {
return nil, 0, err
}
if err := node.AddChild(childNode, childFileSize, db); err != nil {
return nil, 0, err
}
}
}
// Get the final `dag.ProtoNode` with the `FSNode` data encoded inside.
filledNode, err = node.Commit()
if err != nil {
return nil, 0, err
}
return filledNode, node.FileSize(), nil
}
// Append appends the data in `db` to the dag, using the Trickledag format
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.Append
func Append(ctx context.Context, basen ipld.Node, db *h.DagBuilderHelper) (out ipld.Node, errOut error) {
base, ok := basen.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
// Convert to unixfs node for working with easily
fsn, err := h.NewFSNFromDag(base)
if err != nil {
return nil, err
}
// Get depth of this 'tree'
depth, repeatNumber := trickleDepthInfo(fsn, db.Maxlinks())
if depth == 0 {
// If direct blocks not filled...
if err := db.FillNodeLayer(fsn); err != nil {
return nil, err
}
if db.Done() {
// TODO: If `FillNodeLayer` stop `Commit`ing this should be
// the place (besides the function end) to call it.
return fsn.GetDagNode()
}
// If continuing, our depth has increased by one
depth++
}
// Last child in this node may not be a full tree, lets fill it up.
if err := appendFillLastChild(ctx, fsn, depth-1, repeatNumber, db); err != nil {
return nil, err
}
// after appendFillLastChild, our depth is now increased by one
if !db.Done() {
depth++
}
// Now, continue filling out tree like normal
for i := depth; !db.Done(); i++ {
for j := 0; j < depthRepeat && !db.Done(); j++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, i)
if err != nil {
return nil, err
}
err = fsn.AddChild(childNode, childFileSize, db)
if err != nil {
return nil, err
}
}
}
_, err = fsn.Commit()
if err != nil {
return nil, err
}
return fsn.GetDagNode()
}
func appendFillLastChild(ctx context.Context, fsn *h.FSNodeOverDag, depth int, repeatNumber int, db *h.DagBuilderHelper) error {
if fsn.NumChildren() <= db.Maxlinks() {
return nil
}
// TODO: Why do we need this check, didn't the caller already take
// care of this?
// Recursive step, grab last child
last := fsn.NumChildren() - 1
lastChild, err := fsn.GetChild(ctx, last, db.GetDagServ())
if err != nil {
return err
}
// Fill out last child (may not be full tree)
newChild, nchildSize, err := appendRec(ctx, lastChild, db, depth-1)
if err != nil {
return err
}
// Update changed child in parent node
fsn.RemoveChild(last, db)
filledNode, err := newChild.Commit()
if err != nil {
return err
}
err = fsn.AddChild(filledNode, nchildSize, db)
if err != nil {
return err
}
// Partially filled depth layer
if repeatNumber != 0 {
for ; repeatNumber < depthRepeat && !db.Done(); repeatNumber++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, depth)
if err != nil {
return err
}
if err := fsn.AddChild(childNode, childFileSize, db); err != nil {
return err
}
}
}
return nil
}
// recursive call for Append
func appendRec(ctx context.Context, fsn *h.FSNodeOverDag, db *h.DagBuilderHelper, maxDepth int) (*h.FSNodeOverDag, uint64, error) {
if maxDepth == 0 || db.Done() {
return fsn, fsn.FileSize(), nil
}
// Get depth of this 'tree'
depth, repeatNumber := trickleDepthInfo(fsn, db.Maxlinks())
if depth == 0 {
// If direct blocks not filled...
if err := db.FillNodeLayer(fsn); err != nil {
return nil, 0, err
}
depth++
}
// TODO: Same as `appendFillLastChild`, when is this case possible?
// If at correct depth, no need to continue
if depth == maxDepth {
return fsn, fsn.FileSize(), nil
}
if err := appendFillLastChild(ctx, fsn, depth, repeatNumber, db); err != nil {
return nil, 0, err
}
// after appendFillLastChild, our depth is now increased by one
if !db.Done() {
depth++
}
// Now, continue filling out tree like normal
for i := depth; i < maxDepth && !db.Done(); i++ {
for j := 0; j < depthRepeat && !db.Done(); j++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, i)
if err != nil {
return nil, 0, err
}
if err := fsn.AddChild(childNode, childFileSize, db); err != nil {
return nil, 0, err
}
}
}
return fsn, fsn.FileSize(), nil
}
// Deduce where we left off in `fillTrickleRec`, returns the `depth`
// with which new sub-graphs were being added and, within that depth,
// in which `repeatNumber` of the total `depthRepeat` we should add.
func trickleDepthInfo(node *h.FSNodeOverDag, maxlinks int) (depth int, repeatNumber int) {
n := node.NumChildren()
if n < maxlinks {
// We didn't even added the initial `maxlinks` leaf nodes (`FillNodeLayer`).
return 0, 0
}
nonLeafChildren := n - maxlinks
// The number of non-leaf child nodes added in `fillTrickleRec` (after
// the `FillNodeLayer` call).
depth = nonLeafChildren/depthRepeat + 1
// "Deduplicate" the added `depthRepeat` sub-graphs at each depth
// (rounding it up since we may be on an unfinished depth with less
// than `depthRepeat` sub-graphs).
repeatNumber = nonLeafChildren % depthRepeat
// What's left after taking full depths of `depthRepeat` sub-graphs
// is the current `repeatNumber` we're at (this fractional part is
// what we rounded up before).
return
}
// VerifyParams is used by VerifyTrickleDagStructure
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.VerifyParams
type VerifyParams struct {
Getter ipld.NodeGetter
Direct int
LayerRepeat int
Prefix *cid.Prefix
RawLeaves bool
}
// VerifyTrickleDagStructure checks that the given dag matches exactly the trickle dag datastructure
// layout
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.VerifyTrickleDagStructure
func VerifyTrickleDagStructure(nd ipld.Node, p VerifyParams) error {
return verifyTDagRec(nd, -1, p)
}
// Recursive call for verifying the structure of a trickledag
func verifyTDagRec(n ipld.Node, depth int, p VerifyParams) error {
codec := cid.DagProtobuf
if depth == 0 {
if len(n.Links()) > 0 {
return errors.New("expected direct block")
}
// zero depth dag is raw data block
switch nd := n.(type) {
case *dag.ProtoNode:
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return err
}
if fsn.Type() != ft.TRaw {
return errors.New("expected raw block")
}
if p.RawLeaves {
return errors.New("expected raw leaf, got a protobuf node")
}
case *dag.RawNode:
if !p.RawLeaves {
return errors.New("expected protobuf node as leaf")
}
codec = cid.Raw
default:
return errors.New("expected ProtoNode or RawNode")
}
}
// verify prefix
if p.Prefix != nil {
prefix := n.Cid().Prefix()
expect := *p.Prefix // make a copy
expect.Codec = uint64(codec)
if codec == cid.Raw && expect.Version == 0 {
expect.Version = 1
}
if expect.MhLength == -1 {
expect.MhLength = prefix.MhLength
}
if prefix != expect {
return fmt.Errorf("unexpected cid prefix: expected: %v; got %v", expect, prefix)
}
}
if depth == 0 {
return nil
}
nd, ok := n.(*dag.ProtoNode)
if !ok {
return errors.New("expected ProtoNode")
}
// Verify this is a branch node
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return err
}
if fsn.Type() != ft.TFile {
return fmt.Errorf("expected file as branch node, got: %s", fsn.Type())
}
if len(fsn.Data()) > 0 {
return errors.New("branch node should not have data")
}
for i := 0; i < len(nd.Links()); i++ {
child, err := nd.Links()[i].GetNode(context.TODO(), p.Getter)
if err != nil {
return err
}
if i < p.Direct {
// Direct blocks
err := verifyTDagRec(child, 0, p)
if err != nil {
return err
}
} else {
// Recursive trickle dags
rdepth := ((i - p.Direct) / p.LayerRepeat) + 1
if rdepth >= depth && depth > 0 {
return errors.New("child dag was too deep")
}
err := verifyTDagRec(child, rdepth, p)
if err != nil {
return err
}
}
}
return nil
}
| {
newRoot := db.NewFSNodeOverDag(ft.TFile)
root, _, err := fillTrickleRec(db, newRoot, -1)
if err != nil {
return nil, err
}
return root, db.Add(root)
} | identifier_body |
trickledag.go | // Package trickle allows to build trickle DAGs.
// In this type of DAG, non-leave nodes are first filled
// with data leaves, and then incorporate "layers" of subtrees
// as additional links.
//
// Each layer is a trickle sub-tree and is limited by an increasing
// maximum depth. Thus, the nodes first layer
// can only hold leaves (depth 1) but subsequent layers can grow deeper.
// By default, this module places 4 nodes per layer (that is, 4 subtrees
// of the same maximum depth before increasing it).
//
// Trickle DAGs are very good for sequentially reading data, as the
// first data leaves are directly reachable from the root and those
// coming next are always nearby. They are
// suited for things like streaming applications.
package trickle
import (
"context"
"errors"
"fmt"
ft "github.com/ipfs/go-unixfs"
h "github.com/ipfs/go-unixfs/importer/helpers"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
)
// depthRepeat specifies how many times to append a child tree of a
// given depth. Higher values increase the width of a given node, which
// improves seek speeds.
const depthRepeat = 4
// Layout builds a new DAG with the trickle format using the provided
// DagBuilderHelper. See the module's description for a more detailed
// explanation.
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.Layout
func Layout(db *h.DagBuilderHelper) (ipld.Node, error) {
newRoot := db.NewFSNodeOverDag(ft.TFile)
root, _, err := fillTrickleRec(db, newRoot, -1)
if err != nil {
return nil, err
}
return root, db.Add(root)
}
// fillTrickleRec creates a trickle (sub-)tree with an optional maximum specified depth
// in the case maxDepth is greater than zero, or with unlimited depth otherwise
// (where the DAG builder will signal the end of data to end the function).
func fillTrickleRec(db *h.DagBuilderHelper, node *h.FSNodeOverDag, maxDepth int) (filledNode ipld.Node, nodeFileSize uint64, err error) {
// Always do this, even in the base case
if err := db.FillNodeLayer(node); err != nil {
return nil, 0, err
}
// For each depth in [1, `maxDepth`) (or without limit if `maxDepth` is -1,
// initial call from `Layout`) add `depthRepeat` sub-graphs of that depth.
for depth := 1; maxDepth == -1 || depth < maxDepth; depth++ {
if db.Done() {
break
// No more data, stop here, posterior append calls will figure out
// where we left off.
}
for repeatIndex := 0; repeatIndex < depthRepeat && !db.Done(); repeatIndex++ {
childNode, childFileSize, err := fillTrickleRec(db, db.NewFSNodeOverDag(ft.TFile), depth)
if err != nil {
return nil, 0, err
}
if err := node.AddChild(childNode, childFileSize, db); err != nil {
return nil, 0, err
}
}
}
// Get the final `dag.ProtoNode` with the `FSNode` data encoded inside.
filledNode, err = node.Commit()
if err != nil {
return nil, 0, err
}
return filledNode, node.FileSize(), nil
}
// Append appends the data in `db` to the dag, using the Trickledag format
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.Append
func Append(ctx context.Context, basen ipld.Node, db *h.DagBuilderHelper) (out ipld.Node, errOut error) {
base, ok := basen.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
// Convert to unixfs node for working with easily
fsn, err := h.NewFSNFromDag(base)
if err != nil {
return nil, err
}
// Get depth of this 'tree'
depth, repeatNumber := trickleDepthInfo(fsn, db.Maxlinks())
if depth == 0 {
// If direct blocks not filled...
if err := db.FillNodeLayer(fsn); err != nil {
return nil, err
}
if db.Done() {
// TODO: If `FillNodeLayer` stop `Commit`ing this should be
// the place (besides the function end) to call it.
return fsn.GetDagNode()
}
// If continuing, our depth has increased by one
depth++
}
// Last child in this node may not be a full tree, lets fill it up.
if err := appendFillLastChild(ctx, fsn, depth-1, repeatNumber, db); err != nil {
return nil, err
}
// after appendFillLastChild, our depth is now increased by one
if !db.Done() {
depth++
}
// Now, continue filling out tree like normal
for i := depth; !db.Done(); i++ {
for j := 0; j < depthRepeat && !db.Done(); j++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, i)
if err != nil {
return nil, err
}
err = fsn.AddChild(childNode, childFileSize, db)
if err != nil {
return nil, err
}
}
}
_, err = fsn.Commit()
if err != nil {
return nil, err
}
return fsn.GetDagNode()
}
func appendFillLastChild(ctx context.Context, fsn *h.FSNodeOverDag, depth int, repeatNumber int, db *h.DagBuilderHelper) error {
if fsn.NumChildren() <= db.Maxlinks() {
return nil
}
// TODO: Why do we need this check, didn't the caller already take
// care of this?
// Recursive step, grab last child
last := fsn.NumChildren() - 1
lastChild, err := fsn.GetChild(ctx, last, db.GetDagServ())
if err != nil {
return err
}
// Fill out last child (may not be full tree)
newChild, nchildSize, err := appendRec(ctx, lastChild, db, depth-1)
if err != nil {
return err
}
// Update changed child in parent node
fsn.RemoveChild(last, db)
filledNode, err := newChild.Commit()
if err != nil {
return err
}
err = fsn.AddChild(filledNode, nchildSize, db)
if err != nil {
return err
}
// Partially filled depth layer
if repeatNumber != 0 {
for ; repeatNumber < depthRepeat && !db.Done(); repeatNumber++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, depth)
if err != nil {
return err
}
if err := fsn.AddChild(childNode, childFileSize, db); err != nil {
return err
}
}
}
return nil
}
// recursive call for Append
func appendRec(ctx context.Context, fsn *h.FSNodeOverDag, db *h.DagBuilderHelper, maxDepth int) (*h.FSNodeOverDag, uint64, error) {
if maxDepth == 0 || db.Done() {
return fsn, fsn.FileSize(), nil
}
// Get depth of this 'tree'
depth, repeatNumber := trickleDepthInfo(fsn, db.Maxlinks())
if depth == 0 {
// If direct blocks not filled...
if err := db.FillNodeLayer(fsn); err != nil {
return nil, 0, err
}
depth++
}
// TODO: Same as `appendFillLastChild`, when is this case possible?
// If at correct depth, no need to continue
if depth == maxDepth {
return fsn, fsn.FileSize(), nil
}
if err := appendFillLastChild(ctx, fsn, depth, repeatNumber, db); err != nil {
return nil, 0, err
}
// after appendFillLastChild, our depth is now increased by one
if !db.Done() {
depth++
}
// Now, continue filling out tree like normal
for i := depth; i < maxDepth && !db.Done(); i++ {
for j := 0; j < depthRepeat && !db.Done(); j++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, i)
if err != nil {
return nil, 0, err
}
if err := fsn.AddChild(childNode, childFileSize, db); err != nil {
return nil, 0, err
}
}
}
return fsn, fsn.FileSize(), nil
}
// Deduce where we left off in `fillTrickleRec`, returns the `depth`
// with which new sub-graphs were being added and, within that depth,
// in which `repeatNumber` of the total `depthRepeat` we should add.
func trickleDepthInfo(node *h.FSNodeOverDag, maxlinks int) (depth int, repeatNumber int) {
n := node.NumChildren()
if n < maxlinks {
// We didn't even added the initial `maxlinks` leaf nodes (`FillNodeLayer`).
return 0, 0
}
nonLeafChildren := n - maxlinks
// The number of non-leaf child nodes added in `fillTrickleRec` (after
// the `FillNodeLayer` call).
depth = nonLeafChildren/depthRepeat + 1
// "Deduplicate" the added `depthRepeat` sub-graphs at each depth
// (rounding it up since we may be on an unfinished depth with less
// than `depthRepeat` sub-graphs).
repeatNumber = nonLeafChildren % depthRepeat
// What's left after taking full depths of `depthRepeat` sub-graphs
// is the current `repeatNumber` we're at (this fractional part is
// what we rounded up before).
return
}
// VerifyParams is used by VerifyTrickleDagStructure
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.VerifyParams
type VerifyParams struct {
Getter ipld.NodeGetter
Direct int
LayerRepeat int
Prefix *cid.Prefix
RawLeaves bool
}
// VerifyTrickleDagStructure checks that the given dag matches exactly the trickle dag datastructure
// layout
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.VerifyTrickleDagStructure
func VerifyTrickleDagStructure(nd ipld.Node, p VerifyParams) error {
return verifyTDagRec(nd, -1, p) | func verifyTDagRec(n ipld.Node, depth int, p VerifyParams) error {
codec := cid.DagProtobuf
if depth == 0 {
if len(n.Links()) > 0 {
return errors.New("expected direct block")
}
// zero depth dag is raw data block
switch nd := n.(type) {
case *dag.ProtoNode:
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return err
}
if fsn.Type() != ft.TRaw {
return errors.New("expected raw block")
}
if p.RawLeaves {
return errors.New("expected raw leaf, got a protobuf node")
}
case *dag.RawNode:
if !p.RawLeaves {
return errors.New("expected protobuf node as leaf")
}
codec = cid.Raw
default:
return errors.New("expected ProtoNode or RawNode")
}
}
// verify prefix
if p.Prefix != nil {
prefix := n.Cid().Prefix()
expect := *p.Prefix // make a copy
expect.Codec = uint64(codec)
if codec == cid.Raw && expect.Version == 0 {
expect.Version = 1
}
if expect.MhLength == -1 {
expect.MhLength = prefix.MhLength
}
if prefix != expect {
return fmt.Errorf("unexpected cid prefix: expected: %v; got %v", expect, prefix)
}
}
if depth == 0 {
return nil
}
nd, ok := n.(*dag.ProtoNode)
if !ok {
return errors.New("expected ProtoNode")
}
// Verify this is a branch node
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return err
}
if fsn.Type() != ft.TFile {
return fmt.Errorf("expected file as branch node, got: %s", fsn.Type())
}
if len(fsn.Data()) > 0 {
return errors.New("branch node should not have data")
}
for i := 0; i < len(nd.Links()); i++ {
child, err := nd.Links()[i].GetNode(context.TODO(), p.Getter)
if err != nil {
return err
}
if i < p.Direct {
// Direct blocks
err := verifyTDagRec(child, 0, p)
if err != nil {
return err
}
} else {
// Recursive trickle dags
rdepth := ((i - p.Direct) / p.LayerRepeat) + 1
if rdepth >= depth && depth > 0 {
return errors.New("child dag was too deep")
}
err := verifyTDagRec(child, rdepth, p)
if err != nil {
return err
}
}
}
return nil
} | }
// Recursive call for verifying the structure of a trickledag | random_line_split |
trickledag.go | // Package trickle allows to build trickle DAGs.
// In this type of DAG, non-leave nodes are first filled
// with data leaves, and then incorporate "layers" of subtrees
// as additional links.
//
// Each layer is a trickle sub-tree and is limited by an increasing
// maximum depth. Thus, the nodes first layer
// can only hold leaves (depth 1) but subsequent layers can grow deeper.
// By default, this module places 4 nodes per layer (that is, 4 subtrees
// of the same maximum depth before increasing it).
//
// Trickle DAGs are very good for sequentially reading data, as the
// first data leaves are directly reachable from the root and those
// coming next are always nearby. They are
// suited for things like streaming applications.
package trickle
import (
"context"
"errors"
"fmt"
ft "github.com/ipfs/go-unixfs"
h "github.com/ipfs/go-unixfs/importer/helpers"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
)
// depthRepeat specifies how many times to append a child tree of a
// given depth. Higher values increase the width of a given node, which
// improves seek speeds.
const depthRepeat = 4
// Layout builds a new DAG with the trickle format using the provided
// DagBuilderHelper. See the module's description for a more detailed
// explanation.
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.Layout
func Layout(db *h.DagBuilderHelper) (ipld.Node, error) {
newRoot := db.NewFSNodeOverDag(ft.TFile)
root, _, err := fillTrickleRec(db, newRoot, -1)
if err != nil {
return nil, err
}
return root, db.Add(root)
}
// fillTrickleRec creates a trickle (sub-)tree with an optional maximum specified depth
// in the case maxDepth is greater than zero, or with unlimited depth otherwise
// (where the DAG builder will signal the end of data to end the function).
func fillTrickleRec(db *h.DagBuilderHelper, node *h.FSNodeOverDag, maxDepth int) (filledNode ipld.Node, nodeFileSize uint64, err error) {
// Always do this, even in the base case
if err := db.FillNodeLayer(node); err != nil {
return nil, 0, err
}
// For each depth in [1, `maxDepth`) (or without limit if `maxDepth` is -1,
// initial call from `Layout`) add `depthRepeat` sub-graphs of that depth.
for depth := 1; maxDepth == -1 || depth < maxDepth; depth++ {
if db.Done() {
break
// No more data, stop here, posterior append calls will figure out
// where we left off.
}
for repeatIndex := 0; repeatIndex < depthRepeat && !db.Done(); repeatIndex++ {
childNode, childFileSize, err := fillTrickleRec(db, db.NewFSNodeOverDag(ft.TFile), depth)
if err != nil {
return nil, 0, err
}
if err := node.AddChild(childNode, childFileSize, db); err != nil {
return nil, 0, err
}
}
}
// Get the final `dag.ProtoNode` with the `FSNode` data encoded inside.
filledNode, err = node.Commit()
if err != nil {
return nil, 0, err
}
return filledNode, node.FileSize(), nil
}
// Append appends the data in `db` to the dag, using the Trickledag format
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.Append
func Append(ctx context.Context, basen ipld.Node, db *h.DagBuilderHelper) (out ipld.Node, errOut error) {
base, ok := basen.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
// Convert to unixfs node for working with easily
fsn, err := h.NewFSNFromDag(base)
if err != nil {
return nil, err
}
// Get depth of this 'tree'
depth, repeatNumber := trickleDepthInfo(fsn, db.Maxlinks())
if depth == 0 {
// If direct blocks not filled...
if err := db.FillNodeLayer(fsn); err != nil {
return nil, err
}
if db.Done() {
// TODO: If `FillNodeLayer` stop `Commit`ing this should be
// the place (besides the function end) to call it.
return fsn.GetDagNode()
}
// If continuing, our depth has increased by one
depth++
}
// Last child in this node may not be a full tree, lets fill it up.
if err := appendFillLastChild(ctx, fsn, depth-1, repeatNumber, db); err != nil {
return nil, err
}
// after appendFillLastChild, our depth is now increased by one
if !db.Done() {
depth++
}
// Now, continue filling out tree like normal
for i := depth; !db.Done(); i++ {
for j := 0; j < depthRepeat && !db.Done(); j++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, i)
if err != nil {
return nil, err
}
err = fsn.AddChild(childNode, childFileSize, db)
if err != nil {
return nil, err
}
}
}
_, err = fsn.Commit()
if err != nil {
return nil, err
}
return fsn.GetDagNode()
}
func | (ctx context.Context, fsn *h.FSNodeOverDag, depth int, repeatNumber int, db *h.DagBuilderHelper) error {
if fsn.NumChildren() <= db.Maxlinks() {
return nil
}
// TODO: Why do we need this check, didn't the caller already take
// care of this?
// Recursive step, grab last child
last := fsn.NumChildren() - 1
lastChild, err := fsn.GetChild(ctx, last, db.GetDagServ())
if err != nil {
return err
}
// Fill out last child (may not be full tree)
newChild, nchildSize, err := appendRec(ctx, lastChild, db, depth-1)
if err != nil {
return err
}
// Update changed child in parent node
fsn.RemoveChild(last, db)
filledNode, err := newChild.Commit()
if err != nil {
return err
}
err = fsn.AddChild(filledNode, nchildSize, db)
if err != nil {
return err
}
// Partially filled depth layer
if repeatNumber != 0 {
for ; repeatNumber < depthRepeat && !db.Done(); repeatNumber++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, depth)
if err != nil {
return err
}
if err := fsn.AddChild(childNode, childFileSize, db); err != nil {
return err
}
}
}
return nil
}
// recursive call for Append
func appendRec(ctx context.Context, fsn *h.FSNodeOverDag, db *h.DagBuilderHelper, maxDepth int) (*h.FSNodeOverDag, uint64, error) {
if maxDepth == 0 || db.Done() {
return fsn, fsn.FileSize(), nil
}
// Get depth of this 'tree'
depth, repeatNumber := trickleDepthInfo(fsn, db.Maxlinks())
if depth == 0 {
// If direct blocks not filled...
if err := db.FillNodeLayer(fsn); err != nil {
return nil, 0, err
}
depth++
}
// TODO: Same as `appendFillLastChild`, when is this case possible?
// If at correct depth, no need to continue
if depth == maxDepth {
return fsn, fsn.FileSize(), nil
}
if err := appendFillLastChild(ctx, fsn, depth, repeatNumber, db); err != nil {
return nil, 0, err
}
// after appendFillLastChild, our depth is now increased by one
if !db.Done() {
depth++
}
// Now, continue filling out tree like normal
for i := depth; i < maxDepth && !db.Done(); i++ {
for j := 0; j < depthRepeat && !db.Done(); j++ {
nextChild := db.NewFSNodeOverDag(ft.TFile)
childNode, childFileSize, err := fillTrickleRec(db, nextChild, i)
if err != nil {
return nil, 0, err
}
if err := fsn.AddChild(childNode, childFileSize, db); err != nil {
return nil, 0, err
}
}
}
return fsn, fsn.FileSize(), nil
}
// Deduce where we left off in `fillTrickleRec`, returns the `depth`
// with which new sub-graphs were being added and, within that depth,
// in which `repeatNumber` of the total `depthRepeat` we should add.
func trickleDepthInfo(node *h.FSNodeOverDag, maxlinks int) (depth int, repeatNumber int) {
n := node.NumChildren()
if n < maxlinks {
// We didn't even added the initial `maxlinks` leaf nodes (`FillNodeLayer`).
return 0, 0
}
nonLeafChildren := n - maxlinks
// The number of non-leaf child nodes added in `fillTrickleRec` (after
// the `FillNodeLayer` call).
depth = nonLeafChildren/depthRepeat + 1
// "Deduplicate" the added `depthRepeat` sub-graphs at each depth
// (rounding it up since we may be on an unfinished depth with less
// than `depthRepeat` sub-graphs).
repeatNumber = nonLeafChildren % depthRepeat
// What's left after taking full depths of `depthRepeat` sub-graphs
// is the current `repeatNumber` we're at (this fractional part is
// what we rounded up before).
return
}
// VerifyParams is used by VerifyTrickleDagStructure
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.VerifyParams
type VerifyParams struct {
Getter ipld.NodeGetter
Direct int
LayerRepeat int
Prefix *cid.Prefix
RawLeaves bool
}
// VerifyTrickleDagStructure checks that the given dag matches exactly the trickle dag datastructure
// layout
//
// Deprecated: use github.com/ipfs/boxo/ipld/unixfs/importer/trickle.VerifyTrickleDagStructure
func VerifyTrickleDagStructure(nd ipld.Node, p VerifyParams) error {
return verifyTDagRec(nd, -1, p)
}
// Recursive call for verifying the structure of a trickledag
func verifyTDagRec(n ipld.Node, depth int, p VerifyParams) error {
codec := cid.DagProtobuf
if depth == 0 {
if len(n.Links()) > 0 {
return errors.New("expected direct block")
}
// zero depth dag is raw data block
switch nd := n.(type) {
case *dag.ProtoNode:
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return err
}
if fsn.Type() != ft.TRaw {
return errors.New("expected raw block")
}
if p.RawLeaves {
return errors.New("expected raw leaf, got a protobuf node")
}
case *dag.RawNode:
if !p.RawLeaves {
return errors.New("expected protobuf node as leaf")
}
codec = cid.Raw
default:
return errors.New("expected ProtoNode or RawNode")
}
}
// verify prefix
if p.Prefix != nil {
prefix := n.Cid().Prefix()
expect := *p.Prefix // make a copy
expect.Codec = uint64(codec)
if codec == cid.Raw && expect.Version == 0 {
expect.Version = 1
}
if expect.MhLength == -1 {
expect.MhLength = prefix.MhLength
}
if prefix != expect {
return fmt.Errorf("unexpected cid prefix: expected: %v; got %v", expect, prefix)
}
}
if depth == 0 {
return nil
}
nd, ok := n.(*dag.ProtoNode)
if !ok {
return errors.New("expected ProtoNode")
}
// Verify this is a branch node
fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return err
}
if fsn.Type() != ft.TFile {
return fmt.Errorf("expected file as branch node, got: %s", fsn.Type())
}
if len(fsn.Data()) > 0 {
return errors.New("branch node should not have data")
}
for i := 0; i < len(nd.Links()); i++ {
child, err := nd.Links()[i].GetNode(context.TODO(), p.Getter)
if err != nil {
return err
}
if i < p.Direct {
// Direct blocks
err := verifyTDagRec(child, 0, p)
if err != nil {
return err
}
} else {
// Recursive trickle dags
rdepth := ((i - p.Direct) / p.LayerRepeat) + 1
if rdepth >= depth && depth > 0 {
return errors.New("child dag was too deep")
}
err := verifyTDagRec(child, rdepth, p)
if err != nil {
return err
}
}
}
return nil
}
| appendFillLastChild | identifier_name |
main.rs | extern crate rand;
extern crate getopts;
extern crate num_cpus;
extern crate cards;
extern crate poker_hands;
use std::env;
use std::collections::HashMap;
use std::str::FromStr;
use std::thread;
use std::sync::*;
use getopts::{Options, Matches, HasArg, Occur};
use rand::{thread_rng, Rng};
use cards::{Card, Rank, Suit, card};
use cards::Rank::*;
use cards::Suit::*;
use poker_hands::{Hand, NUM_HANDS};
fn main() {
let args: Vec<String> = env::args().collect();
let opts = create_opts();
let arg_matches = match opts.parse(&args[1..]) {
Ok(matches) => matches,
Err(error) => panic!("Could not parse {:?}; error: {:?}", args, error)
};
let initial_board = get_initial_board(&arg_matches);
let total_num_sims =
if initial_board.len() == BOARD_SIZE {
println!("The given board is full, so there's no uncertainty.");
1
} else {
get_num_sims(&arg_matches)
};
let all_hole_cards = get_hole_cards(&arg_matches);
let num_threads = get_num_threads(&arg_matches);
println!("Simulating {} hands", total_num_sims);
if initial_board.len() > 0 {
println!("For board {:?}", initial_board);
}
println!("Using {} threads", num_threads);
let board_ref = Arc::new(initial_board);
let hole_cards_ref = Arc::new(all_hole_cards);
let outcomes = Arc::new(Mutex::new(HashMap::new()));
let mut children = Vec::with_capacity(num_threads as usize);
for thread_index in 0..num_threads {
let this_num_sims = get_num_sims_for_thread(total_num_sims, num_threads as i32, thread_index as i32);
let this_board_ref = board_ref.clone();
let this_hole_cards_ref = hole_cards_ref.clone();
let this_outcomes = outcomes.clone();
let child_thread = thread::spawn(move || {
simulate_hands(this_num_sims, &this_board_ref, &this_hole_cards_ref, &this_outcomes)
});
children.push(child_thread);
}
for child_thread in children {
match child_thread.join() {
Ok(_) => continue,
Err(e) => panic!("Worker thread died! {:?}", e)
}
}
let final_outcomes = outcomes.lock().unwrap();
let sorted_outcomes = sort_descending(
final_outcomes.iter().map(|(outcome, stats)| (outcome.clone(), stats.total_events())).collect());
for outcome in sorted_outcomes {
let stats = final_outcomes.get(&outcome).unwrap();
let total_events = stats.total_events();
let outcome_percent = (total_events as f64 / total_num_sims as f64) * 100f64;
let outcome_name = name_outcome(&outcome, &hole_cards_ref);
println!("{} ({} times, {}%)", outcome_name, total_events, outcome_percent);
let sorted_hand_indices = sort_descending(
(0..NUM_HANDS).map(|index| (index, stats.events[index])).collect());
for hand_index in sorted_hand_indices {
let hand_events = stats.events[hand_index];
if hand_events == 0 {
continue;
}
let hand_percent = (hand_events as f64 / total_events as f64) * 100f64;
println!("\t{}: {} times, {}%", Hand::name_hand_index(hand_index), hand_events, hand_percent);
}
}
}
fn simulate_hands(num_sims: i32, initial_board: &[Card], all_hole_cards: &[[Card; 2]], outcomes: &Mutex<HashMap<Vec<i32>, HandStats>>) {
for _ in 0..num_sims {
let board = pick_random_board(initial_board, all_hole_cards);
assert!(board.len() == BOARD_SIZE);
let mut hands = Vec::with_capacity(all_hole_cards.len());
for hole_cards in all_hole_cards {
let mut cards: Vec<Card> = Vec::with_capacity(hole_cards.len() + board.len());
cards.extend(board.iter().cloned());
cards.extend(hole_cards.iter().cloned());
// Sort descending - best_hand_of() requires this.
cards.sort_by(|first, second| second.cmp(first));
let hand = Hand::best_hand_of(&cards);
hands.push(hand);
}
assert!(hands.len() == all_hole_cards.len());
let mut winners = Vec::new();
winners.push(0);
let mut best_hand = hands[0];
for index in 1..hands.len() {
let hand = hands[index];
if hand == best_hand {
winners.push(index as i32);
} else if hand > best_hand {
winners.clear();
winners.push(index as i32);
best_hand = hand;
}
}
insert_outcome(&mut outcomes.lock().unwrap(), &winners, &best_hand);
}
}
fn sort_descending<T: Clone>(mut items: Vec<(T, i32)>) -> Vec<T> {
// Switch the order to get greatest-first.
items.sort_by(|&(_, first), &(_, second)| second.cmp(&first));
items.iter().map(|&(ref item, _)| item.clone()).collect()
}
const HOLE_CARDS_ARG: &'static str = "h";
const NUM_SIMS_ARG: &'static str = "n";
const NUM_THREADS_ARG: &'static str = "t";
const BOARD_ARG: &'static str = "b";
fn create_opts() -> Options {
// Unfortunately, there doesn't seem to be a way to require that an option appears at least once.
let mut opts = Options::new();
opts.opt(HOLE_CARDS_ARG, "hole cards", "A single player's hole cards", "XxYy", HasArg::Yes, Occur::Multi);
opts.opt(NUM_SIMS_ARG, "number of simulations", "The number of hands to simulate in order to approximate the true distribution.", "n", HasArg::Yes, Occur::Optional);
opts.opt(NUM_THREADS_ARG, "number of threads to use", "The number of threads to use simultaneously to run the simulations.", "t", HasArg::Yes, Occur::Optional);
opts.opt(BOARD_ARG, "board cards", "The cards already on the board.", "XxYyZz", HasArg::Yes, Occur::Optional);
opts
}
fn get_initial_board(matches: &Matches) -> Vec<Card> {
if !matches.opt_present(BOARD_ARG) {
return Vec::new();
}
let board_string = matches.opt_str(&BOARD_ARG).unwrap();
let initial_board = parse_cards_string(&board_string);
assert!(initial_board.len() <= BOARD_SIZE, "Initial board has more than {} cards! {}", BOARD_SIZE, board_string);
initial_board
}
fn get_hole_cards(matches: &Matches) -> Vec<[Card; 2]> {
assert!(matches.opt_count(HOLE_CARDS_ARG) >= 1, "No hole cards specified");
let hole_strings = matches.opt_strs(HOLE_CARDS_ARG);
let mut all_hole_cards = Vec::with_capacity(hole_strings.len());
for hole_string in &hole_strings {
let hole_cards = parse_cards_string(hole_string);
assert!(hole_cards.len() == 2, "{} specifies {} cards, not 2", hole_string, hole_cards.len());
all_hole_cards.push([hole_cards[0], hole_cards[1]]);
}
all_hole_cards
}
const DEFAULT_NUM_SIMS: i32 = 10 * 1000;
fn get_num_sims(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_SIMS_ARG, DEFAULT_NUM_SIMS)
}
fn get_num_threads(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_THREADS_ARG, num_cpus::get() as i32)
}
fn get_numeric_arg(matches: &Matches, arg: &str, default: i32) -> i32 {
if !matches.opt_present(arg) {
return default;
}
let num_str = matches.opt_str(arg).unwrap();
let num_maybe: Result<i32, _> = FromStr::from_str(&num_str);
match num_maybe {
Ok(num) => num,
Err(_) => {
println!("Could not parse {} arg as a number: {}; ignoring it.", arg, num_str);
default
}
}
}
fn parse_cards_string(cards_string: &str) -> Vec<Card> { | let num_cards = chars.len() / 2;
let mut cards = Vec::with_capacity(num_cards);
for card_index in 0..num_cards {
let rank_index = card_index * 2;
let suit_index = rank_index + 1;
let rank_char = chars[rank_index];
let suit_char = chars[suit_index];
let rank = parse_rank(rank_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a rank",
rank_char, rank_index, cards_string));
let suit = parse_suit(suit_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a suit",
suit_char, suit_index, cards_string));
cards.push(card(rank, suit));
}
cards
}
fn parse_rank(rank_char: char) -> Option<Rank> {
let rank = match rank_char {
'A' | 'a' => Ace,
'K' | 'k' => King,
'Q' | 'q' => Queen,
'J' | 'j' => Jack,
'T' | 't' => Ten,
'9' => Nine,
'8' => Eight,
'7' => Seven,
'6' => Six,
'5' => Five,
'4' => Four,
'3' => Three,
'2' => Two,
_ => return None
};
Some(rank)
}
fn parse_suit(suit_char: char) -> Option<Suit> {
let suit = match suit_char {
'S' | 's' => Spades,
'H' | 'h' => Hearts,
'C' | 'c' => Clubs,
'D' | 'd' => Diamonds,
_ => return None
};
Some(suit)
}
fn insert_outcome(outcomes: &mut HashMap<Vec<i32>, HandStats>, winners: &Vec<i32>, hand: &Hand) {
// Set up default stats if there are none yet.
if let None = outcomes.get(winners) {
outcomes.insert(winners.clone(), HandStats::create());
}
outcomes.get_mut(winners).unwrap().add_event(hand);
}
const BOARD_SIZE: usize = 5;
fn pick_random_board(initial_board: &[Card], all_hole_cards: &[[Card; 2]]) -> [Card; BOARD_SIZE] {
let mut board = [card(Ace, Spades); BOARD_SIZE]; // Dummies
for index in 0..initial_board.len() {
board[index] = initial_board[index];
}
let mut used_indexes: Vec<u8> = Vec::with_capacity(all_hole_cards.len() + BOARD_SIZE);
let card_to_index = |card: &Card| (*card).into();
used_indexes.extend(
initial_board.iter().map(&card_to_index));
used_indexes.extend(
all_hole_cards.iter().
flat_map(|cards| cards). // Flatten all hands into one iterator
map(&card_to_index));
let mut board_index = initial_board.len();
let mut rng = rand::thread_rng();
while board_index < BOARD_SIZE {
/*
Generate random cards and skip them if they're used already.
The assumption is that few cards will be used compared to the
possible 52, so it should skip rarely and be efficient.
*/
let card = rng.gen::<Card>();
let card_index = card.into();
if used_indexes.contains(&card_index) {
continue;
}
used_indexes.push(card_index);
board[board_index] = card;
board_index += 1;
}
board
}
fn get_num_sims_for_thread(total_num_sims: i32, total_num_threads: i32, thread_index: i32) -> i32 {
assert!(total_num_threads > thread_index);
let base_num_sims = total_num_sims / total_num_threads;
let threads_with_extra = total_num_sims % total_num_threads;
let this_threads_extra =
if thread_index < threads_with_extra {
1
} else {
0
};
base_num_sims + this_threads_extra
}
struct HandStats {
events: [i32; NUM_HANDS], // Number of times each hand happened
}
impl HandStats {
fn create() -> HandStats {
HandStats{events: [0; NUM_HANDS]}
}
fn add_event(&mut self, hand: &Hand) {
let event_index: u8 = (*hand).into();
self.events[event_index as usize] += 1;
}
fn total_events(&self) -> i32 {
self.events.iter().fold(0, |aggregate, event| aggregate + event)
}
}
fn name_outcome(outcome: &Vec<i32>, all_hole_cards: &[[Card; 2]]) -> String {
if outcome.len() == 1 {
let hand_index = outcome[0];
return format!("Hand {} {:?} wins", outcome[0], all_hole_cards[hand_index as usize]);
}
if outcome.len() > 0 {
return format!("Chop between hands {}", hands_to_string(all_hole_cards, &outcome));
}
panic!("Empty outcome")
}
fn hands_to_string(hands: &[[Card; 2]], indices: &[i32]) -> String {
let mut string = format!("{:?}", hands[indices[0] as usize]);
for index in 1..indices.len() {
string = string + &format!(", {:?}", hands[indices[index as usize] as usize]);
}
string
} | let chars: Vec<char> = cards_string.chars().collect();
assert!(chars.len() % 2 == 0, "Odd numbers of characters, cannot be cards: {}", cards_string);
| random_line_split |
main.rs | extern crate rand;
extern crate getopts;
extern crate num_cpus;
extern crate cards;
extern crate poker_hands;
use std::env;
use std::collections::HashMap;
use std::str::FromStr;
use std::thread;
use std::sync::*;
use getopts::{Options, Matches, HasArg, Occur};
use rand::{thread_rng, Rng};
use cards::{Card, Rank, Suit, card};
use cards::Rank::*;
use cards::Suit::*;
use poker_hands::{Hand, NUM_HANDS};
fn main() {
let args: Vec<String> = env::args().collect();
let opts = create_opts();
let arg_matches = match opts.parse(&args[1..]) {
Ok(matches) => matches,
Err(error) => panic!("Could not parse {:?}; error: {:?}", args, error)
};
let initial_board = get_initial_board(&arg_matches);
let total_num_sims =
if initial_board.len() == BOARD_SIZE {
println!("The given board is full, so there's no uncertainty.");
1
} else {
get_num_sims(&arg_matches)
};
let all_hole_cards = get_hole_cards(&arg_matches);
let num_threads = get_num_threads(&arg_matches);
println!("Simulating {} hands", total_num_sims);
if initial_board.len() > 0 {
println!("For board {:?}", initial_board);
}
println!("Using {} threads", num_threads);
let board_ref = Arc::new(initial_board);
let hole_cards_ref = Arc::new(all_hole_cards);
let outcomes = Arc::new(Mutex::new(HashMap::new()));
let mut children = Vec::with_capacity(num_threads as usize);
for thread_index in 0..num_threads {
let this_num_sims = get_num_sims_for_thread(total_num_sims, num_threads as i32, thread_index as i32);
let this_board_ref = board_ref.clone();
let this_hole_cards_ref = hole_cards_ref.clone();
let this_outcomes = outcomes.clone();
let child_thread = thread::spawn(move || {
simulate_hands(this_num_sims, &this_board_ref, &this_hole_cards_ref, &this_outcomes)
});
children.push(child_thread);
}
for child_thread in children {
match child_thread.join() {
Ok(_) => continue,
Err(e) => panic!("Worker thread died! {:?}", e)
}
}
let final_outcomes = outcomes.lock().unwrap();
let sorted_outcomes = sort_descending(
final_outcomes.iter().map(|(outcome, stats)| (outcome.clone(), stats.total_events())).collect());
for outcome in sorted_outcomes {
let stats = final_outcomes.get(&outcome).unwrap();
let total_events = stats.total_events();
let outcome_percent = (total_events as f64 / total_num_sims as f64) * 100f64;
let outcome_name = name_outcome(&outcome, &hole_cards_ref);
println!("{} ({} times, {}%)", outcome_name, total_events, outcome_percent);
let sorted_hand_indices = sort_descending(
(0..NUM_HANDS).map(|index| (index, stats.events[index])).collect());
for hand_index in sorted_hand_indices {
let hand_events = stats.events[hand_index];
if hand_events == 0 {
continue;
}
let hand_percent = (hand_events as f64 / total_events as f64) * 100f64;
println!("\t{}: {} times, {}%", Hand::name_hand_index(hand_index), hand_events, hand_percent);
}
}
}
fn simulate_hands(num_sims: i32, initial_board: &[Card], all_hole_cards: &[[Card; 2]], outcomes: &Mutex<HashMap<Vec<i32>, HandStats>>) {
for _ in 0..num_sims {
let board = pick_random_board(initial_board, all_hole_cards);
assert!(board.len() == BOARD_SIZE);
let mut hands = Vec::with_capacity(all_hole_cards.len());
for hole_cards in all_hole_cards {
let mut cards: Vec<Card> = Vec::with_capacity(hole_cards.len() + board.len());
cards.extend(board.iter().cloned());
cards.extend(hole_cards.iter().cloned());
// Sort descending - best_hand_of() requires this.
cards.sort_by(|first, second| second.cmp(first));
let hand = Hand::best_hand_of(&cards);
hands.push(hand);
}
assert!(hands.len() == all_hole_cards.len());
let mut winners = Vec::new();
winners.push(0);
let mut best_hand = hands[0];
for index in 1..hands.len() {
let hand = hands[index];
if hand == best_hand {
winners.push(index as i32);
} else if hand > best_hand {
winners.clear();
winners.push(index as i32);
best_hand = hand;
}
}
insert_outcome(&mut outcomes.lock().unwrap(), &winners, &best_hand);
}
}
fn sort_descending<T: Clone>(mut items: Vec<(T, i32)>) -> Vec<T> {
// Switch the order to get greatest-first.
items.sort_by(|&(_, first), &(_, second)| second.cmp(&first));
items.iter().map(|&(ref item, _)| item.clone()).collect()
}
const HOLE_CARDS_ARG: &'static str = "h";
const NUM_SIMS_ARG: &'static str = "n";
const NUM_THREADS_ARG: &'static str = "t";
const BOARD_ARG: &'static str = "b";
fn create_opts() -> Options {
// Unfortunately, there doesn't seem to be a way to require that an option appears at least once.
let mut opts = Options::new();
opts.opt(HOLE_CARDS_ARG, "hole cards", "A single player's hole cards", "XxYy", HasArg::Yes, Occur::Multi);
opts.opt(NUM_SIMS_ARG, "number of simulations", "The number of hands to simulate in order to approximate the true distribution.", "n", HasArg::Yes, Occur::Optional);
opts.opt(NUM_THREADS_ARG, "number of threads to use", "The number of threads to use simultaneously to run the simulations.", "t", HasArg::Yes, Occur::Optional);
opts.opt(BOARD_ARG, "board cards", "The cards already on the board.", "XxYyZz", HasArg::Yes, Occur::Optional);
opts
}
fn get_initial_board(matches: &Matches) -> Vec<Card> {
if !matches.opt_present(BOARD_ARG) {
return Vec::new();
}
let board_string = matches.opt_str(&BOARD_ARG).unwrap();
let initial_board = parse_cards_string(&board_string);
assert!(initial_board.len() <= BOARD_SIZE, "Initial board has more than {} cards! {}", BOARD_SIZE, board_string);
initial_board
}
fn get_hole_cards(matches: &Matches) -> Vec<[Card; 2]> {
assert!(matches.opt_count(HOLE_CARDS_ARG) >= 1, "No hole cards specified");
let hole_strings = matches.opt_strs(HOLE_CARDS_ARG);
let mut all_hole_cards = Vec::with_capacity(hole_strings.len());
for hole_string in &hole_strings {
let hole_cards = parse_cards_string(hole_string);
assert!(hole_cards.len() == 2, "{} specifies {} cards, not 2", hole_string, hole_cards.len());
all_hole_cards.push([hole_cards[0], hole_cards[1]]);
}
all_hole_cards
}
const DEFAULT_NUM_SIMS: i32 = 10 * 1000;
fn get_num_sims(matches: &Matches) -> i32 |
fn get_num_threads(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_THREADS_ARG, num_cpus::get() as i32)
}
fn get_numeric_arg(matches: &Matches, arg: &str, default: i32) -> i32 {
if !matches.opt_present(arg) {
return default;
}
let num_str = matches.opt_str(arg).unwrap();
let num_maybe: Result<i32, _> = FromStr::from_str(&num_str);
match num_maybe {
Ok(num) => num,
Err(_) => {
println!("Could not parse {} arg as a number: {}; ignoring it.", arg, num_str);
default
}
}
}
fn parse_cards_string(cards_string: &str) -> Vec<Card> {
let chars: Vec<char> = cards_string.chars().collect();
assert!(chars.len() % 2 == 0, "Odd numbers of characters, cannot be cards: {}", cards_string);
let num_cards = chars.len() / 2;
let mut cards = Vec::with_capacity(num_cards);
for card_index in 0..num_cards {
let rank_index = card_index * 2;
let suit_index = rank_index + 1;
let rank_char = chars[rank_index];
let suit_char = chars[suit_index];
let rank = parse_rank(rank_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a rank",
rank_char, rank_index, cards_string));
let suit = parse_suit(suit_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a suit",
suit_char, suit_index, cards_string));
cards.push(card(rank, suit));
}
cards
}
fn parse_rank(rank_char: char) -> Option<Rank> {
let rank = match rank_char {
'A' | 'a' => Ace,
'K' | 'k' => King,
'Q' | 'q' => Queen,
'J' | 'j' => Jack,
'T' | 't' => Ten,
'9' => Nine,
'8' => Eight,
'7' => Seven,
'6' => Six,
'5' => Five,
'4' => Four,
'3' => Three,
'2' => Two,
_ => return None
};
Some(rank)
}
fn parse_suit(suit_char: char) -> Option<Suit> {
let suit = match suit_char {
'S' | 's' => Spades,
'H' | 'h' => Hearts,
'C' | 'c' => Clubs,
'D' | 'd' => Diamonds,
_ => return None
};
Some(suit)
}
fn insert_outcome(outcomes: &mut HashMap<Vec<i32>, HandStats>, winners: &Vec<i32>, hand: &Hand) {
// Set up default stats if there are none yet.
if let None = outcomes.get(winners) {
outcomes.insert(winners.clone(), HandStats::create());
}
outcomes.get_mut(winners).unwrap().add_event(hand);
}
const BOARD_SIZE: usize = 5;
fn pick_random_board(initial_board: &[Card], all_hole_cards: &[[Card; 2]]) -> [Card; BOARD_SIZE] {
let mut board = [card(Ace, Spades); BOARD_SIZE]; // Dummies
for index in 0..initial_board.len() {
board[index] = initial_board[index];
}
let mut used_indexes: Vec<u8> = Vec::with_capacity(all_hole_cards.len() + BOARD_SIZE);
let card_to_index = |card: &Card| (*card).into();
used_indexes.extend(
initial_board.iter().map(&card_to_index));
used_indexes.extend(
all_hole_cards.iter().
flat_map(|cards| cards). // Flatten all hands into one iterator
map(&card_to_index));
let mut board_index = initial_board.len();
let mut rng = rand::thread_rng();
while board_index < BOARD_SIZE {
/*
Generate random cards and skip them if they're used already.
The assumption is that few cards will be used compared to the
possible 52, so it should skip rarely and be efficient.
*/
let card = rng.gen::<Card>();
let card_index = card.into();
if used_indexes.contains(&card_index) {
continue;
}
used_indexes.push(card_index);
board[board_index] = card;
board_index += 1;
}
board
}
fn get_num_sims_for_thread(total_num_sims: i32, total_num_threads: i32, thread_index: i32) -> i32 {
assert!(total_num_threads > thread_index);
let base_num_sims = total_num_sims / total_num_threads;
let threads_with_extra = total_num_sims % total_num_threads;
let this_threads_extra =
if thread_index < threads_with_extra {
1
} else {
0
};
base_num_sims + this_threads_extra
}
struct HandStats {
events: [i32; NUM_HANDS], // Number of times each hand happened
}
impl HandStats {
fn create() -> HandStats {
HandStats{events: [0; NUM_HANDS]}
}
fn add_event(&mut self, hand: &Hand) {
let event_index: u8 = (*hand).into();
self.events[event_index as usize] += 1;
}
fn total_events(&self) -> i32 {
self.events.iter().fold(0, |aggregate, event| aggregate + event)
}
}
fn name_outcome(outcome: &Vec<i32>, all_hole_cards: &[[Card; 2]]) -> String {
if outcome.len() == 1 {
let hand_index = outcome[0];
return format!("Hand {} {:?} wins", outcome[0], all_hole_cards[hand_index as usize]);
}
if outcome.len() > 0 {
return format!("Chop between hands {}", hands_to_string(all_hole_cards, &outcome));
}
panic!("Empty outcome")
}
fn hands_to_string(hands: &[[Card; 2]], indices: &[i32]) -> String {
let mut string = format!("{:?}", hands[indices[0] as usize]);
for index in 1..indices.len() {
string = string + &format!(", {:?}", hands[indices[index as usize] as usize]);
}
string
}
| {
get_numeric_arg(matches, NUM_SIMS_ARG, DEFAULT_NUM_SIMS)
} | identifier_body |
main.rs | extern crate rand;
extern crate getopts;
extern crate num_cpus;
extern crate cards;
extern crate poker_hands;
use std::env;
use std::collections::HashMap;
use std::str::FromStr;
use std::thread;
use std::sync::*;
use getopts::{Options, Matches, HasArg, Occur};
use rand::{thread_rng, Rng};
use cards::{Card, Rank, Suit, card};
use cards::Rank::*;
use cards::Suit::*;
use poker_hands::{Hand, NUM_HANDS};
fn main() {
let args: Vec<String> = env::args().collect();
let opts = create_opts();
let arg_matches = match opts.parse(&args[1..]) {
Ok(matches) => matches,
Err(error) => panic!("Could not parse {:?}; error: {:?}", args, error)
};
let initial_board = get_initial_board(&arg_matches);
let total_num_sims =
if initial_board.len() == BOARD_SIZE {
println!("The given board is full, so there's no uncertainty.");
1
} else {
get_num_sims(&arg_matches)
};
let all_hole_cards = get_hole_cards(&arg_matches);
let num_threads = get_num_threads(&arg_matches);
println!("Simulating {} hands", total_num_sims);
if initial_board.len() > 0 {
println!("For board {:?}", initial_board);
}
println!("Using {} threads", num_threads);
let board_ref = Arc::new(initial_board);
let hole_cards_ref = Arc::new(all_hole_cards);
let outcomes = Arc::new(Mutex::new(HashMap::new()));
let mut children = Vec::with_capacity(num_threads as usize);
for thread_index in 0..num_threads {
let this_num_sims = get_num_sims_for_thread(total_num_sims, num_threads as i32, thread_index as i32);
let this_board_ref = board_ref.clone();
let this_hole_cards_ref = hole_cards_ref.clone();
let this_outcomes = outcomes.clone();
let child_thread = thread::spawn(move || {
simulate_hands(this_num_sims, &this_board_ref, &this_hole_cards_ref, &this_outcomes)
});
children.push(child_thread);
}
for child_thread in children {
match child_thread.join() {
Ok(_) => continue,
Err(e) => panic!("Worker thread died! {:?}", e)
}
}
let final_outcomes = outcomes.lock().unwrap();
let sorted_outcomes = sort_descending(
final_outcomes.iter().map(|(outcome, stats)| (outcome.clone(), stats.total_events())).collect());
for outcome in sorted_outcomes {
let stats = final_outcomes.get(&outcome).unwrap();
let total_events = stats.total_events();
let outcome_percent = (total_events as f64 / total_num_sims as f64) * 100f64;
let outcome_name = name_outcome(&outcome, &hole_cards_ref);
println!("{} ({} times, {}%)", outcome_name, total_events, outcome_percent);
let sorted_hand_indices = sort_descending(
(0..NUM_HANDS).map(|index| (index, stats.events[index])).collect());
for hand_index in sorted_hand_indices {
let hand_events = stats.events[hand_index];
if hand_events == 0 {
continue;
}
let hand_percent = (hand_events as f64 / total_events as f64) * 100f64;
println!("\t{}: {} times, {}%", Hand::name_hand_index(hand_index), hand_events, hand_percent);
}
}
}
fn simulate_hands(num_sims: i32, initial_board: &[Card], all_hole_cards: &[[Card; 2]], outcomes: &Mutex<HashMap<Vec<i32>, HandStats>>) {
for _ in 0..num_sims {
let board = pick_random_board(initial_board, all_hole_cards);
assert!(board.len() == BOARD_SIZE);
let mut hands = Vec::with_capacity(all_hole_cards.len());
for hole_cards in all_hole_cards {
let mut cards: Vec<Card> = Vec::with_capacity(hole_cards.len() + board.len());
cards.extend(board.iter().cloned());
cards.extend(hole_cards.iter().cloned());
// Sort descending - best_hand_of() requires this.
cards.sort_by(|first, second| second.cmp(first));
let hand = Hand::best_hand_of(&cards);
hands.push(hand);
}
assert!(hands.len() == all_hole_cards.len());
let mut winners = Vec::new();
winners.push(0);
let mut best_hand = hands[0];
for index in 1..hands.len() {
let hand = hands[index];
if hand == best_hand {
winners.push(index as i32);
} else if hand > best_hand {
winners.clear();
winners.push(index as i32);
best_hand = hand;
}
}
insert_outcome(&mut outcomes.lock().unwrap(), &winners, &best_hand);
}
}
fn sort_descending<T: Clone>(mut items: Vec<(T, i32)>) -> Vec<T> {
// Switch the order to get greatest-first.
items.sort_by(|&(_, first), &(_, second)| second.cmp(&first));
items.iter().map(|&(ref item, _)| item.clone()).collect()
}
const HOLE_CARDS_ARG: &'static str = "h";
const NUM_SIMS_ARG: &'static str = "n";
const NUM_THREADS_ARG: &'static str = "t";
const BOARD_ARG: &'static str = "b";
fn | () -> Options {
// Unfortunately, there doesn't seem to be a way to require that an option appears at least once.
let mut opts = Options::new();
opts.opt(HOLE_CARDS_ARG, "hole cards", "A single player's hole cards", "XxYy", HasArg::Yes, Occur::Multi);
opts.opt(NUM_SIMS_ARG, "number of simulations", "The number of hands to simulate in order to approximate the true distribution.", "n", HasArg::Yes, Occur::Optional);
opts.opt(NUM_THREADS_ARG, "number of threads to use", "The number of threads to use simultaneously to run the simulations.", "t", HasArg::Yes, Occur::Optional);
opts.opt(BOARD_ARG, "board cards", "The cards already on the board.", "XxYyZz", HasArg::Yes, Occur::Optional);
opts
}
fn get_initial_board(matches: &Matches) -> Vec<Card> {
if !matches.opt_present(BOARD_ARG) {
return Vec::new();
}
let board_string = matches.opt_str(&BOARD_ARG).unwrap();
let initial_board = parse_cards_string(&board_string);
assert!(initial_board.len() <= BOARD_SIZE, "Initial board has more than {} cards! {}", BOARD_SIZE, board_string);
initial_board
}
fn get_hole_cards(matches: &Matches) -> Vec<[Card; 2]> {
assert!(matches.opt_count(HOLE_CARDS_ARG) >= 1, "No hole cards specified");
let hole_strings = matches.opt_strs(HOLE_CARDS_ARG);
let mut all_hole_cards = Vec::with_capacity(hole_strings.len());
for hole_string in &hole_strings {
let hole_cards = parse_cards_string(hole_string);
assert!(hole_cards.len() == 2, "{} specifies {} cards, not 2", hole_string, hole_cards.len());
all_hole_cards.push([hole_cards[0], hole_cards[1]]);
}
all_hole_cards
}
const DEFAULT_NUM_SIMS: i32 = 10 * 1000;
fn get_num_sims(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_SIMS_ARG, DEFAULT_NUM_SIMS)
}
fn get_num_threads(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_THREADS_ARG, num_cpus::get() as i32)
}
fn get_numeric_arg(matches: &Matches, arg: &str, default: i32) -> i32 {
if !matches.opt_present(arg) {
return default;
}
let num_str = matches.opt_str(arg).unwrap();
let num_maybe: Result<i32, _> = FromStr::from_str(&num_str);
match num_maybe {
Ok(num) => num,
Err(_) => {
println!("Could not parse {} arg as a number: {}; ignoring it.", arg, num_str);
default
}
}
}
fn parse_cards_string(cards_string: &str) -> Vec<Card> {
let chars: Vec<char> = cards_string.chars().collect();
assert!(chars.len() % 2 == 0, "Odd numbers of characters, cannot be cards: {}", cards_string);
let num_cards = chars.len() / 2;
let mut cards = Vec::with_capacity(num_cards);
for card_index in 0..num_cards {
let rank_index = card_index * 2;
let suit_index = rank_index + 1;
let rank_char = chars[rank_index];
let suit_char = chars[suit_index];
let rank = parse_rank(rank_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a rank",
rank_char, rank_index, cards_string));
let suit = parse_suit(suit_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a suit",
suit_char, suit_index, cards_string));
cards.push(card(rank, suit));
}
cards
}
fn parse_rank(rank_char: char) -> Option<Rank> {
let rank = match rank_char {
'A' | 'a' => Ace,
'K' | 'k' => King,
'Q' | 'q' => Queen,
'J' | 'j' => Jack,
'T' | 't' => Ten,
'9' => Nine,
'8' => Eight,
'7' => Seven,
'6' => Six,
'5' => Five,
'4' => Four,
'3' => Three,
'2' => Two,
_ => return None
};
Some(rank)
}
fn parse_suit(suit_char: char) -> Option<Suit> {
let suit = match suit_char {
'S' | 's' => Spades,
'H' | 'h' => Hearts,
'C' | 'c' => Clubs,
'D' | 'd' => Diamonds,
_ => return None
};
Some(suit)
}
fn insert_outcome(outcomes: &mut HashMap<Vec<i32>, HandStats>, winners: &Vec<i32>, hand: &Hand) {
// Set up default stats if there are none yet.
if let None = outcomes.get(winners) {
outcomes.insert(winners.clone(), HandStats::create());
}
outcomes.get_mut(winners).unwrap().add_event(hand);
}
const BOARD_SIZE: usize = 5;
fn pick_random_board(initial_board: &[Card], all_hole_cards: &[[Card; 2]]) -> [Card; BOARD_SIZE] {
let mut board = [card(Ace, Spades); BOARD_SIZE]; // Dummies
for index in 0..initial_board.len() {
board[index] = initial_board[index];
}
let mut used_indexes: Vec<u8> = Vec::with_capacity(all_hole_cards.len() + BOARD_SIZE);
let card_to_index = |card: &Card| (*card).into();
used_indexes.extend(
initial_board.iter().map(&card_to_index));
used_indexes.extend(
all_hole_cards.iter().
flat_map(|cards| cards). // Flatten all hands into one iterator
map(&card_to_index));
let mut board_index = initial_board.len();
let mut rng = rand::thread_rng();
while board_index < BOARD_SIZE {
/*
Generate random cards and skip them if they're used already.
The assumption is that few cards will be used compared to the
possible 52, so it should skip rarely and be efficient.
*/
let card = rng.gen::<Card>();
let card_index = card.into();
if used_indexes.contains(&card_index) {
continue;
}
used_indexes.push(card_index);
board[board_index] = card;
board_index += 1;
}
board
}
fn get_num_sims_for_thread(total_num_sims: i32, total_num_threads: i32, thread_index: i32) -> i32 {
assert!(total_num_threads > thread_index);
let base_num_sims = total_num_sims / total_num_threads;
let threads_with_extra = total_num_sims % total_num_threads;
let this_threads_extra =
if thread_index < threads_with_extra {
1
} else {
0
};
base_num_sims + this_threads_extra
}
struct HandStats {
events: [i32; NUM_HANDS], // Number of times each hand happened
}
impl HandStats {
fn create() -> HandStats {
HandStats{events: [0; NUM_HANDS]}
}
fn add_event(&mut self, hand: &Hand) {
let event_index: u8 = (*hand).into();
self.events[event_index as usize] += 1;
}
fn total_events(&self) -> i32 {
self.events.iter().fold(0, |aggregate, event| aggregate + event)
}
}
fn name_outcome(outcome: &Vec<i32>, all_hole_cards: &[[Card; 2]]) -> String {
if outcome.len() == 1 {
let hand_index = outcome[0];
return format!("Hand {} {:?} wins", outcome[0], all_hole_cards[hand_index as usize]);
}
if outcome.len() > 0 {
return format!("Chop between hands {}", hands_to_string(all_hole_cards, &outcome));
}
panic!("Empty outcome")
}
fn hands_to_string(hands: &[[Card; 2]], indices: &[i32]) -> String {
let mut string = format!("{:?}", hands[indices[0] as usize]);
for index in 1..indices.len() {
string = string + &format!(", {:?}", hands[indices[index as usize] as usize]);
}
string
}
| create_opts | identifier_name |
main.rs | extern crate rand;
extern crate getopts;
extern crate num_cpus;
extern crate cards;
extern crate poker_hands;
use std::env;
use std::collections::HashMap;
use std::str::FromStr;
use std::thread;
use std::sync::*;
use getopts::{Options, Matches, HasArg, Occur};
use rand::{thread_rng, Rng};
use cards::{Card, Rank, Suit, card};
use cards::Rank::*;
use cards::Suit::*;
use poker_hands::{Hand, NUM_HANDS};
fn main() {
let args: Vec<String> = env::args().collect();
let opts = create_opts();
let arg_matches = match opts.parse(&args[1..]) {
Ok(matches) => matches,
Err(error) => panic!("Could not parse {:?}; error: {:?}", args, error)
};
let initial_board = get_initial_board(&arg_matches);
let total_num_sims =
if initial_board.len() == BOARD_SIZE | else {
get_num_sims(&arg_matches)
};
let all_hole_cards = get_hole_cards(&arg_matches);
let num_threads = get_num_threads(&arg_matches);
println!("Simulating {} hands", total_num_sims);
if initial_board.len() > 0 {
println!("For board {:?}", initial_board);
}
println!("Using {} threads", num_threads);
let board_ref = Arc::new(initial_board);
let hole_cards_ref = Arc::new(all_hole_cards);
let outcomes = Arc::new(Mutex::new(HashMap::new()));
let mut children = Vec::with_capacity(num_threads as usize);
for thread_index in 0..num_threads {
let this_num_sims = get_num_sims_for_thread(total_num_sims, num_threads as i32, thread_index as i32);
let this_board_ref = board_ref.clone();
let this_hole_cards_ref = hole_cards_ref.clone();
let this_outcomes = outcomes.clone();
let child_thread = thread::spawn(move || {
simulate_hands(this_num_sims, &this_board_ref, &this_hole_cards_ref, &this_outcomes)
});
children.push(child_thread);
}
for child_thread in children {
match child_thread.join() {
Ok(_) => continue,
Err(e) => panic!("Worker thread died! {:?}", e)
}
}
let final_outcomes = outcomes.lock().unwrap();
let sorted_outcomes = sort_descending(
final_outcomes.iter().map(|(outcome, stats)| (outcome.clone(), stats.total_events())).collect());
for outcome in sorted_outcomes {
let stats = final_outcomes.get(&outcome).unwrap();
let total_events = stats.total_events();
let outcome_percent = (total_events as f64 / total_num_sims as f64) * 100f64;
let outcome_name = name_outcome(&outcome, &hole_cards_ref);
println!("{} ({} times, {}%)", outcome_name, total_events, outcome_percent);
let sorted_hand_indices = sort_descending(
(0..NUM_HANDS).map(|index| (index, stats.events[index])).collect());
for hand_index in sorted_hand_indices {
let hand_events = stats.events[hand_index];
if hand_events == 0 {
continue;
}
let hand_percent = (hand_events as f64 / total_events as f64) * 100f64;
println!("\t{}: {} times, {}%", Hand::name_hand_index(hand_index), hand_events, hand_percent);
}
}
}
fn simulate_hands(num_sims: i32, initial_board: &[Card], all_hole_cards: &[[Card; 2]], outcomes: &Mutex<HashMap<Vec<i32>, HandStats>>) {
for _ in 0..num_sims {
let board = pick_random_board(initial_board, all_hole_cards);
assert!(board.len() == BOARD_SIZE);
let mut hands = Vec::with_capacity(all_hole_cards.len());
for hole_cards in all_hole_cards {
let mut cards: Vec<Card> = Vec::with_capacity(hole_cards.len() + board.len());
cards.extend(board.iter().cloned());
cards.extend(hole_cards.iter().cloned());
// Sort descending - best_hand_of() requires this.
cards.sort_by(|first, second| second.cmp(first));
let hand = Hand::best_hand_of(&cards);
hands.push(hand);
}
assert!(hands.len() == all_hole_cards.len());
let mut winners = Vec::new();
winners.push(0);
let mut best_hand = hands[0];
for index in 1..hands.len() {
let hand = hands[index];
if hand == best_hand {
winners.push(index as i32);
} else if hand > best_hand {
winners.clear();
winners.push(index as i32);
best_hand = hand;
}
}
insert_outcome(&mut outcomes.lock().unwrap(), &winners, &best_hand);
}
}
fn sort_descending<T: Clone>(mut items: Vec<(T, i32)>) -> Vec<T> {
// Switch the order to get greatest-first.
items.sort_by(|&(_, first), &(_, second)| second.cmp(&first));
items.iter().map(|&(ref item, _)| item.clone()).collect()
}
const HOLE_CARDS_ARG: &'static str = "h";
const NUM_SIMS_ARG: &'static str = "n";
const NUM_THREADS_ARG: &'static str = "t";
const BOARD_ARG: &'static str = "b";
fn create_opts() -> Options {
// Unfortunately, there doesn't seem to be a way to require that an option appears at least once.
let mut opts = Options::new();
opts.opt(HOLE_CARDS_ARG, "hole cards", "A single player's hole cards", "XxYy", HasArg::Yes, Occur::Multi);
opts.opt(NUM_SIMS_ARG, "number of simulations", "The number of hands to simulate in order to approximate the true distribution.", "n", HasArg::Yes, Occur::Optional);
opts.opt(NUM_THREADS_ARG, "number of threads to use", "The number of threads to use simultaneously to run the simulations.", "t", HasArg::Yes, Occur::Optional);
opts.opt(BOARD_ARG, "board cards", "The cards already on the board.", "XxYyZz", HasArg::Yes, Occur::Optional);
opts
}
fn get_initial_board(matches: &Matches) -> Vec<Card> {
if !matches.opt_present(BOARD_ARG) {
return Vec::new();
}
let board_string = matches.opt_str(&BOARD_ARG).unwrap();
let initial_board = parse_cards_string(&board_string);
assert!(initial_board.len() <= BOARD_SIZE, "Initial board has more than {} cards! {}", BOARD_SIZE, board_string);
initial_board
}
fn get_hole_cards(matches: &Matches) -> Vec<[Card; 2]> {
assert!(matches.opt_count(HOLE_CARDS_ARG) >= 1, "No hole cards specified");
let hole_strings = matches.opt_strs(HOLE_CARDS_ARG);
let mut all_hole_cards = Vec::with_capacity(hole_strings.len());
for hole_string in &hole_strings {
let hole_cards = parse_cards_string(hole_string);
assert!(hole_cards.len() == 2, "{} specifies {} cards, not 2", hole_string, hole_cards.len());
all_hole_cards.push([hole_cards[0], hole_cards[1]]);
}
all_hole_cards
}
const DEFAULT_NUM_SIMS: i32 = 10 * 1000;
fn get_num_sims(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_SIMS_ARG, DEFAULT_NUM_SIMS)
}
fn get_num_threads(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_THREADS_ARG, num_cpus::get() as i32)
}
fn get_numeric_arg(matches: &Matches, arg: &str, default: i32) -> i32 {
if !matches.opt_present(arg) {
return default;
}
let num_str = matches.opt_str(arg).unwrap();
let num_maybe: Result<i32, _> = FromStr::from_str(&num_str);
match num_maybe {
Ok(num) => num,
Err(_) => {
println!("Could not parse {} arg as a number: {}; ignoring it.", arg, num_str);
default
}
}
}
fn parse_cards_string(cards_string: &str) -> Vec<Card> {
let chars: Vec<char> = cards_string.chars().collect();
assert!(chars.len() % 2 == 0, "Odd numbers of characters, cannot be cards: {}", cards_string);
let num_cards = chars.len() / 2;
let mut cards = Vec::with_capacity(num_cards);
for card_index in 0..num_cards {
let rank_index = card_index * 2;
let suit_index = rank_index + 1;
let rank_char = chars[rank_index];
let suit_char = chars[suit_index];
let rank = parse_rank(rank_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a rank",
rank_char, rank_index, cards_string));
let suit = parse_suit(suit_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a suit",
suit_char, suit_index, cards_string));
cards.push(card(rank, suit));
}
cards
}
fn parse_rank(rank_char: char) -> Option<Rank> {
let rank = match rank_char {
'A' | 'a' => Ace,
'K' | 'k' => King,
'Q' | 'q' => Queen,
'J' | 'j' => Jack,
'T' | 't' => Ten,
'9' => Nine,
'8' => Eight,
'7' => Seven,
'6' => Six,
'5' => Five,
'4' => Four,
'3' => Three,
'2' => Two,
_ => return None
};
Some(rank)
}
fn parse_suit(suit_char: char) -> Option<Suit> {
let suit = match suit_char {
'S' | 's' => Spades,
'H' | 'h' => Hearts,
'C' | 'c' => Clubs,
'D' | 'd' => Diamonds,
_ => return None
};
Some(suit)
}
fn insert_outcome(outcomes: &mut HashMap<Vec<i32>, HandStats>, winners: &Vec<i32>, hand: &Hand) {
// Set up default stats if there are none yet.
if let None = outcomes.get(winners) {
outcomes.insert(winners.clone(), HandStats::create());
}
outcomes.get_mut(winners).unwrap().add_event(hand);
}
const BOARD_SIZE: usize = 5;
fn pick_random_board(initial_board: &[Card], all_hole_cards: &[[Card; 2]]) -> [Card; BOARD_SIZE] {
let mut board = [card(Ace, Spades); BOARD_SIZE]; // Dummies
for index in 0..initial_board.len() {
board[index] = initial_board[index];
}
let mut used_indexes: Vec<u8> = Vec::with_capacity(all_hole_cards.len() + BOARD_SIZE);
let card_to_index = |card: &Card| (*card).into();
used_indexes.extend(
initial_board.iter().map(&card_to_index));
used_indexes.extend(
all_hole_cards.iter().
flat_map(|cards| cards). // Flatten all hands into one iterator
map(&card_to_index));
let mut board_index = initial_board.len();
let mut rng = rand::thread_rng();
while board_index < BOARD_SIZE {
/*
Generate random cards and skip them if they're used already.
The assumption is that few cards will be used compared to the
possible 52, so it should skip rarely and be efficient.
*/
let card = rng.gen::<Card>();
let card_index = card.into();
if used_indexes.contains(&card_index) {
continue;
}
used_indexes.push(card_index);
board[board_index] = card;
board_index += 1;
}
board
}
fn get_num_sims_for_thread(total_num_sims: i32, total_num_threads: i32, thread_index: i32) -> i32 {
assert!(total_num_threads > thread_index);
let base_num_sims = total_num_sims / total_num_threads;
let threads_with_extra = total_num_sims % total_num_threads;
let this_threads_extra =
if thread_index < threads_with_extra {
1
} else {
0
};
base_num_sims + this_threads_extra
}
struct HandStats {
events: [i32; NUM_HANDS], // Number of times each hand happened
}
impl HandStats {
fn create() -> HandStats {
HandStats{events: [0; NUM_HANDS]}
}
fn add_event(&mut self, hand: &Hand) {
let event_index: u8 = (*hand).into();
self.events[event_index as usize] += 1;
}
fn total_events(&self) -> i32 {
self.events.iter().fold(0, |aggregate, event| aggregate + event)
}
}
fn name_outcome(outcome: &Vec<i32>, all_hole_cards: &[[Card; 2]]) -> String {
if outcome.len() == 1 {
let hand_index = outcome[0];
return format!("Hand {} {:?} wins", outcome[0], all_hole_cards[hand_index as usize]);
}
if outcome.len() > 0 {
return format!("Chop between hands {}", hands_to_string(all_hole_cards, &outcome));
}
panic!("Empty outcome")
}
fn hands_to_string(hands: &[[Card; 2]], indices: &[i32]) -> String {
let mut string = format!("{:?}", hands[indices[0] as usize]);
for index in 1..indices.len() {
string = string + &format!(", {:?}", hands[indices[index as usize] as usize]);
}
string
}
| {
println!("The given board is full, so there's no uncertainty.");
1
} | conditional_block |
day18.rs | use crate::day::{DayResult, PartResult};
use chumsky::prelude::*;
use itertools::Itertools;
use std::{collections::LinkedList, ops::Add, str::FromStr};
pub fn run() -> Result<DayResult, Box<dyn std::error::Error>> {
let part1 = part1(include_str!("inputs/day18.txt"))?;
let part2 = part2(include_str!("inputs/day18.txt"))?;
Ok(DayResult::new(
PartResult::Success(format!("The answer is {}", part1)),
PartResult::Success(format!(
"The largest magnitude from two numbers is {}",
part2
)),
))
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishNumber {
values: LinkedList<SnailfishValue>,
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishValue {
value: u32,
depth: usize,
}
impl SnailfishValue {
fn deeper(&self) -> Self {
Self {
value: self.value,
depth: self.depth + 1,
}
}
}
impl SnailfishNumber {
fn split(&mut self) -> bool {
if let Some(index_to_split) = self
.values
.iter()
.enumerate()
.filter(|(_, v)| v.value >= 10)
.map(|(i, _)| i)
.next()
{
let mut back_side = self.values.split_off(index_to_split);
let split_num = back_side.pop_front().unwrap();
let new_depth = split_num.depth + 1;
self.values.push_back(SnailfishValue {
value: split_num.value / 2,
depth: new_depth,
});
self.values.push_back(SnailfishValue {
value: (split_num.value / 2) + (split_num.value % 2),
depth: new_depth,
});
self.values.append(&mut back_side);
true
} else {
false
}
}
fn explode(&mut self) -> bool {
let explode_pair: Vec<(usize, SnailfishValue)> = self
.values
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth >= 4)
.take(2)
.collect();
if explode_pair.len() < 2 {
// no explosions necessary
return false;
}
let left_index = explode_pair[0].0;
let left_value = &explode_pair[0].1;
let right_index = explode_pair[1].0;
let right_value = &explode_pair[1].1;
if left_index + 1 != right_index {
panic!("Exploding pair don't have neighbouring indicies, the list is corrupted. List: {:?}", self.values);
}
let mut back_side = self.values.split_off(left_index);
// drop the pair we're exploding
back_side.pop_front();
back_side.pop_front();
if let Some(to_left) = self.values.back_mut() {
// need to modify the number to the left
to_left.value += left_value.value;
}
// construct new 0 element
let new_zero = SnailfishValue {
value: 0,
depth: left_value.depth - 1,
};
self.values.push_back(new_zero);
if let Some(to_right) = back_side.front_mut() {
// need to modify the number to the right
to_right.value += right_value.value;
}
self.values.append(&mut back_side);
true
}
fn reduce(&mut self) {
if self.explode() {
self.reduce();
} else {
if self.split() {
self.reduce();
}
}
}
fn magnitude(&self) -> u32 {
let mut flattened = self.values.clone();
while flattened.len() > 1 {
// the first two deepest elements must be a pair
let deepest = flattened.iter().map(|v| v.depth).max().unwrap();
let deepest_two = flattened
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth == deepest)
.take(2)
.collect::<Vec<_>>();
let new_value = SnailfishValue {
value: deepest_two[0].1.value * 3 + deepest_two[1].1.value * 2,
depth: if deepest == 0 { 0 } else { deepest - 1 },
};
let mut back = flattened.split_off(deepest_two[0].0);
back.pop_front();
back.pop_front();
flattened.push_back(new_value);
flattened.append(&mut back);
}
flattened.pop_front().unwrap().value
}
}
impl Add for SnailfishNumber {
type Output = SnailfishNumber;
fn add(self, rhs: Self) -> Self::Output {
let mut values = LinkedList::new();
for v in self
.values
.iter()
.chain(rhs.values.iter())
.map(|v| v.deeper())
{
values.push_back(v)
}
let mut sfn = SnailfishNumber { values };
sfn.reduce();
sfn
}
}
impl FromStr for SnailfishNumber {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let values = parser()
.parse(s)
.map_err(|s| s.into_iter().map(|e| e.to_string()).collect::<String>())?;
Ok(SnailfishNumber { values })
}
}
enum ValueOrList {
Value(SnailfishValue),
List(LinkedList<SnailfishValue>),
}
fn parser() -> impl Parser<char, LinkedList<SnailfishValue>, Error = Simple<char>> {
recursive(|pair| {
let int = text::int(10)
.map(|s: String| s.parse::<u32>().unwrap())
.map(|i| ValueOrList::Value(SnailfishValue { value: i, depth: 0 }));
let int_or_nested_pair = int.or(pair.map(|p| ValueOrList::List(p)));
let inner_pair = int_or_nested_pair
.clone()
.then_ignore(just(','))
.then(int_or_nested_pair)
.map(|(l, r)| {
let mut list = LinkedList::new();
match l {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
match r {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
list
});
inner_pair.delimited_by('[', ']')
})
}
fn part1(input: &str) -> Result<u32, Box<dyn std::error::Error>> {
let mut sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<LinkedList<_>, _>>()?;
let first = sfns.pop_front().ok_or(format!("No numbers were parsed"))?;
let result = sfns.into_iter().fold(first, |a, x| a + x);
Ok(result.magnitude())
}
fn part2(input: &str) -> Result<u32, Box<dyn std::error::Error>> {
let sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<Vec<_>, _>>()?;
let pairs = sfns.into_iter().permutations(2);
let magnitudes = pairs.map(|p| (p[0].clone() + p[1].clone()).magnitude());
let answer = magnitudes.max().ok_or(format!("No numbers were parsed"))?;
Ok(answer)
}
#[test]
fn test_parser() {
let pair = SnailfishNumber::from_str("[1,2]").unwrap();
assert_eq!(
pair.values.into_iter().collect::<Vec<_>>(),
vec![
SnailfishValue { value: 1, depth: 0 },
SnailfishValue { value: 2, depth: 0 }
]
);
}
#[test]
fn test_split() {
let mut must_split = SnailfishNumber::from_str("[[[[0,7],4],[15,[0,13]]],[1,1]]").unwrap();
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,13]]],[1,1]]").unwrap()
);
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]").unwrap()
)
}
#[test]
fn test_explode() {
let mut must_explode = SnailfishNumber::from_str("[[[[[9,8],1],2],3],4]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[[[0,9],2],3],4]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[7,[6,[5,[4,[3,2]]]]]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[7,[6,[5,[7,0]]]]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[[6,[5,[4,[3,2]]]],1]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[6,[5,[7,0]]],3]").unwrap()
);
}
#[test]
fn test_part1_samples() {
fn test_sample(left: &str, right: &str, result: &str) {
let left = SnailfishNumber::from_str(left).unwrap();
let right = SnailfishNumber::from_str(right).unwrap();
let result = SnailfishNumber::from_str(result).unwrap();
assert_eq!(left + right, result);
}
test_sample(
"[[[[4,3],4],4],[7,[[8,4],9]]]",
"[1,1]",
"[[[[0,7],4],[[7,8],[6,0]]],[8,1]]",
);
}
#[test]
fn test_magnitude() {
assert_eq!(
SnailfishNumber::from_str("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]")
.unwrap()
.magnitude(),
3488
);
}
#[test]
fn test_part1_sample() {
let result = part1(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]] | [[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 4140);
}
#[test]
fn test_part2_sample() {
let result = part2(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 3993);
} | random_line_split | |
day18.rs | use crate::day::{DayResult, PartResult};
use chumsky::prelude::*;
use itertools::Itertools;
use std::{collections::LinkedList, ops::Add, str::FromStr};
pub fn run() -> Result<DayResult, Box<dyn std::error::Error>> {
let part1 = part1(include_str!("inputs/day18.txt"))?;
let part2 = part2(include_str!("inputs/day18.txt"))?;
Ok(DayResult::new(
PartResult::Success(format!("The answer is {}", part1)),
PartResult::Success(format!(
"The largest magnitude from two numbers is {}",
part2
)),
))
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishNumber {
values: LinkedList<SnailfishValue>,
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishValue {
value: u32,
depth: usize,
}
impl SnailfishValue {
fn | (&self) -> Self {
Self {
value: self.value,
depth: self.depth + 1,
}
}
}
impl SnailfishNumber {
fn split(&mut self) -> bool {
if let Some(index_to_split) = self
.values
.iter()
.enumerate()
.filter(|(_, v)| v.value >= 10)
.map(|(i, _)| i)
.next()
{
let mut back_side = self.values.split_off(index_to_split);
let split_num = back_side.pop_front().unwrap();
let new_depth = split_num.depth + 1;
self.values.push_back(SnailfishValue {
value: split_num.value / 2,
depth: new_depth,
});
self.values.push_back(SnailfishValue {
value: (split_num.value / 2) + (split_num.value % 2),
depth: new_depth,
});
self.values.append(&mut back_side);
true
} else {
false
}
}
fn explode(&mut self) -> bool {
let explode_pair: Vec<(usize, SnailfishValue)> = self
.values
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth >= 4)
.take(2)
.collect();
if explode_pair.len() < 2 {
// no explosions necessary
return false;
}
let left_index = explode_pair[0].0;
let left_value = &explode_pair[0].1;
let right_index = explode_pair[1].0;
let right_value = &explode_pair[1].1;
if left_index + 1 != right_index {
panic!("Exploding pair don't have neighbouring indicies, the list is corrupted. List: {:?}", self.values);
}
let mut back_side = self.values.split_off(left_index);
// drop the pair we're exploding
back_side.pop_front();
back_side.pop_front();
if let Some(to_left) = self.values.back_mut() {
// need to modify the number to the left
to_left.value += left_value.value;
}
// construct new 0 element
let new_zero = SnailfishValue {
value: 0,
depth: left_value.depth - 1,
};
self.values.push_back(new_zero);
if let Some(to_right) = back_side.front_mut() {
// need to modify the number to the right
to_right.value += right_value.value;
}
self.values.append(&mut back_side);
true
}
fn reduce(&mut self) {
if self.explode() {
self.reduce();
} else {
if self.split() {
self.reduce();
}
}
}
fn magnitude(&self) -> u32 {
let mut flattened = self.values.clone();
while flattened.len() > 1 {
// the first two deepest elements must be a pair
let deepest = flattened.iter().map(|v| v.depth).max().unwrap();
let deepest_two = flattened
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth == deepest)
.take(2)
.collect::<Vec<_>>();
let new_value = SnailfishValue {
value: deepest_two[0].1.value * 3 + deepest_two[1].1.value * 2,
depth: if deepest == 0 { 0 } else { deepest - 1 },
};
let mut back = flattened.split_off(deepest_two[0].0);
back.pop_front();
back.pop_front();
flattened.push_back(new_value);
flattened.append(&mut back);
}
flattened.pop_front().unwrap().value
}
}
impl Add for SnailfishNumber {
type Output = SnailfishNumber;
fn add(self, rhs: Self) -> Self::Output {
let mut values = LinkedList::new();
for v in self
.values
.iter()
.chain(rhs.values.iter())
.map(|v| v.deeper())
{
values.push_back(v)
}
let mut sfn = SnailfishNumber { values };
sfn.reduce();
sfn
}
}
impl FromStr for SnailfishNumber {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let values = parser()
.parse(s)
.map_err(|s| s.into_iter().map(|e| e.to_string()).collect::<String>())?;
Ok(SnailfishNumber { values })
}
}
enum ValueOrList {
Value(SnailfishValue),
List(LinkedList<SnailfishValue>),
}
fn parser() -> impl Parser<char, LinkedList<SnailfishValue>, Error = Simple<char>> {
recursive(|pair| {
let int = text::int(10)
.map(|s: String| s.parse::<u32>().unwrap())
.map(|i| ValueOrList::Value(SnailfishValue { value: i, depth: 0 }));
let int_or_nested_pair = int.or(pair.map(|p| ValueOrList::List(p)));
let inner_pair = int_or_nested_pair
.clone()
.then_ignore(just(','))
.then(int_or_nested_pair)
.map(|(l, r)| {
let mut list = LinkedList::new();
match l {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
match r {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
list
});
inner_pair.delimited_by('[', ']')
})
}
fn part1(input: &str) -> Result<u32, Box<dyn std::error::Error>> {
let mut sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<LinkedList<_>, _>>()?;
let first = sfns.pop_front().ok_or(format!("No numbers were parsed"))?;
let result = sfns.into_iter().fold(first, |a, x| a + x);
Ok(result.magnitude())
}
fn part2(input: &str) -> Result<u32, Box<dyn std::error::Error>> {
let sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<Vec<_>, _>>()?;
let pairs = sfns.into_iter().permutations(2);
let magnitudes = pairs.map(|p| (p[0].clone() + p[1].clone()).magnitude());
let answer = magnitudes.max().ok_or(format!("No numbers were parsed"))?;
Ok(answer)
}
#[test]
fn test_parser() {
let pair = SnailfishNumber::from_str("[1,2]").unwrap();
assert_eq!(
pair.values.into_iter().collect::<Vec<_>>(),
vec![
SnailfishValue { value: 1, depth: 0 },
SnailfishValue { value: 2, depth: 0 }
]
);
}
#[test]
fn test_split() {
let mut must_split = SnailfishNumber::from_str("[[[[0,7],4],[15,[0,13]]],[1,1]]").unwrap();
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,13]]],[1,1]]").unwrap()
);
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]").unwrap()
)
}
#[test]
fn test_explode() {
let mut must_explode = SnailfishNumber::from_str("[[[[[9,8],1],2],3],4]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[[[0,9],2],3],4]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[7,[6,[5,[4,[3,2]]]]]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[7,[6,[5,[7,0]]]]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[[6,[5,[4,[3,2]]]],1]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[6,[5,[7,0]]],3]").unwrap()
);
}
#[test]
fn test_part1_samples() {
fn test_sample(left: &str, right: &str, result: &str) {
let left = SnailfishNumber::from_str(left).unwrap();
let right = SnailfishNumber::from_str(right).unwrap();
let result = SnailfishNumber::from_str(result).unwrap();
assert_eq!(left + right, result);
}
test_sample(
"[[[[4,3],4],4],[7,[[8,4],9]]]",
"[1,1]",
"[[[[0,7],4],[[7,8],[6,0]]],[8,1]]",
);
}
#[test]
fn test_magnitude() {
assert_eq!(
SnailfishNumber::from_str("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]")
.unwrap()
.magnitude(),
3488
);
}
#[test]
fn test_part1_sample() {
let result = part1(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 4140);
}
#[test]
fn test_part2_sample() {
let result = part2(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 3993);
}
| deeper | identifier_name |
day18.rs | use crate::day::{DayResult, PartResult};
use chumsky::prelude::*;
use itertools::Itertools;
use std::{collections::LinkedList, ops::Add, str::FromStr};
pub fn run() -> Result<DayResult, Box<dyn std::error::Error>> {
let part1 = part1(include_str!("inputs/day18.txt"))?;
let part2 = part2(include_str!("inputs/day18.txt"))?;
Ok(DayResult::new(
PartResult::Success(format!("The answer is {}", part1)),
PartResult::Success(format!(
"The largest magnitude from two numbers is {}",
part2
)),
))
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishNumber {
values: LinkedList<SnailfishValue>,
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishValue {
value: u32,
depth: usize,
}
impl SnailfishValue {
fn deeper(&self) -> Self {
Self {
value: self.value,
depth: self.depth + 1,
}
}
}
impl SnailfishNumber {
fn split(&mut self) -> bool {
if let Some(index_to_split) = self
.values
.iter()
.enumerate()
.filter(|(_, v)| v.value >= 10)
.map(|(i, _)| i)
.next()
{
let mut back_side = self.values.split_off(index_to_split);
let split_num = back_side.pop_front().unwrap();
let new_depth = split_num.depth + 1;
self.values.push_back(SnailfishValue {
value: split_num.value / 2,
depth: new_depth,
});
self.values.push_back(SnailfishValue {
value: (split_num.value / 2) + (split_num.value % 2),
depth: new_depth,
});
self.values.append(&mut back_side);
true
} else {
false
}
}
fn explode(&mut self) -> bool {
let explode_pair: Vec<(usize, SnailfishValue)> = self
.values
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth >= 4)
.take(2)
.collect();
if explode_pair.len() < 2 {
// no explosions necessary
return false;
}
let left_index = explode_pair[0].0;
let left_value = &explode_pair[0].1;
let right_index = explode_pair[1].0;
let right_value = &explode_pair[1].1;
if left_index + 1 != right_index {
panic!("Exploding pair don't have neighbouring indicies, the list is corrupted. List: {:?}", self.values);
}
let mut back_side = self.values.split_off(left_index);
// drop the pair we're exploding
back_side.pop_front();
back_side.pop_front();
if let Some(to_left) = self.values.back_mut() {
// need to modify the number to the left
to_left.value += left_value.value;
}
// construct new 0 element
let new_zero = SnailfishValue {
value: 0,
depth: left_value.depth - 1,
};
self.values.push_back(new_zero);
if let Some(to_right) = back_side.front_mut() {
// need to modify the number to the right
to_right.value += right_value.value;
}
self.values.append(&mut back_side);
true
}
fn reduce(&mut self) {
if self.explode() {
self.reduce();
} else {
if self.split() {
self.reduce();
}
}
}
fn magnitude(&self) -> u32 {
let mut flattened = self.values.clone();
while flattened.len() > 1 {
// the first two deepest elements must be a pair
let deepest = flattened.iter().map(|v| v.depth).max().unwrap();
let deepest_two = flattened
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth == deepest)
.take(2)
.collect::<Vec<_>>();
let new_value = SnailfishValue {
value: deepest_two[0].1.value * 3 + deepest_two[1].1.value * 2,
depth: if deepest == 0 { 0 } else { deepest - 1 },
};
let mut back = flattened.split_off(deepest_two[0].0);
back.pop_front();
back.pop_front();
flattened.push_back(new_value);
flattened.append(&mut back);
}
flattened.pop_front().unwrap().value
}
}
impl Add for SnailfishNumber {
type Output = SnailfishNumber;
fn add(self, rhs: Self) -> Self::Output {
let mut values = LinkedList::new();
for v in self
.values
.iter()
.chain(rhs.values.iter())
.map(|v| v.deeper())
{
values.push_back(v)
}
let mut sfn = SnailfishNumber { values };
sfn.reduce();
sfn
}
}
impl FromStr for SnailfishNumber {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let values = parser()
.parse(s)
.map_err(|s| s.into_iter().map(|e| e.to_string()).collect::<String>())?;
Ok(SnailfishNumber { values })
}
}
enum ValueOrList {
Value(SnailfishValue),
List(LinkedList<SnailfishValue>),
}
fn parser() -> impl Parser<char, LinkedList<SnailfishValue>, Error = Simple<char>> {
recursive(|pair| {
let int = text::int(10)
.map(|s: String| s.parse::<u32>().unwrap())
.map(|i| ValueOrList::Value(SnailfishValue { value: i, depth: 0 }));
let int_or_nested_pair = int.or(pair.map(|p| ValueOrList::List(p)));
let inner_pair = int_or_nested_pair
.clone()
.then_ignore(just(','))
.then(int_or_nested_pair)
.map(|(l, r)| {
let mut list = LinkedList::new();
match l {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
match r {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
list
});
inner_pair.delimited_by('[', ']')
})
}
fn part1(input: &str) -> Result<u32, Box<dyn std::error::Error>> |
fn part2(input: &str) -> Result<u32, Box<dyn std::error::Error>> {
let sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<Vec<_>, _>>()?;
let pairs = sfns.into_iter().permutations(2);
let magnitudes = pairs.map(|p| (p[0].clone() + p[1].clone()).magnitude());
let answer = magnitudes.max().ok_or(format!("No numbers were parsed"))?;
Ok(answer)
}
#[test]
fn test_parser() {
let pair = SnailfishNumber::from_str("[1,2]").unwrap();
assert_eq!(
pair.values.into_iter().collect::<Vec<_>>(),
vec![
SnailfishValue { value: 1, depth: 0 },
SnailfishValue { value: 2, depth: 0 }
]
);
}
#[test]
fn test_split() {
let mut must_split = SnailfishNumber::from_str("[[[[0,7],4],[15,[0,13]]],[1,1]]").unwrap();
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,13]]],[1,1]]").unwrap()
);
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]").unwrap()
)
}
#[test]
fn test_explode() {
let mut must_explode = SnailfishNumber::from_str("[[[[[9,8],1],2],3],4]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[[[0,9],2],3],4]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[7,[6,[5,[4,[3,2]]]]]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[7,[6,[5,[7,0]]]]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[[6,[5,[4,[3,2]]]],1]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[6,[5,[7,0]]],3]").unwrap()
);
}
#[test]
fn test_part1_samples() {
fn test_sample(left: &str, right: &str, result: &str) {
let left = SnailfishNumber::from_str(left).unwrap();
let right = SnailfishNumber::from_str(right).unwrap();
let result = SnailfishNumber::from_str(result).unwrap();
assert_eq!(left + right, result);
}
test_sample(
"[[[[4,3],4],4],[7,[[8,4],9]]]",
"[1,1]",
"[[[[0,7],4],[[7,8],[6,0]]],[8,1]]",
);
}
#[test]
fn test_magnitude() {
assert_eq!(
SnailfishNumber::from_str("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]")
.unwrap()
.magnitude(),
3488
);
}
#[test]
fn test_part1_sample() {
let result = part1(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 4140);
}
#[test]
fn test_part2_sample() {
let result = part2(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 3993);
}
| {
let mut sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<LinkedList<_>, _>>()?;
let first = sfns.pop_front().ok_or(format!("No numbers were parsed"))?;
let result = sfns.into_iter().fold(first, |a, x| a + x);
Ok(result.magnitude())
} | identifier_body |
stream.rs | #![allow(dead_code)]
use libc;
use listpack::*;
use rax::*;
use sds::*;
use std;
use std::default::Default;
use std::fmt;
use std::mem::size_of;
use std::ptr;
pub struct Stream {
pub s: *mut stream,
}
const STREAM_ID: StreamID = StreamID { ms: 0, seq: 0 };
const STREAM_ID_REF: *const StreamID = &STREAM_ID as *const StreamID;
impl Stream {
pub fn new() -> Stream {
return Stream { s: unsafe { streamNew() } };
}
fn lookup_consumer_group(&self, groupname: Sds) -> *mut streamCG {
unsafe { streamLookupCG(self.s, groupname) }
}
pub fn append() {}
pub fn append_vector(&self, fields: *mut Sds, len: usize) -> StreamID {
unsafe {
let added_id: StreamID = std::mem::uninitialized();
streamAppendItemSDSMap(
self.s,
fields,
// &fields as *mut *mut _ as *mut *mut libc::c_void,
len as i64,
&added_id,
ptr::null_mut(),
);
added_id
}
}
// pub fn append(&self, fields: &mut Vec<Sds>) {
// unsafe {
// let mut added_id: *mut StreamID = ptr::null_mut();
//// let mut added_id: *mut StreamID = ptr::null_mut();
//
// streamAppendItem2(
// self.s,
// fields.as_mut_ptr(),
// fields.len() as i64,
// added_id,
// ptr::null_mut(),
// )
// }
// }
pub fn append_stream() {}
}
//
impl Drop for Stream {
fn drop(&mut self) {
unsafe { freeStream(self.s) }
}
}
#[derive(Copy)]
#[repr(C)]
pub struct StreamID {
ms: libc::uint64_t,
seq: libc::uint64_t,
}
impl fmt::Debug for StreamID {
fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl Default for StreamID {
fn default() -> Self {
StreamID { ms: 0, seq: 0 }
}
}
impl Clone for StreamID {
fn clone(&self) -> Self {
StreamID { ms: self.ms, seq: self.seq }
}
}
impl RaxKey for StreamID {
type Output = StreamID;
fn encode(self) -> Self::Output {
StreamID {
ms: self.ms.to_be(),
seq: self.seq.to_be(),
}
}
fn to_buf(&self) -> (*const u8, usize) {
(self as *const _ as *const u8, size_of::<StreamID>())
}
fn from_buf(ptr: *const u8, len: usize) -> StreamID {
if len != size_of::<StreamID>() |
unsafe {
StreamID {
ms: u64::from_be(*(ptr as *mut [u8; 8] as *mut u64)),
seq: u64::from_be(*(ptr.offset(8) as *mut [u8; 8] as *mut u64)),
}
}
}
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct EntryPack;
//use std::fs::File;
//use std::io::prelude::*;
impl EntryPack {
fn read_from_from() {
// std::fs::File::open()
//
// let mut f = File::open(filename).expect("file not found");
//
// let mut contents = String::new();
// f.read_to_string(&mut contents)
// .expect("something went wrong reading the file");
}
fn deserialize() -> *mut listpack {
std::ptr::null_mut()
}
fn append(lp: *mut listpack, fields: &[Sds]) -> *mut listpack {
/* Create a new listpack and radix tree node if needed. Note that when
* a new listpack is created, we populate it with a "master entry". This
* is just a set of fields that is taken as refernce in order to compress
* the stream entries that we'll add inside the listpack.
*
* Note that while we use the first added entry fields to create
* the master entry, the first added entry is NOT represented in the master
* entry, which is a stand alone object. But of course, the first entry
* will compress well because it's used as reference.
*
* The master entry is composed like in the following example:
*
* +-------+---------+------------+---------+--/--+---------+---------+-+
* | count | deleted | num-fields | field_1 | field_2 | ... | field_N |0|
* +-------+---------+------------+---------+--/--+---------+---------+-+
*
* count and deleted just represent respectively the total number of
* entries inside the listpack that are valid, and marked as deleted
* (delted flag in the entry flags set). So the total number of items
* actually inside the listpack (both deleted and not) is count+deleted.
*
* The real entries will be encoded with an ID that is just the
* millisecond and sequence difference compared to the key stored at
* the radix tree node containing the listpack (delta encoding), and
* if the fields of the entry are the same as the master enty fields, the
* entry flags will specify this fact and the entry fields and number
* of fields will be omitted (see later in the code of this function).
*
* The "0" entry at the end is the same as the 'lp-count' entry in the
* regular stream entries (see below), and marks the fact that there are
* no more entries, when we scan the stream from right to left. */
unsafe {
if lp.is_null() {
let numfields = fields.len() / 2;
let mut lp = lpNew();
lp = lpAppendInteger(lp, 1);
lp = lpAppendInteger(lp, 0);
lp = lpAppendInteger(lp, numfields as i64);
for i in 0..numfields {
let field = fields[i * 2];
lp = lpAppend(lp, field as *mut u8, get_len(field) as u32);
}
lp = lpAppendInteger(lp, 0); /* Master entry zero terminator. */
}
}
lp
}
// fn append(lp: *mut listpack, map: &[Sds]) -> (*mut listpack, bool) {
// (lp, true)
// }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct stream {
rax: *mut ::rax::rax,
length: libc::uint64_t,
last_id: StreamID,
cgroups: *mut u8,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamIterator;
// stream: *mut stream,
// master_id: StreamID,
// master_fields_count: libc::uint64_t,
// master_fields_start
//}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamCG {
last_id: StreamID,
pel: *mut rax,
consumers: *mut rax,
}
//#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamConsumer {
seen_time: libc::c_longlong,
name: Sds,
pel: *mut rax,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamNACK {
delivery_time: libc::c_longlong,
delivery_count: libc::uint64_t,
consumer: *mut streamConsumer,
}
#[allow(improper_ctypes)]
#[allow(non_snake_case)]
#[link(name = "redismodule", kind = "static")]
extern "C" {
// fn createObject()
fn streamNew() -> *mut stream;
fn freeStream(s: *mut stream);
fn streamAppendItemSDSMap(
s: *mut stream,
argv: *mut Sds,
numfields: libc::int64_t,
added_id: *const StreamID,
use_id: *mut StreamID,
);
fn streamIteratorStart(
si: *mut streamIterator,
s: *mut stream,
start: StreamID,
end: StreamID,
rev: libc::c_int,
);
fn streamIteratorGetID(
si: *mut streamIterator,
id: *mut StreamID,
numfields: *mut libc::int64_t,
) -> libc::c_int;
fn streamIteratorGetField(
si: *mut streamIterator,
fieldptr: *mut *mut u8,
valueptr: *mut *mut u8,
fieldlen: *mut libc::int64_t,
valuelen: *mut libc::int64_t,
);
fn streamIteratorRemoveEntry(
si: *mut streamIterator,
id: *mut StreamID,
) -> libc::c_int;
fn streamIteratorStop(
si: *mut streamIterator,
) -> libc::c_int;
fn streamDeleteItem(
s: *mut stream,
id: *mut StreamID,
) -> libc::c_int;
fn string2ull(
s: *const libc::c_char,
value: *mut libc::uint64_t,
) -> libc::c_int;
fn streamCreateNACK(
consumer: *mut streamConsumer
) -> *mut streamNACK;
fn streamFreeNACK(
na: *mut streamNACK
);
fn streamFreeConsumer(
sc: *mut streamConsumer
);
fn streamCreateCG(
s: *mut stream,
name: *mut libc::c_char,
namelen: libc::size_t, id: *mut StreamID,
) -> *mut streamCG;
fn streamFreeCG(cg: *mut streamCG);
fn streamLookupCG(
s: *mut stream,
groupname: Sds,
) -> *mut streamCG;
fn streamLookupConsumer(
cg: *mut streamCG,
name: Sds,
create: libc::c_int,
) -> *mut streamConsumer;
fn streamDelConsumer(
cg: *mut streamCG,
name: Sds,
) -> libc::uint64_t;
}
#[cfg(test)]
mod tests {
use rax::*;
use sds;
// use std;
use stream::Stream;
#[test]
fn it_works() {
let s = Stream::new();
// let mut array = ArrayVec::from([
// sds::sds_new("id"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
// ]);
let mut x = [
sds::new("128"),
sds::new("123"),
sds::new("1234"),
sds::new("12345"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
];
let ss = sds::new("hi");
// sds::sds_len(ss);
println!("{}", sds::get_len(ss));
// sds::sds_dup(x[0]);
// sds::sds_dup(x[1]);
for _ in 0..1000 {
let mut id = s.append_vector(x.as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
unsafe {
raxShow((*s.s).rax);
}
// let mut id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
// id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
} | {
return StreamID::default();
} | conditional_block |
stream.rs | #![allow(dead_code)]
use libc;
use listpack::*;
use rax::*;
use sds::*;
use std;
use std::default::Default;
use std::fmt;
use std::mem::size_of;
use std::ptr;
pub struct Stream {
pub s: *mut stream,
}
const STREAM_ID: StreamID = StreamID { ms: 0, seq: 0 };
const STREAM_ID_REF: *const StreamID = &STREAM_ID as *const StreamID;
impl Stream {
pub fn new() -> Stream {
return Stream { s: unsafe { streamNew() } };
}
fn lookup_consumer_group(&self, groupname: Sds) -> *mut streamCG {
unsafe { streamLookupCG(self.s, groupname) }
}
pub fn append() {}
pub fn append_vector(&self, fields: *mut Sds, len: usize) -> StreamID {
unsafe {
let added_id: StreamID = std::mem::uninitialized();
streamAppendItemSDSMap(
self.s,
fields,
// &fields as *mut *mut _ as *mut *mut libc::c_void,
len as i64,
&added_id,
ptr::null_mut(),
);
added_id
}
}
// pub fn append(&self, fields: &mut Vec<Sds>) {
// unsafe {
// let mut added_id: *mut StreamID = ptr::null_mut();
//// let mut added_id: *mut StreamID = ptr::null_mut();
//
// streamAppendItem2(
// self.s,
// fields.as_mut_ptr(),
// fields.len() as i64,
// added_id,
// ptr::null_mut(),
// )
// }
// }
pub fn append_stream() {}
}
//
impl Drop for Stream {
fn drop(&mut self) {
unsafe { freeStream(self.s) }
}
}
#[derive(Copy)]
#[repr(C)]
pub struct StreamID {
ms: libc::uint64_t,
seq: libc::uint64_t,
}
impl fmt::Debug for StreamID {
fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl Default for StreamID {
fn default() -> Self {
StreamID { ms: 0, seq: 0 }
}
}
impl Clone for StreamID {
fn clone(&self) -> Self {
StreamID { ms: self.ms, seq: self.seq }
}
}
impl RaxKey for StreamID {
type Output = StreamID;
fn encode(self) -> Self::Output {
StreamID {
ms: self.ms.to_be(),
seq: self.seq.to_be(),
}
}
fn | (&self) -> (*const u8, usize) {
(self as *const _ as *const u8, size_of::<StreamID>())
}
fn from_buf(ptr: *const u8, len: usize) -> StreamID {
if len != size_of::<StreamID>() {
return StreamID::default();
}
unsafe {
StreamID {
ms: u64::from_be(*(ptr as *mut [u8; 8] as *mut u64)),
seq: u64::from_be(*(ptr.offset(8) as *mut [u8; 8] as *mut u64)),
}
}
}
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct EntryPack;
//use std::fs::File;
//use std::io::prelude::*;
impl EntryPack {
fn read_from_from() {
// std::fs::File::open()
//
// let mut f = File::open(filename).expect("file not found");
//
// let mut contents = String::new();
// f.read_to_string(&mut contents)
// .expect("something went wrong reading the file");
}
fn deserialize() -> *mut listpack {
std::ptr::null_mut()
}
fn append(lp: *mut listpack, fields: &[Sds]) -> *mut listpack {
/* Create a new listpack and radix tree node if needed. Note that when
* a new listpack is created, we populate it with a "master entry". This
* is just a set of fields that is taken as refernce in order to compress
* the stream entries that we'll add inside the listpack.
*
* Note that while we use the first added entry fields to create
* the master entry, the first added entry is NOT represented in the master
* entry, which is a stand alone object. But of course, the first entry
* will compress well because it's used as reference.
*
* The master entry is composed like in the following example:
*
* +-------+---------+------------+---------+--/--+---------+---------+-+
* | count | deleted | num-fields | field_1 | field_2 | ... | field_N |0|
* +-------+---------+------------+---------+--/--+---------+---------+-+
*
* count and deleted just represent respectively the total number of
* entries inside the listpack that are valid, and marked as deleted
* (delted flag in the entry flags set). So the total number of items
* actually inside the listpack (both deleted and not) is count+deleted.
*
* The real entries will be encoded with an ID that is just the
* millisecond and sequence difference compared to the key stored at
* the radix tree node containing the listpack (delta encoding), and
* if the fields of the entry are the same as the master enty fields, the
* entry flags will specify this fact and the entry fields and number
* of fields will be omitted (see later in the code of this function).
*
* The "0" entry at the end is the same as the 'lp-count' entry in the
* regular stream entries (see below), and marks the fact that there are
* no more entries, when we scan the stream from right to left. */
unsafe {
if lp.is_null() {
let numfields = fields.len() / 2;
let mut lp = lpNew();
lp = lpAppendInteger(lp, 1);
lp = lpAppendInteger(lp, 0);
lp = lpAppendInteger(lp, numfields as i64);
for i in 0..numfields {
let field = fields[i * 2];
lp = lpAppend(lp, field as *mut u8, get_len(field) as u32);
}
lp = lpAppendInteger(lp, 0); /* Master entry zero terminator. */
}
}
lp
}
// fn append(lp: *mut listpack, map: &[Sds]) -> (*mut listpack, bool) {
// (lp, true)
// }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct stream {
rax: *mut ::rax::rax,
length: libc::uint64_t,
last_id: StreamID,
cgroups: *mut u8,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamIterator;
// stream: *mut stream,
// master_id: StreamID,
// master_fields_count: libc::uint64_t,
// master_fields_start
//}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamCG {
last_id: StreamID,
pel: *mut rax,
consumers: *mut rax,
}
//#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamConsumer {
seen_time: libc::c_longlong,
name: Sds,
pel: *mut rax,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamNACK {
delivery_time: libc::c_longlong,
delivery_count: libc::uint64_t,
consumer: *mut streamConsumer,
}
#[allow(improper_ctypes)]
#[allow(non_snake_case)]
#[link(name = "redismodule", kind = "static")]
extern "C" {
// fn createObject()
fn streamNew() -> *mut stream;
fn freeStream(s: *mut stream);
fn streamAppendItemSDSMap(
s: *mut stream,
argv: *mut Sds,
numfields: libc::int64_t,
added_id: *const StreamID,
use_id: *mut StreamID,
);
fn streamIteratorStart(
si: *mut streamIterator,
s: *mut stream,
start: StreamID,
end: StreamID,
rev: libc::c_int,
);
fn streamIteratorGetID(
si: *mut streamIterator,
id: *mut StreamID,
numfields: *mut libc::int64_t,
) -> libc::c_int;
fn streamIteratorGetField(
si: *mut streamIterator,
fieldptr: *mut *mut u8,
valueptr: *mut *mut u8,
fieldlen: *mut libc::int64_t,
valuelen: *mut libc::int64_t,
);
fn streamIteratorRemoveEntry(
si: *mut streamIterator,
id: *mut StreamID,
) -> libc::c_int;
fn streamIteratorStop(
si: *mut streamIterator,
) -> libc::c_int;
fn streamDeleteItem(
s: *mut stream,
id: *mut StreamID,
) -> libc::c_int;
fn string2ull(
s: *const libc::c_char,
value: *mut libc::uint64_t,
) -> libc::c_int;
fn streamCreateNACK(
consumer: *mut streamConsumer
) -> *mut streamNACK;
fn streamFreeNACK(
na: *mut streamNACK
);
fn streamFreeConsumer(
sc: *mut streamConsumer
);
fn streamCreateCG(
s: *mut stream,
name: *mut libc::c_char,
namelen: libc::size_t, id: *mut StreamID,
) -> *mut streamCG;
fn streamFreeCG(cg: *mut streamCG);
fn streamLookupCG(
s: *mut stream,
groupname: Sds,
) -> *mut streamCG;
fn streamLookupConsumer(
cg: *mut streamCG,
name: Sds,
create: libc::c_int,
) -> *mut streamConsumer;
fn streamDelConsumer(
cg: *mut streamCG,
name: Sds,
) -> libc::uint64_t;
}
#[cfg(test)]
mod tests {
use rax::*;
use sds;
// use std;
use stream::Stream;
#[test]
fn it_works() {
let s = Stream::new();
// let mut array = ArrayVec::from([
// sds::sds_new("id"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
// ]);
let mut x = [
sds::new("128"),
sds::new("123"),
sds::new("1234"),
sds::new("12345"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
];
let ss = sds::new("hi");
// sds::sds_len(ss);
println!("{}", sds::get_len(ss));
// sds::sds_dup(x[0]);
// sds::sds_dup(x[1]);
for _ in 0..1000 {
let mut id = s.append_vector(x.as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
unsafe {
raxShow((*s.s).rax);
}
// let mut id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
// id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
} | to_buf | identifier_name |
stream.rs | #![allow(dead_code)]
use libc;
use listpack::*;
use rax::*;
use sds::*;
use std;
use std::default::Default;
use std::fmt;
use std::mem::size_of;
use std::ptr;
pub struct Stream {
pub s: *mut stream,
}
const STREAM_ID: StreamID = StreamID { ms: 0, seq: 0 };
const STREAM_ID_REF: *const StreamID = &STREAM_ID as *const StreamID;
impl Stream {
pub fn new() -> Stream {
return Stream { s: unsafe { streamNew() } };
}
fn lookup_consumer_group(&self, groupname: Sds) -> *mut streamCG {
unsafe { streamLookupCG(self.s, groupname) }
}
pub fn append() {}
pub fn append_vector(&self, fields: *mut Sds, len: usize) -> StreamID {
unsafe {
let added_id: StreamID = std::mem::uninitialized();
streamAppendItemSDSMap(
self.s,
fields,
// &fields as *mut *mut _ as *mut *mut libc::c_void,
len as i64,
&added_id,
ptr::null_mut(),
);
added_id
}
}
// pub fn append(&self, fields: &mut Vec<Sds>) {
// unsafe {
// let mut added_id: *mut StreamID = ptr::null_mut();
//// let mut added_id: *mut StreamID = ptr::null_mut();
//
// streamAppendItem2(
// self.s,
// fields.as_mut_ptr(),
// fields.len() as i64,
// added_id,
// ptr::null_mut(),
// )
// }
// }
pub fn append_stream() {}
}
//
impl Drop for Stream {
fn drop(&mut self) {
unsafe { freeStream(self.s) }
}
}
#[derive(Copy)]
#[repr(C)]
pub struct StreamID {
ms: libc::uint64_t,
seq: libc::uint64_t,
}
impl fmt::Debug for StreamID {
fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl Default for StreamID {
fn default() -> Self {
StreamID { ms: 0, seq: 0 }
}
}
impl Clone for StreamID {
fn clone(&self) -> Self {
StreamID { ms: self.ms, seq: self.seq }
}
}
impl RaxKey for StreamID {
type Output = StreamID;
fn encode(self) -> Self::Output {
StreamID {
ms: self.ms.to_be(),
seq: self.seq.to_be(),
}
}
fn to_buf(&self) -> (*const u8, usize) {
(self as *const _ as *const u8, size_of::<StreamID>())
}
fn from_buf(ptr: *const u8, len: usize) -> StreamID {
if len != size_of::<StreamID>() {
return StreamID::default();
}
unsafe {
StreamID {
ms: u64::from_be(*(ptr as *mut [u8; 8] as *mut u64)),
seq: u64::from_be(*(ptr.offset(8) as *mut [u8; 8] as *mut u64)),
}
}
}
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct EntryPack;
//use std::fs::File;
//use std::io::prelude::*;
impl EntryPack {
fn read_from_from() {
// std::fs::File::open()
//
// let mut f = File::open(filename).expect("file not found");
//
// let mut contents = String::new();
// f.read_to_string(&mut contents)
// .expect("something went wrong reading the file");
}
fn deserialize() -> *mut listpack {
std::ptr::null_mut()
}
fn append(lp: *mut listpack, fields: &[Sds]) -> *mut listpack {
/* Create a new listpack and radix tree node if needed. Note that when
* a new listpack is created, we populate it with a "master entry". This
* is just a set of fields that is taken as refernce in order to compress
* the stream entries that we'll add inside the listpack.
*
* Note that while we use the first added entry fields to create
* the master entry, the first added entry is NOT represented in the master
* entry, which is a stand alone object. But of course, the first entry
* will compress well because it's used as reference.
*
* The master entry is composed like in the following example:
*
* +-------+---------+------------+---------+--/--+---------+---------+-+
* | count | deleted | num-fields | field_1 | field_2 | ... | field_N |0|
* +-------+---------+------------+---------+--/--+---------+---------+-+
*
* count and deleted just represent respectively the total number of
* entries inside the listpack that are valid, and marked as deleted
* (delted flag in the entry flags set). So the total number of items
* actually inside the listpack (both deleted and not) is count+deleted.
*
* The real entries will be encoded with an ID that is just the
* millisecond and sequence difference compared to the key stored at
* the radix tree node containing the listpack (delta encoding), and
* if the fields of the entry are the same as the master enty fields, the
* entry flags will specify this fact and the entry fields and number
* of fields will be omitted (see later in the code of this function).
*
* The "0" entry at the end is the same as the 'lp-count' entry in the
* regular stream entries (see below), and marks the fact that there are
* no more entries, when we scan the stream from right to left. */
unsafe {
if lp.is_null() {
let numfields = fields.len() / 2;
let mut lp = lpNew();
lp = lpAppendInteger(lp, 1);
lp = lpAppendInteger(lp, 0);
lp = lpAppendInteger(lp, numfields as i64);
for i in 0..numfields {
let field = fields[i * 2];
lp = lpAppend(lp, field as *mut u8, get_len(field) as u32);
}
lp = lpAppendInteger(lp, 0); /* Master entry zero terminator. */
}
}
lp
}
// fn append(lp: *mut listpack, map: &[Sds]) -> (*mut listpack, bool) {
// (lp, true)
// }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct stream {
rax: *mut ::rax::rax,
length: libc::uint64_t,
last_id: StreamID,
cgroups: *mut u8,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamIterator;
// stream: *mut stream,
// master_id: StreamID,
// master_fields_count: libc::uint64_t,
// master_fields_start
//}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamCG {
last_id: StreamID,
pel: *mut rax,
consumers: *mut rax,
}
//#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamConsumer {
seen_time: libc::c_longlong,
name: Sds,
pel: *mut rax,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamNACK {
delivery_time: libc::c_longlong,
delivery_count: libc::uint64_t,
consumer: *mut streamConsumer,
}
#[allow(improper_ctypes)]
#[allow(non_snake_case)]
#[link(name = "redismodule", kind = "static")]
extern "C" {
// fn createObject()
fn streamNew() -> *mut stream;
fn freeStream(s: *mut stream);
fn streamAppendItemSDSMap(
s: *mut stream,
argv: *mut Sds,
numfields: libc::int64_t,
added_id: *const StreamID,
use_id: *mut StreamID,
);
fn streamIteratorStart(
si: *mut streamIterator,
s: *mut stream,
start: StreamID,
end: StreamID,
rev: libc::c_int,
);
fn streamIteratorGetID(
si: *mut streamIterator,
id: *mut StreamID,
numfields: *mut libc::int64_t,
) -> libc::c_int;
fn streamIteratorGetField(
si: *mut streamIterator,
fieldptr: *mut *mut u8,
valueptr: *mut *mut u8,
fieldlen: *mut libc::int64_t,
valuelen: *mut libc::int64_t,
);
fn streamIteratorRemoveEntry(
si: *mut streamIterator,
id: *mut StreamID,
) -> libc::c_int;
fn streamIteratorStop(
si: *mut streamIterator,
) -> libc::c_int;
fn streamDeleteItem(
s: *mut stream,
id: *mut StreamID,
) -> libc::c_int;
fn string2ull( | s: *const libc::c_char,
value: *mut libc::uint64_t,
) -> libc::c_int;
fn streamCreateNACK(
consumer: *mut streamConsumer
) -> *mut streamNACK;
fn streamFreeNACK(
na: *mut streamNACK
);
fn streamFreeConsumer(
sc: *mut streamConsumer
);
fn streamCreateCG(
s: *mut stream,
name: *mut libc::c_char,
namelen: libc::size_t, id: *mut StreamID,
) -> *mut streamCG;
fn streamFreeCG(cg: *mut streamCG);
fn streamLookupCG(
s: *mut stream,
groupname: Sds,
) -> *mut streamCG;
fn streamLookupConsumer(
cg: *mut streamCG,
name: Sds,
create: libc::c_int,
) -> *mut streamConsumer;
fn streamDelConsumer(
cg: *mut streamCG,
name: Sds,
) -> libc::uint64_t;
}
#[cfg(test)]
mod tests {
use rax::*;
use sds;
// use std;
use stream::Stream;
#[test]
fn it_works() {
let s = Stream::new();
// let mut array = ArrayVec::from([
// sds::sds_new("id"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
// ]);
let mut x = [
sds::new("128"),
sds::new("123"),
sds::new("1234"),
sds::new("12345"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
];
let ss = sds::new("hi");
// sds::sds_len(ss);
println!("{}", sds::get_len(ss));
// sds::sds_dup(x[0]);
// sds::sds_dup(x[1]);
for _ in 0..1000 {
let mut id = s.append_vector(x.as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
unsafe {
raxShow((*s.s).rax);
}
// let mut id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
// id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
} | random_line_split | |
stream.rs | #![allow(dead_code)]
use libc;
use listpack::*;
use rax::*;
use sds::*;
use std;
use std::default::Default;
use std::fmt;
use std::mem::size_of;
use std::ptr;
pub struct Stream {
pub s: *mut stream,
}
const STREAM_ID: StreamID = StreamID { ms: 0, seq: 0 };
const STREAM_ID_REF: *const StreamID = &STREAM_ID as *const StreamID;
impl Stream {
pub fn new() -> Stream {
return Stream { s: unsafe { streamNew() } };
}
fn lookup_consumer_group(&self, groupname: Sds) -> *mut streamCG {
unsafe { streamLookupCG(self.s, groupname) }
}
pub fn append() {}
pub fn append_vector(&self, fields: *mut Sds, len: usize) -> StreamID {
unsafe {
let added_id: StreamID = std::mem::uninitialized();
streamAppendItemSDSMap(
self.s,
fields,
// &fields as *mut *mut _ as *mut *mut libc::c_void,
len as i64,
&added_id,
ptr::null_mut(),
);
added_id
}
}
// pub fn append(&self, fields: &mut Vec<Sds>) {
// unsafe {
// let mut added_id: *mut StreamID = ptr::null_mut();
//// let mut added_id: *mut StreamID = ptr::null_mut();
//
// streamAppendItem2(
// self.s,
// fields.as_mut_ptr(),
// fields.len() as i64,
// added_id,
// ptr::null_mut(),
// )
// }
// }
pub fn append_stream() {}
}
//
impl Drop for Stream {
fn drop(&mut self) {
unsafe { freeStream(self.s) }
}
}
#[derive(Copy)]
#[repr(C)]
pub struct StreamID {
ms: libc::uint64_t,
seq: libc::uint64_t,
}
impl fmt::Debug for StreamID {
fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl Default for StreamID {
fn default() -> Self {
StreamID { ms: 0, seq: 0 }
}
}
impl Clone for StreamID {
fn clone(&self) -> Self {
StreamID { ms: self.ms, seq: self.seq }
}
}
impl RaxKey for StreamID {
type Output = StreamID;
fn encode(self) -> Self::Output {
StreamID {
ms: self.ms.to_be(),
seq: self.seq.to_be(),
}
}
fn to_buf(&self) -> (*const u8, usize) {
(self as *const _ as *const u8, size_of::<StreamID>())
}
fn from_buf(ptr: *const u8, len: usize) -> StreamID {
if len != size_of::<StreamID>() {
return StreamID::default();
}
unsafe {
StreamID {
ms: u64::from_be(*(ptr as *mut [u8; 8] as *mut u64)),
seq: u64::from_be(*(ptr.offset(8) as *mut [u8; 8] as *mut u64)),
}
}
}
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct EntryPack;
//use std::fs::File;
//use std::io::prelude::*;
impl EntryPack {
fn read_from_from() |
fn deserialize() -> *mut listpack {
std::ptr::null_mut()
}
fn append(lp: *mut listpack, fields: &[Sds]) -> *mut listpack {
/* Create a new listpack and radix tree node if needed. Note that when
* a new listpack is created, we populate it with a "master entry". This
* is just a set of fields that is taken as refernce in order to compress
* the stream entries that we'll add inside the listpack.
*
* Note that while we use the first added entry fields to create
* the master entry, the first added entry is NOT represented in the master
* entry, which is a stand alone object. But of course, the first entry
* will compress well because it's used as reference.
*
* The master entry is composed like in the following example:
*
* +-------+---------+------------+---------+--/--+---------+---------+-+
* | count | deleted | num-fields | field_1 | field_2 | ... | field_N |0|
* +-------+---------+------------+---------+--/--+---------+---------+-+
*
* count and deleted just represent respectively the total number of
* entries inside the listpack that are valid, and marked as deleted
* (delted flag in the entry flags set). So the total number of items
* actually inside the listpack (both deleted and not) is count+deleted.
*
* The real entries will be encoded with an ID that is just the
* millisecond and sequence difference compared to the key stored at
* the radix tree node containing the listpack (delta encoding), and
* if the fields of the entry are the same as the master enty fields, the
* entry flags will specify this fact and the entry fields and number
* of fields will be omitted (see later in the code of this function).
*
* The "0" entry at the end is the same as the 'lp-count' entry in the
* regular stream entries (see below), and marks the fact that there are
* no more entries, when we scan the stream from right to left. */
unsafe {
if lp.is_null() {
let numfields = fields.len() / 2;
let mut lp = lpNew();
lp = lpAppendInteger(lp, 1);
lp = lpAppendInteger(lp, 0);
lp = lpAppendInteger(lp, numfields as i64);
for i in 0..numfields {
let field = fields[i * 2];
lp = lpAppend(lp, field as *mut u8, get_len(field) as u32);
}
lp = lpAppendInteger(lp, 0); /* Master entry zero terminator. */
}
}
lp
}
// fn append(lp: *mut listpack, map: &[Sds]) -> (*mut listpack, bool) {
// (lp, true)
// }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct stream {
rax: *mut ::rax::rax,
length: libc::uint64_t,
last_id: StreamID,
cgroups: *mut u8,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamIterator;
// stream: *mut stream,
// master_id: StreamID,
// master_fields_count: libc::uint64_t,
// master_fields_start
//}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamCG {
last_id: StreamID,
pel: *mut rax,
consumers: *mut rax,
}
//#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamConsumer {
seen_time: libc::c_longlong,
name: Sds,
pel: *mut rax,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamNACK {
delivery_time: libc::c_longlong,
delivery_count: libc::uint64_t,
consumer: *mut streamConsumer,
}
#[allow(improper_ctypes)]
#[allow(non_snake_case)]
#[link(name = "redismodule", kind = "static")]
extern "C" {
// fn createObject()
fn streamNew() -> *mut stream;
fn freeStream(s: *mut stream);
fn streamAppendItemSDSMap(
s: *mut stream,
argv: *mut Sds,
numfields: libc::int64_t,
added_id: *const StreamID,
use_id: *mut StreamID,
);
fn streamIteratorStart(
si: *mut streamIterator,
s: *mut stream,
start: StreamID,
end: StreamID,
rev: libc::c_int,
);
fn streamIteratorGetID(
si: *mut streamIterator,
id: *mut StreamID,
numfields: *mut libc::int64_t,
) -> libc::c_int;
fn streamIteratorGetField(
si: *mut streamIterator,
fieldptr: *mut *mut u8,
valueptr: *mut *mut u8,
fieldlen: *mut libc::int64_t,
valuelen: *mut libc::int64_t,
);
fn streamIteratorRemoveEntry(
si: *mut streamIterator,
id: *mut StreamID,
) -> libc::c_int;
fn streamIteratorStop(
si: *mut streamIterator,
) -> libc::c_int;
fn streamDeleteItem(
s: *mut stream,
id: *mut StreamID,
) -> libc::c_int;
fn string2ull(
s: *const libc::c_char,
value: *mut libc::uint64_t,
) -> libc::c_int;
fn streamCreateNACK(
consumer: *mut streamConsumer
) -> *mut streamNACK;
fn streamFreeNACK(
na: *mut streamNACK
);
fn streamFreeConsumer(
sc: *mut streamConsumer
);
fn streamCreateCG(
s: *mut stream,
name: *mut libc::c_char,
namelen: libc::size_t, id: *mut StreamID,
) -> *mut streamCG;
fn streamFreeCG(cg: *mut streamCG);
fn streamLookupCG(
s: *mut stream,
groupname: Sds,
) -> *mut streamCG;
fn streamLookupConsumer(
cg: *mut streamCG,
name: Sds,
create: libc::c_int,
) -> *mut streamConsumer;
fn streamDelConsumer(
cg: *mut streamCG,
name: Sds,
) -> libc::uint64_t;
}
#[cfg(test)]
mod tests {
use rax::*;
use sds;
// use std;
use stream::Stream;
#[test]
fn it_works() {
let s = Stream::new();
// let mut array = ArrayVec::from([
// sds::sds_new("id"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
// ]);
let mut x = [
sds::new("128"),
sds::new("123"),
sds::new("1234"),
sds::new("12345"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
];
let ss = sds::new("hi");
// sds::sds_len(ss);
println!("{}", sds::get_len(ss));
// sds::sds_dup(x[0]);
// sds::sds_dup(x[1]);
for _ in 0..1000 {
let mut id = s.append_vector(x.as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
unsafe {
raxShow((*s.s).rax);
}
// let mut id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
// id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
} | {
// std::fs::File::open()
//
// let mut f = File::open(filename).expect("file not found");
//
// let mut contents = String::new();
// f.read_to_string(&mut contents)
// .expect("something went wrong reading the file");
} | identifier_body |
provision_utils.go | package sparta
import (
"archive/zip"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"text/template"
"github.com/Sirupsen/logrus"
spartaIAM "github.com/mweagle/Sparta/aws/iam"
"github.com/mweagle/cloudformationresources"
gocf "github.com/mweagle/go-cloudformation"
)
const (
// ScratchDirectory is the cwd relative path component
// where intermediate build artifacts are created
ScratchDirectory = ".sparta"
salt = "213EA743-A98F-499D-8FEF-B87015FE13E7"
// The relative path of the custom scripts that is used
// to create the filename relative path when creating the custom archive
provisioningResourcesRelPath = "/resources/provision"
)
// The basename of the scripts that are embedded into CONSTANTS.go
// by `esc` during the generate phase. In order to export these, there
// MUST be a corresponding PROXIED_MODULES entry for the base filename
// in resources/index.js
var customResourceScripts = []string{"sparta_utils.js",
"golang-constants.json"}
var golangCustomResourceTypes = []string{
cloudformationresources.SESLambdaEventSource,
cloudformationresources.S3LambdaEventSource,
cloudformationresources.SNSLambdaEventSource,
cloudformationresources.CloudWatchLogsLambdaEventSource,
cloudformationresources.ZipToS3Bucket,
}
// PushSourceConfigurationActions map stores common IAM Policy Actions for Lambda
// push-source configuration management.
// The configuration is handled by CustomResources inserted into the generated
// CloudFormation template.
var PushSourceConfigurationActions = struct {
SNSLambdaEventSource []string
S3LambdaEventSource []string
SESLambdaEventSource []string
CloudWatchLogsLambdaEventSource []string
}{
SNSLambdaEventSource: []string{"sns:ConfirmSubscription",
"sns:GetTopicAttributes",
"sns:ListSubscriptionsByTopic",
"sns:Subscribe",
"sns:Unsubscribe"},
S3LambdaEventSource: []string{"s3:GetBucketLocation",
"s3:GetBucketNotification",
"s3:PutBucketNotification",
"s3:GetBucketNotificationConfiguration",
"s3:PutBucketNotificationConfiguration"},
SESLambdaEventSource: []string{"ses:CreateReceiptRuleSet",
"ses:CreateReceiptRule",
"ses:DeleteReceiptRule",
"ses:DeleteReceiptRuleSet",
"ses:DescribeReceiptRuleSet"},
CloudWatchLogsLambdaEventSource: []string{"logs:DescribeSubscriptionFilters",
"logs:DeleteSubscriptionFilter",
"logs:PutSubscriptionFilter",
},
}
// Create a stable temporary filename in the current working
// directory
func temporaryFile(name string) (*os.File, error) {
workingDir, err := os.Getwd()
if nil != err {
return nil, err
}
// Put everything in the ./sparta directory
buildDir := filepath.Join(workingDir, ".sparta")
mkdirErr := os.MkdirAll(buildDir, os.ModePerm)
if nil != mkdirErr {
return nil, mkdirErr
}
// Use a stable temporary name
temporaryPath := filepath.Join(buildDir, name)
tmpFile, err := os.Create(temporaryPath)
if err != nil {
return nil, errors.New("Failed to create temporary file: " + err.Error())
}
return tmpFile, nil
}
func runOSCommand(cmd *exec.Cmd, logger *logrus.Logger) error {
logger.WithFields(logrus.Fields{
"Arguments": cmd.Args,
"Dir": cmd.Dir,
"Path": cmd.Path,
"Env": cmd.Env,
}).Debug("Running Command")
outputWriter := logger.Writer()
defer outputWriter.Close()
cmd.Stdout = outputWriter
cmd.Stderr = outputWriter
return cmd.Run()
}
func awsPrincipalToService(awsPrincipalName string) string {
return strings.ToUpper(strings.SplitN(awsPrincipalName, ".", 2)[0])
}
func ensureCustomResourceHandler(serviceName string,
useCGO bool,
customResourceTypeName string,
sourceArn *gocf.StringExpr,
dependsOn []string,
template *gocf.Template,
S3Bucket string,
S3Key string,
logger *logrus.Logger) (string, error) {
// AWS service basename
awsServiceName := awsPrincipalToService(customResourceTypeName)
// Use a stable resource CloudFormation resource name to represent
// the single CustomResource that can configure the different
// PushSource's for the given principal.
keyName, err := json.Marshal(ArbitraryJSONObject{
"Principal": customResourceTypeName,
"ServiceName": awsServiceName,
})
if err != nil {
logger.Error("Failed to create configurator resource name: ", err.Error())
return "", err
}
resourceBaseName := fmt.Sprintf("%sCustomResource", awsServiceName)
subscriberHandlerName := CloudFormationResourceName(resourceBaseName, string(keyName))
//////////////////////////////////////////////////////////////////////////////
// IAM Role definition
iamResourceName, err := ensureIAMRoleForCustomResource(customResourceTypeName,
sourceArn,
template,
logger)
if nil != err {
return "", err
}
iamRoleRef := gocf.GetAtt(iamResourceName, "Arn")
_, exists := template.Resources[subscriberHandlerName]
if !exists {
logger.WithFields(logrus.Fields{
"Service": customResourceTypeName,
}).Debug("Including Lambda CustomResource for AWS Service")
configuratorDescription := customResourceDescription(serviceName, customResourceTypeName)
//////////////////////////////////////////////////////////////////////////////
// Custom Resource Lambda Handler
// The export name MUST correspond to the createForwarder entry that is dynamically
// written into the index.js file during compile in createNewSpartaCustomResourceEntry
handlerName := lambdaExportNameForCustomResourceType(customResourceTypeName)
logger.WithFields(logrus.Fields{
"CustomResourceType": customResourceTypeName,
"ScriptExport": handlerName,
}).Debug("Sparta CloudFormation custom resource handler info")
customResourceHandlerDef := gocf.LambdaFunction{
Code: &gocf.LambdaFunctionCode{
S3Bucket: gocf.String(S3Bucket),
S3Key: gocf.String(S3Key),
},
Description: gocf.String(configuratorDescription),
Handler: gocf.String(handlerName),
Role: iamRoleRef,
Timeout: gocf.Integer(30),
}
if useCGO {
customResourceHandlerDef.Runtime = gocf.String(PythonVersion)
} else {
customResourceHandlerDef.Runtime = gocf.String(NodeJSVersion)
}
cfResource := template.AddResource(subscriberHandlerName, customResourceHandlerDef)
if nil != dependsOn && (len(dependsOn) > 0) {
cfResource.DependsOn = append(cfResource.DependsOn, dependsOn...)
}
}
return subscriberHandlerName, nil
}
// ensureIAMRoleForCustomResource ensures that the single IAM::Role for a single
// AWS principal (eg, s3.*.*) exists, and includes statements for the given
// sourceArn. Sparta uses a single IAM::Role for the CustomResource configuration
// lambda, which is the union of all Arns in the application.
func ensureIAMRoleForCustomResource(awsPrincipalName string,
sourceArn *gocf.StringExpr,
template *gocf.Template,
logger *logrus.Logger) (string, error) {
var principalActions []string
switch awsPrincipalName {
case cloudformationresources.SNSLambdaEventSource:
principalActions = PushSourceConfigurationActions.SNSLambdaEventSource
case cloudformationresources.S3LambdaEventSource:
principalActions = PushSourceConfigurationActions.S3LambdaEventSource
case cloudformationresources.SESLambdaEventSource:
principalActions = PushSourceConfigurationActions.SESLambdaEventSource
case cloudformationresources.CloudWatchLogsLambdaEventSource:
principalActions = PushSourceConfigurationActions.CloudWatchLogsLambdaEventSource
default:
return "", fmt.Errorf("Unsupported principal for IAM role creation: %s", awsPrincipalName)
}
// What's the stable IAMRoleName?
resourceBaseName := fmt.Sprintf("CustomResource%sIAMRole", awsPrincipalToService(awsPrincipalName))
stableRoleName := CloudFormationResourceName(resourceBaseName, awsPrincipalName)
// Ensure it exists, then check to see if this Source ARN is already specified...
// Checking equality with Stringable?
// Create a new Role
var existingIAMRole *gocf.IAMRole
existingResource, exists := template.Resources[stableRoleName]
logger.WithFields(logrus.Fields{
"PrincipalActions": principalActions,
"SourceArn": sourceArn,
}).Debug("Ensuring IAM Role results")
if !exists {
// Insert the IAM role here. We'll walk the policies data in the next section
// to make sure that the sourceARN we have is in the list
statements := CommonIAMStatements.Core
iamPolicyList := gocf.IAMRolePolicyList{}
iamPolicyList = append(iamPolicyList,
gocf.IAMRolePolicy{
PolicyDocument: ArbitraryJSONObject{
"Version": "2012-10-17",
"Statement": statements,
},
PolicyName: gocf.String(fmt.Sprintf("%sPolicy", stableRoleName)),
},
)
existingIAMRole = &gocf.IAMRole{
AssumeRolePolicyDocument: AssumePolicyDocument,
Policies: &iamPolicyList,
}
template.AddResource(stableRoleName, existingIAMRole)
// Create a new IAM Role resource
logger.WithFields(logrus.Fields{
"RoleName": stableRoleName,
}).Debug("Inserting IAM Role")
} else {
existingIAMRole = existingResource.Properties.(*gocf.IAMRole)
}
// Walk the existing statements
if nil != existingIAMRole.Policies |
return "", fmt.Errorf("Unable to find Policies entry for IAM role: %s", stableRoleName)
}
func writeCustomResources(zipWriter *zip.Writer,
logger *logrus.Logger) error {
for _, eachName := range customResourceScripts {
resourceName := fmt.Sprintf("%s/%s", provisioningResourcesRelPath, eachName)
resourceContent := _escFSMustString(false, resourceName)
stringReader := strings.NewReader(resourceContent)
embedWriter, errCreate := zipWriter.Create(eachName)
if nil != errCreate {
return errCreate
}
logger.WithFields(logrus.Fields{
"Name": eachName,
}).Debug("Script name")
_, copyErr := io.Copy(embedWriter, stringReader)
if nil != copyErr {
return copyErr
}
}
return nil
}
func createUserCustomResourceEntry(customResource *customResourceInfo, logger *logrus.Logger) string {
// The resource name is a :: delimited one, so let's sanitize that
// to make it a valid JS identifier
logger.WithFields(logrus.Fields{
"UserFunction": customResource.userFunctionName,
"NodeJSFunctionName": customResource.scriptExportHandlerName(),
}).Debug("Registering User CustomResource function")
primaryEntry := fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
customResource.scriptExportHandlerName(),
customResource.userFunctionName)
return primaryEntry
}
// Return a string representation of a JS function call that can be exposed
// to AWS Lambda
func createNewNodeJSProxyEntry(lambdaInfo *LambdaAWSInfo, logger *logrus.Logger) string {
logger.WithFields(logrus.Fields{
"FunctionName": lambdaInfo.lambdaFunctionName(),
"ScriptName": lambdaInfo.scriptExportHandlerName(),
}).Info("Registering Sparta JS function")
// We do know the CF resource name here - could write this into
// index.js and expose a GET localhost:9000/lambdaMetadata
// which wraps up DescribeStackResource for the running
// lambda function
primaryEntry := fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
lambdaInfo.scriptExportHandlerName(),
lambdaInfo.lambdaFunctionName())
return primaryEntry
}
func createNewSpartaNodeJSCustomResourceEntry(resourceName string, logger *logrus.Logger) string {
// The resource name is a :: delimited one, so let's sanitize that
// to make it a valid JS identifier
jsName := scriptExportNameForCustomResourceType(resourceName)
primaryEntry := fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
jsName,
resourceName)
return primaryEntry
}
func insertNodeJSProxyResources(serviceName string,
executableOutput string,
lambdaAWSInfos []*LambdaAWSInfo,
zipWriter *zip.Writer,
logger *logrus.Logger) error {
// Add the string literal adapter, which requires us to add exported
// functions to the end of index.js. These NodeJS exports will be
// linked to the AWS Lambda NodeJS function name, and are basically
// automatically generated pass through proxies to the golang HTTP handler.
nodeJSWriter, err := zipWriter.Create("index.js")
if err != nil {
return errors.New("Failed to create ZIP entry: index.js")
}
nodeJSSource := _escFSMustString(false, "/resources/index.js")
nodeJSSource += "\n// DO NOT EDIT - CONTENT UNTIL EOF IS AUTOMATICALLY GENERATED\n"
handlerNames := make(map[string]bool, 0)
for _, eachLambda := range lambdaAWSInfos {
if _, exists := handlerNames[eachLambda.scriptExportHandlerName()]; !exists {
nodeJSSource += createNewNodeJSProxyEntry(eachLambda, logger)
handlerNames[eachLambda.scriptExportHandlerName()] = true
}
// USER DEFINED RESOURCES
for _, eachCustomResource := range eachLambda.customResources {
if _, exists := handlerNames[eachCustomResource.scriptExportHandlerName()]; !exists {
nodeJSSource += createUserCustomResourceEntry(eachCustomResource, logger)
handlerNames[eachCustomResource.scriptExportHandlerName()] = true
}
}
}
// SPARTA CUSTOM RESOURCES
for _, eachCustomResourceName := range golangCustomResourceTypes {
nodeJSSource += createNewSpartaNodeJSCustomResourceEntry(eachCustomResourceName, logger)
}
// Finally, replace
// SPARTA_BINARY_NAME = 'Sparta.lambda.amd64';
// with the service binary name
nodeJSSource += fmt.Sprintf("SPARTA_BINARY_NAME='%s';\n", executableOutput)
// And the service name
nodeJSSource += fmt.Sprintf("SPARTA_SERVICE_NAME='%s';\n", serviceName)
logger.WithFields(logrus.Fields{
"index.js": nodeJSSource,
}).Debug("Dynamically generated NodeJS adapter")
stringReader := strings.NewReader(nodeJSSource)
_, copyErr := io.Copy(nodeJSWriter, stringReader)
if nil != copyErr {
return copyErr
}
// Next embed the custom resource scripts into the package.
logger.Debug("Embedding CustomResource scripts")
return writeCustomResources(zipWriter, logger)
}
func pythonFunctionEntry(scriptExportName string,
lambdaFunctionName string,
logger *logrus.Logger) string {
logger.WithFields(logrus.Fields{
"ScriptName": scriptExportName,
"LambdaName": lambdaFunctionName,
}).Debug("Registering Sparta Python function")
return fmt.Sprintf(`def %s(event, context):
return lambda_handler("%s", event, context)
`,
scriptExportName,
lambdaFunctionName)
}
// Return a string representation of a JS function call that can be exposed
// to AWS Lambda
func createNewPythonProxyEntry(lambdaInfo *LambdaAWSInfo, logger *logrus.Logger) string {
logger.WithFields(logrus.Fields{
"FunctionName": lambdaInfo.lambdaFunctionName(),
"ScriptName": lambdaInfo.scriptExportHandlerName(),
}).Info("Registering Sparta Python function")
primaryEntry := fmt.Sprintf(`def %s(event, context):
return lambda_handler(%s, event, context)
`,
lambdaInfo.scriptExportHandlerName(),
lambdaInfo.lambdaFunctionName())
return primaryEntry
}
func createNewSpartaPythonCustomResourceEntry(resourceName string, logger *logrus.Logger) string {
// The resource name is a :: delimited one, so let's sanitize that
// to make it a valid JS identifier
pyName := scriptExportNameForCustomResourceType(resourceName)
return pythonFunctionEntry(pyName, resourceName, logger)
}
func insertPythonProxyResources(serviceName string,
executableOutput string,
lambdaAWSInfos []*LambdaAWSInfo,
zipWriter *zip.Writer,
logger *logrus.Logger) error {
pythonWriter, err := zipWriter.Create("index.py")
if err != nil {
return errors.New("Failed to create ZIP entry: index.py")
}
pythonTemplate := _escFSMustString(false, "/resources/index.template.py")
pythonSource := "\n#DO NOT EDIT - CONTENT UNTIL EOF IS AUTOMATICALLY GENERATED\n"
// Great, let's assemble all the Python function names, then
// supply them to the template expansion to perform the final
// magic
handlerNames := make(map[string]bool, 0)
for _, eachLambda := range lambdaAWSInfos {
if _, exists := handlerNames[eachLambda.scriptExportHandlerName()]; !exists {
pythonSource += pythonFunctionEntry(eachLambda.scriptExportHandlerName(),
eachLambda.lambdaFunctionName(),
logger)
handlerNames[eachLambda.scriptExportHandlerName()] = true
}
// USER DEFINED RESOURCES
for _, eachCustomResource := range eachLambda.customResources {
if _, exists := handlerNames[eachCustomResource.scriptExportHandlerName()]; !exists {
pythonSource += pythonFunctionEntry(eachCustomResource.scriptExportHandlerName(),
eachCustomResource.userFunctionName,
logger)
pythonSource += createUserCustomResourceEntry(eachCustomResource, logger)
handlerNames[eachCustomResource.scriptExportHandlerName()] = true
}
}
}
// SPARTA CUSTOM RESOURCES
for _, eachCustomResourceName := range golangCustomResourceTypes {
pythonSource += createNewSpartaPythonCustomResourceEntry(eachCustomResourceName, logger)
}
// Finally, pump the index.template.py through
// the Go template engine so that we can substitute the
// library name and the python functions we've built up...
data := struct {
LibraryName string
PythonFunctions string
}{
executableOutput,
pythonSource,
}
pyTemplate, pyTemplateErr := template.New("PythonHandler").Parse(pythonTemplate)
if nil != pyTemplateErr {
return pyTemplateErr
}
var pyDoc bytes.Buffer
pyTemplateErr = pyTemplate.Execute(&pyDoc, data)
if nil != pyTemplateErr {
return pyTemplateErr
}
// Log the Python handler...
logger.WithFields(logrus.Fields{
"index.py": pyDoc.String(),
}).Debug("Dynamically generated Python ctypes adapter")
_, copyErr := io.WriteString(pythonWriter, pyDoc.String())
return copyErr
}
func systemGoVersion(logger *logrus.Logger) (string, error) {
runtimeVersion := runtime.Version()
// Get the golang version from the output:
// Matts-MBP:Sparta mweagle$ go version
// go version go1.8.1 darwin/amd64
golangVersionRE := regexp.MustCompile(`go(\d+\.\d+(\.\d+)?)`)
matches := golangVersionRE.FindStringSubmatch(runtimeVersion)
if len(matches) > 2 {
return matches[1], nil
}
logger.WithFields(logrus.Fields{
"Output": runtimeVersion,
}).Warn("Unable to find Golang version using RegExp - using current version")
return runtimeVersion, nil
}
| {
for _, eachPolicy := range *existingIAMRole.Policies {
policyDoc := eachPolicy.PolicyDocument.(ArbitraryJSONObject)
statements := policyDoc["Statement"]
for _, eachStatement := range statements.([]spartaIAM.PolicyStatement) {
if sourceArn.String() == eachStatement.Resource.String() {
logger.WithFields(logrus.Fields{
"RoleName": stableRoleName,
"SourceArn": sourceArn.String(),
}).Debug("SourceArn already exists for IAM Policy")
return stableRoleName, nil
}
}
}
logger.WithFields(logrus.Fields{
"RoleName": stableRoleName,
"Action": principalActions,
"Resource": sourceArn,
}).Debug("Inserting Actions for configuration ARN")
// Add this statement to the first policy, iff the actions are non-empty
if len(principalActions) > 0 {
rootPolicy := (*existingIAMRole.Policies)[0]
rootPolicyDoc := rootPolicy.PolicyDocument.(ArbitraryJSONObject)
rootPolicyStatements := rootPolicyDoc["Statement"].([]spartaIAM.PolicyStatement)
rootPolicyDoc["Statement"] = append(rootPolicyStatements, spartaIAM.PolicyStatement{
Effect: "Allow",
Action: principalActions,
Resource: sourceArn,
})
}
return stableRoleName, nil
} | conditional_block |
provision_utils.go | package sparta
import (
"archive/zip"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"text/template"
"github.com/Sirupsen/logrus"
spartaIAM "github.com/mweagle/Sparta/aws/iam"
"github.com/mweagle/cloudformationresources"
gocf "github.com/mweagle/go-cloudformation"
)
const (
// ScratchDirectory is the cwd relative path component
// where intermediate build artifacts are created
ScratchDirectory = ".sparta"
salt = "213EA743-A98F-499D-8FEF-B87015FE13E7"
// The relative path of the custom scripts that is used
// to create the filename relative path when creating the custom archive
provisioningResourcesRelPath = "/resources/provision"
)
// The basename of the scripts that are embedded into CONSTANTS.go
// by `esc` during the generate phase. In order to export these, there
// MUST be a corresponding PROXIED_MODULES entry for the base filename
// in resources/index.js
var customResourceScripts = []string{"sparta_utils.js",
"golang-constants.json"}
var golangCustomResourceTypes = []string{
cloudformationresources.SESLambdaEventSource,
cloudformationresources.S3LambdaEventSource,
cloudformationresources.SNSLambdaEventSource,
cloudformationresources.CloudWatchLogsLambdaEventSource,
cloudformationresources.ZipToS3Bucket,
}
// PushSourceConfigurationActions map stores common IAM Policy Actions for Lambda
// push-source configuration management.
// The configuration is handled by CustomResources inserted into the generated
// CloudFormation template.
var PushSourceConfigurationActions = struct {
SNSLambdaEventSource []string
S3LambdaEventSource []string
SESLambdaEventSource []string
CloudWatchLogsLambdaEventSource []string
}{
SNSLambdaEventSource: []string{"sns:ConfirmSubscription",
"sns:GetTopicAttributes",
"sns:ListSubscriptionsByTopic",
"sns:Subscribe",
"sns:Unsubscribe"},
S3LambdaEventSource: []string{"s3:GetBucketLocation",
"s3:GetBucketNotification",
"s3:PutBucketNotification",
"s3:GetBucketNotificationConfiguration",
"s3:PutBucketNotificationConfiguration"},
SESLambdaEventSource: []string{"ses:CreateReceiptRuleSet",
"ses:CreateReceiptRule",
"ses:DeleteReceiptRule",
"ses:DeleteReceiptRuleSet",
"ses:DescribeReceiptRuleSet"},
CloudWatchLogsLambdaEventSource: []string{"logs:DescribeSubscriptionFilters",
"logs:DeleteSubscriptionFilter",
"logs:PutSubscriptionFilter",
},
}
// Create a stable temporary filename in the current working
// directory
func temporaryFile(name string) (*os.File, error) {
workingDir, err := os.Getwd()
if nil != err {
return nil, err
}
// Put everything in the ./sparta directory
buildDir := filepath.Join(workingDir, ".sparta")
mkdirErr := os.MkdirAll(buildDir, os.ModePerm)
if nil != mkdirErr {
return nil, mkdirErr
}
// Use a stable temporary name
temporaryPath := filepath.Join(buildDir, name)
tmpFile, err := os.Create(temporaryPath)
if err != nil {
return nil, errors.New("Failed to create temporary file: " + err.Error())
}
return tmpFile, nil
}
func runOSCommand(cmd *exec.Cmd, logger *logrus.Logger) error {
logger.WithFields(logrus.Fields{
"Arguments": cmd.Args,
"Dir": cmd.Dir,
"Path": cmd.Path,
"Env": cmd.Env,
}).Debug("Running Command")
outputWriter := logger.Writer()
defer outputWriter.Close()
cmd.Stdout = outputWriter
cmd.Stderr = outputWriter
return cmd.Run()
}
func awsPrincipalToService(awsPrincipalName string) string {
return strings.ToUpper(strings.SplitN(awsPrincipalName, ".", 2)[0])
}
func ensureCustomResourceHandler(serviceName string,
useCGO bool,
customResourceTypeName string,
sourceArn *gocf.StringExpr,
dependsOn []string,
template *gocf.Template,
S3Bucket string,
S3Key string,
logger *logrus.Logger) (string, error) {
// AWS service basename
awsServiceName := awsPrincipalToService(customResourceTypeName)
// Use a stable resource CloudFormation resource name to represent
// the single CustomResource that can configure the different
// PushSource's for the given principal.
keyName, err := json.Marshal(ArbitraryJSONObject{
"Principal": customResourceTypeName,
"ServiceName": awsServiceName,
})
if err != nil {
logger.Error("Failed to create configurator resource name: ", err.Error())
return "", err
}
resourceBaseName := fmt.Sprintf("%sCustomResource", awsServiceName)
subscriberHandlerName := CloudFormationResourceName(resourceBaseName, string(keyName))
//////////////////////////////////////////////////////////////////////////////
// IAM Role definition
iamResourceName, err := ensureIAMRoleForCustomResource(customResourceTypeName,
sourceArn,
template,
logger)
if nil != err {
return "", err
}
iamRoleRef := gocf.GetAtt(iamResourceName, "Arn")
_, exists := template.Resources[subscriberHandlerName]
if !exists {
logger.WithFields(logrus.Fields{
"Service": customResourceTypeName,
}).Debug("Including Lambda CustomResource for AWS Service")
configuratorDescription := customResourceDescription(serviceName, customResourceTypeName)
//////////////////////////////////////////////////////////////////////////////
// Custom Resource Lambda Handler
// The export name MUST correspond to the createForwarder entry that is dynamically
// written into the index.js file during compile in createNewSpartaCustomResourceEntry
handlerName := lambdaExportNameForCustomResourceType(customResourceTypeName)
logger.WithFields(logrus.Fields{
"CustomResourceType": customResourceTypeName,
"ScriptExport": handlerName,
}).Debug("Sparta CloudFormation custom resource handler info")
customResourceHandlerDef := gocf.LambdaFunction{
Code: &gocf.LambdaFunctionCode{
S3Bucket: gocf.String(S3Bucket),
S3Key: gocf.String(S3Key),
},
Description: gocf.String(configuratorDescription),
Handler: gocf.String(handlerName),
Role: iamRoleRef,
Timeout: gocf.Integer(30),
}
if useCGO {
customResourceHandlerDef.Runtime = gocf.String(PythonVersion)
} else {
customResourceHandlerDef.Runtime = gocf.String(NodeJSVersion)
}
cfResource := template.AddResource(subscriberHandlerName, customResourceHandlerDef)
if nil != dependsOn && (len(dependsOn) > 0) {
cfResource.DependsOn = append(cfResource.DependsOn, dependsOn...)
}
}
return subscriberHandlerName, nil
}
// ensureIAMRoleForCustomResource ensures that the single IAM::Role for a single
// AWS principal (eg, s3.*.*) exists, and includes statements for the given
// sourceArn. Sparta uses a single IAM::Role for the CustomResource configuration
// lambda, which is the union of all Arns in the application.
func ensureIAMRoleForCustomResource(awsPrincipalName string,
sourceArn *gocf.StringExpr,
template *gocf.Template,
logger *logrus.Logger) (string, error) {
var principalActions []string
switch awsPrincipalName {
case cloudformationresources.SNSLambdaEventSource:
principalActions = PushSourceConfigurationActions.SNSLambdaEventSource
case cloudformationresources.S3LambdaEventSource:
principalActions = PushSourceConfigurationActions.S3LambdaEventSource
case cloudformationresources.SESLambdaEventSource:
principalActions = PushSourceConfigurationActions.SESLambdaEventSource
case cloudformationresources.CloudWatchLogsLambdaEventSource:
principalActions = PushSourceConfigurationActions.CloudWatchLogsLambdaEventSource
default:
return "", fmt.Errorf("Unsupported principal for IAM role creation: %s", awsPrincipalName)
}
// What's the stable IAMRoleName?
resourceBaseName := fmt.Sprintf("CustomResource%sIAMRole", awsPrincipalToService(awsPrincipalName))
stableRoleName := CloudFormationResourceName(resourceBaseName, awsPrincipalName)
// Ensure it exists, then check to see if this Source ARN is already specified...
// Checking equality with Stringable?
// Create a new Role
var existingIAMRole *gocf.IAMRole
existingResource, exists := template.Resources[stableRoleName]
logger.WithFields(logrus.Fields{
"PrincipalActions": principalActions,
"SourceArn": sourceArn,
}).Debug("Ensuring IAM Role results")
if !exists {
// Insert the IAM role here. We'll walk the policies data in the next section
// to make sure that the sourceARN we have is in the list
statements := CommonIAMStatements.Core
iamPolicyList := gocf.IAMRolePolicyList{}
iamPolicyList = append(iamPolicyList,
gocf.IAMRolePolicy{
PolicyDocument: ArbitraryJSONObject{
"Version": "2012-10-17",
"Statement": statements,
},
PolicyName: gocf.String(fmt.Sprintf("%sPolicy", stableRoleName)),
},
)
existingIAMRole = &gocf.IAMRole{
AssumeRolePolicyDocument: AssumePolicyDocument,
Policies: &iamPolicyList,
}
template.AddResource(stableRoleName, existingIAMRole)
// Create a new IAM Role resource
logger.WithFields(logrus.Fields{
"RoleName": stableRoleName,
}).Debug("Inserting IAM Role")
} else {
existingIAMRole = existingResource.Properties.(*gocf.IAMRole)
}
// Walk the existing statements
if nil != existingIAMRole.Policies {
for _, eachPolicy := range *existingIAMRole.Policies {
policyDoc := eachPolicy.PolicyDocument.(ArbitraryJSONObject)
statements := policyDoc["Statement"]
for _, eachStatement := range statements.([]spartaIAM.PolicyStatement) {
if sourceArn.String() == eachStatement.Resource.String() {
logger.WithFields(logrus.Fields{
"RoleName": stableRoleName,
"SourceArn": sourceArn.String(),
}).Debug("SourceArn already exists for IAM Policy")
return stableRoleName, nil
}
}
}
logger.WithFields(logrus.Fields{
"RoleName": stableRoleName,
"Action": principalActions,
"Resource": sourceArn,
}).Debug("Inserting Actions for configuration ARN")
// Add this statement to the first policy, iff the actions are non-empty
if len(principalActions) > 0 {
rootPolicy := (*existingIAMRole.Policies)[0]
rootPolicyDoc := rootPolicy.PolicyDocument.(ArbitraryJSONObject)
rootPolicyStatements := rootPolicyDoc["Statement"].([]spartaIAM.PolicyStatement)
rootPolicyDoc["Statement"] = append(rootPolicyStatements, spartaIAM.PolicyStatement{
Effect: "Allow",
Action: principalActions,
Resource: sourceArn,
})
}
return stableRoleName, nil
}
return "", fmt.Errorf("Unable to find Policies entry for IAM role: %s", stableRoleName)
}
func writeCustomResources(zipWriter *zip.Writer,
logger *logrus.Logger) error {
for _, eachName := range customResourceScripts {
resourceName := fmt.Sprintf("%s/%s", provisioningResourcesRelPath, eachName)
resourceContent := _escFSMustString(false, resourceName)
stringReader := strings.NewReader(resourceContent)
embedWriter, errCreate := zipWriter.Create(eachName)
if nil != errCreate {
return errCreate
}
logger.WithFields(logrus.Fields{
"Name": eachName,
}).Debug("Script name")
_, copyErr := io.Copy(embedWriter, stringReader)
if nil != copyErr {
return copyErr
}
}
return nil
}
func createUserCustomResourceEntry(customResource *customResourceInfo, logger *logrus.Logger) string {
// The resource name is a :: delimited one, so let's sanitize that
// to make it a valid JS identifier
logger.WithFields(logrus.Fields{
"UserFunction": customResource.userFunctionName,
"NodeJSFunctionName": customResource.scriptExportHandlerName(),
}).Debug("Registering User CustomResource function")
primaryEntry := fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
customResource.scriptExportHandlerName(),
customResource.userFunctionName)
return primaryEntry
}
// Return a string representation of a JS function call that can be exposed
// to AWS Lambda
func createNewNodeJSProxyEntry(lambdaInfo *LambdaAWSInfo, logger *logrus.Logger) string {
logger.WithFields(logrus.Fields{
"FunctionName": lambdaInfo.lambdaFunctionName(),
"ScriptName": lambdaInfo.scriptExportHandlerName(),
}).Info("Registering Sparta JS function")
// We do know the CF resource name here - could write this into
// index.js and expose a GET localhost:9000/lambdaMetadata
// which wraps up DescribeStackResource for the running
// lambda function
primaryEntry := fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
lambdaInfo.scriptExportHandlerName(),
lambdaInfo.lambdaFunctionName())
return primaryEntry
}
func createNewSpartaNodeJSCustomResourceEntry(resourceName string, logger *logrus.Logger) string |
func insertNodeJSProxyResources(serviceName string,
executableOutput string,
lambdaAWSInfos []*LambdaAWSInfo,
zipWriter *zip.Writer,
logger *logrus.Logger) error {
// Add the string literal adapter, which requires us to add exported
// functions to the end of index.js. These NodeJS exports will be
// linked to the AWS Lambda NodeJS function name, and are basically
// automatically generated pass through proxies to the golang HTTP handler.
nodeJSWriter, err := zipWriter.Create("index.js")
if err != nil {
return errors.New("Failed to create ZIP entry: index.js")
}
nodeJSSource := _escFSMustString(false, "/resources/index.js")
nodeJSSource += "\n// DO NOT EDIT - CONTENT UNTIL EOF IS AUTOMATICALLY GENERATED\n"
handlerNames := make(map[string]bool, 0)
for _, eachLambda := range lambdaAWSInfos {
if _, exists := handlerNames[eachLambda.scriptExportHandlerName()]; !exists {
nodeJSSource += createNewNodeJSProxyEntry(eachLambda, logger)
handlerNames[eachLambda.scriptExportHandlerName()] = true
}
// USER DEFINED RESOURCES
for _, eachCustomResource := range eachLambda.customResources {
if _, exists := handlerNames[eachCustomResource.scriptExportHandlerName()]; !exists {
nodeJSSource += createUserCustomResourceEntry(eachCustomResource, logger)
handlerNames[eachCustomResource.scriptExportHandlerName()] = true
}
}
}
// SPARTA CUSTOM RESOURCES
for _, eachCustomResourceName := range golangCustomResourceTypes {
nodeJSSource += createNewSpartaNodeJSCustomResourceEntry(eachCustomResourceName, logger)
}
// Finally, replace
// SPARTA_BINARY_NAME = 'Sparta.lambda.amd64';
// with the service binary name
nodeJSSource += fmt.Sprintf("SPARTA_BINARY_NAME='%s';\n", executableOutput)
// And the service name
nodeJSSource += fmt.Sprintf("SPARTA_SERVICE_NAME='%s';\n", serviceName)
logger.WithFields(logrus.Fields{
"index.js": nodeJSSource,
}).Debug("Dynamically generated NodeJS adapter")
stringReader := strings.NewReader(nodeJSSource)
_, copyErr := io.Copy(nodeJSWriter, stringReader)
if nil != copyErr {
return copyErr
}
// Next embed the custom resource scripts into the package.
logger.Debug("Embedding CustomResource scripts")
return writeCustomResources(zipWriter, logger)
}
func pythonFunctionEntry(scriptExportName string,
lambdaFunctionName string,
logger *logrus.Logger) string {
logger.WithFields(logrus.Fields{
"ScriptName": scriptExportName,
"LambdaName": lambdaFunctionName,
}).Debug("Registering Sparta Python function")
return fmt.Sprintf(`def %s(event, context):
return lambda_handler("%s", event, context)
`,
scriptExportName,
lambdaFunctionName)
}
// Return a string representation of a JS function call that can be exposed
// to AWS Lambda
func createNewPythonProxyEntry(lambdaInfo *LambdaAWSInfo, logger *logrus.Logger) string {
logger.WithFields(logrus.Fields{
"FunctionName": lambdaInfo.lambdaFunctionName(),
"ScriptName": lambdaInfo.scriptExportHandlerName(),
}).Info("Registering Sparta Python function")
primaryEntry := fmt.Sprintf(`def %s(event, context):
return lambda_handler(%s, event, context)
`,
lambdaInfo.scriptExportHandlerName(),
lambdaInfo.lambdaFunctionName())
return primaryEntry
}
func createNewSpartaPythonCustomResourceEntry(resourceName string, logger *logrus.Logger) string {
// The resource name is a :: delimited one, so let's sanitize that
// to make it a valid JS identifier
pyName := scriptExportNameForCustomResourceType(resourceName)
return pythonFunctionEntry(pyName, resourceName, logger)
}
func insertPythonProxyResources(serviceName string,
executableOutput string,
lambdaAWSInfos []*LambdaAWSInfo,
zipWriter *zip.Writer,
logger *logrus.Logger) error {
pythonWriter, err := zipWriter.Create("index.py")
if err != nil {
return errors.New("Failed to create ZIP entry: index.py")
}
pythonTemplate := _escFSMustString(false, "/resources/index.template.py")
pythonSource := "\n#DO NOT EDIT - CONTENT UNTIL EOF IS AUTOMATICALLY GENERATED\n"
// Great, let's assemble all the Python function names, then
// supply them to the template expansion to perform the final
// magic
handlerNames := make(map[string]bool, 0)
for _, eachLambda := range lambdaAWSInfos {
if _, exists := handlerNames[eachLambda.scriptExportHandlerName()]; !exists {
pythonSource += pythonFunctionEntry(eachLambda.scriptExportHandlerName(),
eachLambda.lambdaFunctionName(),
logger)
handlerNames[eachLambda.scriptExportHandlerName()] = true
}
// USER DEFINED RESOURCES
for _, eachCustomResource := range eachLambda.customResources {
if _, exists := handlerNames[eachCustomResource.scriptExportHandlerName()]; !exists {
pythonSource += pythonFunctionEntry(eachCustomResource.scriptExportHandlerName(),
eachCustomResource.userFunctionName,
logger)
pythonSource += createUserCustomResourceEntry(eachCustomResource, logger)
handlerNames[eachCustomResource.scriptExportHandlerName()] = true
}
}
}
// SPARTA CUSTOM RESOURCES
for _, eachCustomResourceName := range golangCustomResourceTypes {
pythonSource += createNewSpartaPythonCustomResourceEntry(eachCustomResourceName, logger)
}
// Finally, pump the index.template.py through
// the Go template engine so that we can substitute the
// library name and the python functions we've built up...
data := struct {
LibraryName string
PythonFunctions string
}{
executableOutput,
pythonSource,
}
pyTemplate, pyTemplateErr := template.New("PythonHandler").Parse(pythonTemplate)
if nil != pyTemplateErr {
return pyTemplateErr
}
var pyDoc bytes.Buffer
pyTemplateErr = pyTemplate.Execute(&pyDoc, data)
if nil != pyTemplateErr {
return pyTemplateErr
}
// Log the Python handler...
logger.WithFields(logrus.Fields{
"index.py": pyDoc.String(),
}).Debug("Dynamically generated Python ctypes adapter")
_, copyErr := io.WriteString(pythonWriter, pyDoc.String())
return copyErr
}
func systemGoVersion(logger *logrus.Logger) (string, error) {
runtimeVersion := runtime.Version()
// Get the golang version from the output:
// Matts-MBP:Sparta mweagle$ go version
// go version go1.8.1 darwin/amd64
golangVersionRE := regexp.MustCompile(`go(\d+\.\d+(\.\d+)?)`)
matches := golangVersionRE.FindStringSubmatch(runtimeVersion)
if len(matches) > 2 {
return matches[1], nil
}
logger.WithFields(logrus.Fields{
"Output": runtimeVersion,
}).Warn("Unable to find Golang version using RegExp - using current version")
return runtimeVersion, nil
}
| {
// The resource name is a :: delimited one, so let's sanitize that
// to make it a valid JS identifier
jsName := scriptExportNameForCustomResourceType(resourceName)
primaryEntry := fmt.Sprintf("exports[\"%s\"] = createForwarder(\"/%s\");\n",
jsName,
resourceName)
return primaryEntry
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.