file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
lib.rs
//! jlrs is a crate that provides access to most of the Julia C API, it can be used to embed Julia //! in Rust applications and to use functionality from the Julia C API when writing `ccall`able //! functions in Rust. Currently this crate is only tested on Linux in combination with Julia 1.6 //! and is not compatible with earlier versions of Julia. //! //! The documentation assumes you have a basic understanding of Julia's type system. //! //! # Features //! //! An incomplete list of features that are currently supported by jlrs: //! //! - Access arbitrary Julia modules and their contents. //! - Call Julia functions, including functions that take keyword arguments. //! - Exceptions can be handled or converted to their error message, optionally with color. //! - Include and call your own Julia code. //! - Use a custom system image. //! - Create values that Julia can use, and convert them back to Rust, from Rust. //! - Access the type information and fields of values. The contents of inline and bits-union //! fields can be accessed directly. //! - Create and use n-dimensional arrays. The `jlrs-ndarray` feature can be enabled for //! integration with ndarray. //! - Support for mapping Julia structs to Rust structs that can be generated by JlrsReflect.jl. //! - Structs that can be mapped to Rust include those with type parameters and bits unions. //! - An async runtime is available when the `async` feature is enabled, which can be used from //! multiple threads and supports scheduling Julia `Task`s and `await`ing the result without //! blocking the runtime. //! //! //! # Generating the bindings //! //! This crate depends on jl-sys which contains the raw bindings to the Julia C API, by default //! pregenerated bindings are used. If you want to generate the bindings at compile time, the //! `use-bindgen` feature must be enabled. In this case the bindings are generated by bindgen. You //! can find the requirements for using bindgen in [their User Guide] //! //! 
#### Linux //! //! The recommended way to install Julia is to download the binaries from the official website, //! which is distributed in an archive containing a directory called `julia-x.y.z`. This directory //! contains several other directories, including a `bin` directory containing the `julia` //! executable. //! //! In order to ensure the `julia.h` header file can be found, either `/usr/include/julia/julia.h` //! or `/usr/local/include/julia/julia.h` //! must exist, or you have to set the `JULIA_DIR` environment variable to `/path/to/julia-x.y.z`. //! This environment variable can be used to override the default. Similarly, in order to load //! `libjulia.so` you must add `/path/to/julia-x.y.z/lib` to the `LD_LIBRARY_PATH` environment //! variable. When the `uv` feature is enabled, `/path/to/julia-x.y.z/lib/julia` must also be //! added to `LD_LIBRARY_PATH`. //! //! #### Windows //! //! If you want to use jlrs on Windows you must use WSL. An installation guide to install WSL on //! Windows can be found [on Microsoft's website]. After installing a Linux distribution, follow //! the installation instructions for Linux. //! //! //! # Using this crate //! //! The first thing you should do is `use` the [`prelude`]-module with an asterisk, this will //! bring all the structs and traits you're likely to need into scope. When embedding Julia, it //! must be initialized before it can be used. You can do this by calling [`Julia::init`] which //! returns an instance of [`Julia`]. Note that this method can only be called once while the //! application is running; if you drop it you won't be able to create a new instance but have to //! restart the application. If you want to use a custom system image, you must call //! [`Julia::init_with_image`] instead of `Julia::init`. If you're calling Rust from Julia //! everything has already been initialized, you can use `CCall` instead. If you want to use the //! 
async runtime, one of the initialization methods of [`AsyncJulia`] must be used. //! //! //! ## Calling Julia from Rust //! //! After initialization you have an instance of [`Julia`], [`Julia::include`] can be used to //! include files with custom Julia code. In order to call Julia functions and create new values //! that can be used by these functions, [`Julia::scope`] and [`Julia::scope_with_slots`] must be //! used. These two methods take a closure with two arguments, a [`Global`] and a mutable //! reference to a [`GcFrame`]. `Global` is a token that is used to access Julia modules, their //! contents and other global values, while `GcFrame` is used to root local values. Rooting a //! value in a frame prevents it from being freed by the garbage collector until that frame has //! been dropped. The frame is created when `Julia::scope(_with_slots)` is called and dropped //! when that method returns. //! //! Because you can use both a `Global` and a mutable reference to a `GcFrame` inside the closure, //! it's possible to access the contents of modules and create new values that can be used by //! Julia. The methods of [`Module`] let you access the contents of arbitrary modules, several //! methods are available to create new values. //! //! The simplest is to call [`Value::eval_string`], a method that takes two arguments. The first //! must implement the [`Scope`] trait, the second is a string which has to contain valid Julia //! code. The most important thing to know about the [`Scope`] trait for now is that it's used //! by functions that create new values to ensure the result is rooted. Mutable references to //! [`GcFrame`]s implement [`Scope`], in this case the [`Value`] that is returned is rooted in //! that frame, so the result is protected from garbage collection until the frame is dropped when //! that scope ends. //! //! In practice, [`Value::eval_string`] is relatively limited. It can be used to evaluate simple //! 
function calls like `sqrt(2.0)`, but can't take any arguments. Its most important use-case is //! importing installed packages by evaluating an `import` or `using` statement. A more //! interesting method, [`Value::new`], can be used with data of any type that implements //! [`IntoJulia`]. This trait is implemented by primitive types like `i8` and `char`. Any type //! that implements [`IntoJulia`] also implements [`Unbox`] which is used to extract the contents //! of a Julia value. //! //! In addition to evaluating raw commands with `Value::eval_string`, it's possible to call //! anything that implements [`Call`] as a Julia function, `Value` implements this trait because //! any Julia value is potentially callable as a function. Functions can be called with any number //! of positional arguments and be provided with keyword arguments. Both `Value::eval_string` and //! the trait methods of `Call` are all unsafe. It's trivial to write a function like //! `boom() = unsafe_load(Ptr{Float64}(C_NULL))`, which causes a segfault when it's called, and //! call it with these methods. //! //! As a simple example, let's convert two numbers to Julia values and add them: //! //! ```no_run //! use jlrs::prelude::*; //! //! # fn main() { //! // Initializing Julia is unsafe because it can race with another crate that does //! // the same. //! let mut julia = unsafe { Julia::init().unwrap() }; //! let res = julia.scope(|global, frame| { //! // Create the two arguments. Note that the first argument, something that //! // implements Scope, is taken by value and mutable references don't implement //! // Copy, so it's necessary to mutably reborrow the frame. //! let i = Value::new(&mut *frame, 2u64)?; //! let j = Value::new(&mut *frame, 1u32)?; //! //! // The `+` function can be found in the base module. //! let func = Module::base(global).function(&mut *frame, "+")?; //! //! // Call the function and unbox the result as a `u64`. The result of the function //! 
// call is a nested `Result`; the outer error doesn't contain to any Julia //! // data, while the inner error contains the exception if one is thrown. Here the //! // exception is converted to the outer error type by calling `into_jlrs_result`, this new //! // error contains the error message Julia would have shown. Colors can be enabled by //! // calling `Julia::error_color`. //! unsafe { //! func.call2(&mut *frame, i, j)? //! .into_jlrs_result()? //! .unbox::<u64>() //! } //! }).unwrap(); //! //! assert_eq!(res, 3); //! # } //! ``` //! //! Many more features are available, including creating and accessing n-dimensional Julia arrays //! and nesting scopes. To learn how to use them, please see the documentation for the [`memory`] //! and [`wrappers`] modules. //! //! //! ## Calling Rust from Julia //! //! Julia's `ccall` interface can be used to call `extern "C"` functions defined in Rust, for most //! use-cases you shouldn't need jlrs. There are two major ways to use `ccall`, with a pointer to //! the function or a `(:function, "library")` pair. //! //! A function can be cast to a void pointer and converted to a [`Value`]: //! //! ```no_run //! # use jlrs::prelude::*; //! // This function will be provided to Julia as a pointer, so its name can be mangled. //! unsafe extern "C" fn call_me(arg: bool) -> isize { //! if arg { //! 1 //! } else { //! -1 //! } //! } //! //! # fn main() { //! let mut julia = unsafe { Julia::init().unwrap() }; //! julia.scope(|global, frame| unsafe { //! // Cast the function to a void pointer //! let call_me_val = Value::new(&mut *frame, call_me as *mut std::ffi::c_void)?; //! //! // Value::eval_string can be used to create new functions. //! let func = Value::eval_string( //! &mut *frame, //! "myfunc(callme::Ptr{Cvoid})::Int = ccall(callme, Int, (Bool,), true)" //! )?.unwrap(); //! //! // Call the function and unbox the result. //! let output = func.call1(&mut *frame, call_me_val)? //! .into_jlrs_result()? //! .unbox::<isize>()?; //! //! 
assert_eq!(output, 1); //! //! Ok(()) //! }).unwrap(); //! # } //! ``` //! //! You can also use functions defined in `dylib` and `cdylib` libraries. In order to create such //! a library you need to add //! //! ```toml //! [lib] //! crate-type = ["dylib"] //! ``` //! //! or //! //! ```toml //! [lib] //! crate-type = ["cdylib"] //! ``` //! //! respectively to your crate's `Cargo.toml`. Use a `dylib` if you want to use the crate in other //! Rust crates, but if it's only intended to be called through `ccall` a `cdylib` is the better //! choice. On Linux, compiling such a crate will be compiled to `lib<crate_name>.so`. //! //! The functions you want to use with `ccall` must be both `extern "C"` functions to ensure the C //! ABI is used, and annotated with `#[no_mangle]` to prevent name mangling. Julia can find //! libraries in directories that are either on the default library search path or included by //! setting the `LD_LIBRARY_PATH` environment variable on Linux. If the compiled library is not //! directly visible to Julia, you can open it with `Libdl.dlopen` and acquire function pointers //! with `Libdl.dlsym`. These pointers can be called the same way as the pointer in the previous //! example. //! //! If the library is visible to Julia you can access it with the library name. If `call_me` is //! defined in a crate called `foo`, the following should work if the function is annotated with //! `#[no_mangle]`: //! //! ```julia //! ccall((:call_me, "libfoo"), Int, (Bool,), false) //! ``` //! //! One important aspect of calling Rust from other languages in general is that panicking across //! an FFI boundary is undefined behaviour. If you're not sure your code will never panic, wrap it //! with `std::panic::catch_unwind`. //! //! Most features provided by jlrs including accessing modules, calling functions, and borrowing //! array data require a [`Global`] or a frame. You can access these by creating a [`CCall`] //! first. 
Another method provided by [`CCall`] is [`CCall::uv_async_send`], this method can be //! used in combination with `Base.AsyncCondition`. In particular, it lets you write a `ccall`able //! function that does its actual work on another thread, return early and `wait` on the async //! condition, which happens when [`CCall::uv_async_send`] is called when that work is finished. //! The advantage of this is that the long-running function will not block the Julia runtime, //! There's an example available on GitHub that shows how to do this. //! //! //! ## Async runtime //! //! The async runtime runs Julia in a separate thread and returns a handle that can be shared //! across threads. The handle can be used to send new tasks to the runtime, multiple tasks can //! run in parallel by scheduling a function call as a new Julia `Task`. While the Julia `Task` //! has not completed, the runtime can switch to another task. To use this feature you must enable //! the `async` feature flag: //! //! ```toml //! [dependencies] //! jlrs = { version = "0.12", features = ["async"] } //! ``` //! //! The struct [`AsyncJulia`] is exported by the prelude and lets you initialize the runtime in //! two ways, either as a blocking task or as a thread. The first way should be used if you want //! to integrate the async runtime into a larger project that uses `async_std`. //! //! The easiest way to interact with Julia when using the async runtime is by using //! `AsyncJulia::blocking_task`, which can be used to send a closure like the one in the first //! example and call it. While this closure has not completed the runtime is blocked, the methods //! that schedule a function call as a new Julia `Task` can't be used. //! //! In order to write non-blocking tasks, you must implement either the [`AsyncTask`] or //! [`GeneratorTask`] trait. An `AsyncTask` can be called once, its async `run` method replaces //! the closure; this method takes a `Global` and a mutable reference [`AsyncGcFrame`]. The //! 
`AsyncGcFrame` provides mostly the same functionality as `GcFrame`, but can also be used to //! call the methods of the [`CallAsync`] trait. These methods schedule the function call on //! another thread and return a `Future`. While awaiting the result the runtime can handle another //! task. //! //!A `GeneratorTask` can be called multiple times. In addition to `run` it also has an async `init` method. This method is called when the `GeneratorTask` is created and can be used to prepare the initial state of the task. The frame provided to `init` is not dropped after this method returns, which means this initial state can contain Julia data. Whenever a `GeneratorTask` is successfully created a `GeneratorHandle` is returned. This handle can be used to call the `GeneratorTask` which calls its `run` method once. A `GeneratorHandle` can be cloned and shared across threads. //! //! You can find basic examples that show how to implement these traits in //! [the examples directory of the GitHub repository]. //! //! //! # Testing //! //! The restriction that Julia can be initialized once must be taken into account when running //! tests that use `jlrs`. The recommended approach is to create a thread-local static `RefCell`: //! //! ```no_run //! use jlrs::prelude::*; //! use std::cell::RefCell; //! thread_local! { //! pub static JULIA: RefCell<Julia> = { //! let julia = RefCell::new(unsafe { Julia::init().unwrap() }); //! julia.borrow_mut().scope(|_global, _frame| { //! /* include everything you need to use */ //! Ok(()) //! }).unwrap(); //! julia //! }; //! } //! ``` //! //! Tests that use this construct can only use one thread for testing, so you must use //! `cargo test -- --test-threads=1`, otherwise the code above will panic when a test //! tries to call `Julia::init` a second time from another thread. //! //! If these tests also involve the async runtime, the `JULIA_NUM_THREADS` environment //! variable must be set to a value larger than 2. //! //! 
If you want to run jlrs's tests, both these requirements must be taken into account: //! `JULIA_NUM_THREADS=3 cargo test -- --test-threads=1` //! //! //! # Custom types //! //! In order to map a struct in Rust to one in Julia you can derive [`ValidLayout`], [`Unbox`], //! and [`Typecheck`]. If the struct in Julia has no type parameters and is a bits type you can //! also derive [`IntoJulia`], which lets you use the type in combination with [`Value::new`]. //! //! You should normally not need to implement these structs or traits manually. The JlrsReflect.jl //! package can generate the correct Rust struct and automatically derive the supported traits for //! types that have no tuple or union fields with type parameters. The reason for this restriction //! is that the layout of tuple and union fields can be very different depending on these //! parameters in a way that can't be expressed in Rust. //! //! These custom types can also be used when you call Rust from Julia with `ccall`. //! //! [their User Guide]: https://rust-lang.github.io/rust-bindgen/requirements.html //! [on Microsoft's website]: https://docs.microsoft.com/en-us/windows/wsl/install-win10 //! [the examples directory of the repo]: https://github.com/Taaitaaiger/jlrs/tree/master/examples //! [`IntoJulia`]: crate::convert::into_julia::IntoJulia //! [`Typecheck`]: crate::layout::typecheck::Typecheck //! [`ValidLayout`]: crate::layout::valid_layout::ValidLayout //! [`Unbox`]: crate::convert::unbox::Unbox //! [`CallAsync::call_async`]: crate::extensions::multitask::call_async::CallAsync //! [`AsyncGcFrame`]: crate::extensions::multitask::async_frame::AsyncGcFrame //! [`Frame`]: crate::memory::frame::Frame //! [`AsyncTask`]: crate::extensions::multitask::async_task::AsyncTask //! [`GeneratorTask`]: crate::extensions::multitask::async_task::GeneratorTask //! [`GeneratorHandle`]: crate::extensions::multitask::async_task::GeneratorHandle //! [`AsyncJulia`]: crate::extensions::multitask::AsyncJulia //! 
[`CallAsync`]: crate::extensions::multitask::call_async::CallAsync //! [`DataType`]: crate::wrappers::ptr::datatype::DataType //! [`TypedArray`]: crate::wrappers::ptr::array::TypedArray //! [`Output`]: crate::memory::output::Output //! [`OutputScope`]: crate::memory::output::OutputScope //! [`ScopeExt`]: crate::memory::scope::ScopeExt //! [`ScopeExt::scope`]: crate::memory::scope::ScopeExt::scope //! [`Scope`]: crate::memory::scope::Scope //! [`Scope::value_scope`]: crate::memory::scope::Scope::value_scope //! [`Scope::result_scope`]: crate::memory::scope::Scope::result_scope #![forbid(rustdoc::broken_intra_doc_links)] pub mod convert; pub mod error; pub mod extensions; pub mod info; pub mod layout; pub mod memory; pub mod prelude; pub(crate) mod private; #[doc(hidden)] pub mod util; pub mod wrappers; use convert::into_jlrs_result::IntoJlrsResult; use error::{JlrsError, JlrsResult, CANNOT_DISPLAY_VALUE}; use info::Info; #[cfg(feature = "uv")] use jl_sys::uv_async_send; use jl_sys::{ jl_array_dims_ptr, jl_array_ndims, jl_atexit_hook, jl_init, jl_init_with_image, jl_is_initialized, }; use memory::frame::{GcFrame, NullFrame}; use memory::global::Global; use memory::mode::Sync; use memory::stack_page::StackPage; use prelude::Wrapper; use private::Private; use std::ffi::CString; use std::io::{Error as IOError, ErrorKind}; use std::mem::{self, MaybeUninit}; use std::path::Path; use std::ptr::null_mut; use std::slice; use std::sync::atomic::{AtomicBool, Ordering}; use wrappers::ptr::module::Module; use wrappers::ptr::string::JuliaString; use wrappers::ptr::value::Value; use wrappers::ptr::{array::Array, call::Call, private::Wrapper as _}; pub(crate) static INIT: AtomicBool = AtomicBool::new(false); pub(crate) static JLRS_JL: &'static str = include_str!("jlrs.jl"); /// A Julia instance. You must create it with [`Julia::init`] or [`Julia::init_with_image`] /// before you can do anything related to Julia. 
While this struct exists Julia is active, /// dropping it causes the shutdown code to be called but this doesn't leave Julia in a state from which it can be reinitialized. pub struct Julia { page: StackPage, } impl Julia { /// Initialize Julia, this method can only be called once. If it's called a second time it /// will return an error. If this struct is dropped, you will need to restart your program to /// be able to call Julia code again. /// /// This method is unsafe because it can race with another crate initializing Julia. pub unsafe fn init() -> JlrsResult<Self> { if jl_is_initialized() != 0 || INIT.swap(true, Ordering::SeqCst) { return Err(JlrsError::AlreadyInitialized.into()); } jl_init(); let mut jl = Julia { page: StackPage::default(), }; jl.scope_with_slots(1, |_, frame| { Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?; Ok(()) }) .expect("Could not load Jlrs module"); Ok(jl) } /// This method is similar to [`Julia::init`] except that it loads a custom system image. A /// custom image can be generated with the [`PackageCompiler`] package for Julia. The main /// advantage of using a custom image over the default one is that it allows you to avoid much /// of the compilation overhead often associated with Julia. /// /// Two arguments are required to call this method compared to [`Julia::init`]; /// `julia_bindir` and `image_relative_path`. The first must be the absolute path to a /// directory that contains a compatible Julia binary (eg `${JULIA_DIR}/bin`), the second must /// be either an absolute or a relative path to a system image. /// /// This method will return an error if either of the two paths doesn't exist or if Julia /// has already been initialized. It is unsafe because it can race with another crate /// initializing Julia. 
/// /// [`PackageCompiler`]: https://julialang.github.io/PackageCompiler.jl/dev/ pub unsafe fn init_with_image<P: AsRef<Path>, Q: AsRef<Path>>( julia_bindir: P, image_path: Q, ) -> JlrsResult<Self> { if INIT.swap(true, Ordering::SeqCst) { Err(JlrsError::AlreadyInitialized)?; } let julia_bindir_str = julia_bindir.as_ref().to_string_lossy().to_string(); let image_path_str = image_path.as_ref().to_string_lossy().to_string(); if !julia_bindir.as_ref().exists() { let io_err = IOError::new(ErrorKind::NotFound, julia_bindir_str); return Err(JlrsError::other(io_err))?; } if !image_path.as_ref().exists() { let io_err = IOError::new(ErrorKind::NotFound, image_path_str); return Err(JlrsError::other(io_err))?; } let bindir = CString::new(julia_bindir_str).unwrap(); let im_rel_path = CString::new(image_path_str).unwrap(); jl_init_with_image(bindir.as_ptr(), im_rel_path.as_ptr()); let mut jl = Julia { page: StackPage::default(), }; jl.scope_with_slots(1, |_, frame| { Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?; Ok(()) }) .expect("Could not load Jlrs module"); Ok(jl) } /// Enable or disable colored error messages originating from Julia. If this is enabled the /// error message in [`JlrsError::Exception`] can contain ANSI color codes. This feature is /// disabled by default. pub fn error_color(&mut self, enable: bool) -> JlrsResult<()> { self.scope(|global, _frame| unsafe { let enable = if enable { Value::true_v(global) } else { Value::false_v(global) }; Module::main(global) .submodule_ref("Jlrs")? .wrapper_unchecked() .global_ref("color")? .value_unchecked() .set_field_unchecked("x", enable)?; Ok(()) })?; Ok(()) } /// Calls `include` in the `Main` module in Julia, which executes the file's contents in that /// module. This has the same effect as calling `include` in the Julia REPL. 
/// /// Example: /// /// ```no_run /// # use jlrs::prelude::*; /// # fn main() { /// # let mut julia = unsafe { Julia::init().unwrap() }; /// julia.include("Path/To/MyJuliaCode.jl").unwrap(); /// # } /// ``` pub fn include<P: AsRef<Path>>(&mut self, path: P) -> JlrsResult<()> { if path.as_ref().exists() { return self.scope_with_slots(2, |global, frame| unsafe { let path_jl_str = JuliaString::new(&mut *frame, path.as_ref().to_string_lossy())?; let include_func = Module::main(global) .function_ref("include")? .wrapper_unchecked(); let res = include_func.call1(frame, path_jl_str)?; return match res { Ok(_) => Ok(()), Err(e) => Err(JlrsError::IncludeError { path: path.as_ref().to_string_lossy().into(), msg: e.display_string_or(CANNOT_DISPLAY_VALUE), })?, }; }); } Err(JlrsError::IncludeNotFound { path: path.as_ref().to_string_lossy().into(), })? } /// This method is a main entrypoint to interact with Julia. It takes a closure with two /// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary /// results. /// /// Example: /// /// ``` /// # use jlrs::prelude::*; /// # use jlrs::util::JULIA; /// # fn main() { /// # JULIA.with(|j| { /// # let mut julia = j.borrow_mut(); /// julia.scope(|_global, frame| { /// let _i = Value::new(&mut *frame, 1u64)?; /// Ok(()) /// }).unwrap(); /// # }); /// # } /// ``` pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let global = Global::new(); let mut frame = GcFrame::new(self.page.as_mut(), 0, Sync); func(global, &mut frame) } } /// This method is a main entrypoint to interact with Julia. It takes a closure with two /// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary
/// results. The frame will preallocate `slots` slots. /// /// Example: /// /// ``` /// # use jlrs::prelude::*; /// # use jlrs::util::JULIA; /// # fn main() { /// # JULIA.with(|j| { /// # let mut julia = j.borrow_mut(); /// julia.scope_with_slots(1, |_global, frame| { /// // Uses the preallocated slot /// let _i = Value::new(&mut *frame, 1u64)?; /// // Allocates a new slot, because only a single slot was preallocated /// let _j = Value::new(&mut *frame, 1u64)?; /// Ok(()) /// }).unwrap(); /// # }); /// # } /// ``` pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let global = Global::new(); if slots + 2 > self.page.size() { self.page = StackPage::new(slots + 2); } let mut frame = GcFrame::new(self.page.as_mut(), slots, Sync); func(global, &mut frame) } } /// Provides access to global information. pub fn info(&self) -> Info { Info::new() } } impl Drop for Julia { fn drop(&mut self) { unsafe { jl_atexit_hook(0); } } } /// When you call Rust from Julia through `ccall`, Julia has already been initialized and trying to /// initialize it again would cause a crash. In order to still be able to call Julia from Rust /// and to borrow arrays (if you pass them as `Array` rather than `Ptr{Array}`), you'll need to /// create a frame first. You can use this struct to do so. It must never be used outside /// functions called through `ccall`, and only once for each `ccall`ed function. /// /// If you only need to use a frame to borrow array data, you can use [`CCall::null_scope`]. /// Unlike [`Julia`], `CCall` postpones the allocation of the stack that is used for managing the /// GC until a `GcFrame` is created. In the case of a null scope, this stack isn't allocated at /// all. pub struct CCall { page: Option<StackPage>, } impl CCall { /// Create a new `CCall`. 
This function must never be called outside a function called through /// `ccall` from Julia and must only be called once during that call. The stack is not /// allocated until a [`GcFrame`] is created. pub unsafe fn new() -> Self { CCall { page: None } } /// Wake the task associated with `handle`. The handle must be the `handle` field of a /// `Base.AsyncCondition` in Julia. This can be used to call a long-running Rust function from /// Julia with ccall in another thread and wait for it to complete in Julia without blocking, /// there's an example available in the repository: ccall_with_threads. /// /// This method is only available if the `uv` feature is enabled. #[cfg(feature = "uv")] pub unsafe fn uv_async_send(handle: *mut std::ffi::c_void) -> bool { uv_async_send(handle.cast()) == 0 } /// Creates a [`GcFrame`], calls the given closure, and returns its result. pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let page = self.get_init_page(); let global = Global::new(); let mut frame = GcFrame::new(page.as_mut(), 0, Sync); func(global, &mut frame) } } /// Creates a [`GcFrame`] with `slots` slots, calls the given closure, and returns its result. pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let page = self.get_init_page(); let global = Global::new(); if slots + 2 > page.size() { *page = StackPage::new(slots + 2); } let mut frame = GcFrame::new(page.as_mut(), slots, Sync); func(global, &mut frame) } } /// Create a [`NullFrame`] and call the given closure. A [`NullFrame`] cannot be nested and /// can only be used to (mutably) borrow array data. Unlike other scope-methods, no `Global` /// is provided to the closure. 
pub fn null_scope<'base, 'julia: 'base, T, F>(&'julia mut self, func: F) -> JlrsResult<T> where F: FnOnce(&mut NullFrame<'base>) -> JlrsResult<T>, { unsafe { let mut frame = NullFrame::new(self); func(&mut frame) } } #[inline(always)] fn get_init_page(&mut self) -> &mut StackPage { if self.page.is_none() { self.page = Some(StackPage::default()); } self.page.as_mut().unwrap() } } unsafe extern "C" fn droparray(a: Array) { // The data of a moved array is allocated by Rust, this function is called by // a finalizer in order to ensure it's also freed by Rust. let mut arr_nn_ptr = a.unwrap_non_null(Private); let arr_ref = arr_nn_ptr.as_mut(); if arr_ref.flags.how() != 2 { return; } // Set data to null pointer let data_ptr = arr_ref.data.cast::<MaybeUninit<u8>>(); arr_ref.data = null_mut(); // Set all dims to 0 let arr_ptr = arr_nn_ptr.as_ptr(); let dims_ptr = jl_array_dims_ptr(arr_ptr); let n_dims = jl_array_ndims(arr_ptr); let mut_dims_slice = slice::from_raw_parts_mut(dims_ptr, n_dims as _); for dim in mut_dims_slice { *dim = 0; } // Drop the data let n_els = arr_ref.elsize as usize * arr_ref.length; let data = Vec::from_raw_parts(data_ptr, n_els, n_els); mem::drop(data); }
random_line_split
lib.rs
//! jlrs is a crate that provides access to most of the Julia C API, it can be used to embed Julia //! in Rust applications and to use functionality from the Julia C API when writing `ccall`able //! functions in Rust. Currently this crate is only tested on Linux in combination with Julia 1.6 //! and is not compatible with earlier versions of Julia. //! //! The documentation assumes you have a basic understanding of Julia's type system. //! //! # Features //! //! An incomplete list of features that are currently supported by jlrs: //! //! - Access arbitrary Julia modules and their contents. //! - Call Julia functions, including functions that take keyword arguments. //! - Exceptions can be handled or converted to their error message, optionally with color. //! - Include and call your own Julia code. //! - Use a custom system image. //! - Create values that Julia can use, and convert them back to Rust, from Rust. //! - Access the type information and fields of values. The contents of inline and bits-union //! fields can be accessed directly. //! - Create and use n-dimensional arrays. The `jlrs-ndarray` feature can be enabled for //! integration with ndarray. //! - Support for mapping Julia structs to Rust structs that can be generated by JlrsReflect.jl. //! - Structs that can be mapped to Rust include those with type parameters and bits unions. //! - An async runtime is available when the `async` feature is enabled, which can be used from //! multiple threads and supports scheduling Julia `Task`s and `await`ing the result without //! blocking the runtime. //! //! //! # Generating the bindings //! //! This crate depends on jl-sys which contains the raw bindings to the Julia C API, by default //! pregenerated bindings are used. If you want to generate the bindings at compile time, the //! `use-bindgen` feature must be enabled. In this case the bindings are generated by bindgen. You //! can find the requirements for using bindgen in [their User Guide] //! //! 
#### Linux //! //! The recommended way to install Julia is to download the binaries from the official website, //! which is distributed in an archive containing a directory called `julia-x.y.z`. This directory //! contains several other directories, including a `bin` directory containing the `julia` //! executable. //! //! In order to ensure the `julia.h` header file can be found, either `/usr/include/julia/julia.h` //! or `/usr/local/include/julia/julia.h` //! must exist, or you have to set the `JULIA_DIR` environment variable to `/path/to/julia-x.y.z`. //! This environment variable can be used to override the default. Similarly, in order to load //! `libjulia.so` you must add `/path/to/julia-x.y.z/lib` to the `LD_LIBRARY_PATH` environment //! variable. When the `uv` feature is enabled, `/path/to/julia-x.y.z/lib/julia` must also be //! added to `LD_LIBRARY_PATH`. //! //! #### Windows //! //! If you want to use jlrs on Windows you must use WSL. An installation guide to install WSL on //! Windows can be found [on Microsoft's website]. After installing a Linux distribution, follow //! the installation instructions for Linux. //! //! //! # Using this crate //! //! The first thing you should do is `use` the [`prelude`]-module with an asterisk, this will //! bring all the structs and traits you're likely to need into scope. When embedding Julia, it //! must be initialized before it can be used. You can do this by calling [`Julia::init`] which //! returns an instance of [`Julia`]. Note that this method can only be called once while the //! application is running; if you drop it you won't be able to create a new instance but have to //! restart the application. If you want to use a custom system image, you must call //! [`Julia::init_with_image`] instead of `Julia::init`. If you're calling Rust from Julia //! everything has already been initialized, you can use `CCall` instead. If you want to use the //! 
async runtime, one of the initialization methods of [`AsyncJulia`] must be used. //! //! //! ## Calling Julia from Rust //! //! After initialization you have an instance of [`Julia`], [`Julia::include`] can be used to //! include files with custom Julia code. In order to call Julia functions and create new values //! that can be used by these functions, [`Julia::scope`] and [`Julia::scope_with_slots`] must be //! used. These two methods take a closure with two arguments, a [`Global`] and a mutable //! reference to a [`GcFrame`]. `Global` is a token that is used to access Julia modules, their //! contents and other global values, while `GcFrame` is used to root local values. Rooting a //! value in a frame prevents it from being freed by the garbage collector until that frame has //! been dropped. The frame is created when `Julia::scope(_with_slots)` is called and dropped //! when that method returns. //! //! Because you can use both a `Global` and a mutable reference to a `GcFrame` inside the closure, //! it's possible to access the contents of modules and create new values that can be used by //! Julia. The methods of [`Module`] let you access the contents of arbitrary modules, several //! methods are available to create new values. //! //! The simplest is to call [`Value::eval_string`], a method that takes two arguments. The first //! must implement the [`Scope`] trait, the second is a string which has to contain valid Julia //! code. The most important thing to know about the [`Scope`] trait for now is that it's used //! by functions that create new values to ensure the result is rooted. Mutable references to //! [`GcFrame`]s implement [`Scope`], in this case the [`Value`] that is returned is rooted in //! that frame, so the result is protected from garbage collection until the frame is dropped when //! that scope ends. //! //! In practice, [`Value::eval_string`] is relatively limited. It can be used to evaluate simple //! 
function calls like `sqrt(2.0)`, but can't take any arguments. Its most important use-case is //! importing installed packages by evaluating an `import` or `using` statement. A more //! interesting method, [`Value::new`], can be used with data of any type that implements //! [`IntoJulia`]. This trait is implemented by primitive types like `i8` and `char`. Any type //! that implements [`IntoJulia`] also implements [`Unbox`] which is used to extract the contents //! of a Julia value. //! //! In addition to evaluating raw commands with `Value::eval_string`, it's possible to call //! anything that implements [`Call`] as a Julia function, `Value` implements this trait because //! any Julia value is potentially callable as a function. Functions can be called with any number //! of positional arguments and be provided with keyword arguments. Both `Value::eval_string` and //! the trait methods of `Call` are all unsafe. It's trivial to write a function like //! `boom() = unsafe_load(Ptr{Float64}(C_NULL))`, which causes a segfault when it's called, and //! call it with these methods. //! //! As a simple example, let's convert two numbers to Julia values and add them: //! //! ```no_run //! use jlrs::prelude::*; //! //! # fn main() { //! // Initializing Julia is unsafe because it can race with another crate that does //! // the same. //! let mut julia = unsafe { Julia::init().unwrap() }; //! let res = julia.scope(|global, frame| { //! // Create the two arguments. Note that the first argument, something that //! // implements Scope, is taken by value and mutable references don't implement //! // Copy, so it's necessary to mutably reborrow the frame. //! let i = Value::new(&mut *frame, 2u64)?; //! let j = Value::new(&mut *frame, 1u32)?; //! //! // The `+` function can be found in the base module. //! let func = Module::base(global).function(&mut *frame, "+")?; //! //! // Call the function and unbox the result as a `u64`. The result of the function //! 
// call is a nested `Result`; the outer error doesn't contain to any Julia //! // data, while the inner error contains the exception if one is thrown. Here the //! // exception is converted to the outer error type by calling `into_jlrs_result`, this new //! // error contains the error message Julia would have shown. Colors can be enabled by //! // calling `Julia::error_color`. //! unsafe { //! func.call2(&mut *frame, i, j)? //! .into_jlrs_result()? //! .unbox::<u64>() //! } //! }).unwrap(); //! //! assert_eq!(res, 3); //! # } //! ``` //! //! Many more features are available, including creating and accessing n-dimensional Julia arrays //! and nesting scopes. To learn how to use them, please see the documentation for the [`memory`] //! and [`wrappers`] modules. //! //! //! ## Calling Rust from Julia //! //! Julia's `ccall` interface can be used to call `extern "C"` functions defined in Rust, for most //! use-cases you shouldn't need jlrs. There are two major ways to use `ccall`, with a pointer to //! the function or a `(:function, "library")` pair. //! //! A function can be cast to a void pointer and converted to a [`Value`]: //! //! ```no_run //! # use jlrs::prelude::*; //! // This function will be provided to Julia as a pointer, so its name can be mangled. //! unsafe extern "C" fn call_me(arg: bool) -> isize { //! if arg { //! 1 //! } else { //! -1 //! } //! } //! //! # fn main() { //! let mut julia = unsafe { Julia::init().unwrap() }; //! julia.scope(|global, frame| unsafe { //! // Cast the function to a void pointer //! let call_me_val = Value::new(&mut *frame, call_me as *mut std::ffi::c_void)?; //! //! // Value::eval_string can be used to create new functions. //! let func = Value::eval_string( //! &mut *frame, //! "myfunc(callme::Ptr{Cvoid})::Int = ccall(callme, Int, (Bool,), true)" //! )?.unwrap(); //! //! // Call the function and unbox the result. //! let output = func.call1(&mut *frame, call_me_val)? //! .into_jlrs_result()? //! .unbox::<isize>()?; //! //! 
assert_eq!(output, 1); //! //! Ok(()) //! }).unwrap(); //! # } //! ``` //! //! You can also use functions defined in `dylib` and `cdylib` libraries. In order to create such //! a library you need to add //! //! ```toml //! [lib] //! crate-type = ["dylib"] //! ``` //! //! or //! //! ```toml //! [lib] //! crate-type = ["cdylib"] //! ``` //! //! respectively to your crate's `Cargo.toml`. Use a `dylib` if you want to use the crate in other //! Rust crates, but if it's only intended to be called through `ccall` a `cdylib` is the better //! choice. On Linux, compiling such a crate will be compiled to `lib<crate_name>.so`. //! //! The functions you want to use with `ccall` must be both `extern "C"` functions to ensure the C //! ABI is used, and annotated with `#[no_mangle]` to prevent name mangling. Julia can find //! libraries in directories that are either on the default library search path or included by //! setting the `LD_LIBRARY_PATH` environment variable on Linux. If the compiled library is not //! directly visible to Julia, you can open it with `Libdl.dlopen` and acquire function pointers //! with `Libdl.dlsym`. These pointers can be called the same way as the pointer in the previous //! example. //! //! If the library is visible to Julia you can access it with the library name. If `call_me` is //! defined in a crate called `foo`, the following should work if the function is annotated with //! `#[no_mangle]`: //! //! ```julia //! ccall((:call_me, "libfoo"), Int, (Bool,), false) //! ``` //! //! One important aspect of calling Rust from other languages in general is that panicking across //! an FFI boundary is undefined behaviour. If you're not sure your code will never panic, wrap it //! with `std::panic::catch_unwind`. //! //! Most features provided by jlrs including accessing modules, calling functions, and borrowing //! array data require a [`Global`] or a frame. You can access these by creating a [`CCall`] //! first. 
Another method provided by [`CCall`] is [`CCall::uv_async_send`], this method can be //! used in combination with `Base.AsyncCondition`. In particular, it lets you write a `ccall`able //! function that does its actual work on another thread, return early and `wait` on the async //! condition, which happens when [`CCall::uv_async_send`] is called when that work is finished. //! The advantage of this is that the long-running function will not block the Julia runtime, //! There's an example available on GitHub that shows how to do this. //! //! //! ## Async runtime //! //! The async runtime runs Julia in a separate thread and returns a handle that can be shared //! across threads. The handle can be used to send new tasks to the runtime, multiple tasks can //! run in parallel by scheduling a function call as a new Julia `Task`. While the Julia `Task` //! has not completed, the runtime can switch to another task. To use this feature you must enable //! the `async` feature flag: //! //! ```toml //! [dependencies] //! jlrs = { version = "0.12", features = ["async"] } //! ``` //! //! The struct [`AsyncJulia`] is exported by the prelude and lets you initialize the runtime in //! two ways, either as a blocking task or as a thread. The first way should be used if you want //! to integrate the async runtime into a larger project that uses `async_std`. //! //! The easiest way to interact with Julia when using the async runtime is by using //! `AsyncJulia::blocking_task`, which can be used to send a closure like the one in the first //! example and call it. While this closure has not completed the runtime is blocked, the methods //! that schedule a function call as a new Julia `Task` can't be used. //! //! In order to write non-blocking tasks, you must implement either the [`AsyncTask`] or //! [`GeneratorTask`] trait. An `AsyncTask` can be called once, its async `run` method replaces //! the closure; this method takes a `Global` and a mutable reference [`AsyncGcFrame`]. The //! 
`AsyncGcFrame` provides mostly the same functionality as `GcFrame`, but can also be used to //! call the methods of the [`CallAsync`] trait. These methods schedule the function call on //! another thread and return a `Future`. While awaiting the result the runtime can handle another //! task. //! //!A `GeneratorTask` can be called multiple times. In addition to `run` it also has an async `init` method. This method is called when the `GeneratorTask` is created and can be used to prepare the initial state of the task. The frame provided to `init` is not dropped after this method returns, which means this initial state can contain Julia data. Whenever a `GeneratorTask` is successfully created a `GeneratorHandle` is returned. This handle can be used to call the `GeneratorTask` which calls its `run` method once. A `GeneratorHandle` can be cloned and shared across threads. //! //! You can find basic examples that show how to implement these traits in //! [the examples directory of the GitHub repository]. //! //! //! # Testing //! //! The restriction that Julia can be initialized once must be taken into account when running //! tests that use `jlrs`. The recommended approach is to create a thread-local static `RefCell`: //! //! ```no_run //! use jlrs::prelude::*; //! use std::cell::RefCell; //! thread_local! { //! pub static JULIA: RefCell<Julia> = { //! let julia = RefCell::new(unsafe { Julia::init().unwrap() }); //! julia.borrow_mut().scope(|_global, _frame| { //! /* include everything you need to use */ //! Ok(()) //! }).unwrap(); //! julia //! }; //! } //! ``` //! //! Tests that use this construct can only use one thread for testing, so you must use //! `cargo test -- --test-threads=1`, otherwise the code above will panic when a test //! tries to call `Julia::init` a second time from another thread. //! //! If these tests also involve the async runtime, the `JULIA_NUM_THREADS` environment //! variable must be set to a value larger than 2. //! //! 
If you want to run jlrs's tests, both these requirements must be taken into account: //! `JULIA_NUM_THREADS=3 cargo test -- --test-threads=1` //! //! //! # Custom types //! //! In order to map a struct in Rust to one in Julia you can derive [`ValidLayout`], [`Unbox`], //! and [`Typecheck`]. If the struct in Julia has no type parameters and is a bits type you can //! also derive [`IntoJulia`], which lets you use the type in combination with [`Value::new`]. //! //! You should normally not need to implement these structs or traits manually. The JlrsReflect.jl //! package can generate the correct Rust struct and automatically derive the supported traits for //! types that have no tuple or union fields with type parameters. The reason for this restriction //! is that the layout of tuple and union fields can be very different depending on these //! parameters in a way that can't be expressed in Rust. //! //! These custom types can also be used when you call Rust from Julia with `ccall`. //! //! [their User Guide]: https://rust-lang.github.io/rust-bindgen/requirements.html //! [on Microsoft's website]: https://docs.microsoft.com/en-us/windows/wsl/install-win10 //! [the examples directory of the repo]: https://github.com/Taaitaaiger/jlrs/tree/master/examples //! [`IntoJulia`]: crate::convert::into_julia::IntoJulia //! [`Typecheck`]: crate::layout::typecheck::Typecheck //! [`ValidLayout`]: crate::layout::valid_layout::ValidLayout //! [`Unbox`]: crate::convert::unbox::Unbox //! [`CallAsync::call_async`]: crate::extensions::multitask::call_async::CallAsync //! [`AsyncGcFrame`]: crate::extensions::multitask::async_frame::AsyncGcFrame //! [`Frame`]: crate::memory::frame::Frame //! [`AsyncTask`]: crate::extensions::multitask::async_task::AsyncTask //! [`GeneratorTask`]: crate::extensions::multitask::async_task::GeneratorTask //! [`GeneratorHandle`]: crate::extensions::multitask::async_task::GeneratorHandle //! [`AsyncJulia`]: crate::extensions::multitask::AsyncJulia //! 
[`CallAsync`]: crate::extensions::multitask::call_async::CallAsync //! [`DataType`]: crate::wrappers::ptr::datatype::DataType //! [`TypedArray`]: crate::wrappers::ptr::array::TypedArray //! [`Output`]: crate::memory::output::Output //! [`OutputScope`]: crate::memory::output::OutputScope //! [`ScopeExt`]: crate::memory::scope::ScopeExt //! [`ScopeExt::scope`]: crate::memory::scope::ScopeExt::scope //! [`Scope`]: crate::memory::scope::Scope //! [`Scope::value_scope`]: crate::memory::scope::Scope::value_scope //! [`Scope::result_scope`]: crate::memory::scope::Scope::result_scope #![forbid(rustdoc::broken_intra_doc_links)] pub mod convert; pub mod error; pub mod extensions; pub mod info; pub mod layout; pub mod memory; pub mod prelude; pub(crate) mod private; #[doc(hidden)] pub mod util; pub mod wrappers; use convert::into_jlrs_result::IntoJlrsResult; use error::{JlrsError, JlrsResult, CANNOT_DISPLAY_VALUE}; use info::Info; #[cfg(feature = "uv")] use jl_sys::uv_async_send; use jl_sys::{ jl_array_dims_ptr, jl_array_ndims, jl_atexit_hook, jl_init, jl_init_with_image, jl_is_initialized, }; use memory::frame::{GcFrame, NullFrame}; use memory::global::Global; use memory::mode::Sync; use memory::stack_page::StackPage; use prelude::Wrapper; use private::Private; use std::ffi::CString; use std::io::{Error as IOError, ErrorKind}; use std::mem::{self, MaybeUninit}; use std::path::Path; use std::ptr::null_mut; use std::slice; use std::sync::atomic::{AtomicBool, Ordering}; use wrappers::ptr::module::Module; use wrappers::ptr::string::JuliaString; use wrappers::ptr::value::Value; use wrappers::ptr::{array::Array, call::Call, private::Wrapper as _}; pub(crate) static INIT: AtomicBool = AtomicBool::new(false); pub(crate) static JLRS_JL: &'static str = include_str!("jlrs.jl"); /// A Julia instance. You must create it with [`Julia::init`] or [`Julia::init_with_image`] /// before you can do anything related to Julia. 
While this struct exists Julia is active, /// dropping it causes the shutdown code to be called but this doesn't leave Julia in a state from which it can be reinitialized. pub struct Julia { page: StackPage, } impl Julia { /// Initialize Julia, this method can only be called once. If it's called a second time it /// will return an error. If this struct is dropped, you will need to restart your program to /// be able to call Julia code again. /// /// This method is unsafe because it can race with another crate initializing Julia. pub unsafe fn init() -> JlrsResult<Self> { if jl_is_initialized() != 0 || INIT.swap(true, Ordering::SeqCst) { return Err(JlrsError::AlreadyInitialized.into()); } jl_init(); let mut jl = Julia { page: StackPage::default(), }; jl.scope_with_slots(1, |_, frame| { Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?; Ok(()) }) .expect("Could not load Jlrs module"); Ok(jl) } /// This method is similar to [`Julia::init`] except that it loads a custom system image. A /// custom image can be generated with the [`PackageCompiler`] package for Julia. The main /// advantage of using a custom image over the default one is that it allows you to avoid much /// of the compilation overhead often associated with Julia. /// /// Two arguments are required to call this method compared to [`Julia::init`]; /// `julia_bindir` and `image_relative_path`. The first must be the absolute path to a /// directory that contains a compatible Julia binary (eg `${JULIA_DIR}/bin`), the second must /// be either an absolute or a relative path to a system image. /// /// This method will return an error if either of the two paths doesn't exist or if Julia /// has already been initialized. It is unsafe because it can race with another crate /// initializing Julia. 
/// /// [`PackageCompiler`]: https://julialang.github.io/PackageCompiler.jl/dev/ pub unsafe fn init_with_image<P: AsRef<Path>, Q: AsRef<Path>>( julia_bindir: P, image_path: Q, ) -> JlrsResult<Self> { if INIT.swap(true, Ordering::SeqCst) { Err(JlrsError::AlreadyInitialized)?; } let julia_bindir_str = julia_bindir.as_ref().to_string_lossy().to_string(); let image_path_str = image_path.as_ref().to_string_lossy().to_string(); if !julia_bindir.as_ref().exists()
if !image_path.as_ref().exists() { let io_err = IOError::new(ErrorKind::NotFound, image_path_str); return Err(JlrsError::other(io_err))?; } let bindir = CString::new(julia_bindir_str).unwrap(); let im_rel_path = CString::new(image_path_str).unwrap(); jl_init_with_image(bindir.as_ptr(), im_rel_path.as_ptr()); let mut jl = Julia { page: StackPage::default(), }; jl.scope_with_slots(1, |_, frame| { Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?; Ok(()) }) .expect("Could not load Jlrs module"); Ok(jl) } /// Enable or disable colored error messages originating from Julia. If this is enabled the /// error message in [`JlrsError::Exception`] can contain ANSI color codes. This feature is /// disabled by default. pub fn error_color(&mut self, enable: bool) -> JlrsResult<()> { self.scope(|global, _frame| unsafe { let enable = if enable { Value::true_v(global) } else { Value::false_v(global) }; Module::main(global) .submodule_ref("Jlrs")? .wrapper_unchecked() .global_ref("color")? .value_unchecked() .set_field_unchecked("x", enable)?; Ok(()) })?; Ok(()) } /// Calls `include` in the `Main` module in Julia, which executes the file's contents in that /// module. This has the same effect as calling `include` in the Julia REPL. /// /// Example: /// /// ```no_run /// # use jlrs::prelude::*; /// # fn main() { /// # let mut julia = unsafe { Julia::init().unwrap() }; /// julia.include("Path/To/MyJuliaCode.jl").unwrap(); /// # } /// ``` pub fn include<P: AsRef<Path>>(&mut self, path: P) -> JlrsResult<()> { if path.as_ref().exists() { return self.scope_with_slots(2, |global, frame| unsafe { let path_jl_str = JuliaString::new(&mut *frame, path.as_ref().to_string_lossy())?; let include_func = Module::main(global) .function_ref("include")? 
.wrapper_unchecked(); let res = include_func.call1(frame, path_jl_str)?; return match res { Ok(_) => Ok(()), Err(e) => Err(JlrsError::IncludeError { path: path.as_ref().to_string_lossy().into(), msg: e.display_string_or(CANNOT_DISPLAY_VALUE), })?, }; }); } Err(JlrsError::IncludeNotFound { path: path.as_ref().to_string_lossy().into(), })? } /// This method is a main entrypoint to interact with Julia. It takes a closure with two /// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary /// results. /// /// Example: /// /// ``` /// # use jlrs::prelude::*; /// # use jlrs::util::JULIA; /// # fn main() { /// # JULIA.with(|j| { /// # let mut julia = j.borrow_mut(); /// julia.scope(|_global, frame| { /// let _i = Value::new(&mut *frame, 1u64)?; /// Ok(()) /// }).unwrap(); /// # }); /// # } /// ``` pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let global = Global::new(); let mut frame = GcFrame::new(self.page.as_mut(), 0, Sync); func(global, &mut frame) } } /// This method is a main entrypoint to interact with Julia. It takes a closure with two /// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary /// results. The frame will preallocate `slots` slots. 
/// /// Example: /// /// ``` /// # use jlrs::prelude::*; /// # use jlrs::util::JULIA; /// # fn main() { /// # JULIA.with(|j| { /// # let mut julia = j.borrow_mut(); /// julia.scope_with_slots(1, |_global, frame| { /// // Uses the preallocated slot /// let _i = Value::new(&mut *frame, 1u64)?; /// // Allocates a new slot, because only a single slot was preallocated /// let _j = Value::new(&mut *frame, 1u64)?; /// Ok(()) /// }).unwrap(); /// # }); /// # } /// ``` pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let global = Global::new(); if slots + 2 > self.page.size() { self.page = StackPage::new(slots + 2); } let mut frame = GcFrame::new(self.page.as_mut(), slots, Sync); func(global, &mut frame) } } /// Provides access to global information. pub fn info(&self) -> Info { Info::new() } } impl Drop for Julia { fn drop(&mut self) { unsafe { jl_atexit_hook(0); } } } /// When you call Rust from Julia through `ccall`, Julia has already been initialized and trying to /// initialize it again would cause a crash. In order to still be able to call Julia from Rust /// and to borrow arrays (if you pass them as `Array` rather than `Ptr{Array}`), you'll need to /// create a frame first. You can use this struct to do so. It must never be used outside /// functions called through `ccall`, and only once for each `ccall`ed function. /// /// If you only need to use a frame to borrow array data, you can use [`CCall::null_scope`]. /// Unlike [`Julia`], `CCall` postpones the allocation of the stack that is used for managing the /// GC until a `GcFrame` is created. In the case of a null scope, this stack isn't allocated at /// all. pub struct CCall { page: Option<StackPage>, } impl CCall { /// Create a new `CCall`. This function must never be called outside a function called through /// `ccall` from Julia and must only be called once during that call. 
The stack is not /// allocated until a [`GcFrame`] is created. pub unsafe fn new() -> Self { CCall { page: None } } /// Wake the task associated with `handle`. The handle must be the `handle` field of a /// `Base.AsyncCondition` in Julia. This can be used to call a long-running Rust function from /// Julia with ccall in another thread and wait for it to complete in Julia without blocking, /// there's an example available in the repository: ccall_with_threads. /// /// This method is only available if the `uv` feature is enabled. #[cfg(feature = "uv")] pub unsafe fn uv_async_send(handle: *mut std::ffi::c_void) -> bool { uv_async_send(handle.cast()) == 0 } /// Creates a [`GcFrame`], calls the given closure, and returns its result. pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let page = self.get_init_page(); let global = Global::new(); let mut frame = GcFrame::new(page.as_mut(), 0, Sync); func(global, &mut frame) } } /// Creates a [`GcFrame`] with `slots` slots, calls the given closure, and returns its result. pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let page = self.get_init_page(); let global = Global::new(); if slots + 2 > page.size() { *page = StackPage::new(slots + 2); } let mut frame = GcFrame::new(page.as_mut(), slots, Sync); func(global, &mut frame) } } /// Create a [`NullFrame`] and call the given closure. A [`NullFrame`] cannot be nested and /// can only be used to (mutably) borrow array data. Unlike other scope-methods, no `Global` /// is provided to the closure. 
pub fn null_scope<'base, 'julia: 'base, T, F>(&'julia mut self, func: F) -> JlrsResult<T> where F: FnOnce(&mut NullFrame<'base>) -> JlrsResult<T>, { unsafe { let mut frame = NullFrame::new(self); func(&mut frame) } } #[inline(always)] fn get_init_page(&mut self) -> &mut StackPage { if self.page.is_none() { self.page = Some(StackPage::default()); } self.page.as_mut().unwrap() } } unsafe extern "C" fn droparray(a: Array) { // The data of a moved array is allocated by Rust, this function is called by // a finalizer in order to ensure it's also freed by Rust. let mut arr_nn_ptr = a.unwrap_non_null(Private); let arr_ref = arr_nn_ptr.as_mut(); if arr_ref.flags.how() != 2 { return; } // Set data to null pointer let data_ptr = arr_ref.data.cast::<MaybeUninit<u8>>(); arr_ref.data = null_mut(); // Set all dims to 0 let arr_ptr = arr_nn_ptr.as_ptr(); let dims_ptr = jl_array_dims_ptr(arr_ptr); let n_dims = jl_array_ndims(arr_ptr); let mut_dims_slice = slice::from_raw_parts_mut(dims_ptr, n_dims as _); for dim in mut_dims_slice { *dim = 0; } // Drop the data let n_els = arr_ref.elsize as usize * arr_ref.length; let data = Vec::from_raw_parts(data_ptr, n_els, n_els); mem::drop(data); }
{ let io_err = IOError::new(ErrorKind::NotFound, julia_bindir_str); return Err(JlrsError::other(io_err))?; }
conditional_block
lib.rs
//! jlrs is a crate that provides access to most of the Julia C API, it can be used to embed Julia //! in Rust applications and to use functionality from the Julia C API when writing `ccall`able //! functions in Rust. Currently this crate is only tested on Linux in combination with Julia 1.6 //! and is not compatible with earlier versions of Julia. //! //! The documentation assumes you have a basic understanding of Julia's type system. //! //! # Features //! //! An incomplete list of features that are currently supported by jlrs: //! //! - Access arbitrary Julia modules and their contents. //! - Call Julia functions, including functions that take keyword arguments. //! - Exceptions can be handled or converted to their error message, optionally with color. //! - Include and call your own Julia code. //! - Use a custom system image. //! - Create values that Julia can use, and convert them back to Rust, from Rust. //! - Access the type information and fields of values. The contents of inline and bits-union //! fields can be accessed directly. //! - Create and use n-dimensional arrays. The `jlrs-ndarray` feature can be enabled for //! integration with ndarray. //! - Support for mapping Julia structs to Rust structs that can be generated by JlrsReflect.jl. //! - Structs that can be mapped to Rust include those with type parameters and bits unions. //! - An async runtime is available when the `async` feature is enabled, which can be used from //! multiple threads and supports scheduling Julia `Task`s and `await`ing the result without //! blocking the runtime. //! //! //! # Generating the bindings //! //! This crate depends on jl-sys which contains the raw bindings to the Julia C API, by default //! pregenerated bindings are used. If you want to generate the bindings at compile time, the //! `use-bindgen` feature must be enabled. In this case the bindings are generated by bindgen. You //! can find the requirements for using bindgen in [their User Guide] //! //! 
#### Linux //! //! The recommended way to install Julia is to download the binaries from the official website, //! which is distributed in an archive containing a directory called `julia-x.y.z`. This directory //! contains several other directories, including a `bin` directory containing the `julia` //! executable. //! //! In order to ensure the `julia.h` header file can be found, either `/usr/include/julia/julia.h` //! or `/usr/local/include/julia/julia.h` //! must exist, or you have to set the `JULIA_DIR` environment variable to `/path/to/julia-x.y.z`. //! This environment variable can be used to override the default. Similarly, in order to load //! `libjulia.so` you must add `/path/to/julia-x.y.z/lib` to the `LD_LIBRARY_PATH` environment //! variable. When the `uv` feature is enabled, `/path/to/julia-x.y.z/lib/julia` must also be //! added to `LD_LIBRARY_PATH`. //! //! #### Windows //! //! If you want to use jlrs on Windows you must use WSL. An installation guide to install WSL on //! Windows can be found [on Microsoft's website]. After installing a Linux distribution, follow //! the installation instructions for Linux. //! //! //! # Using this crate //! //! The first thing you should do is `use` the [`prelude`]-module with an asterisk, this will //! bring all the structs and traits you're likely to need into scope. When embedding Julia, it //! must be initialized before it can be used. You can do this by calling [`Julia::init`] which //! returns an instance of [`Julia`]. Note that this method can only be called once while the //! application is running; if you drop it you won't be able to create a new instance but have to //! restart the application. If you want to use a custom system image, you must call //! [`Julia::init_with_image`] instead of `Julia::init`. If you're calling Rust from Julia //! everything has already been initialized, you can use `CCall` instead. If you want to use the //! 
async runtime, one of the initialization methods of [`AsyncJulia`] must be used. //! //! //! ## Calling Julia from Rust //! //! After initialization you have an instance of [`Julia`], [`Julia::include`] can be used to //! include files with custom Julia code. In order to call Julia functions and create new values //! that can be used by these functions, [`Julia::scope`] and [`Julia::scope_with_slots`] must be //! used. These two methods take a closure with two arguments, a [`Global`] and a mutable //! reference to a [`GcFrame`]. `Global` is a token that is used to access Julia modules, their //! contents and other global values, while `GcFrame` is used to root local values. Rooting a //! value in a frame prevents it from being freed by the garbage collector until that frame has //! been dropped. The frame is created when `Julia::scope(_with_slots)` is called and dropped //! when that method returns. //! //! Because you can use both a `Global` and a mutable reference to a `GcFrame` inside the closure, //! it's possible to access the contents of modules and create new values that can be used by //! Julia. The methods of [`Module`] let you access the contents of arbitrary modules, several //! methods are available to create new values. //! //! The simplest is to call [`Value::eval_string`], a method that takes two arguments. The first //! must implement the [`Scope`] trait, the second is a string which has to contain valid Julia //! code. The most important thing to know about the [`Scope`] trait for now is that it's used //! by functions that create new values to ensure the result is rooted. Mutable references to //! [`GcFrame`]s implement [`Scope`], in this case the [`Value`] that is returned is rooted in //! that frame, so the result is protected from garbage collection until the frame is dropped when //! that scope ends. //! //! In practice, [`Value::eval_string`] is relatively limited. It can be used to evaluate simple //! 
function calls like `sqrt(2.0)`, but can't take any arguments. Its most important use-case is //! importing installed packages by evaluating an `import` or `using` statement. A more //! interesting method, [`Value::new`], can be used with data of any type that implements //! [`IntoJulia`]. This trait is implemented by primitive types like `i8` and `char`. Any type //! that implements [`IntoJulia`] also implements [`Unbox`] which is used to extract the contents //! of a Julia value. //! //! In addition to evaluating raw commands with `Value::eval_string`, it's possible to call //! anything that implements [`Call`] as a Julia function, `Value` implements this trait because //! any Julia value is potentially callable as a function. Functions can be called with any number //! of positional arguments and be provided with keyword arguments. Both `Value::eval_string` and //! the trait methods of `Call` are all unsafe. It's trivial to write a function like //! `boom() = unsafe_load(Ptr{Float64}(C_NULL))`, which causes a segfault when it's called, and //! call it with these methods. //! //! As a simple example, let's convert two numbers to Julia values and add them: //! //! ```no_run //! use jlrs::prelude::*; //! //! # fn main() { //! // Initializing Julia is unsafe because it can race with another crate that does //! // the same. //! let mut julia = unsafe { Julia::init().unwrap() }; //! let res = julia.scope(|global, frame| { //! // Create the two arguments. Note that the first argument, something that //! // implements Scope, is taken by value and mutable references don't implement //! // Copy, so it's necessary to mutably reborrow the frame. //! let i = Value::new(&mut *frame, 2u64)?; //! let j = Value::new(&mut *frame, 1u32)?; //! //! // The `+` function can be found in the base module. //! let func = Module::base(global).function(&mut *frame, "+")?; //! //! // Call the function and unbox the result as a `u64`. The result of the function //! 
// call is a nested `Result`; the outer error doesn't contain to any Julia //! // data, while the inner error contains the exception if one is thrown. Here the //! // exception is converted to the outer error type by calling `into_jlrs_result`, this new //! // error contains the error message Julia would have shown. Colors can be enabled by //! // calling `Julia::error_color`. //! unsafe { //! func.call2(&mut *frame, i, j)? //! .into_jlrs_result()? //! .unbox::<u64>() //! } //! }).unwrap(); //! //! assert_eq!(res, 3); //! # } //! ``` //! //! Many more features are available, including creating and accessing n-dimensional Julia arrays //! and nesting scopes. To learn how to use them, please see the documentation for the [`memory`] //! and [`wrappers`] modules. //! //! //! ## Calling Rust from Julia //! //! Julia's `ccall` interface can be used to call `extern "C"` functions defined in Rust, for most //! use-cases you shouldn't need jlrs. There are two major ways to use `ccall`, with a pointer to //! the function or a `(:function, "library")` pair. //! //! A function can be cast to a void pointer and converted to a [`Value`]: //! //! ```no_run //! # use jlrs::prelude::*; //! // This function will be provided to Julia as a pointer, so its name can be mangled. //! unsafe extern "C" fn call_me(arg: bool) -> isize { //! if arg { //! 1 //! } else { //! -1 //! } //! } //! //! # fn main() { //! let mut julia = unsafe { Julia::init().unwrap() }; //! julia.scope(|global, frame| unsafe { //! // Cast the function to a void pointer //! let call_me_val = Value::new(&mut *frame, call_me as *mut std::ffi::c_void)?; //! //! // Value::eval_string can be used to create new functions. //! let func = Value::eval_string( //! &mut *frame, //! "myfunc(callme::Ptr{Cvoid})::Int = ccall(callme, Int, (Bool,), true)" //! )?.unwrap(); //! //! // Call the function and unbox the result. //! let output = func.call1(&mut *frame, call_me_val)? //! .into_jlrs_result()? //! .unbox::<isize>()?; //! //! 
assert_eq!(output, 1); //! //! Ok(()) //! }).unwrap(); //! # } //! ``` //! //! You can also use functions defined in `dylib` and `cdylib` libraries. In order to create such //! a library you need to add //! //! ```toml //! [lib] //! crate-type = ["dylib"] //! ``` //! //! or //! //! ```toml //! [lib] //! crate-type = ["cdylib"] //! ``` //! //! respectively to your crate's `Cargo.toml`. Use a `dylib` if you want to use the crate in other //! Rust crates, but if it's only intended to be called through `ccall` a `cdylib` is the better //! choice. On Linux, compiling such a crate will be compiled to `lib<crate_name>.so`. //! //! The functions you want to use with `ccall` must be both `extern "C"` functions to ensure the C //! ABI is used, and annotated with `#[no_mangle]` to prevent name mangling. Julia can find //! libraries in directories that are either on the default library search path or included by //! setting the `LD_LIBRARY_PATH` environment variable on Linux. If the compiled library is not //! directly visible to Julia, you can open it with `Libdl.dlopen` and acquire function pointers //! with `Libdl.dlsym`. These pointers can be called the same way as the pointer in the previous //! example. //! //! If the library is visible to Julia you can access it with the library name. If `call_me` is //! defined in a crate called `foo`, the following should work if the function is annotated with //! `#[no_mangle]`: //! //! ```julia //! ccall((:call_me, "libfoo"), Int, (Bool,), false) //! ``` //! //! One important aspect of calling Rust from other languages in general is that panicking across //! an FFI boundary is undefined behaviour. If you're not sure your code will never panic, wrap it //! with `std::panic::catch_unwind`. //! //! Most features provided by jlrs including accessing modules, calling functions, and borrowing //! array data require a [`Global`] or a frame. You can access these by creating a [`CCall`] //! first. 
Another method provided by [`CCall`] is [`CCall::uv_async_send`], this method can be //! used in combination with `Base.AsyncCondition`. In particular, it lets you write a `ccall`able //! function that does its actual work on another thread, return early and `wait` on the async //! condition, which happens when [`CCall::uv_async_send`] is called when that work is finished. //! The advantage of this is that the long-running function will not block the Julia runtime, //! There's an example available on GitHub that shows how to do this. //! //! //! ## Async runtime //! //! The async runtime runs Julia in a separate thread and returns a handle that can be shared //! across threads. The handle can be used to send new tasks to the runtime, multiple tasks can //! run in parallel by scheduling a function call as a new Julia `Task`. While the Julia `Task` //! has not completed, the runtime can switch to another task. To use this feature you must enable //! the `async` feature flag: //! //! ```toml //! [dependencies] //! jlrs = { version = "0.12", features = ["async"] } //! ``` //! //! The struct [`AsyncJulia`] is exported by the prelude and lets you initialize the runtime in //! two ways, either as a blocking task or as a thread. The first way should be used if you want //! to integrate the async runtime into a larger project that uses `async_std`. //! //! The easiest way to interact with Julia when using the async runtime is by using //! `AsyncJulia::blocking_task`, which can be used to send a closure like the one in the first //! example and call it. While this closure has not completed the runtime is blocked, the methods //! that schedule a function call as a new Julia `Task` can't be used. //! //! In order to write non-blocking tasks, you must implement either the [`AsyncTask`] or //! [`GeneratorTask`] trait. An `AsyncTask` can be called once, its async `run` method replaces //! the closure; this method takes a `Global` and a mutable reference [`AsyncGcFrame`]. The //! 
`AsyncGcFrame` provides mostly the same functionality as `GcFrame`, but can also be used to //! call the methods of the [`CallAsync`] trait. These methods schedule the function call on //! another thread and return a `Future`. While awaiting the result the runtime can handle another //! task. //! //!A `GeneratorTask` can be called multiple times. In addition to `run` it also has an async `init` method. This method is called when the `GeneratorTask` is created and can be used to prepare the initial state of the task. The frame provided to `init` is not dropped after this method returns, which means this initial state can contain Julia data. Whenever a `GeneratorTask` is successfully created a `GeneratorHandle` is returned. This handle can be used to call the `GeneratorTask` which calls its `run` method once. A `GeneratorHandle` can be cloned and shared across threads. //! //! You can find basic examples that show how to implement these traits in //! [the examples directory of the GitHub repository]. //! //! //! # Testing //! //! The restriction that Julia can be initialized once must be taken into account when running //! tests that use `jlrs`. The recommended approach is to create a thread-local static `RefCell`: //! //! ```no_run //! use jlrs::prelude::*; //! use std::cell::RefCell; //! thread_local! { //! pub static JULIA: RefCell<Julia> = { //! let julia = RefCell::new(unsafe { Julia::init().unwrap() }); //! julia.borrow_mut().scope(|_global, _frame| { //! /* include everything you need to use */ //! Ok(()) //! }).unwrap(); //! julia //! }; //! } //! ``` //! //! Tests that use this construct can only use one thread for testing, so you must use //! `cargo test -- --test-threads=1`, otherwise the code above will panic when a test //! tries to call `Julia::init` a second time from another thread. //! //! If these tests also involve the async runtime, the `JULIA_NUM_THREADS` environment //! variable must be set to a value larger than 2. //! //! 
If you want to run jlrs's tests, both these requirements must be taken into account: //! `JULIA_NUM_THREADS=3 cargo test -- --test-threads=1` //! //! //! # Custom types //! //! In order to map a struct in Rust to one in Julia you can derive [`ValidLayout`], [`Unbox`], //! and [`Typecheck`]. If the struct in Julia has no type parameters and is a bits type you can //! also derive [`IntoJulia`], which lets you use the type in combination with [`Value::new`]. //! //! You should normally not need to implement these structs or traits manually. The JlrsReflect.jl //! package can generate the correct Rust struct and automatically derive the supported traits for //! types that have no tuple or union fields with type parameters. The reason for this restriction //! is that the layout of tuple and union fields can be very different depending on these //! parameters in a way that can't be expressed in Rust. //! //! These custom types can also be used when you call Rust from Julia with `ccall`. //! //! [their User Guide]: https://rust-lang.github.io/rust-bindgen/requirements.html //! [on Microsoft's website]: https://docs.microsoft.com/en-us/windows/wsl/install-win10 //! [the examples directory of the repo]: https://github.com/Taaitaaiger/jlrs/tree/master/examples //! [`IntoJulia`]: crate::convert::into_julia::IntoJulia //! [`Typecheck`]: crate::layout::typecheck::Typecheck //! [`ValidLayout`]: crate::layout::valid_layout::ValidLayout //! [`Unbox`]: crate::convert::unbox::Unbox //! [`CallAsync::call_async`]: crate::extensions::multitask::call_async::CallAsync //! [`AsyncGcFrame`]: crate::extensions::multitask::async_frame::AsyncGcFrame //! [`Frame`]: crate::memory::frame::Frame //! [`AsyncTask`]: crate::extensions::multitask::async_task::AsyncTask //! [`GeneratorTask`]: crate::extensions::multitask::async_task::GeneratorTask //! [`GeneratorHandle`]: crate::extensions::multitask::async_task::GeneratorHandle //! [`AsyncJulia`]: crate::extensions::multitask::AsyncJulia //! 
[`CallAsync`]: crate::extensions::multitask::call_async::CallAsync //! [`DataType`]: crate::wrappers::ptr::datatype::DataType //! [`TypedArray`]: crate::wrappers::ptr::array::TypedArray //! [`Output`]: crate::memory::output::Output //! [`OutputScope`]: crate::memory::output::OutputScope //! [`ScopeExt`]: crate::memory::scope::ScopeExt //! [`ScopeExt::scope`]: crate::memory::scope::ScopeExt::scope //! [`Scope`]: crate::memory::scope::Scope //! [`Scope::value_scope`]: crate::memory::scope::Scope::value_scope //! [`Scope::result_scope`]: crate::memory::scope::Scope::result_scope #![forbid(rustdoc::broken_intra_doc_links)] pub mod convert; pub mod error; pub mod extensions; pub mod info; pub mod layout; pub mod memory; pub mod prelude; pub(crate) mod private; #[doc(hidden)] pub mod util; pub mod wrappers; use convert::into_jlrs_result::IntoJlrsResult; use error::{JlrsError, JlrsResult, CANNOT_DISPLAY_VALUE}; use info::Info; #[cfg(feature = "uv")] use jl_sys::uv_async_send; use jl_sys::{ jl_array_dims_ptr, jl_array_ndims, jl_atexit_hook, jl_init, jl_init_with_image, jl_is_initialized, }; use memory::frame::{GcFrame, NullFrame}; use memory::global::Global; use memory::mode::Sync; use memory::stack_page::StackPage; use prelude::Wrapper; use private::Private; use std::ffi::CString; use std::io::{Error as IOError, ErrorKind}; use std::mem::{self, MaybeUninit}; use std::path::Path; use std::ptr::null_mut; use std::slice; use std::sync::atomic::{AtomicBool, Ordering}; use wrappers::ptr::module::Module; use wrappers::ptr::string::JuliaString; use wrappers::ptr::value::Value; use wrappers::ptr::{array::Array, call::Call, private::Wrapper as _}; pub(crate) static INIT: AtomicBool = AtomicBool::new(false); pub(crate) static JLRS_JL: &'static str = include_str!("jlrs.jl"); /// A Julia instance. You must create it with [`Julia::init`] or [`Julia::init_with_image`] /// before you can do anything related to Julia. 
While this struct exists Julia is active, /// dropping it causes the shutdown code to be called but this doesn't leave Julia in a state from which it can be reinitialized. pub struct Julia { page: StackPage, } impl Julia { /// Initialize Julia, this method can only be called once. If it's called a second time it /// will return an error. If this struct is dropped, you will need to restart your program to /// be able to call Julia code again. /// /// This method is unsafe because it can race with another crate initializing Julia. pub unsafe fn init() -> JlrsResult<Self> { if jl_is_initialized() != 0 || INIT.swap(true, Ordering::SeqCst) { return Err(JlrsError::AlreadyInitialized.into()); } jl_init(); let mut jl = Julia { page: StackPage::default(), }; jl.scope_with_slots(1, |_, frame| { Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?; Ok(()) }) .expect("Could not load Jlrs module"); Ok(jl) } /// This method is similar to [`Julia::init`] except that it loads a custom system image. A /// custom image can be generated with the [`PackageCompiler`] package for Julia. The main /// advantage of using a custom image over the default one is that it allows you to avoid much /// of the compilation overhead often associated with Julia. /// /// Two arguments are required to call this method compared to [`Julia::init`]; /// `julia_bindir` and `image_relative_path`. The first must be the absolute path to a /// directory that contains a compatible Julia binary (eg `${JULIA_DIR}/bin`), the second must /// be either an absolute or a relative path to a system image. /// /// This method will return an error if either of the two paths doesn't exist or if Julia /// has already been initialized. It is unsafe because it can race with another crate /// initializing Julia. 
/// /// [`PackageCompiler`]: https://julialang.github.io/PackageCompiler.jl/dev/ pub unsafe fn init_with_image<P: AsRef<Path>, Q: AsRef<Path>>( julia_bindir: P, image_path: Q, ) -> JlrsResult<Self> { if INIT.swap(true, Ordering::SeqCst) { Err(JlrsError::AlreadyInitialized)?; } let julia_bindir_str = julia_bindir.as_ref().to_string_lossy().to_string(); let image_path_str = image_path.as_ref().to_string_lossy().to_string(); if !julia_bindir.as_ref().exists() { let io_err = IOError::new(ErrorKind::NotFound, julia_bindir_str); return Err(JlrsError::other(io_err))?; } if !image_path.as_ref().exists() { let io_err = IOError::new(ErrorKind::NotFound, image_path_str); return Err(JlrsError::other(io_err))?; } let bindir = CString::new(julia_bindir_str).unwrap(); let im_rel_path = CString::new(image_path_str).unwrap(); jl_init_with_image(bindir.as_ptr(), im_rel_path.as_ptr()); let mut jl = Julia { page: StackPage::default(), }; jl.scope_with_slots(1, |_, frame| { Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?; Ok(()) }) .expect("Could not load Jlrs module"); Ok(jl) } /// Enable or disable colored error messages originating from Julia. If this is enabled the /// error message in [`JlrsError::Exception`] can contain ANSI color codes. This feature is /// disabled by default. pub fn error_color(&mut self, enable: bool) -> JlrsResult<()> { self.scope(|global, _frame| unsafe { let enable = if enable { Value::true_v(global) } else { Value::false_v(global) }; Module::main(global) .submodule_ref("Jlrs")? .wrapper_unchecked() .global_ref("color")? .value_unchecked() .set_field_unchecked("x", enable)?; Ok(()) })?; Ok(()) } /// Calls `include` in the `Main` module in Julia, which executes the file's contents in that /// module. This has the same effect as calling `include` in the Julia REPL. 
/// /// Example: /// /// ```no_run /// # use jlrs::prelude::*; /// # fn main() { /// # let mut julia = unsafe { Julia::init().unwrap() }; /// julia.include("Path/To/MyJuliaCode.jl").unwrap(); /// # } /// ``` pub fn include<P: AsRef<Path>>(&mut self, path: P) -> JlrsResult<()> { if path.as_ref().exists() { return self.scope_with_slots(2, |global, frame| unsafe { let path_jl_str = JuliaString::new(&mut *frame, path.as_ref().to_string_lossy())?; let include_func = Module::main(global) .function_ref("include")? .wrapper_unchecked(); let res = include_func.call1(frame, path_jl_str)?; return match res { Ok(_) => Ok(()), Err(e) => Err(JlrsError::IncludeError { path: path.as_ref().to_string_lossy().into(), msg: e.display_string_or(CANNOT_DISPLAY_VALUE), })?, }; }); } Err(JlrsError::IncludeNotFound { path: path.as_ref().to_string_lossy().into(), })? } /// This method is a main entrypoint to interact with Julia. It takes a closure with two /// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary /// results. /// /// Example: /// /// ``` /// # use jlrs::prelude::*; /// # use jlrs::util::JULIA; /// # fn main() { /// # JULIA.with(|j| { /// # let mut julia = j.borrow_mut(); /// julia.scope(|_global, frame| { /// let _i = Value::new(&mut *frame, 1u64)?; /// Ok(()) /// }).unwrap(); /// # }); /// # } /// ``` pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let global = Global::new(); let mut frame = GcFrame::new(self.page.as_mut(), 0, Sync); func(global, &mut frame) } } /// This method is a main entrypoint to interact with Julia. It takes a closure with two /// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary /// results. The frame will preallocate `slots` slots. 
/// /// Example: /// /// ``` /// # use jlrs::prelude::*; /// # use jlrs::util::JULIA; /// # fn main() { /// # JULIA.with(|j| { /// # let mut julia = j.borrow_mut(); /// julia.scope_with_slots(1, |_global, frame| { /// // Uses the preallocated slot /// let _i = Value::new(&mut *frame, 1u64)?; /// // Allocates a new slot, because only a single slot was preallocated /// let _j = Value::new(&mut *frame, 1u64)?; /// Ok(()) /// }).unwrap(); /// # }); /// # } /// ``` pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let global = Global::new(); if slots + 2 > self.page.size() { self.page = StackPage::new(slots + 2); } let mut frame = GcFrame::new(self.page.as_mut(), slots, Sync); func(global, &mut frame) } } /// Provides access to global information. pub fn info(&self) -> Info { Info::new() } } impl Drop for Julia { fn drop(&mut self) { unsafe { jl_atexit_hook(0); } } } /// When you call Rust from Julia through `ccall`, Julia has already been initialized and trying to /// initialize it again would cause a crash. In order to still be able to call Julia from Rust /// and to borrow arrays (if you pass them as `Array` rather than `Ptr{Array}`), you'll need to /// create a frame first. You can use this struct to do so. It must never be used outside /// functions called through `ccall`, and only once for each `ccall`ed function. /// /// If you only need to use a frame to borrow array data, you can use [`CCall::null_scope`]. /// Unlike [`Julia`], `CCall` postpones the allocation of the stack that is used for managing the /// GC until a `GcFrame` is created. In the case of a null scope, this stack isn't allocated at /// all. pub struct CCall { page: Option<StackPage>, } impl CCall { /// Create a new `CCall`. This function must never be called outside a function called through /// `ccall` from Julia and must only be called once during that call. 
The stack is not /// allocated until a [`GcFrame`] is created. pub unsafe fn new() -> Self { CCall { page: None } } /// Wake the task associated with `handle`. The handle must be the `handle` field of a /// `Base.AsyncCondition` in Julia. This can be used to call a long-running Rust function from /// Julia with ccall in another thread and wait for it to complete in Julia without blocking, /// there's an example available in the repository: ccall_with_threads. /// /// This method is only available if the `uv` feature is enabled. #[cfg(feature = "uv")] pub unsafe fn uv_async_send(handle: *mut std::ffi::c_void) -> bool
/// Creates a [`GcFrame`], calls the given closure, and returns its result. pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let page = self.get_init_page(); let global = Global::new(); let mut frame = GcFrame::new(page.as_mut(), 0, Sync); func(global, &mut frame) } } /// Creates a [`GcFrame`] with `slots` slots, calls the given closure, and returns its result. pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T> where for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>, { unsafe { let page = self.get_init_page(); let global = Global::new(); if slots + 2 > page.size() { *page = StackPage::new(slots + 2); } let mut frame = GcFrame::new(page.as_mut(), slots, Sync); func(global, &mut frame) } } /// Create a [`NullFrame`] and call the given closure. A [`NullFrame`] cannot be nested and /// can only be used to (mutably) borrow array data. Unlike other scope-methods, no `Global` /// is provided to the closure. pub fn null_scope<'base, 'julia: 'base, T, F>(&'julia mut self, func: F) -> JlrsResult<T> where F: FnOnce(&mut NullFrame<'base>) -> JlrsResult<T>, { unsafe { let mut frame = NullFrame::new(self); func(&mut frame) } } #[inline(always)] fn get_init_page(&mut self) -> &mut StackPage { if self.page.is_none() { self.page = Some(StackPage::default()); } self.page.as_mut().unwrap() } } unsafe extern "C" fn droparray(a: Array) { // The data of a moved array is allocated by Rust, this function is called by // a finalizer in order to ensure it's also freed by Rust. 
let mut arr_nn_ptr = a.unwrap_non_null(Private); let arr_ref = arr_nn_ptr.as_mut(); if arr_ref.flags.how() != 2 { return; } // Set data to null pointer let data_ptr = arr_ref.data.cast::<MaybeUninit<u8>>(); arr_ref.data = null_mut(); // Set all dims to 0 let arr_ptr = arr_nn_ptr.as_ptr(); let dims_ptr = jl_array_dims_ptr(arr_ptr); let n_dims = jl_array_ndims(arr_ptr); let mut_dims_slice = slice::from_raw_parts_mut(dims_ptr, n_dims as _); for dim in mut_dims_slice { *dim = 0; } // Drop the data let n_els = arr_ref.elsize as usize * arr_ref.length; let data = Vec::from_raw_parts(data_ptr, n_els, n_els); mem::drop(data); }
{ uv_async_send(handle.cast()) == 0 }
identifier_body
moves.go
package main import "fmt" import "math" type Position struct { x, y int } type Move struct { x, y int // move relative to current position } type FullMove struct { pos Position move Move } // the result of applying a move type Turn struct { board Board lastMove FullMove } // MoveSeq contains a list of moves such that, moves[n] is valid only if moves[n - 1] is valid as well. // This allows us to tell easily that you can't, for example, move a rock two places away if you can't do it one time // away in the same direction. type MoveSeq []Move // MoveSeqs contains a list of MoveSeq; each MoveSeq is independent from the others type MoveSeqs []MoveSeq // movesMap stores the relative movement for each piece var movesMap = map[PieceColor]map[Piece]MoveSeqs {} func PositionAdd(pos Position, move Move) Position { return Position{ pos.x + move.x, pos.y + move.y } } func PositionDiff(pos Position, move Move) Position { return Position{ pos.x - move.x, pos.y - move.y } } // ApplyCastling applies the castling move in one specific direction; it assumes castling is valid func ApplyCastling(board Board, kingPos Position, kingInfo PieceInfo, direction int) (newBoard Board) { var rockMove, kingMove FullMove newBoard = board if direction < 0 { rockMove = FullMove{ Position{0, kingPos.y}, Move{3, 0} } } else { rockMove = FullMove{ Position{7, kingPos.y}, Move{-2, 0} } } SetBoardAt(&newBoard, rockMove.pos, PieceInfo{ Piece_Rock, PieceStatus_CastlingNotAllowed, kingInfo.color }) SetBoardAt(&newBoard, kingPos, PieceInfo{ kingInfo.piece, PieceStatus_CastlingNotAllowed, kingInfo.color }) updateStates := true kingMove = FullMove{ kingPos, Move{direction * 2, 0} } newBoard = ApplyMove(newBoard, rockMove, updateStates) newBoard = ApplyMove(newBoard, kingMove, updateStates) return } // ApplyEnPassant applies en-passant move; it assumes the move is valid func ApplyEnPassant(board Board, fullMove FullMove, updateStates bool) Board { newPos := PositionAdd(fullMove.pos, fullMove.move) board = 
ApplyMove(board, fullMove, updateStates) SetBoardAt(&board, Position{ newPos.x, fullMove.pos.y }, EmptyPieceInfo) return board } // ApplyPawnPromotion applies promotion move for one selected promotion type; it assumes the move is valid func ApplyPawnPromotion(board Board, fullMove FullMove, selectedPiece Piece, updateStates bool) Board { info := GetBoardAt(board, fullMove.pos) newPos := PositionAdd(fullMove.pos, fullMove.move) board = ApplyMove(board, fullMove, updateStates) status := PieceStatus_Default if selectedPiece == Piece_Rock { status = PieceStatus_CastlingNotAllowed } SetBoardAt(&board, newPos, PieceInfo{ selectedPiece, status, info.color }) return board } // addCastlingMove computes the board for a left or right castling move for the given king. // direction is either -1 (left) or 1 (right) func addCastlingMove(board Board, kingPos Position, kingInfo PieceInfo, direction int) (newBoard Board, ok bool) { var rockPos Position rockPos = Position{ 0, kingPos.y } if direction == 1 { rockPos.x = 7 } rockInfo := GetBoardAt(board, rockPos) if rockInfo.piece != Piece_Rock || rockInfo.status != PieceStatus_Default || rockInfo.color != kingInfo.color { return } // all squares between king and rock must be empty for xi := kingPos.x + direction; xi != rockPos.x; xi += direction { newPos := Position{ xi, kingPos.y } newInfo := GetBoardAt(board, newPos) if newInfo.piece != Piece_Empty { return } } // neither the king square nor the two squares in the direction of the rock can be under attack for xi := kingPos.x; xi != kingPos.x + 3 * direction; xi += direction { newPos := Position{ xi, kingPos.y } if isUnderAttack(board, newPos, kingInfo.color) { return } } // apply move to king & rock ok = true newBoard = ApplyCastling(board, kingPos, kingInfo, direction) return } func addCastlingMoves(board Board, kingPos Position, kingInfo PieceInfo, moves []Board) (newMoves []Board) { newMoves = moves if kingInfo.status != PieceStatus_Default { return } dirs := []int { -1, 1 } for 
_, dir := range dirs { move, ok := addCastlingMove(board, kingPos, kingInfo, dir) if ok { newMoves = append(newMoves, move) } } return } // addPawnMove adds either the pawn move, or all available promotions if the move is a promotion func addPawnMove(board Board, info PieceInfo, move FullMove, isEnPassant bool, moves []Board) (newMoves []Board) { newMoves = moves updateStates := true newPos := PositionAdd(move.pos, move.move) if newPos.y != 0 && newPos.y != 7 { var newMove Board if isEnPassant { newMove = ApplyEnPassant(board, move, updateStates) } else { newMove = ApplyMove(board, move, updateStates) } newMoves = append(newMoves, newMove) return } availablePromotions := []Piece{ Piece_Queen, Piece_Rock, Piece_Bishop, Piece_Knight } for _, newPiece := range availablePromotions { status := PieceStatus_Default if newPiece == Piece_Rock { status = PieceStatus_CastlingNotAllowed } newBoard := ApplyPawnPromotion(board, move, newPiece, updateStates) SetBoardAt(&newBoard, newPos, PieceInfo{ newPiece, status, info.color }) newMoves = append(newMoves, newBoard) } return } // addPawnSpecialMoves adds to list, the captures that can be done by a given pawn (including en-passant) and // the promotion func addPawnSpecialMoves(board Board, pos Position, info PieceInfo, moves []Board) (newMoves []Board) { newMoves = moves yDirection := 1 if info.color == PieceColor_White { yDirection = -1 } newy := pos.y + yDirection if newy < 0 || newy > 7 { return } xDirections := []int{ -1, 1 } for _, xDirection := range xDirections
return } // removeCheckMoves gets rid of any moves that put the king under attack func removeCheckMoves(boards []Board, color PieceColor) []Board { newBoards := make([]Board, 0, len(boards)) for _, b := range boards { if len(GetPieces(b, Piece_King, color)) == 0 { // TODO: remove this, only here to debug fmt.Println("DEBUG BOARD!!") DrawBoard(b) } kingPos := GetPieces(b, Piece_King, color)[0] if !isUnderAttack(b, kingPos, color) { newBoards = append(newBoards, b) } } return newBoards } // GetPossibleMoves returns the list of moves that can be done by a single piece. // It doesn't take checks into account, except for castling. // Params: // - filterCheckMoves = true forces the removal of any moves that puts the king under attack. // - quickMode = true skips some steps that aren't necessary for secondary uses of this // function: computing castling and updating state info. func GetPossibleMoves(board Board, pos Position, info PieceInfo, filterCheckMoves bool, quickMode bool) []Board { seqs := movesMap[info.color][info.piece] moves := []Move{} for _, seq := range seqs { for _, move := range seq { newPos := PositionAdd(pos, move) if !PositionInBoard(newPos) { break } infoHere := GetBoardAt(board, newPos) if infoHere.piece == Piece_Empty { moves = append(moves, move) } else { if infoHere.color != info.color && info.piece != Piece_Pawn { moves = append(moves, move) } break } } } boards := []Board{} updateStates := !quickMode for _, m := range moves { boards = append(boards, ApplyMove(board, FullMove{pos, m}, updateStates)) } // we assume first move is one step, second move is two steps... 
this is always correct because // of the MoveSeq definition pawnWithMoves := info.piece == Piece_Pawn && len(moves) != 0 if pawnWithMoves && ((info.color == PieceColor_Black && pos.y != 1) || (info.color == PieceColor_White && pos.y != 6)) { moves = moves[:1] } if info.piece == Piece_Pawn { if len(moves) == 1 { isEnPassant := false boards = addPawnMove(board, info, FullMove{ pos, moves[0] }, isEnPassant, []Board{}) } boards = addPawnSpecialMoves(board, pos, info, boards) } if !quickMode && info.piece == Piece_King { boards = addCastlingMoves(board, pos, info, boards) } if filterCheckMoves { boards = removeCheckMoves(boards, info.color) } return boards } // GetAllPossibleMoves returns all possible moves for pieces of a given color // (more details about arguments in GetPossibleMoves) func GetAllPossibleMoves(board Board, color PieceColor, filterCheckMoves bool, quickMode bool) []Board { positions := GetPiecesByColor(board, color) allMoves := []Board{} for _, pos := range positions { info := GetBoardAt(board, pos) allMoves = append(allMoves, GetPossibleMoves(board, pos, info, filterCheckMoves, quickMode)...) } return allMoves } // isUnderAttack tells whether a piece with color=color is under attack by any enemy piece. // This is the slow, but easy implementation. 
func isUnderAttack(board Board, pos Position, color PieceColor) bool { var enemies []Position = GetPiecesByColor(board, !color) filterCheckMoves := false quickMode := true for _, ePos := range enemies { enemyInfo := GetBoardAt(board, ePos) enemyMoves := GetPossibleMoves(board, ePos, enemyInfo, filterCheckMoves, quickMode) for _, enemyMove := range enemyMoves { infoHere := GetBoardAt(enemyMove, pos) if infoHere.piece != Piece_Empty && infoHere.color != color { return true } } } return false } func IsValidMove(board Board, piecePos Position, newBoard Board) bool { quickMode := false filterCheckMoves := true info := GetBoardAt(board, piecePos) moves := GetPossibleMoves(board, piecePos, info, filterCheckMoves, quickMode) for _, m := range moves { if m == newBoard { return true } } return false } func GetPossibleMoveCount(board Board, color PieceColor, filterCheckMoves bool) int { count := 0 quickMode := true for _, pos := range GetPiecesByColor(board, color) { info := GetBoardAt(board, pos) count += len(GetPossibleMoves(board, pos, info, filterCheckMoves, quickMode)) } return count } // resetPawnsStatus resets the status of all pawns of a given color; this means no contrary pawn can capture // any pawn using en-passant after this func resetPawnsStatus(board Board, color PieceColor) Board { for _, pos := range GetPieces(board, Piece_Pawn, color) { info := GetBoardAt(board, pos) SetBoardAt(&board, pos, PieceInfo{ info.piece, PieceStatus_Default, info.color }) } return board } // ApplyMove executes a move in a board; it assumes the move is a valid one, and it only applies simple moves // (castling or en-passant can't use this function) // Because the resetPawnsStatus call that updates states for the en-passant capture can be slow, we allow that // to be disabled. 
func ApplyMove(board Board, fullMove FullMove, updateStates bool) Board { info := GetBoardAt(board, fullMove.pos) // switch state changes for castling & en-passant if updateStates { board = resetPawnsStatus(board, info.color) if info.piece == Piece_King || info.piece == Piece_Rock { info.status = PieceStatus_CastlingNotAllowed } if info.piece == Piece_Pawn && math.Abs(float64(fullMove.move.y)) == 2. { info.status = PieceStatus_EnPassantAllowed } if info.piece == Piece_Pawn && math.Abs(float64(fullMove.move.y)) == 1. { info.status = PieceStatus_Default } } SetBoardAt(&board, fullMove.pos, EmptyPieceInfo) SetBoardAt(&board, PositionAdd(fullMove.pos, fullMove.move), info) return board } func initMovesMap(color PieceColor) map[Piece]MoveSeqs { m := make(map[Piece]MoveSeqs) // each sequence has to be in an order such that move n can only be done if // move n-1 is also possible (this takes care of collisions) // pawn dir := 1 if color == PieceColor_White { dir = -1 } m[Piece_Pawn] = MoveSeqs{ MoveSeq{ Move{0, dir}, Move{0, 2 * dir} } } // rock rmoves := MoveSeqs{ MoveSeq{}, MoveSeq{}, MoveSeq{}, MoveSeq{} } for i := 1; i < 8; i ++ { rmoves[0] = append(rmoves[0], Move{0, i}) rmoves[1] = append(rmoves[1], Move{0, -i}) rmoves[2] = append(rmoves[2], Move{i, 0}) rmoves[3] = append(rmoves[3], Move{-i, 0}) } m[Piece_Rock] = MoveSeqs{ MoveSeq(rmoves[0]), MoveSeq(rmoves[1]), MoveSeq(rmoves[2]), MoveSeq(rmoves[3]) } // bishop bmoves := MoveSeqs{ MoveSeq{}, MoveSeq{}, MoveSeq{}, MoveSeq{} } for i := 1; i < 8; i ++ { bmoves[0] = append(bmoves[0], Move{i, i}) bmoves[1] = append(bmoves[1], Move{-i, -i}) bmoves[2] = append(bmoves[2], Move{i, -i}) bmoves[3] = append(bmoves[3], Move{-i, i}) } m[Piece_Bishop] = MoveSeqs{ MoveSeq(bmoves[0]), MoveSeq(bmoves[1]), MoveSeq(bmoves[2]), MoveSeq(bmoves[3]) } // queen qmoves := MoveSeqs{} qmoves = append(qmoves, rmoves...) qmoves = append(qmoves, bmoves...) 
m[Piece_Queen] = qmoves // king kmoves := MoveSeqs{} for i := 0; i < len(qmoves); i ++ { kmoves = append(kmoves, MoveSeq{ qmoves[i][0] }) } m[Piece_King] = kmoves // knight m[Piece_Knight] = MoveSeqs{ MoveSeq{ Move{-2, -1} }, MoveSeq{ Move{-1, -2} }, MoveSeq{ Move{2, 1} }, MoveSeq{ Move{1, 2} }, MoveSeq{ Move{-2, 1} }, MoveSeq{ Move{-1, 2} }, MoveSeq{ Move{2, -1} }, MoveSeq{ Move{1, -2} }, } return m } // isCheckMate tells whether the king of a color is in checkmate. func isCheckMate(board Board, availableMoveCount int, color PieceColor) bool { kingPos := GetPieces(board, Piece_King, color)[0] if availableMoveCount == 0 && isUnderAttack(board, kingPos, color) { return true } return false } // GetGameStatus tells whether game is finished or not, and who wins if it is finished func GetGameStatus(board Board, nextTurnColor PieceColor, availableMoveCount int) (finished bool, draw bool, winningColor PieceColor) { finished = true if isCheckMate(board, availableMoveCount, nextTurnColor) { winningColor = !nextTurnColor return } if availableMoveCount == 0 { draw = true return } finished = false return } func init() { movesMap[PieceColor_Black] = initMovesMap(PieceColor_Black) movesMap[PieceColor_White] = initMovesMap(PieceColor_White) }
{ newx := pos.x + xDirection fullMove := FullMove{ pos, Move{ xDirection, yDirection } } if newx < 0 || newx > 7 { continue } enemyInfo := GetBoardAt(board, Position{ newx, newy }) isEnPassant := false if enemyInfo.piece != Piece_Empty { // normal capture if enemyInfo.color != info.color { newMoves = addPawnMove(board, info, fullMove, isEnPassant, newMoves) } } else { // try en-passant isEnPassant = true enPassantPos := Position{ newx, pos.y } enPassantInfo := GetBoardAt(board, enPassantPos) if enPassantInfo.color != info.color && enPassantInfo.piece == Piece_Pawn && enPassantInfo.status == PieceStatus_EnPassantAllowed { tmpMoves := []Board{} tmpMoves = addPawnMove(board, info, fullMove, isEnPassant, tmpMoves) newMoves = append(newMoves, tmpMoves...) } } }
conditional_block
moves.go
package main import "fmt" import "math" type Position struct { x, y int } type Move struct { x, y int // move relative to current position } type FullMove struct { pos Position move Move } // the result of applying a move type Turn struct { board Board lastMove FullMove } // MoveSeq contains a list of moves such that, moves[n] is valid only if moves[n - 1] is valid as well. // This allows us to tell easily that you can't, for example, move a rock two places away if you can't do it one time // away in the same direction. type MoveSeq []Move // MoveSeqs contains a list of MoveSeq; each MoveSeq is independent from the others type MoveSeqs []MoveSeq // movesMap stores the relative movement for each piece var movesMap = map[PieceColor]map[Piece]MoveSeqs {} func PositionAdd(pos Position, move Move) Position { return Position{ pos.x + move.x, pos.y + move.y } } func PositionDiff(pos Position, move Move) Position { return Position{ pos.x - move.x, pos.y - move.y } } // ApplyCastling applies the castling move in one specific direction; it assumes castling is valid func ApplyCastling(board Board, kingPos Position, kingInfo PieceInfo, direction int) (newBoard Board) { var rockMove, kingMove FullMove newBoard = board if direction < 0 { rockMove = FullMove{ Position{0, kingPos.y}, Move{3, 0} } } else { rockMove = FullMove{ Position{7, kingPos.y}, Move{-2, 0} } } SetBoardAt(&newBoard, rockMove.pos, PieceInfo{ Piece_Rock, PieceStatus_CastlingNotAllowed, kingInfo.color }) SetBoardAt(&newBoard, kingPos, PieceInfo{ kingInfo.piece, PieceStatus_CastlingNotAllowed, kingInfo.color }) updateStates := true kingMove = FullMove{ kingPos, Move{direction * 2, 0} } newBoard = ApplyMove(newBoard, rockMove, updateStates) newBoard = ApplyMove(newBoard, kingMove, updateStates) return } // ApplyEnPassant applies en-passant move; it assumes the move is valid func ApplyEnPassant(board Board, fullMove FullMove, updateStates bool) Board { newPos := PositionAdd(fullMove.pos, fullMove.move) board = 
ApplyMove(board, fullMove, updateStates) SetBoardAt(&board, Position{ newPos.x, fullMove.pos.y }, EmptyPieceInfo) return board } // ApplyPawnPromotion applies promotion move for one selected promotion type; it assumes the move is valid func ApplyPawnPromotion(board Board, fullMove FullMove, selectedPiece Piece, updateStates bool) Board { info := GetBoardAt(board, fullMove.pos) newPos := PositionAdd(fullMove.pos, fullMove.move) board = ApplyMove(board, fullMove, updateStates) status := PieceStatus_Default if selectedPiece == Piece_Rock { status = PieceStatus_CastlingNotAllowed } SetBoardAt(&board, newPos, PieceInfo{ selectedPiece, status, info.color }) return board } // addCastlingMove computes the board for a left or right castling move for the given king. // direction is either -1 (left) or 1 (right) func addCastlingMove(board Board, kingPos Position, kingInfo PieceInfo, direction int) (newBoard Board, ok bool) { var rockPos Position rockPos = Position{ 0, kingPos.y } if direction == 1 { rockPos.x = 7 } rockInfo := GetBoardAt(board, rockPos) if rockInfo.piece != Piece_Rock || rockInfo.status != PieceStatus_Default || rockInfo.color != kingInfo.color { return } // all squares between king and rock must be empty for xi := kingPos.x + direction; xi != rockPos.x; xi += direction { newPos := Position{ xi, kingPos.y } newInfo := GetBoardAt(board, newPos) if newInfo.piece != Piece_Empty { return } } // neither the king square nor the two squares in the direction of the rock can be under attack for xi := kingPos.x; xi != kingPos.x + 3 * direction; xi += direction { newPos := Position{ xi, kingPos.y } if isUnderAttack(board, newPos, kingInfo.color) { return } } // apply move to king & rock ok = true newBoard = ApplyCastling(board, kingPos, kingInfo, direction) return } func addCastlingMoves(board Board, kingPos Position, kingInfo PieceInfo, moves []Board) (newMoves []Board) { newMoves = moves if kingInfo.status != PieceStatus_Default { return } dirs := []int { -1, 1 } for 
_, dir := range dirs { move, ok := addCastlingMove(board, kingPos, kingInfo, dir) if ok { newMoves = append(newMoves, move) } } return } // addPawnMove adds either the pawn move, or all available promotions if the move is a promotion func addPawnMove(board Board, info PieceInfo, move FullMove, isEnPassant bool, moves []Board) (newMoves []Board) { newMoves = moves updateStates := true newPos := PositionAdd(move.pos, move.move) if newPos.y != 0 && newPos.y != 7 { var newMove Board if isEnPassant { newMove = ApplyEnPassant(board, move, updateStates) } else { newMove = ApplyMove(board, move, updateStates) } newMoves = append(newMoves, newMove) return } availablePromotions := []Piece{ Piece_Queen, Piece_Rock, Piece_Bishop, Piece_Knight } for _, newPiece := range availablePromotions { status := PieceStatus_Default if newPiece == Piece_Rock { status = PieceStatus_CastlingNotAllowed } newBoard := ApplyPawnPromotion(board, move, newPiece, updateStates) SetBoardAt(&newBoard, newPos, PieceInfo{ newPiece, status, info.color }) newMoves = append(newMoves, newBoard) } return } // addPawnSpecialMoves adds to list, the captures that can be done by a given pawn (including en-passant) and // the promotion func addPawnSpecialMoves(board Board, pos Position, info PieceInfo, moves []Board) (newMoves []Board) { newMoves = moves yDirection := 1 if info.color == PieceColor_White { yDirection = -1 } newy := pos.y + yDirection if newy < 0 || newy > 7 { return } xDirections := []int{ -1, 1 } for _, xDirection := range xDirections { newx := pos.x + xDirection fullMove := FullMove{ pos, Move{ xDirection, yDirection } } if newx < 0 || newx > 7 { continue } enemyInfo := GetBoardAt(board, Position{ newx, newy }) isEnPassant := false if enemyInfo.piece != Piece_Empty { // normal capture if enemyInfo.color != info.color { newMoves = addPawnMove(board, info, fullMove, isEnPassant, newMoves) } } else { // try en-passant isEnPassant = true enPassantPos := Position{ newx, pos.y } enPassantInfo := 
GetBoardAt(board, enPassantPos) if enPassantInfo.color != info.color && enPassantInfo.piece == Piece_Pawn && enPassantInfo.status == PieceStatus_EnPassantAllowed { tmpMoves := []Board{} tmpMoves = addPawnMove(board, info, fullMove, isEnPassant, tmpMoves) newMoves = append(newMoves, tmpMoves...) } } } return } // removeCheckMoves gets rid of any moves that put the king under attack func removeCheckMoves(boards []Board, color PieceColor) []Board { newBoards := make([]Board, 0, len(boards)) for _, b := range boards { if len(GetPieces(b, Piece_King, color)) == 0 { // TODO: remove this, only here to debug fmt.Println("DEBUG BOARD!!") DrawBoard(b) } kingPos := GetPieces(b, Piece_King, color)[0] if !isUnderAttack(b, kingPos, color) { newBoards = append(newBoards, b) } } return newBoards } // GetPossibleMoves returns the list of moves that can be done by a single piece. // It doesn't take checks into account, except for castling. // Params: // - filterCheckMoves = true forces the removal of any moves that puts the king under attack. // - quickMode = true skips some steps that aren't necessary for secondary uses of this // function: computing castling and updating state info. func GetPossibleMoves(board Board, pos Position, info PieceInfo, filterCheckMoves bool, quickMode bool) []Board { seqs := movesMap[info.color][info.piece] moves := []Move{} for _, seq := range seqs { for _, move := range seq { newPos := PositionAdd(pos, move) if !PositionInBoard(newPos) { break } infoHere := GetBoardAt(board, newPos) if infoHere.piece == Piece_Empty { moves = append(moves, move) } else { if infoHere.color != info.color && info.piece != Piece_Pawn { moves = append(moves, move) } break } } } boards := []Board{} updateStates := !quickMode for _, m := range moves { boards = append(boards, ApplyMove(board, FullMove{pos, m}, updateStates)) } // we assume first move is one step, second move is two steps... 
this is always correct because // of the MoveSeq definition pawnWithMoves := info.piece == Piece_Pawn && len(moves) != 0 if pawnWithMoves && ((info.color == PieceColor_Black && pos.y != 1) || (info.color == PieceColor_White && pos.y != 6)) { moves = moves[:1] } if info.piece == Piece_Pawn { if len(moves) == 1 { isEnPassant := false boards = addPawnMove(board, info, FullMove{ pos, moves[0] }, isEnPassant, []Board{}) } boards = addPawnSpecialMoves(board, pos, info, boards) } if !quickMode && info.piece == Piece_King { boards = addCastlingMoves(board, pos, info, boards) } if filterCheckMoves { boards = removeCheckMoves(boards, info.color) } return boards } // GetAllPossibleMoves returns all possible moves for pieces of a given color // (more details about arguments in GetPossibleMoves) func GetAllPossibleMoves(board Board, color PieceColor, filterCheckMoves bool, quickMode bool) []Board { positions := GetPiecesByColor(board, color) allMoves := []Board{} for _, pos := range positions { info := GetBoardAt(board, pos) allMoves = append(allMoves, GetPossibleMoves(board, pos, info, filterCheckMoves, quickMode)...) } return allMoves } // isUnderAttack tells whether a piece with color=color is under attack by any enemy piece. // This is the slow, but easy implementation. 
func isUnderAttack(board Board, pos Position, color PieceColor) bool { var enemies []Position = GetPiecesByColor(board, !color) filterCheckMoves := false quickMode := true for _, ePos := range enemies { enemyInfo := GetBoardAt(board, ePos) enemyMoves := GetPossibleMoves(board, ePos, enemyInfo, filterCheckMoves, quickMode) for _, enemyMove := range enemyMoves { infoHere := GetBoardAt(enemyMove, pos) if infoHere.piece != Piece_Empty && infoHere.color != color { return true } } } return false } func IsValidMove(board Board, piecePos Position, newBoard Board) bool { quickMode := false filterCheckMoves := true info := GetBoardAt(board, piecePos) moves := GetPossibleMoves(board, piecePos, info, filterCheckMoves, quickMode) for _, m := range moves { if m == newBoard { return true } } return false } func GetPossibleMoveCount(board Board, color PieceColor, filterCheckMoves bool) int { count := 0 quickMode := true for _, pos := range GetPiecesByColor(board, color) { info := GetBoardAt(board, pos) count += len(GetPossibleMoves(board, pos, info, filterCheckMoves, quickMode)) } return count } // resetPawnsStatus resets the status of all pawns of a given color; this means no contrary pawn can capture // any pawn using en-passant after this func resetPawnsStatus(board Board, color PieceColor) Board { for _, pos := range GetPieces(board, Piece_Pawn, color) { info := GetBoardAt(board, pos) SetBoardAt(&board, pos, PieceInfo{ info.piece, PieceStatus_Default, info.color }) } return board } // ApplyMove executes a move in a board; it assumes the move is a valid one, and it only applies simple moves // (castling or en-passant can't use this function) // Because the resetPawnsStatus call that updates states for the en-passant capture can be slow, we allow that // to be disabled. 
func ApplyMove(board Board, fullMove FullMove, updateStates bool) Board { info := GetBoardAt(board, fullMove.pos) // switch state changes for castling & en-passant if updateStates { board = resetPawnsStatus(board, info.color) if info.piece == Piece_King || info.piece == Piece_Rock { info.status = PieceStatus_CastlingNotAllowed } if info.piece == Piece_Pawn && math.Abs(float64(fullMove.move.y)) == 2. { info.status = PieceStatus_EnPassantAllowed } if info.piece == Piece_Pawn && math.Abs(float64(fullMove.move.y)) == 1. { info.status = PieceStatus_Default } } SetBoardAt(&board, fullMove.pos, EmptyPieceInfo) SetBoardAt(&board, PositionAdd(fullMove.pos, fullMove.move), info) return board } func
(color PieceColor) map[Piece]MoveSeqs { m := make(map[Piece]MoveSeqs) // each sequence has to be in an order such that move n can only be done if // move n-1 is also possible (this takes care of collisions) // pawn dir := 1 if color == PieceColor_White { dir = -1 } m[Piece_Pawn] = MoveSeqs{ MoveSeq{ Move{0, dir}, Move{0, 2 * dir} } } // rock rmoves := MoveSeqs{ MoveSeq{}, MoveSeq{}, MoveSeq{}, MoveSeq{} } for i := 1; i < 8; i ++ { rmoves[0] = append(rmoves[0], Move{0, i}) rmoves[1] = append(rmoves[1], Move{0, -i}) rmoves[2] = append(rmoves[2], Move{i, 0}) rmoves[3] = append(rmoves[3], Move{-i, 0}) } m[Piece_Rock] = MoveSeqs{ MoveSeq(rmoves[0]), MoveSeq(rmoves[1]), MoveSeq(rmoves[2]), MoveSeq(rmoves[3]) } // bishop bmoves := MoveSeqs{ MoveSeq{}, MoveSeq{}, MoveSeq{}, MoveSeq{} } for i := 1; i < 8; i ++ { bmoves[0] = append(bmoves[0], Move{i, i}) bmoves[1] = append(bmoves[1], Move{-i, -i}) bmoves[2] = append(bmoves[2], Move{i, -i}) bmoves[3] = append(bmoves[3], Move{-i, i}) } m[Piece_Bishop] = MoveSeqs{ MoveSeq(bmoves[0]), MoveSeq(bmoves[1]), MoveSeq(bmoves[2]), MoveSeq(bmoves[3]) } // queen qmoves := MoveSeqs{} qmoves = append(qmoves, rmoves...) qmoves = append(qmoves, bmoves...) m[Piece_Queen] = qmoves // king kmoves := MoveSeqs{} for i := 0; i < len(qmoves); i ++ { kmoves = append(kmoves, MoveSeq{ qmoves[i][0] }) } m[Piece_King] = kmoves // knight m[Piece_Knight] = MoveSeqs{ MoveSeq{ Move{-2, -1} }, MoveSeq{ Move{-1, -2} }, MoveSeq{ Move{2, 1} }, MoveSeq{ Move{1, 2} }, MoveSeq{ Move{-2, 1} }, MoveSeq{ Move{-1, 2} }, MoveSeq{ Move{2, -1} }, MoveSeq{ Move{1, -2} }, } return m } // isCheckMate tells whether the king of a color is in checkmate. 
func isCheckMate(board Board, availableMoveCount int, color PieceColor) bool { kingPos := GetPieces(board, Piece_King, color)[0] if availableMoveCount == 0 && isUnderAttack(board, kingPos, color) { return true } return false } // GetGameStatus tells whether game is finished or not, and who wins if it is finished func GetGameStatus(board Board, nextTurnColor PieceColor, availableMoveCount int) (finished bool, draw bool, winningColor PieceColor) { finished = true if isCheckMate(board, availableMoveCount, nextTurnColor) { winningColor = !nextTurnColor return } if availableMoveCount == 0 { draw = true return } finished = false return } func init() { movesMap[PieceColor_Black] = initMovesMap(PieceColor_Black) movesMap[PieceColor_White] = initMovesMap(PieceColor_White) }
initMovesMap
identifier_name
moves.go
package main import "fmt" import "math" type Position struct { x, y int } type Move struct { x, y int // move relative to current position } type FullMove struct { pos Position move Move } // the result of applying a move type Turn struct { board Board lastMove FullMove } // MoveSeq contains a list of moves such that, moves[n] is valid only if moves[n - 1] is valid as well. // This allows us to tell easily that you can't, for example, move a rock two places away if you can't do it one time // away in the same direction. type MoveSeq []Move // MoveSeqs contains a list of MoveSeq; each MoveSeq is independent from the others type MoveSeqs []MoveSeq // movesMap stores the relative movement for each piece var movesMap = map[PieceColor]map[Piece]MoveSeqs {} func PositionAdd(pos Position, move Move) Position { return Position{ pos.x + move.x, pos.y + move.y } } func PositionDiff(pos Position, move Move) Position { return Position{ pos.x - move.x, pos.y - move.y } } // ApplyCastling applies the castling move in one specific direction; it assumes castling is valid func ApplyCastling(board Board, kingPos Position, kingInfo PieceInfo, direction int) (newBoard Board) { var rockMove, kingMove FullMove newBoard = board if direction < 0 { rockMove = FullMove{ Position{0, kingPos.y}, Move{3, 0} } } else { rockMove = FullMove{ Position{7, kingPos.y}, Move{-2, 0} } } SetBoardAt(&newBoard, rockMove.pos, PieceInfo{ Piece_Rock, PieceStatus_CastlingNotAllowed, kingInfo.color }) SetBoardAt(&newBoard, kingPos, PieceInfo{ kingInfo.piece, PieceStatus_CastlingNotAllowed, kingInfo.color }) updateStates := true kingMove = FullMove{ kingPos, Move{direction * 2, 0} } newBoard = ApplyMove(newBoard, rockMove, updateStates) newBoard = ApplyMove(newBoard, kingMove, updateStates) return } // ApplyEnPassant applies en-passant move; it assumes the move is valid func ApplyEnPassant(board Board, fullMove FullMove, updateStates bool) Board { newPos := PositionAdd(fullMove.pos, fullMove.move) board = 
ApplyMove(board, fullMove, updateStates) SetBoardAt(&board, Position{ newPos.x, fullMove.pos.y }, EmptyPieceInfo) return board } // ApplyPawnPromotion applies promotion move for one selected promotion type; it assumes the move is valid func ApplyPawnPromotion(board Board, fullMove FullMove, selectedPiece Piece, updateStates bool) Board { info := GetBoardAt(board, fullMove.pos) newPos := PositionAdd(fullMove.pos, fullMove.move) board = ApplyMove(board, fullMove, updateStates) status := PieceStatus_Default if selectedPiece == Piece_Rock { status = PieceStatus_CastlingNotAllowed } SetBoardAt(&board, newPos, PieceInfo{ selectedPiece, status, info.color }) return board } // addCastlingMove computes the board for a left or right castling move for the given king. // direction is either -1 (left) or 1 (right) func addCastlingMove(board Board, kingPos Position, kingInfo PieceInfo, direction int) (newBoard Board, ok bool) { var rockPos Position rockPos = Position{ 0, kingPos.y } if direction == 1 { rockPos.x = 7 } rockInfo := GetBoardAt(board, rockPos) if rockInfo.piece != Piece_Rock || rockInfo.status != PieceStatus_Default || rockInfo.color != kingInfo.color { return } // all squares between king and rock must be empty for xi := kingPos.x + direction; xi != rockPos.x; xi += direction { newPos := Position{ xi, kingPos.y } newInfo := GetBoardAt(board, newPos) if newInfo.piece != Piece_Empty { return } } // neither the king square nor the two squares in the direction of the rock can be under attack for xi := kingPos.x; xi != kingPos.x + 3 * direction; xi += direction { newPos := Position{ xi, kingPos.y } if isUnderAttack(board, newPos, kingInfo.color) { return } } // apply move to king & rock ok = true newBoard = ApplyCastling(board, kingPos, kingInfo, direction) return } func addCastlingMoves(board Board, kingPos Position, kingInfo PieceInfo, moves []Board) (newMoves []Board) { newMoves = moves if kingInfo.status != PieceStatus_Default { return } dirs := []int { -1, 1 } for 
_, dir := range dirs { move, ok := addCastlingMove(board, kingPos, kingInfo, dir) if ok { newMoves = append(newMoves, move) } } return } // addPawnMove adds either the pawn move, or all available promotions if the move is a promotion func addPawnMove(board Board, info PieceInfo, move FullMove, isEnPassant bool, moves []Board) (newMoves []Board) { newMoves = moves updateStates := true newPos := PositionAdd(move.pos, move.move) if newPos.y != 0 && newPos.y != 7 { var newMove Board if isEnPassant { newMove = ApplyEnPassant(board, move, updateStates) } else { newMove = ApplyMove(board, move, updateStates) } newMoves = append(newMoves, newMove) return } availablePromotions := []Piece{ Piece_Queen, Piece_Rock, Piece_Bishop, Piece_Knight } for _, newPiece := range availablePromotions { status := PieceStatus_Default if newPiece == Piece_Rock { status = PieceStatus_CastlingNotAllowed } newBoard := ApplyPawnPromotion(board, move, newPiece, updateStates) SetBoardAt(&newBoard, newPos, PieceInfo{ newPiece, status, info.color }) newMoves = append(newMoves, newBoard) } return } // addPawnSpecialMoves adds to list, the captures that can be done by a given pawn (including en-passant) and // the promotion func addPawnSpecialMoves(board Board, pos Position, info PieceInfo, moves []Board) (newMoves []Board) { newMoves = moves yDirection := 1 if info.color == PieceColor_White { yDirection = -1 } newy := pos.y + yDirection if newy < 0 || newy > 7 { return } xDirections := []int{ -1, 1 } for _, xDirection := range xDirections { newx := pos.x + xDirection fullMove := FullMove{ pos, Move{ xDirection, yDirection } } if newx < 0 || newx > 7 { continue } enemyInfo := GetBoardAt(board, Position{ newx, newy }) isEnPassant := false if enemyInfo.piece != Piece_Empty { // normal capture if enemyInfo.color != info.color { newMoves = addPawnMove(board, info, fullMove, isEnPassant, newMoves) } } else { // try en-passant isEnPassant = true enPassantPos := Position{ newx, pos.y } enPassantInfo := 
GetBoardAt(board, enPassantPos) if enPassantInfo.color != info.color && enPassantInfo.piece == Piece_Pawn && enPassantInfo.status == PieceStatus_EnPassantAllowed { tmpMoves := []Board{} tmpMoves = addPawnMove(board, info, fullMove, isEnPassant, tmpMoves) newMoves = append(newMoves, tmpMoves...) } } } return }
newBoards := make([]Board, 0, len(boards)) for _, b := range boards { if len(GetPieces(b, Piece_King, color)) == 0 { // TODO: remove this, only here to debug fmt.Println("DEBUG BOARD!!") DrawBoard(b) } kingPos := GetPieces(b, Piece_King, color)[0] if !isUnderAttack(b, kingPos, color) { newBoards = append(newBoards, b) } } return newBoards } // GetPossibleMoves returns the list of moves that can be done by a single piece. // It doesn't take checks into account, except for castling. // Params: // - filterCheckMoves = true forces the removal of any moves that puts the king under attack. // - quickMode = true skips some steps that aren't necessary for secondary uses of this // function: computing castling and updating state info. func GetPossibleMoves(board Board, pos Position, info PieceInfo, filterCheckMoves bool, quickMode bool) []Board { seqs := movesMap[info.color][info.piece] moves := []Move{} for _, seq := range seqs { for _, move := range seq { newPos := PositionAdd(pos, move) if !PositionInBoard(newPos) { break } infoHere := GetBoardAt(board, newPos) if infoHere.piece == Piece_Empty { moves = append(moves, move) } else { if infoHere.color != info.color && info.piece != Piece_Pawn { moves = append(moves, move) } break } } } boards := []Board{} updateStates := !quickMode for _, m := range moves { boards = append(boards, ApplyMove(board, FullMove{pos, m}, updateStates)) } // we assume first move is one step, second move is two steps... 
this is always correct because // of the MoveSeq definition pawnWithMoves := info.piece == Piece_Pawn && len(moves) != 0 if pawnWithMoves && ((info.color == PieceColor_Black && pos.y != 1) || (info.color == PieceColor_White && pos.y != 6)) { moves = moves[:1] } if info.piece == Piece_Pawn { if len(moves) == 1 { isEnPassant := false boards = addPawnMove(board, info, FullMove{ pos, moves[0] }, isEnPassant, []Board{}) } boards = addPawnSpecialMoves(board, pos, info, boards) } if !quickMode && info.piece == Piece_King { boards = addCastlingMoves(board, pos, info, boards) } if filterCheckMoves { boards = removeCheckMoves(boards, info.color) } return boards } // GetAllPossibleMoves returns all possible moves for pieces of a given color // (more details about arguments in GetPossibleMoves) func GetAllPossibleMoves(board Board, color PieceColor, filterCheckMoves bool, quickMode bool) []Board { positions := GetPiecesByColor(board, color) allMoves := []Board{} for _, pos := range positions { info := GetBoardAt(board, pos) allMoves = append(allMoves, GetPossibleMoves(board, pos, info, filterCheckMoves, quickMode)...) } return allMoves } // isUnderAttack tells whether a piece with color=color is under attack by any enemy piece. // This is the slow, but easy implementation. 
func isUnderAttack(board Board, pos Position, color PieceColor) bool { var enemies []Position = GetPiecesByColor(board, !color) filterCheckMoves := false quickMode := true for _, ePos := range enemies { enemyInfo := GetBoardAt(board, ePos) enemyMoves := GetPossibleMoves(board, ePos, enemyInfo, filterCheckMoves, quickMode) for _, enemyMove := range enemyMoves { infoHere := GetBoardAt(enemyMove, pos) if infoHere.piece != Piece_Empty && infoHere.color != color { return true } } } return false } func IsValidMove(board Board, piecePos Position, newBoard Board) bool { quickMode := false filterCheckMoves := true info := GetBoardAt(board, piecePos) moves := GetPossibleMoves(board, piecePos, info, filterCheckMoves, quickMode) for _, m := range moves { if m == newBoard { return true } } return false } func GetPossibleMoveCount(board Board, color PieceColor, filterCheckMoves bool) int { count := 0 quickMode := true for _, pos := range GetPiecesByColor(board, color) { info := GetBoardAt(board, pos) count += len(GetPossibleMoves(board, pos, info, filterCheckMoves, quickMode)) } return count } // resetPawnsStatus resets the status of all pawns of a given color; this means no contrary pawn can capture // any pawn using en-passant after this func resetPawnsStatus(board Board, color PieceColor) Board { for _, pos := range GetPieces(board, Piece_Pawn, color) { info := GetBoardAt(board, pos) SetBoardAt(&board, pos, PieceInfo{ info.piece, PieceStatus_Default, info.color }) } return board } // ApplyMove executes a move in a board; it assumes the move is a valid one, and it only applies simple moves // (castling or en-passant can't use this function) // Because the resetPawnsStatus call that updates states for the en-passant capture can be slow, we allow that // to be disabled. 
func ApplyMove(board Board, fullMove FullMove, updateStates bool) Board { info := GetBoardAt(board, fullMove.pos) // switch state changes for castling & en-passant if updateStates { board = resetPawnsStatus(board, info.color) if info.piece == Piece_King || info.piece == Piece_Rock { info.status = PieceStatus_CastlingNotAllowed } if info.piece == Piece_Pawn && math.Abs(float64(fullMove.move.y)) == 2. { info.status = PieceStatus_EnPassantAllowed } if info.piece == Piece_Pawn && math.Abs(float64(fullMove.move.y)) == 1. { info.status = PieceStatus_Default } } SetBoardAt(&board, fullMove.pos, EmptyPieceInfo) SetBoardAt(&board, PositionAdd(fullMove.pos, fullMove.move), info) return board } func initMovesMap(color PieceColor) map[Piece]MoveSeqs { m := make(map[Piece]MoveSeqs) // each sequence has to be in an order such that move n can only be done if // move n-1 is also possible (this takes care of collisions) // pawn dir := 1 if color == PieceColor_White { dir = -1 } m[Piece_Pawn] = MoveSeqs{ MoveSeq{ Move{0, dir}, Move{0, 2 * dir} } } // rock rmoves := MoveSeqs{ MoveSeq{}, MoveSeq{}, MoveSeq{}, MoveSeq{} } for i := 1; i < 8; i ++ { rmoves[0] = append(rmoves[0], Move{0, i}) rmoves[1] = append(rmoves[1], Move{0, -i}) rmoves[2] = append(rmoves[2], Move{i, 0}) rmoves[3] = append(rmoves[3], Move{-i, 0}) } m[Piece_Rock] = MoveSeqs{ MoveSeq(rmoves[0]), MoveSeq(rmoves[1]), MoveSeq(rmoves[2]), MoveSeq(rmoves[3]) } // bishop bmoves := MoveSeqs{ MoveSeq{}, MoveSeq{}, MoveSeq{}, MoveSeq{} } for i := 1; i < 8; i ++ { bmoves[0] = append(bmoves[0], Move{i, i}) bmoves[1] = append(bmoves[1], Move{-i, -i}) bmoves[2] = append(bmoves[2], Move{i, -i}) bmoves[3] = append(bmoves[3], Move{-i, i}) } m[Piece_Bishop] = MoveSeqs{ MoveSeq(bmoves[0]), MoveSeq(bmoves[1]), MoveSeq(bmoves[2]), MoveSeq(bmoves[3]) } // queen qmoves := MoveSeqs{} qmoves = append(qmoves, rmoves...) qmoves = append(qmoves, bmoves...) 
m[Piece_Queen] = qmoves // king kmoves := MoveSeqs{} for i := 0; i < len(qmoves); i ++ { kmoves = append(kmoves, MoveSeq{ qmoves[i][0] }) } m[Piece_King] = kmoves // knight m[Piece_Knight] = MoveSeqs{ MoveSeq{ Move{-2, -1} }, MoveSeq{ Move{-1, -2} }, MoveSeq{ Move{2, 1} }, MoveSeq{ Move{1, 2} }, MoveSeq{ Move{-2, 1} }, MoveSeq{ Move{-1, 2} }, MoveSeq{ Move{2, -1} }, MoveSeq{ Move{1, -2} }, } return m } // isCheckMate tells whether the king of a color is in checkmate. func isCheckMate(board Board, availableMoveCount int, color PieceColor) bool { kingPos := GetPieces(board, Piece_King, color)[0] if availableMoveCount == 0 && isUnderAttack(board, kingPos, color) { return true } return false } // GetGameStatus tells whether game is finished or not, and who wins if it is finished func GetGameStatus(board Board, nextTurnColor PieceColor, availableMoveCount int) (finished bool, draw bool, winningColor PieceColor) { finished = true if isCheckMate(board, availableMoveCount, nextTurnColor) { winningColor = !nextTurnColor return } if availableMoveCount == 0 { draw = true return } finished = false return } func init() { movesMap[PieceColor_Black] = initMovesMap(PieceColor_Black) movesMap[PieceColor_White] = initMovesMap(PieceColor_White) }
// removeCheckMoves gets rid of any moves that put the king under attack func removeCheckMoves(boards []Board, color PieceColor) []Board {
random_line_split
moves.go
package main import "fmt" import "math" type Position struct { x, y int } type Move struct { x, y int // move relative to current position } type FullMove struct { pos Position move Move } // the result of applying a move type Turn struct { board Board lastMove FullMove } // MoveSeq contains a list of moves such that, moves[n] is valid only if moves[n - 1] is valid as well. // This allows us to tell easily that you can't, for example, move a rock two places away if you can't do it one time // away in the same direction. type MoveSeq []Move // MoveSeqs contains a list of MoveSeq; each MoveSeq is independent from the others type MoveSeqs []MoveSeq // movesMap stores the relative movement for each piece var movesMap = map[PieceColor]map[Piece]MoveSeqs {} func PositionAdd(pos Position, move Move) Position { return Position{ pos.x + move.x, pos.y + move.y } } func PositionDiff(pos Position, move Move) Position { return Position{ pos.x - move.x, pos.y - move.y } } // ApplyCastling applies the castling move in one specific direction; it assumes castling is valid func ApplyCastling(board Board, kingPos Position, kingInfo PieceInfo, direction int) (newBoard Board) { var rockMove, kingMove FullMove newBoard = board if direction < 0 { rockMove = FullMove{ Position{0, kingPos.y}, Move{3, 0} } } else { rockMove = FullMove{ Position{7, kingPos.y}, Move{-2, 0} } } SetBoardAt(&newBoard, rockMove.pos, PieceInfo{ Piece_Rock, PieceStatus_CastlingNotAllowed, kingInfo.color }) SetBoardAt(&newBoard, kingPos, PieceInfo{ kingInfo.piece, PieceStatus_CastlingNotAllowed, kingInfo.color }) updateStates := true kingMove = FullMove{ kingPos, Move{direction * 2, 0} } newBoard = ApplyMove(newBoard, rockMove, updateStates) newBoard = ApplyMove(newBoard, kingMove, updateStates) return } // ApplyEnPassant applies en-passant move; it assumes the move is valid func ApplyEnPassant(board Board, fullMove FullMove, updateStates bool) Board { newPos := PositionAdd(fullMove.pos, fullMove.move) board = 
ApplyMove(board, fullMove, updateStates) SetBoardAt(&board, Position{ newPos.x, fullMove.pos.y }, EmptyPieceInfo) return board } // ApplyPawnPromotion applies promotion move for one selected promotion type; it assumes the move is valid func ApplyPawnPromotion(board Board, fullMove FullMove, selectedPiece Piece, updateStates bool) Board { info := GetBoardAt(board, fullMove.pos) newPos := PositionAdd(fullMove.pos, fullMove.move) board = ApplyMove(board, fullMove, updateStates) status := PieceStatus_Default if selectedPiece == Piece_Rock { status = PieceStatus_CastlingNotAllowed } SetBoardAt(&board, newPos, PieceInfo{ selectedPiece, status, info.color }) return board } // addCastlingMove computes the board for a left or right castling move for the given king. // direction is either -1 (left) or 1 (right) func addCastlingMove(board Board, kingPos Position, kingInfo PieceInfo, direction int) (newBoard Board, ok bool) { var rockPos Position rockPos = Position{ 0, kingPos.y } if direction == 1 { rockPos.x = 7 } rockInfo := GetBoardAt(board, rockPos) if rockInfo.piece != Piece_Rock || rockInfo.status != PieceStatus_Default || rockInfo.color != kingInfo.color { return } // all squares between king and rock must be empty for xi := kingPos.x + direction; xi != rockPos.x; xi += direction { newPos := Position{ xi, kingPos.y } newInfo := GetBoardAt(board, newPos) if newInfo.piece != Piece_Empty { return } } // neither the king square nor the two squares in the direction of the rock can be under attack for xi := kingPos.x; xi != kingPos.x + 3 * direction; xi += direction { newPos := Position{ xi, kingPos.y } if isUnderAttack(board, newPos, kingInfo.color) { return } } // apply move to king & rock ok = true newBoard = ApplyCastling(board, kingPos, kingInfo, direction) return } func addCastlingMoves(board Board, kingPos Position, kingInfo PieceInfo, moves []Board) (newMoves []Board) { newMoves = moves if kingInfo.status != PieceStatus_Default { return } dirs := []int { -1, 1 } for 
_, dir := range dirs { move, ok := addCastlingMove(board, kingPos, kingInfo, dir) if ok { newMoves = append(newMoves, move) } } return } // addPawnMove adds either the pawn move, or all available promotions if the move is a promotion func addPawnMove(board Board, info PieceInfo, move FullMove, isEnPassant bool, moves []Board) (newMoves []Board) { newMoves = moves updateStates := true newPos := PositionAdd(move.pos, move.move) if newPos.y != 0 && newPos.y != 7 { var newMove Board if isEnPassant { newMove = ApplyEnPassant(board, move, updateStates) } else { newMove = ApplyMove(board, move, updateStates) } newMoves = append(newMoves, newMove) return } availablePromotions := []Piece{ Piece_Queen, Piece_Rock, Piece_Bishop, Piece_Knight } for _, newPiece := range availablePromotions { status := PieceStatus_Default if newPiece == Piece_Rock { status = PieceStatus_CastlingNotAllowed } newBoard := ApplyPawnPromotion(board, move, newPiece, updateStates) SetBoardAt(&newBoard, newPos, PieceInfo{ newPiece, status, info.color }) newMoves = append(newMoves, newBoard) } return } // addPawnSpecialMoves adds to list, the captures that can be done by a given pawn (including en-passant) and // the promotion func addPawnSpecialMoves(board Board, pos Position, info PieceInfo, moves []Board) (newMoves []Board)
// removeCheckMoves gets rid of any moves that put the king under attack func removeCheckMoves(boards []Board, color PieceColor) []Board { newBoards := make([]Board, 0, len(boards)) for _, b := range boards { if len(GetPieces(b, Piece_King, color)) == 0 { // TODO: remove this, only here to debug fmt.Println("DEBUG BOARD!!") DrawBoard(b) } kingPos := GetPieces(b, Piece_King, color)[0] if !isUnderAttack(b, kingPos, color) { newBoards = append(newBoards, b) } } return newBoards } // GetPossibleMoves returns the list of moves that can be done by a single piece. // It doesn't take checks into account, except for castling. // Params: // - filterCheckMoves = true forces the removal of any moves that puts the king under attack. // - quickMode = true skips some steps that aren't necessary for secondary uses of this // function: computing castling and updating state info. func GetPossibleMoves(board Board, pos Position, info PieceInfo, filterCheckMoves bool, quickMode bool) []Board { seqs := movesMap[info.color][info.piece] moves := []Move{} for _, seq := range seqs { for _, move := range seq { newPos := PositionAdd(pos, move) if !PositionInBoard(newPos) { break } infoHere := GetBoardAt(board, newPos) if infoHere.piece == Piece_Empty { moves = append(moves, move) } else { if infoHere.color != info.color && info.piece != Piece_Pawn { moves = append(moves, move) } break } } } boards := []Board{} updateStates := !quickMode for _, m := range moves { boards = append(boards, ApplyMove(board, FullMove{pos, m}, updateStates)) } // we assume first move is one step, second move is two steps... 
this is always correct because // of the MoveSeq definition pawnWithMoves := info.piece == Piece_Pawn && len(moves) != 0 if pawnWithMoves && ((info.color == PieceColor_Black && pos.y != 1) || (info.color == PieceColor_White && pos.y != 6)) { moves = moves[:1] } if info.piece == Piece_Pawn { if len(moves) == 1 { isEnPassant := false boards = addPawnMove(board, info, FullMove{ pos, moves[0] }, isEnPassant, []Board{}) } boards = addPawnSpecialMoves(board, pos, info, boards) } if !quickMode && info.piece == Piece_King { boards = addCastlingMoves(board, pos, info, boards) } if filterCheckMoves { boards = removeCheckMoves(boards, info.color) } return boards } // GetAllPossibleMoves returns all possible moves for pieces of a given color // (more details about arguments in GetPossibleMoves) func GetAllPossibleMoves(board Board, color PieceColor, filterCheckMoves bool, quickMode bool) []Board { positions := GetPiecesByColor(board, color) allMoves := []Board{} for _, pos := range positions { info := GetBoardAt(board, pos) allMoves = append(allMoves, GetPossibleMoves(board, pos, info, filterCheckMoves, quickMode)...) } return allMoves } // isUnderAttack tells whether a piece with color=color is under attack by any enemy piece. // This is the slow, but easy implementation. 
func isUnderAttack(board Board, pos Position, color PieceColor) bool { var enemies []Position = GetPiecesByColor(board, !color) filterCheckMoves := false quickMode := true for _, ePos := range enemies { enemyInfo := GetBoardAt(board, ePos) enemyMoves := GetPossibleMoves(board, ePos, enemyInfo, filterCheckMoves, quickMode) for _, enemyMove := range enemyMoves { infoHere := GetBoardAt(enemyMove, pos) if infoHere.piece != Piece_Empty && infoHere.color != color { return true } } } return false } func IsValidMove(board Board, piecePos Position, newBoard Board) bool { quickMode := false filterCheckMoves := true info := GetBoardAt(board, piecePos) moves := GetPossibleMoves(board, piecePos, info, filterCheckMoves, quickMode) for _, m := range moves { if m == newBoard { return true } } return false } func GetPossibleMoveCount(board Board, color PieceColor, filterCheckMoves bool) int { count := 0 quickMode := true for _, pos := range GetPiecesByColor(board, color) { info := GetBoardAt(board, pos) count += len(GetPossibleMoves(board, pos, info, filterCheckMoves, quickMode)) } return count } // resetPawnsStatus resets the status of all pawns of a given color; this means no contrary pawn can capture // any pawn using en-passant after this func resetPawnsStatus(board Board, color PieceColor) Board { for _, pos := range GetPieces(board, Piece_Pawn, color) { info := GetBoardAt(board, pos) SetBoardAt(&board, pos, PieceInfo{ info.piece, PieceStatus_Default, info.color }) } return board } // ApplyMove executes a move in a board; it assumes the move is a valid one, and it only applies simple moves // (castling or en-passant can't use this function) // Because the resetPawnsStatus call that updates states for the en-passant capture can be slow, we allow that // to be disabled. 
func ApplyMove(board Board, fullMove FullMove, updateStates bool) Board { info := GetBoardAt(board, fullMove.pos) // switch state changes for castling & en-passant if updateStates { board = resetPawnsStatus(board, info.color) if info.piece == Piece_King || info.piece == Piece_Rock { info.status = PieceStatus_CastlingNotAllowed } if info.piece == Piece_Pawn && math.Abs(float64(fullMove.move.y)) == 2. { info.status = PieceStatus_EnPassantAllowed } if info.piece == Piece_Pawn && math.Abs(float64(fullMove.move.y)) == 1. { info.status = PieceStatus_Default } } SetBoardAt(&board, fullMove.pos, EmptyPieceInfo) SetBoardAt(&board, PositionAdd(fullMove.pos, fullMove.move), info) return board } func initMovesMap(color PieceColor) map[Piece]MoveSeqs { m := make(map[Piece]MoveSeqs) // each sequence has to be in an order such that move n can only be done if // move n-1 is also possible (this takes care of collisions) // pawn dir := 1 if color == PieceColor_White { dir = -1 } m[Piece_Pawn] = MoveSeqs{ MoveSeq{ Move{0, dir}, Move{0, 2 * dir} } } // rock rmoves := MoveSeqs{ MoveSeq{}, MoveSeq{}, MoveSeq{}, MoveSeq{} } for i := 1; i < 8; i ++ { rmoves[0] = append(rmoves[0], Move{0, i}) rmoves[1] = append(rmoves[1], Move{0, -i}) rmoves[2] = append(rmoves[2], Move{i, 0}) rmoves[3] = append(rmoves[3], Move{-i, 0}) } m[Piece_Rock] = MoveSeqs{ MoveSeq(rmoves[0]), MoveSeq(rmoves[1]), MoveSeq(rmoves[2]), MoveSeq(rmoves[3]) } // bishop bmoves := MoveSeqs{ MoveSeq{}, MoveSeq{}, MoveSeq{}, MoveSeq{} } for i := 1; i < 8; i ++ { bmoves[0] = append(bmoves[0], Move{i, i}) bmoves[1] = append(bmoves[1], Move{-i, -i}) bmoves[2] = append(bmoves[2], Move{i, -i}) bmoves[3] = append(bmoves[3], Move{-i, i}) } m[Piece_Bishop] = MoveSeqs{ MoveSeq(bmoves[0]), MoveSeq(bmoves[1]), MoveSeq(bmoves[2]), MoveSeq(bmoves[3]) } // queen qmoves := MoveSeqs{} qmoves = append(qmoves, rmoves...) qmoves = append(qmoves, bmoves...) 
m[Piece_Queen] = qmoves // king kmoves := MoveSeqs{} for i := 0; i < len(qmoves); i ++ { kmoves = append(kmoves, MoveSeq{ qmoves[i][0] }) } m[Piece_King] = kmoves // knight m[Piece_Knight] = MoveSeqs{ MoveSeq{ Move{-2, -1} }, MoveSeq{ Move{-1, -2} }, MoveSeq{ Move{2, 1} }, MoveSeq{ Move{1, 2} }, MoveSeq{ Move{-2, 1} }, MoveSeq{ Move{-1, 2} }, MoveSeq{ Move{2, -1} }, MoveSeq{ Move{1, -2} }, } return m } // isCheckMate tells whether the king of a color is in checkmate. func isCheckMate(board Board, availableMoveCount int, color PieceColor) bool { kingPos := GetPieces(board, Piece_King, color)[0] if availableMoveCount == 0 && isUnderAttack(board, kingPos, color) { return true } return false } // GetGameStatus tells whether game is finished or not, and who wins if it is finished func GetGameStatus(board Board, nextTurnColor PieceColor, availableMoveCount int) (finished bool, draw bool, winningColor PieceColor) { finished = true if isCheckMate(board, availableMoveCount, nextTurnColor) { winningColor = !nextTurnColor return } if availableMoveCount == 0 { draw = true return } finished = false return } func init() { movesMap[PieceColor_Black] = initMovesMap(PieceColor_Black) movesMap[PieceColor_White] = initMovesMap(PieceColor_White) }
{ newMoves = moves yDirection := 1 if info.color == PieceColor_White { yDirection = -1 } newy := pos.y + yDirection if newy < 0 || newy > 7 { return } xDirections := []int{ -1, 1 } for _, xDirection := range xDirections { newx := pos.x + xDirection fullMove := FullMove{ pos, Move{ xDirection, yDirection } } if newx < 0 || newx > 7 { continue } enemyInfo := GetBoardAt(board, Position{ newx, newy }) isEnPassant := false if enemyInfo.piece != Piece_Empty { // normal capture if enemyInfo.color != info.color { newMoves = addPawnMove(board, info, fullMove, isEnPassant, newMoves) } } else { // try en-passant isEnPassant = true enPassantPos := Position{ newx, pos.y } enPassantInfo := GetBoardAt(board, enPassantPos) if enPassantInfo.color != info.color && enPassantInfo.piece == Piece_Pawn && enPassantInfo.status == PieceStatus_EnPassantAllowed { tmpMoves := []Board{} tmpMoves = addPawnMove(board, info, fullMove, isEnPassant, tmpMoves) newMoves = append(newMoves, tmpMoves...) } } } return }
identifier_body
main.go
package main import ( "crypto/rand" "errors" "flag" "fmt" "go/build" "io/ioutil" "log" "math/big" "net/http" "os" "path/filepath" "runtime" "sort" "strings" "unicode/utf8" "github.com/remeh/sizedwaitgroup" "github.com/sirupsen/logrus" "iochen.com/v2gen/v2" "iochen.com/v2gen/v2/common/base64" "iochen.com/v2gen/v2/common/mean" "iochen.com/v2gen/v2/infra" "iochen.com/v2gen/v2/ping" "iochen.com/v2gen/v2/vmess" ) var ( Version = "v2.0.0-dev" FlagLoglevel = flag.String("loglevel", "warn", "log level") FlagLog = flag.String("log", "-", "log output file") FlagAddr = flag.String("u", "", "subscription address(URL)") FlagOut = flag.String("o", "/etc/v2ray/config.json", "output path") FlagConf = flag.String("config", "/etc/v2ray/v2gen.ini", "v2gen config path") FlagTPL = flag.String("template", "", "V2Ray template path") FlagInit = flag.Bool("init", false, "init v2gen config (specify certain path with -config)") FlagRandom = flag.Bool("random", false, "random node index") FlagPing = flag.Bool("ping", true, "ping nodes") FlagDest = flag.String("dst", "https://cloudflare.com/cdn-cgi/trace", "test destination url (vmess ping only)") FlagCount = flag.Int("c", 3, "ping count for each node") // FlagMedian = flag.Bool("med", false, "use median instead of ArithmeticMean") FlagThreads = flag.Int("thread", 3, "threads used when pinging") FlagBest = flag.Bool("best", false, "use best node judged by ping result") FlagPipe = flag.Bool("pipe", true, "read from pipe") FlagVersion = flag.Bool("v", false, "show version") ) /* function main may be too long, here is a simple step list: ################################################################################################# # STEP 1 (READ): # # 1. read links from subscription(net) and pipe. # # # # STEP 2 (PROCESS): # # TYPE 1 (PING): # # SUBTYPE 1.1 (BEST): # # 1. ping. # # 2. choose the best node. # # # # SUBTYPE 1.2 (RANDOM): # # 1. ping. # # 2. filter out available node list A. 
# # NOTE: if exist nodes that no error, then A would be them(it), # # else A would be all of them. # # 3. randomly choose one from A. # # # # SUBTYPE 1.3 (DEFAULT): # # 1. ping. # # 2. print nodes and ping info. # # 3. wait for user's choosing. # # # # TYPE 2 (NOT PING): # # SUBTYPE 1.2 (RANDOM): # # 1. randomly choose one from nodes. # # # # SUBTYPE 1.3 (DEFAULT): # # 1. print nodes and ping info. # # 2. wait for user's choosing. # # # # STEP 3 (RENDER AND WRITE): # # 1. render and write. # ################################################################################################# */ type PingInfo struct { Status *ping.Status Duration ping.Duration Link v2gen.Link Err error } type PingInfoList []*PingInfo func (pf *PingInfoList) Len() int { return len(*pf) } func (pf *PingInfoList) Less(i, j int) bool { if (*pf)[i].Err != nil { return false } else if (*pf)[j].Err != nil { return true } if len((*pf)[i].Status.Errors) != len((*pf)[j].Status.Errors) { return len((*pf)[i].Status.Errors) < len((*pf)[j].Status.Errors) } return (*pf)[i].Duration < (*pf)[j].Duration } func (pf *PingInfoList) Swap(i, j int) { (*pf)[i], (*pf)[j] = (*pf)[j], (*pf)[i] } func main() { flag.Parse() /* LOG PART */ logger := logrus.New() if *FlagLog != "-" && *FlagLog != "" { file, err := os.Create(*FlagLog) if err != nil { logrus.Fatal(err) } defer file.Close() _, err = file.Write([]byte(version() + "\n")) if err != nil { panic("cannot write into log file") } logger.Out = file } // set log level level, err := logrus.ParseLevel(*FlagLoglevel) if err != nil { logger.Panic(err) } logger.SetLevel(level) /* FLAG PART */ // if -v || trace, debug, info if *FlagVersion { fmt.Println(version()) return } else if level > logrus.ErrorLevel { fmt.Println(version()) } // if -init if *FlagInit { err := ioutil.WriteFile(*FlagConf, []byte(infra.DefaultV2GenConf), 0644) if err != nil { panic(err) return } logger.Info("v2gen config initialized") return } /* LINK PART */ var linkList []v2gen.Link // combine 
links from different sources // read from subscribe address(net) if *FlagAddr != "" { logger.Infof("Reading from %s...", *FlagAddr) resp, err := http.Get(*FlagAddr) if err != nil { logger.Fatal(err) } defer resp.Body.Close() bytes, err := ioutil.ReadAll(resp.Body) if err != nil { logger.Fatal(err) } links, err := ParseLinks(bytes) if err != nil { logger.Fatal(err) } linkList = append(linkList, links...) } // check whether reading from pipe if fi, _ := os.Stdin.Stat(); (fi.Mode()&os.ModeCharDevice) == 0 && *FlagPipe { logger.Info("Reading from pipe...") bytes, err := ioutil.ReadAll(os.Stdin) if err != nil { log.Fatal(err) } links, err := ParseLinks(bytes) if err != nil { logger.Fatal(err) } linkList = append(linkList, links...) } // if no Link, then exit if len(linkList) == 0 { logger.Warn("no available links, nothing to do") os.Exit(0) } var chosenLink v2gen.Link var spaceCount = func(i int, str string) string { rl := utf8.RuneCountInString(str) c := i - (len(str)+rl)/2 if c < 0 { c = 0 } return strings.Repeat(" ", c) } if *FlagPing { // if ping // make ping info list pingInfoList := make(PingInfoList, len(linkList)) wg := sizedwaitgroup.New(*FlagThreads) for i := range linkList { wg.Add() go func(i int) { logger.Debugf("[%d/%d]Pinging %s\n", i, len(linkList)-1, linkList[i].Safe()) if level > logrus.ErrorLevel { fmt.Printf("\rPinging %d/%d", i, len(linkList)-1) } defer func() { wg.Done() }() pingInfoList[i] = &PingInfo{ Link: linkList[i], } status, err := linkList[i].Ping(*FlagCount, *FlagDest) if status.Durations == nil || len(*status.Durations) == 0 { pingInfoList[i].Err = errors.New("all error") status.Durations = &ping.DurationList{-1} } if err != nil { pingInfoList[i].Err = err pingInfoList[i].Status = &ping.Status{ Durations: &ping.DurationList{}, } } else { pingInfoList[i].Status = &status } }(i) } wg.Wait() fmt.Println() for i := range pingInfoList { var ok bool pingInfoList[i].Duration, ok = 
mean.ArithmeticMean(pingInfoList[i].Status.Durations).(ping.Duration) if !ok { pingInfoList[i].Duration = 0 } } sort.Sort(&pingInfoList) if *FlagBest { // if ping && best chosenLink = pingInfoList[0].Link } else if *FlagRandom { // if ping && rand pingInfoList = AvailableLinks(pingInfoList) i, err := Random(len(pingInfoList)) if err != nil { logger.Fatal(err) } chosenLink = pingInfoList[i].Link } else { // if ping && not rand && not best for i := range pingInfoList { fmt.Printf("[%2d] %s%s[%-7s(%d errors)]\n", i, pingInfoList[i].Link.Description(), spaceCount(30, pingInfoList[i].Link.Description()), pingInfoList[i].Duration.Precision(1e6), len(pingInfoList[i].Status.Errors)) } i := Select(len(pingInfoList)) chosenLink = pingInfoList[i].Link } } else { // if not ping if *FlagRandom { // if not ping && rand i, err := Random(len(linkList)) if err != nil { logger.Fatal(err) } chosenLink = linkList[i] } else { // if not ping && not rand for i := range linkList { fmt.Printf("[%2d] %s%s\n", i, linkList[i].Description(), spaceCount(30, linkList[i].Description())) } i := Select(len(linkList)) chosenLink = linkList[i] } } /* CONFIG PART */ var template []byte template = []byte(infra.ConfigTpl) if *FlagTPL != "" { tpl, err := ioutil.ReadFile(*FlagTPL) if err != nil { logrus.Error(err, "using default template...") } else { template = tpl } } v2genConf := infra.V2genConfig{} confFile, err := ioutil.ReadFile(*FlagConf) if err == nil { v2genConf = infra.ParseV2genConf(confFile) } conf := infra.DefaultConf() bytes, err := infra.GenV2RayConf(*conf.Append(v2genConf).Append(chosenLink.Config()), template) if err != nil { logrus.Fatal(err) } if *FlagOut == "-" || *FlagOut == ""
else { err := ioutil.WriteFile(*FlagOut, bytes, 0644) if err != nil { logrus.Fatal(err) } else { if level > logrus.ErrorLevel { fmt.Printf("config has been written to %s\n", filepath.Clean(*FlagOut)) } } } } func version() string { return fmt.Sprintf("v2gen %s, V2Ray %s (%s %dcores %s/%s)", Version, vmess.CoreVersion(), runtime.Version(), runtime.NumCPU(), build.Default.GOOS, build.Default.GOARCH) } func ParseLinks(b []byte) ([]v2gen.Link, error) { s, err := base64.Decode(string(b)) if err != nil { return nil, err } linkList, err := vmess.Parse(s) if err != nil { return nil, err } links := make([]v2gen.Link, len(linkList)) for i := range linkList { links[i] = linkList[i] } return links, err } func AvailableLinks(pil PingInfoList) PingInfoList { var pingInfoList PingInfoList for i := range pil { if pil[i].Err != nil && len(pil[i].Status.Errors) == 0 { pingInfoList = append(pingInfoList, pil[i]) } } if len(pingInfoList) != 0 { return pingInfoList } else { return pil } } // Select returns an int [0,max) func Select(max int) int { var in int fmt.Print("=====================\nPlease Select: ") _, err := fmt.Scanf("%d", &in) if err != nil || in < 0 || in >= max { fmt.Println("wrong number, please reselect") return Select(max) } return in } func Random(max int) (int, error) { n, err := rand.Int(rand.Reader, big.NewInt(int64(max))) if err != nil { return 0, err } return int(n.Int64()), nil }
{ fmt.Println(string(bytes)) return }
conditional_block
main.go
package main import ( "crypto/rand" "errors" "flag" "fmt" "go/build" "io/ioutil" "log" "math/big" "net/http" "os" "path/filepath" "runtime" "sort" "strings" "unicode/utf8" "github.com/remeh/sizedwaitgroup" "github.com/sirupsen/logrus" "iochen.com/v2gen/v2" "iochen.com/v2gen/v2/common/base64" "iochen.com/v2gen/v2/common/mean" "iochen.com/v2gen/v2/infra" "iochen.com/v2gen/v2/ping" "iochen.com/v2gen/v2/vmess" ) var ( Version = "v2.0.0-dev" FlagLoglevel = flag.String("loglevel", "warn", "log level") FlagLog = flag.String("log", "-", "log output file") FlagAddr = flag.String("u", "", "subscription address(URL)") FlagOut = flag.String("o", "/etc/v2ray/config.json", "output path") FlagConf = flag.String("config", "/etc/v2ray/v2gen.ini", "v2gen config path") FlagTPL = flag.String("template", "", "V2Ray template path") FlagInit = flag.Bool("init", false, "init v2gen config (specify certain path with -config)") FlagRandom = flag.Bool("random", false, "random node index") FlagPing = flag.Bool("ping", true, "ping nodes") FlagDest = flag.String("dst", "https://cloudflare.com/cdn-cgi/trace", "test destination url (vmess ping only)") FlagCount = flag.Int("c", 3, "ping count for each node") // FlagMedian = flag.Bool("med", false, "use median instead of ArithmeticMean") FlagThreads = flag.Int("thread", 3, "threads used when pinging") FlagBest = flag.Bool("best", false, "use best node judged by ping result") FlagPipe = flag.Bool("pipe", true, "read from pipe") FlagVersion = flag.Bool("v", false, "show version") ) /* function main may be too long, here is a simple step list: ################################################################################################# # STEP 1 (READ): # # 1. read links from subscription(net) and pipe. # # # # STEP 2 (PROCESS): # # TYPE 1 (PING): # # SUBTYPE 1.1 (BEST): # # 1. ping. # # 2. choose the best node. # # # # SUBTYPE 1.2 (RANDOM): # # 1. ping. # # 2. filter out available node list A. 
# # NOTE: if exist nodes that no error, then A would be them(it), # # else A would be all of them. # # 3. randomly choose one from A. # # # # SUBTYPE 1.3 (DEFAULT): # # 1. ping. # # 2. print nodes and ping info. # # 3. wait for user's choosing. # # # # TYPE 2 (NOT PING): # # SUBTYPE 1.2 (RANDOM): # # 1. randomly choose one from nodes. # # # # SUBTYPE 1.3 (DEFAULT): # # 1. print nodes and ping info. # # 2. wait for user's choosing. # # # # STEP 3 (RENDER AND WRITE): # # 1. render and write. # ################################################################################################# */ type PingInfo struct { Status *ping.Status Duration ping.Duration Link v2gen.Link
Err error } type PingInfoList []*PingInfo func (pf *PingInfoList) Len() int { return len(*pf) } func (pf *PingInfoList) Less(i, j int) bool { if (*pf)[i].Err != nil { return false } else if (*pf)[j].Err != nil { return true } if len((*pf)[i].Status.Errors) != len((*pf)[j].Status.Errors) { return len((*pf)[i].Status.Errors) < len((*pf)[j].Status.Errors) } return (*pf)[i].Duration < (*pf)[j].Duration } func (pf *PingInfoList) Swap(i, j int) { (*pf)[i], (*pf)[j] = (*pf)[j], (*pf)[i] } func main() { flag.Parse() /* LOG PART */ logger := logrus.New() if *FlagLog != "-" && *FlagLog != "" { file, err := os.Create(*FlagLog) if err != nil { logrus.Fatal(err) } defer file.Close() _, err = file.Write([]byte(version() + "\n")) if err != nil { panic("cannot write into log file") } logger.Out = file } // set log level level, err := logrus.ParseLevel(*FlagLoglevel) if err != nil { logger.Panic(err) } logger.SetLevel(level) /* FLAG PART */ // if -v || trace, debug, info if *FlagVersion { fmt.Println(version()) return } else if level > logrus.ErrorLevel { fmt.Println(version()) } // if -init if *FlagInit { err := ioutil.WriteFile(*FlagConf, []byte(infra.DefaultV2GenConf), 0644) if err != nil { panic(err) return } logger.Info("v2gen config initialized") return } /* LINK PART */ var linkList []v2gen.Link // combine links from different sources // read from subscribe address(net) if *FlagAddr != "" { logger.Infof("Reading from %s...", *FlagAddr) resp, err := http.Get(*FlagAddr) if err != nil { logger.Fatal(err) } defer resp.Body.Close() bytes, err := ioutil.ReadAll(resp.Body) if err != nil { logger.Fatal(err) } links, err := ParseLinks(bytes) if err != nil { logger.Fatal(err) } linkList = append(linkList, links...) 
} // check whether reading from pipe if fi, _ := os.Stdin.Stat(); (fi.Mode()&os.ModeCharDevice) == 0 && *FlagPipe { logger.Info("Reading from pipe...") bytes, err := ioutil.ReadAll(os.Stdin) if err != nil { log.Fatal(err) } links, err := ParseLinks(bytes) if err != nil { logger.Fatal(err) } linkList = append(linkList, links...) } // if no Link, then exit if len(linkList) == 0 { logger.Warn("no available links, nothing to do") os.Exit(0) } var chosenLink v2gen.Link var spaceCount = func(i int, str string) string { rl := utf8.RuneCountInString(str) c := i - (len(str)+rl)/2 if c < 0 { c = 0 } return strings.Repeat(" ", c) } if *FlagPing { // if ping // make ping info list pingInfoList := make(PingInfoList, len(linkList)) wg := sizedwaitgroup.New(*FlagThreads) for i := range linkList { wg.Add() go func(i int) { logger.Debugf("[%d/%d]Pinging %s\n", i, len(linkList)-1, linkList[i].Safe()) if level > logrus.ErrorLevel { fmt.Printf("\rPinging %d/%d", i, len(linkList)-1) } defer func() { wg.Done() }() pingInfoList[i] = &PingInfo{ Link: linkList[i], } status, err := linkList[i].Ping(*FlagCount, *FlagDest) if status.Durations == nil || len(*status.Durations) == 0 { pingInfoList[i].Err = errors.New("all error") status.Durations = &ping.DurationList{-1} } if err != nil { pingInfoList[i].Err = err pingInfoList[i].Status = &ping.Status{ Durations: &ping.DurationList{}, } } else { pingInfoList[i].Status = &status } }(i) } wg.Wait() fmt.Println() for i := range pingInfoList { var ok bool pingInfoList[i].Duration, ok = mean.ArithmeticMean(pingInfoList[i].Status.Durations).(ping.Duration) if !ok { pingInfoList[i].Duration = 0 } } sort.Sort(&pingInfoList) if *FlagBest { // if ping && best chosenLink = pingInfoList[0].Link } else if *FlagRandom { // if ping && rand pingInfoList = AvailableLinks(pingInfoList) i, err := Random(len(pingInfoList)) if err != nil { logger.Fatal(err) } chosenLink = pingInfoList[i].Link } else { // if ping && not rand && not best for i := range pingInfoList { 
fmt.Printf("[%2d] %s%s[%-7s(%d errors)]\n", i, pingInfoList[i].Link.Description(), spaceCount(30, pingInfoList[i].Link.Description()), pingInfoList[i].Duration.Precision(1e6), len(pingInfoList[i].Status.Errors)) } i := Select(len(pingInfoList)) chosenLink = pingInfoList[i].Link } } else { // if not ping if *FlagRandom { // if not ping && rand i, err := Random(len(linkList)) if err != nil { logger.Fatal(err) } chosenLink = linkList[i] } else { // if not ping && not rand for i := range linkList { fmt.Printf("[%2d] %s%s\n", i, linkList[i].Description(), spaceCount(30, linkList[i].Description())) } i := Select(len(linkList)) chosenLink = linkList[i] } } /* CONFIG PART */ var template []byte template = []byte(infra.ConfigTpl) if *FlagTPL != "" { tpl, err := ioutil.ReadFile(*FlagTPL) if err != nil { logrus.Error(err, "using default template...") } else { template = tpl } } v2genConf := infra.V2genConfig{} confFile, err := ioutil.ReadFile(*FlagConf) if err == nil { v2genConf = infra.ParseV2genConf(confFile) } conf := infra.DefaultConf() bytes, err := infra.GenV2RayConf(*conf.Append(v2genConf).Append(chosenLink.Config()), template) if err != nil { logrus.Fatal(err) } if *FlagOut == "-" || *FlagOut == "" { fmt.Println(string(bytes)) return } else { err := ioutil.WriteFile(*FlagOut, bytes, 0644) if err != nil { logrus.Fatal(err) } else { if level > logrus.ErrorLevel { fmt.Printf("config has been written to %s\n", filepath.Clean(*FlagOut)) } } } } func version() string { return fmt.Sprintf("v2gen %s, V2Ray %s (%s %dcores %s/%s)", Version, vmess.CoreVersion(), runtime.Version(), runtime.NumCPU(), build.Default.GOOS, build.Default.GOARCH) } func ParseLinks(b []byte) ([]v2gen.Link, error) { s, err := base64.Decode(string(b)) if err != nil { return nil, err } linkList, err := vmess.Parse(s) if err != nil { return nil, err } links := make([]v2gen.Link, len(linkList)) for i := range linkList { links[i] = linkList[i] } return links, err } func AvailableLinks(pil PingInfoList) 
PingInfoList { var pingInfoList PingInfoList for i := range pil { if pil[i].Err != nil && len(pil[i].Status.Errors) == 0 { pingInfoList = append(pingInfoList, pil[i]) } } if len(pingInfoList) != 0 { return pingInfoList } else { return pil } } // Select returns an int [0,max) func Select(max int) int { var in int fmt.Print("=====================\nPlease Select: ") _, err := fmt.Scanf("%d", &in) if err != nil || in < 0 || in >= max { fmt.Println("wrong number, please reselect") return Select(max) } return in } func Random(max int) (int, error) { n, err := rand.Int(rand.Reader, big.NewInt(int64(max))) if err != nil { return 0, err } return int(n.Int64()), nil }
random_line_split
main.go
package main import ( "crypto/rand" "errors" "flag" "fmt" "go/build" "io/ioutil" "log" "math/big" "net/http" "os" "path/filepath" "runtime" "sort" "strings" "unicode/utf8" "github.com/remeh/sizedwaitgroup" "github.com/sirupsen/logrus" "iochen.com/v2gen/v2" "iochen.com/v2gen/v2/common/base64" "iochen.com/v2gen/v2/common/mean" "iochen.com/v2gen/v2/infra" "iochen.com/v2gen/v2/ping" "iochen.com/v2gen/v2/vmess" ) var ( Version = "v2.0.0-dev" FlagLoglevel = flag.String("loglevel", "warn", "log level") FlagLog = flag.String("log", "-", "log output file") FlagAddr = flag.String("u", "", "subscription address(URL)") FlagOut = flag.String("o", "/etc/v2ray/config.json", "output path") FlagConf = flag.String("config", "/etc/v2ray/v2gen.ini", "v2gen config path") FlagTPL = flag.String("template", "", "V2Ray template path") FlagInit = flag.Bool("init", false, "init v2gen config (specify certain path with -config)") FlagRandom = flag.Bool("random", false, "random node index") FlagPing = flag.Bool("ping", true, "ping nodes") FlagDest = flag.String("dst", "https://cloudflare.com/cdn-cgi/trace", "test destination url (vmess ping only)") FlagCount = flag.Int("c", 3, "ping count for each node") // FlagMedian = flag.Bool("med", false, "use median instead of ArithmeticMean") FlagThreads = flag.Int("thread", 3, "threads used when pinging") FlagBest = flag.Bool("best", false, "use best node judged by ping result") FlagPipe = flag.Bool("pipe", true, "read from pipe") FlagVersion = flag.Bool("v", false, "show version") ) /* function main may be too long, here is a simple step list: ################################################################################################# # STEP 1 (READ): # # 1. read links from subscription(net) and pipe. # # # # STEP 2 (PROCESS): # # TYPE 1 (PING): # # SUBTYPE 1.1 (BEST): # # 1. ping. # # 2. choose the best node. # # # # SUBTYPE 1.2 (RANDOM): # # 1. ping. # # 2. filter out available node list A. 
# # NOTE: if exist nodes that no error, then A would be them(it), # # else A would be all of them. # # 3. randomly choose one from A. # # # # SUBTYPE 1.3 (DEFAULT): # # 1. ping. # # 2. print nodes and ping info. # # 3. wait for user's choosing. # # # # TYPE 2 (NOT PING): # # SUBTYPE 1.2 (RANDOM): # # 1. randomly choose one from nodes. # # # # SUBTYPE 1.3 (DEFAULT): # # 1. print nodes and ping info. # # 2. wait for user's choosing. # # # # STEP 3 (RENDER AND WRITE): # # 1. render and write. # ################################################################################################# */ type PingInfo struct { Status *ping.Status Duration ping.Duration Link v2gen.Link Err error } type PingInfoList []*PingInfo func (pf *PingInfoList) Len() int { return len(*pf) } func (pf *PingInfoList) Less(i, j int) bool { if (*pf)[i].Err != nil { return false } else if (*pf)[j].Err != nil { return true } if len((*pf)[i].Status.Errors) != len((*pf)[j].Status.Errors) { return len((*pf)[i].Status.Errors) < len((*pf)[j].Status.Errors) } return (*pf)[i].Duration < (*pf)[j].Duration } func (pf *PingInfoList) Swap(i, j int) { (*pf)[i], (*pf)[j] = (*pf)[j], (*pf)[i] } func main()
func version() string { return fmt.Sprintf("v2gen %s, V2Ray %s (%s %dcores %s/%s)", Version, vmess.CoreVersion(), runtime.Version(), runtime.NumCPU(), build.Default.GOOS, build.Default.GOARCH) } func ParseLinks(b []byte) ([]v2gen.Link, error) { s, err := base64.Decode(string(b)) if err != nil { return nil, err } linkList, err := vmess.Parse(s) if err != nil { return nil, err } links := make([]v2gen.Link, len(linkList)) for i := range linkList { links[i] = linkList[i] } return links, err } func AvailableLinks(pil PingInfoList) PingInfoList { var pingInfoList PingInfoList for i := range pil { if pil[i].Err != nil && len(pil[i].Status.Errors) == 0 { pingInfoList = append(pingInfoList, pil[i]) } } if len(pingInfoList) != 0 { return pingInfoList } else { return pil } } // Select returns an int [0,max) func Select(max int) int { var in int fmt.Print("=====================\nPlease Select: ") _, err := fmt.Scanf("%d", &in) if err != nil || in < 0 || in >= max { fmt.Println("wrong number, please reselect") return Select(max) } return in } func Random(max int) (int, error) { n, err := rand.Int(rand.Reader, big.NewInt(int64(max))) if err != nil { return 0, err } return int(n.Int64()), nil }
{ flag.Parse() /* LOG PART */ logger := logrus.New() if *FlagLog != "-" && *FlagLog != "" { file, err := os.Create(*FlagLog) if err != nil { logrus.Fatal(err) } defer file.Close() _, err = file.Write([]byte(version() + "\n")) if err != nil { panic("cannot write into log file") } logger.Out = file } // set log level level, err := logrus.ParseLevel(*FlagLoglevel) if err != nil { logger.Panic(err) } logger.SetLevel(level) /* FLAG PART */ // if -v || trace, debug, info if *FlagVersion { fmt.Println(version()) return } else if level > logrus.ErrorLevel { fmt.Println(version()) } // if -init if *FlagInit { err := ioutil.WriteFile(*FlagConf, []byte(infra.DefaultV2GenConf), 0644) if err != nil { panic(err) return } logger.Info("v2gen config initialized") return } /* LINK PART */ var linkList []v2gen.Link // combine links from different sources // read from subscribe address(net) if *FlagAddr != "" { logger.Infof("Reading from %s...", *FlagAddr) resp, err := http.Get(*FlagAddr) if err != nil { logger.Fatal(err) } defer resp.Body.Close() bytes, err := ioutil.ReadAll(resp.Body) if err != nil { logger.Fatal(err) } links, err := ParseLinks(bytes) if err != nil { logger.Fatal(err) } linkList = append(linkList, links...) } // check whether reading from pipe if fi, _ := os.Stdin.Stat(); (fi.Mode()&os.ModeCharDevice) == 0 && *FlagPipe { logger.Info("Reading from pipe...") bytes, err := ioutil.ReadAll(os.Stdin) if err != nil { log.Fatal(err) } links, err := ParseLinks(bytes) if err != nil { logger.Fatal(err) } linkList = append(linkList, links...) 
} // if no Link, then exit if len(linkList) == 0 { logger.Warn("no available links, nothing to do") os.Exit(0) } var chosenLink v2gen.Link var spaceCount = func(i int, str string) string { rl := utf8.RuneCountInString(str) c := i - (len(str)+rl)/2 if c < 0 { c = 0 } return strings.Repeat(" ", c) } if *FlagPing { // if ping // make ping info list pingInfoList := make(PingInfoList, len(linkList)) wg := sizedwaitgroup.New(*FlagThreads) for i := range linkList { wg.Add() go func(i int) { logger.Debugf("[%d/%d]Pinging %s\n", i, len(linkList)-1, linkList[i].Safe()) if level > logrus.ErrorLevel { fmt.Printf("\rPinging %d/%d", i, len(linkList)-1) } defer func() { wg.Done() }() pingInfoList[i] = &PingInfo{ Link: linkList[i], } status, err := linkList[i].Ping(*FlagCount, *FlagDest) if status.Durations == nil || len(*status.Durations) == 0 { pingInfoList[i].Err = errors.New("all error") status.Durations = &ping.DurationList{-1} } if err != nil { pingInfoList[i].Err = err pingInfoList[i].Status = &ping.Status{ Durations: &ping.DurationList{}, } } else { pingInfoList[i].Status = &status } }(i) } wg.Wait() fmt.Println() for i := range pingInfoList { var ok bool pingInfoList[i].Duration, ok = mean.ArithmeticMean(pingInfoList[i].Status.Durations).(ping.Duration) if !ok { pingInfoList[i].Duration = 0 } } sort.Sort(&pingInfoList) if *FlagBest { // if ping && best chosenLink = pingInfoList[0].Link } else if *FlagRandom { // if ping && rand pingInfoList = AvailableLinks(pingInfoList) i, err := Random(len(pingInfoList)) if err != nil { logger.Fatal(err) } chosenLink = pingInfoList[i].Link } else { // if ping && not rand && not best for i := range pingInfoList { fmt.Printf("[%2d] %s%s[%-7s(%d errors)]\n", i, pingInfoList[i].Link.Description(), spaceCount(30, pingInfoList[i].Link.Description()), pingInfoList[i].Duration.Precision(1e6), len(pingInfoList[i].Status.Errors)) } i := Select(len(pingInfoList)) chosenLink = pingInfoList[i].Link } } else { // if not ping if *FlagRandom { // if 
not ping && rand i, err := Random(len(linkList)) if err != nil { logger.Fatal(err) } chosenLink = linkList[i] } else { // if not ping && not rand for i := range linkList { fmt.Printf("[%2d] %s%s\n", i, linkList[i].Description(), spaceCount(30, linkList[i].Description())) } i := Select(len(linkList)) chosenLink = linkList[i] } } /* CONFIG PART */ var template []byte template = []byte(infra.ConfigTpl) if *FlagTPL != "" { tpl, err := ioutil.ReadFile(*FlagTPL) if err != nil { logrus.Error(err, "using default template...") } else { template = tpl } } v2genConf := infra.V2genConfig{} confFile, err := ioutil.ReadFile(*FlagConf) if err == nil { v2genConf = infra.ParseV2genConf(confFile) } conf := infra.DefaultConf() bytes, err := infra.GenV2RayConf(*conf.Append(v2genConf).Append(chosenLink.Config()), template) if err != nil { logrus.Fatal(err) } if *FlagOut == "-" || *FlagOut == "" { fmt.Println(string(bytes)) return } else { err := ioutil.WriteFile(*FlagOut, bytes, 0644) if err != nil { logrus.Fatal(err) } else { if level > logrus.ErrorLevel { fmt.Printf("config has been written to %s\n", filepath.Clean(*FlagOut)) } } } }
identifier_body
main.go
package main import ( "crypto/rand" "errors" "flag" "fmt" "go/build" "io/ioutil" "log" "math/big" "net/http" "os" "path/filepath" "runtime" "sort" "strings" "unicode/utf8" "github.com/remeh/sizedwaitgroup" "github.com/sirupsen/logrus" "iochen.com/v2gen/v2" "iochen.com/v2gen/v2/common/base64" "iochen.com/v2gen/v2/common/mean" "iochen.com/v2gen/v2/infra" "iochen.com/v2gen/v2/ping" "iochen.com/v2gen/v2/vmess" ) var ( Version = "v2.0.0-dev" FlagLoglevel = flag.String("loglevel", "warn", "log level") FlagLog = flag.String("log", "-", "log output file") FlagAddr = flag.String("u", "", "subscription address(URL)") FlagOut = flag.String("o", "/etc/v2ray/config.json", "output path") FlagConf = flag.String("config", "/etc/v2ray/v2gen.ini", "v2gen config path") FlagTPL = flag.String("template", "", "V2Ray template path") FlagInit = flag.Bool("init", false, "init v2gen config (specify certain path with -config)") FlagRandom = flag.Bool("random", false, "random node index") FlagPing = flag.Bool("ping", true, "ping nodes") FlagDest = flag.String("dst", "https://cloudflare.com/cdn-cgi/trace", "test destination url (vmess ping only)") FlagCount = flag.Int("c", 3, "ping count for each node") // FlagMedian = flag.Bool("med", false, "use median instead of ArithmeticMean") FlagThreads = flag.Int("thread", 3, "threads used when pinging") FlagBest = flag.Bool("best", false, "use best node judged by ping result") FlagPipe = flag.Bool("pipe", true, "read from pipe") FlagVersion = flag.Bool("v", false, "show version") ) /* function main may be too long, here is a simple step list: ################################################################################################# # STEP 1 (READ): # # 1. read links from subscription(net) and pipe. # # # # STEP 2 (PROCESS): # # TYPE 1 (PING): # # SUBTYPE 1.1 (BEST): # # 1. ping. # # 2. choose the best node. # # # # SUBTYPE 1.2 (RANDOM): # # 1. ping. # # 2. filter out available node list A. 
# # NOTE: if exist nodes that no error, then A would be them(it), # # else A would be all of them. # # 3. randomly choose one from A. # # # # SUBTYPE 1.3 (DEFAULT): # # 1. ping. # # 2. print nodes and ping info. # # 3. wait for user's choosing. # # # # TYPE 2 (NOT PING): # # SUBTYPE 1.2 (RANDOM): # # 1. randomly choose one from nodes. # # # # SUBTYPE 1.3 (DEFAULT): # # 1. print nodes and ping info. # # 2. wait for user's choosing. # # # # STEP 3 (RENDER AND WRITE): # # 1. render and write. # ################################################################################################# */ type PingInfo struct { Status *ping.Status Duration ping.Duration Link v2gen.Link Err error } type PingInfoList []*PingInfo func (pf *PingInfoList)
() int { return len(*pf) } func (pf *PingInfoList) Less(i, j int) bool { if (*pf)[i].Err != nil { return false } else if (*pf)[j].Err != nil { return true } if len((*pf)[i].Status.Errors) != len((*pf)[j].Status.Errors) { return len((*pf)[i].Status.Errors) < len((*pf)[j].Status.Errors) } return (*pf)[i].Duration < (*pf)[j].Duration } func (pf *PingInfoList) Swap(i, j int) { (*pf)[i], (*pf)[j] = (*pf)[j], (*pf)[i] } func main() { flag.Parse() /* LOG PART */ logger := logrus.New() if *FlagLog != "-" && *FlagLog != "" { file, err := os.Create(*FlagLog) if err != nil { logrus.Fatal(err) } defer file.Close() _, err = file.Write([]byte(version() + "\n")) if err != nil { panic("cannot write into log file") } logger.Out = file } // set log level level, err := logrus.ParseLevel(*FlagLoglevel) if err != nil { logger.Panic(err) } logger.SetLevel(level) /* FLAG PART */ // if -v || trace, debug, info if *FlagVersion { fmt.Println(version()) return } else if level > logrus.ErrorLevel { fmt.Println(version()) } // if -init if *FlagInit { err := ioutil.WriteFile(*FlagConf, []byte(infra.DefaultV2GenConf), 0644) if err != nil { panic(err) return } logger.Info("v2gen config initialized") return } /* LINK PART */ var linkList []v2gen.Link // combine links from different sources // read from subscribe address(net) if *FlagAddr != "" { logger.Infof("Reading from %s...", *FlagAddr) resp, err := http.Get(*FlagAddr) if err != nil { logger.Fatal(err) } defer resp.Body.Close() bytes, err := ioutil.ReadAll(resp.Body) if err != nil { logger.Fatal(err) } links, err := ParseLinks(bytes) if err != nil { logger.Fatal(err) } linkList = append(linkList, links...) } // check whether reading from pipe if fi, _ := os.Stdin.Stat(); (fi.Mode()&os.ModeCharDevice) == 0 && *FlagPipe { logger.Info("Reading from pipe...") bytes, err := ioutil.ReadAll(os.Stdin) if err != nil { log.Fatal(err) } links, err := ParseLinks(bytes) if err != nil { logger.Fatal(err) } linkList = append(linkList, links...) 
} // if no Link, then exit if len(linkList) == 0 { logger.Warn("no available links, nothing to do") os.Exit(0) } var chosenLink v2gen.Link var spaceCount = func(i int, str string) string { rl := utf8.RuneCountInString(str) c := i - (len(str)+rl)/2 if c < 0 { c = 0 } return strings.Repeat(" ", c) } if *FlagPing { // if ping // make ping info list pingInfoList := make(PingInfoList, len(linkList)) wg := sizedwaitgroup.New(*FlagThreads) for i := range linkList { wg.Add() go func(i int) { logger.Debugf("[%d/%d]Pinging %s\n", i, len(linkList)-1, linkList[i].Safe()) if level > logrus.ErrorLevel { fmt.Printf("\rPinging %d/%d", i, len(linkList)-1) } defer func() { wg.Done() }() pingInfoList[i] = &PingInfo{ Link: linkList[i], } status, err := linkList[i].Ping(*FlagCount, *FlagDest) if status.Durations == nil || len(*status.Durations) == 0 { pingInfoList[i].Err = errors.New("all error") status.Durations = &ping.DurationList{-1} } if err != nil { pingInfoList[i].Err = err pingInfoList[i].Status = &ping.Status{ Durations: &ping.DurationList{}, } } else { pingInfoList[i].Status = &status } }(i) } wg.Wait() fmt.Println() for i := range pingInfoList { var ok bool pingInfoList[i].Duration, ok = mean.ArithmeticMean(pingInfoList[i].Status.Durations).(ping.Duration) if !ok { pingInfoList[i].Duration = 0 } } sort.Sort(&pingInfoList) if *FlagBest { // if ping && best chosenLink = pingInfoList[0].Link } else if *FlagRandom { // if ping && rand pingInfoList = AvailableLinks(pingInfoList) i, err := Random(len(pingInfoList)) if err != nil { logger.Fatal(err) } chosenLink = pingInfoList[i].Link } else { // if ping && not rand && not best for i := range pingInfoList { fmt.Printf("[%2d] %s%s[%-7s(%d errors)]\n", i, pingInfoList[i].Link.Description(), spaceCount(30, pingInfoList[i].Link.Description()), pingInfoList[i].Duration.Precision(1e6), len(pingInfoList[i].Status.Errors)) } i := Select(len(pingInfoList)) chosenLink = pingInfoList[i].Link } } else { // if not ping if *FlagRandom { // if 
not ping && rand i, err := Random(len(linkList)) if err != nil { logger.Fatal(err) } chosenLink = linkList[i] } else { // if not ping && not rand for i := range linkList { fmt.Printf("[%2d] %s%s\n", i, linkList[i].Description(), spaceCount(30, linkList[i].Description())) } i := Select(len(linkList)) chosenLink = linkList[i] } } /* CONFIG PART */ var template []byte template = []byte(infra.ConfigTpl) if *FlagTPL != "" { tpl, err := ioutil.ReadFile(*FlagTPL) if err != nil { logrus.Error(err, "using default template...") } else { template = tpl } } v2genConf := infra.V2genConfig{} confFile, err := ioutil.ReadFile(*FlagConf) if err == nil { v2genConf = infra.ParseV2genConf(confFile) } conf := infra.DefaultConf() bytes, err := infra.GenV2RayConf(*conf.Append(v2genConf).Append(chosenLink.Config()), template) if err != nil { logrus.Fatal(err) } if *FlagOut == "-" || *FlagOut == "" { fmt.Println(string(bytes)) return } else { err := ioutil.WriteFile(*FlagOut, bytes, 0644) if err != nil { logrus.Fatal(err) } else { if level > logrus.ErrorLevel { fmt.Printf("config has been written to %s\n", filepath.Clean(*FlagOut)) } } } } func version() string { return fmt.Sprintf("v2gen %s, V2Ray %s (%s %dcores %s/%s)", Version, vmess.CoreVersion(), runtime.Version(), runtime.NumCPU(), build.Default.GOOS, build.Default.GOARCH) } func ParseLinks(b []byte) ([]v2gen.Link, error) { s, err := base64.Decode(string(b)) if err != nil { return nil, err } linkList, err := vmess.Parse(s) if err != nil { return nil, err } links := make([]v2gen.Link, len(linkList)) for i := range linkList { links[i] = linkList[i] } return links, err } func AvailableLinks(pil PingInfoList) PingInfoList { var pingInfoList PingInfoList for i := range pil { if pil[i].Err != nil && len(pil[i].Status.Errors) == 0 { pingInfoList = append(pingInfoList, pil[i]) } } if len(pingInfoList) != 0 { return pingInfoList } else { return pil } } // Select returns an int [0,max) func Select(max int) int { var in int 
fmt.Print("=====================\nPlease Select: ") _, err := fmt.Scanf("%d", &in) if err != nil || in < 0 || in >= max { fmt.Println("wrong number, please reselect") return Select(max) } return in } func Random(max int) (int, error) { n, err := rand.Int(rand.Reader, big.NewInt(int64(max))) if err != nil { return 0, err } return int(n.Int64()), nil }
Len
identifier_name
round_trip.rs
use std::fmt::Debug; use tree_buf::prelude::*; mod common; use common::*; use std::collections::HashMap; use tree_buf::encode_options; use tree_buf::options; // Create this namespace to hide the prelude. This is a check that the hygenics do not require any types from tree_buf to be imported mod hide_namespace { use tree_buf::{Read, Write}; #[derive(Read, Write, PartialEq, Debug, Clone)] pub struct Bits { pub f: f64, pub obj_array: Vec<Bobs>, pub extra: Option<Bobs>, pub s: Box<String>, } #[derive(Read, Write, PartialEq, Debug, Clone)] pub struct Bobs { pub one: Vec<u64>, pub tup: (f64, f64), } } use hide_namespace::{Bits, Bobs}; // TODO: Compare to Avro - https://github.com/flavray/avro-rs fn make_item() -> Bits { Bits { f: 5.0, extra: Some(Bobs { one: vec![99], tup: (9999.99, 200.1), }), s: Box::new("abc".to_owned()), obj_array: vec![ Bobs { one: vec![3, 2, 1, 0], tup: (10.0, 200.2), }, Bobs { one: vec![], tup: (2.2, 200.3) }, Bobs { one: vec![20, 20, 20, 20, 20, 20, 20], tup: (0.0, 200.4), }, ], } } #[test] fn broken_int() { round_trip(&75339u64, 4, 10); } #[test] fn bools_root() { round_trip(&true, 1, 5); round_trip(&false, 1, 5); } #[test] fn opts_root() { round_trip(&Some(true), 1, 9); round_trip(&Option::<bool>::None, 1, 3); } #[test] fn bool_array() { round_trip(&vec![false, true, true, false, true, true, true, false, false, true, false, true], 6, 9); } #[test] fn ints_root() { round_trip(&0u32, 1, 5); round_trip(&1u32, 1, 5); for i in 2..=127u32 { round_trip(&i, 2, 6); } for i in 128..=255u32 { round_trip(&i, 2, 6); } for i in 256..1024u32 { round_trip(&i, 3, 8); } } // Special case for 1 element array encodes root object #[test] fn array1() { round_trip(&vec![99u64], 3, 8); round_trip(&vec![1u64], 2, 7); } #[test] fn int_vec() { round_trip(&vec![99u64, 100], 6, 10); } #[test] fn float64_vec() { round_trip(&vec![0.99], 10, 16); round_trip(&vec![0.01, 0.02, 0.03, 0.04], 36, 65); } #[test] fn float32_vec() { round_trip(&vec![0.99f32], 6, 14); 
round_trip(&vec![0.01f32, 0.02, 0.03, 0.04], 20, 38); } #[test] fn lossy_f64_vec() { let mut data = Vec::new(); for i in 0..50 { data.push(0.01 * i as f64); } let tolerance = -10; let options = encode_options! { options::LossyFloatTolerance(tolerance) }; let binary = tree_buf::write_with_options(&data, &options); assert_eq!(binary.len(), 104); let decoded = read::<Vec<f64>>(&binary).unwrap(); assert_eq!(data.len(), decoded.len()); for (e, d) in data.iter().zip(decoded.iter()) { assert!((e - d).abs() <= 0.001); } // Show how much smaller this is than lossless let options = encode_options! { options::LosslessFloat }; let binary = tree_buf::write_with_options(&data, &options); assert_eq!(binary.len(), 376); // Show that this is much better than fixed, since this would be a minimum for exactly 0 schema overhead. assert_eq!(std::mem::size_of::<f64>() * data.len(), 400); } #[test] fn nested_float_vec() { round_trip(&vec![vec![10.0, 11.0], vec![], vec![99.0]], 24, 32); } #[test] fn array_tuple() { round_trip(&vec![vec![(1u32, 2u32), (3, 4), (5, 6)]], 14, 19); } #[test] fn item() { let item = make_item(); round_trip(&item, 144, 221); } #[test] fn item_vec() { let item = make_item(); let item = vec![item; 5]; round_trip(&item, 379, 646); } #[test] fn nullable_array() { round_trip(&vec![Some(1u32), Some(2)], 10, 14); } #[test] fn visibility_modifiers() { #[derive(Default, Read, Write, Debug, PartialEq, Clone)] struct Inherited { a: u64, } #[derive(Default, Read, Write, Debug, PartialEq, Clone)] pub(crate) struct Crate { a: u64, } #[derive(Default, Read, Write, Debug, PartialEq, Clone)] pub struct Public { a: u64, } round_trip_default::<Inherited>(4, 8); round_trip_default::<Crate>(4, 8); round_trip_default::<Public>(4, 8); } #[test] fn ignores() { use tree_buf::Ignore; round_trip(&Ignore, 1, 3); #[derive(Default, Read, Write, Debug, PartialEq, Clone)] struct X { i: Ignore, } let x = X { i: Ignore }; round_trip(&x, 4, 6); #[derive(Read, Write, Debug, PartialEq, Clone)] enum E 
{ A(Ignore), B(Ignore), } let e = E::A(Ignore); round_trip(&e, 4, 10); #[derive(Read, Write, Debug, PartialEq, Clone)] struct N { e: E, } let o = vec![N { e: E::A(Ignore) }, N { e: E::B(Ignore) }]; round_trip(&o, 16, 18); } // TODO: Using Quickcheck and Arbitrary with quickcheck_derive. #[test] fn various_types() { round_trip_default::<u64>(1, 5); round_trip_default::<u32>(1, 5); round_trip_default::<u16>(1, 5); round_trip_default::<u8>(1, 5); round_trip_default::<(u64, u64)>(3, 9); round_trip_default::<(u64, u32)>(3, 9); round_trip_default::<f64>(1, 14); // See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9 round_trip_default::<Vec<u32>>(1, 5); round_trip_default::<Option<Vec<u32>>>(1, 3); round_trip_default::<Option<u32>>(1, 3); round_trip_default::<Vec<Option<u32>>>(1, 5); round_trip_default::<String>(1, 6); } #[test] fn conversions() { // TODO: f32 //serialize_eq(1.0f64, 1.0f32, 0); //serialize_eq(1.0f32, 1.0f64, 0); //serialize_eq(9.0f32, 9.0f64, 0); // TODO: A bunch more of these } #[test] fn small_structs() { #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _1 { a: u64, } round_trip_default::<_1>(4, 8); } #[test] fn large_structs() { #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _14 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, } #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _15 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, o: f64, } #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _16 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, o: f64, p: f64, } #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _17 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, o: f64, p: f64, q: f64, } round_trip_default::<_14>(44, 200); 
round_trip_default::<_15>(47, 214); round_trip_default::<_16>(50, 228); round_trip_default::<_17>(53, 242); } #[test] fn map_0_root()
#[test] fn map_1_root() { let mut data = HashMap::new(); data.insert("test".to_owned(), 5u32); round_trip(&data, 10, 22); } #[test] fn map_n_root() { let mut data = HashMap::new(); data.insert("test3".to_owned(), 5u32); data.insert("test2".to_owned(), 5); data.insert("test1".to_owned(), 0); round_trip(&data, None, None); } #[test] fn maps_array() { let mut data = Vec::new(); for i in 0..5u32 { let mut h = HashMap::new(); h.insert(i, Vec::<u32>::new()); h.insert(10, vec![10, 9, 8, 7]); data.push(h); } // Interestingly, the output size is not deterministic in this case. // It depends on whether the last key or value from iterating the HashMap is Default round_trip(&data, None, None); } #[test] fn maps_void() { let mut data = Vec::new(); for _ in 0..5 { let h = HashMap::<String, String>::new(); data.push(h); } round_trip(&data, 10, 13); } #[test] fn fixed_arrays() { round_trip(&[0u32, 1, 2, 3], 8, 10); round_trip(&[0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 6, 8); } // This failed to compile at one point when moving generics for WriterArray out of associated type. 
#[test] fn enum_with_vec() { #[derive(Write, Read, Debug, PartialEq, Clone)] enum X { X(Vec<u64>), } round_trip(&X::X(vec![25, 30, 0, 0, 0]), 11, 21); } fn owned_vec(strs: Vec<&'static str>) -> Vec<String> { strs.iter().map(|s| String::from(*s)).collect() } #[test] fn strings_using_dictionary() { let data = vec!["abcd", "abcd", "def", "abcd", "abcd", "abcd", ""]; round_trip(&owned_vec(data), 21, 23); let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd"]; round_trip(&owned_vec(data), 13, 15); let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "def", "def"]; round_trip(&owned_vec(data), 17, 20); let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "abcd", "def"]; round_trip(&owned_vec(data), 17, 20); } #[test] fn nested_strings_using_rle() { let data = ( //owned_vec(vec![]), owned_vec(vec!["abc", "abc", "abc"]), owned_vec(vec!["def", "def", "def"]), 1u32, ); //let data = owned_vec(vec!["abc", "abc", "abc"]); // TODO: Add sizes round_trip(&data, 26, 30); } #[test] fn long_bool_runs() { let mut data = Vec::new(); for i in 560..570 { for _ in 0..i { data.push(true); } data.push(false); } round_trip(&data, 36, 68); } #[test] fn int_to_bool_nested() { let data = ( vec![0u32,0,1,1,0], vec![0u32,0,0,1,1,1,1], ); round_trip(&data, 11, 15); let data = vec![ vec![0u32, 0, 1, 1,0], vec![1u32, 1, 1, 1, 1, 1, 0], vec![1u32, 0, 0, 0, 0, 0, 1], ]; round_trip(&data, 13, 18); } // TODO: Use coverage marks to ensure all types are used // https://ferrous-systems.com/blog/coverage-marks/ // This was useful for narrowing down a subset of a broken compressor. 
// It may be useful in the future /* #[test] fn broken_gorilla() { use rand::Rng; use std::convert::TryInto as _; use tree_buf::internal::encodings::gorilla; let broken = [-75.01536474599993, -75.00911189799993, 114.37647545700004]; let mut bytes = Vec::new(); gorilla::compress((&broken[..]).iter().copied(), &mut bytes).unwrap(); let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap(); assert_eq!(&broken[..], &out[..]); // 356301 - 356304 // 457009 - 457012 let data = std::fs::read("C:\\git\\floats.dat").unwrap(); let mut offset = 0; let mut values = Vec::new(); while offset < data.len() { let val = (&data[offset..(offset + 8)]).try_into().unwrap(); offset += 8; let f = f64::from_le_bytes(val); values.push(f); } return; fn attempt(values: &[f64], min: usize, max: usize) -> bool { let values = &values[min..max]; std::panic::catch_unwind(|| { let mut bytes = Vec::new(); gorilla::compress(values.iter().copied(), &mut bytes).unwrap(); let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap(); assert_eq!(values, &out[..]); }) .is_ok() } let mut min = 0; let mut max = values.len(); let mut rng = rand::thread_rng(); for _ in 0..100000 { let try_min = rng.gen_range(min, max); let try_max = rng.gen_range(try_min + 1, max + 1); if try_min == min && try_max == max { continue; } if !attempt(&values[..], try_min, try_max) { min = try_min; max = try_max; } } } */
{ // See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9 let data = HashMap::<u32, u32>::new(); round_trip(&data, 2, 8); }
identifier_body
round_trip.rs
use std::fmt::Debug; use tree_buf::prelude::*; mod common; use common::*; use std::collections::HashMap; use tree_buf::encode_options; use tree_buf::options; // Create this namespace to hide the prelude. This is a check that the hygenics do not require any types from tree_buf to be imported mod hide_namespace { use tree_buf::{Read, Write}; #[derive(Read, Write, PartialEq, Debug, Clone)] pub struct Bits { pub f: f64, pub obj_array: Vec<Bobs>, pub extra: Option<Bobs>, pub s: Box<String>, } #[derive(Read, Write, PartialEq, Debug, Clone)] pub struct Bobs { pub one: Vec<u64>, pub tup: (f64, f64), } } use hide_namespace::{Bits, Bobs}; // TODO: Compare to Avro - https://github.com/flavray/avro-rs fn make_item() -> Bits { Bits { f: 5.0, extra: Some(Bobs { one: vec![99], tup: (9999.99, 200.1), }), s: Box::new("abc".to_owned()), obj_array: vec![ Bobs { one: vec![3, 2, 1, 0], tup: (10.0, 200.2), }, Bobs { one: vec![], tup: (2.2, 200.3) }, Bobs { one: vec![20, 20, 20, 20, 20, 20, 20], tup: (0.0, 200.4), }, ], } } #[test] fn broken_int() { round_trip(&75339u64, 4, 10); } #[test] fn bools_root() { round_trip(&true, 1, 5); round_trip(&false, 1, 5); } #[test] fn opts_root() { round_trip(&Some(true), 1, 9); round_trip(&Option::<bool>::None, 1, 3); } #[test] fn bool_array() { round_trip(&vec![false, true, true, false, true, true, true, false, false, true, false, true], 6, 9); } #[test] fn ints_root() { round_trip(&0u32, 1, 5); round_trip(&1u32, 1, 5); for i in 2..=127u32 { round_trip(&i, 2, 6); } for i in 128..=255u32 { round_trip(&i, 2, 6); } for i in 256..1024u32 { round_trip(&i, 3, 8); } } // Special case for 1 element array encodes root object #[test] fn array1() { round_trip(&vec![99u64], 3, 8); round_trip(&vec![1u64], 2, 7); } #[test] fn int_vec() { round_trip(&vec![99u64, 100], 6, 10); } #[test] fn float64_vec() { round_trip(&vec![0.99], 10, 16); round_trip(&vec![0.01, 0.02, 0.03, 0.04], 36, 65); } #[test] fn float32_vec() { round_trip(&vec![0.99f32], 6, 14); 
round_trip(&vec![0.01f32, 0.02, 0.03, 0.04], 20, 38); } #[test] fn lossy_f64_vec() { let mut data = Vec::new(); for i in 0..50 { data.push(0.01 * i as f64); } let tolerance = -10; let options = encode_options! { options::LossyFloatTolerance(tolerance) }; let binary = tree_buf::write_with_options(&data, &options); assert_eq!(binary.len(), 104); let decoded = read::<Vec<f64>>(&binary).unwrap(); assert_eq!(data.len(), decoded.len()); for (e, d) in data.iter().zip(decoded.iter()) { assert!((e - d).abs() <= 0.001); } // Show how much smaller this is than lossless let options = encode_options! { options::LosslessFloat }; let binary = tree_buf::write_with_options(&data, &options); assert_eq!(binary.len(), 376); // Show that this is much better than fixed, since this would be a minimum for exactly 0 schema overhead. assert_eq!(std::mem::size_of::<f64>() * data.len(), 400); } #[test] fn nested_float_vec() { round_trip(&vec![vec![10.0, 11.0], vec![], vec![99.0]], 24, 32); } #[test] fn array_tuple() { round_trip(&vec![vec![(1u32, 2u32), (3, 4), (5, 6)]], 14, 19); } #[test] fn item() { let item = make_item(); round_trip(&item, 144, 221); } #[test] fn item_vec() { let item = make_item(); let item = vec![item; 5]; round_trip(&item, 379, 646); } #[test] fn nullable_array() { round_trip(&vec![Some(1u32), Some(2)], 10, 14); } #[test] fn visibility_modifiers() { #[derive(Default, Read, Write, Debug, PartialEq, Clone)] struct Inherited { a: u64, } #[derive(Default, Read, Write, Debug, PartialEq, Clone)] pub(crate) struct Crate { a: u64, } #[derive(Default, Read, Write, Debug, PartialEq, Clone)] pub struct Public { a: u64, } round_trip_default::<Inherited>(4, 8); round_trip_default::<Crate>(4, 8); round_trip_default::<Public>(4, 8); } #[test] fn ignores() { use tree_buf::Ignore; round_trip(&Ignore, 1, 3); #[derive(Default, Read, Write, Debug, PartialEq, Clone)] struct X { i: Ignore, } let x = X { i: Ignore }; round_trip(&x, 4, 6); #[derive(Read, Write, Debug, PartialEq, Clone)] enum E 
{ A(Ignore), B(Ignore), } let e = E::A(Ignore); round_trip(&e, 4, 10); #[derive(Read, Write, Debug, PartialEq, Clone)] struct N { e: E, } let o = vec![N { e: E::A(Ignore) }, N { e: E::B(Ignore) }]; round_trip(&o, 16, 18); } // TODO: Using Quickcheck and Arbitrary with quickcheck_derive. #[test] fn various_types() { round_trip_default::<u64>(1, 5); round_trip_default::<u32>(1, 5); round_trip_default::<u16>(1, 5); round_trip_default::<u8>(1, 5); round_trip_default::<(u64, u64)>(3, 9); round_trip_default::<(u64, u32)>(3, 9); round_trip_default::<f64>(1, 14); // See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9 round_trip_default::<Vec<u32>>(1, 5); round_trip_default::<Option<Vec<u32>>>(1, 3); round_trip_default::<Option<u32>>(1, 3); round_trip_default::<Vec<Option<u32>>>(1, 5); round_trip_default::<String>(1, 6); } #[test] fn conversions() { // TODO: f32 //serialize_eq(1.0f64, 1.0f32, 0); //serialize_eq(1.0f32, 1.0f64, 0); //serialize_eq(9.0f32, 9.0f64, 0); // TODO: A bunch more of these } #[test] fn small_structs() { #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _1 { a: u64, } round_trip_default::<_1>(4, 8); } #[test] fn large_structs() { #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _14 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, } #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _15 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, o: f64, } #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _16 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, o: f64, p: f64, } #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _17 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, o: f64, p: f64, q: f64, } round_trip_default::<_14>(44, 200); 
round_trip_default::<_15>(47, 214); round_trip_default::<_16>(50, 228); round_trip_default::<_17>(53, 242); } #[test] fn map_0_root() { // See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9 let data = HashMap::<u32, u32>::new(); round_trip(&data, 2, 8); } #[test] fn map_1_root() { let mut data = HashMap::new(); data.insert("test".to_owned(), 5u32); round_trip(&data, 10, 22); } #[test] fn
() { let mut data = HashMap::new(); data.insert("test3".to_owned(), 5u32); data.insert("test2".to_owned(), 5); data.insert("test1".to_owned(), 0); round_trip(&data, None, None); } #[test] fn maps_array() { let mut data = Vec::new(); for i in 0..5u32 { let mut h = HashMap::new(); h.insert(i, Vec::<u32>::new()); h.insert(10, vec![10, 9, 8, 7]); data.push(h); } // Interestingly, the output size is not deterministic in this case. // It depends on whether the last key or value from iterating the HashMap is Default round_trip(&data, None, None); } #[test] fn maps_void() { let mut data = Vec::new(); for _ in 0..5 { let h = HashMap::<String, String>::new(); data.push(h); } round_trip(&data, 10, 13); } #[test] fn fixed_arrays() { round_trip(&[0u32, 1, 2, 3], 8, 10); round_trip(&[0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 6, 8); } // This failed to compile at one point when moving generics for WriterArray out of associated type. #[test] fn enum_with_vec() { #[derive(Write, Read, Debug, PartialEq, Clone)] enum X { X(Vec<u64>), } round_trip(&X::X(vec![25, 30, 0, 0, 0]), 11, 21); } fn owned_vec(strs: Vec<&'static str>) -> Vec<String> { strs.iter().map(|s| String::from(*s)).collect() } #[test] fn strings_using_dictionary() { let data = vec!["abcd", "abcd", "def", "abcd", "abcd", "abcd", ""]; round_trip(&owned_vec(data), 21, 23); let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd"]; round_trip(&owned_vec(data), 13, 15); let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "def", "def"]; round_trip(&owned_vec(data), 17, 20); let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "abcd", "def"]; round_trip(&owned_vec(data), 17, 20); } #[test] fn nested_strings_using_rle() { let data = ( //owned_vec(vec![]), owned_vec(vec!["abc", "abc", "abc"]), owned_vec(vec!["def", "def", "def"]), 1u32, ); //let data = owned_vec(vec!["abc", "abc", "abc"]); // TODO: Add sizes round_trip(&data, 26, 30); } #[test] fn long_bool_runs() { let mut data = Vec::new(); for i in 560..570 { for 
_ in 0..i { data.push(true); } data.push(false); } round_trip(&data, 36, 68); } #[test] fn int_to_bool_nested() { let data = ( vec![0u32,0,1,1,0], vec![0u32,0,0,1,1,1,1], ); round_trip(&data, 11, 15); let data = vec![ vec![0u32, 0, 1, 1,0], vec![1u32, 1, 1, 1, 1, 1, 0], vec![1u32, 0, 0, 0, 0, 0, 1], ]; round_trip(&data, 13, 18); } // TODO: Use coverage marks to ensure all types are used // https://ferrous-systems.com/blog/coverage-marks/ // This was useful for narrowing down a subset of a broken compressor. // It may be useful in the future /* #[test] fn broken_gorilla() { use rand::Rng; use std::convert::TryInto as _; use tree_buf::internal::encodings::gorilla; let broken = [-75.01536474599993, -75.00911189799993, 114.37647545700004]; let mut bytes = Vec::new(); gorilla::compress((&broken[..]).iter().copied(), &mut bytes).unwrap(); let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap(); assert_eq!(&broken[..], &out[..]); // 356301 - 356304 // 457009 - 457012 let data = std::fs::read("C:\\git\\floats.dat").unwrap(); let mut offset = 0; let mut values = Vec::new(); while offset < data.len() { let val = (&data[offset..(offset + 8)]).try_into().unwrap(); offset += 8; let f = f64::from_le_bytes(val); values.push(f); } return; fn attempt(values: &[f64], min: usize, max: usize) -> bool { let values = &values[min..max]; std::panic::catch_unwind(|| { let mut bytes = Vec::new(); gorilla::compress(values.iter().copied(), &mut bytes).unwrap(); let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap(); assert_eq!(values, &out[..]); }) .is_ok() } let mut min = 0; let mut max = values.len(); let mut rng = rand::thread_rng(); for _ in 0..100000 { let try_min = rng.gen_range(min, max); let try_max = rng.gen_range(try_min + 1, max + 1); if try_min == min && try_max == max { continue; } if !attempt(&values[..], try_min, try_max) { min = try_min; max = try_max; } } } */
map_n_root
identifier_name
round_trip.rs
use std::fmt::Debug; use tree_buf::prelude::*; mod common; use common::*; use std::collections::HashMap; use tree_buf::encode_options; use tree_buf::options; // Create this namespace to hide the prelude. This is a check that the hygenics do not require any types from tree_buf to be imported mod hide_namespace { use tree_buf::{Read, Write}; #[derive(Read, Write, PartialEq, Debug, Clone)] pub struct Bits { pub f: f64, pub obj_array: Vec<Bobs>, pub extra: Option<Bobs>, pub s: Box<String>, } #[derive(Read, Write, PartialEq, Debug, Clone)] pub struct Bobs { pub one: Vec<u64>, pub tup: (f64, f64), } } use hide_namespace::{Bits, Bobs}; // TODO: Compare to Avro - https://github.com/flavray/avro-rs fn make_item() -> Bits { Bits { f: 5.0, extra: Some(Bobs { one: vec![99], tup: (9999.99, 200.1), }), s: Box::new("abc".to_owned()), obj_array: vec![ Bobs { one: vec![3, 2, 1, 0], tup: (10.0, 200.2), }, Bobs { one: vec![], tup: (2.2, 200.3) }, Bobs { one: vec![20, 20, 20, 20, 20, 20, 20], tup: (0.0, 200.4), }, ], } } #[test] fn broken_int() { round_trip(&75339u64, 4, 10); } #[test] fn bools_root() { round_trip(&true, 1, 5); round_trip(&false, 1, 5); } #[test] fn opts_root() { round_trip(&Some(true), 1, 9); round_trip(&Option::<bool>::None, 1, 3); } #[test] fn bool_array() { round_trip(&vec![false, true, true, false, true, true, true, false, false, true, false, true], 6, 9); } #[test] fn ints_root() { round_trip(&0u32, 1, 5); round_trip(&1u32, 1, 5); for i in 2..=127u32 { round_trip(&i, 2, 6); } for i in 128..=255u32 { round_trip(&i, 2, 6); } for i in 256..1024u32 { round_trip(&i, 3, 8); } } // Special case for 1 element array encodes root object #[test] fn array1() { round_trip(&vec![99u64], 3, 8); round_trip(&vec![1u64], 2, 7); } #[test] fn int_vec() { round_trip(&vec![99u64, 100], 6, 10); } #[test] fn float64_vec() { round_trip(&vec![0.99], 10, 16); round_trip(&vec![0.01, 0.02, 0.03, 0.04], 36, 65); } #[test] fn float32_vec() { round_trip(&vec![0.99f32], 6, 14); 
round_trip(&vec![0.01f32, 0.02, 0.03, 0.04], 20, 38); } #[test] fn lossy_f64_vec() { let mut data = Vec::new(); for i in 0..50 { data.push(0.01 * i as f64); } let tolerance = -10; let options = encode_options! { options::LossyFloatTolerance(tolerance) }; let binary = tree_buf::write_with_options(&data, &options); assert_eq!(binary.len(), 104); let decoded = read::<Vec<f64>>(&binary).unwrap(); assert_eq!(data.len(), decoded.len()); for (e, d) in data.iter().zip(decoded.iter()) { assert!((e - d).abs() <= 0.001); } // Show how much smaller this is than lossless
// Show that this is much better than fixed, since this would be a minimum for exactly 0 schema overhead. assert_eq!(std::mem::size_of::<f64>() * data.len(), 400); } #[test] fn nested_float_vec() { round_trip(&vec![vec![10.0, 11.0], vec![], vec![99.0]], 24, 32); } #[test] fn array_tuple() { round_trip(&vec![vec![(1u32, 2u32), (3, 4), (5, 6)]], 14, 19); } #[test] fn item() { let item = make_item(); round_trip(&item, 144, 221); } #[test] fn item_vec() { let item = make_item(); let item = vec![item; 5]; round_trip(&item, 379, 646); } #[test] fn nullable_array() { round_trip(&vec![Some(1u32), Some(2)], 10, 14); } #[test] fn visibility_modifiers() { #[derive(Default, Read, Write, Debug, PartialEq, Clone)] struct Inherited { a: u64, } #[derive(Default, Read, Write, Debug, PartialEq, Clone)] pub(crate) struct Crate { a: u64, } #[derive(Default, Read, Write, Debug, PartialEq, Clone)] pub struct Public { a: u64, } round_trip_default::<Inherited>(4, 8); round_trip_default::<Crate>(4, 8); round_trip_default::<Public>(4, 8); } #[test] fn ignores() { use tree_buf::Ignore; round_trip(&Ignore, 1, 3); #[derive(Default, Read, Write, Debug, PartialEq, Clone)] struct X { i: Ignore, } let x = X { i: Ignore }; round_trip(&x, 4, 6); #[derive(Read, Write, Debug, PartialEq, Clone)] enum E { A(Ignore), B(Ignore), } let e = E::A(Ignore); round_trip(&e, 4, 10); #[derive(Read, Write, Debug, PartialEq, Clone)] struct N { e: E, } let o = vec![N { e: E::A(Ignore) }, N { e: E::B(Ignore) }]; round_trip(&o, 16, 18); } // TODO: Using Quickcheck and Arbitrary with quickcheck_derive. 
#[test] fn various_types() { round_trip_default::<u64>(1, 5); round_trip_default::<u32>(1, 5); round_trip_default::<u16>(1, 5); round_trip_default::<u8>(1, 5); round_trip_default::<(u64, u64)>(3, 9); round_trip_default::<(u64, u32)>(3, 9); round_trip_default::<f64>(1, 14); // See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9 round_trip_default::<Vec<u32>>(1, 5); round_trip_default::<Option<Vec<u32>>>(1, 3); round_trip_default::<Option<u32>>(1, 3); round_trip_default::<Vec<Option<u32>>>(1, 5); round_trip_default::<String>(1, 6); } #[test] fn conversions() { // TODO: f32 //serialize_eq(1.0f64, 1.0f32, 0); //serialize_eq(1.0f32, 1.0f64, 0); //serialize_eq(9.0f32, 9.0f64, 0); // TODO: A bunch more of these } #[test] fn small_structs() { #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _1 { a: u64, } round_trip_default::<_1>(4, 8); } #[test] fn large_structs() { #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _14 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, } #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _15 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, o: f64, } #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _16 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, o: f64, p: f64, } #[derive(Read, Write, Default, Debug, PartialEq, Clone)] struct _17 { a: f64, b: f64, c: f64, d: f64, e: f64, f: f64, g: f64, h: f64, i: f64, j: f64, k: f64, l: f64, m: f64, n: f64, o: f64, p: f64, q: f64, } round_trip_default::<_14>(44, 200); round_trip_default::<_15>(47, 214); round_trip_default::<_16>(50, 228); round_trip_default::<_17>(53, 242); } #[test] fn map_0_root() { // See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9 let data = HashMap::<u32, u32>::new(); round_trip(&data, 2, 8); } #[test] fn map_1_root() { let mut data = 
HashMap::new(); data.insert("test".to_owned(), 5u32); round_trip(&data, 10, 22); } #[test] fn map_n_root() { let mut data = HashMap::new(); data.insert("test3".to_owned(), 5u32); data.insert("test2".to_owned(), 5); data.insert("test1".to_owned(), 0); round_trip(&data, None, None); } #[test] fn maps_array() { let mut data = Vec::new(); for i in 0..5u32 { let mut h = HashMap::new(); h.insert(i, Vec::<u32>::new()); h.insert(10, vec![10, 9, 8, 7]); data.push(h); } // Interestingly, the output size is not deterministic in this case. // It depends on whether the last key or value from iterating the HashMap is Default round_trip(&data, None, None); } #[test] fn maps_void() { let mut data = Vec::new(); for _ in 0..5 { let h = HashMap::<String, String>::new(); data.push(h); } round_trip(&data, 10, 13); } #[test] fn fixed_arrays() { round_trip(&[0u32, 1, 2, 3], 8, 10); round_trip(&[0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 6, 8); } // This failed to compile at one point when moving generics for WriterArray out of associated type. 
#[test] fn enum_with_vec() { #[derive(Write, Read, Debug, PartialEq, Clone)] enum X { X(Vec<u64>), } round_trip(&X::X(vec![25, 30, 0, 0, 0]), 11, 21); } fn owned_vec(strs: Vec<&'static str>) -> Vec<String> { strs.iter().map(|s| String::from(*s)).collect() } #[test] fn strings_using_dictionary() { let data = vec!["abcd", "abcd", "def", "abcd", "abcd", "abcd", ""]; round_trip(&owned_vec(data), 21, 23); let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd"]; round_trip(&owned_vec(data), 13, 15); let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "def", "def"]; round_trip(&owned_vec(data), 17, 20); let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "abcd", "def"]; round_trip(&owned_vec(data), 17, 20); } #[test] fn nested_strings_using_rle() { let data = ( //owned_vec(vec![]), owned_vec(vec!["abc", "abc", "abc"]), owned_vec(vec!["def", "def", "def"]), 1u32, ); //let data = owned_vec(vec!["abc", "abc", "abc"]); // TODO: Add sizes round_trip(&data, 26, 30); } #[test] fn long_bool_runs() { let mut data = Vec::new(); for i in 560..570 { for _ in 0..i { data.push(true); } data.push(false); } round_trip(&data, 36, 68); } #[test] fn int_to_bool_nested() { let data = ( vec![0u32,0,1,1,0], vec![0u32,0,0,1,1,1,1], ); round_trip(&data, 11, 15); let data = vec![ vec![0u32, 0, 1, 1,0], vec![1u32, 1, 1, 1, 1, 1, 0], vec![1u32, 0, 0, 0, 0, 0, 1], ]; round_trip(&data, 13, 18); } // TODO: Use coverage marks to ensure all types are used // https://ferrous-systems.com/blog/coverage-marks/ // This was useful for narrowing down a subset of a broken compressor. 
// It may be useful in the future /* #[test] fn broken_gorilla() { use rand::Rng; use std::convert::TryInto as _; use tree_buf::internal::encodings::gorilla; let broken = [-75.01536474599993, -75.00911189799993, 114.37647545700004]; let mut bytes = Vec::new(); gorilla::compress((&broken[..]).iter().copied(), &mut bytes).unwrap(); let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap(); assert_eq!(&broken[..], &out[..]); // 356301 - 356304 // 457009 - 457012 let data = std::fs::read("C:\\git\\floats.dat").unwrap(); let mut offset = 0; let mut values = Vec::new(); while offset < data.len() { let val = (&data[offset..(offset + 8)]).try_into().unwrap(); offset += 8; let f = f64::from_le_bytes(val); values.push(f); } return; fn attempt(values: &[f64], min: usize, max: usize) -> bool { let values = &values[min..max]; std::panic::catch_unwind(|| { let mut bytes = Vec::new(); gorilla::compress(values.iter().copied(), &mut bytes).unwrap(); let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap(); assert_eq!(values, &out[..]); }) .is_ok() } let mut min = 0; let mut max = values.len(); let mut rng = rand::thread_rng(); for _ in 0..100000 { let try_min = rng.gen_range(min, max); let try_max = rng.gen_range(try_min + 1, max + 1); if try_min == min && try_max == max { continue; } if !attempt(&values[..], try_min, try_max) { min = try_min; max = try_max; } } } */
let options = encode_options! { options::LosslessFloat }; let binary = tree_buf::write_with_options(&data, &options); assert_eq!(binary.len(), 376);
random_line_split
chip8.rs
use rand::{Rng, thread_rng}; pub const PIXEL_W: u16 = 64; // width of CHIP-8 screen pub const PIXEL_H: u16 = 32; // height of CHIP-8 screen pub const FONT_LOCATION: u16 = 0x80; // location of font set in system RAM pub const CARTRIDGE_LOCATION: u16 = 0x200; // location in system RAM where game data should be loaded on boot pub struct
{ pub memory: [u8; 4096], // RAM pub reg: [u8; 16], // registers pub gfx: [u8; (PIXEL_W * PIXEL_H) as usize], // pixels stack: [u16; 16], // subroutine stack pub key: [u8; 16], // keypad idx: u16, // index register pc: u16, // program counter sp: u16, // stack pointer pub delay_timer: u8, pub sound_timer: u8, pub draw_flag: bool, // set when clear screen or draw opcodes are called } impl Chip8 { pub fn new() -> Chip8 { let mut chip = Chip8 { memory: [0;4096], reg: [0;16], gfx: [0; (PIXEL_W * PIXEL_H) as usize], stack: [0; 16], key: [0; 16], idx: 0, pc: CARTRIDGE_LOCATION, sp: 0, delay_timer: 0, sound_timer: 0, draw_flag: false, }; // load font set for (i, v) in FONT_SET.iter().enumerate() { chip.memory[FONT_LOCATION as usize + i] = *v; } chip } pub fn cycle(&mut self) { // all opcodes are two bytes. // get the byte at memory[program counter] and memory[program counter + 1], // split them into nibbles for convenience. let w = self.memory[self.pc as usize] >> 4; let x = self.memory[self.pc as usize] & 0xF; let y = self.memory[(self.pc+1) as usize] >> 4; let z = self.memory[(self.pc+1) as usize] & 0xF; let yz = y << 4 | z; let xyz: u16 = (x as u16) << 8 | (y as u16) << 4 | (z as u16); let (_x, _y, _z) = (x as usize, y as usize, z as usize); let opcode = (w, x, y, z); if super::DEBUG { println!("=================\nregisters: {:02x?}", self.reg); println!("pc: 0x{:02x}, idx: 0x{:02x}, bytes at idx: {:02x?}", self.pc, self.idx, &self.memory[self.idx as usize..(self.idx+8) as usize]); println!("executing opcode {:02x?}", opcode); } match opcode { // skipping instruction 0XYZ // clear screen. (0x0, 0x0, 0xE, 0x0) => { self.draw_flag = true; self.gfx.iter_mut().for_each(|b| *b = 0); self.pc += 2; }, // return from subroutine. (0x0, 0x0, 0xE, 0xE) => { self.sp -= 1; self.pc = self.stack[self.sp as usize]; }, // go to xyz. (0x1, _, _, _) => self.pc = xyz, // call subroutine at xyz. 
(0x2, _, _, _) => { self.stack[self.sp as usize] = self.pc + 2; // put next instruction on stack self.sp += 1; // increase stack pointer self.pc = xyz; // jump to subroutine }, // skip next instruction if register x equals yz. (0x3, _, _, _) => { if self.reg[_x] == yz { self.pc += 2; } self.pc += 2; }, // skip next instruction if register x doesn't equal yz. (0x4, _, _, _) => { if self.reg[_x] != yz { self.pc += 2; } self.pc += 2; }, // skip next instruction if reg x == reg y. (0x5, _, _, 0x0) => { if self.reg[_x] == self.reg[_y] { self.pc += 2; } self.pc += 2; }, // set reg x to yz. (0x6, _, _, _) => { self.reg[_x] = yz; self.pc += 2; }, // add yz to reg x. (0x7, _, _, _) => { self.reg[_x] = self.reg[_x].wrapping_add(yz); self.pc += 2; }, // set reg x to value of reg y. (0x8, _, _, 0x0) => { self.reg[_x] = self.reg[_y]; self.pc += 2; }, // set reg x to reg x | reg y. (0x8, _, _, 0x1) => { self.reg[_x] |= self.reg[_y]; self.pc += 2; }, // set reg x to reg x & reg y. (0x8, _, _, 0x2) => { self.reg[_x] &= self.reg[_y]; self.pc += 2; }, // UNDOCUMENTED. set reg x to reg x ^ reg y. (0x8, _, _, 0x3) => { self.reg[_x] ^= self.reg[_y]; self.pc += 2; }, // add reg y to reg x. reg f is set to 1 when there's a carry, and to 0 when there isn't. (0x8, _, _, 0x4) => { let old_x = self.reg[_x]; self.reg[_x] = self.reg[_x].wrapping_add(self.reg[_y]); self.reg[0xF] = if self.reg[_x] < old_x { 1 } else { 0 }; self.pc += 2; }, // reg y is subtracted from reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't. (0x8, _, _, 0x5) => { self.reg[0xF] = if self.reg[_x] < self.reg[_y] { 0 } else { 1 }; self.reg[_x] = self.reg[_x].wrapping_sub(self.reg[_y]); self.pc += 2; }, // WEIRD UNDOCUMENTED LEGACY ONE. TODO: add legacy mode? (0x8, _, _, 0x6) => { // first attempt. newer version? self.reg[0xF] = self.reg[_x] & 0x1; self.reg[_x] >>= 1; // legacy? 
according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set // self.reg[0xF] = self.reg[_y] & 0x1; // self.reg[_x] = self.reg[_y] >> 1; self.pc += 2; }, // UNDOCUMENTED. sets reg x to reg y minus reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't. (0x8, _, _, 0x7) => { self.reg[0xF] = if self.reg[_y] < self.reg[_x] { 0 } else { 1 }; self.reg[_x] = self.reg[_y].wrapping_sub(self.reg[_x]); self.pc += 2; }, // UNDOCUMENTED. store the most significant bit of reg x in reg f and left-shift reg x by 1. (0x8, _, _, 0xE) => { // according to https://en.wikipedia.org/wiki/CHIP-8#Opcode_table self.reg[0xF] = (self.reg[_x] & (1 << 7)) >> 7; self.reg[_x] <<= 1; // according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set // self.reg[0xF] = (self.reg[_y] & (1 << 7)) >> 7; // self.reg[_x] = self.reg[_y] << 1; self.pc += 2; }, // skips the next instruction if reg x doesn't equal reg y. (0x9, _, _, 0x0) => { if self.reg[_x] != self.reg[_y] { self.pc += 2; } self.pc += 2; }, // Sets idx to the address xyz. (0xA, _, _, _) => { self.idx = xyz; self.pc += 2; }, // jump to xyz plus reg 0. (0xB, _, _, _) => { self.pc = xyz + (self.reg[0x0] as u16); }, // set reg x to the result of a bitwise and operation on a random number (Typically: 0 to 255) and yz. (0xC, _, _, _) => { let rand_val: u8 = thread_rng().gen(); self.reg[_x] = yz & rand_val; self.pc += 2; }, // draw sprites at coordinate reg x, reg y (NOT X AND Y AS I ORIGINALLY DID) a width of 8 and height of z. // get z sprites from memory starting at location idx. (0xD, _, _, _) => { self.draw_flag = true; let mut pixel_unset = false; let sprites = &self.memory[self.idx as usize .. (self.idx + (z as u16)) as usize]; for i in 0.._z { // for each row of 8 pixels (sprite) // x is columns, y is rows. gfx is a flat array. starting coordinate is ((y + row number) * PIXEL_W) + x. // every 8 bytes, we have to skip to next row, which means adding another PIXEL_W. 
if super::DEBUG { println!("drawing byte: 0b{:08b}", sprites[i]); } for j in 0..8 { let current_coordinate = self.reg[_x] as usize + ((self.reg[_y] as usize + i) * (PIXEL_W as usize)) + j; let current_sprite_bit = (sprites[i] & (1 << (7-j))) >> (7-j); if super::DEBUG { println!("drawing pixel 0b{:b} at {}, {}", current_sprite_bit, current_coordinate % PIXEL_W as usize, current_coordinate / PIXEL_W as usize ); } if self.gfx[current_coordinate % self.gfx.len()] & current_sprite_bit != 0 { // if the current byte/pixel is 1, and the sprite bit is 1, pixel_unset = true; // then the xor operation will flip an on bit to off, meaning we need to record and set reg f. } self.gfx[current_coordinate % self.gfx.len()] ^= current_sprite_bit; // xor with sprite bit to draw } } self.reg[0xF] = if pixel_unset { 1 } else { 0 }; self.pc += 2; if super::DEBUG { println!("screen:"); for i in 0..PIXEL_H { for j in 0..PIXEL_W { print!("{} ", self.gfx[((PIXEL_W * i) + j) as usize]) } println!(); } } }, // skip next instruction if key corresponding to reg x is pressed. (0xE, _, 0x9, 0xE) => { if self.key[self.reg[_x] as usize] != 0 { self.pc += 2; } self.pc += 2; }, // skip next instruction if key corresponding to reg x isn't pressed. (0xE, _, 0xA, 0x1) => { if self.key[self.reg[_x] as usize] == 0 { self.pc += 2; } self.pc += 2; }, // set reg x to value of delay timer. (0xF, _, 0x0, 0x7) => { self.reg[_x] = self.delay_timer; self.pc += 2; }, // wait for key press and store in reg x. (0xF, _, 0x0, 0xA) => { // we don't check for input in the middle of a cycle, so we should just pass, not incrementing program counter, // and let the program come back to here until a key is registered. if self.key != [0; 16] { 'key_checking: for (i, k) in self.key.iter().enumerate() { // including lifetime so we can break after only one key is stored to reg x if *k != 0 { self.reg[_x] = i as u8; self.pc += 2; break 'key_checking; } } } }, // set delay timer to value of reg x. 
(0xF, _, 0x1, 0x5) => { self.delay_timer = self.reg[_x]; self.pc += 2; }, // set sound timer to value of reg x. (0xF, _, 0x1, 0x8) => { self.sound_timer = self.reg[_x]; self.pc += 2; }, // add value of reg x to idx. (0xF, _, 0x1, 0xE) => { self.idx += self.reg[_x] as u16; self.pc += 2; }, // set idx to location of font char IN REGISTER X (not x). (0xF, _, 0x2, 0x9) => { self.idx = FONT_LOCATION + (self.reg[_x] as u16 * 5); self.pc += 2; }, // store the binary-coded decimal representation of reg x in memory[idx..idx+2]. (0xF, _, 0x3, 0x3) => { self.memory[self.idx as usize] = self.reg[_x] / 100; self.memory[self.idx as usize + 1] = (self.reg[_x] % 100) / 10; self.memory[self.idx as usize + 2] = self.reg[_x] % 10; self.pc += 2; }, // store reg 0 .. reg x (inclusive) in memory[idx..]. don't modify idx. (0xF, _, 0x5, 0x5) => { for i in 0 ..= _x { self.memory[self.idx as usize + i] = self.reg[i]; } self.pc += 2; }, // load reg 0 .. reg x (inclusive) from memory[idx..]. don't modify idx. (0xF, _, 0x6, 0x5) => { for i in 0 ..= _x { self.reg[i] = self.memory[self.idx as usize + i]; } self.pc += 2; }, oopsie => { println!("illegal instruction: {:02x?}", oopsie); self.pc += 2; }, }; } } const FONT_SET: [u8; 80] = [ 0xF0, 0x90, 0x90, 0x90, 0xF0, // 0 0x20, 0x60, 0x20, 0x20, 0x70, // 1 0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2 0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3 0x90, 0x90, 0xF0, 0x10, 0x10, // 4 0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5 0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6 0xF0, 0x10, 0x20, 0x40, 0x40, // 7 0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8 0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9 0xF0, 0x90, 0xF0, 0x90, 0x90, // A 0xE0, 0x90, 0xE0, 0x90, 0xE0, // B 0xF0, 0x80, 0x80, 0x80, 0xF0, // C 0xE0, 0x90, 0x90, 0x90, 0xE0, // D 0xF0, 0x80, 0xF0, 0x80, 0xF0, // E 0xF0, 0x80, 0xF0, 0x80, 0x80 // F ];
Chip8
identifier_name
chip8.rs
use rand::{Rng, thread_rng}; pub const PIXEL_W: u16 = 64; // width of CHIP-8 screen pub const PIXEL_H: u16 = 32; // height of CHIP-8 screen pub const FONT_LOCATION: u16 = 0x80; // location of font set in system RAM pub const CARTRIDGE_LOCATION: u16 = 0x200; // location in system RAM where game data should be loaded on boot pub struct Chip8 { pub memory: [u8; 4096], // RAM pub reg: [u8; 16], // registers pub gfx: [u8; (PIXEL_W * PIXEL_H) as usize], // pixels stack: [u16; 16], // subroutine stack pub key: [u8; 16], // keypad idx: u16, // index register pc: u16, // program counter sp: u16, // stack pointer pub delay_timer: u8, pub sound_timer: u8, pub draw_flag: bool, // set when clear screen or draw opcodes are called } impl Chip8 { pub fn new() -> Chip8 { let mut chip = Chip8 { memory: [0;4096], reg: [0;16], gfx: [0; (PIXEL_W * PIXEL_H) as usize], stack: [0; 16], key: [0; 16], idx: 0, pc: CARTRIDGE_LOCATION, sp: 0, delay_timer: 0, sound_timer: 0, draw_flag: false, }; // load font set for (i, v) in FONT_SET.iter().enumerate() { chip.memory[FONT_LOCATION as usize + i] = *v; } chip } pub fn cycle(&mut self) { // all opcodes are two bytes. // get the byte at memory[program counter] and memory[program counter + 1], // split them into nibbles for convenience. let w = self.memory[self.pc as usize] >> 4; let x = self.memory[self.pc as usize] & 0xF; let y = self.memory[(self.pc+1) as usize] >> 4; let z = self.memory[(self.pc+1) as usize] & 0xF; let yz = y << 4 | z; let xyz: u16 = (x as u16) << 8 | (y as u16) << 4 | (z as u16); let (_x, _y, _z) = (x as usize, y as usize, z as usize); let opcode = (w, x, y, z); if super::DEBUG { println!("=================\nregisters: {:02x?}", self.reg); println!("pc: 0x{:02x}, idx: 0x{:02x}, bytes at idx: {:02x?}", self.pc, self.idx, &self.memory[self.idx as usize..(self.idx+8) as usize]); println!("executing opcode {:02x?}", opcode); } match opcode { // skipping instruction 0XYZ // clear screen. 
(0x0, 0x0, 0xE, 0x0) => { self.draw_flag = true; self.gfx.iter_mut().for_each(|b| *b = 0); self.pc += 2; }, // return from subroutine. (0x0, 0x0, 0xE, 0xE) => { self.sp -= 1; self.pc = self.stack[self.sp as usize]; }, // go to xyz. (0x1, _, _, _) => self.pc = xyz, // call subroutine at xyz. (0x2, _, _, _) => { self.stack[self.sp as usize] = self.pc + 2; // put next instruction on stack self.sp += 1; // increase stack pointer self.pc = xyz; // jump to subroutine }, // skip next instruction if register x equals yz. (0x3, _, _, _) => { if self.reg[_x] == yz { self.pc += 2; } self.pc += 2; }, // skip next instruction if register x doesn't equal yz. (0x4, _, _, _) => { if self.reg[_x] != yz { self.pc += 2; } self.pc += 2; }, // skip next instruction if reg x == reg y. (0x5, _, _, 0x0) => { if self.reg[_x] == self.reg[_y] { self.pc += 2; } self.pc += 2; }, // set reg x to yz. (0x6, _, _, _) => { self.reg[_x] = yz; self.pc += 2; }, // add yz to reg x. (0x7, _, _, _) => { self.reg[_x] = self.reg[_x].wrapping_add(yz); self.pc += 2; }, // set reg x to value of reg y. (0x8, _, _, 0x0) => { self.reg[_x] = self.reg[_y]; self.pc += 2; }, // set reg x to reg x | reg y. (0x8, _, _, 0x1) => { self.reg[_x] |= self.reg[_y]; self.pc += 2; }, // set reg x to reg x & reg y. (0x8, _, _, 0x2) => { self.reg[_x] &= self.reg[_y]; self.pc += 2; }, // UNDOCUMENTED. set reg x to reg x ^ reg y. (0x8, _, _, 0x3) => { self.reg[_x] ^= self.reg[_y]; self.pc += 2; }, // add reg y to reg x. reg f is set to 1 when there's a carry, and to 0 when there isn't. (0x8, _, _, 0x4) => { let old_x = self.reg[_x]; self.reg[_x] = self.reg[_x].wrapping_add(self.reg[_y]); self.reg[0xF] = if self.reg[_x] < old_x { 1 } else { 0 }; self.pc += 2; }, // reg y is subtracted from reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't. 
(0x8, _, _, 0x5) => { self.reg[0xF] = if self.reg[_x] < self.reg[_y] { 0 } else { 1 }; self.reg[_x] = self.reg[_x].wrapping_sub(self.reg[_y]); self.pc += 2; }, // WEIRD UNDOCUMENTED LEGACY ONE. TODO: add legacy mode? (0x8, _, _, 0x6) => { // first attempt. newer version? self.reg[0xF] = self.reg[_x] & 0x1; self.reg[_x] >>= 1; // legacy? according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set // self.reg[0xF] = self.reg[_y] & 0x1; // self.reg[_x] = self.reg[_y] >> 1; self.pc += 2; }, // UNDOCUMENTED. sets reg x to reg y minus reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't. (0x8, _, _, 0x7) => { self.reg[0xF] = if self.reg[_y] < self.reg[_x] { 0 } else { 1 }; self.reg[_x] = self.reg[_y].wrapping_sub(self.reg[_x]); self.pc += 2; }, // UNDOCUMENTED. store the most significant bit of reg x in reg f and left-shift reg x by 1. (0x8, _, _, 0xE) => { // according to https://en.wikipedia.org/wiki/CHIP-8#Opcode_table self.reg[0xF] = (self.reg[_x] & (1 << 7)) >> 7; self.reg[_x] <<= 1; // according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set // self.reg[0xF] = (self.reg[_y] & (1 << 7)) >> 7; // self.reg[_x] = self.reg[_y] << 1; self.pc += 2; }, // skips the next instruction if reg x doesn't equal reg y. (0x9, _, _, 0x0) => { if self.reg[_x] != self.reg[_y] { self.pc += 2; } self.pc += 2; }, // Sets idx to the address xyz. (0xA, _, _, _) => { self.idx = xyz; self.pc += 2; }, // jump to xyz plus reg 0. (0xB, _, _, _) => { self.pc = xyz + (self.reg[0x0] as u16); }, // set reg x to the result of a bitwise and operation on a random number (Typically: 0 to 255) and yz. (0xC, _, _, _) =>
, // draw sprites at coordinate reg x, reg y (NOT X AND Y AS I ORIGINALLY DID) a width of 8 and height of z. // get z sprites from memory starting at location idx. (0xD, _, _, _) => { self.draw_flag = true; let mut pixel_unset = false; let sprites = &self.memory[self.idx as usize .. (self.idx + (z as u16)) as usize]; for i in 0.._z { // for each row of 8 pixels (sprite) // x is columns, y is rows. gfx is a flat array. starting coordinate is ((y + row number) * PIXEL_W) + x. // every 8 bytes, we have to skip to next row, which means adding another PIXEL_W. if super::DEBUG { println!("drawing byte: 0b{:08b}", sprites[i]); } for j in 0..8 { let current_coordinate = self.reg[_x] as usize + ((self.reg[_y] as usize + i) * (PIXEL_W as usize)) + j; let current_sprite_bit = (sprites[i] & (1 << (7-j))) >> (7-j); if super::DEBUG { println!("drawing pixel 0b{:b} at {}, {}", current_sprite_bit, current_coordinate % PIXEL_W as usize, current_coordinate / PIXEL_W as usize ); } if self.gfx[current_coordinate % self.gfx.len()] & current_sprite_bit != 0 { // if the current byte/pixel is 1, and the sprite bit is 1, pixel_unset = true; // then the xor operation will flip an on bit to off, meaning we need to record and set reg f. } self.gfx[current_coordinate % self.gfx.len()] ^= current_sprite_bit; // xor with sprite bit to draw } } self.reg[0xF] = if pixel_unset { 1 } else { 0 }; self.pc += 2; if super::DEBUG { println!("screen:"); for i in 0..PIXEL_H { for j in 0..PIXEL_W { print!("{} ", self.gfx[((PIXEL_W * i) + j) as usize]) } println!(); } } }, // skip next instruction if key corresponding to reg x is pressed. (0xE, _, 0x9, 0xE) => { if self.key[self.reg[_x] as usize] != 0 { self.pc += 2; } self.pc += 2; }, // skip next instruction if key corresponding to reg x isn't pressed. (0xE, _, 0xA, 0x1) => { if self.key[self.reg[_x] as usize] == 0 { self.pc += 2; } self.pc += 2; }, // set reg x to value of delay timer. 
(0xF, _, 0x0, 0x7) => { self.reg[_x] = self.delay_timer; self.pc += 2; }, // wait for key press and store in reg x. (0xF, _, 0x0, 0xA) => { // we don't check for input in the middle of a cycle, so we should just pass, not incrementing program counter, // and let the program come back to here until a key is registered. if self.key != [0; 16] { 'key_checking: for (i, k) in self.key.iter().enumerate() { // including lifetime so we can break after only one key is stored to reg x if *k != 0 { self.reg[_x] = i as u8; self.pc += 2; break 'key_checking; } } } }, // set delay timer to value of reg x. (0xF, _, 0x1, 0x5) => { self.delay_timer = self.reg[_x]; self.pc += 2; }, // set sound timer to value of reg x. (0xF, _, 0x1, 0x8) => { self.sound_timer = self.reg[_x]; self.pc += 2; }, // add value of reg x to idx. (0xF, _, 0x1, 0xE) => { self.idx += self.reg[_x] as u16; self.pc += 2; }, // set idx to location of font char IN REGISTER X (not x). (0xF, _, 0x2, 0x9) => { self.idx = FONT_LOCATION + (self.reg[_x] as u16 * 5); self.pc += 2; }, // store the binary-coded decimal representation of reg x in memory[idx..idx+2]. (0xF, _, 0x3, 0x3) => { self.memory[self.idx as usize] = self.reg[_x] / 100; self.memory[self.idx as usize + 1] = (self.reg[_x] % 100) / 10; self.memory[self.idx as usize + 2] = self.reg[_x] % 10; self.pc += 2; }, // store reg 0 .. reg x (inclusive) in memory[idx..]. don't modify idx. (0xF, _, 0x5, 0x5) => { for i in 0 ..= _x { self.memory[self.idx as usize + i] = self.reg[i]; } self.pc += 2; }, // load reg 0 .. reg x (inclusive) from memory[idx..]. don't modify idx. 
(0xF, _, 0x6, 0x5) => { for i in 0 ..= _x { self.reg[i] = self.memory[self.idx as usize + i]; } self.pc += 2; }, oopsie => { println!("illegal instruction: {:02x?}", oopsie); self.pc += 2; }, }; } } const FONT_SET: [u8; 80] = [ 0xF0, 0x90, 0x90, 0x90, 0xF0, // 0 0x20, 0x60, 0x20, 0x20, 0x70, // 1 0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2 0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3 0x90, 0x90, 0xF0, 0x10, 0x10, // 4 0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5 0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6 0xF0, 0x10, 0x20, 0x40, 0x40, // 7 0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8 0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9 0xF0, 0x90, 0xF0, 0x90, 0x90, // A 0xE0, 0x90, 0xE0, 0x90, 0xE0, // B 0xF0, 0x80, 0x80, 0x80, 0xF0, // C 0xE0, 0x90, 0x90, 0x90, 0xE0, // D 0xF0, 0x80, 0xF0, 0x80, 0xF0, // E 0xF0, 0x80, 0xF0, 0x80, 0x80 // F ];
{ let rand_val: u8 = thread_rng().gen(); self.reg[_x] = yz & rand_val; self.pc += 2; }
conditional_block
chip8.rs
use rand::{Rng, thread_rng}; pub const PIXEL_W: u16 = 64; // width of CHIP-8 screen pub const PIXEL_H: u16 = 32; // height of CHIP-8 screen pub const FONT_LOCATION: u16 = 0x80; // location of font set in system RAM pub const CARTRIDGE_LOCATION: u16 = 0x200; // location in system RAM where game data should be loaded on boot pub struct Chip8 { pub memory: [u8; 4096], // RAM pub reg: [u8; 16], // registers pub gfx: [u8; (PIXEL_W * PIXEL_H) as usize], // pixels stack: [u16; 16], // subroutine stack pub key: [u8; 16], // keypad idx: u16, // index register pc: u16, // program counter sp: u16, // stack pointer pub delay_timer: u8, pub sound_timer: u8, pub draw_flag: bool, // set when clear screen or draw opcodes are called } impl Chip8 { pub fn new() -> Chip8 { let mut chip = Chip8 { memory: [0;4096], reg: [0;16], gfx: [0; (PIXEL_W * PIXEL_H) as usize], stack: [0; 16], key: [0; 16], idx: 0, pc: CARTRIDGE_LOCATION, sp: 0, delay_timer: 0, sound_timer: 0, draw_flag: false, }; // load font set for (i, v) in FONT_SET.iter().enumerate() { chip.memory[FONT_LOCATION as usize + i] = *v; } chip } pub fn cycle(&mut self) { // all opcodes are two bytes. // get the byte at memory[program counter] and memory[program counter + 1], // split them into nibbles for convenience. let w = self.memory[self.pc as usize] >> 4; let x = self.memory[self.pc as usize] & 0xF; let y = self.memory[(self.pc+1) as usize] >> 4; let z = self.memory[(self.pc+1) as usize] & 0xF; let yz = y << 4 | z; let xyz: u16 = (x as u16) << 8 | (y as u16) << 4 | (z as u16); let (_x, _y, _z) = (x as usize, y as usize, z as usize); let opcode = (w, x, y, z); if super::DEBUG { println!("=================\nregisters: {:02x?}", self.reg); println!("pc: 0x{:02x}, idx: 0x{:02x}, bytes at idx: {:02x?}", self.pc, self.idx, &self.memory[self.idx as usize..(self.idx+8) as usize]); println!("executing opcode {:02x?}", opcode); } match opcode { // skipping instruction 0XYZ // clear screen. 
(0x0, 0x0, 0xE, 0x0) => { self.draw_flag = true; self.gfx.iter_mut().for_each(|b| *b = 0); self.pc += 2; }, // return from subroutine. (0x0, 0x0, 0xE, 0xE) => { self.sp -= 1; self.pc = self.stack[self.sp as usize]; }, // go to xyz. (0x1, _, _, _) => self.pc = xyz, // call subroutine at xyz. (0x2, _, _, _) => { self.stack[self.sp as usize] = self.pc + 2; // put next instruction on stack self.sp += 1; // increase stack pointer self.pc = xyz; // jump to subroutine }, // skip next instruction if register x equals yz. (0x3, _, _, _) => { if self.reg[_x] == yz { self.pc += 2; } self.pc += 2; }, // skip next instruction if register x doesn't equal yz. (0x4, _, _, _) => { if self.reg[_x] != yz { self.pc += 2; } self.pc += 2; }, // skip next instruction if reg x == reg y. (0x5, _, _, 0x0) => { if self.reg[_x] == self.reg[_y] { self.pc += 2; } self.pc += 2; }, // set reg x to yz. (0x6, _, _, _) => { self.reg[_x] = yz; self.pc += 2; }, // add yz to reg x. (0x7, _, _, _) => { self.reg[_x] = self.reg[_x].wrapping_add(yz); self.pc += 2; }, // set reg x to value of reg y. (0x8, _, _, 0x0) => { self.reg[_x] = self.reg[_y]; self.pc += 2; }, // set reg x to reg x | reg y. (0x8, _, _, 0x1) => { self.reg[_x] |= self.reg[_y]; self.pc += 2; }, // set reg x to reg x & reg y. (0x8, _, _, 0x2) => { self.reg[_x] &= self.reg[_y]; self.pc += 2; }, // UNDOCUMENTED. set reg x to reg x ^ reg y. (0x8, _, _, 0x3) => { self.reg[_x] ^= self.reg[_y]; self.pc += 2; }, // add reg y to reg x. reg f is set to 1 when there's a carry, and to 0 when there isn't. (0x8, _, _, 0x4) => { let old_x = self.reg[_x]; self.reg[_x] = self.reg[_x].wrapping_add(self.reg[_y]); self.reg[0xF] = if self.reg[_x] < old_x { 1 } else { 0 }; self.pc += 2; }, // reg y is subtracted from reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't. 
(0x8, _, _, 0x5) => { self.reg[0xF] = if self.reg[_x] < self.reg[_y] { 0 } else { 1 }; self.reg[_x] = self.reg[_x].wrapping_sub(self.reg[_y]); self.pc += 2; }, // WEIRD UNDOCUMENTED LEGACY ONE. TODO: add legacy mode? (0x8, _, _, 0x6) => { // first attempt. newer version? self.reg[0xF] = self.reg[_x] & 0x1; self.reg[_x] >>= 1; // legacy? according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set // self.reg[0xF] = self.reg[_y] & 0x1; // self.reg[_x] = self.reg[_y] >> 1; self.pc += 2; }, // UNDOCUMENTED. sets reg x to reg y minus reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't. (0x8, _, _, 0x7) => { self.reg[0xF] = if self.reg[_y] < self.reg[_x] { 0 } else { 1 }; self.reg[_x] = self.reg[_y].wrapping_sub(self.reg[_x]); self.pc += 2; }, // UNDOCUMENTED. store the most significant bit of reg x in reg f and left-shift reg x by 1. (0x8, _, _, 0xE) => { // according to https://en.wikipedia.org/wiki/CHIP-8#Opcode_table self.reg[0xF] = (self.reg[_x] & (1 << 7)) >> 7; self.reg[_x] <<= 1; // according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set // self.reg[0xF] = (self.reg[_y] & (1 << 7)) >> 7; // self.reg[_x] = self.reg[_y] << 1; self.pc += 2; }, // skips the next instruction if reg x doesn't equal reg y. (0x9, _, _, 0x0) => { if self.reg[_x] != self.reg[_y] { self.pc += 2; } self.pc += 2; }, // Sets idx to the address xyz. (0xA, _, _, _) => { self.idx = xyz; self.pc += 2; }, // jump to xyz plus reg 0. (0xB, _, _, _) => { self.pc = xyz + (self.reg[0x0] as u16); }, // set reg x to the result of a bitwise and operation on a random number (Typically: 0 to 255) and yz. (0xC, _, _, _) => { let rand_val: u8 = thread_rng().gen(); self.reg[_x] = yz & rand_val; self.pc += 2; }, // draw sprites at coordinate reg x, reg y (NOT X AND Y AS I ORIGINALLY DID) a width of 8 and height of z. // get z sprites from memory starting at location idx. 
(0xD, _, _, _) => { self.draw_flag = true; let mut pixel_unset = false; let sprites = &self.memory[self.idx as usize .. (self.idx + (z as u16)) as usize]; for i in 0.._z { // for each row of 8 pixels (sprite) // x is columns, y is rows. gfx is a flat array. starting coordinate is ((y + row number) * PIXEL_W) + x. // every 8 bytes, we have to skip to next row, which means adding another PIXEL_W. if super::DEBUG { println!("drawing byte: 0b{:08b}", sprites[i]); } for j in 0..8 { let current_coordinate = self.reg[_x] as usize + ((self.reg[_y] as usize + i) * (PIXEL_W as usize)) + j; let current_sprite_bit = (sprites[i] & (1 << (7-j))) >> (7-j);
current_sprite_bit, current_coordinate % PIXEL_W as usize, current_coordinate / PIXEL_W as usize ); } if self.gfx[current_coordinate % self.gfx.len()] & current_sprite_bit != 0 { // if the current byte/pixel is 1, and the sprite bit is 1, pixel_unset = true; // then the xor operation will flip an on bit to off, meaning we need to record and set reg f. } self.gfx[current_coordinate % self.gfx.len()] ^= current_sprite_bit; // xor with sprite bit to draw } } self.reg[0xF] = if pixel_unset { 1 } else { 0 }; self.pc += 2; if super::DEBUG { println!("screen:"); for i in 0..PIXEL_H { for j in 0..PIXEL_W { print!("{} ", self.gfx[((PIXEL_W * i) + j) as usize]) } println!(); } } }, // skip next instruction if key corresponding to reg x is pressed. (0xE, _, 0x9, 0xE) => { if self.key[self.reg[_x] as usize] != 0 { self.pc += 2; } self.pc += 2; }, // skip next instruction if key corresponding to reg x isn't pressed. (0xE, _, 0xA, 0x1) => { if self.key[self.reg[_x] as usize] == 0 { self.pc += 2; } self.pc += 2; }, // set reg x to value of delay timer. (0xF, _, 0x0, 0x7) => { self.reg[_x] = self.delay_timer; self.pc += 2; }, // wait for key press and store in reg x. (0xF, _, 0x0, 0xA) => { // we don't check for input in the middle of a cycle, so we should just pass, not incrementing program counter, // and let the program come back to here until a key is registered. if self.key != [0; 16] { 'key_checking: for (i, k) in self.key.iter().enumerate() { // including lifetime so we can break after only one key is stored to reg x if *k != 0 { self.reg[_x] = i as u8; self.pc += 2; break 'key_checking; } } } }, // set delay timer to value of reg x. (0xF, _, 0x1, 0x5) => { self.delay_timer = self.reg[_x]; self.pc += 2; }, // set sound timer to value of reg x. (0xF, _, 0x1, 0x8) => { self.sound_timer = self.reg[_x]; self.pc += 2; }, // add value of reg x to idx. 
(0xF, _, 0x1, 0xE) => { self.idx += self.reg[_x] as u16; self.pc += 2; }, // set idx to location of font char IN REGISTER X (not x). (0xF, _, 0x2, 0x9) => { self.idx = FONT_LOCATION + (self.reg[_x] as u16 * 5); self.pc += 2; }, // store the binary-coded decimal representation of reg x in memory[idx..idx+2]. (0xF, _, 0x3, 0x3) => { self.memory[self.idx as usize] = self.reg[_x] / 100; self.memory[self.idx as usize + 1] = (self.reg[_x] % 100) / 10; self.memory[self.idx as usize + 2] = self.reg[_x] % 10; self.pc += 2; }, // store reg 0 .. reg x (inclusive) in memory[idx..]. don't modify idx. (0xF, _, 0x5, 0x5) => { for i in 0 ..= _x { self.memory[self.idx as usize + i] = self.reg[i]; } self.pc += 2; }, // load reg 0 .. reg x (inclusive) from memory[idx..]. don't modify idx. (0xF, _, 0x6, 0x5) => { for i in 0 ..= _x { self.reg[i] = self.memory[self.idx as usize + i]; } self.pc += 2; }, oopsie => { println!("illegal instruction: {:02x?}", oopsie); self.pc += 2; }, }; } } const FONT_SET: [u8; 80] = [ 0xF0, 0x90, 0x90, 0x90, 0xF0, // 0 0x20, 0x60, 0x20, 0x20, 0x70, // 1 0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2 0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3 0x90, 0x90, 0xF0, 0x10, 0x10, // 4 0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5 0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6 0xF0, 0x10, 0x20, 0x40, 0x40, // 7 0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8 0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9 0xF0, 0x90, 0xF0, 0x90, 0x90, // A 0xE0, 0x90, 0xE0, 0x90, 0xE0, // B 0xF0, 0x80, 0x80, 0x80, 0xF0, // C 0xE0, 0x90, 0x90, 0x90, 0xE0, // D 0xF0, 0x80, 0xF0, 0x80, 0xF0, // E 0xF0, 0x80, 0xF0, 0x80, 0x80 // F ];
if super::DEBUG { println!("drawing pixel 0b{:b} at {}, {}",
random_line_split
mod.rs
//! Boa's implementation of ECMAScript's global `BigInt` object. //! //! `BigInt` is a built-in object that provides a way to represent whole numbers larger //! than the largest number JavaScript can reliably represent with the Number primitive //! and represented by the `Number.MAX_SAFE_INTEGER` constant. //! `BigInt` can be used for arbitrarily large integers. //! //! More information: //! - [ECMAScript reference][spec] //! - [MDN documentation][mdn] //! //! [spec]: https://tc39.es/ecma262/#sec-bigint-objects //! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt use crate::{ builtins::BuiltInObject, context::intrinsics::{Intrinsics, StandardConstructor, StandardConstructors}, error::JsNativeError, object::JsObject, property::Attribute, realm::Realm, symbol::JsSymbol, value::{IntegerOrInfinity, PreferredType}, Context, JsArgs, JsBigInt, JsResult, JsValue, }; use boa_profiler::Profiler; use num_bigint::ToBigInt; use super::{BuiltInBuilder, BuiltInConstructor, IntrinsicObject}; #[cfg(test)] mod tests; /// `BigInt` implementation. #[derive(Debug, Clone, Copy)] pub struct BigInt;
impl IntrinsicObject for BigInt { fn init(realm: &Realm) { let _timer = Profiler::global().start_event(Self::NAME, "init"); BuiltInBuilder::from_standard_constructor::<Self>(realm) .method(Self::to_string, "toString", 0) .method(Self::value_of, "valueOf", 0) .static_method(Self::as_int_n, "asIntN", 2) .static_method(Self::as_uint_n, "asUintN", 2) .property( JsSymbol::to_string_tag(), Self::NAME, Attribute::READONLY | Attribute::NON_ENUMERABLE | Attribute::CONFIGURABLE, ) .build(); } fn get(intrinsics: &Intrinsics) -> JsObject { Self::STANDARD_CONSTRUCTOR(intrinsics.constructors()).constructor() } } impl BuiltInObject for BigInt { const NAME: &'static str = "BigInt"; } impl BuiltInConstructor for BigInt { const LENGTH: usize = 1; const STANDARD_CONSTRUCTOR: fn(&StandardConstructors) -> &StandardConstructor = StandardConstructors::bigint; /// `BigInt()` /// /// The `BigInt()` constructor is used to create `BigInt` objects. /// /// More information: /// - [ECMAScript reference][spec] /// - [MDN documentation][mdn] /// /// [spec]: https://tc39.es/ecma262/#sec-bigint-objects /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/BigInt fn constructor( new_target: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { // 1. If NewTarget is not undefined, throw a TypeError exception. if !new_target.is_undefined() { return Err(JsNativeError::typ() .with_message("BigInt is not a constructor") .into()); } let value = args.get_or_undefined(0); // 2. Let prim be ? ToPrimitive(value, number). let prim = value.to_primitive(context, PreferredType::Number)?; // 3. If Type(prim) is Number, return ? NumberToBigInt(prim). if let Some(number) = prim.as_number() { return Self::number_to_bigint(number); } // 4. Otherwise, return ? ToBigInt(prim). 
Ok(prim.to_bigint(context)?.into()) } } impl BigInt { /// `NumberToBigInt ( number )` /// /// More information: /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-numbertobigint fn number_to_bigint(number: f64) -> JsResult<JsValue> { // 1. If IsIntegralNumber(number) is false, throw a RangeError exception. if number.is_nan() || number.is_infinite() || number.fract() != 0.0 { return Err(JsNativeError::range() .with_message(format!("cannot convert {number} to a BigInt")) .into()); } // 2. Return the BigInt value that represents โ„(number). Ok(JsBigInt::from(number.to_bigint().expect("This conversion must be safe")).into()) } /// The abstract operation `thisBigIntValue` takes argument value. /// /// The phrase โ€œthis `BigInt` valueโ€ within the specification of a method refers to the /// result returned by calling the abstract operation `thisBigIntValue` with the `this` value /// of the method invocation passed as the argument. /// /// More information: /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-thisbigintvalue fn this_bigint_value(value: &JsValue) -> JsResult<JsBigInt> { value // 1. If Type(value) is BigInt, return value. .as_bigint() .cloned() // 2. If Type(value) is Object and value has a [[BigIntData]] internal slot, then // a. Assert: Type(value.[[BigIntData]]) is BigInt. // b. Return value.[[BigIntData]]. .or_else(|| { value .as_object() .and_then(|obj| obj.borrow().as_bigint().cloned()) }) // 3. Throw a TypeError exception. .ok_or_else(|| { JsNativeError::typ() .with_message("'this' is not a BigInt") .into() }) } /// `BigInt.prototype.toString( [radix] )` /// /// The `toString()` method returns a string representing the specified `BigInt` object. 
/// /// More information: /// - [ECMAScript reference][spec] /// - [MDN documentation][mdn] /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.tostring /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/toString #[allow(clippy::wrong_self_convention)] pub(crate) fn to_string( this: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { // 1. Let x be ? thisBigIntValue(this value). let x = Self::this_bigint_value(this)?; let radix = args.get_or_undefined(0); // 2. If radix is undefined, let radixMV be 10. let radix_mv = if radix.is_undefined() { // 5. If radixMV = 10, return ! ToString(x). // Note: early return optimization. return Ok(x.to_string().into()); // 3. Else, let radixMV be ? ToIntegerOrInfinity(radix). } else { radix.to_integer_or_infinity(context)? }; // 4. If radixMV < 2 or radixMV > 36, throw a RangeError exception. let radix_mv = match radix_mv { IntegerOrInfinity::Integer(i) if (2..=36).contains(&i) => i, _ => { return Err(JsNativeError::range() .with_message("radix must be an integer at least 2 and no greater than 36") .into()) } }; // 5. If radixMV = 10, return ! ToString(x). if radix_mv == 10 { return Ok(x.to_string().into()); } // 1. Let x be ? thisBigIntValue(this value). // 6. Return the String representation of this Number value using the radix specified by radixMV. // Letters a-z are used for digits with values 10 through 35. // The precise algorithm is implementation-defined, however the algorithm should be a generalization of that specified in 6.1.6.2.23. Ok(JsValue::new(x.to_string_radix(radix_mv as u32))) } /// `BigInt.prototype.valueOf()` /// /// The `valueOf()` method returns the wrapped primitive value of a Number object. 
/// /// More information: /// - [ECMAScript reference][spec] /// - [MDN documentation][mdn] /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.valueof /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/valueOf pub(crate) fn value_of( this: &JsValue, _: &[JsValue], _: &mut Context<'_>, ) -> JsResult<JsValue> { Ok(JsValue::new(Self::this_bigint_value(this)?)) } /// `BigInt.asIntN()` /// /// The `BigInt.asIntN()` method wraps the value of a `BigInt` to a signed integer between `-2**(width - 1)` and `2**(width-1) - 1`. /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.asintn /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asIntN #[allow(clippy::wrong_self_convention)] pub(crate) fn as_int_n( _: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { let (modulo, bits) = Self::calculate_as_uint_n(args, context)?; if bits > 0 && modulo >= JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits) - 1))? { Ok(JsValue::new(JsBigInt::sub( &modulo, &JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?, ))) } else { Ok(JsValue::new(modulo)) } } /// `BigInt.asUintN()` /// /// The `BigInt.asUintN()` method wraps the value of a `BigInt` to an unsigned integer between `0` and `2**(width) - 1`. /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.asuintn /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asUintN #[allow(clippy::wrong_self_convention)] pub(crate) fn as_uint_n( _: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { let (modulo, _) = Self::calculate_as_uint_n(args, context)?; Ok(JsValue::new(modulo)) } /// Helper function to wrap the value of a `BigInt` to an unsigned integer. /// /// This function expects the same arguments as `as_uint_n` and wraps the value of a `BigInt`. 
/// Additionally to the wrapped unsigned value it returns the converted `bits` argument, so it /// can be reused from the `as_int_n` method. fn calculate_as_uint_n( args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<(JsBigInt, u32)> { let bits_arg = args.get_or_undefined(0); let bigint_arg = args.get_or_undefined(1); let bits = bits_arg.to_index(context)?; let bits = u32::try_from(bits).unwrap_or(u32::MAX); let bigint = bigint_arg.to_bigint(context)?; Ok(( JsBigInt::mod_floor( &bigint, &JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?, ), bits, )) } }
random_line_split
mod.rs
//! Boa's implementation of ECMAScript's global `BigInt` object. //! //! `BigInt` is a built-in object that provides a way to represent whole numbers larger //! than the largest number JavaScript can reliably represent with the Number primitive //! and represented by the `Number.MAX_SAFE_INTEGER` constant. //! `BigInt` can be used for arbitrarily large integers. //! //! More information: //! - [ECMAScript reference][spec] //! - [MDN documentation][mdn] //! //! [spec]: https://tc39.es/ecma262/#sec-bigint-objects //! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt use crate::{ builtins::BuiltInObject, context::intrinsics::{Intrinsics, StandardConstructor, StandardConstructors}, error::JsNativeError, object::JsObject, property::Attribute, realm::Realm, symbol::JsSymbol, value::{IntegerOrInfinity, PreferredType}, Context, JsArgs, JsBigInt, JsResult, JsValue, }; use boa_profiler::Profiler; use num_bigint::ToBigInt; use super::{BuiltInBuilder, BuiltInConstructor, IntrinsicObject}; #[cfg(test)] mod tests; /// `BigInt` implementation. #[derive(Debug, Clone, Copy)] pub struct BigInt; impl IntrinsicObject for BigInt { fn
(realm: &Realm) { let _timer = Profiler::global().start_event(Self::NAME, "init"); BuiltInBuilder::from_standard_constructor::<Self>(realm) .method(Self::to_string, "toString", 0) .method(Self::value_of, "valueOf", 0) .static_method(Self::as_int_n, "asIntN", 2) .static_method(Self::as_uint_n, "asUintN", 2) .property( JsSymbol::to_string_tag(), Self::NAME, Attribute::READONLY | Attribute::NON_ENUMERABLE | Attribute::CONFIGURABLE, ) .build(); } fn get(intrinsics: &Intrinsics) -> JsObject { Self::STANDARD_CONSTRUCTOR(intrinsics.constructors()).constructor() } } impl BuiltInObject for BigInt { const NAME: &'static str = "BigInt"; } impl BuiltInConstructor for BigInt { const LENGTH: usize = 1; const STANDARD_CONSTRUCTOR: fn(&StandardConstructors) -> &StandardConstructor = StandardConstructors::bigint; /// `BigInt()` /// /// The `BigInt()` constructor is used to create `BigInt` objects. /// /// More information: /// - [ECMAScript reference][spec] /// - [MDN documentation][mdn] /// /// [spec]: https://tc39.es/ecma262/#sec-bigint-objects /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/BigInt fn constructor( new_target: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { // 1. If NewTarget is not undefined, throw a TypeError exception. if !new_target.is_undefined() { return Err(JsNativeError::typ() .with_message("BigInt is not a constructor") .into()); } let value = args.get_or_undefined(0); // 2. Let prim be ? ToPrimitive(value, number). let prim = value.to_primitive(context, PreferredType::Number)?; // 3. If Type(prim) is Number, return ? NumberToBigInt(prim). if let Some(number) = prim.as_number() { return Self::number_to_bigint(number); } // 4. Otherwise, return ? ToBigInt(prim). 
Ok(prim.to_bigint(context)?.into()) } } impl BigInt { /// `NumberToBigInt ( number )` /// /// More information: /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-numbertobigint fn number_to_bigint(number: f64) -> JsResult<JsValue> { // 1. If IsIntegralNumber(number) is false, throw a RangeError exception. if number.is_nan() || number.is_infinite() || number.fract() != 0.0 { return Err(JsNativeError::range() .with_message(format!("cannot convert {number} to a BigInt")) .into()); } // 2. Return the BigInt value that represents โ„(number). Ok(JsBigInt::from(number.to_bigint().expect("This conversion must be safe")).into()) } /// The abstract operation `thisBigIntValue` takes argument value. /// /// The phrase โ€œthis `BigInt` valueโ€ within the specification of a method refers to the /// result returned by calling the abstract operation `thisBigIntValue` with the `this` value /// of the method invocation passed as the argument. /// /// More information: /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-thisbigintvalue fn this_bigint_value(value: &JsValue) -> JsResult<JsBigInt> { value // 1. If Type(value) is BigInt, return value. .as_bigint() .cloned() // 2. If Type(value) is Object and value has a [[BigIntData]] internal slot, then // a. Assert: Type(value.[[BigIntData]]) is BigInt. // b. Return value.[[BigIntData]]. .or_else(|| { value .as_object() .and_then(|obj| obj.borrow().as_bigint().cloned()) }) // 3. Throw a TypeError exception. .ok_or_else(|| { JsNativeError::typ() .with_message("'this' is not a BigInt") .into() }) } /// `BigInt.prototype.toString( [radix] )` /// /// The `toString()` method returns a string representing the specified `BigInt` object. 
/// /// More information: /// - [ECMAScript reference][spec] /// - [MDN documentation][mdn] /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.tostring /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/toString #[allow(clippy::wrong_self_convention)] pub(crate) fn to_string( this: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { // 1. Let x be ? thisBigIntValue(this value). let x = Self::this_bigint_value(this)?; let radix = args.get_or_undefined(0); // 2. If radix is undefined, let radixMV be 10. let radix_mv = if radix.is_undefined() { // 5. If radixMV = 10, return ! ToString(x). // Note: early return optimization. return Ok(x.to_string().into()); // 3. Else, let radixMV be ? ToIntegerOrInfinity(radix). } else { radix.to_integer_or_infinity(context)? }; // 4. If radixMV < 2 or radixMV > 36, throw a RangeError exception. let radix_mv = match radix_mv { IntegerOrInfinity::Integer(i) if (2..=36).contains(&i) => i, _ => { return Err(JsNativeError::range() .with_message("radix must be an integer at least 2 and no greater than 36") .into()) } }; // 5. If radixMV = 10, return ! ToString(x). if radix_mv == 10 { return Ok(x.to_string().into()); } // 1. Let x be ? thisBigIntValue(this value). // 6. Return the String representation of this Number value using the radix specified by radixMV. // Letters a-z are used for digits with values 10 through 35. // The precise algorithm is implementation-defined, however the algorithm should be a generalization of that specified in 6.1.6.2.23. Ok(JsValue::new(x.to_string_radix(radix_mv as u32))) } /// `BigInt.prototype.valueOf()` /// /// The `valueOf()` method returns the wrapped primitive value of a Number object. 
/// /// More information: /// - [ECMAScript reference][spec] /// - [MDN documentation][mdn] /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.valueof /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/valueOf pub(crate) fn value_of( this: &JsValue, _: &[JsValue], _: &mut Context<'_>, ) -> JsResult<JsValue> { Ok(JsValue::new(Self::this_bigint_value(this)?)) } /// `BigInt.asIntN()` /// /// The `BigInt.asIntN()` method wraps the value of a `BigInt` to a signed integer between `-2**(width - 1)` and `2**(width-1) - 1`. /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.asintn /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asIntN #[allow(clippy::wrong_self_convention)] pub(crate) fn as_int_n( _: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { let (modulo, bits) = Self::calculate_as_uint_n(args, context)?; if bits > 0 && modulo >= JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits) - 1))? { Ok(JsValue::new(JsBigInt::sub( &modulo, &JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?, ))) } else { Ok(JsValue::new(modulo)) } } /// `BigInt.asUintN()` /// /// The `BigInt.asUintN()` method wraps the value of a `BigInt` to an unsigned integer between `0` and `2**(width) - 1`. /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.asuintn /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asUintN #[allow(clippy::wrong_self_convention)] pub(crate) fn as_uint_n( _: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { let (modulo, _) = Self::calculate_as_uint_n(args, context)?; Ok(JsValue::new(modulo)) } /// Helper function to wrap the value of a `BigInt` to an unsigned integer. /// /// This function expects the same arguments as `as_uint_n` and wraps the value of a `BigInt`. 
/// Additionally to the wrapped unsigned value it returns the converted `bits` argument, so it /// can be reused from the `as_int_n` method. fn calculate_as_uint_n( args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<(JsBigInt, u32)> { let bits_arg = args.get_or_undefined(0); let bigint_arg = args.get_or_undefined(1); let bits = bits_arg.to_index(context)?; let bits = u32::try_from(bits).unwrap_or(u32::MAX); let bigint = bigint_arg.to_bigint(context)?; Ok(( JsBigInt::mod_floor( &bigint, &JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?, ), bits, )) } }
init
identifier_name
mod.rs
//! Boa's implementation of ECMAScript's global `BigInt` object. //! //! `BigInt` is a built-in object that provides a way to represent whole numbers larger //! than the largest number JavaScript can reliably represent with the Number primitive //! and represented by the `Number.MAX_SAFE_INTEGER` constant. //! `BigInt` can be used for arbitrarily large integers. //! //! More information: //! - [ECMAScript reference][spec] //! - [MDN documentation][mdn] //! //! [spec]: https://tc39.es/ecma262/#sec-bigint-objects //! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt use crate::{ builtins::BuiltInObject, context::intrinsics::{Intrinsics, StandardConstructor, StandardConstructors}, error::JsNativeError, object::JsObject, property::Attribute, realm::Realm, symbol::JsSymbol, value::{IntegerOrInfinity, PreferredType}, Context, JsArgs, JsBigInt, JsResult, JsValue, }; use boa_profiler::Profiler; use num_bigint::ToBigInt; use super::{BuiltInBuilder, BuiltInConstructor, IntrinsicObject}; #[cfg(test)] mod tests; /// `BigInt` implementation. 
#[derive(Debug, Clone, Copy)] pub struct BigInt; impl IntrinsicObject for BigInt { fn init(realm: &Realm) { let _timer = Profiler::global().start_event(Self::NAME, "init"); BuiltInBuilder::from_standard_constructor::<Self>(realm) .method(Self::to_string, "toString", 0) .method(Self::value_of, "valueOf", 0) .static_method(Self::as_int_n, "asIntN", 2) .static_method(Self::as_uint_n, "asUintN", 2) .property( JsSymbol::to_string_tag(), Self::NAME, Attribute::READONLY | Attribute::NON_ENUMERABLE | Attribute::CONFIGURABLE, ) .build(); } fn get(intrinsics: &Intrinsics) -> JsObject { Self::STANDARD_CONSTRUCTOR(intrinsics.constructors()).constructor() } } impl BuiltInObject for BigInt { const NAME: &'static str = "BigInt"; } impl BuiltInConstructor for BigInt { const LENGTH: usize = 1; const STANDARD_CONSTRUCTOR: fn(&StandardConstructors) -> &StandardConstructor = StandardConstructors::bigint; /// `BigInt()` /// /// The `BigInt()` constructor is used to create `BigInt` objects. /// /// More information: /// - [ECMAScript reference][spec] /// - [MDN documentation][mdn] /// /// [spec]: https://tc39.es/ecma262/#sec-bigint-objects /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/BigInt fn constructor( new_target: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { // 1. If NewTarget is not undefined, throw a TypeError exception. if !new_target.is_undefined() { return Err(JsNativeError::typ() .with_message("BigInt is not a constructor") .into()); } let value = args.get_or_undefined(0); // 2. Let prim be ? ToPrimitive(value, number). let prim = value.to_primitive(context, PreferredType::Number)?; // 3. If Type(prim) is Number, return ? NumberToBigInt(prim). if let Some(number) = prim.as_number() { return Self::number_to_bigint(number); } // 4. Otherwise, return ? ToBigInt(prim). 
Ok(prim.to_bigint(context)?.into()) } } impl BigInt { /// `NumberToBigInt ( number )` /// /// More information: /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-numbertobigint fn number_to_bigint(number: f64) -> JsResult<JsValue> { // 1. If IsIntegralNumber(number) is false, throw a RangeError exception. if number.is_nan() || number.is_infinite() || number.fract() != 0.0 { return Err(JsNativeError::range() .with_message(format!("cannot convert {number} to a BigInt")) .into()); } // 2. Return the BigInt value that represents โ„(number). Ok(JsBigInt::from(number.to_bigint().expect("This conversion must be safe")).into()) } /// The abstract operation `thisBigIntValue` takes argument value. /// /// The phrase โ€œthis `BigInt` valueโ€ within the specification of a method refers to the /// result returned by calling the abstract operation `thisBigIntValue` with the `this` value /// of the method invocation passed as the argument. /// /// More information: /// - [ECMAScript reference][spec] /// /// [spec]: https://tc39.es/ecma262/#sec-thisbigintvalue fn this_bigint_value(value: &JsValue) -> JsResult<JsBigInt> { value // 1. If Type(value) is BigInt, return value. .as_bigint() .cloned() // 2. If Type(value) is Object and value has a [[BigIntData]] internal slot, then // a. Assert: Type(value.[[BigIntData]]) is BigInt. // b. Return value.[[BigIntData]]. .or_else(|| { value .as_object() .and_then(|obj| obj.borrow().as_bigint().cloned()) }) // 3. Throw a TypeError exception. .ok_or_else(|| { JsNativeError::typ() .with_message("'this' is not a BigInt") .into() }) } /// `BigInt.prototype.toString( [radix] )` /// /// The `toString()` method returns a string representing the specified `BigInt` object. 
/// /// More information: /// - [ECMAScript reference][spec] /// - [MDN documentation][mdn] /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.tostring /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/toString #[allow(clippy::wrong_self_convention)] pub(crate) fn to_string( this: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { // 1. Let x be ? thisBigIntValue(this value). let x = Self::this_bigint_value(this)?; let radix = args.get_or_undefined(0); // 2. If radix is undefined, let radixMV be 10. let radix_mv = if radix.is_undefined() { // 5. If radixMV = 10, return ! ToString(x). // Note: early return optimization. return Ok(x.to_string().into()); // 3. Else, let radixMV be ? ToIntegerOrInfinity(radix). } else { radix.to_integer_or_infinity(context)? }; // 4. If radixMV < 2 or radixMV > 36, throw a RangeError exception. let radix_mv = match radix_mv { IntegerOrInfinity::Integer(i) if (2..=36).contains(&i) => i, _ => { return Err(JsNativeError::range() .with_message("radix must be an integer at least 2 and no greater than 36") .into()) } }; // 5. If radixMV = 10, return ! ToString(x). if radix_mv == 10 { return Ok(x.to_string().into()); } // 1. Let x be ? thisBigIntValue(this value). // 6. Return the String representation of this Number value using the radix specified by radixMV. // Letters a-z are used for digits with values 10 through 35. // The precise algorithm is implementation-defined, however the algorithm should be a generalization of that specified in 6.1.6.2.23. Ok(JsValue::new(x.to_string_radix(radix_mv as u32))) } /// `BigInt.prototype.valueOf()` /// /// The `valueOf()` method returns the wrapped primitive value of a Number object. 
/// /// More information: /// - [ECMAScript reference][spec] /// - [MDN documentation][mdn] /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.valueof /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/valueOf pub(crate) fn value_of( this: &JsValue, _: &[JsValue], _: &mut Context<'_>, ) -> JsResult<JsValue> { Ok(JsValue::new(Self::this_bigint_value(this)?)) } /// `BigInt.asIntN()` /// /// The `BigInt.asIntN()` method wraps the value of a `BigInt` to a signed integer between `-2**(width - 1)` and `2**(width-1) - 1`. /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.asintn /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asIntN #[allow(clippy::wrong_self_convention)] pub(crate) fn as_int_n( _: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { let (modulo, bits) = Self::calculate_as_uint_n(args, context)?; if bits > 0 && modulo >= JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits) - 1))? {
{ Ok(JsValue::new(modulo)) } } /// `BigInt.asUintN()` /// /// The `BigInt.asUintN()` method wraps the value of a `BigInt` to an unsigned integer between `0` and `2**(width) - 1`. /// /// [spec]: https://tc39.es/ecma262/#sec-bigint.asuintn /// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asUintN #[allow(clippy::wrong_self_convention)] pub(crate) fn as_uint_n( _: &JsValue, args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<JsValue> { let (modulo, _) = Self::calculate_as_uint_n(args, context)?; Ok(JsValue::new(modulo)) } /// Helper function to wrap the value of a `BigInt` to an unsigned integer. /// /// This function expects the same arguments as `as_uint_n` and wraps the value of a `BigInt`. /// Additionally to the wrapped unsigned value it returns the converted `bits` argument, so it /// can be reused from the `as_int_n` method. fn calculate_as_uint_n( args: &[JsValue], context: &mut Context<'_>, ) -> JsResult<(JsBigInt, u32)> { let bits_arg = args.get_or_undefined(0); let bigint_arg = args.get_or_undefined(1); let bits = bits_arg.to_index(context)?; let bits = u32::try_from(bits).unwrap_or(u32::MAX); let bigint = bigint_arg.to_bigint(context)?; Ok(( JsBigInt::mod_floor( &bigint, &JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?, ), bits, )) } }
Ok(JsValue::new(JsBigInt::sub( &modulo, &JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?, ))) } else
conditional_block
dhcpd.py
# Copyright 2013 James McCauley # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A very quick and dirty DHCP server This is currently missing lots of features and sort of limited with respect to subnets and so on, but it's a start. """ from pox.core import core import pox.openflow.libopenflow_01 as of import pox.lib.packet as pkt from pox.lib.addresses import IPAddr,EthAddr,parse_cidr from pox.lib.addresses import IP_BROADCAST, IP_ANY from pox.lib.revent import * from pox.lib.util import dpid_to_str log = core.getLogger() def ip_for_event (event): """ Use a switch's DPID as an EthAddr """ eth = dpid_to_str(event.dpid,True).split("|")[0].replace("-",":") return EthAddr(eth) class DHCPLease (Event): """ Raised when a lease is given Call nak() to abort this lease """ def __init__ (self, host_mac, ip): super(DHCPLease, self).__init__() self.host_mac = host_mac self.ip = ip self._nak = False def nak (self): self._nak = True class AddressPool (object): """ Superclass for DHCP address pools Note that it's just a subset of a list (thus, you can always just use a list as a pool). The one exception is an optional "subnet_mask" hint. It probably makes sense to change this abstraction so that we can more easily return addresses from multiple ranges, and because some things (e.g., getitem) are potentially difficult to implement and not particularly useful (since we only need to remove a single item at a time). """ def __init__ (self): """ Initialize this pool. 
""" pass def __contains__ (self, item): """ Is this IPAddr in the pool? """ return False def append (self, item): """ Add this IP address back into the pool """ pass def remove (self, item): """ Remove this IPAddr from the pool """ pass def __len__ (self): """ Returns number of IP addresses in the pool """ return 0 def __getitem__ (self, index): """ Get an IPAddr from the pool. Note that this will only be called with index = 0! """ pass class SimpleAddressPool (AddressPool): """ Simple AddressPool for simple subnet based pools. """ def __init__ (self, network = "192.168.0.0/24", first = 1, last = None, count = None): """ Simple subnet-based address pool Allocates count IP addresses out of network/network_size, starting with the first'th. You may specify the end of the range with either last (to specify the last'th address to use) or count to specify the number to use. If both are None, use up to the end of all legal addresses. Example for all of 192.168.x.x/16: SimpleAddressPool("192.168.0.0/16", 1, 65534) """ network,network_size = parse_cidr(network) self.first = first self.network_size = network_size self.host_size = 32-network_size self.network = IPAddr(network) if last is None and count is None: self.last = (1 << self.host_size) - 2 elif last is not None: self.last = last elif count is not None: self.last = self.first + count - 1 else: raise RuntimeError("Cannot specify both last and count") self.removed = set() if self.count <= 0: raise RuntimeError("Bad first/last range") if first == 0: raise RuntimeError("Can't allocate 0th address") if self.host_size < 0 or self.host_size > 32: raise RuntimeError("Bad network") if IPAddr(self.last | self.network.toUnsigned()) not in self: raise RuntimeError("Bad first/last range") def __repr__ (self): return str(self) def __str__ (self): t = self.network.toUnsigned() t = (IPAddr(t|self.first),IPAddr(t|self.last)) return "<Addresses from %s to %s>" % t @property def subnet_mask (self): return 
IPAddr(((1<<self.network_size)-1) << self.host_size) @property def count (self): return self.last - self.first + 1 def __contains__ (self, item): item = IPAddr(item) if item in self.removed: return False n = item.toUnsigned() mask = (1<<self.host_size)-1 nm = (n & mask) | self.network.toUnsigned() if nm != n: return False if (n & mask) == mask: return False if (n & mask) < self.first: return False if (n & mask) > self.last: return False return True def append (self, item): item = IPAddr(item) if item not in self.removed: if item in self: raise RuntimeError("%s is already in this pool" % (item,)) else: raise RuntimeError("%s does not belong in this pool" % (item,)) self.removed.remove(item) def remove (self, item): item = IPAddr(item) if item not in self: raise RuntimeError("%s not in this pool" % (item,)) self.removed.add(item) def __len__ (self): return (self.last-self.first+1) - len(self.removed) def __getitem__ (self, index): if index < 0: raise RuntimeError("Negative indices not allowed") if index >= len(self): raise IndexError("Item does not exist") c = self.first # Use a heuristic to find the first element faster (we hope) # Note this means that removing items changes the order of # our "list". 
c += len(self.removed) while c > self.last: c -= self.count while True: addr = IPAddr(c | self.network.toUnsigned()) if addr not in self.removed: assert addr in self index -= 1 if index < 0: return addr c += 1 if c > self.last: c -= self.count class DHCPD (EventMixin): _eventMixin_events = set([DHCPLease]) _servers = [] def __init__ (self, ip_address = "192.168.0.254", router_address = (), dns_address = (), pool = None, subnet = None, install_flow = True, dpid = None, ports = None): def fix_addr (addr, backup): if addr is None: return None if addr is (): return IPAddr(backup) return IPAddr(addr) self._install_flow = install_flow self.ip_addr = IPAddr(ip_address) self.router_addr = fix_addr(router_address, ip_address) self.dns_addr = fix_addr(dns_address, self.router_addr) if dpid is None: self.dpid = None else: try: dpid = int(dpid) except: dpid = util.str_to_dpid(dpid) self.dpid = dpid if ports is None: self.ports = None else: self.ports = set(ports) if self.ports: assert self.dpid is not None # Doesn't make sense self._servers.append(self) if pool is None: self.pool = [IPAddr("192.168.0."+str(x)) for x in range(100,199)] self.subnet = IPAddr(subnet or "255.255.255.0") else: self.pool = pool self.subnet = subnet if hasattr(pool, 'subnet_mask'): self.subnet = pool.subnet_mask if self.subnet is None: raise RuntimeError("You must specify a subnet mask or use a " "pool with a subnet hint") self.lease_time = 60 * 60 # An hour #TODO: Actually make them expire :) self.offers = {} # Eth -> IP we offered self.leases = {} # Eth -> IP we leased if self.ip_addr in self.pool: log.debug("Removing my own IP (%s) from address pool", self.ip_addr) self.pool.remove(self.ip_addr) core.openflow.addListeners(self) @classmethod def get_server_for_port (cls, dpid, port): """ Given a dpid.port, returns DHCPD instance responsible for it or None If there is a server, but the connection to the relevant switch is down, returns None. 
""" for s in cls.servers: if s.dpid != dpid: continue conn = core.openflow.getConnection(s.dpid) if not conn: continue if s.ports is None: return s port_no = conn.ports.get(port) if port_no is None: continue port_no = port_no.port_no for p in s.ports: p = conn.ports.get(p) if p is None: continue if p.port_no == port_no: return s return None @classmethod def get_ports_for_dpid (cls, dpid): """ Given a dpid, returns all port,server that are configured for it If the switch is disconnected, returns None. """ r = set() for s in cls._servers: if s.dpid != dpid: continue conn = core.openflow.getConnection(s.dpid) if not conn: continue if s.ports is None: for p in conn.ports: r.add((p.port_no,s)) else: for p in s.ports: p = conn.ports.get(p) if p is None: continue r.add((p.port_no,s)) return r def _handle_ConnectionUp (self, event): if self.dpid is not None and self.dpid != event.dpid: return if self._install_flow: msg = self._get_flow_mod() event.connection.send(msg) def _get_flow_mod (self, msg_type=of.ofp_flow_mod): """ Get flow mods that will send DHCP to the controller """ #TODO: We might over-match right now since we don't limit by port msg = msg_type() msg.match = of.ofp_match() msg.match.dl_type = pkt.ethernet.IP_TYPE msg.match.nw_proto = pkt.ipv4.UDP_PROTOCOL #msg.match.nw_dst = IP_BROADCAST msg.match.tp_src = pkt.dhcp.CLIENT_PORT msg.match.tp_dst = pkt.dhcp.SERVER_PORT msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER)) #msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) return msg def _get_pool (self, event): """ Get an IP pool for this event. Return None to not issue an IP. You should probably log this. """ return self.pool def _handle_PacketIn (self, event): # Is it to us? (Or at least not specifically NOT to us...) 
if self.dpid is not None and self.dpid != event.dpid: return if self.ports: for p in self.ports: if p == event.port: break if p in event.connection.ports: if event.connection.ports[p].port_no == event.port: break else: return ipp = event.parsed.find('ipv4') if not ipp or not ipp.parsed: return if ipp.dstip not in (IP_ANY,IP_BROADCAST,self.ip_addr): return # Is it full and proper DHCP? nwp = ipp.payload if not nwp or not nwp.parsed or not isinstance(nwp, pkt.udp): return if nwp.srcport != pkt.dhcp.CLIENT_PORT: return if nwp.dstport != pkt.dhcp.SERVER_PORT: return p = nwp.payload if not p: log.debug("%s: no packet", str(event.connection)) return if not isinstance(p, pkt.dhcp): log.debug("%s: packet is not DHCP", str(event.connection)) return if not p.parsed: log.debug("%s: DHCP packet not parsed", str(event.connection)) return if p.op != p.BOOTREQUEST: return t = p.options.get(p.MSG_TYPE_OPT) if t is None: return pool = self._get_pool(event) if pool is None: return if t.type == p.DISCOVER_MSG: self.exec_discover(event, p, pool) elif t.type == p.REQUEST_MSG: self.exec_request(event, p, pool) elif t.type == p.RELEASE_MSG: self.exec_release(event, p, pool) def reply (self, event, msg): orig = event.parsed.find('dhcp') broadcast = (orig.flags & orig.BROADCAST_FLAG) != 0 msg.op = msg.BOOTREPLY msg.chaddr = event.parsed.src msg.htype = 1 msg.hlen = 6 msg.xid = orig.xid msg.add_option(pkt.DHCP.DHCPServerIdentifierOption(self.ip_addr)) ethp = pkt.ethernet(src=ip_for_event(event),dst=event.parsed.src) ethp.type = pkt.ethernet.IP_TYPE ipp = pkt.ipv4(srcip = self.ip_addr) ipp.dstip = event.parsed.find('ipv4').srcip if broadcast: ipp.dstip = IP_BROADCAST ethp.dst = pkt.ETHERNET.ETHER_BROADCAST ipp.protocol = ipp.UDP_PROTOCOL udpp = pkt.udp() udpp.srcport = pkt.dhcp.SERVER_PORT udpp.dstport = pkt.dhcp.CLIENT_PORT udpp.payload = msg ipp.payload = udpp ethp.payload = ipp po = of.ofp_packet_out(data=ethp.pack()) po.actions.append(of.ofp_action_output(port=event.port)) 
event.connection.send(po) def nak (self, event, msg = None): if msg is None: msg = pkt.dhcp() msg.add_option(pkt.DHCP.DHCPMsgTypeOption(msg.NAK_MSG)) msg.siaddr = self.ip_addr self.reply(event, msg) def exec_release (self, event, p, pool): src = event.parsed.src if src != p.chaddr: log.warn("%s tried to release %s with bad chaddr" % (src,p.ciaddr)) return if self.leases.get(p.chaddr) != p.ciaddr: log.warn("%s tried to release unleased %s" % (src,p.ciaddr)) return del self.leases[p.chaddr] pool.append(p.ciaddr) log.info("%s released %s" % (src,p.ciaddr)) def exec_request (self, event, p, pool): if not p.REQUEST_IP_OPT in p.options: # Uhhh... return wanted_ip = p.options[p.REQUEST_IP_OPT].addr src = event.parsed.src got_ip = None if src in self.leases: if wanted_ip != self.leases[src]: pool.append(self.leases[src]) del self.leases[src] else: got_ip = self.leases[src] if got_ip is None: if src in self.offers: if wanted_ip != self.offers[src]: pool.append(self.offers[src]) del self.offers[src] else: got_ip = self.offers[src] if got_ip is None: if wanted_ip in pool: pool.remove(wanted_ip) got_ip = wanted_ip if got_ip is None: log.warn("%s asked for un-offered %s", src, wanted_ip) self.nak(event) return assert got_ip == wanted_ip self.leases[src] = got_ip ev = DHCPLease(src, got_ip) self.raiseEvent(ev) if ev._nak: self.nak(event) return log.info("Leased %s to %s" % (got_ip, src)) reply = pkt.dhcp() reply.add_option(pkt.DHCP.DHCPMsgTypeOption(p.ACK_MSG)) reply.yiaddr = wanted_ip reply.siaddr = self.ip_addr wanted_opts = set() if p.PARAM_REQ_OPT in p.options: wanted_opts.update(p.options[p.PARAM_REQ_OPT].options) self.fill(wanted_opts, reply) self.reply(event, reply) def exec_discover (self, event, p, pool): reply = pkt.dhcp() reply.add_option(pkt.DHCP.DHCPMsgTypeOption(p.OFFER_MSG)) src = event.parsed.src if src in self.leases: offer = self.leases[src] del self.leases[src] self.offers[src] = offer else: offer = self.offers.get(src) if offer is None: if len(pool) == 0: 
log.error("Out of IP addresses") self.nak(event) return offer = pool[0] if p.REQUEST_IP_OPT in p.options: wanted_ip = p.options[p.REQUEST_IP_OPT].addr if wanted_ip in pool: offer = wanted_ip pool.remove(offer) self.offers[src] = offer reply.yiaddr = offer reply.siaddr = self.ip_addr wanted_opts = set() if p.PARAM_REQ_OPT in p.options: wanted_opts.update(p.options[p.PARAM_REQ_OPT].options) self.fill(wanted_opts, reply) self.reply(event, reply) def fill (self, wanted_opts, msg): """ Fill out some options in msg """ if msg.SUBNET_MASK_OPT in wanted_opts: msg.add_option(pkt.DHCP.DHCPSubnetMaskOption(self.subnet)) if msg.ROUTERS_OPT in wanted_opts and self.router_addr is not None: msg.add_option(pkt.DHCP.DHCPRoutersOption(self.router_addr)) if msg.DNS_SERVER_OPT in wanted_opts and self.dns_addr is not None: msg.add_option(pkt.DHCP.DHCPDNSServersOption(self.dns_addr)) msg.add_option(pkt.DHCP.DHCPIPAddressLeaseTimeOption(self.lease_time)) def default (no_flow = False, network = "192.168.0.0/24", # Address range first = 100, last = 199, count = None, # Address range ip = "192.168.0.254", router = (), # Auto dns = ()): # Auto """ Launch DHCP server defaulting to 192.168.0.100-199 """ launch(no_flow, network, first, last, count, ip, router, dns) def launch (no_flow = False, network = "192.168.0.0/24", # Address range first = 1, last = None, count = None, # Address range ip = "192.168.0.254", router = (), # Auto dns = (), # Auto dpid = None, # All ports = None, # All __INSTANCE__ = None):
""" Launch DHCP server Defaults to serving 192.168.0.1 to 192.168.0.253 network Subnet to allocate addresses from first First'th address in subnet to use (256 is x.x.1.0 in a /16) last Last'th address in subnet to use count Alternate way to specify last address to use ip IP to use for DHCP server router Router IP to tell clients. Defaults to 'ip'. 'None' will stop the server from telling clients anything dns DNS IP to tell clients. Defaults to 'router'. 'None' will stop the server from telling clients anything. """ def fixint (i): i = str(i) if i.lower() == "none": return None if i.lower() == "true": return None return int(i) def fix (i): i = str(i) if i.lower() == "none": return None if i.lower() == "true": return None if i == '()': return () return i first,last,count = map(fixint,(first,last,count)) router,dns = map(fix,(router,dns)) if ports is not None: ports = ports.split(",") ports = set(int(p) if p.isdigit() else p for p in ports) pool = SimpleAddressPool(network = network, first = first, last = last, count = count) inst = DHCPD(install_flow = not no_flow, pool = pool, ip_address = ip, router_address = router, dns_address = dns, dpid = dpid, ports = ports) if __INSTANCE__[0] == 0: # First or only instance core.register(inst) log.debug("DHCP serving a%s", str(pool)[2:-1])
identifier_body
dhcpd.py
# Copyright 2013 James McCauley # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A very quick and dirty DHCP server This is currently missing lots of features and sort of limited with respect to subnets and so on, but it's a start. """ from pox.core import core import pox.openflow.libopenflow_01 as of import pox.lib.packet as pkt from pox.lib.addresses import IPAddr,EthAddr,parse_cidr from pox.lib.addresses import IP_BROADCAST, IP_ANY from pox.lib.revent import * from pox.lib.util import dpid_to_str log = core.getLogger() def ip_for_event (event): """ Use a switch's DPID as an EthAddr """ eth = dpid_to_str(event.dpid,True).split("|")[0].replace("-",":") return EthAddr(eth) class DHCPLease (Event): """ Raised when a lease is given Call nak() to abort this lease """ def __init__ (self, host_mac, ip): super(DHCPLease, self).__init__() self.host_mac = host_mac self.ip = ip self._nak = False def nak (self): self._nak = True class AddressPool (object): """ Superclass for DHCP address pools Note that it's just a subset of a list (thus, you can always just use a list as a pool). The one exception is an optional "subnet_mask" hint. It probably makes sense to change this abstraction so that we can more easily return addresses from multiple ranges, and because some things (e.g., getitem) are potentially difficult to implement and not particularly useful (since we only need to remove a single item at a time). """ def __init__ (self): """ Initialize this pool. 
""" pass def __contains__ (self, item): """ Is this IPAddr in the pool? """ return False def append (self, item): """ Add this IP address back into the pool """ pass def remove (self, item): """ Remove this IPAddr from the pool """ pass def __len__ (self): """ Returns number of IP addresses in the pool """ return 0 def __getitem__ (self, index): """ Get an IPAddr from the pool. Note that this will only be called with index = 0! """ pass class SimpleAddressPool (AddressPool): """ Simple AddressPool for simple subnet based pools. """ def __init__ (self, network = "192.168.0.0/24", first = 1, last = None, count = None): """ Simple subnet-based address pool Allocates count IP addresses out of network/network_size, starting with the first'th. You may specify the end of the range with either last (to specify the last'th address to use) or count to specify the number to use. If both are None, use up to the end of all legal addresses. Example for all of 192.168.x.x/16: SimpleAddressPool("192.168.0.0/16", 1, 65534) """ network,network_size = parse_cidr(network) self.first = first self.network_size = network_size self.host_size = 32-network_size self.network = IPAddr(network) if last is None and count is None: self.last = (1 << self.host_size) - 2 elif last is not None: self.last = last elif count is not None: self.last = self.first + count - 1 else: raise RuntimeError("Cannot specify both last and count") self.removed = set() if self.count <= 0: raise RuntimeError("Bad first/last range") if first == 0: raise RuntimeError("Can't allocate 0th address") if self.host_size < 0 or self.host_size > 32: raise RuntimeError("Bad network") if IPAddr(self.last | self.network.toUnsigned()) not in self: raise RuntimeError("Bad first/last range") def __repr__ (self): return str(self) def __str__ (self): t = self.network.toUnsigned() t = (IPAddr(t|self.first),IPAddr(t|self.last)) return "<Addresses from %s to %s>" % t @property def subnet_mask (self): return 
IPAddr(((1<<self.network_size)-1) << self.host_size) @property def count (self): return self.last - self.first + 1 def __contains__ (self, item): item = IPAddr(item) if item in self.removed: return False n = item.toUnsigned() mask = (1<<self.host_size)-1 nm = (n & mask) | self.network.toUnsigned() if nm != n: return False if (n & mask) == mask: return False if (n & mask) < self.first: return False if (n & mask) > self.last: return False return True def append (self, item): item = IPAddr(item) if item not in self.removed: if item in self: raise RuntimeError("%s is already in this pool" % (item,)) else: raise RuntimeError("%s does not belong in this pool" % (item,)) self.removed.remove(item) def remove (self, item): item = IPAddr(item) if item not in self: raise RuntimeError("%s not in this pool" % (item,)) self.removed.add(item) def __len__ (self): return (self.last-self.first+1) - len(self.removed) def __getitem__ (self, index): if index < 0: raise RuntimeError("Negative indices not allowed") if index >= len(self): raise IndexError("Item does not exist") c = self.first # Use a heuristic to find the first element faster (we hope) # Note this means that removing items changes the order of # our "list". 
c += len(self.removed) while c > self.last: c -= self.count while True: addr = IPAddr(c | self.network.toUnsigned()) if addr not in self.removed: assert addr in self index -= 1 if index < 0: return addr c += 1 if c > self.last: c -= self.count class DHCPD (EventMixin): _eventMixin_events = set([DHCPLease]) _servers = [] def __init__ (self, ip_address = "192.168.0.254", router_address = (), dns_address = (), pool = None, subnet = None, install_flow = True, dpid = None, ports = None): def fix_addr (addr, backup): if addr is None: return None if addr is (): return IPAddr(backup) return IPAddr(addr) self._install_flow = install_flow self.ip_addr = IPAddr(ip_address) self.router_addr = fix_addr(router_address, ip_address) self.dns_addr = fix_addr(dns_address, self.router_addr) if dpid is None: self.dpid = None else: try: dpid = int(dpid) except: dpid = util.str_to_dpid(dpid) self.dpid = dpid if ports is None: self.ports = None else: self.ports = set(ports) if self.ports: assert self.dpid is not None # Doesn't make sense self._servers.append(self) if pool is None: self.pool = [IPAddr("192.168.0."+str(x)) for x in range(100,199)] self.subnet = IPAddr(subnet or "255.255.255.0") else: self.pool = pool self.subnet = subnet if hasattr(pool, 'subnet_mask'): self.subnet = pool.subnet_mask if self.subnet is None: raise RuntimeError("You must specify a subnet mask or use a " "pool with a subnet hint") self.lease_time = 60 * 60 # An hour #TODO: Actually make them expire :) self.offers = {} # Eth -> IP we offered self.leases = {} # Eth -> IP we leased if self.ip_addr in self.pool: log.debug("Removing my own IP (%s) from address pool", self.ip_addr) self.pool.remove(self.ip_addr) core.openflow.addListeners(self) @classmethod def get_server_for_port (cls, dpid, port): """ Given a dpid.port, returns DHCPD instance responsible for it or None If there is a server, but the connection to the relevant switch is down, returns None. """ for s in cls.servers: if s.dpid != dpid:
conn = core.openflow.getConnection(s.dpid) if not conn: continue if s.ports is None: return s port_no = conn.ports.get(port) if port_no is None: continue port_no = port_no.port_no for p in s.ports: p = conn.ports.get(p) if p is None: continue if p.port_no == port_no: return s return None @classmethod def get_ports_for_dpid (cls, dpid): """ Given a dpid, returns all port,server that are configured for it If the switch is disconnected, returns None. """ r = set() for s in cls._servers: if s.dpid != dpid: continue conn = core.openflow.getConnection(s.dpid) if not conn: continue if s.ports is None: for p in conn.ports: r.add((p.port_no,s)) else: for p in s.ports: p = conn.ports.get(p) if p is None: continue r.add((p.port_no,s)) return r def _handle_ConnectionUp (self, event): if self.dpid is not None and self.dpid != event.dpid: return if self._install_flow: msg = self._get_flow_mod() event.connection.send(msg) def _get_flow_mod (self, msg_type=of.ofp_flow_mod): """ Get flow mods that will send DHCP to the controller """ #TODO: We might over-match right now since we don't limit by port msg = msg_type() msg.match = of.ofp_match() msg.match.dl_type = pkt.ethernet.IP_TYPE msg.match.nw_proto = pkt.ipv4.UDP_PROTOCOL #msg.match.nw_dst = IP_BROADCAST msg.match.tp_src = pkt.dhcp.CLIENT_PORT msg.match.tp_dst = pkt.dhcp.SERVER_PORT msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER)) #msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) return msg def _get_pool (self, event): """ Get an IP pool for this event. Return None to not issue an IP. You should probably log this. """ return self.pool def _handle_PacketIn (self, event): # Is it to us? (Or at least not specifically NOT to us...) 
if self.dpid is not None and self.dpid != event.dpid: return if self.ports: for p in self.ports: if p == event.port: break if p in event.connection.ports: if event.connection.ports[p].port_no == event.port: break else: return ipp = event.parsed.find('ipv4') if not ipp or not ipp.parsed: return if ipp.dstip not in (IP_ANY,IP_BROADCAST,self.ip_addr): return # Is it full and proper DHCP? nwp = ipp.payload if not nwp or not nwp.parsed or not isinstance(nwp, pkt.udp): return if nwp.srcport != pkt.dhcp.CLIENT_PORT: return if nwp.dstport != pkt.dhcp.SERVER_PORT: return p = nwp.payload if not p: log.debug("%s: no packet", str(event.connection)) return if not isinstance(p, pkt.dhcp): log.debug("%s: packet is not DHCP", str(event.connection)) return if not p.parsed: log.debug("%s: DHCP packet not parsed", str(event.connection)) return if p.op != p.BOOTREQUEST: return t = p.options.get(p.MSG_TYPE_OPT) if t is None: return pool = self._get_pool(event) if pool is None: return if t.type == p.DISCOVER_MSG: self.exec_discover(event, p, pool) elif t.type == p.REQUEST_MSG: self.exec_request(event, p, pool) elif t.type == p.RELEASE_MSG: self.exec_release(event, p, pool) def reply (self, event, msg): orig = event.parsed.find('dhcp') broadcast = (orig.flags & orig.BROADCAST_FLAG) != 0 msg.op = msg.BOOTREPLY msg.chaddr = event.parsed.src msg.htype = 1 msg.hlen = 6 msg.xid = orig.xid msg.add_option(pkt.DHCP.DHCPServerIdentifierOption(self.ip_addr)) ethp = pkt.ethernet(src=ip_for_event(event),dst=event.parsed.src) ethp.type = pkt.ethernet.IP_TYPE ipp = pkt.ipv4(srcip = self.ip_addr) ipp.dstip = event.parsed.find('ipv4').srcip if broadcast: ipp.dstip = IP_BROADCAST ethp.dst = pkt.ETHERNET.ETHER_BROADCAST ipp.protocol = ipp.UDP_PROTOCOL udpp = pkt.udp() udpp.srcport = pkt.dhcp.SERVER_PORT udpp.dstport = pkt.dhcp.CLIENT_PORT udpp.payload = msg ipp.payload = udpp ethp.payload = ipp po = of.ofp_packet_out(data=ethp.pack()) po.actions.append(of.ofp_action_output(port=event.port)) 
event.connection.send(po) def nak (self, event, msg = None): if msg is None: msg = pkt.dhcp() msg.add_option(pkt.DHCP.DHCPMsgTypeOption(msg.NAK_MSG)) msg.siaddr = self.ip_addr self.reply(event, msg) def exec_release (self, event, p, pool): src = event.parsed.src if src != p.chaddr: log.warn("%s tried to release %s with bad chaddr" % (src,p.ciaddr)) return if self.leases.get(p.chaddr) != p.ciaddr: log.warn("%s tried to release unleased %s" % (src,p.ciaddr)) return del self.leases[p.chaddr] pool.append(p.ciaddr) log.info("%s released %s" % (src,p.ciaddr)) def exec_request (self, event, p, pool): if not p.REQUEST_IP_OPT in p.options: # Uhhh... return wanted_ip = p.options[p.REQUEST_IP_OPT].addr src = event.parsed.src got_ip = None if src in self.leases: if wanted_ip != self.leases[src]: pool.append(self.leases[src]) del self.leases[src] else: got_ip = self.leases[src] if got_ip is None: if src in self.offers: if wanted_ip != self.offers[src]: pool.append(self.offers[src]) del self.offers[src] else: got_ip = self.offers[src] if got_ip is None: if wanted_ip in pool: pool.remove(wanted_ip) got_ip = wanted_ip if got_ip is None: log.warn("%s asked for un-offered %s", src, wanted_ip) self.nak(event) return assert got_ip == wanted_ip self.leases[src] = got_ip ev = DHCPLease(src, got_ip) self.raiseEvent(ev) if ev._nak: self.nak(event) return log.info("Leased %s to %s" % (got_ip, src)) reply = pkt.dhcp() reply.add_option(pkt.DHCP.DHCPMsgTypeOption(p.ACK_MSG)) reply.yiaddr = wanted_ip reply.siaddr = self.ip_addr wanted_opts = set() if p.PARAM_REQ_OPT in p.options: wanted_opts.update(p.options[p.PARAM_REQ_OPT].options) self.fill(wanted_opts, reply) self.reply(event, reply) def exec_discover (self, event, p, pool): reply = pkt.dhcp() reply.add_option(pkt.DHCP.DHCPMsgTypeOption(p.OFFER_MSG)) src = event.parsed.src if src in self.leases: offer = self.leases[src] del self.leases[src] self.offers[src] = offer else: offer = self.offers.get(src) if offer is None: if len(pool) == 0: 
log.error("Out of IP addresses") self.nak(event) return offer = pool[0] if p.REQUEST_IP_OPT in p.options: wanted_ip = p.options[p.REQUEST_IP_OPT].addr if wanted_ip in pool: offer = wanted_ip pool.remove(offer) self.offers[src] = offer reply.yiaddr = offer reply.siaddr = self.ip_addr wanted_opts = set() if p.PARAM_REQ_OPT in p.options: wanted_opts.update(p.options[p.PARAM_REQ_OPT].options) self.fill(wanted_opts, reply) self.reply(event, reply) def fill (self, wanted_opts, msg): """ Fill out some options in msg """ if msg.SUBNET_MASK_OPT in wanted_opts: msg.add_option(pkt.DHCP.DHCPSubnetMaskOption(self.subnet)) if msg.ROUTERS_OPT in wanted_opts and self.router_addr is not None: msg.add_option(pkt.DHCP.DHCPRoutersOption(self.router_addr)) if msg.DNS_SERVER_OPT in wanted_opts and self.dns_addr is not None: msg.add_option(pkt.DHCP.DHCPDNSServersOption(self.dns_addr)) msg.add_option(pkt.DHCP.DHCPIPAddressLeaseTimeOption(self.lease_time)) def default (no_flow = False, network = "192.168.0.0/24", # Address range first = 100, last = 199, count = None, # Address range ip = "192.168.0.254", router = (), # Auto dns = ()): # Auto """ Launch DHCP server defaulting to 192.168.0.100-199 """ launch(no_flow, network, first, last, count, ip, router, dns) def launch (no_flow = False, network = "192.168.0.0/24", # Address range first = 1, last = None, count = None, # Address range ip = "192.168.0.254", router = (), # Auto dns = (), # Auto dpid = None, # All ports = None, # All __INSTANCE__ = None): """ Launch DHCP server Defaults to serving 192.168.0.1 to 192.168.0.253 network Subnet to allocate addresses from first First'th address in subnet to use (256 is x.x.1.0 in a /16) last Last'th address in subnet to use count Alternate way to specify last address to use ip IP to use for DHCP server router Router IP to tell clients. Defaults to 'ip'. 'None' will stop the server from telling clients anything dns DNS IP to tell clients. Defaults to 'router'. 
'None' will stop the server from telling clients anything. """ def fixint (i): i = str(i) if i.lower() == "none": return None if i.lower() == "true": return None return int(i) def fix (i): i = str(i) if i.lower() == "none": return None if i.lower() == "true": return None if i == '()': return () return i first,last,count = map(fixint,(first,last,count)) router,dns = map(fix,(router,dns)) if ports is not None: ports = ports.split(",") ports = set(int(p) if p.isdigit() else p for p in ports) pool = SimpleAddressPool(network = network, first = first, last = last, count = count) inst = DHCPD(install_flow = not no_flow, pool = pool, ip_address = ip, router_address = router, dns_address = dns, dpid = dpid, ports = ports) if __INSTANCE__[0] == 0: # First or only instance core.register(inst) log.debug("DHCP serving a%s", str(pool)[2:-1])
continue
conditional_block
dhcpd.py
# Copyright 2013 James McCauley # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A very quick and dirty DHCP server This is currently missing lots of features and sort of limited with respect to subnets and so on, but it's a start. """ from pox.core import core import pox.openflow.libopenflow_01 as of import pox.lib.packet as pkt from pox.lib.addresses import IPAddr,EthAddr,parse_cidr from pox.lib.addresses import IP_BROADCAST, IP_ANY from pox.lib.revent import * from pox.lib.util import dpid_to_str log = core.getLogger() def ip_for_event (event): """ Use a switch's DPID as an EthAddr """ eth = dpid_to_str(event.dpid,True).split("|")[0].replace("-",":") return EthAddr(eth) class DHCPLease (Event): """ Raised when a lease is given Call nak() to abort this lease """ def __init__ (self, host_mac, ip): super(DHCPLease, self).__init__() self.host_mac = host_mac self.ip = ip self._nak = False def nak (self): self._nak = True class AddressPool (object): """ Superclass for DHCP address pools Note that it's just a subset of a list (thus, you can always just use a list as a pool). The one exception is an optional "subnet_mask" hint. It probably makes sense to change this abstraction so that we can more easily return addresses from multiple ranges, and because some things (e.g., getitem) are potentially difficult to implement and not particularly useful (since we only need to remove a single item at a time). """ def __init__ (self): """ Initialize this pool. 
""" pass def __contains__ (self, item): """ Is this IPAddr in the pool? """ return False def append (self, item): """ Add this IP address back into the pool """ pass def remove (self, item): """ Remove this IPAddr from the pool """ pass def __len__ (self): """ Returns number of IP addresses in the pool """ return 0 def __getitem__ (self, index): """ Get an IPAddr from the pool. Note that this will only be called with index = 0! """ pass class SimpleAddressPool (AddressPool): """ Simple AddressPool for simple subnet based pools. """ def __init__ (self, network = "192.168.0.0/24", first = 1, last = None, count = None): """ Simple subnet-based address pool Allocates count IP addresses out of network/network_size, starting with the first'th. You may specify the end of the range with either last (to specify the last'th address to use) or count to specify the number to use. If both are None, use up to the end of all legal addresses. Example for all of 192.168.x.x/16: SimpleAddressPool("192.168.0.0/16", 1, 65534) """ network,network_size = parse_cidr(network) self.first = first self.network_size = network_size self.host_size = 32-network_size self.network = IPAddr(network) if last is None and count is None: self.last = (1 << self.host_size) - 2 elif last is not None: self.last = last elif count is not None: self.last = self.first + count - 1 else: raise RuntimeError("Cannot specify both last and count") self.removed = set() if self.count <= 0: raise RuntimeError("Bad first/last range") if first == 0: raise RuntimeError("Can't allocate 0th address") if self.host_size < 0 or self.host_size > 32: raise RuntimeError("Bad network") if IPAddr(self.last | self.network.toUnsigned()) not in self: raise RuntimeError("Bad first/last range") def __repr__ (self): return str(self) def __str__ (self): t = self.network.toUnsigned() t = (IPAddr(t|self.first),IPAddr(t|self.last)) return "<Addresses from %s to %s>" % t @property def subnet_mask (self): return 
IPAddr(((1<<self.network_size)-1) << self.host_size) @property def count (self): return self.last - self.first + 1 def
(self, item): item = IPAddr(item) if item in self.removed: return False n = item.toUnsigned() mask = (1<<self.host_size)-1 nm = (n & mask) | self.network.toUnsigned() if nm != n: return False if (n & mask) == mask: return False if (n & mask) < self.first: return False if (n & mask) > self.last: return False return True def append (self, item): item = IPAddr(item) if item not in self.removed: if item in self: raise RuntimeError("%s is already in this pool" % (item,)) else: raise RuntimeError("%s does not belong in this pool" % (item,)) self.removed.remove(item) def remove (self, item): item = IPAddr(item) if item not in self: raise RuntimeError("%s not in this pool" % (item,)) self.removed.add(item) def __len__ (self): return (self.last-self.first+1) - len(self.removed) def __getitem__ (self, index): if index < 0: raise RuntimeError("Negative indices not allowed") if index >= len(self): raise IndexError("Item does not exist") c = self.first # Use a heuristic to find the first element faster (we hope) # Note this means that removing items changes the order of # our "list". 
c += len(self.removed) while c > self.last: c -= self.count while True: addr = IPAddr(c | self.network.toUnsigned()) if addr not in self.removed: assert addr in self index -= 1 if index < 0: return addr c += 1 if c > self.last: c -= self.count class DHCPD (EventMixin): _eventMixin_events = set([DHCPLease]) _servers = [] def __init__ (self, ip_address = "192.168.0.254", router_address = (), dns_address = (), pool = None, subnet = None, install_flow = True, dpid = None, ports = None): def fix_addr (addr, backup): if addr is None: return None if addr is (): return IPAddr(backup) return IPAddr(addr) self._install_flow = install_flow self.ip_addr = IPAddr(ip_address) self.router_addr = fix_addr(router_address, ip_address) self.dns_addr = fix_addr(dns_address, self.router_addr) if dpid is None: self.dpid = None else: try: dpid = int(dpid) except: dpid = util.str_to_dpid(dpid) self.dpid = dpid if ports is None: self.ports = None else: self.ports = set(ports) if self.ports: assert self.dpid is not None # Doesn't make sense self._servers.append(self) if pool is None: self.pool = [IPAddr("192.168.0."+str(x)) for x in range(100,199)] self.subnet = IPAddr(subnet or "255.255.255.0") else: self.pool = pool self.subnet = subnet if hasattr(pool, 'subnet_mask'): self.subnet = pool.subnet_mask if self.subnet is None: raise RuntimeError("You must specify a subnet mask or use a " "pool with a subnet hint") self.lease_time = 60 * 60 # An hour #TODO: Actually make them expire :) self.offers = {} # Eth -> IP we offered self.leases = {} # Eth -> IP we leased if self.ip_addr in self.pool: log.debug("Removing my own IP (%s) from address pool", self.ip_addr) self.pool.remove(self.ip_addr) core.openflow.addListeners(self) @classmethod def get_server_for_port (cls, dpid, port): """ Given a dpid.port, returns DHCPD instance responsible for it or None If there is a server, but the connection to the relevant switch is down, returns None. 
""" for s in cls.servers: if s.dpid != dpid: continue conn = core.openflow.getConnection(s.dpid) if not conn: continue if s.ports is None: return s port_no = conn.ports.get(port) if port_no is None: continue port_no = port_no.port_no for p in s.ports: p = conn.ports.get(p) if p is None: continue if p.port_no == port_no: return s return None @classmethod def get_ports_for_dpid (cls, dpid): """ Given a dpid, returns all port,server that are configured for it If the switch is disconnected, returns None. """ r = set() for s in cls._servers: if s.dpid != dpid: continue conn = core.openflow.getConnection(s.dpid) if not conn: continue if s.ports is None: for p in conn.ports: r.add((p.port_no,s)) else: for p in s.ports: p = conn.ports.get(p) if p is None: continue r.add((p.port_no,s)) return r def _handle_ConnectionUp (self, event): if self.dpid is not None and self.dpid != event.dpid: return if self._install_flow: msg = self._get_flow_mod() event.connection.send(msg) def _get_flow_mod (self, msg_type=of.ofp_flow_mod): """ Get flow mods that will send DHCP to the controller """ #TODO: We might over-match right now since we don't limit by port msg = msg_type() msg.match = of.ofp_match() msg.match.dl_type = pkt.ethernet.IP_TYPE msg.match.nw_proto = pkt.ipv4.UDP_PROTOCOL #msg.match.nw_dst = IP_BROADCAST msg.match.tp_src = pkt.dhcp.CLIENT_PORT msg.match.tp_dst = pkt.dhcp.SERVER_PORT msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER)) #msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) return msg def _get_pool (self, event): """ Get an IP pool for this event. Return None to not issue an IP. You should probably log this. """ return self.pool def _handle_PacketIn (self, event): # Is it to us? (Or at least not specifically NOT to us...) 
if self.dpid is not None and self.dpid != event.dpid: return if self.ports: for p in self.ports: if p == event.port: break if p in event.connection.ports: if event.connection.ports[p].port_no == event.port: break else: return ipp = event.parsed.find('ipv4') if not ipp or not ipp.parsed: return if ipp.dstip not in (IP_ANY,IP_BROADCAST,self.ip_addr): return # Is it full and proper DHCP? nwp = ipp.payload if not nwp or not nwp.parsed or not isinstance(nwp, pkt.udp): return if nwp.srcport != pkt.dhcp.CLIENT_PORT: return if nwp.dstport != pkt.dhcp.SERVER_PORT: return p = nwp.payload if not p: log.debug("%s: no packet", str(event.connection)) return if not isinstance(p, pkt.dhcp): log.debug("%s: packet is not DHCP", str(event.connection)) return if not p.parsed: log.debug("%s: DHCP packet not parsed", str(event.connection)) return if p.op != p.BOOTREQUEST: return t = p.options.get(p.MSG_TYPE_OPT) if t is None: return pool = self._get_pool(event) if pool is None: return if t.type == p.DISCOVER_MSG: self.exec_discover(event, p, pool) elif t.type == p.REQUEST_MSG: self.exec_request(event, p, pool) elif t.type == p.RELEASE_MSG: self.exec_release(event, p, pool) def reply (self, event, msg): orig = event.parsed.find('dhcp') broadcast = (orig.flags & orig.BROADCAST_FLAG) != 0 msg.op = msg.BOOTREPLY msg.chaddr = event.parsed.src msg.htype = 1 msg.hlen = 6 msg.xid = orig.xid msg.add_option(pkt.DHCP.DHCPServerIdentifierOption(self.ip_addr)) ethp = pkt.ethernet(src=ip_for_event(event),dst=event.parsed.src) ethp.type = pkt.ethernet.IP_TYPE ipp = pkt.ipv4(srcip = self.ip_addr) ipp.dstip = event.parsed.find('ipv4').srcip if broadcast: ipp.dstip = IP_BROADCAST ethp.dst = pkt.ETHERNET.ETHER_BROADCAST ipp.protocol = ipp.UDP_PROTOCOL udpp = pkt.udp() udpp.srcport = pkt.dhcp.SERVER_PORT udpp.dstport = pkt.dhcp.CLIENT_PORT udpp.payload = msg ipp.payload = udpp ethp.payload = ipp po = of.ofp_packet_out(data=ethp.pack()) po.actions.append(of.ofp_action_output(port=event.port)) 
event.connection.send(po) def nak (self, event, msg = None): if msg is None: msg = pkt.dhcp() msg.add_option(pkt.DHCP.DHCPMsgTypeOption(msg.NAK_MSG)) msg.siaddr = self.ip_addr self.reply(event, msg) def exec_release (self, event, p, pool): src = event.parsed.src if src != p.chaddr: log.warn("%s tried to release %s with bad chaddr" % (src,p.ciaddr)) return if self.leases.get(p.chaddr) != p.ciaddr: log.warn("%s tried to release unleased %s" % (src,p.ciaddr)) return del self.leases[p.chaddr] pool.append(p.ciaddr) log.info("%s released %s" % (src,p.ciaddr)) def exec_request (self, event, p, pool): if not p.REQUEST_IP_OPT in p.options: # Uhhh... return wanted_ip = p.options[p.REQUEST_IP_OPT].addr src = event.parsed.src got_ip = None if src in self.leases: if wanted_ip != self.leases[src]: pool.append(self.leases[src]) del self.leases[src] else: got_ip = self.leases[src] if got_ip is None: if src in self.offers: if wanted_ip != self.offers[src]: pool.append(self.offers[src]) del self.offers[src] else: got_ip = self.offers[src] if got_ip is None: if wanted_ip in pool: pool.remove(wanted_ip) got_ip = wanted_ip if got_ip is None: log.warn("%s asked for un-offered %s", src, wanted_ip) self.nak(event) return assert got_ip == wanted_ip self.leases[src] = got_ip ev = DHCPLease(src, got_ip) self.raiseEvent(ev) if ev._nak: self.nak(event) return log.info("Leased %s to %s" % (got_ip, src)) reply = pkt.dhcp() reply.add_option(pkt.DHCP.DHCPMsgTypeOption(p.ACK_MSG)) reply.yiaddr = wanted_ip reply.siaddr = self.ip_addr wanted_opts = set() if p.PARAM_REQ_OPT in p.options: wanted_opts.update(p.options[p.PARAM_REQ_OPT].options) self.fill(wanted_opts, reply) self.reply(event, reply) def exec_discover (self, event, p, pool): reply = pkt.dhcp() reply.add_option(pkt.DHCP.DHCPMsgTypeOption(p.OFFER_MSG)) src = event.parsed.src if src in self.leases: offer = self.leases[src] del self.leases[src] self.offers[src] = offer else: offer = self.offers.get(src) if offer is None: if len(pool) == 0: 
log.error("Out of IP addresses") self.nak(event) return offer = pool[0] if p.REQUEST_IP_OPT in p.options: wanted_ip = p.options[p.REQUEST_IP_OPT].addr if wanted_ip in pool: offer = wanted_ip pool.remove(offer) self.offers[src] = offer reply.yiaddr = offer reply.siaddr = self.ip_addr wanted_opts = set() if p.PARAM_REQ_OPT in p.options: wanted_opts.update(p.options[p.PARAM_REQ_OPT].options) self.fill(wanted_opts, reply) self.reply(event, reply) def fill (self, wanted_opts, msg): """ Fill out some options in msg """ if msg.SUBNET_MASK_OPT in wanted_opts: msg.add_option(pkt.DHCP.DHCPSubnetMaskOption(self.subnet)) if msg.ROUTERS_OPT in wanted_opts and self.router_addr is not None: msg.add_option(pkt.DHCP.DHCPRoutersOption(self.router_addr)) if msg.DNS_SERVER_OPT in wanted_opts and self.dns_addr is not None: msg.add_option(pkt.DHCP.DHCPDNSServersOption(self.dns_addr)) msg.add_option(pkt.DHCP.DHCPIPAddressLeaseTimeOption(self.lease_time)) def default (no_flow = False, network = "192.168.0.0/24", # Address range first = 100, last = 199, count = None, # Address range ip = "192.168.0.254", router = (), # Auto dns = ()): # Auto """ Launch DHCP server defaulting to 192.168.0.100-199 """ launch(no_flow, network, first, last, count, ip, router, dns) def launch (no_flow = False, network = "192.168.0.0/24", # Address range first = 1, last = None, count = None, # Address range ip = "192.168.0.254", router = (), # Auto dns = (), # Auto dpid = None, # All ports = None, # All __INSTANCE__ = None): """ Launch DHCP server Defaults to serving 192.168.0.1 to 192.168.0.253 network Subnet to allocate addresses from first First'th address in subnet to use (256 is x.x.1.0 in a /16) last Last'th address in subnet to use count Alternate way to specify last address to use ip IP to use for DHCP server router Router IP to tell clients. Defaults to 'ip'. 'None' will stop the server from telling clients anything dns DNS IP to tell clients. Defaults to 'router'. 
'None' will stop the server from telling clients anything. """ def fixint (i): i = str(i) if i.lower() == "none": return None if i.lower() == "true": return None return int(i) def fix (i): i = str(i) if i.lower() == "none": return None if i.lower() == "true": return None if i == '()': return () return i first,last,count = map(fixint,(first,last,count)) router,dns = map(fix,(router,dns)) if ports is not None: ports = ports.split(",") ports = set(int(p) if p.isdigit() else p for p in ports) pool = SimpleAddressPool(network = network, first = first, last = last, count = count) inst = DHCPD(install_flow = not no_flow, pool = pool, ip_address = ip, router_address = router, dns_address = dns, dpid = dpid, ports = ports) if __INSTANCE__[0] == 0: # First or only instance core.register(inst) log.debug("DHCP serving a%s", str(pool)[2:-1])
__contains__
identifier_name
dhcpd.py
# Copyright 2013 James McCauley # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A very quick and dirty DHCP server This is currently missing lots of features and sort of limited with respect to subnets and so on, but it's a start. """ from pox.core import core import pox.openflow.libopenflow_01 as of import pox.lib.packet as pkt from pox.lib.addresses import IPAddr,EthAddr,parse_cidr from pox.lib.addresses import IP_BROADCAST, IP_ANY from pox.lib.revent import * from pox.lib.util import dpid_to_str log = core.getLogger() def ip_for_event (event): """ Use a switch's DPID as an EthAddr """ eth = dpid_to_str(event.dpid,True).split("|")[0].replace("-",":") return EthAddr(eth) class DHCPLease (Event): """ Raised when a lease is given Call nak() to abort this lease """ def __init__ (self, host_mac, ip): super(DHCPLease, self).__init__() self.host_mac = host_mac self.ip = ip self._nak = False def nak (self): self._nak = True class AddressPool (object): """ Superclass for DHCP address pools Note that it's just a subset of a list (thus, you can always just use a list as a pool). The one exception is an optional "subnet_mask" hint. It probably makes sense to change this abstraction so that we can more easily return addresses from multiple ranges, and because some things (e.g., getitem) are potentially difficult to implement and not particularly useful (since we only need to remove a single item at a time). """ def __init__ (self): """ Initialize this pool. 
""" pass def __contains__ (self, item): """ Is this IPAddr in the pool? """ return False def append (self, item): """ Add this IP address back into the pool """ pass def remove (self, item): """ Remove this IPAddr from the pool """ pass def __len__ (self): """ Returns number of IP addresses in the pool """ return 0 def __getitem__ (self, index): """ Get an IPAddr from the pool. Note that this will only be called with index = 0! """ pass class SimpleAddressPool (AddressPool): """ Simple AddressPool for simple subnet based pools. """ def __init__ (self, network = "192.168.0.0/24", first = 1, last = None, count = None): """ Simple subnet-based address pool Allocates count IP addresses out of network/network_size, starting with the first'th. You may specify the end of the range with either last (to specify the last'th address to use) or count to specify the number to use. If both are None, use up to the end of all legal addresses. Example for all of 192.168.x.x/16: SimpleAddressPool("192.168.0.0/16", 1, 65534) """ network,network_size = parse_cidr(network) self.first = first self.network_size = network_size self.host_size = 32-network_size self.network = IPAddr(network) if last is None and count is None:
else: raise RuntimeError("Cannot specify both last and count") self.removed = set() if self.count <= 0: raise RuntimeError("Bad first/last range") if first == 0: raise RuntimeError("Can't allocate 0th address") if self.host_size < 0 or self.host_size > 32: raise RuntimeError("Bad network") if IPAddr(self.last | self.network.toUnsigned()) not in self: raise RuntimeError("Bad first/last range") def __repr__ (self): return str(self) def __str__ (self): t = self.network.toUnsigned() t = (IPAddr(t|self.first),IPAddr(t|self.last)) return "<Addresses from %s to %s>" % t @property def subnet_mask (self): return IPAddr(((1<<self.network_size)-1) << self.host_size) @property def count (self): return self.last - self.first + 1 def __contains__ (self, item): item = IPAddr(item) if item in self.removed: return False n = item.toUnsigned() mask = (1<<self.host_size)-1 nm = (n & mask) | self.network.toUnsigned() if nm != n: return False if (n & mask) == mask: return False if (n & mask) < self.first: return False if (n & mask) > self.last: return False return True def append (self, item): item = IPAddr(item) if item not in self.removed: if item in self: raise RuntimeError("%s is already in this pool" % (item,)) else: raise RuntimeError("%s does not belong in this pool" % (item,)) self.removed.remove(item) def remove (self, item): item = IPAddr(item) if item not in self: raise RuntimeError("%s not in this pool" % (item,)) self.removed.add(item) def __len__ (self): return (self.last-self.first+1) - len(self.removed) def __getitem__ (self, index): if index < 0: raise RuntimeError("Negative indices not allowed") if index >= len(self): raise IndexError("Item does not exist") c = self.first # Use a heuristic to find the first element faster (we hope) # Note this means that removing items changes the order of # our "list". 
c += len(self.removed) while c > self.last: c -= self.count while True: addr = IPAddr(c | self.network.toUnsigned()) if addr not in self.removed: assert addr in self index -= 1 if index < 0: return addr c += 1 if c > self.last: c -= self.count class DHCPD (EventMixin): _eventMixin_events = set([DHCPLease]) _servers = [] def __init__ (self, ip_address = "192.168.0.254", router_address = (), dns_address = (), pool = None, subnet = None, install_flow = True, dpid = None, ports = None): def fix_addr (addr, backup): if addr is None: return None if addr is (): return IPAddr(backup) return IPAddr(addr) self._install_flow = install_flow self.ip_addr = IPAddr(ip_address) self.router_addr = fix_addr(router_address, ip_address) self.dns_addr = fix_addr(dns_address, self.router_addr) if dpid is None: self.dpid = None else: try: dpid = int(dpid) except: dpid = util.str_to_dpid(dpid) self.dpid = dpid if ports is None: self.ports = None else: self.ports = set(ports) if self.ports: assert self.dpid is not None # Doesn't make sense self._servers.append(self) if pool is None: self.pool = [IPAddr("192.168.0."+str(x)) for x in range(100,199)] self.subnet = IPAddr(subnet or "255.255.255.0") else: self.pool = pool self.subnet = subnet if hasattr(pool, 'subnet_mask'): self.subnet = pool.subnet_mask if self.subnet is None: raise RuntimeError("You must specify a subnet mask or use a " "pool with a subnet hint") self.lease_time = 60 * 60 # An hour #TODO: Actually make them expire :) self.offers = {} # Eth -> IP we offered self.leases = {} # Eth -> IP we leased if self.ip_addr in self.pool: log.debug("Removing my own IP (%s) from address pool", self.ip_addr) self.pool.remove(self.ip_addr) core.openflow.addListeners(self) @classmethod def get_server_for_port (cls, dpid, port): """ Given a dpid.port, returns DHCPD instance responsible for it or None If there is a server, but the connection to the relevant switch is down, returns None. 
""" for s in cls.servers: if s.dpid != dpid: continue conn = core.openflow.getConnection(s.dpid) if not conn: continue if s.ports is None: return s port_no = conn.ports.get(port) if port_no is None: continue port_no = port_no.port_no for p in s.ports: p = conn.ports.get(p) if p is None: continue if p.port_no == port_no: return s return None @classmethod def get_ports_for_dpid (cls, dpid): """ Given a dpid, returns all port,server that are configured for it If the switch is disconnected, returns None. """ r = set() for s in cls._servers: if s.dpid != dpid: continue conn = core.openflow.getConnection(s.dpid) if not conn: continue if s.ports is None: for p in conn.ports: r.add((p.port_no,s)) else: for p in s.ports: p = conn.ports.get(p) if p is None: continue r.add((p.port_no,s)) return r def _handle_ConnectionUp (self, event): if self.dpid is not None and self.dpid != event.dpid: return if self._install_flow: msg = self._get_flow_mod() event.connection.send(msg) def _get_flow_mod (self, msg_type=of.ofp_flow_mod): """ Get flow mods that will send DHCP to the controller """ #TODO: We might over-match right now since we don't limit by port msg = msg_type() msg.match = of.ofp_match() msg.match.dl_type = pkt.ethernet.IP_TYPE msg.match.nw_proto = pkt.ipv4.UDP_PROTOCOL #msg.match.nw_dst = IP_BROADCAST msg.match.tp_src = pkt.dhcp.CLIENT_PORT msg.match.tp_dst = pkt.dhcp.SERVER_PORT msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER)) #msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) return msg def _get_pool (self, event): """ Get an IP pool for this event. Return None to not issue an IP. You should probably log this. """ return self.pool def _handle_PacketIn (self, event): # Is it to us? (Or at least not specifically NOT to us...) 
if self.dpid is not None and self.dpid != event.dpid: return if self.ports: for p in self.ports: if p == event.port: break if p in event.connection.ports: if event.connection.ports[p].port_no == event.port: break else: return ipp = event.parsed.find('ipv4') if not ipp or not ipp.parsed: return if ipp.dstip not in (IP_ANY,IP_BROADCAST,self.ip_addr): return # Is it full and proper DHCP? nwp = ipp.payload if not nwp or not nwp.parsed or not isinstance(nwp, pkt.udp): return if nwp.srcport != pkt.dhcp.CLIENT_PORT: return if nwp.dstport != pkt.dhcp.SERVER_PORT: return p = nwp.payload if not p: log.debug("%s: no packet", str(event.connection)) return if not isinstance(p, pkt.dhcp): log.debug("%s: packet is not DHCP", str(event.connection)) return if not p.parsed: log.debug("%s: DHCP packet not parsed", str(event.connection)) return if p.op != p.BOOTREQUEST: return t = p.options.get(p.MSG_TYPE_OPT) if t is None: return pool = self._get_pool(event) if pool is None: return if t.type == p.DISCOVER_MSG: self.exec_discover(event, p, pool) elif t.type == p.REQUEST_MSG: self.exec_request(event, p, pool) elif t.type == p.RELEASE_MSG: self.exec_release(event, p, pool) def reply (self, event, msg): orig = event.parsed.find('dhcp') broadcast = (orig.flags & orig.BROADCAST_FLAG) != 0 msg.op = msg.BOOTREPLY msg.chaddr = event.parsed.src msg.htype = 1 msg.hlen = 6 msg.xid = orig.xid msg.add_option(pkt.DHCP.DHCPServerIdentifierOption(self.ip_addr)) ethp = pkt.ethernet(src=ip_for_event(event),dst=event.parsed.src) ethp.type = pkt.ethernet.IP_TYPE ipp = pkt.ipv4(srcip = self.ip_addr) ipp.dstip = event.parsed.find('ipv4').srcip if broadcast: ipp.dstip = IP_BROADCAST ethp.dst = pkt.ETHERNET.ETHER_BROADCAST ipp.protocol = ipp.UDP_PROTOCOL udpp = pkt.udp() udpp.srcport = pkt.dhcp.SERVER_PORT udpp.dstport = pkt.dhcp.CLIENT_PORT udpp.payload = msg ipp.payload = udpp ethp.payload = ipp po = of.ofp_packet_out(data=ethp.pack()) po.actions.append(of.ofp_action_output(port=event.port)) 
event.connection.send(po) def nak (self, event, msg = None): if msg is None: msg = pkt.dhcp() msg.add_option(pkt.DHCP.DHCPMsgTypeOption(msg.NAK_MSG)) msg.siaddr = self.ip_addr self.reply(event, msg) def exec_release (self, event, p, pool): src = event.parsed.src if src != p.chaddr: log.warn("%s tried to release %s with bad chaddr" % (src,p.ciaddr)) return if self.leases.get(p.chaddr) != p.ciaddr: log.warn("%s tried to release unleased %s" % (src,p.ciaddr)) return del self.leases[p.chaddr] pool.append(p.ciaddr) log.info("%s released %s" % (src,p.ciaddr)) def exec_request (self, event, p, pool): if not p.REQUEST_IP_OPT in p.options: # Uhhh... return wanted_ip = p.options[p.REQUEST_IP_OPT].addr src = event.parsed.src got_ip = None if src in self.leases: if wanted_ip != self.leases[src]: pool.append(self.leases[src]) del self.leases[src] else: got_ip = self.leases[src] if got_ip is None: if src in self.offers: if wanted_ip != self.offers[src]: pool.append(self.offers[src]) del self.offers[src] else: got_ip = self.offers[src] if got_ip is None: if wanted_ip in pool: pool.remove(wanted_ip) got_ip = wanted_ip if got_ip is None: log.warn("%s asked for un-offered %s", src, wanted_ip) self.nak(event) return assert got_ip == wanted_ip self.leases[src] = got_ip ev = DHCPLease(src, got_ip) self.raiseEvent(ev) if ev._nak: self.nak(event) return log.info("Leased %s to %s" % (got_ip, src)) reply = pkt.dhcp() reply.add_option(pkt.DHCP.DHCPMsgTypeOption(p.ACK_MSG)) reply.yiaddr = wanted_ip reply.siaddr = self.ip_addr wanted_opts = set() if p.PARAM_REQ_OPT in p.options: wanted_opts.update(p.options[p.PARAM_REQ_OPT].options) self.fill(wanted_opts, reply) self.reply(event, reply) def exec_discover (self, event, p, pool): reply = pkt.dhcp() reply.add_option(pkt.DHCP.DHCPMsgTypeOption(p.OFFER_MSG)) src = event.parsed.src if src in self.leases: offer = self.leases[src] del self.leases[src] self.offers[src] = offer else: offer = self.offers.get(src) if offer is None: if len(pool) == 0: 
log.error("Out of IP addresses") self.nak(event) return offer = pool[0] if p.REQUEST_IP_OPT in p.options: wanted_ip = p.options[p.REQUEST_IP_OPT].addr if wanted_ip in pool: offer = wanted_ip pool.remove(offer) self.offers[src] = offer reply.yiaddr = offer reply.siaddr = self.ip_addr wanted_opts = set() if p.PARAM_REQ_OPT in p.options: wanted_opts.update(p.options[p.PARAM_REQ_OPT].options) self.fill(wanted_opts, reply) self.reply(event, reply) def fill (self, wanted_opts, msg): """ Fill out some options in msg """ if msg.SUBNET_MASK_OPT in wanted_opts: msg.add_option(pkt.DHCP.DHCPSubnetMaskOption(self.subnet)) if msg.ROUTERS_OPT in wanted_opts and self.router_addr is not None: msg.add_option(pkt.DHCP.DHCPRoutersOption(self.router_addr)) if msg.DNS_SERVER_OPT in wanted_opts and self.dns_addr is not None: msg.add_option(pkt.DHCP.DHCPDNSServersOption(self.dns_addr)) msg.add_option(pkt.DHCP.DHCPIPAddressLeaseTimeOption(self.lease_time)) def default (no_flow = False, network = "192.168.0.0/24", # Address range first = 100, last = 199, count = None, # Address range ip = "192.168.0.254", router = (), # Auto dns = ()): # Auto """ Launch DHCP server defaulting to 192.168.0.100-199 """ launch(no_flow, network, first, last, count, ip, router, dns) def launch (no_flow = False, network = "192.168.0.0/24", # Address range first = 1, last = None, count = None, # Address range ip = "192.168.0.254", router = (), # Auto dns = (), # Auto dpid = None, # All ports = None, # All __INSTANCE__ = None): """ Launch DHCP server Defaults to serving 192.168.0.1 to 192.168.0.253 network Subnet to allocate addresses from first First'th address in subnet to use (256 is x.x.1.0 in a /16) last Last'th address in subnet to use count Alternate way to specify last address to use ip IP to use for DHCP server router Router IP to tell clients. Defaults to 'ip'. 'None' will stop the server from telling clients anything dns DNS IP to tell clients. Defaults to 'router'. 
'None' will stop the server from telling clients anything. """ def fixint (i): i = str(i) if i.lower() == "none": return None if i.lower() == "true": return None return int(i) def fix (i): i = str(i) if i.lower() == "none": return None if i.lower() == "true": return None if i == '()': return () return i first,last,count = map(fixint,(first,last,count)) router,dns = map(fix,(router,dns)) if ports is not None: ports = ports.split(",") ports = set(int(p) if p.isdigit() else p for p in ports) pool = SimpleAddressPool(network = network, first = first, last = last, count = count) inst = DHCPD(install_flow = not no_flow, pool = pool, ip_address = ip, router_address = router, dns_address = dns, dpid = dpid, ports = ports) if __INSTANCE__[0] == 0: # First or only instance core.register(inst) log.debug("DHCP serving a%s", str(pool)[2:-1])
self.last = (1 << self.host_size) - 2 elif last is not None: self.last = last elif count is not None: self.last = self.first + count - 1
random_line_split
WebGLCanvas.js
// // Copyright (c) 2014 Sam Leitch. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // // modified by Matthias Behrens (github.com/soliton4) for Broadway.js // universal module definition (function (root, factory) { if (typeof define === 'function' && define.amd) { // AMD. Register as an anonymous module. define([], factory); } else if (typeof exports === 'object')
else { // Browser globals (root is window) root.WebGLCanvas = factory(); } }(this, function () { /** * This class can be used to render output pictures from an H264bsdDecoder to a canvas element. * If available the content is rendered using WebGL. */ function H264bsdCanvas(canvas, forceNoGL, contextOptions) { this.canvasElement = canvas; this.contextOptions = contextOptions; if(!forceNoGL) this.initContextGL(); if(this.contextGL) { this.initProgram(); this.initBuffers(); this.initTextures(); }; }; /** * Returns true if the canvas supports WebGL */ H264bsdCanvas.prototype.isWebGL = function() { return this.contextGL; }; /** * Create the GL context from the canvas element */ H264bsdCanvas.prototype.initContextGL = function() { var canvas = this.canvasElement; var gl = null; var validContextNames = ["webgl", "experimental-webgl", "moz-webgl", "webkit-3d"]; var nameIndex = 0; while(!gl && nameIndex < validContextNames.length) { var contextName = validContextNames[nameIndex]; try { if (this.contextOptions){ gl = canvas.getContext(contextName, this.contextOptions); }else{ gl = canvas.getContext(contextName); }; } catch (e) { gl = null; } if(!gl || typeof gl.getParameter !== "function") { gl = null; } ++nameIndex; }; this.contextGL = gl; }; /** * Initialize GL shader program */ H264bsdCanvas.prototype.initProgram = function() { var gl = this.contextGL; var vertexShaderScript = [ 'attribute vec4 vertexPos;', 'attribute vec4 texturePos;', 'varying vec2 textureCoord;', 'void main()', '{', 'gl_Position = vertexPos;', 'textureCoord = texturePos.xy;', '}' ].join('\n'); var fragmentShaderScript = [ 'precision highp float;', 'varying highp vec2 textureCoord;', 'uniform sampler2D ySampler;', 'uniform sampler2D uSampler;', 'uniform sampler2D vSampler;', 'const mat4 YUV2RGB = mat4', '(', '1.1643828125, 0, 1.59602734375, -.87078515625,', '1.1643828125, -.39176171875, -.81296875, .52959375,', '1.1643828125, 2.017234375, 0, -1.081390625,', '0, 0, 0, 1', ');', 'void main(void) {', 
'highp float y = texture2D(ySampler, textureCoord).r;', 'highp float u = texture2D(uSampler, textureCoord).r;', 'highp float v = texture2D(vSampler, textureCoord).r;', 'gl_FragColor = vec4(y, u, v, 1) * YUV2RGB;', '}' ].join('\n'); var vertexShader = gl.createShader(gl.VERTEX_SHADER); gl.shaderSource(vertexShader, vertexShaderScript); gl.compileShader(vertexShader); if(!gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS)) { console.log('Vertex shader failed to compile: ' + gl.getShaderInfoLog(vertexShader)); } var fragmentShader = gl.createShader(gl.FRAGMENT_SHADER); gl.shaderSource(fragmentShader, fragmentShaderScript); gl.compileShader(fragmentShader); if(!gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS)) { console.log('Fragment shader failed to compile: ' + gl.getShaderInfoLog(fragmentShader)); } var program = gl.createProgram(); gl.attachShader(program, vertexShader); gl.attachShader(program, fragmentShader); gl.linkProgram(program); if(!gl.getProgramParameter(program, gl.LINK_STATUS)) { console.log('Program failed to compile: ' + gl.getProgramInfoLog(program)); } gl.useProgram(program); this.shaderProgram = program; }; /** * Initialize vertex buffers and attach to shader program */ H264bsdCanvas.prototype.initBuffers = function() { var gl = this.contextGL; var program = this.shaderProgram; var vertexPosBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, vertexPosBuffer); gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([1, 1, -1, 1, 1, -1, -1, -1]), gl.STATIC_DRAW); var vertexPosRef = gl.getAttribLocation(program, 'vertexPos'); gl.enableVertexAttribArray(vertexPosRef); gl.vertexAttribPointer(vertexPosRef, 2, gl.FLOAT, false, 0, 0); var texturePosBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, texturePosBuffer); gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([1, 0, 0, 0, 1, 1, 0, 1]), gl.STATIC_DRAW); var texturePosRef = gl.getAttribLocation(program, 'texturePos'); gl.enableVertexAttribArray(texturePosRef); 
gl.vertexAttribPointer(texturePosRef, 2, gl.FLOAT, false, 0, 0); this.texturePosBuffer = texturePosBuffer; }; /** * Initialize GL textures and attach to shader program */ H264bsdCanvas.prototype.initTextures = function() { var gl = this.contextGL; var program = this.shaderProgram; var yTextureRef = this.initTexture(); var ySamplerRef = gl.getUniformLocation(program, 'ySampler'); gl.uniform1i(ySamplerRef, 0); this.yTextureRef = yTextureRef; var uTextureRef = this.initTexture(); var uSamplerRef = gl.getUniformLocation(program, 'uSampler'); gl.uniform1i(uSamplerRef, 1); this.uTextureRef = uTextureRef; var vTextureRef = this.initTexture(); var vSamplerRef = gl.getUniformLocation(program, 'vSampler'); gl.uniform1i(vSamplerRef, 2); this.vTextureRef = vTextureRef; }; /** * Create and configure a single texture */ H264bsdCanvas.prototype.initTexture = function() { var gl = this.contextGL; var textureRef = gl.createTexture(); gl.bindTexture(gl.TEXTURE_2D, textureRef); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE); gl.bindTexture(gl.TEXTURE_2D, null); return textureRef; }; /** * Draw picture data to the canvas. * If this object is using WebGL, the data must be an I420 formatted ArrayBuffer, * Otherwise, data must be an RGBA formatted ArrayBuffer. 
*/ H264bsdCanvas.prototype.drawNextOutputPicture = function(width, height, croppingParams, data) { var gl = this.contextGL; if(gl) { this.drawNextOuptutPictureGL(width, height, croppingParams, data); } else { this.drawNextOuptutPictureRGBA(width, height, croppingParams, data); } }; /** * Draw the next output picture using WebGL */ H264bsdCanvas.prototype.drawNextOuptutPictureGL = function(width, height, croppingParams, data) { var gl = this.contextGL; var texturePosBuffer = this.texturePosBuffer; var yTextureRef = this.yTextureRef; var uTextureRef = this.uTextureRef; var vTextureRef = this.vTextureRef; if(croppingParams === null) { gl.viewport(0, 0, width, height); } else { gl.viewport(0, 0, croppingParams.width, croppingParams.height); var tTop = croppingParams.top / height; var tLeft = croppingParams.left / width; var tBottom = croppingParams.height / height; var tRight = croppingParams.width / width; var texturePosValues = new Float32Array([tRight, tTop, tLeft, tTop, tRight, tBottom, tLeft, tBottom]); gl.bindBuffer(gl.ARRAY_BUFFER, texturePosBuffer); gl.bufferData(gl.ARRAY_BUFFER, texturePosValues, gl.DYNAMIC_DRAW); } var i420Data = data; var yDataLength = width * height; var yData = i420Data.subarray(0, yDataLength); gl.activeTexture(gl.TEXTURE0); gl.bindTexture(gl.TEXTURE_2D, yTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width, height, 0, gl.LUMINANCE, gl.UNSIGNED_BYTE, yData); var cbDataLength = width/2 * height/2; var cbData = i420Data.subarray(yDataLength, yDataLength + cbDataLength); gl.activeTexture(gl.TEXTURE1); gl.bindTexture(gl.TEXTURE_2D, uTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width/2, height/2, 0, gl.LUMINANCE, gl.UNSIGNED_BYTE, cbData); var crDataLength = cbDataLength; var crData = i420Data.subarray(yDataLength + cbDataLength, yDataLength + cbDataLength + crDataLength); gl.activeTexture(gl.TEXTURE2); gl.bindTexture(gl.TEXTURE_2D, vTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width/2, height/2, 0, 
gl.LUMINANCE, gl.UNSIGNED_BYTE, crData); gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4); }; /** * Draw next output picture using ARGB data on a 2d canvas. */ H264bsdCanvas.prototype.drawNextOuptutPictureRGBA = function(width, height, croppingParams, data) { var canvas = this.canvasElement; var croppingParams = null; var argbData = data; var ctx = canvas.getContext('2d'); var imageData = ctx.getImageData(0, 0, width, height); imageData.data.set(argbData); if(croppingParams === null) { ctx.putImageData(imageData, 0, 0); } else { ctx.putImageData(imageData, -croppingParams.left, -croppingParams.top, 0, 0, croppingParams.width, croppingParams.height); } }; return H264bsdCanvas; }));
{ // Node. Does not work with strict CommonJS, but // only CommonJS-like environments that support module.exports, // like Node. module.exports = factory(); }
conditional_block
WebGLCanvas.js
// // Copyright (c) 2014 Sam Leitch. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // // modified by Matthias Behrens (github.com/soliton4) for Broadway.js // universal module definition (function (root, factory) { if (typeof define === 'function' && define.amd) { // AMD. Register as an anonymous module. define([], factory); } else if (typeof exports === 'object') { // Node. Does not work with strict CommonJS, but // only CommonJS-like environments that support module.exports, // like Node. module.exports = factory(); } else { // Browser globals (root is window) root.WebGLCanvas = factory(); } }(this, function () { /** * This class can be used to render output pictures from an H264bsdDecoder to a canvas element. * If available the content is rendered using WebGL. */ function
(canvas, forceNoGL, contextOptions) { this.canvasElement = canvas; this.contextOptions = contextOptions; if(!forceNoGL) this.initContextGL(); if(this.contextGL) { this.initProgram(); this.initBuffers(); this.initTextures(); }; }; /** * Returns true if the canvas supports WebGL */ H264bsdCanvas.prototype.isWebGL = function() { return this.contextGL; }; /** * Create the GL context from the canvas element */ H264bsdCanvas.prototype.initContextGL = function() { var canvas = this.canvasElement; var gl = null; var validContextNames = ["webgl", "experimental-webgl", "moz-webgl", "webkit-3d"]; var nameIndex = 0; while(!gl && nameIndex < validContextNames.length) { var contextName = validContextNames[nameIndex]; try { if (this.contextOptions){ gl = canvas.getContext(contextName, this.contextOptions); }else{ gl = canvas.getContext(contextName); }; } catch (e) { gl = null; } if(!gl || typeof gl.getParameter !== "function") { gl = null; } ++nameIndex; }; this.contextGL = gl; }; /** * Initialize GL shader program */ H264bsdCanvas.prototype.initProgram = function() { var gl = this.contextGL; var vertexShaderScript = [ 'attribute vec4 vertexPos;', 'attribute vec4 texturePos;', 'varying vec2 textureCoord;', 'void main()', '{', 'gl_Position = vertexPos;', 'textureCoord = texturePos.xy;', '}' ].join('\n'); var fragmentShaderScript = [ 'precision highp float;', 'varying highp vec2 textureCoord;', 'uniform sampler2D ySampler;', 'uniform sampler2D uSampler;', 'uniform sampler2D vSampler;', 'const mat4 YUV2RGB = mat4', '(', '1.1643828125, 0, 1.59602734375, -.87078515625,', '1.1643828125, -.39176171875, -.81296875, .52959375,', '1.1643828125, 2.017234375, 0, -1.081390625,', '0, 0, 0, 1', ');', 'void main(void) {', 'highp float y = texture2D(ySampler, textureCoord).r;', 'highp float u = texture2D(uSampler, textureCoord).r;', 'highp float v = texture2D(vSampler, textureCoord).r;', 'gl_FragColor = vec4(y, u, v, 1) * YUV2RGB;', '}' ].join('\n'); var vertexShader = 
gl.createShader(gl.VERTEX_SHADER); gl.shaderSource(vertexShader, vertexShaderScript); gl.compileShader(vertexShader); if(!gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS)) { console.log('Vertex shader failed to compile: ' + gl.getShaderInfoLog(vertexShader)); } var fragmentShader = gl.createShader(gl.FRAGMENT_SHADER); gl.shaderSource(fragmentShader, fragmentShaderScript); gl.compileShader(fragmentShader); if(!gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS)) { console.log('Fragment shader failed to compile: ' + gl.getShaderInfoLog(fragmentShader)); } var program = gl.createProgram(); gl.attachShader(program, vertexShader); gl.attachShader(program, fragmentShader); gl.linkProgram(program); if(!gl.getProgramParameter(program, gl.LINK_STATUS)) { console.log('Program failed to compile: ' + gl.getProgramInfoLog(program)); } gl.useProgram(program); this.shaderProgram = program; }; /** * Initialize vertex buffers and attach to shader program */ H264bsdCanvas.prototype.initBuffers = function() { var gl = this.contextGL; var program = this.shaderProgram; var vertexPosBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, vertexPosBuffer); gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([1, 1, -1, 1, 1, -1, -1, -1]), gl.STATIC_DRAW); var vertexPosRef = gl.getAttribLocation(program, 'vertexPos'); gl.enableVertexAttribArray(vertexPosRef); gl.vertexAttribPointer(vertexPosRef, 2, gl.FLOAT, false, 0, 0); var texturePosBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, texturePosBuffer); gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([1, 0, 0, 0, 1, 1, 0, 1]), gl.STATIC_DRAW); var texturePosRef = gl.getAttribLocation(program, 'texturePos'); gl.enableVertexAttribArray(texturePosRef); gl.vertexAttribPointer(texturePosRef, 2, gl.FLOAT, false, 0, 0); this.texturePosBuffer = texturePosBuffer; }; /** * Initialize GL textures and attach to shader program */ H264bsdCanvas.prototype.initTextures = function() { var gl = this.contextGL; var program = 
this.shaderProgram; var yTextureRef = this.initTexture(); var ySamplerRef = gl.getUniformLocation(program, 'ySampler'); gl.uniform1i(ySamplerRef, 0); this.yTextureRef = yTextureRef; var uTextureRef = this.initTexture(); var uSamplerRef = gl.getUniformLocation(program, 'uSampler'); gl.uniform1i(uSamplerRef, 1); this.uTextureRef = uTextureRef; var vTextureRef = this.initTexture(); var vSamplerRef = gl.getUniformLocation(program, 'vSampler'); gl.uniform1i(vSamplerRef, 2); this.vTextureRef = vTextureRef; }; /** * Create and configure a single texture */ H264bsdCanvas.prototype.initTexture = function() { var gl = this.contextGL; var textureRef = gl.createTexture(); gl.bindTexture(gl.TEXTURE_2D, textureRef); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE); gl.bindTexture(gl.TEXTURE_2D, null); return textureRef; }; /** * Draw picture data to the canvas. * If this object is using WebGL, the data must be an I420 formatted ArrayBuffer, * Otherwise, data must be an RGBA formatted ArrayBuffer. 
*/ H264bsdCanvas.prototype.drawNextOutputPicture = function(width, height, croppingParams, data) { var gl = this.contextGL; if(gl) { this.drawNextOuptutPictureGL(width, height, croppingParams, data); } else { this.drawNextOuptutPictureRGBA(width, height, croppingParams, data); } }; /** * Draw the next output picture using WebGL */ H264bsdCanvas.prototype.drawNextOuptutPictureGL = function(width, height, croppingParams, data) { var gl = this.contextGL; var texturePosBuffer = this.texturePosBuffer; var yTextureRef = this.yTextureRef; var uTextureRef = this.uTextureRef; var vTextureRef = this.vTextureRef; if(croppingParams === null) { gl.viewport(0, 0, width, height); } else { gl.viewport(0, 0, croppingParams.width, croppingParams.height); var tTop = croppingParams.top / height; var tLeft = croppingParams.left / width; var tBottom = croppingParams.height / height; var tRight = croppingParams.width / width; var texturePosValues = new Float32Array([tRight, tTop, tLeft, tTop, tRight, tBottom, tLeft, tBottom]); gl.bindBuffer(gl.ARRAY_BUFFER, texturePosBuffer); gl.bufferData(gl.ARRAY_BUFFER, texturePosValues, gl.DYNAMIC_DRAW); } var i420Data = data; var yDataLength = width * height; var yData = i420Data.subarray(0, yDataLength); gl.activeTexture(gl.TEXTURE0); gl.bindTexture(gl.TEXTURE_2D, yTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width, height, 0, gl.LUMINANCE, gl.UNSIGNED_BYTE, yData); var cbDataLength = width/2 * height/2; var cbData = i420Data.subarray(yDataLength, yDataLength + cbDataLength); gl.activeTexture(gl.TEXTURE1); gl.bindTexture(gl.TEXTURE_2D, uTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width/2, height/2, 0, gl.LUMINANCE, gl.UNSIGNED_BYTE, cbData); var crDataLength = cbDataLength; var crData = i420Data.subarray(yDataLength + cbDataLength, yDataLength + cbDataLength + crDataLength); gl.activeTexture(gl.TEXTURE2); gl.bindTexture(gl.TEXTURE_2D, vTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width/2, height/2, 0, 
gl.LUMINANCE, gl.UNSIGNED_BYTE, crData); gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4); }; /** * Draw next output picture using ARGB data on a 2d canvas. */ H264bsdCanvas.prototype.drawNextOuptutPictureRGBA = function(width, height, croppingParams, data) { var canvas = this.canvasElement; var croppingParams = null; var argbData = data; var ctx = canvas.getContext('2d'); var imageData = ctx.getImageData(0, 0, width, height); imageData.data.set(argbData); if(croppingParams === null) { ctx.putImageData(imageData, 0, 0); } else { ctx.putImageData(imageData, -croppingParams.left, -croppingParams.top, 0, 0, croppingParams.width, croppingParams.height); } }; return H264bsdCanvas; }));
H264bsdCanvas
identifier_name
WebGLCanvas.js
// // Copyright (c) 2014 Sam Leitch. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // // modified by Matthias Behrens (github.com/soliton4) for Broadway.js // universal module definition (function (root, factory) { if (typeof define === 'function' && define.amd) { // AMD. Register as an anonymous module. define([], factory); } else if (typeof exports === 'object') { // Node. Does not work with strict CommonJS, but // only CommonJS-like environments that support module.exports, // like Node. module.exports = factory(); } else { // Browser globals (root is window) root.WebGLCanvas = factory(); } }(this, function () { /** * This class can be used to render output pictures from an H264bsdDecoder to a canvas element. * If available the content is rendered using WebGL. */ function H264bsdCanvas(canvas, forceNoGL, contextOptions)
; /** * Returns true if the canvas supports WebGL */ H264bsdCanvas.prototype.isWebGL = function() { return this.contextGL; }; /** * Create the GL context from the canvas element */ H264bsdCanvas.prototype.initContextGL = function() { var canvas = this.canvasElement; var gl = null; var validContextNames = ["webgl", "experimental-webgl", "moz-webgl", "webkit-3d"]; var nameIndex = 0; while(!gl && nameIndex < validContextNames.length) { var contextName = validContextNames[nameIndex]; try { if (this.contextOptions){ gl = canvas.getContext(contextName, this.contextOptions); }else{ gl = canvas.getContext(contextName); }; } catch (e) { gl = null; } if(!gl || typeof gl.getParameter !== "function") { gl = null; } ++nameIndex; }; this.contextGL = gl; }; /** * Initialize GL shader program */ H264bsdCanvas.prototype.initProgram = function() { var gl = this.contextGL; var vertexShaderScript = [ 'attribute vec4 vertexPos;', 'attribute vec4 texturePos;', 'varying vec2 textureCoord;', 'void main()', '{', 'gl_Position = vertexPos;', 'textureCoord = texturePos.xy;', '}' ].join('\n'); var fragmentShaderScript = [ 'precision highp float;', 'varying highp vec2 textureCoord;', 'uniform sampler2D ySampler;', 'uniform sampler2D uSampler;', 'uniform sampler2D vSampler;', 'const mat4 YUV2RGB = mat4', '(', '1.1643828125, 0, 1.59602734375, -.87078515625,', '1.1643828125, -.39176171875, -.81296875, .52959375,', '1.1643828125, 2.017234375, 0, -1.081390625,', '0, 0, 0, 1', ');', 'void main(void) {', 'highp float y = texture2D(ySampler, textureCoord).r;', 'highp float u = texture2D(uSampler, textureCoord).r;', 'highp float v = texture2D(vSampler, textureCoord).r;', 'gl_FragColor = vec4(y, u, v, 1) * YUV2RGB;', '}' ].join('\n'); var vertexShader = gl.createShader(gl.VERTEX_SHADER); gl.shaderSource(vertexShader, vertexShaderScript); gl.compileShader(vertexShader); if(!gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS)) { console.log('Vertex shader failed to compile: ' + 
gl.getShaderInfoLog(vertexShader)); } var fragmentShader = gl.createShader(gl.FRAGMENT_SHADER); gl.shaderSource(fragmentShader, fragmentShaderScript); gl.compileShader(fragmentShader); if(!gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS)) { console.log('Fragment shader failed to compile: ' + gl.getShaderInfoLog(fragmentShader)); } var program = gl.createProgram(); gl.attachShader(program, vertexShader); gl.attachShader(program, fragmentShader); gl.linkProgram(program); if(!gl.getProgramParameter(program, gl.LINK_STATUS)) { console.log('Program failed to compile: ' + gl.getProgramInfoLog(program)); } gl.useProgram(program); this.shaderProgram = program; }; /** * Initialize vertex buffers and attach to shader program */ H264bsdCanvas.prototype.initBuffers = function() { var gl = this.contextGL; var program = this.shaderProgram; var vertexPosBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, vertexPosBuffer); gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([1, 1, -1, 1, 1, -1, -1, -1]), gl.STATIC_DRAW); var vertexPosRef = gl.getAttribLocation(program, 'vertexPos'); gl.enableVertexAttribArray(vertexPosRef); gl.vertexAttribPointer(vertexPosRef, 2, gl.FLOAT, false, 0, 0); var texturePosBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, texturePosBuffer); gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([1, 0, 0, 0, 1, 1, 0, 1]), gl.STATIC_DRAW); var texturePosRef = gl.getAttribLocation(program, 'texturePos'); gl.enableVertexAttribArray(texturePosRef); gl.vertexAttribPointer(texturePosRef, 2, gl.FLOAT, false, 0, 0); this.texturePosBuffer = texturePosBuffer; }; /** * Initialize GL textures and attach to shader program */ H264bsdCanvas.prototype.initTextures = function() { var gl = this.contextGL; var program = this.shaderProgram; var yTextureRef = this.initTexture(); var ySamplerRef = gl.getUniformLocation(program, 'ySampler'); gl.uniform1i(ySamplerRef, 0); this.yTextureRef = yTextureRef; var uTextureRef = this.initTexture(); var uSamplerRef = 
gl.getUniformLocation(program, 'uSampler'); gl.uniform1i(uSamplerRef, 1); this.uTextureRef = uTextureRef; var vTextureRef = this.initTexture(); var vSamplerRef = gl.getUniformLocation(program, 'vSampler'); gl.uniform1i(vSamplerRef, 2); this.vTextureRef = vTextureRef; }; /** * Create and configure a single texture */ H264bsdCanvas.prototype.initTexture = function() { var gl = this.contextGL; var textureRef = gl.createTexture(); gl.bindTexture(gl.TEXTURE_2D, textureRef); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE); gl.bindTexture(gl.TEXTURE_2D, null); return textureRef; }; /** * Draw picture data to the canvas. * If this object is using WebGL, the data must be an I420 formatted ArrayBuffer, * Otherwise, data must be an RGBA formatted ArrayBuffer. */ H264bsdCanvas.prototype.drawNextOutputPicture = function(width, height, croppingParams, data) { var gl = this.contextGL; if(gl) { this.drawNextOuptutPictureGL(width, height, croppingParams, data); } else { this.drawNextOuptutPictureRGBA(width, height, croppingParams, data); } }; /** * Draw the next output picture using WebGL */ H264bsdCanvas.prototype.drawNextOuptutPictureGL = function(width, height, croppingParams, data) { var gl = this.contextGL; var texturePosBuffer = this.texturePosBuffer; var yTextureRef = this.yTextureRef; var uTextureRef = this.uTextureRef; var vTextureRef = this.vTextureRef; if(croppingParams === null) { gl.viewport(0, 0, width, height); } else { gl.viewport(0, 0, croppingParams.width, croppingParams.height); var tTop = croppingParams.top / height; var tLeft = croppingParams.left / width; var tBottom = croppingParams.height / height; var tRight = croppingParams.width / width; var texturePosValues = new Float32Array([tRight, tTop, tLeft, tTop, tRight, tBottom, tLeft, 
tBottom]); gl.bindBuffer(gl.ARRAY_BUFFER, texturePosBuffer); gl.bufferData(gl.ARRAY_BUFFER, texturePosValues, gl.DYNAMIC_DRAW); } var i420Data = data; var yDataLength = width * height; var yData = i420Data.subarray(0, yDataLength); gl.activeTexture(gl.TEXTURE0); gl.bindTexture(gl.TEXTURE_2D, yTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width, height, 0, gl.LUMINANCE, gl.UNSIGNED_BYTE, yData); var cbDataLength = width/2 * height/2; var cbData = i420Data.subarray(yDataLength, yDataLength + cbDataLength); gl.activeTexture(gl.TEXTURE1); gl.bindTexture(gl.TEXTURE_2D, uTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width/2, height/2, 0, gl.LUMINANCE, gl.UNSIGNED_BYTE, cbData); var crDataLength = cbDataLength; var crData = i420Data.subarray(yDataLength + cbDataLength, yDataLength + cbDataLength + crDataLength); gl.activeTexture(gl.TEXTURE2); gl.bindTexture(gl.TEXTURE_2D, vTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width/2, height/2, 0, gl.LUMINANCE, gl.UNSIGNED_BYTE, crData); gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4); }; /** * Draw next output picture using ARGB data on a 2d canvas. */ H264bsdCanvas.prototype.drawNextOuptutPictureRGBA = function(width, height, croppingParams, data) { var canvas = this.canvasElement; var croppingParams = null; var argbData = data; var ctx = canvas.getContext('2d'); var imageData = ctx.getImageData(0, 0, width, height); imageData.data.set(argbData); if(croppingParams === null) { ctx.putImageData(imageData, 0, 0); } else { ctx.putImageData(imageData, -croppingParams.left, -croppingParams.top, 0, 0, croppingParams.width, croppingParams.height); } }; return H264bsdCanvas; }));
{ this.canvasElement = canvas; this.contextOptions = contextOptions; if(!forceNoGL) this.initContextGL(); if(this.contextGL) { this.initProgram(); this.initBuffers(); this.initTextures(); }; }
identifier_body
WebGLCanvas.js
// // Copyright (c) 2014 Sam Leitch. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // // modified by Matthias Behrens (github.com/soliton4) for Broadway.js // universal module definition (function (root, factory) { if (typeof define === 'function' && define.amd) { // AMD. Register as an anonymous module. define([], factory); } else if (typeof exports === 'object') { // Node. Does not work with strict CommonJS, but // only CommonJS-like environments that support module.exports, // like Node. module.exports = factory(); } else { // Browser globals (root is window) root.WebGLCanvas = factory(); } }(this, function () { /** * This class can be used to render output pictures from an H264bsdDecoder to a canvas element. * If available the content is rendered using WebGL. 
*/ function H264bsdCanvas(canvas, forceNoGL, contextOptions) { this.canvasElement = canvas; this.contextOptions = contextOptions; if(!forceNoGL) this.initContextGL(); if(this.contextGL) { this.initProgram(); this.initBuffers(); this.initTextures(); }; }; /** * Returns true if the canvas supports WebGL */ H264bsdCanvas.prototype.isWebGL = function() { return this.contextGL; }; /** * Create the GL context from the canvas element */ H264bsdCanvas.prototype.initContextGL = function() { var canvas = this.canvasElement; var gl = null; var validContextNames = ["webgl", "experimental-webgl", "moz-webgl", "webkit-3d"]; var nameIndex = 0; while(!gl && nameIndex < validContextNames.length) { var contextName = validContextNames[nameIndex]; try { if (this.contextOptions){ gl = canvas.getContext(contextName, this.contextOptions); }else{ gl = canvas.getContext(contextName); }; } catch (e) { gl = null; } if(!gl || typeof gl.getParameter !== "function") { gl = null; } ++nameIndex; }; this.contextGL = gl; }; /** * Initialize GL shader program */ H264bsdCanvas.prototype.initProgram = function() { var gl = this.contextGL; var vertexShaderScript = [ 'attribute vec4 vertexPos;', 'attribute vec4 texturePos;', 'varying vec2 textureCoord;', 'void main()', '{', 'gl_Position = vertexPos;', 'textureCoord = texturePos.xy;', '}' ].join('\n'); var fragmentShaderScript = [ 'precision highp float;', 'varying highp vec2 textureCoord;', 'uniform sampler2D ySampler;', 'uniform sampler2D uSampler;', 'uniform sampler2D vSampler;', 'const mat4 YUV2RGB = mat4', '(', '1.1643828125, 0, 1.59602734375, -.87078515625,', '1.1643828125, -.39176171875, -.81296875, .52959375,', '1.1643828125, 2.017234375, 0, -1.081390625,', '0, 0, 0, 1', ');', 'void main(void) {', 'highp float y = texture2D(ySampler, textureCoord).r;', 'highp float u = texture2D(uSampler, textureCoord).r;', 'highp float v = texture2D(vSampler, textureCoord).r;', 'gl_FragColor = vec4(y, u, v, 1) * YUV2RGB;', '}' ].join('\n'); var vertexShader = 
gl.createShader(gl.VERTEX_SHADER); gl.shaderSource(vertexShader, vertexShaderScript); gl.compileShader(vertexShader); if(!gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS)) { console.log('Vertex shader failed to compile: ' + gl.getShaderInfoLog(vertexShader)); } var fragmentShader = gl.createShader(gl.FRAGMENT_SHADER); gl.shaderSource(fragmentShader, fragmentShaderScript); gl.compileShader(fragmentShader); if(!gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS)) { console.log('Fragment shader failed to compile: ' + gl.getShaderInfoLog(fragmentShader)); } var program = gl.createProgram(); gl.attachShader(program, vertexShader); gl.attachShader(program, fragmentShader); gl.linkProgram(program); if(!gl.getProgramParameter(program, gl.LINK_STATUS)) { console.log('Program failed to compile: ' + gl.getProgramInfoLog(program)); } gl.useProgram(program); this.shaderProgram = program; }; /** * Initialize vertex buffers and attach to shader program */ H264bsdCanvas.prototype.initBuffers = function() { var gl = this.contextGL; var program = this.shaderProgram; var vertexPosBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, vertexPosBuffer); gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([1, 1, -1, 1, 1, -1, -1, -1]), gl.STATIC_DRAW);
gl.vertexAttribPointer(vertexPosRef, 2, gl.FLOAT, false, 0, 0); var texturePosBuffer = gl.createBuffer(); gl.bindBuffer(gl.ARRAY_BUFFER, texturePosBuffer); gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([1, 0, 0, 0, 1, 1, 0, 1]), gl.STATIC_DRAW); var texturePosRef = gl.getAttribLocation(program, 'texturePos'); gl.enableVertexAttribArray(texturePosRef); gl.vertexAttribPointer(texturePosRef, 2, gl.FLOAT, false, 0, 0); this.texturePosBuffer = texturePosBuffer; }; /** * Initialize GL textures and attach to shader program */ H264bsdCanvas.prototype.initTextures = function() { var gl = this.contextGL; var program = this.shaderProgram; var yTextureRef = this.initTexture(); var ySamplerRef = gl.getUniformLocation(program, 'ySampler'); gl.uniform1i(ySamplerRef, 0); this.yTextureRef = yTextureRef; var uTextureRef = this.initTexture(); var uSamplerRef = gl.getUniformLocation(program, 'uSampler'); gl.uniform1i(uSamplerRef, 1); this.uTextureRef = uTextureRef; var vTextureRef = this.initTexture(); var vSamplerRef = gl.getUniformLocation(program, 'vSampler'); gl.uniform1i(vSamplerRef, 2); this.vTextureRef = vTextureRef; }; /** * Create and configure a single texture */ H264bsdCanvas.prototype.initTexture = function() { var gl = this.contextGL; var textureRef = gl.createTexture(); gl.bindTexture(gl.TEXTURE_2D, textureRef); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE); gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE); gl.bindTexture(gl.TEXTURE_2D, null); return textureRef; }; /** * Draw picture data to the canvas. * If this object is using WebGL, the data must be an I420 formatted ArrayBuffer, * Otherwise, data must be an RGBA formatted ArrayBuffer. 
*/ H264bsdCanvas.prototype.drawNextOutputPicture = function(width, height, croppingParams, data) { var gl = this.contextGL; if(gl) { this.drawNextOuptutPictureGL(width, height, croppingParams, data); } else { this.drawNextOuptutPictureRGBA(width, height, croppingParams, data); } }; /** * Draw the next output picture using WebGL */ H264bsdCanvas.prototype.drawNextOuptutPictureGL = function(width, height, croppingParams, data) { var gl = this.contextGL; var texturePosBuffer = this.texturePosBuffer; var yTextureRef = this.yTextureRef; var uTextureRef = this.uTextureRef; var vTextureRef = this.vTextureRef; if(croppingParams === null) { gl.viewport(0, 0, width, height); } else { gl.viewport(0, 0, croppingParams.width, croppingParams.height); var tTop = croppingParams.top / height; var tLeft = croppingParams.left / width; var tBottom = croppingParams.height / height; var tRight = croppingParams.width / width; var texturePosValues = new Float32Array([tRight, tTop, tLeft, tTop, tRight, tBottom, tLeft, tBottom]); gl.bindBuffer(gl.ARRAY_BUFFER, texturePosBuffer); gl.bufferData(gl.ARRAY_BUFFER, texturePosValues, gl.DYNAMIC_DRAW); } var i420Data = data; var yDataLength = width * height; var yData = i420Data.subarray(0, yDataLength); gl.activeTexture(gl.TEXTURE0); gl.bindTexture(gl.TEXTURE_2D, yTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width, height, 0, gl.LUMINANCE, gl.UNSIGNED_BYTE, yData); var cbDataLength = width/2 * height/2; var cbData = i420Data.subarray(yDataLength, yDataLength + cbDataLength); gl.activeTexture(gl.TEXTURE1); gl.bindTexture(gl.TEXTURE_2D, uTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width/2, height/2, 0, gl.LUMINANCE, gl.UNSIGNED_BYTE, cbData); var crDataLength = cbDataLength; var crData = i420Data.subarray(yDataLength + cbDataLength, yDataLength + cbDataLength + crDataLength); gl.activeTexture(gl.TEXTURE2); gl.bindTexture(gl.TEXTURE_2D, vTextureRef); gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, width/2, height/2, 0, 
gl.LUMINANCE, gl.UNSIGNED_BYTE, crData); gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4); }; /** * Draw next output picture using ARGB data on a 2d canvas. */ H264bsdCanvas.prototype.drawNextOuptutPictureRGBA = function(width, height, croppingParams, data) { var canvas = this.canvasElement; var croppingParams = null; var argbData = data; var ctx = canvas.getContext('2d'); var imageData = ctx.getImageData(0, 0, width, height); imageData.data.set(argbData); if(croppingParams === null) { ctx.putImageData(imageData, 0, 0); } else { ctx.putImageData(imageData, -croppingParams.left, -croppingParams.top, 0, 0, croppingParams.width, croppingParams.height); } }; return H264bsdCanvas; }));
var vertexPosRef = gl.getAttribLocation(program, 'vertexPos'); gl.enableVertexAttribArray(vertexPosRef);
random_line_split
thread_pool.rs
use std::marker::PhantomData; use std::sync::Arc; use std::thread::JoinHandle; use std::{iter, mem}; use crossbeam::atomic::AtomicCell; use crossbeam::deque::{Injector, Steal, Stealer, Worker}; use crossbeam::sync::{Parker, Unparker, WaitGroup}; use futures::task::{Context, Poll, Waker}; use futures::Future; use std::pin::Pin; /// A chunk of work with some metadata struct Task { _group_id: TaskGroupId, task_fn: Box<dyn TaskFn>, } type TaskGroupId = usize; pub trait TaskFn: FnOnce() + Send {} pub trait StaticTaskFn: TaskFn + 'static {} impl<T> TaskFn for T where T: FnOnce() + Send {} impl<T> StaticTaskFn for T where T: TaskFn + 'static {} impl Task { /// Create a new task to be executed at some point fn new<F>(group_id: TaskGroupId, f: F) -> Self where F: StaticTaskFn, { Self { _group_id: group_id, task_fn: Box::new(f), } } /// Executes the task /// TODO: use `FnTraits` once stable fn call_once(self) { (self.task_fn)() } } /// A worker thread pool for compute-heavy tasks /// /// TODO: increase threads to a certain maximum number if current threads don't produce results fast enough? /// TODO: schedule tasks based on group context? /// #[derive(Debug)] pub struct ThreadPool { target_thread_count: usize, global_queue: Arc<Injector<Task>>, stealers: Vec<Stealer<Task>>, threads: Vec<JoinHandle<()>>, next_group_id: AtomicCell<usize>, parked_threads: Arc<Injector<Unparker>>, } impl ThreadPool { /// Creates a new thread pool with `number_of_threads` threads. /// /// # Panics /// Panics if `number_of_threads` is 0. 
/// pub fn new(number_of_threads: usize) -> Self { assert!( number_of_threads > 0, "There must be at least one thread for the thread pool" ); let worker_deques: Vec<Worker<Task>> = (0..number_of_threads).map(|_| Worker::new_fifo()).collect(); let mut thread_pool = Self { target_thread_count: number_of_threads, global_queue: Arc::new(Injector::new()), stealers: worker_deques.iter().map(Worker::stealer).collect(), threads: Vec::with_capacity(number_of_threads), next_group_id: AtomicCell::new(0), parked_threads: Arc::new(Injector::new()), }; for worker_deque in worker_deques { let global_queue = thread_pool.global_queue.clone(); let stealers = thread_pool.stealers.clone(); let parked_threads = thread_pool.parked_threads.clone(); thread_pool.threads.push(std::thread::spawn(move || { Self::await_work(&worker_deque, &global_queue, &stealers, &parked_threads); })) } thread_pool } fn await_work( local: &Worker<Task>, global: &Injector<Task>, stealers: &[Stealer<Task>], parked_threads: &Injector<Unparker>, ) { let parker = Parker::new(); let unparker = parker.unparker(); loop { // Pop a task from the local queue, if not empty. let task = local.pop().or_else(|| { // Otherwise, we need to look for a task elsewhere. iter::repeat_with(|| { // Try stealing a batch of tasks from the global queue. global .steal_batch_and_pop(local) // Or try stealing a task from one of the other threads. .or_else(|| stealers.iter().map(Stealer::steal).collect()) }) // Loop while no task was stolen and any steal operation needs to be retried. .find(|s| !s.is_retry()) // Extract the stolen task, if there is one. 
.and_then(Steal::success) }); if let Some(task) = task { // TODO: recover panics task.call_once(); } else { parked_threads.push(unparker.clone()); parker.park(); } } } pub fn create_context(&self) -> ThreadPoolContext { ThreadPoolContext::new(self, self.next_group_id.fetch_add(1)) } fn compute(&self, task: Task) { self.global_queue.push(task); // un-park a thread since there is new work if let Steal::Success(unparker) = self.parked_threads.steal() { unparker.unpark(); } } } impl Default for ThreadPool { fn default() -> Self { Self::new(num_cpus::get()) } } /// A computation context for a group that spawns tasks in a `ThreadPool` #[derive(Copy, Clone, Debug)] pub struct ThreadPoolContext<'pool> { thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId, } impl<'pool> ThreadPoolContext<'pool> { /// Create a new `ThreadPoolContext` fn new(thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId) -> Self { Self { thread_pool, task_group_id, } } /// What is the degree of parallelism that the `ThreadPool` aims for? /// This is helpful to determine how to split the work into tasks. pub fn degree_of_parallelism(&self) -> usize
/// Compute a task in the `ThreadPool` pub fn compute<F>(&self, task: F) where F: StaticTaskFn, { self.thread_pool .compute(Task::new(self.task_group_id, task)); } /// Execute a bunch of tasks in a scope that blocks until all tasks are finished. /// Provides a lifetime for that scope. /// TODO: provide an async version so that async workflows can do something in the meantime? /// TODO: handle panics: if a thread panics, this function will block forever pub fn scope<'scope, S>(&'pool self, scope_fn: S) where S: FnOnce(&Scope<'pool, 'scope>) + 'scope, { let scope = Scope::<'pool, 'scope> { thread_pool_context: &self, wait_group: WaitGroup::new(), _scope_marker: PhantomData, }; scope_fn(&scope); scope.wait_group.wait(); } } /// A scope in which you can execute tasks and it blocks until all tasks are finished #[derive(Debug)] pub struct Scope<'pool, 'scope> { thread_pool_context: &'pool ThreadPoolContext<'pool>, wait_group: WaitGroup, // needs to be invariant to `'scope`, cf. https://github.com/crossbeam-rs/crossbeam/pull/226/files#r232721183 _scope_marker: PhantomData<&'scope mut &'scope ()>, } impl<'pool, 'scope> Scope<'pool, 'scope> { /// Compute a task in the `ThreadPool` pub fn compute<F>(&self, task: F) where F: TaskFn + 'scope, { let wait_group = self.wait_group.clone(); // Allocate the `task` on the heap and erase the `'scope` bound. 
let task: Box<dyn TaskFn + 'scope> = Box::new(task); let task: Box<dyn StaticTaskFn> = unsafe { mem::transmute(task) }; self.thread_pool_context.compute(move || { task(); // decrement `WaitGroup` counter drop(wait_group); }); } /// Compute a task in the `ThreadPool` and return a `Future` of a result pub fn compute_result<F, R>(&self, task: F) -> TaskResult<R> where F: FnOnce() -> R + Send + 'scope, R: Clone + Send + 'static, { let future = TaskResult::default(); let future_ref = future.clone(); self.compute(move || { future_ref.set(task()); }); future } } /// A future that provides the task result pub struct TaskResult<R> { option: Arc<AtomicCell<TaskResultOption<R>>>, } // we can't derive `Clone` since it requires `R` to be `Clone` as well impl<R> Clone for TaskResult<R> { fn clone(&self) -> Self { Self { option: self.option.clone(), } } } /// The state of the `TaskResult` future #[derive(Debug)] enum TaskResultOption<R> { None, Result(R), Waiting(Waker), } impl<R> Default for TaskResultOption<R> { fn default() -> Self { TaskResultOption::None } } impl<R> TaskResult<R> { fn set(&self, result: R) { match self.option.swap(TaskResultOption::Result(result)) { TaskResultOption::None => {} // do nothing TaskResultOption::Result(_) => { unreachable!("There must not be a second computation of the result") } TaskResultOption::Waiting(waker) => waker.wake(), }; } } impl<R> Default for TaskResult<R> { fn default() -> Self { Self { option: Default::default(), } } } impl<R> Future for TaskResult<R> { type Output = R; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { match self .option .swap(TaskResultOption::Waiting(cx.waker().clone())) { TaskResultOption::None | TaskResultOption::Waiting(_) => Poll::Pending, TaskResultOption::Result(r) => Poll::Ready(r), } } } #[cfg(test)] mod tests { use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering}; use futures::future; use super::*; use crossbeam::utils::Backoff; #[test] #[allow(clippy::blacklisted_name)] 
fn one_task() { let thread_pool = ThreadPool::new(1); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(42, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] #[allow(clippy::blacklisted_name)] fn two_task_one_thread() { let thread_pool = ThreadPool::new(2); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(20, Ordering::SeqCst); })); let baz = foo.clone(); thread_pool.compute(Task::new(0, move || { baz.fetch_add(22, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] #[allow(clippy::blacklisted_name)] fn two_task_two_threads() { let thread_pool = ThreadPool::new(2); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(20, Ordering::SeqCst); })); let baz = foo.clone(); thread_pool.compute(Task::new(0, move || { baz.fetch_add(22, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] fn lots_of_tasks() { let thread_pool = ThreadPool::new(2); let number_of_tasks = 1_000_000; let tasks_completed = Arc::new(AtomicI32::new(0)); for _ in 0..number_of_tasks { let tasks_completed = tasks_completed.clone(); thread_pool.compute(Task::new(0, move || { tasks_completed.fetch_add(1, Ordering::SeqCst); })); } let backoff = Backoff::new(); while tasks_completed.load(Ordering::SeqCst) != number_of_tasks { backoff.snooze(); } } #[test] fn context() { let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let result = Arc::new(AtomicI32::new(0)); let result_clone = result.clone(); context.compute(move || { result_clone.fetch_add(42, Ordering::SeqCst); }); let backoff = Backoff::new(); while result.load(Ordering::SeqCst) != 42 { 
backoff.snooze(); } } #[test] fn scoped() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let result = AtomicUsize::new(0); context.scope(|scope| { for _ in 0..NUMBER_OF_TASKS { scope.compute(|| { result.fetch_add(1, Ordering::SeqCst); }); } }); assert_eq!(result.load(Ordering::SeqCst), NUMBER_OF_TASKS); } #[test] fn scoped_vec() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let mut result = vec![0; NUMBER_OF_TASKS]; context.scope(|scope| { for (chunk, i) in result.chunks_exact_mut(1).zip(0..NUMBER_OF_TASKS) { scope.compute(move || chunk[0] = i); } }); assert_eq!((0..NUMBER_OF_TASKS).collect::<Vec<_>>(), result); } #[test] fn compute_results() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let mut futures = Vec::with_capacity(NUMBER_OF_TASKS); context.scope(|scope| { for i in 0..NUMBER_OF_TASKS { futures.push(scope.compute_result(move || i)); } }); let result = futures::executor::block_on(future::join_all(futures)); assert_eq!(result, (0..NUMBER_OF_TASKS).collect::<Vec<_>>()); } #[test] fn parking() { let thread_pool = ThreadPool::new(1); let context = thread_pool.create_context(); // wait for the thread to be parked let backoff = Backoff::new(); while thread_pool.parked_threads.len() == 0 { backoff.snooze(); } let mut unparked = false; context.scope(|scope| scope.compute(|| unparked = true)); assert!(unparked) } }
{ self.thread_pool.target_thread_count }
identifier_body
thread_pool.rs
use std::marker::PhantomData; use std::sync::Arc; use std::thread::JoinHandle; use std::{iter, mem}; use crossbeam::atomic::AtomicCell; use crossbeam::deque::{Injector, Steal, Stealer, Worker}; use crossbeam::sync::{Parker, Unparker, WaitGroup}; use futures::task::{Context, Poll, Waker}; use futures::Future; use std::pin::Pin; /// A chunk of work with some metadata struct Task { _group_id: TaskGroupId, task_fn: Box<dyn TaskFn>, } type TaskGroupId = usize; pub trait TaskFn: FnOnce() + Send {} pub trait StaticTaskFn: TaskFn + 'static {} impl<T> TaskFn for T where T: FnOnce() + Send {} impl<T> StaticTaskFn for T where T: TaskFn + 'static {} impl Task { /// Create a new task to be executed at some point fn new<F>(group_id: TaskGroupId, f: F) -> Self where F: StaticTaskFn, { Self { _group_id: group_id, task_fn: Box::new(f), } } /// Executes the task /// TODO: use `FnTraits` once stable fn call_once(self) { (self.task_fn)() } } /// A worker thread pool for compute-heavy tasks /// /// TODO: increase threads to a certain maximum number if current threads don't produce results fast enough? /// TODO: schedule tasks based on group context? /// #[derive(Debug)] pub struct ThreadPool { target_thread_count: usize, global_queue: Arc<Injector<Task>>, stealers: Vec<Stealer<Task>>, threads: Vec<JoinHandle<()>>, next_group_id: AtomicCell<usize>, parked_threads: Arc<Injector<Unparker>>, } impl ThreadPool { /// Creates a new thread pool with `number_of_threads` threads. /// /// # Panics /// Panics if `number_of_threads` is 0. 
/// pub fn new(number_of_threads: usize) -> Self { assert!( number_of_threads > 0, "There must be at least one thread for the thread pool" ); let worker_deques: Vec<Worker<Task>> = (0..number_of_threads).map(|_| Worker::new_fifo()).collect(); let mut thread_pool = Self { target_thread_count: number_of_threads, global_queue: Arc::new(Injector::new()), stealers: worker_deques.iter().map(Worker::stealer).collect(), threads: Vec::with_capacity(number_of_threads), next_group_id: AtomicCell::new(0), parked_threads: Arc::new(Injector::new()), }; for worker_deque in worker_deques { let global_queue = thread_pool.global_queue.clone(); let stealers = thread_pool.stealers.clone(); let parked_threads = thread_pool.parked_threads.clone(); thread_pool.threads.push(std::thread::spawn(move || { Self::await_work(&worker_deque, &global_queue, &stealers, &parked_threads); })) } thread_pool } fn await_work( local: &Worker<Task>, global: &Injector<Task>, stealers: &[Stealer<Task>], parked_threads: &Injector<Unparker>, ) { let parker = Parker::new(); let unparker = parker.unparker(); loop { // Pop a task from the local queue, if not empty. let task = local.pop().or_else(|| { // Otherwise, we need to look for a task elsewhere. iter::repeat_with(|| { // Try stealing a batch of tasks from the global queue. global .steal_batch_and_pop(local) // Or try stealing a task from one of the other threads. .or_else(|| stealers.iter().map(Stealer::steal).collect()) }) // Loop while no task was stolen and any steal operation needs to be retried. .find(|s| !s.is_retry()) // Extract the stolen task, if there is one. 
.and_then(Steal::success) }); if let Some(task) = task { // TODO: recover panics task.call_once(); } else { parked_threads.push(unparker.clone()); parker.park(); } } } pub fn create_context(&self) -> ThreadPoolContext { ThreadPoolContext::new(self, self.next_group_id.fetch_add(1)) } fn compute(&self, task: Task) { self.global_queue.push(task); // un-park a thread since there is new work if let Steal::Success(unparker) = self.parked_threads.steal() { unparker.unpark(); } } } impl Default for ThreadPool { fn default() -> Self { Self::new(num_cpus::get()) } } /// A computation context for a group that spawns tasks in a `ThreadPool` #[derive(Copy, Clone, Debug)] pub struct ThreadPoolContext<'pool> { thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId, } impl<'pool> ThreadPoolContext<'pool> { /// Create a new `ThreadPoolContext` fn new(thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId) -> Self { Self { thread_pool, task_group_id, } } /// What is the degree of parallelism that the `ThreadPool` aims for? /// This is helpful to determine how to split the work into tasks. pub fn
(&self) -> usize { self.thread_pool.target_thread_count } /// Compute a task in the `ThreadPool` pub fn compute<F>(&self, task: F) where F: StaticTaskFn, { self.thread_pool .compute(Task::new(self.task_group_id, task)); } /// Execute a bunch of tasks in a scope that blocks until all tasks are finished. /// Provides a lifetime for that scope. /// TODO: provide an async version so that async workflows can do something in the meantime? /// TODO: handle panics: if a thread panics, this function will block forever pub fn scope<'scope, S>(&'pool self, scope_fn: S) where S: FnOnce(&Scope<'pool, 'scope>) + 'scope, { let scope = Scope::<'pool, 'scope> { thread_pool_context: &self, wait_group: WaitGroup::new(), _scope_marker: PhantomData, }; scope_fn(&scope); scope.wait_group.wait(); } } /// A scope in which you can execute tasks and it blocks until all tasks are finished #[derive(Debug)] pub struct Scope<'pool, 'scope> { thread_pool_context: &'pool ThreadPoolContext<'pool>, wait_group: WaitGroup, // needs to be invariant to `'scope`, cf. https://github.com/crossbeam-rs/crossbeam/pull/226/files#r232721183 _scope_marker: PhantomData<&'scope mut &'scope ()>, } impl<'pool, 'scope> Scope<'pool, 'scope> { /// Compute a task in the `ThreadPool` pub fn compute<F>(&self, task: F) where F: TaskFn + 'scope, { let wait_group = self.wait_group.clone(); // Allocate the `task` on the heap and erase the `'scope` bound. 
let task: Box<dyn TaskFn + 'scope> = Box::new(task); let task: Box<dyn StaticTaskFn> = unsafe { mem::transmute(task) }; self.thread_pool_context.compute(move || { task(); // decrement `WaitGroup` counter drop(wait_group); }); } /// Compute a task in the `ThreadPool` and return a `Future` of a result pub fn compute_result<F, R>(&self, task: F) -> TaskResult<R> where F: FnOnce() -> R + Send + 'scope, R: Clone + Send + 'static, { let future = TaskResult::default(); let future_ref = future.clone(); self.compute(move || { future_ref.set(task()); }); future } } /// A future that provides the task result pub struct TaskResult<R> { option: Arc<AtomicCell<TaskResultOption<R>>>, } // we can't derive `Clone` since it requires `R` to be `Clone` as well impl<R> Clone for TaskResult<R> { fn clone(&self) -> Self { Self { option: self.option.clone(), } } } /// The state of the `TaskResult` future #[derive(Debug)] enum TaskResultOption<R> { None, Result(R), Waiting(Waker), } impl<R> Default for TaskResultOption<R> { fn default() -> Self { TaskResultOption::None } } impl<R> TaskResult<R> { fn set(&self, result: R) { match self.option.swap(TaskResultOption::Result(result)) { TaskResultOption::None => {} // do nothing TaskResultOption::Result(_) => { unreachable!("There must not be a second computation of the result") } TaskResultOption::Waiting(waker) => waker.wake(), }; } } impl<R> Default for TaskResult<R> { fn default() -> Self { Self { option: Default::default(), } } } impl<R> Future for TaskResult<R> { type Output = R; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { match self .option .swap(TaskResultOption::Waiting(cx.waker().clone())) { TaskResultOption::None | TaskResultOption::Waiting(_) => Poll::Pending, TaskResultOption::Result(r) => Poll::Ready(r), } } } #[cfg(test)] mod tests { use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering}; use futures::future; use super::*; use crossbeam::utils::Backoff; #[test] #[allow(clippy::blacklisted_name)] 
fn one_task() { let thread_pool = ThreadPool::new(1); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(42, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] #[allow(clippy::blacklisted_name)] fn two_task_one_thread() { let thread_pool = ThreadPool::new(2); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(20, Ordering::SeqCst); })); let baz = foo.clone(); thread_pool.compute(Task::new(0, move || { baz.fetch_add(22, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] #[allow(clippy::blacklisted_name)] fn two_task_two_threads() { let thread_pool = ThreadPool::new(2); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(20, Ordering::SeqCst); })); let baz = foo.clone(); thread_pool.compute(Task::new(0, move || { baz.fetch_add(22, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] fn lots_of_tasks() { let thread_pool = ThreadPool::new(2); let number_of_tasks = 1_000_000; let tasks_completed = Arc::new(AtomicI32::new(0)); for _ in 0..number_of_tasks { let tasks_completed = tasks_completed.clone(); thread_pool.compute(Task::new(0, move || { tasks_completed.fetch_add(1, Ordering::SeqCst); })); } let backoff = Backoff::new(); while tasks_completed.load(Ordering::SeqCst) != number_of_tasks { backoff.snooze(); } } #[test] fn context() { let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let result = Arc::new(AtomicI32::new(0)); let result_clone = result.clone(); context.compute(move || { result_clone.fetch_add(42, Ordering::SeqCst); }); let backoff = Backoff::new(); while result.load(Ordering::SeqCst) != 42 { 
backoff.snooze(); } } #[test] fn scoped() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let result = AtomicUsize::new(0); context.scope(|scope| { for _ in 0..NUMBER_OF_TASKS { scope.compute(|| { result.fetch_add(1, Ordering::SeqCst); }); } }); assert_eq!(result.load(Ordering::SeqCst), NUMBER_OF_TASKS); } #[test] fn scoped_vec() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let mut result = vec![0; NUMBER_OF_TASKS]; context.scope(|scope| { for (chunk, i) in result.chunks_exact_mut(1).zip(0..NUMBER_OF_TASKS) { scope.compute(move || chunk[0] = i); } }); assert_eq!((0..NUMBER_OF_TASKS).collect::<Vec<_>>(), result); } #[test] fn compute_results() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let mut futures = Vec::with_capacity(NUMBER_OF_TASKS); context.scope(|scope| { for i in 0..NUMBER_OF_TASKS { futures.push(scope.compute_result(move || i)); } }); let result = futures::executor::block_on(future::join_all(futures)); assert_eq!(result, (0..NUMBER_OF_TASKS).collect::<Vec<_>>()); } #[test] fn parking() { let thread_pool = ThreadPool::new(1); let context = thread_pool.create_context(); // wait for the thread to be parked let backoff = Backoff::new(); while thread_pool.parked_threads.len() == 0 { backoff.snooze(); } let mut unparked = false; context.scope(|scope| scope.compute(|| unparked = true)); assert!(unparked) } }
degree_of_parallelism
identifier_name
thread_pool.rs
use std::marker::PhantomData; use std::sync::Arc; use std::thread::JoinHandle; use std::{iter, mem}; use crossbeam::atomic::AtomicCell; use crossbeam::deque::{Injector, Steal, Stealer, Worker}; use crossbeam::sync::{Parker, Unparker, WaitGroup}; use futures::task::{Context, Poll, Waker}; use futures::Future; use std::pin::Pin; /// A chunk of work with some metadata struct Task { _group_id: TaskGroupId, task_fn: Box<dyn TaskFn>, } type TaskGroupId = usize; pub trait TaskFn: FnOnce() + Send {} pub trait StaticTaskFn: TaskFn + 'static {} impl<T> TaskFn for T where T: FnOnce() + Send {} impl<T> StaticTaskFn for T where T: TaskFn + 'static {} impl Task { /// Create a new task to be executed at some point fn new<F>(group_id: TaskGroupId, f: F) -> Self where F: StaticTaskFn, { Self { _group_id: group_id, task_fn: Box::new(f), } } /// Executes the task /// TODO: use `FnTraits` once stable fn call_once(self) { (self.task_fn)() } } /// A worker thread pool for compute-heavy tasks /// /// TODO: increase threads to a certain maximum number if current threads don't produce results fast enough? /// TODO: schedule tasks based on group context? /// #[derive(Debug)] pub struct ThreadPool { target_thread_count: usize, global_queue: Arc<Injector<Task>>, stealers: Vec<Stealer<Task>>, threads: Vec<JoinHandle<()>>, next_group_id: AtomicCell<usize>, parked_threads: Arc<Injector<Unparker>>, } impl ThreadPool { /// Creates a new thread pool with `number_of_threads` threads. /// /// # Panics /// Panics if `number_of_threads` is 0. 
/// pub fn new(number_of_threads: usize) -> Self { assert!( number_of_threads > 0, "There must be at least one thread for the thread pool" ); let worker_deques: Vec<Worker<Task>> = (0..number_of_threads).map(|_| Worker::new_fifo()).collect(); let mut thread_pool = Self { target_thread_count: number_of_threads, global_queue: Arc::new(Injector::new()), stealers: worker_deques.iter().map(Worker::stealer).collect(), threads: Vec::with_capacity(number_of_threads), next_group_id: AtomicCell::new(0), parked_threads: Arc::new(Injector::new()), }; for worker_deque in worker_deques { let global_queue = thread_pool.global_queue.clone(); let stealers = thread_pool.stealers.clone(); let parked_threads = thread_pool.parked_threads.clone(); thread_pool.threads.push(std::thread::spawn(move || { Self::await_work(&worker_deque, &global_queue, &stealers, &parked_threads); })) } thread_pool } fn await_work( local: &Worker<Task>, global: &Injector<Task>, stealers: &[Stealer<Task>], parked_threads: &Injector<Unparker>, ) { let parker = Parker::new(); let unparker = parker.unparker(); loop { // Pop a task from the local queue, if not empty. let task = local.pop().or_else(|| { // Otherwise, we need to look for a task elsewhere. iter::repeat_with(|| { // Try stealing a batch of tasks from the global queue. global .steal_batch_and_pop(local) // Or try stealing a task from one of the other threads. .or_else(|| stealers.iter().map(Stealer::steal).collect()) }) // Loop while no task was stolen and any steal operation needs to be retried. .find(|s| !s.is_retry()) // Extract the stolen task, if there is one. .and_then(Steal::success) }); if let Some(task) = task
else { parked_threads.push(unparker.clone()); parker.park(); } } } pub fn create_context(&self) -> ThreadPoolContext { ThreadPoolContext::new(self, self.next_group_id.fetch_add(1)) } fn compute(&self, task: Task) { self.global_queue.push(task); // un-park a thread since there is new work if let Steal::Success(unparker) = self.parked_threads.steal() { unparker.unpark(); } } } impl Default for ThreadPool { fn default() -> Self { Self::new(num_cpus::get()) } } /// A computation context for a group that spawns tasks in a `ThreadPool` #[derive(Copy, Clone, Debug)] pub struct ThreadPoolContext<'pool> { thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId, } impl<'pool> ThreadPoolContext<'pool> { /// Create a new `ThreadPoolContext` fn new(thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId) -> Self { Self { thread_pool, task_group_id, } } /// What is the degree of parallelism that the `ThreadPool` aims for? /// This is helpful to determine how to split the work into tasks. pub fn degree_of_parallelism(&self) -> usize { self.thread_pool.target_thread_count } /// Compute a task in the `ThreadPool` pub fn compute<F>(&self, task: F) where F: StaticTaskFn, { self.thread_pool .compute(Task::new(self.task_group_id, task)); } /// Execute a bunch of tasks in a scope that blocks until all tasks are finished. /// Provides a lifetime for that scope. /// TODO: provide an async version so that async workflows can do something in the meantime? 
/// TODO: handle panics: if a thread panics, this function will block forever pub fn scope<'scope, S>(&'pool self, scope_fn: S) where S: FnOnce(&Scope<'pool, 'scope>) + 'scope, { let scope = Scope::<'pool, 'scope> { thread_pool_context: &self, wait_group: WaitGroup::new(), _scope_marker: PhantomData, }; scope_fn(&scope); scope.wait_group.wait(); } } /// A scope in which you can execute tasks and it blocks until all tasks are finished #[derive(Debug)] pub struct Scope<'pool, 'scope> { thread_pool_context: &'pool ThreadPoolContext<'pool>, wait_group: WaitGroup, // needs to be invariant to `'scope`, cf. https://github.com/crossbeam-rs/crossbeam/pull/226/files#r232721183 _scope_marker: PhantomData<&'scope mut &'scope ()>, } impl<'pool, 'scope> Scope<'pool, 'scope> { /// Compute a task in the `ThreadPool` pub fn compute<F>(&self, task: F) where F: TaskFn + 'scope, { let wait_group = self.wait_group.clone(); // Allocate the `task` on the heap and erase the `'scope` bound. let task: Box<dyn TaskFn + 'scope> = Box::new(task); let task: Box<dyn StaticTaskFn> = unsafe { mem::transmute(task) }; self.thread_pool_context.compute(move || { task(); // decrement `WaitGroup` counter drop(wait_group); }); } /// Compute a task in the `ThreadPool` and return a `Future` of a result pub fn compute_result<F, R>(&self, task: F) -> TaskResult<R> where F: FnOnce() -> R + Send + 'scope, R: Clone + Send + 'static, { let future = TaskResult::default(); let future_ref = future.clone(); self.compute(move || { future_ref.set(task()); }); future } } /// A future that provides the task result pub struct TaskResult<R> { option: Arc<AtomicCell<TaskResultOption<R>>>, } // we can't derive `Clone` since it requires `R` to be `Clone` as well impl<R> Clone for TaskResult<R> { fn clone(&self) -> Self { Self { option: self.option.clone(), } } } /// The state of the `TaskResult` future #[derive(Debug)] enum TaskResultOption<R> { None, Result(R), Waiting(Waker), } impl<R> Default for TaskResultOption<R> { fn 
default() -> Self { TaskResultOption::None } } impl<R> TaskResult<R> { fn set(&self, result: R) { match self.option.swap(TaskResultOption::Result(result)) { TaskResultOption::None => {} // do nothing TaskResultOption::Result(_) => { unreachable!("There must not be a second computation of the result") } TaskResultOption::Waiting(waker) => waker.wake(), }; } } impl<R> Default for TaskResult<R> { fn default() -> Self { Self { option: Default::default(), } } } impl<R> Future for TaskResult<R> { type Output = R; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { match self .option .swap(TaskResultOption::Waiting(cx.waker().clone())) { TaskResultOption::None | TaskResultOption::Waiting(_) => Poll::Pending, TaskResultOption::Result(r) => Poll::Ready(r), } } } #[cfg(test)] mod tests { use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering}; use futures::future; use super::*; use crossbeam::utils::Backoff; #[test] #[allow(clippy::blacklisted_name)] fn one_task() { let thread_pool = ThreadPool::new(1); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(42, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] #[allow(clippy::blacklisted_name)] fn two_task_one_thread() { let thread_pool = ThreadPool::new(2); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(20, Ordering::SeqCst); })); let baz = foo.clone(); thread_pool.compute(Task::new(0, move || { baz.fetch_add(22, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] #[allow(clippy::blacklisted_name)] fn two_task_two_threads() { let thread_pool = ThreadPool::new(2); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(20, Ordering::SeqCst); })); let baz = 
foo.clone(); thread_pool.compute(Task::new(0, move || { baz.fetch_add(22, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] fn lots_of_tasks() { let thread_pool = ThreadPool::new(2); let number_of_tasks = 1_000_000; let tasks_completed = Arc::new(AtomicI32::new(0)); for _ in 0..number_of_tasks { let tasks_completed = tasks_completed.clone(); thread_pool.compute(Task::new(0, move || { tasks_completed.fetch_add(1, Ordering::SeqCst); })); } let backoff = Backoff::new(); while tasks_completed.load(Ordering::SeqCst) != number_of_tasks { backoff.snooze(); } } #[test] fn context() { let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let result = Arc::new(AtomicI32::new(0)); let result_clone = result.clone(); context.compute(move || { result_clone.fetch_add(42, Ordering::SeqCst); }); let backoff = Backoff::new(); while result.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] fn scoped() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let result = AtomicUsize::new(0); context.scope(|scope| { for _ in 0..NUMBER_OF_TASKS { scope.compute(|| { result.fetch_add(1, Ordering::SeqCst); }); } }); assert_eq!(result.load(Ordering::SeqCst), NUMBER_OF_TASKS); } #[test] fn scoped_vec() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let mut result = vec![0; NUMBER_OF_TASKS]; context.scope(|scope| { for (chunk, i) in result.chunks_exact_mut(1).zip(0..NUMBER_OF_TASKS) { scope.compute(move || chunk[0] = i); } }); assert_eq!((0..NUMBER_OF_TASKS).collect::<Vec<_>>(), result); } #[test] fn compute_results() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let mut futures = Vec::with_capacity(NUMBER_OF_TASKS); context.scope(|scope| { for i in 0..NUMBER_OF_TASKS { 
futures.push(scope.compute_result(move || i)); } }); let result = futures::executor::block_on(future::join_all(futures)); assert_eq!(result, (0..NUMBER_OF_TASKS).collect::<Vec<_>>()); } #[test] fn parking() { let thread_pool = ThreadPool::new(1); let context = thread_pool.create_context(); // wait for the thread to be parked let backoff = Backoff::new(); while thread_pool.parked_threads.len() == 0 { backoff.snooze(); } let mut unparked = false; context.scope(|scope| scope.compute(|| unparked = true)); assert!(unparked) } }
{ // TODO: recover panics task.call_once(); }
conditional_block
thread_pool.rs
use std::marker::PhantomData; use std::sync::Arc; use std::thread::JoinHandle; use std::{iter, mem}; use crossbeam::atomic::AtomicCell; use crossbeam::deque::{Injector, Steal, Stealer, Worker}; use crossbeam::sync::{Parker, Unparker, WaitGroup}; use futures::task::{Context, Poll, Waker}; use futures::Future; use std::pin::Pin; /// A chunk of work with some metadata struct Task { _group_id: TaskGroupId, task_fn: Box<dyn TaskFn>, } type TaskGroupId = usize; pub trait TaskFn: FnOnce() + Send {} pub trait StaticTaskFn: TaskFn + 'static {} impl<T> TaskFn for T where T: FnOnce() + Send {} impl<T> StaticTaskFn for T where T: TaskFn + 'static {} impl Task { /// Create a new task to be executed at some point fn new<F>(group_id: TaskGroupId, f: F) -> Self where F: StaticTaskFn, { Self { _group_id: group_id, task_fn: Box::new(f), } } /// Executes the task /// TODO: use `FnTraits` once stable fn call_once(self) { (self.task_fn)() } } /// A worker thread pool for compute-heavy tasks /// /// TODO: increase threads to a certain maximum number if current threads don't produce results fast enough? /// TODO: schedule tasks based on group context? /// #[derive(Debug)] pub struct ThreadPool { target_thread_count: usize, global_queue: Arc<Injector<Task>>, stealers: Vec<Stealer<Task>>, threads: Vec<JoinHandle<()>>, next_group_id: AtomicCell<usize>, parked_threads: Arc<Injector<Unparker>>, } impl ThreadPool { /// Creates a new thread pool with `number_of_threads` threads. /// /// # Panics /// Panics if `number_of_threads` is 0. 
/// pub fn new(number_of_threads: usize) -> Self { assert!( number_of_threads > 0, "There must be at least one thread for the thread pool" ); let worker_deques: Vec<Worker<Task>> = (0..number_of_threads).map(|_| Worker::new_fifo()).collect(); let mut thread_pool = Self { target_thread_count: number_of_threads, global_queue: Arc::new(Injector::new()), stealers: worker_deques.iter().map(Worker::stealer).collect(), threads: Vec::with_capacity(number_of_threads), next_group_id: AtomicCell::new(0), parked_threads: Arc::new(Injector::new()), }; for worker_deque in worker_deques { let global_queue = thread_pool.global_queue.clone(); let stealers = thread_pool.stealers.clone(); let parked_threads = thread_pool.parked_threads.clone(); thread_pool.threads.push(std::thread::spawn(move || { Self::await_work(&worker_deque, &global_queue, &stealers, &parked_threads); })) } thread_pool } fn await_work( local: &Worker<Task>, global: &Injector<Task>, stealers: &[Stealer<Task>], parked_threads: &Injector<Unparker>, ) { let parker = Parker::new(); let unparker = parker.unparker(); loop { // Pop a task from the local queue, if not empty. let task = local.pop().or_else(|| { // Otherwise, we need to look for a task elsewhere. iter::repeat_with(|| { // Try stealing a batch of tasks from the global queue. global .steal_batch_and_pop(local) // Or try stealing a task from one of the other threads. .or_else(|| stealers.iter().map(Stealer::steal).collect()) }) // Loop while no task was stolen and any steal operation needs to be retried. .find(|s| !s.is_retry()) // Extract the stolen task, if there is one. 
.and_then(Steal::success) }); if let Some(task) = task { // TODO: recover panics task.call_once(); } else { parked_threads.push(unparker.clone()); parker.park(); } } } pub fn create_context(&self) -> ThreadPoolContext { ThreadPoolContext::new(self, self.next_group_id.fetch_add(1)) } fn compute(&self, task: Task) { self.global_queue.push(task); // un-park a thread since there is new work if let Steal::Success(unparker) = self.parked_threads.steal() { unparker.unpark(); } } } impl Default for ThreadPool { fn default() -> Self { Self::new(num_cpus::get()) } } /// A computation context for a group that spawns tasks in a `ThreadPool` #[derive(Copy, Clone, Debug)] pub struct ThreadPoolContext<'pool> { thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId, } impl<'pool> ThreadPoolContext<'pool> { /// Create a new `ThreadPoolContext` fn new(thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId) -> Self { Self { thread_pool, task_group_id, } } /// What is the degree of parallelism that the `ThreadPool` aims for? /// This is helpful to determine how to split the work into tasks. pub fn degree_of_parallelism(&self) -> usize { self.thread_pool.target_thread_count } /// Compute a task in the `ThreadPool` pub fn compute<F>(&self, task: F) where F: StaticTaskFn, { self.thread_pool .compute(Task::new(self.task_group_id, task)); } /// Execute a bunch of tasks in a scope that blocks until all tasks are finished. /// Provides a lifetime for that scope. /// TODO: provide an async version so that async workflows can do something in the meantime? 
/// TODO: handle panics: if a thread panics, this function will block forever pub fn scope<'scope, S>(&'pool self, scope_fn: S) where S: FnOnce(&Scope<'pool, 'scope>) + 'scope, { let scope = Scope::<'pool, 'scope> { thread_pool_context: &self, wait_group: WaitGroup::new(), _scope_marker: PhantomData, }; scope_fn(&scope); scope.wait_group.wait(); } } /// A scope in which you can execute tasks and it blocks until all tasks are finished #[derive(Debug)] pub struct Scope<'pool, 'scope> { thread_pool_context: &'pool ThreadPoolContext<'pool>, wait_group: WaitGroup, // needs to be invariant to `'scope`, cf. https://github.com/crossbeam-rs/crossbeam/pull/226/files#r232721183 _scope_marker: PhantomData<&'scope mut &'scope ()>, } impl<'pool, 'scope> Scope<'pool, 'scope> { /// Compute a task in the `ThreadPool` pub fn compute<F>(&self, task: F) where F: TaskFn + 'scope, { let wait_group = self.wait_group.clone(); // Allocate the `task` on the heap and erase the `'scope` bound. let task: Box<dyn TaskFn + 'scope> = Box::new(task); let task: Box<dyn StaticTaskFn> = unsafe { mem::transmute(task) }; self.thread_pool_context.compute(move || { task(); // decrement `WaitGroup` counter drop(wait_group); }); } /// Compute a task in the `ThreadPool` and return a `Future` of a result pub fn compute_result<F, R>(&self, task: F) -> TaskResult<R> where F: FnOnce() -> R + Send + 'scope, R: Clone + Send + 'static, { let future = TaskResult::default(); let future_ref = future.clone(); self.compute(move || { future_ref.set(task()); }); future } } /// A future that provides the task result pub struct TaskResult<R> { option: Arc<AtomicCell<TaskResultOption<R>>>, } // we can't derive `Clone` since it requires `R` to be `Clone` as well impl<R> Clone for TaskResult<R> { fn clone(&self) -> Self { Self { option: self.option.clone(), } } } /// The state of the `TaskResult` future #[derive(Debug)] enum TaskResultOption<R> { None, Result(R), Waiting(Waker), } impl<R> Default for TaskResultOption<R> { fn 
default() -> Self { TaskResultOption::None } } impl<R> TaskResult<R> { fn set(&self, result: R) { match self.option.swap(TaskResultOption::Result(result)) { TaskResultOption::None => {} // do nothing TaskResultOption::Result(_) => { unreachable!("There must not be a second computation of the result") } TaskResultOption::Waiting(waker) => waker.wake(), }; } } impl<R> Default for TaskResult<R> { fn default() -> Self { Self { option: Default::default(), } } } impl<R> Future for TaskResult<R> { type Output = R; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { match self .option .swap(TaskResultOption::Waiting(cx.waker().clone())) { TaskResultOption::None | TaskResultOption::Waiting(_) => Poll::Pending, TaskResultOption::Result(r) => Poll::Ready(r), } } } #[cfg(test)] mod tests { use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering}; use futures::future; use super::*; use crossbeam::utils::Backoff; #[test] #[allow(clippy::blacklisted_name)] fn one_task() { let thread_pool = ThreadPool::new(1); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(42, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] #[allow(clippy::blacklisted_name)] fn two_task_one_thread() { let thread_pool = ThreadPool::new(2); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(20, Ordering::SeqCst); })); let baz = foo.clone(); thread_pool.compute(Task::new(0, move || { baz.fetch_add(22, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] #[allow(clippy::blacklisted_name)] fn two_task_two_threads() { let thread_pool = ThreadPool::new(2); let foo = Arc::new(AtomicI32::new(0)); let bar = foo.clone(); thread_pool.compute(Task::new(0, move || { bar.fetch_add(20, Ordering::SeqCst); })); let baz = 
foo.clone(); thread_pool.compute(Task::new(0, move || { baz.fetch_add(22, Ordering::SeqCst); })); let backoff = Backoff::new(); while foo.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] fn lots_of_tasks() { let thread_pool = ThreadPool::new(2); let number_of_tasks = 1_000_000; let tasks_completed = Arc::new(AtomicI32::new(0)); for _ in 0..number_of_tasks { let tasks_completed = tasks_completed.clone(); thread_pool.compute(Task::new(0, move || { tasks_completed.fetch_add(1, Ordering::SeqCst); })); } let backoff = Backoff::new(); while tasks_completed.load(Ordering::SeqCst) != number_of_tasks { backoff.snooze(); } } #[test] fn context() { let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let result = Arc::new(AtomicI32::new(0)); let result_clone = result.clone(); context.compute(move || { result_clone.fetch_add(42, Ordering::SeqCst); }); let backoff = Backoff::new(); while result.load(Ordering::SeqCst) != 42 { backoff.snooze(); } } #[test] fn scoped() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let result = AtomicUsize::new(0); context.scope(|scope| { for _ in 0..NUMBER_OF_TASKS { scope.compute(|| { result.fetch_add(1, Ordering::SeqCst); });
#[test] fn scoped_vec() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let mut result = vec![0; NUMBER_OF_TASKS]; context.scope(|scope| { for (chunk, i) in result.chunks_exact_mut(1).zip(0..NUMBER_OF_TASKS) { scope.compute(move || chunk[0] = i); } }); assert_eq!((0..NUMBER_OF_TASKS).collect::<Vec<_>>(), result); } #[test] fn compute_results() { const NUMBER_OF_TASKS: usize = 42; let thread_pool = ThreadPool::new(2); let context = thread_pool.create_context(); let mut futures = Vec::with_capacity(NUMBER_OF_TASKS); context.scope(|scope| { for i in 0..NUMBER_OF_TASKS { futures.push(scope.compute_result(move || i)); } }); let result = futures::executor::block_on(future::join_all(futures)); assert_eq!(result, (0..NUMBER_OF_TASKS).collect::<Vec<_>>()); } #[test] fn parking() { let thread_pool = ThreadPool::new(1); let context = thread_pool.create_context(); // wait for the thread to be parked let backoff = Backoff::new(); while thread_pool.parked_threads.len() == 0 { backoff.snooze(); } let mut unparked = false; context.scope(|scope| scope.compute(|| unparked = true)); assert!(unparked) } }
} }); assert_eq!(result.load(Ordering::SeqCst), NUMBER_OF_TASKS); }
random_line_split
ffi.py
from .ctoybox import Game, State as FrameState, Input import numpy as np from PIL import Image import json from typing import Dict, Any, List, Tuple, Union, Optional def json_str(js: Union[Dict[str, Any], Input, str]) -> str: """ Turn an object into a JSON string -- handles dictionaries, the Input class, and JSON you've already prepared (e.g., strings). """ if type(js) is dict: js = json.dumps(js) elif type(js) is Input: js = json.dumps(js.__dict__) elif type(js) is not str: raise ValueError( "Unknown json type: %s (only str and dict supported)" % type(js) ) return js class Simulator(object): """ The Simulator is an instance of a game configuration. You can call new_game on it to begin. """ def __init__(self, game_name, sim=None): """ Construct a new instance. Parameters: game_name: one of "breakout", "amidar", etc. sim: optionally a Rust pointer to an existing simulator. """ if sim is None: sim = Game(game_name) self.__sim = sim # sim should be a pointer self.game_name = game_name def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass def set_seed(self, seed: int): """Configure the random number generator that spawns new game states. Parameters: seed: a parameter to reset the built-in random number generator. 
""" self.__sim.seed(seed) def get_frame_size(self) -> Tuple[int, int]: """Get the width in pixels of the frames this game renders.""" return self.__sim.frame_size() def get_frame_width(self) -> int: """Get the width in pixels of the frames this game renders.""" return self.__sim.frame_size()[0] def get_frame_height(self) -> int: """Get the height in pixels of the frames this game renders.""" return self.__sim.frame_size()[1] def get_simulator(self) -> Game: """Get access to the raw simulator pointer.""" return self.__sim def new_game(self) -> "State": """Start a new game.""" return State(self, self.__sim.new_game()) def state_from_json(self, js: Union[Dict[str, Any], str]) -> "State": """Generate a State from the state json and this configuration. Parameters: js: a JSON object or string containing a serialized state. """ state: FrameState = self.__sim.new_state(json_str(js)) return State(self, state=state) def to_json(self) -> Dict[str, Any]: """Get the configuration of this simulator/config as JSON""" return json.loads(self.__sim.to_json()) def from_json(self, config_js: Union[Dict[str, Any], str]): """Mutably update this simulator/config with the replacement json.""" self.__sim = self.__sim.from_json(json_str(config_js)) def schema_for_state(self) -> Dict[str, Any]: """Get the JSON Schema for any state for this game.""" return json.loads(self.__sim.frame_schema()) def schema_for_config(self) -> Dict[str, Any]: """Get the JSON Schema for any config for this game.""" return json.loads(self.__sim.config_schema()) class State(object): """ The State object represents everything the game needs to know about any single simulated frame. You can rewind in time by storing and restoring these state representations. - Access the json: ``to_json`` - Access the image: ``render_frame`` """ def __init__(self, sim: Simulator, state=None): """ Construct a new State instance wrapper. Parameters: sim: The simulator responsible for this state. 
state: Optional pointer to a state to use (otherwise it will create one). """ self.sim = sim """A reference to the simulator that created this state.""" self.__state = state or sim.__sim.new_game() """The raw pointer to the state itself.""" self.game_name = sim.game_name """The name of the game that created this state.""" def __enter__(self): return self def __del__(self):
def __exit__(self, exc_type, exc_value, traceback): self.__del__() def clone(self) -> 'State': """Quickly make a copy of this state; should be more efficient than saving the JSON.""" return State(self.sim, state=self.get_state().copy()) def get_state(self) -> FrameState: """Get the raw state pointer.""" assert self.__state is not None return self.__state def lives(self) -> int: """How many lives are remaining in the current state?""" return self.__state.lives() def level(self) -> int: """How many levels have been completed in the current state?""" return self.__state.level() def score(self) -> int: """How many points have been earned in the current state?""" return self.__state.score() def game_over(self): """Determine whether the game has ended; i.e., the player has run out of lives. >>> assert self.lives() < 0 == self.game_over() """ return self.lives() < 0 def query_json( self, query: str, args: Union[Dict[str, Any], str] = "null" ) -> Dict[str, Any]: """ Ask a question of the Rust state; queries are currently implemented manually. Parameters: query: the message to send to the rust state. args: the arguments to send to the rust state, defaults to "null". Returns: response: A JSON response loaded to python objects. Raises: ValueError: if anything goes wrong with the query ```python with Toybox("breakout") as tb: tb.query_json("bricks_remaining") ``` """ return json.loads(self.__state.query(json_str(query), json_str(args))) def render_frame(self, sim: Simulator, grayscale: bool = True) -> np.array: """Generate an image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. grayscale: True if we want to render in grayscale rather than in color (RGBA). """ if grayscale: return self.render_frame_rgb(sim) else: return self.render_frame_color(sim) def render_frame_color(self, sim: Simulator) -> np.array: """Generate an RGBA image from the current frame state object. 
Parameters: sim: the simulator to use; this tells us the width/height necessary. """ (w, h) = sim.get_frame_size() rgba = 4 size = h * w * rgba frame = bytearray(size) self.get_state().render_into_buffer(frame, True) return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba) def render_frame_rgb(self, sim: Simulator) -> np.array: """Generate an RGB image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. """ rgba_frame = self.render_frame_color(sim) return rgba_frame[:, :, :3] def render_frame_grayscale(self, sim: Simulator) -> np.array: """Generate a grayscale image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. """ (w, h) = sim.get_frame_size() depth = 1 size = h * w * depth frame = bytearray(size) self.get_state().render_into_buffer(frame, False) return np.asarray(frame, dtype=np.uint8).reshape(h, w, depth) def to_json(self) -> Dict[str, Any]: """Get a JSON representation of the state.""" return json.loads(self.get_state().to_json()) class Toybox(object): """ This is a stateful representation of Toybox -- since it manages memory, we provide ``__enter__`` and ``__exit__`` usage for Python's with-blocks: ```python with Toybox("amidar") as tb: print(tb.get_score()) # the 'tb' variable only lives in the block. ``` Important: Note how we should use this in a with-block; this will clean up pointers and prevent memory leaks. """ def __init__(self, game_name: str, grayscale: bool = True, frameskip: int = 0, seed: Optional[int] = None, withstate: Optional[dict] = None): """ Construct a new Toybox state/game wrapper. Use this in a with block! Parameters: game_name: One of "breakout", "space_invaders", "amidar", etc. grayscale: Toybox can render directly to grayscale, saving time. Default is True. frameskip: When an action is submitted, for how many extra frames should it be applied? Default is 0. 
seed: The seed """ self.game_name = game_name self.frames_per_action = frameskip + 1 self.rsimulator = Simulator(game_name) self.rstate = self.rsimulator.new_game() self.grayscale = grayscale if seed: self.set_seed(seed) self.new_game() if withstate: self.write_state_json(withstate) def new_game(self): """ Modify this Toybox wrapper to have a new_game state. Important: This discards the old state! """ old_state = self.rstate del old_state self.rstate = self.rsimulator.new_game() def get_height(self) -> int: """Get the height of the rendered game in pixels.""" return self.rsimulator.get_frame_height() def get_width(self) -> int: """Get the width of the rendered game in pixels.""" return self.rsimulator.get_frame_width() def get_legal_action_set(self) -> List[int]: """Get the set of actions consumed by this game: they are ALE numbered.""" sim = self.rsimulator.get_simulator() return sim.legal_actions() def apply_ale_action(self, action_int: int): """Takes an integer corresponding to an action, as specified in ALE. This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor. ```python ALE_INPUT_MAPPING = { 0 : "NOOP", 1 : "FIRE", 2 : "UP", 3 : "RIGHT", 4 : "LEFT", 5 : "DOWN", 6 : "UPRIGHT", 7 : "UPLEFT", 8 : "DOWNRIGHT", 9 : "DOWNLEFT", 10 : "UPFIRE", 11 : "RIGHTFIRE", 12 : "LEFTFIRE", 13 : "DOWNFIRE", 14 : "UPRIGHTFIRE", 15 : "UPLEFTFIRE", 16 : "DOWNRIGHTFIRE", 17 : "DOWNLEFTFIRE" } ``` Parameters: action_int: A number from 0 to 17 inclusive. """ # implement frameskip(k) by sending the action (k+1) times every time we have an action. for _ in range(self.frames_per_action): if not self.rstate.get_state().apply_ale_action(action_int): raise ValueError( "Expected to apply action, but failed: {0}".format(action_int) ) def apply_action(self, action_input_obj: Input): """Takes an [ctoybox.Input][] action and applies it - unlike the ALE actions (which allow some permutations) this allows for fine-grained button pressing. 
This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor. Parameters: action_input_obj: An instance of the [ctoybox.Input][] class. """ # implement frameskip(k) by sending the action (k+1) times every time we have an action. for _ in range(self.frames_per_action): self.rstate.get_state().apply_action(action_input_obj) def get_state(self) -> np.array: """This state here actually refers to the graphical, RGBA or grayscale representation of the current state.""" return self.rstate.render_frame(self.rsimulator, self.grayscale) def set_seed(self, seed: int): """Control the random number generator of the config -- only affects a new_game. Parameters: seed: a parameter to reset the built-in random number generator. """ self.rsimulator.set_seed(seed) # Maybe call new game here? def save_frame_image(self, path: str, grayscale: bool = False): """Save the current frame image to a PNG file. Parameters: path: the filename to save to. grayscale: whether images should be saved in color or black & white. """ img = None if grayscale: img = Image.fromarray( self.rstate.render_frame_grayscale(self.rsimulator), "L" ) else: img = Image.fromarray( self.rstate.render_frame_color(self.rsimulator), "RGBA" ) img.save(path, format="png") def get_rgb_frame(self) -> np.array: """Get the RGB frame as a numpy array.""" return self.rstate.render_frame_rgb(self.rsimulator) def get_score(self) -> int: """Access the current score. Returns: The number of points earned in the current state.""" return self.rstate.score() def get_lives(self) -> int: """Access the number of lives. Returns: The number of lives remaining in the current state.""" return self.rstate.lives() def get_level(self) -> int: """ Access the number of levels. Returns: The number of levels completed in the current state.""" return self.rstate.level() def game_over(self) -> bool: """ Check for game over condition. Returns: ``True`` if the player has run out of lives in the current state. 
""" return self.rstate.game_over() def state_to_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python object.""" return self.rstate.to_json() def to_state_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python dict. Important: This method is deprecated; please use ``state_to_json`` instead! """ return self.state_to_json() def config_to_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python dict.""" return self.rsimulator.to_json() def write_state_json(self, js: Dict[str, Any]): """Overwrite the state's JSON representation from a python dict. Parameters: js: the python representation of the JSON state. """ old_state = self.rstate del old_state self.rstate = self.rsimulator.state_from_json(js) def write_config_json(self, config_js: Dict[str, Any]): """Overwrite the config's JSON representation from a python dict. It is likely that some changes will be seen until you call new_game() Parameters: config_js: the python representation of the config JSON """ # from_json replaces simulator! self.rsimulator.from_json(config_js) # new_game replaces state! self.new_game() def query_state_json( self, query: str, args: Union[Dict[str, Any], str] = "null" ) -> Dict[str, Any]: """Submit a query to the game's query system -- faster than accessing the whole JSON for quick introspection. Parameters: query: the query string to send to the game. args: a JSON argument to attach to the query string. """ return self.rstate.query_json(query, args) def __del__(self): self.rstate = None self.rsimulator = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.__del__() def schema_for_state(self) -> Dict[str, Any]: """Get the JSON Schema for the frame State object.""" return self.rsimulator.schema_for_state() def schema_for_config(self) -> Dict[str, Any]: """Get the JSON Schema for the Config object.""" return self.rsimulator.schema_for_config()
self.__state = None self.sim = None
identifier_body
ffi.py
from .ctoybox import Game, State as FrameState, Input import numpy as np from PIL import Image import json from typing import Dict, Any, List, Tuple, Union, Optional def json_str(js: Union[Dict[str, Any], Input, str]) -> str: """ Turn an object into a JSON string -- handles dictionaries, the Input class, and JSON you've already prepared (e.g., strings). """ if type(js) is dict: js = json.dumps(js) elif type(js) is Input: js = json.dumps(js.__dict__) elif type(js) is not str: raise ValueError( "Unknown json type: %s (only str and dict supported)" % type(js) ) return js class Simulator(object): """ The Simulator is an instance of a game configuration. You can call new_game on it to begin. """ def __init__(self, game_name, sim=None): """ Construct a new instance. Parameters: game_name: one of "breakout", "amidar", etc. sim: optionally a Rust pointer to an existing simulator. """ if sim is None: sim = Game(game_name) self.__sim = sim # sim should be a pointer self.game_name = game_name def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass def set_seed(self, seed: int): """Configure the random number generator that spawns new game states. Parameters: seed: a parameter to reset the built-in random number generator. 
""" self.__sim.seed(seed) def get_frame_size(self) -> Tuple[int, int]: """Get the width in pixels of the frames this game renders.""" return self.__sim.frame_size() def get_frame_width(self) -> int: """Get the width in pixels of the frames this game renders.""" return self.__sim.frame_size()[0] def get_frame_height(self) -> int: """Get the height in pixels of the frames this game renders.""" return self.__sim.frame_size()[1] def get_simulator(self) -> Game: """Get access to the raw simulator pointer.""" return self.__sim def new_game(self) -> "State": """Start a new game.""" return State(self, self.__sim.new_game()) def state_from_json(self, js: Union[Dict[str, Any], str]) -> "State": """Generate a State from the state json and this configuration. Parameters: js: a JSON object or string containing a serialized state. """ state: FrameState = self.__sim.new_state(json_str(js)) return State(self, state=state) def to_json(self) -> Dict[str, Any]: """Get the configuration of this simulator/config as JSON""" return json.loads(self.__sim.to_json()) def from_json(self, config_js: Union[Dict[str, Any], str]): """Mutably update this simulator/config with the replacement json.""" self.__sim = self.__sim.from_json(json_str(config_js)) def schema_for_state(self) -> Dict[str, Any]: """Get the JSON Schema for any state for this game.""" return json.loads(self.__sim.frame_schema()) def schema_for_config(self) -> Dict[str, Any]: """Get the JSON Schema for any config for this game.""" return json.loads(self.__sim.config_schema()) class State(object): """ The State object represents everything the game needs to know about any single simulated frame. You can rewind in time by storing and restoring these state representations. - Access the json: ``to_json`` - Access the image: ``render_frame`` """ def __init__(self, sim: Simulator, state=None): """ Construct a new State instance wrapper. Parameters: sim: The simulator responsible for this state. 
state: Optional pointer to a state to use (otherwise it will create one). """ self.sim = sim """A reference to the simulator that created this state.""" self.__state = state or sim.__sim.new_game() """The raw pointer to the state itself.""" self.game_name = sim.game_name """The name of the game that created this state.""" def __enter__(self): return self def __del__(self): self.__state = None self.sim = None def __exit__(self, exc_type, exc_value, traceback): self.__del__() def clone(self) -> 'State': """Quickly make a copy of this state; should be more efficient than saving the JSON.""" return State(self.sim, state=self.get_state().copy()) def get_state(self) -> FrameState: """Get the raw state pointer.""" assert self.__state is not None return self.__state def lives(self) -> int: """How many lives are remaining in the current state?""" return self.__state.lives() def level(self) -> int: """How many levels have been completed in the current state?""" return self.__state.level() def score(self) -> int: """How many points have been earned in the current state?""" return self.__state.score() def game_over(self): """Determine whether the game has ended; i.e., the player has run out of lives. >>> assert self.lives() < 0 == self.game_over() """ return self.lives() < 0 def query_json( self, query: str, args: Union[Dict[str, Any], str] = "null" ) -> Dict[str, Any]: """ Ask a question of the Rust state; queries are currently implemented manually. Parameters: query: the message to send to the rust state. args: the arguments to send to the rust state, defaults to "null". Returns: response: A JSON response loaded to python objects. Raises: ValueError: if anything goes wrong with the query ```python with Toybox("breakout") as tb: tb.query_json("bricks_remaining") ``` """ return json.loads(self.__state.query(json_str(query), json_str(args))) def render_frame(self, sim: Simulator, grayscale: bool = True) -> np.array: """Generate an image from the current frame state object. 
Parameters: sim: the simulator to use; this tells us the width/height necessary. grayscale: True if we want to render in grayscale rather than in color (RGBA). """ if grayscale: return self.render_frame_rgb(sim) else: return self.render_frame_color(sim) def render_frame_color(self, sim: Simulator) -> np.array: """Generate an RGBA image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. """ (w, h) = sim.get_frame_size() rgba = 4 size = h * w * rgba frame = bytearray(size) self.get_state().render_into_buffer(frame, True) return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba) def render_frame_rgb(self, sim: Simulator) -> np.array: """Generate an RGB image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. """ rgba_frame = self.render_frame_color(sim) return rgba_frame[:, :, :3] def render_frame_grayscale(self, sim: Simulator) -> np.array: """Generate a grayscale image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. """ (w, h) = sim.get_frame_size() depth = 1 size = h * w * depth frame = bytearray(size) self.get_state().render_into_buffer(frame, False) return np.asarray(frame, dtype=np.uint8).reshape(h, w, depth) def to_json(self) -> Dict[str, Any]: """Get a JSON representation of the state.""" return json.loads(self.get_state().to_json()) class Toybox(object): """ This is a stateful representation of Toybox -- since it manages memory, we provide ``__enter__`` and ``__exit__`` usage for Python's with-blocks: ```python with Toybox("amidar") as tb: print(tb.get_score()) # the 'tb' variable only lives in the block. ``` Important: Note how we should use this in a with-block; this will clean up pointers and prevent memory leaks. 
""" def __init__(self, game_name: str, grayscale: bool = True, frameskip: int = 0, seed: Optional[int] = None, withstate: Optional[dict] = None): """ Construct a new Toybox state/game wrapper. Use this in a with block! Parameters: game_name: One of "breakout", "space_invaders", "amidar", etc. grayscale: Toybox can render directly to grayscale, saving time. Default is True. frameskip: When an action is submitted, for how many extra frames should it be applied? Default is 0. seed: The seed """ self.game_name = game_name self.frames_per_action = frameskip + 1 self.rsimulator = Simulator(game_name) self.rstate = self.rsimulator.new_game() self.grayscale = grayscale if seed: self.set_seed(seed) self.new_game() if withstate: self.write_state_json(withstate) def new_game(self): """ Modify this Toybox wrapper to have a new_game state. Important: This discards the old state! """ old_state = self.rstate del old_state self.rstate = self.rsimulator.new_game() def get_height(self) -> int: """Get the height of the rendered game in pixels.""" return self.rsimulator.get_frame_height() def get_width(self) -> int: """Get the width of the rendered game in pixels.""" return self.rsimulator.get_frame_width() def get_legal_action_set(self) -> List[int]: """Get the set of actions consumed by this game: they are ALE numbered.""" sim = self.rsimulator.get_simulator() return sim.legal_actions() def apply_ale_action(self, action_int: int): """Takes an integer corresponding to an action, as specified in ALE. This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor. ```python ALE_INPUT_MAPPING = { 0 : "NOOP", 1 : "FIRE", 2 : "UP", 3 : "RIGHT", 4 : "LEFT", 5 : "DOWN", 6 : "UPRIGHT", 7 : "UPLEFT", 8 : "DOWNRIGHT", 9 : "DOWNLEFT", 10 : "UPFIRE", 11 : "RIGHTFIRE", 12 : "LEFTFIRE", 13 : "DOWNFIRE", 14 : "UPRIGHTFIRE", 15 : "UPLEFTFIRE", 16 : "DOWNRIGHTFIRE", 17 : "DOWNLEFTFIRE" } ``` Parameters: action_int: A number from 0 to 17 inclusive. 
""" # implement frameskip(k) by sending the action (k+1) times every time we have an action. for _ in range(self.frames_per_action): if not self.rstate.get_state().apply_ale_action(action_int):
def apply_action(self, action_input_obj: Input): """Takes an [ctoybox.Input][] action and applies it - unlike the ALE actions (which allow some permutations) this allows for fine-grained button pressing. This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor. Parameters: action_input_obj: An instance of the [ctoybox.Input][] class. """ # implement frameskip(k) by sending the action (k+1) times every time we have an action. for _ in range(self.frames_per_action): self.rstate.get_state().apply_action(action_input_obj) def get_state(self) -> np.array: """This state here actually refers to the graphical, RGBA or grayscale representation of the current state.""" return self.rstate.render_frame(self.rsimulator, self.grayscale) def set_seed(self, seed: int): """Control the random number generator of the config -- only affects a new_game. Parameters: seed: a parameter to reset the built-in random number generator. """ self.rsimulator.set_seed(seed) # Maybe call new game here? def save_frame_image(self, path: str, grayscale: bool = False): """Save the current frame image to a PNG file. Parameters: path: the filename to save to. grayscale: whether images should be saved in color or black & white. """ img = None if grayscale: img = Image.fromarray( self.rstate.render_frame_grayscale(self.rsimulator), "L" ) else: img = Image.fromarray( self.rstate.render_frame_color(self.rsimulator), "RGBA" ) img.save(path, format="png") def get_rgb_frame(self) -> np.array: """Get the RGB frame as a numpy array.""" return self.rstate.render_frame_rgb(self.rsimulator) def get_score(self) -> int: """Access the current score. Returns: The number of points earned in the current state.""" return self.rstate.score() def get_lives(self) -> int: """Access the number of lives. Returns: The number of lives remaining in the current state.""" return self.rstate.lives() def get_level(self) -> int: """ Access the number of levels. 
Returns: The number of levels completed in the current state.""" return self.rstate.level() def game_over(self) -> bool: """ Check for game over condition. Returns: ``True`` if the player has run out of lives in the current state. """ return self.rstate.game_over() def state_to_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python object.""" return self.rstate.to_json() def to_state_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python dict. Important: This method is deprecated; please use ``state_to_json`` instead! """ return self.state_to_json() def config_to_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python dict.""" return self.rsimulator.to_json() def write_state_json(self, js: Dict[str, Any]): """Overwrite the state's JSON representation from a python dict. Parameters: js: the python representation of the JSON state. """ old_state = self.rstate del old_state self.rstate = self.rsimulator.state_from_json(js) def write_config_json(self, config_js: Dict[str, Any]): """Overwrite the config's JSON representation from a python dict. It is likely that some changes will be seen until you call new_game() Parameters: config_js: the python representation of the config JSON """ # from_json replaces simulator! self.rsimulator.from_json(config_js) # new_game replaces state! self.new_game() def query_state_json( self, query: str, args: Union[Dict[str, Any], str] = "null" ) -> Dict[str, Any]: """Submit a query to the game's query system -- faster than accessing the whole JSON for quick introspection. Parameters: query: the query string to send to the game. args: a JSON argument to attach to the query string. 
""" return self.rstate.query_json(query, args) def __del__(self): self.rstate = None self.rsimulator = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.__del__() def schema_for_state(self) -> Dict[str, Any]: """Get the JSON Schema for the frame State object.""" return self.rsimulator.schema_for_state() def schema_for_config(self) -> Dict[str, Any]: """Get the JSON Schema for the Config object.""" return self.rsimulator.schema_for_config()
raise ValueError( "Expected to apply action, but failed: {0}".format(action_int) )
conditional_block
ffi.py
from .ctoybox import Game, State as FrameState, Input import numpy as np from PIL import Image import json from typing import Dict, Any, List, Tuple, Union, Optional def json_str(js: Union[Dict[str, Any], Input, str]) -> str: """ Turn an object into a JSON string -- handles dictionaries, the Input class, and JSON you've already prepared (e.g., strings). """ if type(js) is dict: js = json.dumps(js) elif type(js) is Input: js = json.dumps(js.__dict__) elif type(js) is not str: raise ValueError( "Unknown json type: %s (only str and dict supported)" % type(js) ) return js class
(object): """ The Simulator is an instance of a game configuration. You can call new_game on it to begin. """ def __init__(self, game_name, sim=None): """ Construct a new instance. Parameters: game_name: one of "breakout", "amidar", etc. sim: optionally a Rust pointer to an existing simulator. """ if sim is None: sim = Game(game_name) self.__sim = sim # sim should be a pointer self.game_name = game_name def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass def set_seed(self, seed: int): """Configure the random number generator that spawns new game states. Parameters: seed: a parameter to reset the built-in random number generator. """ self.__sim.seed(seed) def get_frame_size(self) -> Tuple[int, int]: """Get the width in pixels of the frames this game renders.""" return self.__sim.frame_size() def get_frame_width(self) -> int: """Get the width in pixels of the frames this game renders.""" return self.__sim.frame_size()[0] def get_frame_height(self) -> int: """Get the height in pixels of the frames this game renders.""" return self.__sim.frame_size()[1] def get_simulator(self) -> Game: """Get access to the raw simulator pointer.""" return self.__sim def new_game(self) -> "State": """Start a new game.""" return State(self, self.__sim.new_game()) def state_from_json(self, js: Union[Dict[str, Any], str]) -> "State": """Generate a State from the state json and this configuration. Parameters: js: a JSON object or string containing a serialized state. 
""" state: FrameState = self.__sim.new_state(json_str(js)) return State(self, state=state) def to_json(self) -> Dict[str, Any]: """Get the configuration of this simulator/config as JSON""" return json.loads(self.__sim.to_json()) def from_json(self, config_js: Union[Dict[str, Any], str]): """Mutably update this simulator/config with the replacement json.""" self.__sim = self.__sim.from_json(json_str(config_js)) def schema_for_state(self) -> Dict[str, Any]: """Get the JSON Schema for any state for this game.""" return json.loads(self.__sim.frame_schema()) def schema_for_config(self) -> Dict[str, Any]: """Get the JSON Schema for any config for this game.""" return json.loads(self.__sim.config_schema()) class State(object): """ The State object represents everything the game needs to know about any single simulated frame. You can rewind in time by storing and restoring these state representations. - Access the json: ``to_json`` - Access the image: ``render_frame`` """ def __init__(self, sim: Simulator, state=None): """ Construct a new State instance wrapper. Parameters: sim: The simulator responsible for this state. state: Optional pointer to a state to use (otherwise it will create one). 
""" self.sim = sim """A reference to the simulator that created this state.""" self.__state = state or sim.__sim.new_game() """The raw pointer to the state itself.""" self.game_name = sim.game_name """The name of the game that created this state.""" def __enter__(self): return self def __del__(self): self.__state = None self.sim = None def __exit__(self, exc_type, exc_value, traceback): self.__del__() def clone(self) -> 'State': """Quickly make a copy of this state; should be more efficient than saving the JSON.""" return State(self.sim, state=self.get_state().copy()) def get_state(self) -> FrameState: """Get the raw state pointer.""" assert self.__state is not None return self.__state def lives(self) -> int: """How many lives are remaining in the current state?""" return self.__state.lives() def level(self) -> int: """How many levels have been completed in the current state?""" return self.__state.level() def score(self) -> int: """How many points have been earned in the current state?""" return self.__state.score() def game_over(self): """Determine whether the game has ended; i.e., the player has run out of lives. >>> assert self.lives() < 0 == self.game_over() """ return self.lives() < 0 def query_json( self, query: str, args: Union[Dict[str, Any], str] = "null" ) -> Dict[str, Any]: """ Ask a question of the Rust state; queries are currently implemented manually. Parameters: query: the message to send to the rust state. args: the arguments to send to the rust state, defaults to "null". Returns: response: A JSON response loaded to python objects. Raises: ValueError: if anything goes wrong with the query ```python with Toybox("breakout") as tb: tb.query_json("bricks_remaining") ``` """ return json.loads(self.__state.query(json_str(query), json_str(args))) def render_frame(self, sim: Simulator, grayscale: bool = True) -> np.array: """Generate an image from the current frame state object. 
Parameters: sim: the simulator to use; this tells us the width/height necessary. grayscale: True if we want to render in grayscale rather than in color (RGBA). """ if grayscale: return self.render_frame_rgb(sim) else: return self.render_frame_color(sim) def render_frame_color(self, sim: Simulator) -> np.array: """Generate an RGBA image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. """ (w, h) = sim.get_frame_size() rgba = 4 size = h * w * rgba frame = bytearray(size) self.get_state().render_into_buffer(frame, True) return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba) def render_frame_rgb(self, sim: Simulator) -> np.array: """Generate an RGB image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. """ rgba_frame = self.render_frame_color(sim) return rgba_frame[:, :, :3] def render_frame_grayscale(self, sim: Simulator) -> np.array: """Generate a grayscale image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. """ (w, h) = sim.get_frame_size() depth = 1 size = h * w * depth frame = bytearray(size) self.get_state().render_into_buffer(frame, False) return np.asarray(frame, dtype=np.uint8).reshape(h, w, depth) def to_json(self) -> Dict[str, Any]: """Get a JSON representation of the state.""" return json.loads(self.get_state().to_json()) class Toybox(object): """ This is a stateful representation of Toybox -- since it manages memory, we provide ``__enter__`` and ``__exit__`` usage for Python's with-blocks: ```python with Toybox("amidar") as tb: print(tb.get_score()) # the 'tb' variable only lives in the block. ``` Important: Note how we should use this in a with-block; this will clean up pointers and prevent memory leaks. 
""" def __init__(self, game_name: str, grayscale: bool = True, frameskip: int = 0, seed: Optional[int] = None, withstate: Optional[dict] = None): """ Construct a new Toybox state/game wrapper. Use this in a with block! Parameters: game_name: One of "breakout", "space_invaders", "amidar", etc. grayscale: Toybox can render directly to grayscale, saving time. Default is True. frameskip: When an action is submitted, for how many extra frames should it be applied? Default is 0. seed: The seed """ self.game_name = game_name self.frames_per_action = frameskip + 1 self.rsimulator = Simulator(game_name) self.rstate = self.rsimulator.new_game() self.grayscale = grayscale if seed: self.set_seed(seed) self.new_game() if withstate: self.write_state_json(withstate) def new_game(self): """ Modify this Toybox wrapper to have a new_game state. Important: This discards the old state! """ old_state = self.rstate del old_state self.rstate = self.rsimulator.new_game() def get_height(self) -> int: """Get the height of the rendered game in pixels.""" return self.rsimulator.get_frame_height() def get_width(self) -> int: """Get the width of the rendered game in pixels.""" return self.rsimulator.get_frame_width() def get_legal_action_set(self) -> List[int]: """Get the set of actions consumed by this game: they are ALE numbered.""" sim = self.rsimulator.get_simulator() return sim.legal_actions() def apply_ale_action(self, action_int: int): """Takes an integer corresponding to an action, as specified in ALE. This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor. ```python ALE_INPUT_MAPPING = { 0 : "NOOP", 1 : "FIRE", 2 : "UP", 3 : "RIGHT", 4 : "LEFT", 5 : "DOWN", 6 : "UPRIGHT", 7 : "UPLEFT", 8 : "DOWNRIGHT", 9 : "DOWNLEFT", 10 : "UPFIRE", 11 : "RIGHTFIRE", 12 : "LEFTFIRE", 13 : "DOWNFIRE", 14 : "UPRIGHTFIRE", 15 : "UPLEFTFIRE", 16 : "DOWNRIGHTFIRE", 17 : "DOWNLEFTFIRE" } ``` Parameters: action_int: A number from 0 to 17 inclusive. 
""" # implement frameskip(k) by sending the action (k+1) times every time we have an action. for _ in range(self.frames_per_action): if not self.rstate.get_state().apply_ale_action(action_int): raise ValueError( "Expected to apply action, but failed: {0}".format(action_int) ) def apply_action(self, action_input_obj: Input): """Takes an [ctoybox.Input][] action and applies it - unlike the ALE actions (which allow some permutations) this allows for fine-grained button pressing. This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor. Parameters: action_input_obj: An instance of the [ctoybox.Input][] class. """ # implement frameskip(k) by sending the action (k+1) times every time we have an action. for _ in range(self.frames_per_action): self.rstate.get_state().apply_action(action_input_obj) def get_state(self) -> np.array: """This state here actually refers to the graphical, RGBA or grayscale representation of the current state.""" return self.rstate.render_frame(self.rsimulator, self.grayscale) def set_seed(self, seed: int): """Control the random number generator of the config -- only affects a new_game. Parameters: seed: a parameter to reset the built-in random number generator. """ self.rsimulator.set_seed(seed) # Maybe call new game here? def save_frame_image(self, path: str, grayscale: bool = False): """Save the current frame image to a PNG file. Parameters: path: the filename to save to. grayscale: whether images should be saved in color or black & white. """ img = None if grayscale: img = Image.fromarray( self.rstate.render_frame_grayscale(self.rsimulator), "L" ) else: img = Image.fromarray( self.rstate.render_frame_color(self.rsimulator), "RGBA" ) img.save(path, format="png") def get_rgb_frame(self) -> np.array: """Get the RGB frame as a numpy array.""" return self.rstate.render_frame_rgb(self.rsimulator) def get_score(self) -> int: """Access the current score. 
Returns: The number of points earned in the current state.""" return self.rstate.score() def get_lives(self) -> int: """Access the number of lives. Returns: The number of lives remaining in the current state.""" return self.rstate.lives() def get_level(self) -> int: """ Access the number of levels. Returns: The number of levels completed in the current state.""" return self.rstate.level() def game_over(self) -> bool: """ Check for game over condition. Returns: ``True`` if the player has run out of lives in the current state. """ return self.rstate.game_over() def state_to_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python object.""" return self.rstate.to_json() def to_state_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python dict. Important: This method is deprecated; please use ``state_to_json`` instead! """ return self.state_to_json() def config_to_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python dict.""" return self.rsimulator.to_json() def write_state_json(self, js: Dict[str, Any]): """Overwrite the state's JSON representation from a python dict. Parameters: js: the python representation of the JSON state. """ old_state = self.rstate del old_state self.rstate = self.rsimulator.state_from_json(js) def write_config_json(self, config_js: Dict[str, Any]): """Overwrite the config's JSON representation from a python dict. It is likely that some changes will be seen until you call new_game() Parameters: config_js: the python representation of the config JSON """ # from_json replaces simulator! self.rsimulator.from_json(config_js) # new_game replaces state! self.new_game() def query_state_json( self, query: str, args: Union[Dict[str, Any], str] = "null" ) -> Dict[str, Any]: """Submit a query to the game's query system -- faster than accessing the whole JSON for quick introspection. Parameters: query: the query string to send to the game. 
args: a JSON argument to attach to the query string. """ return self.rstate.query_json(query, args) def __del__(self): self.rstate = None self.rsimulator = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.__del__() def schema_for_state(self) -> Dict[str, Any]: """Get the JSON Schema for the frame State object.""" return self.rsimulator.schema_for_state() def schema_for_config(self) -> Dict[str, Any]: """Get the JSON Schema for the Config object.""" return self.rsimulator.schema_for_config()
Simulator
identifier_name
ffi.py
from .ctoybox import Game, State as FrameState, Input import numpy as np from PIL import Image import json from typing import Dict, Any, List, Tuple, Union, Optional def json_str(js: Union[Dict[str, Any], Input, str]) -> str: """ Turn an object into a JSON string -- handles dictionaries, the Input class, and JSON you've already prepared (e.g., strings). """ if type(js) is dict: js = json.dumps(js) elif type(js) is Input: js = json.dumps(js.__dict__) elif type(js) is not str: raise ValueError( "Unknown json type: %s (only str and dict supported)" % type(js) ) return js class Simulator(object): """ The Simulator is an instance of a game configuration. You can call new_game on it to begin. """ def __init__(self, game_name, sim=None): """ Construct a new instance. Parameters: game_name: one of "breakout", "amidar", etc. sim: optionally a Rust pointer to an existing simulator. """ if sim is None: sim = Game(game_name) self.__sim = sim # sim should be a pointer self.game_name = game_name def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass def set_seed(self, seed: int): """Configure the random number generator that spawns new game states. Parameters: seed: a parameter to reset the built-in random number generator. 
""" self.__sim.seed(seed) def get_frame_size(self) -> Tuple[int, int]: """Get the width in pixels of the frames this game renders.""" return self.__sim.frame_size() def get_frame_width(self) -> int: """Get the width in pixels of the frames this game renders.""" return self.__sim.frame_size()[0] def get_frame_height(self) -> int: """Get the height in pixels of the frames this game renders.""" return self.__sim.frame_size()[1] def get_simulator(self) -> Game: """Get access to the raw simulator pointer.""" return self.__sim def new_game(self) -> "State": """Start a new game.""" return State(self, self.__sim.new_game()) def state_from_json(self, js: Union[Dict[str, Any], str]) -> "State": """Generate a State from the state json and this configuration. Parameters: js: a JSON object or string containing a serialized state. """ state: FrameState = self.__sim.new_state(json_str(js)) return State(self, state=state) def to_json(self) -> Dict[str, Any]: """Get the configuration of this simulator/config as JSON""" return json.loads(self.__sim.to_json()) def from_json(self, config_js: Union[Dict[str, Any], str]): """Mutably update this simulator/config with the replacement json.""" self.__sim = self.__sim.from_json(json_str(config_js)) def schema_for_state(self) -> Dict[str, Any]: """Get the JSON Schema for any state for this game.""" return json.loads(self.__sim.frame_schema()) def schema_for_config(self) -> Dict[str, Any]: """Get the JSON Schema for any config for this game.""" return json.loads(self.__sim.config_schema()) class State(object): """ The State object represents everything the game needs to know about any single simulated frame. You can rewind in time by storing and restoring these state representations. - Access the json: ``to_json`` - Access the image: ``render_frame`` """ def __init__(self, sim: Simulator, state=None): """ Construct a new State instance wrapper. Parameters: sim: The simulator responsible for this state. 
state: Optional pointer to a state to use (otherwise it will create one). """ self.sim = sim """A reference to the simulator that created this state.""" self.__state = state or sim.__sim.new_game() """The raw pointer to the state itself.""" self.game_name = sim.game_name """The name of the game that created this state.""" def __enter__(self): return self def __del__(self): self.__state = None self.sim = None def __exit__(self, exc_type, exc_value, traceback): self.__del__() def clone(self) -> 'State': """Quickly make a copy of this state; should be more efficient than saving the JSON.""" return State(self.sim, state=self.get_state().copy()) def get_state(self) -> FrameState: """Get the raw state pointer.""" assert self.__state is not None return self.__state def lives(self) -> int: """How many lives are remaining in the current state?""" return self.__state.lives() def level(self) -> int: """How many levels have been completed in the current state?""" return self.__state.level() def score(self) -> int: """How many points have been earned in the current state?""" return self.__state.score() def game_over(self): """Determine whether the game has ended; i.e., the player has run out of lives. >>> assert self.lives() < 0 == self.game_over() """ return self.lives() < 0 def query_json( self, query: str, args: Union[Dict[str, Any], str] = "null" ) -> Dict[str, Any]: """ Ask a question of the Rust state; queries are currently implemented manually. Parameters: query: the message to send to the rust state. args: the arguments to send to the rust state, defaults to "null". Returns: response: A JSON response loaded to python objects. Raises: ValueError: if anything goes wrong with the query ```python with Toybox("breakout") as tb: tb.query_json("bricks_remaining") ``` """ return json.loads(self.__state.query(json_str(query), json_str(args))) def render_frame(self, sim: Simulator, grayscale: bool = True) -> np.array: """Generate an image from the current frame state object. 
Parameters: sim: the simulator to use; this tells us the width/height necessary. grayscale: True if we want to render in grayscale rather than in color (RGBA). """ if grayscale: return self.render_frame_rgb(sim) else: return self.render_frame_color(sim) def render_frame_color(self, sim: Simulator) -> np.array: """Generate an RGBA image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. """ (w, h) = sim.get_frame_size() rgba = 4 size = h * w * rgba frame = bytearray(size) self.get_state().render_into_buffer(frame, True) return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba) def render_frame_rgb(self, sim: Simulator) -> np.array: """Generate an RGB image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. """ rgba_frame = self.render_frame_color(sim) return rgba_frame[:, :, :3] def render_frame_grayscale(self, sim: Simulator) -> np.array: """Generate a grayscale image from the current frame state object. Parameters: sim: the simulator to use; this tells us the width/height necessary. """ (w, h) = sim.get_frame_size() depth = 1 size = h * w * depth frame = bytearray(size) self.get_state().render_into_buffer(frame, False) return np.asarray(frame, dtype=np.uint8).reshape(h, w, depth) def to_json(self) -> Dict[str, Any]: """Get a JSON representation of the state.""" return json.loads(self.get_state().to_json()) class Toybox(object): """ This is a stateful representation of Toybox -- since it manages memory, we provide ``__enter__`` and ``__exit__`` usage for Python's with-blocks: ```python with Toybox("amidar") as tb: print(tb.get_score()) # the 'tb' variable only lives in the block. ``` Important: Note how we should use this in a with-block; this will clean up pointers and prevent memory leaks. 
""" def __init__(self, game_name: str, grayscale: bool = True, frameskip: int = 0, seed: Optional[int] = None, withstate: Optional[dict] = None): """ Construct a new Toybox state/game wrapper. Use this in a with block! Parameters: game_name: One of "breakout", "space_invaders", "amidar", etc. grayscale: Toybox can render directly to grayscale, saving time. Default is True. frameskip: When an action is submitted, for how many extra frames should it be applied? Default is 0. seed: The seed """ self.game_name = game_name self.frames_per_action = frameskip + 1 self.rsimulator = Simulator(game_name) self.rstate = self.rsimulator.new_game() self.grayscale = grayscale if seed: self.set_seed(seed) self.new_game() if withstate: self.write_state_json(withstate) def new_game(self): """ Modify this Toybox wrapper to have a new_game state. Important: This discards the old state! """ old_state = self.rstate del old_state self.rstate = self.rsimulator.new_game() def get_height(self) -> int: """Get the height of the rendered game in pixels.""" return self.rsimulator.get_frame_height() def get_width(self) -> int: """Get the width of the rendered game in pixels.""" return self.rsimulator.get_frame_width() def get_legal_action_set(self) -> List[int]: """Get the set of actions consumed by this game: they are ALE numbered.""" sim = self.rsimulator.get_simulator() return sim.legal_actions() def apply_ale_action(self, action_int: int): """Takes an integer corresponding to an action, as specified in ALE. This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor. ```python ALE_INPUT_MAPPING = { 0 : "NOOP", 1 : "FIRE", 2 : "UP", 3 : "RIGHT", 4 : "LEFT", 5 : "DOWN", 6 : "UPRIGHT", 7 : "UPLEFT", 8 : "DOWNRIGHT", 9 : "DOWNLEFT", 10 : "UPFIRE", 11 : "RIGHTFIRE", 12 : "LEFTFIRE", 13 : "DOWNFIRE", 14 : "UPRIGHTFIRE", 15 : "UPLEFTFIRE", 16 : "DOWNRIGHTFIRE", 17 : "DOWNLEFTFIRE" } ``` Parameters: action_int: A number from 0 to 17 inclusive. 
""" # implement frameskip(k) by sending the action (k+1) times every time we have an action. for _ in range(self.frames_per_action): if not self.rstate.get_state().apply_ale_action(action_int): raise ValueError( "Expected to apply action, but failed: {0}".format(action_int) ) def apply_action(self, action_input_obj: Input): """Takes an [ctoybox.Input][] action and applies it - unlike the ALE actions (which allow some permutations) this allows for fine-grained button pressing. This applies the action *k* times, where *k* based on the frameskip passed to the Toybox constructor. Parameters: action_input_obj: An instance of the [ctoybox.Input][] class. """ # implement frameskip(k) by sending the action (k+1) times every time we have an action. for _ in range(self.frames_per_action): self.rstate.get_state().apply_action(action_input_obj) def get_state(self) -> np.array: """This state here actually refers to the graphical, RGBA or grayscale representation of the current state.""" return self.rstate.render_frame(self.rsimulator, self.grayscale) def set_seed(self, seed: int): """Control the random number generator of the config -- only affects a new_game. Parameters: seed: a parameter to reset the built-in random number generator. """ self.rsimulator.set_seed(seed) # Maybe call new game here? def save_frame_image(self, path: str, grayscale: bool = False): """Save the current frame image to a PNG file. Parameters: path: the filename to save to. grayscale: whether images should be saved in color or black & white. """ img = None if grayscale: img = Image.fromarray( self.rstate.render_frame_grayscale(self.rsimulator), "L" ) else: img = Image.fromarray( self.rstate.render_frame_color(self.rsimulator), "RGBA" ) img.save(path, format="png") def get_rgb_frame(self) -> np.array: """Get the RGB frame as a numpy array.""" return self.rstate.render_frame_rgb(self.rsimulator) def get_score(self) -> int: """Access the current score. 
Returns: The number of points earned in the current state."""
def get_lives(self) -> int: """Access the number of lives. Returns: The number of lives remaining in the current state.""" return self.rstate.lives() def get_level(self) -> int: """ Access the number of levels. Returns: The number of levels completed in the current state.""" return self.rstate.level() def game_over(self) -> bool: """ Check for game over condition. Returns: ``True`` if the player has run out of lives in the current state. """ return self.rstate.game_over() def state_to_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python object.""" return self.rstate.to_json() def to_state_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python dict. Important: This method is deprecated; please use ``state_to_json`` instead! """ return self.state_to_json() def config_to_json(self) -> Dict[str, Any]: """Get the state's JSON representation as a python dict.""" return self.rsimulator.to_json() def write_state_json(self, js: Dict[str, Any]): """Overwrite the state's JSON representation from a python dict. Parameters: js: the python representation of the JSON state. """ old_state = self.rstate del old_state self.rstate = self.rsimulator.state_from_json(js) def write_config_json(self, config_js: Dict[str, Any]): """Overwrite the config's JSON representation from a python dict. It is likely that some changes will be seen until you call new_game() Parameters: config_js: the python representation of the config JSON """ # from_json replaces simulator! self.rsimulator.from_json(config_js) # new_game replaces state! self.new_game() def query_state_json( self, query: str, args: Union[Dict[str, Any], str] = "null" ) -> Dict[str, Any]: """Submit a query to the game's query system -- faster than accessing the whole JSON for quick introspection. Parameters: query: the query string to send to the game. args: a JSON argument to attach to the query string. 
""" return self.rstate.query_json(query, args) def __del__(self): self.rstate = None self.rsimulator = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.__del__() def schema_for_state(self) -> Dict[str, Any]: """Get the JSON Schema for the frame State object.""" return self.rsimulator.schema_for_state() def schema_for_config(self) -> Dict[str, Any]: """Get the JSON Schema for the Config object.""" return self.rsimulator.schema_for_config()
return self.rstate.score()
random_line_split
api.d.ts
/** * @packageDocumentation * @module API-EVM */ import { Buffer } from 'buffer/'; import BN from 'bn.js'; import AvalancheCore from '../../avalanche'; import { JRPCAPI } from '../../common/jrpcapi'; import { UTXOSet } from './utxos'; import { KeyChain } from './keychain'; import { Tx, UnsignedTx } from './tx'; import { Index } from './../../common/interfaces'; /** * Class for interacting with a node's EVMAPI * * @category RPCAPIs * * @remarks This extends the [[JRPCAPI]] class. This class should not be directly called. Instead, use the [[Avalanche.addAPI]] function to register this interface with Avalanche. */ export declare class EVMAPI extends JRPCAPI { /** * @ignore */ protected keychain: KeyChain; protected blockchainID: string; protected blockchainAlias: string; protected AVAXAssetID: Buffer; protected txFee: BN; /** * Gets the alias for the blockchainID if it exists, otherwise returns `undefined`. * * @returns The alias for the blockchainID */ getBlockchainAlias: () => string; /** * Sets the alias for the blockchainID. * * @param alias The alias for the blockchainID. * */ setBlockchainAlias: (alias: string) => string; /** * Gets the blockchainID and returns it. * * @returns The blockchainID */ getBlockchainID: () => string; /** * Refresh blockchainID, and if a blockchainID is passed in, use that. * * @param Optional. BlockchainID to assign, if none, uses the default based on networkID. * * @returns A boolean if the blockchainID was successfully refreshed. */ refreshBlockchainID: (blockchainID?: string) => boolean; /** * Takes an address string and returns its {@link https://github.com/feross/buffer|Buffer} representation if valid. * * @returns A {@link https://github.com/feross/buffer|Buffer} for the address if valid, undefined if not valid. */ parseAddress: (addr: string) => Buffer; addressFromBuffer: (address: Buffer) => string; /** * Retrieves an assets name and symbol. 
* * @param assetID Either a {@link https://github.com/feross/buffer|Buffer} or an b58 serialized string for the AssetID or its alias. * * @returns Returns a Promise<Asset> with keys "name", "symbol", "assetID" and "denomination". */ getAssetDescription: (assetID: Buffer | string) => Promise<any>; /** * Fetches the AVAX AssetID and returns it in a Promise. * * @param refresh This function caches the response. Refresh = true will bust the cache. * * @returns The the provided string representing the AVAX AssetID */ getAVAXAssetID: (refresh?: boolean) => Promise<Buffer>; /** * Overrides the defaults and sets the cache to a specific AVAX AssetID * * @param avaxAssetID A cb58 string or Buffer representing the AVAX AssetID * * @returns The the provided string representing the AVAX AssetID */ setAVAXAssetID: (avaxAssetID: string | Buffer) => void; /** * Gets the default tx fee for this chain. * * @returns The default tx fee as a {@link https://github.com/indutny/bn.js/|BN} */ getDefaultTxFee: () => BN; /** * Gets the tx fee for this chain. * * @returns The tx fee as a {@link https://github.com/indutny/bn.js/|BN} */ getTxFee: () => BN; /** * Send ANT (Avalanche Native Token) assets including AVAX from the C-Chain to an account on the X-Chain. * * After calling this method, you must call the X-Chainโ€™s import method to complete the transfer. * * @param username The Keystore user that controls the X-Chain account specified in `to` * @param password The password of the Keystore user * @param to The account on the X-Chain to send the AVAX to. * @param amount Amount of asset to export as a {@link https://github.com/indutny/bn.js/|BN} * @param assetID The asset id which is being sent * * @returns String representing the transaction id */ export: (username: string, password: string, to: string, amount: BN, assetID: string) => Promise<string>; /** * Send AVAX from the C-Chain to an account on the X-Chain. 
* * After calling this method, you must call the X-Chainโ€™s importAVAX method to complete the transfer. * * @param username The Keystore user that controls the X-Chain account specified in `to` * @param password The password of the Keystore user * @param to The account on the X-Chain to send the AVAX to. * @param amount Amount of AVAX to export as a {@link https://github.com/indutny/bn.js/|BN} * * @returns String representing the transaction id */ exportAVAX: (username: string, password: string, to: string, amount: BN) => Promise<string>; /** * Retrieves the UTXOs related to the addresses provided from the node's `getUTXOs` method. * * @param addresses An array of addresses as cb58 strings or addresses as {@link https://github.com/feross/buffer|Buffer}s * @param sourceChain A string for the chain to look for the UTXO's. Default is to use this chain, but if exported UTXOs exist * from other chains, this can used to pull them instead. * @param limit Optional. Returns at most [limit] addresses. If [limit] == 0 or > [maxUTXOsToFetch], fetches up to [maxUTXOsToFetch]. * @param startIndex Optional. [StartIndex] defines where to start fetching UTXOs (for pagination.) * UTXOs fetched are from addresses equal to or greater than [StartIndex.Address] * For address [StartIndex.Address], only UTXOs with IDs greater than [StartIndex.Utxo] will be returned. */ getUTXOs: (addresses: string[] | string, sourceChain?: string, limit?: number, startIndex?: Index) => Promise<{ numFetched: number; utxos; endIndex: Index; }>; /** * Send ANT (Avalanche Native Token) assets including AVAX from an account on the X-Chain to an address on the C-Chain. This transaction * must be signed with the key of the account that the asset is sent from and which pays * the transaction fee. * * @param username The Keystore user that controls the account specified in `to` * @param password The password of the Keystore user * @param to The address of the account the asset is sent to. 
* @param sourceChain The chainID where the funds are coming from. Ex: "X" * * @returns Promise for a string for the transaction, which should be sent to the network * by calling issueTx. */ import: (username: string, password: string, to: string, sourceChain: string) => Promise<string>; /** * Send AVAX from an account on the X-Chain to an address on the C-Chain. This transaction * must be signed with the key of the account that the AVAX is sent from and which pays * the transaction fee. * * @param username The Keystore user that controls the account specified in `to` * @param password The password of the Keystore user * @param to The address of the account the AVAX is sent to. This must be the same as the to * argument in the corresponding call to the X-Chainโ€™s exportAVAX * @param sourceChain The chainID where the funds are coming from. * * @returns Promise for a string for the transaction, which should be sent to the network * by calling issueTx. */ importAVAX: (username: string, password: string, to: string, sourceChain: string) => Promise<string>; /** * Give a user control over an address by providing the private key that controls the address. * * @param username The name of the user to store the private key * @param password The password that unlocks the user * @param privateKey A string representing the private key in the vm's format * * @returns The address for the imported private key. */ importKey: (username: string, password: string, privateKey: string) => Promise<string>; /** * Calls the node's issueTx method from the API and returns the resulting transaction ID as a string. * * @param tx A string, {@link https://github.com/feross/buffer|Buffer}, or [[Tx]] representing a transaction * * @returns A Promise<string> representing the transaction ID of the posted transaction. */ issueTx: (tx: string | Buffer | Tx) => Promise<string>; /** * Exports the private key for an address. 
* * @param username The name of the user with the private key * @param password The password used to decrypt the private key * @param address The address whose private key should be exported * * @returns Promise with the decrypted private key as store in the database */ exportKey: (username: string, password: string, address: string) => Promise<string>; /** * Helper function which creates an unsigned Import Tx. For more granular control, you may create your own * [[UnsignedTx]] manually (with their corresponding [[TransferableInput]]s, [[TransferableOutput]]s). * * @param utxoset A set of UTXOs that the transaction is built on * @param toAddress The address to send the funds * @param ownerAddresses The addresses being used to import * @param sourceChain The chainid for where the import is coming from * @param fromAddresses The addresses being used to send the funds from the UTXOs provided * * @returns An unsigned transaction ([[UnsignedTx]]) which contains a [[ImportTx]]. * * @remarks * This helper exists because the endpoint API should be the primary point of entry for most functionality. */ buildImportTx: (utxoset: UTXOSet, toAddress: string, ownerAddresses: string[], sourceChain: Buffer | string, fromAddresses: string[]) => Promise<UnsignedTx>; /** * Helper function which creates an unsigned Export Tx. For more granular control, you may create your own * [[UnsignedTx]] manually (with their corresponding [[TransferableInput]]s, [[TransferableOutput]]s). * * @param amount The amount being exported as a {@link https://github.com/indutny/bn.js/|BN} * @param assetID The asset id which is being sent
* @param toAddresses The addresses to send the funds * @param fromAddresses The addresses being used to send the funds from the UTXOs provided * @param changeAddresses The addresses that can spend the change remaining from the spent UTXOs * @param asOf Optional. The timestamp to verify the transaction against as a {@link https://github.com/indutny/bn.js/|BN} * @param locktime Optional. The locktime field created in the resulting outputs * @param threshold Optional. The number of signatures required to spend the funds in the resultant UTXO * * @returns An unsigned transaction ([[UnsignedTx]]) which contains an [[ExportTx]]. */ buildExportTx: (amount: BN, assetID: Buffer | string, destinationChain: Buffer | string, fromAddressHex: string, fromAddressBech: string, toAddresses: string[], nonce?: number, locktime?: BN, threshold?: number) => Promise<UnsignedTx>; /** * Gets a reference to the keychain for this class. * * @returns The instance of [[KeyChain]] for this class */ keyChain: () => KeyChain; /** * @ignore */ protected _cleanAddressArray(addresses: string[] | Buffer[], caller: string): string[]; /** * This class should not be instantiated directly. * Instead use the [[Avalanche.addAPI]] method. * * @param core A reference to the Avalanche class * @param baseurl Defaults to the string "/ext/bc/C/avax" as the path to blockchain's baseurl * @param blockchainID The Blockchain's ID. Defaults to an empty string: '' */ constructor(core: AvalancheCore, baseurl?: string, blockchainID?: string); } //# sourceMappingURL=api.d.ts.map
* @param destinationChain The chainid for where the assets will be sent.
random_line_split
api.d.ts
/** * @packageDocumentation * @module API-EVM */ import { Buffer } from 'buffer/'; import BN from 'bn.js'; import AvalancheCore from '../../avalanche'; import { JRPCAPI } from '../../common/jrpcapi'; import { UTXOSet } from './utxos'; import { KeyChain } from './keychain'; import { Tx, UnsignedTx } from './tx'; import { Index } from './../../common/interfaces'; /** * Class for interacting with a node's EVMAPI * * @category RPCAPIs * * @remarks This extends the [[JRPCAPI]] class. This class should not be directly called. Instead, use the [[Avalanche.addAPI]] function to register this interface with Avalanche. */ export declare class
extends JRPCAPI { /** * @ignore */ protected keychain: KeyChain; protected blockchainID: string; protected blockchainAlias: string; protected AVAXAssetID: Buffer; protected txFee: BN; /** * Gets the alias for the blockchainID if it exists, otherwise returns `undefined`. * * @returns The alias for the blockchainID */ getBlockchainAlias: () => string; /** * Sets the alias for the blockchainID. * * @param alias The alias for the blockchainID. * */ setBlockchainAlias: (alias: string) => string; /** * Gets the blockchainID and returns it. * * @returns The blockchainID */ getBlockchainID: () => string; /** * Refresh blockchainID, and if a blockchainID is passed in, use that. * * @param Optional. BlockchainID to assign, if none, uses the default based on networkID. * * @returns A boolean if the blockchainID was successfully refreshed. */ refreshBlockchainID: (blockchainID?: string) => boolean; /** * Takes an address string and returns its {@link https://github.com/feross/buffer|Buffer} representation if valid. * * @returns A {@link https://github.com/feross/buffer|Buffer} for the address if valid, undefined if not valid. */ parseAddress: (addr: string) => Buffer; addressFromBuffer: (address: Buffer) => string; /** * Retrieves an assets name and symbol. * * @param assetID Either a {@link https://github.com/feross/buffer|Buffer} or an b58 serialized string for the AssetID or its alias. * * @returns Returns a Promise<Asset> with keys "name", "symbol", "assetID" and "denomination". */ getAssetDescription: (assetID: Buffer | string) => Promise<any>; /** * Fetches the AVAX AssetID and returns it in a Promise. * * @param refresh This function caches the response. Refresh = true will bust the cache. 
* * @returns The the provided string representing the AVAX AssetID */ getAVAXAssetID: (refresh?: boolean) => Promise<Buffer>; /** * Overrides the defaults and sets the cache to a specific AVAX AssetID * * @param avaxAssetID A cb58 string or Buffer representing the AVAX AssetID * * @returns The the provided string representing the AVAX AssetID */ setAVAXAssetID: (avaxAssetID: string | Buffer) => void; /** * Gets the default tx fee for this chain. * * @returns The default tx fee as a {@link https://github.com/indutny/bn.js/|BN} */ getDefaultTxFee: () => BN; /** * Gets the tx fee for this chain. * * @returns The tx fee as a {@link https://github.com/indutny/bn.js/|BN} */ getTxFee: () => BN; /** * Send ANT (Avalanche Native Token) assets including AVAX from the C-Chain to an account on the X-Chain. * * After calling this method, you must call the X-Chainโ€™s import method to complete the transfer. * * @param username The Keystore user that controls the X-Chain account specified in `to` * @param password The password of the Keystore user * @param to The account on the X-Chain to send the AVAX to. * @param amount Amount of asset to export as a {@link https://github.com/indutny/bn.js/|BN} * @param assetID The asset id which is being sent * * @returns String representing the transaction id */ export: (username: string, password: string, to: string, amount: BN, assetID: string) => Promise<string>; /** * Send AVAX from the C-Chain to an account on the X-Chain. * * After calling this method, you must call the X-Chainโ€™s importAVAX method to complete the transfer. * * @param username The Keystore user that controls the X-Chain account specified in `to` * @param password The password of the Keystore user * @param to The account on the X-Chain to send the AVAX to. 
* @param amount Amount of AVAX to export as a {@link https://github.com/indutny/bn.js/|BN} * * @returns String representing the transaction id */ exportAVAX: (username: string, password: string, to: string, amount: BN) => Promise<string>; /** * Retrieves the UTXOs related to the addresses provided from the node's `getUTXOs` method. * * @param addresses An array of addresses as cb58 strings or addresses as {@link https://github.com/feross/buffer|Buffer}s * @param sourceChain A string for the chain to look for the UTXO's. Default is to use this chain, but if exported UTXOs exist * from other chains, this can used to pull them instead. * @param limit Optional. Returns at most [limit] addresses. If [limit] == 0 or > [maxUTXOsToFetch], fetches up to [maxUTXOsToFetch]. * @param startIndex Optional. [StartIndex] defines where to start fetching UTXOs (for pagination.) * UTXOs fetched are from addresses equal to or greater than [StartIndex.Address] * For address [StartIndex.Address], only UTXOs with IDs greater than [StartIndex.Utxo] will be returned. */ getUTXOs: (addresses: string[] | string, sourceChain?: string, limit?: number, startIndex?: Index) => Promise<{ numFetched: number; utxos; endIndex: Index; }>; /** * Send ANT (Avalanche Native Token) assets including AVAX from an account on the X-Chain to an address on the C-Chain. This transaction * must be signed with the key of the account that the asset is sent from and which pays * the transaction fee. * * @param username The Keystore user that controls the account specified in `to` * @param password The password of the Keystore user * @param to The address of the account the asset is sent to. * @param sourceChain The chainID where the funds are coming from. Ex: "X" * * @returns Promise for a string for the transaction, which should be sent to the network * by calling issueTx. 
*/ import: (username: string, password: string, to: string, sourceChain: string) => Promise<string>; /** * Send AVAX from an account on the X-Chain to an address on the C-Chain. This transaction * must be signed with the key of the account that the AVAX is sent from and which pays * the transaction fee. * * @param username The Keystore user that controls the account specified in `to` * @param password The password of the Keystore user * @param to The address of the account the AVAX is sent to. This must be the same as the to * argument in the corresponding call to the X-Chainโ€™s exportAVAX * @param sourceChain The chainID where the funds are coming from. * * @returns Promise for a string for the transaction, which should be sent to the network * by calling issueTx. */ importAVAX: (username: string, password: string, to: string, sourceChain: string) => Promise<string>; /** * Give a user control over an address by providing the private key that controls the address. * * @param username The name of the user to store the private key * @param password The password that unlocks the user * @param privateKey A string representing the private key in the vm's format * * @returns The address for the imported private key. */ importKey: (username: string, password: string, privateKey: string) => Promise<string>; /** * Calls the node's issueTx method from the API and returns the resulting transaction ID as a string. * * @param tx A string, {@link https://github.com/feross/buffer|Buffer}, or [[Tx]] representing a transaction * * @returns A Promise<string> representing the transaction ID of the posted transaction. */ issueTx: (tx: string | Buffer | Tx) => Promise<string>; /** * Exports the private key for an address. 
* * @param username The name of the user with the private key * @param password The password used to decrypt the private key * @param address The address whose private key should be exported * * @returns Promise with the decrypted private key as store in the database */ exportKey: (username: string, password: string, address: string) => Promise<string>; /** * Helper function which creates an unsigned Import Tx. For more granular control, you may create your own * [[UnsignedTx]] manually (with their corresponding [[TransferableInput]]s, [[TransferableOutput]]s). * * @param utxoset A set of UTXOs that the transaction is built on * @param toAddress The address to send the funds * @param ownerAddresses The addresses being used to import * @param sourceChain The chainid for where the import is coming from * @param fromAddresses The addresses being used to send the funds from the UTXOs provided * * @returns An unsigned transaction ([[UnsignedTx]]) which contains a [[ImportTx]]. * * @remarks * This helper exists because the endpoint API should be the primary point of entry for most functionality. */ buildImportTx: (utxoset: UTXOSet, toAddress: string, ownerAddresses: string[], sourceChain: Buffer | string, fromAddresses: string[]) => Promise<UnsignedTx>; /** * Helper function which creates an unsigned Export Tx. For more granular control, you may create your own * [[UnsignedTx]] manually (with their corresponding [[TransferableInput]]s, [[TransferableOutput]]s). * * @param amount The amount being exported as a {@link https://github.com/indutny/bn.js/|BN} * @param assetID The asset id which is being sent * @param destinationChain The chainid for where the assets will be sent. * @param toAddresses The addresses to send the funds * @param fromAddresses The addresses being used to send the funds from the UTXOs provided * @param changeAddresses The addresses that can spend the change remaining from the spent UTXOs * @param asOf Optional. 
The timestamp to verify the transaction against as a {@link https://github.com/indutny/bn.js/|BN} * @param locktime Optional. The locktime field created in the resulting outputs * @param threshold Optional. The number of signatures required to spend the funds in the resultant UTXO * * @returns An unsigned transaction ([[UnsignedTx]]) which contains an [[ExportTx]]. */ buildExportTx: (amount: BN, assetID: Buffer | string, destinationChain: Buffer | string, fromAddressHex: string, fromAddressBech: string, toAddresses: string[], nonce?: number, locktime?: BN, threshold?: number) => Promise<UnsignedTx>; /** * Gets a reference to the keychain for this class. * * @returns The instance of [[KeyChain]] for this class */ keyChain: () => KeyChain; /** * @ignore */ protected _cleanAddressArray(addresses: string[] | Buffer[], caller: string): string[]; /** * This class should not be instantiated directly. * Instead use the [[Avalanche.addAPI]] method. * * @param core A reference to the Avalanche class * @param baseurl Defaults to the string "/ext/bc/C/avax" as the path to blockchain's baseurl * @param blockchainID The Blockchain's ID. Defaults to an empty string: '' */ constructor(core: AvalancheCore, baseurl?: string, blockchainID?: string); } //# sourceMappingURL=api.d.ts.map
EVMAPI
identifier_name
Kooi_NPacific_1D.py
# created 19/12/19- North Pacific: Kooi et al. 2017 in 1D (depth) from parcels import FieldSet, ParticleSet, JITParticle, ScipyParticle, AdvectionRK4_3D, AdvectionRK4, ErrorCode, ParticleFile, Variable, Field, NestedField, VectorField, timer from datetime import timedelta as delta from datetime import datetime import numpy as np import math from glob import glob import os import xarray as xr import sys import time as timelib import matplotlib.pyplot as plt import warnings import pickle import matplotlib.ticker as mtick import pandas as pd import operator from numpy import * import scipy.linalg import math as math warnings.filterwarnings("ignore") #------ CHOOSE (Note: the same values must also be placed in the Kooi kernel: lines 53 and 54) ----- rho_pl = "920" # density of plastic (kg m-3): DEFAULT FOR FIG 1: 920 but full range is: 840, 920, 940, 1050, 1380 (last 2 are initially non-buoyant) r_pl = "1e-04" # radius of plastic (m): DEFAULT FOR FIG 1: 10-3 to 10-6 included but full range is: 10 mm to 0.1 um or 10-2 to 10-7 lon = np.array([-161,-159]) #lon release locations lat = np.array([35,37]) #lat release locations simdays = 150 #number of days running the sim secsdt = 60 #timestep of sim time0 = 0 secsoutdt = 60*60 # seconds in an hour (must be in hours due to algal pickle profiles being hours) total_secs = secsoutdt*24.*simdays - secsoutdt # total time (in seconds) being run for the sim dt_secs = total_secs/secsoutdt '''Loading the Kooi theoretical profiles for physical seawater properties: not time-dependent. Generated in separate python file''' with open('/home/dlobelle/Kooi_data/data_input/profiles.pickle', 'rb') as f: depth,T_z,S_z,rho_z,upsilon_z,mu_z = pickle.load(f) depth = np.array(depth) '''Loading the Kooi theoretical profiles for biological seawater properties: time-dependent. 
Generated in separate python file''' with open('/home/dlobelle/Kooi_data/data_input/profiles_t.pickle', 'rb') as p: depth,time,A_A_t,mu_A_t = pickle.load(p) time = np.linspace(time0,total_secs,dt_secs+1) '''General functions and kernals''' def Kooi(particle,fieldset,time): #------ CHOOSE AGAIN----- rho_pl = 920. # density of plastic (kg m-3): DEFAULT FOR FIG 1: 920 but full range is: 840, 920, 940, 1050, 1380 (last 2 are initially non-buoyant) r_pl = 1e-04 # radius of plastic (m): DEFAULT FOR FIG 1: 10-3 to 10-6 included but full range is: 10 mm to 0.1 um or 10-2 to 10-7 z = particle.depth # [m] t = particle.temp # [oC] sw_visc = particle.sw_visc # seawatar viscosity[kg m-1 s-1] aa = particle.aa # ambient algal concentration[no m-3] mu_aa = particle.mu_aa/86400. # attached algal growth [s-1] kin_visc = particle.kin_visc # kinematic viscosity[m2 s-1] rho_sw = particle.rho_sw # seawater density [kg m-3] a = particle.a # number of attached algae[no. m-2] vs = particle.vs # particle velocity [m s-1] #------ Constants and algal properties ----- g = 7.32e10/(86400.**2.) # gravitational acceleration (m d-2), now [s-2] k = 1.0306E-13/(86400.**2.) # Boltzmann constant [m2 kg d-2 K-1] now [s-2] (=1.3804E-23) rho_bf = 1388. # density of biofilm ([g m-3] v_a = 2.0E-16 # Volume of 1 algal cell [m-3] m_a = 0.39/86400. # mortality rate, now [s-1] r20 = 0.1/86400. # respiration rate, now [s-1] q10 = 2. # temperature coefficient respiration [-] gamma = 1.728E5/86400. # shear [d-1], now [s-1] #------ Volumes ----- v_pl = (4./3.)*math.pi*r_pl**3. # volume of plastic [m3] theta_pl = 4.*math.pi*r_pl**2. # surface area of plastic particle [m2] r_a = ((3./4.)*(v_a/math.pi))**(1./3.) # radius of algae [m] v_bf = (v_a*a)*theta_pl # volume of biofilm [m3] v_tot = v_bf + v_pl # volume of total [m3] t_bf = ((v_tot*(3./(4.*math.pi)))**(1./3.))-r_pl # biofilm thickness [m] #------ Diffusivity ----- r_tot = r_pl + t_bf # total radius [m] rho_tot = (r_pl**3. * rho_pl + ((r_pl + t_bf)**3. 
- r_pl**3.)*rho_bf)/(r_pl + t_bf)**3. # total density [kg m-3] theta_tot = 4.*math.pi*r_tot**2. # surface area of total [m2] d_pl = k * (t + 273.16)/(6. * math.pi * sw_visc * r_tot) # diffusivity of plastic particle [m2 s-1] d_a = k * (t + 273.16)/(6. * math.pi * sw_visc * r_a) # diffusivity of algal cells [m2 s-1] #------ Encounter rates ----- beta_abrown = 4.*math.pi*(d_pl + d_a)*(r_tot + r_a) # Brownian motion [m3 s-1] beta_ashear = 1.3*gamma*((r_tot + r_a)**3.) # advective shear [m3 s-1] beta_aset = (1./2.)*math.pi*r_tot**2. * abs(vs) # differential settling [m3 s-1] beta_a = beta_abrown + beta_ashear + beta_aset # collision rate [m3 s-1] #------ Attached algal growth (Eq. 11 in Kooi et al. 2017) ----- a_coll = (beta_a*aa)/theta_pl a_growth = mu_aa*a a_mort = m_a*a a_resp = (q10**((t-20.)/10.))*r20*a particle.a += (a_coll + a_growth - a_mort - a_resp) * particle.dt dn = 2. * (r_tot) # equivalent spherical diameter [m] delta_rho = (rho_tot - rho_sw)/rho_sw # normalised difference in density between total plastic+bf and seawater[-] dstar = ((rho_tot - rho_sw) * g * dn**3.)/(rho_sw * kin_visc**2.) # [-] if dstar > 5e9: w = 1000. elif dstar <0.05: w = (dstar**2.) *1.71E-4 else: w = 10.**(-3.76715 + (1.92944*math.log10(dstar)) - (0.09815*math.log10(dstar)**2.) - (0.00575*math.log10(dstar)**3.) + (0.00056*math.log10(dstar)**4.)) #------ Settling of particle ----- if delta_rho > 0: # sinks vs = (g * kin_visc * w * delta_rho)**(1./3.) else: #rises a_del_rho = delta_rho*-1. vs = -1.*(g * kin_visc * w * a_del_rho)**(1./3.) 
# m s-1 particle.vs_init = vs # initial particle velocity, before forcing a 0 m s-1 value when particle is above 0.6 m (in loop below) z0 = z + vs * particle.dt if z0 <=0.6 or z0 >= 4000.: # NEMO's 'surface depth' vs = 0 particle.depth = 0.6 else: particle.depth += vs * particle.dt particle.vs = vs def DeleteParticle(particle, fieldset, time): """Kernel for deleting particles if they are out of bounds.""" print('particle is deleted') #print(particle.lon, particle.lat, particle.depth) particle.delete() # def Sink(particle, fieldset, time): # """Test to check that adding constant sinking speed works (to be replaced with Kooi equation later)""" # sp = 10./86400. #The sinkspeed m/day (CAN CHANGE THIS LATER- in Kooi et al. 2017 for particle of 0.1mm = 100 m d-1) # particle.depth += sp * particle.dt #(sp/(24*60*60)) * particle.dt # m/s : 1e-3 def Profiles(particle, fieldset, time): particle.temp = fieldset.T[time, particle.depth,particle.lat,particle.lon] particle.rho_sw = fieldset.D[time,particle.depth,particle.lat,particle.lon] particle.kin_visc = fieldset.KV[time,particle.depth,particle.lat,particle.lon] particle.sw_visc = fieldset.SV[time,particle.depth,particle.lat,particle.lon] particle.aa = fieldset.AA[time,particle.depth,particle.lat,particle.lon] particle.mu_aa = fieldset.AAmu[time,particle.depth,particle.lat,particle.lon] """ Defining the particle class """ class
(JITParticle): u = Variable('u', dtype=np.float32,to_write=False) v = Variable('v', dtype=np.float32,to_write=False) w = Variable('w',dtype=np.float32,to_write=True) temp = Variable('temp',dtype=np.float32,to_write=True) rho_sw = Variable('rho_sw',dtype=np.float32,to_write=False) kin_visc = Variable('kin_visc',dtype=np.float32,to_write=False) sw_visc = Variable('sw_visc',dtype=np.float32,to_write=False) aa = Variable('aa',dtype=np.float32,to_write=True) mu_aa = Variable('mu_aa',dtype=np.float32,to_write=False) a = Variable('a',dtype=np.float32,to_write=True) vs = Variable('vs',dtype=np.float32,to_write=True) vs_init = Variable('vs_init',dtype=np.float32,to_write=True) rho_tot = Variable('rho_tot',dtype=np.float32,to_write=True) """ Defining the fieldset""" depth = np.array(depth) S = np.transpose(np.tile(np.array(S_z),(len(lat),len(lon),len(time),1)), (2,3,0,1))*1000. # salinity (in Kooi equations/profiles, the salinity was in kg/kg so now converting to g/kg) T = np.transpose(np.tile(np.array(T_z),(len(lat),len(lon),len(time),1)),(2,3,0,1)) # temperature D = np.transpose(np.tile(np.array(rho_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # density KV = np.transpose(np.tile(np.array(upsilon_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # kinematic viscosity SV = np.transpose(np.tile(np.array(mu_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # dynamic viscosity of seawater AA = np.transpose(np.tile(np.array(A_A_t),(len(lat),len(lon),simdays,1)), (2,3,0,1)) # ambient algae AAmu = np.transpose(np.tile(np.array(mu_A_t),(len(lat),len(lon),simdays,1)), (2,3,0,1)) # ambient algae growth U = np.zeros(shape=(len(time),len(S_z),len(lat),len(lon))) # this is just a filler since the particle set must have a U component (eastward velocity) V = np.zeros(shape=(len(time),len(S_z),len(lat),len(lon))) # this is just a filler since the particle set must have a V component (northward velocity) data = {'U': U, 'V': V, 'T': T, 'D': D, 'KV': KV, 'SV': SV, 'AA': AA, 'AAmu': AAmu} 
dimensions = {'U': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'V': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'T': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'D': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'KV': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'SV': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'AA':{'time': time, 'depth': depth, 'lat': lat, 'lon': lon}, 'AAmu':{'time': time, 'depth': depth, 'lat': lat, 'lon': lon}} fieldset = FieldSet.from_data(data, dimensions, allow_time_extrapolation = True) #transpose=True, pset = ParticleSet.from_list(fieldset=fieldset, # the fields on which the particles are advected pclass=plastic_particle, # the type of particles lon=-160., # a vector of release longitudes lat=36., time = [0], depth = [0.6]) """ Kernal + Execution""" kernels = pset.Kernel(AdvectionRK4) + pset.Kernel(Profiles) + pset.Kernel(Kooi) dirwrite = '/home/dlobelle/Kooi_data/data_output/1D_results/' outfile = dirwrite+'Kooionly_1D_'+str(round(simdays,2))+'d_rho'+rho_pl+'_rpl'+r_pl+'_'+str(secsdt)+'dtsecs_'+str(round(secsoutdt/3600.,2))+'hrsoutdt.nc' pfile= ParticleFile(outfile, pset, outputdt=delta(seconds = secsoutdt)) pset.execute(kernels, runtime=delta(days=simdays), dt=delta(seconds = secsdt), output_file=pfile, verbose_progress=True, recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle}) pfile.close() print('Execution finished')
plastic_particle
identifier_name
Kooi_NPacific_1D.py
# created 19/12/19- North Pacific: Kooi et al. 2017 in 1D (depth) from parcels import FieldSet, ParticleSet, JITParticle, ScipyParticle, AdvectionRK4_3D, AdvectionRK4, ErrorCode, ParticleFile, Variable, Field, NestedField, VectorField, timer from datetime import timedelta as delta from datetime import datetime import numpy as np import math from glob import glob import os import xarray as xr import sys import time as timelib import matplotlib.pyplot as plt import warnings import pickle import matplotlib.ticker as mtick import pandas as pd import operator from numpy import * import scipy.linalg import math as math warnings.filterwarnings("ignore") #------ CHOOSE (Note: the same values must also be placed in the Kooi kernel: lines 53 and 54) ----- rho_pl = "920" # density of plastic (kg m-3): DEFAULT FOR FIG 1: 920 but full range is: 840, 920, 940, 1050, 1380 (last 2 are initially non-buoyant) r_pl = "1e-04" # radius of plastic (m): DEFAULT FOR FIG 1: 10-3 to 10-6 included but full range is: 10 mm to 0.1 um or 10-2 to 10-7 lon = np.array([-161,-159]) #lon release locations lat = np.array([35,37]) #lat release locations simdays = 150 #number of days running the sim secsdt = 60 #timestep of sim time0 = 0 secsoutdt = 60*60 # seconds in an hour (must be in hours due to algal pickle profiles being hours) total_secs = secsoutdt*24.*simdays - secsoutdt # total time (in seconds) being run for the sim dt_secs = total_secs/secsoutdt '''Loading the Kooi theoretical profiles for physical seawater properties: not time-dependent. Generated in separate python file''' with open('/home/dlobelle/Kooi_data/data_input/profiles.pickle', 'rb') as f: depth,T_z,S_z,rho_z,upsilon_z,mu_z = pickle.load(f) depth = np.array(depth) '''Loading the Kooi theoretical profiles for biological seawater properties: time-dependent. 
Generated in separate python file''' with open('/home/dlobelle/Kooi_data/data_input/profiles_t.pickle', 'rb') as p: depth,time,A_A_t,mu_A_t = pickle.load(p) time = np.linspace(time0,total_secs,dt_secs+1) '''General functions and kernals''' def Kooi(particle,fieldset,time): #------ CHOOSE AGAIN----- rho_pl = 920. # density of plastic (kg m-3): DEFAULT FOR FIG 1: 920 but full range is: 840, 920, 940, 1050, 1380 (last 2 are initially non-buoyant) r_pl = 1e-04 # radius of plastic (m): DEFAULT FOR FIG 1: 10-3 to 10-6 included but full range is: 10 mm to 0.1 um or 10-2 to 10-7 z = particle.depth # [m] t = particle.temp # [oC] sw_visc = particle.sw_visc # seawatar viscosity[kg m-1 s-1] aa = particle.aa # ambient algal concentration[no m-3] mu_aa = particle.mu_aa/86400. # attached algal growth [s-1] kin_visc = particle.kin_visc # kinematic viscosity[m2 s-1] rho_sw = particle.rho_sw # seawater density [kg m-3] a = particle.a # number of attached algae[no. m-2] vs = particle.vs # particle velocity [m s-1] #------ Constants and algal properties ----- g = 7.32e10/(86400.**2.) # gravitational acceleration (m d-2), now [s-2] k = 1.0306E-13/(86400.**2.) # Boltzmann constant [m2 kg d-2 K-1] now [s-2] (=1.3804E-23) rho_bf = 1388. # density of biofilm ([g m-3] v_a = 2.0E-16 # Volume of 1 algal cell [m-3] m_a = 0.39/86400. # mortality rate, now [s-1] r20 = 0.1/86400. # respiration rate, now [s-1] q10 = 2. # temperature coefficient respiration [-] gamma = 1.728E5/86400. # shear [d-1], now [s-1] #------ Volumes ----- v_pl = (4./3.)*math.pi*r_pl**3. # volume of plastic [m3] theta_pl = 4.*math.pi*r_pl**2. # surface area of plastic particle [m2] r_a = ((3./4.)*(v_a/math.pi))**(1./3.) # radius of algae [m] v_bf = (v_a*a)*theta_pl # volume of biofilm [m3] v_tot = v_bf + v_pl # volume of total [m3] t_bf = ((v_tot*(3./(4.*math.pi)))**(1./3.))-r_pl # biofilm thickness [m] #------ Diffusivity ----- r_tot = r_pl + t_bf # total radius [m] rho_tot = (r_pl**3. * rho_pl + ((r_pl + t_bf)**3. 
- r_pl**3.)*rho_bf)/(r_pl + t_bf)**3. # total density [kg m-3] theta_tot = 4.*math.pi*r_tot**2. # surface area of total [m2] d_pl = k * (t + 273.16)/(6. * math.pi * sw_visc * r_tot) # diffusivity of plastic particle [m2 s-1] d_a = k * (t + 273.16)/(6. * math.pi * sw_visc * r_a) # diffusivity of algal cells [m2 s-1] #------ Encounter rates ----- beta_abrown = 4.*math.pi*(d_pl + d_a)*(r_tot + r_a) # Brownian motion [m3 s-1] beta_ashear = 1.3*gamma*((r_tot + r_a)**3.) # advective shear [m3 s-1] beta_aset = (1./2.)*math.pi*r_tot**2. * abs(vs) # differential settling [m3 s-1] beta_a = beta_abrown + beta_ashear + beta_aset # collision rate [m3 s-1] #------ Attached algal growth (Eq. 11 in Kooi et al. 2017) ----- a_coll = (beta_a*aa)/theta_pl a_growth = mu_aa*a a_mort = m_a*a a_resp = (q10**((t-20.)/10.))*r20*a particle.a += (a_coll + a_growth - a_mort - a_resp) * particle.dt dn = 2. * (r_tot) # equivalent spherical diameter [m] delta_rho = (rho_tot - rho_sw)/rho_sw # normalised difference in density between total plastic+bf and seawater[-] dstar = ((rho_tot - rho_sw) * g * dn**3.)/(rho_sw * kin_visc**2.) # [-] if dstar > 5e9: w = 1000. elif dstar <0.05: w = (dstar**2.) *1.71E-4 else: w = 10.**(-3.76715 + (1.92944*math.log10(dstar)) - (0.09815*math.log10(dstar)**2.) - (0.00575*math.log10(dstar)**3.) + (0.00056*math.log10(dstar)**4.)) #------ Settling of particle ----- if delta_rho > 0: # sinks vs = (g * kin_visc * w * delta_rho)**(1./3.) else: #rises a_del_rho = delta_rho*-1. vs = -1.*(g * kin_visc * w * a_del_rho)**(1./3.) 
# m s-1 particle.vs_init = vs # initial particle velocity, before forcing a 0 m s-1 value when particle is above 0.6 m (in loop below) z0 = z + vs * particle.dt if z0 <=0.6 or z0 >= 4000.: # NEMO's 'surface depth' vs = 0 particle.depth = 0.6 else: particle.depth += vs * particle.dt particle.vs = vs def DeleteParticle(particle, fieldset, time): """Kernel for deleting particles if they are out of bounds.""" print('particle is deleted') #print(particle.lon, particle.lat, particle.depth) particle.delete() # def Sink(particle, fieldset, time): # """Test to check that adding constant sinking speed works (to be replaced with Kooi equation later)""" # sp = 10./86400. #The sinkspeed m/day (CAN CHANGE THIS LATER- in Kooi et al. 2017 for particle of 0.1mm = 100 m d-1) # particle.depth += sp * particle.dt #(sp/(24*60*60)) * particle.dt # m/s : 1e-3 def Profiles(particle, fieldset, time): particle.temp = fieldset.T[time, particle.depth,particle.lat,particle.lon] particle.rho_sw = fieldset.D[time,particle.depth,particle.lat,particle.lon] particle.kin_visc = fieldset.KV[time,particle.depth,particle.lat,particle.lon] particle.sw_visc = fieldset.SV[time,particle.depth,particle.lat,particle.lon] particle.aa = fieldset.AA[time,particle.depth,particle.lat,particle.lon] particle.mu_aa = fieldset.AAmu[time,particle.depth,particle.lat,particle.lon] """ Defining the particle class """
temp = Variable('temp',dtype=np.float32,to_write=True) rho_sw = Variable('rho_sw',dtype=np.float32,to_write=False) kin_visc = Variable('kin_visc',dtype=np.float32,to_write=False) sw_visc = Variable('sw_visc',dtype=np.float32,to_write=False) aa = Variable('aa',dtype=np.float32,to_write=True) mu_aa = Variable('mu_aa',dtype=np.float32,to_write=False) a = Variable('a',dtype=np.float32,to_write=True) vs = Variable('vs',dtype=np.float32,to_write=True) vs_init = Variable('vs_init',dtype=np.float32,to_write=True) rho_tot = Variable('rho_tot',dtype=np.float32,to_write=True) """ Defining the fieldset""" depth = np.array(depth) S = np.transpose(np.tile(np.array(S_z),(len(lat),len(lon),len(time),1)), (2,3,0,1))*1000. # salinity (in Kooi equations/profiles, the salinity was in kg/kg so now converting to g/kg) T = np.transpose(np.tile(np.array(T_z),(len(lat),len(lon),len(time),1)),(2,3,0,1)) # temperature D = np.transpose(np.tile(np.array(rho_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # density KV = np.transpose(np.tile(np.array(upsilon_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # kinematic viscosity SV = np.transpose(np.tile(np.array(mu_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # dynamic viscosity of seawater AA = np.transpose(np.tile(np.array(A_A_t),(len(lat),len(lon),simdays,1)), (2,3,0,1)) # ambient algae AAmu = np.transpose(np.tile(np.array(mu_A_t),(len(lat),len(lon),simdays,1)), (2,3,0,1)) # ambient algae growth U = np.zeros(shape=(len(time),len(S_z),len(lat),len(lon))) # this is just a filler since the particle set must have a U component (eastward velocity) V = np.zeros(shape=(len(time),len(S_z),len(lat),len(lon))) # this is just a filler since the particle set must have a V component (northward velocity) data = {'U': U, 'V': V, 'T': T, 'D': D, 'KV': KV, 'SV': SV, 'AA': AA, 'AAmu': AAmu} dimensions = {'U': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'V': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'T': {'time': time,'depth': depth, 
'lat': lat, 'lon': lon}, 'D': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'KV': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'SV': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'AA':{'time': time, 'depth': depth, 'lat': lat, 'lon': lon}, 'AAmu':{'time': time, 'depth': depth, 'lat': lat, 'lon': lon}} fieldset = FieldSet.from_data(data, dimensions, allow_time_extrapolation = True) #transpose=True, pset = ParticleSet.from_list(fieldset=fieldset, # the fields on which the particles are advected pclass=plastic_particle, # the type of particles lon=-160., # a vector of release longitudes lat=36., time = [0], depth = [0.6]) """ Kernal + Execution""" kernels = pset.Kernel(AdvectionRK4) + pset.Kernel(Profiles) + pset.Kernel(Kooi) dirwrite = '/home/dlobelle/Kooi_data/data_output/1D_results/' outfile = dirwrite+'Kooionly_1D_'+str(round(simdays,2))+'d_rho'+rho_pl+'_rpl'+r_pl+'_'+str(secsdt)+'dtsecs_'+str(round(secsoutdt/3600.,2))+'hrsoutdt.nc' pfile= ParticleFile(outfile, pset, outputdt=delta(seconds = secsoutdt)) pset.execute(kernels, runtime=delta(days=simdays), dt=delta(seconds = secsdt), output_file=pfile, verbose_progress=True, recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle}) pfile.close() print('Execution finished')
class plastic_particle(JITParticle): u = Variable('u', dtype=np.float32,to_write=False) v = Variable('v', dtype=np.float32,to_write=False) w = Variable('w',dtype=np.float32,to_write=True)
random_line_split
Kooi_NPacific_1D.py
# created 19/12/19- North Pacific: Kooi et al. 2017 in 1D (depth) from parcels import FieldSet, ParticleSet, JITParticle, ScipyParticle, AdvectionRK4_3D, AdvectionRK4, ErrorCode, ParticleFile, Variable, Field, NestedField, VectorField, timer from datetime import timedelta as delta from datetime import datetime import numpy as np import math from glob import glob import os import xarray as xr import sys import time as timelib import matplotlib.pyplot as plt import warnings import pickle import matplotlib.ticker as mtick import pandas as pd import operator from numpy import * import scipy.linalg import math as math warnings.filterwarnings("ignore") #------ CHOOSE (Note: the same values must also be placed in the Kooi kernel: lines 53 and 54) ----- rho_pl = "920" # density of plastic (kg m-3): DEFAULT FOR FIG 1: 920 but full range is: 840, 920, 940, 1050, 1380 (last 2 are initially non-buoyant) r_pl = "1e-04" # radius of plastic (m): DEFAULT FOR FIG 1: 10-3 to 10-6 included but full range is: 10 mm to 0.1 um or 10-2 to 10-7 lon = np.array([-161,-159]) #lon release locations lat = np.array([35,37]) #lat release locations simdays = 150 #number of days running the sim secsdt = 60 #timestep of sim time0 = 0 secsoutdt = 60*60 # seconds in an hour (must be in hours due to algal pickle profiles being hours) total_secs = secsoutdt*24.*simdays - secsoutdt # total time (in seconds) being run for the sim dt_secs = total_secs/secsoutdt '''Loading the Kooi theoretical profiles for physical seawater properties: not time-dependent. Generated in separate python file''' with open('/home/dlobelle/Kooi_data/data_input/profiles.pickle', 'rb') as f: depth,T_z,S_z,rho_z,upsilon_z,mu_z = pickle.load(f) depth = np.array(depth) '''Loading the Kooi theoretical profiles for biological seawater properties: time-dependent. 
Generated in separate python file''' with open('/home/dlobelle/Kooi_data/data_input/profiles_t.pickle', 'rb') as p: depth,time,A_A_t,mu_A_t = pickle.load(p) time = np.linspace(time0,total_secs,dt_secs+1) '''General functions and kernals''' def Kooi(particle,fieldset,time): #------ CHOOSE AGAIN----- rho_pl = 920. # density of plastic (kg m-3): DEFAULT FOR FIG 1: 920 but full range is: 840, 920, 940, 1050, 1380 (last 2 are initially non-buoyant) r_pl = 1e-04 # radius of plastic (m): DEFAULT FOR FIG 1: 10-3 to 10-6 included but full range is: 10 mm to 0.1 um or 10-2 to 10-7 z = particle.depth # [m] t = particle.temp # [oC] sw_visc = particle.sw_visc # seawatar viscosity[kg m-1 s-1] aa = particle.aa # ambient algal concentration[no m-3] mu_aa = particle.mu_aa/86400. # attached algal growth [s-1] kin_visc = particle.kin_visc # kinematic viscosity[m2 s-1] rho_sw = particle.rho_sw # seawater density [kg m-3] a = particle.a # number of attached algae[no. m-2] vs = particle.vs # particle velocity [m s-1] #------ Constants and algal properties ----- g = 7.32e10/(86400.**2.) # gravitational acceleration (m d-2), now [s-2] k = 1.0306E-13/(86400.**2.) # Boltzmann constant [m2 kg d-2 K-1] now [s-2] (=1.3804E-23) rho_bf = 1388. # density of biofilm ([g m-3] v_a = 2.0E-16 # Volume of 1 algal cell [m-3] m_a = 0.39/86400. # mortality rate, now [s-1] r20 = 0.1/86400. # respiration rate, now [s-1] q10 = 2. # temperature coefficient respiration [-] gamma = 1.728E5/86400. # shear [d-1], now [s-1] #------ Volumes ----- v_pl = (4./3.)*math.pi*r_pl**3. # volume of plastic [m3] theta_pl = 4.*math.pi*r_pl**2. # surface area of plastic particle [m2] r_a = ((3./4.)*(v_a/math.pi))**(1./3.) # radius of algae [m] v_bf = (v_a*a)*theta_pl # volume of biofilm [m3] v_tot = v_bf + v_pl # volume of total [m3] t_bf = ((v_tot*(3./(4.*math.pi)))**(1./3.))-r_pl # biofilm thickness [m] #------ Diffusivity ----- r_tot = r_pl + t_bf # total radius [m] rho_tot = (r_pl**3. * rho_pl + ((r_pl + t_bf)**3. 
- r_pl**3.)*rho_bf)/(r_pl + t_bf)**3. # total density [kg m-3] theta_tot = 4.*math.pi*r_tot**2. # surface area of total [m2] d_pl = k * (t + 273.16)/(6. * math.pi * sw_visc * r_tot) # diffusivity of plastic particle [m2 s-1] d_a = k * (t + 273.16)/(6. * math.pi * sw_visc * r_a) # diffusivity of algal cells [m2 s-1] #------ Encounter rates ----- beta_abrown = 4.*math.pi*(d_pl + d_a)*(r_tot + r_a) # Brownian motion [m3 s-1] beta_ashear = 1.3*gamma*((r_tot + r_a)**3.) # advective shear [m3 s-1] beta_aset = (1./2.)*math.pi*r_tot**2. * abs(vs) # differential settling [m3 s-1] beta_a = beta_abrown + beta_ashear + beta_aset # collision rate [m3 s-1] #------ Attached algal growth (Eq. 11 in Kooi et al. 2017) ----- a_coll = (beta_a*aa)/theta_pl a_growth = mu_aa*a a_mort = m_a*a a_resp = (q10**((t-20.)/10.))*r20*a particle.a += (a_coll + a_growth - a_mort - a_resp) * particle.dt dn = 2. * (r_tot) # equivalent spherical diameter [m] delta_rho = (rho_tot - rho_sw)/rho_sw # normalised difference in density between total plastic+bf and seawater[-] dstar = ((rho_tot - rho_sw) * g * dn**3.)/(rho_sw * kin_visc**2.) # [-] if dstar > 5e9: w = 1000. elif dstar <0.05:
else: w = 10.**(-3.76715 + (1.92944*math.log10(dstar)) - (0.09815*math.log10(dstar)**2.) - (0.00575*math.log10(dstar)**3.) + (0.00056*math.log10(dstar)**4.)) #------ Settling of particle ----- if delta_rho > 0: # sinks vs = (g * kin_visc * w * delta_rho)**(1./3.) else: #rises a_del_rho = delta_rho*-1. vs = -1.*(g * kin_visc * w * a_del_rho)**(1./3.) # m s-1 particle.vs_init = vs # initial particle velocity, before forcing a 0 m s-1 value when particle is above 0.6 m (in loop below) z0 = z + vs * particle.dt if z0 <=0.6 or z0 >= 4000.: # NEMO's 'surface depth' vs = 0 particle.depth = 0.6 else: particle.depth += vs * particle.dt particle.vs = vs def DeleteParticle(particle, fieldset, time): """Kernel for deleting particles if they are out of bounds.""" print('particle is deleted') #print(particle.lon, particle.lat, particle.depth) particle.delete() # def Sink(particle, fieldset, time): # """Test to check that adding constant sinking speed works (to be replaced with Kooi equation later)""" # sp = 10./86400. #The sinkspeed m/day (CAN CHANGE THIS LATER- in Kooi et al. 
2017 for particle of 0.1mm = 100 m d-1) # particle.depth += sp * particle.dt #(sp/(24*60*60)) * particle.dt # m/s : 1e-3 def Profiles(particle, fieldset, time): particle.temp = fieldset.T[time, particle.depth,particle.lat,particle.lon] particle.rho_sw = fieldset.D[time,particle.depth,particle.lat,particle.lon] particle.kin_visc = fieldset.KV[time,particle.depth,particle.lat,particle.lon] particle.sw_visc = fieldset.SV[time,particle.depth,particle.lat,particle.lon] particle.aa = fieldset.AA[time,particle.depth,particle.lat,particle.lon] particle.mu_aa = fieldset.AAmu[time,particle.depth,particle.lat,particle.lon] """ Defining the particle class """ class plastic_particle(JITParticle): u = Variable('u', dtype=np.float32,to_write=False) v = Variable('v', dtype=np.float32,to_write=False) w = Variable('w',dtype=np.float32,to_write=True) temp = Variable('temp',dtype=np.float32,to_write=True) rho_sw = Variable('rho_sw',dtype=np.float32,to_write=False) kin_visc = Variable('kin_visc',dtype=np.float32,to_write=False) sw_visc = Variable('sw_visc',dtype=np.float32,to_write=False) aa = Variable('aa',dtype=np.float32,to_write=True) mu_aa = Variable('mu_aa',dtype=np.float32,to_write=False) a = Variable('a',dtype=np.float32,to_write=True) vs = Variable('vs',dtype=np.float32,to_write=True) vs_init = Variable('vs_init',dtype=np.float32,to_write=True) rho_tot = Variable('rho_tot',dtype=np.float32,to_write=True) """ Defining the fieldset""" depth = np.array(depth) S = np.transpose(np.tile(np.array(S_z),(len(lat),len(lon),len(time),1)), (2,3,0,1))*1000. 
# salinity (in Kooi equations/profiles, the salinity was in kg/kg so now converting to g/kg) T = np.transpose(np.tile(np.array(T_z),(len(lat),len(lon),len(time),1)),(2,3,0,1)) # temperature D = np.transpose(np.tile(np.array(rho_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # density KV = np.transpose(np.tile(np.array(upsilon_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # kinematic viscosity SV = np.transpose(np.tile(np.array(mu_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # dynamic viscosity of seawater AA = np.transpose(np.tile(np.array(A_A_t),(len(lat),len(lon),simdays,1)), (2,3,0,1)) # ambient algae AAmu = np.transpose(np.tile(np.array(mu_A_t),(len(lat),len(lon),simdays,1)), (2,3,0,1)) # ambient algae growth U = np.zeros(shape=(len(time),len(S_z),len(lat),len(lon))) # this is just a filler since the particle set must have a U component (eastward velocity) V = np.zeros(shape=(len(time),len(S_z),len(lat),len(lon))) # this is just a filler since the particle set must have a V component (northward velocity) data = {'U': U, 'V': V, 'T': T, 'D': D, 'KV': KV, 'SV': SV, 'AA': AA, 'AAmu': AAmu} dimensions = {'U': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'V': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'T': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'D': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'KV': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'SV': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'AA':{'time': time, 'depth': depth, 'lat': lat, 'lon': lon}, 'AAmu':{'time': time, 'depth': depth, 'lat': lat, 'lon': lon}} fieldset = FieldSet.from_data(data, dimensions, allow_time_extrapolation = True) #transpose=True, pset = ParticleSet.from_list(fieldset=fieldset, # the fields on which the particles are advected pclass=plastic_particle, # the type of particles lon=-160., # a vector of release longitudes lat=36., time = [0], depth = [0.6]) """ Kernal + Execution""" kernels = pset.Kernel(AdvectionRK4) 
+ pset.Kernel(Profiles) + pset.Kernel(Kooi) dirwrite = '/home/dlobelle/Kooi_data/data_output/1D_results/' outfile = dirwrite+'Kooionly_1D_'+str(round(simdays,2))+'d_rho'+rho_pl+'_rpl'+r_pl+'_'+str(secsdt)+'dtsecs_'+str(round(secsoutdt/3600.,2))+'hrsoutdt.nc' pfile= ParticleFile(outfile, pset, outputdt=delta(seconds = secsoutdt)) pset.execute(kernels, runtime=delta(days=simdays), dt=delta(seconds = secsdt), output_file=pfile, verbose_progress=True, recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle}) pfile.close() print('Execution finished')
w = (dstar**2.) *1.71E-4
conditional_block
Kooi_NPacific_1D.py
# created 19/12/19- North Pacific: Kooi et al. 2017 in 1D (depth) from parcels import FieldSet, ParticleSet, JITParticle, ScipyParticle, AdvectionRK4_3D, AdvectionRK4, ErrorCode, ParticleFile, Variable, Field, NestedField, VectorField, timer from datetime import timedelta as delta from datetime import datetime import numpy as np import math from glob import glob import os import xarray as xr import sys import time as timelib import matplotlib.pyplot as plt import warnings import pickle import matplotlib.ticker as mtick import pandas as pd import operator from numpy import * import scipy.linalg import math as math warnings.filterwarnings("ignore") #------ CHOOSE (Note: the same values must also be placed in the Kooi kernel: lines 53 and 54) ----- rho_pl = "920" # density of plastic (kg m-3): DEFAULT FOR FIG 1: 920 but full range is: 840, 920, 940, 1050, 1380 (last 2 are initially non-buoyant) r_pl = "1e-04" # radius of plastic (m): DEFAULT FOR FIG 1: 10-3 to 10-6 included but full range is: 10 mm to 0.1 um or 10-2 to 10-7 lon = np.array([-161,-159]) #lon release locations lat = np.array([35,37]) #lat release locations simdays = 150 #number of days running the sim secsdt = 60 #timestep of sim time0 = 0 secsoutdt = 60*60 # seconds in an hour (must be in hours due to algal pickle profiles being hours) total_secs = secsoutdt*24.*simdays - secsoutdt # total time (in seconds) being run for the sim dt_secs = total_secs/secsoutdt '''Loading the Kooi theoretical profiles for physical seawater properties: not time-dependent. Generated in separate python file''' with open('/home/dlobelle/Kooi_data/data_input/profiles.pickle', 'rb') as f: depth,T_z,S_z,rho_z,upsilon_z,mu_z = pickle.load(f) depth = np.array(depth) '''Loading the Kooi theoretical profiles for biological seawater properties: time-dependent. 
Generated in separate python file''' with open('/home/dlobelle/Kooi_data/data_input/profiles_t.pickle', 'rb') as p: depth,time,A_A_t,mu_A_t = pickle.load(p) time = np.linspace(time0,total_secs,dt_secs+1) '''General functions and kernals''' def Kooi(particle,fieldset,time): #------ CHOOSE AGAIN----- rho_pl = 920. # density of plastic (kg m-3): DEFAULT FOR FIG 1: 920 but full range is: 840, 920, 940, 1050, 1380 (last 2 are initially non-buoyant) r_pl = 1e-04 # radius of plastic (m): DEFAULT FOR FIG 1: 10-3 to 10-6 included but full range is: 10 mm to 0.1 um or 10-2 to 10-7 z = particle.depth # [m] t = particle.temp # [oC] sw_visc = particle.sw_visc # seawatar viscosity[kg m-1 s-1] aa = particle.aa # ambient algal concentration[no m-3] mu_aa = particle.mu_aa/86400. # attached algal growth [s-1] kin_visc = particle.kin_visc # kinematic viscosity[m2 s-1] rho_sw = particle.rho_sw # seawater density [kg m-3] a = particle.a # number of attached algae[no. m-2] vs = particle.vs # particle velocity [m s-1] #------ Constants and algal properties ----- g = 7.32e10/(86400.**2.) # gravitational acceleration (m d-2), now [s-2] k = 1.0306E-13/(86400.**2.) # Boltzmann constant [m2 kg d-2 K-1] now [s-2] (=1.3804E-23) rho_bf = 1388. # density of biofilm ([g m-3] v_a = 2.0E-16 # Volume of 1 algal cell [m-3] m_a = 0.39/86400. # mortality rate, now [s-1] r20 = 0.1/86400. # respiration rate, now [s-1] q10 = 2. # temperature coefficient respiration [-] gamma = 1.728E5/86400. # shear [d-1], now [s-1] #------ Volumes ----- v_pl = (4./3.)*math.pi*r_pl**3. # volume of plastic [m3] theta_pl = 4.*math.pi*r_pl**2. # surface area of plastic particle [m2] r_a = ((3./4.)*(v_a/math.pi))**(1./3.) # radius of algae [m] v_bf = (v_a*a)*theta_pl # volume of biofilm [m3] v_tot = v_bf + v_pl # volume of total [m3] t_bf = ((v_tot*(3./(4.*math.pi)))**(1./3.))-r_pl # biofilm thickness [m] #------ Diffusivity ----- r_tot = r_pl + t_bf # total radius [m] rho_tot = (r_pl**3. * rho_pl + ((r_pl + t_bf)**3. 
- r_pl**3.)*rho_bf)/(r_pl + t_bf)**3. # total density [kg m-3] theta_tot = 4.*math.pi*r_tot**2. # surface area of total [m2] d_pl = k * (t + 273.16)/(6. * math.pi * sw_visc * r_tot) # diffusivity of plastic particle [m2 s-1] d_a = k * (t + 273.16)/(6. * math.pi * sw_visc * r_a) # diffusivity of algal cells [m2 s-1] #------ Encounter rates ----- beta_abrown = 4.*math.pi*(d_pl + d_a)*(r_tot + r_a) # Brownian motion [m3 s-1] beta_ashear = 1.3*gamma*((r_tot + r_a)**3.) # advective shear [m3 s-1] beta_aset = (1./2.)*math.pi*r_tot**2. * abs(vs) # differential settling [m3 s-1] beta_a = beta_abrown + beta_ashear + beta_aset # collision rate [m3 s-1] #------ Attached algal growth (Eq. 11 in Kooi et al. 2017) ----- a_coll = (beta_a*aa)/theta_pl a_growth = mu_aa*a a_mort = m_a*a a_resp = (q10**((t-20.)/10.))*r20*a particle.a += (a_coll + a_growth - a_mort - a_resp) * particle.dt dn = 2. * (r_tot) # equivalent spherical diameter [m] delta_rho = (rho_tot - rho_sw)/rho_sw # normalised difference in density between total plastic+bf and seawater[-] dstar = ((rho_tot - rho_sw) * g * dn**3.)/(rho_sw * kin_visc**2.) # [-] if dstar > 5e9: w = 1000. elif dstar <0.05: w = (dstar**2.) *1.71E-4 else: w = 10.**(-3.76715 + (1.92944*math.log10(dstar)) - (0.09815*math.log10(dstar)**2.) - (0.00575*math.log10(dstar)**3.) + (0.00056*math.log10(dstar)**4.)) #------ Settling of particle ----- if delta_rho > 0: # sinks vs = (g * kin_visc * w * delta_rho)**(1./3.) else: #rises a_del_rho = delta_rho*-1. vs = -1.*(g * kin_visc * w * a_del_rho)**(1./3.) 
# m s-1 particle.vs_init = vs # initial particle velocity, before forcing a 0 m s-1 value when particle is above 0.6 m (in loop below) z0 = z + vs * particle.dt if z0 <=0.6 or z0 >= 4000.: # NEMO's 'surface depth' vs = 0 particle.depth = 0.6 else: particle.depth += vs * particle.dt particle.vs = vs def DeleteParticle(particle, fieldset, time): """Kernel for deleting particles if they are out of bounds.""" print('particle is deleted') #print(particle.lon, particle.lat, particle.depth) particle.delete() # def Sink(particle, fieldset, time): # """Test to check that adding constant sinking speed works (to be replaced with Kooi equation later)""" # sp = 10./86400. #The sinkspeed m/day (CAN CHANGE THIS LATER- in Kooi et al. 2017 for particle of 0.1mm = 100 m d-1) # particle.depth += sp * particle.dt #(sp/(24*60*60)) * particle.dt # m/s : 1e-3 def Profiles(particle, fieldset, time): particle.temp = fieldset.T[time, particle.depth,particle.lat,particle.lon] particle.rho_sw = fieldset.D[time,particle.depth,particle.lat,particle.lon] particle.kin_visc = fieldset.KV[time,particle.depth,particle.lat,particle.lon] particle.sw_visc = fieldset.SV[time,particle.depth,particle.lat,particle.lon] particle.aa = fieldset.AA[time,particle.depth,particle.lat,particle.lon] particle.mu_aa = fieldset.AAmu[time,particle.depth,particle.lat,particle.lon] """ Defining the particle class """ class plastic_particle(JITParticle):
""" Defining the fieldset""" depth = np.array(depth) S = np.transpose(np.tile(np.array(S_z),(len(lat),len(lon),len(time),1)), (2,3,0,1))*1000. # salinity (in Kooi equations/profiles, the salinity was in kg/kg so now converting to g/kg) T = np.transpose(np.tile(np.array(T_z),(len(lat),len(lon),len(time),1)),(2,3,0,1)) # temperature D = np.transpose(np.tile(np.array(rho_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # density KV = np.transpose(np.tile(np.array(upsilon_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # kinematic viscosity SV = np.transpose(np.tile(np.array(mu_z),(len(lat),len(lon),len(time),1)), (2,3,0,1)) # dynamic viscosity of seawater AA = np.transpose(np.tile(np.array(A_A_t),(len(lat),len(lon),simdays,1)), (2,3,0,1)) # ambient algae AAmu = np.transpose(np.tile(np.array(mu_A_t),(len(lat),len(lon),simdays,1)), (2,3,0,1)) # ambient algae growth U = np.zeros(shape=(len(time),len(S_z),len(lat),len(lon))) # this is just a filler since the particle set must have a U component (eastward velocity) V = np.zeros(shape=(len(time),len(S_z),len(lat),len(lon))) # this is just a filler since the particle set must have a V component (northward velocity) data = {'U': U, 'V': V, 'T': T, 'D': D, 'KV': KV, 'SV': SV, 'AA': AA, 'AAmu': AAmu} dimensions = {'U': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'V': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'T': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'D': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'KV': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'SV': {'time': time,'depth': depth, 'lat': lat, 'lon': lon}, 'AA':{'time': time, 'depth': depth, 'lat': lat, 'lon': lon}, 'AAmu':{'time': time, 'depth': depth, 'lat': lat, 'lon': lon}} fieldset = FieldSet.from_data(data, dimensions, allow_time_extrapolation = True) #transpose=True, pset = ParticleSet.from_list(fieldset=fieldset, # the fields on which the particles are advected pclass=plastic_particle, # the type of 
particles lon=-160., # a vector of release longitudes lat=36., time = [0], depth = [0.6]) """ Kernal + Execution""" kernels = pset.Kernel(AdvectionRK4) + pset.Kernel(Profiles) + pset.Kernel(Kooi) dirwrite = '/home/dlobelle/Kooi_data/data_output/1D_results/' outfile = dirwrite+'Kooionly_1D_'+str(round(simdays,2))+'d_rho'+rho_pl+'_rpl'+r_pl+'_'+str(secsdt)+'dtsecs_'+str(round(secsoutdt/3600.,2))+'hrsoutdt.nc' pfile= ParticleFile(outfile, pset, outputdt=delta(seconds = secsoutdt)) pset.execute(kernels, runtime=delta(days=simdays), dt=delta(seconds = secsdt), output_file=pfile, verbose_progress=True, recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle}) pfile.close() print('Execution finished')
u = Variable('u', dtype=np.float32,to_write=False) v = Variable('v', dtype=np.float32,to_write=False) w = Variable('w',dtype=np.float32,to_write=True) temp = Variable('temp',dtype=np.float32,to_write=True) rho_sw = Variable('rho_sw',dtype=np.float32,to_write=False) kin_visc = Variable('kin_visc',dtype=np.float32,to_write=False) sw_visc = Variable('sw_visc',dtype=np.float32,to_write=False) aa = Variable('aa',dtype=np.float32,to_write=True) mu_aa = Variable('mu_aa',dtype=np.float32,to_write=False) a = Variable('a',dtype=np.float32,to_write=True) vs = Variable('vs',dtype=np.float32,to_write=True) vs_init = Variable('vs_init',dtype=np.float32,to_write=True) rho_tot = Variable('rho_tot',dtype=np.float32,to_write=True)
identifier_body
main.rs
extern crate gl; extern crate imgui; extern crate imgui_opengl_renderer; extern crate imgui_sdl2; extern crate sdl2; /* @TODO: - Show line numbers! - Use traits to Send, Parse and Draw - Create a checkbox to enable debugging the parser, queries, etc; - Write a logger to use a imgui window */ use imgui::im_str; use sdl2::event::Event; use sdl2::keyboard::Keycode; use std::collections::HashSet; use std::io::{BufRead, BufReader, Error, Write}; use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio}; use std::sync::mpsc::SendError; use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError}; use std::sync::{atomic::AtomicBool, atomic::Ordering, Arc, Mutex}; use std::thread; use std::thread::sleep; use std::time::Instant; use std::{ io::{self, Read}, process, time::Duration, }; mod debugger; mod graphics; mod parser; mod ui; use graphics::build_text; use std::cmp::max; use ui::is_window_docked; fn send_commands(sender: &Sender<String>, commands: &[&str], time: u64) { for command in commands { send_command(command, &sender).unwrap(); sleep(Duration::from_millis(time)); } } pub fn send_command(command: &str, sender: &Sender<String>) -> Result<(), SendError<String>> { sender.send(String::from(command))?; Ok(()) } pub fn is_split(id: u32) -> bool { unsafe { let node = imgui::sys::igDockBuilderGetNode(id); if std::ptr::null() == node { false } else { imgui::sys::ImGuiDockNode_IsSplitNode(node) } } } const STEP_COMMANDS: [&str; 5] = [ "step\n", "-data-list-register-values x 0 1 2 3 4 5 6 7 8 9 10\n", "-stack-list-locals 1\n", r#" -data-disassemble -s $pc -e "$pc + 20" -- 0 "#, r#" -data-read-memory &arr x 1 1 128 "#, ]; const STARTUP_COMMANDS: [&str; 3] = [ "start\n", "target record-full\n", "-data-list-register-names\n", ]; fn start_graphics<F>(gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, f: F, sender: &Sender<String>) where F: Fn(),
fn start_process_thread( child: &mut Child, receiver: Receiver<String>, gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, ) { let mut stdin = child.stdin.take().unwrap(); let stdout = child.stdout.take().unwrap(); use crate::debugger::DebuggerState; // Receiving commands and sending them to GDB's stdin thread::spawn(move || { for line in receiver { stdin.write_all(line.as_bytes()).unwrap(); } }); // Reading and processing GDB stdout thread::spawn(move || { let mut f = BufReader::new(stdout); loop { let mut line = String::new(); f.read_line(&mut line).unwrap(); print!("[LINE] {}", line); let gdb: &mut DebuggerState = &mut *gdb_mutex.lock().unwrap(); let vals = parser::parse(&line, gdb); println!("[PARSER] {:#?}", &vals); if let Ok(v) = vals { // Here we try to limit the scope were we hold the mutex gdb.update(&v); } } }); } fn start_process( receiver: Receiver<String>, gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, ) -> Child { let mut child = Command::new("gdb") .arg("--interpreter=mi3") .arg("./examples/a.exe") .stdin(Stdio::piped()) .stdout(Stdio::piped()) .spawn() .expect("Failed to start process"); start_process_thread(&mut child, receiver, gdb_mutex); println!("Started process: {}", child.id()); child } fn main() -> Result<(), Error> { let (tx, rx) = channel(); let gdb_mutex = Arc::new(Mutex::new(debugger::DebuggerState::new())); let mut child = start_process(rx, Arc::clone(&gdb_mutex)); send_commands(&tx, &STARTUP_COMMANDS, 100); start_graphics(Arc::clone(&gdb_mutex), move || {}, &tx); child.kill()?; Ok(()) }
{ let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let ttf_context = sdl2::ttf::init().unwrap(); { let gl_attr = video_subsystem.gl_attr(); gl_attr.set_context_profile(sdl2::video::GLProfile::Core); gl_attr.set_context_version(3, 0); } let window = video_subsystem .window("rust-sdl2 demo", 1000, 950) .position_centered() .resizable() .allow_highdpi() .opengl() .build() .unwrap(); let _gl_context = window .gl_create_context() .expect("Couldn't create GL context"); gl::load_with(|s| video_subsystem.gl_get_proc_address(s) as _); let mut imgui = imgui::Context::create(); imgui.io_mut().config_flags |= imgui::ConfigFlags::DOCKING_ENABLE; let mut path = std::path::PathBuf::new(); path.push("imgui"); path.set_extension("ini"); //imgui.set_ini_filename(Some(path)); imgui.set_ini_filename(None); let mut imgui_sdl2 = imgui_sdl2::ImguiSdl2::new(&mut imgui, &window); let renderer = imgui_opengl_renderer::Renderer::new(&mut imgui, |s| { video_subsystem.gl_get_proc_address(s) as _ }); let mut last_frame = Instant::now(); let mut event_pump = sdl_context.event_pump().unwrap(); let mut prev_keys = HashSet::new(); let mut file_txt = String::from("no file loaded"); let mut input_buf = imgui::ImString::new("type something here"); 'running: loop { for event in event_pump.poll_iter() { imgui_sdl2.handle_event(&mut imgui, &event); if imgui_sdl2.ignore_event(&event) { continue; } match event { Event::Quit { .. } | Event::KeyDown { keycode: Some(Keycode::Escape), .. } => break 'running, _ => {} } } let keys = event_pump .keyboard_state() .pressed_scancodes() .filter_map(Keycode::from_scancode) .collect(); // Get the difference between the new and old sets. 
let new_keys = &keys - &prev_keys; // Call step commands if new_keys.contains(&Keycode::Right) { send_commands(sender, &STEP_COMMANDS, 50); } if new_keys.contains(&Keycode::Left) { send_command("reverse-step\n", sender).unwrap(); } prev_keys = keys; imgui_sdl2.prepare_frame(imgui.io_mut(), &window, &event_pump.mouse_state()); let now = Instant::now(); let delta = now - last_frame; let delta_s = delta.as_secs() as f32 + delta.subsec_nanos() as f32 / 1_000_000_000.0; last_frame = now; imgui.io_mut().delta_time = delta_s; let ui = imgui.frame(); let mut left_dock: u32 = 0; let mut left_top: u32 = 0; let mut left_down: u32 = 0; let mut right_dock: u32 = 0; let mut right_top: u32 = 0; let mut right_down: u32 = 0; let mut main_dock: u32 = 0; unsafe { main_dock = imgui::sys::igDockSpaceOverViewport( imgui::sys::igGetMainViewport(), 0, ::std::ptr::null::<imgui::sys::ImGuiWindowClass>(), ); } if !is_split(main_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( main_dock, imgui::Direction::Right as i32, 0.3f32, &mut right_dock, &mut left_dock, ); } } if right_dock != 0 && !is_split(right_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( right_dock, imgui::Direction::Up as i32, 0.5f32, &mut right_top, &mut right_down, ); } } if left_dock != 0 && !is_split(left_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( left_dock, imgui::Direction::Up as i32, 0.65f32, &mut left_top, &mut left_down, ); } } let mut gdb = gdb_mutex.lock().unwrap(); if let Some(str) = gdb.get_file() { file_txt = str; } ui::docked_window(&ui, &mut gdb, "Code", left_top, |ui, gdb| { let mut x = 1.0f32; for (i, l) in file_txt.lines().enumerate() { if (i + 1) == gdb.line as usize { ui.text_colored([x, 0f32, 0f32, 1.0f32], &l); x -= 0.5f32; } else { ui.text_colored([x, x, x, 1.0f32], &l); } } }); ui::docked_window(&ui, &mut gdb, "Vars", right_down, |ui, gdb| { ui.columns(2, im_str!("A"), true); for (k, v) in &gdb.variables { ui.text(k); ui.next_column(); ui.text(v); ui.next_column(); } }); 
ui::docked_window(&ui, &mut gdb, "Regs", right_top, |ui, gdb| { ui.columns(2, im_str!("A"), true); for (k, v) in &gdb.registers_ordered() { ui.text(k); ui.next_column(); ui.text(v); ui.next_column(); } }); ui::docked_window(&ui, &mut gdb, "Asm", left_down, |ui, gdb| { { imgui::TabBar::new(im_str!("test")) .reorderable(true) .build(&ui, || { for (k, v) in &gdb.asm { let s: &imgui::ImStr; let c_str: std::ffi::CString; unsafe { c_str = std::ffi::CString::new(k.as_str()).unwrap(); s = imgui::ImStr::from_utf8_with_nul_unchecked( c_str.as_bytes_with_nul(), ); } let pc_addr = gdb.pc_addr.get(k).unwrap(); imgui::TabItem::new(s).build(&ui, || { ui.text_colored( [0.8f32, 0.8f32, 0.2f32, 1f32], format!("{:#x}", pc_addr), ); ui.separator(); ui.columns(2, im_str!("asm_col"), true); for (addr, line) in v { if line.len() > 0 { if addr == pc_addr { ui.text_colored( [1f32, 0f32, 0f32, 1f32], format!("{:#x}", addr), ); } else { ui.text_colored( [1f32, 1f32, 1f32, 1f32], format!("{:#x}", addr), ); } ui.next_column(); ui.text_colored([1f32, 1f32, 1f32, 1f32], line); ui.next_column(); } } }) } }) } }); ui::docked_window(&ui, &gdb, "Console", left_down, |ui, gdb| { ui.text_colored([1f32, 1f32, 1f32, 1f32], &gdb.console_output); if imgui::InputText::new(ui, im_str!(""), &mut input_buf) .enter_returns_true(true) .build() { let mut cmd = String::from(input_buf.to_str()); cmd.push('\n'); send_command(&cmd, &sender).unwrap(); input_buf.clear(); } }); ui::docked_window(&ui, &gdb, "memory", right_down, |ui, gdb| { let (addr, mem) = &gdb.memory; let mut addr = *addr; let mut s = format!("{:#08x} ", addr); let mut col = 0.2f32; for (i, val) in mem.iter().enumerate() { if *val != 0u64 { col = 1f32; } s.push_str(&format!("{:02x}", val)); s.push(' '); addr += 1; if (i + 1) % 8 == 0 { ui.text_colored([col, col, col, 1f32], &s); // cleaning the string for the next line s = format!("{:#08x} ", addr); col = 0.2f32; } } //@Error maybe some values won't be rendered here }); //ui.show_demo_window(&mut 
true); unsafe { gl::ClearColor(0.2, 0.2, 0.2, 1.0); gl::Clear(gl::COLOR_BUFFER_BIT); } imgui_sdl2.prepare_render(&ui, &window); renderer.render(ui); window.gl_swap_window(); } }
identifier_body
main.rs
extern crate gl; extern crate imgui; extern crate imgui_opengl_renderer; extern crate imgui_sdl2; extern crate sdl2; /* @TODO: - Show line numbers! - Use traits to Send, Parse and Draw - Create a checkbox to enable debugging the parser, queries, etc; - Write a logger to use a imgui window */ use imgui::im_str; use sdl2::event::Event; use sdl2::keyboard::Keycode; use std::collections::HashSet; use std::io::{BufRead, BufReader, Error, Write}; use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio}; use std::sync::mpsc::SendError; use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError}; use std::sync::{atomic::AtomicBool, atomic::Ordering, Arc, Mutex}; use std::thread; use std::thread::sleep; use std::time::Instant; use std::{ io::{self, Read}, process, time::Duration, }; mod debugger; mod graphics; mod parser; mod ui; use graphics::build_text; use std::cmp::max; use ui::is_window_docked; fn send_commands(sender: &Sender<String>, commands: &[&str], time: u64) { for command in commands { send_command(command, &sender).unwrap(); sleep(Duration::from_millis(time)); } } pub fn send_command(command: &str, sender: &Sender<String>) -> Result<(), SendError<String>> { sender.send(String::from(command))?; Ok(()) } pub fn is_split(id: u32) -> bool { unsafe { let node = imgui::sys::igDockBuilderGetNode(id); if std::ptr::null() == node
else { imgui::sys::ImGuiDockNode_IsSplitNode(node) } } } const STEP_COMMANDS: [&str; 5] = [ "step\n", "-data-list-register-values x 0 1 2 3 4 5 6 7 8 9 10\n", "-stack-list-locals 1\n", r#" -data-disassemble -s $pc -e "$pc + 20" -- 0 "#, r#" -data-read-memory &arr x 1 1 128 "#, ]; const STARTUP_COMMANDS: [&str; 3] = [ "start\n", "target record-full\n", "-data-list-register-names\n", ]; fn start_graphics<F>(gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, f: F, sender: &Sender<String>) where F: Fn(), { let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let ttf_context = sdl2::ttf::init().unwrap(); { let gl_attr = video_subsystem.gl_attr(); gl_attr.set_context_profile(sdl2::video::GLProfile::Core); gl_attr.set_context_version(3, 0); } let window = video_subsystem .window("rust-sdl2 demo", 1000, 950) .position_centered() .resizable() .allow_highdpi() .opengl() .build() .unwrap(); let _gl_context = window .gl_create_context() .expect("Couldn't create GL context"); gl::load_with(|s| video_subsystem.gl_get_proc_address(s) as _); let mut imgui = imgui::Context::create(); imgui.io_mut().config_flags |= imgui::ConfigFlags::DOCKING_ENABLE; let mut path = std::path::PathBuf::new(); path.push("imgui"); path.set_extension("ini"); //imgui.set_ini_filename(Some(path)); imgui.set_ini_filename(None); let mut imgui_sdl2 = imgui_sdl2::ImguiSdl2::new(&mut imgui, &window); let renderer = imgui_opengl_renderer::Renderer::new(&mut imgui, |s| { video_subsystem.gl_get_proc_address(s) as _ }); let mut last_frame = Instant::now(); let mut event_pump = sdl_context.event_pump().unwrap(); let mut prev_keys = HashSet::new(); let mut file_txt = String::from("no file loaded"); let mut input_buf = imgui::ImString::new("type something here"); 'running: loop { for event in event_pump.poll_iter() { imgui_sdl2.handle_event(&mut imgui, &event); if imgui_sdl2.ignore_event(&event) { continue; } match event { Event::Quit { .. 
} | Event::KeyDown { keycode: Some(Keycode::Escape), .. } => break 'running, _ => {} } } let keys = event_pump .keyboard_state() .pressed_scancodes() .filter_map(Keycode::from_scancode) .collect(); // Get the difference between the new and old sets. let new_keys = &keys - &prev_keys; // Call step commands if new_keys.contains(&Keycode::Right) { send_commands(sender, &STEP_COMMANDS, 50); } if new_keys.contains(&Keycode::Left) { send_command("reverse-step\n", sender).unwrap(); } prev_keys = keys; imgui_sdl2.prepare_frame(imgui.io_mut(), &window, &event_pump.mouse_state()); let now = Instant::now(); let delta = now - last_frame; let delta_s = delta.as_secs() as f32 + delta.subsec_nanos() as f32 / 1_000_000_000.0; last_frame = now; imgui.io_mut().delta_time = delta_s; let ui = imgui.frame(); let mut left_dock: u32 = 0; let mut left_top: u32 = 0; let mut left_down: u32 = 0; let mut right_dock: u32 = 0; let mut right_top: u32 = 0; let mut right_down: u32 = 0; let mut main_dock: u32 = 0; unsafe { main_dock = imgui::sys::igDockSpaceOverViewport( imgui::sys::igGetMainViewport(), 0, ::std::ptr::null::<imgui::sys::ImGuiWindowClass>(), ); } if !is_split(main_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( main_dock, imgui::Direction::Right as i32, 0.3f32, &mut right_dock, &mut left_dock, ); } } if right_dock != 0 && !is_split(right_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( right_dock, imgui::Direction::Up as i32, 0.5f32, &mut right_top, &mut right_down, ); } } if left_dock != 0 && !is_split(left_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( left_dock, imgui::Direction::Up as i32, 0.65f32, &mut left_top, &mut left_down, ); } } let mut gdb = gdb_mutex.lock().unwrap(); if let Some(str) = gdb.get_file() { file_txt = str; } ui::docked_window(&ui, &mut gdb, "Code", left_top, |ui, gdb| { let mut x = 1.0f32; for (i, l) in file_txt.lines().enumerate() { if (i + 1) == gdb.line as usize { ui.text_colored([x, 0f32, 0f32, 1.0f32], &l); x -= 0.5f32; } else { 
ui.text_colored([x, x, x, 1.0f32], &l); } } }); ui::docked_window(&ui, &mut gdb, "Vars", right_down, |ui, gdb| { ui.columns(2, im_str!("A"), true); for (k, v) in &gdb.variables { ui.text(k); ui.next_column(); ui.text(v); ui.next_column(); } }); ui::docked_window(&ui, &mut gdb, "Regs", right_top, |ui, gdb| { ui.columns(2, im_str!("A"), true); for (k, v) in &gdb.registers_ordered() { ui.text(k); ui.next_column(); ui.text(v); ui.next_column(); } }); ui::docked_window(&ui, &mut gdb, "Asm", left_down, |ui, gdb| { { imgui::TabBar::new(im_str!("test")) .reorderable(true) .build(&ui, || { for (k, v) in &gdb.asm { let s: &imgui::ImStr; let c_str: std::ffi::CString; unsafe { c_str = std::ffi::CString::new(k.as_str()).unwrap(); s = imgui::ImStr::from_utf8_with_nul_unchecked( c_str.as_bytes_with_nul(), ); } let pc_addr = gdb.pc_addr.get(k).unwrap(); imgui::TabItem::new(s).build(&ui, || { ui.text_colored( [0.8f32, 0.8f32, 0.2f32, 1f32], format!("{:#x}", pc_addr), ); ui.separator(); ui.columns(2, im_str!("asm_col"), true); for (addr, line) in v { if line.len() > 0 { if addr == pc_addr { ui.text_colored( [1f32, 0f32, 0f32, 1f32], format!("{:#x}", addr), ); } else { ui.text_colored( [1f32, 1f32, 1f32, 1f32], format!("{:#x}", addr), ); } ui.next_column(); ui.text_colored([1f32, 1f32, 1f32, 1f32], line); ui.next_column(); } } }) } }) } }); ui::docked_window(&ui, &gdb, "Console", left_down, |ui, gdb| { ui.text_colored([1f32, 1f32, 1f32, 1f32], &gdb.console_output); if imgui::InputText::new(ui, im_str!(""), &mut input_buf) .enter_returns_true(true) .build() { let mut cmd = String::from(input_buf.to_str()); cmd.push('\n'); send_command(&cmd, &sender).unwrap(); input_buf.clear(); } }); ui::docked_window(&ui, &gdb, "memory", right_down, |ui, gdb| { let (addr, mem) = &gdb.memory; let mut addr = *addr; let mut s = format!("{:#08x} ", addr); let mut col = 0.2f32; for (i, val) in mem.iter().enumerate() { if *val != 0u64 { col = 1f32; } s.push_str(&format!("{:02x}", val)); s.push(' '); addr 
+= 1; if (i + 1) % 8 == 0 { ui.text_colored([col, col, col, 1f32], &s); // cleaning the string for the next line s = format!("{:#08x} ", addr); col = 0.2f32; } } //@Error maybe some values won't be rendered here }); //ui.show_demo_window(&mut true); unsafe { gl::ClearColor(0.2, 0.2, 0.2, 1.0); gl::Clear(gl::COLOR_BUFFER_BIT); } imgui_sdl2.prepare_render(&ui, &window); renderer.render(ui); window.gl_swap_window(); } } fn start_process_thread( child: &mut Child, receiver: Receiver<String>, gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, ) { let mut stdin = child.stdin.take().unwrap(); let stdout = child.stdout.take().unwrap(); use crate::debugger::DebuggerState; // Receiving commands and sending them to GDB's stdin thread::spawn(move || { for line in receiver { stdin.write_all(line.as_bytes()).unwrap(); } }); // Reading and processing GDB stdout thread::spawn(move || { let mut f = BufReader::new(stdout); loop { let mut line = String::new(); f.read_line(&mut line).unwrap(); print!("[LINE] {}", line); let gdb: &mut DebuggerState = &mut *gdb_mutex.lock().unwrap(); let vals = parser::parse(&line, gdb); println!("[PARSER] {:#?}", &vals); if let Ok(v) = vals { // Here we try to limit the scope were we hold the mutex gdb.update(&v); } } }); } fn start_process( receiver: Receiver<String>, gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, ) -> Child { let mut child = Command::new("gdb") .arg("--interpreter=mi3") .arg("./examples/a.exe") .stdin(Stdio::piped()) .stdout(Stdio::piped()) .spawn() .expect("Failed to start process"); start_process_thread(&mut child, receiver, gdb_mutex); println!("Started process: {}", child.id()); child } fn main() -> Result<(), Error> { let (tx, rx) = channel(); let gdb_mutex = Arc::new(Mutex::new(debugger::DebuggerState::new())); let mut child = start_process(rx, Arc::clone(&gdb_mutex)); send_commands(&tx, &STARTUP_COMMANDS, 100); start_graphics(Arc::clone(&gdb_mutex), move || {}, &tx); child.kill()?; Ok(()) }
{ false }
conditional_block
main.rs
extern crate gl; extern crate imgui; extern crate imgui_opengl_renderer; extern crate imgui_sdl2; extern crate sdl2; /* @TODO: - Show line numbers! - Use traits to Send, Parse and Draw - Create a checkbox to enable debugging the parser, queries, etc; - Write a logger to use a imgui window */ use imgui::im_str; use sdl2::event::Event; use sdl2::keyboard::Keycode; use std::collections::HashSet; use std::io::{BufRead, BufReader, Error, Write}; use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio}; use std::sync::mpsc::SendError; use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError}; use std::sync::{atomic::AtomicBool, atomic::Ordering, Arc, Mutex}; use std::thread; use std::thread::sleep; use std::time::Instant; use std::{ io::{self, Read}, process, time::Duration, }; mod debugger; mod graphics; mod parser; mod ui; use graphics::build_text; use std::cmp::max; use ui::is_window_docked; fn send_commands(sender: &Sender<String>, commands: &[&str], time: u64) { for command in commands { send_command(command, &sender).unwrap(); sleep(Duration::from_millis(time)); } } pub fn send_command(command: &str, sender: &Sender<String>) -> Result<(), SendError<String>> { sender.send(String::from(command))?; Ok(()) } pub fn is_split(id: u32) -> bool { unsafe { let node = imgui::sys::igDockBuilderGetNode(id); if std::ptr::null() == node { false } else { imgui::sys::ImGuiDockNode_IsSplitNode(node) } } } const STEP_COMMANDS: [&str; 5] = [ "step\n", "-data-list-register-values x 0 1 2 3 4 5 6 7 8 9 10\n", "-stack-list-locals 1\n", r#" -data-disassemble -s $pc -e "$pc + 20" -- 0 "#, r#" -data-read-memory &arr x 1 1 128 "#, ]; const STARTUP_COMMANDS: [&str; 3] = [ "start\n", "target record-full\n", "-data-list-register-names\n", ]; fn start_graphics<F>(gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, f: F, sender: &Sender<String>) where F: Fn(), { let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let ttf_context = 
sdl2::ttf::init().unwrap(); { let gl_attr = video_subsystem.gl_attr(); gl_attr.set_context_profile(sdl2::video::GLProfile::Core); gl_attr.set_context_version(3, 0); } let window = video_subsystem .window("rust-sdl2 demo", 1000, 950) .position_centered() .resizable() .allow_highdpi() .opengl() .build() .unwrap(); let _gl_context = window .gl_create_context() .expect("Couldn't create GL context"); gl::load_with(|s| video_subsystem.gl_get_proc_address(s) as _); let mut imgui = imgui::Context::create(); imgui.io_mut().config_flags |= imgui::ConfigFlags::DOCKING_ENABLE; let mut path = std::path::PathBuf::new(); path.push("imgui"); path.set_extension("ini"); //imgui.set_ini_filename(Some(path)); imgui.set_ini_filename(None); let mut imgui_sdl2 = imgui_sdl2::ImguiSdl2::new(&mut imgui, &window); let renderer = imgui_opengl_renderer::Renderer::new(&mut imgui, |s| { video_subsystem.gl_get_proc_address(s) as _ }); let mut last_frame = Instant::now(); let mut event_pump = sdl_context.event_pump().unwrap(); let mut prev_keys = HashSet::new(); let mut file_txt = String::from("no file loaded"); let mut input_buf = imgui::ImString::new("type something here"); 'running: loop { for event in event_pump.poll_iter() { imgui_sdl2.handle_event(&mut imgui, &event); if imgui_sdl2.ignore_event(&event) { continue; } match event { Event::Quit { .. } | Event::KeyDown { keycode: Some(Keycode::Escape), .. } => break 'running, _ => {} } } let keys = event_pump .keyboard_state() .pressed_scancodes() .filter_map(Keycode::from_scancode) .collect(); // Get the difference between the new and old sets. 
let new_keys = &keys - &prev_keys; // Call step commands if new_keys.contains(&Keycode::Right) { send_commands(sender, &STEP_COMMANDS, 50); } if new_keys.contains(&Keycode::Left) { send_command("reverse-step\n", sender).unwrap(); } prev_keys = keys; imgui_sdl2.prepare_frame(imgui.io_mut(), &window, &event_pump.mouse_state()); let now = Instant::now(); let delta = now - last_frame; let delta_s = delta.as_secs() as f32 + delta.subsec_nanos() as f32 / 1_000_000_000.0; last_frame = now; imgui.io_mut().delta_time = delta_s; let ui = imgui.frame(); let mut left_dock: u32 = 0; let mut left_top: u32 = 0; let mut left_down: u32 = 0; let mut right_dock: u32 = 0; let mut right_top: u32 = 0; let mut right_down: u32 = 0; let mut main_dock: u32 = 0; unsafe { main_dock = imgui::sys::igDockSpaceOverViewport( imgui::sys::igGetMainViewport(), 0, ::std::ptr::null::<imgui::sys::ImGuiWindowClass>(), ); } if !is_split(main_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( main_dock, imgui::Direction::Right as i32, 0.3f32, &mut right_dock, &mut left_dock, ); } } if right_dock != 0 && !is_split(right_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( right_dock, imgui::Direction::Up as i32, 0.5f32, &mut right_top, &mut right_down, ); } } if left_dock != 0 && !is_split(left_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( left_dock, imgui::Direction::Up as i32, 0.65f32, &mut left_top, &mut left_down, ); } } let mut gdb = gdb_mutex.lock().unwrap(); if let Some(str) = gdb.get_file() { file_txt = str; } ui::docked_window(&ui, &mut gdb, "Code", left_top, |ui, gdb| { let mut x = 1.0f32; for (i, l) in file_txt.lines().enumerate() { if (i + 1) == gdb.line as usize { ui.text_colored([x, 0f32, 0f32, 1.0f32], &l); x -= 0.5f32;
ui::docked_window(&ui, &mut gdb, "Vars", right_down, |ui, gdb| { ui.columns(2, im_str!("A"), true); for (k, v) in &gdb.variables { ui.text(k); ui.next_column(); ui.text(v); ui.next_column(); } }); ui::docked_window(&ui, &mut gdb, "Regs", right_top, |ui, gdb| { ui.columns(2, im_str!("A"), true); for (k, v) in &gdb.registers_ordered() { ui.text(k); ui.next_column(); ui.text(v); ui.next_column(); } }); ui::docked_window(&ui, &mut gdb, "Asm", left_down, |ui, gdb| { { imgui::TabBar::new(im_str!("test")) .reorderable(true) .build(&ui, || { for (k, v) in &gdb.asm { let s: &imgui::ImStr; let c_str: std::ffi::CString; unsafe { c_str = std::ffi::CString::new(k.as_str()).unwrap(); s = imgui::ImStr::from_utf8_with_nul_unchecked( c_str.as_bytes_with_nul(), ); } let pc_addr = gdb.pc_addr.get(k).unwrap(); imgui::TabItem::new(s).build(&ui, || { ui.text_colored( [0.8f32, 0.8f32, 0.2f32, 1f32], format!("{:#x}", pc_addr), ); ui.separator(); ui.columns(2, im_str!("asm_col"), true); for (addr, line) in v { if line.len() > 0 { if addr == pc_addr { ui.text_colored( [1f32, 0f32, 0f32, 1f32], format!("{:#x}", addr), ); } else { ui.text_colored( [1f32, 1f32, 1f32, 1f32], format!("{:#x}", addr), ); } ui.next_column(); ui.text_colored([1f32, 1f32, 1f32, 1f32], line); ui.next_column(); } } }) } }) } }); ui::docked_window(&ui, &gdb, "Console", left_down, |ui, gdb| { ui.text_colored([1f32, 1f32, 1f32, 1f32], &gdb.console_output); if imgui::InputText::new(ui, im_str!(""), &mut input_buf) .enter_returns_true(true) .build() { let mut cmd = String::from(input_buf.to_str()); cmd.push('\n'); send_command(&cmd, &sender).unwrap(); input_buf.clear(); } }); ui::docked_window(&ui, &gdb, "memory", right_down, |ui, gdb| { let (addr, mem) = &gdb.memory; let mut addr = *addr; let mut s = format!("{:#08x} ", addr); let mut col = 0.2f32; for (i, val) in mem.iter().enumerate() { if *val != 0u64 { col = 1f32; } s.push_str(&format!("{:02x}", val)); s.push(' '); addr += 1; if (i + 1) % 8 == 0 { ui.text_colored([col, 
col, col, 1f32], &s); // cleaning the string for the next line s = format!("{:#08x} ", addr); col = 0.2f32; } } //@Error maybe some values won't be rendered here }); //ui.show_demo_window(&mut true); unsafe { gl::ClearColor(0.2, 0.2, 0.2, 1.0); gl::Clear(gl::COLOR_BUFFER_BIT); } imgui_sdl2.prepare_render(&ui, &window); renderer.render(ui); window.gl_swap_window(); } } fn start_process_thread( child: &mut Child, receiver: Receiver<String>, gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, ) { let mut stdin = child.stdin.take().unwrap(); let stdout = child.stdout.take().unwrap(); use crate::debugger::DebuggerState; // Receiving commands and sending them to GDB's stdin thread::spawn(move || { for line in receiver { stdin.write_all(line.as_bytes()).unwrap(); } }); // Reading and processing GDB stdout thread::spawn(move || { let mut f = BufReader::new(stdout); loop { let mut line = String::new(); f.read_line(&mut line).unwrap(); print!("[LINE] {}", line); let gdb: &mut DebuggerState = &mut *gdb_mutex.lock().unwrap(); let vals = parser::parse(&line, gdb); println!("[PARSER] {:#?}", &vals); if let Ok(v) = vals { // Here we try to limit the scope were we hold the mutex gdb.update(&v); } } }); } fn start_process( receiver: Receiver<String>, gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, ) -> Child { let mut child = Command::new("gdb") .arg("--interpreter=mi3") .arg("./examples/a.exe") .stdin(Stdio::piped()) .stdout(Stdio::piped()) .spawn() .expect("Failed to start process"); start_process_thread(&mut child, receiver, gdb_mutex); println!("Started process: {}", child.id()); child } fn main() -> Result<(), Error> { let (tx, rx) = channel(); let gdb_mutex = Arc::new(Mutex::new(debugger::DebuggerState::new())); let mut child = start_process(rx, Arc::clone(&gdb_mutex)); send_commands(&tx, &STARTUP_COMMANDS, 100); start_graphics(Arc::clone(&gdb_mutex), move || {}, &tx); child.kill()?; Ok(()) }
} else { ui.text_colored([x, x, x, 1.0f32], &l); } } });
random_line_split
main.rs
extern crate gl; extern crate imgui; extern crate imgui_opengl_renderer; extern crate imgui_sdl2; extern crate sdl2; /* @TODO: - Show line numbers! - Use traits to Send, Parse and Draw - Create a checkbox to enable debugging the parser, queries, etc; - Write a logger to use a imgui window */ use imgui::im_str; use sdl2::event::Event; use sdl2::keyboard::Keycode; use std::collections::HashSet; use std::io::{BufRead, BufReader, Error, Write}; use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio}; use std::sync::mpsc::SendError; use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError}; use std::sync::{atomic::AtomicBool, atomic::Ordering, Arc, Mutex}; use std::thread; use std::thread::sleep; use std::time::Instant; use std::{ io::{self, Read}, process, time::Duration, }; mod debugger; mod graphics; mod parser; mod ui; use graphics::build_text; use std::cmp::max; use ui::is_window_docked; fn send_commands(sender: &Sender<String>, commands: &[&str], time: u64) { for command in commands { send_command(command, &sender).unwrap(); sleep(Duration::from_millis(time)); } } pub fn send_command(command: &str, sender: &Sender<String>) -> Result<(), SendError<String>> { sender.send(String::from(command))?; Ok(()) } pub fn is_split(id: u32) -> bool { unsafe { let node = imgui::sys::igDockBuilderGetNode(id); if std::ptr::null() == node { false } else { imgui::sys::ImGuiDockNode_IsSplitNode(node) } } } const STEP_COMMANDS: [&str; 5] = [ "step\n", "-data-list-register-values x 0 1 2 3 4 5 6 7 8 9 10\n", "-stack-list-locals 1\n", r#" -data-disassemble -s $pc -e "$pc + 20" -- 0 "#, r#" -data-read-memory &arr x 1 1 128 "#, ]; const STARTUP_COMMANDS: [&str; 3] = [ "start\n", "target record-full\n", "-data-list-register-names\n", ]; fn start_graphics<F>(gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, f: F, sender: &Sender<String>) where F: Fn(), { let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let ttf_context = 
sdl2::ttf::init().unwrap(); { let gl_attr = video_subsystem.gl_attr(); gl_attr.set_context_profile(sdl2::video::GLProfile::Core); gl_attr.set_context_version(3, 0); } let window = video_subsystem .window("rust-sdl2 demo", 1000, 950) .position_centered() .resizable() .allow_highdpi() .opengl() .build() .unwrap(); let _gl_context = window .gl_create_context() .expect("Couldn't create GL context"); gl::load_with(|s| video_subsystem.gl_get_proc_address(s) as _); let mut imgui = imgui::Context::create(); imgui.io_mut().config_flags |= imgui::ConfigFlags::DOCKING_ENABLE; let mut path = std::path::PathBuf::new(); path.push("imgui"); path.set_extension("ini"); //imgui.set_ini_filename(Some(path)); imgui.set_ini_filename(None); let mut imgui_sdl2 = imgui_sdl2::ImguiSdl2::new(&mut imgui, &window); let renderer = imgui_opengl_renderer::Renderer::new(&mut imgui, |s| { video_subsystem.gl_get_proc_address(s) as _ }); let mut last_frame = Instant::now(); let mut event_pump = sdl_context.event_pump().unwrap(); let mut prev_keys = HashSet::new(); let mut file_txt = String::from("no file loaded"); let mut input_buf = imgui::ImString::new("type something here"); 'running: loop { for event in event_pump.poll_iter() { imgui_sdl2.handle_event(&mut imgui, &event); if imgui_sdl2.ignore_event(&event) { continue; } match event { Event::Quit { .. } | Event::KeyDown { keycode: Some(Keycode::Escape), .. } => break 'running, _ => {} } } let keys = event_pump .keyboard_state() .pressed_scancodes() .filter_map(Keycode::from_scancode) .collect(); // Get the difference between the new and old sets. 
let new_keys = &keys - &prev_keys; // Call step commands if new_keys.contains(&Keycode::Right) { send_commands(sender, &STEP_COMMANDS, 50); } if new_keys.contains(&Keycode::Left) { send_command("reverse-step\n", sender).unwrap(); } prev_keys = keys; imgui_sdl2.prepare_frame(imgui.io_mut(), &window, &event_pump.mouse_state()); let now = Instant::now(); let delta = now - last_frame; let delta_s = delta.as_secs() as f32 + delta.subsec_nanos() as f32 / 1_000_000_000.0; last_frame = now; imgui.io_mut().delta_time = delta_s; let ui = imgui.frame(); let mut left_dock: u32 = 0; let mut left_top: u32 = 0; let mut left_down: u32 = 0; let mut right_dock: u32 = 0; let mut right_top: u32 = 0; let mut right_down: u32 = 0; let mut main_dock: u32 = 0; unsafe { main_dock = imgui::sys::igDockSpaceOverViewport( imgui::sys::igGetMainViewport(), 0, ::std::ptr::null::<imgui::sys::ImGuiWindowClass>(), ); } if !is_split(main_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( main_dock, imgui::Direction::Right as i32, 0.3f32, &mut right_dock, &mut left_dock, ); } } if right_dock != 0 && !is_split(right_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( right_dock, imgui::Direction::Up as i32, 0.5f32, &mut right_top, &mut right_down, ); } } if left_dock != 0 && !is_split(left_dock) { unsafe { imgui::sys::igDockBuilderSplitNode( left_dock, imgui::Direction::Up as i32, 0.65f32, &mut left_top, &mut left_down, ); } } let mut gdb = gdb_mutex.lock().unwrap(); if let Some(str) = gdb.get_file() { file_txt = str; } ui::docked_window(&ui, &mut gdb, "Code", left_top, |ui, gdb| { let mut x = 1.0f32; for (i, l) in file_txt.lines().enumerate() { if (i + 1) == gdb.line as usize { ui.text_colored([x, 0f32, 0f32, 1.0f32], &l); x -= 0.5f32; } else { ui.text_colored([x, x, x, 1.0f32], &l); } } }); ui::docked_window(&ui, &mut gdb, "Vars", right_down, |ui, gdb| { ui.columns(2, im_str!("A"), true); for (k, v) in &gdb.variables { ui.text(k); ui.next_column(); ui.text(v); ui.next_column(); } }); 
ui::docked_window(&ui, &mut gdb, "Regs", right_top, |ui, gdb| { ui.columns(2, im_str!("A"), true); for (k, v) in &gdb.registers_ordered() { ui.text(k); ui.next_column(); ui.text(v); ui.next_column(); } }); ui::docked_window(&ui, &mut gdb, "Asm", left_down, |ui, gdb| { { imgui::TabBar::new(im_str!("test")) .reorderable(true) .build(&ui, || { for (k, v) in &gdb.asm { let s: &imgui::ImStr; let c_str: std::ffi::CString; unsafe { c_str = std::ffi::CString::new(k.as_str()).unwrap(); s = imgui::ImStr::from_utf8_with_nul_unchecked( c_str.as_bytes_with_nul(), ); } let pc_addr = gdb.pc_addr.get(k).unwrap(); imgui::TabItem::new(s).build(&ui, || { ui.text_colored( [0.8f32, 0.8f32, 0.2f32, 1f32], format!("{:#x}", pc_addr), ); ui.separator(); ui.columns(2, im_str!("asm_col"), true); for (addr, line) in v { if line.len() > 0 { if addr == pc_addr { ui.text_colored( [1f32, 0f32, 0f32, 1f32], format!("{:#x}", addr), ); } else { ui.text_colored( [1f32, 1f32, 1f32, 1f32], format!("{:#x}", addr), ); } ui.next_column(); ui.text_colored([1f32, 1f32, 1f32, 1f32], line); ui.next_column(); } } }) } }) } }); ui::docked_window(&ui, &gdb, "Console", left_down, |ui, gdb| { ui.text_colored([1f32, 1f32, 1f32, 1f32], &gdb.console_output); if imgui::InputText::new(ui, im_str!(""), &mut input_buf) .enter_returns_true(true) .build() { let mut cmd = String::from(input_buf.to_str()); cmd.push('\n'); send_command(&cmd, &sender).unwrap(); input_buf.clear(); } }); ui::docked_window(&ui, &gdb, "memory", right_down, |ui, gdb| { let (addr, mem) = &gdb.memory; let mut addr = *addr; let mut s = format!("{:#08x} ", addr); let mut col = 0.2f32; for (i, val) in mem.iter().enumerate() { if *val != 0u64 { col = 1f32; } s.push_str(&format!("{:02x}", val)); s.push(' '); addr += 1; if (i + 1) % 8 == 0 { ui.text_colored([col, col, col, 1f32], &s); // cleaning the string for the next line s = format!("{:#08x} ", addr); col = 0.2f32; } } //@Error maybe some values won't be rendered here }); //ui.show_demo_window(&mut 
true); unsafe { gl::ClearColor(0.2, 0.2, 0.2, 1.0); gl::Clear(gl::COLOR_BUFFER_BIT); } imgui_sdl2.prepare_render(&ui, &window); renderer.render(ui); window.gl_swap_window(); } } fn start_process_thread( child: &mut Child, receiver: Receiver<String>, gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, ) { let mut stdin = child.stdin.take().unwrap(); let stdout = child.stdout.take().unwrap(); use crate::debugger::DebuggerState; // Receiving commands and sending them to GDB's stdin thread::spawn(move || { for line in receiver { stdin.write_all(line.as_bytes()).unwrap(); } }); // Reading and processing GDB stdout thread::spawn(move || { let mut f = BufReader::new(stdout); loop { let mut line = String::new(); f.read_line(&mut line).unwrap(); print!("[LINE] {}", line); let gdb: &mut DebuggerState = &mut *gdb_mutex.lock().unwrap(); let vals = parser::parse(&line, gdb); println!("[PARSER] {:#?}", &vals); if let Ok(v) = vals { // Here we try to limit the scope were we hold the mutex gdb.update(&v); } } }); } fn start_process( receiver: Receiver<String>, gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, ) -> Child { let mut child = Command::new("gdb") .arg("--interpreter=mi3") .arg("./examples/a.exe") .stdin(Stdio::piped()) .stdout(Stdio::piped()) .spawn() .expect("Failed to start process"); start_process_thread(&mut child, receiver, gdb_mutex); println!("Started process: {}", child.id()); child } fn
() -> Result<(), Error> { let (tx, rx) = channel(); let gdb_mutex = Arc::new(Mutex::new(debugger::DebuggerState::new())); let mut child = start_process(rx, Arc::clone(&gdb_mutex)); send_commands(&tx, &STARTUP_COMMANDS, 100); start_graphics(Arc::clone(&gdb_mutex), move || {}, &tx); child.kill()?; Ok(()) }
main
identifier_name
trial.go
package internal import ( "context" "fmt" "regexp" "strconv" "strings" "time" "github.com/cenkalti/backoff/v4" "github.com/determined-ai/determined/master/internal/prom" "github.com/determined-ai/determined/master/internal/rm" "github.com/determined-ai/determined/master/internal/task" "github.com/determined-ai/determined/master/internal/task/tasklogger" "github.com/determined-ai/determined/master/pkg/actor/actors" "github.com/determined-ai/determined/master/pkg/logger" "github.com/determined-ai/determined/master/pkg/mathx" "github.com/determined-ai/determined/master/pkg/ptrs" "github.com/pkg/errors" "github.com/determined-ai/determined/master/internal/db" "github.com/determined-ai/determined/master/internal/sproto" "github.com/determined-ai/determined/master/pkg/actor" "github.com/determined-ai/determined/master/pkg/model" "github.com/determined-ai/determined/master/pkg/schemas" "github.com/determined-ai/determined/master/pkg/schemas/expconf" "github.com/determined-ai/determined/master/pkg/ssh" "github.com/determined-ai/determined/master/pkg/tasks" ) // A list of errors for which we don't want to attempt any retries of the experiment. // These are errors that no matter how many times we retry, the outcome will still result // in the same error. var nonRetryableErrors = []*regexp.Regexp{ // This error is typically seen when you request resources that SLURM is not able to satisfy. regexp.MustCompile("sbatch: error: Batch job submission failed"), } // A trial is a task actor which is responsible for handling: // - messages from the resource manager, // - messages from the experiment, // - messages from the trial container(s), and // - keeping the trial table of the database up-to-date. // // The trial's desired state is dictated to it by the experiment, searcher and user; they push // it to states like 'ACTIVE', 'PAUSED' and kill or wake it when more work is available. 
It takes // this information and works with the resource manager, allocation, etc, to push us towards // a terminal state, by requesting resources, managing them and restarting them on failures. type trial struct { id int taskID model.TaskID jobID model.JobID jobSubmissionTime time.Time idSet bool experimentID int restored bool trialCreationSent bool // System dependencies. db db.DB rm rm.ResourceManager // Fields that are essentially configuration for the trial. config expconf.ExperimentConfig taskSpec *tasks.TaskSpec generatedKeys ssh.PrivateAndPublicKeys warmStartCheckpoint *model.Checkpoint // state is the current state of the trial. It's patched by experiment changes and kill trial. state model.State // searcher encapsulates the searcher state of the trial. searcher trialSearcherState // restarts is a failure count, it increments when the trial fails and we retry it. restarts int // runID is a count of how many times the task container(s) have stopped and restarted, which // could be due to a failure or due to normal pausing and continuing. When RunID increments, // it effectively invalidates many outstanding messages associated with the previous run. runID int // a ref to the current allocation allocationID *model.AllocationID // a note of the user initated exit reason, if any. userInitiatedExit *model.ExitedReason logCtx logger.Context } // newTrial creates a trial which will try to schedule itself after it receives its first workload. 
func newTrial( logCtx logger.Context, taskID model.TaskID, jobID model.JobID, jobSubmissionTime time.Time, experimentID int, initialState model.State, searcher trialSearcherState, rm rm.ResourceManager, db db.DB, config expconf.ExperimentConfig, warmStartCheckpoint *model.Checkpoint, taskSpec *tasks.TaskSpec, generatedKeys ssh.PrivateAndPublicKeys, restored bool, ) *trial { return &trial{ taskID: taskID, jobID: jobID, jobSubmissionTime: jobSubmissionTime, experimentID: experimentID, state: initialState, searcher: searcher, db: db, rm: rm, config: config, taskSpec: taskSpec, generatedKeys: generatedKeys, warmStartCheckpoint: warmStartCheckpoint, logCtx: logger.MergeContexts(logCtx, logger.Context{ "task-id": taskID, "task-type": model.TaskTypeTrial, }), restored: restored, } } // Returns true if the error message matches one of the errors in the non-retryable list. func isNonRetryableError(err error) bool { for _, nonRetryableError := range nonRetryableErrors { if nonRetryableError.MatchString(err.Error()) { return true } } return false } func (t *trial) Receive(ctx *actor.Context) error { switch msg := ctx.Message().(type) { case actor.PreStart: if t.idSet { if err := t.recover(); err != nil { return fmt.Errorf("recovering trial in prestart: %w", err) } } else { if err := t.create(ctx); err != nil { return fmt.Errorf("persisting trial in prestart: %w", err) } } t.logCtx = logger.MergeContexts(t.logCtx, logger.Context{ "trial-id": t.id, "trial-run-id": t.runID, }) ctx.AddLabels(t.logCtx) return t.maybeAllocateTask(ctx) case actor.PostStop: if !t.idSet { return nil } if !model.TerminalStates[t.state] { if t.allocationID != nil { err := task.DefaultService.Signal(*t.allocationID, task.KillAllocation, "trial crashed") if err == nil { task.DefaultService.AwaitTermination(*t.allocationID) } } return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: "trial did not finish properly", }) } return nil case model.State: return 
t.patchState(ctx, model.StateWithReason{State: msg}) case model.StateWithReason: return t.patchState(ctx, msg) case trialSearcherState: t.searcher = msg switch { case !t.searcher.Complete: return t.maybeAllocateTask(ctx) case t.searcher.Complete && t.searcher.Closed: return t.patchState(ctx, model.StateWithReason{ State: model.StoppingCompletedState, InformationalReason: "hp search is finished", }) } return nil case sproto.ChangeRP: resources := t.config.Resources() resources.SetResourcePool(msg.ResourcePool) t.config.SetResources(resources) if t.allocationID != nil { err := task.DefaultService.Signal( *t.allocationID, task.TerminateAllocation, "allocation resource pool changed", ) if err != nil { ctx.Log().WithError(err).Warn("could not preempt allocation to change rp") } } case userInitiatedEarlyExit: if err := t.handleUserInitiatedStops(ctx, msg); err != nil { ctx.Respond(err) } case *task.AllocationExited: if t.allocationID != nil { return t.allocationExited(ctx, msg) } case sproto.ContainerLog: if log, err := t.enrichTaskLog(&model.TaskLog{ ContainerID: ptrs.Ptr(string(msg.ContainerID)), Log: msg.Message(), Level: msg.Level, }); err != nil { ctx.Log().WithError(err).Warn("dropping container log") } else { tasklogger.Insert(log) } case model.TaskLog: if log, err := t.enrichTaskLog(&msg); err != nil { ctx.Log().WithError(err).Warn("dropping trial log") } else { tasklogger.Insert(log) } case sproto.InvalidResourcesRequestError: ctx.Tell(ctx.Self().Parent(), msg) default: return actor.ErrUnexpectedMessage(ctx) } return nil } func (t *trial) create(ctx *actor.Context) error { m := model.NewTrial( t.state, t.searcher.Create.RequestID, t.experimentID, model.JSONObj(t.searcher.Create.Hparams), t.warmStartCheckpoint, int64(t.searcher.Create.TrialSeed), ) err := t.addTask() if err != nil { return err } err = db.AddTrial(context.TODO(), m, t.taskID) if err != nil { return errors.Wrap(err, "failed to save trial to database") } t.id = m.ID t.idSet = true return nil } // 
recover recovers the trial minimal (hopefully to stay) state for a trial actor. // Separately, the experiment stores and recovers our searcher state. func (t *trial) recover() error { runID, restarts, err := t.db.TrialRunIDAndRestarts(t.id) if err != nil { return errors.Wrap(err, "restoring old trial state") } t.runID = runID t.restarts = restarts return nil } // maybeAllocateTask checks if the trial should allocate state and allocates it if so. func (t *trial) maybeAllocateTask(ctx *actor.Context) error { if !(t.allocationID == nil && !t.searcher.Complete && t.state == model.ActiveState) { return nil } name := fmt.Sprintf("Trial %d (Experiment %d)", t.id, t.experimentID) ctx.Log().Info("decided to allocate trial") restoredAllocation, err := t.maybeRestoreAllocation(ctx) if err != nil { ctx.Log().WithError(err).Warn("failed to restore trial allocation") } else if restoredAllocation != nil { specifier, err := t.buildTaskSpecifier(ctx) if err != nil { return err } ar := sproto.AllocateRequest{ AllocationID: restoredAllocation.AllocationID, TaskID: t.taskID, JobID: t.jobID, JobSubmissionTime: t.jobSubmissionTime, IsUserVisible: true, Name: name, Group: ctx.Self().Parent(), SlotsNeeded: t.config.Resources().SlotsPerTrial(), ResourcePool: t.config.Resources().ResourcePool(), FittingRequirements: sproto.FittingRequirements{ SingleAgent: false, }, Preemptible: true, Restore: true, ProxyPorts: sproto.NewProxyPortConfig( tasks.TrialSpecProxyPorts(t.taskSpec, t.config), t.taskID), } ctx.Log(). WithField("allocation-id", ar.AllocationID). Infof("starting restored trial allocation") // HACK: Start used to only return errors async, now that it doesn't we need retries else // temporary failures fail the entire trial too easily. 
err = backoff.Retry(func() error { return task.DefaultService.StartAllocation( t.logCtx, ar, t.db, t.rm, specifier, ctx.Self().System(), func(ae *task.AllocationExited) { ctx.Tell(ctx.Self(), ae) }, ) }, launchRetries()) if err != nil { return err } t.allocationID = &ar.AllocationID return nil } t.runID++ t.logCtx = logger.MergeContexts(t.logCtx, logger.Context{"trial-run-id": t.runID}) ctx.AddLabels(t.logCtx) specifier, err := t.buildTaskSpecifier(ctx) if err != nil { return err } ar := sproto.AllocateRequest{ AllocationID: model.AllocationID(fmt.Sprintf("%s.%d", t.taskID, t.runID)), TaskID: t.taskID, JobID: t.jobID, JobSubmissionTime: t.jobSubmissionTime, IsUserVisible: true, Name: name, Group: ctx.Self().Parent(), SlotsNeeded: t.config.Resources().SlotsPerTrial(), ResourcePool: t.config.Resources().ResourcePool(), FittingRequirements: sproto.FittingRequirements{ SingleAgent: false, }, Preemptible: true, ProxyPorts: sproto.NewProxyPortConfig(tasks.TrialSpecProxyPorts(t.taskSpec, t.config), t.taskID), } ctx.Log(). WithField("allocation-id", ar.AllocationID). Debugf("starting new trial allocation") prom.AssociateJobExperiment(t.jobID, strconv.Itoa(t.experimentID), t.config.Labels()) // HACK: Start used to only return errors async, now that it doesn't we need retries else // temporary failures fail the entire trial too easily. err = backoff.Retry(func() error { return task.DefaultService.StartAllocation( t.logCtx, ar, t.db, t.rm, specifier, ctx.Self().System(), func(ae *task.AllocationExited) { ctx.Tell(ctx.Self(), ae) }, ) }, launchRetries()) if err != nil { return err } t.allocationID = &ar.AllocationID return nil } const ( // InvalidHPKillDelay the delay before we forcibly kill a trial that said it had an invalid HP. 
InvalidHPKillDelay = 10 * time.Second ) func (t *trial) handleUserInitiatedStops(ctx *actor.Context, msg userInitiatedEarlyExit) error { switch msg.reason { case model.InvalidHP, model.InitInvalidHP: t.userInitiatedExit = &msg.reason // After a short time, force us to clean up if we're still handling messages. actors.NotifyAfter(ctx, InvalidHPKillDelay, model.StoppingKilledState) return nil case model.UserRequestedStop, model.Errored: return fmt.Errorf("should not report special exit reason %s to the master", msg.reason) default: return actor.ErrUnexpectedMessage(ctx) } } func (t *trial) addTask() error { return t.db.AddTask(&model.Task{ TaskID: t.taskID, TaskType: model.TaskTypeTrial, StartTime: t.jobSubmissionTime, // TODO: Why is this the job submission time..? JobID: &t.jobID, LogVersion: model.CurrentTaskLogVersion, }) } func (t *trial) buildTaskSpecifier(ctx *actor.Context) (*tasks.TrialSpec, error) { if !t.trialCreationSent { ctx.Tell(ctx.Self().Parent(), trialCreated{requestID: t.searcher.Create.RequestID}) t.trialCreationSent = true } if err := t.db.UpdateTrialRunID(t.id, t.runID); err != nil { return nil, errors.Wrap(err, "failed to save trial run ID") } var stepsCompleted int latestCheckpoint, err := t.db.LatestCheckpointForTrial(t.id) switch { case err != nil: return nil, errors.Wrapf(err, "failed to query latest checkpoint for trial") case latestCheckpoint == nil: latestCheckpoint = t.warmStartCheckpoint default: stepsCompleted = latestCheckpoint.StepsCompleted } return &tasks.TrialSpec{ Base: *t.taskSpec, ExperimentID: t.experimentID, TrialID: t.id, TrialRunID: t.runID, ExperimentConfig: schemas.Copy(t.config), HParams: t.searcher.Create.Hparams, TrialSeed: t.searcher.Create.TrialSeed, StepsCompleted: stepsCompleted, LatestCheckpoint: latestCheckpoint, Keys: t.generatedKeys, }, nil } // allocationExited cleans up after an allocation exit and exits permanently or reallocates. 
func (t *trial) allocationExited(ctx *actor.Context, exit *task.AllocationExited) error
// patchState decide if the state patch is valid. If so, we'll transition the trial. func (t *trial) patchState(ctx *actor.Context, s model.StateWithReason) error { switch { case model.TerminalStates[t.state]: ctx.Log().Infof("ignoring transition in terminal state (%s -> %s)", t.state, s.State) return nil case model.TerminalStates[s.State]: ctx.Log().Infof("ignoring patch to terminal state %s", s.State) return nil case t.state == s.State: // Order is important, else below will prevent re-sending kills. ctx.Log().Infof("resending actions for transition for %s", t.state) return t.transition(ctx, s) case model.StoppingStates[t.state] && !model.TrialTransitions[t.state][s.State]: ctx.Log().Infof("ignoring patch to less severe stopping state (%s)", s.State) return nil default: ctx.Log().Debugf("patching state after request (%s)", s.State) return t.transition(ctx, s) } } // transition the trial by rectifying the desired state with our actual state to determined // a target state, and then propogating the appropriate signals to the allocation if there is any. func (t *trial) transition(ctx *actor.Context, s model.StateWithReason) error { if t.state != s.State { ctx.Log().Infof("trial changed from state %s to %s", t.state, s.State) if t.idSet { if err := t.db.UpdateTrial(t.id, s.State); err != nil { return errors.Wrap(err, "updating trial with end state") } } t.state = s.State } // Rectify our state and the allocation state with the transition. 
switch { case t.state == model.ActiveState: return t.maybeAllocateTask(ctx) case t.state == model.PausedState: if t.allocationID != nil { ctx.Log().Info("decided to terminate trial due to pause") err := task.DefaultService.Signal( *t.allocationID, task.TerminateAllocation, s.InformationalReason, ) if err != nil { ctx.Log().WithError(err).Warn("could not terminate allocation after pause") } } case model.StoppingStates[t.state]: switch { case t.allocationID == nil: ctx.Log().Info("stopping trial before resources are requested") return t.transition(ctx, model.StateWithReason{ State: model.StoppingToTerminalStates[t.state], InformationalReason: s.InformationalReason, }) default: if action, ok := map[model.State]task.AllocationSignal{ model.StoppingCanceledState: task.TerminateAllocation, model.StoppingKilledState: task.KillAllocation, model.StoppingErrorState: task.KillAllocation, }[t.state]; ok { ctx.Log().Infof("decided to %s trial", action) err := task.DefaultService.Signal(*t.allocationID, action, s.InformationalReason) if err != nil { ctx.Log().WithError(err).Warnf("could not %s allocation during stop", action) } } } case model.TerminalStates[t.state]: switch t.state { case model.ErrorState: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.Errored, }) case model.CanceledState: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.UserCanceled, }) } ctx.Self().Stop() default: panic(fmt.Errorf("unmatched state in transition %s", t.state)) } return nil } func (t *trial) enrichTaskLog(log *model.TaskLog) (*model.TaskLog, error) { if !t.idSet { return nil, fmt.Errorf("cannot handle trial log before ID is set: %v", log) } log.TaskID = string(t.taskID) if log.Timestamp == nil { log.Timestamp = ptrs.Ptr(time.Now().UTC()) } if log.Level == nil { log.Level = ptrs.Ptr("INFO") } if log.Source == nil { log.Source = ptrs.Ptr("master") } if log.StdType == nil { 
log.StdType = ptrs.Ptr("stdout") } log.Log += "\n" return log, nil } func (t *trial) maybeRestoreAllocation(ctx *actor.Context) (*model.Allocation, error) { if !t.restored || !t.rm.IsReattachEnabled(ctx) { return nil, nil } var allocations []model.Allocation selectQuery := db.Bun().NewSelect().Model(&allocations). Where("task_id = ?", t.taskID). Where("end_time IS NULL"). Where("state != ?", model.AllocationStateTerminated) if t.rm.IsReattachableOnlyAfterStarted(ctx) { selectQuery.Where("start_time IS NOT NULL") } // Do we have an open allocation? err := selectQuery.Scan(context.TODO()) if err != nil { return nil, err } openAllocs := len(allocations) switch { case openAllocs == 0: return nil, nil case openAllocs == 1: allocation := &allocations[0] if !t.rm.IsReattachEnabledForRP(ctx, allocation.ResourcePool) { return nil, nil } return allocation, nil case openAllocs > 1: const maxAllocsToLog int = 3 allocIDs := make([]string, 0, maxAllocsToLog) for _, alloc := range allocations[0:mathx.Min(len(allocations), maxAllocsToLog)] { allocIDs = append(allocIDs, alloc.AllocationID.String()) } return nil, fmt.Errorf( "discovered %d open allocations on restore: %s", len(allocations), strings.Join(allocIDs, " "), ) default: return nil, fmt.Errorf( "discovered %d open allocations on restore", len(allocations), ) } } func launchRetries() backoff.BackOff { bf := backoff.NewExponentialBackOff() bf.InitialInterval = time.Second bf.MaxInterval = time.Minute return backoff.WithMaxRetries(bf, 4) }
{ if exit.Err != nil { ctx.Log().WithError(exit.Err).Error("trial allocation failed") } t.allocationID = nil prom.DisassociateJobExperiment(t.jobID, strconv.Itoa(t.experimentID), t.config.Labels()) // Decide if this is permanent. switch { case model.StoppingStates[t.state]: if exit.Err != nil { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with an error while trial was stopping %v", exit.Err), }) } return t.transition(ctx, model.StateWithReason{ State: model.StoppingToTerminalStates[t.state], InformationalReason: "trial stopped", }) case t.searcher.Complete && t.searcher.Closed: if exit.Err != nil { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with an error but hp search was complete %v", exit.Err), }) } return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: "hp search is finished", }) case exit.Err != nil && sproto.IsUnrecoverableSystemError(exit.Err): ctx.Log(). WithError(exit.Err). Errorf("trial encountered unrecoverable failure") return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with unrecoverable failure %v", exit.Err), }) case exit.Err != nil && isNonRetryableError(exit.Err): // These are errors that no matter how many times we retry, the outcome will // be the same, so don't bother retrying. Fail right away to allow the user // to make any adjustments to the experiment and try again. return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with unrecoverable failure %v", exit.Err), }) case exit.Err != nil && sproto.IsTransientSystemError(exit.Err): ctx.Log(). WithError(exit.Err). 
Errorf("trial encountered transient system error") case exit.Err != nil && !sproto.IsTransientSystemError(exit.Err): ctx.Log(). WithError(exit.Err). Errorf("trial failed (restart %d/%d)", t.restarts, t.config.MaxRestarts()) t.restarts++ if err := t.db.UpdateTrialRestarts(t.id, t.restarts); err != nil { return err } if t.restarts > t.config.MaxRestarts() { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: "trial exceeded max restarts", }) } case exit.UserRequestedStop: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.UserRequestedStop, }) return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: "trial exited early due to a user requested stop", }) case t.userInitiatedExit != nil: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: *t.userInitiatedExit, }) return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: fmt.Sprintf( "trial exited early with reason: %v", *t.userInitiatedExit), }) } // Maybe reschedule. return errors.Wrap(t.maybeAllocateTask(ctx), "failed to reschedule trial") }
identifier_body
trial.go
package internal import ( "context" "fmt" "regexp" "strconv" "strings" "time" "github.com/cenkalti/backoff/v4" "github.com/determined-ai/determined/master/internal/prom" "github.com/determined-ai/determined/master/internal/rm" "github.com/determined-ai/determined/master/internal/task" "github.com/determined-ai/determined/master/internal/task/tasklogger" "github.com/determined-ai/determined/master/pkg/actor/actors" "github.com/determined-ai/determined/master/pkg/logger" "github.com/determined-ai/determined/master/pkg/mathx" "github.com/determined-ai/determined/master/pkg/ptrs" "github.com/pkg/errors" "github.com/determined-ai/determined/master/internal/db" "github.com/determined-ai/determined/master/internal/sproto" "github.com/determined-ai/determined/master/pkg/actor" "github.com/determined-ai/determined/master/pkg/model" "github.com/determined-ai/determined/master/pkg/schemas" "github.com/determined-ai/determined/master/pkg/schemas/expconf" "github.com/determined-ai/determined/master/pkg/ssh" "github.com/determined-ai/determined/master/pkg/tasks" ) // A list of errors for which we don't want to attempt any retries of the experiment. // These are errors that no matter how many times we retry, the outcome will still result // in the same error. var nonRetryableErrors = []*regexp.Regexp{ // This error is typically seen when you request resources that SLURM is not able to satisfy. regexp.MustCompile("sbatch: error: Batch job submission failed"), } // A trial is a task actor which is responsible for handling: // - messages from the resource manager, // - messages from the experiment, // - messages from the trial container(s), and // - keeping the trial table of the database up-to-date. // // The trial's desired state is dictated to it by the experiment, searcher and user; they push // it to states like 'ACTIVE', 'PAUSED' and kill or wake it when more work is available. 
It takes // this information and works with the resource manager, allocation, etc, to push us towards // a terminal state, by requesting resources, managing them and restarting them on failures. type trial struct { id int taskID model.TaskID jobID model.JobID jobSubmissionTime time.Time idSet bool experimentID int restored bool trialCreationSent bool // System dependencies. db db.DB rm rm.ResourceManager // Fields that are essentially configuration for the trial. config expconf.ExperimentConfig taskSpec *tasks.TaskSpec generatedKeys ssh.PrivateAndPublicKeys warmStartCheckpoint *model.Checkpoint // state is the current state of the trial. It's patched by experiment changes and kill trial. state model.State // searcher encapsulates the searcher state of the trial. searcher trialSearcherState // restarts is a failure count, it increments when the trial fails and we retry it. restarts int // runID is a count of how many times the task container(s) have stopped and restarted, which // could be due to a failure or due to normal pausing and continuing. When RunID increments, // it effectively invalidates many outstanding messages associated with the previous run. runID int // a ref to the current allocation allocationID *model.AllocationID // a note of the user initated exit reason, if any. userInitiatedExit *model.ExitedReason logCtx logger.Context } // newTrial creates a trial which will try to schedule itself after it receives its first workload. 
func newTrial( logCtx logger.Context, taskID model.TaskID, jobID model.JobID, jobSubmissionTime time.Time, experimentID int, initialState model.State, searcher trialSearcherState, rm rm.ResourceManager, db db.DB, config expconf.ExperimentConfig, warmStartCheckpoint *model.Checkpoint, taskSpec *tasks.TaskSpec, generatedKeys ssh.PrivateAndPublicKeys, restored bool, ) *trial { return &trial{ taskID: taskID, jobID: jobID, jobSubmissionTime: jobSubmissionTime, experimentID: experimentID, state: initialState, searcher: searcher, db: db, rm: rm, config: config, taskSpec: taskSpec, generatedKeys: generatedKeys, warmStartCheckpoint: warmStartCheckpoint, logCtx: logger.MergeContexts(logCtx, logger.Context{ "task-id": taskID, "task-type": model.TaskTypeTrial, }), restored: restored, } } // Returns true if the error message matches one of the errors in the non-retryable list. func isNonRetryableError(err error) bool { for _, nonRetryableError := range nonRetryableErrors { if nonRetryableError.MatchString(err.Error()) { return true } } return false } func (t *trial) Receive(ctx *actor.Context) error { switch msg := ctx.Message().(type) { case actor.PreStart: if t.idSet { if err := t.recover(); err != nil { return fmt.Errorf("recovering trial in prestart: %w", err) } } else { if err := t.create(ctx); err != nil { return fmt.Errorf("persisting trial in prestart: %w", err) } } t.logCtx = logger.MergeContexts(t.logCtx, logger.Context{ "trial-id": t.id, "trial-run-id": t.runID, }) ctx.AddLabels(t.logCtx) return t.maybeAllocateTask(ctx) case actor.PostStop: if !t.idSet { return nil } if !model.TerminalStates[t.state] { if t.allocationID != nil { err := task.DefaultService.Signal(*t.allocationID, task.KillAllocation, "trial crashed") if err == nil { task.DefaultService.AwaitTermination(*t.allocationID) } } return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: "trial did not finish properly", }) } return nil case model.State: return 
t.patchState(ctx, model.StateWithReason{State: msg}) case model.StateWithReason: return t.patchState(ctx, msg) case trialSearcherState: t.searcher = msg switch { case !t.searcher.Complete: return t.maybeAllocateTask(ctx) case t.searcher.Complete && t.searcher.Closed: return t.patchState(ctx, model.StateWithReason{ State: model.StoppingCompletedState, InformationalReason: "hp search is finished", }) } return nil case sproto.ChangeRP: resources := t.config.Resources() resources.SetResourcePool(msg.ResourcePool) t.config.SetResources(resources) if t.allocationID != nil { err := task.DefaultService.Signal( *t.allocationID, task.TerminateAllocation, "allocation resource pool changed", ) if err != nil { ctx.Log().WithError(err).Warn("could not preempt allocation to change rp") } } case userInitiatedEarlyExit: if err := t.handleUserInitiatedStops(ctx, msg); err != nil { ctx.Respond(err) } case *task.AllocationExited: if t.allocationID != nil { return t.allocationExited(ctx, msg) } case sproto.ContainerLog: if log, err := t.enrichTaskLog(&model.TaskLog{ ContainerID: ptrs.Ptr(string(msg.ContainerID)), Log: msg.Message(), Level: msg.Level, }); err != nil { ctx.Log().WithError(err).Warn("dropping container log") } else { tasklogger.Insert(log) } case model.TaskLog: if log, err := t.enrichTaskLog(&msg); err != nil { ctx.Log().WithError(err).Warn("dropping trial log") } else { tasklogger.Insert(log) } case sproto.InvalidResourcesRequestError: ctx.Tell(ctx.Self().Parent(), msg) default: return actor.ErrUnexpectedMessage(ctx) } return nil } func (t *trial) create(ctx *actor.Context) error { m := model.NewTrial( t.state, t.searcher.Create.RequestID, t.experimentID, model.JSONObj(t.searcher.Create.Hparams), t.warmStartCheckpoint, int64(t.searcher.Create.TrialSeed), ) err := t.addTask() if err != nil { return err } err = db.AddTrial(context.TODO(), m, t.taskID) if err != nil { return errors.Wrap(err, "failed to save trial to database") } t.id = m.ID t.idSet = true return nil } // 
recover recovers the trial minimal (hopefully to stay) state for a trial actor. // Separately, the experiment stores and recovers our searcher state. func (t *trial) recover() error { runID, restarts, err := t.db.TrialRunIDAndRestarts(t.id) if err != nil { return errors.Wrap(err, "restoring old trial state") } t.runID = runID t.restarts = restarts return nil } // maybeAllocateTask checks if the trial should allocate state and allocates it if so. func (t *trial) maybeAllocateTask(ctx *actor.Context) error { if !(t.allocationID == nil && !t.searcher.Complete && t.state == model.ActiveState) { return nil } name := fmt.Sprintf("Trial %d (Experiment %d)", t.id, t.experimentID) ctx.Log().Info("decided to allocate trial") restoredAllocation, err := t.maybeRestoreAllocation(ctx) if err != nil { ctx.Log().WithError(err).Warn("failed to restore trial allocation") } else if restoredAllocation != nil { specifier, err := t.buildTaskSpecifier(ctx) if err != nil { return err } ar := sproto.AllocateRequest{ AllocationID: restoredAllocation.AllocationID, TaskID: t.taskID, JobID: t.jobID, JobSubmissionTime: t.jobSubmissionTime, IsUserVisible: true, Name: name, Group: ctx.Self().Parent(), SlotsNeeded: t.config.Resources().SlotsPerTrial(), ResourcePool: t.config.Resources().ResourcePool(), FittingRequirements: sproto.FittingRequirements{ SingleAgent: false, }, Preemptible: true, Restore: true, ProxyPorts: sproto.NewProxyPortConfig( tasks.TrialSpecProxyPorts(t.taskSpec, t.config), t.taskID), } ctx.Log(). WithField("allocation-id", ar.AllocationID). Infof("starting restored trial allocation") // HACK: Start used to only return errors async, now that it doesn't we need retries else // temporary failures fail the entire trial too easily. 
err = backoff.Retry(func() error { return task.DefaultService.StartAllocation( t.logCtx, ar, t.db, t.rm, specifier, ctx.Self().System(), func(ae *task.AllocationExited) { ctx.Tell(ctx.Self(), ae) }, ) }, launchRetries()) if err != nil { return err } t.allocationID = &ar.AllocationID return nil } t.runID++ t.logCtx = logger.MergeContexts(t.logCtx, logger.Context{"trial-run-id": t.runID}) ctx.AddLabels(t.logCtx) specifier, err := t.buildTaskSpecifier(ctx) if err != nil { return err } ar := sproto.AllocateRequest{ AllocationID: model.AllocationID(fmt.Sprintf("%s.%d", t.taskID, t.runID)), TaskID: t.taskID, JobID: t.jobID, JobSubmissionTime: t.jobSubmissionTime, IsUserVisible: true, Name: name, Group: ctx.Self().Parent(), SlotsNeeded: t.config.Resources().SlotsPerTrial(), ResourcePool: t.config.Resources().ResourcePool(), FittingRequirements: sproto.FittingRequirements{ SingleAgent: false, }, Preemptible: true, ProxyPorts: sproto.NewProxyPortConfig(tasks.TrialSpecProxyPorts(t.taskSpec, t.config), t.taskID), } ctx.Log(). WithField("allocation-id", ar.AllocationID). Debugf("starting new trial allocation") prom.AssociateJobExperiment(t.jobID, strconv.Itoa(t.experimentID), t.config.Labels()) // HACK: Start used to only return errors async, now that it doesn't we need retries else // temporary failures fail the entire trial too easily. err = backoff.Retry(func() error { return task.DefaultService.StartAllocation( t.logCtx, ar, t.db, t.rm, specifier, ctx.Self().System(), func(ae *task.AllocationExited) { ctx.Tell(ctx.Self(), ae) }, ) }, launchRetries()) if err != nil { return err } t.allocationID = &ar.AllocationID return nil } const ( // InvalidHPKillDelay the delay before we forcibly kill a trial that said it had an invalid HP. 
InvalidHPKillDelay = 10 * time.Second ) func (t *trial) handleUserInitiatedStops(ctx *actor.Context, msg userInitiatedEarlyExit) error { switch msg.reason { case model.InvalidHP, model.InitInvalidHP: t.userInitiatedExit = &msg.reason // After a short time, force us to clean up if we're still handling messages. actors.NotifyAfter(ctx, InvalidHPKillDelay, model.StoppingKilledState) return nil case model.UserRequestedStop, model.Errored: return fmt.Errorf("should not report special exit reason %s to the master", msg.reason) default: return actor.ErrUnexpectedMessage(ctx) } } func (t *trial) addTask() error { return t.db.AddTask(&model.Task{ TaskID: t.taskID, TaskType: model.TaskTypeTrial, StartTime: t.jobSubmissionTime, // TODO: Why is this the job submission time..? JobID: &t.jobID, LogVersion: model.CurrentTaskLogVersion, }) } func (t *trial) buildTaskSpecifier(ctx *actor.Context) (*tasks.TrialSpec, error) { if !t.trialCreationSent { ctx.Tell(ctx.Self().Parent(), trialCreated{requestID: t.searcher.Create.RequestID}) t.trialCreationSent = true } if err := t.db.UpdateTrialRunID(t.id, t.runID); err != nil { return nil, errors.Wrap(err, "failed to save trial run ID") } var stepsCompleted int latestCheckpoint, err := t.db.LatestCheckpointForTrial(t.id) switch { case err != nil: return nil, errors.Wrapf(err, "failed to query latest checkpoint for trial") case latestCheckpoint == nil: latestCheckpoint = t.warmStartCheckpoint default: stepsCompleted = latestCheckpoint.StepsCompleted } return &tasks.TrialSpec{ Base: *t.taskSpec, ExperimentID: t.experimentID, TrialID: t.id, TrialRunID: t.runID, ExperimentConfig: schemas.Copy(t.config), HParams: t.searcher.Create.Hparams, TrialSeed: t.searcher.Create.TrialSeed, StepsCompleted: stepsCompleted, LatestCheckpoint: latestCheckpoint, Keys: t.generatedKeys, }, nil } // allocationExited cleans up after an allocation exit and exits permanently or reallocates. 
func (t *trial) allocationExited(ctx *actor.Context, exit *task.AllocationExited) error { if exit.Err != nil { ctx.Log().WithError(exit.Err).Error("trial allocation failed") } t.allocationID = nil prom.DisassociateJobExperiment(t.jobID, strconv.Itoa(t.experimentID), t.config.Labels()) // Decide if this is permanent. switch { case model.StoppingStates[t.state]: if exit.Err != nil { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with an error while trial was stopping %v", exit.Err), }) } return t.transition(ctx, model.StateWithReason{ State: model.StoppingToTerminalStates[t.state], InformationalReason: "trial stopped", }) case t.searcher.Complete && t.searcher.Closed: if exit.Err != nil { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with an error but hp search was complete %v", exit.Err), }) } return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: "hp search is finished", }) case exit.Err != nil && sproto.IsUnrecoverableSystemError(exit.Err): ctx.Log(). WithError(exit.Err). Errorf("trial encountered unrecoverable failure") return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with unrecoverable failure %v", exit.Err), }) case exit.Err != nil && isNonRetryableError(exit.Err): // These are errors that no matter how many times we retry, the outcome will // be the same, so don't bother retrying. Fail right away to allow the user // to make any adjustments to the experiment and try again. return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with unrecoverable failure %v", exit.Err), }) case exit.Err != nil && sproto.IsTransientSystemError(exit.Err): ctx.Log(). WithError(exit.Err). 
Errorf("trial encountered transient system error") case exit.Err != nil && !sproto.IsTransientSystemError(exit.Err): ctx.Log(). WithError(exit.Err). Errorf("trial failed (restart %d/%d)", t.restarts, t.config.MaxRestarts()) t.restarts++ if err := t.db.UpdateTrialRestarts(t.id, t.restarts); err != nil { return err } if t.restarts > t.config.MaxRestarts() { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: "trial exceeded max restarts", }) } case exit.UserRequestedStop: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.UserRequestedStop, }) return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: "trial exited early due to a user requested stop", }) case t.userInitiatedExit != nil: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: *t.userInitiatedExit, }) return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: fmt.Sprintf( "trial exited early with reason: %v", *t.userInitiatedExit), }) } // Maybe reschedule. return errors.Wrap(t.maybeAllocateTask(ctx), "failed to reschedule trial") } // patchState decide if the state patch is valid. If so, we'll transition the trial. func (t *trial) patchState(ctx *actor.Context, s model.StateWithReason) error { switch { case model.TerminalStates[t.state]: ctx.Log().Infof("ignoring transition in terminal state (%s -> %s)", t.state, s.State) return nil case model.TerminalStates[s.State]: ctx.Log().Infof("ignoring patch to terminal state %s", s.State) return nil case t.state == s.State: // Order is important, else below will prevent re-sending kills. 
ctx.Log().Infof("resending actions for transition for %s", t.state) return t.transition(ctx, s) case model.StoppingStates[t.state] && !model.TrialTransitions[t.state][s.State]: ctx.Log().Infof("ignoring patch to less severe stopping state (%s)", s.State) return nil default: ctx.Log().Debugf("patching state after request (%s)", s.State) return t.transition(ctx, s) } } // transition the trial by rectifying the desired state with our actual state to determined // a target state, and then propogating the appropriate signals to the allocation if there is any. func (t *trial) transition(ctx *actor.Context, s model.StateWithReason) error { if t.state != s.State { ctx.Log().Infof("trial changed from state %s to %s", t.state, s.State) if t.idSet { if err := t.db.UpdateTrial(t.id, s.State); err != nil { return errors.Wrap(err, "updating trial with end state") } } t.state = s.State } // Rectify our state and the allocation state with the transition. switch { case t.state == model.ActiveState: return t.maybeAllocateTask(ctx) case t.state == model.PausedState: if t.allocationID != nil { ctx.Log().Info("decided to terminate trial due to pause") err := task.DefaultService.Signal( *t.allocationID, task.TerminateAllocation, s.InformationalReason, ) if err != nil
} case model.StoppingStates[t.state]: switch { case t.allocationID == nil: ctx.Log().Info("stopping trial before resources are requested") return t.transition(ctx, model.StateWithReason{ State: model.StoppingToTerminalStates[t.state], InformationalReason: s.InformationalReason, }) default: if action, ok := map[model.State]task.AllocationSignal{ model.StoppingCanceledState: task.TerminateAllocation, model.StoppingKilledState: task.KillAllocation, model.StoppingErrorState: task.KillAllocation, }[t.state]; ok { ctx.Log().Infof("decided to %s trial", action) err := task.DefaultService.Signal(*t.allocationID, action, s.InformationalReason) if err != nil { ctx.Log().WithError(err).Warnf("could not %s allocation during stop", action) } } } case model.TerminalStates[t.state]: switch t.state { case model.ErrorState: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.Errored, }) case model.CanceledState: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.UserCanceled, }) } ctx.Self().Stop() default: panic(fmt.Errorf("unmatched state in transition %s", t.state)) } return nil } func (t *trial) enrichTaskLog(log *model.TaskLog) (*model.TaskLog, error) { if !t.idSet { return nil, fmt.Errorf("cannot handle trial log before ID is set: %v", log) } log.TaskID = string(t.taskID) if log.Timestamp == nil { log.Timestamp = ptrs.Ptr(time.Now().UTC()) } if log.Level == nil { log.Level = ptrs.Ptr("INFO") } if log.Source == nil { log.Source = ptrs.Ptr("master") } if log.StdType == nil { log.StdType = ptrs.Ptr("stdout") } log.Log += "\n" return log, nil } func (t *trial) maybeRestoreAllocation(ctx *actor.Context) (*model.Allocation, error) { if !t.restored || !t.rm.IsReattachEnabled(ctx) { return nil, nil } var allocations []model.Allocation selectQuery := db.Bun().NewSelect().Model(&allocations). Where("task_id = ?", t.taskID). Where("end_time IS NULL"). 
Where("state != ?", model.AllocationStateTerminated) if t.rm.IsReattachableOnlyAfterStarted(ctx) { selectQuery.Where("start_time IS NOT NULL") } // Do we have an open allocation? err := selectQuery.Scan(context.TODO()) if err != nil { return nil, err } openAllocs := len(allocations) switch { case openAllocs == 0: return nil, nil case openAllocs == 1: allocation := &allocations[0] if !t.rm.IsReattachEnabledForRP(ctx, allocation.ResourcePool) { return nil, nil } return allocation, nil case openAllocs > 1: const maxAllocsToLog int = 3 allocIDs := make([]string, 0, maxAllocsToLog) for _, alloc := range allocations[0:mathx.Min(len(allocations), maxAllocsToLog)] { allocIDs = append(allocIDs, alloc.AllocationID.String()) } return nil, fmt.Errorf( "discovered %d open allocations on restore: %s", len(allocations), strings.Join(allocIDs, " "), ) default: return nil, fmt.Errorf( "discovered %d open allocations on restore", len(allocations), ) } } func launchRetries() backoff.BackOff { bf := backoff.NewExponentialBackOff() bf.InitialInterval = time.Second bf.MaxInterval = time.Minute return backoff.WithMaxRetries(bf, 4) }
{ ctx.Log().WithError(err).Warn("could not terminate allocation after pause") }
conditional_block
trial.go
package internal import ( "context" "fmt" "regexp" "strconv" "strings" "time" "github.com/cenkalti/backoff/v4" "github.com/determined-ai/determined/master/internal/prom" "github.com/determined-ai/determined/master/internal/rm" "github.com/determined-ai/determined/master/internal/task" "github.com/determined-ai/determined/master/internal/task/tasklogger" "github.com/determined-ai/determined/master/pkg/actor/actors" "github.com/determined-ai/determined/master/pkg/logger" "github.com/determined-ai/determined/master/pkg/mathx" "github.com/determined-ai/determined/master/pkg/ptrs" "github.com/pkg/errors" "github.com/determined-ai/determined/master/internal/db" "github.com/determined-ai/determined/master/internal/sproto" "github.com/determined-ai/determined/master/pkg/actor" "github.com/determined-ai/determined/master/pkg/model" "github.com/determined-ai/determined/master/pkg/schemas" "github.com/determined-ai/determined/master/pkg/schemas/expconf" "github.com/determined-ai/determined/master/pkg/ssh" "github.com/determined-ai/determined/master/pkg/tasks" ) // A list of errors for which we don't want to attempt any retries of the experiment. // These are errors that no matter how many times we retry, the outcome will still result // in the same error. var nonRetryableErrors = []*regexp.Regexp{ // This error is typically seen when you request resources that SLURM is not able to satisfy. regexp.MustCompile("sbatch: error: Batch job submission failed"), } // A trial is a task actor which is responsible for handling: // - messages from the resource manager, // - messages from the experiment, // - messages from the trial container(s), and // - keeping the trial table of the database up-to-date. // // The trial's desired state is dictated to it by the experiment, searcher and user; they push // it to states like 'ACTIVE', 'PAUSED' and kill or wake it when more work is available. 
It takes // this information and works with the resource manager, allocation, etc, to push us towards // a terminal state, by requesting resources, managing them and restarting them on failures. type trial struct { id int taskID model.TaskID jobID model.JobID jobSubmissionTime time.Time idSet bool experimentID int restored bool trialCreationSent bool // System dependencies. db db.DB rm rm.ResourceManager // Fields that are essentially configuration for the trial. config expconf.ExperimentConfig taskSpec *tasks.TaskSpec generatedKeys ssh.PrivateAndPublicKeys warmStartCheckpoint *model.Checkpoint // state is the current state of the trial. It's patched by experiment changes and kill trial. state model.State // searcher encapsulates the searcher state of the trial. searcher trialSearcherState // restarts is a failure count, it increments when the trial fails and we retry it. restarts int // runID is a count of how many times the task container(s) have stopped and restarted, which // could be due to a failure or due to normal pausing and continuing. When RunID increments, // it effectively invalidates many outstanding messages associated with the previous run. runID int // a ref to the current allocation allocationID *model.AllocationID // a note of the user initated exit reason, if any. userInitiatedExit *model.ExitedReason logCtx logger.Context } // newTrial creates a trial which will try to schedule itself after it receives its first workload. 
func newTrial( logCtx logger.Context, taskID model.TaskID, jobID model.JobID, jobSubmissionTime time.Time, experimentID int, initialState model.State, searcher trialSearcherState, rm rm.ResourceManager, db db.DB, config expconf.ExperimentConfig, warmStartCheckpoint *model.Checkpoint, taskSpec *tasks.TaskSpec, generatedKeys ssh.PrivateAndPublicKeys, restored bool, ) *trial { return &trial{ taskID: taskID, jobID: jobID, jobSubmissionTime: jobSubmissionTime, experimentID: experimentID, state: initialState, searcher: searcher, db: db, rm: rm, config: config, taskSpec: taskSpec, generatedKeys: generatedKeys, warmStartCheckpoint: warmStartCheckpoint, logCtx: logger.MergeContexts(logCtx, logger.Context{ "task-id": taskID, "task-type": model.TaskTypeTrial, }), restored: restored, } } // Returns true if the error message matches one of the errors in the non-retryable list. func isNonRetryableError(err error) bool { for _, nonRetryableError := range nonRetryableErrors { if nonRetryableError.MatchString(err.Error()) { return true } } return false } func (t *trial) Receive(ctx *actor.Context) error { switch msg := ctx.Message().(type) { case actor.PreStart: if t.idSet { if err := t.recover(); err != nil { return fmt.Errorf("recovering trial in prestart: %w", err) } } else { if err := t.create(ctx); err != nil { return fmt.Errorf("persisting trial in prestart: %w", err) } } t.logCtx = logger.MergeContexts(t.logCtx, logger.Context{ "trial-id": t.id, "trial-run-id": t.runID, }) ctx.AddLabels(t.logCtx) return t.maybeAllocateTask(ctx) case actor.PostStop: if !t.idSet { return nil } if !model.TerminalStates[t.state] { if t.allocationID != nil { err := task.DefaultService.Signal(*t.allocationID, task.KillAllocation, "trial crashed") if err == nil { task.DefaultService.AwaitTermination(*t.allocationID) } } return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: "trial did not finish properly", }) } return nil case model.State: return 
t.patchState(ctx, model.StateWithReason{State: msg}) case model.StateWithReason: return t.patchState(ctx, msg) case trialSearcherState: t.searcher = msg switch { case !t.searcher.Complete: return t.maybeAllocateTask(ctx) case t.searcher.Complete && t.searcher.Closed: return t.patchState(ctx, model.StateWithReason{ State: model.StoppingCompletedState, InformationalReason: "hp search is finished", }) } return nil case sproto.ChangeRP: resources := t.config.Resources() resources.SetResourcePool(msg.ResourcePool) t.config.SetResources(resources) if t.allocationID != nil { err := task.DefaultService.Signal( *t.allocationID, task.TerminateAllocation, "allocation resource pool changed", ) if err != nil { ctx.Log().WithError(err).Warn("could not preempt allocation to change rp") } } case userInitiatedEarlyExit: if err := t.handleUserInitiatedStops(ctx, msg); err != nil { ctx.Respond(err) } case *task.AllocationExited: if t.allocationID != nil { return t.allocationExited(ctx, msg) } case sproto.ContainerLog: if log, err := t.enrichTaskLog(&model.TaskLog{ ContainerID: ptrs.Ptr(string(msg.ContainerID)), Log: msg.Message(), Level: msg.Level, }); err != nil { ctx.Log().WithError(err).Warn("dropping container log") } else { tasklogger.Insert(log) } case model.TaskLog: if log, err := t.enrichTaskLog(&msg); err != nil { ctx.Log().WithError(err).Warn("dropping trial log") } else { tasklogger.Insert(log) } case sproto.InvalidResourcesRequestError: ctx.Tell(ctx.Self().Parent(), msg) default: return actor.ErrUnexpectedMessage(ctx) } return nil } func (t *trial) create(ctx *actor.Context) error { m := model.NewTrial( t.state, t.searcher.Create.RequestID, t.experimentID, model.JSONObj(t.searcher.Create.Hparams), t.warmStartCheckpoint, int64(t.searcher.Create.TrialSeed), ) err := t.addTask() if err != nil { return err } err = db.AddTrial(context.TODO(), m, t.taskID) if err != nil { return errors.Wrap(err, "failed to save trial to database") } t.id = m.ID t.idSet = true return nil } // 
recover recovers the trial minimal (hopefully to stay) state for a trial actor. // Separately, the experiment stores and recovers our searcher state. func (t *trial) recover() error { runID, restarts, err := t.db.TrialRunIDAndRestarts(t.id) if err != nil { return errors.Wrap(err, "restoring old trial state") } t.runID = runID t.restarts = restarts return nil } // maybeAllocateTask checks if the trial should allocate state and allocates it if so. func (t *trial) maybeAllocateTask(ctx *actor.Context) error { if !(t.allocationID == nil && !t.searcher.Complete && t.state == model.ActiveState) { return nil } name := fmt.Sprintf("Trial %d (Experiment %d)", t.id, t.experimentID) ctx.Log().Info("decided to allocate trial") restoredAllocation, err := t.maybeRestoreAllocation(ctx) if err != nil { ctx.Log().WithError(err).Warn("failed to restore trial allocation") } else if restoredAllocation != nil { specifier, err := t.buildTaskSpecifier(ctx) if err != nil { return err } ar := sproto.AllocateRequest{ AllocationID: restoredAllocation.AllocationID, TaskID: t.taskID, JobID: t.jobID, JobSubmissionTime: t.jobSubmissionTime, IsUserVisible: true, Name: name, Group: ctx.Self().Parent(), SlotsNeeded: t.config.Resources().SlotsPerTrial(), ResourcePool: t.config.Resources().ResourcePool(), FittingRequirements: sproto.FittingRequirements{ SingleAgent: false, }, Preemptible: true, Restore: true, ProxyPorts: sproto.NewProxyPortConfig( tasks.TrialSpecProxyPorts(t.taskSpec, t.config), t.taskID), } ctx.Log(). WithField("allocation-id", ar.AllocationID). Infof("starting restored trial allocation") // HACK: Start used to only return errors async, now that it doesn't we need retries else // temporary failures fail the entire trial too easily. 
err = backoff.Retry(func() error { return task.DefaultService.StartAllocation( t.logCtx, ar, t.db, t.rm, specifier, ctx.Self().System(), func(ae *task.AllocationExited) { ctx.Tell(ctx.Self(), ae) }, ) }, launchRetries()) if err != nil { return err } t.allocationID = &ar.AllocationID return nil } t.runID++ t.logCtx = logger.MergeContexts(t.logCtx, logger.Context{"trial-run-id": t.runID}) ctx.AddLabels(t.logCtx) specifier, err := t.buildTaskSpecifier(ctx) if err != nil { return err } ar := sproto.AllocateRequest{ AllocationID: model.AllocationID(fmt.Sprintf("%s.%d", t.taskID, t.runID)), TaskID: t.taskID, JobID: t.jobID, JobSubmissionTime: t.jobSubmissionTime, IsUserVisible: true, Name: name, Group: ctx.Self().Parent(), SlotsNeeded: t.config.Resources().SlotsPerTrial(), ResourcePool: t.config.Resources().ResourcePool(), FittingRequirements: sproto.FittingRequirements{ SingleAgent: false, }, Preemptible: true, ProxyPorts: sproto.NewProxyPortConfig(tasks.TrialSpecProxyPorts(t.taskSpec, t.config), t.taskID), } ctx.Log(). WithField("allocation-id", ar.AllocationID). Debugf("starting new trial allocation") prom.AssociateJobExperiment(t.jobID, strconv.Itoa(t.experimentID), t.config.Labels()) // HACK: Start used to only return errors async, now that it doesn't we need retries else // temporary failures fail the entire trial too easily. err = backoff.Retry(func() error { return task.DefaultService.StartAllocation( t.logCtx, ar, t.db, t.rm, specifier, ctx.Self().System(), func(ae *task.AllocationExited) { ctx.Tell(ctx.Self(), ae) }, ) }, launchRetries()) if err != nil { return err } t.allocationID = &ar.AllocationID return nil } const ( // InvalidHPKillDelay the delay before we forcibly kill a trial that said it had an invalid HP. InvalidHPKillDelay = 10 * time.Second ) func (t *trial)
(ctx *actor.Context, msg userInitiatedEarlyExit) error { switch msg.reason { case model.InvalidHP, model.InitInvalidHP: t.userInitiatedExit = &msg.reason // After a short time, force us to clean up if we're still handling messages. actors.NotifyAfter(ctx, InvalidHPKillDelay, model.StoppingKilledState) return nil case model.UserRequestedStop, model.Errored: return fmt.Errorf("should not report special exit reason %s to the master", msg.reason) default: return actor.ErrUnexpectedMessage(ctx) } } func (t *trial) addTask() error { return t.db.AddTask(&model.Task{ TaskID: t.taskID, TaskType: model.TaskTypeTrial, StartTime: t.jobSubmissionTime, // TODO: Why is this the job submission time..? JobID: &t.jobID, LogVersion: model.CurrentTaskLogVersion, }) } func (t *trial) buildTaskSpecifier(ctx *actor.Context) (*tasks.TrialSpec, error) { if !t.trialCreationSent { ctx.Tell(ctx.Self().Parent(), trialCreated{requestID: t.searcher.Create.RequestID}) t.trialCreationSent = true } if err := t.db.UpdateTrialRunID(t.id, t.runID); err != nil { return nil, errors.Wrap(err, "failed to save trial run ID") } var stepsCompleted int latestCheckpoint, err := t.db.LatestCheckpointForTrial(t.id) switch { case err != nil: return nil, errors.Wrapf(err, "failed to query latest checkpoint for trial") case latestCheckpoint == nil: latestCheckpoint = t.warmStartCheckpoint default: stepsCompleted = latestCheckpoint.StepsCompleted } return &tasks.TrialSpec{ Base: *t.taskSpec, ExperimentID: t.experimentID, TrialID: t.id, TrialRunID: t.runID, ExperimentConfig: schemas.Copy(t.config), HParams: t.searcher.Create.Hparams, TrialSeed: t.searcher.Create.TrialSeed, StepsCompleted: stepsCompleted, LatestCheckpoint: latestCheckpoint, Keys: t.generatedKeys, }, nil } // allocationExited cleans up after an allocation exit and exits permanently or reallocates. 
func (t *trial) allocationExited(ctx *actor.Context, exit *task.AllocationExited) error { if exit.Err != nil { ctx.Log().WithError(exit.Err).Error("trial allocation failed") } t.allocationID = nil prom.DisassociateJobExperiment(t.jobID, strconv.Itoa(t.experimentID), t.config.Labels()) // Decide if this is permanent. switch { case model.StoppingStates[t.state]: if exit.Err != nil { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with an error while trial was stopping %v", exit.Err), }) } return t.transition(ctx, model.StateWithReason{ State: model.StoppingToTerminalStates[t.state], InformationalReason: "trial stopped", }) case t.searcher.Complete && t.searcher.Closed: if exit.Err != nil { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with an error but hp search was complete %v", exit.Err), }) } return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: "hp search is finished", }) case exit.Err != nil && sproto.IsUnrecoverableSystemError(exit.Err): ctx.Log(). WithError(exit.Err). Errorf("trial encountered unrecoverable failure") return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with unrecoverable failure %v", exit.Err), }) case exit.Err != nil && isNonRetryableError(exit.Err): // These are errors that no matter how many times we retry, the outcome will // be the same, so don't bother retrying. Fail right away to allow the user // to make any adjustments to the experiment and try again. return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with unrecoverable failure %v", exit.Err), }) case exit.Err != nil && sproto.IsTransientSystemError(exit.Err): ctx.Log(). WithError(exit.Err). 
Errorf("trial encountered transient system error") case exit.Err != nil && !sproto.IsTransientSystemError(exit.Err): ctx.Log(). WithError(exit.Err). Errorf("trial failed (restart %d/%d)", t.restarts, t.config.MaxRestarts()) t.restarts++ if err := t.db.UpdateTrialRestarts(t.id, t.restarts); err != nil { return err } if t.restarts > t.config.MaxRestarts() { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: "trial exceeded max restarts", }) } case exit.UserRequestedStop: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.UserRequestedStop, }) return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: "trial exited early due to a user requested stop", }) case t.userInitiatedExit != nil: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: *t.userInitiatedExit, }) return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: fmt.Sprintf( "trial exited early with reason: %v", *t.userInitiatedExit), }) } // Maybe reschedule. return errors.Wrap(t.maybeAllocateTask(ctx), "failed to reschedule trial") } // patchState decide if the state patch is valid. If so, we'll transition the trial. func (t *trial) patchState(ctx *actor.Context, s model.StateWithReason) error { switch { case model.TerminalStates[t.state]: ctx.Log().Infof("ignoring transition in terminal state (%s -> %s)", t.state, s.State) return nil case model.TerminalStates[s.State]: ctx.Log().Infof("ignoring patch to terminal state %s", s.State) return nil case t.state == s.State: // Order is important, else below will prevent re-sending kills. 
ctx.Log().Infof("resending actions for transition for %s", t.state) return t.transition(ctx, s) case model.StoppingStates[t.state] && !model.TrialTransitions[t.state][s.State]: ctx.Log().Infof("ignoring patch to less severe stopping state (%s)", s.State) return nil default: ctx.Log().Debugf("patching state after request (%s)", s.State) return t.transition(ctx, s) } } // transition the trial by rectifying the desired state with our actual state to determined // a target state, and then propogating the appropriate signals to the allocation if there is any. func (t *trial) transition(ctx *actor.Context, s model.StateWithReason) error { if t.state != s.State { ctx.Log().Infof("trial changed from state %s to %s", t.state, s.State) if t.idSet { if err := t.db.UpdateTrial(t.id, s.State); err != nil { return errors.Wrap(err, "updating trial with end state") } } t.state = s.State } // Rectify our state and the allocation state with the transition. switch { case t.state == model.ActiveState: return t.maybeAllocateTask(ctx) case t.state == model.PausedState: if t.allocationID != nil { ctx.Log().Info("decided to terminate trial due to pause") err := task.DefaultService.Signal( *t.allocationID, task.TerminateAllocation, s.InformationalReason, ) if err != nil { ctx.Log().WithError(err).Warn("could not terminate allocation after pause") } } case model.StoppingStates[t.state]: switch { case t.allocationID == nil: ctx.Log().Info("stopping trial before resources are requested") return t.transition(ctx, model.StateWithReason{ State: model.StoppingToTerminalStates[t.state], InformationalReason: s.InformationalReason, }) default: if action, ok := map[model.State]task.AllocationSignal{ model.StoppingCanceledState: task.TerminateAllocation, model.StoppingKilledState: task.KillAllocation, model.StoppingErrorState: task.KillAllocation, }[t.state]; ok { ctx.Log().Infof("decided to %s trial", action) err := task.DefaultService.Signal(*t.allocationID, action, s.InformationalReason) if err != 
nil { ctx.Log().WithError(err).Warnf("could not %s allocation during stop", action) } } } case model.TerminalStates[t.state]: switch t.state { case model.ErrorState: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.Errored, }) case model.CanceledState: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.UserCanceled, }) } ctx.Self().Stop() default: panic(fmt.Errorf("unmatched state in transition %s", t.state)) } return nil } func (t *trial) enrichTaskLog(log *model.TaskLog) (*model.TaskLog, error) { if !t.idSet { return nil, fmt.Errorf("cannot handle trial log before ID is set: %v", log) } log.TaskID = string(t.taskID) if log.Timestamp == nil { log.Timestamp = ptrs.Ptr(time.Now().UTC()) } if log.Level == nil { log.Level = ptrs.Ptr("INFO") } if log.Source == nil { log.Source = ptrs.Ptr("master") } if log.StdType == nil { log.StdType = ptrs.Ptr("stdout") } log.Log += "\n" return log, nil } func (t *trial) maybeRestoreAllocation(ctx *actor.Context) (*model.Allocation, error) { if !t.restored || !t.rm.IsReattachEnabled(ctx) { return nil, nil } var allocations []model.Allocation selectQuery := db.Bun().NewSelect().Model(&allocations). Where("task_id = ?", t.taskID). Where("end_time IS NULL"). Where("state != ?", model.AllocationStateTerminated) if t.rm.IsReattachableOnlyAfterStarted(ctx) { selectQuery.Where("start_time IS NOT NULL") } // Do we have an open allocation? 
err := selectQuery.Scan(context.TODO()) if err != nil { return nil, err } openAllocs := len(allocations) switch { case openAllocs == 0: return nil, nil case openAllocs == 1: allocation := &allocations[0] if !t.rm.IsReattachEnabledForRP(ctx, allocation.ResourcePool) { return nil, nil } return allocation, nil case openAllocs > 1: const maxAllocsToLog int = 3 allocIDs := make([]string, 0, maxAllocsToLog) for _, alloc := range allocations[0:mathx.Min(len(allocations), maxAllocsToLog)] { allocIDs = append(allocIDs, alloc.AllocationID.String()) } return nil, fmt.Errorf( "discovered %d open allocations on restore: %s", len(allocations), strings.Join(allocIDs, " "), ) default: return nil, fmt.Errorf( "discovered %d open allocations on restore", len(allocations), ) } } func launchRetries() backoff.BackOff { bf := backoff.NewExponentialBackOff() bf.InitialInterval = time.Second bf.MaxInterval = time.Minute return backoff.WithMaxRetries(bf, 4) }
handleUserInitiatedStops
identifier_name
trial.go
package internal import ( "context" "fmt" "regexp" "strconv" "strings" "time" "github.com/cenkalti/backoff/v4" "github.com/determined-ai/determined/master/internal/prom" "github.com/determined-ai/determined/master/internal/rm" "github.com/determined-ai/determined/master/internal/task" "github.com/determined-ai/determined/master/internal/task/tasklogger" "github.com/determined-ai/determined/master/pkg/actor/actors" "github.com/determined-ai/determined/master/pkg/logger" "github.com/determined-ai/determined/master/pkg/mathx" "github.com/determined-ai/determined/master/pkg/ptrs" "github.com/pkg/errors" "github.com/determined-ai/determined/master/internal/db" "github.com/determined-ai/determined/master/internal/sproto" "github.com/determined-ai/determined/master/pkg/actor" "github.com/determined-ai/determined/master/pkg/model" "github.com/determined-ai/determined/master/pkg/schemas" "github.com/determined-ai/determined/master/pkg/schemas/expconf" "github.com/determined-ai/determined/master/pkg/ssh" "github.com/determined-ai/determined/master/pkg/tasks" ) // A list of errors for which we don't want to attempt any retries of the experiment. // These are errors that no matter how many times we retry, the outcome will still result // in the same error. var nonRetryableErrors = []*regexp.Regexp{ // This error is typically seen when you request resources that SLURM is not able to satisfy. regexp.MustCompile("sbatch: error: Batch job submission failed"), } // A trial is a task actor which is responsible for handling: // - messages from the resource manager, // - messages from the experiment, // - messages from the trial container(s), and // - keeping the trial table of the database up-to-date. // // The trial's desired state is dictated to it by the experiment, searcher and user; they push // it to states like 'ACTIVE', 'PAUSED' and kill or wake it when more work is available. 
It takes // this information and works with the resource manager, allocation, etc, to push us towards // a terminal state, by requesting resources, managing them and restarting them on failures. type trial struct { id int taskID model.TaskID jobID model.JobID jobSubmissionTime time.Time idSet bool experimentID int restored bool trialCreationSent bool // System dependencies. db db.DB rm rm.ResourceManager // Fields that are essentially configuration for the trial. config expconf.ExperimentConfig taskSpec *tasks.TaskSpec generatedKeys ssh.PrivateAndPublicKeys warmStartCheckpoint *model.Checkpoint // state is the current state of the trial. It's patched by experiment changes and kill trial. state model.State // searcher encapsulates the searcher state of the trial. searcher trialSearcherState // restarts is a failure count, it increments when the trial fails and we retry it. restarts int // runID is a count of how many times the task container(s) have stopped and restarted, which // could be due to a failure or due to normal pausing and continuing. When RunID increments, // it effectively invalidates many outstanding messages associated with the previous run. runID int // a ref to the current allocation allocationID *model.AllocationID // a note of the user initated exit reason, if any. userInitiatedExit *model.ExitedReason logCtx logger.Context } // newTrial creates a trial which will try to schedule itself after it receives its first workload. 
func newTrial( logCtx logger.Context, taskID model.TaskID, jobID model.JobID, jobSubmissionTime time.Time, experimentID int, initialState model.State, searcher trialSearcherState, rm rm.ResourceManager, db db.DB, config expconf.ExperimentConfig, warmStartCheckpoint *model.Checkpoint, taskSpec *tasks.TaskSpec, generatedKeys ssh.PrivateAndPublicKeys, restored bool, ) *trial { return &trial{ taskID: taskID, jobID: jobID, jobSubmissionTime: jobSubmissionTime, experimentID: experimentID, state: initialState, searcher: searcher, db: db, rm: rm, config: config, taskSpec: taskSpec, generatedKeys: generatedKeys, warmStartCheckpoint: warmStartCheckpoint, logCtx: logger.MergeContexts(logCtx, logger.Context{ "task-id": taskID, "task-type": model.TaskTypeTrial, }), restored: restored, } } // Returns true if the error message matches one of the errors in the non-retryable list. func isNonRetryableError(err error) bool { for _, nonRetryableError := range nonRetryableErrors { if nonRetryableError.MatchString(err.Error()) { return true } } return false } func (t *trial) Receive(ctx *actor.Context) error { switch msg := ctx.Message().(type) { case actor.PreStart: if t.idSet { if err := t.recover(); err != nil { return fmt.Errorf("recovering trial in prestart: %w", err) } } else { if err := t.create(ctx); err != nil { return fmt.Errorf("persisting trial in prestart: %w", err) } } t.logCtx = logger.MergeContexts(t.logCtx, logger.Context{ "trial-id": t.id, "trial-run-id": t.runID, }) ctx.AddLabels(t.logCtx) return t.maybeAllocateTask(ctx) case actor.PostStop: if !t.idSet { return nil } if !model.TerminalStates[t.state] { if t.allocationID != nil { err := task.DefaultService.Signal(*t.allocationID, task.KillAllocation, "trial crashed") if err == nil { task.DefaultService.AwaitTermination(*t.allocationID) } } return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: "trial did not finish properly", }) } return nil case model.State: return 
t.patchState(ctx, model.StateWithReason{State: msg}) case model.StateWithReason: return t.patchState(ctx, msg) case trialSearcherState: t.searcher = msg switch { case !t.searcher.Complete: return t.maybeAllocateTask(ctx) case t.searcher.Complete && t.searcher.Closed: return t.patchState(ctx, model.StateWithReason{ State: model.StoppingCompletedState, InformationalReason: "hp search is finished", }) } return nil case sproto.ChangeRP: resources := t.config.Resources() resources.SetResourcePool(msg.ResourcePool) t.config.SetResources(resources) if t.allocationID != nil { err := task.DefaultService.Signal( *t.allocationID, task.TerminateAllocation, "allocation resource pool changed", ) if err != nil { ctx.Log().WithError(err).Warn("could not preempt allocation to change rp") } } case userInitiatedEarlyExit: if err := t.handleUserInitiatedStops(ctx, msg); err != nil { ctx.Respond(err) } case *task.AllocationExited: if t.allocationID != nil { return t.allocationExited(ctx, msg) } case sproto.ContainerLog: if log, err := t.enrichTaskLog(&model.TaskLog{ ContainerID: ptrs.Ptr(string(msg.ContainerID)), Log: msg.Message(), Level: msg.Level, }); err != nil { ctx.Log().WithError(err).Warn("dropping container log") } else { tasklogger.Insert(log) } case model.TaskLog: if log, err := t.enrichTaskLog(&msg); err != nil { ctx.Log().WithError(err).Warn("dropping trial log") } else { tasklogger.Insert(log) } case sproto.InvalidResourcesRequestError: ctx.Tell(ctx.Self().Parent(), msg) default: return actor.ErrUnexpectedMessage(ctx) } return nil } func (t *trial) create(ctx *actor.Context) error { m := model.NewTrial( t.state, t.searcher.Create.RequestID, t.experimentID, model.JSONObj(t.searcher.Create.Hparams), t.warmStartCheckpoint, int64(t.searcher.Create.TrialSeed), ) err := t.addTask() if err != nil { return err } err = db.AddTrial(context.TODO(), m, t.taskID) if err != nil { return errors.Wrap(err, "failed to save trial to database") } t.id = m.ID t.idSet = true return nil } // 
recover recovers the trial minimal (hopefully to stay) state for a trial actor. // Separately, the experiment stores and recovers our searcher state. func (t *trial) recover() error { runID, restarts, err := t.db.TrialRunIDAndRestarts(t.id) if err != nil { return errors.Wrap(err, "restoring old trial state") } t.runID = runID t.restarts = restarts return nil } // maybeAllocateTask checks if the trial should allocate state and allocates it if so. func (t *trial) maybeAllocateTask(ctx *actor.Context) error { if !(t.allocationID == nil && !t.searcher.Complete && t.state == model.ActiveState) { return nil } name := fmt.Sprintf("Trial %d (Experiment %d)", t.id, t.experimentID) ctx.Log().Info("decided to allocate trial") restoredAllocation, err := t.maybeRestoreAllocation(ctx) if err != nil { ctx.Log().WithError(err).Warn("failed to restore trial allocation") } else if restoredAllocation != nil { specifier, err := t.buildTaskSpecifier(ctx) if err != nil { return err } ar := sproto.AllocateRequest{ AllocationID: restoredAllocation.AllocationID, TaskID: t.taskID, JobID: t.jobID, JobSubmissionTime: t.jobSubmissionTime, IsUserVisible: true, Name: name, Group: ctx.Self().Parent(), SlotsNeeded: t.config.Resources().SlotsPerTrial(), ResourcePool: t.config.Resources().ResourcePool(), FittingRequirements: sproto.FittingRequirements{ SingleAgent: false, }, Preemptible: true, Restore: true, ProxyPorts: sproto.NewProxyPortConfig( tasks.TrialSpecProxyPorts(t.taskSpec, t.config), t.taskID), } ctx.Log(). WithField("allocation-id", ar.AllocationID). Infof("starting restored trial allocation") // HACK: Start used to only return errors async, now that it doesn't we need retries else // temporary failures fail the entire trial too easily. 
err = backoff.Retry(func() error { return task.DefaultService.StartAllocation( t.logCtx, ar, t.db, t.rm, specifier, ctx.Self().System(), func(ae *task.AllocationExited) { ctx.Tell(ctx.Self(), ae) }, ) }, launchRetries()) if err != nil { return err } t.allocationID = &ar.AllocationID return nil } t.runID++ t.logCtx = logger.MergeContexts(t.logCtx, logger.Context{"trial-run-id": t.runID}) ctx.AddLabels(t.logCtx) specifier, err := t.buildTaskSpecifier(ctx) if err != nil { return err } ar := sproto.AllocateRequest{ AllocationID: model.AllocationID(fmt.Sprintf("%s.%d", t.taskID, t.runID)), TaskID: t.taskID, JobID: t.jobID, JobSubmissionTime: t.jobSubmissionTime, IsUserVisible: true, Name: name, Group: ctx.Self().Parent(), SlotsNeeded: t.config.Resources().SlotsPerTrial(), ResourcePool: t.config.Resources().ResourcePool(), FittingRequirements: sproto.FittingRequirements{ SingleAgent: false, }, Preemptible: true, ProxyPorts: sproto.NewProxyPortConfig(tasks.TrialSpecProxyPorts(t.taskSpec, t.config), t.taskID), } ctx.Log(). WithField("allocation-id", ar.AllocationID). Debugf("starting new trial allocation") prom.AssociateJobExperiment(t.jobID, strconv.Itoa(t.experimentID), t.config.Labels()) // HACK: Start used to only return errors async, now that it doesn't we need retries else // temporary failures fail the entire trial too easily. err = backoff.Retry(func() error {
return task.DefaultService.StartAllocation( t.logCtx, ar, t.db, t.rm, specifier, ctx.Self().System(), func(ae *task.AllocationExited) { ctx.Tell(ctx.Self(), ae) }, ) }, launchRetries()) if err != nil { return err } t.allocationID = &ar.AllocationID return nil } const ( // InvalidHPKillDelay the delay before we forcibly kill a trial that said it had an invalid HP. InvalidHPKillDelay = 10 * time.Second ) func (t *trial) handleUserInitiatedStops(ctx *actor.Context, msg userInitiatedEarlyExit) error { switch msg.reason { case model.InvalidHP, model.InitInvalidHP: t.userInitiatedExit = &msg.reason // After a short time, force us to clean up if we're still handling messages. actors.NotifyAfter(ctx, InvalidHPKillDelay, model.StoppingKilledState) return nil case model.UserRequestedStop, model.Errored: return fmt.Errorf("should not report special exit reason %s to the master", msg.reason) default: return actor.ErrUnexpectedMessage(ctx) } } func (t *trial) addTask() error { return t.db.AddTask(&model.Task{ TaskID: t.taskID, TaskType: model.TaskTypeTrial, StartTime: t.jobSubmissionTime, // TODO: Why is this the job submission time..? 
JobID: &t.jobID, LogVersion: model.CurrentTaskLogVersion, }) } func (t *trial) buildTaskSpecifier(ctx *actor.Context) (*tasks.TrialSpec, error) { if !t.trialCreationSent { ctx.Tell(ctx.Self().Parent(), trialCreated{requestID: t.searcher.Create.RequestID}) t.trialCreationSent = true } if err := t.db.UpdateTrialRunID(t.id, t.runID); err != nil { return nil, errors.Wrap(err, "failed to save trial run ID") } var stepsCompleted int latestCheckpoint, err := t.db.LatestCheckpointForTrial(t.id) switch { case err != nil: return nil, errors.Wrapf(err, "failed to query latest checkpoint for trial") case latestCheckpoint == nil: latestCheckpoint = t.warmStartCheckpoint default: stepsCompleted = latestCheckpoint.StepsCompleted } return &tasks.TrialSpec{ Base: *t.taskSpec, ExperimentID: t.experimentID, TrialID: t.id, TrialRunID: t.runID, ExperimentConfig: schemas.Copy(t.config), HParams: t.searcher.Create.Hparams, TrialSeed: t.searcher.Create.TrialSeed, StepsCompleted: stepsCompleted, LatestCheckpoint: latestCheckpoint, Keys: t.generatedKeys, }, nil } // allocationExited cleans up after an allocation exit and exits permanently or reallocates. func (t *trial) allocationExited(ctx *actor.Context, exit *task.AllocationExited) error { if exit.Err != nil { ctx.Log().WithError(exit.Err).Error("trial allocation failed") } t.allocationID = nil prom.DisassociateJobExperiment(t.jobID, strconv.Itoa(t.experimentID), t.config.Labels()) // Decide if this is permanent. 
switch { case model.StoppingStates[t.state]: if exit.Err != nil { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with an error while trial was stopping %v", exit.Err), }) } return t.transition(ctx, model.StateWithReason{ State: model.StoppingToTerminalStates[t.state], InformationalReason: "trial stopped", }) case t.searcher.Complete && t.searcher.Closed: if exit.Err != nil { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with an error but hp search was complete %v", exit.Err), }) } return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: "hp search is finished", }) case exit.Err != nil && sproto.IsUnrecoverableSystemError(exit.Err): ctx.Log(). WithError(exit.Err). Errorf("trial encountered unrecoverable failure") return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with unrecoverable failure %v", exit.Err), }) case exit.Err != nil && isNonRetryableError(exit.Err): // These are errors that no matter how many times we retry, the outcome will // be the same, so don't bother retrying. Fail right away to allow the user // to make any adjustments to the experiment and try again. return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: fmt.Sprintf( "trial allocation exited with unrecoverable failure %v", exit.Err), }) case exit.Err != nil && sproto.IsTransientSystemError(exit.Err): ctx.Log(). WithError(exit.Err). Errorf("trial encountered transient system error") case exit.Err != nil && !sproto.IsTransientSystemError(exit.Err): ctx.Log(). WithError(exit.Err). 
Errorf("trial failed (restart %d/%d)", t.restarts, t.config.MaxRestarts()) t.restarts++ if err := t.db.UpdateTrialRestarts(t.id, t.restarts); err != nil { return err } if t.restarts > t.config.MaxRestarts() { return t.transition(ctx, model.StateWithReason{ State: model.ErrorState, InformationalReason: "trial exceeded max restarts", }) } case exit.UserRequestedStop: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.UserRequestedStop, }) return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: "trial exited early due to a user requested stop", }) case t.userInitiatedExit != nil: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: *t.userInitiatedExit, }) return t.transition(ctx, model.StateWithReason{ State: model.CompletedState, InformationalReason: fmt.Sprintf( "trial exited early with reason: %v", *t.userInitiatedExit), }) } // Maybe reschedule. return errors.Wrap(t.maybeAllocateTask(ctx), "failed to reschedule trial") } // patchState decide if the state patch is valid. If so, we'll transition the trial. func (t *trial) patchState(ctx *actor.Context, s model.StateWithReason) error { switch { case model.TerminalStates[t.state]: ctx.Log().Infof("ignoring transition in terminal state (%s -> %s)", t.state, s.State) return nil case model.TerminalStates[s.State]: ctx.Log().Infof("ignoring patch to terminal state %s", s.State) return nil case t.state == s.State: // Order is important, else below will prevent re-sending kills. 
ctx.Log().Infof("resending actions for transition for %s", t.state) return t.transition(ctx, s) case model.StoppingStates[t.state] && !model.TrialTransitions[t.state][s.State]: ctx.Log().Infof("ignoring patch to less severe stopping state (%s)", s.State) return nil default: ctx.Log().Debugf("patching state after request (%s)", s.State) return t.transition(ctx, s) } } // transition the trial by rectifying the desired state with our actual state to determined // a target state, and then propogating the appropriate signals to the allocation if there is any. func (t *trial) transition(ctx *actor.Context, s model.StateWithReason) error { if t.state != s.State { ctx.Log().Infof("trial changed from state %s to %s", t.state, s.State) if t.idSet { if err := t.db.UpdateTrial(t.id, s.State); err != nil { return errors.Wrap(err, "updating trial with end state") } } t.state = s.State } // Rectify our state and the allocation state with the transition. switch { case t.state == model.ActiveState: return t.maybeAllocateTask(ctx) case t.state == model.PausedState: if t.allocationID != nil { ctx.Log().Info("decided to terminate trial due to pause") err := task.DefaultService.Signal( *t.allocationID, task.TerminateAllocation, s.InformationalReason, ) if err != nil { ctx.Log().WithError(err).Warn("could not terminate allocation after pause") } } case model.StoppingStates[t.state]: switch { case t.allocationID == nil: ctx.Log().Info("stopping trial before resources are requested") return t.transition(ctx, model.StateWithReason{ State: model.StoppingToTerminalStates[t.state], InformationalReason: s.InformationalReason, }) default: if action, ok := map[model.State]task.AllocationSignal{ model.StoppingCanceledState: task.TerminateAllocation, model.StoppingKilledState: task.KillAllocation, model.StoppingErrorState: task.KillAllocation, }[t.state]; ok { ctx.Log().Infof("decided to %s trial", action) err := task.DefaultService.Signal(*t.allocationID, action, s.InformationalReason) if err != 
nil { ctx.Log().WithError(err).Warnf("could not %s allocation during stop", action) } } } case model.TerminalStates[t.state]: switch t.state { case model.ErrorState: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.Errored, }) case model.CanceledState: ctx.Tell(ctx.Self().Parent(), trialReportEarlyExit{ requestID: t.searcher.Create.RequestID, reason: model.UserCanceled, }) } ctx.Self().Stop() default: panic(fmt.Errorf("unmatched state in transition %s", t.state)) } return nil } func (t *trial) enrichTaskLog(log *model.TaskLog) (*model.TaskLog, error) { if !t.idSet { return nil, fmt.Errorf("cannot handle trial log before ID is set: %v", log) } log.TaskID = string(t.taskID) if log.Timestamp == nil { log.Timestamp = ptrs.Ptr(time.Now().UTC()) } if log.Level == nil { log.Level = ptrs.Ptr("INFO") } if log.Source == nil { log.Source = ptrs.Ptr("master") } if log.StdType == nil { log.StdType = ptrs.Ptr("stdout") } log.Log += "\n" return log, nil } func (t *trial) maybeRestoreAllocation(ctx *actor.Context) (*model.Allocation, error) { if !t.restored || !t.rm.IsReattachEnabled(ctx) { return nil, nil } var allocations []model.Allocation selectQuery := db.Bun().NewSelect().Model(&allocations). Where("task_id = ?", t.taskID). Where("end_time IS NULL"). Where("state != ?", model.AllocationStateTerminated) if t.rm.IsReattachableOnlyAfterStarted(ctx) { selectQuery.Where("start_time IS NOT NULL") } // Do we have an open allocation? 
err := selectQuery.Scan(context.TODO()) if err != nil { return nil, err } openAllocs := len(allocations) switch { case openAllocs == 0: return nil, nil case openAllocs == 1: allocation := &allocations[0] if !t.rm.IsReattachEnabledForRP(ctx, allocation.ResourcePool) { return nil, nil } return allocation, nil case openAllocs > 1: const maxAllocsToLog int = 3 allocIDs := make([]string, 0, maxAllocsToLog) for _, alloc := range allocations[0:mathx.Min(len(allocations), maxAllocsToLog)] { allocIDs = append(allocIDs, alloc.AllocationID.String()) } return nil, fmt.Errorf( "discovered %d open allocations on restore: %s", len(allocations), strings.Join(allocIDs, " "), ) default: return nil, fmt.Errorf( "discovered %d open allocations on restore", len(allocations), ) } } func launchRetries() backoff.BackOff { bf := backoff.NewExponentialBackOff() bf.InitialInterval = time.Second bf.MaxInterval = time.Minute return backoff.WithMaxRetries(bf, 4) }
random_line_split
zhtta.rs
// // zhtta.rs // // Starting code for PS3 // Running on Rust 0.9 // // Note that this code has serious security risks! You should not run it // on any system with access to sensitive files. // // University of Virginia - cs4414 Spring 2014 // Weilin Xu and David Evans // Version 0.5 // To see debug! outputs set the RUST_LOG environment variable, e.g.: export RUST_LOG="zhtta=debug" // Problem 7 design decision: cache should not be bigger than 10 MB // remove last modified files // check cache for files first! #[feature(globs)]; extern mod extra; use std::io::*; use std::io::net::ip::{SocketAddr}; use std::io::buffered::BufferedReader; use std::{os, str, libc, from_str}; use std::path::Path; use std::hashmap::HashMap; use extra::getopts; use extra::arc::MutexArc; use extra::arc::RWArc; use extra::lru_cache::LruCache; use extra::priority_queue::PriorityQueue; use extra::sync::Semaphore; use std::io::buffered::BufferedStream; use std::io::File; use std::io::fs; mod gash; static SERVER_NAME : &'static str = "Zhtta Version 0.5"; static IP : &'static str = "127.0.0.1"; static PORT : uint = 4414; static WWW_DIR : &'static str = "./www"; static HTTP_OK : &'static str = "HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n"; static HTTP_BAD : &'static str = "HTTP/1.1 404 Not Found\r\n\r\n"; static COUNTER_STYLE : &'static str = "<doctype !html><html><head><title>Hello, Rust!</title> <style>body { background-color: #884414; color: #FFEEAA} h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red } h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green } </style></head> <body>"; static IP_LOGFILE : &'static str = "../iplog.txt"; //has to be level above b/c working dir is changed to www struct HTTP_Request { // Use peer_name as the key to access TcpStream in hashmap. // (Due to a bug in extra::arc in Rust 0.9, it is very inconvenient to use TcpStream without the "Freeze" bound. 
// See issue: https://github.com/mozilla/rust/issues/12139) peer_name: ~str, path: ~Path, } impl std::cmp::Eq for HTTP_Request { fn eq(&self, other: &HTTP_Request) -> bool { let sizeSelf = fs::stat(self.path).size; let sizeOther = fs::stat(other.path).size; if sizeOther == sizeSelf { if (other.peer_name.slice_to(7) == "128.143." || other.peer_name.slice_to(6) == "137.54." || other.peer_name.slice_to(9) == "127.0.0.1") && (self.peer_name.slice_to(7) == "128.143." || self.peer_name.slice_to(6) == "137.54." || self.peer_name.slice_to(9) == "127.0.0.1") { return true; } } return false; } } impl std::cmp::Ord for HTTP_Request { fn lt(&self, other: &HTTP_Request) -> bool { //First get the file sizes for the Http_Request let sizeSelf = fs::stat(self.path).size; let sizeOther = fs::stat(other.path).size; if sizeOther > sizeSelf { return true; } else { return getPriority(self.peer_name.clone()) < getPriority(other.peer_name.clone()); } } } struct WebServer { ip: ~str, port: uint, www_dir_path: ~Path, request_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, cache: MutexArc<MutexArc<LruCache<Path,~[u8]>>>, notify_port: Port<()>, shared_notify_chan: SharedChan<()> } impl WebServer { fn new(ip: &str, port: uint, www_dir: &str) -> WebServer { let (notify_port, shared_notify_chan) = SharedChan::new(); let www_dir_path = ~Path::new(www_dir); os::change_dir(www_dir_path.clone()); WebServer { ip: ip.to_owned(), port: port, www_dir_path: www_dir_path, request_queue_arc: MutexArc::new(PriorityQueue::new()), stream_map_arc: MutexArc::new(HashMap::new()), cache: MutexArc::new(MutexArc::new(LruCache::new(10))), notify_port: notify_port, shared_notify_chan: shared_notify_chan } } fn run(&mut self) { self.listen(); self.dequeue_static_file_request(); } fn listen(&mut self) { let addr = from_str::<SocketAddr>(format!("{:s}:{:u}", self.ip, self.port)).expect("Address error."); let www_dir_path_str = 
self.www_dir_path.as_str().expect("invalid www path?").to_owned(); let request_queue_arc = self.request_queue_arc.clone(); let shared_notify_chan = self.shared_notify_chan.clone(); let stream_map_arc = self.stream_map_arc.clone(); spawn(proc() { let mut acceptor = net::tcp::TcpListener::bind(addr).listen(); println!("{:s} listening on {:s} (serving from: {:s}).", SERVER_NAME, addr.to_str(), www_dir_path_str); //Visitor counter let num_visitor : uint = 0; //Arc for visitor counter. let visitor_arc_mut = RWArc::new(num_visitor); for stream in acceptor.incoming() { let (queue_port, queue_chan) = Chan::new(); queue_chan.send(request_queue_arc.clone()); let notify_chan = shared_notify_chan.clone(); let stream_map_arc = stream_map_arc.clone(); let(portMut, chanMut) = Chan::new(); chanMut.send(visitor_arc_mut.clone()); // Spawn a task to handle the connection. spawn(proc() { let request_queue_arc = queue_port.recv(); //This updates counter by adding one to it. let local_arc_mut = portMut.recv(); local_arc_mut.write(|value| { *value += 1 }); //This sets a local variable to current count. let mut visitor_count_local : uint = 0; local_arc_mut.read(|value| { //println(value.to_str()); visitor_count_local = *value; }); let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); ipToFile(peer_name.clone()); let mut buf = [0, ..500]; stream.read(buf); let request_str = str::from_utf8(buf); debug!("Request:\n{:s}", request_str); let req_group : ~[&str]= request_str.splitn(' ', 3).collect(); if req_group.len() > 2 { let path_str = "." 
+ req_group[1].to_owned(); let mut path_obj = ~os::getcwd(); path_obj.push(path_str.clone()); let ext_str = match path_obj.extension_str() { Some(e) => e, None => "", }; debug!("Requested path: [{:s}]", path_obj.as_str().expect("error")); debug!("Requested path: [{:s}]", path_str); if path_str == ~"./" { debug!("===== Counter Page request ====="); WebServer::respond_with_counter_page(stream, &visitor_count_local); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else if !path_obj.exists() || path_obj.is_dir() { debug!("===== Error page request ====="); WebServer::respond_with_error_page(stream, path_obj); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else if ext_str == "shtml" { // Dynamic web pages. debug!("===== Dynamic Page request ====="); WebServer::respond_with_dynamic_page(stream, path_obj); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else { debug!("===== Static Page request ====="); WebServer::enqueue_static_file_request(stream, path_obj, stream_map_arc, request_queue_arc, notify_chan); } } }); } }); } fn respond_with_error_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) { let mut stream = stream; let msg: ~str = format!("Cannot open: {:s}", path.as_str().expect("invalid path").to_owned()); stream.write(HTTP_BAD.as_bytes()); stream.write(msg.as_bytes()); } // TODO: Safe visitor counter. fn respond_with_counter_page(stream: Option<std::io::net::tcp::TcpStream>, visitor_count_local: &uint) { let mut stream = stream; let visitor_count_other : uint = visitor_count_local.clone(); let response: ~str = format!("{:s}{:s}<h1>Greetings, Krusty!</h1> <h2>Visitor count: {:u}</h2></body></html>\r\n", HTTP_OK, COUNTER_STYLE, visitor_count_other); debug!("Responding to counter request"); stream.write(response.as_bytes()); } // TODO: Streaming file. // TODO: Application-layer file caching. 
fn respond_with_static_file(stream: Option<std::io::net::tcp::TcpStream>, path: &Path, cache: MutexArc<LruCache<Path, ~[u8]>>) { let mut stream = stream; stream.write(HTTP_OK.as_bytes()); let mut check : bool = true; cache.access(|local_cache| { let bytes = local_cache.get(path); match(bytes) { Some(bytes) => { // in cache debug!("File found in cache: {}", path.display()); let size = bytes.len(); let iterations = size %100000; if(iterations < 100000) { stream.write(bytes.to_owned()); } else { for i in range(0, iterations) { let start = i * 100000; let tempByte = bytes.slice(start,start+100000-1); stream.write(tempByte); } let left = size - (iterations*100000); stream.write(bytes.slice_from(left)); } check = false; } None => {} } }); if(check) { cache.access(|local_cache| { // not in cache //let mut stream = stream; debug!("File not found in cache: {}", path.display()); let mut file_reader = File::open(path).expect("Invalid file!"); let fileSize = fs::stat(path).size; let iterations = fileSize&100000; let mut byteArray: ~[u8] = ~[]; if(iterations < 100000) { let tempArray = file_reader.read_to_end(); stream.write(tempArray); byteArray.push_all_move(tempArray); } else { for i in range(0, iterations) { let tempArray = file_reader.read_bytes(100000); stream.write(tempArray); byteArray.push_all_move(tempArray); } let tempArray = file_reader.read_to_end(); stream.write(tempArray); byteArray.push_all_move(tempArray); } //add to cache! //automatically handles removing other elements if necessary //if(fileSize < 10000000) { debug!("File added to cache: {}", path.display()); local_cache.put(path.clone(), byteArray); //} }); } } // TODO: Server-side gashing. 
fn respond_with_dynamic_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) { //for now, just serve as static file let shtml_file = File::open(path); let mut rwStream = BufferedStream::new(shtml_file); let mut newFile : ~[~str] = ~[]; let mut checkIfLastIsCmd : bool = false; for line in rwStream.lines() { let mut check : bool = false; let mut newLine : ~[~str] = ~[]; for split in line.split(' ') { if(check) { let cmdSplit : ~[&str] = split.split('=').collect(); let command : ~str = cmdSplit[1].to_owned(); let finalCommand = command.slice(1,command.len()-1).to_owned(); let output : ~str = gash::run_cmdline(finalCommand); newLine.push(output); check = false; checkIfLastIsCmd = true; } else if(split == "<!--#exec") { check = true; } else if(split == "-->") { } else { if(checkIfLastIsCmd && split.slice(0, 3) == "-->") { newLine.push(split.slice_from(3).to_owned()); newLine.push(" ".to_owned()); checkIfLastIsCmd = false; } else if(split.len() > 9 && split.slice_from(split.len() - 9) == "<!--#exec") { newLine.push(split.slice(0, split.len()-9).to_owned()); check = true; } else { newLine.push(split.to_owned()); newLine.push(" ".to_owned()); } } } let mut fullLine : ~str = ~""; for s in newLine.iter() { fullLine = fullLine + s.clone(); } newFile.push(fullLine); } let mut fullPage : ~str = ~""; for s in newFile.iter() { fullPage = fullPage + s.clone(); } let mut stream = stream; stream.write(HTTP_OK.as_bytes()); stream.write(fullPage.as_bytes()); } // TODO: Smarter Scheduling. fn enqueue_static_file_request(stream: Option<std::io::net::tcp::TcpStream>, path_obj: &Path, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, req_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, notify_chan: SharedChan<()>) { // Save stream in hashmap for later response. 
let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); let (stream_port, stream_chan) = Chan::new(); stream_chan.send(stream); unsafe { // Use an unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound. stream_map_arc.unsafe_access(|local_stream_map| { let stream = stream_port.recv(); local_stream_map.swap(peer_name.clone(), stream); }); } // Enqueue the HTTP request. let req = HTTP_Request { peer_name: peer_name.clone(), path: ~path_obj.clone() }; let (req_port, req_chan) = Chan::new(); req_chan.send(req); debug!("Waiting for queue mutex lock."); req_queue_arc.access(|local_req_queue| { debug!("Got queue mutex lock."); let req: HTTP_Request = req_port.recv(); local_req_queue.push(req); //debug!("Priority of new request is {:d}", getPriority(name.clone())); debug!("A new request enqueued, now the length of queue is {:u}.", local_req_queue.len());
} // TODO: Smarter Scheduling. fn dequeue_static_file_request(&mut self) { let req_queue_get = self.request_queue_arc.clone(); let stream_map_get = self.stream_map_arc.clone(); let cacheArc = self.cache.clone(); //Semaphore for counting tasks let s = Semaphore::new(4); // Port<> cannot be sent to another task. So we have to make this task as the main task that can access self.notify_port. let (request_port, request_chan) = Chan::new(); loop { self.notify_port.recv(); // waiting for new request enqueued. req_queue_get.access( |req_queue| { match req_queue.maybe_pop() { // Priority queue. None => { /* do nothing */ } Some(req) => { request_chan.send(req); debug!("A new request dequeued, now the length of queue is {:u}.", req_queue.len()); } } }); let request = request_port.recv(); // Get stream from hashmap. // Use unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound. let (stream_port, stream_chan) = Chan::new(); let (request_port_local, request_chan_local) = Chan::new(); unsafe { stream_map_get.unsafe_access(|local_stream_map| { let stream = local_stream_map.pop(&request.peer_name).expect("no option tcpstream"); stream_chan.send(stream); request_chan_local.send(request.path.clone()); }); } if(fs::stat(request.path).size < 1000000) { let mut file_reader = File::open(request.path).expect("Invalid file!"); let mut stream = stream_port.recv(); stream.write(HTTP_OK.as_bytes()); stream.write(file_reader.read_to_end()); } else { let semaphore = s.clone(); semaphore.acquire(); // TODO: Spawning more tasks to respond the dequeued requests concurrently. You may need a semophore to control the concurrency. 
semaphore.access( || { //Sending cache into spawn let(portCache, chanCache) = Chan::new(); chanCache.send(cacheArc.clone()); //Sending stream into spawn let streamLocal = stream_port.recv(); let(portStream, chanStream) = Chan::new(); chanStream.send(streamLocal); //Sending request into spawn let portLocal = request_port_local.recv(); let(portRequest, chanRequest) = Chan::new(); chanRequest.send(portLocal); let (semaphoreRequest, semaphoreChan) = Chan::new(); semaphoreChan.send(semaphore.clone()); spawn(proc() { let localCacheArc = portCache.recv(); let portName = portRequest.recv(); let s2 = semaphoreRequest.recv(); unsafe { localCacheArc.unsafe_access( |cache| { WebServer::respond_with_static_file(portStream.recv(), portName, cache.clone()); }); } // Close stream automatically. debug!("=====Terminated connection from [{}].=====", portName.display()); s2.release(); }); }); } } } fn get_peer_name(stream: &mut Option<std::io::net::tcp::TcpStream>) -> ~str { match *stream { Some(ref mut s) => { match s.peer_name() { Some(pn) => {pn.to_str()}, None => (~"") } }, None => (~"") } } } fn get_args() -> (~str, uint, ~str) { fn print_usage(program: &str) { println!("Usage: {:s} [options]", program); println!("--ip \tIP address, \"{:s}\" by default.", IP); println!("--port \tport number, \"{:u}\" by default.", PORT); println!("--www \tworking directory, \"{:s}\" by default", WWW_DIR); println("-h --help \tUsage"); } /* Begin processing program arguments and initiate the parameters. 
*/ let args = os::args(); let program = args[0].clone(); let opts = ~[ getopts::optopt("ip"), getopts::optopt("port"), getopts::optopt("www"), getopts::optflag("h"), getopts::optflag("help") ]; let matches = match getopts::getopts(args.tail(), opts) { Ok(m) => { m } Err(f) => { fail!(f.to_err_msg()) } }; if matches.opt_present("h") || matches.opt_present("help") { print_usage(program); unsafe { libc::exit(1); } } let ip_str = if matches.opt_present("ip") { matches.opt_str("ip").expect("invalid ip address?").to_owned() } else { IP.to_owned() }; let port:uint = if matches.opt_present("port") { from_str::from_str(matches.opt_str("port").expect("invalid port number?")).expect("not uint?") } else { PORT }; let www_dir_str = if matches.opt_present("www") { matches.opt_str("www").expect("invalid www argument?") } else { WWW_DIR.to_owned() }; (ip_str, port, www_dir_str) } fn main() { let (ip_str, port, www_dir_str) = get_args(); let mut zhtta = WebServer::new(ip_str, port, www_dir_str); zhtta.run(); } fn getPriority(other: ~str) -> int{ if(other.slice_to(7) == "128.143." || other.slice_to(6) == "137.54." || other.slice_to(9) == "127.0.0.1") { //debug!("{:s} Piority: 1", other); return 1; } else { return 2; } } fn ipToFile(IP_string: ~str) { let mut output_file = File::open_mode(&Path::new(IP_LOGFILE),Append,ReadWrite); match output_file { Some(mut file) => { let stringToWrite: ~str = IP_string + "\n"; file.write(stringToWrite.as_bytes()); }, None => { println!("Failure writing to file"); } } }
}); notify_chan.send(()); // Send incoming notification to responder task.
random_line_split
zhtta.rs
// // zhtta.rs // // Starting code for PS3 // Running on Rust 0.9 // // Note that this code has serious security risks! You should not run it // on any system with access to sensitive files. // // University of Virginia - cs4414 Spring 2014 // Weilin Xu and David Evans // Version 0.5 // To see debug! outputs set the RUST_LOG environment variable, e.g.: export RUST_LOG="zhtta=debug" // Problem 7 design decision: cache should not be bigger than 10 MB // remove last modified files // check cache for files first! #[feature(globs)]; extern mod extra; use std::io::*; use std::io::net::ip::{SocketAddr}; use std::io::buffered::BufferedReader; use std::{os, str, libc, from_str}; use std::path::Path; use std::hashmap::HashMap; use extra::getopts; use extra::arc::MutexArc; use extra::arc::RWArc; use extra::lru_cache::LruCache; use extra::priority_queue::PriorityQueue; use extra::sync::Semaphore; use std::io::buffered::BufferedStream; use std::io::File; use std::io::fs; mod gash; static SERVER_NAME : &'static str = "Zhtta Version 0.5"; static IP : &'static str = "127.0.0.1"; static PORT : uint = 4414; static WWW_DIR : &'static str = "./www"; static HTTP_OK : &'static str = "HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n"; static HTTP_BAD : &'static str = "HTTP/1.1 404 Not Found\r\n\r\n"; static COUNTER_STYLE : &'static str = "<doctype !html><html><head><title>Hello, Rust!</title> <style>body { background-color: #884414; color: #FFEEAA} h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red } h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green } </style></head> <body>"; static IP_LOGFILE : &'static str = "../iplog.txt"; //has to be level above b/c working dir is changed to www struct HTTP_Request { // Use peer_name as the key to access TcpStream in hashmap. // (Due to a bug in extra::arc in Rust 0.9, it is very inconvenient to use TcpStream without the "Freeze" bound. 
// See issue: https://github.com/mozilla/rust/issues/12139) peer_name: ~str, path: ~Path, } impl std::cmp::Eq for HTTP_Request { fn eq(&self, other: &HTTP_Request) -> bool { let sizeSelf = fs::stat(self.path).size; let sizeOther = fs::stat(other.path).size; if sizeOther == sizeSelf { if (other.peer_name.slice_to(7) == "128.143." || other.peer_name.slice_to(6) == "137.54." || other.peer_name.slice_to(9) == "127.0.0.1") && (self.peer_name.slice_to(7) == "128.143." || self.peer_name.slice_to(6) == "137.54." || self.peer_name.slice_to(9) == "127.0.0.1") { return true; } } return false; } } impl std::cmp::Ord for HTTP_Request { fn lt(&self, other: &HTTP_Request) -> bool { //First get the file sizes for the Http_Request let sizeSelf = fs::stat(self.path).size; let sizeOther = fs::stat(other.path).size; if sizeOther > sizeSelf { return true; } else { return getPriority(self.peer_name.clone()) < getPriority(other.peer_name.clone()); } } } struct WebServer { ip: ~str, port: uint, www_dir_path: ~Path, request_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, cache: MutexArc<MutexArc<LruCache<Path,~[u8]>>>, notify_port: Port<()>, shared_notify_chan: SharedChan<()> } impl WebServer { fn new(ip: &str, port: uint, www_dir: &str) -> WebServer { let (notify_port, shared_notify_chan) = SharedChan::new(); let www_dir_path = ~Path::new(www_dir); os::change_dir(www_dir_path.clone()); WebServer { ip: ip.to_owned(), port: port, www_dir_path: www_dir_path, request_queue_arc: MutexArc::new(PriorityQueue::new()), stream_map_arc: MutexArc::new(HashMap::new()), cache: MutexArc::new(MutexArc::new(LruCache::new(10))), notify_port: notify_port, shared_notify_chan: shared_notify_chan } } fn run(&mut self) { self.listen(); self.dequeue_static_file_request(); } fn listen(&mut self) { let addr = from_str::<SocketAddr>(format!("{:s}:{:u}", self.ip, self.port)).expect("Address error."); let www_dir_path_str = 
self.www_dir_path.as_str().expect("invalid www path?").to_owned(); let request_queue_arc = self.request_queue_arc.clone(); let shared_notify_chan = self.shared_notify_chan.clone(); let stream_map_arc = self.stream_map_arc.clone(); spawn(proc() { let mut acceptor = net::tcp::TcpListener::bind(addr).listen(); println!("{:s} listening on {:s} (serving from: {:s}).", SERVER_NAME, addr.to_str(), www_dir_path_str); //Visitor counter let num_visitor : uint = 0; //Arc for visitor counter. let visitor_arc_mut = RWArc::new(num_visitor); for stream in acceptor.incoming() { let (queue_port, queue_chan) = Chan::new(); queue_chan.send(request_queue_arc.clone()); let notify_chan = shared_notify_chan.clone(); let stream_map_arc = stream_map_arc.clone(); let(portMut, chanMut) = Chan::new(); chanMut.send(visitor_arc_mut.clone()); // Spawn a task to handle the connection. spawn(proc() { let request_queue_arc = queue_port.recv(); //This updates counter by adding one to it. let local_arc_mut = portMut.recv(); local_arc_mut.write(|value| { *value += 1 }); //This sets a local variable to current count. let mut visitor_count_local : uint = 0; local_arc_mut.read(|value| { //println(value.to_str()); visitor_count_local = *value; }); let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); ipToFile(peer_name.clone()); let mut buf = [0, ..500]; stream.read(buf); let request_str = str::from_utf8(buf); debug!("Request:\n{:s}", request_str); let req_group : ~[&str]= request_str.splitn(' ', 3).collect(); if req_group.len() > 2 { let path_str = "." 
+ req_group[1].to_owned(); let mut path_obj = ~os::getcwd(); path_obj.push(path_str.clone()); let ext_str = match path_obj.extension_str() { Some(e) => e, None => "", }; debug!("Requested path: [{:s}]", path_obj.as_str().expect("error")); debug!("Requested path: [{:s}]", path_str); if path_str == ~"./" { debug!("===== Counter Page request ====="); WebServer::respond_with_counter_page(stream, &visitor_count_local); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else if !path_obj.exists() || path_obj.is_dir() { debug!("===== Error page request ====="); WebServer::respond_with_error_page(stream, path_obj); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else if ext_str == "shtml" { // Dynamic web pages. debug!("===== Dynamic Page request ====="); WebServer::respond_with_dynamic_page(stream, path_obj); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else { debug!("===== Static Page request ====="); WebServer::enqueue_static_file_request(stream, path_obj, stream_map_arc, request_queue_arc, notify_chan); } } }); } }); } fn respond_with_error_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) { let mut stream = stream; let msg: ~str = format!("Cannot open: {:s}", path.as_str().expect("invalid path").to_owned()); stream.write(HTTP_BAD.as_bytes()); stream.write(msg.as_bytes()); } // TODO: Safe visitor counter. fn respond_with_counter_page(stream: Option<std::io::net::tcp::TcpStream>, visitor_count_local: &uint) { let mut stream = stream; let visitor_count_other : uint = visitor_count_local.clone(); let response: ~str = format!("{:s}{:s}<h1>Greetings, Krusty!</h1> <h2>Visitor count: {:u}</h2></body></html>\r\n", HTTP_OK, COUNTER_STYLE, visitor_count_other); debug!("Responding to counter request"); stream.write(response.as_bytes()); } // TODO: Streaming file. // TODO: Application-layer file caching. 
fn respond_with_static_file(stream: Option<std::io::net::tcp::TcpStream>, path: &Path, cache: MutexArc<LruCache<Path, ~[u8]>>) { let mut stream = stream; stream.write(HTTP_OK.as_bytes()); let mut check : bool = true; cache.access(|local_cache| { let bytes = local_cache.get(path); match(bytes) { Some(bytes) => { // in cache debug!("File found in cache: {}", path.display()); let size = bytes.len(); let iterations = size %100000; if(iterations < 100000) { stream.write(bytes.to_owned()); } else { for i in range(0, iterations) { let start = i * 100000; let tempByte = bytes.slice(start,start+100000-1); stream.write(tempByte); } let left = size - (iterations*100000); stream.write(bytes.slice_from(left)); } check = false; } None => {} } }); if(check) { cache.access(|local_cache| { // not in cache //let mut stream = stream; debug!("File not found in cache: {}", path.display()); let mut file_reader = File::open(path).expect("Invalid file!"); let fileSize = fs::stat(path).size; let iterations = fileSize&100000; let mut byteArray: ~[u8] = ~[]; if(iterations < 100000) { let tempArray = file_reader.read_to_end(); stream.write(tempArray); byteArray.push_all_move(tempArray); } else { for i in range(0, iterations) { let tempArray = file_reader.read_bytes(100000); stream.write(tempArray); byteArray.push_all_move(tempArray); } let tempArray = file_reader.read_to_end(); stream.write(tempArray); byteArray.push_all_move(tempArray); } //add to cache! //automatically handles removing other elements if necessary //if(fileSize < 10000000) { debug!("File added to cache: {}", path.display()); local_cache.put(path.clone(), byteArray); //} }); } } // TODO: Server-side gashing. 
fn respond_with_dynamic_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) { //for now, just serve as static file let shtml_file = File::open(path); let mut rwStream = BufferedStream::new(shtml_file); let mut newFile : ~[~str] = ~[]; let mut checkIfLastIsCmd : bool = false; for line in rwStream.lines() { let mut check : bool = false; let mut newLine : ~[~str] = ~[]; for split in line.split(' ') { if(check) { let cmdSplit : ~[&str] = split.split('=').collect(); let command : ~str = cmdSplit[1].to_owned(); let finalCommand = command.slice(1,command.len()-1).to_owned(); let output : ~str = gash::run_cmdline(finalCommand); newLine.push(output); check = false; checkIfLastIsCmd = true; } else if(split == "<!--#exec") { check = true; } else if(split == "-->") { } else { if(checkIfLastIsCmd && split.slice(0, 3) == "-->") { newLine.push(split.slice_from(3).to_owned()); newLine.push(" ".to_owned()); checkIfLastIsCmd = false; } else if(split.len() > 9 && split.slice_from(split.len() - 9) == "<!--#exec") { newLine.push(split.slice(0, split.len()-9).to_owned()); check = true; } else { newLine.push(split.to_owned()); newLine.push(" ".to_owned()); } } } let mut fullLine : ~str = ~""; for s in newLine.iter() { fullLine = fullLine + s.clone(); } newFile.push(fullLine); } let mut fullPage : ~str = ~""; for s in newFile.iter() { fullPage = fullPage + s.clone(); } let mut stream = stream; stream.write(HTTP_OK.as_bytes()); stream.write(fullPage.as_bytes()); } // TODO: Smarter Scheduling. fn enqueue_static_file_request(stream: Option<std::io::net::tcp::TcpStream>, path_obj: &Path, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, req_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, notify_chan: SharedChan<()>)
// TODO: Smarter Scheduling. fn dequeue_static_file_request(&mut self) { let req_queue_get = self.request_queue_arc.clone(); let stream_map_get = self.stream_map_arc.clone(); let cacheArc = self.cache.clone(); //Semaphore for counting tasks let s = Semaphore::new(4); // Port<> cannot be sent to another task. So we have to make this task as the main task that can access self.notify_port. let (request_port, request_chan) = Chan::new(); loop { self.notify_port.recv(); // waiting for new request enqueued. req_queue_get.access( |req_queue| { match req_queue.maybe_pop() { // Priority queue. None => { /* do nothing */ } Some(req) => { request_chan.send(req); debug!("A new request dequeued, now the length of queue is {:u}.", req_queue.len()); } } }); let request = request_port.recv(); // Get stream from hashmap. // Use unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound. let (stream_port, stream_chan) = Chan::new(); let (request_port_local, request_chan_local) = Chan::new(); unsafe { stream_map_get.unsafe_access(|local_stream_map| { let stream = local_stream_map.pop(&request.peer_name).expect("no option tcpstream"); stream_chan.send(stream); request_chan_local.send(request.path.clone()); }); } if(fs::stat(request.path).size < 1000000) { let mut file_reader = File::open(request.path).expect("Invalid file!"); let mut stream = stream_port.recv(); stream.write(HTTP_OK.as_bytes()); stream.write(file_reader.read_to_end()); } else { let semaphore = s.clone(); semaphore.acquire(); // TODO: Spawning more tasks to respond the dequeued requests concurrently. You may need a semophore to control the concurrency. 
semaphore.access( || { //Sending cache into spawn let(portCache, chanCache) = Chan::new(); chanCache.send(cacheArc.clone()); //Sending stream into spawn let streamLocal = stream_port.recv(); let(portStream, chanStream) = Chan::new(); chanStream.send(streamLocal); //Sending request into spawn let portLocal = request_port_local.recv(); let(portRequest, chanRequest) = Chan::new(); chanRequest.send(portLocal); let (semaphoreRequest, semaphoreChan) = Chan::new(); semaphoreChan.send(semaphore.clone()); spawn(proc() { let localCacheArc = portCache.recv(); let portName = portRequest.recv(); let s2 = semaphoreRequest.recv(); unsafe { localCacheArc.unsafe_access( |cache| { WebServer::respond_with_static_file(portStream.recv(), portName, cache.clone()); }); } // Close stream automatically. debug!("=====Terminated connection from [{}].=====", portName.display()); s2.release(); }); }); } } } fn get_peer_name(stream: &mut Option<std::io::net::tcp::TcpStream>) -> ~str { match *stream { Some(ref mut s) => { match s.peer_name() { Some(pn) => {pn.to_str()}, None => (~"") } }, None => (~"") } } } fn get_args() -> (~str, uint, ~str) { fn print_usage(program: &str) { println!("Usage: {:s} [options]", program); println!("--ip \tIP address, \"{:s}\" by default.", IP); println!("--port \tport number, \"{:u}\" by default.", PORT); println!("--www \tworking directory, \"{:s}\" by default", WWW_DIR); println("-h --help \tUsage"); } /* Begin processing program arguments and initiate the parameters. 
*/ let args = os::args(); let program = args[0].clone(); let opts = ~[ getopts::optopt("ip"), getopts::optopt("port"), getopts::optopt("www"), getopts::optflag("h"), getopts::optflag("help") ]; let matches = match getopts::getopts(args.tail(), opts) { Ok(m) => { m } Err(f) => { fail!(f.to_err_msg()) } }; if matches.opt_present("h") || matches.opt_present("help") { print_usage(program); unsafe { libc::exit(1); } } let ip_str = if matches.opt_present("ip") { matches.opt_str("ip").expect("invalid ip address?").to_owned() } else { IP.to_owned() }; let port:uint = if matches.opt_present("port") { from_str::from_str(matches.opt_str("port").expect("invalid port number?")).expect("not uint?") } else { PORT }; let www_dir_str = if matches.opt_present("www") { matches.opt_str("www").expect("invalid www argument?") } else { WWW_DIR.to_owned() }; (ip_str, port, www_dir_str) } fn main() { let (ip_str, port, www_dir_str) = get_args(); let mut zhtta = WebServer::new(ip_str, port, www_dir_str); zhtta.run(); } fn getPriority(other: ~str) -> int{ if(other.slice_to(7) == "128.143." || other.slice_to(6) == "137.54." || other.slice_to(9) == "127.0.0.1") { //debug!("{:s} Piority: 1", other); return 1; } else { return 2; } } fn ipToFile(IP_string: ~str) { let mut output_file = File::open_mode(&Path::new(IP_LOGFILE),Append,ReadWrite); match output_file { Some(mut file) => { let stringToWrite: ~str = IP_string + "\n"; file.write(stringToWrite.as_bytes()); }, None => { println!("Failure writing to file"); } } }
{ // Save stream in hashmap for later response. let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); let (stream_port, stream_chan) = Chan::new(); stream_chan.send(stream); unsafe { // Use an unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound. stream_map_arc.unsafe_access(|local_stream_map| { let stream = stream_port.recv(); local_stream_map.swap(peer_name.clone(), stream); }); } // Enqueue the HTTP request. let req = HTTP_Request { peer_name: peer_name.clone(), path: ~path_obj.clone() }; let (req_port, req_chan) = Chan::new(); req_chan.send(req); debug!("Waiting for queue mutex lock."); req_queue_arc.access(|local_req_queue| { debug!("Got queue mutex lock."); let req: HTTP_Request = req_port.recv(); local_req_queue.push(req); //debug!("Priority of new request is {:d}", getPriority(name.clone())); debug!("A new request enqueued, now the length of queue is {:u}.", local_req_queue.len()); }); notify_chan.send(()); // Send incoming notification to responder task. }
identifier_body
zhtta.rs
// // zhtta.rs // // Starting code for PS3 // Running on Rust 0.9 // // Note that this code has serious security risks! You should not run it // on any system with access to sensitive files. // // University of Virginia - cs4414 Spring 2014 // Weilin Xu and David Evans // Version 0.5 // To see debug! outputs set the RUST_LOG environment variable, e.g.: export RUST_LOG="zhtta=debug" // Problem 7 design decision: cache should not be bigger than 10 MB // remove last modified files // check cache for files first! #[feature(globs)]; extern mod extra; use std::io::*; use std::io::net::ip::{SocketAddr}; use std::io::buffered::BufferedReader; use std::{os, str, libc, from_str}; use std::path::Path; use std::hashmap::HashMap; use extra::getopts; use extra::arc::MutexArc; use extra::arc::RWArc; use extra::lru_cache::LruCache; use extra::priority_queue::PriorityQueue; use extra::sync::Semaphore; use std::io::buffered::BufferedStream; use std::io::File; use std::io::fs; mod gash; static SERVER_NAME : &'static str = "Zhtta Version 0.5"; static IP : &'static str = "127.0.0.1"; static PORT : uint = 4414; static WWW_DIR : &'static str = "./www"; static HTTP_OK : &'static str = "HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n"; static HTTP_BAD : &'static str = "HTTP/1.1 404 Not Found\r\n\r\n"; static COUNTER_STYLE : &'static str = "<doctype !html><html><head><title>Hello, Rust!</title> <style>body { background-color: #884414; color: #FFEEAA} h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red } h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green } </style></head> <body>"; static IP_LOGFILE : &'static str = "../iplog.txt"; //has to be level above b/c working dir is changed to www struct HTTP_Request { // Use peer_name as the key to access TcpStream in hashmap. // (Due to a bug in extra::arc in Rust 0.9, it is very inconvenient to use TcpStream without the "Freeze" bound. 
// See issue: https://github.com/mozilla/rust/issues/12139) peer_name: ~str, path: ~Path, } impl std::cmp::Eq for HTTP_Request { fn eq(&self, other: &HTTP_Request) -> bool { let sizeSelf = fs::stat(self.path).size; let sizeOther = fs::stat(other.path).size; if sizeOther == sizeSelf { if (other.peer_name.slice_to(7) == "128.143." || other.peer_name.slice_to(6) == "137.54." || other.peer_name.slice_to(9) == "127.0.0.1") && (self.peer_name.slice_to(7) == "128.143." || self.peer_name.slice_to(6) == "137.54." || self.peer_name.slice_to(9) == "127.0.0.1") { return true; } } return false; } } impl std::cmp::Ord for HTTP_Request { fn lt(&self, other: &HTTP_Request) -> bool { //First get the file sizes for the Http_Request let sizeSelf = fs::stat(self.path).size; let sizeOther = fs::stat(other.path).size; if sizeOther > sizeSelf { return true; } else { return getPriority(self.peer_name.clone()) < getPriority(other.peer_name.clone()); } } } struct WebServer { ip: ~str, port: uint, www_dir_path: ~Path, request_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, cache: MutexArc<MutexArc<LruCache<Path,~[u8]>>>, notify_port: Port<()>, shared_notify_chan: SharedChan<()> } impl WebServer { fn new(ip: &str, port: uint, www_dir: &str) -> WebServer { let (notify_port, shared_notify_chan) = SharedChan::new(); let www_dir_path = ~Path::new(www_dir); os::change_dir(www_dir_path.clone()); WebServer { ip: ip.to_owned(), port: port, www_dir_path: www_dir_path, request_queue_arc: MutexArc::new(PriorityQueue::new()), stream_map_arc: MutexArc::new(HashMap::new()), cache: MutexArc::new(MutexArc::new(LruCache::new(10))), notify_port: notify_port, shared_notify_chan: shared_notify_chan } } fn run(&mut self) { self.listen(); self.dequeue_static_file_request(); } fn listen(&mut self) { let addr = from_str::<SocketAddr>(format!("{:s}:{:u}", self.ip, self.port)).expect("Address error."); let www_dir_path_str = 
self.www_dir_path.as_str().expect("invalid www path?").to_owned(); let request_queue_arc = self.request_queue_arc.clone(); let shared_notify_chan = self.shared_notify_chan.clone(); let stream_map_arc = self.stream_map_arc.clone(); spawn(proc() { let mut acceptor = net::tcp::TcpListener::bind(addr).listen(); println!("{:s} listening on {:s} (serving from: {:s}).", SERVER_NAME, addr.to_str(), www_dir_path_str); //Visitor counter let num_visitor : uint = 0; //Arc for visitor counter. let visitor_arc_mut = RWArc::new(num_visitor); for stream in acceptor.incoming() { let (queue_port, queue_chan) = Chan::new(); queue_chan.send(request_queue_arc.clone()); let notify_chan = shared_notify_chan.clone(); let stream_map_arc = stream_map_arc.clone(); let(portMut, chanMut) = Chan::new(); chanMut.send(visitor_arc_mut.clone()); // Spawn a task to handle the connection. spawn(proc() { let request_queue_arc = queue_port.recv(); //This updates counter by adding one to it. let local_arc_mut = portMut.recv(); local_arc_mut.write(|value| { *value += 1 }); //This sets a local variable to current count. let mut visitor_count_local : uint = 0; local_arc_mut.read(|value| { //println(value.to_str()); visitor_count_local = *value; }); let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); ipToFile(peer_name.clone()); let mut buf = [0, ..500]; stream.read(buf); let request_str = str::from_utf8(buf); debug!("Request:\n{:s}", request_str); let req_group : ~[&str]= request_str.splitn(' ', 3).collect(); if req_group.len() > 2 { let path_str = "." 
+ req_group[1].to_owned(); let mut path_obj = ~os::getcwd(); path_obj.push(path_str.clone()); let ext_str = match path_obj.extension_str() { Some(e) => e, None => "", }; debug!("Requested path: [{:s}]", path_obj.as_str().expect("error")); debug!("Requested path: [{:s}]", path_str); if path_str == ~"./" { debug!("===== Counter Page request ====="); WebServer::respond_with_counter_page(stream, &visitor_count_local); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else if !path_obj.exists() || path_obj.is_dir() { debug!("===== Error page request ====="); WebServer::respond_with_error_page(stream, path_obj); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else if ext_str == "shtml" { // Dynamic web pages. debug!("===== Dynamic Page request ====="); WebServer::respond_with_dynamic_page(stream, path_obj); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else { debug!("===== Static Page request ====="); WebServer::enqueue_static_file_request(stream, path_obj, stream_map_arc, request_queue_arc, notify_chan); } } }); } }); } fn
(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) { let mut stream = stream; let msg: ~str = format!("Cannot open: {:s}", path.as_str().expect("invalid path").to_owned()); stream.write(HTTP_BAD.as_bytes()); stream.write(msg.as_bytes()); } // TODO: Safe visitor counter. fn respond_with_counter_page(stream: Option<std::io::net::tcp::TcpStream>, visitor_count_local: &uint) { let mut stream = stream; let visitor_count_other : uint = visitor_count_local.clone(); let response: ~str = format!("{:s}{:s}<h1>Greetings, Krusty!</h1> <h2>Visitor count: {:u}</h2></body></html>\r\n", HTTP_OK, COUNTER_STYLE, visitor_count_other); debug!("Responding to counter request"); stream.write(response.as_bytes()); } // TODO: Streaming file. // TODO: Application-layer file caching. fn respond_with_static_file(stream: Option<std::io::net::tcp::TcpStream>, path: &Path, cache: MutexArc<LruCache<Path, ~[u8]>>) { let mut stream = stream; stream.write(HTTP_OK.as_bytes()); let mut check : bool = true; cache.access(|local_cache| { let bytes = local_cache.get(path); match(bytes) { Some(bytes) => { // in cache debug!("File found in cache: {}", path.display()); let size = bytes.len(); let iterations = size %100000; if(iterations < 100000) { stream.write(bytes.to_owned()); } else { for i in range(0, iterations) { let start = i * 100000; let tempByte = bytes.slice(start,start+100000-1); stream.write(tempByte); } let left = size - (iterations*100000); stream.write(bytes.slice_from(left)); } check = false; } None => {} } }); if(check) { cache.access(|local_cache| { // not in cache //let mut stream = stream; debug!("File not found in cache: {}", path.display()); let mut file_reader = File::open(path).expect("Invalid file!"); let fileSize = fs::stat(path).size; let iterations = fileSize&100000; let mut byteArray: ~[u8] = ~[]; if(iterations < 100000) { let tempArray = file_reader.read_to_end(); stream.write(tempArray); byteArray.push_all_move(tempArray); } else { for i in range(0, iterations) { let 
tempArray = file_reader.read_bytes(100000); stream.write(tempArray); byteArray.push_all_move(tempArray); } let tempArray = file_reader.read_to_end(); stream.write(tempArray); byteArray.push_all_move(tempArray); } //add to cache! //automatically handles removing other elements if necessary //if(fileSize < 10000000) { debug!("File added to cache: {}", path.display()); local_cache.put(path.clone(), byteArray); //} }); } } // TODO: Server-side gashing. fn respond_with_dynamic_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) { //for now, just serve as static file let shtml_file = File::open(path); let mut rwStream = BufferedStream::new(shtml_file); let mut newFile : ~[~str] = ~[]; let mut checkIfLastIsCmd : bool = false; for line in rwStream.lines() { let mut check : bool = false; let mut newLine : ~[~str] = ~[]; for split in line.split(' ') { if(check) { let cmdSplit : ~[&str] = split.split('=').collect(); let command : ~str = cmdSplit[1].to_owned(); let finalCommand = command.slice(1,command.len()-1).to_owned(); let output : ~str = gash::run_cmdline(finalCommand); newLine.push(output); check = false; checkIfLastIsCmd = true; } else if(split == "<!--#exec") { check = true; } else if(split == "-->") { } else { if(checkIfLastIsCmd && split.slice(0, 3) == "-->") { newLine.push(split.slice_from(3).to_owned()); newLine.push(" ".to_owned()); checkIfLastIsCmd = false; } else if(split.len() > 9 && split.slice_from(split.len() - 9) == "<!--#exec") { newLine.push(split.slice(0, split.len()-9).to_owned()); check = true; } else { newLine.push(split.to_owned()); newLine.push(" ".to_owned()); } } } let mut fullLine : ~str = ~""; for s in newLine.iter() { fullLine = fullLine + s.clone(); } newFile.push(fullLine); } let mut fullPage : ~str = ~""; for s in newFile.iter() { fullPage = fullPage + s.clone(); } let mut stream = stream; stream.write(HTTP_OK.as_bytes()); stream.write(fullPage.as_bytes()); } // TODO: Smarter Scheduling. 
fn enqueue_static_file_request(stream: Option<std::io::net::tcp::TcpStream>, path_obj: &Path, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, req_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, notify_chan: SharedChan<()>) { // Save stream in hashmap for later response. let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); let (stream_port, stream_chan) = Chan::new(); stream_chan.send(stream); unsafe { // Use an unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound. stream_map_arc.unsafe_access(|local_stream_map| { let stream = stream_port.recv(); local_stream_map.swap(peer_name.clone(), stream); }); } // Enqueue the HTTP request. let req = HTTP_Request { peer_name: peer_name.clone(), path: ~path_obj.clone() }; let (req_port, req_chan) = Chan::new(); req_chan.send(req); debug!("Waiting for queue mutex lock."); req_queue_arc.access(|local_req_queue| { debug!("Got queue mutex lock."); let req: HTTP_Request = req_port.recv(); local_req_queue.push(req); //debug!("Priority of new request is {:d}", getPriority(name.clone())); debug!("A new request enqueued, now the length of queue is {:u}.", local_req_queue.len()); }); notify_chan.send(()); // Send incoming notification to responder task. } // TODO: Smarter Scheduling. fn dequeue_static_file_request(&mut self) { let req_queue_get = self.request_queue_arc.clone(); let stream_map_get = self.stream_map_arc.clone(); let cacheArc = self.cache.clone(); //Semaphore for counting tasks let s = Semaphore::new(4); // Port<> cannot be sent to another task. So we have to make this task as the main task that can access self.notify_port. let (request_port, request_chan) = Chan::new(); loop { self.notify_port.recv(); // waiting for new request enqueued. req_queue_get.access( |req_queue| { match req_queue.maybe_pop() { // Priority queue. 
None => { /* do nothing */ } Some(req) => { request_chan.send(req); debug!("A new request dequeued, now the length of queue is {:u}.", req_queue.len()); } } }); let request = request_port.recv(); // Get stream from hashmap. // Use unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound. let (stream_port, stream_chan) = Chan::new(); let (request_port_local, request_chan_local) = Chan::new(); unsafe { stream_map_get.unsafe_access(|local_stream_map| { let stream = local_stream_map.pop(&request.peer_name).expect("no option tcpstream"); stream_chan.send(stream); request_chan_local.send(request.path.clone()); }); } if(fs::stat(request.path).size < 1000000) { let mut file_reader = File::open(request.path).expect("Invalid file!"); let mut stream = stream_port.recv(); stream.write(HTTP_OK.as_bytes()); stream.write(file_reader.read_to_end()); } else { let semaphore = s.clone(); semaphore.acquire(); // TODO: Spawning more tasks to respond the dequeued requests concurrently. You may need a semophore to control the concurrency. semaphore.access( || { //Sending cache into spawn let(portCache, chanCache) = Chan::new(); chanCache.send(cacheArc.clone()); //Sending stream into spawn let streamLocal = stream_port.recv(); let(portStream, chanStream) = Chan::new(); chanStream.send(streamLocal); //Sending request into spawn let portLocal = request_port_local.recv(); let(portRequest, chanRequest) = Chan::new(); chanRequest.send(portLocal); let (semaphoreRequest, semaphoreChan) = Chan::new(); semaphoreChan.send(semaphore.clone()); spawn(proc() { let localCacheArc = portCache.recv(); let portName = portRequest.recv(); let s2 = semaphoreRequest.recv(); unsafe { localCacheArc.unsafe_access( |cache| { WebServer::respond_with_static_file(portStream.recv(), portName, cache.clone()); }); } // Close stream automatically. 
debug!("=====Terminated connection from [{}].=====", portName.display()); s2.release(); }); }); } } } fn get_peer_name(stream: &mut Option<std::io::net::tcp::TcpStream>) -> ~str { match *stream { Some(ref mut s) => { match s.peer_name() { Some(pn) => {pn.to_str()}, None => (~"") } }, None => (~"") } } } fn get_args() -> (~str, uint, ~str) { fn print_usage(program: &str) { println!("Usage: {:s} [options]", program); println!("--ip \tIP address, \"{:s}\" by default.", IP); println!("--port \tport number, \"{:u}\" by default.", PORT); println!("--www \tworking directory, \"{:s}\" by default", WWW_DIR); println("-h --help \tUsage"); } /* Begin processing program arguments and initiate the parameters. */ let args = os::args(); let program = args[0].clone(); let opts = ~[ getopts::optopt("ip"), getopts::optopt("port"), getopts::optopt("www"), getopts::optflag("h"), getopts::optflag("help") ]; let matches = match getopts::getopts(args.tail(), opts) { Ok(m) => { m } Err(f) => { fail!(f.to_err_msg()) } }; if matches.opt_present("h") || matches.opt_present("help") { print_usage(program); unsafe { libc::exit(1); } } let ip_str = if matches.opt_present("ip") { matches.opt_str("ip").expect("invalid ip address?").to_owned() } else { IP.to_owned() }; let port:uint = if matches.opt_present("port") { from_str::from_str(matches.opt_str("port").expect("invalid port number?")).expect("not uint?") } else { PORT }; let www_dir_str = if matches.opt_present("www") { matches.opt_str("www").expect("invalid www argument?") } else { WWW_DIR.to_owned() }; (ip_str, port, www_dir_str) } fn main() { let (ip_str, port, www_dir_str) = get_args(); let mut zhtta = WebServer::new(ip_str, port, www_dir_str); zhtta.run(); } fn getPriority(other: ~str) -> int{ if(other.slice_to(7) == "128.143." || other.slice_to(6) == "137.54." 
|| other.slice_to(9) == "127.0.0.1") { //debug!("{:s} Piority: 1", other); return 1; } else { return 2; } } fn ipToFile(IP_string: ~str) { let mut output_file = File::open_mode(&Path::new(IP_LOGFILE),Append,ReadWrite); match output_file { Some(mut file) => { let stringToWrite: ~str = IP_string + "\n"; file.write(stringToWrite.as_bytes()); }, None => { println!("Failure writing to file"); } } }
respond_with_error_page
identifier_name
zhtta.rs
// // zhtta.rs // // Starting code for PS3 // Running on Rust 0.9 // // Note that this code has serious security risks! You should not run it // on any system with access to sensitive files. // // University of Virginia - cs4414 Spring 2014 // Weilin Xu and David Evans // Version 0.5 // To see debug! outputs set the RUST_LOG environment variable, e.g.: export RUST_LOG="zhtta=debug" // Problem 7 design decision: cache should not be bigger than 10 MB // remove last modified files // check cache for files first! #[feature(globs)]; extern mod extra; use std::io::*; use std::io::net::ip::{SocketAddr}; use std::io::buffered::BufferedReader; use std::{os, str, libc, from_str}; use std::path::Path; use std::hashmap::HashMap; use extra::getopts; use extra::arc::MutexArc; use extra::arc::RWArc; use extra::lru_cache::LruCache; use extra::priority_queue::PriorityQueue; use extra::sync::Semaphore; use std::io::buffered::BufferedStream; use std::io::File; use std::io::fs; mod gash; static SERVER_NAME : &'static str = "Zhtta Version 0.5"; static IP : &'static str = "127.0.0.1"; static PORT : uint = 4414; static WWW_DIR : &'static str = "./www"; static HTTP_OK : &'static str = "HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n"; static HTTP_BAD : &'static str = "HTTP/1.1 404 Not Found\r\n\r\n"; static COUNTER_STYLE : &'static str = "<doctype !html><html><head><title>Hello, Rust!</title> <style>body { background-color: #884414; color: #FFEEAA} h1 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm red } h2 { font-size:2cm; text-align: center; color: black; text-shadow: 0 0 4mm green } </style></head> <body>"; static IP_LOGFILE : &'static str = "../iplog.txt"; //has to be level above b/c working dir is changed to www struct HTTP_Request { // Use peer_name as the key to access TcpStream in hashmap. // (Due to a bug in extra::arc in Rust 0.9, it is very inconvenient to use TcpStream without the "Freeze" bound. 
// See issue: https://github.com/mozilla/rust/issues/12139) peer_name: ~str, path: ~Path, } impl std::cmp::Eq for HTTP_Request { fn eq(&self, other: &HTTP_Request) -> bool { let sizeSelf = fs::stat(self.path).size; let sizeOther = fs::stat(other.path).size; if sizeOther == sizeSelf { if (other.peer_name.slice_to(7) == "128.143." || other.peer_name.slice_to(6) == "137.54." || other.peer_name.slice_to(9) == "127.0.0.1") && (self.peer_name.slice_to(7) == "128.143." || self.peer_name.slice_to(6) == "137.54." || self.peer_name.slice_to(9) == "127.0.0.1") { return true; } } return false; } } impl std::cmp::Ord for HTTP_Request { fn lt(&self, other: &HTTP_Request) -> bool { //First get the file sizes for the Http_Request let sizeSelf = fs::stat(self.path).size; let sizeOther = fs::stat(other.path).size; if sizeOther > sizeSelf { return true; } else { return getPriority(self.peer_name.clone()) < getPriority(other.peer_name.clone()); } } } struct WebServer { ip: ~str, port: uint, www_dir_path: ~Path, request_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, cache: MutexArc<MutexArc<LruCache<Path,~[u8]>>>, notify_port: Port<()>, shared_notify_chan: SharedChan<()> } impl WebServer { fn new(ip: &str, port: uint, www_dir: &str) -> WebServer { let (notify_port, shared_notify_chan) = SharedChan::new(); let www_dir_path = ~Path::new(www_dir); os::change_dir(www_dir_path.clone()); WebServer { ip: ip.to_owned(), port: port, www_dir_path: www_dir_path, request_queue_arc: MutexArc::new(PriorityQueue::new()), stream_map_arc: MutexArc::new(HashMap::new()), cache: MutexArc::new(MutexArc::new(LruCache::new(10))), notify_port: notify_port, shared_notify_chan: shared_notify_chan } } fn run(&mut self) { self.listen(); self.dequeue_static_file_request(); } fn listen(&mut self) { let addr = from_str::<SocketAddr>(format!("{:s}:{:u}", self.ip, self.port)).expect("Address error."); let www_dir_path_str = 
self.www_dir_path.as_str().expect("invalid www path?").to_owned(); let request_queue_arc = self.request_queue_arc.clone(); let shared_notify_chan = self.shared_notify_chan.clone(); let stream_map_arc = self.stream_map_arc.clone(); spawn(proc() { let mut acceptor = net::tcp::TcpListener::bind(addr).listen(); println!("{:s} listening on {:s} (serving from: {:s}).", SERVER_NAME, addr.to_str(), www_dir_path_str); //Visitor counter let num_visitor : uint = 0; //Arc for visitor counter. let visitor_arc_mut = RWArc::new(num_visitor); for stream in acceptor.incoming() { let (queue_port, queue_chan) = Chan::new(); queue_chan.send(request_queue_arc.clone()); let notify_chan = shared_notify_chan.clone(); let stream_map_arc = stream_map_arc.clone(); let(portMut, chanMut) = Chan::new(); chanMut.send(visitor_arc_mut.clone()); // Spawn a task to handle the connection. spawn(proc() { let request_queue_arc = queue_port.recv(); //This updates counter by adding one to it. let local_arc_mut = portMut.recv(); local_arc_mut.write(|value| { *value += 1 }); //This sets a local variable to current count. let mut visitor_count_local : uint = 0; local_arc_mut.read(|value| { //println(value.to_str()); visitor_count_local = *value; }); let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); ipToFile(peer_name.clone()); let mut buf = [0, ..500]; stream.read(buf); let request_str = str::from_utf8(buf); debug!("Request:\n{:s}", request_str); let req_group : ~[&str]= request_str.splitn(' ', 3).collect(); if req_group.len() > 2 { let path_str = "." 
+ req_group[1].to_owned(); let mut path_obj = ~os::getcwd(); path_obj.push(path_str.clone()); let ext_str = match path_obj.extension_str() { Some(e) => e, None => "", }; debug!("Requested path: [{:s}]", path_obj.as_str().expect("error")); debug!("Requested path: [{:s}]", path_str); if path_str == ~"./" { debug!("===== Counter Page request ====="); WebServer::respond_with_counter_page(stream, &visitor_count_local); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else if !path_obj.exists() || path_obj.is_dir() { debug!("===== Error page request ====="); WebServer::respond_with_error_page(stream, path_obj); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else if ext_str == "shtml" { // Dynamic web pages. debug!("===== Dynamic Page request ====="); WebServer::respond_with_dynamic_page(stream, path_obj); debug!("=====Terminated connection from [{:s}].=====", peer_name); } else { debug!("===== Static Page request ====="); WebServer::enqueue_static_file_request(stream, path_obj, stream_map_arc, request_queue_arc, notify_chan); } } }); } }); } fn respond_with_error_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) { let mut stream = stream; let msg: ~str = format!("Cannot open: {:s}", path.as_str().expect("invalid path").to_owned()); stream.write(HTTP_BAD.as_bytes()); stream.write(msg.as_bytes()); } // TODO: Safe visitor counter. fn respond_with_counter_page(stream: Option<std::io::net::tcp::TcpStream>, visitor_count_local: &uint) { let mut stream = stream; let visitor_count_other : uint = visitor_count_local.clone(); let response: ~str = format!("{:s}{:s}<h1>Greetings, Krusty!</h1> <h2>Visitor count: {:u}</h2></body></html>\r\n", HTTP_OK, COUNTER_STYLE, visitor_count_other); debug!("Responding to counter request"); stream.write(response.as_bytes()); } // TODO: Streaming file. // TODO: Application-layer file caching. 
fn respond_with_static_file(stream: Option<std::io::net::tcp::TcpStream>, path: &Path, cache: MutexArc<LruCache<Path, ~[u8]>>) { let mut stream = stream; stream.write(HTTP_OK.as_bytes()); let mut check : bool = true; cache.access(|local_cache| { let bytes = local_cache.get(path); match(bytes) { Some(bytes) => { // in cache debug!("File found in cache: {}", path.display()); let size = bytes.len(); let iterations = size %100000; if(iterations < 100000) { stream.write(bytes.to_owned()); } else { for i in range(0, iterations) { let start = i * 100000; let tempByte = bytes.slice(start,start+100000-1); stream.write(tempByte); } let left = size - (iterations*100000); stream.write(bytes.slice_from(left)); } check = false; } None => {} } }); if(check) { cache.access(|local_cache| { // not in cache //let mut stream = stream; debug!("File not found in cache: {}", path.display()); let mut file_reader = File::open(path).expect("Invalid file!"); let fileSize = fs::stat(path).size; let iterations = fileSize&100000; let mut byteArray: ~[u8] = ~[]; if(iterations < 100000) { let tempArray = file_reader.read_to_end(); stream.write(tempArray); byteArray.push_all_move(tempArray); } else { for i in range(0, iterations) { let tempArray = file_reader.read_bytes(100000); stream.write(tempArray); byteArray.push_all_move(tempArray); } let tempArray = file_reader.read_to_end(); stream.write(tempArray); byteArray.push_all_move(tempArray); } //add to cache! //automatically handles removing other elements if necessary //if(fileSize < 10000000) { debug!("File added to cache: {}", path.display()); local_cache.put(path.clone(), byteArray); //} }); } } // TODO: Server-side gashing. 
fn respond_with_dynamic_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) { //for now, just serve as static file let shtml_file = File::open(path); let mut rwStream = BufferedStream::new(shtml_file); let mut newFile : ~[~str] = ~[]; let mut checkIfLastIsCmd : bool = false; for line in rwStream.lines() { let mut check : bool = false; let mut newLine : ~[~str] = ~[]; for split in line.split(' ') { if(check) { let cmdSplit : ~[&str] = split.split('=').collect(); let command : ~str = cmdSplit[1].to_owned(); let finalCommand = command.slice(1,command.len()-1).to_owned(); let output : ~str = gash::run_cmdline(finalCommand); newLine.push(output); check = false; checkIfLastIsCmd = true; } else if(split == "<!--#exec") { check = true; } else if(split == "-->") { } else { if(checkIfLastIsCmd && split.slice(0, 3) == "-->") { newLine.push(split.slice_from(3).to_owned()); newLine.push(" ".to_owned()); checkIfLastIsCmd = false; } else if(split.len() > 9 && split.slice_from(split.len() - 9) == "<!--#exec") { newLine.push(split.slice(0, split.len()-9).to_owned()); check = true; } else { newLine.push(split.to_owned()); newLine.push(" ".to_owned()); } } } let mut fullLine : ~str = ~""; for s in newLine.iter() { fullLine = fullLine + s.clone(); } newFile.push(fullLine); } let mut fullPage : ~str = ~""; for s in newFile.iter() { fullPage = fullPage + s.clone(); } let mut stream = stream; stream.write(HTTP_OK.as_bytes()); stream.write(fullPage.as_bytes()); } // TODO: Smarter Scheduling. fn enqueue_static_file_request(stream: Option<std::io::net::tcp::TcpStream>, path_obj: &Path, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, req_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, notify_chan: SharedChan<()>) { // Save stream in hashmap for later response. 
let mut stream = stream; let peer_name = WebServer::get_peer_name(&mut stream); let (stream_port, stream_chan) = Chan::new(); stream_chan.send(stream); unsafe { // Use an unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound. stream_map_arc.unsafe_access(|local_stream_map| { let stream = stream_port.recv(); local_stream_map.swap(peer_name.clone(), stream); }); } // Enqueue the HTTP request. let req = HTTP_Request { peer_name: peer_name.clone(), path: ~path_obj.clone() }; let (req_port, req_chan) = Chan::new(); req_chan.send(req); debug!("Waiting for queue mutex lock."); req_queue_arc.access(|local_req_queue| { debug!("Got queue mutex lock."); let req: HTTP_Request = req_port.recv(); local_req_queue.push(req); //debug!("Priority of new request is {:d}", getPriority(name.clone())); debug!("A new request enqueued, now the length of queue is {:u}.", local_req_queue.len()); }); notify_chan.send(()); // Send incoming notification to responder task. } // TODO: Smarter Scheduling. fn dequeue_static_file_request(&mut self) { let req_queue_get = self.request_queue_arc.clone(); let stream_map_get = self.stream_map_arc.clone(); let cacheArc = self.cache.clone(); //Semaphore for counting tasks let s = Semaphore::new(4); // Port<> cannot be sent to another task. So we have to make this task as the main task that can access self.notify_port. let (request_port, request_chan) = Chan::new(); loop { self.notify_port.recv(); // waiting for new request enqueued. req_queue_get.access( |req_queue| { match req_queue.maybe_pop() { // Priority queue. None => { /* do nothing */ } Some(req) => { request_chan.send(req); debug!("A new request dequeued, now the length of queue is {:u}.", req_queue.len()); } } }); let request = request_port.recv(); // Get stream from hashmap. // Use unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound. 
let (stream_port, stream_chan) = Chan::new(); let (request_port_local, request_chan_local) = Chan::new(); unsafe { stream_map_get.unsafe_access(|local_stream_map| { let stream = local_stream_map.pop(&request.peer_name).expect("no option tcpstream"); stream_chan.send(stream); request_chan_local.send(request.path.clone()); }); } if(fs::stat(request.path).size < 1000000) { let mut file_reader = File::open(request.path).expect("Invalid file!"); let mut stream = stream_port.recv(); stream.write(HTTP_OK.as_bytes()); stream.write(file_reader.read_to_end()); } else
} } fn get_peer_name(stream: &mut Option<std::io::net::tcp::TcpStream>) -> ~str { match *stream { Some(ref mut s) => { match s.peer_name() { Some(pn) => {pn.to_str()}, None => (~"") } }, None => (~"") } } } fn get_args() -> (~str, uint, ~str) { fn print_usage(program: &str) { println!("Usage: {:s} [options]", program); println!("--ip \tIP address, \"{:s}\" by default.", IP); println!("--port \tport number, \"{:u}\" by default.", PORT); println!("--www \tworking directory, \"{:s}\" by default", WWW_DIR); println("-h --help \tUsage"); } /* Begin processing program arguments and initiate the parameters. */ let args = os::args(); let program = args[0].clone(); let opts = ~[ getopts::optopt("ip"), getopts::optopt("port"), getopts::optopt("www"), getopts::optflag("h"), getopts::optflag("help") ]; let matches = match getopts::getopts(args.tail(), opts) { Ok(m) => { m } Err(f) => { fail!(f.to_err_msg()) } }; if matches.opt_present("h") || matches.opt_present("help") { print_usage(program); unsafe { libc::exit(1); } } let ip_str = if matches.opt_present("ip") { matches.opt_str("ip").expect("invalid ip address?").to_owned() } else { IP.to_owned() }; let port:uint = if matches.opt_present("port") { from_str::from_str(matches.opt_str("port").expect("invalid port number?")).expect("not uint?") } else { PORT }; let www_dir_str = if matches.opt_present("www") { matches.opt_str("www").expect("invalid www argument?") } else { WWW_DIR.to_owned() }; (ip_str, port, www_dir_str) } fn main() { let (ip_str, port, www_dir_str) = get_args(); let mut zhtta = WebServer::new(ip_str, port, www_dir_str); zhtta.run(); } fn getPriority(other: ~str) -> int{ if(other.slice_to(7) == "128.143." || other.slice_to(6) == "137.54." 
|| other.slice_to(9) == "127.0.0.1") { //debug!("{:s} Piority: 1", other); return 1; } else { return 2; } } fn ipToFile(IP_string: ~str) { let mut output_file = File::open_mode(&Path::new(IP_LOGFILE),Append,ReadWrite); match output_file { Some(mut file) => { let stringToWrite: ~str = IP_string + "\n"; file.write(stringToWrite.as_bytes()); }, None => { println!("Failure writing to file"); } } }
{ let semaphore = s.clone(); semaphore.acquire(); // TODO: Spawning more tasks to respond the dequeued requests concurrently. You may need a semophore to control the concurrency. semaphore.access( || { //Sending cache into spawn let(portCache, chanCache) = Chan::new(); chanCache.send(cacheArc.clone()); //Sending stream into spawn let streamLocal = stream_port.recv(); let(portStream, chanStream) = Chan::new(); chanStream.send(streamLocal); //Sending request into spawn let portLocal = request_port_local.recv(); let(portRequest, chanRequest) = Chan::new(); chanRequest.send(portLocal); let (semaphoreRequest, semaphoreChan) = Chan::new(); semaphoreChan.send(semaphore.clone()); spawn(proc() { let localCacheArc = portCache.recv(); let portName = portRequest.recv(); let s2 = semaphoreRequest.recv(); unsafe { localCacheArc.unsafe_access( |cache| { WebServer::respond_with_static_file(portStream.recv(), portName, cache.clone()); }); } // Close stream automatically. debug!("=====Terminated connection from [{}].=====", portName.display()); s2.release(); }); }); }
conditional_block
resourceSubscription.go
package rescache import ( "encoding/json" "errors" "github.com/resgateio/resgate/server/codec" "github.com/resgateio/resgate/server/reserr" ) type subscriptionState byte const ( stateSubscribed subscriptionState = iota stateError stateRequested stateCollection stateModel ) // Model represents a RES model // https://github.com/resgateio/resgate/blob/master/docs/res-protocol.md#models type Model struct { Values map[string]codec.Value data []byte } // MarshalJSON creates a JSON encoded representation of the model func (m *Model) MarshalJSON() ([]byte, error) { if m.data == nil { data, err := json.Marshal(m.Values) if err != nil { return nil, err } m.data = data } return m.data, nil } // Collection represents a RES collection // https://github.com/resgateio/resgate/blob/master/docs/res-protocol.md#collections type Collection struct { Values []codec.Value data []byte } // MarshalJSON creates a JSON encoded representation of the collection func (c *Collection) MarshalJSON() ([]byte, error) { if c.data == nil { data, err := json.Marshal(c.Values) if err != nil { return nil, err } c.data = data } return c.data, nil } // ResourceSubscription represents a client subscription for a resource or query resource type ResourceSubscription struct { e *EventSubscription query string state subscriptionState subs map[Subscriber]struct{} resetting bool links []string // version is the internal resource version, starting with 0 and bumped +1 // for each modifying event. 
version uint // Three types of values stored model *Model collection *Collection err error } func newResourceSubscription(e *EventSubscription, query string) *ResourceSubscription { return &ResourceSubscription{ e: e, query: query, subs: make(map[Subscriber]struct{}), } } // GetResourceType returns the resource type of the resource subscription func (rs *ResourceSubscription) GetResourceType() ResourceType { rs.e.mu.Lock() defer rs.e.mu.Unlock() return ResourceType(rs.state) } // GetError returns the subscription error, or nil if there is no error func (rs *ResourceSubscription) GetError() error { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.err } // GetCollection will lock the EventSubscription for any changes // and return the collection string slice. func (rs *ResourceSubscription) GetCollection() (*Collection, uint) { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.collection, rs.version } // GetModel will return the model map and its current version. func (rs *ResourceSubscription) GetModel() (*Model, uint) { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.model, rs.version } // Unsubscribe cancels the client subscriber's subscription func (rs *ResourceSubscription) Unsubscribe(sub Subscriber) { rs.e.Enqueue(func() { if sub != nil { delete(rs.subs, sub) } // Directly unregister unsubscribed queries if rs.query != "" && len(rs.subs) == 0 { rs.unregister() } rs.e.removeCount(1) }) } func (rs *ResourceSubscription) handleEvent(r *ResourceEvent) { // Discard if event happened before resource was loaded, // unless it is a reaccess. Then we let the event be passed further. if rs.state <= stateRequested && r.Event != "reaccess" { return } // Set event to target current version of the resource. 
r.Version = rs.version switch r.Event { case "change": if rs.resetting || !rs.handleEventChange(r) { return } case "add": if rs.resetting || !rs.handleEventAdd(r) { return } case "remove": if rs.resetting || !rs.handleEventRemove(r) { return } case "delete": if !rs.resetting { rs.handleEventDelete(r) } return } rs.e.mu.Unlock() for sub := range rs.subs { sub.Event(r) } rs.e.mu.Lock() } func (rs *ResourceSubscription) handleEventChange(r *ResourceEvent) bool { if rs.state == stateCollection { rs.e.cache.Errorf("Error processing event %s.%s: change event on collection", rs.e.ResourceName, r.Event) return false } var props map[string]codec.Value var err error // [DEPRECATED:deprecatedModelChangeEvent] if codec.IsLegacyChangeEvent(r.Payload) { rs.e.cache.deprecated(rs.e.ResourceName, deprecatedModelChangeEvent) props, err = codec.DecodeLegacyChangeEvent(r.Payload) } else { props, err = codec.DecodeChangeEvent(r.Payload) } if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) } // Clone old map using old map size as capacity. 
// It might not be exact, but often sufficient m := make(map[string]codec.Value, len(rs.model.Values)) for k, v := range rs.model.Values { m[k] = v } // Update model properties for k, v := range props { if v.Type == codec.ValueTypeDelete { if _, ok := m[k]; ok { delete(m, k) } else { delete(props, k) } } else { if m[k].Equal(v) { delete(props, k) } else { m[k] = v } } } // No actual changes if len(props) == 0 { return false } r.Changed = props r.OldValues = rs.model.Values r.Update = true rs.model = &Model{Values: m} rs.version++ return true } func (rs *ResourceSubscription) handleEventAdd(r *ResourceEvent) bool { if rs.state == stateModel { rs.e.cache.Errorf("Error processing event %s.%s: add event on model", rs.e.ResourceName, r.Event) return false } params, err := codec.DecodeAddEvent(r.Payload) if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) return false } idx := params.Idx old := rs.collection.Values l := len(old) if idx < 0 || idx > l { rs.e.cache.Errorf("Error processing event %s.%s: idx %d is out of bounds", rs.e.ResourceName, r.Event, idx) return false } // Copy collection as the old slice might have been // passed to a Subscriber and should be considered immutable col := make([]codec.Value, l+1) copy(col, old[0:idx]) copy(col[idx+1:], old[idx:]) col[idx] = params.Value rs.collection = &Collection{Values: col} rs.version++ r.Idx = params.Idx r.Value = params.Value r.Update = true return true } func (rs *ResourceSubscription) handleEventRemove(r *ResourceEvent) bool { if rs.state == stateModel { rs.e.cache.Errorf("Error processing event %s.%s: remove event on model", rs.e.ResourceName, r.Event) return false } params, err := codec.DecodeRemoveEvent(r.Payload) if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) return false } idx := params.Idx old := rs.collection.Values l := len(old) if idx < 0 || idx >= l { rs.e.cache.Errorf("Error processing event 
%s.%s: idx %d is out of bounds", rs.e.ResourceName, r.Event, idx) return false } r.Value = old[idx] // Copy collection as the old slice might have been // passed to a Subscriber and should be considered immutable col := make([]codec.Value, l-1) copy(col, old[0:idx]) copy(col[idx:], old[idx+1:]) rs.collection = &Collection{Values: col} rs.version++ r.Idx = params.Idx r.Update = true return true } func (rs *ResourceSubscription) handleEventDelete(r *ResourceEvent)
func (rs *ResourceSubscription) enqueueGetResponse(data []byte, err error) { rs.e.Enqueue(func() { rs, sublist := rs.processGetResponse(data, err) rs.e.mu.Unlock() defer rs.e.mu.Lock() if rs.state == stateError { for _, sub := range sublist { sub.Loaded(nil, rs.err) } } else { for _, sub := range sublist { sub.Loaded(rs, nil) } } }) } // unregister deletes itself and all its links from // the EventSubscription func (rs *ResourceSubscription) unregister() { if rs.query == "" { rs.e.base = nil } else { delete(rs.e.queries, rs.query) } for _, q := range rs.links { if q == "" { rs.e.base = nil } else { delete(rs.e.links, q) } } rs.links = nil } func (rs *ResourceSubscription) processGetResponse(payload []byte, err error) (nrs *ResourceSubscription, sublist []Subscriber) { var result *codec.GetResult // Either we have an error making the request // or an error in the service's response if err == nil { result, err = codec.DecodeGetResponse(payload) } // Get request failed if err != nil { // Set state and store the error in case any other // subscriber are waiting on the Lock to subscribe rs.state = stateError rs.err = err // Clone subscribers to slice sublist = make([]Subscriber, len(rs.subs)) i := 0 for sub := range rs.subs { sublist[i] = sub i++ } c := int64(len(sublist)) rs.subs = nil rs.unregister() rs.e.removeCount(c) nrs = rs return } // Is the normalized query in the response different from the // one requested by the Subscriber? 
// Then we should create a link to the normalized query if result.Query != rs.query { nrs = rs.e.getResourceSubscription(result.Query) if rs.query == "" { rs.e.base = nrs } else { // Replace resource subscription with the normalized version if rs.e.links == nil { rs.e.links = make(map[string]*ResourceSubscription) } rs.e.links[rs.query] = nrs delete(rs.e.queries, rs.query) } nrs.links = append(nrs.links, rs.query) // Copy over all subscribers for sub := range rs.subs { nrs.subs[sub] = struct{}{} } } else { nrs = rs } // Clone subscribers to slice from original resourceSubscription // as it is only those subscribers that has not yet been Loaded. // In nrs, there might be subscribers already Loaded. sublist = make([]Subscriber, len(rs.subs)) i := 0 for sub := range rs.subs { sublist[i] = sub i++ } // Exit if another request has already progressed the state. // Might happen when making a query subscription, directly followed by // another subscription using the normalized query of the previous. // When the second request returns, its resourceSubscription // will already be updated by the response from the first request. if nrs.state > stateRequested { return } // Make sure internal resource version has its 0 value nrs.version = 0 if result.Model != nil { nrs.model = &Model{Values: result.Model} nrs.state = stateModel } else { nrs.collection = &Collection{Values: result.Collection} nrs.state = stateCollection } return } func (rs *ResourceSubscription) handleResetResource(t *Throttle) { // Are we already resetting. Then quick exit if rs.resetting { return } rs.resetting = true // Create request subj := "get." 
+ rs.e.ResourceName payload := codec.CreateGetRequest(rs.query) if t != nil { t.Add(func() { rs.e.cache.mq.SendRequest(subj, payload, func(_ string, data []byte, err error) { rs.e.Enqueue(func() { rs.resetting = false rs.processResetGetResponse(data, err) }) t.Done() }) }) } else { rs.e.cache.mq.SendRequest(subj, payload, func(_ string, data []byte, err error) { rs.e.Enqueue(func() { rs.resetting = false rs.processResetGetResponse(data, err) }) }) } } func (rs *ResourceSubscription) handleResetAccess(t *Throttle) { for sub := range rs.subs { sub.Reaccess(t) } } func (rs *ResourceSubscription) processResetGetResponse(payload []byte, err error) { var result *codec.GetResult // Either we have an error making the request // or an error in the service's response if err == nil { result, err = codec.DecodeGetResponse(payload) if err == nil && ((rs.state == stateModel && result.Model == nil) || (rs.state == stateCollection && result.Collection == nil)) { err = errors.New("mismatching resource type") } } // Get request failed if err != nil { // In case of a system.notFound error, // a delete event is generated. Otherwise we // just log the error. 
if reserr.IsError(err, reserr.CodeNotFound) { rs.handleEvent(&ResourceEvent{Event: "delete"}) } else { rs.e.cache.Errorf("Subscription %s: Reset get error - %s", rs.e.ResourceName, err) } return } switch rs.state { case stateModel: rs.processResetModel(result.Model) case stateCollection: rs.processResetCollection(result.Collection) } } func (rs *ResourceSubscription) processResetModel(props map[string]codec.Value) { // Update cached model properties vals := rs.model.Values for k := range vals { if _, ok := props[k]; !ok { props[k] = codec.DeleteValue } } for k, v := range props { ov, ok := vals[k] if ok && v.Equal(ov) { delete(props, k) } } if len(props) == 0 { return } r := &ResourceEvent{ Event: "change", Payload: codec.EncodeChangeEvent(props), } rs.handleEvent(r) } func (rs *ResourceSubscription) processResetCollection(collection []codec.Value) { events := lcs(rs.collection.Values, collection) for _, r := range events { rs.handleEvent(r) } } func lcs(a, b []codec.Value) []*ResourceEvent { var i, j int // Do a LCS matric calculation // https://en.wikipedia.org/wiki/Longest_common_subsequence_problem s := 0 m := len(a) n := len(b) // Trim of matches at the start and end for s < m && s < n && a[s].Equal(b[s]) { s++ } if s == m && s == n { return nil } for s < m && s < n && a[m-1].Equal(b[n-1]) { m-- n-- } var aa, bb []codec.Value if s > 0 || m < len(a) { aa = a[s:m] m = m - s } else { aa = a } if s > 0 || n < len(b) { bb = b[s:n] n = n - s } else { bb = b } // Create matrix and initialize it w := m + 1 c := make([]int, w*(n+1)) for i = 0; i < m; i++ { for j = 0; j < n; j++ { if aa[i].Equal(bb[j]) { c[(i+1)+w*(j+1)] = c[i+w*j] + 1 } else { v1 := c[(i+1)+w*j] v2 := c[i+w*(j+1)] if v2 > v1 { c[(i+1)+w*(j+1)] = v2 } else { c[(i+1)+w*(j+1)] = v1 } } } } steps := make([]*ResourceEvent, 0, m+n-2*c[w*(n+1)-1]) idx := m + s i = m j = n r := 0 var adds [][3]int addCount := n - c[w*(n+1)-1] if addCount > 0 { adds = make([][3]int, 0, addCount) } Loop: for { m = i - 1 n = j - 
1 switch { case i > 0 && j > 0 && aa[m].Equal(bb[n]): idx-- i-- j-- case j > 0 && (i == 0 || c[i+w*n] >= c[m+w*j]): adds = append(adds, [3]int{n, idx, r}) j-- case i > 0 && (j == 0 || c[i+w*n] < c[m+w*j]): idx-- steps = append(steps, &ResourceEvent{ Event: "remove", Payload: codec.EncodeRemoveEvent(&codec.RemoveEvent{ Idx: idx, }), }) r++ i-- default: break Loop } } // Do the adds l := len(adds) - 1 for i := l; i >= 0; i-- { add := adds[i] steps = append(steps, &ResourceEvent{ Event: "add", Payload: codec.EncodeAddEvent(&codec.AddEvent{ Value: bb[add[0]], Idx: add[1] - r + add[2] + l - i, }), }) } return steps }
{ subs := rs.subs c := int64(len(subs)) rs.subs = nil rs.unregister() rs.e.removeCount(c) rs.e.mu.Unlock() for sub := range subs { sub.Event(r) } rs.e.mu.Lock() }
identifier_body
resourceSubscription.go
package rescache import ( "encoding/json" "errors" "github.com/resgateio/resgate/server/codec" "github.com/resgateio/resgate/server/reserr" ) type subscriptionState byte const ( stateSubscribed subscriptionState = iota stateError stateRequested stateCollection stateModel ) // Model represents a RES model // https://github.com/resgateio/resgate/blob/master/docs/res-protocol.md#models type Model struct { Values map[string]codec.Value data []byte } // MarshalJSON creates a JSON encoded representation of the model func (m *Model) MarshalJSON() ([]byte, error) { if m.data == nil
return m.data, nil } // Collection represents a RES collection // https://github.com/resgateio/resgate/blob/master/docs/res-protocol.md#collections type Collection struct { Values []codec.Value data []byte } // MarshalJSON creates a JSON encoded representation of the collection func (c *Collection) MarshalJSON() ([]byte, error) { if c.data == nil { data, err := json.Marshal(c.Values) if err != nil { return nil, err } c.data = data } return c.data, nil } // ResourceSubscription represents a client subscription for a resource or query resource type ResourceSubscription struct { e *EventSubscription query string state subscriptionState subs map[Subscriber]struct{} resetting bool links []string // version is the internal resource version, starting with 0 and bumped +1 // for each modifying event. version uint // Three types of values stored model *Model collection *Collection err error } func newResourceSubscription(e *EventSubscription, query string) *ResourceSubscription { return &ResourceSubscription{ e: e, query: query, subs: make(map[Subscriber]struct{}), } } // GetResourceType returns the resource type of the resource subscription func (rs *ResourceSubscription) GetResourceType() ResourceType { rs.e.mu.Lock() defer rs.e.mu.Unlock() return ResourceType(rs.state) } // GetError returns the subscription error, or nil if there is no error func (rs *ResourceSubscription) GetError() error { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.err } // GetCollection will lock the EventSubscription for any changes // and return the collection string slice. func (rs *ResourceSubscription) GetCollection() (*Collection, uint) { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.collection, rs.version } // GetModel will return the model map and its current version. 
func (rs *ResourceSubscription) GetModel() (*Model, uint) { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.model, rs.version } // Unsubscribe cancels the client subscriber's subscription func (rs *ResourceSubscription) Unsubscribe(sub Subscriber) { rs.e.Enqueue(func() { if sub != nil { delete(rs.subs, sub) } // Directly unregister unsubscribed queries if rs.query != "" && len(rs.subs) == 0 { rs.unregister() } rs.e.removeCount(1) }) } func (rs *ResourceSubscription) handleEvent(r *ResourceEvent) { // Discard if event happened before resource was loaded, // unless it is a reaccess. Then we let the event be passed further. if rs.state <= stateRequested && r.Event != "reaccess" { return } // Set event to target current version of the resource. r.Version = rs.version switch r.Event { case "change": if rs.resetting || !rs.handleEventChange(r) { return } case "add": if rs.resetting || !rs.handleEventAdd(r) { return } case "remove": if rs.resetting || !rs.handleEventRemove(r) { return } case "delete": if !rs.resetting { rs.handleEventDelete(r) } return } rs.e.mu.Unlock() for sub := range rs.subs { sub.Event(r) } rs.e.mu.Lock() } func (rs *ResourceSubscription) handleEventChange(r *ResourceEvent) bool { if rs.state == stateCollection { rs.e.cache.Errorf("Error processing event %s.%s: change event on collection", rs.e.ResourceName, r.Event) return false } var props map[string]codec.Value var err error // [DEPRECATED:deprecatedModelChangeEvent] if codec.IsLegacyChangeEvent(r.Payload) { rs.e.cache.deprecated(rs.e.ResourceName, deprecatedModelChangeEvent) props, err = codec.DecodeLegacyChangeEvent(r.Payload) } else { props, err = codec.DecodeChangeEvent(r.Payload) } if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) } // Clone old map using old map size as capacity. 
// It might not be exact, but often sufficient m := make(map[string]codec.Value, len(rs.model.Values)) for k, v := range rs.model.Values { m[k] = v } // Update model properties for k, v := range props { if v.Type == codec.ValueTypeDelete { if _, ok := m[k]; ok { delete(m, k) } else { delete(props, k) } } else { if m[k].Equal(v) { delete(props, k) } else { m[k] = v } } } // No actual changes if len(props) == 0 { return false } r.Changed = props r.OldValues = rs.model.Values r.Update = true rs.model = &Model{Values: m} rs.version++ return true } func (rs *ResourceSubscription) handleEventAdd(r *ResourceEvent) bool { if rs.state == stateModel { rs.e.cache.Errorf("Error processing event %s.%s: add event on model", rs.e.ResourceName, r.Event) return false } params, err := codec.DecodeAddEvent(r.Payload) if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) return false } idx := params.Idx old := rs.collection.Values l := len(old) if idx < 0 || idx > l { rs.e.cache.Errorf("Error processing event %s.%s: idx %d is out of bounds", rs.e.ResourceName, r.Event, idx) return false } // Copy collection as the old slice might have been // passed to a Subscriber and should be considered immutable col := make([]codec.Value, l+1) copy(col, old[0:idx]) copy(col[idx+1:], old[idx:]) col[idx] = params.Value rs.collection = &Collection{Values: col} rs.version++ r.Idx = params.Idx r.Value = params.Value r.Update = true return true } func (rs *ResourceSubscription) handleEventRemove(r *ResourceEvent) bool { if rs.state == stateModel { rs.e.cache.Errorf("Error processing event %s.%s: remove event on model", rs.e.ResourceName, r.Event) return false } params, err := codec.DecodeRemoveEvent(r.Payload) if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) return false } idx := params.Idx old := rs.collection.Values l := len(old) if idx < 0 || idx >= l { rs.e.cache.Errorf("Error processing event 
%s.%s: idx %d is out of bounds", rs.e.ResourceName, r.Event, idx) return false } r.Value = old[idx] // Copy collection as the old slice might have been // passed to a Subscriber and should be considered immutable col := make([]codec.Value, l-1) copy(col, old[0:idx]) copy(col[idx:], old[idx+1:]) rs.collection = &Collection{Values: col} rs.version++ r.Idx = params.Idx r.Update = true return true } func (rs *ResourceSubscription) handleEventDelete(r *ResourceEvent) { subs := rs.subs c := int64(len(subs)) rs.subs = nil rs.unregister() rs.e.removeCount(c) rs.e.mu.Unlock() for sub := range subs { sub.Event(r) } rs.e.mu.Lock() } func (rs *ResourceSubscription) enqueueGetResponse(data []byte, err error) { rs.e.Enqueue(func() { rs, sublist := rs.processGetResponse(data, err) rs.e.mu.Unlock() defer rs.e.mu.Lock() if rs.state == stateError { for _, sub := range sublist { sub.Loaded(nil, rs.err) } } else { for _, sub := range sublist { sub.Loaded(rs, nil) } } }) } // unregister deletes itself and all its links from // the EventSubscription func (rs *ResourceSubscription) unregister() { if rs.query == "" { rs.e.base = nil } else { delete(rs.e.queries, rs.query) } for _, q := range rs.links { if q == "" { rs.e.base = nil } else { delete(rs.e.links, q) } } rs.links = nil } func (rs *ResourceSubscription) processGetResponse(payload []byte, err error) (nrs *ResourceSubscription, sublist []Subscriber) { var result *codec.GetResult // Either we have an error making the request // or an error in the service's response if err == nil { result, err = codec.DecodeGetResponse(payload) } // Get request failed if err != nil { // Set state and store the error in case any other // subscriber are waiting on the Lock to subscribe rs.state = stateError rs.err = err // Clone subscribers to slice sublist = make([]Subscriber, len(rs.subs)) i := 0 for sub := range rs.subs { sublist[i] = sub i++ } c := int64(len(sublist)) rs.subs = nil rs.unregister() rs.e.removeCount(c) nrs = rs return } // Is the 
normalized query in the response different from the // one requested by the Subscriber? // Then we should create a link to the normalized query if result.Query != rs.query { nrs = rs.e.getResourceSubscription(result.Query) if rs.query == "" { rs.e.base = nrs } else { // Replace resource subscription with the normalized version if rs.e.links == nil { rs.e.links = make(map[string]*ResourceSubscription) } rs.e.links[rs.query] = nrs delete(rs.e.queries, rs.query) } nrs.links = append(nrs.links, rs.query) // Copy over all subscribers for sub := range rs.subs { nrs.subs[sub] = struct{}{} } } else { nrs = rs } // Clone subscribers to slice from original resourceSubscription // as it is only those subscribers that has not yet been Loaded. // In nrs, there might be subscribers already Loaded. sublist = make([]Subscriber, len(rs.subs)) i := 0 for sub := range rs.subs { sublist[i] = sub i++ } // Exit if another request has already progressed the state. // Might happen when making a query subscription, directly followed by // another subscription using the normalized query of the previous. // When the second request returns, its resourceSubscription // will already be updated by the response from the first request. if nrs.state > stateRequested { return } // Make sure internal resource version has its 0 value nrs.version = 0 if result.Model != nil { nrs.model = &Model{Values: result.Model} nrs.state = stateModel } else { nrs.collection = &Collection{Values: result.Collection} nrs.state = stateCollection } return } func (rs *ResourceSubscription) handleResetResource(t *Throttle) { // Are we already resetting. Then quick exit if rs.resetting { return } rs.resetting = true // Create request subj := "get." 
+ rs.e.ResourceName payload := codec.CreateGetRequest(rs.query) if t != nil { t.Add(func() { rs.e.cache.mq.SendRequest(subj, payload, func(_ string, data []byte, err error) { rs.e.Enqueue(func() { rs.resetting = false rs.processResetGetResponse(data, err) }) t.Done() }) }) } else { rs.e.cache.mq.SendRequest(subj, payload, func(_ string, data []byte, err error) { rs.e.Enqueue(func() { rs.resetting = false rs.processResetGetResponse(data, err) }) }) } } func (rs *ResourceSubscription) handleResetAccess(t *Throttle) { for sub := range rs.subs { sub.Reaccess(t) } } func (rs *ResourceSubscription) processResetGetResponse(payload []byte, err error) { var result *codec.GetResult // Either we have an error making the request // or an error in the service's response if err == nil { result, err = codec.DecodeGetResponse(payload) if err == nil && ((rs.state == stateModel && result.Model == nil) || (rs.state == stateCollection && result.Collection == nil)) { err = errors.New("mismatching resource type") } } // Get request failed if err != nil { // In case of a system.notFound error, // a delete event is generated. Otherwise we // just log the error. 
if reserr.IsError(err, reserr.CodeNotFound) { rs.handleEvent(&ResourceEvent{Event: "delete"}) } else { rs.e.cache.Errorf("Subscription %s: Reset get error - %s", rs.e.ResourceName, err) } return } switch rs.state { case stateModel: rs.processResetModel(result.Model) case stateCollection: rs.processResetCollection(result.Collection) } } func (rs *ResourceSubscription) processResetModel(props map[string]codec.Value) { // Update cached model properties vals := rs.model.Values for k := range vals { if _, ok := props[k]; !ok { props[k] = codec.DeleteValue } } for k, v := range props { ov, ok := vals[k] if ok && v.Equal(ov) { delete(props, k) } } if len(props) == 0 { return } r := &ResourceEvent{ Event: "change", Payload: codec.EncodeChangeEvent(props), } rs.handleEvent(r) } func (rs *ResourceSubscription) processResetCollection(collection []codec.Value) { events := lcs(rs.collection.Values, collection) for _, r := range events { rs.handleEvent(r) } } func lcs(a, b []codec.Value) []*ResourceEvent { var i, j int // Do a LCS matric calculation // https://en.wikipedia.org/wiki/Longest_common_subsequence_problem s := 0 m := len(a) n := len(b) // Trim of matches at the start and end for s < m && s < n && a[s].Equal(b[s]) { s++ } if s == m && s == n { return nil } for s < m && s < n && a[m-1].Equal(b[n-1]) { m-- n-- } var aa, bb []codec.Value if s > 0 || m < len(a) { aa = a[s:m] m = m - s } else { aa = a } if s > 0 || n < len(b) { bb = b[s:n] n = n - s } else { bb = b } // Create matrix and initialize it w := m + 1 c := make([]int, w*(n+1)) for i = 0; i < m; i++ { for j = 0; j < n; j++ { if aa[i].Equal(bb[j]) { c[(i+1)+w*(j+1)] = c[i+w*j] + 1 } else { v1 := c[(i+1)+w*j] v2 := c[i+w*(j+1)] if v2 > v1 { c[(i+1)+w*(j+1)] = v2 } else { c[(i+1)+w*(j+1)] = v1 } } } } steps := make([]*ResourceEvent, 0, m+n-2*c[w*(n+1)-1]) idx := m + s i = m j = n r := 0 var adds [][3]int addCount := n - c[w*(n+1)-1] if addCount > 0 { adds = make([][3]int, 0, addCount) } Loop: for { m = i - 1 n = j - 
1 switch { case i > 0 && j > 0 && aa[m].Equal(bb[n]): idx-- i-- j-- case j > 0 && (i == 0 || c[i+w*n] >= c[m+w*j]): adds = append(adds, [3]int{n, idx, r}) j-- case i > 0 && (j == 0 || c[i+w*n] < c[m+w*j]): idx-- steps = append(steps, &ResourceEvent{ Event: "remove", Payload: codec.EncodeRemoveEvent(&codec.RemoveEvent{ Idx: idx, }), }) r++ i-- default: break Loop } } // Do the adds l := len(adds) - 1 for i := l; i >= 0; i-- { add := adds[i] steps = append(steps, &ResourceEvent{ Event: "add", Payload: codec.EncodeAddEvent(&codec.AddEvent{ Value: bb[add[0]], Idx: add[1] - r + add[2] + l - i, }), }) } return steps }
{ data, err := json.Marshal(m.Values) if err != nil { return nil, err } m.data = data }
conditional_block
resourceSubscription.go
package rescache import ( "encoding/json" "errors" "github.com/resgateio/resgate/server/codec" "github.com/resgateio/resgate/server/reserr" ) type subscriptionState byte const ( stateSubscribed subscriptionState = iota stateError stateRequested stateCollection stateModel ) // Model represents a RES model // https://github.com/resgateio/resgate/blob/master/docs/res-protocol.md#models type Model struct { Values map[string]codec.Value data []byte } // MarshalJSON creates a JSON encoded representation of the model func (m *Model) MarshalJSON() ([]byte, error) { if m.data == nil { data, err := json.Marshal(m.Values) if err != nil { return nil, err } m.data = data } return m.data, nil } // Collection represents a RES collection // https://github.com/resgateio/resgate/blob/master/docs/res-protocol.md#collections type Collection struct { Values []codec.Value data []byte } // MarshalJSON creates a JSON encoded representation of the collection func (c *Collection) MarshalJSON() ([]byte, error) { if c.data == nil { data, err := json.Marshal(c.Values) if err != nil { return nil, err } c.data = data } return c.data, nil } // ResourceSubscription represents a client subscription for a resource or query resource type ResourceSubscription struct { e *EventSubscription query string state subscriptionState subs map[Subscriber]struct{} resetting bool links []string // version is the internal resource version, starting with 0 and bumped +1 // for each modifying event. version uint // Three types of values stored model *Model collection *Collection err error } func
(e *EventSubscription, query string) *ResourceSubscription { return &ResourceSubscription{ e: e, query: query, subs: make(map[Subscriber]struct{}), } } // GetResourceType returns the resource type of the resource subscription func (rs *ResourceSubscription) GetResourceType() ResourceType { rs.e.mu.Lock() defer rs.e.mu.Unlock() return ResourceType(rs.state) } // GetError returns the subscription error, or nil if there is no error func (rs *ResourceSubscription) GetError() error { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.err } // GetCollection will lock the EventSubscription for any changes // and return the collection string slice. func (rs *ResourceSubscription) GetCollection() (*Collection, uint) { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.collection, rs.version } // GetModel will return the model map and its current version. func (rs *ResourceSubscription) GetModel() (*Model, uint) { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.model, rs.version } // Unsubscribe cancels the client subscriber's subscription func (rs *ResourceSubscription) Unsubscribe(sub Subscriber) { rs.e.Enqueue(func() { if sub != nil { delete(rs.subs, sub) } // Directly unregister unsubscribed queries if rs.query != "" && len(rs.subs) == 0 { rs.unregister() } rs.e.removeCount(1) }) } func (rs *ResourceSubscription) handleEvent(r *ResourceEvent) { // Discard if event happened before resource was loaded, // unless it is a reaccess. Then we let the event be passed further. if rs.state <= stateRequested && r.Event != "reaccess" { return } // Set event to target current version of the resource. 
r.Version = rs.version switch r.Event { case "change": if rs.resetting || !rs.handleEventChange(r) { return } case "add": if rs.resetting || !rs.handleEventAdd(r) { return } case "remove": if rs.resetting || !rs.handleEventRemove(r) { return } case "delete": if !rs.resetting { rs.handleEventDelete(r) } return } rs.e.mu.Unlock() for sub := range rs.subs { sub.Event(r) } rs.e.mu.Lock() } func (rs *ResourceSubscription) handleEventChange(r *ResourceEvent) bool { if rs.state == stateCollection { rs.e.cache.Errorf("Error processing event %s.%s: change event on collection", rs.e.ResourceName, r.Event) return false } var props map[string]codec.Value var err error // [DEPRECATED:deprecatedModelChangeEvent] if codec.IsLegacyChangeEvent(r.Payload) { rs.e.cache.deprecated(rs.e.ResourceName, deprecatedModelChangeEvent) props, err = codec.DecodeLegacyChangeEvent(r.Payload) } else { props, err = codec.DecodeChangeEvent(r.Payload) } if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) } // Clone old map using old map size as capacity. 
// It might not be exact, but often sufficient m := make(map[string]codec.Value, len(rs.model.Values)) for k, v := range rs.model.Values { m[k] = v } // Update model properties for k, v := range props { if v.Type == codec.ValueTypeDelete { if _, ok := m[k]; ok { delete(m, k) } else { delete(props, k) } } else { if m[k].Equal(v) { delete(props, k) } else { m[k] = v } } } // No actual changes if len(props) == 0 { return false } r.Changed = props r.OldValues = rs.model.Values r.Update = true rs.model = &Model{Values: m} rs.version++ return true } func (rs *ResourceSubscription) handleEventAdd(r *ResourceEvent) bool { if rs.state == stateModel { rs.e.cache.Errorf("Error processing event %s.%s: add event on model", rs.e.ResourceName, r.Event) return false } params, err := codec.DecodeAddEvent(r.Payload) if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) return false } idx := params.Idx old := rs.collection.Values l := len(old) if idx < 0 || idx > l { rs.e.cache.Errorf("Error processing event %s.%s: idx %d is out of bounds", rs.e.ResourceName, r.Event, idx) return false } // Copy collection as the old slice might have been // passed to a Subscriber and should be considered immutable col := make([]codec.Value, l+1) copy(col, old[0:idx]) copy(col[idx+1:], old[idx:]) col[idx] = params.Value rs.collection = &Collection{Values: col} rs.version++ r.Idx = params.Idx r.Value = params.Value r.Update = true return true } func (rs *ResourceSubscription) handleEventRemove(r *ResourceEvent) bool { if rs.state == stateModel { rs.e.cache.Errorf("Error processing event %s.%s: remove event on model", rs.e.ResourceName, r.Event) return false } params, err := codec.DecodeRemoveEvent(r.Payload) if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) return false } idx := params.Idx old := rs.collection.Values l := len(old) if idx < 0 || idx >= l { rs.e.cache.Errorf("Error processing event 
%s.%s: idx %d is out of bounds", rs.e.ResourceName, r.Event, idx) return false } r.Value = old[idx] // Copy collection as the old slice might have been // passed to a Subscriber and should be considered immutable col := make([]codec.Value, l-1) copy(col, old[0:idx]) copy(col[idx:], old[idx+1:]) rs.collection = &Collection{Values: col} rs.version++ r.Idx = params.Idx r.Update = true return true } func (rs *ResourceSubscription) handleEventDelete(r *ResourceEvent) { subs := rs.subs c := int64(len(subs)) rs.subs = nil rs.unregister() rs.e.removeCount(c) rs.e.mu.Unlock() for sub := range subs { sub.Event(r) } rs.e.mu.Lock() } func (rs *ResourceSubscription) enqueueGetResponse(data []byte, err error) { rs.e.Enqueue(func() { rs, sublist := rs.processGetResponse(data, err) rs.e.mu.Unlock() defer rs.e.mu.Lock() if rs.state == stateError { for _, sub := range sublist { sub.Loaded(nil, rs.err) } } else { for _, sub := range sublist { sub.Loaded(rs, nil) } } }) } // unregister deletes itself and all its links from // the EventSubscription func (rs *ResourceSubscription) unregister() { if rs.query == "" { rs.e.base = nil } else { delete(rs.e.queries, rs.query) } for _, q := range rs.links { if q == "" { rs.e.base = nil } else { delete(rs.e.links, q) } } rs.links = nil } func (rs *ResourceSubscription) processGetResponse(payload []byte, err error) (nrs *ResourceSubscription, sublist []Subscriber) { var result *codec.GetResult // Either we have an error making the request // or an error in the service's response if err == nil { result, err = codec.DecodeGetResponse(payload) } // Get request failed if err != nil { // Set state and store the error in case any other // subscriber are waiting on the Lock to subscribe rs.state = stateError rs.err = err // Clone subscribers to slice sublist = make([]Subscriber, len(rs.subs)) i := 0 for sub := range rs.subs { sublist[i] = sub i++ } c := int64(len(sublist)) rs.subs = nil rs.unregister() rs.e.removeCount(c) nrs = rs return } // Is the 
normalized query in the response different from the // one requested by the Subscriber? // Then we should create a link to the normalized query if result.Query != rs.query { nrs = rs.e.getResourceSubscription(result.Query) if rs.query == "" { rs.e.base = nrs } else { // Replace resource subscription with the normalized version if rs.e.links == nil { rs.e.links = make(map[string]*ResourceSubscription) } rs.e.links[rs.query] = nrs delete(rs.e.queries, rs.query) } nrs.links = append(nrs.links, rs.query) // Copy over all subscribers for sub := range rs.subs { nrs.subs[sub] = struct{}{} } } else { nrs = rs } // Clone subscribers to slice from original resourceSubscription // as it is only those subscribers that has not yet been Loaded. // In nrs, there might be subscribers already Loaded. sublist = make([]Subscriber, len(rs.subs)) i := 0 for sub := range rs.subs { sublist[i] = sub i++ } // Exit if another request has already progressed the state. // Might happen when making a query subscription, directly followed by // another subscription using the normalized query of the previous. // When the second request returns, its resourceSubscription // will already be updated by the response from the first request. if nrs.state > stateRequested { return } // Make sure internal resource version has its 0 value nrs.version = 0 if result.Model != nil { nrs.model = &Model{Values: result.Model} nrs.state = stateModel } else { nrs.collection = &Collection{Values: result.Collection} nrs.state = stateCollection } return } func (rs *ResourceSubscription) handleResetResource(t *Throttle) { // Are we already resetting. Then quick exit if rs.resetting { return } rs.resetting = true // Create request subj := "get." 
+ rs.e.ResourceName payload := codec.CreateGetRequest(rs.query) if t != nil { t.Add(func() { rs.e.cache.mq.SendRequest(subj, payload, func(_ string, data []byte, err error) { rs.e.Enqueue(func() { rs.resetting = false rs.processResetGetResponse(data, err) }) t.Done() }) }) } else { rs.e.cache.mq.SendRequest(subj, payload, func(_ string, data []byte, err error) { rs.e.Enqueue(func() { rs.resetting = false rs.processResetGetResponse(data, err) }) }) } } func (rs *ResourceSubscription) handleResetAccess(t *Throttle) { for sub := range rs.subs { sub.Reaccess(t) } } func (rs *ResourceSubscription) processResetGetResponse(payload []byte, err error) { var result *codec.GetResult // Either we have an error making the request // or an error in the service's response if err == nil { result, err = codec.DecodeGetResponse(payload) if err == nil && ((rs.state == stateModel && result.Model == nil) || (rs.state == stateCollection && result.Collection == nil)) { err = errors.New("mismatching resource type") } } // Get request failed if err != nil { // In case of a system.notFound error, // a delete event is generated. Otherwise we // just log the error. 
if reserr.IsError(err, reserr.CodeNotFound) { rs.handleEvent(&ResourceEvent{Event: "delete"}) } else { rs.e.cache.Errorf("Subscription %s: Reset get error - %s", rs.e.ResourceName, err) } return } switch rs.state { case stateModel: rs.processResetModel(result.Model) case stateCollection: rs.processResetCollection(result.Collection) } } func (rs *ResourceSubscription) processResetModel(props map[string]codec.Value) { // Update cached model properties vals := rs.model.Values for k := range vals { if _, ok := props[k]; !ok { props[k] = codec.DeleteValue } } for k, v := range props { ov, ok := vals[k] if ok && v.Equal(ov) { delete(props, k) } } if len(props) == 0 { return } r := &ResourceEvent{ Event: "change", Payload: codec.EncodeChangeEvent(props), } rs.handleEvent(r) } func (rs *ResourceSubscription) processResetCollection(collection []codec.Value) { events := lcs(rs.collection.Values, collection) for _, r := range events { rs.handleEvent(r) } } func lcs(a, b []codec.Value) []*ResourceEvent { var i, j int // Do a LCS matric calculation // https://en.wikipedia.org/wiki/Longest_common_subsequence_problem s := 0 m := len(a) n := len(b) // Trim of matches at the start and end for s < m && s < n && a[s].Equal(b[s]) { s++ } if s == m && s == n { return nil } for s < m && s < n && a[m-1].Equal(b[n-1]) { m-- n-- } var aa, bb []codec.Value if s > 0 || m < len(a) { aa = a[s:m] m = m - s } else { aa = a } if s > 0 || n < len(b) { bb = b[s:n] n = n - s } else { bb = b } // Create matrix and initialize it w := m + 1 c := make([]int, w*(n+1)) for i = 0; i < m; i++ { for j = 0; j < n; j++ { if aa[i].Equal(bb[j]) { c[(i+1)+w*(j+1)] = c[i+w*j] + 1 } else { v1 := c[(i+1)+w*j] v2 := c[i+w*(j+1)] if v2 > v1 { c[(i+1)+w*(j+1)] = v2 } else { c[(i+1)+w*(j+1)] = v1 } } } } steps := make([]*ResourceEvent, 0, m+n-2*c[w*(n+1)-1]) idx := m + s i = m j = n r := 0 var adds [][3]int addCount := n - c[w*(n+1)-1] if addCount > 0 { adds = make([][3]int, 0, addCount) } Loop: for { m = i - 1 n = j - 
1 switch { case i > 0 && j > 0 && aa[m].Equal(bb[n]): idx-- i-- j-- case j > 0 && (i == 0 || c[i+w*n] >= c[m+w*j]): adds = append(adds, [3]int{n, idx, r}) j-- case i > 0 && (j == 0 || c[i+w*n] < c[m+w*j]): idx-- steps = append(steps, &ResourceEvent{ Event: "remove", Payload: codec.EncodeRemoveEvent(&codec.RemoveEvent{ Idx: idx, }), }) r++ i-- default: break Loop } } // Do the adds l := len(adds) - 1 for i := l; i >= 0; i-- { add := adds[i] steps = append(steps, &ResourceEvent{ Event: "add", Payload: codec.EncodeAddEvent(&codec.AddEvent{ Value: bb[add[0]], Idx: add[1] - r + add[2] + l - i, }), }) } return steps }
newResourceSubscription
identifier_name
resourceSubscription.go
package rescache import ( "encoding/json" "errors" "github.com/resgateio/resgate/server/codec" "github.com/resgateio/resgate/server/reserr" ) type subscriptionState byte const ( stateSubscribed subscriptionState = iota stateError stateRequested stateCollection stateModel ) // Model represents a RES model // https://github.com/resgateio/resgate/blob/master/docs/res-protocol.md#models type Model struct { Values map[string]codec.Value data []byte } // MarshalJSON creates a JSON encoded representation of the model func (m *Model) MarshalJSON() ([]byte, error) { if m.data == nil { data, err := json.Marshal(m.Values) if err != nil { return nil, err } m.data = data } return m.data, nil } // Collection represents a RES collection // https://github.com/resgateio/resgate/blob/master/docs/res-protocol.md#collections type Collection struct { Values []codec.Value data []byte } // MarshalJSON creates a JSON encoded representation of the collection func (c *Collection) MarshalJSON() ([]byte, error) { if c.data == nil { data, err := json.Marshal(c.Values) if err != nil { return nil, err } c.data = data } return c.data, nil } // ResourceSubscription represents a client subscription for a resource or query resource type ResourceSubscription struct { e *EventSubscription query string state subscriptionState subs map[Subscriber]struct{} resetting bool links []string // version is the internal resource version, starting with 0 and bumped +1 // for each modifying event. 
version uint // Three types of values stored model *Model collection *Collection err error } func newResourceSubscription(e *EventSubscription, query string) *ResourceSubscription { return &ResourceSubscription{ e: e, query: query, subs: make(map[Subscriber]struct{}), } } // GetResourceType returns the resource type of the resource subscription func (rs *ResourceSubscription) GetResourceType() ResourceType { rs.e.mu.Lock() defer rs.e.mu.Unlock() return ResourceType(rs.state) } // GetError returns the subscription error, or nil if there is no error func (rs *ResourceSubscription) GetError() error { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.err } // GetCollection will lock the EventSubscription for any changes // and return the collection string slice. func (rs *ResourceSubscription) GetCollection() (*Collection, uint) { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.collection, rs.version } // GetModel will return the model map and its current version. func (rs *ResourceSubscription) GetModel() (*Model, uint) { rs.e.mu.Lock() defer rs.e.mu.Unlock() return rs.model, rs.version } // Unsubscribe cancels the client subscriber's subscription func (rs *ResourceSubscription) Unsubscribe(sub Subscriber) { rs.e.Enqueue(func() { if sub != nil { delete(rs.subs, sub) } // Directly unregister unsubscribed queries if rs.query != "" && len(rs.subs) == 0 { rs.unregister() } rs.e.removeCount(1) }) } func (rs *ResourceSubscription) handleEvent(r *ResourceEvent) { // Discard if event happened before resource was loaded, // unless it is a reaccess. Then we let the event be passed further. if rs.state <= stateRequested && r.Event != "reaccess" { return } // Set event to target current version of the resource. 
r.Version = rs.version switch r.Event { case "change": if rs.resetting || !rs.handleEventChange(r) { return } case "add": if rs.resetting || !rs.handleEventAdd(r) { return } case "remove": if rs.resetting || !rs.handleEventRemove(r) { return } case "delete": if !rs.resetting { rs.handleEventDelete(r) } return } rs.e.mu.Unlock() for sub := range rs.subs { sub.Event(r) } rs.e.mu.Lock() } func (rs *ResourceSubscription) handleEventChange(r *ResourceEvent) bool { if rs.state == stateCollection { rs.e.cache.Errorf("Error processing event %s.%s: change event on collection", rs.e.ResourceName, r.Event) return false } var props map[string]codec.Value var err error // [DEPRECATED:deprecatedModelChangeEvent] if codec.IsLegacyChangeEvent(r.Payload) { rs.e.cache.deprecated(rs.e.ResourceName, deprecatedModelChangeEvent) props, err = codec.DecodeLegacyChangeEvent(r.Payload) } else { props, err = codec.DecodeChangeEvent(r.Payload) } if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) } // Clone old map using old map size as capacity. 
// It might not be exact, but often sufficient m := make(map[string]codec.Value, len(rs.model.Values)) for k, v := range rs.model.Values { m[k] = v } // Update model properties for k, v := range props { if v.Type == codec.ValueTypeDelete { if _, ok := m[k]; ok { delete(m, k) } else { delete(props, k) } } else { if m[k].Equal(v) { delete(props, k) } else { m[k] = v } } } // No actual changes if len(props) == 0 { return false } r.Changed = props r.OldValues = rs.model.Values r.Update = true rs.model = &Model{Values: m} rs.version++ return true } func (rs *ResourceSubscription) handleEventAdd(r *ResourceEvent) bool { if rs.state == stateModel { rs.e.cache.Errorf("Error processing event %s.%s: add event on model", rs.e.ResourceName, r.Event) return false } params, err := codec.DecodeAddEvent(r.Payload) if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) return false } idx := params.Idx old := rs.collection.Values l := len(old) if idx < 0 || idx > l { rs.e.cache.Errorf("Error processing event %s.%s: idx %d is out of bounds", rs.e.ResourceName, r.Event, idx) return false } // Copy collection as the old slice might have been // passed to a Subscriber and should be considered immutable col := make([]codec.Value, l+1) copy(col, old[0:idx]) copy(col[idx+1:], old[idx:]) col[idx] = params.Value rs.collection = &Collection{Values: col} rs.version++ r.Idx = params.Idx r.Value = params.Value r.Update = true return true } func (rs *ResourceSubscription) handleEventRemove(r *ResourceEvent) bool { if rs.state == stateModel { rs.e.cache.Errorf("Error processing event %s.%s: remove event on model", rs.e.ResourceName, r.Event) return false } params, err := codec.DecodeRemoveEvent(r.Payload) if err != nil { rs.e.cache.Errorf("Error processing event %s.%s: %s", rs.e.ResourceName, r.Event, err) return false } idx := params.Idx old := rs.collection.Values l := len(old) if idx < 0 || idx >= l { rs.e.cache.Errorf("Error processing event 
%s.%s: idx %d is out of bounds", rs.e.ResourceName, r.Event, idx) return false } r.Value = old[idx] // Copy collection as the old slice might have been // passed to a Subscriber and should be considered immutable col := make([]codec.Value, l-1) copy(col, old[0:idx]) copy(col[idx:], old[idx+1:]) rs.collection = &Collection{Values: col} rs.version++ r.Idx = params.Idx r.Update = true return true } func (rs *ResourceSubscription) handleEventDelete(r *ResourceEvent) { subs := rs.subs c := int64(len(subs)) rs.subs = nil rs.unregister() rs.e.removeCount(c) rs.e.mu.Unlock() for sub := range subs { sub.Event(r) } rs.e.mu.Lock() } func (rs *ResourceSubscription) enqueueGetResponse(data []byte, err error) { rs.e.Enqueue(func() { rs, sublist := rs.processGetResponse(data, err) rs.e.mu.Unlock() defer rs.e.mu.Lock() if rs.state == stateError { for _, sub := range sublist { sub.Loaded(nil, rs.err) } } else { for _, sub := range sublist { sub.Loaded(rs, nil) } } }) } // unregister deletes itself and all its links from // the EventSubscription func (rs *ResourceSubscription) unregister() { if rs.query == "" { rs.e.base = nil } else { delete(rs.e.queries, rs.query) } for _, q := range rs.links { if q == "" { rs.e.base = nil } else { delete(rs.e.links, q) } } rs.links = nil } func (rs *ResourceSubscription) processGetResponse(payload []byte, err error) (nrs *ResourceSubscription, sublist []Subscriber) { var result *codec.GetResult // Either we have an error making the request // or an error in the service's response if err == nil { result, err = codec.DecodeGetResponse(payload) } // Get request failed if err != nil { // Set state and store the error in case any other // subscriber are waiting on the Lock to subscribe rs.state = stateError rs.err = err // Clone subscribers to slice sublist = make([]Subscriber, len(rs.subs)) i := 0 for sub := range rs.subs { sublist[i] = sub i++ } c := int64(len(sublist)) rs.subs = nil rs.unregister() rs.e.removeCount(c) nrs = rs return } // Is the 
normalized query in the response different from the // one requested by the Subscriber? // Then we should create a link to the normalized query if result.Query != rs.query { nrs = rs.e.getResourceSubscription(result.Query) if rs.query == "" { rs.e.base = nrs } else { // Replace resource subscription with the normalized version if rs.e.links == nil { rs.e.links = make(map[string]*ResourceSubscription) } rs.e.links[rs.query] = nrs delete(rs.e.queries, rs.query) } nrs.links = append(nrs.links, rs.query) // Copy over all subscribers for sub := range rs.subs { nrs.subs[sub] = struct{}{} } } else { nrs = rs } // Clone subscribers to slice from original resourceSubscription // as it is only those subscribers that has not yet been Loaded. // In nrs, there might be subscribers already Loaded. sublist = make([]Subscriber, len(rs.subs)) i := 0 for sub := range rs.subs { sublist[i] = sub i++ } // Exit if another request has already progressed the state. // Might happen when making a query subscription, directly followed by // another subscription using the normalized query of the previous. // When the second request returns, its resourceSubscription // will already be updated by the response from the first request. if nrs.state > stateRequested { return } // Make sure internal resource version has its 0 value nrs.version = 0 if result.Model != nil { nrs.model = &Model{Values: result.Model} nrs.state = stateModel } else { nrs.collection = &Collection{Values: result.Collection} nrs.state = stateCollection } return } func (rs *ResourceSubscription) handleResetResource(t *Throttle) { // Are we already resetting. Then quick exit if rs.resetting { return }
// Create request subj := "get." + rs.e.ResourceName payload := codec.CreateGetRequest(rs.query) if t != nil { t.Add(func() { rs.e.cache.mq.SendRequest(subj, payload, func(_ string, data []byte, err error) { rs.e.Enqueue(func() { rs.resetting = false rs.processResetGetResponse(data, err) }) t.Done() }) }) } else { rs.e.cache.mq.SendRequest(subj, payload, func(_ string, data []byte, err error) { rs.e.Enqueue(func() { rs.resetting = false rs.processResetGetResponse(data, err) }) }) } } func (rs *ResourceSubscription) handleResetAccess(t *Throttle) { for sub := range rs.subs { sub.Reaccess(t) } } func (rs *ResourceSubscription) processResetGetResponse(payload []byte, err error) { var result *codec.GetResult // Either we have an error making the request // or an error in the service's response if err == nil { result, err = codec.DecodeGetResponse(payload) if err == nil && ((rs.state == stateModel && result.Model == nil) || (rs.state == stateCollection && result.Collection == nil)) { err = errors.New("mismatching resource type") } } // Get request failed if err != nil { // In case of a system.notFound error, // a delete event is generated. Otherwise we // just log the error. 
if reserr.IsError(err, reserr.CodeNotFound) { rs.handleEvent(&ResourceEvent{Event: "delete"}) } else { rs.e.cache.Errorf("Subscription %s: Reset get error - %s", rs.e.ResourceName, err) } return } switch rs.state { case stateModel: rs.processResetModel(result.Model) case stateCollection: rs.processResetCollection(result.Collection) } } func (rs *ResourceSubscription) processResetModel(props map[string]codec.Value) { // Update cached model properties vals := rs.model.Values for k := range vals { if _, ok := props[k]; !ok { props[k] = codec.DeleteValue } } for k, v := range props { ov, ok := vals[k] if ok && v.Equal(ov) { delete(props, k) } } if len(props) == 0 { return } r := &ResourceEvent{ Event: "change", Payload: codec.EncodeChangeEvent(props), } rs.handleEvent(r) } func (rs *ResourceSubscription) processResetCollection(collection []codec.Value) { events := lcs(rs.collection.Values, collection) for _, r := range events { rs.handleEvent(r) } } func lcs(a, b []codec.Value) []*ResourceEvent { var i, j int // Do a LCS matric calculation // https://en.wikipedia.org/wiki/Longest_common_subsequence_problem s := 0 m := len(a) n := len(b) // Trim of matches at the start and end for s < m && s < n && a[s].Equal(b[s]) { s++ } if s == m && s == n { return nil } for s < m && s < n && a[m-1].Equal(b[n-1]) { m-- n-- } var aa, bb []codec.Value if s > 0 || m < len(a) { aa = a[s:m] m = m - s } else { aa = a } if s > 0 || n < len(b) { bb = b[s:n] n = n - s } else { bb = b } // Create matrix and initialize it w := m + 1 c := make([]int, w*(n+1)) for i = 0; i < m; i++ { for j = 0; j < n; j++ { if aa[i].Equal(bb[j]) { c[(i+1)+w*(j+1)] = c[i+w*j] + 1 } else { v1 := c[(i+1)+w*j] v2 := c[i+w*(j+1)] if v2 > v1 { c[(i+1)+w*(j+1)] = v2 } else { c[(i+1)+w*(j+1)] = v1 } } } } steps := make([]*ResourceEvent, 0, m+n-2*c[w*(n+1)-1]) idx := m + s i = m j = n r := 0 var adds [][3]int addCount := n - c[w*(n+1)-1] if addCount > 0 { adds = make([][3]int, 0, addCount) } Loop: for { m = i - 1 n = j - 
1 switch { case i > 0 && j > 0 && aa[m].Equal(bb[n]): idx-- i-- j-- case j > 0 && (i == 0 || c[i+w*n] >= c[m+w*j]): adds = append(adds, [3]int{n, idx, r}) j-- case i > 0 && (j == 0 || c[i+w*n] < c[m+w*j]): idx-- steps = append(steps, &ResourceEvent{ Event: "remove", Payload: codec.EncodeRemoveEvent(&codec.RemoveEvent{ Idx: idx, }), }) r++ i-- default: break Loop } } // Do the adds l := len(adds) - 1 for i := l; i >= 0; i-- { add := adds[i] steps = append(steps, &ResourceEvent{ Event: "add", Payload: codec.EncodeAddEvent(&codec.AddEvent{ Value: bb[add[0]], Idx: add[1] - r + add[2] + l - i, }), }) } return steps }
rs.resetting = true
random_line_split
callgrind.ts
// https://www.valgrind.org/docs/manual/cl-format.html // // Larger example files can be found by searching on github: // https://github.com/search?q=cfn%3D&type=code // // Converting callgrind files into flamegraphs is challenging because callgrind // formatted profiles contain call graphs with weighted nodes and edges, and // such a weighted call graph does not uniquely define a flamegraph. // // Consider a program that looks like this: // // // example.js // function backup(read) { // if (read) { // read() // } else { // write() // } // } // // function start() { // backup(true) // } // // function end() { // backup(false) // } // // start() // end() // // Profiling this program might result in a profile that looks like the // following flame graph defined in Brendan Gregg's plaintext format: // // start;backup;read 4 // end;backup;write 4 // // When we convert this execution into a call-graph, we get the following: // // +------------------+ +---------------+ // | start (self: 0) | | end (self: 0) | // +------------------+ +---------------| // \ / // (total: 4) \ / (total: 4) // v v // +------------------+ // | backup (self: 0) | // +------------------+ // / \ // (total: 4) / \ (total: 4) // v v // +----------------+ +-----------------+ // | read (self: 4) | | write (self: 4) | // +----------------+ +-----------------+ // // In the process of the conversion, we've lost information about the ratio of // time spent in read v.s. write in the start call v.s. the end call. The // following flame graph would yield the exact same call-graph, and therefore // the exact sample call-grind formatted profile: // // start;backup;read 3 // start;backup;write 1 // end;backup;read 1 // end;backup;write 3 // // This is unfortunate, since it means we can't produce a flamegraph that isn't // potentially lying about the what the actual execution behavior was. 
To // produce a flamegraph at all from the call graph representation, we have to // decide how much weight each sub-call should have. Given that we know the // total weight of each node, we'll make the incorrect assumption that every // invocation of a function will have the average distribution of costs among // the sub-function invocations. In the example given, this means we assume that // every invocation of backup() is assumed to spend half its time in read() and // half its time in write(). // // So the flamegraph we'll produce from the given call-graph will actually be: // // start;backup;read 2 // start;backup;write 2 // end;backup;read 2 // end;backup;write 2 // // A particularly bad consequence is that the resulting flamegraph will suggest // that there was at some point a call stack that looked like // strat;backup;write, even though that never happened in the real program // execution. import {CallTreeProfileBuilder, Frame, FrameInfo, Profile, ProfileGroup} from '../lib/profile' import {getOrElse, getOrInsert, KeyedSet} from '../lib/utils' import {ByteFormatter, TimeFormatter} from '../lib/value-formatters' class CallGraph { private frameSet = new KeyedSet<Frame>() private totalWeights = new Map<Frame, number>() private childrenTotalWeights = new Map<Frame, Map<Frame, number>>() constructor(private fileName: string, private fieldName: string) {} private getOrInsertFrame(info: FrameInfo): Frame { return Frame.getOrInsert(this.frameSet, info) } private addToTotalWeight(frame: Frame, weight: number) { if (!this.totalWeights.has(frame)) { this.totalWeights.set(frame, weight) } else { this.totalWeights.set(frame, this.totalWeights.get(frame)! 
+ weight) } } addSelfWeight(frameInfo: FrameInfo, weight: number) { this.addToTotalWeight(this.getOrInsertFrame(frameInfo), weight) } addChildWithTotalWeight(parentInfo: FrameInfo, childInfo: FrameInfo, weight: number) { const parent = this.getOrInsertFrame(parentInfo) const child = this.getOrInsertFrame(childInfo) const childMap = getOrInsert(this.childrenTotalWeights, parent, k => new Map()) if (!childMap.has(child)) { childMap.set(child, weight) } else { childMap.set(child, childMap.get(child) + weight) } this.addToTotalWeight(parent, weight) } toProfile(): Profile { // To convert a call graph into a profile, we first need to identify what // the "root weights" are. "root weights" are the total weight of each frame // while at the bottom of the call-stack. The majority of functions will have // zero weight while at the bottom of the call-stack, since most functions // are never at the bottom of the call-stack. const rootWeights = new Map<Frame, number>() for (let [frame, totalWeight] of this.totalWeights) { rootWeights.set(frame, totalWeight) } for (let [_, childMap] of this.childrenTotalWeights) { for (let [child, weight] of childMap) { rootWeights.set(child, getOrElse(rootWeights, child, () => weight) - weight) } } let totalProfileWeight = 0 for (let [_, rootWeight] of rootWeights) { totalProfileWeight += rootWeight } const profile = new CallTreeProfileBuilder() let unitMultiplier = 1 // These are common field names used by Xdebug. Let's give them special // treatment to more helpfully display units. 
if (this.fieldName === 'Time_(10ns)') { profile.setName(`${this.fileName} -- Time`) unitMultiplier = 10 profile.setValueFormatter(new TimeFormatter('nanoseconds')) } else if (this.fieldName == 'Memory_(bytes)') { profile.setName(`${this.fileName} -- Memory`) profile.setValueFormatter(new ByteFormatter()) } else { profile.setName(`${this.fileName} -- ${this.fieldName}`) } let totalCumulative = 0 const currentStack = new Set<Frame>() const visit = (frame: Frame, callTreeWeight: number) => { if (currentStack.has(frame)) { // Call-graphs are allowed to have cycles. Call-trees are not. In case // we run into a cycle, we'll just avoid recursing into the same subtree // more than once in a call stack. The result will be that the time // spent in the recursive call will instead be attributed as self time // in the parent. return } // We need to calculate how much weight to give to a particular node in // the call-tree based on information from the call-graph. A given node // from the call-graph might correspond to several nodes in the call-tree, // so we need to decide how to distribute the weight of the call-graph // node to the various call-tree nodes. // // We assume that the weighting is evenly distributed. If a call-tree node // X occurs with weights x1 and x2, and we know from the call-graph that // child Y of X has a total weight y, then we assume the child Y of X has // weight y*x1/(x1 + x2) for the first occurrence, and y*x2(y1 + x2) for // the second occurrence. // // This assumption is incorrectly (sometimes wildly so), but we need to // make *some* assumption, and this seems to me the sanest option. // // See the comment at the top of the file for an example where this // assumption can yield especially misleading results. if (callTreeWeight < 1e-4 * totalProfileWeight) { // This assumption about even distribution can cause us to generate a // call tree with dramatically more nodes than the call graph. 
// // Consider a function which is called 1000 times, where the result is // cached. The first invocation has a complex call tree and may take // 100ms. Let's say that this complex call tree has 250 nodes. // // Subsequent calls use the cached result, so take only 1ms, and have no // children in their call trees. So we have, in total, (1 + 250) + 999 // nodes in the call-tree for a total of 1250 nodes. // // The information specific to each invocation is, however, lost in the // call-graph representation. // // Because of the even distribution assumption we make, this means that // the call-trees of each invocation will have the same shape. Each 1ms // call-tree will look identical to the 100ms call-tree, just // horizontally compacted. So instead of 1251 nodes, we have // 1000*250=250,000 nodes in the resulting call graph. // // To mitigate this explosion of the # of nodes, we ignore subtrees // whose weights are less than 0.01% of the total weight of the profile. return } // totalWeightForFrame is the total weight for the given frame in the // entire call graph. const callGraphWeightForFrame = getOrElse(this.totalWeights, frame, () => 0) if (callGraphWeightForFrame === 0) { return
// This is the portion of the total time the given child spends within the // given parent that we'll attribute to this specific path in the call // tree. const ratio = callTreeWeight / callGraphWeightForFrame let selfWeightForFrame = callGraphWeightForFrame profile.enterFrame(frame, totalCumulative * unitMultiplier) currentStack.add(frame) for (let [child, callGraphEdgeWeight] of this.childrenTotalWeights.get(frame) || []) { selfWeightForFrame -= callGraphEdgeWeight const childCallTreeWeight = callGraphEdgeWeight * ratio visit(child, childCallTreeWeight) } currentStack.delete(frame) totalCumulative += selfWeightForFrame * ratio profile.leaveFrame(frame, totalCumulative * unitMultiplier) } for (let [rootFrame, rootWeight] of rootWeights) { if (rootWeight <= 0) { continue } // If we've reached here, it means that the given root frame has some // weight while at the top of the call-stack. visit(rootFrame, rootWeight) } return profile.build() } } // In writing this, I initially tried to use the formal grammar described in // section 3.2 of https://www.valgrind.org/docs/manual/cl-format.html, but // stopped because most of the information isn't relevant for visualization, and // because there's inconsistency between the grammar and subsequence // descriptions. // // For example, the grammar for headers specifies all the valid header names, // but then the writing below that mentions there may be a "totals" or "summary" // header, which should be disallowed by the formal grammar. // // So, instead, I'm not going to bother with a formal parse. Since there are no // real recursive structures in this file format, that should be okay. 
class CallgrindParser { private lines: string[] private lineNum: number private callGraphs: CallGraph[] | null = null private eventsLine: string | null = null private filename: string | null = null private functionName: string | null = null private calleeFilename: string | null = null private calleeFunctionName: string | null = null private savedFileNames: {[id: string]: string} = {} private savedFunctionNames: {[id: string]: string} = {} constructor(contents: string, private importedFileName: string) { this.lines = contents.split('\n') this.lineNum = 0 } parse(): ProfileGroup | null { while (this.lineNum < this.lines.length) { const line = this.lines[this.lineNum++] if (/^\s*#/.exec(line)) { // Line is a comment. Ignore it. continue } if (/^\s*$/.exec(line)) { // Line is empty. Ignore it. continue } if (this.parseHeaderLine(line)) { continue } if (this.parseAssignmentLine(line)) { continue } if (this.parseCostLine(line, 'self')) { continue } throw new Error(`Unrecognized line "${line}" on line ${this.lineNum}`) } if (!this.callGraphs) { return null } return { name: this.importedFileName, indexToView: 0, profiles: this.callGraphs.map(cg => cg.toProfile()), } } private frameInfo(): FrameInfo { const file = this.filename || '(unknown)' const name = this.functionName || '(unknown)' const key = `${file}:${name}` return {key, name, file} } private calleeFrameInfo(): FrameInfo { const file = this.calleeFilename || '(unknown)' const name = this.calleeFunctionName || '(unknown)' const key = `${file}:${name}` return {key, name, file} } private parseHeaderLine(line: string): boolean { const headerMatch = /^\s*(\w+):\s*(.*)+$/.exec(line) if (!headerMatch) return false if (headerMatch[1] !== 'events') { // We don't care about other headers. Ignore this line. return true } // Line specifies the formatting of subsequent cost lines. const fields = headerMatch[2].split(' ') if (this.callGraphs != null) { throw new Error( `Duplicate "events: " lines specified. 
First was "${this.eventsLine}", now received "${line}" on ${this.lineNum}.`, ) } this.callGraphs = fields.map(fieldName => { return new CallGraph(this.importedFileName, fieldName) }) return true } private parseAssignmentLine(line: string): boolean { const assignmentMatch = /^(\w+)=\s*(.*)$/.exec(line) if (!assignmentMatch) return false const key = assignmentMatch[1] const value = assignmentMatch[2] switch (key) { case 'fe': case 'fi': case 'fl': { this.filename = this.parseNameWithCompression(value, this.savedFileNames) this.calleeFilename = this.filename break } case 'fn': { this.functionName = this.parseNameWithCompression(value, this.savedFunctionNames) break } case 'cfi': case 'cfl': { this.calleeFilename = this.parseNameWithCompression(value, this.savedFileNames) break } case 'cfn': { this.calleeFunctionName = this.parseNameWithCompression(value, this.savedFunctionNames) break } case 'calls': { // TODO(jlfwong): This is currently ignoring the number of calls being // made. Accounting for the number of calls might be unhelpful anyway, // since it'll just be copying the exact same frame over-and-over again, // but that might be better than ignoring it. this.parseCostLine(this.lines[this.lineNum++], 'child') break } default: { console.log(`Ignoring assignment to unrecognized key "${line}" on line ${this.lineNum}`) } } return true } private parseNameWithCompression(name: string, saved: {[id: string]: string}): string { { const nameDefinitionMatch = /^\((\d+)\)\s*(.+)$/.exec(name) if (nameDefinitionMatch) { const id = nameDefinitionMatch[1] const name = nameDefinitionMatch[2] if (id in saved) { throw new Error( `Redefinition of name with id: ${id}. Original value was "${saved[id]}". 
Tried to redefine as "${name}" on line ${this.lineNum}.`, ) } saved[id] = name return name } } { const nameUseMatch = /^\((\d+)\)$/.exec(name) if (nameUseMatch) { const id = nameUseMatch[1] if (!(id in saved)) { throw new Error( `Tried to use name with id ${id} on line ${this.lineNum} before it was defined.`, ) } return saved[id] } } return name } private parseCostLine(line: string, costType: 'self' | 'child'): boolean { // TODO(jlfwong): Handle "Subposition compression" // TODO(jlfwong): Allow hexadecimal encoding const parts = line.split(/\s+/) const nums: number[] = [] for (let part of parts) { // As far as I can tell from the specification, the callgrind format does // not accept floating point numbers. const asNum = parseInt(part) if (isNaN(asNum)) { return false } nums.push(asNum) } if (nums.length == 0) { return false } // TODO(jlfwong): Handle custom positions format w/ multiple parts const numPositionFields = 1 // NOTE: We intentionally do not include the line number here because // callgrind uses the line number of the function invocation, not the // line number of the function definition, which conflicts with how // speedscope uses line numbers. // // const lineNum = nums[0] if (!this.callGraphs) { throw new Error( `Encountered a cost line on line ${this.lineNum} before event specification was provided.`, ) } for (let i = 0; i < this.callGraphs.length; i++) { if (costType === 'self') { this.callGraphs[i].addSelfWeight(this.frameInfo(), nums[numPositionFields + i]) } else if (costType === 'child') { this.callGraphs[i].addChildWithTotalWeight( this.frameInfo(), this.calleeFrameInfo(), nums[numPositionFields + i] || 0, ) } } return true } } export function importFromCallgrind( contents: string, importedFileName: string, ): ProfileGroup | null { return new CallgrindParser(contents, importedFileName).parse() }
}
random_line_split
callgrind.ts
// https://www.valgrind.org/docs/manual/cl-format.html // // Larger example files can be found by searching on github: // https://github.com/search?q=cfn%3D&type=code // // Converting callgrind files into flamegraphs is challenging because callgrind // formatted profiles contain call graphs with weighted nodes and edges, and // such a weighted call graph does not uniquely define a flamegraph. // // Consider a program that looks like this: // // // example.js // function backup(read) { // if (read) { // read() // } else { // write() // } // } // // function start() { // backup(true) // } // // function end() { // backup(false) // } // // start() // end() // // Profiling this program might result in a profile that looks like the // following flame graph defined in Brendan Gregg's plaintext format: // // start;backup;read 4 // end;backup;write 4 // // When we convert this execution into a call-graph, we get the following: // // +------------------+ +---------------+ // | start (self: 0) | | end (self: 0) | // +------------------+ +---------------| // \ / // (total: 4) \ / (total: 4) // v v // +------------------+ // | backup (self: 0) | // +------------------+ // / \ // (total: 4) / \ (total: 4) // v v // +----------------+ +-----------------+ // | read (self: 4) | | write (self: 4) | // +----------------+ +-----------------+ // // In the process of the conversion, we've lost information about the ratio of // time spent in read v.s. write in the start call v.s. the end call. The // following flame graph would yield the exact same call-graph, and therefore // the exact sample call-grind formatted profile: // // start;backup;read 3 // start;backup;write 1 // end;backup;read 1 // end;backup;write 3 // // This is unfortunate, since it means we can't produce a flamegraph that isn't // potentially lying about the what the actual execution behavior was. 
To // produce a flamegraph at all from the call graph representation, we have to // decide how much weight each sub-call should have. Given that we know the // total weight of each node, we'll make the incorrect assumption that every // invocation of a function will have the average distribution of costs among // the sub-function invocations. In the example given, this means we assume that // every invocation of backup() is assumed to spend half its time in read() and // half its time in write(). // // So the flamegraph we'll produce from the given call-graph will actually be: // // start;backup;read 2 // start;backup;write 2 // end;backup;read 2 // end;backup;write 2 // // A particularly bad consequence is that the resulting flamegraph will suggest // that there was at some point a call stack that looked like // strat;backup;write, even though that never happened in the real program // execution. import {CallTreeProfileBuilder, Frame, FrameInfo, Profile, ProfileGroup} from '../lib/profile' import {getOrElse, getOrInsert, KeyedSet} from '../lib/utils' import {ByteFormatter, TimeFormatter} from '../lib/value-formatters' class CallGraph { private frameSet = new KeyedSet<Frame>() private totalWeights = new Map<Frame, number>() private childrenTotalWeights = new Map<Frame, Map<Frame, number>>() constructor(private fileName: string, private fieldName: string) {} private getOrInsertFrame(info: FrameInfo): Frame { return Frame.getOrInsert(this.frameSet, info) } private addToTotalWeight(frame: Frame, weight: number) { if (!this.totalWeights.has(frame)) { this.totalWeights.set(frame, weight) } else { this.totalWeights.set(frame, this.totalWeights.get(frame)! 
+ weight) } } addSelfWeight(frameInfo: FrameInfo, weight: number) { this.addToTotalWeight(this.getOrInsertFrame(frameInfo), weight) } addChildWithTotalWeight(parentInfo: FrameInfo, childInfo: FrameInfo, weight: number) { const parent = this.getOrInsertFrame(parentInfo) const child = this.getOrInsertFrame(childInfo) const childMap = getOrInsert(this.childrenTotalWeights, parent, k => new Map()) if (!childMap.has(child)) { childMap.set(child, weight) } else { childMap.set(child, childMap.get(child) + weight) } this.addToTotalWeight(parent, weight) } toProfile(): Profile { // To convert a call graph into a profile, we first need to identify what // the "root weights" are. "root weights" are the total weight of each frame // while at the bottom of the call-stack. The majority of functions will have // zero weight while at the bottom of the call-stack, since most functions // are never at the bottom of the call-stack. const rootWeights = new Map<Frame, number>() for (let [frame, totalWeight] of this.totalWeights) { rootWeights.set(frame, totalWeight) } for (let [_, childMap] of this.childrenTotalWeights) { for (let [child, weight] of childMap) { rootWeights.set(child, getOrElse(rootWeights, child, () => weight) - weight) } } let totalProfileWeight = 0 for (let [_, rootWeight] of rootWeights) { totalProfileWeight += rootWeight } const profile = new CallTreeProfileBuilder() let unitMultiplier = 1 // These are common field names used by Xdebug. Let's give them special // treatment to more helpfully display units. 
if (this.fieldName === 'Time_(10ns)') { profile.setName(`${this.fileName} -- Time`) unitMultiplier = 10 profile.setValueFormatter(new TimeFormatter('nanoseconds')) } else if (this.fieldName == 'Memory_(bytes)') { profile.setName(`${this.fileName} -- Memory`) profile.setValueFormatter(new ByteFormatter()) } else { profile.setName(`${this.fileName} -- ${this.fieldName}`) } let totalCumulative = 0 const currentStack = new Set<Frame>() const visit = (frame: Frame, callTreeWeight: number) => { if (currentStack.has(frame)) { // Call-graphs are allowed to have cycles. Call-trees are not. In case // we run into a cycle, we'll just avoid recursing into the same subtree // more than once in a call stack. The result will be that the time // spent in the recursive call will instead be attributed as self time // in the parent. return } // We need to calculate how much weight to give to a particular node in // the call-tree based on information from the call-graph. A given node // from the call-graph might correspond to several nodes in the call-tree, // so we need to decide how to distribute the weight of the call-graph // node to the various call-tree nodes. // // We assume that the weighting is evenly distributed. If a call-tree node // X occurs with weights x1 and x2, and we know from the call-graph that // child Y of X has a total weight y, then we assume the child Y of X has // weight y*x1/(x1 + x2) for the first occurrence, and y*x2(y1 + x2) for // the second occurrence. // // This assumption is incorrectly (sometimes wildly so), but we need to // make *some* assumption, and this seems to me the sanest option. // // See the comment at the top of the file for an example where this // assumption can yield especially misleading results. if (callTreeWeight < 1e-4 * totalProfileWeight) { // This assumption about even distribution can cause us to generate a // call tree with dramatically more nodes than the call graph. 
// // Consider a function which is called 1000 times, where the result is // cached. The first invocation has a complex call tree and may take // 100ms. Let's say that this complex call tree has 250 nodes. // // Subsequent calls use the cached result, so take only 1ms, and have no // children in their call trees. So we have, in total, (1 + 250) + 999 // nodes in the call-tree for a total of 1250 nodes. // // The information specific to each invocation is, however, lost in the // call-graph representation. // // Because of the even distribution assumption we make, this means that // the call-trees of each invocation will have the same shape. Each 1ms // call-tree will look identical to the 100ms call-tree, just // horizontally compacted. So instead of 1251 nodes, we have // 1000*250=250,000 nodes in the resulting call graph. // // To mitigate this explosion of the # of nodes, we ignore subtrees // whose weights are less than 0.01% of the total weight of the profile. return } // totalWeightForFrame is the total weight for the given frame in the // entire call graph. const callGraphWeightForFrame = getOrElse(this.totalWeights, frame, () => 0) if (callGraphWeightForFrame === 0) { return } // This is the portion of the total time the given child spends within the // given parent that we'll attribute to this specific path in the call // tree. 
const ratio = callTreeWeight / callGraphWeightForFrame let selfWeightForFrame = callGraphWeightForFrame profile.enterFrame(frame, totalCumulative * unitMultiplier) currentStack.add(frame) for (let [child, callGraphEdgeWeight] of this.childrenTotalWeights.get(frame) || []) { selfWeightForFrame -= callGraphEdgeWeight const childCallTreeWeight = callGraphEdgeWeight * ratio visit(child, childCallTreeWeight) } currentStack.delete(frame) totalCumulative += selfWeightForFrame * ratio profile.leaveFrame(frame, totalCumulative * unitMultiplier) } for (let [rootFrame, rootWeight] of rootWeights) { if (rootWeight <= 0) { continue } // If we've reached here, it means that the given root frame has some // weight while at the top of the call-stack. visit(rootFrame, rootWeight) } return profile.build() } } // In writing this, I initially tried to use the formal grammar described in // section 3.2 of https://www.valgrind.org/docs/manual/cl-format.html, but // stopped because most of the information isn't relevant for visualization, and // because there's inconsistency between the grammar and subsequence // descriptions. // // For example, the grammar for headers specifies all the valid header names, // but then the writing below that mentions there may be a "totals" or "summary" // header, which should be disallowed by the formal grammar. // // So, instead, I'm not going to bother with a formal parse. Since there are no // real recursive structures in this file format, that should be okay. 
class CallgrindParser { private lines: string[] private lineNum: number private callGraphs: CallGraph[] | null = null private eventsLine: string | null = null private filename: string | null = null private functionName: string | null = null private calleeFilename: string | null = null private calleeFunctionName: string | null = null private savedFileNames: {[id: string]: string} = {} private savedFunctionNames: {[id: string]: string} = {} constructor(contents: string, private importedFileName: string) { this.lines = contents.split('\n') this.lineNum = 0 } parse(): ProfileGroup | null
private frameInfo(): FrameInfo { const file = this.filename || '(unknown)' const name = this.functionName || '(unknown)' const key = `${file}:${name}` return {key, name, file} } private calleeFrameInfo(): FrameInfo { const file = this.calleeFilename || '(unknown)' const name = this.calleeFunctionName || '(unknown)' const key = `${file}:${name}` return {key, name, file} } private parseHeaderLine(line: string): boolean { const headerMatch = /^\s*(\w+):\s*(.*)+$/.exec(line) if (!headerMatch) return false if (headerMatch[1] !== 'events') { // We don't care about other headers. Ignore this line. return true } // Line specifies the formatting of subsequent cost lines. const fields = headerMatch[2].split(' ') if (this.callGraphs != null) { throw new Error( `Duplicate "events: " lines specified. First was "${this.eventsLine}", now received "${line}" on ${this.lineNum}.`, ) } this.callGraphs = fields.map(fieldName => { return new CallGraph(this.importedFileName, fieldName) }) return true } private parseAssignmentLine(line: string): boolean { const assignmentMatch = /^(\w+)=\s*(.*)$/.exec(line) if (!assignmentMatch) return false const key = assignmentMatch[1] const value = assignmentMatch[2] switch (key) { case 'fe': case 'fi': case 'fl': { this.filename = this.parseNameWithCompression(value, this.savedFileNames) this.calleeFilename = this.filename break } case 'fn': { this.functionName = this.parseNameWithCompression(value, this.savedFunctionNames) break } case 'cfi': case 'cfl': { this.calleeFilename = this.parseNameWithCompression(value, this.savedFileNames) break } case 'cfn': { this.calleeFunctionName = this.parseNameWithCompression(value, this.savedFunctionNames) break } case 'calls': { // TODO(jlfwong): This is currently ignoring the number of calls being // made. Accounting for the number of calls might be unhelpful anyway, // since it'll just be copying the exact same frame over-and-over again, // but that might be better than ignoring it. 
this.parseCostLine(this.lines[this.lineNum++], 'child') break } default: { console.log(`Ignoring assignment to unrecognized key "${line}" on line ${this.lineNum}`) } } return true } private parseNameWithCompression(name: string, saved: {[id: string]: string}): string { { const nameDefinitionMatch = /^\((\d+)\)\s*(.+)$/.exec(name) if (nameDefinitionMatch) { const id = nameDefinitionMatch[1] const name = nameDefinitionMatch[2] if (id in saved) { throw new Error( `Redefinition of name with id: ${id}. Original value was "${saved[id]}". Tried to redefine as "${name}" on line ${this.lineNum}.`, ) } saved[id] = name return name } } { const nameUseMatch = /^\((\d+)\)$/.exec(name) if (nameUseMatch) { const id = nameUseMatch[1] if (!(id in saved)) { throw new Error( `Tried to use name with id ${id} on line ${this.lineNum} before it was defined.`, ) } return saved[id] } } return name } private parseCostLine(line: string, costType: 'self' | 'child'): boolean { // TODO(jlfwong): Handle "Subposition compression" // TODO(jlfwong): Allow hexadecimal encoding const parts = line.split(/\s+/) const nums: number[] = [] for (let part of parts) { // As far as I can tell from the specification, the callgrind format does // not accept floating point numbers. const asNum = parseInt(part) if (isNaN(asNum)) { return false } nums.push(asNum) } if (nums.length == 0) { return false } // TODO(jlfwong): Handle custom positions format w/ multiple parts const numPositionFields = 1 // NOTE: We intentionally do not include the line number here because // callgrind uses the line number of the function invocation, not the // line number of the function definition, which conflicts with how // speedscope uses line numbers. 
// // const lineNum = nums[0] if (!this.callGraphs) { throw new Error( `Encountered a cost line on line ${this.lineNum} before event specification was provided.`, ) } for (let i = 0; i < this.callGraphs.length; i++) { if (costType === 'self') { this.callGraphs[i].addSelfWeight(this.frameInfo(), nums[numPositionFields + i]) } else if (costType === 'child') { this.callGraphs[i].addChildWithTotalWeight( this.frameInfo(), this.calleeFrameInfo(), nums[numPositionFields + i] || 0, ) } } return true } } export function importFromCallgrind( contents: string, importedFileName: string, ): ProfileGroup | null { return new CallgrindParser(contents, importedFileName).parse() }
{ while (this.lineNum < this.lines.length) { const line = this.lines[this.lineNum++] if (/^\s*#/.exec(line)) { // Line is a comment. Ignore it. continue } if (/^\s*$/.exec(line)) { // Line is empty. Ignore it. continue } if (this.parseHeaderLine(line)) { continue } if (this.parseAssignmentLine(line)) { continue } if (this.parseCostLine(line, 'self')) { continue } throw new Error(`Unrecognized line "${line}" on line ${this.lineNum}`) } if (!this.callGraphs) { return null } return { name: this.importedFileName, indexToView: 0, profiles: this.callGraphs.map(cg => cg.toProfile()), } }
identifier_body
callgrind.ts
// https://www.valgrind.org/docs/manual/cl-format.html // // Larger example files can be found by searching on github: // https://github.com/search?q=cfn%3D&type=code // // Converting callgrind files into flamegraphs is challenging because callgrind // formatted profiles contain call graphs with weighted nodes and edges, and // such a weighted call graph does not uniquely define a flamegraph. // // Consider a program that looks like this: // // // example.js // function backup(read) { // if (read) { // read() // } else { // write() // } // } // // function start() { // backup(true) // } // // function end() { // backup(false) // } // // start() // end() // // Profiling this program might result in a profile that looks like the // following flame graph defined in Brendan Gregg's plaintext format: // // start;backup;read 4 // end;backup;write 4 // // When we convert this execution into a call-graph, we get the following: // // +------------------+ +---------------+ // | start (self: 0) | | end (self: 0) | // +------------------+ +---------------| // \ / // (total: 4) \ / (total: 4) // v v // +------------------+ // | backup (self: 0) | // +------------------+ // / \ // (total: 4) / \ (total: 4) // v v // +----------------+ +-----------------+ // | read (self: 4) | | write (self: 4) | // +----------------+ +-----------------+ // // In the process of the conversion, we've lost information about the ratio of // time spent in read v.s. write in the start call v.s. the end call. The // following flame graph would yield the exact same call-graph, and therefore // the exact sample call-grind formatted profile: // // start;backup;read 3 // start;backup;write 1 // end;backup;read 1 // end;backup;write 3 // // This is unfortunate, since it means we can't produce a flamegraph that isn't // potentially lying about the what the actual execution behavior was. 
To // produce a flamegraph at all from the call graph representation, we have to // decide how much weight each sub-call should have. Given that we know the // total weight of each node, we'll make the incorrect assumption that every // invocation of a function will have the average distribution of costs among // the sub-function invocations. In the example given, this means we assume that // every invocation of backup() is assumed to spend half its time in read() and // half its time in write(). // // So the flamegraph we'll produce from the given call-graph will actually be: // // start;backup;read 2 // start;backup;write 2 // end;backup;read 2 // end;backup;write 2 // // A particularly bad consequence is that the resulting flamegraph will suggest // that there was at some point a call stack that looked like // strat;backup;write, even though that never happened in the real program // execution. import {CallTreeProfileBuilder, Frame, FrameInfo, Profile, ProfileGroup} from '../lib/profile' import {getOrElse, getOrInsert, KeyedSet} from '../lib/utils' import {ByteFormatter, TimeFormatter} from '../lib/value-formatters' class CallGraph { private frameSet = new KeyedSet<Frame>() private totalWeights = new Map<Frame, number>() private childrenTotalWeights = new Map<Frame, Map<Frame, number>>() constructor(private fileName: string, private fieldName: string) {} private getOrInsertFrame(info: FrameInfo): Frame { return Frame.getOrInsert(this.frameSet, info) } private addToTotalWeight(frame: Frame, weight: number) { if (!this.totalWeights.has(frame)) { this.totalWeights.set(frame, weight) } else { this.totalWeights.set(frame, this.totalWeights.get(frame)! 
+ weight) } } addSelfWeight(frameInfo: FrameInfo, weight: number) { this.addToTotalWeight(this.getOrInsertFrame(frameInfo), weight) } addChildWithTotalWeight(parentInfo: FrameInfo, childInfo: FrameInfo, weight: number) { const parent = this.getOrInsertFrame(parentInfo) const child = this.getOrInsertFrame(childInfo) const childMap = getOrInsert(this.childrenTotalWeights, parent, k => new Map()) if (!childMap.has(child)) { childMap.set(child, weight) } else { childMap.set(child, childMap.get(child) + weight) } this.addToTotalWeight(parent, weight) } toProfile(): Profile { // To convert a call graph into a profile, we first need to identify what // the "root weights" are. "root weights" are the total weight of each frame // while at the bottom of the call-stack. The majority of functions will have // zero weight while at the bottom of the call-stack, since most functions // are never at the bottom of the call-stack. const rootWeights = new Map<Frame, number>() for (let [frame, totalWeight] of this.totalWeights) { rootWeights.set(frame, totalWeight) } for (let [_, childMap] of this.childrenTotalWeights) { for (let [child, weight] of childMap) { rootWeights.set(child, getOrElse(rootWeights, child, () => weight) - weight) } } let totalProfileWeight = 0 for (let [_, rootWeight] of rootWeights) { totalProfileWeight += rootWeight } const profile = new CallTreeProfileBuilder() let unitMultiplier = 1 // These are common field names used by Xdebug. Let's give them special // treatment to more helpfully display units. 
if (this.fieldName === 'Time_(10ns)') { profile.setName(`${this.fileName} -- Time`) unitMultiplier = 10 profile.setValueFormatter(new TimeFormatter('nanoseconds')) } else if (this.fieldName == 'Memory_(bytes)') { profile.setName(`${this.fileName} -- Memory`) profile.setValueFormatter(new ByteFormatter()) } else { profile.setName(`${this.fileName} -- ${this.fieldName}`) } let totalCumulative = 0 const currentStack = new Set<Frame>() const visit = (frame: Frame, callTreeWeight: number) => { if (currentStack.has(frame)) { // Call-graphs are allowed to have cycles. Call-trees are not. In case // we run into a cycle, we'll just avoid recursing into the same subtree // more than once in a call stack. The result will be that the time // spent in the recursive call will instead be attributed as self time // in the parent. return } // We need to calculate how much weight to give to a particular node in // the call-tree based on information from the call-graph. A given node // from the call-graph might correspond to several nodes in the call-tree, // so we need to decide how to distribute the weight of the call-graph // node to the various call-tree nodes. // // We assume that the weighting is evenly distributed. If a call-tree node // X occurs with weights x1 and x2, and we know from the call-graph that // child Y of X has a total weight y, then we assume the child Y of X has // weight y*x1/(x1 + x2) for the first occurrence, and y*x2(y1 + x2) for // the second occurrence. // // This assumption is incorrectly (sometimes wildly so), but we need to // make *some* assumption, and this seems to me the sanest option. // // See the comment at the top of the file for an example where this // assumption can yield especially misleading results. if (callTreeWeight < 1e-4 * totalProfileWeight) { // This assumption about even distribution can cause us to generate a // call tree with dramatically more nodes than the call graph. 
// // Consider a function which is called 1000 times, where the result is // cached. The first invocation has a complex call tree and may take // 100ms. Let's say that this complex call tree has 250 nodes. // // Subsequent calls use the cached result, so take only 1ms, and have no // children in their call trees. So we have, in total, (1 + 250) + 999 // nodes in the call-tree for a total of 1250 nodes. // // The information specific to each invocation is, however, lost in the // call-graph representation. // // Because of the even distribution assumption we make, this means that // the call-trees of each invocation will have the same shape. Each 1ms // call-tree will look identical to the 100ms call-tree, just // horizontally compacted. So instead of 1251 nodes, we have // 1000*250=250,000 nodes in the resulting call graph. // // To mitigate this explosion of the # of nodes, we ignore subtrees // whose weights are less than 0.01% of the total weight of the profile. return } // totalWeightForFrame is the total weight for the given frame in the // entire call graph. const callGraphWeightForFrame = getOrElse(this.totalWeights, frame, () => 0) if (callGraphWeightForFrame === 0) { return } // This is the portion of the total time the given child spends within the // given parent that we'll attribute to this specific path in the call // tree. 
const ratio = callTreeWeight / callGraphWeightForFrame let selfWeightForFrame = callGraphWeightForFrame profile.enterFrame(frame, totalCumulative * unitMultiplier) currentStack.add(frame) for (let [child, callGraphEdgeWeight] of this.childrenTotalWeights.get(frame) || []) { selfWeightForFrame -= callGraphEdgeWeight const childCallTreeWeight = callGraphEdgeWeight * ratio visit(child, childCallTreeWeight) } currentStack.delete(frame) totalCumulative += selfWeightForFrame * ratio profile.leaveFrame(frame, totalCumulative * unitMultiplier) } for (let [rootFrame, rootWeight] of rootWeights) { if (rootWeight <= 0) { continue } // If we've reached here, it means that the given root frame has some // weight while at the top of the call-stack. visit(rootFrame, rootWeight) } return profile.build() } } // In writing this, I initially tried to use the formal grammar described in // section 3.2 of https://www.valgrind.org/docs/manual/cl-format.html, but // stopped because most of the information isn't relevant for visualization, and // because there's inconsistency between the grammar and subsequence // descriptions. // // For example, the grammar for headers specifies all the valid header names, // but then the writing below that mentions there may be a "totals" or "summary" // header, which should be disallowed by the formal grammar. // // So, instead, I'm not going to bother with a formal parse. Since there are no // real recursive structures in this file format, that should be okay. 
class CallgrindParser { private lines: string[] private lineNum: number private callGraphs: CallGraph[] | null = null private eventsLine: string | null = null private filename: string | null = null private functionName: string | null = null private calleeFilename: string | null = null private calleeFunctionName: string | null = null private savedFileNames: {[id: string]: string} = {} private savedFunctionNames: {[id: string]: string} = {} constructor(contents: string, private importedFileName: string) { this.lines = contents.split('\n') this.lineNum = 0 } parse(): ProfileGroup | null { while (this.lineNum < this.lines.length) { const line = this.lines[this.lineNum++] if (/^\s*#/.exec(line)) { // Line is a comment. Ignore it. continue } if (/^\s*$/.exec(line)) { // Line is empty. Ignore it. continue } if (this.parseHeaderLine(line)) { continue } if (this.parseAssignmentLine(line)) { continue } if (this.parseCostLine(line, 'self')) { continue } throw new Error(`Unrecognized line "${line}" on line ${this.lineNum}`) } if (!this.callGraphs) { return null } return { name: this.importedFileName, indexToView: 0, profiles: this.callGraphs.map(cg => cg.toProfile()), } } private frameInfo(): FrameInfo { const file = this.filename || '(unknown)' const name = this.functionName || '(unknown)' const key = `${file}:${name}` return {key, name, file} } private calleeFrameInfo(): FrameInfo { const file = this.calleeFilename || '(unknown)' const name = this.calleeFunctionName || '(unknown)' const key = `${file}:${name}` return {key, name, file} } private
(line: string): boolean { const headerMatch = /^\s*(\w+):\s*(.*)+$/.exec(line) if (!headerMatch) return false if (headerMatch[1] !== 'events') { // We don't care about other headers. Ignore this line. return true } // Line specifies the formatting of subsequent cost lines. const fields = headerMatch[2].split(' ') if (this.callGraphs != null) { throw new Error( `Duplicate "events: " lines specified. First was "${this.eventsLine}", now received "${line}" on ${this.lineNum}.`, ) } this.callGraphs = fields.map(fieldName => { return new CallGraph(this.importedFileName, fieldName) }) return true } private parseAssignmentLine(line: string): boolean { const assignmentMatch = /^(\w+)=\s*(.*)$/.exec(line) if (!assignmentMatch) return false const key = assignmentMatch[1] const value = assignmentMatch[2] switch (key) { case 'fe': case 'fi': case 'fl': { this.filename = this.parseNameWithCompression(value, this.savedFileNames) this.calleeFilename = this.filename break } case 'fn': { this.functionName = this.parseNameWithCompression(value, this.savedFunctionNames) break } case 'cfi': case 'cfl': { this.calleeFilename = this.parseNameWithCompression(value, this.savedFileNames) break } case 'cfn': { this.calleeFunctionName = this.parseNameWithCompression(value, this.savedFunctionNames) break } case 'calls': { // TODO(jlfwong): This is currently ignoring the number of calls being // made. Accounting for the number of calls might be unhelpful anyway, // since it'll just be copying the exact same frame over-and-over again, // but that might be better than ignoring it. 
this.parseCostLine(this.lines[this.lineNum++], 'child') break } default: { console.log(`Ignoring assignment to unrecognized key "${line}" on line ${this.lineNum}`) } } return true } private parseNameWithCompression(name: string, saved: {[id: string]: string}): string { { const nameDefinitionMatch = /^\((\d+)\)\s*(.+)$/.exec(name) if (nameDefinitionMatch) { const id = nameDefinitionMatch[1] const name = nameDefinitionMatch[2] if (id in saved) { throw new Error( `Redefinition of name with id: ${id}. Original value was "${saved[id]}". Tried to redefine as "${name}" on line ${this.lineNum}.`, ) } saved[id] = name return name } } { const nameUseMatch = /^\((\d+)\)$/.exec(name) if (nameUseMatch) { const id = nameUseMatch[1] if (!(id in saved)) { throw new Error( `Tried to use name with id ${id} on line ${this.lineNum} before it was defined.`, ) } return saved[id] } } return name } private parseCostLine(line: string, costType: 'self' | 'child'): boolean { // TODO(jlfwong): Handle "Subposition compression" // TODO(jlfwong): Allow hexadecimal encoding const parts = line.split(/\s+/) const nums: number[] = [] for (let part of parts) { // As far as I can tell from the specification, the callgrind format does // not accept floating point numbers. const asNum = parseInt(part) if (isNaN(asNum)) { return false } nums.push(asNum) } if (nums.length == 0) { return false } // TODO(jlfwong): Handle custom positions format w/ multiple parts const numPositionFields = 1 // NOTE: We intentionally do not include the line number here because // callgrind uses the line number of the function invocation, not the // line number of the function definition, which conflicts with how // speedscope uses line numbers. 
// // const lineNum = nums[0] if (!this.callGraphs) { throw new Error( `Encountered a cost line on line ${this.lineNum} before event specification was provided.`, ) } for (let i = 0; i < this.callGraphs.length; i++) { if (costType === 'self') { this.callGraphs[i].addSelfWeight(this.frameInfo(), nums[numPositionFields + i]) } else if (costType === 'child') { this.callGraphs[i].addChildWithTotalWeight( this.frameInfo(), this.calleeFrameInfo(), nums[numPositionFields + i] || 0, ) } } return true } } export function importFromCallgrind( contents: string, importedFileName: string, ): ProfileGroup | null { return new CallgrindParser(contents, importedFileName).parse() }
parseHeaderLine
identifier_name
callgrind.ts
// https://www.valgrind.org/docs/manual/cl-format.html // // Larger example files can be found by searching on github: // https://github.com/search?q=cfn%3D&type=code // // Converting callgrind files into flamegraphs is challenging because callgrind // formatted profiles contain call graphs with weighted nodes and edges, and // such a weighted call graph does not uniquely define a flamegraph. // // Consider a program that looks like this: // // // example.js // function backup(read) { // if (read) { // read() // } else { // write() // } // } // // function start() { // backup(true) // } // // function end() { // backup(false) // } // // start() // end() // // Profiling this program might result in a profile that looks like the // following flame graph defined in Brendan Gregg's plaintext format: // // start;backup;read 4 // end;backup;write 4 // // When we convert this execution into a call-graph, we get the following: // // +------------------+ +---------------+ // | start (self: 0) | | end (self: 0) | // +------------------+ +---------------| // \ / // (total: 4) \ / (total: 4) // v v // +------------------+ // | backup (self: 0) | // +------------------+ // / \ // (total: 4) / \ (total: 4) // v v // +----------------+ +-----------------+ // | read (self: 4) | | write (self: 4) | // +----------------+ +-----------------+ // // In the process of the conversion, we've lost information about the ratio of // time spent in read v.s. write in the start call v.s. the end call. The // following flame graph would yield the exact same call-graph, and therefore // the exact sample call-grind formatted profile: // // start;backup;read 3 // start;backup;write 1 // end;backup;read 1 // end;backup;write 3 // // This is unfortunate, since it means we can't produce a flamegraph that isn't // potentially lying about the what the actual execution behavior was. 
To // produce a flamegraph at all from the call graph representation, we have to // decide how much weight each sub-call should have. Given that we know the // total weight of each node, we'll make the incorrect assumption that every // invocation of a function will have the average distribution of costs among // the sub-function invocations. In the example given, this means we assume that // every invocation of backup() is assumed to spend half its time in read() and // half its time in write(). // // So the flamegraph we'll produce from the given call-graph will actually be: // // start;backup;read 2 // start;backup;write 2 // end;backup;read 2 // end;backup;write 2 // // A particularly bad consequence is that the resulting flamegraph will suggest // that there was at some point a call stack that looked like // strat;backup;write, even though that never happened in the real program // execution. import {CallTreeProfileBuilder, Frame, FrameInfo, Profile, ProfileGroup} from '../lib/profile' import {getOrElse, getOrInsert, KeyedSet} from '../lib/utils' import {ByteFormatter, TimeFormatter} from '../lib/value-formatters' class CallGraph { private frameSet = new KeyedSet<Frame>() private totalWeights = new Map<Frame, number>() private childrenTotalWeights = new Map<Frame, Map<Frame, number>>() constructor(private fileName: string, private fieldName: string) {} private getOrInsertFrame(info: FrameInfo): Frame { return Frame.getOrInsert(this.frameSet, info) } private addToTotalWeight(frame: Frame, weight: number) { if (!this.totalWeights.has(frame)) { this.totalWeights.set(frame, weight) } else { this.totalWeights.set(frame, this.totalWeights.get(frame)! 
+ weight) } } addSelfWeight(frameInfo: FrameInfo, weight: number) { this.addToTotalWeight(this.getOrInsertFrame(frameInfo), weight) } addChildWithTotalWeight(parentInfo: FrameInfo, childInfo: FrameInfo, weight: number) { const parent = this.getOrInsertFrame(parentInfo) const child = this.getOrInsertFrame(childInfo) const childMap = getOrInsert(this.childrenTotalWeights, parent, k => new Map()) if (!childMap.has(child)) { childMap.set(child, weight) } else { childMap.set(child, childMap.get(child) + weight) } this.addToTotalWeight(parent, weight) } toProfile(): Profile { // To convert a call graph into a profile, we first need to identify what // the "root weights" are. "root weights" are the total weight of each frame // while at the bottom of the call-stack. The majority of functions will have // zero weight while at the bottom of the call-stack, since most functions // are never at the bottom of the call-stack. const rootWeights = new Map<Frame, number>() for (let [frame, totalWeight] of this.totalWeights) { rootWeights.set(frame, totalWeight) } for (let [_, childMap] of this.childrenTotalWeights) { for (let [child, weight] of childMap) { rootWeights.set(child, getOrElse(rootWeights, child, () => weight) - weight) } } let totalProfileWeight = 0 for (let [_, rootWeight] of rootWeights) { totalProfileWeight += rootWeight } const profile = new CallTreeProfileBuilder() let unitMultiplier = 1 // These are common field names used by Xdebug. Let's give them special // treatment to more helpfully display units. 
if (this.fieldName === 'Time_(10ns)') { profile.setName(`${this.fileName} -- Time`) unitMultiplier = 10 profile.setValueFormatter(new TimeFormatter('nanoseconds')) } else if (this.fieldName == 'Memory_(bytes)') { profile.setName(`${this.fileName} -- Memory`) profile.setValueFormatter(new ByteFormatter()) } else { profile.setName(`${this.fileName} -- ${this.fieldName}`) } let totalCumulative = 0 const currentStack = new Set<Frame>() const visit = (frame: Frame, callTreeWeight: number) => { if (currentStack.has(frame)) { // Call-graphs are allowed to have cycles. Call-trees are not. In case // we run into a cycle, we'll just avoid recursing into the same subtree // more than once in a call stack. The result will be that the time // spent in the recursive call will instead be attributed as self time // in the parent. return } // We need to calculate how much weight to give to a particular node in // the call-tree based on information from the call-graph. A given node // from the call-graph might correspond to several nodes in the call-tree, // so we need to decide how to distribute the weight of the call-graph // node to the various call-tree nodes. // // We assume that the weighting is evenly distributed. If a call-tree node // X occurs with weights x1 and x2, and we know from the call-graph that // child Y of X has a total weight y, then we assume the child Y of X has // weight y*x1/(x1 + x2) for the first occurrence, and y*x2(y1 + x2) for // the second occurrence. // // This assumption is incorrectly (sometimes wildly so), but we need to // make *some* assumption, and this seems to me the sanest option. // // See the comment at the top of the file for an example where this // assumption can yield especially misleading results. if (callTreeWeight < 1e-4 * totalProfileWeight)
// totalWeightForFrame is the total weight for the given frame in the // entire call graph. const callGraphWeightForFrame = getOrElse(this.totalWeights, frame, () => 0) if (callGraphWeightForFrame === 0) { return } // This is the portion of the total time the given child spends within the // given parent that we'll attribute to this specific path in the call // tree. const ratio = callTreeWeight / callGraphWeightForFrame let selfWeightForFrame = callGraphWeightForFrame profile.enterFrame(frame, totalCumulative * unitMultiplier) currentStack.add(frame) for (let [child, callGraphEdgeWeight] of this.childrenTotalWeights.get(frame) || []) { selfWeightForFrame -= callGraphEdgeWeight const childCallTreeWeight = callGraphEdgeWeight * ratio visit(child, childCallTreeWeight) } currentStack.delete(frame) totalCumulative += selfWeightForFrame * ratio profile.leaveFrame(frame, totalCumulative * unitMultiplier) } for (let [rootFrame, rootWeight] of rootWeights) { if (rootWeight <= 0) { continue } // If we've reached here, it means that the given root frame has some // weight while at the top of the call-stack. visit(rootFrame, rootWeight) } return profile.build() } } // In writing this, I initially tried to use the formal grammar described in // section 3.2 of https://www.valgrind.org/docs/manual/cl-format.html, but // stopped because most of the information isn't relevant for visualization, and // because there's inconsistency between the grammar and subsequence // descriptions. // // For example, the grammar for headers specifies all the valid header names, // but then the writing below that mentions there may be a "totals" or "summary" // header, which should be disallowed by the formal grammar. // // So, instead, I'm not going to bother with a formal parse. Since there are no // real recursive structures in this file format, that should be okay. 
class CallgrindParser { private lines: string[] private lineNum: number private callGraphs: CallGraph[] | null = null private eventsLine: string | null = null private filename: string | null = null private functionName: string | null = null private calleeFilename: string | null = null private calleeFunctionName: string | null = null private savedFileNames: {[id: string]: string} = {} private savedFunctionNames: {[id: string]: string} = {} constructor(contents: string, private importedFileName: string) { this.lines = contents.split('\n') this.lineNum = 0 } parse(): ProfileGroup | null { while (this.lineNum < this.lines.length) { const line = this.lines[this.lineNum++] if (/^\s*#/.exec(line)) { // Line is a comment. Ignore it. continue } if (/^\s*$/.exec(line)) { // Line is empty. Ignore it. continue } if (this.parseHeaderLine(line)) { continue } if (this.parseAssignmentLine(line)) { continue } if (this.parseCostLine(line, 'self')) { continue } throw new Error(`Unrecognized line "${line}" on line ${this.lineNum}`) } if (!this.callGraphs) { return null } return { name: this.importedFileName, indexToView: 0, profiles: this.callGraphs.map(cg => cg.toProfile()), } } private frameInfo(): FrameInfo { const file = this.filename || '(unknown)' const name = this.functionName || '(unknown)' const key = `${file}:${name}` return {key, name, file} } private calleeFrameInfo(): FrameInfo { const file = this.calleeFilename || '(unknown)' const name = this.calleeFunctionName || '(unknown)' const key = `${file}:${name}` return {key, name, file} } private parseHeaderLine(line: string): boolean { const headerMatch = /^\s*(\w+):\s*(.*)+$/.exec(line) if (!headerMatch) return false if (headerMatch[1] !== 'events') { // We don't care about other headers. Ignore this line. return true } // Line specifies the formatting of subsequent cost lines. const fields = headerMatch[2].split(' ') if (this.callGraphs != null) { throw new Error( `Duplicate "events: " lines specified. 
First was "${this.eventsLine}", now received "${line}" on ${this.lineNum}.`, ) } this.callGraphs = fields.map(fieldName => { return new CallGraph(this.importedFileName, fieldName) }) return true } private parseAssignmentLine(line: string): boolean { const assignmentMatch = /^(\w+)=\s*(.*)$/.exec(line) if (!assignmentMatch) return false const key = assignmentMatch[1] const value = assignmentMatch[2] switch (key) { case 'fe': case 'fi': case 'fl': { this.filename = this.parseNameWithCompression(value, this.savedFileNames) this.calleeFilename = this.filename break } case 'fn': { this.functionName = this.parseNameWithCompression(value, this.savedFunctionNames) break } case 'cfi': case 'cfl': { this.calleeFilename = this.parseNameWithCompression(value, this.savedFileNames) break } case 'cfn': { this.calleeFunctionName = this.parseNameWithCompression(value, this.savedFunctionNames) break } case 'calls': { // TODO(jlfwong): This is currently ignoring the number of calls being // made. Accounting for the number of calls might be unhelpful anyway, // since it'll just be copying the exact same frame over-and-over again, // but that might be better than ignoring it. this.parseCostLine(this.lines[this.lineNum++], 'child') break } default: { console.log(`Ignoring assignment to unrecognized key "${line}" on line ${this.lineNum}`) } } return true } private parseNameWithCompression(name: string, saved: {[id: string]: string}): string { { const nameDefinitionMatch = /^\((\d+)\)\s*(.+)$/.exec(name) if (nameDefinitionMatch) { const id = nameDefinitionMatch[1] const name = nameDefinitionMatch[2] if (id in saved) { throw new Error( `Redefinition of name with id: ${id}. Original value was "${saved[id]}". 
Tried to redefine as "${name}" on line ${this.lineNum}.`, ) } saved[id] = name return name } } { const nameUseMatch = /^\((\d+)\)$/.exec(name) if (nameUseMatch) { const id = nameUseMatch[1] if (!(id in saved)) { throw new Error( `Tried to use name with id ${id} on line ${this.lineNum} before it was defined.`, ) } return saved[id] } } return name } private parseCostLine(line: string, costType: 'self' | 'child'): boolean { // TODO(jlfwong): Handle "Subposition compression" // TODO(jlfwong): Allow hexadecimal encoding const parts = line.split(/\s+/) const nums: number[] = [] for (let part of parts) { // As far as I can tell from the specification, the callgrind format does // not accept floating point numbers. const asNum = parseInt(part) if (isNaN(asNum)) { return false } nums.push(asNum) } if (nums.length == 0) { return false } // TODO(jlfwong): Handle custom positions format w/ multiple parts const numPositionFields = 1 // NOTE: We intentionally do not include the line number here because // callgrind uses the line number of the function invocation, not the // line number of the function definition, which conflicts with how // speedscope uses line numbers. // // const lineNum = nums[0] if (!this.callGraphs) { throw new Error( `Encountered a cost line on line ${this.lineNum} before event specification was provided.`, ) } for (let i = 0; i < this.callGraphs.length; i++) { if (costType === 'self') { this.callGraphs[i].addSelfWeight(this.frameInfo(), nums[numPositionFields + i]) } else if (costType === 'child') { this.callGraphs[i].addChildWithTotalWeight( this.frameInfo(), this.calleeFrameInfo(), nums[numPositionFields + i] || 0, ) } } return true } } export function importFromCallgrind( contents: string, importedFileName: string, ): ProfileGroup | null { return new CallgrindParser(contents, importedFileName).parse() }
{ // This assumption about even distribution can cause us to generate a // call tree with dramatically more nodes than the call graph. // // Consider a function which is called 1000 times, where the result is // cached. The first invocation has a complex call tree and may take // 100ms. Let's say that this complex call tree has 250 nodes. // // Subsequent calls use the cached result, so take only 1ms, and have no // children in their call trees. So we have, in total, (1 + 250) + 999 // nodes in the call-tree for a total of 1250 nodes. // // The information specific to each invocation is, however, lost in the // call-graph representation. // // Because of the even distribution assumption we make, this means that // the call-trees of each invocation will have the same shape. Each 1ms // call-tree will look identical to the 100ms call-tree, just // horizontally compacted. So instead of 1251 nodes, we have // 1000*250=250,000 nodes in the resulting call graph. // // To mitigate this explosion of the # of nodes, we ignore subtrees // whose weights are less than 0.01% of the total weight of the profile. return }
conditional_block
seedData.js
const Photo = require('../models/photo') const User = require('../models/user') const Genre = require('../models/genre') const Comment = require('../models/comment') const data = async() => { User.collection.deleteMany({}) Genre.collection.deleteMany({}) Photo.collection.deleteMany({}) Comment.collection.deleteMany({}) const rei = await User.create({ name: 'Rei', email: 'rei@sample.com', password: 'hello', age: 12 }) const leizl = await User.create({ name: 'Leizl', email: 'leizl@sample.com', password: 'hello', age: 17 }) const kring = await User.create({ name: 'Kring', email: 'kring@sample.com', password: 'hello', age: 21 }) const genres = await Genre.insertMany([ {name: "Streetwear"}, {name: "Sneakers"}, {name: "Memes"}, {name: "Sports"}, {name: "Nature"}, {name: "Techonology"}, {name: "Animals"}, {name: "Cars"}, {name: "Food"}, {name: "Music"} ]) // // console.log(genres[0]) const photos = await Photo.insertMany([ {name: "Supreme LV Hoodie", description: "How much?", likes: 301, genre: genres[0], image: "https://blvcks.com/wp-content/uploads/2017/09/o4j_Hmqy2-g.jpg", owner: rei}, {name: "Black Gucci Hoodie", description: "Why?", likes: 222, genre: genres[0], image: "https://cdn-images.farfetch-contents.com/12/56/27/14/12562714_11933138_300.jpg", owner: rei}, {name: "White Gucci Tee", description: "Why tho?", likes: 930, genre: genres[0], image: "https://cdn-images.farfetch-contents.com/12/14/71/57/12147157_10105325_480.jpg", owner: rei}, {name: "Off White Tee", description: "Affordable or nah", likes: 543, genre: genres[0], image: "https://is4.fwrdassets.com/images/p/fw/z/OFFF-MS57_V5.jpg", owner: rei}, {name: "Supreme Grey Bogo Crewneck", description: "To wear or not to wear", likes: 12, genre: genres[0], image: "https://stockx.imgix.net/products/streetwear/Supreme-Box-Logo-Crewneck-FW18-Ash-Grey.jpg?fit=fill&bg=FFFFFF&w=300&h=214&auto=format,compress&q=90&dpr=2&trim=color&updated_at=1544119130", owner: rei}, {name: "Off White Chicago", description: "Almost 
mortgaged my house", likes: 123, genre: genres[1], image: "https://image.goat.com/crop/375/attachments/product_template_additional_pictures/images/008/487/311/original/136666_01.jpg.jpeg", owner: rei}, {name: "Travis Scott Jordan 1s", description: "Pretty Unique", likes: 134, genre: genres[1], image: "https://static.highsnobiety.com/thumbor/xKVF4YDdnDrzV5p6j7CI5Zcjofk=/fit-in/320x213/smart/static.highsnobiety.com/wp-content/uploads/2019/01/11084540/travis-scott-nike-air-jordan-1-reverse-swoosh-release-date-price-product-04.jpg", owner: rei}, {name: "Yeezy Pirate Blacks", description: "Expensive AF", likes: 242, genre: genres[1], image: "https://image.goat.com/crop/750/attachments/product_template_additional_pictures/images/008/490/707/original/29981_01.jpg.jpeg", owner: rei}, {name: "Nike MAG Back to the Future", description: "Used my Flatiron Tuition for these", likes: 246, genre: genres[1], image: "https://stockx-360.imgix.net/mag-kauf_TruView/Images/mag-kauf_TruView/Lv2/img01.jpg?auto=format,compress&q=90&updated_at=1538080256&w=400", owner: rei}, {name: "Timberlands", description: "The New Yorker", likes: 745, genre: genres[1], image: "https://images.timberland.com/is/image/timberland/10061024-HERO?wid=720&hei=720&fit=constrain,1&qlt=85,1&op_usm=1,1,6,0", owner: leizl}, {name: "WHY", description: "Why you do this to me?", likes: 432, genre: genres[2], image: "https://pics.me.me/when-your-body-naturally-and-habitually-wakes-you-up-at-31528076.png", owner: leizl}, {name: "The Truth", description: "Thats a fact !", likes: 635, genre: genres[2], image: "https://img.ifcdn.com/images/4fa610a71a294154fe556ede328f5db06b7ad05942ac3a88d443293718de026c_1.jpg", owner: leizl}, {name: "Sorry", description: "Not Sorry", likes: 385, genre: genres[2], image: "https://pics.me.me/sorry-i-didnt-get-your-text-okayyy-so-who-got-37051846.png", owner: leizl}, {name: "Oops", description: "I swear I'm doing work", likes: 878, genre: genres[2], image: 
"https://pics.me.me/when-your-boss-comes-around-the-corner-and-you-grab-23503350.png", owner: leizl}, {name: "Chill", description: "...", likes: 990, genre: genres[2], image: "https://humorside.com/wp-content/uploads/2017/12/funny-memes-that-will-cure-your-bad-day-05.jpg", owner: leizl}, {name: "LBJ", description: "Easy", likes: 745, genre: genres[3], image: "https://pbs.twimg.com/media/Clf-NPOWIAAzvVj.jpg", owner: leizl}, {name: "Warriors", description: "Hitting the 3-peat", likes: 645, genre: genres[3], image: "https://www.mercurynews.com/wp-content/uploads/2016/08/20150616__warceleb34.jpg?w=460", owner: leizl}, {name: "Damian Lillard", description: "Its Dame Time", likes: 777, genre: genres[3], image: "https://pbs.twimg.com/media/DNsnt0yVAAUIZrs.jpg", owner: rei}, {name: "LBJ to JR", description: "What are you doing..?", likes: 888, genre: genres[3], image: "https://cms.qz.com/wp-content/uploads/2018/06/lebron-james-yelling-at-jr-smith.jpg?quality=75&strip=all&w=1400", owner: kring}, {name: "Mountain", description: "Nice", likes: 245, genre: genres[4], image: "https://www.elitereaders.com/wp-content/uploads/2016/02/featimage-4.jpg", owner: kring}, {name: "Blue", description: "Very Blue", likes: 168, genre: genres[4], image: "https://cdn-image.travelandleisure.com/sites/default/files/styles/1600x1000/public/1482439294/crater-lake-oregon-BLUEST1216.jpg?itok=mGRk1Fd2", owner: kring}, {name: "Green", description: "Very Green", likes: 675, genre: genres[4], image: "https://www.uea.ac.uk/documents/3154295/26870726/Green+spaces++banner.jpg/b5ef8e03-1b39-5855-e563-e85623894a29?t=1530872002959", owner: kring}, {name: "Rain", description: "Dont like the rain", likes: 432, genre: genres[4], image: "https://images.pexels.com/photos/1463530/pexels-photo-1463530.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500", owner: kring}, {name: "Rainbow", description: "Look its a rainbow", likes: 167, genre: genres[4], image: "https://s.hswstatic.com/gif/rainbow-gallery-1.jpg", owner: kring}, 
{name: "Quantum Computer", description: "The Future", likes: 364, genre: genres[5], image: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSnlDT50speD4_TjyV7G1sRjTgj0nqHSxJGspuKC5LpMOFAZ1uc", owner: kring}, {name: "Robo Dog", description: "Whos mans?", likes: 512, genre: genres[5], image: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTFOiXP0CwOmWn8jcWwsu5GEbsFiClNbh2jruY6ygRpW5kEh7eD", owner: kring}, {name: "Antique", description: "Take me back to 1999", likes: 341, genre: genres[5], image: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQdZarDaGuG2YGfjWj6kZbIXwO6EkxgsFrjKiMMuFpCSecZmHmqHw", owner: leizl}, {name: "Echo", description: "Alexa? Is that you?", likes: 892, genre: genres[5], image: "https://target.scene7.com/is/image/Target/GUEST_adaeeb2b-67d4-448d-8f2c-8f5de3b39757?wid=488&hei=488&fmt=pjpeg", owner: kring}, {name: "Nintendo Switch", description: "Play me in Smash", likes: 45, genre: genres[5], image: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRBeoGwQISIS43ygfTvFWxg63wr6SsWIqSac0HJ3lADLh6bGomQ", owner: rei}, {name: "Husky", description: "Cool dog", likes: 355, genre: genres[6], image: "https://cdn.orvis.com/images/DBS_SibHusky.jpg", owner: kring}, {name: "White Horse", description: "Majestic", likes: 344, genre: genres[6], image: "https://image.shutterstock.com/image-photo/white-horse-standing-on-green-260nw-756457801.jpg", owner: leizl}, {name: "Black Pug", description: "Cute", likes: 135, genre: genres[6], image: "https://wallpapercave.com/wp/bFB4V5c.jpg", owner: rei}, {name: "Some Kitten", description: "Stay this size", likes: 900, genre: genres[6], image: "https://dcist.com/wp-content/uploads/sites/3/2019/04/Gem2-1500x1346.jpg", owner: leizl}, {name: "The Lion King", description: "Roar", likes: 876, genre: genres[6], image: "https://cosmos-images2.imgix.net/file/spina/photo/14772/GettyImages-691120979.jpg?ixlib=rails-2.1.4&auto=format&ch=Width%2CDPR&fit=max&w=835", owner: rei}, {name: "GTR", description: "Fast 
affffff", likes: 444, genre: genres[7], image: "https://www.nissanusa.com/content/dam/Nissan/us/vehicles/gtr/r35/2_minor_change/overview/18tdi-gtrhelios104.jpg.ximg.l_full_m.smart.jpg", owner: kring}, {name: "Bugatti", description: "I woke up in a new Bugatti", likes: 567, genre: genres[7], image: "https://hips.hearstapps.com/amv-prod-cad-assets.s3.amazonaws.com/vdat/submodels/bugatti_divo_bugatti-divo_2020-1535127766731.jpg", owner: kring}, {name: "Mercedes C63", description: "Too much class", likes: 888, genre: genres[7], image: "https://f7432d8eadcf865aa9d9-9c672a3a4ecaaacdf2fee3b3e6fd2716.ssl.cf3.rackcdn.com/C2299/U6886/IMG_17113-medium.jpg", owner: rei}, {name: "Tesla", description: "No gas, No problem", likes: 908, genre: genres[7], image: "https://media.wired.com/photos/5926c04bf3e2356fd800a53a/master/w_2400,c_limit/TeslaSTA.jpg", owner: kring}, {name: "Lamborghini", description: "Get low low low", likes: 688, genre: genres[7], image: "https://content.homenetiol.com/2001243/2130496/0x0/e38c567224374fb0a3f6a7a83b94bc57.jpg", owner: kring}, {name: "Pad Thai", description: "My go to", likes: 396, genre: genres[8], image: "https://pinchofyum.com/wp-content/uploads/Vegetarian-Pad-Thai-Recipe.jpg", owner: leizl}, {name: "General Tsos Chicken", description: "First name General, Last name Tso", likes: 574, genre: genres[8], image: "https://www.seriouseats.com/recipes/images/2015/04/20140328-general-tsos-chicken-recipe-food-lab-1-1500x1125.jpg", owner: leizl}, {name: "STEAK", description: "Medium Rare Please", likes: 79, genre: genres[8], image: "https://hips.hearstapps.com/vidthumb/images/delish-cajun-butter-steak-still006-1528495387.jpg", owner: kring}, {name: "Vegan", description: "Rabbit food", likes: 246, genre: genres[8], image: "https://cdn1.medicalnewstoday.com/content/images/articles/324/324343/plant-meal.jpg", owner: kring}, {name: "Popeyes", description: "Louisiana is the way to go", likes: 364, genre: genres[8], image: 
"https://images.firstwefeast.com/complex/images/c_limit,f_auto,fl_lossy,q_auto,w_768/xcnbpr1e475lafy6eay9/popeyes", owner: kring}, {name: "DJ ???", description: "Give him a name", likes: 6, genre: genres[9], image: "https://images.unsplash.com/photo-1533113860586-3da7fe05daae?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1000&q=80", owner: rei}, {name: "DRUM DRUM DRUM", description: "I started a band", likes: 111, genre: genres[9], image: "https://c.stocksy.com/a/y9G900/z9/2207074.jpg?1551026928", owner: leizl}, {name: "Yamaha Piano", description: "Play me a song", likes: 322, genre: genres[9], image: "https://www.pianosplus.com/wp-content/uploads/yamaha-pianos-types-650x368.jpg", owner: rei}, {name: "Festival?", description: "Do you even rave?", likes: 45, genre: genres[9], image: "https://s29745.pcdn.co/wp-content/uploads/2018/09/41580112_10156173992978025_4013920836766400512_o.jpg.optimal.jpg", owner: leizl},
const comments = await Comment.insertMany([ {content: "First Comment random", photo: photos[0], user: rei}, {content: "Second Comment random", photo: photos[0], user: rei}, {content: "Third Comment random", photo: photos[1], user: leizl} ]) } // const createData = data() // console.log(newUser) module.exports = data
{name: "Spotitube", description: "Pick your Poison", likes: 23, genre: genres[9], image: "https://techcrunch.com/wp-content/uploads/2016/07/spotify-over-youtube.png?w=730&crop=1", owner: rei}, ])
random_line_split
mesh_generator.rs
/* * MIT License * * Copyright (c) 2020 bonsairobo * Copyright (c) 2021 Robert Swain <robert.swain@gmail.com> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ use crate::{ app_state::AppState, fog::FogConfig, mesh_fade::{FadeUniform, FADE_IN, FADE_OUT}, utilities::bevy_util::thread_local_resource::ThreadLocalResource, voxel_map::{Voxel, VoxelMap}, }; use bevy_mod_bounding::{aabb::Aabb, obb::Obb}; use bevy_rapier3d::prelude::{ColliderBundle, ColliderShape, RigidBodyBundle, RigidBodyType}; use building_blocks::{ mesh::*, prelude::*, storage::{LodChunkKey3, LodChunkUpdate3, SmallKeyHashMap}, }; use bevy::{ asset::prelude::*, ecs, prelude::*, render::{mesh::Indices, pipeline::PrimitiveTopology}, tasks::ComputeTaskPool, }; use std::{cell::RefCell, collections::VecDeque}; fn max_mesh_creations_per_frame(pool: &ComputeTaskPool) -> usize { 40 * pool.thread_num() } #[derive(Default)] pub struct MeshCommandQueue { commands: VecDeque<MeshCommand>, } impl MeshCommandQueue { pub fn enqueue(&mut self, command: MeshCommand) { self.commands.push_front(command); } pub fn is_empty(&self) -> bool { self.commands.is_empty() } pub fn len(&self) -> usize { self.commands.len() } pub fn clear(&mut self) { self.commands.clear(); } } // PERF: try to eliminate the use of multiple Vecs #[derive(Clone, Debug, Eq, PartialEq)] pub enum MeshCommand { Create(LodChunkKey3), Update(LodChunkUpdate3), } #[derive(Default)] pub struct ChunkMeshes { // Map from chunk key to mesh entity. 
entities: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>, remove_queue: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>, } impl ChunkMeshes { pub fn clear_entities(&mut self, commands: &mut Commands, meshes: &mut Assets<Mesh>) { self.entities.retain(|_, (entity, mesh)| { clear_up_entity(entity, mesh, commands, meshes); false }); self.remove_queue.retain(|_, (entity, mesh)| { clear_up_entity(entity, mesh, commands, meshes); false }); } pub fn remove_entity( &mut self, lod_chunk_key: &LodChunkKey3, commands: &mut Commands, meshes: &mut Assets<Mesh>, ) { if let Some((entity, mesh)) = self.entities.remove(lod_chunk_key) { clear_up_entity(&entity, &mesh, commands, meshes); } } } fn clear_up_entity( entity: &Entity, mesh: &Handle<Mesh>, commands: &mut Commands, meshes: &mut Assets<Mesh>, ) { commands.entity(*entity).despawn(); meshes.remove(mesh); } // Utility struct for building the mesh #[derive(Debug, Clone)] struct MeshBuf { pub positions: Vec<[f32; 3]>, pub normals: Vec<[f32; 3]>, pub tex_coords: Vec<[f32; 2]>, pub layer: Vec<u32>, pub indices: Vec<u32>, pub extent: Extent3i, } impl Default for MeshBuf { fn default() -> Self { Self { positions: Vec::new(), normals: Vec::new(), tex_coords: Vec::new(), layer: Vec::new(), indices: Vec::new(), extent: Extent3i::from_min_and_shape(PointN([0, 0, 0]), PointN([0, 0, 0])), } } } impl MeshBuf { fn add_quad( &mut self, face: &OrientedCubeFace, quad: &UnorientedQuad, voxel_size: f32, u_flip_face: Axis3, layer: u32, ) { let start_index = self.positions.len() as u32; self.positions .extend_from_slice(&face.quad_mesh_positions(quad, voxel_size)); self.normals.extend_from_slice(&face.quad_mesh_normals()); let flip_v = true; self.tex_coords .extend_from_slice(&face.tex_coords(u_flip_face, flip_v, quad)); self.layer.extend_from_slice(&[layer; 4]); self.indices .extend_from_slice(&face.quad_mesh_indices(start_index)); } } pub struct ArrayTextureMaterial(pub Handle<StandardMaterial>); pub struct ArrayTexturePipelines(pub 
RenderPipelines); /// Generates new meshes for all dirty chunks. pub fn mesh_generator_system( mut commands: Commands, pool: Res<ComputeTaskPool>, voxel_map: Res<VoxelMap>, local_mesh_buffers: ecs::system::Local<ThreadLocalMeshBuffers>, mut mesh_commands: ResMut<MeshCommandQueue>, mut mesh_assets: ResMut<Assets<Mesh>>, mut chunk_meshes: ResMut<ChunkMeshes>, array_texture_pipelines: Res<ArrayTexturePipelines>, array_texture_material: Res<ArrayTextureMaterial>, mut state: ResMut<State<AppState>>, ) { let first_run = chunk_meshes.entities.is_empty(); let new_chunk_meshes = apply_mesh_commands( &*voxel_map, &*local_mesh_buffers, &*pool, &mut *mesh_commands, &mut *chunk_meshes, &mut commands, first_run, ); spawn_mesh_entities( new_chunk_meshes, &mut commands, &mut *mesh_assets, &mut *chunk_meshes, &*array_texture_pipelines, &*array_texture_material, ); if first_run { println!("MESHES GENERATED!\n-> AppState::Running"); state.set(AppState::Running).unwrap(); } } fn apply_mesh_commands( voxel_map: &VoxelMap, local_mesh_buffers: &ThreadLocalMeshBuffers, pool: &ComputeTaskPool, mesh_commands: &mut MeshCommandQueue, chunk_meshes: &mut ChunkMeshes, commands: &mut Commands, first_run: bool, ) -> Vec<(LodChunkKey3, Option<MeshBuf>)> { let num_chunks_to_mesh = mesh_commands.len().min(max_mesh_creations_per_frame(pool)); let mut num_creates = 0; let mut num_updates = 0; pool.scope(|s| { let mut num_meshes_created = 0; for command in mesh_commands.commands.iter().rev().cloned() { match command { MeshCommand::Create(lod_key) => { if !chunk_meshes.entities.contains_key(&lod_key) { num_creates += 1; num_meshes_created += 1; s.spawn(async move { ( lod_key, create_mesh_for_chunk(lod_key, voxel_map, local_mesh_buffers), ) }); } } MeshCommand::Update(update) => { num_updates += 1; match update { LodChunkUpdate3::Split(split) => { if let Some((entity, mesh)) = chunk_meshes.entities.remove(&split.old_chunk) { chunk_meshes .remove_queue .insert(split.old_chunk, (entity, mesh)); 
commands.entity(entity).insert(FADE_OUT); } for &lod_key in split.new_chunks.iter() { if !chunk_meshes.entities.contains_key(&lod_key) { num_meshes_created += 1; s.spawn(async move { ( lod_key, create_mesh_for_chunk( lod_key, voxel_map, local_mesh_buffers, ), ) }); } } } LodChunkUpdate3::Merge(merge) => { for lod_key in merge.old_chunks.iter() { if let Some((entity, mesh)) = chunk_meshes.entities.remove(lod_key) { chunk_meshes.remove_queue.insert(*lod_key, (entity, mesh)); commands.entity(entity).insert(FADE_OUT); } } if !chunk_meshes.entities.contains_key(&merge.new_chunk) { num_meshes_created += 1; s.spawn(async move { ( merge.new_chunk, create_mesh_for_chunk( merge.new_chunk, voxel_map, local_mesh_buffers, ), ) }); } } } } } if !first_run && num_meshes_created >= num_chunks_to_mesh { break; } } let new_length = mesh_commands.len() - (num_creates + num_updates); mesh_commands.commands.truncate(new_length); }) } pub fn mesh_despawn_system( mut commands: Commands, mut chunk_meshes: ResMut<ChunkMeshes>, mut meshes: ResMut<Assets<Mesh>>, query: Query<(&FadeUniform, &LodChunkKey3), With<Handle<Mesh>>>, ) { for (fade, lod_chunk_key) in query.iter() { if !fade.fade_in && fade.remaining == 0.0 { if let Some((entity, mesh)) = chunk_meshes.remove_queue.remove(lod_chunk_key) { commands.entity(entity).despawn(); meshes.remove(&mesh); } } } } fn create_mesh_for_chunk( key: LodChunkKey3, voxel_map: &VoxelMap, local_mesh_buffers: &ThreadLocalMeshBuffers, ) -> Option<MeshBuf> { let chunks = voxel_map.pyramid.level(key.lod); let chunk_extent = chunks.indexer.extent_for_chunk_at_key(key.chunk_key); let padded_chunk_extent = padded_greedy_quads_chunk_extent(&chunk_extent); // Keep a thread-local cache of buffers to avoid expensive reallocations every time we want to mesh a chunk. 
let mesh_tls = local_mesh_buffers.get(); let mut surface_nets_buffers = mesh_tls .get_or_create_with(|| { RefCell::new(LocalSurfaceNetsBuffers { mesh_buffer: GreedyQuadsBuffer::new( padded_chunk_extent, RIGHT_HANDED_Y_UP_CONFIG.quad_groups(), ), neighborhood_buffer: Array3x1::fill(padded_chunk_extent, Voxel::EMPTY), }) }) .borrow_mut(); let LocalSurfaceNetsBuffers { mesh_buffer, neighborhood_buffer, } = &mut *surface_nets_buffers; // While the chunk shape doesn't change, we need to make sure that it's in the right position for each particular chunk. neighborhood_buffer.set_minimum(padded_chunk_extent.minimum); // Only copy the chunk_extent, leaving the padding empty so that we don't get holes on LOD boundaries. copy_extent(&chunk_extent, chunks, neighborhood_buffer); let voxel_size = (1 << key.lod) as f32; greedy_quads(neighborhood_buffer, &padded_chunk_extent, &mut *mesh_buffer); if mesh_buffer.num_quads() == 0 { None } else { let mut mesh_buf = MeshBuf::default(); mesh_buf.extent = chunk_extent * voxel_map.pyramid.chunk_shape(); for group in mesh_buffer.quad_groups.iter() { for quad in group.quads.iter() { let mat = neighborhood_buffer.get(quad.minimum); mesh_buf.add_quad( &group.face, quad, voxel_size, RIGHT_HANDED_Y_UP_CONFIG.u_flip_face, mat.0 as u32 - 1, ); } } Some(mesh_buf) } } // ThreadLocal doesn't let you get a mutable reference, so we need to use RefCell. We lock this down to only be used in this // module as a Local resource, so we know it's safe. 
type ThreadLocalMeshBuffers = ThreadLocalResource<RefCell<LocalSurfaceNetsBuffers>>; pub struct LocalSurfaceNetsBuffers { mesh_buffer: GreedyQuadsBuffer, neighborhood_buffer: Array3x1<Voxel>, } fn spawn_mesh_entities( new_chunk_meshes: Vec<(LodChunkKey3, Option<MeshBuf>)>, commands: &mut Commands, mesh_assets: &mut Assets<Mesh>, chunk_meshes: &mut ChunkMeshes, array_texture_pipelines: &ArrayTexturePipelines, array_texture_material: &ArrayTextureMaterial, ) { for (lod_chunk_key, item) in new_chunk_meshes.into_iter() { let old_mesh = if let Some(mesh_buf) = item { if mesh_buf.indices.is_empty() { None } else
} else { chunk_meshes.entities.remove(&lod_chunk_key) }; if let Some((entity, _mesh)) = old_mesh { commands.entity(entity).insert(FADE_OUT); } } }
{ let mut render_mesh = Mesh::new(PrimitiveTopology::TriangleList); let MeshBuf { positions, normals, tex_coords, layer, indices, extent, } = mesh_buf; render_mesh.set_attribute(Mesh::ATTRIBUTE_POSITION, positions.clone()); render_mesh.set_attribute(Mesh::ATTRIBUTE_NORMAL, normals); render_mesh.set_attribute(Mesh::ATTRIBUTE_UV_0, tex_coords); render_mesh.set_attribute("Vertex_Layer", layer); render_mesh.set_indices(Some(Indices::U32(indices.clone()))); let mesh_handle = mesh_assets.add(render_mesh); let minimum = Vec3::new( extent.minimum.0[0] as f32, extent.minimum.0[1] as f32, extent.minimum.0[2] as f32, ); let maximum = Vec3::new( extent.max().0[0] as f32, extent.max().0[1] as f32, extent.max().0[2] as f32, ); let entity = commands .spawn_bundle(PbrBundle { mesh: mesh_handle.clone(), render_pipelines: array_texture_pipelines.0.clone(), material: array_texture_material.0.clone(), ..Default::default() }) .insert_bundle(( FADE_IN, lod_chunk_key, Obb::from_aabb_orientation( Aabb::from_extents(minimum, maximum), Quat::IDENTITY, ), FogConfig::default(), )) .id(); if lod_chunk_key.lod == 0 { let collider_vertices = positions .iter() .cloned() .map(|p| bevy_rapier3d::rapier::math::Point::from_slice(&p)) .collect(); let collider_indices: Vec<[u32; 3]> = indices.chunks(3).map(|i| [i[0], i[1], i[2]]).collect(); commands .entity(entity) .insert_bundle(RigidBodyBundle { body_type: RigidBodyType::Static, ..Default::default() }) .insert_bundle(ColliderBundle { shape: ColliderShape::trimesh(collider_vertices, collider_indices), ..Default::default() }); } chunk_meshes .entities .insert(lod_chunk_key, (entity, mesh_handle)) }
conditional_block
mesh_generator.rs
/* * MIT License * * Copyright (c) 2020 bonsairobo * Copyright (c) 2021 Robert Swain <robert.swain@gmail.com> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ use crate::{ app_state::AppState, fog::FogConfig, mesh_fade::{FadeUniform, FADE_IN, FADE_OUT}, utilities::bevy_util::thread_local_resource::ThreadLocalResource, voxel_map::{Voxel, VoxelMap}, }; use bevy_mod_bounding::{aabb::Aabb, obb::Obb}; use bevy_rapier3d::prelude::{ColliderBundle, ColliderShape, RigidBodyBundle, RigidBodyType}; use building_blocks::{ mesh::*, prelude::*, storage::{LodChunkKey3, LodChunkUpdate3, SmallKeyHashMap}, }; use bevy::{ asset::prelude::*, ecs, prelude::*, render::{mesh::Indices, pipeline::PrimitiveTopology}, tasks::ComputeTaskPool, }; use std::{cell::RefCell, collections::VecDeque}; fn max_mesh_creations_per_frame(pool: &ComputeTaskPool) -> usize { 40 * pool.thread_num() } #[derive(Default)] pub struct MeshCommandQueue { commands: VecDeque<MeshCommand>, } impl MeshCommandQueue { pub fn enqueue(&mut self, command: MeshCommand) { self.commands.push_front(command); } pub fn is_empty(&self) -> bool { self.commands.is_empty() } pub fn len(&self) -> usize { self.commands.len() } pub fn clear(&mut self) { self.commands.clear(); } } // PERF: try to eliminate the use of multiple Vecs #[derive(Clone, Debug, Eq, PartialEq)] pub enum MeshCommand { Create(LodChunkKey3), Update(LodChunkUpdate3), } #[derive(Default)] pub struct ChunkMeshes { // Map from chunk key to mesh entity. entities: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>, remove_queue: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>, } impl ChunkMeshes { pub fn
(&mut self, commands: &mut Commands, meshes: &mut Assets<Mesh>) { self.entities.retain(|_, (entity, mesh)| { clear_up_entity(entity, mesh, commands, meshes); false }); self.remove_queue.retain(|_, (entity, mesh)| { clear_up_entity(entity, mesh, commands, meshes); false }); } pub fn remove_entity( &mut self, lod_chunk_key: &LodChunkKey3, commands: &mut Commands, meshes: &mut Assets<Mesh>, ) { if let Some((entity, mesh)) = self.entities.remove(lod_chunk_key) { clear_up_entity(&entity, &mesh, commands, meshes); } } } fn clear_up_entity( entity: &Entity, mesh: &Handle<Mesh>, commands: &mut Commands, meshes: &mut Assets<Mesh>, ) { commands.entity(*entity).despawn(); meshes.remove(mesh); } // Utility struct for building the mesh #[derive(Debug, Clone)] struct MeshBuf { pub positions: Vec<[f32; 3]>, pub normals: Vec<[f32; 3]>, pub tex_coords: Vec<[f32; 2]>, pub layer: Vec<u32>, pub indices: Vec<u32>, pub extent: Extent3i, } impl Default for MeshBuf { fn default() -> Self { Self { positions: Vec::new(), normals: Vec::new(), tex_coords: Vec::new(), layer: Vec::new(), indices: Vec::new(), extent: Extent3i::from_min_and_shape(PointN([0, 0, 0]), PointN([0, 0, 0])), } } } impl MeshBuf { fn add_quad( &mut self, face: &OrientedCubeFace, quad: &UnorientedQuad, voxel_size: f32, u_flip_face: Axis3, layer: u32, ) { let start_index = self.positions.len() as u32; self.positions .extend_from_slice(&face.quad_mesh_positions(quad, voxel_size)); self.normals.extend_from_slice(&face.quad_mesh_normals()); let flip_v = true; self.tex_coords .extend_from_slice(&face.tex_coords(u_flip_face, flip_v, quad)); self.layer.extend_from_slice(&[layer; 4]); self.indices .extend_from_slice(&face.quad_mesh_indices(start_index)); } } pub struct ArrayTextureMaterial(pub Handle<StandardMaterial>); pub struct ArrayTexturePipelines(pub RenderPipelines); /// Generates new meshes for all dirty chunks. 
pub fn mesh_generator_system( mut commands: Commands, pool: Res<ComputeTaskPool>, voxel_map: Res<VoxelMap>, local_mesh_buffers: ecs::system::Local<ThreadLocalMeshBuffers>, mut mesh_commands: ResMut<MeshCommandQueue>, mut mesh_assets: ResMut<Assets<Mesh>>, mut chunk_meshes: ResMut<ChunkMeshes>, array_texture_pipelines: Res<ArrayTexturePipelines>, array_texture_material: Res<ArrayTextureMaterial>, mut state: ResMut<State<AppState>>, ) { let first_run = chunk_meshes.entities.is_empty(); let new_chunk_meshes = apply_mesh_commands( &*voxel_map, &*local_mesh_buffers, &*pool, &mut *mesh_commands, &mut *chunk_meshes, &mut commands, first_run, ); spawn_mesh_entities( new_chunk_meshes, &mut commands, &mut *mesh_assets, &mut *chunk_meshes, &*array_texture_pipelines, &*array_texture_material, ); if first_run { println!("MESHES GENERATED!\n-> AppState::Running"); state.set(AppState::Running).unwrap(); } } fn apply_mesh_commands( voxel_map: &VoxelMap, local_mesh_buffers: &ThreadLocalMeshBuffers, pool: &ComputeTaskPool, mesh_commands: &mut MeshCommandQueue, chunk_meshes: &mut ChunkMeshes, commands: &mut Commands, first_run: bool, ) -> Vec<(LodChunkKey3, Option<MeshBuf>)> { let num_chunks_to_mesh = mesh_commands.len().min(max_mesh_creations_per_frame(pool)); let mut num_creates = 0; let mut num_updates = 0; pool.scope(|s| { let mut num_meshes_created = 0; for command in mesh_commands.commands.iter().rev().cloned() { match command { MeshCommand::Create(lod_key) => { if !chunk_meshes.entities.contains_key(&lod_key) { num_creates += 1; num_meshes_created += 1; s.spawn(async move { ( lod_key, create_mesh_for_chunk(lod_key, voxel_map, local_mesh_buffers), ) }); } } MeshCommand::Update(update) => { num_updates += 1; match update { LodChunkUpdate3::Split(split) => { if let Some((entity, mesh)) = chunk_meshes.entities.remove(&split.old_chunk) { chunk_meshes .remove_queue .insert(split.old_chunk, (entity, mesh)); commands.entity(entity).insert(FADE_OUT); } for &lod_key in 
split.new_chunks.iter() { if !chunk_meshes.entities.contains_key(&lod_key) { num_meshes_created += 1; s.spawn(async move { ( lod_key, create_mesh_for_chunk( lod_key, voxel_map, local_mesh_buffers, ), ) }); } } } LodChunkUpdate3::Merge(merge) => { for lod_key in merge.old_chunks.iter() { if let Some((entity, mesh)) = chunk_meshes.entities.remove(lod_key) { chunk_meshes.remove_queue.insert(*lod_key, (entity, mesh)); commands.entity(entity).insert(FADE_OUT); } } if !chunk_meshes.entities.contains_key(&merge.new_chunk) { num_meshes_created += 1; s.spawn(async move { ( merge.new_chunk, create_mesh_for_chunk( merge.new_chunk, voxel_map, local_mesh_buffers, ), ) }); } } } } } if !first_run && num_meshes_created >= num_chunks_to_mesh { break; } } let new_length = mesh_commands.len() - (num_creates + num_updates); mesh_commands.commands.truncate(new_length); }) } pub fn mesh_despawn_system( mut commands: Commands, mut chunk_meshes: ResMut<ChunkMeshes>, mut meshes: ResMut<Assets<Mesh>>, query: Query<(&FadeUniform, &LodChunkKey3), With<Handle<Mesh>>>, ) { for (fade, lod_chunk_key) in query.iter() { if !fade.fade_in && fade.remaining == 0.0 { if let Some((entity, mesh)) = chunk_meshes.remove_queue.remove(lod_chunk_key) { commands.entity(entity).despawn(); meshes.remove(&mesh); } } } } fn create_mesh_for_chunk( key: LodChunkKey3, voxel_map: &VoxelMap, local_mesh_buffers: &ThreadLocalMeshBuffers, ) -> Option<MeshBuf> { let chunks = voxel_map.pyramid.level(key.lod); let chunk_extent = chunks.indexer.extent_for_chunk_at_key(key.chunk_key); let padded_chunk_extent = padded_greedy_quads_chunk_extent(&chunk_extent); // Keep a thread-local cache of buffers to avoid expensive reallocations every time we want to mesh a chunk. 
let mesh_tls = local_mesh_buffers.get(); let mut surface_nets_buffers = mesh_tls .get_or_create_with(|| { RefCell::new(LocalSurfaceNetsBuffers { mesh_buffer: GreedyQuadsBuffer::new( padded_chunk_extent, RIGHT_HANDED_Y_UP_CONFIG.quad_groups(), ), neighborhood_buffer: Array3x1::fill(padded_chunk_extent, Voxel::EMPTY), }) }) .borrow_mut(); let LocalSurfaceNetsBuffers { mesh_buffer, neighborhood_buffer, } = &mut *surface_nets_buffers; // While the chunk shape doesn't change, we need to make sure that it's in the right position for each particular chunk. neighborhood_buffer.set_minimum(padded_chunk_extent.minimum); // Only copy the chunk_extent, leaving the padding empty so that we don't get holes on LOD boundaries. copy_extent(&chunk_extent, chunks, neighborhood_buffer); let voxel_size = (1 << key.lod) as f32; greedy_quads(neighborhood_buffer, &padded_chunk_extent, &mut *mesh_buffer); if mesh_buffer.num_quads() == 0 { None } else { let mut mesh_buf = MeshBuf::default(); mesh_buf.extent = chunk_extent * voxel_map.pyramid.chunk_shape(); for group in mesh_buffer.quad_groups.iter() { for quad in group.quads.iter() { let mat = neighborhood_buffer.get(quad.minimum); mesh_buf.add_quad( &group.face, quad, voxel_size, RIGHT_HANDED_Y_UP_CONFIG.u_flip_face, mat.0 as u32 - 1, ); } } Some(mesh_buf) } } // ThreadLocal doesn't let you get a mutable reference, so we need to use RefCell. We lock this down to only be used in this // module as a Local resource, so we know it's safe. 
type ThreadLocalMeshBuffers = ThreadLocalResource<RefCell<LocalSurfaceNetsBuffers>>; pub struct LocalSurfaceNetsBuffers { mesh_buffer: GreedyQuadsBuffer, neighborhood_buffer: Array3x1<Voxel>, } fn spawn_mesh_entities( new_chunk_meshes: Vec<(LodChunkKey3, Option<MeshBuf>)>, commands: &mut Commands, mesh_assets: &mut Assets<Mesh>, chunk_meshes: &mut ChunkMeshes, array_texture_pipelines: &ArrayTexturePipelines, array_texture_material: &ArrayTextureMaterial, ) { for (lod_chunk_key, item) in new_chunk_meshes.into_iter() { let old_mesh = if let Some(mesh_buf) = item { if mesh_buf.indices.is_empty() { None } else { let mut render_mesh = Mesh::new(PrimitiveTopology::TriangleList); let MeshBuf { positions, normals, tex_coords, layer, indices, extent, } = mesh_buf; render_mesh.set_attribute(Mesh::ATTRIBUTE_POSITION, positions.clone()); render_mesh.set_attribute(Mesh::ATTRIBUTE_NORMAL, normals); render_mesh.set_attribute(Mesh::ATTRIBUTE_UV_0, tex_coords); render_mesh.set_attribute("Vertex_Layer", layer); render_mesh.set_indices(Some(Indices::U32(indices.clone()))); let mesh_handle = mesh_assets.add(render_mesh); let minimum = Vec3::new( extent.minimum.0[0] as f32, extent.minimum.0[1] as f32, extent.minimum.0[2] as f32, ); let maximum = Vec3::new( extent.max().0[0] as f32, extent.max().0[1] as f32, extent.max().0[2] as f32, ); let entity = commands .spawn_bundle(PbrBundle { mesh: mesh_handle.clone(), render_pipelines: array_texture_pipelines.0.clone(), material: array_texture_material.0.clone(), ..Default::default() }) .insert_bundle(( FADE_IN, lod_chunk_key, Obb::from_aabb_orientation( Aabb::from_extents(minimum, maximum), Quat::IDENTITY, ), FogConfig::default(), )) .id(); if lod_chunk_key.lod == 0 { let collider_vertices = positions .iter() .cloned() .map(|p| bevy_rapier3d::rapier::math::Point::from_slice(&p)) .collect(); let collider_indices: Vec<[u32; 3]> = indices.chunks(3).map(|i| [i[0], i[1], i[2]]).collect(); commands .entity(entity) .insert_bundle(RigidBodyBundle { 
body_type: RigidBodyType::Static, ..Default::default() }) .insert_bundle(ColliderBundle { shape: ColliderShape::trimesh(collider_vertices, collider_indices), ..Default::default() }); } chunk_meshes .entities .insert(lod_chunk_key, (entity, mesh_handle)) } } else { chunk_meshes.entities.remove(&lod_chunk_key) }; if let Some((entity, _mesh)) = old_mesh { commands.entity(entity).insert(FADE_OUT); } } }
clear_entities
identifier_name
mesh_generator.rs
/* * MIT License * * Copyright (c) 2020 bonsairobo * Copyright (c) 2021 Robert Swain <robert.swain@gmail.com> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ use crate::{ app_state::AppState, fog::FogConfig, mesh_fade::{FadeUniform, FADE_IN, FADE_OUT}, utilities::bevy_util::thread_local_resource::ThreadLocalResource, voxel_map::{Voxel, VoxelMap}, }; use bevy_mod_bounding::{aabb::Aabb, obb::Obb}; use bevy_rapier3d::prelude::{ColliderBundle, ColliderShape, RigidBodyBundle, RigidBodyType}; use building_blocks::{ mesh::*, prelude::*, storage::{LodChunkKey3, LodChunkUpdate3, SmallKeyHashMap}, }; use bevy::{ asset::prelude::*, ecs, prelude::*, render::{mesh::Indices, pipeline::PrimitiveTopology}, tasks::ComputeTaskPool, }; use std::{cell::RefCell, collections::VecDeque}; fn max_mesh_creations_per_frame(pool: &ComputeTaskPool) -> usize { 40 * pool.thread_num() } #[derive(Default)] pub struct MeshCommandQueue { commands: VecDeque<MeshCommand>, } impl MeshCommandQueue { pub fn enqueue(&mut self, command: MeshCommand)
pub fn is_empty(&self) -> bool { self.commands.is_empty() } pub fn len(&self) -> usize { self.commands.len() } pub fn clear(&mut self) { self.commands.clear(); } } // PERF: try to eliminate the use of multiple Vecs #[derive(Clone, Debug, Eq, PartialEq)] pub enum MeshCommand { Create(LodChunkKey3), Update(LodChunkUpdate3), } #[derive(Default)] pub struct ChunkMeshes { // Map from chunk key to mesh entity. entities: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>, remove_queue: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>, } impl ChunkMeshes { pub fn clear_entities(&mut self, commands: &mut Commands, meshes: &mut Assets<Mesh>) { self.entities.retain(|_, (entity, mesh)| { clear_up_entity(entity, mesh, commands, meshes); false }); self.remove_queue.retain(|_, (entity, mesh)| { clear_up_entity(entity, mesh, commands, meshes); false }); } pub fn remove_entity( &mut self, lod_chunk_key: &LodChunkKey3, commands: &mut Commands, meshes: &mut Assets<Mesh>, ) { if let Some((entity, mesh)) = self.entities.remove(lod_chunk_key) { clear_up_entity(&entity, &mesh, commands, meshes); } } } fn clear_up_entity( entity: &Entity, mesh: &Handle<Mesh>, commands: &mut Commands, meshes: &mut Assets<Mesh>, ) { commands.entity(*entity).despawn(); meshes.remove(mesh); } // Utility struct for building the mesh #[derive(Debug, Clone)] struct MeshBuf { pub positions: Vec<[f32; 3]>, pub normals: Vec<[f32; 3]>, pub tex_coords: Vec<[f32; 2]>, pub layer: Vec<u32>, pub indices: Vec<u32>, pub extent: Extent3i, } impl Default for MeshBuf { fn default() -> Self { Self { positions: Vec::new(), normals: Vec::new(), tex_coords: Vec::new(), layer: Vec::new(), indices: Vec::new(), extent: Extent3i::from_min_and_shape(PointN([0, 0, 0]), PointN([0, 0, 0])), } } } impl MeshBuf { fn add_quad( &mut self, face: &OrientedCubeFace, quad: &UnorientedQuad, voxel_size: f32, u_flip_face: Axis3, layer: u32, ) { let start_index = self.positions.len() as u32; self.positions 
.extend_from_slice(&face.quad_mesh_positions(quad, voxel_size)); self.normals.extend_from_slice(&face.quad_mesh_normals()); let flip_v = true; self.tex_coords .extend_from_slice(&face.tex_coords(u_flip_face, flip_v, quad)); self.layer.extend_from_slice(&[layer; 4]); self.indices .extend_from_slice(&face.quad_mesh_indices(start_index)); } } pub struct ArrayTextureMaterial(pub Handle<StandardMaterial>); pub struct ArrayTexturePipelines(pub RenderPipelines); /// Generates new meshes for all dirty chunks. pub fn mesh_generator_system( mut commands: Commands, pool: Res<ComputeTaskPool>, voxel_map: Res<VoxelMap>, local_mesh_buffers: ecs::system::Local<ThreadLocalMeshBuffers>, mut mesh_commands: ResMut<MeshCommandQueue>, mut mesh_assets: ResMut<Assets<Mesh>>, mut chunk_meshes: ResMut<ChunkMeshes>, array_texture_pipelines: Res<ArrayTexturePipelines>, array_texture_material: Res<ArrayTextureMaterial>, mut state: ResMut<State<AppState>>, ) { let first_run = chunk_meshes.entities.is_empty(); let new_chunk_meshes = apply_mesh_commands( &*voxel_map, &*local_mesh_buffers, &*pool, &mut *mesh_commands, &mut *chunk_meshes, &mut commands, first_run, ); spawn_mesh_entities( new_chunk_meshes, &mut commands, &mut *mesh_assets, &mut *chunk_meshes, &*array_texture_pipelines, &*array_texture_material, ); if first_run { println!("MESHES GENERATED!\n-> AppState::Running"); state.set(AppState::Running).unwrap(); } } fn apply_mesh_commands( voxel_map: &VoxelMap, local_mesh_buffers: &ThreadLocalMeshBuffers, pool: &ComputeTaskPool, mesh_commands: &mut MeshCommandQueue, chunk_meshes: &mut ChunkMeshes, commands: &mut Commands, first_run: bool, ) -> Vec<(LodChunkKey3, Option<MeshBuf>)> { let num_chunks_to_mesh = mesh_commands.len().min(max_mesh_creations_per_frame(pool)); let mut num_creates = 0; let mut num_updates = 0; pool.scope(|s| { let mut num_meshes_created = 0; for command in mesh_commands.commands.iter().rev().cloned() { match command { MeshCommand::Create(lod_key) => { if 
!chunk_meshes.entities.contains_key(&lod_key) { num_creates += 1; num_meshes_created += 1; s.spawn(async move { ( lod_key, create_mesh_for_chunk(lod_key, voxel_map, local_mesh_buffers), ) }); } } MeshCommand::Update(update) => { num_updates += 1; match update { LodChunkUpdate3::Split(split) => { if let Some((entity, mesh)) = chunk_meshes.entities.remove(&split.old_chunk) { chunk_meshes .remove_queue .insert(split.old_chunk, (entity, mesh)); commands.entity(entity).insert(FADE_OUT); } for &lod_key in split.new_chunks.iter() { if !chunk_meshes.entities.contains_key(&lod_key) { num_meshes_created += 1; s.spawn(async move { ( lod_key, create_mesh_for_chunk( lod_key, voxel_map, local_mesh_buffers, ), ) }); } } } LodChunkUpdate3::Merge(merge) => { for lod_key in merge.old_chunks.iter() { if let Some((entity, mesh)) = chunk_meshes.entities.remove(lod_key) { chunk_meshes.remove_queue.insert(*lod_key, (entity, mesh)); commands.entity(entity).insert(FADE_OUT); } } if !chunk_meshes.entities.contains_key(&merge.new_chunk) { num_meshes_created += 1; s.spawn(async move { ( merge.new_chunk, create_mesh_for_chunk( merge.new_chunk, voxel_map, local_mesh_buffers, ), ) }); } } } } } if !first_run && num_meshes_created >= num_chunks_to_mesh { break; } } let new_length = mesh_commands.len() - (num_creates + num_updates); mesh_commands.commands.truncate(new_length); }) } pub fn mesh_despawn_system( mut commands: Commands, mut chunk_meshes: ResMut<ChunkMeshes>, mut meshes: ResMut<Assets<Mesh>>, query: Query<(&FadeUniform, &LodChunkKey3), With<Handle<Mesh>>>, ) { for (fade, lod_chunk_key) in query.iter() { if !fade.fade_in && fade.remaining == 0.0 { if let Some((entity, mesh)) = chunk_meshes.remove_queue.remove(lod_chunk_key) { commands.entity(entity).despawn(); meshes.remove(&mesh); } } } } fn create_mesh_for_chunk( key: LodChunkKey3, voxel_map: &VoxelMap, local_mesh_buffers: &ThreadLocalMeshBuffers, ) -> Option<MeshBuf> { let chunks = voxel_map.pyramid.level(key.lod); let chunk_extent = 
chunks.indexer.extent_for_chunk_at_key(key.chunk_key); let padded_chunk_extent = padded_greedy_quads_chunk_extent(&chunk_extent); // Keep a thread-local cache of buffers to avoid expensive reallocations every time we want to mesh a chunk. let mesh_tls = local_mesh_buffers.get(); let mut surface_nets_buffers = mesh_tls .get_or_create_with(|| { RefCell::new(LocalSurfaceNetsBuffers { mesh_buffer: GreedyQuadsBuffer::new( padded_chunk_extent, RIGHT_HANDED_Y_UP_CONFIG.quad_groups(), ), neighborhood_buffer: Array3x1::fill(padded_chunk_extent, Voxel::EMPTY), }) }) .borrow_mut(); let LocalSurfaceNetsBuffers { mesh_buffer, neighborhood_buffer, } = &mut *surface_nets_buffers; // While the chunk shape doesn't change, we need to make sure that it's in the right position for each particular chunk. neighborhood_buffer.set_minimum(padded_chunk_extent.minimum); // Only copy the chunk_extent, leaving the padding empty so that we don't get holes on LOD boundaries. copy_extent(&chunk_extent, chunks, neighborhood_buffer); let voxel_size = (1 << key.lod) as f32; greedy_quads(neighborhood_buffer, &padded_chunk_extent, &mut *mesh_buffer); if mesh_buffer.num_quads() == 0 { None } else { let mut mesh_buf = MeshBuf::default(); mesh_buf.extent = chunk_extent * voxel_map.pyramid.chunk_shape(); for group in mesh_buffer.quad_groups.iter() { for quad in group.quads.iter() { let mat = neighborhood_buffer.get(quad.minimum); mesh_buf.add_quad( &group.face, quad, voxel_size, RIGHT_HANDED_Y_UP_CONFIG.u_flip_face, mat.0 as u32 - 1, ); } } Some(mesh_buf) } } // ThreadLocal doesn't let you get a mutable reference, so we need to use RefCell. We lock this down to only be used in this // module as a Local resource, so we know it's safe. 
type ThreadLocalMeshBuffers = ThreadLocalResource<RefCell<LocalSurfaceNetsBuffers>>; pub struct LocalSurfaceNetsBuffers { mesh_buffer: GreedyQuadsBuffer, neighborhood_buffer: Array3x1<Voxel>, } fn spawn_mesh_entities( new_chunk_meshes: Vec<(LodChunkKey3, Option<MeshBuf>)>, commands: &mut Commands, mesh_assets: &mut Assets<Mesh>, chunk_meshes: &mut ChunkMeshes, array_texture_pipelines: &ArrayTexturePipelines, array_texture_material: &ArrayTextureMaterial, ) { for (lod_chunk_key, item) in new_chunk_meshes.into_iter() { let old_mesh = if let Some(mesh_buf) = item { if mesh_buf.indices.is_empty() { None } else { let mut render_mesh = Mesh::new(PrimitiveTopology::TriangleList); let MeshBuf { positions, normals, tex_coords, layer, indices, extent, } = mesh_buf; render_mesh.set_attribute(Mesh::ATTRIBUTE_POSITION, positions.clone()); render_mesh.set_attribute(Mesh::ATTRIBUTE_NORMAL, normals); render_mesh.set_attribute(Mesh::ATTRIBUTE_UV_0, tex_coords); render_mesh.set_attribute("Vertex_Layer", layer); render_mesh.set_indices(Some(Indices::U32(indices.clone()))); let mesh_handle = mesh_assets.add(render_mesh); let minimum = Vec3::new( extent.minimum.0[0] as f32, extent.minimum.0[1] as f32, extent.minimum.0[2] as f32, ); let maximum = Vec3::new( extent.max().0[0] as f32, extent.max().0[1] as f32, extent.max().0[2] as f32, ); let entity = commands .spawn_bundle(PbrBundle { mesh: mesh_handle.clone(), render_pipelines: array_texture_pipelines.0.clone(), material: array_texture_material.0.clone(), ..Default::default() }) .insert_bundle(( FADE_IN, lod_chunk_key, Obb::from_aabb_orientation( Aabb::from_extents(minimum, maximum), Quat::IDENTITY, ), FogConfig::default(), )) .id(); if lod_chunk_key.lod == 0 { let collider_vertices = positions .iter() .cloned() .map(|p| bevy_rapier3d::rapier::math::Point::from_slice(&p)) .collect(); let collider_indices: Vec<[u32; 3]> = indices.chunks(3).map(|i| [i[0], i[1], i[2]]).collect(); commands .entity(entity) .insert_bundle(RigidBodyBundle { 
body_type: RigidBodyType::Static, ..Default::default() }) .insert_bundle(ColliderBundle { shape: ColliderShape::trimesh(collider_vertices, collider_indices), ..Default::default() }); } chunk_meshes .entities .insert(lod_chunk_key, (entity, mesh_handle)) } } else { chunk_meshes.entities.remove(&lod_chunk_key) }; if let Some((entity, _mesh)) = old_mesh { commands.entity(entity).insert(FADE_OUT); } } }
{ self.commands.push_front(command); }
identifier_body
mesh_generator.rs
/* * MIT License * * Copyright (c) 2020 bonsairobo * Copyright (c) 2021 Robert Swain <robert.swain@gmail.com> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ use crate::{ app_state::AppState, fog::FogConfig, mesh_fade::{FadeUniform, FADE_IN, FADE_OUT}, utilities::bevy_util::thread_local_resource::ThreadLocalResource, voxel_map::{Voxel, VoxelMap}, }; use bevy_mod_bounding::{aabb::Aabb, obb::Obb}; use bevy_rapier3d::prelude::{ColliderBundle, ColliderShape, RigidBodyBundle, RigidBodyType}; use building_blocks::{ mesh::*, prelude::*, storage::{LodChunkKey3, LodChunkUpdate3, SmallKeyHashMap}, }; use bevy::{ asset::prelude::*, ecs, prelude::*, render::{mesh::Indices, pipeline::PrimitiveTopology}, tasks::ComputeTaskPool, }; use std::{cell::RefCell, collections::VecDeque}; fn max_mesh_creations_per_frame(pool: &ComputeTaskPool) -> usize { 40 * pool.thread_num() } #[derive(Default)] pub struct MeshCommandQueue { commands: VecDeque<MeshCommand>, } impl MeshCommandQueue { pub fn enqueue(&mut self, command: MeshCommand) { self.commands.push_front(command); } pub fn is_empty(&self) -> bool { self.commands.is_empty() } pub fn len(&self) -> usize { self.commands.len() } pub fn clear(&mut self) { self.commands.clear(); } } // PERF: try to eliminate the use of multiple Vecs #[derive(Clone, Debug, Eq, PartialEq)] pub enum MeshCommand { Create(LodChunkKey3), Update(LodChunkUpdate3), } #[derive(Default)] pub struct ChunkMeshes { // Map from chunk key to mesh entity. 
entities: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>, remove_queue: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>, } impl ChunkMeshes { pub fn clear_entities(&mut self, commands: &mut Commands, meshes: &mut Assets<Mesh>) { self.entities.retain(|_, (entity, mesh)| { clear_up_entity(entity, mesh, commands, meshes); false }); self.remove_queue.retain(|_, (entity, mesh)| { clear_up_entity(entity, mesh, commands, meshes); false }); } pub fn remove_entity( &mut self, lod_chunk_key: &LodChunkKey3, commands: &mut Commands, meshes: &mut Assets<Mesh>, ) { if let Some((entity, mesh)) = self.entities.remove(lod_chunk_key) { clear_up_entity(&entity, &mesh, commands, meshes); } } } fn clear_up_entity( entity: &Entity, mesh: &Handle<Mesh>, commands: &mut Commands, meshes: &mut Assets<Mesh>, ) { commands.entity(*entity).despawn(); meshes.remove(mesh); } // Utility struct for building the mesh #[derive(Debug, Clone)] struct MeshBuf { pub positions: Vec<[f32; 3]>, pub normals: Vec<[f32; 3]>, pub tex_coords: Vec<[f32; 2]>, pub layer: Vec<u32>, pub indices: Vec<u32>, pub extent: Extent3i, } impl Default for MeshBuf { fn default() -> Self { Self { positions: Vec::new(), normals: Vec::new(), tex_coords: Vec::new(), layer: Vec::new(), indices: Vec::new(), extent: Extent3i::from_min_and_shape(PointN([0, 0, 0]), PointN([0, 0, 0])), } } } impl MeshBuf { fn add_quad( &mut self, face: &OrientedCubeFace, quad: &UnorientedQuad, voxel_size: f32, u_flip_face: Axis3, layer: u32, ) { let start_index = self.positions.len() as u32; self.positions .extend_from_slice(&face.quad_mesh_positions(quad, voxel_size)); self.normals.extend_from_slice(&face.quad_mesh_normals()); let flip_v = true; self.tex_coords .extend_from_slice(&face.tex_coords(u_flip_face, flip_v, quad)); self.layer.extend_from_slice(&[layer; 4]); self.indices .extend_from_slice(&face.quad_mesh_indices(start_index)); } } pub struct ArrayTextureMaterial(pub Handle<StandardMaterial>); pub struct ArrayTexturePipelines(pub 
RenderPipelines); /// Generates new meshes for all dirty chunks. pub fn mesh_generator_system( mut commands: Commands, pool: Res<ComputeTaskPool>, voxel_map: Res<VoxelMap>, local_mesh_buffers: ecs::system::Local<ThreadLocalMeshBuffers>, mut mesh_commands: ResMut<MeshCommandQueue>, mut mesh_assets: ResMut<Assets<Mesh>>, mut chunk_meshes: ResMut<ChunkMeshes>, array_texture_pipelines: Res<ArrayTexturePipelines>, array_texture_material: Res<ArrayTextureMaterial>, mut state: ResMut<State<AppState>>, ) { let first_run = chunk_meshes.entities.is_empty(); let new_chunk_meshes = apply_mesh_commands( &*voxel_map, &*local_mesh_buffers, &*pool, &mut *mesh_commands, &mut *chunk_meshes, &mut commands, first_run, ); spawn_mesh_entities( new_chunk_meshes, &mut commands, &mut *mesh_assets, &mut *chunk_meshes, &*array_texture_pipelines, &*array_texture_material, ); if first_run { println!("MESHES GENERATED!\n-> AppState::Running"); state.set(AppState::Running).unwrap(); } } fn apply_mesh_commands( voxel_map: &VoxelMap, local_mesh_buffers: &ThreadLocalMeshBuffers, pool: &ComputeTaskPool, mesh_commands: &mut MeshCommandQueue, chunk_meshes: &mut ChunkMeshes, commands: &mut Commands, first_run: bool, ) -> Vec<(LodChunkKey3, Option<MeshBuf>)> { let num_chunks_to_mesh = mesh_commands.len().min(max_mesh_creations_per_frame(pool)); let mut num_creates = 0; let mut num_updates = 0; pool.scope(|s| { let mut num_meshes_created = 0; for command in mesh_commands.commands.iter().rev().cloned() { match command { MeshCommand::Create(lod_key) => { if !chunk_meshes.entities.contains_key(&lod_key) { num_creates += 1; num_meshes_created += 1; s.spawn(async move { ( lod_key, create_mesh_for_chunk(lod_key, voxel_map, local_mesh_buffers), ) }); } } MeshCommand::Update(update) => { num_updates += 1; match update { LodChunkUpdate3::Split(split) => { if let Some((entity, mesh)) = chunk_meshes.entities.remove(&split.old_chunk) { chunk_meshes .remove_queue .insert(split.old_chunk, (entity, mesh)); 
commands.entity(entity).insert(FADE_OUT); } for &lod_key in split.new_chunks.iter() { if !chunk_meshes.entities.contains_key(&lod_key) { num_meshes_created += 1; s.spawn(async move { ( lod_key,
) }); } } } LodChunkUpdate3::Merge(merge) => { for lod_key in merge.old_chunks.iter() { if let Some((entity, mesh)) = chunk_meshes.entities.remove(lod_key) { chunk_meshes.remove_queue.insert(*lod_key, (entity, mesh)); commands.entity(entity).insert(FADE_OUT); } } if !chunk_meshes.entities.contains_key(&merge.new_chunk) { num_meshes_created += 1; s.spawn(async move { ( merge.new_chunk, create_mesh_for_chunk( merge.new_chunk, voxel_map, local_mesh_buffers, ), ) }); } } } } } if !first_run && num_meshes_created >= num_chunks_to_mesh { break; } } let new_length = mesh_commands.len() - (num_creates + num_updates); mesh_commands.commands.truncate(new_length); }) } pub fn mesh_despawn_system( mut commands: Commands, mut chunk_meshes: ResMut<ChunkMeshes>, mut meshes: ResMut<Assets<Mesh>>, query: Query<(&FadeUniform, &LodChunkKey3), With<Handle<Mesh>>>, ) { for (fade, lod_chunk_key) in query.iter() { if !fade.fade_in && fade.remaining == 0.0 { if let Some((entity, mesh)) = chunk_meshes.remove_queue.remove(lod_chunk_key) { commands.entity(entity).despawn(); meshes.remove(&mesh); } } } } fn create_mesh_for_chunk( key: LodChunkKey3, voxel_map: &VoxelMap, local_mesh_buffers: &ThreadLocalMeshBuffers, ) -> Option<MeshBuf> { let chunks = voxel_map.pyramid.level(key.lod); let chunk_extent = chunks.indexer.extent_for_chunk_at_key(key.chunk_key); let padded_chunk_extent = padded_greedy_quads_chunk_extent(&chunk_extent); // Keep a thread-local cache of buffers to avoid expensive reallocations every time we want to mesh a chunk. 
let mesh_tls = local_mesh_buffers.get(); let mut surface_nets_buffers = mesh_tls .get_or_create_with(|| { RefCell::new(LocalSurfaceNetsBuffers { mesh_buffer: GreedyQuadsBuffer::new( padded_chunk_extent, RIGHT_HANDED_Y_UP_CONFIG.quad_groups(), ), neighborhood_buffer: Array3x1::fill(padded_chunk_extent, Voxel::EMPTY), }) }) .borrow_mut(); let LocalSurfaceNetsBuffers { mesh_buffer, neighborhood_buffer, } = &mut *surface_nets_buffers; // While the chunk shape doesn't change, we need to make sure that it's in the right position for each particular chunk. neighborhood_buffer.set_minimum(padded_chunk_extent.minimum); // Only copy the chunk_extent, leaving the padding empty so that we don't get holes on LOD boundaries. copy_extent(&chunk_extent, chunks, neighborhood_buffer); let voxel_size = (1 << key.lod) as f32; greedy_quads(neighborhood_buffer, &padded_chunk_extent, &mut *mesh_buffer); if mesh_buffer.num_quads() == 0 { None } else { let mut mesh_buf = MeshBuf::default(); mesh_buf.extent = chunk_extent * voxel_map.pyramid.chunk_shape(); for group in mesh_buffer.quad_groups.iter() { for quad in group.quads.iter() { let mat = neighborhood_buffer.get(quad.minimum); mesh_buf.add_quad( &group.face, quad, voxel_size, RIGHT_HANDED_Y_UP_CONFIG.u_flip_face, mat.0 as u32 - 1, ); } } Some(mesh_buf) } } // ThreadLocal doesn't let you get a mutable reference, so we need to use RefCell. We lock this down to only be used in this // module as a Local resource, so we know it's safe. 
type ThreadLocalMeshBuffers = ThreadLocalResource<RefCell<LocalSurfaceNetsBuffers>>; pub struct LocalSurfaceNetsBuffers { mesh_buffer: GreedyQuadsBuffer, neighborhood_buffer: Array3x1<Voxel>, } fn spawn_mesh_entities( new_chunk_meshes: Vec<(LodChunkKey3, Option<MeshBuf>)>, commands: &mut Commands, mesh_assets: &mut Assets<Mesh>, chunk_meshes: &mut ChunkMeshes, array_texture_pipelines: &ArrayTexturePipelines, array_texture_material: &ArrayTextureMaterial, ) { for (lod_chunk_key, item) in new_chunk_meshes.into_iter() { let old_mesh = if let Some(mesh_buf) = item { if mesh_buf.indices.is_empty() { None } else { let mut render_mesh = Mesh::new(PrimitiveTopology::TriangleList); let MeshBuf { positions, normals, tex_coords, layer, indices, extent, } = mesh_buf; render_mesh.set_attribute(Mesh::ATTRIBUTE_POSITION, positions.clone()); render_mesh.set_attribute(Mesh::ATTRIBUTE_NORMAL, normals); render_mesh.set_attribute(Mesh::ATTRIBUTE_UV_0, tex_coords); render_mesh.set_attribute("Vertex_Layer", layer); render_mesh.set_indices(Some(Indices::U32(indices.clone()))); let mesh_handle = mesh_assets.add(render_mesh); let minimum = Vec3::new( extent.minimum.0[0] as f32, extent.minimum.0[1] as f32, extent.minimum.0[2] as f32, ); let maximum = Vec3::new( extent.max().0[0] as f32, extent.max().0[1] as f32, extent.max().0[2] as f32, ); let entity = commands .spawn_bundle(PbrBundle { mesh: mesh_handle.clone(), render_pipelines: array_texture_pipelines.0.clone(), material: array_texture_material.0.clone(), ..Default::default() }) .insert_bundle(( FADE_IN, lod_chunk_key, Obb::from_aabb_orientation( Aabb::from_extents(minimum, maximum), Quat::IDENTITY, ), FogConfig::default(), )) .id(); if lod_chunk_key.lod == 0 { let collider_vertices = positions .iter() .cloned() .map(|p| bevy_rapier3d::rapier::math::Point::from_slice(&p)) .collect(); let collider_indices: Vec<[u32; 3]> = indices.chunks(3).map(|i| [i[0], i[1], i[2]]).collect(); commands .entity(entity) .insert_bundle(RigidBodyBundle { 
body_type: RigidBodyType::Static, ..Default::default() }) .insert_bundle(ColliderBundle { shape: ColliderShape::trimesh(collider_vertices, collider_indices), ..Default::default() }); } chunk_meshes .entities .insert(lod_chunk_key, (entity, mesh_handle)) } } else { chunk_meshes.entities.remove(&lod_chunk_key) }; if let Some((entity, _mesh)) = old_mesh { commands.entity(entity).insert(FADE_OUT); } } }
create_mesh_for_chunk( lod_key, voxel_map, local_mesh_buffers, ),
random_line_split
mod.rs
use crate::audio::*; use crate::format; use id3; use lazy_static::lazy_static; use liblame_sys::*; use log::*; use regex::bytes; use sample; use std::*; mod index; use self::index::FrameIndex; /// This is the absolute maximum number of samples that can be contained in a single frame. const MAX_FRAME_SIZE: usize = 1152; const MAX_FRAME_BYTES: usize = 1348; pub fn magic() -> &'static bytes::Regex { lazy_static! { static ref MAGIC: bytes::Regex = bytes::Regex::new(r"(?s-u)^(?:ID3)|(:?\xff[\xe0-\xff])").unwrap(); } &MAGIC } struct DecoderInit { hip: hip_t, mp3_data: mp3data_struct, buffers: [[i16; MAX_FRAME_SIZE]; 2], decode_count: usize, stream_offset: u64, tag: Option<id3::Tag>, } unsafe fn init_decoder<R>(mut input: &mut R) -> Result<DecoderInit, Error> where R: io::Read + io::Seek, { let tag = { let mut buf = [0; 3]; input.read_exact(&mut buf)?; input.seek(io::SeekFrom::Start(0))?; if &buf == b"ID3" { Some(id3::Tag::read_from(&mut input)?) } else { None } }; // On very rare occasions, LAME is unable to find the start of the stream. 
index::find_stream(input)?; let stream_offset = input.seek(io::SeekFrom::Current(0))?; let hip: hip_t = hip_decode_init(); if hip.is_null() { return Err(Error::ConstructionFailed); } hip_set_debugf(hip, Some(debug_cb)); hip_set_msgf(hip, Some(msg_cb)); hip_set_errorf(hip, Some(error_cb)); let mut mp3_data = mem::zeroed(); let mut enc_delay = 0; let mut enc_padding = 0; let mut buf_left = [0; MAX_FRAME_SIZE]; let mut buf_right = [0; MAX_FRAME_SIZE]; let mut rs = 0; while rs == 0 { let mut read_buf = [0; MAX_FRAME_BYTES]; let num_read = input.read(&mut read_buf)?; rs = hip_decode1_headersB( hip, read_buf.as_mut_ptr(), num_read, buf_left.as_mut_ptr(), buf_right.as_mut_ptr(), &mut mp3_data, &mut enc_delay, &mut enc_padding, ); } if rs == -1 { hip_decode_exit(hip); return Err(Error::Lame(rs)); } let decode_count = rs; if mp3_data.header_parsed != 1 { return Err(Error::NoHeader); } Ok(DecoderInit { hip, mp3_data, buffers: [buf_left, buf_right], decode_count: decode_count as usize, stream_offset, tag, }) } pub fn decode_metadata<R>(mut input: R) -> Result<format::Metadata, Error> where R: io::Read + io::Seek, { unsafe { let init = init_decoder(&mut input)?; hip_decode_exit(init.hip); let num_samples = if init.mp3_data.nsamp != 0 { init.mp3_data.nsamp } else { input.seek(io::SeekFrom::Start(init.stream_offset))?; let frame_index = FrameIndex::read(&mut input)?; frame_index.num_samples() }; Ok(format::Metadata { sample_rate: init.mp3_data.samplerate as u32, num_samples: Some(num_samples), tag: init.tag, }) } } pub fn decode<R>(mut input: R) -> Result<(dynam::Audio, format::Metadata), Error> where R: io::Read + io::Seek + 'static, { unsafe { let init = init_decoder(&mut input)?; let sample_rate = init.mp3_data.samplerate as u32; let num_channels = init.mp3_data.stereo as u32; input.seek(io::SeekFrom::Start(init.stream_offset))?; let frame_index = FrameIndex::read(&mut input)?; input.seek(io::SeekFrom::Start(frame_index.frames[0].offset))?; let meta = format::Metadata { 
sample_rate, num_samples: Some(frame_index.num_samples()), tag: init.tag, }; macro_rules! dyn_type { ($dyn:path) => { $dyn(Box::from(Decoder { input, input_buf: [0; MAX_FRAME_BYTES], hip: init.hip, frame_index, sample_rate, buffers: init.buffers, next_frame: 0, next_sample: 0, samples_available: init.decode_count, _f: marker::PhantomData, })) .into() }; } Ok(( match num_channels { 1 => dyn_type!(dynam::Seek::MonoI16), 2 => dyn_type!(dynam::Seek::StereoI16), _ => unreachable!(), // LAME's interface does not allow this. }, meta, )) } } struct Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { input: R, input_buf: [u8; MAX_FRAME_BYTES], hip: hip_t, frame_index: FrameIndex, sample_rate: u32, buffers: [[i16; MAX_FRAME_SIZE]; 2], next_frame: usize, next_sample: usize, samples_available: usize, _f: marker::PhantomData<F>, } unsafe impl<F, R> Send for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { } impl<F, R> iter::Iterator for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { type Item = F; fn next(&mut self) -> Option<Self::Item> { let mut num_read = 0; while self.next_sample >= self.samples_available { unsafe { let rs = hip_decode1( self.hip, self.input_buf.as_mut_ptr(), num_read, self.buffers[0].as_mut_ptr(), self.buffers[1].as_mut_ptr(), ); match rs { 0 => { if self.next_frame >= self.frame_index.frames.len() { return None; } let frame = &self.frame_index.frames[self.next_frame]; num_read = match self .input .read(&mut self.input_buf[..frame.length as usize]) { Ok(nr) if nr == 0 => return None, Ok(nr) => nr, Err(err) => { error!("{}", err); return None; } }; } code if code < 0 => { error!("Error decoding next frame: {}", Error::Lame(code)); return None; } decode_count => { self.next_frame += 1; self.next_sample = 0; self.samples_available = decode_count as usize; } }; } } let frame = F::from_fn(|ch| self.buffers[ch][self.next_sample]); self.next_sample 
+= 1; Some(frame) } } impl<F, R> Source for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { fn sample_rate(&self) -> u32 { self.sample_rate } } impl<F, R> Seekable for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { fn seek(&mut self, position: u64) -> Result<(), SeekError> { let i = self .frame_index .frame_for_sample(position) .ok_or(SeekError::OutofRange { pos: position, size: self.length(), })?; self.next_frame = i; self.next_sample = position as usize - self.frame_index.frames[i].sample_offset as usize; self.samples_available = 0; assert!(self.next_frame < self.frame_index.frames.len()); assert!(self.next_sample < MAX_FRAME_SIZE); let frame = &self.frame_index.frames[self.next_frame]; self.input .seek(io::SeekFrom::Start(frame.offset)) .map_err(Box::from)?; Ok(()) } fn length(&self) -> u64 { self.frame_index.num_samples() } fn current_position(&self) -> u64 { if self.next_frame == 0 { return 0; } self.frame_index.frames[self.next_frame - 1].sample_offset + self.next_sample as u64 } } impl<F, R> Seek for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { } impl<F, R> Drop for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { fn drop(&mut self) { unsafe { hip_decode_exit(self.hip); } } } unsafe extern "C" fn debug_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) { debug!("{}", VaFormatter(format, ap)); } unsafe extern "C" fn msg_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) { info!("{}", VaFormatter(format, ap)); } unsafe extern "C" fn error_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) { error!("{}", VaFormatter(format, ap)); } struct VaFormatter(*const os::raw::c_char, *mut __va_list_tag); impl fmt::Display for VaFormatter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { unsafe { let cstr = ffi::CStr::from_ptr(self.0); // A buffer two times the format should be 
enough in most cases. let mut buf = vec![0u8; cstr.to_bytes().len() * 2]; vsnprintf(buf.as_mut_ptr() as *mut i8, buf.len(), self.0, self.1); write!( f, "{}", String::from_utf8_lossy(&*buf).trim_matches(&['\0', '\n'][..]) ) } } } #[derive(Debug)] pub enum Error { IO(io::Error), ID3(id3::Error), Index(index::Error), Lame(i32), ConstructionFailed, NoHeader, } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Error::IO(ref err) => write!(f, "IO: {}", err), Error::ID3(ref err) => write!(f, "ID3: {}", err), Error::Index(ref err) => write!(f, "Index: {}", err), Error::Lame(code) => { let msg = match code { 0 => "okay", -1 => "generic error", -10 => "no memory", -11 => "bad bitrate", -12 => "bad sample frequency", -13 => "internal error", -80 => "read error", -81 => "write error", -82 => "file too large", _ => "unknown", }; write!(f, "Lame error: {}", msg) } Error::ConstructionFailed => write!(f, "Failed to construct decoder"), Error::NoHeader => write!(f, "Missing header"), } } } impl error::Error for Error { fn description(&self) -> &str { "MP3 error" } fn cause(&self) -> Option<&error::Error> { match *self { Error::IO(ref err) => Some(err), Error::ID3(ref err) => Some(err), Error::Index(ref err) => Some(err), _ => None, } } } impl From<io::Error> for Error { fn
(err: io::Error) -> Error { Error::IO(err) } } impl From<id3::Error> for Error { fn from(err: id3::Error) -> Error { Error::ID3(err) } } impl From<index::Error> for Error { fn from(err: index::Error) -> Error { Error::Index(err) } } #[cfg(all(test, feature = "unstable"))] mod benchmarks { extern crate test; use super::*; #[bench] fn read_metadata(b: &mut test::Bencher) { b.iter(|| { let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap(); decode_metadata(file).unwrap(); }); } #[bench] fn decoder_open(b: &mut test::Bencher) { b.iter(|| { let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap(); decode(file).unwrap(); }); } }
from
identifier_name
mod.rs
use crate::audio::*; use crate::format; use id3; use lazy_static::lazy_static; use liblame_sys::*; use log::*; use regex::bytes; use sample; use std::*; mod index; use self::index::FrameIndex; /// This is the absolute maximum number of samples that can be contained in a single frame. const MAX_FRAME_SIZE: usize = 1152; const MAX_FRAME_BYTES: usize = 1348; pub fn magic() -> &'static bytes::Regex { lazy_static! { static ref MAGIC: bytes::Regex = bytes::Regex::new(r"(?s-u)^(?:ID3)|(:?\xff[\xe0-\xff])").unwrap(); } &MAGIC } struct DecoderInit { hip: hip_t, mp3_data: mp3data_struct, buffers: [[i16; MAX_FRAME_SIZE]; 2], decode_count: usize, stream_offset: u64, tag: Option<id3::Tag>, } unsafe fn init_decoder<R>(mut input: &mut R) -> Result<DecoderInit, Error> where R: io::Read + io::Seek, { let tag = { let mut buf = [0; 3]; input.read_exact(&mut buf)?; input.seek(io::SeekFrom::Start(0))?; if &buf == b"ID3" { Some(id3::Tag::read_from(&mut input)?) } else { None } }; // On very rare occasions, LAME is unable to find the start of the stream. 
index::find_stream(input)?; let stream_offset = input.seek(io::SeekFrom::Current(0))?; let hip: hip_t = hip_decode_init(); if hip.is_null() { return Err(Error::ConstructionFailed); } hip_set_debugf(hip, Some(debug_cb)); hip_set_msgf(hip, Some(msg_cb)); hip_set_errorf(hip, Some(error_cb)); let mut mp3_data = mem::zeroed(); let mut enc_delay = 0; let mut enc_padding = 0; let mut buf_left = [0; MAX_FRAME_SIZE]; let mut buf_right = [0; MAX_FRAME_SIZE]; let mut rs = 0; while rs == 0 { let mut read_buf = [0; MAX_FRAME_BYTES]; let num_read = input.read(&mut read_buf)?; rs = hip_decode1_headersB( hip, read_buf.as_mut_ptr(), num_read, buf_left.as_mut_ptr(), buf_right.as_mut_ptr(), &mut mp3_data, &mut enc_delay, &mut enc_padding, ); } if rs == -1 { hip_decode_exit(hip); return Err(Error::Lame(rs)); } let decode_count = rs; if mp3_data.header_parsed != 1 { return Err(Error::NoHeader); } Ok(DecoderInit { hip, mp3_data, buffers: [buf_left, buf_right], decode_count: decode_count as usize, stream_offset, tag, }) } pub fn decode_metadata<R>(mut input: R) -> Result<format::Metadata, Error> where R: io::Read + io::Seek, { unsafe { let init = init_decoder(&mut input)?; hip_decode_exit(init.hip); let num_samples = if init.mp3_data.nsamp != 0 { init.mp3_data.nsamp } else { input.seek(io::SeekFrom::Start(init.stream_offset))?; let frame_index = FrameIndex::read(&mut input)?; frame_index.num_samples() }; Ok(format::Metadata { sample_rate: init.mp3_data.samplerate as u32, num_samples: Some(num_samples), tag: init.tag, }) } } pub fn decode<R>(mut input: R) -> Result<(dynam::Audio, format::Metadata), Error> where R: io::Read + io::Seek + 'static, { unsafe { let init = init_decoder(&mut input)?; let sample_rate = init.mp3_data.samplerate as u32; let num_channels = init.mp3_data.stereo as u32; input.seek(io::SeekFrom::Start(init.stream_offset))?; let frame_index = FrameIndex::read(&mut input)?; input.seek(io::SeekFrom::Start(frame_index.frames[0].offset))?; let meta = format::Metadata { 
sample_rate, num_samples: Some(frame_index.num_samples()), tag: init.tag, }; macro_rules! dyn_type { ($dyn:path) => { $dyn(Box::from(Decoder { input, input_buf: [0; MAX_FRAME_BYTES], hip: init.hip, frame_index, sample_rate, buffers: init.buffers, next_frame: 0, next_sample: 0, samples_available: init.decode_count, _f: marker::PhantomData, })) .into() }; } Ok(( match num_channels { 1 => dyn_type!(dynam::Seek::MonoI16), 2 => dyn_type!(dynam::Seek::StereoI16), _ => unreachable!(), // LAME's interface does not allow this. }, meta, )) } } struct Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { input: R, input_buf: [u8; MAX_FRAME_BYTES], hip: hip_t, frame_index: FrameIndex, sample_rate: u32, buffers: [[i16; MAX_FRAME_SIZE]; 2], next_frame: usize, next_sample: usize, samples_available: usize, _f: marker::PhantomData<F>, } unsafe impl<F, R> Send for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { } impl<F, R> iter::Iterator for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { type Item = F; fn next(&mut self) -> Option<Self::Item> { let mut num_read = 0; while self.next_sample >= self.samples_available { unsafe { let rs = hip_decode1( self.hip, self.input_buf.as_mut_ptr(), num_read, self.buffers[0].as_mut_ptr(), self.buffers[1].as_mut_ptr(), ); match rs { 0 => { if self.next_frame >= self.frame_index.frames.len() { return None; } let frame = &self.frame_index.frames[self.next_frame]; num_read = match self .input .read(&mut self.input_buf[..frame.length as usize]) { Ok(nr) if nr == 0 => return None, Ok(nr) => nr, Err(err) => { error!("{}", err); return None; } }; } code if code < 0 => { error!("Error decoding next frame: {}", Error::Lame(code)); return None; } decode_count => { self.next_frame += 1; self.next_sample = 0; self.samples_available = decode_count as usize; } }; } } let frame = F::from_fn(|ch| self.buffers[ch][self.next_sample]); self.next_sample 
+= 1; Some(frame) } } impl<F, R> Source for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { fn sample_rate(&self) -> u32
} impl<F, R> Seekable for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { fn seek(&mut self, position: u64) -> Result<(), SeekError> { let i = self .frame_index .frame_for_sample(position) .ok_or(SeekError::OutofRange { pos: position, size: self.length(), })?; self.next_frame = i; self.next_sample = position as usize - self.frame_index.frames[i].sample_offset as usize; self.samples_available = 0; assert!(self.next_frame < self.frame_index.frames.len()); assert!(self.next_sample < MAX_FRAME_SIZE); let frame = &self.frame_index.frames[self.next_frame]; self.input .seek(io::SeekFrom::Start(frame.offset)) .map_err(Box::from)?; Ok(()) } fn length(&self) -> u64 { self.frame_index.num_samples() } fn current_position(&self) -> u64 { if self.next_frame == 0 { return 0; } self.frame_index.frames[self.next_frame - 1].sample_offset + self.next_sample as u64 } } impl<F, R> Seek for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { } impl<F, R> Drop for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { fn drop(&mut self) { unsafe { hip_decode_exit(self.hip); } } } unsafe extern "C" fn debug_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) { debug!("{}", VaFormatter(format, ap)); } unsafe extern "C" fn msg_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) { info!("{}", VaFormatter(format, ap)); } unsafe extern "C" fn error_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) { error!("{}", VaFormatter(format, ap)); } struct VaFormatter(*const os::raw::c_char, *mut __va_list_tag); impl fmt::Display for VaFormatter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { unsafe { let cstr = ffi::CStr::from_ptr(self.0); // A buffer two times the format should be enough in most cases. 
let mut buf = vec![0u8; cstr.to_bytes().len() * 2]; vsnprintf(buf.as_mut_ptr() as *mut i8, buf.len(), self.0, self.1); write!( f, "{}", String::from_utf8_lossy(&*buf).trim_matches(&['\0', '\n'][..]) ) } } } #[derive(Debug)] pub enum Error { IO(io::Error), ID3(id3::Error), Index(index::Error), Lame(i32), ConstructionFailed, NoHeader, } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Error::IO(ref err) => write!(f, "IO: {}", err), Error::ID3(ref err) => write!(f, "ID3: {}", err), Error::Index(ref err) => write!(f, "Index: {}", err), Error::Lame(code) => { let msg = match code { 0 => "okay", -1 => "generic error", -10 => "no memory", -11 => "bad bitrate", -12 => "bad sample frequency", -13 => "internal error", -80 => "read error", -81 => "write error", -82 => "file too large", _ => "unknown", }; write!(f, "Lame error: {}", msg) } Error::ConstructionFailed => write!(f, "Failed to construct decoder"), Error::NoHeader => write!(f, "Missing header"), } } } impl error::Error for Error { fn description(&self) -> &str { "MP3 error" } fn cause(&self) -> Option<&error::Error> { match *self { Error::IO(ref err) => Some(err), Error::ID3(ref err) => Some(err), Error::Index(ref err) => Some(err), _ => None, } } } impl From<io::Error> for Error { fn from(err: io::Error) -> Error { Error::IO(err) } } impl From<id3::Error> for Error { fn from(err: id3::Error) -> Error { Error::ID3(err) } } impl From<index::Error> for Error { fn from(err: index::Error) -> Error { Error::Index(err) } } #[cfg(all(test, feature = "unstable"))] mod benchmarks { extern crate test; use super::*; #[bench] fn read_metadata(b: &mut test::Bencher) { b.iter(|| { let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap(); decode_metadata(file).unwrap(); }); } #[bench] fn decoder_open(b: &mut test::Bencher) { b.iter(|| { let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap(); decode(file).unwrap(); }); } }
{ self.sample_rate }
identifier_body
mod.rs
use crate::audio::*; use crate::format; use id3; use lazy_static::lazy_static; use liblame_sys::*; use log::*; use regex::bytes; use sample; use std::*; mod index; use self::index::FrameIndex; /// This is the absolute maximum number of samples that can be contained in a single frame. const MAX_FRAME_SIZE: usize = 1152; const MAX_FRAME_BYTES: usize = 1348; pub fn magic() -> &'static bytes::Regex { lazy_static! { static ref MAGIC: bytes::Regex = bytes::Regex::new(r"(?s-u)^(?:ID3)|(:?\xff[\xe0-\xff])").unwrap(); } &MAGIC } struct DecoderInit { hip: hip_t, mp3_data: mp3data_struct, buffers: [[i16; MAX_FRAME_SIZE]; 2], decode_count: usize, stream_offset: u64, tag: Option<id3::Tag>, } unsafe fn init_decoder<R>(mut input: &mut R) -> Result<DecoderInit, Error> where R: io::Read + io::Seek, { let tag = { let mut buf = [0; 3]; input.read_exact(&mut buf)?; input.seek(io::SeekFrom::Start(0))?; if &buf == b"ID3" { Some(id3::Tag::read_from(&mut input)?) } else { None } }; // On very rare occasions, LAME is unable to find the start of the stream. 
index::find_stream(input)?; let stream_offset = input.seek(io::SeekFrom::Current(0))?; let hip: hip_t = hip_decode_init(); if hip.is_null() { return Err(Error::ConstructionFailed); } hip_set_debugf(hip, Some(debug_cb)); hip_set_msgf(hip, Some(msg_cb)); hip_set_errorf(hip, Some(error_cb)); let mut mp3_data = mem::zeroed(); let mut enc_delay = 0; let mut enc_padding = 0; let mut buf_left = [0; MAX_FRAME_SIZE]; let mut buf_right = [0; MAX_FRAME_SIZE]; let mut rs = 0; while rs == 0 { let mut read_buf = [0; MAX_FRAME_BYTES]; let num_read = input.read(&mut read_buf)?; rs = hip_decode1_headersB( hip, read_buf.as_mut_ptr(), num_read, buf_left.as_mut_ptr(), buf_right.as_mut_ptr(), &mut mp3_data, &mut enc_delay, &mut enc_padding, ); } if rs == -1 { hip_decode_exit(hip); return Err(Error::Lame(rs)); } let decode_count = rs; if mp3_data.header_parsed != 1 { return Err(Error::NoHeader); } Ok(DecoderInit { hip, mp3_data, buffers: [buf_left, buf_right], decode_count: decode_count as usize, stream_offset, tag, }) } pub fn decode_metadata<R>(mut input: R) -> Result<format::Metadata, Error> where R: io::Read + io::Seek, { unsafe { let init = init_decoder(&mut input)?; hip_decode_exit(init.hip); let num_samples = if init.mp3_data.nsamp != 0 { init.mp3_data.nsamp } else { input.seek(io::SeekFrom::Start(init.stream_offset))?; let frame_index = FrameIndex::read(&mut input)?; frame_index.num_samples() }; Ok(format::Metadata { sample_rate: init.mp3_data.samplerate as u32, num_samples: Some(num_samples), tag: init.tag, }) } } pub fn decode<R>(mut input: R) -> Result<(dynam::Audio, format::Metadata), Error> where R: io::Read + io::Seek + 'static, { unsafe { let init = init_decoder(&mut input)?; let sample_rate = init.mp3_data.samplerate as u32; let num_channels = init.mp3_data.stereo as u32; input.seek(io::SeekFrom::Start(init.stream_offset))?; let frame_index = FrameIndex::read(&mut input)?; input.seek(io::SeekFrom::Start(frame_index.frames[0].offset))?; let meta = format::Metadata { 
sample_rate,
$dyn(Box::from(Decoder { input, input_buf: [0; MAX_FRAME_BYTES], hip: init.hip, frame_index, sample_rate, buffers: init.buffers, next_frame: 0, next_sample: 0, samples_available: init.decode_count, _f: marker::PhantomData, })) .into() }; } Ok(( match num_channels { 1 => dyn_type!(dynam::Seek::MonoI16), 2 => dyn_type!(dynam::Seek::StereoI16), _ => unreachable!(), // LAME's interface does not allow this. }, meta, )) } } struct Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { input: R, input_buf: [u8; MAX_FRAME_BYTES], hip: hip_t, frame_index: FrameIndex, sample_rate: u32, buffers: [[i16; MAX_FRAME_SIZE]; 2], next_frame: usize, next_sample: usize, samples_available: usize, _f: marker::PhantomData<F>, } unsafe impl<F, R> Send for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { } impl<F, R> iter::Iterator for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { type Item = F; fn next(&mut self) -> Option<Self::Item> { let mut num_read = 0; while self.next_sample >= self.samples_available { unsafe { let rs = hip_decode1( self.hip, self.input_buf.as_mut_ptr(), num_read, self.buffers[0].as_mut_ptr(), self.buffers[1].as_mut_ptr(), ); match rs { 0 => { if self.next_frame >= self.frame_index.frames.len() { return None; } let frame = &self.frame_index.frames[self.next_frame]; num_read = match self .input .read(&mut self.input_buf[..frame.length as usize]) { Ok(nr) if nr == 0 => return None, Ok(nr) => nr, Err(err) => { error!("{}", err); return None; } }; } code if code < 0 => { error!("Error decoding next frame: {}", Error::Lame(code)); return None; } decode_count => { self.next_frame += 1; self.next_sample = 0; self.samples_available = decode_count as usize; } }; } } let frame = F::from_fn(|ch| self.buffers[ch][self.next_sample]); self.next_sample += 1; Some(frame) } } impl<F, R> Source for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek 
+ 'static, { fn sample_rate(&self) -> u32 { self.sample_rate } } impl<F, R> Seekable for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { fn seek(&mut self, position: u64) -> Result<(), SeekError> { let i = self .frame_index .frame_for_sample(position) .ok_or(SeekError::OutofRange { pos: position, size: self.length(), })?; self.next_frame = i; self.next_sample = position as usize - self.frame_index.frames[i].sample_offset as usize; self.samples_available = 0; assert!(self.next_frame < self.frame_index.frames.len()); assert!(self.next_sample < MAX_FRAME_SIZE); let frame = &self.frame_index.frames[self.next_frame]; self.input .seek(io::SeekFrom::Start(frame.offset)) .map_err(Box::from)?; Ok(()) } fn length(&self) -> u64 { self.frame_index.num_samples() } fn current_position(&self) -> u64 { if self.next_frame == 0 { return 0; } self.frame_index.frames[self.next_frame - 1].sample_offset + self.next_sample as u64 } } impl<F, R> Seek for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { } impl<F, R> Drop for Decoder<F, R> where F: sample::Frame<Sample = i16>, R: io::Read + io::Seek + 'static, { fn drop(&mut self) { unsafe { hip_decode_exit(self.hip); } } } unsafe extern "C" fn debug_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) { debug!("{}", VaFormatter(format, ap)); } unsafe extern "C" fn msg_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) { info!("{}", VaFormatter(format, ap)); } unsafe extern "C" fn error_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) { error!("{}", VaFormatter(format, ap)); } struct VaFormatter(*const os::raw::c_char, *mut __va_list_tag); impl fmt::Display for VaFormatter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { unsafe { let cstr = ffi::CStr::from_ptr(self.0); // A buffer two times the format should be enough in most cases. 
let mut buf = vec![0u8; cstr.to_bytes().len() * 2]; vsnprintf(buf.as_mut_ptr() as *mut i8, buf.len(), self.0, self.1); write!( f, "{}", String::from_utf8_lossy(&*buf).trim_matches(&['\0', '\n'][..]) ) } } } #[derive(Debug)] pub enum Error { IO(io::Error), ID3(id3::Error), Index(index::Error), Lame(i32), ConstructionFailed, NoHeader, } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Error::IO(ref err) => write!(f, "IO: {}", err), Error::ID3(ref err) => write!(f, "ID3: {}", err), Error::Index(ref err) => write!(f, "Index: {}", err), Error::Lame(code) => { let msg = match code { 0 => "okay", -1 => "generic error", -10 => "no memory", -11 => "bad bitrate", -12 => "bad sample frequency", -13 => "internal error", -80 => "read error", -81 => "write error", -82 => "file too large", _ => "unknown", }; write!(f, "Lame error: {}", msg) } Error::ConstructionFailed => write!(f, "Failed to construct decoder"), Error::NoHeader => write!(f, "Missing header"), } } } impl error::Error for Error { fn description(&self) -> &str { "MP3 error" } fn cause(&self) -> Option<&error::Error> { match *self { Error::IO(ref err) => Some(err), Error::ID3(ref err) => Some(err), Error::Index(ref err) => Some(err), _ => None, } } } impl From<io::Error> for Error { fn from(err: io::Error) -> Error { Error::IO(err) } } impl From<id3::Error> for Error { fn from(err: id3::Error) -> Error { Error::ID3(err) } } impl From<index::Error> for Error { fn from(err: index::Error) -> Error { Error::Index(err) } } #[cfg(all(test, feature = "unstable"))] mod benchmarks { extern crate test; use super::*; #[bench] fn read_metadata(b: &mut test::Bencher) { b.iter(|| { let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap(); decode_metadata(file).unwrap(); }); } #[bench] fn decoder_open(b: &mut test::Bencher) { b.iter(|| { let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap(); decode(file).unwrap(); }); } }
num_samples: Some(frame_index.num_samples()), tag: init.tag, }; macro_rules! dyn_type { ($dyn:path) => {
random_line_split
aws.go
package reconciler import ( "errors" "fmt" "net" "strings" "sync" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/networkop/cloudroutesync/pkg/route" "github.com/sirupsen/logrus" ) var errRouteTableNotFound = errors.New("RouteTable not found") var awsReservedRanges = []*net.IPNet{ route.ParseCIDR("224.0.0.0/4"), route.ParseCIDR("255.255.255.255/32"), route.ParseCIDR("127.0.0.0/8"), route.ParseCIDR("169.254.0.0/16"), } // AWS Implementation details: // * AWS only allows association of 1 route table with a single subnet // * AWS routes cannot be tagged or given names // * We will attempt to remove all non-local and non-default routes // * To workaround the above, we may need to keep track of added routes // Doing the above between restarts means having a statefile // AwsClient stores cloud client and values type AwsClient struct { aws *ec2.EC2 instanceID, privateIP, subnetID, vpcID string awsRouteTable *ec2.RouteTable baseRoutes []*ec2.Route nicIPtoID map[string]string } // NewAwsClient builds new AWS client func NewAwsClient() (*AwsClient, error) { s, err := session.NewSession(&aws.Config{ MaxRetries: aws.Int(0), }) if err != nil { return nil, fmt.Errorf("Failed to connect to AWS metadata service: %s", err) } md := ec2metadata.New(s) idDoc, err := md.GetInstanceIdentityDocument() if err != nil { return nil, fmt.Errorf("Failed to GetInstanceIdentityDocument: %s", err) } client := ec2.New(s, aws.NewConfig().WithRegion(idDoc.Region)) logrus.Debug("NewAwsClient built") return &AwsClient{ aws: client, instanceID: idDoc.InstanceID, privateIP: idDoc.PrivateIP, nicIPtoID: make(map[string]string), }, nil } // Cleanup removes any leftover resources func (c *AwsClient) Cleanup() error { logrus.Info("Deleting own route table") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: 
aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { return fmt.Errorf("Failed to read route table: %s", err) } logrus.Debugf("Disassociating route tableID: %s", *myRouteTable.RouteTableId) for _, assoc := range myRouteTable.Associations { _, err := c.aws.DisassociateRouteTable(&ec2.DisassociateRouteTableInput{ AssociationId: assoc.RouteTableAssociationId, }) if err != nil { return fmt.Errorf("Failed to disassociate route table %s", err) } } logrus.Debugf("Deleting route tableID: %s", myRouteTable.RouteTableId) _, err = c.aws.DeleteRouteTable(&ec2.DeleteRouteTableInput{ RouteTableId: myRouteTable.RouteTableId, }) if err != nil { return fmt.Errorf("Failed to delete route table %s", err) } return nil } // Reconcile implements reconciler interface func (c *AwsClient) Reconcile(rt *route.Table, eventSync bool, syncInterval int) { logrus.Debug("Entering Reconcile loop") err := c.lookupAwsSubnet() if err != nil { logrus.Panicf("Failed to lookupSubnet: %s", err) } err = c.ensureRouteTable() if err != nil { logrus.Panicf("Failed to ensure route table: %s", err) } if eventSync { for range rt.SyncCh { err = c.syncRouteTable(rt) if err != nil { logrus.Infof("Failed to sync route table: %s", err) } } } else { for { select { case _ = <-rt.SyncCh: logrus.Debug("Received sync signal in periodic mode, ignoring") default: err = c.syncRouteTable(rt) if err != nil
time.Sleep(time.Duration(syncInterval) * time.Second) } } } } func (c *AwsClient) getRouteTable(filters []*ec2.Filter) (*ec2.RouteTable, error) { logrus.Debugf("Reading route table with filters: %+v", filters) input := &ec2.DescribeRouteTablesInput{ Filters: filters, } result, err := c.aws.DescribeRouteTables(input) if err != nil { return nil, fmt.Errorf("Failed to DescribeRouteTables: %s", err) } switch len(result.RouteTables) { case 0: return nil, errRouteTableNotFound case 1: return result.RouteTables[0], nil default: return nil, fmt.Errorf("Found unexpected number of routeTables %d", len(result.RouteTables)) } } // First we need to check what other routes may be present in the main route table // This is done to capture the default route pointing to Internet GatewayID // Next, we check if the route table exists, and if not create a new one // Right after create we inject the default route to make sure VMs stay online // And create a new associating between the new route table and the local subnet func (c *AwsClient) ensureRouteTable() error { logrus.Debug("Reading the main route table") mainRT, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("vpc-id"), Values: aws.StringSlice([]string{c.vpcID}), }, { Name: aws.String("association.main"), Values: aws.StringSlice([]string{"true"}), }, }, ) if err != nil { return fmt.Errorf("Could not find the main route table: %s", err) } logrus.Debug("Checking if our route table exists") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { switch err { case errRouteTableNotFound: logrus.Info("Route table doesn't exist, creating a new one") input := &ec2.CreateRouteTableInput{ VpcId: aws.String(c.vpcID), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String(ec2.ResourceTypeRouteTable), Tags: []*ec2.Tag{ { Key: aws.String("name"), Value: aws.String(uniquePrefix), }, }, }, }, } resp, err := 
c.aws.CreateRouteTable(input) if err != nil { return fmt.Errorf("Failed to CreateRouteTable: %w", err) } for _, route := range onlyDefaultRoute(mainRT.Routes) { logrus.Debugf("Checking a route from the main RT: %s", *route.DestinationCidrBlock) if route.GatewayId != nil { logrus.Info("Adding a default route from the main route table") input := &ec2.CreateRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, GatewayId: route.GatewayId, RouteTableId: resp.RouteTable.RouteTableId, } _, err := c.aws.CreateRoute(input) if err != nil { return fmt.Errorf("Failed to add base routes from main RT: %s", err) } } } c.awsRouteTable = resp.RouteTable return c.associateRouteTable() default: return err } } c.awsRouteTable = myRouteTable logrus.Debugf("Route table already exists") return c.associateRouteTable() } func onlyDefaultRoute(routes []*ec2.Route) []*ec2.Route { logrus.Debugf("Finding default route to internet GW") for _, route := range routes { if strings.HasPrefix(*route.GatewayId, "igw") { return []*ec2.Route{route} } } return nil } func filterRoutes(routes []*ec2.Route) (result []*ec2.Route) { logrus.Debugf("Filtering out routes that don't have NetworkInterfaceID set") for _, route := range routes { if route.NetworkInterfaceId == nil { continue } result = append(result, route) } return result } func (c *AwsClient) syncRouteTable(rt *route.Table) error { currentRoutes := filterRoutes(c.awsRouteTable.Routes) logrus.Debugf("Current routes %+v", currentRoutes) proposedRoutes := c.buildRoutes(rt) logrus.Debugf("Proposed routes %+v", proposedRoutes) toAdd := []*ec2.Route{} for _, proposedRoute := range proposedRoutes { if len(currentRoutes) == 0 { toAdd = append(toAdd, proposedRoute) } for _, currentRoute := range currentRoutes { if !routesEqual(proposedRoute, currentRoute) { toAdd = append(toAdd, proposedRoute) } } } toDelete := []*ec2.Route{} for _, currentRoute := range currentRoutes { if len(proposedRoutes) == 0 { toDelete = append(toDelete, currentRoute) } for _, 
proposedRoute := range proposedRoutes { if !routesEqual(currentRoute, proposedRoute) { toDelete = append(toDelete, currentRoute) } } } var opErrors []error var wg sync.WaitGroup for _, route := range toAdd { wg.Add(1) go func(route *ec2.Route, wg *sync.WaitGroup) { defer wg.Done() input := &ec2.CreateRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, NetworkInterfaceId: route.NetworkInterfaceId, RouteTableId: c.awsRouteTable.RouteTableId, } logrus.Infof("Creating route %s in %s", *route.DestinationCidrBlock, *c.awsRouteTable.RouteTableId) _, err := c.aws.CreateRoute(input) if err != nil { opErrors = append(opErrors, fmt.Errorf("Failed to create route: %s", err)) } }(route, &wg) } for _, route := range toDelete { wg.Add(1) go func(route *ec2.Route, wg *sync.WaitGroup) { defer wg.Done() input := &ec2.DeleteRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, RouteTableId: c.awsRouteTable.RouteTableId, } logrus.Infof("Deleting route %s in %s", *route.DestinationCidrBlock, *c.awsRouteTable.RouteTableId) _, err := c.aws.DeleteRoute(input) if err != nil { opErrors = append(opErrors, fmt.Errorf("Failed to create route: %s", err)) } }(route, &wg) } wg.Wait() for _, err := range opErrors { logrus.Infof("Failed route operation: %s", err) } if len(toAdd)+len(toDelete) > 0 { logrus.Debug("Updating own route table") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { return fmt.Errorf("Failed to update route table") } c.awsRouteTable = myRouteTable } return nil } func (c *AwsClient) buildRoutes(rt *route.Table) (result []*ec2.Route) { OUTER: for prefix, nextHop := range rt.Routes { ip, _, err := net.ParseCIDR(prefix) if err != nil { logrus.Infof("Failed to parse prefix: %s", prefix) continue } for _, subnet := range awsReservedRanges { if subnet != nil && subnet.Contains(ip) { logrus.Debugf("Ignoring IP from AWS reserved ranges: %s", ip) continue OUTER } 
} result = append(result, &ec2.Route{ DestinationCidrBlock: aws.String(prefix), NetworkInterfaceId: aws.String(c.nicIDFromIP(nextHop.String())), }) } return result } func (c *AwsClient) associateRouteTable() error { logrus.Debugf("Ensuring route table is associated") for _, assoc := range c.awsRouteTable.Associations { if *assoc.SubnetId == c.subnetID { logrus.Debugf("Route table is already associated, nothing to do") return nil } } logrus.Debugf("Associating route table with the subnet") input := &ec2.AssociateRouteTableInput{ RouteTableId: aws.String(*c.awsRouteTable.RouteTableId), SubnetId: aws.String(c.subnetID), } _, err := c.aws.AssociateRouteTable(input) if err != nil { return err } return nil } func (c *AwsClient) nicIDFromIP(ip string) string { logrus.Debugf("Calculating nic ID from IP: %s", ip) if id, ok := c.nicIPtoID[ip]; ok { return id } input := &ec2.DescribeNetworkInterfacesInput{ Filters: []*ec2.Filter{ &ec2.Filter{ Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{c.subnetID}), }, }, } nics, err := c.aws.DescribeNetworkInterfaces(input) if err != nil { logrus.Infof("Failed to DescribeNetworkInterfaces: %s", err) return "" } for _, nic := range nics.NetworkInterfaces { logrus.Debugf("Checking nic %s", *nic.NetworkInterfaceId) if *nic.PrivateIpAddress == ip { logrus.Debugf("Found a matching nic ID for IP %s", ip) c.nicIPtoID[ip] = *nic.NetworkInterfaceId return *nic.NetworkInterfaceId } } logrus.Infof("Failed to find an AWS interface matching IP: %s", ip) logrus.Info("Assuming nexthop is self") return c.privateIP } func (c *AwsClient) lookupAwsSubnet() error { logrus.Debugf("Looking for subnetID for instanceID %s", c.instanceID) instances, err := c.aws.DescribeInstances(&ec2.DescribeInstancesInput{ InstanceIds: []*string{aws.String(c.instanceID)}, }) if err != nil { return fmt.Errorf("Failed to DescribeInstances: %s", err) } if len(instances.Reservations) == 0 { return fmt.Errorf("No instances found") } logrus.Debug("Trying to find a 
matching instance") for _, res := range instances.Reservations { for _, instance := range res.Instances { logrus.Debugf("Checking instance %s", *instance.InstanceId) for _, nic := range instance.NetworkInterfaces { logrus.Debugf("Checking NIC %s", *nic.NetworkInterfaceId) if *nic.PrivateIpAddress == c.privateIP { logrus.Debug("Found a matching NIC, assigning IDs") c.subnetID = *nic.SubnetId c.vpcID = *nic.VpcId return nil } } } } return fmt.Errorf("Failed to find the matching instance and NIC") } func routesEqual(route1, route2 *ec2.Route) bool { if *route1.DestinationCidrBlock == *route2.DestinationCidrBlock { if *route1.NetworkInterfaceId == *route2.NetworkInterfaceId { return true } } return false }
{ logrus.Infof("Failed to sync route table: %s", err) }
conditional_block
aws.go
package reconciler import ( "errors" "fmt" "net" "strings" "sync" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/networkop/cloudroutesync/pkg/route" "github.com/sirupsen/logrus" ) var errRouteTableNotFound = errors.New("RouteTable not found") var awsReservedRanges = []*net.IPNet{ route.ParseCIDR("224.0.0.0/4"), route.ParseCIDR("255.255.255.255/32"), route.ParseCIDR("127.0.0.0/8"), route.ParseCIDR("169.254.0.0/16"), } // AWS Implementation details: // * AWS only allows association of 1 route table with a single subnet // * AWS routes cannot be tagged or given names // * We will attempt to remove all non-local and non-default routes // * To workaround the above, we may need to keep track of added routes // Doing the above between restarts means having a statefile // AwsClient stores cloud client and values type AwsClient struct { aws *ec2.EC2 instanceID, privateIP, subnetID, vpcID string awsRouteTable *ec2.RouteTable baseRoutes []*ec2.Route nicIPtoID map[string]string } // NewAwsClient builds new AWS client func NewAwsClient() (*AwsClient, error) { s, err := session.NewSession(&aws.Config{ MaxRetries: aws.Int(0), }) if err != nil { return nil, fmt.Errorf("Failed to connect to AWS metadata service: %s", err) } md := ec2metadata.New(s) idDoc, err := md.GetInstanceIdentityDocument() if err != nil { return nil, fmt.Errorf("Failed to GetInstanceIdentityDocument: %s", err) } client := ec2.New(s, aws.NewConfig().WithRegion(idDoc.Region)) logrus.Debug("NewAwsClient built") return &AwsClient{ aws: client, instanceID: idDoc.InstanceID, privateIP: idDoc.PrivateIP, nicIPtoID: make(map[string]string), }, nil } // Cleanup removes any leftover resources func (c *AwsClient) Cleanup() error { logrus.Info("Deleting own route table") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: 
aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { return fmt.Errorf("Failed to read route table: %s", err) } logrus.Debugf("Disassociating route tableID: %s", *myRouteTable.RouteTableId) for _, assoc := range myRouteTable.Associations { _, err := c.aws.DisassociateRouteTable(&ec2.DisassociateRouteTableInput{ AssociationId: assoc.RouteTableAssociationId, }) if err != nil { return fmt.Errorf("Failed to disassociate route table %s", err) } } logrus.Debugf("Deleting route tableID: %s", myRouteTable.RouteTableId) _, err = c.aws.DeleteRouteTable(&ec2.DeleteRouteTableInput{ RouteTableId: myRouteTable.RouteTableId, }) if err != nil { return fmt.Errorf("Failed to delete route table %s", err) } return nil } // Reconcile implements reconciler interface func (c *AwsClient) Reconcile(rt *route.Table, eventSync bool, syncInterval int) { logrus.Debug("Entering Reconcile loop") err := c.lookupAwsSubnet() if err != nil { logrus.Panicf("Failed to lookupSubnet: %s", err) } err = c.ensureRouteTable() if err != nil { logrus.Panicf("Failed to ensure route table: %s", err) } if eventSync { for range rt.SyncCh { err = c.syncRouteTable(rt) if err != nil { logrus.Infof("Failed to sync route table: %s", err) } } } else { for { select { case _ = <-rt.SyncCh: logrus.Debug("Received sync signal in periodic mode, ignoring") default: err = c.syncRouteTable(rt) if err != nil { logrus.Infof("Failed to sync route table: %s", err) } time.Sleep(time.Duration(syncInterval) * time.Second) } } } } func (c *AwsClient) getRouteTable(filters []*ec2.Filter) (*ec2.RouteTable, error) { logrus.Debugf("Reading route table with filters: %+v", filters) input := &ec2.DescribeRouteTablesInput{ Filters: filters, } result, err := c.aws.DescribeRouteTables(input) if err != nil { return nil, fmt.Errorf("Failed to DescribeRouteTables: %s", err) } switch len(result.RouteTables) { case 0: return nil, errRouteTableNotFound case 1: return result.RouteTables[0], nil default: return nil, fmt.Errorf("Found 
unexpected number of routeTables %d", len(result.RouteTables)) } } // First we need to check what other routes may be present in the main route table // This is done to capture the default route pointing to Internet GatewayID // Next, we check if the route table exists, and if not create a new one // Right after create we inject the default route to make sure VMs stay online // And create a new associating between the new route table and the local subnet func (c *AwsClient) ensureRouteTable() error { logrus.Debug("Reading the main route table") mainRT, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("vpc-id"), Values: aws.StringSlice([]string{c.vpcID}), }, { Name: aws.String("association.main"), Values: aws.StringSlice([]string{"true"}), }, }, ) if err != nil { return fmt.Errorf("Could not find the main route table: %s", err) } logrus.Debug("Checking if our route table exists") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { switch err { case errRouteTableNotFound: logrus.Info("Route table doesn't exist, creating a new one") input := &ec2.CreateRouteTableInput{ VpcId: aws.String(c.vpcID), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String(ec2.ResourceTypeRouteTable), Tags: []*ec2.Tag{ { Key: aws.String("name"), Value: aws.String(uniquePrefix), }, }, }, }, } resp, err := c.aws.CreateRouteTable(input) if err != nil { return fmt.Errorf("Failed to CreateRouteTable: %w", err) } for _, route := range onlyDefaultRoute(mainRT.Routes) { logrus.Debugf("Checking a route from the main RT: %s", *route.DestinationCidrBlock) if route.GatewayId != nil { logrus.Info("Adding a default route from the main route table") input := &ec2.CreateRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, GatewayId: route.GatewayId, RouteTableId: resp.RouteTable.RouteTableId, } _, err := c.aws.CreateRoute(input) if err != nil { return 
fmt.Errorf("Failed to add base routes from main RT: %s", err) } } } c.awsRouteTable = resp.RouteTable return c.associateRouteTable() default: return err } } c.awsRouteTable = myRouteTable logrus.Debugf("Route table already exists") return c.associateRouteTable() } func
(routes []*ec2.Route) []*ec2.Route { logrus.Debugf("Finding default route to internet GW") for _, route := range routes { if strings.HasPrefix(*route.GatewayId, "igw") { return []*ec2.Route{route} } } return nil } func filterRoutes(routes []*ec2.Route) (result []*ec2.Route) { logrus.Debugf("Filtering out routes that don't have NetworkInterfaceID set") for _, route := range routes { if route.NetworkInterfaceId == nil { continue } result = append(result, route) } return result } func (c *AwsClient) syncRouteTable(rt *route.Table) error { currentRoutes := filterRoutes(c.awsRouteTable.Routes) logrus.Debugf("Current routes %+v", currentRoutes) proposedRoutes := c.buildRoutes(rt) logrus.Debugf("Proposed routes %+v", proposedRoutes) toAdd := []*ec2.Route{} for _, proposedRoute := range proposedRoutes { if len(currentRoutes) == 0 { toAdd = append(toAdd, proposedRoute) } for _, currentRoute := range currentRoutes { if !routesEqual(proposedRoute, currentRoute) { toAdd = append(toAdd, proposedRoute) } } } toDelete := []*ec2.Route{} for _, currentRoute := range currentRoutes { if len(proposedRoutes) == 0 { toDelete = append(toDelete, currentRoute) } for _, proposedRoute := range proposedRoutes { if !routesEqual(currentRoute, proposedRoute) { toDelete = append(toDelete, currentRoute) } } } var opErrors []error var wg sync.WaitGroup for _, route := range toAdd { wg.Add(1) go func(route *ec2.Route, wg *sync.WaitGroup) { defer wg.Done() input := &ec2.CreateRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, NetworkInterfaceId: route.NetworkInterfaceId, RouteTableId: c.awsRouteTable.RouteTableId, } logrus.Infof("Creating route %s in %s", *route.DestinationCidrBlock, *c.awsRouteTable.RouteTableId) _, err := c.aws.CreateRoute(input) if err != nil { opErrors = append(opErrors, fmt.Errorf("Failed to create route: %s", err)) } }(route, &wg) } for _, route := range toDelete { wg.Add(1) go func(route *ec2.Route, wg *sync.WaitGroup) { defer wg.Done() input := 
&ec2.DeleteRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, RouteTableId: c.awsRouteTable.RouteTableId, } logrus.Infof("Deleting route %s in %s", *route.DestinationCidrBlock, *c.awsRouteTable.RouteTableId) _, err := c.aws.DeleteRoute(input) if err != nil { opErrors = append(opErrors, fmt.Errorf("Failed to create route: %s", err)) } }(route, &wg) } wg.Wait() for _, err := range opErrors { logrus.Infof("Failed route operation: %s", err) } if len(toAdd)+len(toDelete) > 0 { logrus.Debug("Updating own route table") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { return fmt.Errorf("Failed to update route table") } c.awsRouteTable = myRouteTable } return nil } func (c *AwsClient) buildRoutes(rt *route.Table) (result []*ec2.Route) { OUTER: for prefix, nextHop := range rt.Routes { ip, _, err := net.ParseCIDR(prefix) if err != nil { logrus.Infof("Failed to parse prefix: %s", prefix) continue } for _, subnet := range awsReservedRanges { if subnet != nil && subnet.Contains(ip) { logrus.Debugf("Ignoring IP from AWS reserved ranges: %s", ip) continue OUTER } } result = append(result, &ec2.Route{ DestinationCidrBlock: aws.String(prefix), NetworkInterfaceId: aws.String(c.nicIDFromIP(nextHop.String())), }) } return result } func (c *AwsClient) associateRouteTable() error { logrus.Debugf("Ensuring route table is associated") for _, assoc := range c.awsRouteTable.Associations { if *assoc.SubnetId == c.subnetID { logrus.Debugf("Route table is already associated, nothing to do") return nil } } logrus.Debugf("Associating route table with the subnet") input := &ec2.AssociateRouteTableInput{ RouteTableId: aws.String(*c.awsRouteTable.RouteTableId), SubnetId: aws.String(c.subnetID), } _, err := c.aws.AssociateRouteTable(input) if err != nil { return err } return nil } func (c *AwsClient) nicIDFromIP(ip string) string { logrus.Debugf("Calculating nic ID from IP: %s", ip) 
if id, ok := c.nicIPtoID[ip]; ok { return id } input := &ec2.DescribeNetworkInterfacesInput{ Filters: []*ec2.Filter{ &ec2.Filter{ Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{c.subnetID}), }, }, } nics, err := c.aws.DescribeNetworkInterfaces(input) if err != nil { logrus.Infof("Failed to DescribeNetworkInterfaces: %s", err) return "" } for _, nic := range nics.NetworkInterfaces { logrus.Debugf("Checking nic %s", *nic.NetworkInterfaceId) if *nic.PrivateIpAddress == ip { logrus.Debugf("Found a matching nic ID for IP %s", ip) c.nicIPtoID[ip] = *nic.NetworkInterfaceId return *nic.NetworkInterfaceId } } logrus.Infof("Failed to find an AWS interface matching IP: %s", ip) logrus.Info("Assuming nexthop is self") return c.privateIP } func (c *AwsClient) lookupAwsSubnet() error { logrus.Debugf("Looking for subnetID for instanceID %s", c.instanceID) instances, err := c.aws.DescribeInstances(&ec2.DescribeInstancesInput{ InstanceIds: []*string{aws.String(c.instanceID)}, }) if err != nil { return fmt.Errorf("Failed to DescribeInstances: %s", err) } if len(instances.Reservations) == 0 { return fmt.Errorf("No instances found") } logrus.Debug("Trying to find a matching instance") for _, res := range instances.Reservations { for _, instance := range res.Instances { logrus.Debugf("Checking instance %s", *instance.InstanceId) for _, nic := range instance.NetworkInterfaces { logrus.Debugf("Checking NIC %s", *nic.NetworkInterfaceId) if *nic.PrivateIpAddress == c.privateIP { logrus.Debug("Found a matching NIC, assigning IDs") c.subnetID = *nic.SubnetId c.vpcID = *nic.VpcId return nil } } } } return fmt.Errorf("Failed to find the matching instance and NIC") } func routesEqual(route1, route2 *ec2.Route) bool { if *route1.DestinationCidrBlock == *route2.DestinationCidrBlock { if *route1.NetworkInterfaceId == *route2.NetworkInterfaceId { return true } } return false }
onlyDefaultRoute
identifier_name
aws.go
package reconciler import ( "errors" "fmt" "net" "strings" "sync" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/networkop/cloudroutesync/pkg/route" "github.com/sirupsen/logrus" ) var errRouteTableNotFound = errors.New("RouteTable not found") var awsReservedRanges = []*net.IPNet{ route.ParseCIDR("224.0.0.0/4"), route.ParseCIDR("255.255.255.255/32"), route.ParseCIDR("127.0.0.0/8"), route.ParseCIDR("169.254.0.0/16"), } // AWS Implementation details: // * AWS only allows association of 1 route table with a single subnet // * AWS routes cannot be tagged or given names // * We will attempt to remove all non-local and non-default routes // * To workaround the above, we may need to keep track of added routes // Doing the above between restarts means having a statefile // AwsClient stores cloud client and values type AwsClient struct { aws *ec2.EC2 instanceID, privateIP, subnetID, vpcID string awsRouteTable *ec2.RouteTable baseRoutes []*ec2.Route nicIPtoID map[string]string } // NewAwsClient builds new AWS client func NewAwsClient() (*AwsClient, error) { s, err := session.NewSession(&aws.Config{ MaxRetries: aws.Int(0), }) if err != nil { return nil, fmt.Errorf("Failed to connect to AWS metadata service: %s", err) } md := ec2metadata.New(s) idDoc, err := md.GetInstanceIdentityDocument() if err != nil { return nil, fmt.Errorf("Failed to GetInstanceIdentityDocument: %s", err) } client := ec2.New(s, aws.NewConfig().WithRegion(idDoc.Region)) logrus.Debug("NewAwsClient built") return &AwsClient{ aws: client, instanceID: idDoc.InstanceID, privateIP: idDoc.PrivateIP, nicIPtoID: make(map[string]string), }, nil } // Cleanup removes any leftover resources func (c *AwsClient) Cleanup() error { logrus.Info("Deleting own route table") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: 
aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { return fmt.Errorf("Failed to read route table: %s", err) } logrus.Debugf("Disassociating route tableID: %s", *myRouteTable.RouteTableId) for _, assoc := range myRouteTable.Associations { _, err := c.aws.DisassociateRouteTable(&ec2.DisassociateRouteTableInput{ AssociationId: assoc.RouteTableAssociationId, }) if err != nil { return fmt.Errorf("Failed to disassociate route table %s", err) } } logrus.Debugf("Deleting route tableID: %s", myRouteTable.RouteTableId) _, err = c.aws.DeleteRouteTable(&ec2.DeleteRouteTableInput{ RouteTableId: myRouteTable.RouteTableId, }) if err != nil { return fmt.Errorf("Failed to delete route table %s", err) } return nil } // Reconcile implements reconciler interface func (c *AwsClient) Reconcile(rt *route.Table, eventSync bool, syncInterval int) { logrus.Debug("Entering Reconcile loop") err := c.lookupAwsSubnet() if err != nil { logrus.Panicf("Failed to lookupSubnet: %s", err) } err = c.ensureRouteTable() if err != nil { logrus.Panicf("Failed to ensure route table: %s", err) } if eventSync { for range rt.SyncCh { err = c.syncRouteTable(rt) if err != nil { logrus.Infof("Failed to sync route table: %s", err) } } } else { for { select { case _ = <-rt.SyncCh: logrus.Debug("Received sync signal in periodic mode, ignoring") default: err = c.syncRouteTable(rt) if err != nil { logrus.Infof("Failed to sync route table: %s", err) } time.Sleep(time.Duration(syncInterval) * time.Second) } } } } func (c *AwsClient) getRouteTable(filters []*ec2.Filter) (*ec2.RouteTable, error) { logrus.Debugf("Reading route table with filters: %+v", filters) input := &ec2.DescribeRouteTablesInput{ Filters: filters, } result, err := c.aws.DescribeRouteTables(input) if err != nil { return nil, fmt.Errorf("Failed to DescribeRouteTables: %s", err) } switch len(result.RouteTables) { case 0: return nil, errRouteTableNotFound case 1: return result.RouteTables[0], nil default: return nil, fmt.Errorf("Found 
unexpected number of routeTables %d", len(result.RouteTables)) } } // First we need to check what other routes may be present in the main route table // This is done to capture the default route pointing to Internet GatewayID // Next, we check if the route table exists, and if not create a new one // Right after create we inject the default route to make sure VMs stay online // And create a new associating between the new route table and the local subnet func (c *AwsClient) ensureRouteTable() error { logrus.Debug("Reading the main route table") mainRT, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("vpc-id"), Values: aws.StringSlice([]string{c.vpcID}), }, { Name: aws.String("association.main"), Values: aws.StringSlice([]string{"true"}), }, }, ) if err != nil { return fmt.Errorf("Could not find the main route table: %s", err) } logrus.Debug("Checking if our route table exists") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { switch err { case errRouteTableNotFound: logrus.Info("Route table doesn't exist, creating a new one") input := &ec2.CreateRouteTableInput{ VpcId: aws.String(c.vpcID), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String(ec2.ResourceTypeRouteTable), Tags: []*ec2.Tag{ { Key: aws.String("name"), Value: aws.String(uniquePrefix), }, }, }, }, } resp, err := c.aws.CreateRouteTable(input) if err != nil { return fmt.Errorf("Failed to CreateRouteTable: %w", err) } for _, route := range onlyDefaultRoute(mainRT.Routes) { logrus.Debugf("Checking a route from the main RT: %s", *route.DestinationCidrBlock) if route.GatewayId != nil { logrus.Info("Adding a default route from the main route table") input := &ec2.CreateRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, GatewayId: route.GatewayId, RouteTableId: resp.RouteTable.RouteTableId, } _, err := c.aws.CreateRoute(input) if err != nil { return 
fmt.Errorf("Failed to add base routes from main RT: %s", err) } } } c.awsRouteTable = resp.RouteTable return c.associateRouteTable() default: return err } } c.awsRouteTable = myRouteTable logrus.Debugf("Route table already exists") return c.associateRouteTable() } func onlyDefaultRoute(routes []*ec2.Route) []*ec2.Route { logrus.Debugf("Finding default route to internet GW") for _, route := range routes { if strings.HasPrefix(*route.GatewayId, "igw") { return []*ec2.Route{route} } } return nil } func filterRoutes(routes []*ec2.Route) (result []*ec2.Route) { logrus.Debugf("Filtering out routes that don't have NetworkInterfaceID set") for _, route := range routes { if route.NetworkInterfaceId == nil { continue } result = append(result, route) } return result } func (c *AwsClient) syncRouteTable(rt *route.Table) error { currentRoutes := filterRoutes(c.awsRouteTable.Routes) logrus.Debugf("Current routes %+v", currentRoutes) proposedRoutes := c.buildRoutes(rt) logrus.Debugf("Proposed routes %+v", proposedRoutes) toAdd := []*ec2.Route{} for _, proposedRoute := range proposedRoutes { if len(currentRoutes) == 0 { toAdd = append(toAdd, proposedRoute) } for _, currentRoute := range currentRoutes { if !routesEqual(proposedRoute, currentRoute) { toAdd = append(toAdd, proposedRoute) } } } toDelete := []*ec2.Route{} for _, currentRoute := range currentRoutes { if len(proposedRoutes) == 0 { toDelete = append(toDelete, currentRoute) } for _, proposedRoute := range proposedRoutes { if !routesEqual(currentRoute, proposedRoute) { toDelete = append(toDelete, currentRoute) } } } var opErrors []error var wg sync.WaitGroup for _, route := range toAdd { wg.Add(1) go func(route *ec2.Route, wg *sync.WaitGroup) { defer wg.Done() input := &ec2.CreateRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, NetworkInterfaceId: route.NetworkInterfaceId, RouteTableId: c.awsRouteTable.RouteTableId, } logrus.Infof("Creating route %s in %s", *route.DestinationCidrBlock, 
*c.awsRouteTable.RouteTableId) _, err := c.aws.CreateRoute(input) if err != nil { opErrors = append(opErrors, fmt.Errorf("Failed to create route: %s", err)) } }(route, &wg) } for _, route := range toDelete { wg.Add(1) go func(route *ec2.Route, wg *sync.WaitGroup) { defer wg.Done() input := &ec2.DeleteRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, RouteTableId: c.awsRouteTable.RouteTableId, } logrus.Infof("Deleting route %s in %s", *route.DestinationCidrBlock, *c.awsRouteTable.RouteTableId) _, err := c.aws.DeleteRoute(input) if err != nil { opErrors = append(opErrors, fmt.Errorf("Failed to create route: %s", err)) } }(route, &wg) } wg.Wait() for _, err := range opErrors { logrus.Infof("Failed route operation: %s", err) } if len(toAdd)+len(toDelete) > 0 { logrus.Debug("Updating own route table") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { return fmt.Errorf("Failed to update route table") } c.awsRouteTable = myRouteTable } return nil } func (c *AwsClient) buildRoutes(rt *route.Table) (result []*ec2.Route)
func (c *AwsClient) associateRouteTable() error { logrus.Debugf("Ensuring route table is associated") for _, assoc := range c.awsRouteTable.Associations { if *assoc.SubnetId == c.subnetID { logrus.Debugf("Route table is already associated, nothing to do") return nil } } logrus.Debugf("Associating route table with the subnet") input := &ec2.AssociateRouteTableInput{ RouteTableId: aws.String(*c.awsRouteTable.RouteTableId), SubnetId: aws.String(c.subnetID), } _, err := c.aws.AssociateRouteTable(input) if err != nil { return err } return nil } func (c *AwsClient) nicIDFromIP(ip string) string { logrus.Debugf("Calculating nic ID from IP: %s", ip) if id, ok := c.nicIPtoID[ip]; ok { return id } input := &ec2.DescribeNetworkInterfacesInput{ Filters: []*ec2.Filter{ &ec2.Filter{ Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{c.subnetID}), }, }, } nics, err := c.aws.DescribeNetworkInterfaces(input) if err != nil { logrus.Infof("Failed to DescribeNetworkInterfaces: %s", err) return "" } for _, nic := range nics.NetworkInterfaces { logrus.Debugf("Checking nic %s", *nic.NetworkInterfaceId) if *nic.PrivateIpAddress == ip { logrus.Debugf("Found a matching nic ID for IP %s", ip) c.nicIPtoID[ip] = *nic.NetworkInterfaceId return *nic.NetworkInterfaceId } } logrus.Infof("Failed to find an AWS interface matching IP: %s", ip) logrus.Info("Assuming nexthop is self") return c.privateIP } func (c *AwsClient) lookupAwsSubnet() error { logrus.Debugf("Looking for subnetID for instanceID %s", c.instanceID) instances, err := c.aws.DescribeInstances(&ec2.DescribeInstancesInput{ InstanceIds: []*string{aws.String(c.instanceID)}, }) if err != nil { return fmt.Errorf("Failed to DescribeInstances: %s", err) } if len(instances.Reservations) == 0 { return fmt.Errorf("No instances found") } logrus.Debug("Trying to find a matching instance") for _, res := range instances.Reservations { for _, instance := range res.Instances { logrus.Debugf("Checking instance %s", *instance.InstanceId) 
for _, nic := range instance.NetworkInterfaces { logrus.Debugf("Checking NIC %s", *nic.NetworkInterfaceId) if *nic.PrivateIpAddress == c.privateIP { logrus.Debug("Found a matching NIC, assigning IDs") c.subnetID = *nic.SubnetId c.vpcID = *nic.VpcId return nil } } } } return fmt.Errorf("Failed to find the matching instance and NIC") } func routesEqual(route1, route2 *ec2.Route) bool { if *route1.DestinationCidrBlock == *route2.DestinationCidrBlock { if *route1.NetworkInterfaceId == *route2.NetworkInterfaceId { return true } } return false }
{ OUTER: for prefix, nextHop := range rt.Routes { ip, _, err := net.ParseCIDR(prefix) if err != nil { logrus.Infof("Failed to parse prefix: %s", prefix) continue } for _, subnet := range awsReservedRanges { if subnet != nil && subnet.Contains(ip) { logrus.Debugf("Ignoring IP from AWS reserved ranges: %s", ip) continue OUTER } } result = append(result, &ec2.Route{ DestinationCidrBlock: aws.String(prefix), NetworkInterfaceId: aws.String(c.nicIDFromIP(nextHop.String())), }) } return result }
identifier_body
aws.go
package reconciler import ( "errors" "fmt" "net" "strings" "sync" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/networkop/cloudroutesync/pkg/route" "github.com/sirupsen/logrus" ) var errRouteTableNotFound = errors.New("RouteTable not found") var awsReservedRanges = []*net.IPNet{ route.ParseCIDR("224.0.0.0/4"), route.ParseCIDR("255.255.255.255/32"), route.ParseCIDR("127.0.0.0/8"), route.ParseCIDR("169.254.0.0/16"), } // AWS Implementation details: // * AWS only allows association of 1 route table with a single subnet // * AWS routes cannot be tagged or given names // * We will attempt to remove all non-local and non-default routes // * To workaround the above, we may need to keep track of added routes // Doing the above between restarts means having a statefile // AwsClient stores cloud client and values type AwsClient struct { aws *ec2.EC2 instanceID, privateIP, subnetID, vpcID string awsRouteTable *ec2.RouteTable baseRoutes []*ec2.Route nicIPtoID map[string]string } // NewAwsClient builds new AWS client func NewAwsClient() (*AwsClient, error) { s, err := session.NewSession(&aws.Config{ MaxRetries: aws.Int(0), }) if err != nil { return nil, fmt.Errorf("Failed to connect to AWS metadata service: %s", err) } md := ec2metadata.New(s) idDoc, err := md.GetInstanceIdentityDocument() if err != nil { return nil, fmt.Errorf("Failed to GetInstanceIdentityDocument: %s", err) } client := ec2.New(s, aws.NewConfig().WithRegion(idDoc.Region)) logrus.Debug("NewAwsClient built") return &AwsClient{ aws: client, instanceID: idDoc.InstanceID, privateIP: idDoc.PrivateIP, nicIPtoID: make(map[string]string), }, nil } // Cleanup removes any leftover resources func (c *AwsClient) Cleanup() error { logrus.Info("Deleting own route table") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: 
aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { return fmt.Errorf("Failed to read route table: %s", err) } logrus.Debugf("Disassociating route tableID: %s", *myRouteTable.RouteTableId) for _, assoc := range myRouteTable.Associations { _, err := c.aws.DisassociateRouteTable(&ec2.DisassociateRouteTableInput{ AssociationId: assoc.RouteTableAssociationId, }) if err != nil { return fmt.Errorf("Failed to disassociate route table %s", err) } } logrus.Debugf("Deleting route tableID: %s", myRouteTable.RouteTableId) _, err = c.aws.DeleteRouteTable(&ec2.DeleteRouteTableInput{ RouteTableId: myRouteTable.RouteTableId, }) if err != nil { return fmt.Errorf("Failed to delete route table %s", err) } return nil } // Reconcile implements reconciler interface func (c *AwsClient) Reconcile(rt *route.Table, eventSync bool, syncInterval int) { logrus.Debug("Entering Reconcile loop") err := c.lookupAwsSubnet() if err != nil { logrus.Panicf("Failed to lookupSubnet: %s", err) } err = c.ensureRouteTable() if err != nil { logrus.Panicf("Failed to ensure route table: %s", err) } if eventSync { for range rt.SyncCh { err = c.syncRouteTable(rt) if err != nil { logrus.Infof("Failed to sync route table: %s", err) } } } else { for { select { case _ = <-rt.SyncCh: logrus.Debug("Received sync signal in periodic mode, ignoring") default: err = c.syncRouteTable(rt) if err != nil { logrus.Infof("Failed to sync route table: %s", err) } time.Sleep(time.Duration(syncInterval) * time.Second) } } } } func (c *AwsClient) getRouteTable(filters []*ec2.Filter) (*ec2.RouteTable, error) { logrus.Debugf("Reading route table with filters: %+v", filters) input := &ec2.DescribeRouteTablesInput{ Filters: filters, } result, err := c.aws.DescribeRouteTables(input) if err != nil { return nil, fmt.Errorf("Failed to DescribeRouteTables: %s", err) } switch len(result.RouteTables) { case 0: return nil, errRouteTableNotFound case 1: return result.RouteTables[0], nil default: return nil, fmt.Errorf("Found 
unexpected number of routeTables %d", len(result.RouteTables)) } } // First we need to check what other routes may be present in the main route table // This is done to capture the default route pointing to Internet GatewayID // Next, we check if the route table exists, and if not create a new one // Right after create we inject the default route to make sure VMs stay online // And create a new associating between the new route table and the local subnet func (c *AwsClient) ensureRouteTable() error { logrus.Debug("Reading the main route table") mainRT, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("vpc-id"), Values: aws.StringSlice([]string{c.vpcID}), }, { Name: aws.String("association.main"), Values: aws.StringSlice([]string{"true"}), }, }, ) if err != nil { return fmt.Errorf("Could not find the main route table: %s", err) } logrus.Debug("Checking if our route table exists") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { switch err { case errRouteTableNotFound: logrus.Info("Route table doesn't exist, creating a new one") input := &ec2.CreateRouteTableInput{ VpcId: aws.String(c.vpcID), TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String(ec2.ResourceTypeRouteTable), Tags: []*ec2.Tag{ { Key: aws.String("name"), Value: aws.String(uniquePrefix), }, }, }, }, } resp, err := c.aws.CreateRouteTable(input) if err != nil { return fmt.Errorf("Failed to CreateRouteTable: %w", err) } for _, route := range onlyDefaultRoute(mainRT.Routes) { logrus.Debugf("Checking a route from the main RT: %s", *route.DestinationCidrBlock) if route.GatewayId != nil { logrus.Info("Adding a default route from the main route table") input := &ec2.CreateRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, GatewayId: route.GatewayId, RouteTableId: resp.RouteTable.RouteTableId, } _, err := c.aws.CreateRoute(input) if err != nil { return 
fmt.Errorf("Failed to add base routes from main RT: %s", err) } } } c.awsRouteTable = resp.RouteTable return c.associateRouteTable() default: return err } } c.awsRouteTable = myRouteTable logrus.Debugf("Route table already exists") return c.associateRouteTable() } func onlyDefaultRoute(routes []*ec2.Route) []*ec2.Route { logrus.Debugf("Finding default route to internet GW") for _, route := range routes { if strings.HasPrefix(*route.GatewayId, "igw") { return []*ec2.Route{route} } } return nil } func filterRoutes(routes []*ec2.Route) (result []*ec2.Route) { logrus.Debugf("Filtering out routes that don't have NetworkInterfaceID set") for _, route := range routes { if route.NetworkInterfaceId == nil { continue } result = append(result, route) } return result } func (c *AwsClient) syncRouteTable(rt *route.Table) error { currentRoutes := filterRoutes(c.awsRouteTable.Routes) logrus.Debugf("Current routes %+v", currentRoutes) proposedRoutes := c.buildRoutes(rt) logrus.Debugf("Proposed routes %+v", proposedRoutes) toAdd := []*ec2.Route{} for _, proposedRoute := range proposedRoutes { if len(currentRoutes) == 0 { toAdd = append(toAdd, proposedRoute) } for _, currentRoute := range currentRoutes { if !routesEqual(proposedRoute, currentRoute) { toAdd = append(toAdd, proposedRoute) } } } toDelete := []*ec2.Route{} for _, currentRoute := range currentRoutes { if len(proposedRoutes) == 0 { toDelete = append(toDelete, currentRoute) } for _, proposedRoute := range proposedRoutes { if !routesEqual(currentRoute, proposedRoute) { toDelete = append(toDelete, currentRoute) } } } var opErrors []error var wg sync.WaitGroup for _, route := range toAdd { wg.Add(1) go func(route *ec2.Route, wg *sync.WaitGroup) { defer wg.Done() input := &ec2.CreateRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, NetworkInterfaceId: route.NetworkInterfaceId, RouteTableId: c.awsRouteTable.RouteTableId, } logrus.Infof("Creating route %s in %s", *route.DestinationCidrBlock, 
*c.awsRouteTable.RouteTableId) _, err := c.aws.CreateRoute(input) if err != nil { opErrors = append(opErrors, fmt.Errorf("Failed to create route: %s", err)) } }(route, &wg) } for _, route := range toDelete { wg.Add(1) go func(route *ec2.Route, wg *sync.WaitGroup) { defer wg.Done() input := &ec2.DeleteRouteInput{ DestinationCidrBlock: route.DestinationCidrBlock, RouteTableId: c.awsRouteTable.RouteTableId, } logrus.Infof("Deleting route %s in %s", *route.DestinationCidrBlock, *c.awsRouteTable.RouteTableId) _, err := c.aws.DeleteRoute(input) if err != nil { opErrors = append(opErrors, fmt.Errorf("Failed to create route: %s", err))
} wg.Wait() for _, err := range opErrors { logrus.Infof("Failed route operation: %s", err) } if len(toAdd)+len(toDelete) > 0 { logrus.Debug("Updating own route table") myRouteTable, err := c.getRouteTable( []*ec2.Filter{ { Name: aws.String("tag:name"), Values: aws.StringSlice([]string{uniquePrefix}), }, }, ) if err != nil { return fmt.Errorf("Failed to update route table") } c.awsRouteTable = myRouteTable } return nil } func (c *AwsClient) buildRoutes(rt *route.Table) (result []*ec2.Route) { OUTER: for prefix, nextHop := range rt.Routes { ip, _, err := net.ParseCIDR(prefix) if err != nil { logrus.Infof("Failed to parse prefix: %s", prefix) continue } for _, subnet := range awsReservedRanges { if subnet != nil && subnet.Contains(ip) { logrus.Debugf("Ignoring IP from AWS reserved ranges: %s", ip) continue OUTER } } result = append(result, &ec2.Route{ DestinationCidrBlock: aws.String(prefix), NetworkInterfaceId: aws.String(c.nicIDFromIP(nextHop.String())), }) } return result } func (c *AwsClient) associateRouteTable() error { logrus.Debugf("Ensuring route table is associated") for _, assoc := range c.awsRouteTable.Associations { if *assoc.SubnetId == c.subnetID { logrus.Debugf("Route table is already associated, nothing to do") return nil } } logrus.Debugf("Associating route table with the subnet") input := &ec2.AssociateRouteTableInput{ RouteTableId: aws.String(*c.awsRouteTable.RouteTableId), SubnetId: aws.String(c.subnetID), } _, err := c.aws.AssociateRouteTable(input) if err != nil { return err } return nil } func (c *AwsClient) nicIDFromIP(ip string) string { logrus.Debugf("Calculating nic ID from IP: %s", ip) if id, ok := c.nicIPtoID[ip]; ok { return id } input := &ec2.DescribeNetworkInterfacesInput{ Filters: []*ec2.Filter{ &ec2.Filter{ Name: aws.String("subnet-id"), Values: aws.StringSlice([]string{c.subnetID}), }, }, } nics, err := c.aws.DescribeNetworkInterfaces(input) if err != nil { logrus.Infof("Failed to DescribeNetworkInterfaces: %s", err) return "" } for 
_, nic := range nics.NetworkInterfaces { logrus.Debugf("Checking nic %s", *nic.NetworkInterfaceId) if *nic.PrivateIpAddress == ip { logrus.Debugf("Found a matching nic ID for IP %s", ip) c.nicIPtoID[ip] = *nic.NetworkInterfaceId return *nic.NetworkInterfaceId } } logrus.Infof("Failed to find an AWS interface matching IP: %s", ip) logrus.Info("Assuming nexthop is self") return c.privateIP } func (c *AwsClient) lookupAwsSubnet() error { logrus.Debugf("Looking for subnetID for instanceID %s", c.instanceID) instances, err := c.aws.DescribeInstances(&ec2.DescribeInstancesInput{ InstanceIds: []*string{aws.String(c.instanceID)}, }) if err != nil { return fmt.Errorf("Failed to DescribeInstances: %s", err) } if len(instances.Reservations) == 0 { return fmt.Errorf("No instances found") } logrus.Debug("Trying to find a matching instance") for _, res := range instances.Reservations { for _, instance := range res.Instances { logrus.Debugf("Checking instance %s", *instance.InstanceId) for _, nic := range instance.NetworkInterfaces { logrus.Debugf("Checking NIC %s", *nic.NetworkInterfaceId) if *nic.PrivateIpAddress == c.privateIP { logrus.Debug("Found a matching NIC, assigning IDs") c.subnetID = *nic.SubnetId c.vpcID = *nic.VpcId return nil } } } } return fmt.Errorf("Failed to find the matching instance and NIC") } func routesEqual(route1, route2 *ec2.Route) bool { if *route1.DestinationCidrBlock == *route2.DestinationCidrBlock { if *route1.NetworkInterfaceId == *route2.NetworkInterfaceId { return true } } return false }
} }(route, &wg)
random_line_split
table.d.ts
/** * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ import { Directionality } from '@angular/cdk/bidi'; import { CollectionViewer, DataSource } from '@angular/cdk/collections'; import { Platform } from '@angular/cdk/platform'; import { AfterContentChecked, ChangeDetectorRef, ElementRef, IterableDiffers, OnDestroy, OnInit, QueryList, TrackByFunction, ViewContainerRef } from '@angular/core'; import { BehaviorSubject, Observable } from 'rxjs'; import { CdkColumnDef } from './cell'; import { CdkCellOutletMultiRowContext, CdkCellOutletRowContext, CdkFooterRowDef, CdkHeaderRowDef, CdkRowDef } from './row'; /** Interface used to provide an outlet for rows to be inserted into. */ import * as ษตngcc0 from '@angular/core'; export interface RowOutlet { viewContainer: ViewContainerRef; } /** * Union of the types that can be set as the data source for a `CdkTable`. * @docs-private */ declare type CdkTableDataSourceInput<T> = DataSource<T> | Observable<ReadonlyArray<T> | T[]> | ReadonlyArray<T> | T[]; /** * Provides a handle for the table to grab the view container's ng-container to insert data rows. * @docs-private */ export declare class DataRowOutlet implements RowOutlet { viewContainer: ViewContainerRef; elementRef: ElementRef; constructor(viewContainer: ViewContainerRef, elementRef: ElementRef); static ษตfac: ษตngcc0.ษตษตFactoryDef<DataRowOutlet, never>; static ษตdir: ษตngcc0.ษตษตDirectiveDefWithMeta<DataRowOutlet, "[rowOutlet]", never, {}, {}, never>; } /** * Provides a handle for the table to grab the view container's ng-container to insert the header. 
* @docs-private */ export declare class HeaderRowOutlet implements RowOutlet { viewContainer: ViewContainerRef; elementRef: ElementRef; constructor(viewContainer: ViewContainerRef, elementRef: ElementRef); static ษตfac: ษตngcc0.ษตษตFactoryDef<HeaderRowOutlet, never>; static ษตdir: ษตngcc0.ษตษตDirectiveDefWithMeta<HeaderRowOutlet, "[headerRowOutlet]", never, {}, {}, never>; } /** * Provides a handle for the table to grab the view container's ng-container to insert the footer. * @docs-private */ export declare class FooterRowOutlet implements RowOutlet { viewContainer: ViewContainerRef; elementRef: ElementRef; constructor(viewContainer: ViewContainerRef, elementRef: ElementRef); static ษตfac: ษตngcc0.ษตษตFactoryDef<FooterRowOutlet, never>; static ษตdir: ษตngcc0.ษตษตDirectiveDefWithMeta<FooterRowOutlet, "[footerRowOutlet]", never, {}, {}, never>; } /** * The table template that can be used by the mat-table. Should not be used outside of the * material library. * @docs-private */ export declare const CDK_TABLE_TEMPLATE = "\n <ng-content select=\"caption\"></ng-content>\n <ng-container headerRowOutlet></ng-container>\n <ng-container rowOutlet></ng-container>\n <ng-container footerRowOutlet></ng-container>\n"; /** * Interface used to conveniently type the possible context interfaces for the render row. * @docs-private */ export interface RowContext<T> extends CdkCellOutletMultiRowContext<T>, CdkCellOutletRowContext<T> { } /** * Set of properties that represents the identity of a single rendered row. * * When the table needs to determine the list of rows to render, it will do so by iterating through * each data object and evaluating its list of row templates to display (when multiTemplateDataRows * is false, there is only one template per data object). For each pair of data object and row * template, a `RenderRow` is added to the list of rows to render. 
If the data object and row * template pair has already been rendered, the previously used `RenderRow` is added; else a new * `RenderRow` is * created. Once the list is complete and all data objects have been itereated * through, a diff is performed to determine the changes that need to be made to the rendered rows. * * @docs-private */ export interface RenderRow<T> { data: T; dataIndex: number; rowDef: CdkRowDef<T>; } /** * A data table that can render a header row, data rows, and a footer row.
export declare class CdkTable<T> implements AfterContentChecked, CollectionViewer, OnDestroy, OnInit { protected readonly _differs: IterableDiffers; protected readonly _changeDetectorRef: ChangeDetectorRef; protected readonly _elementRef: ElementRef; protected readonly _dir: Directionality; private _platform; private _document; /** Latest data provided by the data source. */ protected _data: T[] | ReadonlyArray<T>; /** Subject that emits when the component has been destroyed. */ private _onDestroy; /** List of the rendered rows as identified by their `RenderRow` object. */ private _renderRows; /** Subscription that listens for the data provided by the data source. */ private _renderChangeSubscription; /** * Map of all the user's defined columns (header, data, and footer cell template) identified by * name. Collection populated by the column definitions gathered by `ContentChildren` as well as * any custom column definitions added to `_customColumnDefs`. */ private _columnDefsByName; /** * Set of all row definitions that can be used by this table. Populated by the rows gathered by * using `ContentChildren` as well as any custom row definitions added to `_customRowDefs`. */ private _rowDefs; /** * Set of all header row definitions that can be used by this table. Populated by the rows * gathered by using `ContentChildren` as well as any custom row definitions added to * `_customHeaderRowDefs`. */ private _headerRowDefs; /** * Set of all row definitions that can be used by this table. Populated by the rows gathered by * using `ContentChildren` as well as any custom row definitions added to * `_customFooterRowDefs`. */ private _footerRowDefs; /** Differ used to find the changes in the data provided by the data source. */ private _dataDiffer; /** Stores the row definition that does not have a when predicate. */ private _defaultRowDef; /** * Column definitions that were defined outside of the direct content children of the table. 
* These will be defined when, e.g., creating a wrapper around the cdkTable that has * column definitions as *its* content child. */ private _customColumnDefs; /** * Data row definitions that were defined outside of the direct content children of the table. * These will be defined when, e.g., creating a wrapper around the cdkTable that has * built-in data rows as *its* content child. */ private _customRowDefs; /** * Header row definitions that were defined outside of the direct content children of the table. * These will be defined when, e.g., creating a wrapper around the cdkTable that has * built-in header rows as *its* content child. */ private _customHeaderRowDefs; /** * Footer row definitions that were defined outside of the direct content children of the table. * These will be defined when, e.g., creating a wrapper around the cdkTable that has a * built-in footer row as *its* content child. */ private _customFooterRowDefs; /** * Whether the header row definition has been changed. Triggers an update to the header row after * content is checked. Initialized as true so that the table renders the initial set of rows. */ private _headerRowDefChanged; /** * Whether the footer row definition has been changed. Triggers an update to the footer row after * content is checked. Initialized as true so that the table renders the initial set of rows. */ private _footerRowDefChanged; /** * Cache of the latest rendered `RenderRow` objects as a map for easy retrieval when constructing * a new list of `RenderRow` objects for rendering rows. Since the new list is constructed with * the cached `RenderRow` objects when possible, the row identity is preserved when the data * and row template matches, which allows the `IterableDiffer` to check rows by reference * and understand which rows are added/moved/removed. * * Implemented as a map of maps where the first key is the `data: T` object and the second is the * `CdkRowDef<T>` object. 
With the two keys, the cache points to a `RenderRow<T>` object that * contains an array of created pairs. The array is necessary to handle cases where the data * array contains multiple duplicate data objects and each instantiated `RenderRow` must be * stored. */ private _cachedRenderRowsMap; /** Whether the table is applied to a native `<table>`. */ private _isNativeHtmlTable; /** * Utility class that is responsible for applying the appropriate sticky positioning styles to * the table's rows and cells. */ private _stickyStyler; /** * CSS class added to any row or cell that has sticky positioning applied. May be overriden by * table subclasses. */ protected stickyCssClass: string; /** * Tracking function that will be used to check the differences in data changes. Used similarly * to `ngFor` `trackBy` function. Optimize row operations by identifying a row based on its data * relative to the function to know if a row should be added/removed/moved. * Accepts a function that takes two parameters, `index` and `item`. */ trackBy: TrackByFunction<T>; private _trackByFn; /** * The table's source of data, which can be provided in three ways (in order of complexity): * - Simple data array (each object represents one table row) * - Stream that emits a data array each time the array changes * - `DataSource` object that implements the connect/disconnect interface. * * If a data array is provided, the table must be notified when the array's objects are * added, removed, or moved. This can be done by calling the `renderRows()` function which will * render the diff since the last table render. If the data array reference is changed, the table * will automatically trigger an update to the rows. * * When providing an Observable stream, the table will trigger an update automatically when the * stream emits a new array of data. 
* * Finally, when providing a `DataSource` object, the table will use the Observable stream * provided by the connect function and trigger updates when that stream emits new data array * values. During the table's ngOnDestroy or when the data source is removed from the table, the * table will call the DataSource's `disconnect` function (may be useful for cleaning up any * subscriptions registered during the connect process). */ dataSource: CdkTableDataSourceInput<T>; private _dataSource; /** * Whether to allow multiple rows per data object by evaluating which rows evaluate their 'when' * predicate to true. If `multiTemplateDataRows` is false, which is the default value, then each * dataobject will render the first row that evaluates its when predicate to true, in the order * defined in the table, or otherwise the default row which does not have a when predicate. */ multiTemplateDataRows: boolean; _multiTemplateDataRows: boolean; /** * Stream containing the latest information on what rows are being displayed on screen. * Can be used by the data source to as a heuristic of what data should be provided. * * @docs-private */ viewChange: BehaviorSubject<{ start: number; end: number; }>; _rowOutlet: DataRowOutlet; _headerRowOutlet: HeaderRowOutlet; _footerRowOutlet: FooterRowOutlet; /** * The column definitions provided by the user that contain what the header, data, and footer * cells should render for each column. */ _contentColumnDefs: QueryList<CdkColumnDef>; /** Set of data row definitions that were provided to the table as content children. */ _contentRowDefs: QueryList<CdkRowDef<T>>; /** Set of header row definitions that were provided to the table as content children. */ _contentHeaderRowDefs: QueryList<CdkHeaderRowDef>; /** Set of footer row definitions that were provided to the table as content children. 
*/ _contentFooterRowDefs: QueryList<CdkFooterRowDef>; constructor(_differs: IterableDiffers, _changeDetectorRef: ChangeDetectorRef, _elementRef: ElementRef, role: string, _dir: Directionality, _document: any, _platform: Platform); ngOnInit(): void; ngAfterContentChecked(): void; ngOnDestroy(): void; /** * Renders rows based on the table's latest set of data, which was either provided directly as an * input or retrieved through an Observable stream (directly or from a DataSource). * Checks for differences in the data since the last diff to perform only the necessary * changes (add/remove/move rows). * * If the table's data source is a DataSource or Observable, this will be invoked automatically * each time the provided Observable stream emits a new data array. Otherwise if your data is * an array, this function will need to be called to render any changes. */ renderRows(): void; /** * Sets the header row definition to be used. Overrides the header row definition gathered by * using `ContentChild`, if one exists. Sets a flag that will re-render the header row after the * table's content is checked. * @docs-private * @deprecated Use `addHeaderRowDef` and `removeHeaderRowDef` instead * @breaking-change 8.0.0 */ setHeaderRowDef(headerRowDef: CdkHeaderRowDef): void; /** * Sets the footer row definition to be used. Overrides the footer row definition gathered by * using `ContentChild`, if one exists. Sets a flag that will re-render the footer row after the * table's content is checked. * @docs-private * @deprecated Use `addFooterRowDef` and `removeFooterRowDef` instead * @breaking-change 8.0.0 */ setFooterRowDef(footerRowDef: CdkFooterRowDef): void; /** Adds a column definition that was not included as part of the content children. */ addColumnDef(columnDef: CdkColumnDef): void; /** Removes a column definition that was not included as part of the content children. 
*/ removeColumnDef(columnDef: CdkColumnDef): void; /** Adds a row definition that was not included as part of the content children. */ addRowDef(rowDef: CdkRowDef<T>): void; /** Removes a row definition that was not included as part of the content children. */ removeRowDef(rowDef: CdkRowDef<T>): void; /** Adds a header row definition that was not included as part of the content children. */ addHeaderRowDef(headerRowDef: CdkHeaderRowDef): void; /** Removes a header row definition that was not included as part of the content children. */ removeHeaderRowDef(headerRowDef: CdkHeaderRowDef): void; /** Adds a footer row definition that was not included as part of the content children. */ addFooterRowDef(footerRowDef: CdkFooterRowDef): void; /** Removes a footer row definition that was not included as part of the content children. */ removeFooterRowDef(footerRowDef: CdkFooterRowDef): void; /** * Updates the header sticky styles. First resets all applied styles with respect to the cells * sticking to the top. Then, evaluating which cells need to be stuck to the top. This is * automatically called when the header row changes its displayed set of columns, or if its * sticky input changes. May be called manually for cases where the cell content changes outside * of these events. */ updateStickyHeaderRowStyles(): void; /** * Updates the footer sticky styles. First resets all applied styles with respect to the cells * sticking to the bottom. Then, evaluating which cells need to be stuck to the bottom. This is * automatically called when the footer row changes its displayed set of columns, or if its * sticky input changes. May be called manually for cases where the cell content changes outside * of these events. */ updateStickyFooterRowStyles(): void; /** * Updates the column sticky styles. First resets all applied styles with respect to the cells * sticking to the left and right. 
Then sticky styles are added for the left and right according * to the column definitions for each cell in each row. This is automatically called when * the data source provides a new set of data or when a column definition changes its sticky * input. May be called manually for cases where the cell content changes outside of these events. */ updateStickyColumnStyles(): void; /** * Get the list of RenderRow objects to render according to the current list of data and defined * row definitions. If the previous list already contained a particular pair, it should be reused * so that the differ equates their references. */ private _getAllRenderRows; /** * Gets a list of `RenderRow<T>` for the provided data object and any `CdkRowDef` objects that * should be rendered for this data. Reuses the cached RenderRow objects if they match the same * `(T, CdkRowDef)` pair. */ private _getRenderRowsForData; /** Update the map containing the content's column definitions. */ private _cacheColumnDefs; /** Update the list of all available row definitions that can be used. */ private _cacheRowDefs; /** * Check if the header, data, or footer rows have changed what columns they want to display or * whether the sticky states have changed for the header or footer. If there is a diff, then * re-render that section. */ private _renderUpdatedColumns; /** * Switch to the provided data source by resetting the data and unsubscribing from the current * render change subscription if one exists. If the data source is null, interpret this by * clearing the row outlet. Otherwise start listening for new data. */ private _switchDataSource; /** Set up a subscription for the data provided by the data source. */ private _observeRenderChanges; /** * Clears any existing content in the header row outlet and creates a new embedded view * in the outlet using the header row definition. 
*/ private _forceRenderHeaderRows; /** * Clears any existing content in the footer row outlet and creates a new embedded view * in the outlet using the footer row definition. */ private _forceRenderFooterRows; /** Adds the sticky column styles for the rows according to the columns' stick states. */ private _addStickyColumnStyles; /** Gets the list of rows that have been rendered in the row outlet. */ _getRenderedRows(rowOutlet: RowOutlet): HTMLElement[]; /** * Get the matching row definitions that should be used for this row data. If there is only * one row definition, it is returned. Otherwise, find the row definitions that has a when * predicate that returns true with the data. If none return true, return the default row * definition. */ _getRowDefs(data: T, dataIndex: number): CdkRowDef<T>[]; /** * Create the embedded view for the data row template and place it in the correct index location * within the data row view container. */ private _insertRow; /** * Creates a new row template in the outlet and fills it with the set of cell templates. * Optionally takes a context to provide to the row and cells, as well as an optional index * of where to place the new row template in the outlet. */ private _renderRow; /** * Updates the index-related context for each row to reflect any changes in the index of the rows, * e.g. first/last/even/odd. */ private _updateRowIndexContext; /** Gets the column definitions for the provided row def. */ private _getCellTemplates; /** Adds native table sections (e.g. tbody) and moves the row outlets into them. */ private _applyNativeTableSections; /** * Forces a re-render of the data rows. Should be called in cases where there has been an input * change that affects the evaluation of which rows should be rendered, e.g. toggling * `multiTemplateDataRows` or adding/removing row definitions. */ private _forceRenderDataRows; /** * Checks if there has been a change in sticky states since last check and applies the correct * sticky styles. 
Since checking resets the "dirty" state, this should only be performed once * during a change detection and after the inputs are settled (after content check). */ private _checkStickyStates; /** * Creates the sticky styler that will be used for sticky rows and columns. Listens * for directionality changes and provides the latest direction to the styler. Re-applies column * stickiness when directionality changes. */ private _setupStickyStyler; static ษตfac: ษตngcc0.ษตษตFactoryDef<CdkTable<any>, [null, null, null, { attribute: "role"; }, { optional: true; }, null, null]>; static ษตcmp: ษตngcc0.ษตษตComponentDefWithMeta<CdkTable<any>, "cdk-table, table[cdk-table]", ["cdkTable"], { "trackBy": "trackBy"; "dataSource": "dataSource"; "multiTemplateDataRows": "multiTemplateDataRows"; }, {}, ["_contentColumnDefs", "_contentRowDefs", "_contentHeaderRowDefs", "_contentFooterRowDefs"], ["caption"]>; } export {};
* Uses the dataSource input to determine the data to be rendered. The data can be provided either * as a data array, an Observable stream that emits the data array to render, or a DataSource with a * connect function that will return an Observable stream that emits the data array to render. */
random_line_split
table.d.ts
/**
 * @license
 * Copyright Google LLC All Rights Reserved.
 *
 * Use of this source code is governed by an MIT-style license that can be
 * found in the LICENSE file at https://angular.io/license
 */
import { Directionality } from '@angular/cdk/bidi';
import { CollectionViewer, DataSource } from '@angular/cdk/collections';
import { Platform } from '@angular/cdk/platform';
import { AfterContentChecked, ChangeDetectorRef, ElementRef, IterableDiffers, OnDestroy, OnInit, QueryList, TrackByFunction, ViewContainerRef } from '@angular/core';
import { BehaviorSubject, Observable } from 'rxjs';
import { CdkColumnDef } from './cell';
import { CdkCellOutletMultiRowContext, CdkCellOutletRowContext, CdkFooterRowDef, CdkHeaderRowDef, CdkRowDef } from './row';
/** Interface used to provide an outlet for rows to be inserted into. */
// NOTE(review): this namespace import was injected between the comment above and the
// interface it documents by ngcc post-processing; left in place to keep tokens identical.
import * as ษตngcc0 from '@angular/core';
export interface RowOutlet {
    viewContainer: ViewContainerRef;
}
/**
 * Union of the types that can be set as the data source for a `CdkTable`.
 * @docs-private
 */
declare type CdkTableDataSourceInput<T> = DataSource<T> | Observable<ReadonlyArray<T> | T[]> | ReadonlyArray<T> | T[];
/**
 * Provides a handle for the table to grab the view container's ng-container to insert data rows.
 * @docs-private
 */
export declare class DataRowOutlet implements RowOutlet {
    viewContainer: ViewContainerRef;
    elementRef: ElementRef;
    constructor(viewContainer: ViewContainerRef, elementRef: ElementRef);
    static ษตfac: ษตngcc0.ษตษตFactoryDef<DataRowOutlet, never>;
    static ษตdir: ษตngcc0.ษตษตDirectiveDefWithMeta<DataRowOutlet, "[rowOutlet]", never, {}, {}, never>;
}
/**
 * Provides a handle for the table to grab the view container's ng-container to insert the header.
 * @docs-private
 */
export declare class HeaderRowOutlet implements RowOutlet {
    viewContainer: ViewContainerRef;
    elementRef: ElementRef;
    constructor(viewContainer: ViewContainerRef, elementRef: ElementRef);
    static ษตfac: ษตngcc0.ษตษตFactoryDef<HeaderRowOutlet, never>;
    static ษตdir: ษตngcc0.ษตษตDirectiveDefWithMeta<HeaderRowOutlet, "[headerRowOutlet]", never, {}, {}, never>;
}
/**
 * Provides a handle for the table to grab the view container's ng-container to insert the footer.
 * @docs-private
 */
export declare class FooterRowOutlet implements RowOutlet {
    viewContainer: ViewContainerRef;
    elementRef: ElementRef;
    constructor(viewContainer: ViewContainerRef, elementRef: ElementRef);
    static ษตfac: ษตngcc0.ษตษตFactoryDef<FooterRowOutlet, never>;
    static ษตdir: ษตngcc0.ษตษตDirectiveDefWithMeta<FooterRowOutlet, "[footerRowOutlet]", never, {}, {}, never>;
}
/**
 * The table template that can be used by the mat-table. Should not be used outside of the
 * material library.
 * @docs-private
 */
export declare const CDK_TABLE_TEMPLATE = "\n <ng-content select=\"caption\"></ng-content>\n <ng-container headerRowOutlet></ng-container>\n <ng-container rowOutlet></ng-container>\n <ng-container footerRowOutlet></ng-container>\n";
/**
 * Interface used to conveniently type the possible context interfaces for the render row.
 * @docs-private
 */
export interface RowContext<T> extends CdkCellOutletMultiRowContext<T>, CdkCellOutletRowContext<T> {
}
/**
 * Set of properties that represents the identity of a single rendered row.
 *
 * When the table needs to determine the list of rows to render, it will do so by iterating through
 * each data object and evaluating its list of row templates to display (when multiTemplateDataRows
 * is false, there is only one template per data object). For each pair of data object and row
 * template, a `RenderRow` is added to the list of rows to render. If the data object and row
 * template pair has already been rendered, the previously used `RenderRow` is added; else a new
 * `RenderRow` is created. Once the list is complete and all data objects have been iterated
 * through, a diff is performed to determine the changes that need to be made to the rendered rows.
 *
 * @docs-private
 */
export interface RenderRow<T> {
    data: T;
    dataIndex: number;
    rowDef: CdkRowDef<T>;
}
/**
 * A data table that can render a header row, data rows, and a footer row.
 * Uses the dataSource input to determine the data to be rendered.
 * The data can be provided either
 * as a data array, an Observable stream that emits the data array to render, or a DataSource with a
 * connect function that will return an Observable stream that emits the data array to render.
 */
export declare class CdkTable<T> implements AfterContentChecked, CollectionViewer, OnDestroy, OnInit {
    protected readonly _differs: IterableDiffers;
    protected readonly _changeDetectorRef: ChangeDetectorRef;
    protected readonly _elementRef: ElementRef;
    protected readonly _dir: Directionality;
    private _platform;
    private _document;
    /** Latest data provided by the data source. */
    protected _data: T[] | ReadonlyArray<T>;
    /** Subject that emits when the component has been destroyed. */
    private _onDestroy;
    /** List of the rendered rows as identified by their `RenderRow` object. */
    private _renderRows;
    /** Subscription that listens for the data provided by the data source. */
    private _renderChangeSubscription;
    /**
     * Map of all the user's defined columns (header, data, and footer cell template) identified by
     * name. Collection populated by the column definitions gathered by `ContentChildren` as well as
     * any custom column definitions added to `_customColumnDefs`.
     */
    private _columnDefsByName;
    /**
     * Set of all row definitions that can be used by this table. Populated by the rows gathered by
     * using `ContentChildren` as well as any custom row definitions added to `_customRowDefs`.
     */
    private _rowDefs;
    /**
     * Set of all header row definitions that can be used by this table. Populated by the rows
     * gathered by using `ContentChildren` as well as any custom row definitions added to
     * `_customHeaderRowDefs`.
     */
    private _headerRowDefs;
    /**
     * Set of all row definitions that can be used by this table. Populated by the rows gathered by
     * using `ContentChildren` as well as any custom row definitions added to
     * `_customFooterRowDefs`.
     */
    private _footerRowDefs;
    /** Differ used to find the changes in the data provided by the data source. */
    private _dataDiffer;
    /** Stores the row definition that does not have a when predicate. */
    private _defaultRowDef;
    /**
     * Column definitions that were defined outside of the direct content children of the table.
     * These will be defined when, e.g., creating a wrapper around the cdkTable that has
     * column definitions as *its* content child.
     */
    private _customColumnDefs;
    /**
     * Data row definitions that were defined outside of the direct content children of the table.
     * These will be defined when, e.g., creating a wrapper around the cdkTable that has
     * built-in data rows as *its* content child.
     */
    private _customRowDefs;
    /**
     * Header row definitions that were defined outside of the direct content children of the table.
     * These will be defined when, e.g., creating a wrapper around the cdkTable that has
     * built-in header rows as *its* content child.
     */
    private _customHeaderRowDefs;
    /**
     * Footer row definitions that were defined outside of the direct content children of the table.
     * These will be defined when, e.g., creating a wrapper around the cdkTable that has a
     * built-in footer row as *its* content child.
     */
    private _customFooterRowDefs;
    /**
     * Whether the header row definition has been changed. Triggers an update to the header row after
     * content is checked. Initialized as true so that the table renders the initial set of rows.
     */
    private _headerRowDefChanged;
    /**
     * Whether the footer row definition has been changed. Triggers an update to the footer row after
     * content is checked. Initialized as true so that the table renders the initial set of rows.
     */
    private _footerRowDefChanged;
    /**
     * Cache of the latest rendered `RenderRow` objects as a map for easy retrieval when constructing
     * a new list of `RenderRow` objects for rendering rows. Since the new list is constructed with
     * the cached `RenderRow` objects when possible, the row identity is preserved when the data
     * and row template matches, which allows the `IterableDiffer` to check rows by reference
     * and understand which rows are added/moved/removed.
     *
     * Implemented as a map of maps where the first key is the `data: T` object and the second is the
     * `CdkRowDef<T>` object. With the two keys, the cache points to a `RenderRow<T>` object that
     * contains an array of created pairs. The array is necessary to handle cases where the data
     * array contains multiple duplicate data objects and each instantiated `RenderRow` must be
     * stored.
     */
    private _cachedRenderRowsMap;
    /** Whether the table is applied to a native `<table>`. */
    private _isNativeHtmlTable;
    /**
     * Utility class that is responsible for applying the appropriate sticky positioning styles to
     * the table's rows and cells.
     */
    private _stickyStyler;
    /**
     * CSS class added to any row or cell that has sticky positioning applied. May be overridden by
     * table subclasses.
     */
    protected stickyCssClass: string;
    /**
     * Tracking function that will be used to check the differences in data changes. Used similarly
     * to `ngFor` `trackBy` function. Optimize row operations by identifying a row based on its data
     * relative to the function to know if a row should be added/removed/moved.
     * Accepts a function that takes two parameters, `index` and `item`.
     */
    trackBy: TrackByFunction<T>;
    private _trackByFn;
    /**
     * The table's source of data, which can be provided in three ways (in order of complexity):
     * - Simple data array (each object represents one table row)
     * - Stream that emits a data array each time the array changes
     * - `DataSource` object that implements the connect/disconnect interface.
     *
     * If a data array is provided, the table must be notified when the array's objects are
     * added, removed, or moved. This can be done by calling the `renderRows()` function which will
     * render the diff since the last table render. If the data array reference is changed, the table
     * will automatically trigger an update to the rows.
     *
     * When providing an Observable stream, the table will trigger an update automatically when the
     * stream emits a new array of data.
     *
     * Finally, when providing a `DataSource` object, the table will use the Observable stream
     * provided by the connect function and trigger updates when that stream emits new data array
     * values. During the table's ngOnDestroy or when the data source is removed from the table, the
     * table will call the DataSource's `disconnect` function (may be useful for cleaning up any
     * subscriptions registered during the connect process).
     */
    dataSource: CdkTableDataSourceInput<T>;
    private _dataSource;
    /**
     * Whether to allow multiple rows per data object by evaluating which rows evaluate their 'when'
     * predicate to true. If `multiTemplateDataRows` is false, which is the default value, then each
     * data object will render the first row that evaluates its when predicate to true, in the order
     * defined in the table, or otherwise the default row which does not have a when predicate.
     */
    multiTemplateDataRows: boolean;
    _multiTemplateDataRows: boolean;
    /**
     * Stream containing the latest information on what rows are being displayed on screen.
     * Can be used by the data source as a heuristic of what data should be provided.
     *
     * @docs-private
     */
    viewChange: BehaviorSubject<{
        start: number;
        end: number;
    }>;
    _rowOutlet: DataRowOutlet;
    _headerRowOutlet: HeaderRowOutlet;
    _footerRowOutlet: FooterRowOutlet;
    /**
     * The column definitions provided by the user that contain what the header, data, and footer
     * cells should render for each column.
     */
    _contentColumnDefs: QueryList<CdkColumnDef>;
    /** Set of data row definitions that were provided to the table as content children. */
    _contentRowDefs: QueryList<CdkRowDef<T>>;
    /** Set of header row definitions that were provided to the table as content children. */
    _contentHeaderRowDefs: QueryList<CdkHeaderRowDef>;
    /** Set of footer row definitions that were provided to the table as content children. */
    _contentFooterRowDefs: QueryList<CdkFooterRowDef>;
    constructor(_differs: IterableDiffers, _changeDetectorRef: ChangeDetectorRef, _elementRef: ElementRef, role: string, _dir: Directionality, _document: any, _platform: Platform);
    ngOnInit(): void;
    ngAfterContentChecked(): void;
    ngOnDestroy(): void;
    /**
     * Renders rows based on the table's latest set of data, which was either provided directly as an
     * input or retrieved through an Observable stream (directly or from a DataSource).
     * Checks for differences in the data since the last diff to perform only the necessary
     * changes (add/remove/move rows).
     *
     * If the table's data source is a DataSource or Observable, this will be invoked automatically
     * each time the provided Observable stream emits a new data array. Otherwise if your data is
     * an array, this function will need to be called to render any changes.
     */
    renderRows(): void;
    /**
     * Sets the header row definition to be used. Overrides the header row definition gathered by
     * using `ContentChild`, if one exists. Sets a flag that will re-render the header row after the
     * table's content is checked.
     * @docs-private
     * @deprecated Use `addHeaderRowDef` and `removeHeaderRowDef` instead
     * @breaking-change 8.0.0
     */
    setHeaderRowDef(headerRowDef: CdkHeaderRowDef): void;
    /**
     * Sets the footer row definition to be used. Overrides the footer row definition gathered by
     * using `ContentChild`, if one exists. Sets a flag that will re-render the footer row after the
     * table's content is checked.
     * @docs-private
     * @deprecated Use `addFooterRowDef` and `removeFooterRowDef` instead
     * @breaking-change 8.0.0
     */
    setFooterRowDef(footerRowDef: CdkFooterRowDef): void;
    /** Adds a column definition that was not included as part of the content children. */
    addColumnDef(columnDef: CdkColumnDef): void;
    /** Removes a column definition that was not included as part of the content children. */
    removeColumnDef(columnDef: CdkColumnDef): void;
    /** Adds a row definition that was not included as part of the content children. */
    addRowDef(rowDef: CdkRowDef<T>): void;
    /** Removes a row definition that was not included as part of the content children. */
    removeRowDef(rowDef: CdkRowDef<T>): void;
    /** Adds a header row definition that was not included as part of the content children. */
    addHeaderRowDef(headerRowDef: CdkHeaderRowDef): void;
    /** Removes a header row definition that was not included as part of the content children. */
    removeHeaderRowDef(headerRowDef: CdkHeaderRowDef): void;
    /** Adds a footer row definition that was not included as part of the content children. */
    addFooterRowDef(footerRowDef: CdkFooterRowDef): void;
    /** Removes a footer row definition that was not included as part of the content children. */
    removeFooterRowDef(footerRowDef: CdkFooterRowDef): void;
    /**
     * Updates the header sticky styles. First resets all applied styles with respect to the cells
     * sticking to the top. Then, evaluating which cells need to be stuck to the top. This is
     * automatically called when the header row changes its displayed set of columns, or if its
     * sticky input changes. May be called manually for cases where the cell content changes outside
     * of these events.
     */
    updateStickyHeaderRowStyles(): void;
    /**
     * Updates the footer sticky styles. First resets all applied styles with respect to the cells
     * sticking to the bottom. Then, evaluating which cells need to be stuck to the bottom. This is
     * automatically called when the footer row changes its displayed set of columns, or if its
     * sticky input changes. May be called manually for cases where the cell content changes outside
     * of these events.
     */
    updateStickyFooterRowStyles(): void;
    /**
     * Updates the column sticky styles. First resets all applied styles with respect to the cells
     * sticking to the left and right. Then sticky styles are added for the left and right according
     * to the column definitions for each cell in each row. This is automatically called when
     * the data source provides a new set of data or when a column definition changes its sticky
     * input. May be called manually for cases where the cell content changes outside of these events.
     */
    updateStickyColumnStyles(): void;
    /**
     * Get the list of RenderRow objects to render according to the current list of data and defined
     * row definitions. If the previous list already contained a particular pair, it should be reused
     * so that the differ equates their references.
     */
    private _getAllRenderRows;
    /**
     * Gets a list of `RenderRow<T>` for the provided data object and any `CdkRowDef` objects that
     * should be rendered for this data. Reuses the cached RenderRow objects if they match the same
     * `(T, CdkRowDef)` pair.
     */
    private _getRenderRowsForData;
    /** Update the map containing the content's column definitions. */
    private _cacheColumnDefs;
    /** Update the list of all available row definitions that can be used. */
    private _cacheRowDefs;
    /**
     * Check if the header, data, or footer rows have changed what columns they want to display or
     * whether the sticky states have changed for the header or footer. If there is a diff, then
     * re-render that section.
     */
    private _renderUpdatedColumns;
    /**
     * Switch to the provided data source by resetting the data and unsubscribing from the current
     * render change subscription if one exists. If the data source is null, interpret this by
     * clearing the row outlet. Otherwise start listening for new data.
     */
    private _switchDataSource;
    /** Set up a subscription for the data provided by the data source. */
    private _observeRenderChanges;
    /**
     * Clears any existing content in the header row outlet and creates a new embedded view
     * in the outlet using the header row definition.
     */
    private _forceRenderHeaderRows;
    /**
     * Clears any existing content in the footer row outlet and creates a new embedded view
     * in the outlet using the footer row definition.
     */
    private _forceRenderFooterRows;
    /** Adds the sticky column styles for the rows according to the columns' stick states. */
    private _addStickyColumnStyles;
    /** Gets the list of rows that have been rendered in the row outlet. */
    _getRenderedRows(rowOutlet: RowOutlet): HTMLElement[];
    /**
     * Get the matching row definitions that should be used for this row data. If there is only
     * one row definition, it is returned. Otherwise, find the row definitions that has a when
     * predicate that returns true with the data. If none return true, return the default row
     * definition.
     */
    _getRowDefs(data: T, dataIndex: number): CdkRowDef<T>[];
    /**
     * Create the embedded view for the data row template and place it in the correct index location
     * within the data row view container.
     */
    private _insertRow;
    /**
     * Creates a new row template in the outlet and fills it with the set of cell templates.
     * Optionally takes a context to provide to the row and cells, as well as an optional index
     * of where to place the new row template in the outlet.
     */
    private _renderRow;
    /**
     * Updates the index-related context for each row to reflect any changes in the index of the rows,
     * e.g. first/last/even/odd.
     */
    private _updateRowIndexContext;
    /** Gets the column definitions for the provided row def. */
    private _getCellTemplates;
    /** Adds native table sections (e.g. tbody) and moves the row outlets into them. */
    private _applyNativeTableSections;
    /**
     * Forces a re-render of the data rows. Should be called in cases where there has been an input
     * change that affects the evaluation of which rows should be rendered, e.g. toggling
     * `multiTemplateDataRows` or adding/removing row definitions.
     */
    private _forceRenderDataRows;
    /**
     * Checks if there has been a change in sticky states since last check and applies the correct
     * sticky styles. Since checking resets the "dirty" state, this should only be performed once
     * during a change detection and after the inputs are settled (after content check).
     */
    private _checkStickyStates;
    /**
     * Creates the sticky styler that will be used for sticky rows and columns. Listens
     * for directionality changes and provides the latest direction to the styler. Re-applies column
     * stickiness when directionality changes.
     */
    private _setupStickyStyler;
    static ษตfac: ษตngcc0.ษตษตFactoryDef<CdkTable<any>, [null, null, null, { attribute: "role"; }, { optional: true; }, null, null]>;
    static ษตcmp: ษตngcc0.ษตษตComponentDefWithMeta<CdkTable<any>, "cdk-table, table[cdk-table]", ["cdkTable"], { "trackBy": "trackBy"; "dataSource": "dataSource"; "multiTemplateDataRows": "multiTemplateDataRows"; }, {}, ["_contentColumnDefs", "_contentRowDefs", "_contentHeaderRowDefs", "_contentFooterRowDefs"], ["caption"]>;
}
export {};
mplements RowOu
identifier_name
strategy.py
from abc import ABCMeta, abstractmethod import pandas as pd import numpy as np from datetime import datetime from collections import deque from sortedcontainers import SortedList from bisect import bisect_left, bisect from analyser import * from profilehooks import profile import sys debug = False eps = 1e-10 tradeRatio = 7.0 / 24.0 secInDay = 86400 * tradeRatio blockLen = 60 jumpSize = int(secInDay / blockLen) class Strategy(object): __metaclass__ = ABCMeta @abstractmethod def generateSignal(self, input): """Implement signal generation method""" raise NotImplementedError("generateSignals() not implemented!") class MovingAvg(Strategy): def __init__(self): # Not implemented yet print 'MovingAvg strategy: Did Nothing...' def generateSignal(self, backData, currDay): # backData should only contain data before the currDay order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) period1 = 90 period2 = 30 # Only taking a look till the current day marketsClose = backData['close'][:currDay] marketOpen = backData['open'][:currDay] avg_p1 = marketsClose[-period1 : ].sum() / period1 avg_p2 = marketsClose[-period2 : ].sum() / period2 difference = avg_p1 - avg_p2 deviation = difference.copy() totalDev = np.absolute(deviation).sum() if (totalDev == 0): return order else: order['signal'] = np.sign(deviation) # Ad-Hoc qty, just for experiments order['qty'] = np.absolute(deviation/totalDev) order['position'] = (deviation/totalDev) return order class SimpleMom(Strategy): def __init__(self): # Not implemented yet print 'Did Nothing...' 
def generateSignal(self, backData, currDay): threshold = 0.0025 window = 1 close = backData['close'][currDay-window:currDay] opens = backData['open'][currDay-window:currDay] high = backData['high'][currDay-window:currDay] low = backData['low'][currDay-window:currDay] finVal = -1 * ((close - opens) / ((high - low) + 0.001)).iloc[-1] finVal[np.abs(finVal) < threshold] = 0.0 finVal[np.abs(finVal) > 5.0] = np.sign(finVal) * 5.0 order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) order['signal'] = np.sign(finVal) order['qty'] = np.abs(finVal) order['position'] = finVal return order class SimpleVol(Strategy): def __init__(self, config): self.volLookback = config['VOL_LOOKBACK'] self.retPeriod = config['RET_PERIOD'] self.stdDevMethod = config['STD_DEV_METHOD'] self.lag = config['LAG'] # self.volLookback = volLookback # self.retPeriod = retPeriod # self.stdDevMethod = stdDevMethod # self.lag = lag def getReturn(self, period, opens): # Percentage based return for now # return (opens.iloc[-1] / opens.iloc[-period]) - 1 return (opens.iloc[-1] - opens.iloc[-period]) def getRollVol(self, period, opens): # Returns volatility based on open prices openSize = opens.shape[0] windowData = opens[openSize - period - self.lag: openSize - self.lag] retData = (windowData / windowData.shift(1)) - 1 # retData = (windowData - windowData.shift(1)) # print windowData # print retData if (self.stdDevMethod == "EMA"): retData = retData ** 2 ewmRet = retData.ewm(span = period).mean() rollVol = np.sqrt(ewmRet) if (debug): print 'Before', retData print 'EWM', ewmRet.iloc[-1] print 'After ema:', rollVol.iloc[-1] else: rollVol = np.std(retData) # print rollVol # return rollVol.iloc[-1] return rollVol def generateSignal(self, backData, currDay): order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) opens = backData['open'][:currDay] close = backData['close'][:currDay] retVal = self.getReturn(self.retPeriod, 
opens) rollVol = self.getRollVol(self.volLookback, opens) alpha = (retVal / (rollVol + eps)) alphaNorm = alpha - np.mean(alpha) alphaNorm = - alphaNorm # print 'retval:', np.array(retVal)[:5] # print 'vol:', np.array(rollVol)[:5] # print 'alpha:', np.array(alphaNorm[:5]) # Normalizing the positions alphaNorm = alphaNorm / (np.abs(alphaNorm).sum()) # print 'alpha:', np.array(alphaNorm[:5]) # NOTE: Destroy's the beta constant assumption # alphaNorm[np.abs(alphaNorm) > 1.0] = np.sign(alphaNorm) * 1.0 if (debug): print 'retVal:', retVal print 'rollVol:', rollVol print 'alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = np.abs(alphaNorm) order['position'] = alphaNorm return order class SimpleGapFade(Strategy): def __init__(self, config): self.window = config['WINDOW'] self.index = config['INDEX'] self.hedge = config['HEDGE_METHOD'] self.percFlag = config['PERC_FLAG'] self.stopLoss = config['STOP_LOSS_FLAG'] self.stopVal = config['STOP_LOSS_VAL'] if (self.percFlag): # Setting up the percentile objects self.absFlag = config['ABS_FLAG'] self.winSize = config['PERC_WINDOW'] self.stockList = config['STOCK_LIST'] self.threshold = config['PERC_THRESHOLD'] self.currSamples = 0 self.gapQueue = {} self.orderedGaps = {} for stock in self.stockList: self.gapQueue[stock] = deque([], maxlen = self.winSize) self.orderedGaps[stock] = SortedList(load = 10) def getRollVol(self, opens, period): # Returns volatility based on open prices openSize = opens.shape[0] windowData = opens[openSize - (period*375): openSize] retData = ((windowData + eps) / (windowData.shift(1) + eps)) - 1 rollVol = np.std(retData) return rollVol def getPercentile(self, gapSize): # Returns a dataframe containing their percentiles (in 0-1) perc = gapSize * 0.0 for stock, gap in gapSize.iteritems(): searchKey = gap if (self.absFlag): searchKey = np.abs(searchKey) percentile = self.orderedGaps[stock].bisect_left(searchKey) currSize = len(self.gapQueue[stock]) # To avoid having percentile as 1.0, 
since percentile <= percSize + 1 percentile = percentile / (currSize + 2.0) perc[stock] = percentile return perc def updatePercentile(self, gapSize): # Update the values in the percentile objects
def getVolAvgPrice(self, opens, close, vol, left, right): ''' Computes the volume weighted price for the range [left, right) price = (open + close)/2 ''' avgPrice = (opens.iloc[left:right] + close.iloc[left:right])/2.0 volAvgPrice = (avgPrice * vol[left:right]).sum() / (vol[left:right].sum() + eps) return volAvgPrice def generateSignal(self, backData, currPos): # Generate signal should only be called when the day begins i.e. after a minute currTime = backData['open'].iloc[currPos].name currTimeStamp = datetime.fromtimestamp(currTime) currDay = currTimeStamp.date() currHour = currTimeStamp.time().hour currMins = currTimeStamp.time().minute order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) if (self.stopLoss): order['stopLoss'] = -1.0 if (not ((currHour == 9) and (currMins == 15 + self.window))): return order opens = backData['open'][:currPos] close = backData['close'][:currPos] vol = backData['vol'][:currPos] currOpen = self.getVolAvgPrice(opens, close, vol, currPos - self.window, currPos) prevClose = self.getVolAvgPrice(opens, close, vol, currPos - (2 * self.window), currPos - self.window) gapSize = (currOpen - prevClose) / (prevClose + eps) alpha = -gapSize # Percentile based filtering if (self.percFlag): self.updatePercentile(gapSize) if (self.currSamples >= self.winSize): perc = self.getPercentile(gapSize) else: return order alpha[perc < self.threshold] = 0.0 volN = 70 if (self.hedge == 'volBased'): if (opens.shape[0] < volN): return order vol = self.getRollVol(opens, period = volN) gapSize = gapSize / (vol + eps) beta = ((gapSize * 0.0) + 1.0) if (self.hedge == 'volBased'): beta = vol / vol[self.index] alphaNorm = np.sign(alpha) numIndex = -np.sum(alphaNorm * beta) alphaNorm[self.index] += numIndex # Normalizing the positions alphaNorm = alphaNorm / (np.abs(alphaNorm).sum() + eps) if (debug): print 'Normalized Alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = np.abs(alphaNorm) 
order['position'] = alphaNorm if (self.stopLoss): order['stopLoss'] = self.stopVal return order class UnifiedSample(Strategy): def __init__(self, config): self.window = config['WINDOW'] self.index = config['INDEX'] self.hedge = config['HEDGE_METHOD'] self.hold = config['HOLD'] self.percFlag = config['PERC_FLAG'] self.limitFlag = config['LIMIT_FLAG'] self.stopVal = config['STOP_LOSS_VAL'] self.stockList = config['STOCK_LIST'] self.hedgeFlag = config['HEDGE'] self.window = config['VWAP_GAP_WINDOW'] self.analyser = Analyser(config) def analyseTrades(self, dataDf): self.analyser.setPriceData(dataDf) # tradeList = self.analyser.getGapStats([self.hold]) tradeList = self.analyser.getGapStats(range(2, self.hold + 3, 1)) # tradeList = self.analyser.getGapStats(range(2, 361, 3)) # tradeList = self.analyser.getGapStats([3, 6, 15, 30, 60, 90, 120, 180, 240, 300, 360]) # tradeList = self.analyser.getGapStats(range(3, 30, 3) + range(30, 120, 6) + range(120, 361, 12)) # self.analyser.printStats([self.hold]) # self.analyser.printStats([3, 30, 60, 90, 180, 360]) self.timeSet = {} for stock in self.stockList: self.timeSet[stock] = set(self.analyser.tradeList[stock].index) return tradeList def globalPrint(self, globalTradeList): # self.analyser.printStats([self.hold], globalTradeList, globalFlag = True) # self.analyser.printStats((range(3, 30, 3) + range(30, 120, 6) + range(120, 301, 12)), globalTradeList, globalFlag = True) # self.analyser.printStats([3, 15, 30, 60, 90, 120, 180, 240, 300, 360], globalTradeList, globalFlag = True) self.analyser.printStats([30, 60], globalTradeList, globalFlag = True) # self.analyser.printStats([self.hold], globalTradeList, globalFlag = True) def tradeCriteria(self, trade): ''' Effectively represents the strategy in the new framework Passes a potential trade vector to check if it's valid trade contains the following columns, 'currOpen', 'prevClose', 'entryPrice', 'gapSize', 'dailyRet', 'signal', 'vol', 'gapRatio', 'percentile', 'finClose_hold', .... 
Can be made of (In the current scenario), - Filters based on gapSize, Percentile, etc ''' votes = 0 if (trade['openInLowHigh'] < -0.0118): votes += 1 if ((1.0 - trade['vol']) < 0.985): votes += 1 if (votes >= 2): return trade['signal'] else: return 0 # if (trade['percentile'] >= 90.0): # return trade['signal'] # if (trade['round_pcile'] >= 0): # return trade['signal'] # if (trade['profit_' + str(self.hold)] < 0.0001): # return trade['signal'] # else: # return 0 # @profile def generateSignal(self, currTime): order = pd.DataFrame(0, index = self.stockList, columns = ['signal', 'qty', 'position']) alpha = order['signal'] * 0.0 beta = order['signal'] * 0.0 hold = (order['signal'] * 0.0) + self.hold for stock in self.stockList: absent = ((currTime - (self.window * 60)) not in self.timeSet[stock]) if absent: alpha[stock] = 0.0 else: trade = self.analyser.tradeList[stock].loc[currTime - (self.window * 60)] alpha[stock] = self.tradeCriteria(trade) if (self.hedgeFlag): beta[stock] = trade['beta'] if (self.limitFlag): hold[stock] = trade['limit_hold_' + str(self.hold)] alphaNorm = np.sign(alpha) if (self.hedgeFlag): numIndex = -np.sum(alphaNorm * beta) alphaNorm[self.index] += numIndex # Required in holding period computation in execStrat order['beta'] = beta # Normalizing the positions # alphaNorm = alphaNorm / (np.abs(alphaNorm).sum() + eps) if (debug): print 'Normalized Alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = np.abs(alphaNorm) order['position'] = alphaNorm order['hold'] = hold return order
if (self.currSamples >= self.winSize): # Updating the queue and removing elements from the tree for stock in self.stockList: lastVal = self.gapQueue[stock].popleft() self.orderedGaps[stock].remove(lastVal) self.currSamples -= 1 for stock, gap in gapSize.iteritems(): searchKey = gap if (self.absFlag): searchKey = np.abs(searchKey) self.gapQueue[stock].append(searchKey) self.orderedGaps[stock].add(searchKey) self.currSamples += 1
identifier_body
strategy.py
from abc import ABCMeta, abstractmethod import pandas as pd import numpy as np from datetime import datetime from collections import deque from sortedcontainers import SortedList from bisect import bisect_left, bisect from analyser import * from profilehooks import profile import sys debug = False eps = 1e-10 tradeRatio = 7.0 / 24.0 secInDay = 86400 * tradeRatio blockLen = 60 jumpSize = int(secInDay / blockLen) class Strategy(object): __metaclass__ = ABCMeta @abstractmethod def generateSignal(self, input): """Implement signal generation method""" raise NotImplementedError("generateSignals() not implemented!") class MovingAvg(Strategy): def __init__(self): # Not implemented yet print 'MovingAvg strategy: Did Nothing...' def generateSignal(self, backData, currDay): # backData should only contain data before the currDay order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) period1 = 90 period2 = 30 # Only taking a look till the current day marketsClose = backData['close'][:currDay] marketOpen = backData['open'][:currDay] avg_p1 = marketsClose[-period1 : ].sum() / period1 avg_p2 = marketsClose[-period2 : ].sum() / period2 difference = avg_p1 - avg_p2 deviation = difference.copy() totalDev = np.absolute(deviation).sum() if (totalDev == 0): return order else: order['signal'] = np.sign(deviation) # Ad-Hoc qty, just for experiments order['qty'] = np.absolute(deviation/totalDev) order['position'] = (deviation/totalDev) return order class SimpleMom(Strategy): def __init__(self): # Not implemented yet print 'Did Nothing...' 
def generateSignal(self, backData, currDay): threshold = 0.0025 window = 1 close = backData['close'][currDay-window:currDay] opens = backData['open'][currDay-window:currDay] high = backData['high'][currDay-window:currDay] low = backData['low'][currDay-window:currDay] finVal = -1 * ((close - opens) / ((high - low) + 0.001)).iloc[-1] finVal[np.abs(finVal) < threshold] = 0.0 finVal[np.abs(finVal) > 5.0] = np.sign(finVal) * 5.0 order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) order['signal'] = np.sign(finVal) order['qty'] = np.abs(finVal) order['position'] = finVal return order class SimpleVol(Strategy): def __init__(self, config): self.volLookback = config['VOL_LOOKBACK'] self.retPeriod = config['RET_PERIOD'] self.stdDevMethod = config['STD_DEV_METHOD'] self.lag = config['LAG'] # self.volLookback = volLookback # self.retPeriod = retPeriod # self.stdDevMethod = stdDevMethod # self.lag = lag def getReturn(self, period, opens): # Percentage based return for now # return (opens.iloc[-1] / opens.iloc[-period]) - 1 return (opens.iloc[-1] - opens.iloc[-period]) def getRollVol(self, period, opens): # Returns volatility based on open prices openSize = opens.shape[0] windowData = opens[openSize - period - self.lag: openSize - self.lag] retData = (windowData / windowData.shift(1)) - 1 # retData = (windowData - windowData.shift(1)) # print windowData # print retData if (self.stdDevMethod == "EMA"): retData = retData ** 2 ewmRet = retData.ewm(span = period).mean() rollVol = np.sqrt(ewmRet) if (debug): print 'Before', retData print 'EWM', ewmRet.iloc[-1] print 'After ema:', rollVol.iloc[-1] else: rollVol = np.std(retData) # print rollVol # return rollVol.iloc[-1]
order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) opens = backData['open'][:currDay] close = backData['close'][:currDay] retVal = self.getReturn(self.retPeriod, opens) rollVol = self.getRollVol(self.volLookback, opens) alpha = (retVal / (rollVol + eps)) alphaNorm = alpha - np.mean(alpha) alphaNorm = - alphaNorm # print 'retval:', np.array(retVal)[:5] # print 'vol:', np.array(rollVol)[:5] # print 'alpha:', np.array(alphaNorm[:5]) # Normalizing the positions alphaNorm = alphaNorm / (np.abs(alphaNorm).sum()) # print 'alpha:', np.array(alphaNorm[:5]) # NOTE: Destroy's the beta constant assumption # alphaNorm[np.abs(alphaNorm) > 1.0] = np.sign(alphaNorm) * 1.0 if (debug): print 'retVal:', retVal print 'rollVol:', rollVol print 'alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = np.abs(alphaNorm) order['position'] = alphaNorm return order class SimpleGapFade(Strategy): def __init__(self, config): self.window = config['WINDOW'] self.index = config['INDEX'] self.hedge = config['HEDGE_METHOD'] self.percFlag = config['PERC_FLAG'] self.stopLoss = config['STOP_LOSS_FLAG'] self.stopVal = config['STOP_LOSS_VAL'] if (self.percFlag): # Setting up the percentile objects self.absFlag = config['ABS_FLAG'] self.winSize = config['PERC_WINDOW'] self.stockList = config['STOCK_LIST'] self.threshold = config['PERC_THRESHOLD'] self.currSamples = 0 self.gapQueue = {} self.orderedGaps = {} for stock in self.stockList: self.gapQueue[stock] = deque([], maxlen = self.winSize) self.orderedGaps[stock] = SortedList(load = 10) def getRollVol(self, opens, period): # Returns volatility based on open prices openSize = opens.shape[0] windowData = opens[openSize - (period*375): openSize] retData = ((windowData + eps) / (windowData.shift(1) + eps)) - 1 rollVol = np.std(retData) return rollVol def getPercentile(self, gapSize): # Returns a dataframe containing their percentiles (in 0-1) perc = gapSize * 0.0 for stock, gap in 
gapSize.iteritems(): searchKey = gap if (self.absFlag): searchKey = np.abs(searchKey) percentile = self.orderedGaps[stock].bisect_left(searchKey) currSize = len(self.gapQueue[stock]) # To avoid having percentile as 1.0, since percentile <= percSize + 1 percentile = percentile / (currSize + 2.0) perc[stock] = percentile return perc def updatePercentile(self, gapSize): # Update the values in the percentile objects if (self.currSamples >= self.winSize): # Updating the queue and removing elements from the tree for stock in self.stockList: lastVal = self.gapQueue[stock].popleft() self.orderedGaps[stock].remove(lastVal) self.currSamples -= 1 for stock, gap in gapSize.iteritems(): searchKey = gap if (self.absFlag): searchKey = np.abs(searchKey) self.gapQueue[stock].append(searchKey) self.orderedGaps[stock].add(searchKey) self.currSamples += 1 def getVolAvgPrice(self, opens, close, vol, left, right): ''' Computes the volume weighted price for the range [left, right) price = (open + close)/2 ''' avgPrice = (opens.iloc[left:right] + close.iloc[left:right])/2.0 volAvgPrice = (avgPrice * vol[left:right]).sum() / (vol[left:right].sum() + eps) return volAvgPrice def generateSignal(self, backData, currPos): # Generate signal should only be called when the day begins i.e. 
after a minute currTime = backData['open'].iloc[currPos].name currTimeStamp = datetime.fromtimestamp(currTime) currDay = currTimeStamp.date() currHour = currTimeStamp.time().hour currMins = currTimeStamp.time().minute order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) if (self.stopLoss): order['stopLoss'] = -1.0 if (not ((currHour == 9) and (currMins == 15 + self.window))): return order opens = backData['open'][:currPos] close = backData['close'][:currPos] vol = backData['vol'][:currPos] currOpen = self.getVolAvgPrice(opens, close, vol, currPos - self.window, currPos) prevClose = self.getVolAvgPrice(opens, close, vol, currPos - (2 * self.window), currPos - self.window) gapSize = (currOpen - prevClose) / (prevClose + eps) alpha = -gapSize # Percentile based filtering if (self.percFlag): self.updatePercentile(gapSize) if (self.currSamples >= self.winSize): perc = self.getPercentile(gapSize) else: return order alpha[perc < self.threshold] = 0.0 volN = 70 if (self.hedge == 'volBased'): if (opens.shape[0] < volN): return order vol = self.getRollVol(opens, period = volN) gapSize = gapSize / (vol + eps) beta = ((gapSize * 0.0) + 1.0) if (self.hedge == 'volBased'): beta = vol / vol[self.index] alphaNorm = np.sign(alpha) numIndex = -np.sum(alphaNorm * beta) alphaNorm[self.index] += numIndex # Normalizing the positions alphaNorm = alphaNorm / (np.abs(alphaNorm).sum() + eps) if (debug): print 'Normalized Alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = np.abs(alphaNorm) order['position'] = alphaNorm if (self.stopLoss): order['stopLoss'] = self.stopVal return order class UnifiedSample(Strategy): def __init__(self, config): self.window = config['WINDOW'] self.index = config['INDEX'] self.hedge = config['HEDGE_METHOD'] self.hold = config['HOLD'] self.percFlag = config['PERC_FLAG'] self.limitFlag = config['LIMIT_FLAG'] self.stopVal = config['STOP_LOSS_VAL'] self.stockList = config['STOCK_LIST'] self.hedgeFlag = 
config['HEDGE'] self.window = config['VWAP_GAP_WINDOW'] self.analyser = Analyser(config) def analyseTrades(self, dataDf): self.analyser.setPriceData(dataDf) # tradeList = self.analyser.getGapStats([self.hold]) tradeList = self.analyser.getGapStats(range(2, self.hold + 3, 1)) # tradeList = self.analyser.getGapStats(range(2, 361, 3)) # tradeList = self.analyser.getGapStats([3, 6, 15, 30, 60, 90, 120, 180, 240, 300, 360]) # tradeList = self.analyser.getGapStats(range(3, 30, 3) + range(30, 120, 6) + range(120, 361, 12)) # self.analyser.printStats([self.hold]) # self.analyser.printStats([3, 30, 60, 90, 180, 360]) self.timeSet = {} for stock in self.stockList: self.timeSet[stock] = set(self.analyser.tradeList[stock].index) return tradeList def globalPrint(self, globalTradeList): # self.analyser.printStats([self.hold], globalTradeList, globalFlag = True) # self.analyser.printStats((range(3, 30, 3) + range(30, 120, 6) + range(120, 301, 12)), globalTradeList, globalFlag = True) # self.analyser.printStats([3, 15, 30, 60, 90, 120, 180, 240, 300, 360], globalTradeList, globalFlag = True) self.analyser.printStats([30, 60], globalTradeList, globalFlag = True) # self.analyser.printStats([self.hold], globalTradeList, globalFlag = True) def tradeCriteria(self, trade): ''' Effectively represents the strategy in the new framework Passes a potential trade vector to check if it's valid trade contains the following columns, 'currOpen', 'prevClose', 'entryPrice', 'gapSize', 'dailyRet', 'signal', 'vol', 'gapRatio', 'percentile', 'finClose_hold', .... 
Can be made of (In the current scenario), - Filters based on gapSize, Percentile, etc ''' votes = 0 if (trade['openInLowHigh'] < -0.0118): votes += 1 if ((1.0 - trade['vol']) < 0.985): votes += 1 if (votes >= 2): return trade['signal'] else: return 0 # if (trade['percentile'] >= 90.0): # return trade['signal'] # if (trade['round_pcile'] >= 0): # return trade['signal'] # if (trade['profit_' + str(self.hold)] < 0.0001): # return trade['signal'] # else: # return 0 # @profile def generateSignal(self, currTime): order = pd.DataFrame(0, index = self.stockList, columns = ['signal', 'qty', 'position']) alpha = order['signal'] * 0.0 beta = order['signal'] * 0.0 hold = (order['signal'] * 0.0) + self.hold for stock in self.stockList: absent = ((currTime - (self.window * 60)) not in self.timeSet[stock]) if absent: alpha[stock] = 0.0 else: trade = self.analyser.tradeList[stock].loc[currTime - (self.window * 60)] alpha[stock] = self.tradeCriteria(trade) if (self.hedgeFlag): beta[stock] = trade['beta'] if (self.limitFlag): hold[stock] = trade['limit_hold_' + str(self.hold)] alphaNorm = np.sign(alpha) if (self.hedgeFlag): numIndex = -np.sum(alphaNorm * beta) alphaNorm[self.index] += numIndex # Required in holding period computation in execStrat order['beta'] = beta # Normalizing the positions # alphaNorm = alphaNorm / (np.abs(alphaNorm).sum() + eps) if (debug): print 'Normalized Alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = np.abs(alphaNorm) order['position'] = alphaNorm order['hold'] = hold return order
return rollVol def generateSignal(self, backData, currDay):
random_line_split
strategy.py
from abc import ABCMeta, abstractmethod import pandas as pd import numpy as np from datetime import datetime from collections import deque from sortedcontainers import SortedList from bisect import bisect_left, bisect from analyser import * from profilehooks import profile import sys debug = False eps = 1e-10 tradeRatio = 7.0 / 24.0 secInDay = 86400 * tradeRatio blockLen = 60 jumpSize = int(secInDay / blockLen) class Strategy(object): __metaclass__ = ABCMeta @abstractmethod def generateSignal(self, input): """Implement signal generation method""" raise NotImplementedError("generateSignals() not implemented!") class MovingAvg(Strategy): def __init__(self): # Not implemented yet print 'MovingAvg strategy: Did Nothing...' def generateSignal(self, backData, currDay): # backData should only contain data before the currDay order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) period1 = 90 period2 = 30 # Only taking a look till the current day marketsClose = backData['close'][:currDay] marketOpen = backData['open'][:currDay] avg_p1 = marketsClose[-period1 : ].sum() / period1 avg_p2 = marketsClose[-period2 : ].sum() / period2 difference = avg_p1 - avg_p2 deviation = difference.copy() totalDev = np.absolute(deviation).sum() if (totalDev == 0): return order else: order['signal'] = np.sign(deviation) # Ad-Hoc qty, just for experiments order['qty'] = np.absolute(deviation/totalDev) order['position'] = (deviation/totalDev) return order class SimpleMom(Strategy): def __init__(self): # Not implemented yet print 'Did Nothing...' 
def generateSignal(self, backData, currDay): threshold = 0.0025 window = 1 close = backData['close'][currDay-window:currDay] opens = backData['open'][currDay-window:currDay] high = backData['high'][currDay-window:currDay] low = backData['low'][currDay-window:currDay] finVal = -1 * ((close - opens) / ((high - low) + 0.001)).iloc[-1] finVal[np.abs(finVal) < threshold] = 0.0 finVal[np.abs(finVal) > 5.0] = np.sign(finVal) * 5.0 order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) order['signal'] = np.sign(finVal) order['qty'] = np.abs(finVal) order['position'] = finVal return order class
(Strategy): def __init__(self, config): self.volLookback = config['VOL_LOOKBACK'] self.retPeriod = config['RET_PERIOD'] self.stdDevMethod = config['STD_DEV_METHOD'] self.lag = config['LAG'] # self.volLookback = volLookback # self.retPeriod = retPeriod # self.stdDevMethod = stdDevMethod # self.lag = lag def getReturn(self, period, opens): # Percentage based return for now # return (opens.iloc[-1] / opens.iloc[-period]) - 1 return (opens.iloc[-1] - opens.iloc[-period]) def getRollVol(self, period, opens): # Returns volatility based on open prices openSize = opens.shape[0] windowData = opens[openSize - period - self.lag: openSize - self.lag] retData = (windowData / windowData.shift(1)) - 1 # retData = (windowData - windowData.shift(1)) # print windowData # print retData if (self.stdDevMethod == "EMA"): retData = retData ** 2 ewmRet = retData.ewm(span = period).mean() rollVol = np.sqrt(ewmRet) if (debug): print 'Before', retData print 'EWM', ewmRet.iloc[-1] print 'After ema:', rollVol.iloc[-1] else: rollVol = np.std(retData) # print rollVol # return rollVol.iloc[-1] return rollVol def generateSignal(self, backData, currDay): order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) opens = backData['open'][:currDay] close = backData['close'][:currDay] retVal = self.getReturn(self.retPeriod, opens) rollVol = self.getRollVol(self.volLookback, opens) alpha = (retVal / (rollVol + eps)) alphaNorm = alpha - np.mean(alpha) alphaNorm = - alphaNorm # print 'retval:', np.array(retVal)[:5] # print 'vol:', np.array(rollVol)[:5] # print 'alpha:', np.array(alphaNorm[:5]) # Normalizing the positions alphaNorm = alphaNorm / (np.abs(alphaNorm).sum()) # print 'alpha:', np.array(alphaNorm[:5]) # NOTE: Destroy's the beta constant assumption # alphaNorm[np.abs(alphaNorm) > 1.0] = np.sign(alphaNorm) * 1.0 if (debug): print 'retVal:', retVal print 'rollVol:', rollVol print 'alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = 
np.abs(alphaNorm) order['position'] = alphaNorm return order class SimpleGapFade(Strategy): def __init__(self, config): self.window = config['WINDOW'] self.index = config['INDEX'] self.hedge = config['HEDGE_METHOD'] self.percFlag = config['PERC_FLAG'] self.stopLoss = config['STOP_LOSS_FLAG'] self.stopVal = config['STOP_LOSS_VAL'] if (self.percFlag): # Setting up the percentile objects self.absFlag = config['ABS_FLAG'] self.winSize = config['PERC_WINDOW'] self.stockList = config['STOCK_LIST'] self.threshold = config['PERC_THRESHOLD'] self.currSamples = 0 self.gapQueue = {} self.orderedGaps = {} for stock in self.stockList: self.gapQueue[stock] = deque([], maxlen = self.winSize) self.orderedGaps[stock] = SortedList(load = 10) def getRollVol(self, opens, period): # Returns volatility based on open prices openSize = opens.shape[0] windowData = opens[openSize - (period*375): openSize] retData = ((windowData + eps) / (windowData.shift(1) + eps)) - 1 rollVol = np.std(retData) return rollVol def getPercentile(self, gapSize): # Returns a dataframe containing their percentiles (in 0-1) perc = gapSize * 0.0 for stock, gap in gapSize.iteritems(): searchKey = gap if (self.absFlag): searchKey = np.abs(searchKey) percentile = self.orderedGaps[stock].bisect_left(searchKey) currSize = len(self.gapQueue[stock]) # To avoid having percentile as 1.0, since percentile <= percSize + 1 percentile = percentile / (currSize + 2.0) perc[stock] = percentile return perc def updatePercentile(self, gapSize): # Update the values in the percentile objects if (self.currSamples >= self.winSize): # Updating the queue and removing elements from the tree for stock in self.stockList: lastVal = self.gapQueue[stock].popleft() self.orderedGaps[stock].remove(lastVal) self.currSamples -= 1 for stock, gap in gapSize.iteritems(): searchKey = gap if (self.absFlag): searchKey = np.abs(searchKey) self.gapQueue[stock].append(searchKey) self.orderedGaps[stock].add(searchKey) self.currSamples += 1 def 
getVolAvgPrice(self, opens, close, vol, left, right): ''' Computes the volume weighted price for the range [left, right) price = (open + close)/2 ''' avgPrice = (opens.iloc[left:right] + close.iloc[left:right])/2.0 volAvgPrice = (avgPrice * vol[left:right]).sum() / (vol[left:right].sum() + eps) return volAvgPrice def generateSignal(self, backData, currPos): # Generate signal should only be called when the day begins i.e. after a minute currTime = backData['open'].iloc[currPos].name currTimeStamp = datetime.fromtimestamp(currTime) currDay = currTimeStamp.date() currHour = currTimeStamp.time().hour currMins = currTimeStamp.time().minute order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) if (self.stopLoss): order['stopLoss'] = -1.0 if (not ((currHour == 9) and (currMins == 15 + self.window))): return order opens = backData['open'][:currPos] close = backData['close'][:currPos] vol = backData['vol'][:currPos] currOpen = self.getVolAvgPrice(opens, close, vol, currPos - self.window, currPos) prevClose = self.getVolAvgPrice(opens, close, vol, currPos - (2 * self.window), currPos - self.window) gapSize = (currOpen - prevClose) / (prevClose + eps) alpha = -gapSize # Percentile based filtering if (self.percFlag): self.updatePercentile(gapSize) if (self.currSamples >= self.winSize): perc = self.getPercentile(gapSize) else: return order alpha[perc < self.threshold] = 0.0 volN = 70 if (self.hedge == 'volBased'): if (opens.shape[0] < volN): return order vol = self.getRollVol(opens, period = volN) gapSize = gapSize / (vol + eps) beta = ((gapSize * 0.0) + 1.0) if (self.hedge == 'volBased'): beta = vol / vol[self.index] alphaNorm = np.sign(alpha) numIndex = -np.sum(alphaNorm * beta) alphaNorm[self.index] += numIndex # Normalizing the positions alphaNorm = alphaNorm / (np.abs(alphaNorm).sum() + eps) if (debug): print 'Normalized Alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = np.abs(alphaNorm) order['position'] = 
alphaNorm if (self.stopLoss): order['stopLoss'] = self.stopVal return order class UnifiedSample(Strategy): def __init__(self, config): self.window = config['WINDOW'] self.index = config['INDEX'] self.hedge = config['HEDGE_METHOD'] self.hold = config['HOLD'] self.percFlag = config['PERC_FLAG'] self.limitFlag = config['LIMIT_FLAG'] self.stopVal = config['STOP_LOSS_VAL'] self.stockList = config['STOCK_LIST'] self.hedgeFlag = config['HEDGE'] self.window = config['VWAP_GAP_WINDOW'] self.analyser = Analyser(config) def analyseTrades(self, dataDf): self.analyser.setPriceData(dataDf) # tradeList = self.analyser.getGapStats([self.hold]) tradeList = self.analyser.getGapStats(range(2, self.hold + 3, 1)) # tradeList = self.analyser.getGapStats(range(2, 361, 3)) # tradeList = self.analyser.getGapStats([3, 6, 15, 30, 60, 90, 120, 180, 240, 300, 360]) # tradeList = self.analyser.getGapStats(range(3, 30, 3) + range(30, 120, 6) + range(120, 361, 12)) # self.analyser.printStats([self.hold]) # self.analyser.printStats([3, 30, 60, 90, 180, 360]) self.timeSet = {} for stock in self.stockList: self.timeSet[stock] = set(self.analyser.tradeList[stock].index) return tradeList def globalPrint(self, globalTradeList): # self.analyser.printStats([self.hold], globalTradeList, globalFlag = True) # self.analyser.printStats((range(3, 30, 3) + range(30, 120, 6) + range(120, 301, 12)), globalTradeList, globalFlag = True) # self.analyser.printStats([3, 15, 30, 60, 90, 120, 180, 240, 300, 360], globalTradeList, globalFlag = True) self.analyser.printStats([30, 60], globalTradeList, globalFlag = True) # self.analyser.printStats([self.hold], globalTradeList, globalFlag = True) def tradeCriteria(self, trade): ''' Effectively represents the strategy in the new framework Passes a potential trade vector to check if it's valid trade contains the following columns, 'currOpen', 'prevClose', 'entryPrice', 'gapSize', 'dailyRet', 'signal', 'vol', 'gapRatio', 'percentile', 'finClose_hold', .... 
Can be made of (In the current scenario), - Filters based on gapSize, Percentile, etc ''' votes = 0 if (trade['openInLowHigh'] < -0.0118): votes += 1 if ((1.0 - trade['vol']) < 0.985): votes += 1 if (votes >= 2): return trade['signal'] else: return 0 # if (trade['percentile'] >= 90.0): # return trade['signal'] # if (trade['round_pcile'] >= 0): # return trade['signal'] # if (trade['profit_' + str(self.hold)] < 0.0001): # return trade['signal'] # else: # return 0 # @profile def generateSignal(self, currTime): order = pd.DataFrame(0, index = self.stockList, columns = ['signal', 'qty', 'position']) alpha = order['signal'] * 0.0 beta = order['signal'] * 0.0 hold = (order['signal'] * 0.0) + self.hold for stock in self.stockList: absent = ((currTime - (self.window * 60)) not in self.timeSet[stock]) if absent: alpha[stock] = 0.0 else: trade = self.analyser.tradeList[stock].loc[currTime - (self.window * 60)] alpha[stock] = self.tradeCriteria(trade) if (self.hedgeFlag): beta[stock] = trade['beta'] if (self.limitFlag): hold[stock] = trade['limit_hold_' + str(self.hold)] alphaNorm = np.sign(alpha) if (self.hedgeFlag): numIndex = -np.sum(alphaNorm * beta) alphaNorm[self.index] += numIndex # Required in holding period computation in execStrat order['beta'] = beta # Normalizing the positions # alphaNorm = alphaNorm / (np.abs(alphaNorm).sum() + eps) if (debug): print 'Normalized Alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = np.abs(alphaNorm) order['position'] = alphaNorm order['hold'] = hold return order
SimpleVol
identifier_name
strategy.py
from abc import ABCMeta, abstractmethod import pandas as pd import numpy as np from datetime import datetime from collections import deque from sortedcontainers import SortedList from bisect import bisect_left, bisect from analyser import * from profilehooks import profile import sys debug = False eps = 1e-10 tradeRatio = 7.0 / 24.0 secInDay = 86400 * tradeRatio blockLen = 60 jumpSize = int(secInDay / blockLen) class Strategy(object): __metaclass__ = ABCMeta @abstractmethod def generateSignal(self, input): """Implement signal generation method""" raise NotImplementedError("generateSignals() not implemented!") class MovingAvg(Strategy): def __init__(self): # Not implemented yet print 'MovingAvg strategy: Did Nothing...' def generateSignal(self, backData, currDay): # backData should only contain data before the currDay order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) period1 = 90 period2 = 30 # Only taking a look till the current day marketsClose = backData['close'][:currDay] marketOpen = backData['open'][:currDay] avg_p1 = marketsClose[-period1 : ].sum() / period1 avg_p2 = marketsClose[-period2 : ].sum() / period2 difference = avg_p1 - avg_p2 deviation = difference.copy() totalDev = np.absolute(deviation).sum() if (totalDev == 0): return order else: order['signal'] = np.sign(deviation) # Ad-Hoc qty, just for experiments order['qty'] = np.absolute(deviation/totalDev) order['position'] = (deviation/totalDev) return order class SimpleMom(Strategy): def __init__(self): # Not implemented yet print 'Did Nothing...' 
def generateSignal(self, backData, currDay): threshold = 0.0025 window = 1 close = backData['close'][currDay-window:currDay] opens = backData['open'][currDay-window:currDay] high = backData['high'][currDay-window:currDay] low = backData['low'][currDay-window:currDay] finVal = -1 * ((close - opens) / ((high - low) + 0.001)).iloc[-1] finVal[np.abs(finVal) < threshold] = 0.0 finVal[np.abs(finVal) > 5.0] = np.sign(finVal) * 5.0 order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) order['signal'] = np.sign(finVal) order['qty'] = np.abs(finVal) order['position'] = finVal return order class SimpleVol(Strategy): def __init__(self, config): self.volLookback = config['VOL_LOOKBACK'] self.retPeriod = config['RET_PERIOD'] self.stdDevMethod = config['STD_DEV_METHOD'] self.lag = config['LAG'] # self.volLookback = volLookback # self.retPeriod = retPeriod # self.stdDevMethod = stdDevMethod # self.lag = lag def getReturn(self, period, opens): # Percentage based return for now # return (opens.iloc[-1] / opens.iloc[-period]) - 1 return (opens.iloc[-1] - opens.iloc[-period]) def getRollVol(self, period, opens): # Returns volatility based on open prices openSize = opens.shape[0] windowData = opens[openSize - period - self.lag: openSize - self.lag] retData = (windowData / windowData.shift(1)) - 1 # retData = (windowData - windowData.shift(1)) # print windowData # print retData if (self.stdDevMethod == "EMA"):
else: rollVol = np.std(retData) # print rollVol # return rollVol.iloc[-1] return rollVol def generateSignal(self, backData, currDay): order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) opens = backData['open'][:currDay] close = backData['close'][:currDay] retVal = self.getReturn(self.retPeriod, opens) rollVol = self.getRollVol(self.volLookback, opens) alpha = (retVal / (rollVol + eps)) alphaNorm = alpha - np.mean(alpha) alphaNorm = - alphaNorm # print 'retval:', np.array(retVal)[:5] # print 'vol:', np.array(rollVol)[:5] # print 'alpha:', np.array(alphaNorm[:5]) # Normalizing the positions alphaNorm = alphaNorm / (np.abs(alphaNorm).sum()) # print 'alpha:', np.array(alphaNorm[:5]) # NOTE: Destroy's the beta constant assumption # alphaNorm[np.abs(alphaNorm) > 1.0] = np.sign(alphaNorm) * 1.0 if (debug): print 'retVal:', retVal print 'rollVol:', rollVol print 'alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = np.abs(alphaNorm) order['position'] = alphaNorm return order class SimpleGapFade(Strategy): def __init__(self, config): self.window = config['WINDOW'] self.index = config['INDEX'] self.hedge = config['HEDGE_METHOD'] self.percFlag = config['PERC_FLAG'] self.stopLoss = config['STOP_LOSS_FLAG'] self.stopVal = config['STOP_LOSS_VAL'] if (self.percFlag): # Setting up the percentile objects self.absFlag = config['ABS_FLAG'] self.winSize = config['PERC_WINDOW'] self.stockList = config['STOCK_LIST'] self.threshold = config['PERC_THRESHOLD'] self.currSamples = 0 self.gapQueue = {} self.orderedGaps = {} for stock in self.stockList: self.gapQueue[stock] = deque([], maxlen = self.winSize) self.orderedGaps[stock] = SortedList(load = 10) def getRollVol(self, opens, period): # Returns volatility based on open prices openSize = opens.shape[0] windowData = opens[openSize - (period*375): openSize] retData = ((windowData + eps) / (windowData.shift(1) + eps)) - 1 rollVol = np.std(retData) return rollVol def 
getPercentile(self, gapSize): # Returns a dataframe containing their percentiles (in 0-1) perc = gapSize * 0.0 for stock, gap in gapSize.iteritems(): searchKey = gap if (self.absFlag): searchKey = np.abs(searchKey) percentile = self.orderedGaps[stock].bisect_left(searchKey) currSize = len(self.gapQueue[stock]) # To avoid having percentile as 1.0, since percentile <= percSize + 1 percentile = percentile / (currSize + 2.0) perc[stock] = percentile return perc def updatePercentile(self, gapSize): # Update the values in the percentile objects if (self.currSamples >= self.winSize): # Updating the queue and removing elements from the tree for stock in self.stockList: lastVal = self.gapQueue[stock].popleft() self.orderedGaps[stock].remove(lastVal) self.currSamples -= 1 for stock, gap in gapSize.iteritems(): searchKey = gap if (self.absFlag): searchKey = np.abs(searchKey) self.gapQueue[stock].append(searchKey) self.orderedGaps[stock].add(searchKey) self.currSamples += 1 def getVolAvgPrice(self, opens, close, vol, left, right): ''' Computes the volume weighted price for the range [left, right) price = (open + close)/2 ''' avgPrice = (opens.iloc[left:right] + close.iloc[left:right])/2.0 volAvgPrice = (avgPrice * vol[left:right]).sum() / (vol[left:right].sum() + eps) return volAvgPrice def generateSignal(self, backData, currPos): # Generate signal should only be called when the day begins i.e. 
after a minute currTime = backData['open'].iloc[currPos].name currTimeStamp = datetime.fromtimestamp(currTime) currDay = currTimeStamp.date() currHour = currTimeStamp.time().hour currMins = currTimeStamp.time().minute order = pd.DataFrame(0, index = backData['open'].columns, columns = ['signal', 'qty', 'position']) if (self.stopLoss): order['stopLoss'] = -1.0 if (not ((currHour == 9) and (currMins == 15 + self.window))): return order opens = backData['open'][:currPos] close = backData['close'][:currPos] vol = backData['vol'][:currPos] currOpen = self.getVolAvgPrice(opens, close, vol, currPos - self.window, currPos) prevClose = self.getVolAvgPrice(opens, close, vol, currPos - (2 * self.window), currPos - self.window) gapSize = (currOpen - prevClose) / (prevClose + eps) alpha = -gapSize # Percentile based filtering if (self.percFlag): self.updatePercentile(gapSize) if (self.currSamples >= self.winSize): perc = self.getPercentile(gapSize) else: return order alpha[perc < self.threshold] = 0.0 volN = 70 if (self.hedge == 'volBased'): if (opens.shape[0] < volN): return order vol = self.getRollVol(opens, period = volN) gapSize = gapSize / (vol + eps) beta = ((gapSize * 0.0) + 1.0) if (self.hedge == 'volBased'): beta = vol / vol[self.index] alphaNorm = np.sign(alpha) numIndex = -np.sum(alphaNorm * beta) alphaNorm[self.index] += numIndex # Normalizing the positions alphaNorm = alphaNorm / (np.abs(alphaNorm).sum() + eps) if (debug): print 'Normalized Alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = np.abs(alphaNorm) order['position'] = alphaNorm if (self.stopLoss): order['stopLoss'] = self.stopVal return order class UnifiedSample(Strategy): def __init__(self, config): self.window = config['WINDOW'] self.index = config['INDEX'] self.hedge = config['HEDGE_METHOD'] self.hold = config['HOLD'] self.percFlag = config['PERC_FLAG'] self.limitFlag = config['LIMIT_FLAG'] self.stopVal = config['STOP_LOSS_VAL'] self.stockList = config['STOCK_LIST'] self.hedgeFlag = 
config['HEDGE'] self.window = config['VWAP_GAP_WINDOW'] self.analyser = Analyser(config) def analyseTrades(self, dataDf): self.analyser.setPriceData(dataDf) # tradeList = self.analyser.getGapStats([self.hold]) tradeList = self.analyser.getGapStats(range(2, self.hold + 3, 1)) # tradeList = self.analyser.getGapStats(range(2, 361, 3)) # tradeList = self.analyser.getGapStats([3, 6, 15, 30, 60, 90, 120, 180, 240, 300, 360]) # tradeList = self.analyser.getGapStats(range(3, 30, 3) + range(30, 120, 6) + range(120, 361, 12)) # self.analyser.printStats([self.hold]) # self.analyser.printStats([3, 30, 60, 90, 180, 360]) self.timeSet = {} for stock in self.stockList: self.timeSet[stock] = set(self.analyser.tradeList[stock].index) return tradeList def globalPrint(self, globalTradeList): # self.analyser.printStats([self.hold], globalTradeList, globalFlag = True) # self.analyser.printStats((range(3, 30, 3) + range(30, 120, 6) + range(120, 301, 12)), globalTradeList, globalFlag = True) # self.analyser.printStats([3, 15, 30, 60, 90, 120, 180, 240, 300, 360], globalTradeList, globalFlag = True) self.analyser.printStats([30, 60], globalTradeList, globalFlag = True) # self.analyser.printStats([self.hold], globalTradeList, globalFlag = True) def tradeCriteria(self, trade): ''' Effectively represents the strategy in the new framework Passes a potential trade vector to check if it's valid trade contains the following columns, 'currOpen', 'prevClose', 'entryPrice', 'gapSize', 'dailyRet', 'signal', 'vol', 'gapRatio', 'percentile', 'finClose_hold', .... 
Can be made of (In the current scenario), - Filters based on gapSize, Percentile, etc ''' votes = 0 if (trade['openInLowHigh'] < -0.0118): votes += 1 if ((1.0 - trade['vol']) < 0.985): votes += 1 if (votes >= 2): return trade['signal'] else: return 0 # if (trade['percentile'] >= 90.0): # return trade['signal'] # if (trade['round_pcile'] >= 0): # return trade['signal'] # if (trade['profit_' + str(self.hold)] < 0.0001): # return trade['signal'] # else: # return 0 # @profile def generateSignal(self, currTime): order = pd.DataFrame(0, index = self.stockList, columns = ['signal', 'qty', 'position']) alpha = order['signal'] * 0.0 beta = order['signal'] * 0.0 hold = (order['signal'] * 0.0) + self.hold for stock in self.stockList: absent = ((currTime - (self.window * 60)) not in self.timeSet[stock]) if absent: alpha[stock] = 0.0 else: trade = self.analyser.tradeList[stock].loc[currTime - (self.window * 60)] alpha[stock] = self.tradeCriteria(trade) if (self.hedgeFlag): beta[stock] = trade['beta'] if (self.limitFlag): hold[stock] = trade['limit_hold_' + str(self.hold)] alphaNorm = np.sign(alpha) if (self.hedgeFlag): numIndex = -np.sum(alphaNorm * beta) alphaNorm[self.index] += numIndex # Required in holding period computation in execStrat order['beta'] = beta # Normalizing the positions # alphaNorm = alphaNorm / (np.abs(alphaNorm).sum() + eps) if (debug): print 'Normalized Alpha:', alphaNorm order['signal'] = np.sign(alphaNorm) order['qty'] = np.abs(alphaNorm) order['position'] = alphaNorm order['hold'] = hold return order
retData = retData ** 2 ewmRet = retData.ewm(span = period).mean() rollVol = np.sqrt(ewmRet) if (debug): print 'Before', retData print 'EWM', ewmRet.iloc[-1] print 'After ema:', rollVol.iloc[-1]
conditional_block
rectAreaLightShadow.ts
import { ElapsedTime } from '../three/timeUtility' import { Controls } from '../three/controls' import { DataGUI, Statistic } from '../three/uiUtility' import { LinearDepthRenderMaterial } from '../three/shaderUtility'; import { BoxUpdateHelper, boundingBoxInViewSpace, boxFromOrthographicViewVolume, getMaxSamples, setOrthographicViewVolumeFromBox } from '../three/threeUtility' import { RenderOverrideVisibility, RenderPass, } from '../three/renderPass' import { RectAreaLightAndShadow, RectAreaLightAndShadowWithDirectionalLight } from '../three/rectAreaLightAndShadow' import * as THREE from 'three' import { RoomEnvironment } from 'three/examples/jsm/environments/RoomEnvironment.js'; export const rectAreaLightShadow = (canvas: any) => { const width = window.innerWidth; const height = window.innerHeight; const renderer = new THREE.WebGLRenderer({canvas: canvas, antialias: true, alpha: true}) renderer.setSize(width, height) renderer.setPixelRatio(window.devicePixelRatio); renderer.shadowMap.enabled = true; renderer.shadowMap.type = THREE.PCFSoftShadowMap; document.body.appendChild(renderer.domElement); const statistic = new Statistic(); const renderPass = new RenderPass(); const maxSamples = getMaxSamples(renderer); const camera = new THREE.PerspectiveCamera(45, width / height, 0.1, 100); camera.position.y = 10; camera.position.z = 15; const controls = new Controls(renderer, camera); const scene = new THREE.Scene(); scene.background = new THREE.Color(0xc0c0c0); const pmremGenerator = new THREE.PMREMGenerator(renderer); scene.environment = pmremGenerator.fromScene(new RoomEnvironment(), 0.04).texture; const sceneBox = new THREE.Box3(new THREE.Vector3(-3.5, 0, -3), new THREE.Vector3(3, 3, 2.5)); const sceneBoxHelper = new BoxUpdateHelper(sceneBox, { color: 0xff8080, opacity: 0.25 }); sceneBoxHelper.addTo(scene); sceneBoxHelper.visible = false; const directionalLight = new THREE.DirectionalLight(0xffffff, 0.5); directionalLight.position.set(2, 5, 2); 
directionalLight.lookAt(0, 0, 0); directionalLight.castShadow = true; directionalLight.shadow.mapSize = new THREE.Vector2(1024, 1024); const directionalLightCamera = directionalLight.shadow.camera as THREE.OrthographicCamera; directionalLightCamera.far = 20; scene.add(directionalLight); const rectAreaLight = new THREE.RectAreaLight(0xffffff, 100, 1, 1); const viewportSize = new THREE.Vector2(width, height); //const rectAreaLightAndShadow: RectAreaLightAndShadow = new RectAreaLightAndShadowWithShadowMap(rectAreaLight, viewportSize, undefined, false); const rectAreaLightAndShadow: RectAreaLightAndShadow = new RectAreaLightAndShadowWithDirectionalLight(rectAreaLight, viewportSize, { samples: maxSamples, shadowIntensity: 0.5, alwaysUpdate: true }); rectAreaLightAndShadow.addToScene(scene); const shadowBoxHelper = new BoxUpdateHelper(boxFromOrthographicViewVolume(directionalLightCamera), { color: 0x80ff80, opacity: 0.25 }); shadowBoxHelper.visible = false; shadowBoxHelper.addTo(scene); const setLightSource = (source: string) => { const enableRectAreaLight = source === 'rectAreaLight'; rectAreaLightAndShadow.visible = enableRectAreaLight; directionalLight.visible = !enableRectAreaLight; } const updateShadowBox = () => { const pos = directionalLight.position.clone(); directionalLight.lookAt(0, 0, 0); shadowBoxHelper.object.position.set(pos.x, pos.y, pos.z); shadowBoxHelper.object.lookAt(0, 0, 0); rectAreaLightAndShadow.setLineOfSight(pos.clone(), new THREE.Vector3(0, 0, 0)); const shadowVolumeBox = boundingBoxInViewSpace(sceneBox, rectAreaLightAndShadow.shadowCamera); rectAreaLightAndShadow.setShadowVolume(shadowVolumeBox); setOrthographicViewVolumeFromBox(directionalLightCamera, shadowVolumeBox); shadowBoxHelper.box = boxFromOrthographicViewVolume(directionalLightCamera) shadowBoxHelper.update(); }; updateShadowBox(); const transformControl = controls.addTransformControl(directionalLight, scene); transformControl.addEventListener('objectChange', (event: any) => { 
updateShadowBox(); }); let meshes: THREE.Mesh[] = []; const addMesh = (position: THREE.Vector3, geometry: THREE.BufferGeometry, material: THREE.Material): THREE.Mesh => { const mesh = new THREE.Mesh(geometry, material); mesh.position.set(position.x, position.y, position.z); mesh.castShadow = true; mesh.receiveShadow = true; meshes.push(mesh); scene.add(mesh); return mesh; }; const envMapIntensity = 0.25; const sqrt3 = Math.sqrt(3) addMesh(new THREE.Vector3(-1, 1, 1), new THREE.BoxGeometry(1/sqrt3, 1/sqrt3, 1/sqrt3), new THREE.MeshPhysicalMaterial({color: 0xffff00, envMapIntensity: envMapIntensity})); addMesh(new THREE.Vector3(1, 1, 1), new THREE.IcosahedronGeometry(0.5, 0), new THREE.MeshPhysicalMaterial({color: 0xff0000, envMapIntensity: envMapIntensity})); addMesh(new THREE.Vector3(-2, 1, -1), new THREE.TorusGeometry(0.5, 0.2, 32, 100), new THREE.MeshPhysicalMaterial({color: 0x00ff00, envMapIntensity: envMapIntensity})); addMesh(new THREE.Vector3(0, 1, -1), new THREE.SphereGeometry(0.5, 32, 16), new THREE.MeshPhysicalMaterial({color: 0x0000ff, envMapIntensity: envMapIntensity})); addMesh(new THREE.Vector3(2, 1, -1), new THREE.TorusKnotGeometry(0.3, 0.1, 100, 32), new THREE.MeshPhysicalMaterial({color: 0xff00ff, envMapIntensity: envMapIntensity})); const groundGroup = new THREE.Group(); groundGroup.rotation.x = -Math.PI / 2; scene.add(groundGroup); const groundGeometry = new THREE.PlaneGeometry(10, 10, 10, 10); const groundMaterial = new THREE.MeshPhysicalMaterial({color: 0x808080, envMapIntensity: envMapIntensity}); const groundMesh = new THREE.Mesh(groundGeometry, groundMaterial); groundMesh.receiveShadow = true; groundGroup.add(groundMesh); let shadowDebugPlane: THREE.Mesh | undefined; if (rectAreaLightAndShadow.shadowMapTexture) { shadowDebugPlane = new THREE.Mesh( new THREE.PlaneGeometry(5, 5), new THREE.MeshBasicMaterial({ map: rectAreaLightAndShadow.shadowMapTexture }) ); shadowDebugPlane.position.x = -8; shadowDebugPlane.position.y = 3; 
shadowDebugPlane.visible = false; scene.add(shadowDebugPlane); } const generalUiProperties = { 'light source': 'rectAreaLight', 'scene volume': sceneBoxHelper.visible, 'shadow volume': shadowBoxHelper.visible, 'debug shadow map': false, 'debug output': 'off', }; setLightSource(generalUiProperties['light source']); const dataGui = new DataGUI(); dataGui.gui.add(generalUiProperties, 'light source', { 'rectAreaLight': 'rectAreaLight', 'directionalLight': 'directionalLight' }).onChange((value) => setLightSource(value)); dataGui.gui.add(generalUiProperties, 'scene volume').onChange((enabled: boolean) => sceneBoxHelper.visible = enabled); dataGui.gui.add(generalUiProperties, 'shadow volume').onChange((enabled: boolean) => shadowBoxHelper.visible = enabled); dataGui.gui.add(generalUiProperties, 'debug shadow map'); let debugOptions: any = {}; debugOptions['off'] = 'off'; if (rectAreaLightAndShadow.depthTexture) { debugOptions['depth'] = 'depth'; } debugOptions['shadow'] = 'shadow'; dataGui.gui.add(generalUiProperties, 'debug output', debugOptions); const shadowFolder = dataGui.gui.addFolder('rectAreaLight shadow'); shadowFolder.add<any>(rectAreaLightAndShadow, 'shadowIntensity', 0.0, 1.0); const onWindowResize = () => { const width = window.innerWidth; const height = window.innerHeight; camera.aspect = width / height; camera.updateProjectionMatrix(); renderer.setSize(width, height); rectAreaLightAndShadow.setSize(width, height); }; window.addEventListener('resize', onWindowResize, false); const elapsedTime = new ElapsedTime(); const animate = (timestamp: number) => { elapsedTime.update(timestamp); requestAnimationFrame(animate); for (let i = 0; i < meshes.length; ++i) { meshes[i].position.y = 1.5 + Math.sin(elapsedTime.allTimeMs / 1000 * Math.PI * 2 * 0.2 + i / 5 * Math.PI * 2) * 1; meshes[i].rotation.x += elapsedTime.getDegreePerSecond((1 + i / 5) * 60, true); meshes[i].rotation.y += elapsedTime.getDegreePerSecond((1 + i / (i-5)) * 60, true); } controls.update(); 
render(); statistic.update(); } let invisibleObjects: any[]= [sceneBoxHelper, shadowBoxHelper, transformControl, rectAreaLightAndShadow.rectAreaLight]; if (shadowDebugPlane) { invisibleObjects.push(shadowDebugPlane); } const renderOverrideVisibility = new RenderOverrideVisibility( false, (object: any) => invisibleObjects.includes(object), null); const renderShadow = () => { renderOverrideVisibility.render(scene, () => { if (generalUiProperties['debug output'] === 'off' && generalUiProperties['light source'] !== 'rectAreaLight' && !generalUiProperties['debug output']) { return; } rectAreaLightAndShadow.renderShadow(renderer, scene, camera); }); } const depthRenderMaterial = rectAreaLightAndShadow.depthTexture ? new LinearDepthRenderMaterial({ depthTexture: rectAreaLightAndShadow.depthTexture }) : null; const render = () => { renderShadow(); switch(generalUiProperties['debug output']) { case 'depth': if (depthRenderMaterial) { renderPass.renderScreenSpace(renderer, depthRenderMaterial.update({camera: camera}), null); } break; case 'shadow': rectAreaLightAndShadow.blendShadow(renderer, true); break; default: if (shadowDebugPlane) { shadowDebugPlane.visible = generalUiProperties['debug shadow map']; } renderer.render(scene, camera); if (shadowDebugPlane) { shadowDebugPlane.visible = false; } if (generalUiProperties['light source'] === 'rectAreaLight') { rectAreaLightAndShadow.blendShadow(renderer, false); } break; } } animate(0);
updateShadowBox(); }
random_line_split
rectAreaLightShadow.ts
import { ElapsedTime } from '../three/timeUtility' import { Controls } from '../three/controls' import { DataGUI, Statistic } from '../three/uiUtility' import { LinearDepthRenderMaterial } from '../three/shaderUtility'; import { BoxUpdateHelper, boundingBoxInViewSpace, boxFromOrthographicViewVolume, getMaxSamples, setOrthographicViewVolumeFromBox } from '../three/threeUtility' import { RenderOverrideVisibility, RenderPass, } from '../three/renderPass' import { RectAreaLightAndShadow, RectAreaLightAndShadowWithDirectionalLight } from '../three/rectAreaLightAndShadow' import * as THREE from 'three' import { RoomEnvironment } from 'three/examples/jsm/environments/RoomEnvironment.js'; export const rectAreaLightShadow = (canvas: any) => { const width = window.innerWidth; const height = window.innerHeight; const renderer = new THREE.WebGLRenderer({canvas: canvas, antialias: true, alpha: true}) renderer.setSize(width, height) renderer.setPixelRatio(window.devicePixelRatio); renderer.shadowMap.enabled = true; renderer.shadowMap.type = THREE.PCFSoftShadowMap; document.body.appendChild(renderer.domElement); const statistic = new Statistic(); const renderPass = new RenderPass(); const maxSamples = getMaxSamples(renderer); const camera = new THREE.PerspectiveCamera(45, width / height, 0.1, 100); camera.position.y = 10; camera.position.z = 15; const controls = new Controls(renderer, camera); const scene = new THREE.Scene(); scene.background = new THREE.Color(0xc0c0c0); const pmremGenerator = new THREE.PMREMGenerator(renderer); scene.environment = pmremGenerator.fromScene(new RoomEnvironment(), 0.04).texture; const sceneBox = new THREE.Box3(new THREE.Vector3(-3.5, 0, -3), new THREE.Vector3(3, 3, 2.5)); const sceneBoxHelper = new BoxUpdateHelper(sceneBox, { color: 0xff8080, opacity: 0.25 }); sceneBoxHelper.addTo(scene); sceneBoxHelper.visible = false; const directionalLight = new THREE.DirectionalLight(0xffffff, 0.5); directionalLight.position.set(2, 5, 2); 
directionalLight.lookAt(0, 0, 0); directionalLight.castShadow = true; directionalLight.shadow.mapSize = new THREE.Vector2(1024, 1024); const directionalLightCamera = directionalLight.shadow.camera as THREE.OrthographicCamera; directionalLightCamera.far = 20; scene.add(directionalLight); const rectAreaLight = new THREE.RectAreaLight(0xffffff, 100, 1, 1); const viewportSize = new THREE.Vector2(width, height); //const rectAreaLightAndShadow: RectAreaLightAndShadow = new RectAreaLightAndShadowWithShadowMap(rectAreaLight, viewportSize, undefined, false); const rectAreaLightAndShadow: RectAreaLightAndShadow = new RectAreaLightAndShadowWithDirectionalLight(rectAreaLight, viewportSize, { samples: maxSamples, shadowIntensity: 0.5, alwaysUpdate: true }); rectAreaLightAndShadow.addToScene(scene); const shadowBoxHelper = new BoxUpdateHelper(boxFromOrthographicViewVolume(directionalLightCamera), { color: 0x80ff80, opacity: 0.25 }); shadowBoxHelper.visible = false; shadowBoxHelper.addTo(scene); const setLightSource = (source: string) => { const enableRectAreaLight = source === 'rectAreaLight'; rectAreaLightAndShadow.visible = enableRectAreaLight; directionalLight.visible = !enableRectAreaLight; } const updateShadowBox = () => { const pos = directionalLight.position.clone(); directionalLight.lookAt(0, 0, 0); shadowBoxHelper.object.position.set(pos.x, pos.y, pos.z); shadowBoxHelper.object.lookAt(0, 0, 0); rectAreaLightAndShadow.setLineOfSight(pos.clone(), new THREE.Vector3(0, 0, 0)); const shadowVolumeBox = boundingBoxInViewSpace(sceneBox, rectAreaLightAndShadow.shadowCamera); rectAreaLightAndShadow.setShadowVolume(shadowVolumeBox); setOrthographicViewVolumeFromBox(directionalLightCamera, shadowVolumeBox); shadowBoxHelper.box = boxFromOrthographicViewVolume(directionalLightCamera) shadowBoxHelper.update(); }; updateShadowBox(); const transformControl = controls.addTransformControl(directionalLight, scene); transformControl.addEventListener('objectChange', (event: any) => { 
updateShadowBox(); }); let meshes: THREE.Mesh[] = []; const addMesh = (position: THREE.Vector3, geometry: THREE.BufferGeometry, material: THREE.Material): THREE.Mesh => { const mesh = new THREE.Mesh(geometry, material); mesh.position.set(position.x, position.y, position.z); mesh.castShadow = true; mesh.receiveShadow = true; meshes.push(mesh); scene.add(mesh); return mesh; }; const envMapIntensity = 0.25; const sqrt3 = Math.sqrt(3) addMesh(new THREE.Vector3(-1, 1, 1), new THREE.BoxGeometry(1/sqrt3, 1/sqrt3, 1/sqrt3), new THREE.MeshPhysicalMaterial({color: 0xffff00, envMapIntensity: envMapIntensity})); addMesh(new THREE.Vector3(1, 1, 1), new THREE.IcosahedronGeometry(0.5, 0), new THREE.MeshPhysicalMaterial({color: 0xff0000, envMapIntensity: envMapIntensity})); addMesh(new THREE.Vector3(-2, 1, -1), new THREE.TorusGeometry(0.5, 0.2, 32, 100), new THREE.MeshPhysicalMaterial({color: 0x00ff00, envMapIntensity: envMapIntensity})); addMesh(new THREE.Vector3(0, 1, -1), new THREE.SphereGeometry(0.5, 32, 16), new THREE.MeshPhysicalMaterial({color: 0x0000ff, envMapIntensity: envMapIntensity})); addMesh(new THREE.Vector3(2, 1, -1), new THREE.TorusKnotGeometry(0.3, 0.1, 100, 32), new THREE.MeshPhysicalMaterial({color: 0xff00ff, envMapIntensity: envMapIntensity})); const groundGroup = new THREE.Group(); groundGroup.rotation.x = -Math.PI / 2; scene.add(groundGroup); const groundGeometry = new THREE.PlaneGeometry(10, 10, 10, 10); const groundMaterial = new THREE.MeshPhysicalMaterial({color: 0x808080, envMapIntensity: envMapIntensity}); const groundMesh = new THREE.Mesh(groundGeometry, groundMaterial); groundMesh.receiveShadow = true; groundGroup.add(groundMesh); let shadowDebugPlane: THREE.Mesh | undefined; if (rectAreaLightAndShadow.shadowMapTexture)
const generalUiProperties = { 'light source': 'rectAreaLight', 'scene volume': sceneBoxHelper.visible, 'shadow volume': shadowBoxHelper.visible, 'debug shadow map': false, 'debug output': 'off', }; setLightSource(generalUiProperties['light source']); const dataGui = new DataGUI(); dataGui.gui.add(generalUiProperties, 'light source', { 'rectAreaLight': 'rectAreaLight', 'directionalLight': 'directionalLight' }).onChange((value) => setLightSource(value)); dataGui.gui.add(generalUiProperties, 'scene volume').onChange((enabled: boolean) => sceneBoxHelper.visible = enabled); dataGui.gui.add(generalUiProperties, 'shadow volume').onChange((enabled: boolean) => shadowBoxHelper.visible = enabled); dataGui.gui.add(generalUiProperties, 'debug shadow map'); let debugOptions: any = {}; debugOptions['off'] = 'off'; if (rectAreaLightAndShadow.depthTexture) { debugOptions['depth'] = 'depth'; } debugOptions['shadow'] = 'shadow'; dataGui.gui.add(generalUiProperties, 'debug output', debugOptions); const shadowFolder = dataGui.gui.addFolder('rectAreaLight shadow'); shadowFolder.add<any>(rectAreaLightAndShadow, 'shadowIntensity', 0.0, 1.0); const onWindowResize = () => { const width = window.innerWidth; const height = window.innerHeight; camera.aspect = width / height; camera.updateProjectionMatrix(); renderer.setSize(width, height); rectAreaLightAndShadow.setSize(width, height); }; window.addEventListener('resize', onWindowResize, false); const elapsedTime = new ElapsedTime(); const animate = (timestamp: number) => { elapsedTime.update(timestamp); requestAnimationFrame(animate); for (let i = 0; i < meshes.length; ++i) { meshes[i].position.y = 1.5 + Math.sin(elapsedTime.allTimeMs / 1000 * Math.PI * 2 * 0.2 + i / 5 * Math.PI * 2) * 1; meshes[i].rotation.x += elapsedTime.getDegreePerSecond((1 + i / 5) * 60, true); meshes[i].rotation.y += elapsedTime.getDegreePerSecond((1 + i / (i-5)) * 60, true); } controls.update(); render(); statistic.update(); } let invisibleObjects: any[]= 
[sceneBoxHelper, shadowBoxHelper, transformControl, rectAreaLightAndShadow.rectAreaLight]; if (shadowDebugPlane) { invisibleObjects.push(shadowDebugPlane); } const renderOverrideVisibility = new RenderOverrideVisibility( false, (object: any) => invisibleObjects.includes(object), null); const renderShadow = () => { renderOverrideVisibility.render(scene, () => { if (generalUiProperties['debug output'] === 'off' && generalUiProperties['light source'] !== 'rectAreaLight' && !generalUiProperties['debug output']) { return; } rectAreaLightAndShadow.renderShadow(renderer, scene, camera); }); } const depthRenderMaterial = rectAreaLightAndShadow.depthTexture ? new LinearDepthRenderMaterial({ depthTexture: rectAreaLightAndShadow.depthTexture }) : null; const render = () => { renderShadow(); switch(generalUiProperties['debug output']) { case 'depth': if (depthRenderMaterial) { renderPass.renderScreenSpace(renderer, depthRenderMaterial.update({camera: camera}), null); } break; case 'shadow': rectAreaLightAndShadow.blendShadow(renderer, true); break; default: if (shadowDebugPlane) { shadowDebugPlane.visible = generalUiProperties['debug shadow map']; } renderer.render(scene, camera); if (shadowDebugPlane) { shadowDebugPlane.visible = false; } if (generalUiProperties['light source'] === 'rectAreaLight') { rectAreaLightAndShadow.blendShadow(renderer, false); } break; } } animate(0); updateShadowBox(); }
{ shadowDebugPlane = new THREE.Mesh( new THREE.PlaneGeometry(5, 5), new THREE.MeshBasicMaterial({ map: rectAreaLightAndShadow.shadowMapTexture }) ); shadowDebugPlane.position.x = -8; shadowDebugPlane.position.y = 3; shadowDebugPlane.visible = false; scene.add(shadowDebugPlane); }
conditional_block
test_sync.py
# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from unittest.mock import AsyncMock, Mock, patch from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, JoinRules from synapse.api.errors import Codes, ResourceLimitError from synapse.api.filtering import Filtering from synapse.api.room_versions import RoomVersions from synapse.handlers.sync import SyncConfig, SyncResult from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer from synapse.types import UserID, create_requester from synapse.util import Clock import tests.unittest import tests.utils class SyncTestCase(tests.unittest.HomeserverTestCase): """Tests Sync Handler.""" servlets = [ admin.register_servlets, knock.register_servlets, login.register_servlets, room.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.sync_handler = self.hs.get_sync_handler() self.store = self.hs.get_datastores().main # AuthBlocking reads from the hs' config on initialization. 
We need to # modify its config instead of the hs' self.auth_blocking = self.hs.get_auth_blocking() def test_wait_for_sync_for_user_auth_blocking(self) -> None: user_id1 = "@user1:test" user_id2 = "@user2:test" sync_config = generate_sync_config(user_id1) requester = create_requester(user_id1) self.reactor.advance(100) # So we get not 0 time self.auth_blocking._limit_usage_by_mau = True self.auth_blocking._max_mau_value = 1 # Check that the happy case does not throw errors self.get_success(self.store.upsert_monthly_active_user(user_id1)) self.get_success( self.sync_handler.wait_for_sync_for_user(requester, sync_config) ) # Test that global lock works self.auth_blocking._hs_disabled = True e = self.get_failure( self.sync_handler.wait_for_sync_for_user(requester, sync_config), ResourceLimitError, ) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.auth_blocking._hs_disabled = False sync_config = generate_sync_config(user_id2) requester = create_requester(user_id2) e = self.get_failure( self.sync_handler.wait_for_sync_for_user(requester, sync_config), ResourceLimitError, ) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) def test_unknown_room_version(self) -> None: """ A room with an unknown room version should not break sync (and should be excluded). """ inviter = self.register_user("creator", "pass", admin=True) inviter_tok = self.login("@creator:test", "pass") user = self.register_user("user", "pass") tok = self.login("user", "pass") # Do an initial sync on a different device. requester = create_requester(user) initial_result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev") ) ) # Create a room as the user. joined_room = self.helper.create_room_as(user, tok=tok) # Invite the user to the room as someone else. 
invite_room = self.helper.create_room_as(inviter, tok=inviter_tok) self.helper.invite(invite_room, targ=user, tok=inviter_tok) knock_room = self.helper.create_room_as( inviter, room_version=RoomVersions.V7.identifier, tok=inviter_tok ) self.helper.send_state( knock_room, EventTypes.JoinRules, {"join_rule": JoinRules.KNOCK}, tok=inviter_tok, ) channel = self.make_request( "POST", "/_matrix/client/r0/knock/%s" % (knock_room,), b"{}", tok, ) self.assertEqual(200, channel.code, channel.result) # The rooms should appear in the sync response. result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user) ) ) self.assertIn(joined_room, [r.room_id for r in result.joined]) self.assertIn(invite_room, [r.room_id for r in result.invited]) self.assertIn(knock_room, [r.room_id for r in result.knocked]) # Test a incremental sync (by providing a since_token). result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev"), since_token=initial_result.next_batch, ) ) self.assertIn(joined_room, [r.room_id for r in result.joined]) self.assertIn(invite_room, [r.room_id for r in result.invited]) self.assertIn(knock_room, [r.room_id for r in result.knocked]) # Poke the database and update the room version to an unknown one. for room_id in (joined_room, invite_room, knock_room):
# Blow away caches (supported room versions can only change due to a restart). self.store.get_rooms_for_user_with_stream_ordering.invalidate_all() self.store.get_rooms_for_user.invalidate_all() self.store._get_event_cache.clear() self.store._event_ref.clear() # The rooms should be excluded from the sync response. # Get a new request key. result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user) ) ) self.assertNotIn(joined_room, [r.room_id for r in result.joined]) self.assertNotIn(invite_room, [r.room_id for r in result.invited]) self.assertNotIn(knock_room, [r.room_id for r in result.knocked]) # The rooms should also not be in an incremental sync. result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev"), since_token=initial_result.next_batch, ) ) self.assertNotIn(joined_room, [r.room_id for r in result.joined]) self.assertNotIn(invite_room, [r.room_id for r in result.invited]) self.assertNotIn(knock_room, [r.room_id for r in result.knocked]) def test_ban_wins_race_with_join(self) -> None: """Rooms shouldn't appear under "joined" if a join loses a race to a ban. A complicated edge case. Imagine the following scenario: * you attempt to join a room * racing with that is a ban which comes in over federation, which ends up with an earlier stream_ordering than the join. * you get a sync response with a sync token which is _after_ the ban, but before the join * now your join lands; it is a valid event because its `prev_event`s predate the ban, but will not make it into current_state_events (because bans win over joins in state res, essentially). * When we do a sync from the incremental sync, the only event in the timeline is your join ... and yet you aren't joined. The ban coming in over federation isn't crucial for this behaviour; the key requirements are: 1. 
the homeserver generates a join event with prev_events that precede the ban (so that it passes the "are you banned" test) 2. the join event has a stream_ordering after that of the ban. We use monkeypatching to artificially trigger condition (1). """ # A local user Alice creates a room. owner = self.register_user("alice", "password") owner_tok = self.login(owner, "password") room_id = self.helper.create_room_as(owner, is_public=True, tok=owner_tok) # Do a sync as Alice to get the latest event in the room. alice_sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(owner), generate_sync_config(owner) ) ) self.assertEqual(len(alice_sync_result.joined), 1) self.assertEqual(alice_sync_result.joined[0].room_id, room_id) last_room_creation_event_id = ( alice_sync_result.joined[0].timeline.events[-1].event_id ) # Eve, a ne'er-do-well, registers. eve = self.register_user("eve", "password") eve_token = self.login(eve, "password") # Alice preemptively bans Eve. self.helper.ban(room_id, owner, eve, tok=owner_tok) # Eve syncs. eve_requester = create_requester(eve) eve_sync_config = generate_sync_config(eve) eve_sync_after_ban: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config) ) # Sanity check this sync result. We shouldn't be joined to the room. self.assertEqual(eve_sync_after_ban.joined, []) # Eve tries to join the room. We monkey patch the internal logic which selects # the prev_events used when creating the join event, such that the ban does not # precede the join. mocked_get_prev_events = patch.object( self.hs.get_datastores().main, "get_prev_events_for_room", new_callable=AsyncMock, return_value=[last_room_creation_event_id], ) with mocked_get_prev_events: self.helper.join(room_id, eve, tok=eve_token) # Eve makes a second, incremental sync. 
eve_incremental_sync_after_join: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( eve_requester, eve_sync_config, since_token=eve_sync_after_ban.next_batch, ) ) # Eve should not see herself as joined to the room. self.assertEqual(eve_incremental_sync_after_join.joined, []) # If we did a third initial sync, we should _still_ see eve is not joined to the room. eve_initial_sync_after_join: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( eve_requester, eve_sync_config, since_token=None, ) ) self.assertEqual(eve_initial_sync_after_join.joined, []) _request_key = 0 def generate_sync_config( user_id: str, device_id: Optional[str] = "device_id" ) -> SyncConfig: """Generate a sync config (with a unique request key).""" global _request_key _request_key += 1 return SyncConfig( user=UserID.from_string(user_id), filter_collection=Filtering(Mock()).DEFAULT_FILTER_COLLECTION, is_guest=False, request_key=("request_key", _request_key), device_id=device_id, )
self.get_success( self.hs.get_datastores().main.db_pool.simple_update( "rooms", keyvalues={"room_id": room_id}, updatevalues={"room_version": "unknown-room-version"}, desc="updated-room-version", ) )
conditional_block
test_sync.py
# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from unittest.mock import AsyncMock, Mock, patch from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, JoinRules from synapse.api.errors import Codes, ResourceLimitError from synapse.api.filtering import Filtering from synapse.api.room_versions import RoomVersions from synapse.handlers.sync import SyncConfig, SyncResult from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer from synapse.types import UserID, create_requester from synapse.util import Clock import tests.unittest import tests.utils class SyncTestCase(tests.unittest.HomeserverTestCase):
login.register_servlets, room.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.sync_handler = self.hs.get_sync_handler() self.store = self.hs.get_datastores().main # AuthBlocking reads from the hs' config on initialization. We need to # modify its config instead of the hs' self.auth_blocking = self.hs.get_auth_blocking() def test_wait_for_sync_for_user_auth_blocking(self) -> None: user_id1 = "@user1:test" user_id2 = "@user2:test" sync_config = generate_sync_config(user_id1) requester = create_requester(user_id1) self.reactor.advance(100) # So we get not 0 time self.auth_blocking._limit_usage_by_mau = True self.auth_blocking._max_mau_value = 1 # Check that the happy case does not throw errors self.get_success(self.store.upsert_monthly_active_user(user_id1)) self.get_success( self.sync_handler.wait_for_sync_for_user(requester, sync_config) ) # Test that global lock works self.auth_blocking._hs_disabled = True e = self.get_failure( self.sync_handler.wait_for_sync_for_user(requester, sync_config), ResourceLimitError, ) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.auth_blocking._hs_disabled = False sync_config = generate_sync_config(user_id2) requester = create_requester(user_id2) e = self.get_failure( self.sync_handler.wait_for_sync_for_user(requester, sync_config), ResourceLimitError, ) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) def test_unknown_room_version(self) -> None: """ A room with an unknown room version should not break sync (and should be excluded). """ inviter = self.register_user("creator", "pass", admin=True) inviter_tok = self.login("@creator:test", "pass") user = self.register_user("user", "pass") tok = self.login("user", "pass") # Do an initial sync on a different device. 
requester = create_requester(user) initial_result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev") ) ) # Create a room as the user. joined_room = self.helper.create_room_as(user, tok=tok) # Invite the user to the room as someone else. invite_room = self.helper.create_room_as(inviter, tok=inviter_tok) self.helper.invite(invite_room, targ=user, tok=inviter_tok) knock_room = self.helper.create_room_as( inviter, room_version=RoomVersions.V7.identifier, tok=inviter_tok ) self.helper.send_state( knock_room, EventTypes.JoinRules, {"join_rule": JoinRules.KNOCK}, tok=inviter_tok, ) channel = self.make_request( "POST", "/_matrix/client/r0/knock/%s" % (knock_room,), b"{}", tok, ) self.assertEqual(200, channel.code, channel.result) # The rooms should appear in the sync response. result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user) ) ) self.assertIn(joined_room, [r.room_id for r in result.joined]) self.assertIn(invite_room, [r.room_id for r in result.invited]) self.assertIn(knock_room, [r.room_id for r in result.knocked]) # Test a incremental sync (by providing a since_token). result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev"), since_token=initial_result.next_batch, ) ) self.assertIn(joined_room, [r.room_id for r in result.joined]) self.assertIn(invite_room, [r.room_id for r in result.invited]) self.assertIn(knock_room, [r.room_id for r in result.knocked]) # Poke the database and update the room version to an unknown one. for room_id in (joined_room, invite_room, knock_room): self.get_success( self.hs.get_datastores().main.db_pool.simple_update( "rooms", keyvalues={"room_id": room_id}, updatevalues={"room_version": "unknown-room-version"}, desc="updated-room-version", ) ) # Blow away caches (supported room versions can only change due to a restart). 
self.store.get_rooms_for_user_with_stream_ordering.invalidate_all() self.store.get_rooms_for_user.invalidate_all() self.store._get_event_cache.clear() self.store._event_ref.clear() # The rooms should be excluded from the sync response. # Get a new request key. result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user) ) ) self.assertNotIn(joined_room, [r.room_id for r in result.joined]) self.assertNotIn(invite_room, [r.room_id for r in result.invited]) self.assertNotIn(knock_room, [r.room_id for r in result.knocked]) # The rooms should also not be in an incremental sync. result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev"), since_token=initial_result.next_batch, ) ) self.assertNotIn(joined_room, [r.room_id for r in result.joined]) self.assertNotIn(invite_room, [r.room_id for r in result.invited]) self.assertNotIn(knock_room, [r.room_id for r in result.knocked]) def test_ban_wins_race_with_join(self) -> None: """Rooms shouldn't appear under "joined" if a join loses a race to a ban. A complicated edge case. Imagine the following scenario: * you attempt to join a room * racing with that is a ban which comes in over federation, which ends up with an earlier stream_ordering than the join. * you get a sync response with a sync token which is _after_ the ban, but before the join * now your join lands; it is a valid event because its `prev_event`s predate the ban, but will not make it into current_state_events (because bans win over joins in state res, essentially). * When we do a sync from the incremental sync, the only event in the timeline is your join ... and yet you aren't joined. The ban coming in over federation isn't crucial for this behaviour; the key requirements are: 1. the homeserver generates a join event with prev_events that precede the ban (so that it passes the "are you banned" test) 2. 
the join event has a stream_ordering after that of the ban. We use monkeypatching to artificially trigger condition (1). """ # A local user Alice creates a room. owner = self.register_user("alice", "password") owner_tok = self.login(owner, "password") room_id = self.helper.create_room_as(owner, is_public=True, tok=owner_tok) # Do a sync as Alice to get the latest event in the room. alice_sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(owner), generate_sync_config(owner) ) ) self.assertEqual(len(alice_sync_result.joined), 1) self.assertEqual(alice_sync_result.joined[0].room_id, room_id) last_room_creation_event_id = ( alice_sync_result.joined[0].timeline.events[-1].event_id ) # Eve, a ne'er-do-well, registers. eve = self.register_user("eve", "password") eve_token = self.login(eve, "password") # Alice preemptively bans Eve. self.helper.ban(room_id, owner, eve, tok=owner_tok) # Eve syncs. eve_requester = create_requester(eve) eve_sync_config = generate_sync_config(eve) eve_sync_after_ban: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config) ) # Sanity check this sync result. We shouldn't be joined to the room. self.assertEqual(eve_sync_after_ban.joined, []) # Eve tries to join the room. We monkey patch the internal logic which selects # the prev_events used when creating the join event, such that the ban does not # precede the join. mocked_get_prev_events = patch.object( self.hs.get_datastores().main, "get_prev_events_for_room", new_callable=AsyncMock, return_value=[last_room_creation_event_id], ) with mocked_get_prev_events: self.helper.join(room_id, eve, tok=eve_token) # Eve makes a second, incremental sync. eve_incremental_sync_after_join: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( eve_requester, eve_sync_config, since_token=eve_sync_after_ban.next_batch, ) ) # Eve should not see herself as joined to the room. 
self.assertEqual(eve_incremental_sync_after_join.joined, []) # If we did a third initial sync, we should _still_ see eve is not joined to the room. eve_initial_sync_after_join: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( eve_requester, eve_sync_config, since_token=None, ) ) self.assertEqual(eve_initial_sync_after_join.joined, []) _request_key = 0 def generate_sync_config( user_id: str, device_id: Optional[str] = "device_id" ) -> SyncConfig: """Generate a sync config (with a unique request key).""" global _request_key _request_key += 1 return SyncConfig( user=UserID.from_string(user_id), filter_collection=Filtering(Mock()).DEFAULT_FILTER_COLLECTION, is_guest=False, request_key=("request_key", _request_key), device_id=device_id, )
"""Tests Sync Handler.""" servlets = [ admin.register_servlets, knock.register_servlets,
random_line_split
test_sync.py
# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from unittest.mock import AsyncMock, Mock, patch from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, JoinRules from synapse.api.errors import Codes, ResourceLimitError from synapse.api.filtering import Filtering from synapse.api.room_versions import RoomVersions from synapse.handlers.sync import SyncConfig, SyncResult from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer from synapse.types import UserID, create_requester from synapse.util import Clock import tests.unittest import tests.utils class SyncTestCase(tests.unittest.HomeserverTestCase): """Tests Sync Handler.""" servlets = [ admin.register_servlets, knock.register_servlets, login.register_servlets, room.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.sync_handler = self.hs.get_sync_handler() self.store = self.hs.get_datastores().main # AuthBlocking reads from the hs' config on initialization. 
We need to # modify its config instead of the hs' self.auth_blocking = self.hs.get_auth_blocking() def test_wait_for_sync_for_user_auth_blocking(self) -> None: user_id1 = "@user1:test" user_id2 = "@user2:test" sync_config = generate_sync_config(user_id1) requester = create_requester(user_id1) self.reactor.advance(100) # So we get not 0 time self.auth_blocking._limit_usage_by_mau = True self.auth_blocking._max_mau_value = 1 # Check that the happy case does not throw errors self.get_success(self.store.upsert_monthly_active_user(user_id1)) self.get_success( self.sync_handler.wait_for_sync_for_user(requester, sync_config) ) # Test that global lock works self.auth_blocking._hs_disabled = True e = self.get_failure( self.sync_handler.wait_for_sync_for_user(requester, sync_config), ResourceLimitError, ) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.auth_blocking._hs_disabled = False sync_config = generate_sync_config(user_id2) requester = create_requester(user_id2) e = self.get_failure( self.sync_handler.wait_for_sync_for_user(requester, sync_config), ResourceLimitError, ) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) def test_unknown_room_version(self) -> None: """ A room with an unknown room version should not break sync (and should be excluded). """ inviter = self.register_user("creator", "pass", admin=True) inviter_tok = self.login("@creator:test", "pass") user = self.register_user("user", "pass") tok = self.login("user", "pass") # Do an initial sync on a different device. requester = create_requester(user) initial_result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev") ) ) # Create a room as the user. joined_room = self.helper.create_room_as(user, tok=tok) # Invite the user to the room as someone else. 
invite_room = self.helper.create_room_as(inviter, tok=inviter_tok) self.helper.invite(invite_room, targ=user, tok=inviter_tok) knock_room = self.helper.create_room_as( inviter, room_version=RoomVersions.V7.identifier, tok=inviter_tok ) self.helper.send_state( knock_room, EventTypes.JoinRules, {"join_rule": JoinRules.KNOCK}, tok=inviter_tok, ) channel = self.make_request( "POST", "/_matrix/client/r0/knock/%s" % (knock_room,), b"{}", tok, ) self.assertEqual(200, channel.code, channel.result) # The rooms should appear in the sync response. result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user) ) ) self.assertIn(joined_room, [r.room_id for r in result.joined]) self.assertIn(invite_room, [r.room_id for r in result.invited]) self.assertIn(knock_room, [r.room_id for r in result.knocked]) # Test a incremental sync (by providing a since_token). result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev"), since_token=initial_result.next_batch, ) ) self.assertIn(joined_room, [r.room_id for r in result.joined]) self.assertIn(invite_room, [r.room_id for r in result.invited]) self.assertIn(knock_room, [r.room_id for r in result.knocked]) # Poke the database and update the room version to an unknown one. for room_id in (joined_room, invite_room, knock_room): self.get_success( self.hs.get_datastores().main.db_pool.simple_update( "rooms", keyvalues={"room_id": room_id}, updatevalues={"room_version": "unknown-room-version"}, desc="updated-room-version", ) ) # Blow away caches (supported room versions can only change due to a restart). self.store.get_rooms_for_user_with_stream_ordering.invalidate_all() self.store.get_rooms_for_user.invalidate_all() self.store._get_event_cache.clear() self.store._event_ref.clear() # The rooms should be excluded from the sync response. # Get a new request key. 
result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user) ) ) self.assertNotIn(joined_room, [r.room_id for r in result.joined]) self.assertNotIn(invite_room, [r.room_id for r in result.invited]) self.assertNotIn(knock_room, [r.room_id for r in result.knocked]) # The rooms should also not be in an incremental sync. result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev"), since_token=initial_result.next_batch, ) ) self.assertNotIn(joined_room, [r.room_id for r in result.joined]) self.assertNotIn(invite_room, [r.room_id for r in result.invited]) self.assertNotIn(knock_room, [r.room_id for r in result.knocked]) def
(self) -> None: """Rooms shouldn't appear under "joined" if a join loses a race to a ban. A complicated edge case. Imagine the following scenario: * you attempt to join a room * racing with that is a ban which comes in over federation, which ends up with an earlier stream_ordering than the join. * you get a sync response with a sync token which is _after_ the ban, but before the join * now your join lands; it is a valid event because its `prev_event`s predate the ban, but will not make it into current_state_events (because bans win over joins in state res, essentially). * When we do a sync from the incremental sync, the only event in the timeline is your join ... and yet you aren't joined. The ban coming in over federation isn't crucial for this behaviour; the key requirements are: 1. the homeserver generates a join event with prev_events that precede the ban (so that it passes the "are you banned" test) 2. the join event has a stream_ordering after that of the ban. We use monkeypatching to artificially trigger condition (1). """ # A local user Alice creates a room. owner = self.register_user("alice", "password") owner_tok = self.login(owner, "password") room_id = self.helper.create_room_as(owner, is_public=True, tok=owner_tok) # Do a sync as Alice to get the latest event in the room. alice_sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(owner), generate_sync_config(owner) ) ) self.assertEqual(len(alice_sync_result.joined), 1) self.assertEqual(alice_sync_result.joined[0].room_id, room_id) last_room_creation_event_id = ( alice_sync_result.joined[0].timeline.events[-1].event_id ) # Eve, a ne'er-do-well, registers. eve = self.register_user("eve", "password") eve_token = self.login(eve, "password") # Alice preemptively bans Eve. self.helper.ban(room_id, owner, eve, tok=owner_tok) # Eve syncs. 
eve_requester = create_requester(eve) eve_sync_config = generate_sync_config(eve) eve_sync_after_ban: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config) ) # Sanity check this sync result. We shouldn't be joined to the room. self.assertEqual(eve_sync_after_ban.joined, []) # Eve tries to join the room. We monkey patch the internal logic which selects # the prev_events used when creating the join event, such that the ban does not # precede the join. mocked_get_prev_events = patch.object( self.hs.get_datastores().main, "get_prev_events_for_room", new_callable=AsyncMock, return_value=[last_room_creation_event_id], ) with mocked_get_prev_events: self.helper.join(room_id, eve, tok=eve_token) # Eve makes a second, incremental sync. eve_incremental_sync_after_join: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( eve_requester, eve_sync_config, since_token=eve_sync_after_ban.next_batch, ) ) # Eve should not see herself as joined to the room. self.assertEqual(eve_incremental_sync_after_join.joined, []) # If we did a third initial sync, we should _still_ see eve is not joined to the room. eve_initial_sync_after_join: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( eve_requester, eve_sync_config, since_token=None, ) ) self.assertEqual(eve_initial_sync_after_join.joined, []) _request_key = 0 def generate_sync_config( user_id: str, device_id: Optional[str] = "device_id" ) -> SyncConfig: """Generate a sync config (with a unique request key).""" global _request_key _request_key += 1 return SyncConfig( user=UserID.from_string(user_id), filter_collection=Filtering(Mock()).DEFAULT_FILTER_COLLECTION, is_guest=False, request_key=("request_key", _request_key), device_id=device_id, )
test_ban_wins_race_with_join
identifier_name
test_sync.py
# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from unittest.mock import AsyncMock, Mock, patch from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, JoinRules from synapse.api.errors import Codes, ResourceLimitError from synapse.api.filtering import Filtering from synapse.api.room_versions import RoomVersions from synapse.handlers.sync import SyncConfig, SyncResult from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer from synapse.types import UserID, create_requester from synapse.util import Clock import tests.unittest import tests.utils class SyncTestCase(tests.unittest.HomeserverTestCase): """Tests Sync Handler.""" servlets = [ admin.register_servlets, knock.register_servlets, login.register_servlets, room.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.sync_handler = self.hs.get_sync_handler() self.store = self.hs.get_datastores().main # AuthBlocking reads from the hs' config on initialization. 
We need to # modify its config instead of the hs' self.auth_blocking = self.hs.get_auth_blocking() def test_wait_for_sync_for_user_auth_blocking(self) -> None: user_id1 = "@user1:test" user_id2 = "@user2:test" sync_config = generate_sync_config(user_id1) requester = create_requester(user_id1) self.reactor.advance(100) # So we get not 0 time self.auth_blocking._limit_usage_by_mau = True self.auth_blocking._max_mau_value = 1 # Check that the happy case does not throw errors self.get_success(self.store.upsert_monthly_active_user(user_id1)) self.get_success( self.sync_handler.wait_for_sync_for_user(requester, sync_config) ) # Test that global lock works self.auth_blocking._hs_disabled = True e = self.get_failure( self.sync_handler.wait_for_sync_for_user(requester, sync_config), ResourceLimitError, ) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.auth_blocking._hs_disabled = False sync_config = generate_sync_config(user_id2) requester = create_requester(user_id2) e = self.get_failure( self.sync_handler.wait_for_sync_for_user(requester, sync_config), ResourceLimitError, ) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) def test_unknown_room_version(self) -> None: """ A room with an unknown room version should not break sync (and should be excluded). """ inviter = self.register_user("creator", "pass", admin=True) inviter_tok = self.login("@creator:test", "pass") user = self.register_user("user", "pass") tok = self.login("user", "pass") # Do an initial sync on a different device. requester = create_requester(user) initial_result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev") ) ) # Create a room as the user. joined_room = self.helper.create_room_as(user, tok=tok) # Invite the user to the room as someone else. 
invite_room = self.helper.create_room_as(inviter, tok=inviter_tok) self.helper.invite(invite_room, targ=user, tok=inviter_tok) knock_room = self.helper.create_room_as( inviter, room_version=RoomVersions.V7.identifier, tok=inviter_tok ) self.helper.send_state( knock_room, EventTypes.JoinRules, {"join_rule": JoinRules.KNOCK}, tok=inviter_tok, ) channel = self.make_request( "POST", "/_matrix/client/r0/knock/%s" % (knock_room,), b"{}", tok, ) self.assertEqual(200, channel.code, channel.result) # The rooms should appear in the sync response. result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user) ) ) self.assertIn(joined_room, [r.room_id for r in result.joined]) self.assertIn(invite_room, [r.room_id for r in result.invited]) self.assertIn(knock_room, [r.room_id for r in result.knocked]) # Test a incremental sync (by providing a since_token). result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev"), since_token=initial_result.next_batch, ) ) self.assertIn(joined_room, [r.room_id for r in result.joined]) self.assertIn(invite_room, [r.room_id for r in result.invited]) self.assertIn(knock_room, [r.room_id for r in result.knocked]) # Poke the database and update the room version to an unknown one. for room_id in (joined_room, invite_room, knock_room): self.get_success( self.hs.get_datastores().main.db_pool.simple_update( "rooms", keyvalues={"room_id": room_id}, updatevalues={"room_version": "unknown-room-version"}, desc="updated-room-version", ) ) # Blow away caches (supported room versions can only change due to a restart). self.store.get_rooms_for_user_with_stream_ordering.invalidate_all() self.store.get_rooms_for_user.invalidate_all() self.store._get_event_cache.clear() self.store._event_ref.clear() # The rooms should be excluded from the sync response. # Get a new request key. 
result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user) ) ) self.assertNotIn(joined_room, [r.room_id for r in result.joined]) self.assertNotIn(invite_room, [r.room_id for r in result.invited]) self.assertNotIn(knock_room, [r.room_id for r in result.knocked]) # The rooms should also not be in an incremental sync. result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, sync_config=generate_sync_config(user, device_id="dev"), since_token=initial_result.next_batch, ) ) self.assertNotIn(joined_room, [r.room_id for r in result.joined]) self.assertNotIn(invite_room, [r.room_id for r in result.invited]) self.assertNotIn(knock_room, [r.room_id for r in result.knocked]) def test_ban_wins_race_with_join(self) -> None: """Rooms shouldn't appear under "joined" if a join loses a race to a ban. A complicated edge case. Imagine the following scenario: * you attempt to join a room * racing with that is a ban which comes in over federation, which ends up with an earlier stream_ordering than the join. * you get a sync response with a sync token which is _after_ the ban, but before the join * now your join lands; it is a valid event because its `prev_event`s predate the ban, but will not make it into current_state_events (because bans win over joins in state res, essentially). * When we do a sync from the incremental sync, the only event in the timeline is your join ... and yet you aren't joined. The ban coming in over federation isn't crucial for this behaviour; the key requirements are: 1. the homeserver generates a join event with prev_events that precede the ban (so that it passes the "are you banned" test) 2. the join event has a stream_ordering after that of the ban. We use monkeypatching to artificially trigger condition (1). """ # A local user Alice creates a room. 
owner = self.register_user("alice", "password") owner_tok = self.login(owner, "password") room_id = self.helper.create_room_as(owner, is_public=True, tok=owner_tok) # Do a sync as Alice to get the latest event in the room. alice_sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(owner), generate_sync_config(owner) ) ) self.assertEqual(len(alice_sync_result.joined), 1) self.assertEqual(alice_sync_result.joined[0].room_id, room_id) last_room_creation_event_id = ( alice_sync_result.joined[0].timeline.events[-1].event_id ) # Eve, a ne'er-do-well, registers. eve = self.register_user("eve", "password") eve_token = self.login(eve, "password") # Alice preemptively bans Eve. self.helper.ban(room_id, owner, eve, tok=owner_tok) # Eve syncs. eve_requester = create_requester(eve) eve_sync_config = generate_sync_config(eve) eve_sync_after_ban: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config) ) # Sanity check this sync result. We shouldn't be joined to the room. self.assertEqual(eve_sync_after_ban.joined, []) # Eve tries to join the room. We monkey patch the internal logic which selects # the prev_events used when creating the join event, such that the ban does not # precede the join. mocked_get_prev_events = patch.object( self.hs.get_datastores().main, "get_prev_events_for_room", new_callable=AsyncMock, return_value=[last_room_creation_event_id], ) with mocked_get_prev_events: self.helper.join(room_id, eve, tok=eve_token) # Eve makes a second, incremental sync. eve_incremental_sync_after_join: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( eve_requester, eve_sync_config, since_token=eve_sync_after_ban.next_batch, ) ) # Eve should not see herself as joined to the room. self.assertEqual(eve_incremental_sync_after_join.joined, []) # If we did a third initial sync, we should _still_ see eve is not joined to the room. 
eve_initial_sync_after_join: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( eve_requester, eve_sync_config, since_token=None, ) ) self.assertEqual(eve_initial_sync_after_join.joined, []) _request_key = 0 def generate_sync_config( user_id: str, device_id: Optional[str] = "device_id" ) -> SyncConfig:
"""Generate a sync config (with a unique request key).""" global _request_key _request_key += 1 return SyncConfig( user=UserID.from_string(user_id), filter_collection=Filtering(Mock()).DEFAULT_FILTER_COLLECTION, is_guest=False, request_key=("request_key", _request_key), device_id=device_id, )
identifier_body
start-trip.js
import React, { Component } from 'react'; import { render } from 'react-dom'; import { Router, Route, IndexRoute, hashHistory, Link } from 'react-router'; import $, { ajax } from 'jquery'; window.$ = $; import DatePicker from 'material-ui/lib/date-picker/date-picker'; import ReactDatePicker from 'react-date-picker'; import Cookies from 'js-cookie'; import SSF from 'react-simple-serial-form'; import moment from 'moment'; export default class StartTrip extends Component{ constructor(...args){ super(...args) this.state = { citiesWithGames: [], startDate: "", mapProps: {}, currentTrip: [], waypts: [], route: [], totalPitStops: 0, start_address: [], mapStyle: {} } this.action = null; } componentWillMount() { if (Cookies.get('user_email', 'auth_token')) { return true; } else { hashHistory.replace('/'); } } dateChangeHandler(dateString) { document.querySelector('.games').classList.remove('hide'); this.setState({startDate: dateString}); ajax({ url:'https://shielded-hollows-39012.herokuapp.com/firstgame', type: 'POST', data: {'local_datetime': dateString}, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { Cookies.set('itinerary_id', data.itinerary); data.seatgeek.events.map(event => { this.setState({citiesWithGames: data.seatgeek.events}); })}); } drawMap(){ var directionsService = new google.maps.DirectionsService; var directionsDisplay = new google.maps.DirectionsRenderer; var mapDiv = document.getElementById('map'); this.setState({ mapProps: { center: {lat: 44.540, lng: -78.546}, zoom: 8, styles: [ { "featureType": "administrative", "elementType": "all", "stylers": [ { "visibility": "on" }, { "lightness": 33 } ] }, { "featureType": "landscape", "elementType": "all", "stylers": [ { "color": "#f2e5d4" } ] }, { "featureType": "poi.park", "elementType": "geometry", "stylers": [ { "color": "#c5dac6" } ] }, { "featureType": "poi.park", "elementType": "labels", "stylers": [ { "visibility": "on" }, { "lightness": 20 } ] }, { "featureType": "road", 
"elementType": "all", "stylers": [ { "lightness": 20 } ] }, { "featureType": "road.highway", "elementType": "geometry", "stylers": [ { "color": "#c5c6c6" } ] }, { "featureType": "road.arterial", "elementType": "geometry", "stylers": [ { "color": "#e4d7c6" } ] }, { "featureType": "road.local", "elementType": "geometry", "stylers": [ { "color": "#fbfaf7" } ] }, { "featureType": "water", "elementType": "all", "stylers": [ { "visibility": "on" }, { "color": "#acbcc9" } ] } ] } }) var map = new google.maps.Map(mapDiv, this.state.mapProps); directionsDisplay.setMap(map); var waypts = this.state.waypts; let updatedWaypts; directionsService.route({ origin: this.start_address.location, destination: this.end_address.location, waypoints: waypts, optimizeWaypoints: false, travelMode: google.maps.TravelMode.DRIVING }, function(response, status) { if (status === google.maps.DirectionsStatus.OK) { directionsDisplay.setDirections(response); var route = response.routes[0]; //var summaryPanel = document.getElementById('directions-panel'); //summaryPanel.innerHTML = ''; // For each route, display summary information. for (var i = 0; i < route.legs.length; i++) { var routeSegment = i + 1; } } else { window.alert('Directions request failed due to ' + status); } }); } addGameHandler(id){ document.querySelector('.calendar').classList.add('calendar-hidden'); document.querySelector('#show-calendar').classList.remove('show-calendar'); let local_datetime = this.state.startDate; ////////////UNCOMMENT TO TEST BACKEND DATA ajax({ url:'https://shielded-hollows-39012.herokuapp.com/selectgame', type: 'POST', data: {"local_datetime": local_datetime, "itinerary_id": Cookies.get('itinerary_id'), "game_number": id.id }, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { this.setState({citiesWithGames: [{id:1, title: "Loading..."}]}); ///////Below, send them the city/state data. 
Will need to make an ajax call first ajax({ url:'https://shielded-hollows-39012.herokuapp.com/nextgame', type: 'POST', data: {"itinerary_id": Cookies.get ('itinerary_id')}, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { this.setState({citiesWithGames: data.seatgeek.events, startDate: data.local_datetime})}); // data.events.map(event => { // citiesWithGames.push(event.venue.city); // }); }); ////////TURN ON THE STUFF ABOVE DO NOT DELETE ajax(`https://api.seatgeek.com/2/events?id=${id.id}`).then(data=>{ let address = data.events[0].venue.address + " " + data.events[0].venue.extended_address; let totalPitStops = this.state.totalPitStops + 1; this.setState({totalPitStops}); let route = this.state.route; route.push(data.events[0].venue.city); this.setState({route}); if (totalPitStops === 1){ console.log("i ran"); this.start_address = {location: address, stopover: true}; console.log(this.start_address); this.end_address = {location: address, stopover: true}; console.log(this.end_address); this.drawMap(); } if (totalPitStops === 2){ this.end_address = {location: address, stopover: true}; this.setState({mapStyle: {'border': '4px double grey'}}); document.querySelector('#map').classList.remove('hide-map'); this.drawMap(); } if(totalPitStops >= 3){ let waypts = this.state.waypts; waypts.push(this.end_address); this.setState({waypts}); this.end_address = {location: address, stopover: true}; this.drawMap(); } }); } getIteneraryHandler(id){ ///if a city has been clicked, add it to the trip, otherwise render the trip as is if(id.id)
else{ console.log("else ran"); hashHistory.push('/itinerary'); } } freeDayHandler(){ console.log('free day added'); } dataHandler(data) { switch (this.action){ case 'add': this.addGameHandler(data); break; case 'get': ::this.getIteneraryHandler(data); // hashHistory.push('/itinerary'); break; case 'skip': this.freeDayHandler(); break; } this.action = null; } logOutHandler() { Cookies.remove('user_email', 'auth_token', 'id'); hashHistory.push('/'); } showCalendarHandler(){ document.querySelector('.calendar').classList.remove('calendar-hidden'); document.querySelector('#show-calendar').classList.add('show-calendar'); document.querySelector('#map').classList.add('hide-map'); this.setState({route: [], totalPitStops:0, waypts:[]}); this.start_address = null; this.end_address = null; } render(){ let { citiesWithGames, startDate } = this.state; let gameDate = function(){ return moment(startDate).format('dddd, MMMM Do YYYY') === "Invalid date" ? "Click calendar to see available games" : moment(startDate).format('dddd, MMMM Do YYYY')} return( <div> <header> <Link to="/start-trip"><h1 id="title">Inside the Park</h1></Link> <i onClick={this.logOutHandler} className="fa fa-sign-out" aria-hidden="true"><span className='icon-label'> Log Out</span></i> </header> <div className="start-trip-wrapper"> <div className="calendar-map-wrapper"> <div className="calendar"> <h2>Select date below to see that day's games!</h2> <ReactDatePicker style={{"borderRadius": "5px", "boxShadow": "2px 2px 2px black"}} onChange={::this.dateChangeHandler} hideFooter={true}/> </div> <button id="show-calendar" className="show-calendar" onClick={::this.showCalendarHandler}>Reset</button> <div style={{"color": "#c7d4e5"}}>{this.state.route.join(' >> ')}</div> <div id="map" style={this.state.mapStyle}></div> </div> <div className="games hide"> <div id="game-date">{gameDate()}</div> <div id='game-picker'></div> <SSF onData={::this.dataHandler}> <div className="game-choices"> <button onClick={() => this.action 
= 'add'}>Add selected game to trip</button> <div className="get-itinerary"> <button onClick={() => this.action = 'get'}>Finalize Trip</button> {/*<input type="submit" value="Add Another Game" name="action"/> <input type="submit" value="Get Itenerary" name="action"/>*/} </div> </div> <div className="matchup-list"> {citiesWithGames.map(event => <div key={event.id} className="matchups"><label><input name="id" type="radio" value={event.id} key={event.id}></input> {event.title} </label></div>)} </div> </SSF> </div> </div> </div> ); } }
{ let local_datetime = this.state.startDate; ajax({ url:'https://shielded-hollows-39012.herokuapp.com/selectgame', type: 'POST', data: {"local_datetime": local_datetime, "itinerary_id": Cookies.get('itinerary_id'), "game_number": id.id }, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(() => { hashHistory.push('/itinerary')}); }
conditional_block
start-trip.js
import React, { Component } from 'react'; import { render } from 'react-dom'; import { Router, Route, IndexRoute, hashHistory, Link } from 'react-router'; import $, { ajax } from 'jquery'; window.$ = $; import DatePicker from 'material-ui/lib/date-picker/date-picker'; import ReactDatePicker from 'react-date-picker'; import Cookies from 'js-cookie'; import SSF from 'react-simple-serial-form'; import moment from 'moment'; export default class StartTrip extends Component{ constructor(...args){ super(...args) this.state = { citiesWithGames: [], startDate: "", mapProps: {}, currentTrip: [], waypts: [], route: [], totalPitStops: 0, start_address: [], mapStyle: {} } this.action = null; } componentWillMount() { if (Cookies.get('user_email', 'auth_token')) { return true; } else { hashHistory.replace('/'); } } dateChangeHandler(dateString) { document.querySelector('.games').classList.remove('hide'); this.setState({startDate: dateString}); ajax({ url:'https://shielded-hollows-39012.herokuapp.com/firstgame', type: 'POST', data: {'local_datetime': dateString}, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { Cookies.set('itinerary_id', data.itinerary); data.seatgeek.events.map(event => { this.setState({citiesWithGames: data.seatgeek.events}); })}); } drawMap(){ var directionsService = new google.maps.DirectionsService; var directionsDisplay = new google.maps.DirectionsRenderer; var mapDiv = document.getElementById('map'); this.setState({ mapProps: { center: {lat: 44.540, lng: -78.546}, zoom: 8, styles: [ { "featureType": "administrative", "elementType": "all", "stylers": [ { "visibility": "on" }, { "lightness": 33 } ] }, { "featureType": "landscape", "elementType": "all", "stylers": [ { "color": "#f2e5d4" } ] }, { "featureType": "poi.park", "elementType": "geometry", "stylers": [ { "color": "#c5dac6" } ] }, { "featureType": "poi.park", "elementType": "labels", "stylers": [ { "visibility": "on" }, { "lightness": 20 } ] }, { "featureType": "road", 
"elementType": "all", "stylers": [ { "lightness": 20 } ] }, { "featureType": "road.highway", "elementType": "geometry", "stylers": [ { "color": "#c5c6c6" } ] }, { "featureType": "road.arterial", "elementType": "geometry", "stylers": [ { "color": "#e4d7c6" } ] }, { "featureType": "road.local", "elementType": "geometry", "stylers": [ { "color": "#fbfaf7" } ] }, { "featureType": "water", "elementType": "all", "stylers": [ { "visibility": "on" }, { "color": "#acbcc9" } ] } ] } }) var map = new google.maps.Map(mapDiv, this.state.mapProps); directionsDisplay.setMap(map); var waypts = this.state.waypts; let updatedWaypts; directionsService.route({ origin: this.start_address.location, destination: this.end_address.location, waypoints: waypts, optimizeWaypoints: false, travelMode: google.maps.TravelMode.DRIVING }, function(response, status) { if (status === google.maps.DirectionsStatus.OK) { directionsDisplay.setDirections(response); var route = response.routes[0]; //var summaryPanel = document.getElementById('directions-panel'); //summaryPanel.innerHTML = ''; // For each route, display summary information. for (var i = 0; i < route.legs.length; i++) { var routeSegment = i + 1; } } else { window.alert('Directions request failed due to ' + status); } }); } addGameHandler(id)
getIteneraryHandler(id){ ///if a city has been clicked, add it to the trip, otherwise render the trip as is if(id.id){ let local_datetime = this.state.startDate; ajax({ url:'https://shielded-hollows-39012.herokuapp.com/selectgame', type: 'POST', data: {"local_datetime": local_datetime, "itinerary_id": Cookies.get('itinerary_id'), "game_number": id.id }, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(() => { hashHistory.push('/itinerary')}); }else{ console.log("else ran"); hashHistory.push('/itinerary'); } } freeDayHandler(){ console.log('free day added'); } dataHandler(data) { switch (this.action){ case 'add': this.addGameHandler(data); break; case 'get': ::this.getIteneraryHandler(data); // hashHistory.push('/itinerary'); break; case 'skip': this.freeDayHandler(); break; } this.action = null; } logOutHandler() { Cookies.remove('user_email', 'auth_token', 'id'); hashHistory.push('/'); } showCalendarHandler(){ document.querySelector('.calendar').classList.remove('calendar-hidden'); document.querySelector('#show-calendar').classList.add('show-calendar'); document.querySelector('#map').classList.add('hide-map'); this.setState({route: [], totalPitStops:0, waypts:[]}); this.start_address = null; this.end_address = null; } render(){ let { citiesWithGames, startDate } = this.state; let gameDate = function(){ return moment(startDate).format('dddd, MMMM Do YYYY') === "Invalid date" ? 
"Click calendar to see available games" : moment(startDate).format('dddd, MMMM Do YYYY')} return( <div> <header> <Link to="/start-trip"><h1 id="title">Inside the Park</h1></Link> <i onClick={this.logOutHandler} className="fa fa-sign-out" aria-hidden="true"><span className='icon-label'> Log Out</span></i> </header> <div className="start-trip-wrapper"> <div className="calendar-map-wrapper"> <div className="calendar"> <h2>Select date below to see that day's games!</h2> <ReactDatePicker style={{"borderRadius": "5px", "boxShadow": "2px 2px 2px black"}} onChange={::this.dateChangeHandler} hideFooter={true}/> </div> <button id="show-calendar" className="show-calendar" onClick={::this.showCalendarHandler}>Reset</button> <div style={{"color": "#c7d4e5"}}>{this.state.route.join(' >> ')}</div> <div id="map" style={this.state.mapStyle}></div> </div> <div className="games hide"> <div id="game-date">{gameDate()}</div> <div id='game-picker'></div> <SSF onData={::this.dataHandler}> <div className="game-choices"> <button onClick={() => this.action = 'add'}>Add selected game to trip</button> <div className="get-itinerary"> <button onClick={() => this.action = 'get'}>Finalize Trip</button> {/*<input type="submit" value="Add Another Game" name="action"/> <input type="submit" value="Get Itenerary" name="action"/>*/} </div> </div> <div className="matchup-list"> {citiesWithGames.map(event => <div key={event.id} className="matchups"><label><input name="id" type="radio" value={event.id} key={event.id}></input> {event.title} </label></div>)} </div> </SSF> </div> </div> </div> ); } }
{ document.querySelector('.calendar').classList.add('calendar-hidden'); document.querySelector('#show-calendar').classList.remove('show-calendar'); let local_datetime = this.state.startDate; ////////////UNCOMMENT TO TEST BACKEND DATA ajax({ url:'https://shielded-hollows-39012.herokuapp.com/selectgame', type: 'POST', data: {"local_datetime": local_datetime, "itinerary_id": Cookies.get('itinerary_id'), "game_number": id.id }, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { this.setState({citiesWithGames: [{id:1, title: "Loading..."}]}); ///////Below, send them the city/state data. Will need to make an ajax call first ajax({ url:'https://shielded-hollows-39012.herokuapp.com/nextgame', type: 'POST', data: {"itinerary_id": Cookies.get ('itinerary_id')}, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { this.setState({citiesWithGames: data.seatgeek.events, startDate: data.local_datetime})}); // data.events.map(event => { // citiesWithGames.push(event.venue.city); // }); }); ////////TURN ON THE STUFF ABOVE DO NOT DELETE ajax(`https://api.seatgeek.com/2/events?id=${id.id}`).then(data=>{ let address = data.events[0].venue.address + " " + data.events[0].venue.extended_address; let totalPitStops = this.state.totalPitStops + 1; this.setState({totalPitStops}); let route = this.state.route; route.push(data.events[0].venue.city); this.setState({route}); if (totalPitStops === 1){ console.log("i ran"); this.start_address = {location: address, stopover: true}; console.log(this.start_address); this.end_address = {location: address, stopover: true}; console.log(this.end_address); this.drawMap(); } if (totalPitStops === 2){ this.end_address = {location: address, stopover: true}; this.setState({mapStyle: {'border': '4px double grey'}}); document.querySelector('#map').classList.remove('hide-map'); this.drawMap(); } if(totalPitStops >= 3){ let waypts = this.state.waypts; waypts.push(this.end_address); this.setState({waypts}); 
this.end_address = {location: address, stopover: true}; this.drawMap(); } }); }
identifier_body
start-trip.js
import React, { Component } from 'react'; import { render } from 'react-dom'; import { Router, Route, IndexRoute, hashHistory, Link } from 'react-router'; import $, { ajax } from 'jquery'; window.$ = $; import DatePicker from 'material-ui/lib/date-picker/date-picker'; import ReactDatePicker from 'react-date-picker'; import Cookies from 'js-cookie'; import SSF from 'react-simple-serial-form'; import moment from 'moment'; export default class StartTrip extends Component{ constructor(...args){ super(...args) this.state = { citiesWithGames: [], startDate: "", mapProps: {}, currentTrip: [], waypts: [], route: [], totalPitStops: 0, start_address: [], mapStyle: {} } this.action = null; } componentWillMount() { if (Cookies.get('user_email', 'auth_token')) { return true; } else { hashHistory.replace('/'); } } dateChangeHandler(dateString) { document.querySelector('.games').classList.remove('hide'); this.setState({startDate: dateString}); ajax({ url:'https://shielded-hollows-39012.herokuapp.com/firstgame', type: 'POST', data: {'local_datetime': dateString}, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { Cookies.set('itinerary_id', data.itinerary); data.seatgeek.events.map(event => { this.setState({citiesWithGames: data.seatgeek.events}); })}); } drawMap(){ var directionsService = new google.maps.DirectionsService; var directionsDisplay = new google.maps.DirectionsRenderer; var mapDiv = document.getElementById('map'); this.setState({ mapProps: { center: {lat: 44.540, lng: -78.546}, zoom: 8, styles: [ { "featureType": "administrative", "elementType": "all", "stylers": [ { "visibility": "on" }, { "lightness": 33 } ] }, { "featureType": "landscape", "elementType": "all", "stylers": [ { "color": "#f2e5d4" } ] }, { "featureType": "poi.park", "elementType": "geometry", "stylers": [ { "color": "#c5dac6" } ] }, { "featureType": "poi.park", "elementType": "labels", "stylers": [ { "visibility": "on" }, { "lightness": 20 } ] }, { "featureType": "road", 
"elementType": "all", "stylers": [ { "lightness": 20 } ] }, { "featureType": "road.highway", "elementType": "geometry", "stylers": [ { "color": "#c5c6c6" } ] }, { "featureType": "road.arterial", "elementType": "geometry", "stylers": [ { "color": "#e4d7c6" } ] }, { "featureType": "road.local", "elementType": "geometry", "stylers": [ { "color": "#fbfaf7" } ] }, { "featureType": "water", "elementType": "all", "stylers": [ { "visibility": "on" }, { "color": "#acbcc9" } ] } ] } }) var map = new google.maps.Map(mapDiv, this.state.mapProps); directionsDisplay.setMap(map); var waypts = this.state.waypts; let updatedWaypts; directionsService.route({ origin: this.start_address.location, destination: this.end_address.location, waypoints: waypts, optimizeWaypoints: false, travelMode: google.maps.TravelMode.DRIVING }, function(response, status) { if (status === google.maps.DirectionsStatus.OK) { directionsDisplay.setDirections(response); var route = response.routes[0]; //var summaryPanel = document.getElementById('directions-panel'); //summaryPanel.innerHTML = ''; // For each route, display summary information. for (var i = 0; i < route.legs.length; i++) { var routeSegment = i + 1; } } else { window.alert('Directions request failed due to ' + status); } }); } addGameHandler(id){ document.querySelector('.calendar').classList.add('calendar-hidden'); document.querySelector('#show-calendar').classList.remove('show-calendar'); let local_datetime = this.state.startDate; ////////////UNCOMMENT TO TEST BACKEND DATA ajax({ url:'https://shielded-hollows-39012.herokuapp.com/selectgame', type: 'POST', data: {"local_datetime": local_datetime, "itinerary_id": Cookies.get('itinerary_id'), "game_number": id.id }, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { this.setState({citiesWithGames: [{id:1, title: "Loading..."}]}); ///////Below, send them the city/state data. 
Will need to make an ajax call first ajax({ url:'https://shielded-hollows-39012.herokuapp.com/nextgame', type: 'POST', data: {"itinerary_id": Cookies.get ('itinerary_id')}, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { this.setState({citiesWithGames: data.seatgeek.events, startDate: data.local_datetime})}); // data.events.map(event => { // citiesWithGames.push(event.venue.city); // }); }); ////////TURN ON THE STUFF ABOVE DO NOT DELETE ajax(`https://api.seatgeek.com/2/events?id=${id.id}`).then(data=>{ let address = data.events[0].venue.address + " " + data.events[0].venue.extended_address; let totalPitStops = this.state.totalPitStops + 1; this.setState({totalPitStops}); let route = this.state.route; route.push(data.events[0].venue.city); this.setState({route}); if (totalPitStops === 1){ console.log("i ran"); this.start_address = {location: address, stopover: true}; console.log(this.start_address); this.end_address = {location: address, stopover: true}; console.log(this.end_address); this.drawMap(); } if (totalPitStops === 2){ this.end_address = {location: address, stopover: true}; this.setState({mapStyle: {'border': '4px double grey'}}); document.querySelector('#map').classList.remove('hide-map'); this.drawMap(); } if(totalPitStops >= 3){ let waypts = this.state.waypts; waypts.push(this.end_address); this.setState({waypts}); this.end_address = {location: address, stopover: true}; this.drawMap(); } }); } getIteneraryHandler(id){ ///if a city has been clicked, add it to the trip, otherwise render the trip as is if(id.id){ let local_datetime = this.state.startDate; ajax({ url:'https://shielded-hollows-39012.herokuapp.com/selectgame', type: 'POST', data: {"local_datetime": local_datetime, "itinerary_id": Cookies.get('itinerary_id'), "game_number": id.id }, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(() => { hashHistory.push('/itinerary')}); }else{ console.log("else ran"); hashHistory.push('/itinerary'); } } freeDayHandler(){ 
console.log('free day added'); } dataHandler(data) { switch (this.action){ case 'add': this.addGameHandler(data); break; case 'get': ::this.getIteneraryHandler(data); // hashHistory.push('/itinerary'); break; case 'skip': this.freeDayHandler(); break; } this.action = null; } logOutHandler() { Cookies.remove('user_email', 'auth_token', 'id'); hashHistory.push('/'); } showCalendarHandler(){ document.querySelector('.calendar').classList.remove('calendar-hidden'); document.querySelector('#show-calendar').classList.add('show-calendar'); document.querySelector('#map').classList.add('hide-map'); this.setState({route: [], totalPitStops:0, waypts:[]}); this.start_address = null; this.end_address = null; }
(){ let { citiesWithGames, startDate } = this.state; let gameDate = function(){ return moment(startDate).format('dddd, MMMM Do YYYY') === "Invalid date" ? "Click calendar to see available games" : moment(startDate).format('dddd, MMMM Do YYYY')} return( <div> <header> <Link to="/start-trip"><h1 id="title">Inside the Park</h1></Link> <i onClick={this.logOutHandler} className="fa fa-sign-out" aria-hidden="true"><span className='icon-label'> Log Out</span></i> </header> <div className="start-trip-wrapper"> <div className="calendar-map-wrapper"> <div className="calendar"> <h2>Select date below to see that day's games!</h2> <ReactDatePicker style={{"borderRadius": "5px", "boxShadow": "2px 2px 2px black"}} onChange={::this.dateChangeHandler} hideFooter={true}/> </div> <button id="show-calendar" className="show-calendar" onClick={::this.showCalendarHandler}>Reset</button> <div style={{"color": "#c7d4e5"}}>{this.state.route.join(' >> ')}</div> <div id="map" style={this.state.mapStyle}></div> </div> <div className="games hide"> <div id="game-date">{gameDate()}</div> <div id='game-picker'></div> <SSF onData={::this.dataHandler}> <div className="game-choices"> <button onClick={() => this.action = 'add'}>Add selected game to trip</button> <div className="get-itinerary"> <button onClick={() => this.action = 'get'}>Finalize Trip</button> {/*<input type="submit" value="Add Another Game" name="action"/> <input type="submit" value="Get Itenerary" name="action"/>*/} </div> </div> <div className="matchup-list"> {citiesWithGames.map(event => <div key={event.id} className="matchups"><label><input name="id" type="radio" value={event.id} key={event.id}></input> {event.title} </label></div>)} </div> </SSF> </div> </div> </div> ); } }
render
identifier_name
start-trip.js
import React, { Component } from 'react'; import { render } from 'react-dom'; import { Router, Route, IndexRoute, hashHistory, Link } from 'react-router'; import $, { ajax } from 'jquery'; window.$ = $; import DatePicker from 'material-ui/lib/date-picker/date-picker'; import ReactDatePicker from 'react-date-picker'; import Cookies from 'js-cookie'; import SSF from 'react-simple-serial-form'; import moment from 'moment'; export default class StartTrip extends Component{ constructor(...args){ super(...args) this.state = { citiesWithGames: [], startDate: "", mapProps: {}, currentTrip: [], waypts: [], route: [], totalPitStops: 0, start_address: [], mapStyle: {} } this.action = null; } componentWillMount() { if (Cookies.get('user_email', 'auth_token')) { return true; } else { hashHistory.replace('/'); } } dateChangeHandler(dateString) { document.querySelector('.games').classList.remove('hide'); this.setState({startDate: dateString}); ajax({ url:'https://shielded-hollows-39012.herokuapp.com/firstgame', type: 'POST', data: {'local_datetime': dateString}, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { Cookies.set('itinerary_id', data.itinerary); data.seatgeek.events.map(event => { this.setState({citiesWithGames: data.seatgeek.events}); })}); } drawMap(){ var directionsService = new google.maps.DirectionsService; var directionsDisplay = new google.maps.DirectionsRenderer; var mapDiv = document.getElementById('map'); this.setState({ mapProps: { center: {lat: 44.540, lng: -78.546}, zoom: 8, styles: [ { "featureType": "administrative", "elementType": "all", "stylers": [ { "visibility": "on" }, { "lightness": 33 } ] }, { "featureType": "landscape", "elementType": "all", "stylers": [ { "color": "#f2e5d4" } ] }, { "featureType": "poi.park", "elementType": "geometry", "stylers": [ { "color": "#c5dac6" } ] }, { "featureType": "poi.park", "elementType": "labels", "stylers": [ { "visibility": "on" }, { "lightness": 20 } ] }, { "featureType": "road", 
"elementType": "all", "stylers": [ { "lightness": 20 } ] }, { "featureType": "road.highway", "elementType": "geometry", "stylers": [ { "color": "#c5c6c6" } ] }, { "featureType": "road.arterial", "elementType": "geometry", "stylers": [ { "color": "#e4d7c6" } ] }, { "featureType": "road.local", "elementType": "geometry", "stylers": [ { "color": "#fbfaf7" } ] }, { "featureType": "water", "elementType": "all", "stylers": [ { "visibility": "on" }, { "color": "#acbcc9" } ] } ] } }) var map = new google.maps.Map(mapDiv, this.state.mapProps); directionsDisplay.setMap(map); var waypts = this.state.waypts; let updatedWaypts; directionsService.route({ origin: this.start_address.location, destination: this.end_address.location, waypoints: waypts, optimizeWaypoints: false, travelMode: google.maps.TravelMode.DRIVING }, function(response, status) { if (status === google.maps.DirectionsStatus.OK) { directionsDisplay.setDirections(response); var route = response.routes[0]; //var summaryPanel = document.getElementById('directions-panel'); //summaryPanel.innerHTML = ''; // For each route, display summary information. for (var i = 0; i < route.legs.length; i++) { var routeSegment = i + 1; } } else { window.alert('Directions request failed due to ' + status); } }); } addGameHandler(id){ document.querySelector('.calendar').classList.add('calendar-hidden'); document.querySelector('#show-calendar').classList.remove('show-calendar'); let local_datetime = this.state.startDate; ////////////UNCOMMENT TO TEST BACKEND DATA ajax({ url:'https://shielded-hollows-39012.herokuapp.com/selectgame', type: 'POST', data: {"local_datetime": local_datetime, "itinerary_id": Cookies.get('itinerary_id'), "game_number": id.id }, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { this.setState({citiesWithGames: [{id:1, title: "Loading..."}]}); ///////Below, send them the city/state data. 
Will need to make an ajax call first ajax({ url:'https://shielded-hollows-39012.herokuapp.com/nextgame', type: 'POST', data: {"itinerary_id": Cookies.get ('itinerary_id')}, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(data => { this.setState({citiesWithGames: data.seatgeek.events, startDate: data.local_datetime})}); // data.events.map(event => { // citiesWithGames.push(event.venue.city); // }); }); ////////TURN ON THE STUFF ABOVE DO NOT DELETE ajax(`https://api.seatgeek.com/2/events?id=${id.id}`).then(data=>{ let address = data.events[0].venue.address + " " + data.events[0].venue.extended_address; let totalPitStops = this.state.totalPitStops + 1; this.setState({totalPitStops}); let route = this.state.route; route.push(data.events[0].venue.city); this.setState({route}); if (totalPitStops === 1){ console.log("i ran"); this.start_address = {location: address, stopover: true}; console.log(this.start_address); this.end_address = {location: address, stopover: true}; console.log(this.end_address); this.drawMap(); } if (totalPitStops === 2){ this.end_address = {location: address, stopover: true}; this.setState({mapStyle: {'border': '4px double grey'}}); document.querySelector('#map').classList.remove('hide-map'); this.drawMap(); } if(totalPitStops >= 3){ let waypts = this.state.waypts; waypts.push(this.end_address); this.setState({waypts}); this.end_address = {location: address, stopover: true}; this.drawMap(); } }); } getIteneraryHandler(id){ ///if a city has been clicked, add it to the trip, otherwise render the trip as is if(id.id){ let local_datetime = this.state.startDate; ajax({ url:'https://shielded-hollows-39012.herokuapp.com/selectgame', type: 'POST', data: {"local_datetime": local_datetime, "itinerary_id": Cookies.get('itinerary_id'), "game_number": id.id }, headers: { 'X-Auth-Token': Cookies.get('auth_token') } }).then(() => { hashHistory.push('/itinerary')}); }else{ console.log("else ran"); hashHistory.push('/itinerary'); } } freeDayHandler(){ 
console.log('free day added'); } dataHandler(data) { switch (this.action){ case 'add': this.addGameHandler(data); break; case 'get': ::this.getIteneraryHandler(data); // hashHistory.push('/itinerary'); break; case 'skip': this.freeDayHandler(); break; } this.action = null; } logOutHandler() { Cookies.remove('user_email', 'auth_token', 'id'); hashHistory.push('/'); } showCalendarHandler(){ document.querySelector('.calendar').classList.remove('calendar-hidden'); document.querySelector('#show-calendar').classList.add('show-calendar'); document.querySelector('#map').classList.add('hide-map'); this.setState({route: [], totalPitStops:0, waypts:[]}); this.start_address = null; this.end_address = null; } render(){ let { citiesWithGames, startDate } = this.state; let gameDate = function(){ return moment(startDate).format('dddd, MMMM Do YYYY') === "Invalid date" ? "Click calendar to see available games" : moment(startDate).format('dddd, MMMM Do YYYY')} return( <div> <header> <Link to="/start-trip"><h1 id="title">Inside the Park</h1></Link> <i onClick={this.logOutHandler} className="fa fa-sign-out" aria-hidden="true"><span className='icon-label'> Log Out</span></i> </header> <div className="start-trip-wrapper"> <div className="calendar-map-wrapper"> <div className="calendar"> <h2>Select date below to see that day's games!</h2> <ReactDatePicker style={{"borderRadius": "5px", "boxShadow": "2px 2px 2px black"}} onChange={::this.dateChangeHandler} hideFooter={true}/> </div> <button id="show-calendar" className="show-calendar" onClick={::this.showCalendarHandler}>Reset</button> <div style={{"color": "#c7d4e5"}}>{this.state.route.join(' >> ')}</div> <div id="map" style={this.state.mapStyle}></div> </div> <div className="games hide"> <div id="game-date">{gameDate()}</div>
<div className="game-choices"> <button onClick={() => this.action = 'add'}>Add selected game to trip</button> <div className="get-itinerary"> <button onClick={() => this.action = 'get'}>Finalize Trip</button> {/*<input type="submit" value="Add Another Game" name="action"/> <input type="submit" value="Get Itenerary" name="action"/>*/} </div> </div> <div className="matchup-list"> {citiesWithGames.map(event => <div key={event.id} className="matchups"><label><input name="id" type="radio" value={event.id} key={event.id}></input> {event.title} </label></div>)} </div> </SSF> </div> </div> </div> ); } }
<div id='game-picker'></div> <SSF onData={::this.dataHandler}>
random_line_split
day3.rs
use { aoc_runner_derive::aoc, re_parse::{Error as ReParseError, ReParse, Regex}, serde_derive::Deserialize, std::{ cmp::max, mem::replace, ops::{Index, IndexMut}, slice::Iter as SliceIter, str::{FromStr, Split}, }, }; struct ClaimIterator<'s> { input: Split<'s, char>, } impl<'s> ClaimIterator<'s> { pub fn new(input: &'s str) -> Self { ClaimIterator { input: input.split('\n'), } } } #[derive(Debug, Deserialize, ReParse)] #[re_parse( regex = r#"#(?P<id>\d{1,4}) @ (?P<left>\d{1,3}),(?P<top>\d{1,3}): (?P<width>\d{1,2})x(?P<height>\d{1,2})"# )] struct RawClaim { id: usize, left: usize, top: usize, width: usize, height: usize, } #[derive(Clone, Debug)] struct Claim { id: usize, left: usize, top: usize, right: usize, bottom: usize, } impl Claim { // FIXME: This is actually wrong, and I've just compensated by making `intersects` inclusive. // There should be no need to call this twice! fn contains_edge_of(&self, other: &Self) -> (bool, bool) { let intersects_horizontally = { let bottom_in_horizontal_band = self.bottom > other.top && self.bottom <= other.bottom; let top_in_horizontal_band = self.top >= other.top && self.top < other.bottom; bottom_in_horizontal_band || top_in_horizontal_band }; let intersects_vertically = { let left_in_vertical_band = self.left >= other.left && self.left < other.right; let right_in_vertical_band = self.right > other.left && self.right <= other.right; left_in_vertical_band || right_in_vertical_band }; (intersects_horizontally, intersects_vertically) } pub fn intersects(&self, other: &Self) -> bool { let (self_contains_horiz, self_contains_vert) = self.contains_edge_of(other); let (other_contains_horiz, other_contains_vert) = other.contains_edge_of(self); (self_contains_horiz || other_contains_horiz) && (self_contains_vert || other_contains_vert) } } #[test] fn test_intersection() { const CLAIM_TO_COMPARE_TO: &'static str = "#0 @ 2,2: 3x3"; let claim: Claim = CLAIM_TO_COMPARE_TO.parse().unwrap(); for other in &[ // Close but not touching "#0 @ 
1,1: 1x1", "#0 @ 2,1: 1x1", "#0 @ 3,1: 1x1", "#0 @ 4,1: 1x1", "#0 @ 5,1: 1x1", "#0 @ 5,2: 1x1", "#0 @ 5,3: 1x1", "#0 @ 5,4: 1x1", "#0 @ 5,5: 1x1", "#0 @ 4,5: 1x1", "#0 @ 3,5: 1x1", "#0 @ 2,5: 1x1", "#0 @ 1,5: 1x1", "#0 @ 1,4: 1x1", "#0 @ 1,3: 1x1", "#0 @ 1,2: 1x1", // Way out there ] { if claim.intersects(&other.parse().unwrap()) { panic!("{:?} is not supposed to intersect {:?}", other, claim); } } for other in &[ // Same thing CLAIM_TO_COMPARE_TO, // Other encompasses first "#0 @ 1,1: 5x5", // First encompasses other "#0 @ 3,3: 1x1", // Edges "#0 @ 1,1: 2x2", "#0 @ 2,1: 2x2", "#0 @ 3,1: 2x2", "#0 @ 3,2: 2x2", "#0 @ 3,3: 2x2", "#0 @ 2,3: 2x2", "#0 @ 1,3: 2x2", "#0 @ 1,2: 2x2", ] { if !claim.intersects(&other.parse().unwrap()) { panic!("{:?} is supposed to intersect {:?}", other, claim); } } // Other failing cases found fn intersects(s1: &str, s2: &str) -> bool { s1.parse::<Claim>() .unwrap() .intersects(&s2.parse().unwrap()) } //"#1236 @ ".parse().unwrap() assert!(intersects( "#1236 @ 420,613: 19x12", "#344 @ 426,611: 12x21" )); } #[derive(Debug)] enum ClaimParseError { ParseFailed(ReParseError), InvalidDimensions(usize, usize), } impl FromStr for Claim { type Err = ClaimParseError; fn from_str(s: &str) -> Result<Self, Self::Err> { use self::ClaimParseError::*; let RawClaim { id, left, top, width, height, } = RawClaim::from_str(s).map_err(ParseFailed)?; if width == 0 || height == 0 { return Err(InvalidDimensions(width, height)); } Ok(Self { id, left, top, right: left.checked_add(width).unwrap(), bottom: top.checked_add(height).unwrap(), }) } } impl<'s> Iterator for ClaimIterator<'s> { type Item = Claim; fn next(&mut self) -> Option<Self::Item> { match self.input.next()? 
{ "" => None, other => Some(other.parse().unwrap()), } } } struct GrowOnlyGrid<T> { inner: Vec<T>, len_x: usize, len_y: usize, } impl<T> GrowOnlyGrid<T> { pub fn new_with<F: FnMut() -> T>(x: usize, y: usize, mut f: F) -> Self { Self { inner: { let len = x.checked_mul(y).unwrap(); let mut inner = Vec::with_capacity(len); // OPT: Use the soon-to-be-stable `resize_with` instead. while inner.len() < len { inner.push(f()); } inner }, len_x: x, len_y: y, } } pub fn grow_with<F: FnMut() -> T>(&mut self, x: usize, y: usize, f: F) where T: Default, { let old_len_x = self.len_x; let old_len_y = self.len_y; let old = replace( self, Self::new_with(max(x, old_len_x), max(y, old_len_y), f), ); let mut old_values = old.inner.into_iter(); for y in 0..old_len_y { // OPT: We could probably just copy slices here directly for x in 0..old_len_x { let idx = unsafe { self.index_from_coords_unchecked(x, y) }; self.inner[idx] = old_values.next().unwrap(); } } } pub fn dimensions(&self) -> (usize, usize) { (self.len_x, self.len_y) } fn index_from_coords(&self, x: usize, y: usize) -> usize { if x >= self.len_x || y >= self.len_y { panic!( "coordinates {:?} exceed current dimensions of {:?}", (x, y), self.dimensions() ); } unsafe { self.index_from_coords_unchecked(x, y) } } unsafe fn index_from_coords_unchecked(&self, x: usize, y: usize) -> usize { y * self.len_x + x } } impl<T> Index<(usize, usize)> for GrowOnlyGrid<T> { type Output = T; fn index(&self, (x, y): (usize, usize)) -> &Self::Output { let idx = self.index_from_coords(x, y); &self.inner[idx] } } impl<T> IndexMut<(usize, usize)> for GrowOnlyGrid<T> { fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output { let idx = self.index_from_coords(x, y); &mut self.inner[idx] } } impl<T> GrowOnlyGrid<T> { pub fn iter_flat(&self) -> SliceIter<T> { self.inner[..].iter() } } #[aoc(day3, part1)] pub fn day3_part1(input: &str) -> usize { let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default); for claim in 
ClaimIterator::new(input) { let Claim { id: _, left, top, right, bottom, } = claim; grid.grow_with( right.checked_add(1).unwrap(), bottom.checked_add(1).unwrap(), Default::default, ); for y in top..bottom { for x in left..right { let blarg = &mut grid[(x, y)]; *blarg = blarg.checked_add(1).unwrap(); } } } grid.iter_flat().filter(|x| x > &&1).count() } #[cfg(test)] const INPUT: &'static str = include_str!("../input/2018/day3.txt"); #[cfg(test)] const HINT_INPUT: &'static str = r#"#1 @ 1,3: 4x4 #2 @ 3,1: 4x4 #3 @ 5,5: 2x2 "#; #[cfg(test)] const HINT_EXPECTED_PART1_OUTPUT: usize = 4; #[cfg(test)] const HINT_EXPECTED_PART2_OUTPUT: usize = 3; #[cfg(test)] const EXPECTED_PART2_OUTPUT: usize = 603; #[test] fn test_day3_part1_hint() { assert_eq!(day3_part1(HINT_INPUT), HINT_EXPECTED_PART1_OUTPUT); } #[aoc(day3, part2, square_iteration)] pub fn day3_part2_square_iteration(input: &str) -> usize { // OPT: Use ArrayVec for even more performance? Depends on max size. // OR OPT: Pre-allocating might be beneficial here, not sure how `size_hint` works for char // splits. let mut claims = ClaimIterator::new(input) .map(|c| (c, true)) .collect::<Vec<_>>(); for i in 0..claims.len() { for j in i + 1..claims.len() { if claims[i].0.intersects(&claims[j].0)
} } let uncontested = claims .into_iter() .filter_map(|(c, uncontested)| if uncontested { Some(c) } else { None }) .collect::<Vec<_>>(); if uncontested.len() != 1 { panic!("Expected single remaining claim, got {:?}", uncontested); } uncontested[0].id } #[test] fn test_day3_part2_square_iteration_hint() { assert_eq!( day3_part2_square_iteration(HINT_INPUT), HINT_EXPECTED_PART2_OUTPUT ); } #[test] fn test_day3_part2_square_iteration_answer() { assert_eq!(day3_part2_square_iteration(INPUT), EXPECTED_PART2_OUTPUT); } #[aoc(day3, part2, grid_again)] pub fn day3_part2_grid_again(input: &str) -> usize { let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default); let claims = ClaimIterator::new(input).collect::<Vec<_>>(); for Claim { id: _, left, top, right, bottom, } in claims.iter() { grid.grow_with( right.checked_add(1).unwrap(), bottom.checked_add(1).unwrap(), Default::default, ); for y in *top..*bottom { for x in *left..*right { *(&mut grid[(x, y)]) += 1; } } } let uncontested = claims .into_iter() .filter( |Claim { left, top, bottom, right, .. }| { for y in *top..*bottom { for x in *left..*right { let count = grid[(x, y)]; assert!(count != 0); if count > 1 { return false; } } } true }, ) .collect::<Vec<_>>(); assert_eq!(uncontested.len(), 1); uncontested[0].id } #[test] fn test_day3_part2_grid_again_hint() { assert_eq!( day3_part2_grid_again(HINT_INPUT), HINT_EXPECTED_PART2_OUTPUT ); } #[test] fn test_day3_part2_grid_again_answer() { assert_eq!(day3_part2_grid_again(INPUT), EXPECTED_PART2_OUTPUT); }
{ (&mut claims[i]).1 = false; (&mut claims[j]).1 = false; }
conditional_block
day3.rs
use { aoc_runner_derive::aoc, re_parse::{Error as ReParseError, ReParse, Regex}, serde_derive::Deserialize, std::{ cmp::max, mem::replace, ops::{Index, IndexMut}, slice::Iter as SliceIter, str::{FromStr, Split}, }, }; struct ClaimIterator<'s> { input: Split<'s, char>, } impl<'s> ClaimIterator<'s> { pub fn new(input: &'s str) -> Self { ClaimIterator { input: input.split('\n'), } } } #[derive(Debug, Deserialize, ReParse)] #[re_parse( regex = r#"#(?P<id>\d{1,4}) @ (?P<left>\d{1,3}),(?P<top>\d{1,3}): (?P<width>\d{1,2})x(?P<height>\d{1,2})"# )] struct RawClaim { id: usize, left: usize, top: usize, width: usize, height: usize, } #[derive(Clone, Debug)] struct Claim { id: usize, left: usize, top: usize, right: usize, bottom: usize, } impl Claim { // FIXME: This is actually wrong, and I've just compensated by making `intersects` inclusive. // There should be no need to call this twice! fn contains_edge_of(&self, other: &Self) -> (bool, bool) { let intersects_horizontally = { let bottom_in_horizontal_band = self.bottom > other.top && self.bottom <= other.bottom; let top_in_horizontal_band = self.top >= other.top && self.top < other.bottom; bottom_in_horizontal_band || top_in_horizontal_band }; let intersects_vertically = { let left_in_vertical_band = self.left >= other.left && self.left < other.right; let right_in_vertical_band = self.right > other.left && self.right <= other.right; left_in_vertical_band || right_in_vertical_band }; (intersects_horizontally, intersects_vertically) } pub fn intersects(&self, other: &Self) -> bool { let (self_contains_horiz, self_contains_vert) = self.contains_edge_of(other); let (other_contains_horiz, other_contains_vert) = other.contains_edge_of(self); (self_contains_horiz || other_contains_horiz) && (self_contains_vert || other_contains_vert) } } #[test] fn
() { const CLAIM_TO_COMPARE_TO: &'static str = "#0 @ 2,2: 3x3"; let claim: Claim = CLAIM_TO_COMPARE_TO.parse().unwrap(); for other in &[ // Close but not touching "#0 @ 1,1: 1x1", "#0 @ 2,1: 1x1", "#0 @ 3,1: 1x1", "#0 @ 4,1: 1x1", "#0 @ 5,1: 1x1", "#0 @ 5,2: 1x1", "#0 @ 5,3: 1x1", "#0 @ 5,4: 1x1", "#0 @ 5,5: 1x1", "#0 @ 4,5: 1x1", "#0 @ 3,5: 1x1", "#0 @ 2,5: 1x1", "#0 @ 1,5: 1x1", "#0 @ 1,4: 1x1", "#0 @ 1,3: 1x1", "#0 @ 1,2: 1x1", // Way out there ] { if claim.intersects(&other.parse().unwrap()) { panic!("{:?} is not supposed to intersect {:?}", other, claim); } } for other in &[ // Same thing CLAIM_TO_COMPARE_TO, // Other encompasses first "#0 @ 1,1: 5x5", // First encompasses other "#0 @ 3,3: 1x1", // Edges "#0 @ 1,1: 2x2", "#0 @ 2,1: 2x2", "#0 @ 3,1: 2x2", "#0 @ 3,2: 2x2", "#0 @ 3,3: 2x2", "#0 @ 2,3: 2x2", "#0 @ 1,3: 2x2", "#0 @ 1,2: 2x2", ] { if !claim.intersects(&other.parse().unwrap()) { panic!("{:?} is supposed to intersect {:?}", other, claim); } } // Other failing cases found fn intersects(s1: &str, s2: &str) -> bool { s1.parse::<Claim>() .unwrap() .intersects(&s2.parse().unwrap()) } //"#1236 @ ".parse().unwrap() assert!(intersects( "#1236 @ 420,613: 19x12", "#344 @ 426,611: 12x21" )); } #[derive(Debug)] enum ClaimParseError { ParseFailed(ReParseError), InvalidDimensions(usize, usize), } impl FromStr for Claim { type Err = ClaimParseError; fn from_str(s: &str) -> Result<Self, Self::Err> { use self::ClaimParseError::*; let RawClaim { id, left, top, width, height, } = RawClaim::from_str(s).map_err(ParseFailed)?; if width == 0 || height == 0 { return Err(InvalidDimensions(width, height)); } Ok(Self { id, left, top, right: left.checked_add(width).unwrap(), bottom: top.checked_add(height).unwrap(), }) } } impl<'s> Iterator for ClaimIterator<'s> { type Item = Claim; fn next(&mut self) -> Option<Self::Item> { match self.input.next()? 
{ "" => None, other => Some(other.parse().unwrap()), } } } struct GrowOnlyGrid<T> { inner: Vec<T>, len_x: usize, len_y: usize, } impl<T> GrowOnlyGrid<T> { pub fn new_with<F: FnMut() -> T>(x: usize, y: usize, mut f: F) -> Self { Self { inner: { let len = x.checked_mul(y).unwrap(); let mut inner = Vec::with_capacity(len); // OPT: Use the soon-to-be-stable `resize_with` instead. while inner.len() < len { inner.push(f()); } inner }, len_x: x, len_y: y, } } pub fn grow_with<F: FnMut() -> T>(&mut self, x: usize, y: usize, f: F) where T: Default, { let old_len_x = self.len_x; let old_len_y = self.len_y; let old = replace( self, Self::new_with(max(x, old_len_x), max(y, old_len_y), f), ); let mut old_values = old.inner.into_iter(); for y in 0..old_len_y { // OPT: We could probably just copy slices here directly for x in 0..old_len_x { let idx = unsafe { self.index_from_coords_unchecked(x, y) }; self.inner[idx] = old_values.next().unwrap(); } } } pub fn dimensions(&self) -> (usize, usize) { (self.len_x, self.len_y) } fn index_from_coords(&self, x: usize, y: usize) -> usize { if x >= self.len_x || y >= self.len_y { panic!( "coordinates {:?} exceed current dimensions of {:?}", (x, y), self.dimensions() ); } unsafe { self.index_from_coords_unchecked(x, y) } } unsafe fn index_from_coords_unchecked(&self, x: usize, y: usize) -> usize { y * self.len_x + x } } impl<T> Index<(usize, usize)> for GrowOnlyGrid<T> { type Output = T; fn index(&self, (x, y): (usize, usize)) -> &Self::Output { let idx = self.index_from_coords(x, y); &self.inner[idx] } } impl<T> IndexMut<(usize, usize)> for GrowOnlyGrid<T> { fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output { let idx = self.index_from_coords(x, y); &mut self.inner[idx] } } impl<T> GrowOnlyGrid<T> { pub fn iter_flat(&self) -> SliceIter<T> { self.inner[..].iter() } } #[aoc(day3, part1)] pub fn day3_part1(input: &str) -> usize { let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default); for claim in 
ClaimIterator::new(input) { let Claim { id: _, left, top, right, bottom, } = claim; grid.grow_with( right.checked_add(1).unwrap(), bottom.checked_add(1).unwrap(), Default::default, ); for y in top..bottom { for x in left..right { let blarg = &mut grid[(x, y)]; *blarg = blarg.checked_add(1).unwrap(); } } } grid.iter_flat().filter(|x| x > &&1).count() } #[cfg(test)] const INPUT: &'static str = include_str!("../input/2018/day3.txt"); #[cfg(test)] const HINT_INPUT: &'static str = r#"#1 @ 1,3: 4x4 #2 @ 3,1: 4x4 #3 @ 5,5: 2x2 "#; #[cfg(test)] const HINT_EXPECTED_PART1_OUTPUT: usize = 4; #[cfg(test)] const HINT_EXPECTED_PART2_OUTPUT: usize = 3; #[cfg(test)] const EXPECTED_PART2_OUTPUT: usize = 603; #[test] fn test_day3_part1_hint() { assert_eq!(day3_part1(HINT_INPUT), HINT_EXPECTED_PART1_OUTPUT); } #[aoc(day3, part2, square_iteration)] pub fn day3_part2_square_iteration(input: &str) -> usize { // OPT: Use ArrayVec for even more performance? Depends on max size. // OR OPT: Pre-allocating might be beneficial here, not sure how `size_hint` works for char // splits. 
let mut claims = ClaimIterator::new(input) .map(|c| (c, true)) .collect::<Vec<_>>(); for i in 0..claims.len() { for j in i + 1..claims.len() { if claims[i].0.intersects(&claims[j].0) { (&mut claims[i]).1 = false; (&mut claims[j]).1 = false; } } } let uncontested = claims .into_iter() .filter_map(|(c, uncontested)| if uncontested { Some(c) } else { None }) .collect::<Vec<_>>(); if uncontested.len() != 1 { panic!("Expected single remaining claim, got {:?}", uncontested); } uncontested[0].id } #[test] fn test_day3_part2_square_iteration_hint() { assert_eq!( day3_part2_square_iteration(HINT_INPUT), HINT_EXPECTED_PART2_OUTPUT ); } #[test] fn test_day3_part2_square_iteration_answer() { assert_eq!(day3_part2_square_iteration(INPUT), EXPECTED_PART2_OUTPUT); } #[aoc(day3, part2, grid_again)] pub fn day3_part2_grid_again(input: &str) -> usize { let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default); let claims = ClaimIterator::new(input).collect::<Vec<_>>(); for Claim { id: _, left, top, right, bottom, } in claims.iter() { grid.grow_with( right.checked_add(1).unwrap(), bottom.checked_add(1).unwrap(), Default::default, ); for y in *top..*bottom { for x in *left..*right { *(&mut grid[(x, y)]) += 1; } } } let uncontested = claims .into_iter() .filter( |Claim { left, top, bottom, right, .. }| { for y in *top..*bottom { for x in *left..*right { let count = grid[(x, y)]; assert!(count != 0); if count > 1 { return false; } } } true }, ) .collect::<Vec<_>>(); assert_eq!(uncontested.len(), 1); uncontested[0].id } #[test] fn test_day3_part2_grid_again_hint() { assert_eq!( day3_part2_grid_again(HINT_INPUT), HINT_EXPECTED_PART2_OUTPUT ); } #[test] fn test_day3_part2_grid_again_answer() { assert_eq!(day3_part2_grid_again(INPUT), EXPECTED_PART2_OUTPUT); }
test_intersection
identifier_name
day3.rs
use { aoc_runner_derive::aoc, re_parse::{Error as ReParseError, ReParse, Regex}, serde_derive::Deserialize, std::{ cmp::max, mem::replace, ops::{Index, IndexMut}, slice::Iter as SliceIter, str::{FromStr, Split}, }, }; struct ClaimIterator<'s> { input: Split<'s, char>, } impl<'s> ClaimIterator<'s> { pub fn new(input: &'s str) -> Self { ClaimIterator { input: input.split('\n'), } } } #[derive(Debug, Deserialize, ReParse)] #[re_parse( regex = r#"#(?P<id>\d{1,4}) @ (?P<left>\d{1,3}),(?P<top>\d{1,3}): (?P<width>\d{1,2})x(?P<height>\d{1,2})"# )] struct RawClaim { id: usize, left: usize, top: usize, width: usize, height: usize, } #[derive(Clone, Debug)] struct Claim { id: usize, left: usize, top: usize, right: usize, bottom: usize, } impl Claim { // FIXME: This is actually wrong, and I've just compensated by making `intersects` inclusive. // There should be no need to call this twice! fn contains_edge_of(&self, other: &Self) -> (bool, bool) { let intersects_horizontally = { let bottom_in_horizontal_band = self.bottom > other.top && self.bottom <= other.bottom; let top_in_horizontal_band = self.top >= other.top && self.top < other.bottom; bottom_in_horizontal_band || top_in_horizontal_band }; let intersects_vertically = { let left_in_vertical_band = self.left >= other.left && self.left < other.right; let right_in_vertical_band = self.right > other.left && self.right <= other.right; left_in_vertical_band || right_in_vertical_band }; (intersects_horizontally, intersects_vertically) } pub fn intersects(&self, other: &Self) -> bool { let (self_contains_horiz, self_contains_vert) = self.contains_edge_of(other); let (other_contains_horiz, other_contains_vert) = other.contains_edge_of(self); (self_contains_horiz || other_contains_horiz) && (self_contains_vert || other_contains_vert) } } #[test] fn test_intersection() { const CLAIM_TO_COMPARE_TO: &'static str = "#0 @ 2,2: 3x3"; let claim: Claim = CLAIM_TO_COMPARE_TO.parse().unwrap(); for other in &[ // Close but not touching "#0 @ 
1,1: 1x1", "#0 @ 2,1: 1x1", "#0 @ 3,1: 1x1", "#0 @ 4,1: 1x1", "#0 @ 5,1: 1x1", "#0 @ 5,2: 1x1", "#0 @ 5,3: 1x1", "#0 @ 5,4: 1x1", "#0 @ 5,5: 1x1", "#0 @ 4,5: 1x1", "#0 @ 3,5: 1x1", "#0 @ 2,5: 1x1", "#0 @ 1,5: 1x1", "#0 @ 1,4: 1x1", "#0 @ 1,3: 1x1", "#0 @ 1,2: 1x1", // Way out there ] { if claim.intersects(&other.parse().unwrap()) { panic!("{:?} is not supposed to intersect {:?}", other, claim); } } for other in &[ // Same thing CLAIM_TO_COMPARE_TO, // Other encompasses first "#0 @ 1,1: 5x5", // First encompasses other "#0 @ 3,3: 1x1", // Edges "#0 @ 1,1: 2x2", "#0 @ 2,1: 2x2", "#0 @ 3,1: 2x2", "#0 @ 3,2: 2x2", "#0 @ 3,3: 2x2", "#0 @ 2,3: 2x2", "#0 @ 1,3: 2x2", "#0 @ 1,2: 2x2", ] { if !claim.intersects(&other.parse().unwrap()) { panic!("{:?} is supposed to intersect {:?}", other, claim); } } // Other failing cases found fn intersects(s1: &str, s2: &str) -> bool { s1.parse::<Claim>() .unwrap() .intersects(&s2.parse().unwrap()) } //"#1236 @ ".parse().unwrap() assert!(intersects( "#1236 @ 420,613: 19x12", "#344 @ 426,611: 12x21" )); } #[derive(Debug)] enum ClaimParseError { ParseFailed(ReParseError), InvalidDimensions(usize, usize), } impl FromStr for Claim { type Err = ClaimParseError; fn from_str(s: &str) -> Result<Self, Self::Err> { use self::ClaimParseError::*; let RawClaim { id, left, top, width, height, } = RawClaim::from_str(s).map_err(ParseFailed)?; if width == 0 || height == 0 { return Err(InvalidDimensions(width, height)); } Ok(Self { id, left, top, right: left.checked_add(width).unwrap(), bottom: top.checked_add(height).unwrap(), }) } } impl<'s> Iterator for ClaimIterator<'s> { type Item = Claim; fn next(&mut self) -> Option<Self::Item> { match self.input.next()? 
{ "" => None, other => Some(other.parse().unwrap()), } } } struct GrowOnlyGrid<T> { inner: Vec<T>, len_x: usize, len_y: usize, } impl<T> GrowOnlyGrid<T> { pub fn new_with<F: FnMut() -> T>(x: usize, y: usize, mut f: F) -> Self { Self { inner: { let len = x.checked_mul(y).unwrap(); let mut inner = Vec::with_capacity(len); // OPT: Use the soon-to-be-stable `resize_with` instead. while inner.len() < len { inner.push(f()); } inner }, len_x: x, len_y: y, } } pub fn grow_with<F: FnMut() -> T>(&mut self, x: usize, y: usize, f: F) where T: Default, { let old_len_x = self.len_x; let old_len_y = self.len_y; let old = replace( self, Self::new_with(max(x, old_len_x), max(y, old_len_y), f), ); let mut old_values = old.inner.into_iter(); for y in 0..old_len_y { // OPT: We could probably just copy slices here directly for x in 0..old_len_x { let idx = unsafe { self.index_from_coords_unchecked(x, y) }; self.inner[idx] = old_values.next().unwrap(); } } } pub fn dimensions(&self) -> (usize, usize) { (self.len_x, self.len_y) } fn index_from_coords(&self, x: usize, y: usize) -> usize { if x >= self.len_x || y >= self.len_y { panic!( "coordinates {:?} exceed current dimensions of {:?}", (x, y), self.dimensions() ); } unsafe { self.index_from_coords_unchecked(x, y) } } unsafe fn index_from_coords_unchecked(&self, x: usize, y: usize) -> usize { y * self.len_x + x } } impl<T> Index<(usize, usize)> for GrowOnlyGrid<T> { type Output = T; fn index(&self, (x, y): (usize, usize)) -> &Self::Output { let idx = self.index_from_coords(x, y); &self.inner[idx] } } impl<T> IndexMut<(usize, usize)> for GrowOnlyGrid<T> { fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output { let idx = self.index_from_coords(x, y); &mut self.inner[idx] } } impl<T> GrowOnlyGrid<T> { pub fn iter_flat(&self) -> SliceIter<T> { self.inner[..].iter() } } #[aoc(day3, part1)] pub fn day3_part1(input: &str) -> usize { let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default); for claim in 
ClaimIterator::new(input) { let Claim { id: _, left, top, right, bottom, } = claim; grid.grow_with( right.checked_add(1).unwrap(), bottom.checked_add(1).unwrap(), Default::default, ); for y in top..bottom { for x in left..right { let blarg = &mut grid[(x, y)]; *blarg = blarg.checked_add(1).unwrap(); } } } grid.iter_flat().filter(|x| x > &&1).count() } #[cfg(test)] const INPUT: &'static str = include_str!("../input/2018/day3.txt"); #[cfg(test)] const HINT_INPUT: &'static str = r#"#1 @ 1,3: 4x4 #2 @ 3,1: 4x4 #3 @ 5,5: 2x2 "#; #[cfg(test)] const HINT_EXPECTED_PART1_OUTPUT: usize = 4; #[cfg(test)] const HINT_EXPECTED_PART2_OUTPUT: usize = 3; #[cfg(test)] const EXPECTED_PART2_OUTPUT: usize = 603; #[test] fn test_day3_part1_hint() { assert_eq!(day3_part1(HINT_INPUT), HINT_EXPECTED_PART1_OUTPUT); } #[aoc(day3, part2, square_iteration)] pub fn day3_part2_square_iteration(input: &str) -> usize { // OPT: Use ArrayVec for even more performance? Depends on max size. // OR OPT: Pre-allocating might be beneficial here, not sure how `size_hint` works for char // splits. 
let mut claims = ClaimIterator::new(input) .map(|c| (c, true)) .collect::<Vec<_>>(); for i in 0..claims.len() { for j in i + 1..claims.len() { if claims[i].0.intersects(&claims[j].0) { (&mut claims[i]).1 = false; (&mut claims[j]).1 = false; } } } let uncontested = claims .into_iter() .filter_map(|(c, uncontested)| if uncontested { Some(c) } else { None }) .collect::<Vec<_>>(); if uncontested.len() != 1 { panic!("Expected single remaining claim, got {:?}", uncontested); } uncontested[0].id } #[test] fn test_day3_part2_square_iteration_hint() { assert_eq!( day3_part2_square_iteration(HINT_INPUT), HINT_EXPECTED_PART2_OUTPUT ); } #[test] fn test_day3_part2_square_iteration_answer() { assert_eq!(day3_part2_square_iteration(INPUT), EXPECTED_PART2_OUTPUT); } #[aoc(day3, part2, grid_again)] pub fn day3_part2_grid_again(input: &str) -> usize { let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default); let claims = ClaimIterator::new(input).collect::<Vec<_>>(); for Claim { id: _, left, top, right, bottom, } in claims.iter() { grid.grow_with( right.checked_add(1).unwrap(), bottom.checked_add(1).unwrap(), Default::default, ); for y in *top..*bottom { for x in *left..*right { *(&mut grid[(x, y)]) += 1; } } } let uncontested = claims .into_iter() .filter( |Claim { left, top, bottom,
let count = grid[(x, y)]; assert!(count != 0); if count > 1 { return false; } } } true }, ) .collect::<Vec<_>>(); assert_eq!(uncontested.len(), 1); uncontested[0].id } #[test] fn test_day3_part2_grid_again_hint() { assert_eq!( day3_part2_grid_again(HINT_INPUT), HINT_EXPECTED_PART2_OUTPUT ); } #[test] fn test_day3_part2_grid_again_answer() { assert_eq!(day3_part2_grid_again(INPUT), EXPECTED_PART2_OUTPUT); }
right, .. }| { for y in *top..*bottom { for x in *left..*right {
random_line_split
day3.rs
use { aoc_runner_derive::aoc, re_parse::{Error as ReParseError, ReParse, Regex}, serde_derive::Deserialize, std::{ cmp::max, mem::replace, ops::{Index, IndexMut}, slice::Iter as SliceIter, str::{FromStr, Split}, }, }; struct ClaimIterator<'s> { input: Split<'s, char>, } impl<'s> ClaimIterator<'s> { pub fn new(input: &'s str) -> Self { ClaimIterator { input: input.split('\n'), } } } #[derive(Debug, Deserialize, ReParse)] #[re_parse( regex = r#"#(?P<id>\d{1,4}) @ (?P<left>\d{1,3}),(?P<top>\d{1,3}): (?P<width>\d{1,2})x(?P<height>\d{1,2})"# )] struct RawClaim { id: usize, left: usize, top: usize, width: usize, height: usize, } #[derive(Clone, Debug)] struct Claim { id: usize, left: usize, top: usize, right: usize, bottom: usize, } impl Claim { // FIXME: This is actually wrong, and I've just compensated by making `intersects` inclusive. // There should be no need to call this twice! fn contains_edge_of(&self, other: &Self) -> (bool, bool) { let intersects_horizontally = { let bottom_in_horizontal_band = self.bottom > other.top && self.bottom <= other.bottom; let top_in_horizontal_band = self.top >= other.top && self.top < other.bottom; bottom_in_horizontal_band || top_in_horizontal_band }; let intersects_vertically = { let left_in_vertical_band = self.left >= other.left && self.left < other.right; let right_in_vertical_band = self.right > other.left && self.right <= other.right; left_in_vertical_band || right_in_vertical_band }; (intersects_horizontally, intersects_vertically) } pub fn intersects(&self, other: &Self) -> bool { let (self_contains_horiz, self_contains_vert) = self.contains_edge_of(other); let (other_contains_horiz, other_contains_vert) = other.contains_edge_of(self); (self_contains_horiz || other_contains_horiz) && (self_contains_vert || other_contains_vert) } } #[test] fn test_intersection() { const CLAIM_TO_COMPARE_TO: &'static str = "#0 @ 2,2: 3x3"; let claim: Claim = CLAIM_TO_COMPARE_TO.parse().unwrap(); for other in &[ // Close but not touching "#0 @ 
1,1: 1x1", "#0 @ 2,1: 1x1", "#0 @ 3,1: 1x1", "#0 @ 4,1: 1x1", "#0 @ 5,1: 1x1", "#0 @ 5,2: 1x1", "#0 @ 5,3: 1x1", "#0 @ 5,4: 1x1", "#0 @ 5,5: 1x1", "#0 @ 4,5: 1x1", "#0 @ 3,5: 1x1", "#0 @ 2,5: 1x1", "#0 @ 1,5: 1x1", "#0 @ 1,4: 1x1", "#0 @ 1,3: 1x1", "#0 @ 1,2: 1x1", // Way out there ] { if claim.intersects(&other.parse().unwrap()) { panic!("{:?} is not supposed to intersect {:?}", other, claim); } } for other in &[ // Same thing CLAIM_TO_COMPARE_TO, // Other encompasses first "#0 @ 1,1: 5x5", // First encompasses other "#0 @ 3,3: 1x1", // Edges "#0 @ 1,1: 2x2", "#0 @ 2,1: 2x2", "#0 @ 3,1: 2x2", "#0 @ 3,2: 2x2", "#0 @ 3,3: 2x2", "#0 @ 2,3: 2x2", "#0 @ 1,3: 2x2", "#0 @ 1,2: 2x2", ] { if !claim.intersects(&other.parse().unwrap()) { panic!("{:?} is supposed to intersect {:?}", other, claim); } } // Other failing cases found fn intersects(s1: &str, s2: &str) -> bool { s1.parse::<Claim>() .unwrap() .intersects(&s2.parse().unwrap()) } //"#1236 @ ".parse().unwrap() assert!(intersects( "#1236 @ 420,613: 19x12", "#344 @ 426,611: 12x21" )); } #[derive(Debug)] enum ClaimParseError { ParseFailed(ReParseError), InvalidDimensions(usize, usize), } impl FromStr for Claim { type Err = ClaimParseError; fn from_str(s: &str) -> Result<Self, Self::Err> { use self::ClaimParseError::*; let RawClaim { id, left, top, width, height, } = RawClaim::from_str(s).map_err(ParseFailed)?; if width == 0 || height == 0 { return Err(InvalidDimensions(width, height)); } Ok(Self { id, left, top, right: left.checked_add(width).unwrap(), bottom: top.checked_add(height).unwrap(), }) } } impl<'s> Iterator for ClaimIterator<'s> { type Item = Claim; fn next(&mut self) -> Option<Self::Item> { match self.input.next()? 
{ "" => None, other => Some(other.parse().unwrap()), } } } struct GrowOnlyGrid<T> { inner: Vec<T>, len_x: usize, len_y: usize, } impl<T> GrowOnlyGrid<T> { pub fn new_with<F: FnMut() -> T>(x: usize, y: usize, mut f: F) -> Self { Self { inner: { let len = x.checked_mul(y).unwrap(); let mut inner = Vec::with_capacity(len); // OPT: Use the soon-to-be-stable `resize_with` instead. while inner.len() < len { inner.push(f()); } inner }, len_x: x, len_y: y, } } pub fn grow_with<F: FnMut() -> T>(&mut self, x: usize, y: usize, f: F) where T: Default, { let old_len_x = self.len_x; let old_len_y = self.len_y; let old = replace( self, Self::new_with(max(x, old_len_x), max(y, old_len_y), f), ); let mut old_values = old.inner.into_iter(); for y in 0..old_len_y { // OPT: We could probably just copy slices here directly for x in 0..old_len_x { let idx = unsafe { self.index_from_coords_unchecked(x, y) }; self.inner[idx] = old_values.next().unwrap(); } } } pub fn dimensions(&self) -> (usize, usize) { (self.len_x, self.len_y) } fn index_from_coords(&self, x: usize, y: usize) -> usize { if x >= self.len_x || y >= self.len_y { panic!( "coordinates {:?} exceed current dimensions of {:?}", (x, y), self.dimensions() ); } unsafe { self.index_from_coords_unchecked(x, y) } } unsafe fn index_from_coords_unchecked(&self, x: usize, y: usize) -> usize { y * self.len_x + x } } impl<T> Index<(usize, usize)> for GrowOnlyGrid<T> { type Output = T; fn index(&self, (x, y): (usize, usize)) -> &Self::Output { let idx = self.index_from_coords(x, y); &self.inner[idx] } } impl<T> IndexMut<(usize, usize)> for GrowOnlyGrid<T> { fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output { let idx = self.index_from_coords(x, y); &mut self.inner[idx] } } impl<T> GrowOnlyGrid<T> { pub fn iter_flat(&self) -> SliceIter<T> { self.inner[..].iter() } } #[aoc(day3, part1)] pub fn day3_part1(input: &str) -> usize { let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default); for claim in 
ClaimIterator::new(input) { let Claim { id: _, left, top, right, bottom, } = claim; grid.grow_with( right.checked_add(1).unwrap(), bottom.checked_add(1).unwrap(), Default::default, ); for y in top..bottom { for x in left..right { let blarg = &mut grid[(x, y)]; *blarg = blarg.checked_add(1).unwrap(); } } } grid.iter_flat().filter(|x| x > &&1).count() } #[cfg(test)] const INPUT: &'static str = include_str!("../input/2018/day3.txt"); #[cfg(test)] const HINT_INPUT: &'static str = r#"#1 @ 1,3: 4x4 #2 @ 3,1: 4x4 #3 @ 5,5: 2x2 "#; #[cfg(test)] const HINT_EXPECTED_PART1_OUTPUT: usize = 4; #[cfg(test)] const HINT_EXPECTED_PART2_OUTPUT: usize = 3; #[cfg(test)] const EXPECTED_PART2_OUTPUT: usize = 603; #[test] fn test_day3_part1_hint()
#[aoc(day3, part2, square_iteration)] pub fn day3_part2_square_iteration(input: &str) -> usize { // OPT: Use ArrayVec for even more performance? Depends on max size. // OR OPT: Pre-allocating might be beneficial here, not sure how `size_hint` works for char // splits. let mut claims = ClaimIterator::new(input) .map(|c| (c, true)) .collect::<Vec<_>>(); for i in 0..claims.len() { for j in i + 1..claims.len() { if claims[i].0.intersects(&claims[j].0) { (&mut claims[i]).1 = false; (&mut claims[j]).1 = false; } } } let uncontested = claims .into_iter() .filter_map(|(c, uncontested)| if uncontested { Some(c) } else { None }) .collect::<Vec<_>>(); if uncontested.len() != 1 { panic!("Expected single remaining claim, got {:?}", uncontested); } uncontested[0].id } #[test] fn test_day3_part2_square_iteration_hint() { assert_eq!( day3_part2_square_iteration(HINT_INPUT), HINT_EXPECTED_PART2_OUTPUT ); } #[test] fn test_day3_part2_square_iteration_answer() { assert_eq!(day3_part2_square_iteration(INPUT), EXPECTED_PART2_OUTPUT); } #[aoc(day3, part2, grid_again)] pub fn day3_part2_grid_again(input: &str) -> usize { let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default); let claims = ClaimIterator::new(input).collect::<Vec<_>>(); for Claim { id: _, left, top, right, bottom, } in claims.iter() { grid.grow_with( right.checked_add(1).unwrap(), bottom.checked_add(1).unwrap(), Default::default, ); for y in *top..*bottom { for x in *left..*right { *(&mut grid[(x, y)]) += 1; } } } let uncontested = claims .into_iter() .filter( |Claim { left, top, bottom, right, .. 
}| { for y in *top..*bottom { for x in *left..*right { let count = grid[(x, y)]; assert!(count != 0); if count > 1 { return false; } } } true }, ) .collect::<Vec<_>>(); assert_eq!(uncontested.len(), 1); uncontested[0].id } #[test] fn test_day3_part2_grid_again_hint() { assert_eq!( day3_part2_grid_again(HINT_INPUT), HINT_EXPECTED_PART2_OUTPUT ); } #[test] fn test_day3_part2_grid_again_answer() { assert_eq!(day3_part2_grid_again(INPUT), EXPECTED_PART2_OUTPUT); }
{ assert_eq!(day3_part1(HINT_INPUT), HINT_EXPECTED_PART1_OUTPUT); }
identifier_body
train.py
import os import random import re import json import glob import multiprocessing import argparse from importlib import import_module from pathlib import Path from typing import Union, List, Tuple from collections import defaultdict import pickle as pickle import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from sklearn import metrics from sklearn.model_selection import train_test_split import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import Dataset, DataLoader from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification from transformers import Trainer, TrainingArguments from transformers import T5Tokenizer, T5ForConditionalGeneration from transformers.optimization import AdamW from tqdm import tqdm from transformers.utils.dummy_pt_objects import ModalEmbeddings import wandb import requests ###################################### # HELPER FUNCTIONS ###################################### def set_all_seeds(seed, verbose=False): torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(seed) random.seed(seed) if verbose: print("All random seeds set to", seed) def parse_arguments(parser): # Set random seed parser.add_argument('--seed', type=int, default=None, help="random seed (default: None)") parser.add_argument('--verbose', type=str, default="n", choices=["y", "n"], help="verbose (default: n)") # Container environment parser.add_argument('--data_dir', type=str, default=os.environ.get('SM_CHANNEL_TRAIN', '/opt/ml/dataset')) parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR', './saved')) parser.add_argument('--log_dir', type=str, default=os.environ.get('SM_MODEL_DIR', './logs')) parser.add_argument('--name', type=str, default="exp", help='name of the custom model and experiment') 
parser.add_argument('--load_model', type=str, help="Load pretrained model if not None") # Load Dataset and construct DataLoader parser.add_argument('--dataset', type=str, default='BaselineDataset', help="name of dataset (default: BaselineDataset)") parser.add_argument('--additional', type=str, nargs='*', help="list of additional dataset file names") parser.add_argument('--batch_size', metavar='B', type=int, default=1, help="train set batch size (default: 1)") parser.add_argument('--val_file', type=str, choices=["y", "n"], default="n", help="whether to use valid.csv file (default: n)") parser.add_argument('--val_ratio', type=float, default=0.2, help="valid set ratio (default: 0.2)") parser.add_argument('--val_batch_size', metavar='B', type=int, help="valid set batch size (default set to batch_size)") # Preprocessor and Data Augmentation parser.add_argument('--preprocessor', type=str, default='BaselinePreprocessor', help="type of preprocessor (default: BaselinePreprocessor)") parser.add_argument('--augmentation', type=str, help="type of augmentation (default: None)") # Load model and set optimizer parser.add_argument('--model', type=str, default='BaseModel', help="model name (default: BaseModel)") parser.add_argument('--num_labels', type=int, default=30, help="number of labels for classification (default: 30)") parser.add_argument('--optim', type=str, default='AdamW', help="optimizer name (default: AdamW)") parser.add_argument('--momentum', type=float, default=0., help="SGD with momentum (default: 0.0)") # training setup parser.add_argument('--epochs', type=int, metavar='N', default=1, help="number of epochs (default 1)") parser.add_argument('--lr', type=float, default=1e-5, help="learning rate (default: 1e-5)") parser.add_argument('--max_seq_len', type=int, metavar='L', default=256, help="max sequence length (default 256)") parser.add_argument('--max_pad_len', type=int, metavar='L', default=8, help="max padding length for bucketing (default 8)") 
parser.add_argument('--log_every', type=int, metavar='N', default=500, help="log every N steps (default: 500)") parser.add_argument('--eval_every', type=int, metavar='N', default=500, help="evaluation interval for every N steps (default: 500)") parser.add_argument('--save_every', type=int, metavar='N', default=500, help="save model interval for every N steps (default: 500)") parser.add_argument('--save_total_limit', type=int, metavar='N', default=5, help="save total limit (choosing the best eval scores) (default: 5)") # Learning Rate Scheduler group_lr = parser.add_argument_group('lr_scheduler') group_lr.add_argument("--lr_type", type=str, metavar='TYPE', default="constant", help="lr scheduler type (default: constant)") group_lr.add_argument("--lr_weight_decay", type=float, metavar='LAMBDA', default=0.01, help="weight decay rate for AdamW (default: 0.01)") group_lr.add_argument("--lr_gamma", type=float, metavar='GAMMA', default=0.95, help="lr scheduler gamma (default: 0.95)") group_lr.add_argument("--lr_decay_step", type=int, metavar='STEP', default=100, help="lr scheduler decay step (default: 100)") group_lr.add_argument("--lr_warmup_steps", type=int, metavar='N', default=500, help="lr scheduler warmup steps (default: 500)") group_lr.add_argument("--lr_warmup_ratio", type=float, metavar='N', default=0.1, help="lr scheduler warmup ratio (default: 0.1)") group_lr.add_argument("--lr_adamw_beta2", type=float, metavar='BETA2', default=0.99, help="AdamW BETA2 (default: 0.99)") args = parser.parse_args() return args def increment_path(path, overwrite=False): """ Automatically increment path, i.e. runs/exp --> runs/exp0, runs/exp1 etc. Args: path (str or pathlib.Path): f"{model_dir}/{args.name}". overwrite (bool): whether to overwrite or increment path (increment if False). 
Returns: path: new path """ path = Path(path) if (path.exists() and overwrite) or (not path.exists()): if not os.path.exists(str(path).split('/')[0]): os.mkdir(str(path).split('/')[0]) if not path.exists(): os.mkdir(path) return str(path) else: dirs = glob.glob(f"{path}*") matches = [re.search(rf"%s(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] n = max(i) + 1 if i else 2 path = f"{path}{n}" if not os.path.exists(path): os.mkdir(path) return path ###################################### # KLUE SPECIFICS ###################################### def klue_re_micro_f1(preds, labels): """KLUE-RE micro f1 (except no_relation)""" label_list = ['no_relation', 'org:top_members/employees', 'org:members', 'org:product', 'per:title', 'org:alternate_names', 'per:employee_of', 'org:place_of_headquarters', 'per:product', 'org:number_of_employees/members', 'per:children', 'per:place_of_residence', 'per:alternate_names', 'per:other_family', 'per:colleagues', 'per:origin', 'per:siblings', 'per:spouse', 'org:founded', 'org:political/religious_affiliation', 'org:member_of', 'per:parents', 'org:dissolved', 'per:schools_attended', 'per:date_of_death', 'per:date_of_birth', 'per:place_of_birth', 'per:place_of_death', 'org:founded_by', 'per:religion'] no_relation_label_idx = label_list.index("no_relation") label_indices = list(range(len(label_list))) label_indices.remove(no_relation_label_idx) return metrics.f1_score(labels, preds, average="micro", labels=label_indices) * 100.0 def klue_re_auprc(probs, labels): """KLUE-RE AUPRC (with no_relation)""" labels = np.eye(30)[labels] score = np.zeros((30,)) for c in range(30): targets_c = labels.take([c], axis=1).ravel() preds_c = probs.take([c], axis=1).ravel() precision, recall, _ = metrics.precision_recall_curve( targets_c, preds_c) score[c] = metrics.auc(recall, precision) return np.average(score) * 100.0 def compute_metrics(pred): """ validation์„ ์œ„ํ•œ metrics function """ labels = pred.label_ids preds = 
pred.predictions.argmax(-1) probs = pred.predictions # calculate accuracy using sklearn's function f1 = klue_re_micro_f1(preds, labels) auprc = klue_re_auprc(probs, labels) acc = metrics.accuracy_score(labels, preds) # ๋ฆฌ๋”๋ณด๋“œ ํ‰๊ฐ€์—๋Š” ํฌํ•จ๋˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. return { 'micro f1 score': f1, 'auprc': auprc, 'accuracy': acc, } def label_to_num(label): num_label = [] with open('dict_label_to_num.pkl', 'rb') as f: dict_label_to_num = pickle.load(f) for v in label: num_label.append(dict_label_to_num[v]) return num_label ###################################### # DATA LOADER RELATED ###################################### # TODO: bucketed_batch_indicies ์ˆ˜์ •ํ•˜๊ธฐ! def bucketed_batch_indices( src_lens: Union[List[int], np.ndarray, pd.Series], batch_size: int, max_pad_len: int ) -> List[List[int]]: batch_map = defaultdict(list) batch_indices_list = [] src_len_min = np.min(src_lens) for idx, src_len in enumerate(src_lens): src = (src_len - src_len_min + 1) // max_pad_len batch_map[src].append(idx) for _, value in batch_map.items(): batch_indices_list += [value[i:i+batch_size] for i in range(0, len(value), batch_size)] random.shuffle(batch_indices_list) return batch_indices_list # TODO: collate_fn ํ˜„ ๋ฐ์ดํ„ฐ์…‹์— ๋งž์ถฐ ์ˆ˜์ •ํ•˜๊ธฐ! 
# we don't need collate_fn # since huggingface automatically creates default collate function def collate_fn( batched_samples: List[Tuple[List[int], List[int], List[int]]], pad_token_idx ) -> Tuple[torch.Tensor, torch.Tensor]: PAD = pad_token_idx B = len(batched_samples) batched_samples = sorted( batched_samples, key=lambda x: x["src_idx"], reverse=True) src_sentences = [] src_attention = [] tgt_sentences = [] for sample in batched_samples: src_sentences.append(torch.tensor(sample["src_idx"])) src_attention.append(torch.tensor(sample["src_attn"])) tgt_sentences.append(torch.tensor(sample["tgt_idx"])) src_sentences = torch.nn.utils.rnn.pad_sequence( src_sentences, padding_value=PAD, batch_first=True) src_attention = torch.nn.utils.rnn.pad_sequence( src_attention, padding_value=0, batch_first=True) tgt_sentences = torch.nn.utils.rnn.pad_sequence( tgt_sentences, padding_value=PAD, batch_first=True) assert src_sentences.size(0) == B and tgt_sentences.size(0) == B assert src_sentences.dtype == torch.long and tgt_sentences.dtype == torch.long return {'src_idx': src_sentences, 'src_attn': src_attention, 'tgt_idx': tgt_sentences} def send_web_hooks(text, url): # Please keep your url privately payload = {"text": text} requests.post(url, json=payload) def get_model_and_tokenizer(args, **kwargs): # Here, you also need to define tokenizer as well # since the type of tokenizer depends on the model NUM_LABELS = 30 model = None tokenizer = None if args.model.lower().count("klue/bert-base"): MODEL_NAME = "klue/bert-base" LOAD_MODEL = args.load_model if args.load_model else MODEL_NAME if args.load_model: model = AutoModelForSequenceClassification.from_pretrained( args.load_model) try: tokenizer = AutoTokenizer.from_pretrained(LOAD_MODEL) except: # in case, pretrained tokenizer doesn't exists tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) else: model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = NUM_LABELS model = 
AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) elif args.model.lower().count("ke-t5"): MODEL_NAME = "" CLASS_NAME = "T5EncoderForSequenceClassificationMeanSubmeanObjmean" if args.model.count("large"): MODEL_NAME = 'KETI-AIR/ke-t5-large' elif args.model.count("small"): MODEL_NAME = 'KETI-AIR/ke-t5-small' else: MODEL_NAME = 'KETI-AIR/ke-t5-base' if args.load_model: LOAD_MODEL = args.load_model config = AutoConfig.from_pretrained(LOAD_MODEL) else: LOAD_MODEL = MODEL_NAME config = AutoConfig.from_pretrained(LOAD_MODEL) config.num_labels = 30 config.dropout_p = 0.4 config.focal_loss = False model_module = getattr(import_module("model.models"), CLASS_NAME) model = model_module(config) try: tokenizer = T5Tokenizer.from_pretrained(LOAD_MODEL) except: tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME) elif args.model.lower().count("klue/roberta"): MODEL_NAME = "" if args.model.count("large"): MODEL_NAME = "klue/roberta-large" elif args.model.count("small"): MODEL_NAME = "klue/roberta-small" else: MODEL_NAME = "klue/roberta-base" LOAD_MODEL = args.load_model if args.load_model else MODEL_NAME if args.load_model: model = AutoModelForSequenceClassification.from_pretrained(LOAD_MODEL) try: tokenizer = AutoTokenizer.from_pretrained(LOAD_MODEL) except: # in case, pretrained tokenizer doesn't exists tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, model_input_names = ["input_ids", "attention_mask"]) else: model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = 30 model = AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, model_input_names = ["input_ids", "attention_mask"]) else: # If the model is not specified above, # it first tries to look up for "model/{args.model}.py" and "model/models.py" file. # Additional setting should be provided with kwargs above. 
# If still not found, it tries to find the model in huggingface # with AutoModelForSequenceClassification & AutoTokenizer try: model_module = getattr(import_module( "model."+args.model), args.model) model = model_module() tokenizer = model.tokenizer except: try: model_module = getattr( import_module("model.models"), args.model) model = model_module() tokenizer = model.tokenizer except: MODEL_NAME = args.model model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = 30 model = AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) return model, tokenizer def train(args, verbose: bool=True): # Create folder SAVE_DIR = increment_path(os.path.join(args.model_dir, args.name)) LOG_DIR = increment_path(os.path.join(args.log_dir, args.name)) if verbose: print("save_dir:", SAVE_DIR) print("log_dir: ", LOG_DIR) # Device setting use_cuda = torch.cuda.is_available() device = torch.device('cuda:0' if use_cuda else 'cpu') if verbose: print('training on:', device) # Load Model & Tokenizer # because the type of tokenizer depends on the model model, tokenizer = get_model_and_tokenizer(args) model.to(device) # Build Dataset try: dataset_module = getattr(import_module( "dataset."+args.dataset), args.dataset) except: dataset_module = getattr(import_module( "dataset.dataset"), args.dataset) MAX_SEQ_LEN = args.max_seq_len NUM_LABELS = args.num_labels # max_length sometimes refers to maximum length in text generation # so, I used MAX_SEQ_LEN to indicate maximum input length fed to the model dataset, train_dataset, valid_dataset = None, None, None if args.val_file == "y": train_dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, valid=False, dropna=True) valid_dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, valid=True, dropna=True) if 
verbose: print("="*20) print("train-valid split to train:", len(train_dataset), "valid:", len(valid_dataset)) print("train:") print(train_dataset.data['label'].value_counts()) print("test:") print(valid_dataset.data['label'].value_counts()) print("="*20) else: dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, dropna=True) # dataset must return # dict containing at least {'input_ids', 'attention_mask', 'labels'} # in order to work properly # TODO: Build Preprocessor preprocessor = None if args.preprocessor: try: preprocessor_module = getattr(import_module( "dataset.preprocessor."+args.preprocessor), args.preprocessor) except: preprocessor_module = getattr(import_module( "dataset.preprocessor.preprocessors"), args.preprocessor) preprocessor = preprocessor_module() # Build Augmentation # unk, RE, RI, ... # this result will be fixed for entire training steps augmentation = None if args.augmentation: try: augmentation_module = getattr(import_module( "dataset.augmentation."+args.augmentation), args.augmentation) except: augmentation_module = getattr(import_module( "dataset.augmentation.augmentations"), args.augmentation) augmentation = augmentation_module(tokenizer) added_token_num = 0 if dataset is not None: dataset.set_tokenizer(tokenizer) dataset.set_preprocessor(preprocessor) if augmentation is not None: dataset.set_augmentation(augmentation) dataset.preprocess() added_token_num = dataset.get_special_token_num() if train_dataset is not None: train_dataset.set_tokenizer(tokenizer) train_dataset.set_preprocessor(preprocessor) if augmentation is not None: train_dataset.set_augmentation(augmentation) train_dataset.preprocess() added_token_num = train_dataset.get_special_token_num() if valid_dataset is not None: valid_dataset.set_tokenizer(tokenizer) valid_dataset.set_preprocessor(preprocessor) # if augmentation is not None: # valid_dataset.set_augmentation(augmentation) valid_dataset.preprocess() 
added_token_num = valid_dataset.get_special_token_num() if added_token_num > 0: model.resize_token_embeddings(tokenizer.vocab_size + added_token_num) # TODO: train-valid split # TODO: do not split (= train with whole data) if val_ratio == 0.0 if args.val_ratio > 0.0 and dataset is not None: train_ids, valid_ids = train_test_split(list(range(len(dataset.data))), test_size=args.val_ratio, stratify=dataset.data['label']) train_dataset = torch.utils.data.Subset(dataset, train_ids) valid_dataset = torch.utils.data.Subset(dataset, valid_ids) if verbose: print("="*20) print("train-valid split to train:", len(train_dataset), "valid:", len(valid_dataset)) print("train:") print(dataset.data['label'].iloc[train_ids].value_counts()) print("test:") print(dataset.data['label'].iloc[valid_ids].value_counts()) print("="*20) # Build DataLoader BATCH_SIZE = args.batch_size
# Train NUM_EPOCHS = args.epochs SAVE_EVERY = args.save_every EVAL_EVERY = args.eval_every LOG_EVERY = args.log_every SAVE_TOTAL_LIMIT = args.save_total_limit LEARNING_RATE = args.lr LR_TYPE = args.lr_type DECAY_RATE = args.lr_weight_decay WARMUP_RATIO = args.lr_warmup_ratio WARMUP_STEPS = args.lr_warmup_steps ADAM_BETA2 = args.lr_adamw_beta2 training_args = TrainingArguments( output_dir=SAVE_DIR, # output directory logging_dir=LOG_DIR, # directory for storing logs save_total_limit=SAVE_TOTAL_LIMIT, # number of total models saved. save_steps=SAVE_EVERY, # model saving step. logging_steps=LOG_EVERY, # log saving step. eval_steps=EVAL_EVERY, # evaluation step. num_train_epochs=NUM_EPOCHS, # total number of training epochs evaluation_strategy='steps', save_strategy='steps', # evaluation strategy to adopt during training # `no` : No evaluation during training. # `steps`: Evaluate every `eval_steps`. # `epoch`: Evaluate every end of epoch. load_best_model_at_end=True, per_device_train_batch_size=BATCH_SIZE, # batch size per device during training per_device_eval_batch_size=VAL_BATCH_SIZE, # batch size for evaluation learning_rate=LEARNING_RATE, # learning_rate lr_scheduler_type=LR_TYPE, # linear, cosine, cosine_with_restarts, # polynomial, constant, constant_with_warmup adam_beta2=ADAM_BETA2, # Beta 2 hyperparameter for AdamW warmup_ratio=WARMUP_RATIO, # ratio of warmup steps for learning rate scheduler warmup_steps=WARMUP_STEPS, # number of warmup steps for learning rate scheduler (overrides warmup_ratio) weight_decay=DECAY_RATE, # strength of weight decay ) trainer = None if valid_dataset is not None: trainer = Trainer( model=model, tokenizer=tokenizer, args=training_args, # training arguments, defined above train_dataset=train_dataset, # training dataset eval_dataset=valid_dataset, # evaluation dataset compute_metrics=compute_metrics # define metrics function ) else: trainer = Trainer( model=model, tokenizer=tokenizer, args=training_args, # training arguments, 
defined above train_dataset=dataset, # training dataset eval_dataset=dataset, # evaluate with the whole dataset compute_metrics=compute_metrics # define metrics function ) # train model trainer.train() model.save_pretrained(os.path.join(SAVE_DIR, args.name + "_final")) def main(): parser = argparse.ArgumentParser( description="Train the model with the arguments given") args = parse_arguments(parser) v = args.verbose == "y" if args.seed is not None: set_all_seeds(args.seed, verbose=v) train(args, verbose=v) if __name__ == '__main__': main()
VAL_BATCH_SIZE = args.val_batch_size if args.val_batch_size else BATCH_SIZE MAX_PAD_LEN = args.max_pad_len
random_line_split
train.py
import os import random import re import json import glob import multiprocessing import argparse from importlib import import_module from pathlib import Path from typing import Union, List, Tuple from collections import defaultdict import pickle as pickle import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from sklearn import metrics from sklearn.model_selection import train_test_split import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import Dataset, DataLoader from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification from transformers import Trainer, TrainingArguments from transformers import T5Tokenizer, T5ForConditionalGeneration from transformers.optimization import AdamW from tqdm import tqdm from transformers.utils.dummy_pt_objects import ModalEmbeddings import wandb import requests ###################################### # HELPER FUNCTIONS ###################################### def set_all_seeds(seed, verbose=False): torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(seed) random.seed(seed) if verbose: print("All random seeds set to", seed) def
(parser): # Set random seed parser.add_argument('--seed', type=int, default=None, help="random seed (default: None)") parser.add_argument('--verbose', type=str, default="n", choices=["y", "n"], help="verbose (default: n)") # Container environment parser.add_argument('--data_dir', type=str, default=os.environ.get('SM_CHANNEL_TRAIN', '/opt/ml/dataset')) parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR', './saved')) parser.add_argument('--log_dir', type=str, default=os.environ.get('SM_MODEL_DIR', './logs')) parser.add_argument('--name', type=str, default="exp", help='name of the custom model and experiment') parser.add_argument('--load_model', type=str, help="Load pretrained model if not None") # Load Dataset and construct DataLoader parser.add_argument('--dataset', type=str, default='BaselineDataset', help="name of dataset (default: BaselineDataset)") parser.add_argument('--additional', type=str, nargs='*', help="list of additional dataset file names") parser.add_argument('--batch_size', metavar='B', type=int, default=1, help="train set batch size (default: 1)") parser.add_argument('--val_file', type=str, choices=["y", "n"], default="n", help="whether to use valid.csv file (default: n)") parser.add_argument('--val_ratio', type=float, default=0.2, help="valid set ratio (default: 0.2)") parser.add_argument('--val_batch_size', metavar='B', type=int, help="valid set batch size (default set to batch_size)") # Preprocessor and Data Augmentation parser.add_argument('--preprocessor', type=str, default='BaselinePreprocessor', help="type of preprocessor (default: BaselinePreprocessor)") parser.add_argument('--augmentation', type=str, help="type of augmentation (default: None)") # Load model and set optimizer parser.add_argument('--model', type=str, default='BaseModel', help="model name (default: BaseModel)") parser.add_argument('--num_labels', type=int, default=30, help="number of labels for classification (default: 30)") 
parser.add_argument('--optim', type=str, default='AdamW', help="optimizer name (default: AdamW)") parser.add_argument('--momentum', type=float, default=0., help="SGD with momentum (default: 0.0)") # training setup parser.add_argument('--epochs', type=int, metavar='N', default=1, help="number of epochs (default 1)") parser.add_argument('--lr', type=float, default=1e-5, help="learning rate (default: 1e-5)") parser.add_argument('--max_seq_len', type=int, metavar='L', default=256, help="max sequence length (default 256)") parser.add_argument('--max_pad_len', type=int, metavar='L', default=8, help="max padding length for bucketing (default 8)") parser.add_argument('--log_every', type=int, metavar='N', default=500, help="log every N steps (default: 500)") parser.add_argument('--eval_every', type=int, metavar='N', default=500, help="evaluation interval for every N steps (default: 500)") parser.add_argument('--save_every', type=int, metavar='N', default=500, help="save model interval for every N steps (default: 500)") parser.add_argument('--save_total_limit', type=int, metavar='N', default=5, help="save total limit (choosing the best eval scores) (default: 5)") # Learning Rate Scheduler group_lr = parser.add_argument_group('lr_scheduler') group_lr.add_argument("--lr_type", type=str, metavar='TYPE', default="constant", help="lr scheduler type (default: constant)") group_lr.add_argument("--lr_weight_decay", type=float, metavar='LAMBDA', default=0.01, help="weight decay rate for AdamW (default: 0.01)") group_lr.add_argument("--lr_gamma", type=float, metavar='GAMMA', default=0.95, help="lr scheduler gamma (default: 0.95)") group_lr.add_argument("--lr_decay_step", type=int, metavar='STEP', default=100, help="lr scheduler decay step (default: 100)") group_lr.add_argument("--lr_warmup_steps", type=int, metavar='N', default=500, help="lr scheduler warmup steps (default: 500)") group_lr.add_argument("--lr_warmup_ratio", type=float, metavar='N', default=0.1, help="lr scheduler 
warmup ratio (default: 0.1)") group_lr.add_argument("--lr_adamw_beta2", type=float, metavar='BETA2', default=0.99, help="AdamW BETA2 (default: 0.99)") args = parser.parse_args() return args def increment_path(path, overwrite=False): """ Automatically increment path, i.e. runs/exp --> runs/exp0, runs/exp1 etc. Args: path (str or pathlib.Path): f"{model_dir}/{args.name}". overwrite (bool): whether to overwrite or increment path (increment if False). Returns: path: new path """ path = Path(path) if (path.exists() and overwrite) or (not path.exists()): if not os.path.exists(str(path).split('/')[0]): os.mkdir(str(path).split('/')[0]) if not path.exists(): os.mkdir(path) return str(path) else: dirs = glob.glob(f"{path}*") matches = [re.search(rf"%s(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] n = max(i) + 1 if i else 2 path = f"{path}{n}" if not os.path.exists(path): os.mkdir(path) return path ###################################### # KLUE SPECIFICS ###################################### def klue_re_micro_f1(preds, labels): """KLUE-RE micro f1 (except no_relation)""" label_list = ['no_relation', 'org:top_members/employees', 'org:members', 'org:product', 'per:title', 'org:alternate_names', 'per:employee_of', 'org:place_of_headquarters', 'per:product', 'org:number_of_employees/members', 'per:children', 'per:place_of_residence', 'per:alternate_names', 'per:other_family', 'per:colleagues', 'per:origin', 'per:siblings', 'per:spouse', 'org:founded', 'org:political/religious_affiliation', 'org:member_of', 'per:parents', 'org:dissolved', 'per:schools_attended', 'per:date_of_death', 'per:date_of_birth', 'per:place_of_birth', 'per:place_of_death', 'org:founded_by', 'per:religion'] no_relation_label_idx = label_list.index("no_relation") label_indices = list(range(len(label_list))) label_indices.remove(no_relation_label_idx) return metrics.f1_score(labels, preds, average="micro", labels=label_indices) * 100.0 def klue_re_auprc(probs, labels): 
"""KLUE-RE AUPRC (with no_relation)""" labels = np.eye(30)[labels] score = np.zeros((30,)) for c in range(30): targets_c = labels.take([c], axis=1).ravel() preds_c = probs.take([c], axis=1).ravel() precision, recall, _ = metrics.precision_recall_curve( targets_c, preds_c) score[c] = metrics.auc(recall, precision) return np.average(score) * 100.0 def compute_metrics(pred): """ validation์„ ์œ„ํ•œ metrics function """ labels = pred.label_ids preds = pred.predictions.argmax(-1) probs = pred.predictions # calculate accuracy using sklearn's function f1 = klue_re_micro_f1(preds, labels) auprc = klue_re_auprc(probs, labels) acc = metrics.accuracy_score(labels, preds) # ๋ฆฌ๋”๋ณด๋“œ ํ‰๊ฐ€์—๋Š” ํฌํ•จ๋˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. return { 'micro f1 score': f1, 'auprc': auprc, 'accuracy': acc, } def label_to_num(label): num_label = [] with open('dict_label_to_num.pkl', 'rb') as f: dict_label_to_num = pickle.load(f) for v in label: num_label.append(dict_label_to_num[v]) return num_label ###################################### # DATA LOADER RELATED ###################################### # TODO: bucketed_batch_indicies ์ˆ˜์ •ํ•˜๊ธฐ! def bucketed_batch_indices( src_lens: Union[List[int], np.ndarray, pd.Series], batch_size: int, max_pad_len: int ) -> List[List[int]]: batch_map = defaultdict(list) batch_indices_list = [] src_len_min = np.min(src_lens) for idx, src_len in enumerate(src_lens): src = (src_len - src_len_min + 1) // max_pad_len batch_map[src].append(idx) for _, value in batch_map.items(): batch_indices_list += [value[i:i+batch_size] for i in range(0, len(value), batch_size)] random.shuffle(batch_indices_list) return batch_indices_list # TODO: collate_fn ํ˜„ ๋ฐ์ดํ„ฐ์…‹์— ๋งž์ถฐ ์ˆ˜์ •ํ•˜๊ธฐ! 
# we don't need collate_fn # since huggingface automatically creates default collate function def collate_fn( batched_samples: List[Tuple[List[int], List[int], List[int]]], pad_token_idx ) -> Tuple[torch.Tensor, torch.Tensor]: PAD = pad_token_idx B = len(batched_samples) batched_samples = sorted( batched_samples, key=lambda x: x["src_idx"], reverse=True) src_sentences = [] src_attention = [] tgt_sentences = [] for sample in batched_samples: src_sentences.append(torch.tensor(sample["src_idx"])) src_attention.append(torch.tensor(sample["src_attn"])) tgt_sentences.append(torch.tensor(sample["tgt_idx"])) src_sentences = torch.nn.utils.rnn.pad_sequence( src_sentences, padding_value=PAD, batch_first=True) src_attention = torch.nn.utils.rnn.pad_sequence( src_attention, padding_value=0, batch_first=True) tgt_sentences = torch.nn.utils.rnn.pad_sequence( tgt_sentences, padding_value=PAD, batch_first=True) assert src_sentences.size(0) == B and tgt_sentences.size(0) == B assert src_sentences.dtype == torch.long and tgt_sentences.dtype == torch.long return {'src_idx': src_sentences, 'src_attn': src_attention, 'tgt_idx': tgt_sentences} def send_web_hooks(text, url): # Please keep your url privately payload = {"text": text} requests.post(url, json=payload) def get_model_and_tokenizer(args, **kwargs): # Here, you also need to define tokenizer as well # since the type of tokenizer depends on the model NUM_LABELS = 30 model = None tokenizer = None if args.model.lower().count("klue/bert-base"): MODEL_NAME = "klue/bert-base" LOAD_MODEL = args.load_model if args.load_model else MODEL_NAME if args.load_model: model = AutoModelForSequenceClassification.from_pretrained( args.load_model) try: tokenizer = AutoTokenizer.from_pretrained(LOAD_MODEL) except: # in case, pretrained tokenizer doesn't exists tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) else: model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = NUM_LABELS model = 
AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) elif args.model.lower().count("ke-t5"): MODEL_NAME = "" CLASS_NAME = "T5EncoderForSequenceClassificationMeanSubmeanObjmean" if args.model.count("large"): MODEL_NAME = 'KETI-AIR/ke-t5-large' elif args.model.count("small"): MODEL_NAME = 'KETI-AIR/ke-t5-small' else: MODEL_NAME = 'KETI-AIR/ke-t5-base' if args.load_model: LOAD_MODEL = args.load_model config = AutoConfig.from_pretrained(LOAD_MODEL) else: LOAD_MODEL = MODEL_NAME config = AutoConfig.from_pretrained(LOAD_MODEL) config.num_labels = 30 config.dropout_p = 0.4 config.focal_loss = False model_module = getattr(import_module("model.models"), CLASS_NAME) model = model_module(config) try: tokenizer = T5Tokenizer.from_pretrained(LOAD_MODEL) except: tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME) elif args.model.lower().count("klue/roberta"): MODEL_NAME = "" if args.model.count("large"): MODEL_NAME = "klue/roberta-large" elif args.model.count("small"): MODEL_NAME = "klue/roberta-small" else: MODEL_NAME = "klue/roberta-base" LOAD_MODEL = args.load_model if args.load_model else MODEL_NAME if args.load_model: model = AutoModelForSequenceClassification.from_pretrained(LOAD_MODEL) try: tokenizer = AutoTokenizer.from_pretrained(LOAD_MODEL) except: # in case, pretrained tokenizer doesn't exists tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, model_input_names = ["input_ids", "attention_mask"]) else: model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = 30 model = AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, model_input_names = ["input_ids", "attention_mask"]) else: # If the model is not specified above, # it first tries to look up for "model/{args.model}.py" and "model/models.py" file. # Additional setting should be provided with kwargs above. 
# If still not found, it tries to find the model in huggingface # with AutoModelForSequenceClassification & AutoTokenizer try: model_module = getattr(import_module( "model."+args.model), args.model) model = model_module() tokenizer = model.tokenizer except: try: model_module = getattr( import_module("model.models"), args.model) model = model_module() tokenizer = model.tokenizer except: MODEL_NAME = args.model model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = 30 model = AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) return model, tokenizer def train(args, verbose: bool=True): # Create folder SAVE_DIR = increment_path(os.path.join(args.model_dir, args.name)) LOG_DIR = increment_path(os.path.join(args.log_dir, args.name)) if verbose: print("save_dir:", SAVE_DIR) print("log_dir: ", LOG_DIR) # Device setting use_cuda = torch.cuda.is_available() device = torch.device('cuda:0' if use_cuda else 'cpu') if verbose: print('training on:', device) # Load Model & Tokenizer # because the type of tokenizer depends on the model model, tokenizer = get_model_and_tokenizer(args) model.to(device) # Build Dataset try: dataset_module = getattr(import_module( "dataset."+args.dataset), args.dataset) except: dataset_module = getattr(import_module( "dataset.dataset"), args.dataset) MAX_SEQ_LEN = args.max_seq_len NUM_LABELS = args.num_labels # max_length sometimes refers to maximum length in text generation # so, I used MAX_SEQ_LEN to indicate maximum input length fed to the model dataset, train_dataset, valid_dataset = None, None, None if args.val_file == "y": train_dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, valid=False, dropna=True) valid_dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, valid=True, dropna=True) if 
verbose: print("="*20) print("train-valid split to train:", len(train_dataset), "valid:", len(valid_dataset)) print("train:") print(train_dataset.data['label'].value_counts()) print("test:") print(valid_dataset.data['label'].value_counts()) print("="*20) else: dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, dropna=True) # dataset must return # dict containing at least {'input_ids', 'attention_mask', 'labels'} # in order to work properly # TODO: Build Preprocessor preprocessor = None if args.preprocessor: try: preprocessor_module = getattr(import_module( "dataset.preprocessor."+args.preprocessor), args.preprocessor) except: preprocessor_module = getattr(import_module( "dataset.preprocessor.preprocessors"), args.preprocessor) preprocessor = preprocessor_module() # Build Augmentation # unk, RE, RI, ... # this result will be fixed for entire training steps augmentation = None if args.augmentation: try: augmentation_module = getattr(import_module( "dataset.augmentation."+args.augmentation), args.augmentation) except: augmentation_module = getattr(import_module( "dataset.augmentation.augmentations"), args.augmentation) augmentation = augmentation_module(tokenizer) added_token_num = 0 if dataset is not None: dataset.set_tokenizer(tokenizer) dataset.set_preprocessor(preprocessor) if augmentation is not None: dataset.set_augmentation(augmentation) dataset.preprocess() added_token_num = dataset.get_special_token_num() if train_dataset is not None: train_dataset.set_tokenizer(tokenizer) train_dataset.set_preprocessor(preprocessor) if augmentation is not None: train_dataset.set_augmentation(augmentation) train_dataset.preprocess() added_token_num = train_dataset.get_special_token_num() if valid_dataset is not None: valid_dataset.set_tokenizer(tokenizer) valid_dataset.set_preprocessor(preprocessor) # if augmentation is not None: # valid_dataset.set_augmentation(augmentation) valid_dataset.preprocess() 
added_token_num = valid_dataset.get_special_token_num() if added_token_num > 0: model.resize_token_embeddings(tokenizer.vocab_size + added_token_num) # TODO: train-valid split # TODO: do not split (= train with whole data) if val_ratio == 0.0 if args.val_ratio > 0.0 and dataset is not None: train_ids, valid_ids = train_test_split(list(range(len(dataset.data))), test_size=args.val_ratio, stratify=dataset.data['label']) train_dataset = torch.utils.data.Subset(dataset, train_ids) valid_dataset = torch.utils.data.Subset(dataset, valid_ids) if verbose: print("="*20) print("train-valid split to train:", len(train_dataset), "valid:", len(valid_dataset)) print("train:") print(dataset.data['label'].iloc[train_ids].value_counts()) print("test:") print(dataset.data['label'].iloc[valid_ids].value_counts()) print("="*20) # Build DataLoader BATCH_SIZE = args.batch_size VAL_BATCH_SIZE = args.val_batch_size if args.val_batch_size else BATCH_SIZE MAX_PAD_LEN = args.max_pad_len # Train NUM_EPOCHS = args.epochs SAVE_EVERY = args.save_every EVAL_EVERY = args.eval_every LOG_EVERY = args.log_every SAVE_TOTAL_LIMIT = args.save_total_limit LEARNING_RATE = args.lr LR_TYPE = args.lr_type DECAY_RATE = args.lr_weight_decay WARMUP_RATIO = args.lr_warmup_ratio WARMUP_STEPS = args.lr_warmup_steps ADAM_BETA2 = args.lr_adamw_beta2 training_args = TrainingArguments( output_dir=SAVE_DIR, # output directory logging_dir=LOG_DIR, # directory for storing logs save_total_limit=SAVE_TOTAL_LIMIT, # number of total models saved. save_steps=SAVE_EVERY, # model saving step. logging_steps=LOG_EVERY, # log saving step. eval_steps=EVAL_EVERY, # evaluation step. num_train_epochs=NUM_EPOCHS, # total number of training epochs evaluation_strategy='steps', save_strategy='steps', # evaluation strategy to adopt during training # `no` : No evaluation during training. # `steps`: Evaluate every `eval_steps`. # `epoch`: Evaluate every end of epoch. 
load_best_model_at_end=True, per_device_train_batch_size=BATCH_SIZE, # batch size per device during training per_device_eval_batch_size=VAL_BATCH_SIZE, # batch size for evaluation learning_rate=LEARNING_RATE, # learning_rate lr_scheduler_type=LR_TYPE, # linear, cosine, cosine_with_restarts, # polynomial, constant, constant_with_warmup adam_beta2=ADAM_BETA2, # Beta 2 hyperparameter for AdamW warmup_ratio=WARMUP_RATIO, # ratio of warmup steps for learning rate scheduler warmup_steps=WARMUP_STEPS, # number of warmup steps for learning rate scheduler (overrides warmup_ratio) weight_decay=DECAY_RATE, # strength of weight decay ) trainer = None if valid_dataset is not None: trainer = Trainer( model=model, tokenizer=tokenizer, args=training_args, # training arguments, defined above train_dataset=train_dataset, # training dataset eval_dataset=valid_dataset, # evaluation dataset compute_metrics=compute_metrics # define metrics function ) else: trainer = Trainer( model=model, tokenizer=tokenizer, args=training_args, # training arguments, defined above train_dataset=dataset, # training dataset eval_dataset=dataset, # evaluate with the whole dataset compute_metrics=compute_metrics # define metrics function ) # train model trainer.train() model.save_pretrained(os.path.join(SAVE_DIR, args.name + "_final")) def main(): parser = argparse.ArgumentParser( description="Train the model with the arguments given") args = parse_arguments(parser) v = args.verbose == "y" if args.seed is not None: set_all_seeds(args.seed, verbose=v) train(args, verbose=v) if __name__ == '__main__': main()
parse_arguments
identifier_name
train.py
import os import random import re import json import glob import multiprocessing import argparse from importlib import import_module from pathlib import Path from typing import Union, List, Tuple from collections import defaultdict import pickle as pickle import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from sklearn import metrics from sklearn.model_selection import train_test_split import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import Dataset, DataLoader from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification from transformers import Trainer, TrainingArguments from transformers import T5Tokenizer, T5ForConditionalGeneration from transformers.optimization import AdamW from tqdm import tqdm from transformers.utils.dummy_pt_objects import ModalEmbeddings import wandb import requests ###################################### # HELPER FUNCTIONS ###################################### def set_all_seeds(seed, verbose=False): torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(seed) random.seed(seed) if verbose: print("All random seeds set to", seed) def parse_arguments(parser): # Set random seed parser.add_argument('--seed', type=int, default=None, help="random seed (default: None)") parser.add_argument('--verbose', type=str, default="n", choices=["y", "n"], help="verbose (default: n)") # Container environment parser.add_argument('--data_dir', type=str, default=os.environ.get('SM_CHANNEL_TRAIN', '/opt/ml/dataset')) parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR', './saved')) parser.add_argument('--log_dir', type=str, default=os.environ.get('SM_MODEL_DIR', './logs')) parser.add_argument('--name', type=str, default="exp", help='name of the custom model and experiment') 
parser.add_argument('--load_model', type=str, help="Load pretrained model if not None") # Load Dataset and construct DataLoader parser.add_argument('--dataset', type=str, default='BaselineDataset', help="name of dataset (default: BaselineDataset)") parser.add_argument('--additional', type=str, nargs='*', help="list of additional dataset file names") parser.add_argument('--batch_size', metavar='B', type=int, default=1, help="train set batch size (default: 1)") parser.add_argument('--val_file', type=str, choices=["y", "n"], default="n", help="whether to use valid.csv file (default: n)") parser.add_argument('--val_ratio', type=float, default=0.2, help="valid set ratio (default: 0.2)") parser.add_argument('--val_batch_size', metavar='B', type=int, help="valid set batch size (default set to batch_size)") # Preprocessor and Data Augmentation parser.add_argument('--preprocessor', type=str, default='BaselinePreprocessor', help="type of preprocessor (default: BaselinePreprocessor)") parser.add_argument('--augmentation', type=str, help="type of augmentation (default: None)") # Load model and set optimizer parser.add_argument('--model', type=str, default='BaseModel', help="model name (default: BaseModel)") parser.add_argument('--num_labels', type=int, default=30, help="number of labels for classification (default: 30)") parser.add_argument('--optim', type=str, default='AdamW', help="optimizer name (default: AdamW)") parser.add_argument('--momentum', type=float, default=0., help="SGD with momentum (default: 0.0)") # training setup parser.add_argument('--epochs', type=int, metavar='N', default=1, help="number of epochs (default 1)") parser.add_argument('--lr', type=float, default=1e-5, help="learning rate (default: 1e-5)") parser.add_argument('--max_seq_len', type=int, metavar='L', default=256, help="max sequence length (default 256)") parser.add_argument('--max_pad_len', type=int, metavar='L', default=8, help="max padding length for bucketing (default 8)") 
parser.add_argument('--log_every', type=int, metavar='N', default=500, help="log every N steps (default: 500)") parser.add_argument('--eval_every', type=int, metavar='N', default=500, help="evaluation interval for every N steps (default: 500)") parser.add_argument('--save_every', type=int, metavar='N', default=500, help="save model interval for every N steps (default: 500)") parser.add_argument('--save_total_limit', type=int, metavar='N', default=5, help="save total limit (choosing the best eval scores) (default: 5)") # Learning Rate Scheduler group_lr = parser.add_argument_group('lr_scheduler') group_lr.add_argument("--lr_type", type=str, metavar='TYPE', default="constant", help="lr scheduler type (default: constant)") group_lr.add_argument("--lr_weight_decay", type=float, metavar='LAMBDA', default=0.01, help="weight decay rate for AdamW (default: 0.01)") group_lr.add_argument("--lr_gamma", type=float, metavar='GAMMA', default=0.95, help="lr scheduler gamma (default: 0.95)") group_lr.add_argument("--lr_decay_step", type=int, metavar='STEP', default=100, help="lr scheduler decay step (default: 100)") group_lr.add_argument("--lr_warmup_steps", type=int, metavar='N', default=500, help="lr scheduler warmup steps (default: 500)") group_lr.add_argument("--lr_warmup_ratio", type=float, metavar='N', default=0.1, help="lr scheduler warmup ratio (default: 0.1)") group_lr.add_argument("--lr_adamw_beta2", type=float, metavar='BETA2', default=0.99, help="AdamW BETA2 (default: 0.99)") args = parser.parse_args() return args def increment_path(path, overwrite=False): """ Automatically increment path, i.e. runs/exp --> runs/exp0, runs/exp1 etc. Args: path (str or pathlib.Path): f"{model_dir}/{args.name}". overwrite (bool): whether to overwrite or increment path (increment if False). 
Returns: path: new path """ path = Path(path) if (path.exists() and overwrite) or (not path.exists()): if not os.path.exists(str(path).split('/')[0]): os.mkdir(str(path).split('/')[0]) if not path.exists(): os.mkdir(path) return str(path) else: dirs = glob.glob(f"{path}*") matches = [re.search(rf"%s(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] n = max(i) + 1 if i else 2 path = f"{path}{n}" if not os.path.exists(path): os.mkdir(path) return path ###################################### # KLUE SPECIFICS ###################################### def klue_re_micro_f1(preds, labels): """KLUE-RE micro f1 (except no_relation)""" label_list = ['no_relation', 'org:top_members/employees', 'org:members', 'org:product', 'per:title', 'org:alternate_names', 'per:employee_of', 'org:place_of_headquarters', 'per:product', 'org:number_of_employees/members', 'per:children', 'per:place_of_residence', 'per:alternate_names', 'per:other_family', 'per:colleagues', 'per:origin', 'per:siblings', 'per:spouse', 'org:founded', 'org:political/religious_affiliation', 'org:member_of', 'per:parents', 'org:dissolved', 'per:schools_attended', 'per:date_of_death', 'per:date_of_birth', 'per:place_of_birth', 'per:place_of_death', 'org:founded_by', 'per:religion'] no_relation_label_idx = label_list.index("no_relation") label_indices = list(range(len(label_list))) label_indices.remove(no_relation_label_idx) return metrics.f1_score(labels, preds, average="micro", labels=label_indices) * 100.0 def klue_re_auprc(probs, labels): """KLUE-RE AUPRC (with no_relation)""" labels = np.eye(30)[labels] score = np.zeros((30,)) for c in range(30): targets_c = labels.take([c], axis=1).ravel() preds_c = probs.take([c], axis=1).ravel() precision, recall, _ = metrics.precision_recall_curve( targets_c, preds_c) score[c] = metrics.auc(recall, precision) return np.average(score) * 100.0 def compute_metrics(pred):
bel = [] with open('dict_label_to_num.pkl', 'rb') as f: dict_label_to_num = pickle.load(f) for v in label: num_label.append(dict_label_to_num[v]) return num_label ###################################### # DATA LOADER RELATED ###################################### # TODO: bucketed_batch_indicies ์ˆ˜์ •ํ•˜๊ธฐ! def bucketed_batch_indices( src_lens: Union[List[int], np.ndarray, pd.Series], batch_size: int, max_pad_len: int ) -> List[List[int]]: batch_map = defaultdict(list) batch_indices_list = [] src_len_min = np.min(src_lens) for idx, src_len in enumerate(src_lens): src = (src_len - src_len_min + 1) // max_pad_len batch_map[src].append(idx) for _, value in batch_map.items(): batch_indices_list += [value[i:i+batch_size] for i in range(0, len(value), batch_size)] random.shuffle(batch_indices_list) return batch_indices_list # TODO: collate_fn ํ˜„ ๋ฐ์ดํ„ฐ์…‹์— ๋งž์ถฐ ์ˆ˜์ •ํ•˜๊ธฐ! # we don't need collate_fn # since huggingface automatically creates default collate function def collate_fn( batched_samples: List[Tuple[List[int], List[int], List[int]]], pad_token_idx ) -> Tuple[torch.Tensor, torch.Tensor]: PAD = pad_token_idx B = len(batched_samples) batched_samples = sorted( batched_samples, key=lambda x: x["src_idx"], reverse=True) src_sentences = [] src_attention = [] tgt_sentences = [] for sample in batched_samples: src_sentences.append(torch.tensor(sample["src_idx"])) src_attention.append(torch.tensor(sample["src_attn"])) tgt_sentences.append(torch.tensor(sample["tgt_idx"])) src_sentences = torch.nn.utils.rnn.pad_sequence( src_sentences, padding_value=PAD, batch_first=True) src_attention = torch.nn.utils.rnn.pad_sequence( src_attention, padding_value=0, batch_first=True) tgt_sentences = torch.nn.utils.rnn.pad_sequence( tgt_sentences, padding_value=PAD, batch_first=True) assert src_sentences.size(0) == B and tgt_sentences.size(0) == B assert src_sentences.dtype == torch.long and tgt_sentences.dtype == torch.long return {'src_idx': src_sentences, 'src_attn': 
src_attention, 'tgt_idx': tgt_sentences} def send_web_hooks(text, url): # Please keep your url privately payload = {"text": text} requests.post(url, json=payload) def get_model_and_tokenizer(args, **kwargs): # Here, you also need to define tokenizer as well # since the type of tokenizer depends on the model NUM_LABELS = 30 model = None tokenizer = None if args.model.lower().count("klue/bert-base"): MODEL_NAME = "klue/bert-base" LOAD_MODEL = args.load_model if args.load_model else MODEL_NAME if args.load_model: model = AutoModelForSequenceClassification.from_pretrained( args.load_model) try: tokenizer = AutoTokenizer.from_pretrained(LOAD_MODEL) except: # in case, pretrained tokenizer doesn't exists tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) else: model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = NUM_LABELS model = AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) elif args.model.lower().count("ke-t5"): MODEL_NAME = "" CLASS_NAME = "T5EncoderForSequenceClassificationMeanSubmeanObjmean" if args.model.count("large"): MODEL_NAME = 'KETI-AIR/ke-t5-large' elif args.model.count("small"): MODEL_NAME = 'KETI-AIR/ke-t5-small' else: MODEL_NAME = 'KETI-AIR/ke-t5-base' if args.load_model: LOAD_MODEL = args.load_model config = AutoConfig.from_pretrained(LOAD_MODEL) else: LOAD_MODEL = MODEL_NAME config = AutoConfig.from_pretrained(LOAD_MODEL) config.num_labels = 30 config.dropout_p = 0.4 config.focal_loss = False model_module = getattr(import_module("model.models"), CLASS_NAME) model = model_module(config) try: tokenizer = T5Tokenizer.from_pretrained(LOAD_MODEL) except: tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME) elif args.model.lower().count("klue/roberta"): MODEL_NAME = "" if args.model.count("large"): MODEL_NAME = "klue/roberta-large" elif args.model.count("small"): MODEL_NAME = "klue/roberta-small" else: MODEL_NAME = "klue/roberta-base" 
LOAD_MODEL = args.load_model if args.load_model else MODEL_NAME if args.load_model: model = AutoModelForSequenceClassification.from_pretrained(LOAD_MODEL) try: tokenizer = AutoTokenizer.from_pretrained(LOAD_MODEL) except: # in case, pretrained tokenizer doesn't exists tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, model_input_names = ["input_ids", "attention_mask"]) else: model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = 30 model = AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, model_input_names = ["input_ids", "attention_mask"]) else: # If the model is not specified above, # it first tries to look up for "model/{args.model}.py" and "model/models.py" file. # Additional setting should be provided with kwargs above. # If still not found, it tries to find the model in huggingface # with AutoModelForSequenceClassification & AutoTokenizer try: model_module = getattr(import_module( "model."+args.model), args.model) model = model_module() tokenizer = model.tokenizer except: try: model_module = getattr( import_module("model.models"), args.model) model = model_module() tokenizer = model.tokenizer except: MODEL_NAME = args.model model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = 30 model = AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) return model, tokenizer def train(args, verbose: bool=True): # Create folder SAVE_DIR = increment_path(os.path.join(args.model_dir, args.name)) LOG_DIR = increment_path(os.path.join(args.log_dir, args.name)) if verbose: print("save_dir:", SAVE_DIR) print("log_dir: ", LOG_DIR) # Device setting use_cuda = torch.cuda.is_available() device = torch.device('cuda:0' if use_cuda else 'cpu') if verbose: print('training on:', device) # Load Model & Tokenizer # because the type of tokenizer depends on the 
model model, tokenizer = get_model_and_tokenizer(args) model.to(device) # Build Dataset try: dataset_module = getattr(import_module( "dataset."+args.dataset), args.dataset) except: dataset_module = getattr(import_module( "dataset.dataset"), args.dataset) MAX_SEQ_LEN = args.max_seq_len NUM_LABELS = args.num_labels # max_length sometimes refers to maximum length in text generation # so, I used MAX_SEQ_LEN to indicate maximum input length fed to the model dataset, train_dataset, valid_dataset = None, None, None if args.val_file == "y": train_dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, valid=False, dropna=True) valid_dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, valid=True, dropna=True) if verbose: print("="*20) print("train-valid split to train:", len(train_dataset), "valid:", len(valid_dataset)) print("train:") print(train_dataset.data['label'].value_counts()) print("test:") print(valid_dataset.data['label'].value_counts()) print("="*20) else: dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, dropna=True) # dataset must return # dict containing at least {'input_ids', 'attention_mask', 'labels'} # in order to work properly # TODO: Build Preprocessor preprocessor = None if args.preprocessor: try: preprocessor_module = getattr(import_module( "dataset.preprocessor."+args.preprocessor), args.preprocessor) except: preprocessor_module = getattr(import_module( "dataset.preprocessor.preprocessors"), args.preprocessor) preprocessor = preprocessor_module() # Build Augmentation # unk, RE, RI, ... 
# this result will be fixed for entire training steps augmentation = None if args.augmentation: try: augmentation_module = getattr(import_module( "dataset.augmentation."+args.augmentation), args.augmentation) except: augmentation_module = getattr(import_module( "dataset.augmentation.augmentations"), args.augmentation) augmentation = augmentation_module(tokenizer) added_token_num = 0 if dataset is not None: dataset.set_tokenizer(tokenizer) dataset.set_preprocessor(preprocessor) if augmentation is not None: dataset.set_augmentation(augmentation) dataset.preprocess() added_token_num = dataset.get_special_token_num() if train_dataset is not None: train_dataset.set_tokenizer(tokenizer) train_dataset.set_preprocessor(preprocessor) if augmentation is not None: train_dataset.set_augmentation(augmentation) train_dataset.preprocess() added_token_num = train_dataset.get_special_token_num() if valid_dataset is not None: valid_dataset.set_tokenizer(tokenizer) valid_dataset.set_preprocessor(preprocessor) # if augmentation is not None: # valid_dataset.set_augmentation(augmentation) valid_dataset.preprocess() added_token_num = valid_dataset.get_special_token_num() if added_token_num > 0: model.resize_token_embeddings(tokenizer.vocab_size + added_token_num) # TODO: train-valid split # TODO: do not split (= train with whole data) if val_ratio == 0.0 if args.val_ratio > 0.0 and dataset is not None: train_ids, valid_ids = train_test_split(list(range(len(dataset.data))), test_size=args.val_ratio, stratify=dataset.data['label']) train_dataset = torch.utils.data.Subset(dataset, train_ids) valid_dataset = torch.utils.data.Subset(dataset, valid_ids) if verbose: print("="*20) print("train-valid split to train:", len(train_dataset), "valid:", len(valid_dataset)) print("train:") print(dataset.data['label'].iloc[train_ids].value_counts()) print("test:") print(dataset.data['label'].iloc[valid_ids].value_counts()) print("="*20) # Build DataLoader BATCH_SIZE = args.batch_size VAL_BATCH_SIZE = 
args.val_batch_size if args.val_batch_size else BATCH_SIZE MAX_PAD_LEN = args.max_pad_len # Train NUM_EPOCHS = args.epochs SAVE_EVERY = args.save_every EVAL_EVERY = args.eval_every LOG_EVERY = args.log_every SAVE_TOTAL_LIMIT = args.save_total_limit LEARNING_RATE = args.lr LR_TYPE = args.lr_type DECAY_RATE = args.lr_weight_decay WARMUP_RATIO = args.lr_warmup_ratio WARMUP_STEPS = args.lr_warmup_steps ADAM_BETA2 = args.lr_adamw_beta2 training_args = TrainingArguments( output_dir=SAVE_DIR, # output directory logging_dir=LOG_DIR, # directory for storing logs save_total_limit=SAVE_TOTAL_LIMIT, # number of total models saved. save_steps=SAVE_EVERY, # model saving step. logging_steps=LOG_EVERY, # log saving step. eval_steps=EVAL_EVERY, # evaluation step. num_train_epochs=NUM_EPOCHS, # total number of training epochs evaluation_strategy='steps', save_strategy='steps', # evaluation strategy to adopt during training # `no` : No evaluation during training. # `steps`: Evaluate every `eval_steps`. # `epoch`: Evaluate every end of epoch. 
load_best_model_at_end=True, per_device_train_batch_size=BATCH_SIZE, # batch size per device during training per_device_eval_batch_size=VAL_BATCH_SIZE, # batch size for evaluation learning_rate=LEARNING_RATE, # learning_rate lr_scheduler_type=LR_TYPE, # linear, cosine, cosine_with_restarts, # polynomial, constant, constant_with_warmup adam_beta2=ADAM_BETA2, # Beta 2 hyperparameter for AdamW warmup_ratio=WARMUP_RATIO, # ratio of warmup steps for learning rate scheduler warmup_steps=WARMUP_STEPS, # number of warmup steps for learning rate scheduler (overrides warmup_ratio) weight_decay=DECAY_RATE, # strength of weight decay ) trainer = None if valid_dataset is not None: trainer = Trainer( model=model, tokenizer=tokenizer, args=training_args, # training arguments, defined above train_dataset=train_dataset, # training dataset eval_dataset=valid_dataset, # evaluation dataset compute_metrics=compute_metrics # define metrics function ) else: trainer = Trainer( model=model, tokenizer=tokenizer, args=training_args, # training arguments, defined above train_dataset=dataset, # training dataset eval_dataset=dataset, # evaluate with the whole dataset compute_metrics=compute_metrics # define metrics function ) # train model trainer.train() model.save_pretrained(os.path.join(SAVE_DIR, args.name + "_final")) def main(): parser = argparse.ArgumentParser( description="Train the model with the arguments given") args = parse_arguments(parser) v = args.verbose == "y" if args.seed is not None: set_all_seeds(args.seed, verbose=v) train(args, verbose=v) if __name__ == '__main__': main()
""" validation์„ ์œ„ํ•œ metrics function """ labels = pred.label_ids preds = pred.predictions.argmax(-1) probs = pred.predictions # calculate accuracy using sklearn's function f1 = klue_re_micro_f1(preds, labels) auprc = klue_re_auprc(probs, labels) acc = metrics.accuracy_score(labels, preds) # ๋ฆฌ๋”๋ณด๋“œ ํ‰๊ฐ€์—๋Š” ํฌํ•จ๋˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. return { 'micro f1 score': f1, 'auprc': auprc, 'accuracy': acc, } def label_to_num(label): num_la
identifier_body
train.py
import os import random import re import json import glob import multiprocessing import argparse from importlib import import_module from pathlib import Path from typing import Union, List, Tuple from collections import defaultdict import pickle as pickle import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from sklearn import metrics from sklearn.model_selection import train_test_split import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import Dataset, DataLoader from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification from transformers import Trainer, TrainingArguments from transformers import T5Tokenizer, T5ForConditionalGeneration from transformers.optimization import AdamW from tqdm import tqdm from transformers.utils.dummy_pt_objects import ModalEmbeddings import wandb import requests ###################################### # HELPER FUNCTIONS ###################################### def set_all_seeds(seed, verbose=False): torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(seed) random.seed(seed) if verbose: print("All random seeds set to", seed) def parse_arguments(parser): # Set random seed parser.add_argument('--seed', type=int, default=None, help="random seed (default: None)") parser.add_argument('--verbose', type=str, default="n", choices=["y", "n"], help="verbose (default: n)") # Container environment parser.add_argument('--data_dir', type=str, default=os.environ.get('SM_CHANNEL_TRAIN', '/opt/ml/dataset')) parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR', './saved')) parser.add_argument('--log_dir', type=str, default=os.environ.get('SM_MODEL_DIR', './logs')) parser.add_argument('--name', type=str, default="exp", help='name of the custom model and experiment') 
parser.add_argument('--load_model', type=str, help="Load pretrained model if not None") # Load Dataset and construct DataLoader parser.add_argument('--dataset', type=str, default='BaselineDataset', help="name of dataset (default: BaselineDataset)") parser.add_argument('--additional', type=str, nargs='*', help="list of additional dataset file names") parser.add_argument('--batch_size', metavar='B', type=int, default=1, help="train set batch size (default: 1)") parser.add_argument('--val_file', type=str, choices=["y", "n"], default="n", help="whether to use valid.csv file (default: n)") parser.add_argument('--val_ratio', type=float, default=0.2, help="valid set ratio (default: 0.2)") parser.add_argument('--val_batch_size', metavar='B', type=int, help="valid set batch size (default set to batch_size)") # Preprocessor and Data Augmentation parser.add_argument('--preprocessor', type=str, default='BaselinePreprocessor', help="type of preprocessor (default: BaselinePreprocessor)") parser.add_argument('--augmentation', type=str, help="type of augmentation (default: None)") # Load model and set optimizer parser.add_argument('--model', type=str, default='BaseModel', help="model name (default: BaseModel)") parser.add_argument('--num_labels', type=int, default=30, help="number of labels for classification (default: 30)") parser.add_argument('--optim', type=str, default='AdamW', help="optimizer name (default: AdamW)") parser.add_argument('--momentum', type=float, default=0., help="SGD with momentum (default: 0.0)") # training setup parser.add_argument('--epochs', type=int, metavar='N', default=1, help="number of epochs (default 1)") parser.add_argument('--lr', type=float, default=1e-5, help="learning rate (default: 1e-5)") parser.add_argument('--max_seq_len', type=int, metavar='L', default=256, help="max sequence length (default 256)") parser.add_argument('--max_pad_len', type=int, metavar='L', default=8, help="max padding length for bucketing (default 8)") 
parser.add_argument('--log_every', type=int, metavar='N', default=500, help="log every N steps (default: 500)") parser.add_argument('--eval_every', type=int, metavar='N', default=500, help="evaluation interval for every N steps (default: 500)") parser.add_argument('--save_every', type=int, metavar='N', default=500, help="save model interval for every N steps (default: 500)") parser.add_argument('--save_total_limit', type=int, metavar='N', default=5, help="save total limit (choosing the best eval scores) (default: 5)") # Learning Rate Scheduler group_lr = parser.add_argument_group('lr_scheduler') group_lr.add_argument("--lr_type", type=str, metavar='TYPE', default="constant", help="lr scheduler type (default: constant)") group_lr.add_argument("--lr_weight_decay", type=float, metavar='LAMBDA', default=0.01, help="weight decay rate for AdamW (default: 0.01)") group_lr.add_argument("--lr_gamma", type=float, metavar='GAMMA', default=0.95, help="lr scheduler gamma (default: 0.95)") group_lr.add_argument("--lr_decay_step", type=int, metavar='STEP', default=100, help="lr scheduler decay step (default: 100)") group_lr.add_argument("--lr_warmup_steps", type=int, metavar='N', default=500, help="lr scheduler warmup steps (default: 500)") group_lr.add_argument("--lr_warmup_ratio", type=float, metavar='N', default=0.1, help="lr scheduler warmup ratio (default: 0.1)") group_lr.add_argument("--lr_adamw_beta2", type=float, metavar='BETA2', default=0.99, help="AdamW BETA2 (default: 0.99)") args = parser.parse_args() return args def increment_path(path, overwrite=False): """ Automatically increment path, i.e. runs/exp --> runs/exp0, runs/exp1 etc. Args: path (str or pathlib.Path): f"{model_dir}/{args.name}". overwrite (bool): whether to overwrite or increment path (increment if False). 
Returns: path: new path """ path = Path(path) if (path.exists() and overwrite) or (not path.exists()): if not os.path.exists(str(path).split('/')[0]): os.mkdir(str(path).split('/')[0]) if not path.exists(): os.mkdir(path) return str(path) else:
###################################### # KLUE SPECIFICS ###################################### def klue_re_micro_f1(preds, labels): """KLUE-RE micro f1 (except no_relation)""" label_list = ['no_relation', 'org:top_members/employees', 'org:members', 'org:product', 'per:title', 'org:alternate_names', 'per:employee_of', 'org:place_of_headquarters', 'per:product', 'org:number_of_employees/members', 'per:children', 'per:place_of_residence', 'per:alternate_names', 'per:other_family', 'per:colleagues', 'per:origin', 'per:siblings', 'per:spouse', 'org:founded', 'org:political/religious_affiliation', 'org:member_of', 'per:parents', 'org:dissolved', 'per:schools_attended', 'per:date_of_death', 'per:date_of_birth', 'per:place_of_birth', 'per:place_of_death', 'org:founded_by', 'per:religion'] no_relation_label_idx = label_list.index("no_relation") label_indices = list(range(len(label_list))) label_indices.remove(no_relation_label_idx) return metrics.f1_score(labels, preds, average="micro", labels=label_indices) * 100.0 def klue_re_auprc(probs, labels): """KLUE-RE AUPRC (with no_relation)""" labels = np.eye(30)[labels] score = np.zeros((30,)) for c in range(30): targets_c = labels.take([c], axis=1).ravel() preds_c = probs.take([c], axis=1).ravel() precision, recall, _ = metrics.precision_recall_curve( targets_c, preds_c) score[c] = metrics.auc(recall, precision) return np.average(score) * 100.0 def compute_metrics(pred): """ validation์„ ์œ„ํ•œ metrics function """ labels = pred.label_ids preds = pred.predictions.argmax(-1) probs = pred.predictions # calculate accuracy using sklearn's function f1 = klue_re_micro_f1(preds, labels) auprc = klue_re_auprc(probs, labels) acc = metrics.accuracy_score(labels, preds) # ๋ฆฌ๋”๋ณด๋“œ ํ‰๊ฐ€์—๋Š” ํฌํ•จ๋˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. 
return { 'micro f1 score': f1, 'auprc': auprc, 'accuracy': acc, } def label_to_num(label): num_label = [] with open('dict_label_to_num.pkl', 'rb') as f: dict_label_to_num = pickle.load(f) for v in label: num_label.append(dict_label_to_num[v]) return num_label ###################################### # DATA LOADER RELATED ###################################### # TODO: bucketed_batch_indicies ์ˆ˜์ •ํ•˜๊ธฐ! def bucketed_batch_indices( src_lens: Union[List[int], np.ndarray, pd.Series], batch_size: int, max_pad_len: int ) -> List[List[int]]: batch_map = defaultdict(list) batch_indices_list = [] src_len_min = np.min(src_lens) for idx, src_len in enumerate(src_lens): src = (src_len - src_len_min + 1) // max_pad_len batch_map[src].append(idx) for _, value in batch_map.items(): batch_indices_list += [value[i:i+batch_size] for i in range(0, len(value), batch_size)] random.shuffle(batch_indices_list) return batch_indices_list # TODO: collate_fn ํ˜„ ๋ฐ์ดํ„ฐ์…‹์— ๋งž์ถฐ ์ˆ˜์ •ํ•˜๊ธฐ! # we don't need collate_fn # since huggingface automatically creates default collate function def collate_fn( batched_samples: List[Tuple[List[int], List[int], List[int]]], pad_token_idx ) -> Tuple[torch.Tensor, torch.Tensor]: PAD = pad_token_idx B = len(batched_samples) batched_samples = sorted( batched_samples, key=lambda x: x["src_idx"], reverse=True) src_sentences = [] src_attention = [] tgt_sentences = [] for sample in batched_samples: src_sentences.append(torch.tensor(sample["src_idx"])) src_attention.append(torch.tensor(sample["src_attn"])) tgt_sentences.append(torch.tensor(sample["tgt_idx"])) src_sentences = torch.nn.utils.rnn.pad_sequence( src_sentences, padding_value=PAD, batch_first=True) src_attention = torch.nn.utils.rnn.pad_sequence( src_attention, padding_value=0, batch_first=True) tgt_sentences = torch.nn.utils.rnn.pad_sequence( tgt_sentences, padding_value=PAD, batch_first=True) assert src_sentences.size(0) == B and tgt_sentences.size(0) == B assert src_sentences.dtype == 
torch.long and tgt_sentences.dtype == torch.long return {'src_idx': src_sentences, 'src_attn': src_attention, 'tgt_idx': tgt_sentences} def send_web_hooks(text, url): # Please keep your url privately payload = {"text": text} requests.post(url, json=payload) def get_model_and_tokenizer(args, **kwargs): # Here, you also need to define tokenizer as well # since the type of tokenizer depends on the model NUM_LABELS = 30 model = None tokenizer = None if args.model.lower().count("klue/bert-base"): MODEL_NAME = "klue/bert-base" LOAD_MODEL = args.load_model if args.load_model else MODEL_NAME if args.load_model: model = AutoModelForSequenceClassification.from_pretrained( args.load_model) try: tokenizer = AutoTokenizer.from_pretrained(LOAD_MODEL) except: # in case, pretrained tokenizer doesn't exists tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) else: model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = NUM_LABELS model = AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) elif args.model.lower().count("ke-t5"): MODEL_NAME = "" CLASS_NAME = "T5EncoderForSequenceClassificationMeanSubmeanObjmean" if args.model.count("large"): MODEL_NAME = 'KETI-AIR/ke-t5-large' elif args.model.count("small"): MODEL_NAME = 'KETI-AIR/ke-t5-small' else: MODEL_NAME = 'KETI-AIR/ke-t5-base' if args.load_model: LOAD_MODEL = args.load_model config = AutoConfig.from_pretrained(LOAD_MODEL) else: LOAD_MODEL = MODEL_NAME config = AutoConfig.from_pretrained(LOAD_MODEL) config.num_labels = 30 config.dropout_p = 0.4 config.focal_loss = False model_module = getattr(import_module("model.models"), CLASS_NAME) model = model_module(config) try: tokenizer = T5Tokenizer.from_pretrained(LOAD_MODEL) except: tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME) elif args.model.lower().count("klue/roberta"): MODEL_NAME = "" if args.model.count("large"): MODEL_NAME = "klue/roberta-large" elif 
args.model.count("small"): MODEL_NAME = "klue/roberta-small" else: MODEL_NAME = "klue/roberta-base" LOAD_MODEL = args.load_model if args.load_model else MODEL_NAME if args.load_model: model = AutoModelForSequenceClassification.from_pretrained(LOAD_MODEL) try: tokenizer = AutoTokenizer.from_pretrained(LOAD_MODEL) except: # in case, pretrained tokenizer doesn't exists tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, model_input_names = ["input_ids", "attention_mask"]) else: model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = 30 model = AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, model_input_names = ["input_ids", "attention_mask"]) else: # If the model is not specified above, # it first tries to look up for "model/{args.model}.py" and "model/models.py" file. # Additional setting should be provided with kwargs above. # If still not found, it tries to find the model in huggingface # with AutoModelForSequenceClassification & AutoTokenizer try: model_module = getattr(import_module( "model."+args.model), args.model) model = model_module() tokenizer = model.tokenizer except: try: model_module = getattr( import_module("model.models"), args.model) model = model_module() tokenizer = model.tokenizer except: MODEL_NAME = args.model model_config = AutoConfig.from_pretrained(MODEL_NAME) model_config.num_labels = 30 model = AutoModelForSequenceClassification.from_pretrained( MODEL_NAME, config=model_config) tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) return model, tokenizer def train(args, verbose: bool=True): # Create folder SAVE_DIR = increment_path(os.path.join(args.model_dir, args.name)) LOG_DIR = increment_path(os.path.join(args.log_dir, args.name)) if verbose: print("save_dir:", SAVE_DIR) print("log_dir: ", LOG_DIR) # Device setting use_cuda = torch.cuda.is_available() device = torch.device('cuda:0' if use_cuda else 'cpu') if verbose: 
print('training on:', device) # Load Model & Tokenizer # because the type of tokenizer depends on the model model, tokenizer = get_model_and_tokenizer(args) model.to(device) # Build Dataset try: dataset_module = getattr(import_module( "dataset."+args.dataset), args.dataset) except: dataset_module = getattr(import_module( "dataset.dataset"), args.dataset) MAX_SEQ_LEN = args.max_seq_len NUM_LABELS = args.num_labels # max_length sometimes refers to maximum length in text generation # so, I used MAX_SEQ_LEN to indicate maximum input length fed to the model dataset, train_dataset, valid_dataset = None, None, None if args.val_file == "y": train_dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, valid=False, dropna=True) valid_dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, valid=True, dropna=True) if verbose: print("="*20) print("train-valid split to train:", len(train_dataset), "valid:", len(valid_dataset)) print("train:") print(train_dataset.data['label'].value_counts()) print("test:") print(valid_dataset.data['label'].value_counts()) print("="*20) else: dataset = dataset_module( data_dir=args.data_dir, max_length=MAX_SEQ_LEN, num_labels=NUM_LABELS, additional=args.additional, dropna=True) # dataset must return # dict containing at least {'input_ids', 'attention_mask', 'labels'} # in order to work properly # TODO: Build Preprocessor preprocessor = None if args.preprocessor: try: preprocessor_module = getattr(import_module( "dataset.preprocessor."+args.preprocessor), args.preprocessor) except: preprocessor_module = getattr(import_module( "dataset.preprocessor.preprocessors"), args.preprocessor) preprocessor = preprocessor_module() # Build Augmentation # unk, RE, RI, ... 
# this result will be fixed for entire training steps augmentation = None if args.augmentation: try: augmentation_module = getattr(import_module( "dataset.augmentation."+args.augmentation), args.augmentation) except: augmentation_module = getattr(import_module( "dataset.augmentation.augmentations"), args.augmentation) augmentation = augmentation_module(tokenizer) added_token_num = 0 if dataset is not None: dataset.set_tokenizer(tokenizer) dataset.set_preprocessor(preprocessor) if augmentation is not None: dataset.set_augmentation(augmentation) dataset.preprocess() added_token_num = dataset.get_special_token_num() if train_dataset is not None: train_dataset.set_tokenizer(tokenizer) train_dataset.set_preprocessor(preprocessor) if augmentation is not None: train_dataset.set_augmentation(augmentation) train_dataset.preprocess() added_token_num = train_dataset.get_special_token_num() if valid_dataset is not None: valid_dataset.set_tokenizer(tokenizer) valid_dataset.set_preprocessor(preprocessor) # if augmentation is not None: # valid_dataset.set_augmentation(augmentation) valid_dataset.preprocess() added_token_num = valid_dataset.get_special_token_num() if added_token_num > 0: model.resize_token_embeddings(tokenizer.vocab_size + added_token_num) # TODO: train-valid split # TODO: do not split (= train with whole data) if val_ratio == 0.0 if args.val_ratio > 0.0 and dataset is not None: train_ids, valid_ids = train_test_split(list(range(len(dataset.data))), test_size=args.val_ratio, stratify=dataset.data['label']) train_dataset = torch.utils.data.Subset(dataset, train_ids) valid_dataset = torch.utils.data.Subset(dataset, valid_ids) if verbose: print("="*20) print("train-valid split to train:", len(train_dataset), "valid:", len(valid_dataset)) print("train:") print(dataset.data['label'].iloc[train_ids].value_counts()) print("test:") print(dataset.data['label'].iloc[valid_ids].value_counts()) print("="*20) # Build DataLoader BATCH_SIZE = args.batch_size VAL_BATCH_SIZE = 
args.val_batch_size if args.val_batch_size else BATCH_SIZE MAX_PAD_LEN = args.max_pad_len # Train NUM_EPOCHS = args.epochs SAVE_EVERY = args.save_every EVAL_EVERY = args.eval_every LOG_EVERY = args.log_every SAVE_TOTAL_LIMIT = args.save_total_limit LEARNING_RATE = args.lr LR_TYPE = args.lr_type DECAY_RATE = args.lr_weight_decay WARMUP_RATIO = args.lr_warmup_ratio WARMUP_STEPS = args.lr_warmup_steps ADAM_BETA2 = args.lr_adamw_beta2 training_args = TrainingArguments( output_dir=SAVE_DIR, # output directory logging_dir=LOG_DIR, # directory for storing logs save_total_limit=SAVE_TOTAL_LIMIT, # number of total models saved. save_steps=SAVE_EVERY, # model saving step. logging_steps=LOG_EVERY, # log saving step. eval_steps=EVAL_EVERY, # evaluation step. num_train_epochs=NUM_EPOCHS, # total number of training epochs evaluation_strategy='steps', save_strategy='steps', # evaluation strategy to adopt during training # `no` : No evaluation during training. # `steps`: Evaluate every `eval_steps`. # `epoch`: Evaluate every end of epoch. 
load_best_model_at_end=True, per_device_train_batch_size=BATCH_SIZE, # batch size per device during training per_device_eval_batch_size=VAL_BATCH_SIZE, # batch size for evaluation learning_rate=LEARNING_RATE, # learning_rate lr_scheduler_type=LR_TYPE, # linear, cosine, cosine_with_restarts, # polynomial, constant, constant_with_warmup adam_beta2=ADAM_BETA2, # Beta 2 hyperparameter for AdamW warmup_ratio=WARMUP_RATIO, # ratio of warmup steps for learning rate scheduler warmup_steps=WARMUP_STEPS, # number of warmup steps for learning rate scheduler (overrides warmup_ratio) weight_decay=DECAY_RATE, # strength of weight decay ) trainer = None if valid_dataset is not None: trainer = Trainer( model=model, tokenizer=tokenizer, args=training_args, # training arguments, defined above train_dataset=train_dataset, # training dataset eval_dataset=valid_dataset, # evaluation dataset compute_metrics=compute_metrics # define metrics function ) else: trainer = Trainer( model=model, tokenizer=tokenizer, args=training_args, # training arguments, defined above train_dataset=dataset, # training dataset eval_dataset=dataset, # evaluate with the whole dataset compute_metrics=compute_metrics # define metrics function ) # train model trainer.train() model.save_pretrained(os.path.join(SAVE_DIR, args.name + "_final")) def main(): parser = argparse.ArgumentParser( description="Train the model with the arguments given") args = parse_arguments(parser) v = args.verbose == "y" if args.seed is not None: set_all_seeds(args.seed, verbose=v) train(args, verbose=v) if __name__ == '__main__': main()
dirs = glob.glob(f"{path}*") matches = [re.search(rf"%s(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] n = max(i) + 1 if i else 2 path = f"{path}{n}" if not os.path.exists(path): os.mkdir(path) return path
conditional_block
InternetMonitorClient.ts
// smithy-typescript generated code import { getHostHeaderPlugin, HostHeaderInputConfig, HostHeaderResolvedConfig, resolveHostHeaderConfig, } from "@aws-sdk/middleware-host-header"; import { getLoggerPlugin } from "@aws-sdk/middleware-logger"; import { getRecursionDetectionPlugin } from "@aws-sdk/middleware-recursion-detection"; import { AwsAuthInputConfig, AwsAuthResolvedConfig, getAwsAuthPlugin, resolveAwsAuthConfig, } from "@aws-sdk/middleware-signing"; import { getUserAgentPlugin, resolveUserAgentConfig, UserAgentInputConfig, UserAgentResolvedConfig, } from "@aws-sdk/middleware-user-agent"; import { Credentials as __Credentials } from "@aws-sdk/types";
import { getRetryPlugin, resolveRetryConfig, RetryInputConfig, RetryResolvedConfig } from "@smithy/middleware-retry"; import { HttpHandler as __HttpHandler } from "@smithy/protocol-http"; import { Client as __Client, DefaultsMode as __DefaultsMode, SmithyConfiguration as __SmithyConfiguration, SmithyResolvedConfiguration as __SmithyResolvedConfiguration, } from "@smithy/smithy-client"; import { BodyLengthCalculator as __BodyLengthCalculator, CheckOptionalClientConfig as __CheckOptionalClientConfig, Checksum as __Checksum, ChecksumConstructor as __ChecksumConstructor, Decoder as __Decoder, Encoder as __Encoder, EndpointV2 as __EndpointV2, Hash as __Hash, HashConstructor as __HashConstructor, HttpHandlerOptions as __HttpHandlerOptions, Logger as __Logger, Provider as __Provider, Provider, StreamCollector as __StreamCollector, UrlParser as __UrlParser, UserAgent as __UserAgent, } from "@smithy/types"; import { CreateMonitorCommandInput, CreateMonitorCommandOutput } from "./commands/CreateMonitorCommand"; import { DeleteMonitorCommandInput, DeleteMonitorCommandOutput } from "./commands/DeleteMonitorCommand"; import { GetHealthEventCommandInput, GetHealthEventCommandOutput } from "./commands/GetHealthEventCommand"; import { GetMonitorCommandInput, GetMonitorCommandOutput } from "./commands/GetMonitorCommand"; import { ListHealthEventsCommandInput, ListHealthEventsCommandOutput } from "./commands/ListHealthEventsCommand"; import { ListMonitorsCommandInput, ListMonitorsCommandOutput } from "./commands/ListMonitorsCommand"; import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "./commands/ListTagsForResourceCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; import { UpdateMonitorCommandInput, UpdateMonitorCommandOutput } from "./commands/UpdateMonitorCommand"; import { 
ClientInputEndpointParameters, ClientResolvedEndpointParameters, EndpointParameters, resolveClientEndpointParameters, } from "./endpoint/EndpointParameters"; import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; import { resolveRuntimeExtensions, RuntimeExtension, RuntimeExtensionsConfig } from "./runtimeExtensions"; export { __Client }; /** * @public */ export type ServiceInputTypes = | CreateMonitorCommandInput | DeleteMonitorCommandInput | GetHealthEventCommandInput | GetMonitorCommandInput | ListHealthEventsCommandInput | ListMonitorsCommandInput | ListTagsForResourceCommandInput | TagResourceCommandInput | UntagResourceCommandInput | UpdateMonitorCommandInput; /** * @public */ export type ServiceOutputTypes = | CreateMonitorCommandOutput | DeleteMonitorCommandOutput | GetHealthEventCommandOutput | GetMonitorCommandOutput | ListHealthEventsCommandOutput | ListMonitorsCommandOutput | ListTagsForResourceCommandOutput | TagResourceCommandOutput | UntagResourceCommandOutput | UpdateMonitorCommandOutput; /** * @public */ export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { /** * The HTTP handler to use. Fetch in browser and Https in Nodejs. */ requestHandler?: __HttpHandler; /** * A constructor for a class implementing the {@link @smithy/types#ChecksumConstructor} interface * that computes the SHA-256 HMAC or checksum of a string or binary buffer. * @internal */ sha256?: __ChecksumConstructor | __HashConstructor; /** * The function that will be used to convert strings into HTTP endpoints. * @internal */ urlParser?: __UrlParser; /** * A function that can calculate the length of a request body. * @internal */ bodyLengthChecker?: __BodyLengthCalculator; /** * A function that converts a stream into an array of bytes. * @internal */ streamCollector?: __StreamCollector; /** * The function that will be used to convert a base64-encoded string to a byte array. 
* @internal */ base64Decoder?: __Decoder; /** * The function that will be used to convert binary data to a base64-encoded string. * @internal */ base64Encoder?: __Encoder; /** * The function that will be used to convert a UTF8-encoded string to a byte array. * @internal */ utf8Decoder?: __Decoder; /** * The function that will be used to convert binary data to a UTF-8 encoded string. * @internal */ utf8Encoder?: __Encoder; /** * The runtime environment. * @internal */ runtime?: string; /** * Disable dynamically changing the endpoint of the client based on the hostPrefix * trait of an operation. */ disableHostPrefix?: boolean; /** * Unique service identifier. * @internal */ serviceId?: string; /** * Enables IPv6/IPv4 dualstack endpoint. */ useDualstackEndpoint?: boolean | __Provider<boolean>; /** * Enables FIPS compatible endpoints. */ useFipsEndpoint?: boolean | __Provider<boolean>; /** * The AWS region to which this client will send requests */ region?: string | __Provider<string>; /** * Default credentials provider; Not available in browser runtime. * @internal */ credentialDefaultProvider?: (input: any) => __Provider<__Credentials>; /** * The provider populating default tracking information to be sent with `user-agent`, `x-amz-user-agent` header * @internal */ defaultUserAgentProvider?: Provider<__UserAgent>; /** * Value for how many times a request will be made at most in case of retry. */ maxAttempts?: number | __Provider<number>; /** * Specifies which retry algorithm to use. */ retryMode?: string | __Provider<string>; /** * Optional logger for logging debug/info/warn/error. */ logger?: __Logger; /** * Optional extensions */ extensions?: RuntimeExtension[]; /** * The {@link @smithy/smithy-client#DefaultsMode} that will be used to determine how certain default configuration options are resolved in the SDK. 
*/ defaultsMode?: __DefaultsMode | __Provider<__DefaultsMode>; } /** * @public */ export type InternetMonitorClientConfigType = Partial<__SmithyConfiguration<__HttpHandlerOptions>> & ClientDefaults & RegionInputConfig & EndpointInputConfig<EndpointParameters> & RetryInputConfig & HostHeaderInputConfig & AwsAuthInputConfig & UserAgentInputConfig & ClientInputEndpointParameters; /** * @public * * The configuration interface of InternetMonitorClient class constructor that set the region, credentials and other options. */ export interface InternetMonitorClientConfig extends InternetMonitorClientConfigType {} /** * @public */ export type InternetMonitorClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOptions> & Required<ClientDefaults> & RuntimeExtensionsConfig & RegionResolvedConfig & EndpointResolvedConfig<EndpointParameters> & RetryResolvedConfig & HostHeaderResolvedConfig & AwsAuthResolvedConfig & UserAgentResolvedConfig & ClientResolvedEndpointParameters; /** * @public * * The resolved configuration interface of InternetMonitorClient class. This is resolved and normalized from the {@link InternetMonitorClientConfig | constructor configuration interface}. */ export interface InternetMonitorClientResolvedConfig extends InternetMonitorClientResolvedConfigType {} /** * @public * <p>Amazon CloudWatch Internet Monitor provides visibility into how internet issues impact the performance and availability * between your applications hosted on Amazon Web Services and your end users. It can reduce the time it takes for you to diagnose * internet issues from days to minutes. Internet Monitor uses the connectivity data that Amazon Web Services captures from its global * networking footprint to calculate a baseline of performance and availability for internet traffic. This * is the same data that Amazon Web Services uses to monitor internet uptime and availability. 
With those measurements * as a baseline, Internet Monitor raises awareness for you when there are significant problems for your * end users in the different geographic locations where your application runs.</p> * <p>Internet Monitor publishes internet measurements to CloudWatch Logs and CloudWatch Metrics, * to easily support using CloudWatch tools with health information for geographies and networks specific to your application. * Internet Monitor sends health events to Amazon EventBridge so that you can set up notifications. If an issue is caused by the Amazon Web Services network, * you also automatically receive an Amazon Web Services Health Dashboard notification with the steps that Amazon Web Services is taking to mitigate the problem.</p> * <p>To use Internet Monitor, you create a <i>monitor</i> and associate your application's resources * with it - VPCs, NLBs, CloudFront distributions, or WorkSpaces directories - so Internet Monitor can determine * where your application's internet traffic is. Internet Monitor then provides internet measurements from Amazon Web Services that are specific to * the locations and ASNs (typically, internet service providers or ISPs) that communicate with your application.</p> * <p>For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-InternetMonitor.html">Using Amazon CloudWatch Internet Monitor</a> in the <i>Amazon CloudWatch User Guide</i>.</p> */ export class InternetMonitorClient extends __Client< __HttpHandlerOptions, ServiceInputTypes, ServiceOutputTypes, InternetMonitorClientResolvedConfig > { /** * The resolved configuration of InternetMonitorClient class. This is resolved and normalized from the {@link InternetMonitorClientConfig | constructor configuration interface}. 
*/ readonly config: InternetMonitorClientResolvedConfig; constructor(...[configuration]: __CheckOptionalClientConfig<InternetMonitorClientConfig>) { const _config_0 = __getRuntimeConfig(configuration || {}); const _config_1 = resolveClientEndpointParameters(_config_0); const _config_2 = resolveRegionConfig(_config_1); const _config_3 = resolveEndpointConfig(_config_2); const _config_4 = resolveRetryConfig(_config_3); const _config_5 = resolveHostHeaderConfig(_config_4); const _config_6 = resolveAwsAuthConfig(_config_5); const _config_7 = resolveUserAgentConfig(_config_6); const _config_8 = resolveRuntimeExtensions(_config_7, configuration?.extensions || []); super(_config_8); this.config = _config_8; this.middlewareStack.use(getRetryPlugin(this.config)); this.middlewareStack.use(getContentLengthPlugin(this.config)); this.middlewareStack.use(getHostHeaderPlugin(this.config)); this.middlewareStack.use(getLoggerPlugin(this.config)); this.middlewareStack.use(getRecursionDetectionPlugin(this.config)); this.middlewareStack.use(getAwsAuthPlugin(this.config)); this.middlewareStack.use(getUserAgentPlugin(this.config)); } /** * Destroy underlying resources, like sockets. It's usually not necessary to do this. * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. * Otherwise, sockets might stay open for quite a long time before the server terminates them. */ destroy(): void { super.destroy(); } }
import { RegionInputConfig, RegionResolvedConfig, resolveRegionConfig } from "@smithy/config-resolver"; import { getContentLengthPlugin } from "@smithy/middleware-content-length"; import { EndpointInputConfig, EndpointResolvedConfig, resolveEndpointConfig } from "@smithy/middleware-endpoint";
random_line_split
InternetMonitorClient.ts
// smithy-typescript generated code import { getHostHeaderPlugin, HostHeaderInputConfig, HostHeaderResolvedConfig, resolveHostHeaderConfig, } from "@aws-sdk/middleware-host-header"; import { getLoggerPlugin } from "@aws-sdk/middleware-logger"; import { getRecursionDetectionPlugin } from "@aws-sdk/middleware-recursion-detection"; import { AwsAuthInputConfig, AwsAuthResolvedConfig, getAwsAuthPlugin, resolveAwsAuthConfig, } from "@aws-sdk/middleware-signing"; import { getUserAgentPlugin, resolveUserAgentConfig, UserAgentInputConfig, UserAgentResolvedConfig, } from "@aws-sdk/middleware-user-agent"; import { Credentials as __Credentials } from "@aws-sdk/types"; import { RegionInputConfig, RegionResolvedConfig, resolveRegionConfig } from "@smithy/config-resolver"; import { getContentLengthPlugin } from "@smithy/middleware-content-length"; import { EndpointInputConfig, EndpointResolvedConfig, resolveEndpointConfig } from "@smithy/middleware-endpoint"; import { getRetryPlugin, resolveRetryConfig, RetryInputConfig, RetryResolvedConfig } from "@smithy/middleware-retry"; import { HttpHandler as __HttpHandler } from "@smithy/protocol-http"; import { Client as __Client, DefaultsMode as __DefaultsMode, SmithyConfiguration as __SmithyConfiguration, SmithyResolvedConfiguration as __SmithyResolvedConfiguration, } from "@smithy/smithy-client"; import { BodyLengthCalculator as __BodyLengthCalculator, CheckOptionalClientConfig as __CheckOptionalClientConfig, Checksum as __Checksum, ChecksumConstructor as __ChecksumConstructor, Decoder as __Decoder, Encoder as __Encoder, EndpointV2 as __EndpointV2, Hash as __Hash, HashConstructor as __HashConstructor, HttpHandlerOptions as __HttpHandlerOptions, Logger as __Logger, Provider as __Provider, Provider, StreamCollector as __StreamCollector, UrlParser as __UrlParser, UserAgent as __UserAgent, } from "@smithy/types"; import { CreateMonitorCommandInput, CreateMonitorCommandOutput } from "./commands/CreateMonitorCommand"; import { 
DeleteMonitorCommandInput, DeleteMonitorCommandOutput } from "./commands/DeleteMonitorCommand"; import { GetHealthEventCommandInput, GetHealthEventCommandOutput } from "./commands/GetHealthEventCommand"; import { GetMonitorCommandInput, GetMonitorCommandOutput } from "./commands/GetMonitorCommand"; import { ListHealthEventsCommandInput, ListHealthEventsCommandOutput } from "./commands/ListHealthEventsCommand"; import { ListMonitorsCommandInput, ListMonitorsCommandOutput } from "./commands/ListMonitorsCommand"; import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "./commands/ListTagsForResourceCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; import { UpdateMonitorCommandInput, UpdateMonitorCommandOutput } from "./commands/UpdateMonitorCommand"; import { ClientInputEndpointParameters, ClientResolvedEndpointParameters, EndpointParameters, resolveClientEndpointParameters, } from "./endpoint/EndpointParameters"; import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; import { resolveRuntimeExtensions, RuntimeExtension, RuntimeExtensionsConfig } from "./runtimeExtensions"; export { __Client }; /** * @public */ export type ServiceInputTypes = | CreateMonitorCommandInput | DeleteMonitorCommandInput | GetHealthEventCommandInput | GetMonitorCommandInput | ListHealthEventsCommandInput | ListMonitorsCommandInput | ListTagsForResourceCommandInput | TagResourceCommandInput | UntagResourceCommandInput | UpdateMonitorCommandInput; /** * @public */ export type ServiceOutputTypes = | CreateMonitorCommandOutput | DeleteMonitorCommandOutput | GetHealthEventCommandOutput | GetMonitorCommandOutput | ListHealthEventsCommandOutput | ListMonitorsCommandOutput | ListTagsForResourceCommandOutput | TagResourceCommandOutput | UntagResourceCommandOutput | UpdateMonitorCommandOutput; 
/** * @public */ export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { /** * The HTTP handler to use. Fetch in browser and Https in Nodejs. */ requestHandler?: __HttpHandler; /** * A constructor for a class implementing the {@link @smithy/types#ChecksumConstructor} interface * that computes the SHA-256 HMAC or checksum of a string or binary buffer. * @internal */ sha256?: __ChecksumConstructor | __HashConstructor; /** * The function that will be used to convert strings into HTTP endpoints. * @internal */ urlParser?: __UrlParser; /** * A function that can calculate the length of a request body. * @internal */ bodyLengthChecker?: __BodyLengthCalculator; /** * A function that converts a stream into an array of bytes. * @internal */ streamCollector?: __StreamCollector; /** * The function that will be used to convert a base64-encoded string to a byte array. * @internal */ base64Decoder?: __Decoder; /** * The function that will be used to convert binary data to a base64-encoded string. * @internal */ base64Encoder?: __Encoder; /** * The function that will be used to convert a UTF8-encoded string to a byte array. * @internal */ utf8Decoder?: __Decoder; /** * The function that will be used to convert binary data to a UTF-8 encoded string. * @internal */ utf8Encoder?: __Encoder; /** * The runtime environment. * @internal */ runtime?: string; /** * Disable dynamically changing the endpoint of the client based on the hostPrefix * trait of an operation. */ disableHostPrefix?: boolean; /** * Unique service identifier. * @internal */ serviceId?: string; /** * Enables IPv6/IPv4 dualstack endpoint. */ useDualstackEndpoint?: boolean | __Provider<boolean>; /** * Enables FIPS compatible endpoints. */ useFipsEndpoint?: boolean | __Provider<boolean>; /** * The AWS region to which this client will send requests */ region?: string | __Provider<string>; /** * Default credentials provider; Not available in browser runtime. 
* @internal */ credentialDefaultProvider?: (input: any) => __Provider<__Credentials>; /** * The provider populating default tracking information to be sent with `user-agent`, `x-amz-user-agent` header * @internal */ defaultUserAgentProvider?: Provider<__UserAgent>; /** * Value for how many times a request will be made at most in case of retry. */ maxAttempts?: number | __Provider<number>; /** * Specifies which retry algorithm to use. */ retryMode?: string | __Provider<string>; /** * Optional logger for logging debug/info/warn/error. */ logger?: __Logger; /** * Optional extensions */ extensions?: RuntimeExtension[]; /** * The {@link @smithy/smithy-client#DefaultsMode} that will be used to determine how certain default configuration options are resolved in the SDK. */ defaultsMode?: __DefaultsMode | __Provider<__DefaultsMode>; } /** * @public */ export type InternetMonitorClientConfigType = Partial<__SmithyConfiguration<__HttpHandlerOptions>> & ClientDefaults & RegionInputConfig & EndpointInputConfig<EndpointParameters> & RetryInputConfig & HostHeaderInputConfig & AwsAuthInputConfig & UserAgentInputConfig & ClientInputEndpointParameters; /** * @public * * The configuration interface of InternetMonitorClient class constructor that set the region, credentials and other options. */ export interface InternetMonitorClientConfig extends InternetMonitorClientConfigType {} /** * @public */ export type InternetMonitorClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOptions> & Required<ClientDefaults> & RuntimeExtensionsConfig & RegionResolvedConfig & EndpointResolvedConfig<EndpointParameters> & RetryResolvedConfig & HostHeaderResolvedConfig & AwsAuthResolvedConfig & UserAgentResolvedConfig & ClientResolvedEndpointParameters; /** * @public * * The resolved configuration interface of InternetMonitorClient class. This is resolved and normalized from the {@link InternetMonitorClientConfig | constructor configuration interface}. 
*/ export interface InternetMonitorClientResolvedConfig extends InternetMonitorClientResolvedConfigType {} /** * @public * <p>Amazon CloudWatch Internet Monitor provides visibility into how internet issues impact the performance and availability * between your applications hosted on Amazon Web Services and your end users. It can reduce the time it takes for you to diagnose * internet issues from days to minutes. Internet Monitor uses the connectivity data that Amazon Web Services captures from its global * networking footprint to calculate a baseline of performance and availability for internet traffic. This * is the same data that Amazon Web Services uses to monitor internet uptime and availability. With those measurements * as a baseline, Internet Monitor raises awareness for you when there are significant problems for your * end users in the different geographic locations where your application runs.</p> * <p>Internet Monitor publishes internet measurements to CloudWatch Logs and CloudWatch Metrics, * to easily support using CloudWatch tools with health information for geographies and networks specific to your application. * Internet Monitor sends health events to Amazon EventBridge so that you can set up notifications. If an issue is caused by the Amazon Web Services network, * you also automatically receive an Amazon Web Services Health Dashboard notification with the steps that Amazon Web Services is taking to mitigate the problem.</p> * <p>To use Internet Monitor, you create a <i>monitor</i> and associate your application's resources * with it - VPCs, NLBs, CloudFront distributions, or WorkSpaces directories - so Internet Monitor can determine * where your application's internet traffic is. 
Internet Monitor then provides internet measurements from Amazon Web Services that are specific to * the locations and ASNs (typically, internet service providers or ISPs) that communicate with your application.</p> * <p>For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-InternetMonitor.html">Using Amazon CloudWatch Internet Monitor</a> in the <i>Amazon CloudWatch User Guide</i>.</p> */ export class
extends __Client< __HttpHandlerOptions, ServiceInputTypes, ServiceOutputTypes, InternetMonitorClientResolvedConfig > { /** * The resolved configuration of InternetMonitorClient class. This is resolved and normalized from the {@link InternetMonitorClientConfig | constructor configuration interface}. */ readonly config: InternetMonitorClientResolvedConfig; constructor(...[configuration]: __CheckOptionalClientConfig<InternetMonitorClientConfig>) { const _config_0 = __getRuntimeConfig(configuration || {}); const _config_1 = resolveClientEndpointParameters(_config_0); const _config_2 = resolveRegionConfig(_config_1); const _config_3 = resolveEndpointConfig(_config_2); const _config_4 = resolveRetryConfig(_config_3); const _config_5 = resolveHostHeaderConfig(_config_4); const _config_6 = resolveAwsAuthConfig(_config_5); const _config_7 = resolveUserAgentConfig(_config_6); const _config_8 = resolveRuntimeExtensions(_config_7, configuration?.extensions || []); super(_config_8); this.config = _config_8; this.middlewareStack.use(getRetryPlugin(this.config)); this.middlewareStack.use(getContentLengthPlugin(this.config)); this.middlewareStack.use(getHostHeaderPlugin(this.config)); this.middlewareStack.use(getLoggerPlugin(this.config)); this.middlewareStack.use(getRecursionDetectionPlugin(this.config)); this.middlewareStack.use(getAwsAuthPlugin(this.config)); this.middlewareStack.use(getUserAgentPlugin(this.config)); } /** * Destroy underlying resources, like sockets. It's usually not necessary to do this. * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. * Otherwise, sockets might stay open for quite a long time before the server terminates them. */ destroy(): void { super.destroy(); } }
InternetMonitorClient
identifier_name